arc.c revision 217367
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2009 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache from growing unbounded at these times
 *    we implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it is simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes; rather, they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
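/*
 * Illustrative sketch (not an additional rule, just the pattern implied
 * by the lock ordering above): a thread that already holds an arc list
 * lock may only *try* to take a hash lock, and must skip the buffer
 * rather than block if the attempt fails:
 *
 *	hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock)) {
 *		missed += 1;		(skip this buffer, try the next)
 *		continue;
 *	}
 *	... operate on ab ...
 *	mutex_exit(hash_lock);
 *
 * arc_evict() below uses exactly this pattern.
 */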
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;
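/*
 * For example (worked out from the shift above, not an additional
 * tunable): with arc_shrink_shift = 5 a single reclaim pass attempts to
 * free arc_c >> 5, i.e. 1/32 -- roughly 3% -- of the current target
 * cache size.
 */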
/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN,
    &zfs_mdcomp_disable, 0, "Disable metadata compression");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
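 *
 * As a rough, illustrative sketch of the lifecycle (simplified; the
 * exact transitions are implemented by arc_access() and
 * arc_change_state() below):
 *
 *	new dirty buffer --> ARC_anon
 *	ARC_anon --written, acquires a DVA--> ARC_mru
 *	ARC_mru --hit again--> ARC_mfu
 *	ARC_mru/ARC_mfu --evicted, header kept--> ARC_mru_ghost/ARC_mfu_ghost
 *
 * A hit on a ghost header is what drives the adaptation of the arc_p
 * target when the data is read back in.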
228 */ 229 230#define ARCS_LOCK_PAD CACHE_LINE_SIZE 231struct arcs_lock { 232 kmutex_t arcs_lock; 233#ifdef _KERNEL 234 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))]; 235#endif 236}; 237 238/* 239 * must be power of two for mask use to work 240 * 241 */ 242#define ARC_BUFC_NUMDATALISTS 16 243#define ARC_BUFC_NUMMETADATALISTS 16 244#define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS) 245 246typedef struct arc_state { 247 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 248 uint64_t arcs_size; /* total amount of data in this state */ 249 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */ 250 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE); 251} arc_state_t; 252 253#define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock)) 254 255/* The 6 states: */ 256static arc_state_t ARC_anon; 257static arc_state_t ARC_mru; 258static arc_state_t ARC_mru_ghost; 259static arc_state_t ARC_mfu; 260static arc_state_t ARC_mfu_ghost; 261static arc_state_t ARC_l2c_only; 262 263typedef struct arc_stats { 264 kstat_named_t arcstat_hits; 265 kstat_named_t arcstat_misses; 266 kstat_named_t arcstat_demand_data_hits; 267 kstat_named_t arcstat_demand_data_misses; 268 kstat_named_t arcstat_demand_metadata_hits; 269 kstat_named_t arcstat_demand_metadata_misses; 270 kstat_named_t arcstat_prefetch_data_hits; 271 kstat_named_t arcstat_prefetch_data_misses; 272 kstat_named_t arcstat_prefetch_metadata_hits; 273 kstat_named_t arcstat_prefetch_metadata_misses; 274 kstat_named_t arcstat_mru_hits; 275 kstat_named_t arcstat_mru_ghost_hits; 276 kstat_named_t arcstat_mfu_hits; 277 kstat_named_t arcstat_mfu_ghost_hits; 278 kstat_named_t arcstat_allocated; 279 kstat_named_t arcstat_deleted; 280 kstat_named_t arcstat_stolen; 281 kstat_named_t arcstat_recycle_miss; 282 kstat_named_t arcstat_mutex_miss; 283 kstat_named_t arcstat_evict_skip; 284 kstat_named_t arcstat_evict_l2_cached; 285 kstat_named_t arcstat_evict_l2_eligible; 286 kstat_named_t arcstat_evict_l2_ineligible; 287 kstat_named_t arcstat_hash_elements; 288 kstat_named_t arcstat_hash_elements_max; 289 kstat_named_t arcstat_hash_collisions; 290 kstat_named_t arcstat_hash_chains; 291 kstat_named_t arcstat_hash_chain_max; 292 kstat_named_t arcstat_p; 293 kstat_named_t arcstat_c; 294 kstat_named_t arcstat_c_min; 295 kstat_named_t arcstat_c_max; 296 kstat_named_t arcstat_size; 297 kstat_named_t arcstat_hdr_size; 298 kstat_named_t arcstat_data_size; 299 kstat_named_t arcstat_other_size; 300 kstat_named_t arcstat_l2_hits; 301 kstat_named_t arcstat_l2_misses; 302 kstat_named_t arcstat_l2_feeds; 303 kstat_named_t arcstat_l2_rw_clash; 304 kstat_named_t arcstat_l2_read_bytes; 305 kstat_named_t arcstat_l2_write_bytes; 306 kstat_named_t arcstat_l2_writes_sent; 307 kstat_named_t arcstat_l2_writes_done; 308 kstat_named_t arcstat_l2_writes_error; 309 kstat_named_t arcstat_l2_writes_hdr_miss; 310 kstat_named_t arcstat_l2_evict_lock_retry; 311 kstat_named_t arcstat_l2_evict_reading; 312 kstat_named_t arcstat_l2_free_on_write; 313 kstat_named_t arcstat_l2_abort_lowmem; 314 kstat_named_t arcstat_l2_cksum_bad; 315 kstat_named_t arcstat_l2_io_error; 316 kstat_named_t arcstat_l2_size; 317 kstat_named_t arcstat_l2_hdr_size; 318 kstat_named_t arcstat_memory_throttle_count; 319 kstat_named_t arcstat_l2_write_trylock_fail; 320 kstat_named_t arcstat_l2_write_passed_headroom; 321 kstat_named_t arcstat_l2_write_spa_mismatch; 322 kstat_named_t arcstat_l2_write_in_l2; 323 kstat_named_t 
arcstat_l2_write_hdr_io_in_progress; 324 kstat_named_t arcstat_l2_write_not_cacheable; 325 kstat_named_t arcstat_l2_write_full; 326 kstat_named_t arcstat_l2_write_buffer_iter; 327 kstat_named_t arcstat_l2_write_pios; 328 kstat_named_t arcstat_l2_write_buffer_bytes_scanned; 329 kstat_named_t arcstat_l2_write_buffer_list_iter; 330 kstat_named_t arcstat_l2_write_buffer_list_null_iter; 331} arc_stats_t; 332 333static arc_stats_t arc_stats = { 334 { "hits", KSTAT_DATA_UINT64 }, 335 { "misses", KSTAT_DATA_UINT64 }, 336 { "demand_data_hits", KSTAT_DATA_UINT64 }, 337 { "demand_data_misses", KSTAT_DATA_UINT64 }, 338 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 339 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 340 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 341 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 342 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 343 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 344 { "mru_hits", KSTAT_DATA_UINT64 }, 345 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 346 { "mfu_hits", KSTAT_DATA_UINT64 }, 347 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 348 { "allocated", KSTAT_DATA_UINT64 }, 349 { "deleted", KSTAT_DATA_UINT64 }, 350 { "stolen", KSTAT_DATA_UINT64 }, 351 { "recycle_miss", KSTAT_DATA_UINT64 }, 352 { "mutex_miss", KSTAT_DATA_UINT64 }, 353 { "evict_skip", KSTAT_DATA_UINT64 }, 354 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 355 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 356 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 357 { "hash_elements", KSTAT_DATA_UINT64 }, 358 { "hash_elements_max", KSTAT_DATA_UINT64 }, 359 { "hash_collisions", KSTAT_DATA_UINT64 }, 360 { "hash_chains", KSTAT_DATA_UINT64 }, 361 { "hash_chain_max", KSTAT_DATA_UINT64 }, 362 { "p", KSTAT_DATA_UINT64 }, 363 { "c", KSTAT_DATA_UINT64 }, 364 { "c_min", KSTAT_DATA_UINT64 }, 365 { "c_max", KSTAT_DATA_UINT64 }, 366 { "size", KSTAT_DATA_UINT64 }, 367 { "hdr_size", KSTAT_DATA_UINT64 }, 368 { "data_size", KSTAT_DATA_UINT64 }, 369 { "other_size", KSTAT_DATA_UINT64 }, 370 { "l2_hits", KSTAT_DATA_UINT64 }, 371 { "l2_misses", KSTAT_DATA_UINT64 }, 372 { "l2_feeds", KSTAT_DATA_UINT64 }, 373 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 374 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 375 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 376 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 377 { "l2_writes_done", KSTAT_DATA_UINT64 }, 378 { "l2_writes_error", KSTAT_DATA_UINT64 }, 379 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 380 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 381 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 382 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 383 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 384 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 385 { "l2_io_error", KSTAT_DATA_UINT64 }, 386 { "l2_size", KSTAT_DATA_UINT64 }, 387 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 388 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 389 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 }, 390 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 }, 391 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 }, 392 { "l2_write_in_l2", KSTAT_DATA_UINT64 }, 393 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 }, 394 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 }, 395 { "l2_write_full", KSTAT_DATA_UINT64 }, 396 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 }, 397 { "l2_write_pios", KSTAT_DATA_UINT64 }, 398 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 }, 399 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 }, 400 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 } 401}; 402 403#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 404 405#define ARCSTAT_INCR(stat, 
val) \ 406 atomic_add_64(&arc_stats.stat.value.ui64, (val)); 407 408#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 409#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 410 411#define ARCSTAT_MAX(stat, val) { \ 412 uint64_t m; \ 413 while ((val) > (m = arc_stats.stat.value.ui64) && \ 414 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 415 continue; \ 416} 417 418#define ARCSTAT_MAXSTAT(stat) \ 419 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 420 421/* 422 * We define a macro to allow ARC hits/misses to be easily broken down by 423 * two separate conditions, giving a total of four different subtypes for 424 * each of hits and misses (so eight statistics total). 425 */ 426#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 427 if (cond1) { \ 428 if (cond2) { \ 429 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 430 } else { \ 431 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 432 } \ 433 } else { \ 434 if (cond2) { \ 435 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 436 } else { \ 437 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 438 } \ 439 } 440 441kstat_t *arc_ksp; 442static arc_state_t *arc_anon; 443static arc_state_t *arc_mru; 444static arc_state_t *arc_mru_ghost; 445static arc_state_t *arc_mfu; 446static arc_state_t *arc_mfu_ghost; 447static arc_state_t *arc_l2c_only; 448 449/* 450 * There are several ARC variables that are critical to export as kstats -- 451 * but we don't want to have to grovel around in the kstat whenever we wish to 452 * manipulate them. For these variables, we therefore define them to be in 453 * terms of the statistic variable. This assures that we are not introducing 454 * the possibility of inconsistency by having shadow copies of the variables, 455 * while still allowing the code to be readable. 
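 *
 * As an illustration, arc_size below is simply
 * arc_stats.arcstat_size.value.ui64, so a statement such as
 *
 *	atomic_add_64(&arc_size, space);
 *
 * updates the exported kstat directly and there is no shadow copy that
 * could drift out of sync.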
456 */ 457#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 458#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 459#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 460#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 461#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 462 463static int arc_no_grow; /* Don't try to grow cache size */ 464static uint64_t arc_tempreserve; 465static uint64_t arc_loaned_bytes; 466static uint64_t arc_meta_used; 467static uint64_t arc_meta_limit; 468static uint64_t arc_meta_max = 0; 469SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN, 470 &arc_meta_used, 0, "ARC metadata used"); 471SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN, 472 &arc_meta_limit, 0, "ARC metadata limit"); 473 474typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 475 476typedef struct arc_callback arc_callback_t; 477 478struct arc_callback { 479 void *acb_private; 480 arc_done_func_t *acb_done; 481 arc_buf_t *acb_buf; 482 zio_t *acb_zio_dummy; 483 arc_callback_t *acb_next; 484}; 485 486typedef struct arc_write_callback arc_write_callback_t; 487 488struct arc_write_callback { 489 void *awcb_private; 490 arc_done_func_t *awcb_ready; 491 arc_done_func_t *awcb_done; 492 arc_buf_t *awcb_buf; 493}; 494 495struct arc_buf_hdr { 496 /* protected by hash lock */ 497 dva_t b_dva; 498 uint64_t b_birth; 499 uint64_t b_cksum0; 500 501 kmutex_t b_freeze_lock; 502 zio_cksum_t *b_freeze_cksum; 503 504 arc_buf_hdr_t *b_hash_next; 505 arc_buf_t *b_buf; 506 uint32_t b_flags; 507 uint32_t b_datacnt; 508 509 arc_callback_t *b_acb; 510 kcondvar_t b_cv; 511 512 /* immutable */ 513 arc_buf_contents_t b_type; 514 uint64_t b_size; 515 uint64_t b_spa; 516 517 /* protected by arc state mutex */ 518 arc_state_t *b_state; 519 list_node_t b_arc_node; 520 521 /* updated atomically */ 522 clock_t b_arc_access; 523 524 /* self protecting */ 525 refcount_t b_refcnt; 526 527 l2arc_buf_hdr_t *b_l2hdr; 528 list_node_t b_l2node; 529}; 530 531static arc_buf_t *arc_eviction_list; 532static kmutex_t arc_eviction_mtx; 533static arc_buf_hdr_t arc_eviction_hdr; 534static void arc_get_data_buf(arc_buf_t *buf); 535static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 536static int arc_evict_needed(arc_buf_contents_t type); 537static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes); 538 539static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab); 540 541#define GHOST_STATE(state) \ 542 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 543 (state) == arc_l2c_only) 544 545/* 546 * Private ARC flags. These flags are private ARC only flags that will show up 547 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 548 * be passed in as arc_flags in things like arc_read. However, these flags 549 * should never be passed and should only be set by ARC code. When adding new 550 * public flags, make sure not to smash the private ones. 
551 */ 552 553#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 554#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 555#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 556#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 557#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 558#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 559#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 560#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 561#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 562#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 563#define ARC_STORED (1 << 19) /* has been store()d to */ 564 565#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 566#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 567#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 568#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH) 569#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 570#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 571#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 572#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 573#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 574 (hdr)->b_l2hdr != NULL) 575#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 576#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 577#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 578 579/* 580 * Other sizes 581 */ 582 583#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 584#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 585 586/* 587 * Hash table routines 588 */ 589 590#define HT_LOCK_PAD CACHE_LINE_SIZE 591 592struct ht_lock { 593 kmutex_t ht_lock; 594#ifdef _KERNEL 595 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 596#endif 597}; 598 599#define BUF_LOCKS 256 600typedef struct buf_hash_table { 601 uint64_t ht_mask; 602 arc_buf_hdr_t **ht_table; 603 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE); 604} buf_hash_table_t; 605 606static buf_hash_table_t buf_hash_table; 607 608#define BUF_HASH_INDEX(spa, dva, birth) \ 609 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 610#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 611#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 612#define HDR_LOCK(buf) \ 613 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 614 615uint64_t zfs_crc64_table[256]; 616 617/* 618 * Level 2 ARC 619 */ 620 621#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 622#define L2ARC_HEADROOM 2 /* num of writes */ 623#define L2ARC_FEED_SECS 1 /* caching interval secs */ 624#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 625 626#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 627#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 628 629/* 630 * L2ARC Performance Tunables 631 */ 632uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 633uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 634uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 635uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 636uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 637boolean_t l2arc_noprefetch = B_FALSE; /* don't cache prefetch bufs */ 638boolean_t l2arc_feed_again = B_TRUE; 
/* turbo warmup */ 639boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 640 641SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW, 642 &l2arc_write_max, 0, "max write size"); 643SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW, 644 &l2arc_write_boost, 0, "extra write during warmup"); 645SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW, 646 &l2arc_headroom, 0, "number of dev writes"); 647SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW, 648 &l2arc_feed_secs, 0, "interval seconds"); 649SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW, 650 &l2arc_feed_min_ms, 0, "min interval milliseconds"); 651 652SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW, 653 &l2arc_noprefetch, 0, "don't cache prefetch bufs"); 654SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW, 655 &l2arc_feed_again, 0, "turbo warmup"); 656SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW, 657 &l2arc_norw, 0, "no reads during writes"); 658 659SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD, 660 &ARC_anon.arcs_size, 0, "size of anonymous state"); 661SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD, 662 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of anonymous state"); 663SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD, 664 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of anonymous state"); 665 666SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD, 667 &ARC_mru.arcs_size, 0, "size of mru state"); 668SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD, 669 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state"); 670SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD, 671 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state"); 672 673SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD, 674 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state"); 675SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD, 676 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 677 "size of metadata in mru ghost state"); 678SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD, 679 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 680 "size of data in mru ghost state"); 681 682SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD, 683 &ARC_mfu.arcs_size, 0, "size of mfu state"); 684SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD, 685 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state"); 686SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD, 687 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state"); 688 689SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 690 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state"); 691SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD, 692 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 693 "size of metadata in mfu ghost state"); 694SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD, 695 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 696 "size of data in mfu ghost state"); 697 698SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 699 &ARC_l2c_only.arcs_size, 0, "size of mru state"); 700 701/* 702 * L2ARC Internals 703 */ 704typedef struct l2arc_dev { 705 vdev_t *l2ad_vdev; /* vdev */ 706 spa_t *l2ad_spa; /* spa */ 707 uint64_t l2ad_hand; /* next write location */ 708 uint64_t l2ad_write; /* desired write size, bytes */ 709 uint64_t l2ad_boost; /* warmup write boost, bytes */ 710 uint64_t 
l2ad_start; /* first addr on device */ 711 uint64_t l2ad_end; /* last addr on device */ 712 uint64_t l2ad_evict; /* last addr eviction reached */ 713 boolean_t l2ad_first; /* first sweep through */ 714 boolean_t l2ad_writing; /* currently writing */ 715 list_t *l2ad_buflist; /* buffer list */ 716 list_node_t l2ad_node; /* device list node */ 717} l2arc_dev_t; 718 719static list_t L2ARC_dev_list; /* device list */ 720static list_t *l2arc_dev_list; /* device list pointer */ 721static kmutex_t l2arc_dev_mtx; /* device list mutex */ 722static l2arc_dev_t *l2arc_dev_last; /* last device used */ 723static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 724static list_t L2ARC_free_on_write; /* free after write buf list */ 725static list_t *l2arc_free_on_write; /* free after write list ptr */ 726static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 727static uint64_t l2arc_ndev; /* number of devices */ 728 729typedef struct l2arc_read_callback { 730 arc_buf_t *l2rcb_buf; /* read buffer */ 731 spa_t *l2rcb_spa; /* spa */ 732 blkptr_t l2rcb_bp; /* original blkptr */ 733 zbookmark_t l2rcb_zb; /* original bookmark */ 734 int l2rcb_flags; /* original flags */ 735} l2arc_read_callback_t; 736 737typedef struct l2arc_write_callback { 738 l2arc_dev_t *l2wcb_dev; /* device info */ 739 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 740} l2arc_write_callback_t; 741 742struct l2arc_buf_hdr { 743 /* protected by arc_buf_hdr mutex */ 744 l2arc_dev_t *b_dev; /* L2ARC device */ 745 uint64_t b_daddr; /* disk address, offset byte */ 746}; 747 748typedef struct l2arc_data_free { 749 /* protected by l2arc_free_on_write_mtx */ 750 void *l2df_data; 751 size_t l2df_size; 752 void (*l2df_func)(void *, size_t); 753 list_node_t l2df_list_node; 754} l2arc_data_free_t; 755 756static kmutex_t l2arc_feed_thr_lock; 757static kcondvar_t l2arc_feed_thr_cv; 758static uint8_t l2arc_thread_exit; 759 760static void l2arc_read_done(zio_t *zio); 761static void l2arc_hdr_stat_add(void); 762static void l2arc_hdr_stat_remove(void); 763 764static uint64_t 765buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 766{ 767 uint8_t *vdva = (uint8_t *)dva; 768 uint64_t crc = -1ULL; 769 int i; 770 771 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 772 773 for (i = 0; i < sizeof (dva_t); i++) 774 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 775 776 crc ^= (spa>>8) ^ birth; 777 778 return (crc); 779} 780 781#define BUF_EMPTY(buf) \ 782 ((buf)->b_dva.dva_word[0] == 0 && \ 783 (buf)->b_dva.dva_word[1] == 0 && \ 784 (buf)->b_birth == 0) 785 786#define BUF_EQUAL(spa, dva, birth, buf) \ 787 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 788 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 789 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 790 791static arc_buf_hdr_t * 792buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 793{ 794 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 795 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 796 arc_buf_hdr_t *buf; 797 798 mutex_enter(hash_lock); 799 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 800 buf = buf->b_hash_next) { 801 if (BUF_EQUAL(spa, dva, birth, buf)) { 802 *lockp = hash_lock; 803 return (buf); 804 } 805 } 806 mutex_exit(hash_lock); 807 *lockp = NULL; 808 return (NULL); 809} 810 811/* 812 * Insert an entry into the hash table. If there is already an element 813 * equal to elem in the hash table, then the already existing element 814 * will be returned and the new element will not be inserted. 
815 * Otherwise returns NULL. 816 */ 817static arc_buf_hdr_t * 818buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 819{ 820 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 821 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 822 arc_buf_hdr_t *fbuf; 823 uint32_t i; 824 825 ASSERT(!HDR_IN_HASH_TABLE(buf)); 826 *lockp = hash_lock; 827 mutex_enter(hash_lock); 828 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 829 fbuf = fbuf->b_hash_next, i++) { 830 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 831 return (fbuf); 832 } 833 834 buf->b_hash_next = buf_hash_table.ht_table[idx]; 835 buf_hash_table.ht_table[idx] = buf; 836 buf->b_flags |= ARC_IN_HASH_TABLE; 837 838 /* collect some hash table performance data */ 839 if (i > 0) { 840 ARCSTAT_BUMP(arcstat_hash_collisions); 841 if (i == 1) 842 ARCSTAT_BUMP(arcstat_hash_chains); 843 844 ARCSTAT_MAX(arcstat_hash_chain_max, i); 845 } 846 847 ARCSTAT_BUMP(arcstat_hash_elements); 848 ARCSTAT_MAXSTAT(arcstat_hash_elements); 849 850 return (NULL); 851} 852 853static void 854buf_hash_remove(arc_buf_hdr_t *buf) 855{ 856 arc_buf_hdr_t *fbuf, **bufp; 857 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 858 859 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 860 ASSERT(HDR_IN_HASH_TABLE(buf)); 861 862 bufp = &buf_hash_table.ht_table[idx]; 863 while ((fbuf = *bufp) != buf) { 864 ASSERT(fbuf != NULL); 865 bufp = &fbuf->b_hash_next; 866 } 867 *bufp = buf->b_hash_next; 868 buf->b_hash_next = NULL; 869 buf->b_flags &= ~ARC_IN_HASH_TABLE; 870 871 /* collect some hash table performance data */ 872 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 873 874 if (buf_hash_table.ht_table[idx] && 875 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 876 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 877} 878 879/* 880 * Global data structures and functions for the buf kmem cache. 881 */ 882static kmem_cache_t *hdr_cache; 883static kmem_cache_t *buf_cache; 884 885static void 886buf_fini(void) 887{ 888 int i; 889 890 kmem_free(buf_hash_table.ht_table, 891 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 892 for (i = 0; i < BUF_LOCKS; i++) 893 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 894 kmem_cache_destroy(hdr_cache); 895 kmem_cache_destroy(buf_cache); 896} 897 898/* 899 * Constructor callback - called when the cache is empty 900 * and a new buf is requested. 901 */ 902/* ARGSUSED */ 903static int 904hdr_cons(void *vbuf, void *unused, int kmflag) 905{ 906 arc_buf_hdr_t *buf = vbuf; 907 908 bzero(buf, sizeof (arc_buf_hdr_t)); 909 refcount_create(&buf->b_refcnt); 910 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 911 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 912 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 913 914 return (0); 915} 916 917/* ARGSUSED */ 918static int 919buf_cons(void *vbuf, void *unused, int kmflag) 920{ 921 arc_buf_t *buf = vbuf; 922 923 bzero(buf, sizeof (arc_buf_t)); 924 rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL); 925 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 926 927 return (0); 928} 929 930/* 931 * Destructor callback - called when a cached buf is 932 * no longer required. 
933 */ 934/* ARGSUSED */ 935static void 936hdr_dest(void *vbuf, void *unused) 937{ 938 arc_buf_hdr_t *buf = vbuf; 939 940 refcount_destroy(&buf->b_refcnt); 941 cv_destroy(&buf->b_cv); 942 mutex_destroy(&buf->b_freeze_lock); 943 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS); 944} 945 946/* ARGSUSED */ 947static void 948buf_dest(void *vbuf, void *unused) 949{ 950 arc_buf_t *buf = vbuf; 951 952 rw_destroy(&buf->b_lock); 953 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 954} 955 956/* 957 * Reclaim callback -- invoked when memory is low. 958 */ 959/* ARGSUSED */ 960static void 961hdr_recl(void *unused) 962{ 963 dprintf("hdr_recl called\n"); 964 /* 965 * umem calls the reclaim func when we destroy the buf cache, 966 * which is after we do arc_fini(). 967 */ 968 if (!arc_dead) 969 cv_signal(&arc_reclaim_thr_cv); 970} 971 972static void 973buf_init(void) 974{ 975 uint64_t *ct; 976 uint64_t hsize = 1ULL << 12; 977 int i, j; 978 979 /* 980 * The hash table is big enough to fill all of physical memory 981 * with an average 64K block size. The table will take up 982 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 983 */ 984 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE) 985 hsize <<= 1; 986retry: 987 buf_hash_table.ht_mask = hsize - 1; 988 buf_hash_table.ht_table = 989 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 990 if (buf_hash_table.ht_table == NULL) { 991 ASSERT(hsize > (1ULL << 8)); 992 hsize >>= 1; 993 goto retry; 994 } 995 996 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 997 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 998 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 999 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1000 1001 for (i = 0; i < 256; i++) 1002 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1003 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1004 1005 for (i = 0; i < BUF_LOCKS; i++) { 1006 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1007 NULL, MUTEX_DEFAULT, NULL); 1008 } 1009} 1010 1011#define ARC_MINTIME (hz>>4) /* 62 ms */ 1012 1013static void 1014arc_cksum_verify(arc_buf_t *buf) 1015{ 1016 zio_cksum_t zc; 1017 1018 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1019 return; 1020 1021 mutex_enter(&buf->b_hdr->b_freeze_lock); 1022 if (buf->b_hdr->b_freeze_cksum == NULL || 1023 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 1024 mutex_exit(&buf->b_hdr->b_freeze_lock); 1025 return; 1026 } 1027 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1028 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 1029 panic("buffer modified while frozen!"); 1030 mutex_exit(&buf->b_hdr->b_freeze_lock); 1031} 1032 1033static int 1034arc_cksum_equal(arc_buf_t *buf) 1035{ 1036 zio_cksum_t zc; 1037 int equal; 1038 1039 mutex_enter(&buf->b_hdr->b_freeze_lock); 1040 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 1041 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 1042 mutex_exit(&buf->b_hdr->b_freeze_lock); 1043 1044 return (equal); 1045} 1046 1047static void 1048arc_cksum_compute(arc_buf_t *buf, boolean_t force) 1049{ 1050 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 1051 return; 1052 1053 mutex_enter(&buf->b_hdr->b_freeze_lock); 1054 if (buf->b_hdr->b_freeze_cksum != NULL) { 1055 mutex_exit(&buf->b_hdr->b_freeze_lock); 1056 return; 1057 } 1058 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 1059 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 1060 buf->b_hdr->b_freeze_cksum); 1061 mutex_exit(&buf->b_hdr->b_freeze_lock); 1062} 1063 1064void 
1065arc_buf_thaw(arc_buf_t *buf) 1066{ 1067 if (zfs_flags & ZFS_DEBUG_MODIFY) { 1068 if (buf->b_hdr->b_state != arc_anon) 1069 panic("modifying non-anon buffer!"); 1070 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 1071 panic("modifying buffer while i/o in progress!"); 1072 arc_cksum_verify(buf); 1073 } 1074 1075 mutex_enter(&buf->b_hdr->b_freeze_lock); 1076 if (buf->b_hdr->b_freeze_cksum != NULL) { 1077 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1078 buf->b_hdr->b_freeze_cksum = NULL; 1079 } 1080 mutex_exit(&buf->b_hdr->b_freeze_lock); 1081} 1082 1083void 1084arc_buf_freeze(arc_buf_t *buf) 1085{ 1086 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1087 return; 1088 1089 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 1090 buf->b_hdr->b_state == arc_anon); 1091 arc_cksum_compute(buf, B_FALSE); 1092} 1093 1094static void 1095get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock) 1096{ 1097 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth); 1098 1099 if (ab->b_type == ARC_BUFC_METADATA) 1100 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1); 1101 else { 1102 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1); 1103 buf_hashid += ARC_BUFC_NUMMETADATALISTS; 1104 } 1105 1106 *list = &state->arcs_lists[buf_hashid]; 1107 *lock = ARCS_LOCK(state, buf_hashid); 1108} 1109 1110 1111static void 1112add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1113{ 1114 1115 ASSERT(MUTEX_HELD(hash_lock)); 1116 1117 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 1118 (ab->b_state != arc_anon)) { 1119 uint64_t delta = ab->b_size * ab->b_datacnt; 1120 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 1121 list_t *list; 1122 kmutex_t *lock; 1123 1124 get_buf_info(ab, ab->b_state, &list, &lock); 1125 ASSERT(!MUTEX_HELD(lock)); 1126 mutex_enter(lock); 1127 ASSERT(list_link_active(&ab->b_arc_node)); 1128 list_remove(list, ab); 1129 if (GHOST_STATE(ab->b_state)) { 1130 ASSERT3U(ab->b_datacnt, ==, 0); 1131 ASSERT3P(ab->b_buf, ==, NULL); 1132 delta = ab->b_size; 1133 } 1134 ASSERT(delta > 0); 1135 ASSERT3U(*size, >=, delta); 1136 atomic_add_64(size, -delta); 1137 mutex_exit(lock); 1138 /* remove the prefetch flag if we get a reference */ 1139 if (ab->b_flags & ARC_PREFETCH) 1140 ab->b_flags &= ~ARC_PREFETCH; 1141 } 1142} 1143 1144static int 1145remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 1146{ 1147 int cnt; 1148 arc_state_t *state = ab->b_state; 1149 1150 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 1151 ASSERT(!GHOST_STATE(state)); 1152 1153 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1154 (state != arc_anon)) { 1155 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1156 list_t *list; 1157 kmutex_t *lock; 1158 1159 get_buf_info(ab, state, &list, &lock); 1160 ASSERT(!MUTEX_HELD(lock)); 1161 mutex_enter(lock); 1162 ASSERT(!list_link_active(&ab->b_arc_node)); 1163 list_insert_head(list, ab); 1164 ASSERT(ab->b_datacnt > 0); 1165 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1166 mutex_exit(lock); 1167 } 1168 return (cnt); 1169} 1170 1171/* 1172 * Move the supplied buffer to the indicated state. The mutex 1173 * for the buffer must be held by the caller. 
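 *
 * A minimal, illustrative call (the real call sites are arc_access()
 * and the eviction paths): when a buffer sitting on the arc_mru list is
 * referenced a second time, it is promoted with something like
 *
 *	arc_change_state(arc_mfu, ab, hash_lock);
 *
 * which moves its size accounting from arc_mru to arc_mfu under the
 * appropriate per-list lock.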
1174 */ 1175static void 1176arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1177{ 1178 arc_state_t *old_state = ab->b_state; 1179 int64_t refcnt = refcount_count(&ab->b_refcnt); 1180 uint64_t from_delta, to_delta; 1181 list_t *list; 1182 kmutex_t *lock; 1183 1184 ASSERT(MUTEX_HELD(hash_lock)); 1185 ASSERT(new_state != old_state); 1186 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1187 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1188 1189 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1190 1191 /* 1192 * If this buffer is evictable, transfer it from the 1193 * old state list to the new state list. 1194 */ 1195 if (refcnt == 0) { 1196 if (old_state != arc_anon) { 1197 int use_mutex; 1198 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1199 1200 get_buf_info(ab, old_state, &list, &lock); 1201 use_mutex = !MUTEX_HELD(lock); 1202 if (use_mutex) 1203 mutex_enter(lock); 1204 1205 ASSERT(list_link_active(&ab->b_arc_node)); 1206 list_remove(list, ab); 1207 1208 /* 1209 * If prefetching out of the ghost cache, 1210 * we will have a non-null datacnt. 1211 */ 1212 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1213 /* ghost elements have a ghost size */ 1214 ASSERT(ab->b_buf == NULL); 1215 from_delta = ab->b_size; 1216 } 1217 ASSERT3U(*size, >=, from_delta); 1218 atomic_add_64(size, -from_delta); 1219 1220 if (use_mutex) 1221 mutex_exit(lock); 1222 } 1223 if (new_state != arc_anon) { 1224 int use_mutex; 1225 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1226 1227 get_buf_info(ab, new_state, &list, &lock); 1228 use_mutex = !MUTEX_HELD(lock); 1229 if (use_mutex) 1230 mutex_enter(lock); 1231 1232 list_insert_head(list, ab); 1233 1234 /* ghost elements have a ghost size */ 1235 if (GHOST_STATE(new_state)) { 1236 ASSERT(ab->b_datacnt == 0); 1237 ASSERT(ab->b_buf == NULL); 1238 to_delta = ab->b_size; 1239 } 1240 atomic_add_64(size, to_delta); 1241 1242 if (use_mutex) 1243 mutex_exit(lock); 1244 } 1245 } 1246 1247 ASSERT(!BUF_EMPTY(ab)); 1248 if (new_state == arc_anon) { 1249 buf_hash_remove(ab); 1250 } 1251 1252 /* adjust state sizes */ 1253 if (to_delta) 1254 atomic_add_64(&new_state->arcs_size, to_delta); 1255 if (from_delta) { 1256 ASSERT3U(old_state->arcs_size, >=, from_delta); 1257 atomic_add_64(&old_state->arcs_size, -from_delta); 1258 } 1259 ab->b_state = new_state; 1260 1261 /* adjust l2arc hdr stats */ 1262 if (new_state == arc_l2c_only) 1263 l2arc_hdr_stat_add(); 1264 else if (old_state == arc_l2c_only) 1265 l2arc_hdr_stat_remove(); 1266} 1267 1268void 1269arc_space_consume(uint64_t space, arc_space_type_t type) 1270{ 1271 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1272 1273 switch (type) { 1274 case ARC_SPACE_DATA: 1275 ARCSTAT_INCR(arcstat_data_size, space); 1276 break; 1277 case ARC_SPACE_OTHER: 1278 ARCSTAT_INCR(arcstat_other_size, space); 1279 break; 1280 case ARC_SPACE_HDRS: 1281 ARCSTAT_INCR(arcstat_hdr_size, space); 1282 break; 1283 case ARC_SPACE_L2HDRS: 1284 ARCSTAT_INCR(arcstat_l2_hdr_size, space); 1285 break; 1286 } 1287 1288 atomic_add_64(&arc_meta_used, space); 1289 atomic_add_64(&arc_size, space); 1290} 1291 1292void 1293arc_space_return(uint64_t space, arc_space_type_t type) 1294{ 1295 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 1296 1297 switch (type) { 1298 case ARC_SPACE_DATA: 1299 ARCSTAT_INCR(arcstat_data_size, -space); 1300 break; 1301 case ARC_SPACE_OTHER: 1302 ARCSTAT_INCR(arcstat_other_size, -space); 1303 break; 1304 case ARC_SPACE_HDRS: 1305 ARCSTAT_INCR(arcstat_hdr_size, -space); 1306 break; 1307 case 
ARC_SPACE_L2HDRS: 1308 ARCSTAT_INCR(arcstat_l2_hdr_size, -space); 1309 break; 1310 } 1311 1312 ASSERT(arc_meta_used >= space); 1313 if (arc_meta_max < arc_meta_used) 1314 arc_meta_max = arc_meta_used; 1315 atomic_add_64(&arc_meta_used, -space); 1316 ASSERT(arc_size >= space); 1317 atomic_add_64(&arc_size, -space); 1318} 1319 1320void * 1321arc_data_buf_alloc(uint64_t size) 1322{ 1323 if (arc_evict_needed(ARC_BUFC_DATA)) 1324 cv_signal(&arc_reclaim_thr_cv); 1325 atomic_add_64(&arc_size, size); 1326 return (zio_data_buf_alloc(size)); 1327} 1328 1329void 1330arc_data_buf_free(void *buf, uint64_t size) 1331{ 1332 zio_data_buf_free(buf, size); 1333 ASSERT(arc_size >= size); 1334 atomic_add_64(&arc_size, -size); 1335} 1336 1337arc_buf_t * 1338arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1339{ 1340 arc_buf_hdr_t *hdr; 1341 arc_buf_t *buf; 1342 1343 ASSERT3U(size, >, 0); 1344 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1345 ASSERT(BUF_EMPTY(hdr)); 1346 hdr->b_size = size; 1347 hdr->b_type = type; 1348 hdr->b_spa = spa_guid(spa); 1349 hdr->b_state = arc_anon; 1350 hdr->b_arc_access = 0; 1351 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1352 buf->b_hdr = hdr; 1353 buf->b_data = NULL; 1354 buf->b_efunc = NULL; 1355 buf->b_private = NULL; 1356 buf->b_next = NULL; 1357 hdr->b_buf = buf; 1358 arc_get_data_buf(buf); 1359 hdr->b_datacnt = 1; 1360 hdr->b_flags = 0; 1361 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1362 (void) refcount_add(&hdr->b_refcnt, tag); 1363 1364 return (buf); 1365} 1366 1367static char *arc_onloan_tag = "onloan"; 1368 1369/* 1370 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 1371 * flight data by arc_tempreserve_space() until they are "returned". Loaned 1372 * buffers must be returned to the arc before they can be used by the DMU or 1373 * freed. 1374 */ 1375arc_buf_t * 1376arc_loan_buf(spa_t *spa, int size) 1377{ 1378 arc_buf_t *buf; 1379 1380 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA); 1381 1382 atomic_add_64(&arc_loaned_bytes, size); 1383 return (buf); 1384} 1385 1386/* 1387 * Return a loaned arc buffer to the arc. 1388 */ 1389void 1390arc_return_buf(arc_buf_t *buf, void *tag) 1391{ 1392 arc_buf_hdr_t *hdr = buf->b_hdr; 1393 1394 ASSERT(hdr->b_state == arc_anon); 1395 ASSERT(buf->b_data != NULL); 1396 VERIFY(refcount_remove(&hdr->b_refcnt, arc_onloan_tag) == 0); 1397 VERIFY(refcount_add(&hdr->b_refcnt, tag) == 1); 1398 1399 atomic_add_64(&arc_loaned_bytes, -hdr->b_size); 1400} 1401 1402static arc_buf_t * 1403arc_buf_clone(arc_buf_t *from) 1404{ 1405 arc_buf_t *buf; 1406 arc_buf_hdr_t *hdr = from->b_hdr; 1407 uint64_t size = hdr->b_size; 1408 1409 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1410 buf->b_hdr = hdr; 1411 buf->b_data = NULL; 1412 buf->b_efunc = NULL; 1413 buf->b_private = NULL; 1414 buf->b_next = hdr->b_buf; 1415 hdr->b_buf = buf; 1416 arc_get_data_buf(buf); 1417 bcopy(from->b_data, buf->b_data, size); 1418 hdr->b_datacnt += 1; 1419 return (buf); 1420} 1421 1422void 1423arc_buf_add_ref(arc_buf_t *buf, void* tag) 1424{ 1425 arc_buf_hdr_t *hdr; 1426 kmutex_t *hash_lock; 1427 1428 /* 1429 * Check to see if this buffer is evicted. Callers 1430 * must verify b_data != NULL to know if the add_ref 1431 * was successful. 
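	 *
	 * An illustrative (hypothetical) caller therefore looks like:
	 *
	 *	arc_buf_add_ref(buf, tag);
	 *	if (buf->b_data == NULL) {
	 *		(the buffer was evicted; re-read it instead)
	 *	}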
1432 */ 1433 rw_enter(&buf->b_lock, RW_READER); 1434 if (buf->b_data == NULL) { 1435 rw_exit(&buf->b_lock); 1436 return; 1437 } 1438 hdr = buf->b_hdr; 1439 ASSERT(hdr != NULL); 1440 hash_lock = HDR_LOCK(hdr); 1441 mutex_enter(hash_lock); 1442 rw_exit(&buf->b_lock); 1443 1444 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1445 add_reference(hdr, hash_lock, tag); 1446 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1447 arc_access(hdr, hash_lock); 1448 mutex_exit(hash_lock); 1449 ARCSTAT_BUMP(arcstat_hits); 1450 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1451 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1452 data, metadata, hits); 1453} 1454 1455/* 1456 * Free the arc data buffer. If it is an l2arc write in progress, 1457 * the buffer is placed on l2arc_free_on_write to be freed later. 1458 */ 1459static void 1460arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1461 void *data, size_t size) 1462{ 1463 if (HDR_L2_WRITING(hdr)) { 1464 l2arc_data_free_t *df; 1465 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1466 df->l2df_data = data; 1467 df->l2df_size = size; 1468 df->l2df_func = free_func; 1469 mutex_enter(&l2arc_free_on_write_mtx); 1470 list_insert_head(l2arc_free_on_write, df); 1471 mutex_exit(&l2arc_free_on_write_mtx); 1472 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1473 } else { 1474 free_func(data, size); 1475 } 1476} 1477 1478static void 1479arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1480{ 1481 arc_buf_t **bufp; 1482 1483 /* free up data associated with the buf */ 1484 if (buf->b_data) { 1485 arc_state_t *state = buf->b_hdr->b_state; 1486 uint64_t size = buf->b_hdr->b_size; 1487 arc_buf_contents_t type = buf->b_hdr->b_type; 1488 1489 arc_cksum_verify(buf); 1490 if (!recycle) { 1491 if (type == ARC_BUFC_METADATA) { 1492 arc_buf_data_free(buf->b_hdr, zio_buf_free, 1493 buf->b_data, size); 1494 arc_space_return(size, ARC_SPACE_DATA); 1495 } else { 1496 ASSERT(type == ARC_BUFC_DATA); 1497 arc_buf_data_free(buf->b_hdr, 1498 zio_data_buf_free, buf->b_data, size); 1499 ARCSTAT_INCR(arcstat_data_size, -size); 1500 atomic_add_64(&arc_size, -size); 1501 } 1502 } 1503 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1504 uint64_t *cnt = &state->arcs_lsize[type]; 1505 1506 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1507 ASSERT(state != arc_anon); 1508 1509 ASSERT3U(*cnt, >=, size); 1510 atomic_add_64(cnt, -size); 1511 } 1512 ASSERT3U(state->arcs_size, >=, size); 1513 atomic_add_64(&state->arcs_size, -size); 1514 buf->b_data = NULL; 1515 ASSERT(buf->b_hdr->b_datacnt > 0); 1516 buf->b_hdr->b_datacnt -= 1; 1517 } 1518 1519 /* only remove the buf if requested */ 1520 if (!all) 1521 return; 1522 1523 /* remove the buf from the hdr list */ 1524 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1525 continue; 1526 *bufp = buf->b_next; 1527 1528 ASSERT(buf->b_efunc == NULL); 1529 1530 /* clean up the buf */ 1531 buf->b_hdr = NULL; 1532 kmem_cache_free(buf_cache, buf); 1533} 1534 1535static void 1536arc_hdr_destroy(arc_buf_hdr_t *hdr) 1537{ 1538 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1539 ASSERT3P(hdr->b_state, ==, arc_anon); 1540 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1541 ASSERT(!(hdr->b_flags & ARC_STORED)); 1542 1543 if (hdr->b_l2hdr != NULL) { 1544 if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1545 /* 1546 * To prevent arc_free() and l2arc_evict() from 1547 * attempting to free the same buffer at the same time, 1548 * a FREE_IN_PROGRESS flag is given to arc_free() to 1549 * give it priority. 
l2arc_evict() can't destroy this 1550 * header while we are waiting on l2arc_buflist_mtx. 1551 * 1552 * The hdr may be removed from l2ad_buflist before we 1553 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1554 */ 1555 mutex_enter(&l2arc_buflist_mtx); 1556 if (hdr->b_l2hdr != NULL) { 1557 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, 1558 hdr); 1559 } 1560 mutex_exit(&l2arc_buflist_mtx); 1561 } else { 1562 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1563 } 1564 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1565 kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1566 if (hdr->b_state == arc_l2c_only) 1567 l2arc_hdr_stat_remove(); 1568 hdr->b_l2hdr = NULL; 1569 } 1570 1571 if (!BUF_EMPTY(hdr)) { 1572 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1573 bzero(&hdr->b_dva, sizeof (dva_t)); 1574 hdr->b_birth = 0; 1575 hdr->b_cksum0 = 0; 1576 } 1577 while (hdr->b_buf) { 1578 arc_buf_t *buf = hdr->b_buf; 1579 1580 if (buf->b_efunc) { 1581 mutex_enter(&arc_eviction_mtx); 1582 rw_enter(&buf->b_lock, RW_WRITER); 1583 ASSERT(buf->b_hdr != NULL); 1584 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1585 hdr->b_buf = buf->b_next; 1586 buf->b_hdr = &arc_eviction_hdr; 1587 buf->b_next = arc_eviction_list; 1588 arc_eviction_list = buf; 1589 rw_exit(&buf->b_lock); 1590 mutex_exit(&arc_eviction_mtx); 1591 } else { 1592 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1593 } 1594 } 1595 if (hdr->b_freeze_cksum != NULL) { 1596 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1597 hdr->b_freeze_cksum = NULL; 1598 } 1599 1600 ASSERT(!list_link_active(&hdr->b_arc_node)); 1601 ASSERT3P(hdr->b_hash_next, ==, NULL); 1602 ASSERT3P(hdr->b_acb, ==, NULL); 1603 kmem_cache_free(hdr_cache, hdr); 1604} 1605 1606void 1607arc_buf_free(arc_buf_t *buf, void *tag) 1608{ 1609 arc_buf_hdr_t *hdr = buf->b_hdr; 1610 int hashed = hdr->b_state != arc_anon; 1611 1612 ASSERT(buf->b_efunc == NULL); 1613 ASSERT(buf->b_data != NULL); 1614 1615 if (hashed) { 1616 kmutex_t *hash_lock = HDR_LOCK(hdr); 1617 1618 mutex_enter(hash_lock); 1619 (void) remove_reference(hdr, hash_lock, tag); 1620 if (hdr->b_datacnt > 1) 1621 arc_buf_destroy(buf, FALSE, TRUE); 1622 else 1623 hdr->b_flags |= ARC_BUF_AVAILABLE; 1624 mutex_exit(hash_lock); 1625 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1626 int destroy_hdr; 1627 /* 1628 * We are in the middle of an async write. Don't destroy 1629 * this buffer unless the write completes before we finish 1630 * decrementing the reference count. 
1631 */ 1632 mutex_enter(&arc_eviction_mtx); 1633 (void) remove_reference(hdr, NULL, tag); 1634 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1635 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1636 mutex_exit(&arc_eviction_mtx); 1637 if (destroy_hdr) 1638 arc_hdr_destroy(hdr); 1639 } else { 1640 if (remove_reference(hdr, NULL, tag) > 0) { 1641 ASSERT(HDR_IO_ERROR(hdr)); 1642 arc_buf_destroy(buf, FALSE, TRUE); 1643 } else { 1644 arc_hdr_destroy(hdr); 1645 } 1646 } 1647} 1648 1649int 1650arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1651{ 1652 arc_buf_hdr_t *hdr = buf->b_hdr; 1653 kmutex_t *hash_lock = HDR_LOCK(hdr); 1654 int no_callback = (buf->b_efunc == NULL); 1655 1656 if (hdr->b_state == arc_anon) { 1657 arc_buf_free(buf, tag); 1658 return (no_callback); 1659 } 1660 1661 mutex_enter(hash_lock); 1662 ASSERT(hdr->b_state != arc_anon); 1663 ASSERT(buf->b_data != NULL); 1664 1665 (void) remove_reference(hdr, hash_lock, tag); 1666 if (hdr->b_datacnt > 1) { 1667 if (no_callback) 1668 arc_buf_destroy(buf, FALSE, TRUE); 1669 } else if (no_callback) { 1670 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1671 hdr->b_flags |= ARC_BUF_AVAILABLE; 1672 } 1673 ASSERT(no_callback || hdr->b_datacnt > 1 || 1674 refcount_is_zero(&hdr->b_refcnt)); 1675 mutex_exit(hash_lock); 1676 return (no_callback); 1677} 1678 1679int 1680arc_buf_size(arc_buf_t *buf) 1681{ 1682 return (buf->b_hdr->b_size); 1683} 1684 1685/* 1686 * Evict buffers from list until we've removed the specified number of 1687 * bytes. Move the removed buffers to the appropriate evict state. 1688 * If the recycle flag is set, then attempt to "recycle" a buffer: 1689 * - look for a buffer to evict that is `bytes' long. 1690 * - return the data block from this buffer rather than freeing it. 1691 * This flag is used by callers that are trying to make space for a 1692 * new buffer in a full arc cache. 1693 * 1694 * This function makes a "best effort". It skips over any buffers 1695 * it can't get a hash_lock on, and so may not catch all candidates. 1696 * It may also return without evicting as much space as requested. 1697 */ 1698static void * 1699arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1700 arc_buf_contents_t type) 1701{ 1702 arc_state_t *evicted_state; 1703 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1704 int64_t bytes_remaining; 1705 arc_buf_hdr_t *ab, *ab_prev = NULL; 1706 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1707 kmutex_t *lock, *evicted_lock; 1708 kmutex_t *hash_lock; 1709 boolean_t have_lock; 1710 void *stolen = NULL; 1711 static int evict_metadata_offset, evict_data_offset; 1712 int i, idx, offset, list_count, count; 1713 1714 ASSERT(state == arc_mru || state == arc_mfu); 1715 1716 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1717 1718 if (type == ARC_BUFC_METADATA) { 1719 offset = 0; 1720 list_count = ARC_BUFC_NUMMETADATALISTS; 1721 list_start = &state->arcs_lists[0]; 1722 evicted_list_start = &evicted_state->arcs_lists[0]; 1723 idx = evict_metadata_offset; 1724 } else { 1725 offset = ARC_BUFC_NUMMETADATALISTS; 1726 list_start = &state->arcs_lists[offset]; 1727 evicted_list_start = &evicted_state->arcs_lists[offset]; 1728 list_count = ARC_BUFC_NUMDATALISTS; 1729 idx = evict_data_offset; 1730 } 1731 bytes_remaining = evicted_state->arcs_lsize[type]; 1732 count = 0; 1733 1734evict_start: 1735 list = &list_start[idx]; 1736 evicted_list = &evicted_list_start[idx]; 1737 lock = ARCS_LOCK(state, (offset + idx)); 1738 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1739 1740 mutex_enter(lock); 1741 mutex_enter(evicted_lock); 1742 1743 for (ab = list_tail(list); ab; ab = ab_prev) { 1744 ab_prev = list_prev(list, ab); 1745 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1746 /* prefetch buffers have a minimum lifespan */ 1747 if (HDR_IO_IN_PROGRESS(ab) || 1748 (spa && ab->b_spa != spa) || 1749 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1750 LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1751 skipped++; 1752 continue; 1753 } 1754 /* "lookahead" for better eviction candidate */ 1755 if (recycle && ab->b_size != bytes && 1756 ab_prev && ab_prev->b_size == bytes) 1757 continue; 1758 hash_lock = HDR_LOCK(ab); 1759 have_lock = MUTEX_HELD(hash_lock); 1760 if (have_lock || mutex_tryenter(hash_lock)) { 1761 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1762 ASSERT(ab->b_datacnt > 0); 1763 while (ab->b_buf) { 1764 arc_buf_t *buf = ab->b_buf; 1765 if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { 1766 missed += 1; 1767 break; 1768 } 1769 if (buf->b_data) { 1770 bytes_evicted += ab->b_size; 1771 if (recycle && ab->b_type == type && 1772 ab->b_size == bytes && 1773 !HDR_L2_WRITING(ab)) { 1774 stolen = buf->b_data; 1775 recycle = FALSE; 1776 } 1777 } 1778 if (buf->b_efunc) { 1779 mutex_enter(&arc_eviction_mtx); 1780 arc_buf_destroy(buf, 1781 buf->b_data == stolen, FALSE); 1782 ab->b_buf = buf->b_next; 1783 buf->b_hdr = &arc_eviction_hdr; 1784 buf->b_next = arc_eviction_list; 1785 arc_eviction_list = buf; 1786 mutex_exit(&arc_eviction_mtx); 1787 rw_exit(&buf->b_lock); 1788 } else { 1789 rw_exit(&buf->b_lock); 1790 arc_buf_destroy(buf, 1791 buf->b_data == stolen, TRUE); 1792 } 1793 } 1794 1795 if (ab->b_l2hdr) { 1796 ARCSTAT_INCR(arcstat_evict_l2_cached, 1797 ab->b_size); 1798 } else { 1799 if (l2arc_write_eligible(ab->b_spa, ab)) { 1800 ARCSTAT_INCR(arcstat_evict_l2_eligible, 1801 ab->b_size); 1802 } else { 1803 ARCSTAT_INCR( 1804 arcstat_evict_l2_ineligible, 1805 ab->b_size); 1806 } 1807 } 1808 1809 if (ab->b_datacnt == 0) { 1810 arc_change_state(evicted_state, ab, hash_lock); 1811 ASSERT(HDR_IN_HASH_TABLE(ab)); 1812 ab->b_flags |= ARC_IN_HASH_TABLE; 1813 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1814 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1815 } 1816 if (!have_lock) 1817 mutex_exit(hash_lock); 1818 if (bytes >= 0 && bytes_evicted >= bytes) 1819 break; 1820 if (bytes_remaining > 0) { 1821 mutex_exit(evicted_lock); 1822 mutex_exit(lock); 1823 idx = ((idx + 1) & (list_count - 1)); 1824 count++; 1825 goto evict_start; 1826 } 1827 } else { 1828 missed += 1; 1829 } 1830 } 1831 1832 mutex_exit(evicted_lock); 1833 mutex_exit(lock); 1834 1835 idx = ((idx + 1) & (list_count - 1)); 1836 count++; 1837 1838 if (bytes_evicted < bytes) { 1839 if (count < list_count) 1840 goto evict_start; 1841 
else 1842 dprintf("only evicted %lld bytes from %x", 1843 (longlong_t)bytes_evicted, state); 1844 } 1845 if (type == ARC_BUFC_METADATA) 1846 evict_metadata_offset = idx; 1847 else 1848 evict_data_offset = idx; 1849 1850 if (skipped) 1851 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1852 1853 if (missed) 1854 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1855 1856 /* 1857 * We have just evicted some date into the ghost state, make 1858 * sure we also adjust the ghost state size if necessary. 1859 */ 1860 if (arc_no_grow && 1861 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1862 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1863 arc_mru_ghost->arcs_size - arc_c; 1864 1865 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1866 int64_t todelete = 1867 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1868 arc_evict_ghost(arc_mru_ghost, 0, todelete); 1869 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1870 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1871 arc_mru_ghost->arcs_size + 1872 arc_mfu_ghost->arcs_size - arc_c); 1873 arc_evict_ghost(arc_mfu_ghost, 0, todelete); 1874 } 1875 } 1876 if (stolen) 1877 ARCSTAT_BUMP(arcstat_stolen); 1878 1879 return (stolen); 1880} 1881 1882/* 1883 * Remove buffers from list until we've removed the specified number of 1884 * bytes. Destroy the buffers that are removed. 1885 */ 1886static void 1887arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 1888{ 1889 arc_buf_hdr_t *ab, *ab_prev; 1890 list_t *list, *list_start; 1891 kmutex_t *hash_lock, *lock; 1892 uint64_t bytes_deleted = 0; 1893 uint64_t bufs_skipped = 0; 1894 static int evict_offset; 1895 int list_count, idx = evict_offset; 1896 int offset, count = 0; 1897 1898 ASSERT(GHOST_STATE(state)); 1899 1900 /* 1901 * data lists come after metadata lists 1902 */ 1903 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 1904 list_count = ARC_BUFC_NUMDATALISTS; 1905 offset = ARC_BUFC_NUMMETADATALISTS; 1906 1907evict_start: 1908 list = &list_start[idx]; 1909 lock = ARCS_LOCK(state, idx + offset); 1910 1911 mutex_enter(lock); 1912 for (ab = list_tail(list); ab; ab = ab_prev) { 1913 ab_prev = list_prev(list, ab); 1914 if (spa && ab->b_spa != spa) 1915 continue; 1916 hash_lock = HDR_LOCK(ab); 1917 if (mutex_tryenter(hash_lock)) { 1918 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1919 ASSERT(ab->b_buf == NULL); 1920 ARCSTAT_BUMP(arcstat_deleted); 1921 bytes_deleted += ab->b_size; 1922 1923 if (ab->b_l2hdr != NULL) { 1924 /* 1925 * This buffer is cached on the 2nd Level ARC; 1926 * don't destroy the header. 
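 * Instead, the header is moved to the arc_l2c_only state below, which
 * keeps b_l2hdr (and thus the on-device mapping) intact until the
 * L2ARC itself evicts or re-uses that space.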
1927 */ 1928 arc_change_state(arc_l2c_only, ab, hash_lock); 1929 mutex_exit(hash_lock); 1930 } else { 1931 arc_change_state(arc_anon, ab, hash_lock); 1932 mutex_exit(hash_lock); 1933 arc_hdr_destroy(ab); 1934 } 1935 1936 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1937 if (bytes >= 0 && bytes_deleted >= bytes) 1938 break; 1939 } else { 1940 if (bytes < 0) { 1941 /* 1942 * we're draining the ARC, retry 1943 */ 1944 mutex_exit(lock); 1945 mutex_enter(hash_lock); 1946 mutex_exit(hash_lock); 1947 goto evict_start; 1948 } 1949 bufs_skipped += 1; 1950 } 1951 } 1952 mutex_exit(lock); 1953 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 1954 count++; 1955 1956 if (count < list_count) 1957 goto evict_start; 1958 1959 evict_offset = idx; 1960 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 1961 (bytes < 0 || bytes_deleted < bytes)) { 1962 list_start = &state->arcs_lists[0]; 1963 list_count = ARC_BUFC_NUMMETADATALISTS; 1964 offset = count = 0; 1965 goto evict_start; 1966 } 1967 1968 if (bufs_skipped) { 1969 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1970 ASSERT(bytes >= 0); 1971 } 1972 1973 if (bytes_deleted < bytes) 1974 dprintf("only deleted %lld bytes from %p", 1975 (longlong_t)bytes_deleted, state); 1976} 1977 1978static void 1979arc_adjust(void) 1980{ 1981 int64_t adjustment, delta; 1982 1983 /* 1984 * Adjust MRU size 1985 */ 1986 1987 adjustment = MIN((int64_t)(arc_size - arc_c), 1988 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 1989 arc_p)); 1990 1991 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1992 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 1993 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA); 1994 adjustment -= delta; 1995 } 1996 1997 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1998 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 1999 (void) arc_evict(arc_mru, 0, delta, FALSE, 2000 ARC_BUFC_METADATA); 2001 } 2002 2003 /* 2004 * Adjust MFU size 2005 */ 2006 2007 adjustment = arc_size - arc_c; 2008 2009 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 2010 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 2011 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA); 2012 adjustment -= delta; 2013 } 2014 2015 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2016 int64_t delta = MIN(adjustment, 2017 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 2018 (void) arc_evict(arc_mfu, 0, delta, FALSE, 2019 ARC_BUFC_METADATA); 2020 } 2021 2022 /* 2023 * Adjust ghost lists 2024 */ 2025 2026 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2027 2028 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2029 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2030 arc_evict_ghost(arc_mru_ghost, 0, delta); 2031 } 2032 2033 adjustment = 2034 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2035 2036 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2037 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2038 arc_evict_ghost(arc_mfu_ghost, 0, delta); 2039 } 2040} 2041 2042static void 2043arc_do_user_evicts(void) 2044{ 2045 static arc_buf_t *tmp_arc_eviction_list; 2046 2047 /* 2048 * Move list over to avoid LOR 2049 */ 2050restart: 2051 mutex_enter(&arc_eviction_mtx); 2052 tmp_arc_eviction_list = arc_eviction_list; 2053 arc_eviction_list = NULL; 2054 mutex_exit(&arc_eviction_mtx); 2055 2056 while (tmp_arc_eviction_list != NULL) { 2057 arc_buf_t *buf = tmp_arc_eviction_list; 2058 
tmp_arc_eviction_list = buf->b_next; 2059 rw_enter(&buf->b_lock, RW_WRITER); 2060 buf->b_hdr = NULL; 2061 rw_exit(&buf->b_lock); 2062 2063 if (buf->b_efunc != NULL) 2064 VERIFY(buf->b_efunc(buf) == 0); 2065 2066 buf->b_efunc = NULL; 2067 buf->b_private = NULL; 2068 kmem_cache_free(buf_cache, buf); 2069 } 2070 2071 if (arc_eviction_list != NULL) 2072 goto restart; 2073} 2074 2075/* 2076 * Flush all *evictable* data from the cache for the given spa. 2077 * NOTE: this will not touch "active" (i.e. referenced) data. 2078 */ 2079void 2080arc_flush(spa_t *spa) 2081{ 2082 uint64_t guid = 0; 2083 2084 if (spa) 2085 guid = spa_guid(spa); 2086 2087 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2088 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2089 if (spa) 2090 break; 2091 } 2092 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2093 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2094 if (spa) 2095 break; 2096 } 2097 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2098 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2099 if (spa) 2100 break; 2101 } 2102 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2103 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2104 if (spa) 2105 break; 2106 } 2107 2108 arc_evict_ghost(arc_mru_ghost, guid, -1); 2109 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2110 2111 mutex_enter(&arc_reclaim_thr_lock); 2112 arc_do_user_evicts(); 2113 mutex_exit(&arc_reclaim_thr_lock); 2114 ASSERT(spa || arc_eviction_list == NULL); 2115} 2116 2117void 2118arc_shrink(void) 2119{ 2120 if (arc_c > arc_c_min) { 2121 uint64_t to_free; 2122 2123#ifdef _KERNEL 2124 to_free = arc_c >> arc_shrink_shift; 2125#else 2126 to_free = arc_c >> arc_shrink_shift; 2127#endif 2128 if (arc_c > arc_c_min + to_free) 2129 atomic_add_64(&arc_c, -to_free); 2130 else 2131 arc_c = arc_c_min; 2132 2133 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2134 if (arc_c > arc_size) 2135 arc_c = MAX(arc_size, arc_c_min); 2136 if (arc_p > arc_c) 2137 arc_p = (arc_c >> 1); 2138 ASSERT(arc_c >= arc_c_min); 2139 ASSERT((int64_t)arc_p >= 0); 2140 } 2141 2142 if (arc_size > arc_c) 2143 arc_adjust(); 2144} 2145 2146static int needfree = 0; 2147 2148static int 2149arc_reclaim_needed(void) 2150{ 2151#if 0 2152 uint64_t extra; 2153#endif 2154 2155#ifdef _KERNEL 2156 if (needfree) 2157 return (1); 2158 2159 /* 2160 * Cooperate with pagedaemon when it's time for it to scan 2161 * and reclaim some pages. 2162 */ 2163 if (vm_paging_needed()) 2164 return (1); 2165 2166#if 0 2167 /* 2168 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2169 */ 2170 extra = desfree; 2171 2172 /* 2173 * check that we're out of range of the pageout scanner. It starts to 2174 * schedule paging if freemem is less than lotsfree and needfree. 2175 * lotsfree is the high-water mark for pageout, and needfree is the 2176 * number of needed free pages. We add extra pages here to make sure 2177 * the scanner doesn't start up while we're freeing memory. 2178 */ 2179 if (freemem < lotsfree + needfree + extra) 2180 return (1); 2181 2182 /* 2183 * check to make sure that swapfs has enough space so that anon 2184 * reservations can still succeed. anon_resvmem() checks that the 2185 * availrmem is greater than swapfs_minfree, and the number of reserved 2186 * swap pages. We also add a bit of extra here just to prevent 2187 * circumstances from getting really dire. 
2188 */ 2189 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2190 return (1); 2191 2192#if defined(__i386) 2193 /* 2194 * If we're on an i386 platform, it's possible that we'll exhaust the 2195 * kernel heap space before we ever run out of available physical 2196 * memory. Most checks of the size of the heap_area compare against 2197 * tune.t_minarmem, which is the minimum available real memory that we 2198 * can have in the system. However, this is generally fixed at 25 pages 2199 * which is so low that it's useless. In this comparison, we seek to 2200 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2201 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2202 * free) 2203 */ 2204 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2205 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2206 return (1); 2207#endif 2208#else 2209 if (kmem_used() > (kmem_size() * 3) / 4) 2210 return (1); 2211#endif 2212 2213#else 2214 if (spa_get_random(100) == 0) 2215 return (1); 2216#endif 2217 return (0); 2218} 2219 2220extern kmem_cache_t *zio_buf_cache[]; 2221extern kmem_cache_t *zio_data_buf_cache[]; 2222 2223static void 2224arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2225{ 2226 size_t i; 2227 kmem_cache_t *prev_cache = NULL; 2228 kmem_cache_t *prev_data_cache = NULL; 2229 2230#ifdef _KERNEL 2231 if (arc_meta_used >= arc_meta_limit) { 2232 /* 2233 * We are exceeding our meta-data cache limit. 2234 * Purge some DNLC entries to release holds on meta-data. 2235 */ 2236 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2237 } 2238#if defined(__i386) 2239 /* 2240 * Reclaim unused memory from all kmem caches. 2241 */ 2242 kmem_reap(); 2243#endif 2244#endif 2245 2246 /* 2247 * An aggressive reclamation will shrink the cache size as well as 2248 * reap free buffers from the arc kmem caches. 2249 */ 2250 if (strat == ARC_RECLAIM_AGGR) 2251 arc_shrink(); 2252 2253 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2254 if (zio_buf_cache[i] != prev_cache) { 2255 prev_cache = zio_buf_cache[i]; 2256 kmem_cache_reap_now(zio_buf_cache[i]); 2257 } 2258 if (zio_data_buf_cache[i] != prev_data_cache) { 2259 prev_data_cache = zio_data_buf_cache[i]; 2260 kmem_cache_reap_now(zio_data_buf_cache[i]); 2261 } 2262 } 2263 kmem_cache_reap_now(buf_cache); 2264 kmem_cache_reap_now(hdr_cache); 2265} 2266 2267static void 2268arc_reclaim_thread(void *dummy __unused) 2269{ 2270 clock_t growtime = 0; 2271 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2272 callb_cpr_t cpr; 2273 2274 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2275 2276 mutex_enter(&arc_reclaim_thr_lock); 2277 while (arc_thread_exit == 0) { 2278 if (arc_reclaim_needed()) { 2279 2280 if (arc_no_grow) { 2281 if (last_reclaim == ARC_RECLAIM_CONS) { 2282 last_reclaim = ARC_RECLAIM_AGGR; 2283 } else { 2284 last_reclaim = ARC_RECLAIM_CONS; 2285 } 2286 } else { 2287 arc_no_grow = TRUE; 2288 last_reclaim = ARC_RECLAIM_AGGR; 2289 membar_producer(); 2290 } 2291 2292 /* reset the growth delay for every reclaim */ 2293 growtime = LBOLT + (arc_grow_retry * hz); 2294 2295 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2296 /* 2297 * If needfree is TRUE our vm_lowmem hook 2298 * was called and in that case we must free some 2299 * memory, so switch to aggressive mode. 
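 * Aggressive mode matters because arc_kmem_reap_now() only calls
 * arc_shrink() for ARC_RECLAIM_AGGR; with the default
 * arc_shrink_shift of 5 that releases roughly arc_c / 32 (about 3%)
 * of the current target size per pass, in addition to reaping the
 * kmem caches.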
2300 */ 2301 arc_no_grow = TRUE; 2302 last_reclaim = ARC_RECLAIM_AGGR; 2303 } 2304 arc_kmem_reap_now(last_reclaim); 2305 arc_warm = B_TRUE; 2306 2307 } else if (arc_no_grow && LBOLT >= growtime) { 2308 arc_no_grow = FALSE; 2309 } 2310 2311 arc_adjust(); 2312 2313 if (arc_eviction_list != NULL) 2314 arc_do_user_evicts(); 2315 2316#ifdef _KERNEL 2317 if (needfree) { 2318 needfree = 0; 2319 wakeup(&needfree); 2320 } 2321#endif 2322 2323 /* block until needed, or one second, whichever is shorter */ 2324 CALLB_CPR_SAFE_BEGIN(&cpr); 2325 (void) cv_timedwait(&arc_reclaim_thr_cv, 2326 &arc_reclaim_thr_lock, hz); 2327 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2328 } 2329 2330 arc_thread_exit = 0; 2331 cv_broadcast(&arc_reclaim_thr_cv); 2332 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2333 thread_exit(); 2334} 2335 2336/* 2337 * Adapt arc info given the number of bytes we are trying to add and 2338 * the state that we are comming from. This function is only called 2339 * when we are adding new content to the cache. 2340 */ 2341static void 2342arc_adapt(int bytes, arc_state_t *state) 2343{ 2344 int mult; 2345 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2346 2347 if (state == arc_l2c_only) 2348 return; 2349 2350 ASSERT(bytes > 0); 2351 /* 2352 * Adapt the target size of the MRU list: 2353 * - if we just hit in the MRU ghost list, then increase 2354 * the target size of the MRU list. 2355 * - if we just hit in the MFU ghost list, then increase 2356 * the target size of the MFU list by decreasing the 2357 * target size of the MRU list. 2358 */ 2359 if (state == arc_mru_ghost) { 2360 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2361 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2362 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2363 2364 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2365 } else if (state == arc_mfu_ghost) { 2366 uint64_t delta; 2367 2368 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2369 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2370 mult = MIN(mult, 10); 2371 2372 delta = MIN(bytes * mult, arc_p); 2373 arc_p = MAX(arc_p_min, arc_p - delta); 2374 } 2375 ASSERT((int64_t)arc_p >= 0); 2376 2377 if (arc_reclaim_needed()) { 2378 cv_signal(&arc_reclaim_thr_cv); 2379 return; 2380 } 2381 2382 if (arc_no_grow) 2383 return; 2384 2385 if (arc_c >= arc_c_max) 2386 return; 2387 2388 /* 2389 * If we're within (2 * maxblocksize) bytes of the target 2390 * cache size, increment the target cache size 2391 */ 2392 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2393 atomic_add_64(&arc_c, (int64_t)bytes); 2394 if (arc_c > arc_c_max) 2395 arc_c = arc_c_max; 2396 else if (state == arc_anon) 2397 atomic_add_64(&arc_p, (int64_t)bytes); 2398 if (arc_p > arc_c) 2399 arc_p = arc_c; 2400 } 2401 ASSERT((int64_t)arc_p >= 0); 2402} 2403 2404/* 2405 * Check if the cache has reached its limits and eviction is required 2406 * prior to insert. 2407 */ 2408static int 2409arc_evict_needed(arc_buf_contents_t type) 2410{ 2411 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2412 return (1); 2413 2414#if 0 2415#ifdef _KERNEL 2416 /* 2417 * If zio data pages are being allocated out of a separate heap segment, 2418 * then enforce that the size of available vmem for this area remains 2419 * above about 1/32nd free. 
2420 */ 2421 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2422 vmem_size(zio_arena, VMEM_FREE) < 2423 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2424 return (1); 2425#endif 2426#endif 2427 2428 if (arc_reclaim_needed()) 2429 return (1); 2430 2431 return (arc_size > arc_c); 2432} 2433 2434/* 2435 * The buffer, supplied as the first argument, needs a data block. 2436 * So, if we are at cache max, determine which cache should be victimized. 2437 * We have the following cases: 2438 * 2439 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2440 * In this situation if we're out of space, but the resident size of the MFU is 2441 * under the limit, victimize the MFU cache to satisfy this insertion request. 2442 * 2443 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2444 * Here, we've used up all of the available space for the MRU, so we need to 2445 * evict from our own cache instead. Evict from the set of resident MRU 2446 * entries. 2447 * 2448 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2449 * c minus p represents the MFU space in the cache, since p is the size of the 2450 * cache that is dedicated to the MRU. In this situation there's still space on 2451 * the MFU side, so the MRU side needs to be victimized. 2452 * 2453 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2454 * MFU's resident set is consuming more space than it has been allotted. In 2455 * this situation, we must victimize our own cache, the MFU, for this insertion. 2456 */ 2457static void 2458arc_get_data_buf(arc_buf_t *buf) 2459{ 2460 arc_state_t *state = buf->b_hdr->b_state; 2461 uint64_t size = buf->b_hdr->b_size; 2462 arc_buf_contents_t type = buf->b_hdr->b_type; 2463 2464 arc_adapt(size, state); 2465 2466 /* 2467 * We have not yet reached cache maximum size, 2468 * just allocate a new buffer. 2469 */ 2470 if (!arc_evict_needed(type)) { 2471 if (type == ARC_BUFC_METADATA) { 2472 buf->b_data = zio_buf_alloc(size); 2473 arc_space_consume(size, ARC_SPACE_DATA); 2474 } else { 2475 ASSERT(type == ARC_BUFC_DATA); 2476 buf->b_data = zio_data_buf_alloc(size); 2477 ARCSTAT_INCR(arcstat_data_size, size); 2478 atomic_add_64(&arc_size, size); 2479 } 2480 goto out; 2481 } 2482 2483 /* 2484 * If we are prefetching from the mfu ghost list, this buffer 2485 * will end up on the mru list; so steal space from there. 2486 */ 2487 if (state == arc_mfu_ghost) 2488 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2489 else if (state == arc_mru_ghost) 2490 state = arc_mru; 2491 2492 if (state == arc_mru || state == arc_anon) { 2493 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2494 state = (arc_mfu->arcs_lsize[type] >= size && 2495 arc_p > mru_used) ? arc_mfu : arc_mru; 2496 } else { 2497 /* MFU cases */ 2498 uint64_t mfu_space = arc_c - arc_p; 2499 state = (arc_mru->arcs_lsize[type] >= size && 2500 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2501 } 2502 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) { 2503 if (type == ARC_BUFC_METADATA) { 2504 buf->b_data = zio_buf_alloc(size); 2505 arc_space_consume(size, ARC_SPACE_DATA); 2506 } else { 2507 ASSERT(type == ARC_BUFC_DATA); 2508 buf->b_data = zio_data_buf_alloc(size); 2509 ARCSTAT_INCR(arcstat_data_size, size); 2510 atomic_add_64(&arc_size, size); 2511 } 2512 ARCSTAT_BUMP(arcstat_recycle_miss); 2513 } 2514 ASSERT(buf->b_data != NULL); 2515out: 2516 /* 2517 * Update the state size. Note that ghost states have a 2518 * "ghost size" and so don't need to be updated. 
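 * For example, a 128K data buffer whose header is in arc_mru adds 128K
 * to arc_mru->arcs_size here, and is counted in
 * arcs_lsize[ARC_BUFC_DATA] only while that header sits unreferenced
 * on one of the mru lists, i.e. while it is actually evictable.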
2519 */ 2520 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2521 arc_buf_hdr_t *hdr = buf->b_hdr; 2522 2523 atomic_add_64(&hdr->b_state->arcs_size, size); 2524 if (list_link_active(&hdr->b_arc_node)) { 2525 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2526 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2527 } 2528 /* 2529 * If we are growing the cache, and we are adding anonymous 2530 * data, and we have outgrown arc_p, update arc_p 2531 */ 2532 if (arc_size < arc_c && hdr->b_state == arc_anon && 2533 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2534 arc_p = MIN(arc_c, arc_p + size); 2535 } 2536 ARCSTAT_BUMP(arcstat_allocated); 2537} 2538 2539/* 2540 * This routine is called whenever a buffer is accessed. 2541 * NOTE: the hash lock is dropped in this function. 2542 */ 2543static void 2544arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2545{ 2546 ASSERT(MUTEX_HELD(hash_lock)); 2547 2548 if (buf->b_state == arc_anon) { 2549 /* 2550 * This buffer is not in the cache, and does not 2551 * appear in our "ghost" list. Add the new buffer 2552 * to the MRU state. 2553 */ 2554 2555 ASSERT(buf->b_arc_access == 0); 2556 buf->b_arc_access = LBOLT; 2557 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2558 arc_change_state(arc_mru, buf, hash_lock); 2559 2560 } else if (buf->b_state == arc_mru) { 2561 /* 2562 * If this buffer is here because of a prefetch, then either: 2563 * - clear the flag if this is a "referencing" read 2564 * (any subsequent access will bump this into the MFU state). 2565 * or 2566 * - move the buffer to the head of the list if this is 2567 * another prefetch (to make it less likely to be evicted). 2568 */ 2569 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2570 if (refcount_count(&buf->b_refcnt) == 0) { 2571 ASSERT(list_link_active(&buf->b_arc_node)); 2572 } else { 2573 buf->b_flags &= ~ARC_PREFETCH; 2574 ARCSTAT_BUMP(arcstat_mru_hits); 2575 } 2576 buf->b_arc_access = LBOLT; 2577 return; 2578 } 2579 2580 /* 2581 * This buffer has been "accessed" only once so far, 2582 * but it is still in the cache. Move it to the MFU 2583 * state. 2584 */ 2585 if (LBOLT > buf->b_arc_access + ARC_MINTIME) { 2586 /* 2587 * More than 125ms have passed since we 2588 * instantiated this buffer. Move it to the 2589 * most frequently used state. 2590 */ 2591 buf->b_arc_access = LBOLT; 2592 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2593 arc_change_state(arc_mfu, buf, hash_lock); 2594 } 2595 ARCSTAT_BUMP(arcstat_mru_hits); 2596 } else if (buf->b_state == arc_mru_ghost) { 2597 arc_state_t *new_state; 2598 /* 2599 * This buffer has been "accessed" recently, but 2600 * was evicted from the cache. Move it to the 2601 * MFU state. 2602 */ 2603 2604 if (buf->b_flags & ARC_PREFETCH) { 2605 new_state = arc_mru; 2606 if (refcount_count(&buf->b_refcnt) > 0) 2607 buf->b_flags &= ~ARC_PREFETCH; 2608 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2609 } else { 2610 new_state = arc_mfu; 2611 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2612 } 2613 2614 buf->b_arc_access = LBOLT; 2615 arc_change_state(new_state, buf, hash_lock); 2616 2617 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2618 } else if (buf->b_state == arc_mfu) { 2619 /* 2620 * This buffer has been accessed more than once and is 2621 * still in the cache. Keep it in the MFU state. 2622 * 2623 * NOTE: an add_reference() that occurred when we did 2624 * the arc_read() will have kicked this off the list. 2625 * If it was a prefetch, we will explicitly move it to 2626 * the head of the list now. 
2627 */ 2628 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2629 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2630 ASSERT(list_link_active(&buf->b_arc_node)); 2631 } 2632 ARCSTAT_BUMP(arcstat_mfu_hits); 2633 buf->b_arc_access = LBOLT; 2634 } else if (buf->b_state == arc_mfu_ghost) { 2635 arc_state_t *new_state = arc_mfu; 2636 /* 2637 * This buffer has been accessed more than once but has 2638 * been evicted from the cache. Move it back to the 2639 * MFU state. 2640 */ 2641 2642 if (buf->b_flags & ARC_PREFETCH) { 2643 /* 2644 * This is a prefetch access... 2645 * move this block back to the MRU state. 2646 */ 2647 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2648 new_state = arc_mru; 2649 } 2650 2651 buf->b_arc_access = LBOLT; 2652 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2653 arc_change_state(new_state, buf, hash_lock); 2654 2655 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2656 } else if (buf->b_state == arc_l2c_only) { 2657 /* 2658 * This buffer is on the 2nd Level ARC. 2659 */ 2660 2661 buf->b_arc_access = LBOLT; 2662 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2663 arc_change_state(arc_mfu, buf, hash_lock); 2664 } else { 2665 ASSERT(!"invalid arc state"); 2666 } 2667} 2668 2669/* a generic arc_done_func_t which you can use */ 2670/* ARGSUSED */ 2671void 2672arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2673{ 2674 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2675 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2676} 2677 2678/* a generic arc_done_func_t */ 2679void 2680arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2681{ 2682 arc_buf_t **bufp = arg; 2683 if (zio && zio->io_error) { 2684 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2685 *bufp = NULL; 2686 } else { 2687 *bufp = buf; 2688 } 2689} 2690 2691static void 2692arc_read_done(zio_t *zio) 2693{ 2694 arc_buf_hdr_t *hdr, *found; 2695 arc_buf_t *buf; 2696 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2697 kmutex_t *hash_lock; 2698 arc_callback_t *callback_list, *acb; 2699 int freeable = FALSE; 2700 2701 buf = zio->io_private; 2702 hdr = buf->b_hdr; 2703 2704 /* 2705 * The hdr was inserted into hash-table and removed from lists 2706 * prior to starting I/O. We should find this header, since 2707 * it's in the hash table, and it should be legit since it's 2708 * not possible to evict it during the I/O. The only possible 2709 * reason for it not to be found is if we were freed during the 2710 * read. 2711 */ 2712 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2713 &hash_lock); 2714 2715 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2716 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2717 (found == hdr && HDR_L2_READING(hdr))); 2718 2719 hdr->b_flags &= ~ARC_L2_EVICTED; 2720 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2721 hdr->b_flags &= ~ARC_L2CACHE; 2722 2723 /* byteswap if necessary */ 2724 callback_list = hdr->b_acb; 2725 ASSERT(callback_list != NULL); 2726 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2727 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 
2728 byteswap_uint64_array : 2729 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2730 func(buf->b_data, hdr->b_size); 2731 } 2732 2733 arc_cksum_compute(buf, B_FALSE); 2734 2735 /* create copies of the data buffer for the callers */ 2736 abuf = buf; 2737 for (acb = callback_list; acb; acb = acb->acb_next) { 2738 if (acb->acb_done) { 2739 if (abuf == NULL) 2740 abuf = arc_buf_clone(buf); 2741 acb->acb_buf = abuf; 2742 abuf = NULL; 2743 } 2744 } 2745 hdr->b_acb = NULL; 2746 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2747 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2748 if (abuf == buf) 2749 hdr->b_flags |= ARC_BUF_AVAILABLE; 2750 2751 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2752 2753 if (zio->io_error != 0) { 2754 hdr->b_flags |= ARC_IO_ERROR; 2755 if (hdr->b_state != arc_anon) 2756 arc_change_state(arc_anon, hdr, hash_lock); 2757 if (HDR_IN_HASH_TABLE(hdr)) 2758 buf_hash_remove(hdr); 2759 freeable = refcount_is_zero(&hdr->b_refcnt); 2760 } 2761 2762 /* 2763 * Broadcast before we drop the hash_lock to avoid the possibility 2764 * that the hdr (and hence the cv) might be freed before we get to 2765 * the cv_broadcast(). 2766 */ 2767 cv_broadcast(&hdr->b_cv); 2768 2769 if (hash_lock) { 2770 /* 2771 * Only call arc_access on anonymous buffers. This is because 2772 * if we've issued an I/O for an evicted buffer, we've already 2773 * called arc_access (to prevent any simultaneous readers from 2774 * getting confused). 2775 */ 2776 if (zio->io_error == 0 && hdr->b_state == arc_anon) 2777 arc_access(hdr, hash_lock); 2778 mutex_exit(hash_lock); 2779 } else { 2780 /* 2781 * This block was freed while we waited for the read to 2782 * complete. It has been removed from the hash table and 2783 * moved to the anonymous state (so that it won't show up 2784 * in the cache). 2785 */ 2786 ASSERT3P(hdr->b_state, ==, arc_anon); 2787 freeable = refcount_is_zero(&hdr->b_refcnt); 2788 } 2789 2790 /* execute each callback and free its structure */ 2791 while ((acb = callback_list) != NULL) { 2792 if (acb->acb_done) 2793 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2794 2795 if (acb->acb_zio_dummy != NULL) { 2796 acb->acb_zio_dummy->io_error = zio->io_error; 2797 zio_nowait(acb->acb_zio_dummy); 2798 } 2799 2800 callback_list = acb->acb_next; 2801 kmem_free(acb, sizeof (arc_callback_t)); 2802 } 2803 2804 if (freeable) 2805 arc_hdr_destroy(hdr); 2806} 2807 2808/* 2809 * "Read" the block block at the specified DVA (in bp) via the 2810 * cache. If the block is found in the cache, invoke the provided 2811 * callback immediately and return. Note that the `zio' parameter 2812 * in the callback will be NULL in this case, since no IO was 2813 * required. If the block is not in the cache pass the read request 2814 * on to the spa with a substitute callback function, so that the 2815 * requested block will be added to the cache. 2816 * 2817 * If a read request arrives for a block that has a read in-progress, 2818 * either wait for the in-progress read to complete (and return the 2819 * results); or, if this is a read with a "done" func, add a record 2820 * to the read to invoke the "done" func when the read completes, 2821 * and return; or just return. 2822 * 2823 * arc_read_done() will invoke all the requested "done" functions 2824 * for readers of this block. 2825 * 2826 * Normal callers should use arc_read and pass the arc buffer and offset 2827 * for the bp. But if you know you don't need locking, you can use 2828 * arc_read_bp. 
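 *
 * A typical blocking caller passes ARC_WAIT in *arc_flags together
 * with a done callback such as arc_getbuf_func(), so the buffer
 * pointer has been stored through the callback's private argument by
 * the time arc_read() returns; ARC_NOWAIT callers are notified from
 * arc_read_done() (or immediately, on a cache hit).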
2829 */ 2830int 2831arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, 2832 arc_done_func_t *done, void *private, int priority, int zio_flags, 2833 uint32_t *arc_flags, const zbookmark_t *zb) 2834{ 2835 int err; 2836 2837 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2838 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2839 rw_enter(&pbuf->b_lock, RW_READER); 2840 2841 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2842 zio_flags, arc_flags, zb); 2843 rw_exit(&pbuf->b_lock); 2844 return (err); 2845} 2846 2847int 2848arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, 2849 arc_done_func_t *done, void *private, int priority, int zio_flags, 2850 uint32_t *arc_flags, const zbookmark_t *zb) 2851{ 2852 arc_buf_hdr_t *hdr; 2853 arc_buf_t *buf; 2854 kmutex_t *hash_lock; 2855 zio_t *rzio; 2856 uint64_t guid = spa_guid(spa); 2857 2858top: 2859 hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2860 if (hdr && hdr->b_datacnt > 0) { 2861 2862 *arc_flags |= ARC_CACHED; 2863 2864 if (HDR_IO_IN_PROGRESS(hdr)) { 2865 2866 if (*arc_flags & ARC_WAIT) { 2867 cv_wait(&hdr->b_cv, hash_lock); 2868 mutex_exit(hash_lock); 2869 goto top; 2870 } 2871 ASSERT(*arc_flags & ARC_NOWAIT); 2872 2873 if (done) { 2874 arc_callback_t *acb = NULL; 2875 2876 acb = kmem_zalloc(sizeof (arc_callback_t), 2877 KM_SLEEP); 2878 acb->acb_done = done; 2879 acb->acb_private = private; 2880 if (pio != NULL) 2881 acb->acb_zio_dummy = zio_null(pio, 2882 spa, NULL, NULL, NULL, zio_flags); 2883 2884 ASSERT(acb->acb_done != NULL); 2885 acb->acb_next = hdr->b_acb; 2886 hdr->b_acb = acb; 2887 add_reference(hdr, hash_lock, private); 2888 mutex_exit(hash_lock); 2889 return (0); 2890 } 2891 mutex_exit(hash_lock); 2892 return (0); 2893 } 2894 2895 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2896 2897 if (done) { 2898 add_reference(hdr, hash_lock, private); 2899 /* 2900 * If this block is already in use, create a new 2901 * copy of the data so that we will be guaranteed 2902 * that arc_release() will always succeed. 
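 * arc_buf_clone() attaches a second arc_buf_t, with its own copy of
 * the data, to the same header, so each consumer can later
 * arc_release() or evict its buffer independently.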
2903 */ 2904 buf = hdr->b_buf; 2905 ASSERT(buf); 2906 ASSERT(buf->b_data); 2907 if (HDR_BUF_AVAILABLE(hdr)) { 2908 ASSERT(buf->b_efunc == NULL); 2909 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2910 } else { 2911 buf = arc_buf_clone(buf); 2912 } 2913 } else if (*arc_flags & ARC_PREFETCH && 2914 refcount_count(&hdr->b_refcnt) == 0) { 2915 hdr->b_flags |= ARC_PREFETCH; 2916 } 2917 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2918 arc_access(hdr, hash_lock); 2919 if (*arc_flags & ARC_L2CACHE) 2920 hdr->b_flags |= ARC_L2CACHE; 2921 mutex_exit(hash_lock); 2922 ARCSTAT_BUMP(arcstat_hits); 2923 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2924 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2925 data, metadata, hits); 2926 2927 if (done) 2928 done(NULL, buf, private); 2929 } else { 2930 uint64_t size = BP_GET_LSIZE(bp); 2931 arc_callback_t *acb; 2932 vdev_t *vd = NULL; 2933 uint64_t addr; 2934 boolean_t devw = B_FALSE; 2935 2936 if (hdr == NULL) { 2937 /* this block is not in the cache */ 2938 arc_buf_hdr_t *exists; 2939 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2940 buf = arc_buf_alloc(spa, size, private, type); 2941 hdr = buf->b_hdr; 2942 hdr->b_dva = *BP_IDENTITY(bp); 2943 hdr->b_birth = bp->blk_birth; 2944 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2945 exists = buf_hash_insert(hdr, &hash_lock); 2946 if (exists) { 2947 /* somebody beat us to the hash insert */ 2948 mutex_exit(hash_lock); 2949 bzero(&hdr->b_dva, sizeof (dva_t)); 2950 hdr->b_birth = 0; 2951 hdr->b_cksum0 = 0; 2952 (void) arc_buf_remove_ref(buf, private); 2953 goto top; /* restart the IO request */ 2954 } 2955 /* if this is a prefetch, we don't have a reference */ 2956 if (*arc_flags & ARC_PREFETCH) { 2957 (void) remove_reference(hdr, hash_lock, 2958 private); 2959 hdr->b_flags |= ARC_PREFETCH; 2960 } 2961 if (*arc_flags & ARC_L2CACHE) 2962 hdr->b_flags |= ARC_L2CACHE; 2963 if (BP_GET_LEVEL(bp) > 0) 2964 hdr->b_flags |= ARC_INDIRECT; 2965 } else { 2966 /* this block is in the ghost cache */ 2967 ASSERT(GHOST_STATE(hdr->b_state)); 2968 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2969 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2970 ASSERT(hdr->b_buf == NULL); 2971 2972 /* if this is a prefetch, we don't have a reference */ 2973 if (*arc_flags & ARC_PREFETCH) 2974 hdr->b_flags |= ARC_PREFETCH; 2975 else 2976 add_reference(hdr, hash_lock, private); 2977 if (*arc_flags & ARC_L2CACHE) 2978 hdr->b_flags |= ARC_L2CACHE; 2979 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2980 buf->b_hdr = hdr; 2981 buf->b_data = NULL; 2982 buf->b_efunc = NULL; 2983 buf->b_private = NULL; 2984 buf->b_next = NULL; 2985 hdr->b_buf = buf; 2986 arc_get_data_buf(buf); 2987 ASSERT(hdr->b_datacnt == 0); 2988 hdr->b_datacnt = 1; 2989 2990 } 2991 2992 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2993 acb->acb_done = done; 2994 acb->acb_private = private; 2995 2996 ASSERT(hdr->b_acb == NULL); 2997 hdr->b_acb = acb; 2998 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2999 3000 /* 3001 * If the buffer has been evicted, migrate it to a present state 3002 * before issuing the I/O. Once we drop the hash-table lock, 3003 * the header will be marked as I/O in progress and have an 3004 * attached buffer. At this point, anybody who finds this 3005 * buffer ought to notice that it's legit but has a pending I/O. 
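 * If the header still carries L2ARC metadata, the target vdev and
 * device address are also captured here, while the hash lock is held,
 * so the read below can be attempted against the cache device first.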
3006 */ 3007 3008 if (GHOST_STATE(hdr->b_state)) 3009 arc_access(hdr, hash_lock); 3010 3011 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 3012 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 3013 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 3014 addr = hdr->b_l2hdr->b_daddr; 3015 /* 3016 * Lock out device removal. 3017 */ 3018 if (vdev_is_dead(vd) || 3019 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3020 vd = NULL; 3021 } 3022 3023 mutex_exit(hash_lock); 3024 3025 ASSERT3U(hdr->b_size, ==, size); 3026 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 3027 zbookmark_t *, zb); 3028 ARCSTAT_BUMP(arcstat_misses); 3029 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3030 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3031 data, metadata, misses); 3032 3033 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3034 /* 3035 * Read from the L2ARC if the following are true: 3036 * 1. The L2ARC vdev was previously cached. 3037 * 2. This buffer still has L2ARC metadata. 3038 * 3. This buffer isn't currently writing to the L2ARC. 3039 * 4. The L2ARC entry wasn't evicted, which may 3040 * also have invalidated the vdev. 3041 * 5. This isn't prefetch and l2arc_noprefetch is set. 3042 */ 3043 if (hdr->b_l2hdr != NULL && 3044 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3045 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3046 l2arc_read_callback_t *cb; 3047 3048 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3049 ARCSTAT_BUMP(arcstat_l2_hits); 3050 3051 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3052 KM_SLEEP); 3053 cb->l2rcb_buf = buf; 3054 cb->l2rcb_spa = spa; 3055 cb->l2rcb_bp = *bp; 3056 cb->l2rcb_zb = *zb; 3057 cb->l2rcb_flags = zio_flags; 3058 3059 /* 3060 * l2arc read. The SCL_L2ARC lock will be 3061 * released by l2arc_read_done(). 3062 */ 3063 rzio = zio_read_phys(pio, vd, addr, size, 3064 buf->b_data, ZIO_CHECKSUM_OFF, 3065 l2arc_read_done, cb, priority, zio_flags | 3066 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 3067 ZIO_FLAG_DONT_PROPAGATE | 3068 ZIO_FLAG_DONT_RETRY, B_FALSE); 3069 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3070 zio_t *, rzio); 3071 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 3072 3073 if (*arc_flags & ARC_NOWAIT) { 3074 zio_nowait(rzio); 3075 return (0); 3076 } 3077 3078 ASSERT(*arc_flags & ARC_WAIT); 3079 if (zio_wait(rzio) == 0) 3080 return (0); 3081 3082 /* l2arc read error; goto zio_read() */ 3083 } else { 3084 DTRACE_PROBE1(l2arc__miss, 3085 arc_buf_hdr_t *, hdr); 3086 ARCSTAT_BUMP(arcstat_l2_misses); 3087 if (HDR_L2_WRITING(hdr)) 3088 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3089 spa_config_exit(spa, SCL_L2ARC, vd); 3090 } 3091 } else { 3092 if (vd != NULL) 3093 spa_config_exit(spa, SCL_L2ARC, vd); 3094 if (l2arc_ndev != 0) { 3095 DTRACE_PROBE1(l2arc__miss, 3096 arc_buf_hdr_t *, hdr); 3097 ARCSTAT_BUMP(arcstat_l2_misses); 3098 } 3099 } 3100 3101 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3102 arc_read_done, buf, priority, zio_flags, zb); 3103 3104 if (*arc_flags & ARC_WAIT) 3105 return (zio_wait(rzio)); 3106 3107 ASSERT(*arc_flags & ARC_NOWAIT); 3108 zio_nowait(rzio); 3109 } 3110 return (0); 3111} 3112 3113/* 3114 * arc_read() variant to support pool traversal. If the block is already 3115 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 3116 * The idea is that we don't want pool traversal filling up memory, but 3117 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 
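 * Returns 0 and copies the block into 'data' on a hit; returns ENOENT,
 * without issuing any I/O, when the block is absent or a read for it
 * is still in progress.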
3118 */ 3119int 3120arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 3121{ 3122 arc_buf_hdr_t *hdr; 3123 kmutex_t *hash_mtx; 3124 uint64_t guid = spa_guid(spa); 3125 int rc = 0; 3126 3127 hdr = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 3128 3129 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 3130 arc_buf_t *buf = hdr->b_buf; 3131 3132 ASSERT(buf); 3133 while (buf->b_data == NULL) { 3134 buf = buf->b_next; 3135 ASSERT(buf); 3136 } 3137 bcopy(buf->b_data, data, hdr->b_size); 3138 } else { 3139 rc = ENOENT; 3140 } 3141 3142 if (hash_mtx) 3143 mutex_exit(hash_mtx); 3144 3145 return (rc); 3146} 3147 3148void 3149arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3150{ 3151 ASSERT(buf->b_hdr != NULL); 3152 ASSERT(buf->b_hdr->b_state != arc_anon); 3153 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3154 buf->b_efunc = func; 3155 buf->b_private = private; 3156} 3157 3158/* 3159 * This is used by the DMU to let the ARC know that a buffer is 3160 * being evicted, so the ARC should clean up. If this arc buf 3161 * is not yet in the evicted state, it will be put there. 3162 */ 3163int 3164arc_buf_evict(arc_buf_t *buf) 3165{ 3166 arc_buf_hdr_t *hdr; 3167 kmutex_t *hash_lock; 3168 arc_buf_t **bufp; 3169 list_t *list, *evicted_list; 3170 kmutex_t *lock, *evicted_lock; 3171 3172 rw_enter(&buf->b_lock, RW_WRITER); 3173 hdr = buf->b_hdr; 3174 if (hdr == NULL) { 3175 /* 3176 * We are in arc_do_user_evicts(). 3177 */ 3178 ASSERT(buf->b_data == NULL); 3179 rw_exit(&buf->b_lock); 3180 return (0); 3181 } else if (buf->b_data == NULL) { 3182 arc_buf_t copy = *buf; /* structure assignment */ 3183 /* 3184 * We are on the eviction list; process this buffer now 3185 * but let arc_do_user_evicts() do the reaping. 3186 */ 3187 buf->b_efunc = NULL; 3188 rw_exit(&buf->b_lock); 3189 VERIFY(copy.b_efunc(©) == 0); 3190 return (1); 3191 } 3192 hash_lock = HDR_LOCK(hdr); 3193 mutex_enter(hash_lock); 3194 3195 ASSERT(buf->b_hdr == hdr); 3196 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3197 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3198 3199 /* 3200 * Pull this buffer off of the hdr 3201 */ 3202 bufp = &hdr->b_buf; 3203 while (*bufp != buf) 3204 bufp = &(*bufp)->b_next; 3205 *bufp = buf->b_next; 3206 3207 ASSERT(buf->b_data != NULL); 3208 arc_buf_destroy(buf, FALSE, FALSE); 3209 3210 if (hdr->b_datacnt == 0) { 3211 arc_state_t *old_state = hdr->b_state; 3212 arc_state_t *evicted_state; 3213 3214 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3215 3216 evicted_state = 3217 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3218 3219 get_buf_info(hdr, old_state, &list, &lock); 3220 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3221 mutex_enter(lock); 3222 mutex_enter(evicted_lock); 3223 3224 arc_change_state(evicted_state, hdr, hash_lock); 3225 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3226 hdr->b_flags |= ARC_IN_HASH_TABLE; 3227 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3228 3229 mutex_exit(evicted_lock); 3230 mutex_exit(lock); 3231 } 3232 mutex_exit(hash_lock); 3233 rw_exit(&buf->b_lock); 3234 3235 VERIFY(buf->b_efunc(buf) == 0); 3236 buf->b_efunc = NULL; 3237 buf->b_private = NULL; 3238 buf->b_hdr = NULL; 3239 kmem_cache_free(buf_cache, buf); 3240 return (1); 3241} 3242 3243/* 3244 * Release this buffer from the cache. This must be done 3245 * after a read and prior to modifying the buffer contents. 3246 * If the buffer has more than one reference, we must make 3247 * a new hdr for the buffer. 
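 * In that case the released buffer is given a fresh anonymous header
 * of its own (see the b_datacnt > 1 branch below), while the remaining
 * buffers stay with the original, still-hashed header.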
3248 */ 3249void 3250arc_release(arc_buf_t *buf, void *tag) 3251{ 3252 arc_buf_hdr_t *hdr; 3253 kmutex_t *hash_lock; 3254 l2arc_buf_hdr_t *l2hdr; 3255 uint64_t buf_size; 3256 boolean_t released = B_FALSE; 3257 3258 rw_enter(&buf->b_lock, RW_WRITER); 3259 hdr = buf->b_hdr; 3260 3261 /* this buffer is not on any list */ 3262 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3263 ASSERT(!(hdr->b_flags & ARC_STORED)); 3264 3265 if (hdr->b_state == arc_anon) { 3266 /* this buffer is already released */ 3267 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 3268 ASSERT(BUF_EMPTY(hdr)); 3269 ASSERT(buf->b_efunc == NULL); 3270 arc_buf_thaw(buf); 3271 rw_exit(&buf->b_lock); 3272 released = B_TRUE; 3273 } else { 3274 hash_lock = HDR_LOCK(hdr); 3275 mutex_enter(hash_lock); 3276 } 3277 3278 l2hdr = hdr->b_l2hdr; 3279 if (l2hdr) { 3280 mutex_enter(&l2arc_buflist_mtx); 3281 hdr->b_l2hdr = NULL; 3282 buf_size = hdr->b_size; 3283 } 3284 3285 if (released) 3286 goto out; 3287 3288 /* 3289 * Do we have more than one buf? 3290 */ 3291 if (hdr->b_datacnt > 1) { 3292 arc_buf_hdr_t *nhdr; 3293 arc_buf_t **bufp; 3294 uint64_t blksz = hdr->b_size; 3295 uint64_t spa = hdr->b_spa; 3296 arc_buf_contents_t type = hdr->b_type; 3297 uint32_t flags = hdr->b_flags; 3298 3299 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3300 /* 3301 * Pull the data off of this buf and attach it to 3302 * a new anonymous buf. 3303 */ 3304 (void) remove_reference(hdr, hash_lock, tag); 3305 bufp = &hdr->b_buf; 3306 while (*bufp != buf) 3307 bufp = &(*bufp)->b_next; 3308 *bufp = (*bufp)->b_next; 3309 buf->b_next = NULL; 3310 3311 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3312 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3313 if (refcount_is_zero(&hdr->b_refcnt)) { 3314 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3315 ASSERT3U(*size, >=, hdr->b_size); 3316 atomic_add_64(size, -hdr->b_size); 3317 } 3318 hdr->b_datacnt -= 1; 3319 arc_cksum_verify(buf); 3320 3321 mutex_exit(hash_lock); 3322 3323 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3324 nhdr->b_size = blksz; 3325 nhdr->b_spa = spa; 3326 nhdr->b_type = type; 3327 nhdr->b_buf = buf; 3328 nhdr->b_state = arc_anon; 3329 nhdr->b_arc_access = 0; 3330 nhdr->b_flags = flags & ARC_L2_WRITING; 3331 nhdr->b_l2hdr = NULL; 3332 nhdr->b_datacnt = 1; 3333 nhdr->b_freeze_cksum = NULL; 3334 (void) refcount_add(&nhdr->b_refcnt, tag); 3335 buf->b_hdr = nhdr; 3336 rw_exit(&buf->b_lock); 3337 atomic_add_64(&arc_anon->arcs_size, blksz); 3338 } else { 3339 rw_exit(&buf->b_lock); 3340 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3341 ASSERT(!list_link_active(&hdr->b_arc_node)); 3342 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3343 arc_change_state(arc_anon, hdr, hash_lock); 3344 hdr->b_arc_access = 0; 3345 mutex_exit(hash_lock); 3346 3347 bzero(&hdr->b_dva, sizeof (dva_t)); 3348 hdr->b_birth = 0; 3349 hdr->b_cksum0 = 0; 3350 arc_buf_thaw(buf); 3351 } 3352 buf->b_efunc = NULL; 3353 buf->b_private = NULL; 3354 3355out: 3356 if (l2hdr) { 3357 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3358 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3359 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3360 mutex_exit(&l2arc_buflist_mtx); 3361 } 3362} 3363 3364int 3365arc_released(arc_buf_t *buf) 3366{ 3367 int released; 3368 3369 rw_enter(&buf->b_lock, RW_READER); 3370 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3371 rw_exit(&buf->b_lock); 3372 return (released); 3373} 3374 3375int 3376arc_has_callback(arc_buf_t *buf) 3377{ 3378 int callback; 3379 3380 rw_enter(&buf->b_lock, 
RW_READER); 3381 callback = (buf->b_efunc != NULL); 3382 rw_exit(&buf->b_lock); 3383 return (callback); 3384} 3385 3386#ifdef ZFS_DEBUG 3387int 3388arc_referenced(arc_buf_t *buf) 3389{ 3390 int referenced; 3391 3392 rw_enter(&buf->b_lock, RW_READER); 3393 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3394 rw_exit(&buf->b_lock); 3395 return (referenced); 3396} 3397#endif 3398 3399static void 3400arc_write_ready(zio_t *zio) 3401{ 3402 arc_write_callback_t *callback = zio->io_private; 3403 arc_buf_t *buf = callback->awcb_buf; 3404 arc_buf_hdr_t *hdr = buf->b_hdr; 3405 3406 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3407 callback->awcb_ready(zio, buf, callback->awcb_private); 3408 3409 /* 3410 * If the IO is already in progress, then this is a re-write 3411 * attempt, so we need to thaw and re-compute the cksum. 3412 * It is the responsibility of the callback to handle the 3413 * accounting for any re-write attempt. 3414 */ 3415 if (HDR_IO_IN_PROGRESS(hdr)) { 3416 mutex_enter(&hdr->b_freeze_lock); 3417 if (hdr->b_freeze_cksum != NULL) { 3418 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3419 hdr->b_freeze_cksum = NULL; 3420 } 3421 mutex_exit(&hdr->b_freeze_lock); 3422 } 3423 arc_cksum_compute(buf, B_FALSE); 3424 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3425} 3426 3427static void 3428arc_write_done(zio_t *zio) 3429{ 3430 arc_write_callback_t *callback = zio->io_private; 3431 arc_buf_t *buf = callback->awcb_buf; 3432 arc_buf_hdr_t *hdr = buf->b_hdr; 3433 3434 hdr->b_acb = NULL; 3435 3436 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3437 hdr->b_birth = zio->io_bp->blk_birth; 3438 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3439 /* 3440 * If the block to be written was all-zero, we may have 3441 * compressed it away. In this case no write was performed 3442 * so there will be no dva/birth-date/checksum. The buffer 3443 * must therefor remain anonymous (and uncached). 3444 */ 3445 if (!BUF_EMPTY(hdr)) { 3446 arc_buf_hdr_t *exists; 3447 kmutex_t *hash_lock; 3448 3449 arc_cksum_verify(buf); 3450 3451 exists = buf_hash_insert(hdr, &hash_lock); 3452 if (exists) { 3453 /* 3454 * This can only happen if we overwrite for 3455 * sync-to-convergence, because we remove 3456 * buffers from the hash table when we arc_free(). 3457 */ 3458 ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); 3459 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 3460 BP_IDENTITY(zio->io_bp))); 3461 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 3462 zio->io_bp->blk_birth); 3463 3464 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3465 arc_change_state(arc_anon, exists, hash_lock); 3466 mutex_exit(hash_lock); 3467 arc_hdr_destroy(exists); 3468 exists = buf_hash_insert(hdr, &hash_lock); 3469 ASSERT3P(exists, ==, NULL); 3470 } 3471 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3472 /* if it's not anon, we are doing a scrub */ 3473 if (hdr->b_state == arc_anon) 3474 arc_access(hdr, hash_lock); 3475 mutex_exit(hash_lock); 3476 } else if (callback->awcb_done == NULL) { 3477 int destroy_hdr; 3478 /* 3479 * This is an anonymous buffer with no user callback, 3480 * destroy it if there are no active references. 
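 * As in arc_buf_free(), arc_eviction_mtx makes the refcount check and
 * the clearing of ARC_IO_IN_PROGRESS atomic with respect to a
 * concurrent release of the last reference, so the header is destroyed
 * exactly once.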
3481 */ 3482 mutex_enter(&arc_eviction_mtx); 3483 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3484 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3485 mutex_exit(&arc_eviction_mtx); 3486 if (destroy_hdr) 3487 arc_hdr_destroy(hdr); 3488 } else { 3489 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3490 } 3491 hdr->b_flags &= ~ARC_STORED; 3492 3493 if (callback->awcb_done) { 3494 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3495 callback->awcb_done(zio, buf, callback->awcb_private); 3496 } 3497 3498 kmem_free(callback, sizeof (arc_write_callback_t)); 3499} 3500 3501static void 3502write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) 3503{ 3504 boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); 3505 3506 /* Determine checksum setting */ 3507 if (ismd) { 3508 /* 3509 * Metadata always gets checksummed. If the data 3510 * checksum is multi-bit correctable, and it's not a 3511 * ZBT-style checksum, then it's suitable for metadata 3512 * as well. Otherwise, the metadata checksum defaults 3513 * to fletcher4. 3514 */ 3515 if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && 3516 !zio_checksum_table[wp->wp_oschecksum].ci_zbt) 3517 zp->zp_checksum = wp->wp_oschecksum; 3518 else 3519 zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; 3520 } else { 3521 zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, 3522 wp->wp_oschecksum); 3523 } 3524 3525 /* Determine compression setting */ 3526 if (ismd) { 3527 /* 3528 * XXX -- we should design a compression algorithm 3529 * that specializes in arrays of bps. 3530 */ 3531 zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : 3532 ZIO_COMPRESS_LZJB; 3533 } else { 3534 zp->zp_compress = zio_compress_select(wp->wp_dncompress, 3535 wp->wp_oscompress); 3536 } 3537 3538 zp->zp_type = wp->wp_type; 3539 zp->zp_level = wp->wp_level; 3540 zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); 3541} 3542 3543zio_t * 3544arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, 3545 boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3546 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3547 int zio_flags, const zbookmark_t *zb) 3548{ 3549 arc_buf_hdr_t *hdr = buf->b_hdr; 3550 arc_write_callback_t *callback; 3551 zio_t *zio; 3552 zio_prop_t zp; 3553 3554 ASSERT(ready != NULL); 3555 ASSERT(!HDR_IO_ERROR(hdr)); 3556 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3557 ASSERT(hdr->b_acb == 0); 3558 if (l2arc) 3559 hdr->b_flags |= ARC_L2CACHE; 3560 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3561 callback->awcb_ready = ready; 3562 callback->awcb_done = done; 3563 callback->awcb_private = private; 3564 callback->awcb_buf = buf; 3565 3566 write_policy(spa, wp, &zp); 3567 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, 3568 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3569 3570 return (zio); 3571} 3572 3573int 3574arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3575 zio_done_func_t *done, void *private, uint32_t arc_flags) 3576{ 3577 arc_buf_hdr_t *ab; 3578 kmutex_t *hash_lock; 3579 zio_t *zio; 3580 uint64_t guid = spa_guid(spa); 3581 3582 /* 3583 * If this buffer is in the cache, release it, so it 3584 * can be re-used. 3585 */ 3586 ab = buf_hash_find(guid, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3587 if (ab != NULL) { 3588 /* 3589 * The checksum of blocks to free is not always 3590 * preserved (eg. on the deadlist). However, if it is 3591 * nonzero, it should match what we have in the cache. 
3592 */ 3593 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3594 bp->blk_cksum.zc_word[0] == ab->b_cksum0 || 3595 bp->blk_fill == BLK_FILL_ALREADY_FREED); 3596 3597 if (ab->b_state != arc_anon) 3598 arc_change_state(arc_anon, ab, hash_lock); 3599 if (HDR_IO_IN_PROGRESS(ab)) { 3600 /* 3601 * This should only happen when we prefetch. 3602 */ 3603 ASSERT(ab->b_flags & ARC_PREFETCH); 3604 ASSERT3U(ab->b_datacnt, ==, 1); 3605 ab->b_flags |= ARC_FREED_IN_READ; 3606 if (HDR_IN_HASH_TABLE(ab)) 3607 buf_hash_remove(ab); 3608 ab->b_arc_access = 0; 3609 bzero(&ab->b_dva, sizeof (dva_t)); 3610 ab->b_birth = 0; 3611 ab->b_cksum0 = 0; 3612 ab->b_buf->b_efunc = NULL; 3613 ab->b_buf->b_private = NULL; 3614 mutex_exit(hash_lock); 3615 } else if (refcount_is_zero(&ab->b_refcnt)) { 3616 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3617 mutex_exit(hash_lock); 3618 arc_hdr_destroy(ab); 3619 ARCSTAT_BUMP(arcstat_deleted); 3620 } else { 3621 /* 3622 * We still have an active reference on this 3623 * buffer. This can happen, e.g., from 3624 * dbuf_unoverride(). 3625 */ 3626 ASSERT(!HDR_IN_HASH_TABLE(ab)); 3627 ab->b_arc_access = 0; 3628 bzero(&ab->b_dva, sizeof (dva_t)); 3629 ab->b_birth = 0; 3630 ab->b_cksum0 = 0; 3631 ab->b_buf->b_efunc = NULL; 3632 ab->b_buf->b_private = NULL; 3633 mutex_exit(hash_lock); 3634 } 3635 } 3636 3637 zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); 3638 3639 if (arc_flags & ARC_WAIT) 3640 return (zio_wait(zio)); 3641 3642 ASSERT(arc_flags & ARC_NOWAIT); 3643 zio_nowait(zio); 3644 3645 return (0); 3646} 3647 3648static int 3649arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg) 3650{ 3651#ifdef _KERNEL 3652 uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count 3653 + cnt.v_cache_count); 3654 static uint64_t page_load = 0; 3655 static uint64_t last_txg = 0; 3656 3657#if 0 3658#if defined(__i386) 3659 available_memory = 3660 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3661#endif 3662#endif 3663 if (available_memory >= zfs_write_limit_max) 3664 return (0); 3665 3666 if (txg > last_txg) { 3667 last_txg = txg; 3668 page_load = 0; 3669 } 3670 /* 3671 * If we are in pageout, we know that memory is already tight, 3672 * the arc is already going to be evicting, so we just want to 3673 * continue to let page writes occur as quickly as possible. 
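 * The pageout daemon is therefore only pushed back (ERESTART) once its
 * own accumulated dirty data exceeds a quarter of available memory,
 * whereas ordinary writers are delayed (EAGAIN) as soon as memory is
 * low and any page load has built up.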
3674 */ 3675 if (curproc == pageproc) { 3676 if (page_load > available_memory / 4) 3677 return (ERESTART); 3678 /* Note: reserve is inflated, so we deflate */ 3679 page_load += reserve / 8; 3680 return (0); 3681 } else if (page_load > 0 && arc_reclaim_needed()) { 3682 /* memory is low, delay before restarting */ 3683 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3684 return (EAGAIN); 3685 } 3686 page_load = 0; 3687 3688 if (arc_size > arc_c_min) { 3689 uint64_t evictable_memory = 3690 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3691 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3692 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3693 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3694 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3695 } 3696 3697 if (inflight_data > available_memory / 4) { 3698 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3699 return (ERESTART); 3700 } 3701#endif 3702 return (0); 3703} 3704 3705void 3706arc_tempreserve_clear(uint64_t reserve) 3707{ 3708 atomic_add_64(&arc_tempreserve, -reserve); 3709 ASSERT((int64_t)arc_tempreserve >= 0); 3710} 3711 3712int 3713arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3714{ 3715 int error; 3716 uint64_t anon_size; 3717 3718#ifdef ZFS_DEBUG 3719 /* 3720 * Once in a while, fail for no reason. Everything should cope. 3721 */ 3722 if (spa_get_random(10000) == 0) { 3723 dprintf("forcing random failure\n"); 3724 return (ERESTART); 3725 } 3726#endif 3727 if (reserve > arc_c/4 && !arc_no_grow) 3728 arc_c = MIN(arc_c_max, reserve * 4); 3729 if (reserve > arc_c) 3730 return (ENOMEM); 3731 3732 /* 3733 * Don't count loaned bufs as in flight dirty data to prevent long 3734 * network delays from blocking transactions that are ready to be 3735 * assigned to a txg. 3736 */ 3737 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3738 3739 /* 3740 * Writes will, almost always, require additional memory allocations 3741 * in order to compress/encrypt/etc the data. We therefor need to 3742 * make sure that there is sufficient available memory for this. 3743 */ 3744 if (error = arc_memory_throttle(reserve, anon_size, txg)) 3745 return (error); 3746 3747 /* 3748 * Throttle writes when the amount of dirty data in the cache 3749 * gets too large. We try to keep the cache less than half full 3750 * of dirty blocks so that our sync times don't grow too large. 3751 * Note: if two requests come in concurrently, we might let them 3752 * both succeed, when one of them should fail. Not a huge deal. 3753 */ 3754 3755 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3756 anon_size > arc_c / 4) { 3757 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3758 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3759 arc_tempreserve>>10, 3760 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3761 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3762 reserve>>10, arc_c>>10); 3763 return (ERESTART); 3764 } 3765 atomic_add_64(&arc_tempreserve, reserve); 3766 return (0); 3767} 3768 3769static kmutex_t arc_lowmem_lock; 3770#ifdef _KERNEL 3771static eventhandler_tag arc_event_lowmem = NULL; 3772 3773static void 3774arc_lowmem(void *arg __unused, int howto __unused) 3775{ 3776 3777 /* Serialize access via arc_lowmem_lock. 
*/ 3778 mutex_enter(&arc_lowmem_lock); 3779 needfree = 1; 3780 cv_signal(&arc_reclaim_thr_cv); 3781 while (needfree) 3782 tsleep(&needfree, 0, "zfs:lowmem", hz / 5); 3783 mutex_exit(&arc_lowmem_lock); 3784} 3785#endif 3786 3787void 3788arc_init(void) 3789{ 3790 int prefetch_tunable_set = 0; 3791 int i; 3792 3793 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3794 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3795 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3796 3797 /* Convert seconds to clock ticks */ 3798 arc_min_prefetch_lifespan = 1 * hz; 3799 3800 /* Start out with 1/8 of all memory */ 3801 arc_c = kmem_size() / 8; 3802#if 0 3803#ifdef _KERNEL 3804 /* 3805 * On architectures where the physical memory can be larger 3806 * than the addressable space (intel in 32-bit mode), we may 3807 * need to limit the cache to 1/8 of VM size. 3808 */ 3809 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3810#endif 3811#endif 3812 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3813 arc_c_min = MAX(arc_c / 4, 64<<18); 3814 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 3815 if (arc_c * 8 >= 1<<30) 3816 arc_c_max = (arc_c * 8) - (1<<30); 3817 else 3818 arc_c_max = arc_c_min; 3819 arc_c_max = MAX(arc_c * 5, arc_c_max); 3820#ifdef _KERNEL 3821 /* 3822 * Allow the tunables to override our calculations if they are 3823 * reasonable (ie. over 16MB) 3824 */ 3825 if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size()) 3826 arc_c_max = zfs_arc_max; 3827 if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max) 3828 arc_c_min = zfs_arc_min; 3829#endif 3830 arc_c = arc_c_max; 3831 arc_p = (arc_c >> 1); 3832 3833 /* limit meta-data to 1/4 of the arc capacity */ 3834 arc_meta_limit = arc_c_max / 4; 3835 3836 /* Allow the tunable to override if it is reasonable */ 3837 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3838 arc_meta_limit = zfs_arc_meta_limit; 3839 3840 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3841 arc_c_min = arc_meta_limit / 2; 3842 3843 if (zfs_arc_grow_retry > 0) 3844 arc_grow_retry = zfs_arc_grow_retry; 3845 3846 if (zfs_arc_shrink_shift > 0) 3847 arc_shrink_shift = zfs_arc_shrink_shift; 3848 3849 if (zfs_arc_p_min_shift > 0) 3850 arc_p_min_shift = zfs_arc_p_min_shift; 3851 3852 /* if kmem_flags are set, lets try to use less memory */ 3853 if (kmem_debugging()) 3854 arc_c = arc_c / 2; 3855 if (arc_c < arc_c_min) 3856 arc_c = arc_c_min; 3857 3858 zfs_arc_min = arc_c_min; 3859 zfs_arc_max = arc_c_max; 3860 3861 arc_anon = &ARC_anon; 3862 arc_mru = &ARC_mru; 3863 arc_mru_ghost = &ARC_mru_ghost; 3864 arc_mfu = &ARC_mfu; 3865 arc_mfu_ghost = &ARC_mfu_ghost; 3866 arc_l2c_only = &ARC_l2c_only; 3867 arc_size = 0; 3868 3869 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3870 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 3871 NULL, MUTEX_DEFAULT, NULL); 3872 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 3873 NULL, MUTEX_DEFAULT, NULL); 3874 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 3875 NULL, MUTEX_DEFAULT, NULL); 3876 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 3877 NULL, MUTEX_DEFAULT, NULL); 3878 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 3879 NULL, MUTEX_DEFAULT, NULL); 3880 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 3881 NULL, MUTEX_DEFAULT, NULL); 3882 3883 list_create(&arc_mru->arcs_lists[i], 3884 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3885 list_create(&arc_mru_ghost->arcs_lists[i], 3886 sizeof (arc_buf_hdr_t), 
offsetof(arc_buf_hdr_t, b_arc_node)); 3887 list_create(&arc_mfu->arcs_lists[i], 3888 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3889 list_create(&arc_mfu_ghost->arcs_lists[i], 3890 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3893 list_create(&arc_l2c_only->arcs_lists[i], 3894 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3895 } 3896 3897 buf_init(); 3898 3899 arc_thread_exit = 0; 3900 arc_eviction_list = NULL; 3901 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3902 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3903 3904 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3905 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3906 3907 if (arc_ksp != NULL) { 3908 arc_ksp->ks_data = &arc_stats; 3909 kstat_install(arc_ksp); 3910 } 3911 3912 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3913 TS_RUN, minclsyspri); 3914 3915#ifdef _KERNEL 3916 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3917 EVENTHANDLER_PRI_FIRST); 3918#endif 3919 3920 arc_dead = FALSE; 3921 arc_warm = B_FALSE; 3922 3923 if (zfs_write_limit_max == 0) 3924 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3925 else 3926 zfs_write_limit_shift = 0; 3927 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3928 3929#ifdef _KERNEL 3930 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3931 prefetch_tunable_set = 1; 3932 3933#ifdef __i386__ 3934 if (prefetch_tunable_set == 0) { 3935 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3936 "-- to enable,\n"); 3937 printf(" add \"vfs.zfs.prefetch_disable=0\" " 3938 "to /boot/loader.conf.\n"); 3939 zfs_prefetch_disable=1; 3940 } 3941#else 3942 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3943 prefetch_tunable_set == 0) { 3944 printf("ZFS NOTICE: Prefetch is disabled by default if less " 3945 "than 4GB of RAM is present;\n" 3946 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3947 "to /boot/loader.conf.\n"); 3948 zfs_prefetch_disable=1; 3949 } 3950#endif 3951 /* Warn about ZFS memory and address space requirements.
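 *
 * The tunables named in the notices above and the warnings below are
 * all set from /boot/loader.conf; an illustrative entry (placeholder
 * values only, sized to the 512MB minimum warned about below) might
 * look like:
 *
 *	vfs.zfs.prefetch_disable=0
 *	vm.kmem_size="512M"
 *	vm.kmem_size_max="512M"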
*/ 3952 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3953 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3954 "expect unstable behavior.\n"); 3955 } 3956 if (kmem_size() < 512 * (1 << 20)) { 3957 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3958 "expect unstable behavior.\n"); 3959 printf(" Consider tuning vm.kmem_size and " 3960 "vm.kmem_size_max\n"); 3961 printf(" in /boot/loader.conf.\n"); 3962 } 3963#endif 3964} 3965 3966void 3967arc_fini(void) 3968{ 3969 int i; 3970 3971 mutex_enter(&arc_reclaim_thr_lock); 3972 arc_thread_exit = 1; 3973 cv_signal(&arc_reclaim_thr_cv); 3974 while (arc_thread_exit != 0) 3975 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3976 mutex_exit(&arc_reclaim_thr_lock); 3977 3978 arc_flush(NULL); 3979 3980 arc_dead = TRUE; 3981 3982 if (arc_ksp != NULL) { 3983 kstat_delete(arc_ksp); 3984 arc_ksp = NULL; 3985 } 3986 3987 mutex_destroy(&arc_eviction_mtx); 3988 mutex_destroy(&arc_reclaim_thr_lock); 3989 cv_destroy(&arc_reclaim_thr_cv); 3990 3991 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 3992 list_destroy(&arc_mru->arcs_lists[i]); 3993 list_destroy(&arc_mru_ghost->arcs_lists[i]); 3994 list_destroy(&arc_mfu->arcs_lists[i]); 3995 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 3996 list_destroy(&arc_l2c_only->arcs_lists[i]); 3997 3998 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 3999 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 4000 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 4001 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 4002 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 4003 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 4004 } 4005 4006 mutex_destroy(&zfs_write_limit_lock); 4007 4008 buf_fini(); 4009 4010 ASSERT(arc_loaned_bytes == 0); 4011 4012 mutex_destroy(&arc_lowmem_lock); 4013#ifdef _KERNEL 4014 if (arc_event_lowmem != NULL) 4015 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 4016#endif 4017} 4018 4019/* 4020 * Level 2 ARC 4021 * 4022 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 4023 * It uses dedicated storage devices to hold cached data, which are populated 4024 * using large infrequent writes. The main role of this cache is to boost 4025 * the performance of random read workloads. The intended L2ARC devices 4026 * include short-stroked disks, solid state disks, and other media with 4027 * substantially faster read latency than disk. 4028 * 4029 * +-----------------------+ 4030 * | ARC | 4031 * +-----------------------+ 4032 * | ^ ^ 4033 * | | | 4034 * l2arc_feed_thread() arc_read() 4035 * | | | 4036 * | l2arc read | 4037 * V | | 4038 * +---------------+ | 4039 * | L2ARC | | 4040 * +---------------+ | 4041 * | ^ | 4042 * l2arc_write() | | 4043 * | | | 4044 * V | | 4045 * +-------+ +-------+ 4046 * | vdev | | vdev | 4047 * | cache | | cache | 4048 * +-------+ +-------+ 4049 * +=========+ .-----. 4050 * : L2ARC : |-_____-| 4051 * : devices : | Disks | 4052 * +=========+ `-_____-' 4053 * 4054 * Read requests are satisfied from the following sources, in order: 4055 * 4056 * 1) ARC 4057 * 2) vdev cache of L2ARC devices 4058 * 3) L2ARC devices 4059 * 4) vdev cache of disks 4060 * 5) disks 4061 * 4062 * Some L2ARC device types exhibit extremely slow write performance. 4063 * To accommodate for this there are some significant differences between 4064 * the L2ARC and traditional cache design: 4065 * 4066 * 1. There is no eviction path from the ARC to the L2ARC. 
Evictions from 4067 * the ARC behave as usual, freeing buffers and placing headers on ghost 4068 * lists. The ARC does not send buffers to the L2ARC during eviction as 4069 * this would add inflated write latencies for all ARC memory pressure. 4070 * 4071 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4072 * It does this by periodically scanning buffers from the eviction-end of 4073 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4074 * not already there. It scans until a headroom of buffers is satisfied, 4075 * which itself is a buffer for ARC eviction. The thread that does this is 4076 * l2arc_feed_thread(), illustrated below; example sizes are included to 4077 * provide a better sense of ratio than this diagram: 4078 * 4079 * head --> tail 4080 * +---------------------+----------+ 4081 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4082 * +---------------------+----------+ | o L2ARC eligible 4083 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4084 * +---------------------+----------+ | 4085 * 15.9 Gbytes ^ 32 Mbytes | 4086 * headroom | 4087 * l2arc_feed_thread() 4088 * | 4089 * l2arc write hand <--[oooo]--' 4090 * | 8 Mbyte 4091 * | write max 4092 * V 4093 * +==============================+ 4094 * L2ARC dev |####|#|###|###| |####| ... | 4095 * +==============================+ 4096 * 32 Gbytes 4097 * 4098 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4099 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4100 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4101 * safe to say that this is an uncommon case, since buffers at the end of 4102 * the ARC lists have moved there due to inactivity. 4103 * 4104 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4105 * then the L2ARC simply misses copying some buffers. This serves as a 4106 * pressure valve to prevent heavy read workloads from both stalling the ARC 4107 * with waits and clogging the L2ARC with writes. This also helps prevent 4108 * the potential for the L2ARC to churn if it attempts to cache content too 4109 * quickly, such as during backups of the entire pool. 4110 * 4111 * 5. After system boot and before the ARC has filled main memory, there are 4112 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4113 * lists can remain mostly static. Instead of searching from tail of these 4114 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4115 * for eligible buffers, greatly increasing its chance of finding them. 4116 * 4117 * The L2ARC device write speed is also boosted during this time so that 4118 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4119 * there are no L2ARC reads, and no fear of degrading read performance 4120 * through increased writes. 4121 * 4122 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4123 * the vdev queue can aggregate them into larger and fewer writes. Each 4124 * device is written to in a rotor fashion, sweeping writes through 4125 * available space then repeating. 4126 * 4127 * 7. The L2ARC does not store dirty content. It never needs to flush 4128 * write buffers back to disk based storage. 4129 * 4130 * 8. If an ARC buffer is written (and dirtied) which also exists in the 4131 * L2ARC, the now stale L2ARC buffer is immediately dropped. 
4132 * 4133 * The performance of the L2ARC can be tweaked by a number of tunables, which 4134 * may be necessary for different workloads: 4135 * 4136 * l2arc_write_max max write bytes per interval 4137 * l2arc_write_boost extra write bytes during device warmup 4138 * l2arc_noprefetch skip caching prefetched buffers 4139 * l2arc_headroom number of max device writes to precache 4140 * l2arc_feed_secs seconds between L2ARC writing 4141 * 4142 * Tunables may be removed or added as future performance improvements are 4143 * integrated, and also may become zpool properties. 4144 * 4145 * There are three key functions that control how the L2ARC warms up: 4146 * 4147 * l2arc_write_eligible() check if a buffer is eligible to cache 4148 * l2arc_write_size() calculate how much to write 4149 * l2arc_write_interval() calculate sleep delay between writes 4150 * 4151 * These three functions determine what to write, how much, and how quickly 4152 * to send writes. 4153 */ 4154 4155static boolean_t 4156l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 4157{ 4158 /* 4159 * A buffer is *not* eligible for the L2ARC if it: 4160 * 1. belongs to a different spa. 4161 * 2. is already cached on the L2ARC. 4162 * 3. has an I/O in progress (it may be an incomplete read). 4163 * 4. is flagged not eligible (zfs property). 4164 */ 4165 if (ab->b_spa != spa_guid) { 4166 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4167 return (B_FALSE); 4168 } 4169 if (ab->b_l2hdr != NULL) { 4170 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4171 return (B_FALSE); 4172 } 4173 if (HDR_IO_IN_PROGRESS(ab)) { 4174 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4175 return (B_FALSE); 4176 } 4177 if (!HDR_L2CACHE(ab)) { 4178 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4179 return (B_FALSE); 4180 } 4181 4182 return (B_TRUE); 4183} 4184 4185static uint64_t 4186l2arc_write_size(l2arc_dev_t *dev) 4187{ 4188 uint64_t size; 4189 4190 size = dev->l2ad_write; 4191 4192 if (arc_warm == B_FALSE) 4193 size += dev->l2ad_boost; 4194 4195 return (size); 4196 4197} 4198 4199static clock_t 4200l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4201{ 4202 clock_t interval, next; 4203 4204 /* 4205 * If the ARC lists are busy, increase our write rate; if the 4206 * lists are stale, idle back. This is achieved by checking 4207 * how much we previously wrote - if it was more than half of 4208 * what we wanted, schedule the next write much sooner. 4209 */ 4210 if (l2arc_feed_again && wrote > (wanted / 2)) 4211 interval = (hz * l2arc_feed_min_ms) / 1000; 4212 else 4213 interval = hz * l2arc_feed_secs; 4214 4215 next = MAX(LBOLT, MIN(LBOLT + interval, began + interval)); 4216 4217 return (next); 4218} 4219 4220static void 4221l2arc_hdr_stat_add(void) 4222{ 4223 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4224 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4225} 4226 4227static void 4228l2arc_hdr_stat_remove(void) 4229{ 4230 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4231 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4232} 4233 4234/* 4235 * Cycle through L2ARC devices. This is how L2ARC load balances. 4236 * If a device is returned, this also returns holding the spa config lock. 4237 */ 4238static l2arc_dev_t * 4239l2arc_dev_get_next(void) 4240{ 4241 l2arc_dev_t *first, *next = NULL; 4242 4243 /* 4244 * Lock out the removal of spas (spa_namespace_lock), then removal 4245 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4246 * both locks will be dropped and a spa config lock held instead. 
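 *
 * In lock-order terms the body below amounts to the following
 * (a summary of the code that follows, not additional locking):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	mutex_enter(&l2arc_dev_mtx);
 *	...choose the next usable device...
 *	mutex_exit(&l2arc_dev_mtx);
 *	if (next != NULL)
 *		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
 *	mutex_exit(&spa_namespace_lock);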
4247 */ 4248 mutex_enter(&spa_namespace_lock); 4249 mutex_enter(&l2arc_dev_mtx); 4250 4251 /* if there are no vdevs, there is nothing to do */ 4252 if (l2arc_ndev == 0) 4253 goto out; 4254 4255 first = NULL; 4256 next = l2arc_dev_last; 4257 do { 4258 /* loop around the list looking for a non-faulted vdev */ 4259 if (next == NULL) { 4260 next = list_head(l2arc_dev_list); 4261 } else { 4262 next = list_next(l2arc_dev_list, next); 4263 if (next == NULL) 4264 next = list_head(l2arc_dev_list); 4265 } 4266 4267 /* if we have come back to the start, bail out */ 4268 if (first == NULL) 4269 first = next; 4270 else if (next == first) 4271 break; 4272 4273 } while (vdev_is_dead(next->l2ad_vdev)); 4274 4275 /* if we were unable to find any usable vdevs, return NULL */ 4276 if (vdev_is_dead(next->l2ad_vdev)) 4277 next = NULL; 4278 4279 l2arc_dev_last = next; 4280 4281out: 4282 mutex_exit(&l2arc_dev_mtx); 4283 4284 /* 4285 * Grab the config lock to prevent the 'next' device from being 4286 * removed while we are writing to it. 4287 */ 4288 if (next != NULL) 4289 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4290 mutex_exit(&spa_namespace_lock); 4291 4292 return (next); 4293} 4294 4295/* 4296 * Free buffers that were tagged for destruction. 4297 */ 4298static void 4299l2arc_do_free_on_write() 4300{ 4301 list_t *buflist; 4302 l2arc_data_free_t *df, *df_prev; 4303 4304 mutex_enter(&l2arc_free_on_write_mtx); 4305 buflist = l2arc_free_on_write; 4306 4307 for (df = list_tail(buflist); df; df = df_prev) { 4308 df_prev = list_prev(buflist, df); 4309 ASSERT(df->l2df_data != NULL); 4310 ASSERT(df->l2df_func != NULL); 4311 df->l2df_func(df->l2df_data, df->l2df_size); 4312 list_remove(buflist, df); 4313 kmem_free(df, sizeof (l2arc_data_free_t)); 4314 } 4315 4316 mutex_exit(&l2arc_free_on_write_mtx); 4317} 4318 4319/* 4320 * A write to a cache device has completed. Update all headers to allow 4321 * reads from these buffers to begin. 4322 */ 4323static void 4324l2arc_write_done(zio_t *zio) 4325{ 4326 l2arc_write_callback_t *cb; 4327 l2arc_dev_t *dev; 4328 list_t *buflist; 4329 arc_buf_hdr_t *head, *ab, *ab_prev; 4330 l2arc_buf_hdr_t *abl2; 4331 kmutex_t *hash_lock; 4332 4333 cb = zio->io_private; 4334 ASSERT(cb != NULL); 4335 dev = cb->l2wcb_dev; 4336 ASSERT(dev != NULL); 4337 head = cb->l2wcb_head; 4338 ASSERT(head != NULL); 4339 buflist = dev->l2ad_buflist; 4340 ASSERT(buflist != NULL); 4341 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4342 l2arc_write_callback_t *, cb); 4343 4344 if (zio->io_error != 0) 4345 ARCSTAT_BUMP(arcstat_l2_writes_error); 4346 4347 mutex_enter(&l2arc_buflist_mtx); 4348 4349 /* 4350 * All writes completed, or an error was hit. 4351 */ 4352 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4353 ab_prev = list_prev(buflist, ab); 4354 4355 hash_lock = HDR_LOCK(ab); 4356 if (!mutex_tryenter(hash_lock)) { 4357 /* 4358 * This buffer misses out. It may be in a stage 4359 * of eviction. Its ARC_L2_WRITING flag will be 4360 * left set, denying reads to this buffer. 4361 */ 4362 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4363 continue; 4364 } 4365 4366 if (zio->io_error != 0) { 4367 /* 4368 * Error - drop L2ARC entry. 4369 */ 4370 list_remove(buflist, ab); 4371 abl2 = ab->b_l2hdr; 4372 ab->b_l2hdr = NULL; 4373 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4374 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4375 } 4376 4377 /* 4378 * Allow ARC to begin reads to this L2ARC entry. 
4379 */ 4380 ab->b_flags &= ~ARC_L2_WRITING; 4381 4382 mutex_exit(hash_lock); 4383 } 4384 4385 atomic_inc_64(&l2arc_writes_done); 4386 list_remove(buflist, head); 4387 kmem_cache_free(hdr_cache, head); 4388 mutex_exit(&l2arc_buflist_mtx); 4389 4390 l2arc_do_free_on_write(); 4391 4392 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4393} 4394 4395/* 4396 * A read to a cache device completed. Validate buffer contents before 4397 * handing over to the regular ARC routines. 4398 */ 4399static void 4400l2arc_read_done(zio_t *zio) 4401{ 4402 l2arc_read_callback_t *cb; 4403 arc_buf_hdr_t *hdr; 4404 arc_buf_t *buf; 4405 kmutex_t *hash_lock; 4406 int equal; 4407 4408 ASSERT(zio->io_vd != NULL); 4409 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4410 4411 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4412 4413 cb = zio->io_private; 4414 ASSERT(cb != NULL); 4415 buf = cb->l2rcb_buf; 4416 ASSERT(buf != NULL); 4417 hdr = buf->b_hdr; 4418 ASSERT(hdr != NULL); 4419 4420 hash_lock = HDR_LOCK(hdr); 4421 mutex_enter(hash_lock); 4422 4423 /* 4424 * Check this survived the L2ARC journey. 4425 */ 4426 equal = arc_cksum_equal(buf); 4427 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4428 mutex_exit(hash_lock); 4429 zio->io_private = buf; 4430 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4431 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4432 arc_read_done(zio); 4433 } else { 4434 mutex_exit(hash_lock); 4435 /* 4436 * Buffer didn't survive caching. Increment stats and 4437 * reissue to the original storage device. 4438 */ 4439 if (zio->io_error != 0) { 4440 ARCSTAT_BUMP(arcstat_l2_io_error); 4441 } else { 4442 zio->io_error = EIO; 4443 } 4444 if (!equal) 4445 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4446 4447 /* 4448 * If there's no waiter, issue an async i/o to the primary 4449 * storage now. If there *is* a waiter, the caller must 4450 * issue the i/o in a context where it's OK to block. 4451 */ 4452 if (zio->io_waiter == NULL) { 4453 zio_t *pio = zio_unique_parent(zio); 4454 4455 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4456 4457 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4458 buf->b_data, zio->io_size, arc_read_done, buf, 4459 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4460 } 4461 } 4462 4463 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4464} 4465 4466/* 4467 * This is the list priority from which the L2ARC will search for pages to 4468 * cache. This is used within loops (0..3) to cycle through lists in the 4469 * desired order. This order can have a significant effect on cache 4470 * performance. 4471 * 4472 * Currently the metadata lists are hit first, MFU then MRU, followed by 4473 * the data lists. This function returns a locked list, and also returns 4474 * the lock pointer. 
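 *
 * The caller must drop the returned lock when it is done with the
 * list; l2arc_write_buffers() below uses it roughly as follows
 * (a sketch of that caller, with the scan details omitted):
 *
 *	for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
 *		list = l2arc_list_locked(try, &list_lock);
 *		...scan buffers from this list...
 *		mutex_exit(list_lock);
 *	}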
4475 */ 4476static list_t * 4477l2arc_list_locked(int list_num, kmutex_t **lock) 4478{ 4479 list_t *list; 4480 int idx; 4481 4482 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4483 4484 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4485 idx = list_num; 4486 list = &arc_mfu->arcs_lists[idx]; 4487 *lock = ARCS_LOCK(arc_mfu, idx); 4488 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4489 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4490 list = &arc_mru->arcs_lists[idx]; 4491 *lock = ARCS_LOCK(arc_mru, idx); 4492 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4493 ARC_BUFC_NUMDATALISTS)) { 4494 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4495 list = &arc_mfu->arcs_lists[idx]; 4496 *lock = ARCS_LOCK(arc_mfu, idx); 4497 } else { 4498 idx = list_num - ARC_BUFC_NUMLISTS; 4499 list = &arc_mru->arcs_lists[idx]; 4500 *lock = ARCS_LOCK(arc_mru, idx); 4501 } 4502 4503 ASSERT(!(MUTEX_HELD(*lock))); 4504 mutex_enter(*lock); 4505 return (list); 4506} 4507 4508/* 4509 * Evict buffers from the device write hand to the distance specified in 4510 * bytes. This distance may span populated buffers, it may span nothing. 4511 * This is clearing a region on the L2ARC device ready for writing. 4512 * If the 'all' boolean is set, every buffer is evicted. 4513 */ 4514static void 4515l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4516{ 4517 list_t *buflist; 4518 l2arc_buf_hdr_t *abl2; 4519 arc_buf_hdr_t *ab, *ab_prev; 4520 kmutex_t *hash_lock; 4521 uint64_t taddr; 4522 4523 buflist = dev->l2ad_buflist; 4524 4525 if (buflist == NULL) 4526 return; 4527 4528 if (!all && dev->l2ad_first) { 4529 /* 4530 * This is the first sweep through the device. There is 4531 * nothing to evict. 4532 */ 4533 return; 4534 } 4535 4536 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4537 /* 4538 * When nearing the end of the device, evict to the end 4539 * before the device write hand jumps to the start. 4540 */ 4541 taddr = dev->l2ad_end; 4542 } else { 4543 taddr = dev->l2ad_hand + distance; 4544 } 4545 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4546 uint64_t, taddr, boolean_t, all); 4547 4548top: 4549 mutex_enter(&l2arc_buflist_mtx); 4550 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4551 ab_prev = list_prev(buflist, ab); 4552 4553 hash_lock = HDR_LOCK(ab); 4554 if (!mutex_tryenter(hash_lock)) { 4555 /* 4556 * Missed the hash lock. Retry. 4557 */ 4558 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4559 mutex_exit(&l2arc_buflist_mtx); 4560 mutex_enter(hash_lock); 4561 mutex_exit(hash_lock); 4562 goto top; 4563 } 4564 4565 if (HDR_L2_WRITE_HEAD(ab)) { 4566 /* 4567 * We hit a write head node. Leave it for 4568 * l2arc_write_done(). 4569 */ 4570 list_remove(buflist, ab); 4571 mutex_exit(hash_lock); 4572 continue; 4573 } 4574 4575 if (!all && ab->b_l2hdr != NULL && 4576 (ab->b_l2hdr->b_daddr > taddr || 4577 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4578 /* 4579 * We've evicted to the target address, 4580 * or the end of the device. 4581 */ 4582 mutex_exit(hash_lock); 4583 break; 4584 } 4585 4586 if (HDR_FREE_IN_PROGRESS(ab)) { 4587 /* 4588 * Already on the path to destruction. 4589 */ 4590 mutex_exit(hash_lock); 4591 continue; 4592 } 4593 4594 if (ab->b_state == arc_l2c_only) { 4595 ASSERT(!HDR_L2_READING(ab)); 4596 /* 4597 * This doesn't exist in the ARC. Destroy. 4598 * arc_hdr_destroy() will call list_remove() 4599 * and decrement arcstat_l2_size. 
4600 */ 4601 arc_change_state(arc_anon, ab, hash_lock); 4602 arc_hdr_destroy(ab); 4603 } else { 4604 /* 4605 * Invalidate issued or about to be issued 4606 * reads, since we may be about to write 4607 * over this location. 4608 */ 4609 if (HDR_L2_READING(ab)) { 4610 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4611 ab->b_flags |= ARC_L2_EVICTED; 4612 } 4613 4614 /* 4615 * Tell ARC this no longer exists in L2ARC. 4616 */ 4617 if (ab->b_l2hdr != NULL) { 4618 abl2 = ab->b_l2hdr; 4619 ab->b_l2hdr = NULL; 4620 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4621 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4622 } 4623 list_remove(buflist, ab); 4624 4625 /* 4626 * This may have been leftover after a 4627 * failed write. 4628 */ 4629 ab->b_flags &= ~ARC_L2_WRITING; 4630 } 4631 mutex_exit(hash_lock); 4632 } 4633 mutex_exit(&l2arc_buflist_mtx); 4634 4635 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 4636 dev->l2ad_evict = taddr; 4637} 4638 4639/* 4640 * Find and write ARC buffers to the L2ARC device. 4641 * 4642 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4643 * for reading until they have completed writing. 4644 */ 4645static uint64_t 4646l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4647{ 4648 arc_buf_hdr_t *ab, *ab_prev, *head; 4649 l2arc_buf_hdr_t *hdrl2; 4650 list_t *list; 4651 uint64_t passed_sz, write_sz, buf_sz, headroom; 4652 void *buf_data; 4653 kmutex_t *hash_lock, *list_lock; 4654 boolean_t have_lock, full; 4655 l2arc_write_callback_t *cb; 4656 zio_t *pio, *wzio; 4657 uint64_t guid = spa_guid(spa); 4658 int try; 4659 4660 ASSERT(dev->l2ad_vdev != NULL); 4661 4662 pio = NULL; 4663 write_sz = 0; 4664 full = B_FALSE; 4665 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4666 head->b_flags |= ARC_L2_WRITE_HEAD; 4667 4668 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4669 /* 4670 * Copy buffers for L2ARC writing. 4671 */ 4672 mutex_enter(&l2arc_buflist_mtx); 4673 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4674 list = l2arc_list_locked(try, &list_lock); 4675 passed_sz = 0; 4676 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4677 4678 /* 4679 * L2ARC fast warmup. 4680 * 4681 * Until the ARC is warm and starts to evict, read from the 4682 * head of the ARC lists rather than the tail. 4683 */ 4684 headroom = target_sz * l2arc_headroom; 4685 if (arc_warm == B_FALSE) 4686 ab = list_head(list); 4687 else 4688 ab = list_tail(list); 4689 if (ab == NULL) 4690 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4691 4692 for (; ab; ab = ab_prev) { 4693 if (arc_warm == B_FALSE) 4694 ab_prev = list_next(list, ab); 4695 else 4696 ab_prev = list_prev(list, ab); 4697 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 4698 4699 hash_lock = HDR_LOCK(ab); 4700 have_lock = MUTEX_HELD(hash_lock); 4701 if (!have_lock && !mutex_tryenter(hash_lock)) { 4702 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 4703 /* 4704 * Skip this buffer rather than waiting. 4705 */ 4706 continue; 4707 } 4708 4709 passed_sz += ab->b_size; 4710 if (passed_sz > headroom) { 4711 /* 4712 * Searched too far. 
4713 */ 4714 mutex_exit(hash_lock); 4715 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 4716 break; 4717 } 4718 4719 if (!l2arc_write_eligible(guid, ab)) { 4720 mutex_exit(hash_lock); 4721 continue; 4722 } 4723 4724 if ((write_sz + ab->b_size) > target_sz) { 4725 full = B_TRUE; 4726 mutex_exit(hash_lock); 4727 ARCSTAT_BUMP(arcstat_l2_write_full); 4728 break; 4729 } 4730 4731 if (pio == NULL) { 4732 /* 4733 * Insert a dummy header on the buflist so 4734 * l2arc_write_done() can find where the 4735 * write buffers begin without searching. 4736 */ 4737 list_insert_head(dev->l2ad_buflist, head); 4738 4739 cb = kmem_alloc( 4740 sizeof (l2arc_write_callback_t), KM_SLEEP); 4741 cb->l2wcb_dev = dev; 4742 cb->l2wcb_head = head; 4743 pio = zio_root(spa, l2arc_write_done, cb, 4744 ZIO_FLAG_CANFAIL); 4745 ARCSTAT_BUMP(arcstat_l2_write_pios); 4746 } 4747 4748 /* 4749 * Create and add a new L2ARC header. 4750 */ 4751 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4752 hdrl2->b_dev = dev; 4753 hdrl2->b_daddr = dev->l2ad_hand; 4754 4755 ab->b_flags |= ARC_L2_WRITING; 4756 ab->b_l2hdr = hdrl2; 4757 list_insert_head(dev->l2ad_buflist, ab); 4758 buf_data = ab->b_buf->b_data; 4759 buf_sz = ab->b_size; 4760 4761 /* 4762 * Compute and store the buffer cksum before 4763 * writing. On debug the cksum is verified first. 4764 */ 4765 arc_cksum_verify(ab->b_buf); 4766 arc_cksum_compute(ab->b_buf, B_TRUE); 4767 4768 mutex_exit(hash_lock); 4769 4770 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4771 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4772 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4773 ZIO_FLAG_CANFAIL, B_FALSE); 4774 4775 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4776 zio_t *, wzio); 4777 (void) zio_nowait(wzio); 4778 4779 /* 4780 * Keep the clock hand suitably device-aligned. 4781 */ 4782 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4783 4784 write_sz += buf_sz; 4785 dev->l2ad_hand += buf_sz; 4786 } 4787 4788 mutex_exit(list_lock); 4789 4790 if (full == B_TRUE) 4791 break; 4792 } 4793 mutex_exit(&l2arc_buflist_mtx); 4794 4795 if (pio == NULL) { 4796 ASSERT3U(write_sz, ==, 0); 4797 kmem_cache_free(hdr_cache, head); 4798 return (0); 4799 } 4800 4801 ASSERT3U(write_sz, <=, target_sz); 4802 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4803 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz); 4804 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4805 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 4806 4807 /* 4808 * Bump device hand to the device start if it is approaching the end. 4809 * l2arc_evict() will already have evicted ahead for this case. 4810 */ 4811 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4812 spa_l2cache_space_update(dev->l2ad_vdev, 0, 4813 dev->l2ad_end - dev->l2ad_hand); 4814 dev->l2ad_hand = dev->l2ad_start; 4815 dev->l2ad_evict = dev->l2ad_start; 4816 dev->l2ad_first = B_FALSE; 4817 } 4818 4819 dev->l2ad_writing = B_TRUE; 4820 (void) zio_wait(pio); 4821 dev->l2ad_writing = B_FALSE; 4822 4823 return (write_sz); 4824} 4825 4826/* 4827 * This thread feeds the L2ARC at regular intervals. This is the beating 4828 * heart of the L2ARC. 
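 *
 * In outline, each pass of the loop below (a summary of the code, not
 * additional behavior):
 *
 *	- sleeps on l2arc_feed_thr_cv until the next scheduled write
 *	- skips the pass if there are no L2ARC devices, no usable device
 *	  is returned by l2arc_dev_get_next(), or arc_reclaim_needed()
 *	- size = l2arc_write_size(dev)
 *	- l2arc_evict(dev, size, B_FALSE) to clear space ahead of the hand
 *	- wrote = l2arc_write_buffers(spa, dev, size)
 *	- next = l2arc_write_interval(begin, size, wrote)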
4829 */ 4830static void 4831l2arc_feed_thread(void *dummy __unused) 4832{ 4833 callb_cpr_t cpr; 4834 l2arc_dev_t *dev; 4835 spa_t *spa; 4836 uint64_t size, wrote; 4837 clock_t begin, next = LBOLT; 4838 4839 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4840 4841 mutex_enter(&l2arc_feed_thr_lock); 4842 4843 while (l2arc_thread_exit == 0) { 4844 CALLB_CPR_SAFE_BEGIN(&cpr); 4845 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4846 next - LBOLT); 4847 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4848 next = LBOLT + hz; 4849 4850 /* 4851 * Quick check for L2ARC devices. 4852 */ 4853 mutex_enter(&l2arc_dev_mtx); 4854 if (l2arc_ndev == 0) { 4855 mutex_exit(&l2arc_dev_mtx); 4856 continue; 4857 } 4858 mutex_exit(&l2arc_dev_mtx); 4859 begin = LBOLT; 4860 4861 /* 4862 * This selects the next l2arc device to write to, and in 4863 * doing so the next spa to feed from: dev->l2ad_spa. This 4864 * will return NULL if there are now no l2arc devices or if 4865 * they are all faulted. 4866 * 4867 * If a device is returned, its spa's config lock is also 4868 * held to prevent device removal. l2arc_dev_get_next() 4869 * will grab and release l2arc_dev_mtx. 4870 */ 4871 if ((dev = l2arc_dev_get_next()) == NULL) 4872 continue; 4873 4874 spa = dev->l2ad_spa; 4875 ASSERT(spa != NULL); 4876 4877 /* 4878 * Avoid contributing to memory pressure. 4879 */ 4880 if (arc_reclaim_needed()) { 4881 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4882 spa_config_exit(spa, SCL_L2ARC, dev); 4883 continue; 4884 } 4885 4886 ARCSTAT_BUMP(arcstat_l2_feeds); 4887 4888 size = l2arc_write_size(dev); 4889 4890 /* 4891 * Evict L2ARC buffers that will be overwritten. 4892 */ 4893 l2arc_evict(dev, size, B_FALSE); 4894 4895 /* 4896 * Write ARC buffers. 4897 */ 4898 wrote = l2arc_write_buffers(spa, dev, size); 4899 4900 /* 4901 * Calculate interval between writes. 4902 */ 4903 next = l2arc_write_interval(begin, size, wrote); 4904 spa_config_exit(spa, SCL_L2ARC, dev); 4905 } 4906 4907 l2arc_thread_exit = 0; 4908 cv_broadcast(&l2arc_feed_thr_cv); 4909 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4910 thread_exit(); 4911} 4912 4913boolean_t 4914l2arc_vdev_present(vdev_t *vd) 4915{ 4916 l2arc_dev_t *dev; 4917 4918 mutex_enter(&l2arc_dev_mtx); 4919 for (dev = list_head(l2arc_dev_list); dev != NULL; 4920 dev = list_next(l2arc_dev_list, dev)) { 4921 if (dev->l2ad_vdev == vd) 4922 break; 4923 } 4924 mutex_exit(&l2arc_dev_mtx); 4925 4926 return (dev != NULL); 4927} 4928 4929/* 4930 * Add a vdev for use by the L2ARC. By this point the spa has already 4931 * validated the vdev and opened it. 4932 */ 4933void 4934l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4935{ 4936 l2arc_dev_t *adddev; 4937 4938 ASSERT(!l2arc_vdev_present(vd)); 4939 4940 /* 4941 * Create a new l2arc device entry. 4942 */ 4943 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4944 adddev->l2ad_spa = spa; 4945 adddev->l2ad_vdev = vd; 4946 adddev->l2ad_write = l2arc_write_max; 4947 adddev->l2ad_boost = l2arc_write_boost; 4948 adddev->l2ad_start = start; 4949 adddev->l2ad_end = end; 4950 adddev->l2ad_hand = adddev->l2ad_start; 4951 adddev->l2ad_evict = adddev->l2ad_start; 4952 adddev->l2ad_first = B_TRUE; 4953 adddev->l2ad_writing = B_FALSE; 4954 ASSERT3U(adddev->l2ad_write, >, 0); 4955 4956 /* 4957 * This is a list of all ARC buffers that are still valid on the 4958 * device. 
4959 */ 4960 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4961 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4962 offsetof(arc_buf_hdr_t, b_l2node)); 4963 4964 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4965 4966 /* 4967 * Add device to global list 4968 */ 4969 mutex_enter(&l2arc_dev_mtx); 4970 list_insert_head(l2arc_dev_list, adddev); 4971 atomic_inc_64(&l2arc_ndev); 4972 mutex_exit(&l2arc_dev_mtx); 4973} 4974 4975/* 4976 * Remove a vdev from the L2ARC. 4977 */ 4978void 4979l2arc_remove_vdev(vdev_t *vd) 4980{ 4981 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4982 4983 /* 4984 * Find the device by vdev 4985 */ 4986 mutex_enter(&l2arc_dev_mtx); 4987 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4988 nextdev = list_next(l2arc_dev_list, dev); 4989 if (vd == dev->l2ad_vdev) { 4990 remdev = dev; 4991 break; 4992 } 4993 } 4994 ASSERT(remdev != NULL); 4995 4996 /* 4997 * Remove device from global list 4998 */ 4999 list_remove(l2arc_dev_list, remdev); 5000 l2arc_dev_last = NULL; /* may have been invalidated */ 5001 atomic_dec_64(&l2arc_ndev); 5002 mutex_exit(&l2arc_dev_mtx); 5003 5004 /* 5005 * Clear all buflists and ARC references. L2ARC device flush. 5006 */ 5007 l2arc_evict(remdev, 0, B_TRUE); 5008 list_destroy(remdev->l2ad_buflist); 5009 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 5010 kmem_free(remdev, sizeof (l2arc_dev_t)); 5011} 5012 5013void 5014l2arc_init(void) 5015{ 5016 l2arc_thread_exit = 0; 5017 l2arc_ndev = 0; 5018 l2arc_writes_sent = 0; 5019 l2arc_writes_done = 0; 5020 5021 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 5022 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 5023 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 5024 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 5025 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 5026 5027 l2arc_dev_list = &L2ARC_dev_list; 5028 l2arc_free_on_write = &L2ARC_free_on_write; 5029 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 5030 offsetof(l2arc_dev_t, l2ad_node)); 5031 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 5032 offsetof(l2arc_data_free_t, l2df_list_node)); 5033} 5034 5035void 5036l2arc_fini(void) 5037{ 5038 /* 5039 * This is called from dmu_fini(), which is called from spa_fini(); 5040 * Because of this, we can assume that all l2arc devices have 5041 * already been removed when the pools themselves were removed. 5042 */ 5043 5044 l2arc_do_free_on_write(); 5045 5046 mutex_destroy(&l2arc_feed_thr_lock); 5047 cv_destroy(&l2arc_feed_thr_cv); 5048 mutex_destroy(&l2arc_dev_mtx); 5049 mutex_destroy(&l2arc_buflist_mtx); 5050 mutex_destroy(&l2arc_free_on_write_mtx); 5051 5052 list_destroy(l2arc_dev_list); 5053 list_destroy(l2arc_free_on_write); 5054} 5055 5056void 5057l2arc_start(void) 5058{ 5059 if (!(spa_mode_global & FWRITE)) 5060 return; 5061 5062 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5063 TS_RUN, minclsyspri); 5064} 5065 5066void 5067l2arc_stop(void) 5068{ 5069 if (!(spa_mode_global & FWRITE)) 5070 return; 5071 5072 mutex_enter(&l2arc_feed_thr_lock); 5073 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5074 l2arc_thread_exit = 1; 5075 while (l2arc_thread_exit != 0) 5076 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5077 mutex_exit(&l2arc_feed_thr_lock); 5078} 5079
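/*
 * Taken together (a descriptive sketch of how the pieces above are used,
 * not code that appears elsewhere in this file): l2arc_init() and
 * l2arc_start() bring up the L2ARC state and the feed thread,
 * l2arc_add_vdev() and l2arc_remove_vdev() are called by the spa as cache
 * devices come and go, and l2arc_stop() and l2arc_fini() stop the feed
 * thread and tear the state down; the fini path, as noted above, is
 * reached from spa_fini() via dmu_fini().
 */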