arc.c revision 205133
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it is simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
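/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): with variable sized cache blocks (difference 3 above), making
 * room for a miss means walking the evictable list from the tail and
 * accumulating blocks until roughly the needed number of bytes is
 * covered, rather than evicting a single fixed-size page.  The types and
 * names below are hypothetical stand-ins for arc_buf_hdr_t and its
 * per-state lists, chosen so the sketch needs no other definitions.
 */
struct example_block {
	unsigned long long	eb_size;	/* block size, 512 bytes .. 128K bytes */
	int			eb_referenced;	/* held blocks are not evictable */
	struct example_block	*eb_prev;	/* toward the head of the list */
};

/* Report how many bytes of evictable blocks would cover a `wanted'-byte miss. */
static unsigned long long
example_evict_for_space(struct example_block *tail, unsigned long long wanted)
{
	unsigned long long reclaimed = 0;
	struct example_block *eb;

	for (eb = tail; eb != 0 && reclaimed < wanted; eb = eb->eb_prev) {
		if (eb->eb_referenced)
			continue;		/* skip un-evictable blocks */
		reclaimed += eb->eb_size;	/* the actual eviction is not shown */
	}
	return (reclaimed);
}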
/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

extern int zfs_prefetch_disable;
static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;
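/*
 * Illustrative sketch (editorial addition, not part of the original
 * source): the lock ordering described in the locking model above means
 * a hash table mutex may only be taken opportunistically while an arc
 * list mutex is held.  The hypothetical helper below shows that pattern;
 * on a trylock failure the buffer is skipped rather than waited on, so a
 * thread that already holds the hash lock and wants the list lock cannot
 * deadlock against us.
 */
static int
example_try_hash_under_list(kmutex_t *list_mtx, kmutex_t *hash_mtx)
{
	mutex_enter(list_mtx);			/* arc list lock first */
	if (!mutex_tryenter(hash_mtx)) {	/* never block on the hash lock */
		mutex_exit(list_mtx);
		return (0);			/* caller skips this buffer */
	}
	/* ... both locks held; safe to examine or move the buffer ... */
	mutex_exit(hash_mtx);
	mutex_exit(list_mtx);
	return (1);
}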
/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN,
    &zfs_mdcomp_disable, 0, "Disable metadata compression");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
219 */ 220 221typedef struct arc_state { 222 list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ 223 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 224 uint64_t arcs_size; /* total amount of data in this state */ 225 kmutex_t arcs_mtx; 226} arc_state_t; 227 228/* The 6 states: */ 229static arc_state_t ARC_anon; 230static arc_state_t ARC_mru; 231static arc_state_t ARC_mru_ghost; 232static arc_state_t ARC_mfu; 233static arc_state_t ARC_mfu_ghost; 234static arc_state_t ARC_l2c_only; 235 236typedef struct arc_stats { 237 kstat_named_t arcstat_hits; 238 kstat_named_t arcstat_misses; 239 kstat_named_t arcstat_demand_data_hits; 240 kstat_named_t arcstat_demand_data_misses; 241 kstat_named_t arcstat_demand_metadata_hits; 242 kstat_named_t arcstat_demand_metadata_misses; 243 kstat_named_t arcstat_prefetch_data_hits; 244 kstat_named_t arcstat_prefetch_data_misses; 245 kstat_named_t arcstat_prefetch_metadata_hits; 246 kstat_named_t arcstat_prefetch_metadata_misses; 247 kstat_named_t arcstat_mru_hits; 248 kstat_named_t arcstat_mru_ghost_hits; 249 kstat_named_t arcstat_mfu_hits; 250 kstat_named_t arcstat_mfu_ghost_hits; 251 kstat_named_t arcstat_deleted; 252 kstat_named_t arcstat_recycle_miss; 253 kstat_named_t arcstat_mutex_miss; 254 kstat_named_t arcstat_evict_skip; 255 kstat_named_t arcstat_hash_elements; 256 kstat_named_t arcstat_hash_elements_max; 257 kstat_named_t arcstat_hash_collisions; 258 kstat_named_t arcstat_hash_chains; 259 kstat_named_t arcstat_hash_chain_max; 260 kstat_named_t arcstat_p; 261 kstat_named_t arcstat_c; 262 kstat_named_t arcstat_c_min; 263 kstat_named_t arcstat_c_max; 264 kstat_named_t arcstat_size; 265 kstat_named_t arcstat_hdr_size; 266 kstat_named_t arcstat_l2_hits; 267 kstat_named_t arcstat_l2_misses; 268 kstat_named_t arcstat_l2_feeds; 269 kstat_named_t arcstat_l2_rw_clash; 270 kstat_named_t arcstat_l2_writes_sent; 271 kstat_named_t arcstat_l2_writes_done; 272 kstat_named_t arcstat_l2_writes_error; 273 kstat_named_t arcstat_l2_writes_hdr_miss; 274 kstat_named_t arcstat_l2_evict_lock_retry; 275 kstat_named_t arcstat_l2_evict_reading; 276 kstat_named_t arcstat_l2_free_on_write; 277 kstat_named_t arcstat_l2_abort_lowmem; 278 kstat_named_t arcstat_l2_cksum_bad; 279 kstat_named_t arcstat_l2_io_error; 280 kstat_named_t arcstat_l2_size; 281 kstat_named_t arcstat_l2_hdr_size; 282 kstat_named_t arcstat_memory_throttle_count; 283} arc_stats_t; 284 285static arc_stats_t arc_stats = { 286 { "hits", KSTAT_DATA_UINT64 }, 287 { "misses", KSTAT_DATA_UINT64 }, 288 { "demand_data_hits", KSTAT_DATA_UINT64 }, 289 { "demand_data_misses", KSTAT_DATA_UINT64 }, 290 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 291 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 292 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 293 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 294 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 295 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 296 { "mru_hits", KSTAT_DATA_UINT64 }, 297 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 298 { "mfu_hits", KSTAT_DATA_UINT64 }, 299 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 300 { "deleted", KSTAT_DATA_UINT64 }, 301 { "recycle_miss", KSTAT_DATA_UINT64 }, 302 { "mutex_miss", KSTAT_DATA_UINT64 }, 303 { "evict_skip", KSTAT_DATA_UINT64 }, 304 { "hash_elements", KSTAT_DATA_UINT64 }, 305 { "hash_elements_max", KSTAT_DATA_UINT64 }, 306 { "hash_collisions", KSTAT_DATA_UINT64 }, 307 { "hash_chains", KSTAT_DATA_UINT64 }, 308 { "hash_chain_max", KSTAT_DATA_UINT64 }, 309 { "p", KSTAT_DATA_UINT64 }, 310 { 
"c", KSTAT_DATA_UINT64 }, 311 { "c_min", KSTAT_DATA_UINT64 }, 312 { "c_max", KSTAT_DATA_UINT64 }, 313 { "size", KSTAT_DATA_UINT64 }, 314 { "hdr_size", KSTAT_DATA_UINT64 }, 315 { "l2_hits", KSTAT_DATA_UINT64 }, 316 { "l2_misses", KSTAT_DATA_UINT64 }, 317 { "l2_feeds", KSTAT_DATA_UINT64 }, 318 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 319 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 320 { "l2_writes_done", KSTAT_DATA_UINT64 }, 321 { "l2_writes_error", KSTAT_DATA_UINT64 }, 322 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 323 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 324 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 325 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 326 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 327 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 328 { "l2_io_error", KSTAT_DATA_UINT64 }, 329 { "l2_size", KSTAT_DATA_UINT64 }, 330 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 331 { "memory_throttle_count", KSTAT_DATA_UINT64 } 332}; 333 334#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 335 336#define ARCSTAT_INCR(stat, val) \ 337 atomic_add_64(&arc_stats.stat.value.ui64, (val)); 338 339#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 340#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 341 342#define ARCSTAT_MAX(stat, val) { \ 343 uint64_t m; \ 344 while ((val) > (m = arc_stats.stat.value.ui64) && \ 345 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 346 continue; \ 347} 348 349#define ARCSTAT_MAXSTAT(stat) \ 350 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 351 352/* 353 * We define a macro to allow ARC hits/misses to be easily broken down by 354 * two separate conditions, giving a total of four different subtypes for 355 * each of hits and misses (so eight statistics total). 356 */ 357#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 358 if (cond1) { \ 359 if (cond2) { \ 360 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 361 } else { \ 362 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 363 } \ 364 } else { \ 365 if (cond2) { \ 366 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 367 } else { \ 368 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 369 } \ 370 } 371 372kstat_t *arc_ksp; 373static arc_state_t *arc_anon; 374static arc_state_t *arc_mru; 375static arc_state_t *arc_mru_ghost; 376static arc_state_t *arc_mfu; 377static arc_state_t *arc_mfu_ghost; 378static arc_state_t *arc_l2c_only; 379 380/* 381 * There are several ARC variables that are critical to export as kstats -- 382 * but we don't want to have to grovel around in the kstat whenever we wish to 383 * manipulate them. For these variables, we therefore define them to be in 384 * terms of the statistic variable. This assures that we are not introducing 385 * the possibility of inconsistency by having shadow copies of the variables, 386 * while still allowing the code to be readable. 
387 */ 388#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 389#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 390#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 391#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 392#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 393 394static int arc_no_grow; /* Don't try to grow cache size */ 395static uint64_t arc_tempreserve; 396static uint64_t arc_meta_used; 397static uint64_t arc_meta_limit; 398static uint64_t arc_meta_max = 0; 399SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN, 400 &arc_meta_used, 0, "ARC metadata used"); 401SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN, 402 &arc_meta_limit, 0, "ARC metadata limit"); 403 404typedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 405 406typedef struct arc_callback arc_callback_t; 407 408struct arc_callback { 409 void *acb_private; 410 arc_done_func_t *acb_done; 411 arc_buf_t *acb_buf; 412 zio_t *acb_zio_dummy; 413 arc_callback_t *acb_next; 414}; 415 416typedef struct arc_write_callback arc_write_callback_t; 417 418struct arc_write_callback { 419 void *awcb_private; 420 arc_done_func_t *awcb_ready; 421 arc_done_func_t *awcb_done; 422 arc_buf_t *awcb_buf; 423}; 424 425struct arc_buf_hdr { 426 /* protected by hash lock */ 427 dva_t b_dva; 428 uint64_t b_birth; 429 uint64_t b_cksum0; 430 431 kmutex_t b_freeze_lock; 432 zio_cksum_t *b_freeze_cksum; 433 434 arc_buf_hdr_t *b_hash_next; 435 arc_buf_t *b_buf; 436 uint32_t b_flags; 437 uint32_t b_datacnt; 438 439 arc_callback_t *b_acb; 440 kcondvar_t b_cv; 441 442 /* immutable */ 443 arc_buf_contents_t b_type; 444 uint64_t b_size; 445 spa_t *b_spa; 446 447 /* protected by arc state mutex */ 448 arc_state_t *b_state; 449 list_node_t b_arc_node; 450 451 /* updated atomically */ 452 clock_t b_arc_access; 453 454 /* self protecting */ 455 refcount_t b_refcnt; 456 457 l2arc_buf_hdr_t *b_l2hdr; 458 list_node_t b_l2node; 459}; 460 461static arc_buf_t *arc_eviction_list; 462static kmutex_t arc_eviction_mtx; 463static arc_buf_hdr_t arc_eviction_hdr; 464static void arc_get_data_buf(arc_buf_t *buf); 465static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 466static int arc_evict_needed(arc_buf_contents_t type); 467static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes); 468 469#define GHOST_STATE(state) \ 470 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 471 (state) == arc_l2c_only) 472 473/* 474 * Private ARC flags. These flags are private ARC only flags that will show up 475 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 476 * be passed in as arc_flags in things like arc_read. However, these flags 477 * should never be passed and should only be set by ARC code. When adding new 478 * public flags, make sure not to smash the private ones. 
479 */ 480 481#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 482#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 483#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 484#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 485#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 486#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 487#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 488#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 489#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 490#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 491#define ARC_STORED (1 << 19) /* has been store()d to */ 492 493#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 494#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 495#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 496#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 497#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 498#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 499#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 500#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 501 (hdr)->b_l2hdr != NULL) 502#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 503#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 504#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 505 506/* 507 * Other sizes 508 */ 509 510#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 511#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 512 513/* 514 * Hash table routines 515 */ 516 517#define HT_LOCK_PAD 128 518 519struct ht_lock { 520 kmutex_t ht_lock; 521#ifdef _KERNEL 522 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 523#endif 524}; 525 526#define BUF_LOCKS 256 527typedef struct buf_hash_table { 528 uint64_t ht_mask; 529 arc_buf_hdr_t **ht_table; 530 struct ht_lock ht_locks[BUF_LOCKS]; 531} buf_hash_table_t; 532 533static buf_hash_table_t buf_hash_table; 534 535#define BUF_HASH_INDEX(spa, dva, birth) \ 536 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 537#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 538#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 539#define HDR_LOCK(buf) \ 540 (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 541 542uint64_t zfs_crc64_table[256]; 543 544#ifdef ZIO_USE_UMA 545extern kmem_cache_t *zio_buf_cache[]; 546extern kmem_cache_t *zio_data_buf_cache[]; 547#endif 548 549/* 550 * Level 2 ARC 551 */ 552 553#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 554#define L2ARC_HEADROOM 4 /* num of writes */ 555#define L2ARC_FEED_SECS 1 /* caching interval */ 556 557#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 558#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 559 560/* 561 * L2ARC Performance Tunables 562 */ 563uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 564uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 565uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 566uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 567boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 568 569/* 570 * L2ARC Internals 571 */ 572typedef struct l2arc_dev { 573 vdev_t *l2ad_vdev; /* vdev */ 574 spa_t *l2ad_spa; /* spa */ 575 uint64_t l2ad_hand; /* next 
write location */ 576 uint64_t l2ad_write; /* desired write size, bytes */ 577 uint64_t l2ad_boost; /* warmup write boost, bytes */ 578 uint64_t l2ad_start; /* first addr on device */ 579 uint64_t l2ad_end; /* last addr on device */ 580 uint64_t l2ad_evict; /* last addr eviction reached */ 581 boolean_t l2ad_first; /* first sweep through */ 582 list_t *l2ad_buflist; /* buffer list */ 583 list_node_t l2ad_node; /* device list node */ 584} l2arc_dev_t; 585 586static list_t L2ARC_dev_list; /* device list */ 587static list_t *l2arc_dev_list; /* device list pointer */ 588static kmutex_t l2arc_dev_mtx; /* device list mutex */ 589static l2arc_dev_t *l2arc_dev_last; /* last device used */ 590static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 591static list_t L2ARC_free_on_write; /* free after write buf list */ 592static list_t *l2arc_free_on_write; /* free after write list ptr */ 593static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 594static uint64_t l2arc_ndev; /* number of devices */ 595 596typedef struct l2arc_read_callback { 597 arc_buf_t *l2rcb_buf; /* read buffer */ 598 spa_t *l2rcb_spa; /* spa */ 599 blkptr_t l2rcb_bp; /* original blkptr */ 600 zbookmark_t l2rcb_zb; /* original bookmark */ 601 int l2rcb_flags; /* original flags */ 602} l2arc_read_callback_t; 603 604typedef struct l2arc_write_callback { 605 l2arc_dev_t *l2wcb_dev; /* device info */ 606 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 607} l2arc_write_callback_t; 608 609struct l2arc_buf_hdr { 610 /* protected by arc_buf_hdr mutex */ 611 l2arc_dev_t *b_dev; /* L2ARC device */ 612 daddr_t b_daddr; /* disk address, offset byte */ 613}; 614 615typedef struct l2arc_data_free { 616 /* protected by l2arc_free_on_write_mtx */ 617 void *l2df_data; 618 size_t l2df_size; 619 void (*l2df_func)(void *, size_t); 620 list_node_t l2df_list_node; 621} l2arc_data_free_t; 622 623static kmutex_t l2arc_feed_thr_lock; 624static kcondvar_t l2arc_feed_thr_cv; 625static uint8_t l2arc_thread_exit; 626 627static void l2arc_read_done(zio_t *zio); 628static void l2arc_hdr_stat_add(void); 629static void l2arc_hdr_stat_remove(void); 630 631static uint64_t 632buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth) 633{ 634 uintptr_t spav = (uintptr_t)spa; 635 uint8_t *vdva = (uint8_t *)dva; 636 uint64_t crc = -1ULL; 637 int i; 638 639 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 640 641 for (i = 0; i < sizeof (dva_t); i++) 642 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 643 644 crc ^= (spav>>8) ^ birth; 645 646 return (crc); 647} 648 649#define BUF_EMPTY(buf) \ 650 ((buf)->b_dva.dva_word[0] == 0 && \ 651 (buf)->b_dva.dva_word[1] == 0 && \ 652 (buf)->b_birth == 0) 653 654#define BUF_EQUAL(spa, dva, birth, buf) \ 655 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 656 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 657 ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 658 659static arc_buf_hdr_t * 660buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 661{ 662 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 663 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 664 arc_buf_hdr_t *buf; 665 666 mutex_enter(hash_lock); 667 for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 668 buf = buf->b_hash_next) { 669 if (BUF_EQUAL(spa, dva, birth, buf)) { 670 *lockp = hash_lock; 671 return (buf); 672 } 673 } 674 mutex_exit(hash_lock); 675 *lockp = NULL; 676 return (NULL); 677} 678 679/* 680 * Insert an entry into the hash table. 
If there is already an element 681 * equal to elem in the hash table, then the already existing element 682 * will be returned and the new element will not be inserted. 683 * Otherwise returns NULL. 684 */ 685static arc_buf_hdr_t * 686buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 687{ 688 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 689 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 690 arc_buf_hdr_t *fbuf; 691 uint32_t i; 692 693 ASSERT(!HDR_IN_HASH_TABLE(buf)); 694 *lockp = hash_lock; 695 mutex_enter(hash_lock); 696 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 697 fbuf = fbuf->b_hash_next, i++) { 698 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 699 return (fbuf); 700 } 701 702 buf->b_hash_next = buf_hash_table.ht_table[idx]; 703 buf_hash_table.ht_table[idx] = buf; 704 buf->b_flags |= ARC_IN_HASH_TABLE; 705 706 /* collect some hash table performance data */ 707 if (i > 0) { 708 ARCSTAT_BUMP(arcstat_hash_collisions); 709 if (i == 1) 710 ARCSTAT_BUMP(arcstat_hash_chains); 711 712 ARCSTAT_MAX(arcstat_hash_chain_max, i); 713 } 714 715 ARCSTAT_BUMP(arcstat_hash_elements); 716 ARCSTAT_MAXSTAT(arcstat_hash_elements); 717 718 return (NULL); 719} 720 721static void 722buf_hash_remove(arc_buf_hdr_t *buf) 723{ 724 arc_buf_hdr_t *fbuf, **bufp; 725 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 726 727 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 728 ASSERT(HDR_IN_HASH_TABLE(buf)); 729 730 bufp = &buf_hash_table.ht_table[idx]; 731 while ((fbuf = *bufp) != buf) { 732 ASSERT(fbuf != NULL); 733 bufp = &fbuf->b_hash_next; 734 } 735 *bufp = buf->b_hash_next; 736 buf->b_hash_next = NULL; 737 buf->b_flags &= ~ARC_IN_HASH_TABLE; 738 739 /* collect some hash table performance data */ 740 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 741 742 if (buf_hash_table.ht_table[idx] && 743 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 744 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 745} 746 747/* 748 * Global data structures and functions for the buf kmem cache. 749 */ 750static kmem_cache_t *hdr_cache; 751static kmem_cache_t *buf_cache; 752 753static void 754buf_fini(void) 755{ 756 int i; 757 758 kmem_free(buf_hash_table.ht_table, 759 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 760 for (i = 0; i < BUF_LOCKS; i++) 761 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 762 kmem_cache_destroy(hdr_cache); 763 kmem_cache_destroy(buf_cache); 764} 765 766/* 767 * Constructor callback - called when the cache is empty 768 * and a new buf is requested. 769 */ 770/* ARGSUSED */ 771static int 772hdr_cons(void *vbuf, void *unused, int kmflag) 773{ 774 arc_buf_hdr_t *buf = vbuf; 775 776 bzero(buf, sizeof (arc_buf_hdr_t)); 777 refcount_create(&buf->b_refcnt); 778 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 779 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 780 781 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 782 return (0); 783} 784 785/* ARGSUSED */ 786static int 787buf_cons(void *vbuf, void *unused, int kmflag) 788{ 789 arc_buf_t *buf = vbuf; 790 791 bzero(buf, sizeof (arc_buf_t)); 792 rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL); 793 return (0); 794} 795 796/* 797 * Destructor callback - called when a cached buf is 798 * no longer required. 
799 */ 800/* ARGSUSED */ 801static void 802hdr_dest(void *vbuf, void *unused) 803{ 804 arc_buf_hdr_t *buf = vbuf; 805 806 refcount_destroy(&buf->b_refcnt); 807 cv_destroy(&buf->b_cv); 808 mutex_destroy(&buf->b_freeze_lock); 809 810 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 811} 812 813/* ARGSUSED */ 814static void 815buf_dest(void *vbuf, void *unused) 816{ 817 arc_buf_t *buf = vbuf; 818 819 rw_destroy(&buf->b_lock); 820} 821 822/* 823 * Reclaim callback -- invoked when memory is low. 824 */ 825/* ARGSUSED */ 826static void 827hdr_recl(void *unused) 828{ 829 dprintf("hdr_recl called\n"); 830 /* 831 * umem calls the reclaim func when we destroy the buf cache, 832 * which is after we do arc_fini(). 833 */ 834 if (!arc_dead) 835 cv_signal(&arc_reclaim_thr_cv); 836} 837 838static void 839buf_init(void) 840{ 841 uint64_t *ct; 842 uint64_t hsize = 1ULL << 12; 843 int i, j; 844 845 /* 846 * The hash table is big enough to fill all of physical memory 847 * with an average 64K block size. The table will take up 848 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 849 */ 850 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE) 851 hsize <<= 1; 852retry: 853 buf_hash_table.ht_mask = hsize - 1; 854 buf_hash_table.ht_table = 855 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 856 if (buf_hash_table.ht_table == NULL) { 857 ASSERT(hsize > (1ULL << 8)); 858 hsize >>= 1; 859 goto retry; 860 } 861 862 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 863 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 864 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 865 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 866 867 for (i = 0; i < 256; i++) 868 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 869 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 870 871 for (i = 0; i < BUF_LOCKS; i++) { 872 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 873 NULL, MUTEX_DEFAULT, NULL); 874 } 875} 876 877#define ARC_MINTIME (hz>>4) /* 62 ms */ 878 879static void 880arc_cksum_verify(arc_buf_t *buf) 881{ 882 zio_cksum_t zc; 883 884 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 885 return; 886 887 mutex_enter(&buf->b_hdr->b_freeze_lock); 888 if (buf->b_hdr->b_freeze_cksum == NULL || 889 (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 890 mutex_exit(&buf->b_hdr->b_freeze_lock); 891 return; 892 } 893 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 894 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 895 panic("buffer modified while frozen!"); 896 mutex_exit(&buf->b_hdr->b_freeze_lock); 897} 898 899static int 900arc_cksum_equal(arc_buf_t *buf) 901{ 902 zio_cksum_t zc; 903 int equal; 904 905 mutex_enter(&buf->b_hdr->b_freeze_lock); 906 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 907 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 908 mutex_exit(&buf->b_hdr->b_freeze_lock); 909 910 return (equal); 911} 912 913static void 914arc_cksum_compute(arc_buf_t *buf, boolean_t force) 915{ 916 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 917 return; 918 919 mutex_enter(&buf->b_hdr->b_freeze_lock); 920 if (buf->b_hdr->b_freeze_cksum != NULL) { 921 mutex_exit(&buf->b_hdr->b_freeze_lock); 922 return; 923 } 924 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 925 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 926 buf->b_hdr->b_freeze_cksum); 927 mutex_exit(&buf->b_hdr->b_freeze_lock); 928} 929 930void 931arc_buf_thaw(arc_buf_t *buf) 932{ 933 if (zfs_flags & ZFS_DEBUG_MODIFY) { 934 if (buf->b_hdr->b_state != arc_anon) 935 
panic("modifying non-anon buffer!"); 936 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 937 panic("modifying buffer while i/o in progress!"); 938 arc_cksum_verify(buf); 939 } 940 941 mutex_enter(&buf->b_hdr->b_freeze_lock); 942 if (buf->b_hdr->b_freeze_cksum != NULL) { 943 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 944 buf->b_hdr->b_freeze_cksum = NULL; 945 } 946 mutex_exit(&buf->b_hdr->b_freeze_lock); 947} 948 949void 950arc_buf_freeze(arc_buf_t *buf) 951{ 952 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 953 return; 954 955 ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 956 buf->b_hdr->b_state == arc_anon); 957 arc_cksum_compute(buf, B_FALSE); 958} 959 960static void 961add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 962{ 963 ASSERT(MUTEX_HELD(hash_lock)); 964 965 if ((refcount_add(&ab->b_refcnt, tag) == 1) && 966 (ab->b_state != arc_anon)) { 967 uint64_t delta = ab->b_size * ab->b_datacnt; 968 list_t *list = &ab->b_state->arcs_list[ab->b_type]; 969 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 970 971 ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 972 mutex_enter(&ab->b_state->arcs_mtx); 973 ASSERT(list_link_active(&ab->b_arc_node)); 974 list_remove(list, ab); 975 if (GHOST_STATE(ab->b_state)) { 976 ASSERT3U(ab->b_datacnt, ==, 0); 977 ASSERT3P(ab->b_buf, ==, NULL); 978 delta = ab->b_size; 979 } 980 ASSERT(delta > 0); 981 ASSERT3U(*size, >=, delta); 982 atomic_add_64(size, -delta); 983 mutex_exit(&ab->b_state->arcs_mtx); 984 /* remove the prefetch flag if we get a reference */ 985 if (ab->b_flags & ARC_PREFETCH) 986 ab->b_flags &= ~ARC_PREFETCH; 987 } 988} 989 990static int 991remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 992{ 993 int cnt; 994 arc_state_t *state = ab->b_state; 995 996 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 997 ASSERT(!GHOST_STATE(state)); 998 999 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 1000 (state != arc_anon)) { 1001 uint64_t *size = &state->arcs_lsize[ab->b_type]; 1002 1003 ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 1004 mutex_enter(&state->arcs_mtx); 1005 ASSERT(!list_link_active(&ab->b_arc_node)); 1006 list_insert_head(&state->arcs_list[ab->b_type], ab); 1007 ASSERT(ab->b_datacnt > 0); 1008 atomic_add_64(size, ab->b_size * ab->b_datacnt); 1009 mutex_exit(&state->arcs_mtx); 1010 } 1011 return (cnt); 1012} 1013 1014/* 1015 * Move the supplied buffer to the indicated state. The mutex 1016 * for the buffer must be held by the caller. 1017 */ 1018static void 1019arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1020{ 1021 arc_state_t *old_state = ab->b_state; 1022 int64_t refcnt = refcount_count(&ab->b_refcnt); 1023 uint64_t from_delta, to_delta; 1024 1025 ASSERT(MUTEX_HELD(hash_lock)); 1026 ASSERT(new_state != old_state); 1027 ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1028 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1029 1030 from_delta = to_delta = ab->b_datacnt * ab->b_size; 1031 1032 /* 1033 * If this buffer is evictable, transfer it from the 1034 * old state list to the new state list. 1035 */ 1036 if (refcnt == 0) { 1037 if (old_state != arc_anon) { 1038 int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 1039 uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1040 1041 if (use_mutex) 1042 mutex_enter(&old_state->arcs_mtx); 1043 1044 ASSERT(list_link_active(&ab->b_arc_node)); 1045 list_remove(&old_state->arcs_list[ab->b_type], ab); 1046 1047 /* 1048 * If prefetching out of the ghost cache, 1049 * we will have a non-null datacnt. 
1050 */ 1051 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1052 /* ghost elements have a ghost size */ 1053 ASSERT(ab->b_buf == NULL); 1054 from_delta = ab->b_size; 1055 } 1056 ASSERT3U(*size, >=, from_delta); 1057 atomic_add_64(size, -from_delta); 1058 1059 if (use_mutex) 1060 mutex_exit(&old_state->arcs_mtx); 1061 } 1062 if (new_state != arc_anon) { 1063 int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1064 uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1065 1066 if (use_mutex) 1067 mutex_enter(&new_state->arcs_mtx); 1068 1069 list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1070 1071 /* ghost elements have a ghost size */ 1072 if (GHOST_STATE(new_state)) { 1073 ASSERT(ab->b_datacnt == 0); 1074 ASSERT(ab->b_buf == NULL); 1075 to_delta = ab->b_size; 1076 } 1077 atomic_add_64(size, to_delta); 1078 1079 if (use_mutex) 1080 mutex_exit(&new_state->arcs_mtx); 1081 } 1082 } 1083 1084 ASSERT(!BUF_EMPTY(ab)); 1085 if (new_state == arc_anon) { 1086 buf_hash_remove(ab); 1087 } 1088 1089 /* adjust state sizes */ 1090 if (to_delta) 1091 atomic_add_64(&new_state->arcs_size, to_delta); 1092 if (from_delta) { 1093 ASSERT3U(old_state->arcs_size, >=, from_delta); 1094 atomic_add_64(&old_state->arcs_size, -from_delta); 1095 } 1096 ab->b_state = new_state; 1097 1098 /* adjust l2arc hdr stats */ 1099 if (new_state == arc_l2c_only) 1100 l2arc_hdr_stat_add(); 1101 else if (old_state == arc_l2c_only) 1102 l2arc_hdr_stat_remove(); 1103} 1104 1105void 1106arc_space_consume(uint64_t space) 1107{ 1108 atomic_add_64(&arc_meta_used, space); 1109 atomic_add_64(&arc_size, space); 1110} 1111 1112void 1113arc_space_return(uint64_t space) 1114{ 1115 ASSERT(arc_meta_used >= space); 1116 if (arc_meta_max < arc_meta_used) 1117 arc_meta_max = arc_meta_used; 1118 atomic_add_64(&arc_meta_used, -space); 1119 ASSERT(arc_size >= space); 1120 atomic_add_64(&arc_size, -space); 1121} 1122 1123void * 1124arc_data_buf_alloc(uint64_t size) 1125{ 1126 if (arc_evict_needed(ARC_BUFC_DATA)) 1127 cv_signal(&arc_reclaim_thr_cv); 1128 atomic_add_64(&arc_size, size); 1129 return (zio_data_buf_alloc(size)); 1130} 1131 1132void 1133arc_data_buf_free(void *buf, uint64_t size) 1134{ 1135 zio_data_buf_free(buf, size); 1136 ASSERT(arc_size >= size); 1137 atomic_add_64(&arc_size, -size); 1138} 1139 1140arc_buf_t * 1141arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1142{ 1143 arc_buf_hdr_t *hdr; 1144 arc_buf_t *buf; 1145 1146 ASSERT3U(size, >, 0); 1147 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1148 ASSERT(BUF_EMPTY(hdr)); 1149 hdr->b_size = size; 1150 hdr->b_type = type; 1151 hdr->b_spa = spa; 1152 hdr->b_state = arc_anon; 1153 hdr->b_arc_access = 0; 1154 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1155 buf->b_hdr = hdr; 1156 buf->b_data = NULL; 1157 buf->b_efunc = NULL; 1158 buf->b_private = NULL; 1159 buf->b_next = NULL; 1160 hdr->b_buf = buf; 1161 arc_get_data_buf(buf); 1162 hdr->b_datacnt = 1; 1163 hdr->b_flags = 0; 1164 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1165 (void) refcount_add(&hdr->b_refcnt, tag); 1166 1167 return (buf); 1168} 1169 1170static arc_buf_t * 1171arc_buf_clone(arc_buf_t *from) 1172{ 1173 arc_buf_t *buf; 1174 arc_buf_hdr_t *hdr = from->b_hdr; 1175 uint64_t size = hdr->b_size; 1176 1177 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1178 buf->b_hdr = hdr; 1179 buf->b_data = NULL; 1180 buf->b_efunc = NULL; 1181 buf->b_private = NULL; 1182 buf->b_next = hdr->b_buf; 1183 hdr->b_buf = buf; 1184 arc_get_data_buf(buf); 1185 bcopy(from->b_data, buf->b_data, size); 1186 
hdr->b_datacnt += 1; 1187 return (buf); 1188} 1189 1190void 1191arc_buf_add_ref(arc_buf_t *buf, void* tag) 1192{ 1193 arc_buf_hdr_t *hdr; 1194 kmutex_t *hash_lock; 1195 1196 /* 1197 * Check to see if this buffer is evicted. Callers 1198 * must verify b_data != NULL to know if the add_ref 1199 * was successful. 1200 */ 1201 rw_enter(&buf->b_lock, RW_READER); 1202 if (buf->b_data == NULL) { 1203 rw_exit(&buf->b_lock); 1204 return; 1205 } 1206 hdr = buf->b_hdr; 1207 ASSERT(hdr != NULL); 1208 hash_lock = HDR_LOCK(hdr); 1209 mutex_enter(hash_lock); 1210 rw_exit(&buf->b_lock); 1211 1212 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1213 add_reference(hdr, hash_lock, tag); 1214 arc_access(hdr, hash_lock); 1215 mutex_exit(hash_lock); 1216 ARCSTAT_BUMP(arcstat_hits); 1217 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1218 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1219 data, metadata, hits); 1220} 1221 1222/* 1223 * Free the arc data buffer. If it is an l2arc write in progress, 1224 * the buffer is placed on l2arc_free_on_write to be freed later. 1225 */ 1226static void 1227arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1228 void *data, size_t size) 1229{ 1230 if (HDR_L2_WRITING(hdr)) { 1231 l2arc_data_free_t *df; 1232 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1233 df->l2df_data = data; 1234 df->l2df_size = size; 1235 df->l2df_func = free_func; 1236 mutex_enter(&l2arc_free_on_write_mtx); 1237 list_insert_head(l2arc_free_on_write, df); 1238 mutex_exit(&l2arc_free_on_write_mtx); 1239 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1240 } else { 1241 free_func(data, size); 1242 } 1243} 1244 1245static void 1246arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1247{ 1248 arc_buf_t **bufp; 1249 1250 /* free up data associated with the buf */ 1251 if (buf->b_data) { 1252 arc_state_t *state = buf->b_hdr->b_state; 1253 uint64_t size = buf->b_hdr->b_size; 1254 arc_buf_contents_t type = buf->b_hdr->b_type; 1255 1256 arc_cksum_verify(buf); 1257 if (!recycle) { 1258 if (type == ARC_BUFC_METADATA) { 1259 arc_buf_data_free(buf->b_hdr, zio_buf_free, 1260 buf->b_data, size); 1261 arc_space_return(size); 1262 } else { 1263 ASSERT(type == ARC_BUFC_DATA); 1264 arc_buf_data_free(buf->b_hdr, 1265 zio_data_buf_free, buf->b_data, size); 1266 atomic_add_64(&arc_size, -size); 1267 } 1268 } 1269 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1270 uint64_t *cnt = &state->arcs_lsize[type]; 1271 1272 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1273 ASSERT(state != arc_anon); 1274 1275 ASSERT3U(*cnt, >=, size); 1276 atomic_add_64(cnt, -size); 1277 } 1278 ASSERT3U(state->arcs_size, >=, size); 1279 atomic_add_64(&state->arcs_size, -size); 1280 buf->b_data = NULL; 1281 ASSERT(buf->b_hdr->b_datacnt > 0); 1282 buf->b_hdr->b_datacnt -= 1; 1283 } 1284 1285 /* only remove the buf if requested */ 1286 if (!all) 1287 return; 1288 1289 /* remove the buf from the hdr list */ 1290 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1291 continue; 1292 *bufp = buf->b_next; 1293 1294 ASSERT(buf->b_efunc == NULL); 1295 1296 /* clean up the buf */ 1297 buf->b_hdr = NULL; 1298 kmem_cache_free(buf_cache, buf); 1299} 1300 1301static void 1302arc_hdr_destroy(arc_buf_hdr_t *hdr) 1303{ 1304 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1305 ASSERT3P(hdr->b_state, ==, arc_anon); 1306 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1307 ASSERT(!(hdr->b_flags & ARC_STORED)); 1308 1309 if (hdr->b_l2hdr != NULL) { 1310 if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1311 /* 1312 * To 
prevent arc_free() and l2arc_evict() from 1313 * attempting to free the same buffer at the same time, 1314 * a FREE_IN_PROGRESS flag is given to arc_free() to 1315 * give it priority. l2arc_evict() can't destroy this 1316 * header while we are waiting on l2arc_buflist_mtx. 1317 * 1318 * The hdr may be removed from l2ad_buflist before we 1319 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1320 */ 1321 mutex_enter(&l2arc_buflist_mtx); 1322 if (hdr->b_l2hdr != NULL) { 1323 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, 1324 hdr); 1325 } 1326 mutex_exit(&l2arc_buflist_mtx); 1327 } else { 1328 list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1329 } 1330 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1331 kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1332 if (hdr->b_state == arc_l2c_only) 1333 l2arc_hdr_stat_remove(); 1334 hdr->b_l2hdr = NULL; 1335 } 1336 1337 if (!BUF_EMPTY(hdr)) { 1338 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1339 bzero(&hdr->b_dva, sizeof (dva_t)); 1340 hdr->b_birth = 0; 1341 hdr->b_cksum0 = 0; 1342 } 1343 while (hdr->b_buf) { 1344 arc_buf_t *buf = hdr->b_buf; 1345 1346 if (buf->b_efunc) { 1347 mutex_enter(&arc_eviction_mtx); 1348 rw_enter(&buf->b_lock, RW_WRITER); 1349 ASSERT(buf->b_hdr != NULL); 1350 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1351 hdr->b_buf = buf->b_next; 1352 buf->b_hdr = &arc_eviction_hdr; 1353 buf->b_next = arc_eviction_list; 1354 arc_eviction_list = buf; 1355 rw_exit(&buf->b_lock); 1356 mutex_exit(&arc_eviction_mtx); 1357 } else { 1358 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1359 } 1360 } 1361 if (hdr->b_freeze_cksum != NULL) { 1362 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1363 hdr->b_freeze_cksum = NULL; 1364 } 1365 1366 ASSERT(!list_link_active(&hdr->b_arc_node)); 1367 ASSERT3P(hdr->b_hash_next, ==, NULL); 1368 ASSERT3P(hdr->b_acb, ==, NULL); 1369 kmem_cache_free(hdr_cache, hdr); 1370} 1371 1372void 1373arc_buf_free(arc_buf_t *buf, void *tag) 1374{ 1375 arc_buf_hdr_t *hdr = buf->b_hdr; 1376 int hashed = hdr->b_state != arc_anon; 1377 1378 ASSERT(buf->b_efunc == NULL); 1379 ASSERT(buf->b_data != NULL); 1380 1381 if (hashed) { 1382 kmutex_t *hash_lock = HDR_LOCK(hdr); 1383 1384 mutex_enter(hash_lock); 1385 (void) remove_reference(hdr, hash_lock, tag); 1386 if (hdr->b_datacnt > 1) 1387 arc_buf_destroy(buf, FALSE, TRUE); 1388 else 1389 hdr->b_flags |= ARC_BUF_AVAILABLE; 1390 mutex_exit(hash_lock); 1391 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1392 int destroy_hdr; 1393 /* 1394 * We are in the middle of an async write. Don't destroy 1395 * this buffer unless the write completes before we finish 1396 * decrementing the reference count. 
1397 */ 1398 mutex_enter(&arc_eviction_mtx); 1399 (void) remove_reference(hdr, NULL, tag); 1400 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1401 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1402 mutex_exit(&arc_eviction_mtx); 1403 if (destroy_hdr) 1404 arc_hdr_destroy(hdr); 1405 } else { 1406 if (remove_reference(hdr, NULL, tag) > 0) { 1407 ASSERT(HDR_IO_ERROR(hdr)); 1408 arc_buf_destroy(buf, FALSE, TRUE); 1409 } else { 1410 arc_hdr_destroy(hdr); 1411 } 1412 } 1413} 1414 1415int 1416arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1417{ 1418 arc_buf_hdr_t *hdr = buf->b_hdr; 1419 kmutex_t *hash_lock = HDR_LOCK(hdr); 1420 int no_callback = (buf->b_efunc == NULL); 1421 1422 if (hdr->b_state == arc_anon) { 1423 arc_buf_free(buf, tag); 1424 return (no_callback); 1425 } 1426 1427 mutex_enter(hash_lock); 1428 ASSERT(hdr->b_state != arc_anon); 1429 ASSERT(buf->b_data != NULL); 1430 1431 (void) remove_reference(hdr, hash_lock, tag); 1432 if (hdr->b_datacnt > 1) { 1433 if (no_callback) 1434 arc_buf_destroy(buf, FALSE, TRUE); 1435 } else if (no_callback) { 1436 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1437 hdr->b_flags |= ARC_BUF_AVAILABLE; 1438 } 1439 ASSERT(no_callback || hdr->b_datacnt > 1 || 1440 refcount_is_zero(&hdr->b_refcnt)); 1441 mutex_exit(hash_lock); 1442 return (no_callback); 1443} 1444 1445int 1446arc_buf_size(arc_buf_t *buf) 1447{ 1448 return (buf->b_hdr->b_size); 1449} 1450 1451/* 1452 * Evict buffers from list until we've removed the specified number of 1453 * bytes. Move the removed buffers to the appropriate evict state. 1454 * If the recycle flag is set, then attempt to "recycle" a buffer: 1455 * - look for a buffer to evict that is `bytes' long. 1456 * - return the data block from this buffer rather than freeing it. 1457 * This flag is used by callers that are trying to make space for a 1458 * new buffer in a full arc cache. 1459 * 1460 * This function makes a "best effort". It skips over any buffers 1461 * it can't get a hash_lock on, and so may not catch all candidates. 1462 * It may also return without evicting as much space as requested. 1463 */ 1464static void * 1465arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 1466 arc_buf_contents_t type) 1467{ 1468 arc_state_t *evicted_state; 1469 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1470 arc_buf_hdr_t *ab, *ab_prev = NULL; 1471 list_t *list = &state->arcs_list[type]; 1472 kmutex_t *hash_lock; 1473 boolean_t have_lock; 1474 void *stolen = NULL; 1475 1476 ASSERT(state == arc_mru || state == arc_mfu); 1477 1478 evicted_state = (state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 1479 1480 mutex_enter(&state->arcs_mtx); 1481 mutex_enter(&evicted_state->arcs_mtx); 1482 1483 for (ab = list_tail(list); ab; ab = ab_prev) { 1484 ab_prev = list_prev(list, ab); 1485 /* prefetch buffers have a minimum lifespan */ 1486 if (HDR_IO_IN_PROGRESS(ab) || 1487 (spa && ab->b_spa != spa) || 1488 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1489 LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1490 skipped++; 1491 continue; 1492 } 1493 /* "lookahead" for better eviction candidate */ 1494 if (recycle && ab->b_size != bytes && 1495 ab_prev && ab_prev->b_size == bytes) 1496 continue; 1497 hash_lock = HDR_LOCK(ab); 1498 have_lock = MUTEX_HELD(hash_lock); 1499 if (have_lock || mutex_tryenter(hash_lock)) { 1500 ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1501 ASSERT(ab->b_datacnt > 0); 1502 while (ab->b_buf) { 1503 arc_buf_t *buf = ab->b_buf; 1504 if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { 1505 missed += 1; 1506 break; 1507 } 1508 if (buf->b_data) { 1509 bytes_evicted += ab->b_size; 1510 if (recycle && ab->b_type == type && 1511 ab->b_size == bytes && 1512 !HDR_L2_WRITING(ab)) { 1513 stolen = buf->b_data; 1514 recycle = FALSE; 1515 } 1516 } 1517 if (buf->b_efunc) { 1518 mutex_enter(&arc_eviction_mtx); 1519 arc_buf_destroy(buf, 1520 buf->b_data == stolen, FALSE); 1521 ab->b_buf = buf->b_next; 1522 buf->b_hdr = &arc_eviction_hdr; 1523 buf->b_next = arc_eviction_list; 1524 arc_eviction_list = buf; 1525 mutex_exit(&arc_eviction_mtx); 1526 rw_exit(&buf->b_lock); 1527 } else { 1528 rw_exit(&buf->b_lock); 1529 arc_buf_destroy(buf, 1530 buf->b_data == stolen, TRUE); 1531 } 1532 } 1533 if (ab->b_datacnt == 0) { 1534 arc_change_state(evicted_state, ab, hash_lock); 1535 ASSERT(HDR_IN_HASH_TABLE(ab)); 1536 ab->b_flags |= ARC_IN_HASH_TABLE; 1537 ab->b_flags &= ~ARC_BUF_AVAILABLE; 1538 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1539 } 1540 if (!have_lock) 1541 mutex_exit(hash_lock); 1542 if (bytes >= 0 && bytes_evicted >= bytes) 1543 break; 1544 } else { 1545 missed += 1; 1546 } 1547 } 1548 1549 mutex_exit(&evicted_state->arcs_mtx); 1550 mutex_exit(&state->arcs_mtx); 1551 1552 if (bytes_evicted < bytes) 1553 dprintf("only evicted %lld bytes from %x", 1554 (longlong_t)bytes_evicted, state); 1555 1556 if (skipped) 1557 ARCSTAT_INCR(arcstat_evict_skip, skipped); 1558 1559 if (missed) 1560 ARCSTAT_INCR(arcstat_mutex_miss, missed); 1561 1562 /* 1563 * We have just evicted some date into the ghost state, make 1564 * sure we also adjust the ghost state size if necessary. 1565 */ 1566 if (arc_no_grow && 1567 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1568 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1569 arc_mru_ghost->arcs_size - arc_c; 1570 1571 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1572 int64_t todelete = 1573 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1574 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1575 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1576 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1577 arc_mru_ghost->arcs_size + 1578 arc_mfu_ghost->arcs_size - arc_c); 1579 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1580 } 1581 } 1582 1583 return (stolen); 1584} 1585 1586/* 1587 * Remove buffers from list until we've removed the specified number of 1588 * bytes. Destroy the buffers that are removed. 
1589 */ 1590static void 1591arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1592{ 1593 arc_buf_hdr_t *ab, *ab_prev; 1594 list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1595 kmutex_t *hash_lock; 1596 uint64_t bytes_deleted = 0; 1597 uint64_t bufs_skipped = 0; 1598 1599 ASSERT(GHOST_STATE(state)); 1600top: 1601 mutex_enter(&state->arcs_mtx); 1602 for (ab = list_tail(list); ab; ab = ab_prev) { 1603 ab_prev = list_prev(list, ab); 1604 if (spa && ab->b_spa != spa) 1605 continue; 1606 hash_lock = HDR_LOCK(ab); 1607 if (mutex_tryenter(hash_lock)) { 1608 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1609 ASSERT(ab->b_buf == NULL); 1610 ARCSTAT_BUMP(arcstat_deleted); 1611 bytes_deleted += ab->b_size; 1612 1613 if (ab->b_l2hdr != NULL) { 1614 /* 1615 * This buffer is cached on the 2nd Level ARC; 1616 * don't destroy the header. 1617 */ 1618 arc_change_state(arc_l2c_only, ab, hash_lock); 1619 mutex_exit(hash_lock); 1620 } else { 1621 arc_change_state(arc_anon, ab, hash_lock); 1622 mutex_exit(hash_lock); 1623 arc_hdr_destroy(ab); 1624 } 1625 1626 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1627 if (bytes >= 0 && bytes_deleted >= bytes) 1628 break; 1629 } else { 1630 if (bytes < 0) { 1631 mutex_exit(&state->arcs_mtx); 1632 mutex_enter(hash_lock); 1633 mutex_exit(hash_lock); 1634 goto top; 1635 } 1636 bufs_skipped += 1; 1637 } 1638 } 1639 mutex_exit(&state->arcs_mtx); 1640 1641 if (list == &state->arcs_list[ARC_BUFC_DATA] && 1642 (bytes < 0 || bytes_deleted < bytes)) { 1643 list = &state->arcs_list[ARC_BUFC_METADATA]; 1644 goto top; 1645 } 1646 1647 if (bufs_skipped) { 1648 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1649 ASSERT(bytes >= 0); 1650 } 1651 1652 if (bytes_deleted < bytes) 1653 dprintf("only deleted %lld bytes from %p", 1654 (longlong_t)bytes_deleted, state); 1655} 1656 1657static void 1658arc_adjust(void) 1659{ 1660 int64_t top_sz, mru_over, arc_over, todelete; 1661 1662 top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1663 1664 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1665 int64_t toevict = 1666 MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 1667 (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 1668 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1669 } 1670 1671 if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1672 int64_t toevict = 1673 MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 1674 (void) arc_evict(arc_mru, NULL, toevict, FALSE, 1675 ARC_BUFC_METADATA); 1676 top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1677 } 1678 1679 mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1680 1681 if (mru_over > 0) { 1682 if (arc_mru_ghost->arcs_size > 0) { 1683 todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 1684 arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1685 } 1686 } 1687 1688 if ((arc_over = arc_size - arc_c) > 0) { 1689 int64_t tbl_over; 1690 1691 if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1692 int64_t toevict = 1693 MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 1694 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1695 ARC_BUFC_DATA); 1696 arc_over = arc_size - arc_c; 1697 } 1698 1699 if (arc_over > 0 && 1700 arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1701 int64_t toevict = 1702 MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 1703 arc_over); 1704 (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1705 ARC_BUFC_METADATA); 1706 } 1707 1708 tbl_over = arc_size + arc_mru_ghost->arcs_size + 1709 arc_mfu_ghost->arcs_size - arc_c * 2; 1710 1711 if (tbl_over > 0 && 
arc_mfu_ghost->arcs_size > 0) { 1712 todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 1713 arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1714 } 1715 } 1716} 1717 1718static void 1719arc_do_user_evicts(void) 1720{ 1721 static arc_buf_t *tmp_arc_eviction_list; 1722 1723 /* 1724 * Move list over to avoid LOR 1725 */ 1726restart: 1727 mutex_enter(&arc_eviction_mtx); 1728 tmp_arc_eviction_list = arc_eviction_list; 1729 arc_eviction_list = NULL; 1730 mutex_exit(&arc_eviction_mtx); 1731 1732 while (tmp_arc_eviction_list != NULL) { 1733 arc_buf_t *buf = tmp_arc_eviction_list; 1734 tmp_arc_eviction_list = buf->b_next; 1735 rw_enter(&buf->b_lock, RW_WRITER); 1736 buf->b_hdr = NULL; 1737 rw_exit(&buf->b_lock); 1738 1739 if (buf->b_efunc != NULL) 1740 VERIFY(buf->b_efunc(buf) == 0); 1741 1742 buf->b_efunc = NULL; 1743 buf->b_private = NULL; 1744 kmem_cache_free(buf_cache, buf); 1745 } 1746 1747 if (arc_eviction_list != NULL) 1748 goto restart; 1749} 1750 1751/* 1752 * Flush all *evictable* data from the cache for the given spa. 1753 * NOTE: this will not touch "active" (i.e. referenced) data. 1754 */ 1755void 1756arc_flush(spa_t *spa) 1757{ 1758 while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1759 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 1760 if (spa) 1761 break; 1762 } 1763 while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1764 (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 1765 if (spa) 1766 break; 1767 } 1768 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1769 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 1770 if (spa) 1771 break; 1772 } 1773 while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1774 (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 1775 if (spa) 1776 break; 1777 } 1778 1779 arc_evict_ghost(arc_mru_ghost, spa, -1); 1780 arc_evict_ghost(arc_mfu_ghost, spa, -1); 1781 1782 mutex_enter(&arc_reclaim_thr_lock); 1783 arc_do_user_evicts(); 1784 mutex_exit(&arc_reclaim_thr_lock); 1785 ASSERT(spa || arc_eviction_list == NULL); 1786} 1787 1788int arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 1789 1790void 1791arc_shrink(void) 1792{ 1793 if (arc_c > arc_c_min) { 1794 uint64_t to_free; 1795 1796#ifdef _KERNEL 1797 to_free = arc_c >> arc_shrink_shift; 1798#else 1799 to_free = arc_c >> arc_shrink_shift; 1800#endif 1801 if (arc_c > arc_c_min + to_free) 1802 atomic_add_64(&arc_c, -to_free); 1803 else 1804 arc_c = arc_c_min; 1805 1806 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1807 if (arc_c > arc_size) 1808 arc_c = MAX(arc_size, arc_c_min); 1809 if (arc_p > arc_c) 1810 arc_p = (arc_c >> 1); 1811 ASSERT(arc_c >= arc_c_min); 1812 ASSERT((int64_t)arc_p >= 0); 1813 } 1814 1815 if (arc_size > arc_c) 1816 arc_adjust(); 1817} 1818 1819static int needfree = 0; 1820 1821static int 1822arc_reclaim_needed(void) 1823{ 1824#if 0 1825 uint64_t extra; 1826#endif 1827 1828#ifdef _KERNEL 1829 if (needfree) 1830 return (1); 1831 if (arc_size > arc_c_max) 1832 return (1); 1833 if (arc_size <= arc_c_min) 1834 return (0); 1835 1836 /* 1837 * If pages are needed or we're within 2048 pages 1838 * of needing to page need to reclaim 1839 */ 1840 if (vm_pages_needed || (vm_paging_target() > -2048)) 1841 return (1); 1842 1843#if 0 1844 /* 1845 * take 'desfree' extra pages, so we reclaim sooner, rather than later 1846 */ 1847 extra = desfree; 1848 1849 /* 1850 * check that we're out of range of the pageout scanner. It starts to 1851 * schedule paging if freemem is less than lotsfree and needfree. 
1852 * lotsfree is the high-water mark for pageout, and needfree is the 1853 * number of needed free pages. We add extra pages here to make sure 1854 * the scanner doesn't start up while we're freeing memory. 1855 */ 1856 if (freemem < lotsfree + needfree + extra) 1857 return (1); 1858 1859 /* 1860 * check to make sure that swapfs has enough space so that anon 1861 * reservations can still succeed. anon_resvmem() checks that the 1862 * availrmem is greater than swapfs_minfree, and the number of reserved 1863 * swap pages. We also add a bit of extra here just to prevent 1864 * circumstances from getting really dire. 1865 */ 1866 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1867 return (1); 1868 1869#if defined(__i386) 1870 /* 1871 * If we're on an i386 platform, it's possible that we'll exhaust the 1872 * kernel heap space before we ever run out of available physical 1873 * memory. Most checks of the size of the heap_area compare against 1874 * tune.t_minarmem, which is the minimum available real memory that we 1875 * can have in the system. However, this is generally fixed at 25 pages 1876 * which is so low that it's useless. In this comparison, we seek to 1877 * calculate the total heap-size, and reclaim if more than 3/4ths of the 1878 * heap is allocated. (Or, in the calculation, if less than 1/4th is 1879 * free) 1880 */ 1881 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1882 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1883 return (1); 1884#endif 1885#else 1886 if (kmem_used() > (kmem_size() * 3) / 4) 1887 return (1); 1888#endif 1889 1890#else 1891 if (spa_get_random(100) == 0) 1892 return (1); 1893#endif 1894 return (0); 1895} 1896 1897static void 1898arc_kmem_reap_now(arc_reclaim_strategy_t strat) 1899{ 1900#ifdef ZIO_USE_UMA 1901 size_t i; 1902 kmem_cache_t *prev_cache = NULL; 1903 kmem_cache_t *prev_data_cache = NULL; 1904#endif 1905 1906#ifdef _KERNEL 1907 if (arc_meta_used >= arc_meta_limit) { 1908 /* 1909 * We are exceeding our meta-data cache limit. 1910 * Purge some DNLC entries to release holds on meta-data. 1911 */ 1912 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1913 } 1914#if defined(__i386) 1915 /* 1916 * Reclaim unused memory from all kmem caches. 1917 */ 1918 kmem_reap(); 1919#endif 1920#endif 1921 1922 /* 1923 * An aggressive reclamation will shrink the cache size as well as 1924 * reap free buffers from the arc kmem caches. 
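	 * For example (illustrative, assuming the default arc_shrink_shift
	 * of 5 defined above): a single aggressive pass trims arc_c and
	 * arc_p by roughly 1/32nd of their current values (never below
	 * arc_c_min), after which arc_adjust() evicts buffers down to the
	 * new target.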
1925 */ 1926 if (strat == ARC_RECLAIM_AGGR) 1927 arc_shrink(); 1928 1929#ifdef ZIO_USE_UMA 1930 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1931 if (zio_buf_cache[i] != prev_cache) { 1932 prev_cache = zio_buf_cache[i]; 1933 kmem_cache_reap_now(zio_buf_cache[i]); 1934 } 1935 if (zio_data_buf_cache[i] != prev_data_cache) { 1936 prev_data_cache = zio_data_buf_cache[i]; 1937 kmem_cache_reap_now(zio_data_buf_cache[i]); 1938 } 1939 } 1940#endif 1941 kmem_cache_reap_now(buf_cache); 1942 kmem_cache_reap_now(hdr_cache); 1943} 1944 1945static void 1946arc_reclaim_thread(void *dummy __unused) 1947{ 1948 clock_t growtime = 0; 1949 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1950 callb_cpr_t cpr; 1951 1952 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1953 1954 mutex_enter(&arc_reclaim_thr_lock); 1955 while (arc_thread_exit == 0) { 1956 if (arc_reclaim_needed()) { 1957 1958 if (arc_no_grow) { 1959 if (last_reclaim == ARC_RECLAIM_CONS) { 1960 last_reclaim = ARC_RECLAIM_AGGR; 1961 } else { 1962 last_reclaim = ARC_RECLAIM_CONS; 1963 } 1964 } else { 1965 arc_no_grow = TRUE; 1966 last_reclaim = ARC_RECLAIM_AGGR; 1967 membar_producer(); 1968 } 1969 1970 /* reset the growth delay for every reclaim */ 1971 growtime = LBOLT + (arc_grow_retry * hz); 1972 1973 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 1974 /* 1975 * If needfree is TRUE our vm_lowmem hook 1976 * was called and in that case we must free some 1977 * memory, so switch to aggressive mode. 1978 */ 1979 arc_no_grow = TRUE; 1980 last_reclaim = ARC_RECLAIM_AGGR; 1981 } 1982 arc_kmem_reap_now(last_reclaim); 1983 arc_warm = B_TRUE; 1984 1985 } else if (arc_no_grow && LBOLT >= growtime) { 1986 arc_no_grow = FALSE; 1987 } 1988 1989 if (needfree || 1990 (2 * arc_c < arc_size + 1991 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)) 1992 arc_adjust(); 1993 1994 if (arc_eviction_list != NULL) 1995 arc_do_user_evicts(); 1996 1997 if (arc_reclaim_needed()) { 1998 needfree = 0; 1999#ifdef _KERNEL 2000 wakeup(&needfree); 2001#endif 2002 } 2003 2004 /* block until needed, or one second, whichever is shorter */ 2005 CALLB_CPR_SAFE_BEGIN(&cpr); 2006 (void) cv_timedwait(&arc_reclaim_thr_cv, 2007 &arc_reclaim_thr_lock, hz); 2008 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2009 } 2010 2011 arc_thread_exit = 0; 2012 cv_broadcast(&arc_reclaim_thr_cv); 2013 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2014 thread_exit(); 2015} 2016 2017/* 2018 * Adapt arc info given the number of bytes we are trying to add and 2019 * the state that we are comming from. This function is only called 2020 * when we are adding new content to the cache. 2021 */ 2022static void 2023arc_adapt(int bytes, arc_state_t *state) 2024{ 2025 int mult; 2026 2027 if (state == arc_l2c_only) 2028 return; 2029 2030 ASSERT(bytes > 0); 2031 /* 2032 * Adapt the target size of the MRU list: 2033 * - if we just hit in the MRU ghost list, then increase 2034 * the target size of the MRU list. 2035 * - if we just hit in the MFU ghost list, then increase 2036 * the target size of the MFU list by decreasing the 2037 * target size of the MRU list. 2038 */ 2039 if (state == arc_mru_ghost) { 2040 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2041 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2042 2043 arc_p = MIN(arc_c, arc_p + bytes * mult); 2044 } else if (state == arc_mfu_ghost) { 2045 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 
2046 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2047 2048 arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 2049 } 2050 ASSERT((int64_t)arc_p >= 0); 2051 2052 if (arc_reclaim_needed()) { 2053 cv_signal(&arc_reclaim_thr_cv); 2054 return; 2055 } 2056 2057 if (arc_no_grow) 2058 return; 2059 2060 if (arc_c >= arc_c_max) 2061 return; 2062 2063 /* 2064 * If we're within (2 * maxblocksize) bytes of the target 2065 * cache size, increment the target cache size 2066 */ 2067 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2068 atomic_add_64(&arc_c, (int64_t)bytes); 2069 if (arc_c > arc_c_max) 2070 arc_c = arc_c_max; 2071 else if (state == arc_anon) 2072 atomic_add_64(&arc_p, (int64_t)bytes); 2073 if (arc_p > arc_c) 2074 arc_p = arc_c; 2075 } 2076 ASSERT((int64_t)arc_p >= 0); 2077} 2078 2079/* 2080 * Check if the cache has reached its limits and eviction is required 2081 * prior to insert. 2082 */ 2083static int 2084arc_evict_needed(arc_buf_contents_t type) 2085{ 2086 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2087 return (1); 2088 2089#if 0 2090#ifdef _KERNEL 2091 /* 2092 * If zio data pages are being allocated out of a separate heap segment, 2093 * then enforce that the size of available vmem for this area remains 2094 * above about 1/32nd free. 2095 */ 2096 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2097 vmem_size(zio_arena, VMEM_FREE) < 2098 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2099 return (1); 2100#endif 2101#endif 2102 2103 if (arc_reclaim_needed()) 2104 return (1); 2105 2106 return (arc_size > arc_c); 2107} 2108 2109/* 2110 * The buffer, supplied as the first argument, needs a data block. 2111 * So, if we are at cache max, determine which cache should be victimized. 2112 * We have the following cases: 2113 * 2114 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2115 * In this situation if we're out of space, but the resident size of the MFU is 2116 * under the limit, victimize the MFU cache to satisfy this insertion request. 2117 * 2118 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2119 * Here, we've used up all of the available space for the MRU, so we need to 2120 * evict from our own cache instead. Evict from the set of resident MRU 2121 * entries. 2122 * 2123 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2124 * c minus p represents the MFU space in the cache, since p is the size of the 2125 * cache that is dedicated to the MRU. In this situation there's still space on 2126 * the MFU side, so the MRU side needs to be victimized. 2127 * 2128 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2129 * MFU's resident set is consuming more space than it has been allotted. In 2130 * this situation, we must victimize our own cache, the MFU, for this insertion. 2131 */ 2132static void 2133arc_get_data_buf(arc_buf_t *buf) 2134{ 2135 arc_state_t *state = buf->b_hdr->b_state; 2136 uint64_t size = buf->b_hdr->b_size; 2137 arc_buf_contents_t type = buf->b_hdr->b_type; 2138 2139 arc_adapt(size, state); 2140 2141 /* 2142 * We have not yet reached cache maximum size, 2143 * just allocate a new buffer. 
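	 * Otherwise we recycle space from one of the lists below.  For
	 * example (illustrative): an insert headed for the MRU while
	 * arc_anon + arc_mru already exceed arc_p (case 2 above) evicts
	 * from arc_mru itself, whereas an insert headed for the MFU while
	 * arc_c - arc_p still exceeds the MFU resident size (case 3)
	 * victimizes arc_mru instead.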
2144 */ 2145 if (!arc_evict_needed(type)) { 2146 if (type == ARC_BUFC_METADATA) { 2147 buf->b_data = zio_buf_alloc(size); 2148 arc_space_consume(size); 2149 } else { 2150 ASSERT(type == ARC_BUFC_DATA); 2151 buf->b_data = zio_data_buf_alloc(size); 2152 atomic_add_64(&arc_size, size); 2153 } 2154 goto out; 2155 } 2156 2157 /* 2158 * If we are prefetching from the mfu ghost list, this buffer 2159 * will end up on the mru list; so steal space from there. 2160 */ 2161 if (state == arc_mfu_ghost) 2162 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2163 else if (state == arc_mru_ghost) 2164 state = arc_mru; 2165 2166 if (state == arc_mru || state == arc_anon) { 2167 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2168 state = (arc_mfu->arcs_lsize[type] > 0 && 2169 arc_p > mru_used) ? arc_mfu : arc_mru; 2170 } else { 2171 /* MFU cases */ 2172 uint64_t mfu_space = arc_c - arc_p; 2173 state = (arc_mru->arcs_lsize[type] > 0 && 2174 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2175 } 2176 if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2177 if (type == ARC_BUFC_METADATA) { 2178 buf->b_data = zio_buf_alloc(size); 2179 arc_space_consume(size); 2180 } else { 2181 ASSERT(type == ARC_BUFC_DATA); 2182 buf->b_data = zio_data_buf_alloc(size); 2183 atomic_add_64(&arc_size, size); 2184 } 2185 ARCSTAT_BUMP(arcstat_recycle_miss); 2186 } 2187 ASSERT(buf->b_data != NULL); 2188out: 2189 /* 2190 * Update the state size. Note that ghost states have a 2191 * "ghost size" and so don't need to be updated. 2192 */ 2193 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2194 arc_buf_hdr_t *hdr = buf->b_hdr; 2195 2196 atomic_add_64(&hdr->b_state->arcs_size, size); 2197 if (list_link_active(&hdr->b_arc_node)) { 2198 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2199 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2200 } 2201 /* 2202 * If we are growing the cache, and we are adding anonymous 2203 * data, and we have outgrown arc_p, update arc_p 2204 */ 2205 if (arc_size < arc_c && hdr->b_state == arc_anon && 2206 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2207 arc_p = MIN(arc_c, arc_p + size); 2208 } 2209} 2210 2211/* 2212 * This routine is called whenever a buffer is accessed. 2213 * NOTE: the hash lock is dropped in this function. 2214 */ 2215static void 2216arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2217{ 2218 ASSERT(MUTEX_HELD(hash_lock)); 2219 2220 if (buf->b_state == arc_anon) { 2221 /* 2222 * This buffer is not in the cache, and does not 2223 * appear in our "ghost" list. Add the new buffer 2224 * to the MRU state. 2225 */ 2226 2227 ASSERT(buf->b_arc_access == 0); 2228 buf->b_arc_access = LBOLT; 2229 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2230 arc_change_state(arc_mru, buf, hash_lock); 2231 2232 } else if (buf->b_state == arc_mru) { 2233 /* 2234 * If this buffer is here because of a prefetch, then either: 2235 * - clear the flag if this is a "referencing" read 2236 * (any subsequent access will bump this into the MFU state). 2237 * or 2238 * - move the buffer to the head of the list if this is 2239 * another prefetch (to make it less likely to be evicted). 
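		 * For example (illustrative): a block brought in purely by
		 * prefetch sits in the MRU list with ARC_PREFETCH set; the
		 * first demand read clears the flag and counts as an MRU hit,
		 * and a later access more than ARC_MINTIME ticks after that
		 * promotes the buffer to the MFU state below.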
2240 */ 2241 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2242 if (refcount_count(&buf->b_refcnt) == 0) { 2243 ASSERT(list_link_active(&buf->b_arc_node)); 2244 } else { 2245 buf->b_flags &= ~ARC_PREFETCH; 2246 ARCSTAT_BUMP(arcstat_mru_hits); 2247 } 2248 buf->b_arc_access = LBOLT; 2249 return; 2250 } 2251 2252 /* 2253 * This buffer has been "accessed" only once so far, 2254 * but it is still in the cache. Move it to the MFU 2255 * state. 2256 */ 2257 if (LBOLT > buf->b_arc_access + ARC_MINTIME) { 2258 /* 2259 * More than 125ms have passed since we 2260 * instantiated this buffer. Move it to the 2261 * most frequently used state. 2262 */ 2263 buf->b_arc_access = LBOLT; 2264 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2265 arc_change_state(arc_mfu, buf, hash_lock); 2266 } 2267 ARCSTAT_BUMP(arcstat_mru_hits); 2268 } else if (buf->b_state == arc_mru_ghost) { 2269 arc_state_t *new_state; 2270 /* 2271 * This buffer has been "accessed" recently, but 2272 * was evicted from the cache. Move it to the 2273 * MFU state. 2274 */ 2275 2276 if (buf->b_flags & ARC_PREFETCH) { 2277 new_state = arc_mru; 2278 if (refcount_count(&buf->b_refcnt) > 0) 2279 buf->b_flags &= ~ARC_PREFETCH; 2280 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2281 } else { 2282 new_state = arc_mfu; 2283 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2284 } 2285 2286 buf->b_arc_access = LBOLT; 2287 arc_change_state(new_state, buf, hash_lock); 2288 2289 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2290 } else if (buf->b_state == arc_mfu) { 2291 /* 2292 * This buffer has been accessed more than once and is 2293 * still in the cache. Keep it in the MFU state. 2294 * 2295 * NOTE: an add_reference() that occurred when we did 2296 * the arc_read() will have kicked this off the list. 2297 * If it was a prefetch, we will explicitly move it to 2298 * the head of the list now. 2299 */ 2300 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2301 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2302 ASSERT(list_link_active(&buf->b_arc_node)); 2303 } 2304 ARCSTAT_BUMP(arcstat_mfu_hits); 2305 buf->b_arc_access = LBOLT; 2306 } else if (buf->b_state == arc_mfu_ghost) { 2307 arc_state_t *new_state = arc_mfu; 2308 /* 2309 * This buffer has been accessed more than once but has 2310 * been evicted from the cache. Move it back to the 2311 * MFU state. 2312 */ 2313 2314 if (buf->b_flags & ARC_PREFETCH) { 2315 /* 2316 * This is a prefetch access... 2317 * move this block back to the MRU state. 2318 */ 2319 ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2320 new_state = arc_mru; 2321 } 2322 2323 buf->b_arc_access = LBOLT; 2324 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2325 arc_change_state(new_state, buf, hash_lock); 2326 2327 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2328 } else if (buf->b_state == arc_l2c_only) { 2329 /* 2330 * This buffer is on the 2nd Level ARC. 
2331 */ 2332 2333 buf->b_arc_access = LBOLT; 2334 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2335 arc_change_state(arc_mfu, buf, hash_lock); 2336 } else { 2337 ASSERT(!"invalid arc state"); 2338 } 2339} 2340 2341/* a generic arc_done_func_t which you can use */ 2342/* ARGSUSED */ 2343void 2344arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2345{ 2346 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2347 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2348} 2349 2350/* a generic arc_done_func_t */ 2351void 2352arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2353{ 2354 arc_buf_t **bufp = arg; 2355 if (zio && zio->io_error) { 2356 VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2357 *bufp = NULL; 2358 } else { 2359 *bufp = buf; 2360 } 2361} 2362 2363static void 2364arc_read_done(zio_t *zio) 2365{ 2366 arc_buf_hdr_t *hdr, *found; 2367 arc_buf_t *buf; 2368 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2369 kmutex_t *hash_lock; 2370 arc_callback_t *callback_list, *acb; 2371 int freeable = FALSE; 2372 2373 buf = zio->io_private; 2374 hdr = buf->b_hdr; 2375 2376 /* 2377 * The hdr was inserted into hash-table and removed from lists 2378 * prior to starting I/O. We should find this header, since 2379 * it's in the hash table, and it should be legit since it's 2380 * not possible to evict it during the I/O. The only possible 2381 * reason for it not to be found is if we were freed during the 2382 * read. 2383 */ 2384 found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 2385 &hash_lock); 2386 2387 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2388 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2389 (found == hdr && HDR_L2_READING(hdr))); 2390 2391 hdr->b_flags &= ~ARC_L2_EVICTED; 2392 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2393 hdr->b_flags &= ~ARC_L2CACHE; 2394 2395 /* byteswap if necessary */ 2396 callback_list = hdr->b_acb; 2397 ASSERT(callback_list != NULL); 2398 if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 2399 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2400 byteswap_uint64_array : 2401 dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2402 func(buf->b_data, hdr->b_size); 2403 } 2404 2405 arc_cksum_compute(buf, B_FALSE); 2406 2407 /* create copies of the data buffer for the callers */ 2408 abuf = buf; 2409 for (acb = callback_list; acb; acb = acb->acb_next) { 2410 if (acb->acb_done) { 2411 if (abuf == NULL) 2412 abuf = arc_buf_clone(buf); 2413 acb->acb_buf = abuf; 2414 abuf = NULL; 2415 } 2416 } 2417 hdr->b_acb = NULL; 2418 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2419 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2420 if (abuf == buf) 2421 hdr->b_flags |= ARC_BUF_AVAILABLE; 2422 2423 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2424 2425 if (zio->io_error != 0) { 2426 hdr->b_flags |= ARC_IO_ERROR; 2427 if (hdr->b_state != arc_anon) 2428 arc_change_state(arc_anon, hdr, hash_lock); 2429 if (HDR_IN_HASH_TABLE(hdr)) 2430 buf_hash_remove(hdr); 2431 freeable = refcount_is_zero(&hdr->b_refcnt); 2432 } 2433 2434 /* 2435 * Broadcast before we drop the hash_lock to avoid the possibility 2436 * that the hdr (and hence the cv) might be freed before we get to 2437 * the cv_broadcast(). 2438 */ 2439 cv_broadcast(&hdr->b_cv); 2440 2441 if (hash_lock) { 2442 /* 2443 * Only call arc_access on anonymous buffers. This is because 2444 * if we've issued an I/O for an evicted buffer, we've already 2445 * called arc_access (to prevent any simultaneous readers from 2446 * getting confused). 
2447 */ 2448 if (zio->io_error == 0 && hdr->b_state == arc_anon) 2449 arc_access(hdr, hash_lock); 2450 mutex_exit(hash_lock); 2451 } else { 2452 /* 2453 * This block was freed while we waited for the read to 2454 * complete. It has been removed from the hash table and 2455 * moved to the anonymous state (so that it won't show up 2456 * in the cache). 2457 */ 2458 ASSERT3P(hdr->b_state, ==, arc_anon); 2459 freeable = refcount_is_zero(&hdr->b_refcnt); 2460 } 2461 2462 /* execute each callback and free its structure */ 2463 while ((acb = callback_list) != NULL) { 2464 if (acb->acb_done) 2465 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2466 2467 if (acb->acb_zio_dummy != NULL) { 2468 acb->acb_zio_dummy->io_error = zio->io_error; 2469 zio_nowait(acb->acb_zio_dummy); 2470 } 2471 2472 callback_list = acb->acb_next; 2473 kmem_free(acb, sizeof (arc_callback_t)); 2474 } 2475 2476 if (freeable) 2477 arc_hdr_destroy(hdr); 2478} 2479 2480/* 2481 * "Read" the block block at the specified DVA (in bp) via the 2482 * cache. If the block is found in the cache, invoke the provided 2483 * callback immediately and return. Note that the `zio' parameter 2484 * in the callback will be NULL in this case, since no IO was 2485 * required. If the block is not in the cache pass the read request 2486 * on to the spa with a substitute callback function, so that the 2487 * requested block will be added to the cache. 2488 * 2489 * If a read request arrives for a block that has a read in-progress, 2490 * either wait for the in-progress read to complete (and return the 2491 * results); or, if this is a read with a "done" func, add a record 2492 * to the read to invoke the "done" func when the read completes, 2493 * and return; or just return. 2494 * 2495 * arc_read_done() will invoke all the requested "done" functions 2496 * for readers of this block. 2497 * 2498 * Normal callers should use arc_read and pass the arc buffer and offset 2499 * for the bp. But if you know you don't need locking, you can use 2500 * arc_read_bp. 
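 *
 * As an illustration only (hypothetical caller; none of these variable
 * names come from this file), a synchronous cached read might look like:
 *
 *	uint32_t flags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *
 *	error = arc_read(NULL, spa, bp, pbuf, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &flags, &zb);
 *
 * where pbuf is a held arc buffer containing bp, and abuf receives the
 * requested block via arc_getbuf_func() on success.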
2501 */ 2502int 2503arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, 2504 arc_done_func_t *done, void *private, int priority, int zio_flags, 2505 uint32_t *arc_flags, const zbookmark_t *zb) 2506{ 2507 int err; 2508 2509 ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2510 ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2511 rw_enter(&pbuf->b_lock, RW_READER); 2512 2513 err = arc_read_nolock(pio, spa, bp, done, private, priority, 2514 zio_flags, arc_flags, zb); 2515 2516 rw_exit(&pbuf->b_lock); 2517 2518 return (err); 2519} 2520 2521int 2522arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, 2523 arc_done_func_t *done, void *private, int priority, int zio_flags, 2524 uint32_t *arc_flags, const zbookmark_t *zb) 2525{ 2526 arc_buf_hdr_t *hdr; 2527 arc_buf_t *buf; 2528 kmutex_t *hash_lock; 2529 zio_t *rzio; 2530 2531top: 2532 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2533 if (hdr && hdr->b_datacnt > 0) { 2534 2535 *arc_flags |= ARC_CACHED; 2536 2537 if (HDR_IO_IN_PROGRESS(hdr)) { 2538 2539 if (*arc_flags & ARC_WAIT) { 2540 cv_wait(&hdr->b_cv, hash_lock); 2541 mutex_exit(hash_lock); 2542 goto top; 2543 } 2544 ASSERT(*arc_flags & ARC_NOWAIT); 2545 2546 if (done) { 2547 arc_callback_t *acb = NULL; 2548 2549 acb = kmem_zalloc(sizeof (arc_callback_t), 2550 KM_SLEEP); 2551 acb->acb_done = done; 2552 acb->acb_private = private; 2553 if (pio != NULL) 2554 acb->acb_zio_dummy = zio_null(pio, 2555 spa, NULL, NULL, zio_flags); 2556 2557 ASSERT(acb->acb_done != NULL); 2558 acb->acb_next = hdr->b_acb; 2559 hdr->b_acb = acb; 2560 add_reference(hdr, hash_lock, private); 2561 mutex_exit(hash_lock); 2562 return (0); 2563 } 2564 mutex_exit(hash_lock); 2565 return (0); 2566 } 2567 2568 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2569 2570 if (done) { 2571 add_reference(hdr, hash_lock, private); 2572 /* 2573 * If this block is already in use, create a new 2574 * copy of the data so that we will be guaranteed 2575 * that arc_release() will always succeed. 
2576 */ 2577 buf = hdr->b_buf; 2578 ASSERT(buf); 2579 ASSERT(buf->b_data); 2580 if (HDR_BUF_AVAILABLE(hdr)) { 2581 ASSERT(buf->b_efunc == NULL); 2582 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2583 } else { 2584 buf = arc_buf_clone(buf); 2585 } 2586 } else if (*arc_flags & ARC_PREFETCH && 2587 refcount_count(&hdr->b_refcnt) == 0) { 2588 hdr->b_flags |= ARC_PREFETCH; 2589 } 2590 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2591 arc_access(hdr, hash_lock); 2592 if (*arc_flags & ARC_L2CACHE) 2593 hdr->b_flags |= ARC_L2CACHE; 2594 mutex_exit(hash_lock); 2595 ARCSTAT_BUMP(arcstat_hits); 2596 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2597 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2598 data, metadata, hits); 2599 2600 if (done) 2601 done(NULL, buf, private); 2602 } else { 2603 uint64_t size = BP_GET_LSIZE(bp); 2604 arc_callback_t *acb; 2605 vdev_t *vd = NULL; 2606 daddr_t addr; 2607 2608 if (hdr == NULL) { 2609 /* this block is not in the cache */ 2610 arc_buf_hdr_t *exists; 2611 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2612 buf = arc_buf_alloc(spa, size, private, type); 2613 hdr = buf->b_hdr; 2614 hdr->b_dva = *BP_IDENTITY(bp); 2615 hdr->b_birth = bp->blk_birth; 2616 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2617 exists = buf_hash_insert(hdr, &hash_lock); 2618 if (exists) { 2619 /* somebody beat us to the hash insert */ 2620 mutex_exit(hash_lock); 2621 bzero(&hdr->b_dva, sizeof (dva_t)); 2622 hdr->b_birth = 0; 2623 hdr->b_cksum0 = 0; 2624 (void) arc_buf_remove_ref(buf, private); 2625 goto top; /* restart the IO request */ 2626 } 2627 /* if this is a prefetch, we don't have a reference */ 2628 if (*arc_flags & ARC_PREFETCH) { 2629 (void) remove_reference(hdr, hash_lock, 2630 private); 2631 hdr->b_flags |= ARC_PREFETCH; 2632 } 2633 if (*arc_flags & ARC_L2CACHE) 2634 hdr->b_flags |= ARC_L2CACHE; 2635 if (BP_GET_LEVEL(bp) > 0) 2636 hdr->b_flags |= ARC_INDIRECT; 2637 } else { 2638 /* this block is in the ghost cache */ 2639 ASSERT(GHOST_STATE(hdr->b_state)); 2640 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2641 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2642 ASSERT(hdr->b_buf == NULL); 2643 2644 /* if this is a prefetch, we don't have a reference */ 2645 if (*arc_flags & ARC_PREFETCH) 2646 hdr->b_flags |= ARC_PREFETCH; 2647 else 2648 add_reference(hdr, hash_lock, private); 2649 if (*arc_flags & ARC_L2CACHE) 2650 hdr->b_flags |= ARC_L2CACHE; 2651 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2652 buf->b_hdr = hdr; 2653 buf->b_data = NULL; 2654 buf->b_efunc = NULL; 2655 buf->b_private = NULL; 2656 buf->b_next = NULL; 2657 hdr->b_buf = buf; 2658 arc_get_data_buf(buf); 2659 ASSERT(hdr->b_datacnt == 0); 2660 hdr->b_datacnt = 1; 2661 2662 } 2663 2664 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2665 acb->acb_done = done; 2666 acb->acb_private = private; 2667 2668 ASSERT(hdr->b_acb == NULL); 2669 hdr->b_acb = acb; 2670 hdr->b_flags |= ARC_IO_IN_PROGRESS; 2671 2672 /* 2673 * If the buffer has been evicted, migrate it to a present state 2674 * before issuing the I/O. Once we drop the hash-table lock, 2675 * the header will be marked as I/O in progress and have an 2676 * attached buffer. At this point, anybody who finds this 2677 * buffer ought to notice that it's legit but has a pending I/O. 2678 */ 2679 2680 if (GHOST_STATE(hdr->b_state)) 2681 arc_access(hdr, hash_lock); 2682 2683 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2684 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2685 addr = hdr->b_l2hdr->b_daddr; 2686 /* 2687 * Lock out device removal. 
2688 */ 2689 if (vdev_is_dead(vd) || 2690 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2691 vd = NULL; 2692 } 2693 2694 mutex_exit(hash_lock); 2695 2696 ASSERT3U(hdr->b_size, ==, size); 2697 DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2698 zbookmark_t *, zb); 2699 ARCSTAT_BUMP(arcstat_misses); 2700 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2701 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2702 data, metadata, misses); 2703 2704 if (vd != NULL) { 2705 /* 2706 * Read from the L2ARC if the following are true: 2707 * 1. The L2ARC vdev was previously cached. 2708 * 2. This buffer still has L2ARC metadata. 2709 * 3. This buffer isn't currently writing to the L2ARC. 2710 * 4. The L2ARC entry wasn't evicted, which may 2711 * also have invalidated the vdev. 2712 */ 2713 if (hdr->b_l2hdr != NULL && 2714 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) { 2715 l2arc_read_callback_t *cb; 2716 2717 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2718 ARCSTAT_BUMP(arcstat_l2_hits); 2719 2720 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2721 KM_SLEEP); 2722 cb->l2rcb_buf = buf; 2723 cb->l2rcb_spa = spa; 2724 cb->l2rcb_bp = *bp; 2725 cb->l2rcb_zb = *zb; 2726 cb->l2rcb_flags = zio_flags; 2727 2728 /* 2729 * l2arc read. The SCL_L2ARC lock will be 2730 * released by l2arc_read_done(). 2731 */ 2732 rzio = zio_read_phys(pio, vd, addr, size, 2733 buf->b_data, ZIO_CHECKSUM_OFF, 2734 l2arc_read_done, cb, priority, zio_flags | 2735 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2736 ZIO_FLAG_DONT_PROPAGATE | 2737 ZIO_FLAG_DONT_RETRY, B_FALSE); 2738 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2739 zio_t *, rzio); 2740 2741 if (*arc_flags & ARC_NOWAIT) { 2742 zio_nowait(rzio); 2743 return (0); 2744 } 2745 2746 ASSERT(*arc_flags & ARC_WAIT); 2747 if (zio_wait(rzio) == 0) 2748 return (0); 2749 2750 /* l2arc read error; goto zio_read() */ 2751 } else { 2752 DTRACE_PROBE1(l2arc__miss, 2753 arc_buf_hdr_t *, hdr); 2754 ARCSTAT_BUMP(arcstat_l2_misses); 2755 if (HDR_L2_WRITING(hdr)) 2756 ARCSTAT_BUMP(arcstat_l2_rw_clash); 2757 spa_config_exit(spa, SCL_L2ARC, vd); 2758 } 2759 } 2760 2761 rzio = zio_read(pio, spa, bp, buf->b_data, size, 2762 arc_read_done, buf, priority, zio_flags, zb); 2763 2764 if (*arc_flags & ARC_WAIT) 2765 return (zio_wait(rzio)); 2766 2767 ASSERT(*arc_flags & ARC_NOWAIT); 2768 zio_nowait(rzio); 2769 } 2770 return (0); 2771} 2772 2773/* 2774 * arc_read() variant to support pool traversal. If the block is already 2775 * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2776 * The idea is that we don't want pool traversal filling up memory, but 2777 * if the ARC already has the data anyway, we shouldn't pay for the I/O. 
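 *
 * (Illustration: a traversal caller passes its own buffer; on a clean
 * cache hit the data is bcopy()ed into it and 0 is returned, otherwise
 * ENOENT is returned and the caller performs the read itself.)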
2778 */ 2779int 2780arc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2781{ 2782 arc_buf_hdr_t *hdr; 2783 kmutex_t *hash_mtx; 2784 int rc = 0; 2785 2786 hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2787 2788 if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2789 arc_buf_t *buf = hdr->b_buf; 2790 2791 ASSERT(buf); 2792 while (buf->b_data == NULL) { 2793 buf = buf->b_next; 2794 ASSERT(buf); 2795 } 2796 bcopy(buf->b_data, data, hdr->b_size); 2797 } else { 2798 rc = ENOENT; 2799 } 2800 2801 if (hash_mtx) 2802 mutex_exit(hash_mtx); 2803 2804 return (rc); 2805} 2806 2807void 2808arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2809{ 2810 ASSERT(buf->b_hdr != NULL); 2811 ASSERT(buf->b_hdr->b_state != arc_anon); 2812 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2813 buf->b_efunc = func; 2814 buf->b_private = private; 2815} 2816 2817/* 2818 * This is used by the DMU to let the ARC know that a buffer is 2819 * being evicted, so the ARC should clean up. If this arc buf 2820 * is not yet in the evicted state, it will be put there. 2821 */ 2822int 2823arc_buf_evict(arc_buf_t *buf) 2824{ 2825 arc_buf_hdr_t *hdr; 2826 kmutex_t *hash_lock; 2827 arc_buf_t **bufp; 2828 2829 rw_enter(&buf->b_lock, RW_WRITER); 2830 hdr = buf->b_hdr; 2831 if (hdr == NULL) { 2832 /* 2833 * We are in arc_do_user_evicts(). 2834 */ 2835 ASSERT(buf->b_data == NULL); 2836 rw_exit(&buf->b_lock); 2837 return (0); 2838 } else if (buf->b_data == NULL) { 2839 arc_buf_t copy = *buf; /* structure assignment */ 2840 /* 2841 * We are on the eviction list; process this buffer now 2842 * but let arc_do_user_evicts() do the reaping. 2843 */ 2844 buf->b_efunc = NULL; 2845 rw_exit(&buf->b_lock); 2846 VERIFY(copy.b_efunc(©) == 0); 2847 return (1); 2848 } 2849 hash_lock = HDR_LOCK(hdr); 2850 mutex_enter(hash_lock); 2851 2852 ASSERT(buf->b_hdr == hdr); 2853 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2854 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2855 2856 /* 2857 * Pull this buffer off of the hdr 2858 */ 2859 bufp = &hdr->b_buf; 2860 while (*bufp != buf) 2861 bufp = &(*bufp)->b_next; 2862 *bufp = buf->b_next; 2863 2864 ASSERT(buf->b_data != NULL); 2865 arc_buf_destroy(buf, FALSE, FALSE); 2866 2867 if (hdr->b_datacnt == 0) { 2868 arc_state_t *old_state = hdr->b_state; 2869 arc_state_t *evicted_state; 2870 2871 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2872 2873 evicted_state = 2874 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 2875 2876 mutex_enter(&old_state->arcs_mtx); 2877 mutex_enter(&evicted_state->arcs_mtx); 2878 2879 arc_change_state(evicted_state, hdr, hash_lock); 2880 ASSERT(HDR_IN_HASH_TABLE(hdr)); 2881 hdr->b_flags |= ARC_IN_HASH_TABLE; 2882 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2883 2884 mutex_exit(&evicted_state->arcs_mtx); 2885 mutex_exit(&old_state->arcs_mtx); 2886 } 2887 mutex_exit(hash_lock); 2888 rw_exit(&buf->b_lock); 2889 2890 VERIFY(buf->b_efunc(buf) == 0); 2891 buf->b_efunc = NULL; 2892 buf->b_private = NULL; 2893 buf->b_hdr = NULL; 2894 kmem_cache_free(buf_cache, buf); 2895 return (1); 2896} 2897 2898/* 2899 * Release this buffer from the cache. This must be done 2900 * after a read and prior to modifying the buffer contents. 2901 * If the buffer has more than one reference, we must make 2902 * a new hdr for the buffer. 
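 *
 * For example (illustrative): when two arc_buf_t's share one header and
 * one of them is released, that buffer is detached onto a new anonymous
 * header carrying its own reference, while the remaining buffer stays
 * cached under the original header; with only a single reference, the
 * existing header itself is simply moved to the anonymous state.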
2903 */ 2904void 2905arc_release(arc_buf_t *buf, void *tag) 2906{ 2907 arc_buf_hdr_t *hdr; 2908 kmutex_t *hash_lock; 2909 l2arc_buf_hdr_t *l2hdr; 2910 uint64_t buf_size; 2911 2912 rw_enter(&buf->b_lock, RW_WRITER); 2913 hdr = buf->b_hdr; 2914 2915 /* this buffer is not on any list */ 2916 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2917 ASSERT(!(hdr->b_flags & ARC_STORED)); 2918 2919 if (hdr->b_state == arc_anon) { 2920 /* this buffer is already released */ 2921 ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2922 ASSERT(BUF_EMPTY(hdr)); 2923 ASSERT(buf->b_efunc == NULL); 2924 arc_buf_thaw(buf); 2925 rw_exit(&buf->b_lock); 2926 return; 2927 } 2928 2929 hash_lock = HDR_LOCK(hdr); 2930 mutex_enter(hash_lock); 2931 2932 l2hdr = hdr->b_l2hdr; 2933 if (l2hdr) { 2934 mutex_enter(&l2arc_buflist_mtx); 2935 hdr->b_l2hdr = NULL; 2936 buf_size = hdr->b_size; 2937 } 2938 2939 /* 2940 * Do we have more than one buf? 2941 */ 2942 if (hdr->b_datacnt > 1) { 2943 arc_buf_hdr_t *nhdr; 2944 arc_buf_t **bufp; 2945 uint64_t blksz = hdr->b_size; 2946 spa_t *spa = hdr->b_spa; 2947 arc_buf_contents_t type = hdr->b_type; 2948 uint32_t flags = hdr->b_flags; 2949 2950 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 2951 /* 2952 * Pull the data off of this buf and attach it to 2953 * a new anonymous buf. 2954 */ 2955 (void) remove_reference(hdr, hash_lock, tag); 2956 bufp = &hdr->b_buf; 2957 while (*bufp != buf) 2958 bufp = &(*bufp)->b_next; 2959 *bufp = (*bufp)->b_next; 2960 buf->b_next = NULL; 2961 2962 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2963 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2964 if (refcount_is_zero(&hdr->b_refcnt)) { 2965 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2966 ASSERT3U(*size, >=, hdr->b_size); 2967 atomic_add_64(size, -hdr->b_size); 2968 } 2969 hdr->b_datacnt -= 1; 2970 arc_cksum_verify(buf); 2971 2972 mutex_exit(hash_lock); 2973 2974 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2975 nhdr->b_size = blksz; 2976 nhdr->b_spa = spa; 2977 nhdr->b_type = type; 2978 nhdr->b_buf = buf; 2979 nhdr->b_state = arc_anon; 2980 nhdr->b_arc_access = 0; 2981 nhdr->b_flags = flags & ARC_L2_WRITING; 2982 nhdr->b_l2hdr = NULL; 2983 nhdr->b_datacnt = 1; 2984 nhdr->b_freeze_cksum = NULL; 2985 (void) refcount_add(&nhdr->b_refcnt, tag); 2986 buf->b_hdr = nhdr; 2987 rw_exit(&buf->b_lock); 2988 atomic_add_64(&arc_anon->arcs_size, blksz); 2989 } else { 2990 rw_exit(&buf->b_lock); 2991 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2992 ASSERT(!list_link_active(&hdr->b_arc_node)); 2993 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2994 arc_change_state(arc_anon, hdr, hash_lock); 2995 hdr->b_arc_access = 0; 2996 mutex_exit(hash_lock); 2997 2998 bzero(&hdr->b_dva, sizeof (dva_t)); 2999 hdr->b_birth = 0; 3000 hdr->b_cksum0 = 0; 3001 arc_buf_thaw(buf); 3002 } 3003 buf->b_efunc = NULL; 3004 buf->b_private = NULL; 3005 3006 if (l2hdr) { 3007 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3008 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3009 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3010 mutex_exit(&l2arc_buflist_mtx); 3011 } 3012} 3013 3014int 3015arc_released(arc_buf_t *buf) 3016{ 3017 int released; 3018 3019 rw_enter(&buf->b_lock, RW_READER); 3020 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3021 rw_exit(&buf->b_lock); 3022 return (released); 3023} 3024 3025int 3026arc_has_callback(arc_buf_t *buf) 3027{ 3028 int callback; 3029 3030 rw_enter(&buf->b_lock, RW_READER); 3031 callback = (buf->b_efunc != NULL); 3032 rw_exit(&buf->b_lock); 3033 return (callback); 3034} 3035 
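#if 0
/*
 * Purely illustrative sketch (editor's example, not part of the original
 * source): how an ARC consumer might register an eviction callback with
 * arc_set_callback() and honor the contract enforced by arc_buf_evict()
 * and arc_do_user_evicts() above.  The my_state_t/my_evict_cb names are
 * hypothetical; the real consumer of this interface is the DMU.
 */
typedef struct my_state {
	int	ms_cached;	/* nonzero while our arc buffer is intact */
} my_state_t;

static int
my_evict_cb(void *varg)
{
	arc_buf_t *buf = varg;
	my_state_t *ms = buf->b_private;

	/*
	 * Called with no ARC locks held; buf->b_data may already have
	 * been evicted, so only touch our own private state here.
	 */
	ms->ms_cached = 0;
	return (0);
}

static void
my_watch_buf(arc_buf_t *buf, my_state_t *ms)
{
	/* The caller must hold a reference on buf (see arc_set_callback()). */
	ms->ms_cached = 1;
	arc_set_callback(buf, my_evict_cb, ms);
}
#endif
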
3036#ifdef ZFS_DEBUG 3037int 3038arc_referenced(arc_buf_t *buf) 3039{ 3040 int referenced; 3041 3042 rw_enter(&buf->b_lock, RW_READER); 3043 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3044 rw_exit(&buf->b_lock); 3045 return (referenced); 3046} 3047#endif 3048 3049static void 3050arc_write_ready(zio_t *zio) 3051{ 3052 arc_write_callback_t *callback = zio->io_private; 3053 arc_buf_t *buf = callback->awcb_buf; 3054 arc_buf_hdr_t *hdr = buf->b_hdr; 3055 3056 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3057 callback->awcb_ready(zio, buf, callback->awcb_private); 3058 3059 /* 3060 * If the IO is already in progress, then this is a re-write 3061 * attempt, so we need to thaw and re-compute the cksum. 3062 * It is the responsibility of the callback to handle the 3063 * accounting for any re-write attempt. 3064 */ 3065 if (HDR_IO_IN_PROGRESS(hdr)) { 3066 mutex_enter(&hdr->b_freeze_lock); 3067 if (hdr->b_freeze_cksum != NULL) { 3068 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3069 hdr->b_freeze_cksum = NULL; 3070 } 3071 mutex_exit(&hdr->b_freeze_lock); 3072 } 3073 arc_cksum_compute(buf, B_FALSE); 3074 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3075} 3076 3077static void 3078arc_write_done(zio_t *zio) 3079{ 3080 arc_write_callback_t *callback = zio->io_private; 3081 arc_buf_t *buf = callback->awcb_buf; 3082 arc_buf_hdr_t *hdr = buf->b_hdr; 3083 3084 hdr->b_acb = NULL; 3085 3086 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3087 hdr->b_birth = zio->io_bp->blk_birth; 3088 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3089 /* 3090 * If the block to be written was all-zero, we may have 3091 * compressed it away. In this case no write was performed 3092 * so there will be no dva/birth-date/checksum. The buffer 3093 * must therefor remain anonymous (and uncached). 3094 */ 3095 if (!BUF_EMPTY(hdr)) { 3096 arc_buf_hdr_t *exists; 3097 kmutex_t *hash_lock; 3098 3099 arc_cksum_verify(buf); 3100 3101 exists = buf_hash_insert(hdr, &hash_lock); 3102 if (exists) { 3103 /* 3104 * This can only happen if we overwrite for 3105 * sync-to-convergence, because we remove 3106 * buffers from the hash table when we arc_free(). 3107 */ 3108 ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); 3109 ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 3110 BP_IDENTITY(zio->io_bp))); 3111 ASSERT3U(zio->io_bp_orig.blk_birth, ==, 3112 zio->io_bp->blk_birth); 3113 3114 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3115 arc_change_state(arc_anon, exists, hash_lock); 3116 mutex_exit(hash_lock); 3117 arc_hdr_destroy(exists); 3118 exists = buf_hash_insert(hdr, &hash_lock); 3119 ASSERT3P(exists, ==, NULL); 3120 } 3121 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3122 /* if it's not anon, we are doing a scrub */ 3123 if (hdr->b_state == arc_anon) 3124 arc_access(hdr, hash_lock); 3125 mutex_exit(hash_lock); 3126 } else if (callback->awcb_done == NULL) { 3127 int destroy_hdr; 3128 /* 3129 * This is an anonymous buffer with no user callback, 3130 * destroy it if there are no active references. 
3131 */ 3132 mutex_enter(&arc_eviction_mtx); 3133 destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3134 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3135 mutex_exit(&arc_eviction_mtx); 3136 if (destroy_hdr) 3137 arc_hdr_destroy(hdr); 3138 } else { 3139 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3140 } 3141 hdr->b_flags &= ~ARC_STORED; 3142 3143 if (callback->awcb_done) { 3144 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3145 callback->awcb_done(zio, buf, callback->awcb_private); 3146 } 3147 3148 kmem_free(callback, sizeof (arc_write_callback_t)); 3149} 3150 3151static void 3152write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) 3153{ 3154 boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); 3155 3156 /* Determine checksum setting */ 3157 if (ismd) { 3158 /* 3159 * Metadata always gets checksummed. If the data 3160 * checksum is multi-bit correctable, and it's not a 3161 * ZBT-style checksum, then it's suitable for metadata 3162 * as well. Otherwise, the metadata checksum defaults 3163 * to fletcher4. 3164 */ 3165 if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && 3166 !zio_checksum_table[wp->wp_oschecksum].ci_zbt) 3167 zp->zp_checksum = wp->wp_oschecksum; 3168 else 3169 zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; 3170 } else { 3171 zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, 3172 wp->wp_oschecksum); 3173 } 3174 3175 /* Determine compression setting */ 3176 if (ismd) { 3177 /* 3178 * XXX -- we should design a compression algorithm 3179 * that specializes in arrays of bps. 3180 */ 3181 zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : 3182 ZIO_COMPRESS_LZJB; 3183 } else { 3184 zp->zp_compress = zio_compress_select(wp->wp_dncompress, 3185 wp->wp_oscompress); 3186 } 3187 3188 zp->zp_type = wp->wp_type; 3189 zp->zp_level = wp->wp_level; 3190 zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); 3191} 3192 3193zio_t * 3194arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, 3195 boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3196 arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3197 int zio_flags, const zbookmark_t *zb) 3198{ 3199 arc_buf_hdr_t *hdr = buf->b_hdr; 3200 arc_write_callback_t *callback; 3201 zio_t *zio; 3202 zio_prop_t zp; 3203 3204 ASSERT(ready != NULL); 3205 ASSERT(!HDR_IO_ERROR(hdr)); 3206 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3207 ASSERT(hdr->b_acb == 0); 3208 if (l2arc) 3209 hdr->b_flags |= ARC_L2CACHE; 3210 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3211 callback->awcb_ready = ready; 3212 callback->awcb_done = done; 3213 callback->awcb_private = private; 3214 callback->awcb_buf = buf; 3215 3216 write_policy(spa, wp, &zp); 3217 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, 3218 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3219 3220 return (zio); 3221} 3222 3223int 3224arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3225 zio_done_func_t *done, void *private, uint32_t arc_flags) 3226{ 3227 arc_buf_hdr_t *ab; 3228 kmutex_t *hash_lock; 3229 zio_t *zio; 3230 3231 /* 3232 * If this buffer is in the cache, release it, so it 3233 * can be re-used. 3234 */ 3235 ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3236 if (ab != NULL) { 3237 /* 3238 * The checksum of blocks to free is not always 3239 * preserved (eg. on the deadlist). However, if it is 3240 * nonzero, it should match what we have in the cache. 
3241 */ 3242 ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3243 bp->blk_cksum.zc_word[0] == ab->b_cksum0 || 3244 bp->blk_fill == BLK_FILL_ALREADY_FREED); 3245 3246 if (ab->b_state != arc_anon) 3247 arc_change_state(arc_anon, ab, hash_lock); 3248 if (HDR_IO_IN_PROGRESS(ab)) { 3249 /* 3250 * This should only happen when we prefetch. 3251 */ 3252 ASSERT(ab->b_flags & ARC_PREFETCH); 3253 ASSERT3U(ab->b_datacnt, ==, 1); 3254 ab->b_flags |= ARC_FREED_IN_READ; 3255 if (HDR_IN_HASH_TABLE(ab)) 3256 buf_hash_remove(ab); 3257 ab->b_arc_access = 0; 3258 bzero(&ab->b_dva, sizeof (dva_t)); 3259 ab->b_birth = 0; 3260 ab->b_cksum0 = 0; 3261 ab->b_buf->b_efunc = NULL; 3262 ab->b_buf->b_private = NULL; 3263 mutex_exit(hash_lock); 3264 } else if (refcount_is_zero(&ab->b_refcnt)) { 3265 ab->b_flags |= ARC_FREE_IN_PROGRESS; 3266 mutex_exit(hash_lock); 3267 arc_hdr_destroy(ab); 3268 ARCSTAT_BUMP(arcstat_deleted); 3269 } else { 3270 /* 3271 * We still have an active reference on this 3272 * buffer. This can happen, e.g., from 3273 * dbuf_unoverride(). 3274 */ 3275 ASSERT(!HDR_IN_HASH_TABLE(ab)); 3276 ab->b_arc_access = 0; 3277 bzero(&ab->b_dva, sizeof (dva_t)); 3278 ab->b_birth = 0; 3279 ab->b_cksum0 = 0; 3280 ab->b_buf->b_efunc = NULL; 3281 ab->b_buf->b_private = NULL; 3282 mutex_exit(hash_lock); 3283 } 3284 } 3285 3286 zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); 3287 3288 if (arc_flags & ARC_WAIT) 3289 return (zio_wait(zio)); 3290 3291 ASSERT(arc_flags & ARC_NOWAIT); 3292 zio_nowait(zio); 3293 3294 return (0); 3295} 3296 3297static int 3298arc_memory_throttle(uint64_t reserve, uint64_t txg) 3299{ 3300#ifdef _KERNEL 3301 uint64_t inflight_data = arc_anon->arcs_size; 3302 uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count); 3303 static uint64_t page_load = 0; 3304 static uint64_t last_txg = 0; 3305 3306#if 0 3307#if defined(__i386) 3308 available_memory = 3309 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3310#endif 3311#endif 3312 if (available_memory >= zfs_write_limit_max) 3313 return (0); 3314 3315 if (txg > last_txg) { 3316 last_txg = txg; 3317 page_load = 0; 3318 } 3319 /* 3320 * If we are in pageout, we know that memory is already tight, 3321 * the arc is already going to be evicting, so we just want to 3322 * continue to let page writes occur as quickly as possible. 
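	 * Concretely (an illustrative walk-through of the code below): each
	 * pageout reservation first fails with ERESTART if the page_load
	 * accumulated so far already exceeds a quarter of free memory, and
	 * otherwise adds reserve/8 (the reserve is inflated, so we deflate
	 * it) and succeeds.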
3323 */ 3324 if (curproc == pageproc) { 3325 if (page_load > available_memory / 4) 3326 return (ERESTART); 3327 /* Note: reserve is inflated, so we deflate */ 3328 page_load += reserve / 8; 3329 return (0); 3330 } else if (page_load > 0 && arc_reclaim_needed()) { 3331 /* memory is low, delay before restarting */ 3332 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3333 return (EAGAIN); 3334 } 3335 page_load = 0; 3336 3337 if (arc_size > arc_c_min) { 3338 uint64_t evictable_memory = 3339 arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3340 arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3341 arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3342 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3343 available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3344 } 3345 3346 if (inflight_data > available_memory / 4) { 3347 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3348 return (ERESTART); 3349 } 3350#endif 3351 return (0); 3352} 3353 3354void 3355arc_tempreserve_clear(uint64_t reserve) 3356{ 3357 atomic_add_64(&arc_tempreserve, -reserve); 3358 ASSERT((int64_t)arc_tempreserve >= 0); 3359} 3360 3361int 3362arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3363{ 3364 int error; 3365 3366#ifdef ZFS_DEBUG 3367 /* 3368 * Once in a while, fail for no reason. Everything should cope. 3369 */ 3370 if (spa_get_random(10000) == 0) { 3371 dprintf("forcing random failure\n"); 3372 return (ERESTART); 3373 } 3374#endif 3375 if (reserve > arc_c/4 && !arc_no_grow) 3376 arc_c = MIN(arc_c_max, reserve * 4); 3377 if (reserve > arc_c) 3378 return (ENOMEM); 3379 3380 /* 3381 * Writes will, almost always, require additional memory allocations 3382 * in order to compress/encrypt/etc the data. We therefor need to 3383 * make sure that there is sufficient available memory for this. 3384 */ 3385 if (error = arc_memory_throttle(reserve, txg)) 3386 return (error); 3387 3388 /* 3389 * Throttle writes when the amount of dirty data in the cache 3390 * gets too large. We try to keep the cache less than half full 3391 * of dirty blocks so that our sync times don't grow too large. 3392 * Note: if two requests come in concurrently, we might let them 3393 * both succeed, when one of them should fail. Not a huge deal. 3394 */ 3395 if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3396 arc_anon->arcs_size > arc_c / 4) { 3397 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3398 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3399 arc_tempreserve>>10, 3400 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3401 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3402 reserve>>10, arc_c>>10); 3403 return (ERESTART); 3404 } 3405 atomic_add_64(&arc_tempreserve, reserve); 3406 return (0); 3407} 3408 3409static kmutex_t arc_lowmem_lock; 3410#ifdef _KERNEL 3411static eventhandler_tag arc_event_lowmem = NULL; 3412 3413static void 3414arc_lowmem(void *arg __unused, int howto __unused) 3415{ 3416 3417 /* Serialize access via arc_lowmem_lock. 
*/ 3418 mutex_enter(&arc_lowmem_lock); 3419 needfree = 1; 3420 cv_signal(&arc_reclaim_thr_cv); 3421 while (needfree) 3422 tsleep(&needfree, 0, "zfs:lowmem", hz / 5); 3423 mutex_exit(&arc_lowmem_lock); 3424} 3425#endif 3426 3427void 3428arc_init(void) 3429{ 3430 int prefetch_tunable_set = 0; 3431 3432 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3433 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3434 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3435 3436 /* Convert seconds to clock ticks */ 3437 arc_min_prefetch_lifespan = 1 * hz; 3438 3439 /* Start out with 1/8 of all memory */ 3440 arc_c = kmem_size() / 8; 3441#if 0 3442#ifdef _KERNEL 3443 /* 3444 * On architectures where the physical memory can be larger 3445 * than the addressable space (intel in 32-bit mode), we may 3446 * need to limit the cache to 1/8 of VM size. 3447 */ 3448 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3449#endif 3450#endif 3451 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3452 arc_c_min = MAX(arc_c / 4, 64<<18); 3453 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 3454 if (arc_c * 8 >= 1<<30) 3455 arc_c_max = (arc_c * 8) - (1<<30); 3456 else 3457 arc_c_max = arc_c_min; 3458 arc_c_max = MAX(arc_c * 5, arc_c_max); 3459#ifdef _KERNEL 3460 /* 3461 * Allow the tunables to override our calculations if they are 3462 * reasonable (ie. over 16MB) 3463 */ 3464 if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size()) 3465 arc_c_max = zfs_arc_max; 3466 if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max) 3467 arc_c_min = zfs_arc_min; 3468#endif 3469 arc_c = arc_c_max; 3470 arc_p = (arc_c >> 1); 3471 3472 /* limit meta-data to 1/4 of the arc capacity */ 3473 arc_meta_limit = arc_c_max / 4; 3474 3475 /* Allow the tunable to override if it is reasonable */ 3476 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3477 arc_meta_limit = zfs_arc_meta_limit; 3478 3479 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3480 arc_c_min = arc_meta_limit / 2; 3481 3482 /* if kmem_flags are set, lets try to use less memory */ 3483 if (kmem_debugging()) 3484 arc_c = arc_c / 2; 3485 if (arc_c < arc_c_min) 3486 arc_c = arc_c_min; 3487 3488 zfs_arc_min = arc_c_min; 3489 zfs_arc_max = arc_c_max; 3490 3491 arc_anon = &ARC_anon; 3492 arc_mru = &ARC_mru; 3493 arc_mru_ghost = &ARC_mru_ghost; 3494 arc_mfu = &ARC_mfu; 3495 arc_mfu_ghost = &ARC_mfu_ghost; 3496 arc_l2c_only = &ARC_l2c_only; 3497 arc_size = 0; 3498 3499 mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3500 mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3501 mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3502 mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3503 mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3504 mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3505 3506 list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3507 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3508 list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3509 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3510 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3511 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3512 list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3513 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3514 list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3515 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 
3516 list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3517 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3518 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3519 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3520 list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3521 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3522 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3523 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3524 list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3525 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3526 3527 buf_init(); 3528 3529 arc_thread_exit = 0; 3530 arc_eviction_list = NULL; 3531 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3532 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3533 3534 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3535 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3536 3537 if (arc_ksp != NULL) { 3538 arc_ksp->ks_data = &arc_stats; 3539 kstat_install(arc_ksp); 3540 } 3541 3542 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3543 TS_RUN, minclsyspri); 3544 3545#ifdef _KERNEL 3546 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3547 EVENTHANDLER_PRI_FIRST); 3548#endif 3549 3550 arc_dead = FALSE; 3551 arc_warm = B_FALSE; 3552 3553 if (zfs_write_limit_max == 0) 3554 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3555 else 3556 zfs_write_limit_shift = 0; 3557 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3558 3559#ifdef _KERNEL 3560 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3561 prefetch_tunable_set = 1; 3562 3563#ifdef __i386__ 3564 if (prefetch_tunable_set == 0) { 3565 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3566 "-- to enable,\n"); 3567 printf(" add \"vfs.zfs.prefetch_disable=0\" " 3568 "to /boot/loader.conf.\n"); 3569 zfs_prefetch_disable=1; 3570 } 3571#else 3572 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3573 prefetch_tunable_set == 0) { 3574 printf("ZFS NOTICE: Prefetch is disabled by default if less " 3575 "than 4GB of RAM is present;\n" 3576 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3577 "to /boot/loader.conf.\n"); 3578 zfs_prefetch_disable=1; 3579 } 3580#endif 3581 /* Warn about ZFS memory and address space requirements. 
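	 * (Illustrative, hypothetical tuning only: a system that hits the
	 * kmem warning below might, for example, set
	 *
	 *	vm.kmem_size="1024M"
	 *	vm.kmem_size_max="1024M"
	 *
	 * in /boot/loader.conf; suitable values depend on the machine.)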
*/ 3582 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3583 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3584 "expect unstable behavior.\n"); 3585 } 3586 if (kmem_size() < 512 * (1 << 20)) { 3587 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3588 "expect unstable behavior.\n"); 3589 printf(" Consider tuning vm.kmem_size and " 3590 "vm.kmem_size_max\n"); 3591 printf(" in /boot/loader.conf.\n"); 3592 } 3593#endif 3594} 3595 3596void 3597arc_fini(void) 3598{ 3599 3600 mutex_enter(&arc_reclaim_thr_lock); 3601 arc_thread_exit = 1; 3602 cv_signal(&arc_reclaim_thr_cv); 3603 while (arc_thread_exit != 0) 3604 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3605 mutex_exit(&arc_reclaim_thr_lock); 3606 3607 arc_flush(NULL); 3608 3609 arc_dead = TRUE; 3610 3611 if (arc_ksp != NULL) { 3612 kstat_delete(arc_ksp); 3613 arc_ksp = NULL; 3614 } 3615 3616 mutex_destroy(&arc_eviction_mtx); 3617 mutex_destroy(&arc_reclaim_thr_lock); 3618 cv_destroy(&arc_reclaim_thr_cv); 3619 3620 list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3621 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3622 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3623 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3624 list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3625 list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3626 list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3627 list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3628 3629 mutex_destroy(&arc_anon->arcs_mtx); 3630 mutex_destroy(&arc_mru->arcs_mtx); 3631 mutex_destroy(&arc_mru_ghost->arcs_mtx); 3632 mutex_destroy(&arc_mfu->arcs_mtx); 3633 mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3634 3635 mutex_destroy(&zfs_write_limit_lock); 3636 3637 buf_fini(); 3638 3639 mutex_destroy(&arc_lowmem_lock); 3640#ifdef _KERNEL 3641 if (arc_event_lowmem != NULL) 3642 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 3643#endif 3644} 3645 3646/* 3647 * Level 2 ARC 3648 * 3649 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3650 * It uses dedicated storage devices to hold cached data, which are populated 3651 * using large infrequent writes. The main role of this cache is to boost 3652 * the performance of random read workloads. The intended L2ARC devices 3653 * include short-stroked disks, solid state disks, and other media with 3654 * substantially faster read latency than disk. 3655 * 3656 * +-----------------------+ 3657 * | ARC | 3658 * +-----------------------+ 3659 * | ^ ^ 3660 * | | | 3661 * l2arc_feed_thread() arc_read() 3662 * | | | 3663 * | l2arc read | 3664 * V | | 3665 * +---------------+ | 3666 * | L2ARC | | 3667 * +---------------+ | 3668 * | ^ | 3669 * l2arc_write() | | 3670 * | | | 3671 * V | | 3672 * +-------+ +-------+ 3673 * | vdev | | vdev | 3674 * | cache | | cache | 3675 * +-------+ +-------+ 3676 * +=========+ .-----. 3677 * : L2ARC : |-_____-| 3678 * : devices : | Disks | 3679 * +=========+ `-_____-' 3680 * 3681 * Read requests are satisfied from the following sources, in order: 3682 * 3683 * 1) ARC 3684 * 2) vdev cache of L2ARC devices 3685 * 3) L2ARC devices 3686 * 4) vdev cache of disks 3687 * 5) disks 3688 * 3689 * Some L2ARC device types exhibit extremely slow write performance. 3690 * To accommodate for this there are some significant differences between 3691 * the L2ARC and traditional cache design: 3692 * 3693 * 1. There is no eviction path from the ARC to the L2ARC. 
3694 * the ARC behave as usual, freeing buffers and placing headers on ghost
3695 * lists. The ARC does not send buffers to the L2ARC during eviction as
3696 * this would inflate write latencies whenever the ARC is under memory pressure.
3697 *
3698 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
3699 * It does this by periodically scanning buffers from the eviction-end of
3700 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3701 * not already there. It scans until a headroom of buffers is satisfied;
3702 * this headroom is a cushion against imminent ARC eviction. The thread that
3703 * does this is l2arc_feed_thread(), illustrated below; example sizes are
3704 * included to give a better sense of the ratios than the diagram alone conveys:
3705 *
3706 * head --> tail
3707 * +---------------------+----------+
3708 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
3709 * +---------------------+----------+ | o L2ARC eligible
3710 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
3711 * +---------------------+----------+ |
3712 * 15.9 Gbytes ^ 32 Mbytes |
3713 * headroom |
3714 * l2arc_feed_thread()
3715 * |
3716 * l2arc write hand <--[oooo]--'
3717 * | 8 Mbyte
3718 * | write max
3719 * V
3720 * +==============================+
3721 * L2ARC dev |####|#|###|###| |####| ... |
3722 * +==============================+
3723 * 32 Gbytes
3724 *
3725 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
3726 * evicted, then the L2ARC has cached a buffer much sooner than it probably
3727 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
3728 * safe to say that this is an uncommon case, since buffers at the end of
3729 * the ARC lists have moved there due to inactivity.
3730 *
3731 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
3732 * then the L2ARC simply misses copying some buffers. This serves as a
3733 * pressure valve to prevent heavy read workloads from both stalling the ARC
3734 * with waits and clogging the L2ARC with writes. This also helps prevent
3735 * the potential for the L2ARC to churn if it attempts to cache content too
3736 * quickly, such as during backups of the entire pool.
3737 *
3738 * 5. After system boot and before the ARC has filled main memory, there are
3739 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
3740 * lists can remain mostly static. Instead of searching from the tail of these
3741 * lists as pictured, l2arc_feed_thread() will search from the list heads
3742 * for eligible buffers, greatly increasing its chance of finding them.
3743 *
3744 * The L2ARC device write speed is also boosted during this time so that
3745 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
3746 * there are no L2ARC reads, and no fear of degrading read performance
3747 * through increased writes.
3748 *
3749 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
3750 * the vdev queue can aggregate them into larger and fewer writes. Each
3751 * device is written to in a rotor fashion, sweeping writes through
3752 * available space and then repeating.
3753 *
3754 * 7. The L2ARC does not store dirty content. It never needs to flush
3755 * write buffers back to disk-based storage.
3756 *
3757 * 8. If an ARC buffer is written (and dirtied) that also exists in the
3758 * L2ARC, the now stale L2ARC buffer is immediately dropped.
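 *
 * As a worked example of the sizes involved (the defaults assumed here are
 * illustrative; see the tunables below): with a write max of 8 Mbytes and an
 * l2arc_headroom factor of 2, one pass of l2arc_feed_thread() scans at most
 * 2 * 8 Mbytes = 16 Mbytes from the eviction end of each ARC list and issues
 * at most 8 Mbytes of writes to the current L2ARC device, once every
 * l2arc_feed_secs seconds. While arc_warm is false the write target is
 * raised by l2arc_write_boost so that the device warms up faster.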
3759 * 3760 * The performance of the L2ARC can be tweaked by a number of tunables, which 3761 * may be necessary for different workloads: 3762 * 3763 * l2arc_write_max max write bytes per interval 3764 * l2arc_write_boost extra write bytes during device warmup 3765 * l2arc_noprefetch skip caching prefetched buffers 3766 * l2arc_headroom number of max device writes to precache 3767 * l2arc_feed_secs seconds between L2ARC writing 3768 * 3769 * Tunables may be removed or added as future performance improvements are 3770 * integrated, and also may become zpool properties. 3771 */ 3772 3773static void 3774l2arc_hdr_stat_add(void) 3775{ 3776 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3777 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3778} 3779 3780static void 3781l2arc_hdr_stat_remove(void) 3782{ 3783 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3784 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3785} 3786 3787/* 3788 * Cycle through L2ARC devices. This is how L2ARC load balances. 3789 * If a device is returned, this also returns holding the spa config lock. 3790 */ 3791static l2arc_dev_t * 3792l2arc_dev_get_next(void) 3793{ 3794 l2arc_dev_t *first, *next = NULL; 3795 3796 /* 3797 * Lock out the removal of spas (spa_namespace_lock), then removal 3798 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3799 * both locks will be dropped and a spa config lock held instead. 3800 */ 3801 mutex_enter(&spa_namespace_lock); 3802 mutex_enter(&l2arc_dev_mtx); 3803 3804 /* if there are no vdevs, there is nothing to do */ 3805 if (l2arc_ndev == 0) 3806 goto out; 3807 3808 first = NULL; 3809 next = l2arc_dev_last; 3810 do { 3811 /* loop around the list looking for a non-faulted vdev */ 3812 if (next == NULL) { 3813 next = list_head(l2arc_dev_list); 3814 } else { 3815 next = list_next(l2arc_dev_list, next); 3816 if (next == NULL) 3817 next = list_head(l2arc_dev_list); 3818 } 3819 3820 /* if we have come back to the start, bail out */ 3821 if (first == NULL) 3822 first = next; 3823 else if (next == first) 3824 break; 3825 3826 } while (vdev_is_dead(next->l2ad_vdev)); 3827 3828 /* if we were unable to find any usable vdevs, return NULL */ 3829 if (vdev_is_dead(next->l2ad_vdev)) 3830 next = NULL; 3831 3832 l2arc_dev_last = next; 3833 3834out: 3835 mutex_exit(&l2arc_dev_mtx); 3836 3837 /* 3838 * Grab the config lock to prevent the 'next' device from being 3839 * removed while we are writing to it. 3840 */ 3841 if (next != NULL) 3842 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3843 mutex_exit(&spa_namespace_lock); 3844 3845 return (next); 3846} 3847 3848/* 3849 * Free buffers that were tagged for destruction. 3850 */ 3851static void 3852l2arc_do_free_on_write() 3853{ 3854 list_t *buflist; 3855 l2arc_data_free_t *df, *df_prev; 3856 3857 mutex_enter(&l2arc_free_on_write_mtx); 3858 buflist = l2arc_free_on_write; 3859 3860 for (df = list_tail(buflist); df; df = df_prev) { 3861 df_prev = list_prev(buflist, df); 3862 ASSERT(df->l2df_data != NULL); 3863 ASSERT(df->l2df_func != NULL); 3864 df->l2df_func(df->l2df_data, df->l2df_size); 3865 list_remove(buflist, df); 3866 kmem_free(df, sizeof (l2arc_data_free_t)); 3867 } 3868 3869 mutex_exit(&l2arc_free_on_write_mtx); 3870} 3871 3872/* 3873 * A write to a cache device has completed. Update all headers to allow 3874 * reads from these buffers to begin. 
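 *
 * The walk starts at the dummy "write head" header that l2arc_write_buffers()
 * inserted into the device buflist before issuing the writes and proceeds
 * toward the front of the list, so only the headers belonging to this write
 * are visited. Each one has its ARC_L2_WRITING flag cleared (or, if the zio
 * failed, its L2ARC entry dropped), after which the head marker itself is
 * removed and freed.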
3875 */ 3876static void 3877l2arc_write_done(zio_t *zio) 3878{ 3879 l2arc_write_callback_t *cb; 3880 l2arc_dev_t *dev; 3881 list_t *buflist; 3882 arc_buf_hdr_t *head, *ab, *ab_prev; 3883 l2arc_buf_hdr_t *abl2; 3884 kmutex_t *hash_lock; 3885 3886 cb = zio->io_private; 3887 ASSERT(cb != NULL); 3888 dev = cb->l2wcb_dev; 3889 ASSERT(dev != NULL); 3890 head = cb->l2wcb_head; 3891 ASSERT(head != NULL); 3892 buflist = dev->l2ad_buflist; 3893 ASSERT(buflist != NULL); 3894 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3895 l2arc_write_callback_t *, cb); 3896 3897 if (zio->io_error != 0) 3898 ARCSTAT_BUMP(arcstat_l2_writes_error); 3899 3900 mutex_enter(&l2arc_buflist_mtx); 3901 3902 /* 3903 * All writes completed, or an error was hit. 3904 */ 3905 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3906 ab_prev = list_prev(buflist, ab); 3907 3908 hash_lock = HDR_LOCK(ab); 3909 if (!mutex_tryenter(hash_lock)) { 3910 /* 3911 * This buffer misses out. It may be in a stage 3912 * of eviction. Its ARC_L2_WRITING flag will be 3913 * left set, denying reads to this buffer. 3914 */ 3915 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3916 continue; 3917 } 3918 3919 if (zio->io_error != 0) { 3920 /* 3921 * Error - drop L2ARC entry. 3922 */ 3923 list_remove(buflist, ab); 3924 abl2 = ab->b_l2hdr; 3925 ab->b_l2hdr = NULL; 3926 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3927 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3928 } 3929 3930 /* 3931 * Allow ARC to begin reads to this L2ARC entry. 3932 */ 3933 ab->b_flags &= ~ARC_L2_WRITING; 3934 3935 mutex_exit(hash_lock); 3936 } 3937 3938 atomic_inc_64(&l2arc_writes_done); 3939 list_remove(buflist, head); 3940 kmem_cache_free(hdr_cache, head); 3941 mutex_exit(&l2arc_buflist_mtx); 3942 3943 l2arc_do_free_on_write(); 3944 3945 kmem_free(cb, sizeof (l2arc_write_callback_t)); 3946} 3947 3948/* 3949 * A read to a cache device completed. Validate buffer contents before 3950 * handing over to the regular ARC routines. 3951 */ 3952static void 3953l2arc_read_done(zio_t *zio) 3954{ 3955 l2arc_read_callback_t *cb; 3956 arc_buf_hdr_t *hdr; 3957 arc_buf_t *buf; 3958 kmutex_t *hash_lock; 3959 int equal; 3960 3961 ASSERT(zio->io_vd != NULL); 3962 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 3963 3964 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 3965 3966 cb = zio->io_private; 3967 ASSERT(cb != NULL); 3968 buf = cb->l2rcb_buf; 3969 ASSERT(buf != NULL); 3970 hdr = buf->b_hdr; 3971 ASSERT(hdr != NULL); 3972 3973 hash_lock = HDR_LOCK(hdr); 3974 mutex_enter(hash_lock); 3975 3976 /* 3977 * Check this survived the L2ARC journey. 3978 */ 3979 equal = arc_cksum_equal(buf); 3980 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3981 mutex_exit(hash_lock); 3982 zio->io_private = buf; 3983 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 3984 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 3985 arc_read_done(zio); 3986 } else { 3987 mutex_exit(hash_lock); 3988 /* 3989 * Buffer didn't survive caching. Increment stats and 3990 * reissue to the original storage device. 3991 */ 3992 if (zio->io_error != 0) { 3993 ARCSTAT_BUMP(arcstat_l2_io_error); 3994 } else { 3995 zio->io_error = EIO; 3996 } 3997 if (!equal) 3998 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 3999 4000 /* 4001 * If there's no waiter, issue an async i/o to the primary 4002 * storage now. If there *is* a waiter, the caller must 4003 * issue the i/o in a context where it's OK to block. 
4004 */ 4005 if (zio->io_waiter == NULL) 4006 zio_nowait(zio_read(zio->io_parent, 4007 cb->l2rcb_spa, &cb->l2rcb_bp, 4008 buf->b_data, zio->io_size, arc_read_done, buf, 4009 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4010 } 4011 4012 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4013} 4014 4015/* 4016 * This is the list priority from which the L2ARC will search for pages to 4017 * cache. This is used within loops (0..3) to cycle through lists in the 4018 * desired order. This order can have a significant effect on cache 4019 * performance. 4020 * 4021 * Currently the metadata lists are hit first, MFU then MRU, followed by 4022 * the data lists. This function returns a locked list, and also returns 4023 * the lock pointer. 4024 */ 4025static list_t * 4026l2arc_list_locked(int list_num, kmutex_t **lock) 4027{ 4028 list_t *list; 4029 4030 ASSERT(list_num >= 0 && list_num <= 3); 4031 4032 switch (list_num) { 4033 case 0: 4034 list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4035 *lock = &arc_mfu->arcs_mtx; 4036 break; 4037 case 1: 4038 list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4039 *lock = &arc_mru->arcs_mtx; 4040 break; 4041 case 2: 4042 list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4043 *lock = &arc_mfu->arcs_mtx; 4044 break; 4045 case 3: 4046 list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4047 *lock = &arc_mru->arcs_mtx; 4048 break; 4049 } 4050 4051 ASSERT(!(MUTEX_HELD(*lock))); 4052 mutex_enter(*lock); 4053 return (list); 4054} 4055 4056/* 4057 * Evict buffers from the device write hand to the distance specified in 4058 * bytes. This distance may span populated buffers, it may span nothing. 4059 * This is clearing a region on the L2ARC device ready for writing. 4060 * If the 'all' boolean is set, every buffer is evicted. 4061 */ 4062static void 4063l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4064{ 4065 list_t *buflist; 4066 l2arc_buf_hdr_t *abl2; 4067 arc_buf_hdr_t *ab, *ab_prev; 4068 kmutex_t *hash_lock; 4069 uint64_t taddr; 4070 4071 buflist = dev->l2ad_buflist; 4072 4073 if (buflist == NULL) 4074 return; 4075 4076 if (!all && dev->l2ad_first) { 4077 /* 4078 * This is the first sweep through the device. There is 4079 * nothing to evict. 4080 */ 4081 return; 4082 } 4083 4084 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4085 /* 4086 * When nearing the end of the device, evict to the end 4087 * before the device write hand jumps to the start. 4088 */ 4089 taddr = dev->l2ad_end; 4090 } else { 4091 taddr = dev->l2ad_hand + distance; 4092 } 4093 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4094 uint64_t, taddr, boolean_t, all); 4095 4096top: 4097 mutex_enter(&l2arc_buflist_mtx); 4098 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4099 ab_prev = list_prev(buflist, ab); 4100 4101 hash_lock = HDR_LOCK(ab); 4102 if (!mutex_tryenter(hash_lock)) { 4103 /* 4104 * Missed the hash lock. Retry. 4105 */ 4106 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4107 mutex_exit(&l2arc_buflist_mtx); 4108 mutex_enter(hash_lock); 4109 mutex_exit(hash_lock); 4110 goto top; 4111 } 4112 4113 if (HDR_L2_WRITE_HEAD(ab)) { 4114 /* 4115 * We hit a write head node. Leave it for 4116 * l2arc_write_done(). 4117 */ 4118 list_remove(buflist, ab); 4119 mutex_exit(hash_lock); 4120 continue; 4121 } 4122 4123 if (!all && ab->b_l2hdr != NULL && 4124 (ab->b_l2hdr->b_daddr > taddr || 4125 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4126 /* 4127 * We've evicted to the target address, 4128 * or the end of the device. 
4129 */ 4130 mutex_exit(hash_lock); 4131 break; 4132 } 4133 4134 if (HDR_FREE_IN_PROGRESS(ab)) { 4135 /* 4136 * Already on the path to destruction. 4137 */ 4138 mutex_exit(hash_lock); 4139 continue; 4140 } 4141 4142 if (ab->b_state == arc_l2c_only) { 4143 ASSERT(!HDR_L2_READING(ab)); 4144 /* 4145 * This doesn't exist in the ARC. Destroy. 4146 * arc_hdr_destroy() will call list_remove() 4147 * and decrement arcstat_l2_size. 4148 */ 4149 arc_change_state(arc_anon, ab, hash_lock); 4150 arc_hdr_destroy(ab); 4151 } else { 4152 /* 4153 * Invalidate issued or about to be issued 4154 * reads, since we may be about to write 4155 * over this location. 4156 */ 4157 if (HDR_L2_READING(ab)) { 4158 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4159 ab->b_flags |= ARC_L2_EVICTED; 4160 } 4161 4162 /* 4163 * Tell ARC this no longer exists in L2ARC. 4164 */ 4165 if (ab->b_l2hdr != NULL) { 4166 abl2 = ab->b_l2hdr; 4167 ab->b_l2hdr = NULL; 4168 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4169 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4170 } 4171 list_remove(buflist, ab); 4172 4173 /* 4174 * This may have been leftover after a 4175 * failed write. 4176 */ 4177 ab->b_flags &= ~ARC_L2_WRITING; 4178 } 4179 mutex_exit(hash_lock); 4180 } 4181 mutex_exit(&l2arc_buflist_mtx); 4182 4183 spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 4184 dev->l2ad_evict = taddr; 4185} 4186 4187/* 4188 * Find and write ARC buffers to the L2ARC device. 4189 * 4190 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4191 * for reading until they have completed writing. 4192 */ 4193static void 4194l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4195{ 4196 arc_buf_hdr_t *ab, *ab_prev, *head; 4197 l2arc_buf_hdr_t *hdrl2; 4198 list_t *list; 4199 uint64_t passed_sz, write_sz, buf_sz, headroom; 4200 void *buf_data; 4201 kmutex_t *hash_lock, *list_lock; 4202 boolean_t have_lock, full; 4203 l2arc_write_callback_t *cb; 4204 zio_t *pio, *wzio; 4205 int try; 4206 4207 ASSERT(dev->l2ad_vdev != NULL); 4208 4209 pio = NULL; 4210 write_sz = 0; 4211 full = B_FALSE; 4212 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4213 head->b_flags |= ARC_L2_WRITE_HEAD; 4214 4215 /* 4216 * Copy buffers for L2ARC writing. 4217 */ 4218 mutex_enter(&l2arc_buflist_mtx); 4219 for (try = 0; try <= 3; try++) { 4220 list = l2arc_list_locked(try, &list_lock); 4221 passed_sz = 0; 4222 4223 /* 4224 * L2ARC fast warmup. 4225 * 4226 * Until the ARC is warm and starts to evict, read from the 4227 * head of the ARC lists rather than the tail. 4228 */ 4229 headroom = target_sz * l2arc_headroom; 4230 if (arc_warm == B_FALSE) 4231 ab = list_head(list); 4232 else 4233 ab = list_tail(list); 4234 4235 for (; ab; ab = ab_prev) { 4236 if (arc_warm == B_FALSE) 4237 ab_prev = list_next(list, ab); 4238 else 4239 ab_prev = list_prev(list, ab); 4240 4241 hash_lock = HDR_LOCK(ab); 4242 have_lock = MUTEX_HELD(hash_lock); 4243 if (!have_lock && !mutex_tryenter(hash_lock)) { 4244 /* 4245 * Skip this buffer rather than waiting. 4246 */ 4247 continue; 4248 } 4249 4250 passed_sz += ab->b_size; 4251 if (passed_sz > headroom) { 4252 /* 4253 * Searched too far. 4254 */ 4255 mutex_exit(hash_lock); 4256 break; 4257 } 4258 4259 if (ab->b_spa != spa) { 4260 mutex_exit(hash_lock); 4261 continue; 4262 } 4263 4264 if (ab->b_l2hdr != NULL) { 4265 /* 4266 * Already in L2ARC. 
4267 */ 4268 mutex_exit(hash_lock); 4269 continue; 4270 } 4271 4272 if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) { 4273 mutex_exit(hash_lock); 4274 continue; 4275 } 4276 4277 if ((write_sz + ab->b_size) > target_sz) { 4278 full = B_TRUE; 4279 mutex_exit(hash_lock); 4280 break; 4281 } 4282 4283 if (ab->b_buf == NULL) { 4284 DTRACE_PROBE1(l2arc__buf__null, void *, ab); 4285 mutex_exit(hash_lock); 4286 continue; 4287 } 4288 4289 if (pio == NULL) { 4290 /* 4291 * Insert a dummy header on the buflist so 4292 * l2arc_write_done() can find where the 4293 * write buffers begin without searching. 4294 */ 4295 list_insert_head(dev->l2ad_buflist, head); 4296 4297 cb = kmem_alloc( 4298 sizeof (l2arc_write_callback_t), KM_SLEEP); 4299 cb->l2wcb_dev = dev; 4300 cb->l2wcb_head = head; 4301 pio = zio_root(spa, l2arc_write_done, cb, 4302 ZIO_FLAG_CANFAIL); 4303 } 4304 4305 /* 4306 * Create and add a new L2ARC header. 4307 */ 4308 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4309 hdrl2->b_dev = dev; 4310 hdrl2->b_daddr = dev->l2ad_hand; 4311 4312 ab->b_flags |= ARC_L2_WRITING; 4313 ab->b_l2hdr = hdrl2; 4314 list_insert_head(dev->l2ad_buflist, ab); 4315 buf_data = ab->b_buf->b_data; 4316 buf_sz = ab->b_size; 4317 4318 /* 4319 * Compute and store the buffer cksum before 4320 * writing. On debug the cksum is verified first. 4321 */ 4322 arc_cksum_verify(ab->b_buf); 4323 arc_cksum_compute(ab->b_buf, B_TRUE); 4324 4325 mutex_exit(hash_lock); 4326 4327 wzio = zio_write_phys(pio, dev->l2ad_vdev, 4328 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4329 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4330 ZIO_FLAG_CANFAIL, B_FALSE); 4331 4332 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4333 zio_t *, wzio); 4334 (void) zio_nowait(wzio); 4335 4336 /* 4337 * Keep the clock hand suitably device-aligned. 4338 */ 4339 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4340 4341 write_sz += buf_sz; 4342 dev->l2ad_hand += buf_sz; 4343 } 4344 4345 mutex_exit(list_lock); 4346 4347 if (full == B_TRUE) 4348 break; 4349 } 4350 mutex_exit(&l2arc_buflist_mtx); 4351 4352 if (pio == NULL) { 4353 ASSERT3U(write_sz, ==, 0); 4354 kmem_cache_free(hdr_cache, head); 4355 return; 4356 } 4357 4358 ASSERT3U(write_sz, <=, target_sz); 4359 ARCSTAT_BUMP(arcstat_l2_writes_sent); 4360 ARCSTAT_INCR(arcstat_l2_size, write_sz); 4361 spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 4362 4363 /* 4364 * Bump device hand to the device start if it is approaching the end. 4365 * l2arc_evict() will already have evicted ahead for this case. 4366 */ 4367 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4368 spa_l2cache_space_update(dev->l2ad_vdev, 0, 4369 dev->l2ad_end - dev->l2ad_hand); 4370 dev->l2ad_hand = dev->l2ad_start; 4371 dev->l2ad_evict = dev->l2ad_start; 4372 dev->l2ad_first = B_FALSE; 4373 } 4374 4375 (void) zio_wait(pio); 4376} 4377 4378/* 4379 * This thread feeds the L2ARC at regular intervals. This is the beating 4380 * heart of the L2ARC. 4381 */ 4382static void 4383l2arc_feed_thread(void *dummy __unused) 4384{ 4385 callb_cpr_t cpr; 4386 l2arc_dev_t *dev; 4387 spa_t *spa; 4388 uint64_t size; 4389 4390 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4391 4392 mutex_enter(&l2arc_feed_thr_lock); 4393 4394 while (l2arc_thread_exit == 0) { 4395 /* 4396 * Pause for l2arc_feed_secs seconds between writes. 
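 * The timed wait is bracketed by CALLB_CPR_SAFE_BEGIN/END so that the thread
 * is marked safe to suspend (checkpoint/resume) while it sleeps on
 * l2arc_feed_thr_cv.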
4397 */ 4398 CALLB_CPR_SAFE_BEGIN(&cpr); 4399 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4400 hz * l2arc_feed_secs); 4401 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4402 4403 /* 4404 * Quick check for L2ARC devices. 4405 */ 4406 mutex_enter(&l2arc_dev_mtx); 4407 if (l2arc_ndev == 0) { 4408 mutex_exit(&l2arc_dev_mtx); 4409 continue; 4410 } 4411 mutex_exit(&l2arc_dev_mtx); 4412 4413 /* 4414 * This selects the next l2arc device to write to, and in 4415 * doing so the next spa to feed from: dev->l2ad_spa. This 4416 * will return NULL if there are now no l2arc devices or if 4417 * they are all faulted. 4418 * 4419 * If a device is returned, its spa's config lock is also 4420 * held to prevent device removal. l2arc_dev_get_next() 4421 * will grab and release l2arc_dev_mtx. 4422 */ 4423 if ((dev = l2arc_dev_get_next()) == NULL) 4424 continue; 4425 4426 spa = dev->l2ad_spa; 4427 ASSERT(spa != NULL); 4428 4429 /* 4430 * Avoid contributing to memory pressure. 4431 */ 4432 if (arc_reclaim_needed()) { 4433 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4434 spa_config_exit(spa, SCL_L2ARC, dev); 4435 continue; 4436 } 4437 4438 ARCSTAT_BUMP(arcstat_l2_feeds); 4439 4440 size = dev->l2ad_write; 4441 if (arc_warm == B_FALSE) 4442 size += dev->l2ad_boost; 4443 4444 /* 4445 * Evict L2ARC buffers that will be overwritten. 4446 */ 4447 l2arc_evict(dev, size, B_FALSE); 4448 4449 /* 4450 * Write ARC buffers. 4451 */ 4452 l2arc_write_buffers(spa, dev, size); 4453 spa_config_exit(spa, SCL_L2ARC, dev); 4454 } 4455 4456 l2arc_thread_exit = 0; 4457 cv_broadcast(&l2arc_feed_thr_cv); 4458 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4459 thread_exit(); 4460} 4461 4462boolean_t 4463l2arc_vdev_present(vdev_t *vd) 4464{ 4465 l2arc_dev_t *dev; 4466 4467 mutex_enter(&l2arc_dev_mtx); 4468 for (dev = list_head(l2arc_dev_list); dev != NULL; 4469 dev = list_next(l2arc_dev_list, dev)) { 4470 if (dev->l2ad_vdev == vd) 4471 break; 4472 } 4473 mutex_exit(&l2arc_dev_mtx); 4474 4475 return (dev != NULL); 4476} 4477 4478/* 4479 * Add a vdev for use by the L2ARC. By this point the spa has already 4480 * validated the vdev and opened it. 4481 */ 4482void 4483l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4484{ 4485 l2arc_dev_t *adddev; 4486 4487 ASSERT(!l2arc_vdev_present(vd)); 4488 4489 /* 4490 * Create a new l2arc device entry. 4491 */ 4492 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4493 adddev->l2ad_spa = spa; 4494 adddev->l2ad_vdev = vd; 4495 adddev->l2ad_write = l2arc_write_max; 4496 adddev->l2ad_boost = l2arc_write_boost; 4497 adddev->l2ad_start = start; 4498 adddev->l2ad_end = end; 4499 adddev->l2ad_hand = adddev->l2ad_start; 4500 adddev->l2ad_evict = adddev->l2ad_start; 4501 adddev->l2ad_first = B_TRUE; 4502 ASSERT3U(adddev->l2ad_write, >, 0); 4503 4504 /* 4505 * This is a list of all ARC buffers that are still valid on the 4506 * device. 4507 */ 4508 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4509 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4510 offsetof(arc_buf_hdr_t, b_l2node)); 4511 4512 spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4513 4514 /* 4515 * Add device to global list 4516 */ 4517 mutex_enter(&l2arc_dev_mtx); 4518 list_insert_head(l2arc_dev_list, adddev); 4519 atomic_inc_64(&l2arc_ndev); 4520 mutex_exit(&l2arc_dev_mtx); 4521} 4522 4523/* 4524 * Remove a vdev from the L2ARC. 
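 * Every L2ARC header that still references the device is dropped via
 * l2arc_evict() with the 'all' flag set, after which the buflist and the
 * device entry itself are freed.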
4525 */ 4526void 4527l2arc_remove_vdev(vdev_t *vd) 4528{ 4529 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4530 4531 /* 4532 * Find the device by vdev 4533 */ 4534 mutex_enter(&l2arc_dev_mtx); 4535 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4536 nextdev = list_next(l2arc_dev_list, dev); 4537 if (vd == dev->l2ad_vdev) { 4538 remdev = dev; 4539 break; 4540 } 4541 } 4542 ASSERT(remdev != NULL); 4543 4544 /* 4545 * Remove device from global list 4546 */ 4547 list_remove(l2arc_dev_list, remdev); 4548 l2arc_dev_last = NULL; /* may have been invalidated */ 4549 atomic_dec_64(&l2arc_ndev); 4550 mutex_exit(&l2arc_dev_mtx); 4551 4552 /* 4553 * Clear all buflists and ARC references. L2ARC device flush. 4554 */ 4555 l2arc_evict(remdev, 0, B_TRUE); 4556 list_destroy(remdev->l2ad_buflist); 4557 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4558 kmem_free(remdev, sizeof (l2arc_dev_t)); 4559} 4560 4561void 4562l2arc_init(void) 4563{ 4564 l2arc_thread_exit = 0; 4565 l2arc_ndev = 0; 4566 l2arc_writes_sent = 0; 4567 l2arc_writes_done = 0; 4568 4569 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4570 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4571 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4572 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4573 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4574 4575 l2arc_dev_list = &L2ARC_dev_list; 4576 l2arc_free_on_write = &L2ARC_free_on_write; 4577 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4578 offsetof(l2arc_dev_t, l2ad_node)); 4579 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4580 offsetof(l2arc_data_free_t, l2df_list_node)); 4581} 4582 4583void 4584l2arc_fini(void) 4585{ 4586 /* 4587 * This is called from dmu_fini(), which is called from spa_fini(); 4588 * Because of this, we can assume that all l2arc devices have 4589 * already been removed when the pools themselves were removed. 4590 */ 4591 4592 l2arc_do_free_on_write(); 4593 4594 mutex_destroy(&l2arc_feed_thr_lock); 4595 cv_destroy(&l2arc_feed_thr_cv); 4596 mutex_destroy(&l2arc_dev_mtx); 4597 mutex_destroy(&l2arc_buflist_mtx); 4598 mutex_destroy(&l2arc_free_on_write_mtx); 4599 4600 list_destroy(l2arc_dev_list); 4601 list_destroy(l2arc_free_on_write); 4602} 4603 4604void 4605l2arc_start(void) 4606{ 4607 if (!(spa_mode & FWRITE)) 4608 return; 4609 4610 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4611 TS_RUN, minclsyspri); 4612} 4613 4614void 4615l2arc_stop(void) 4616{ 4617 if (!(spa_mode & FWRITE)) 4618 return; 4619 4620 mutex_enter(&l2arc_feed_thr_lock); 4621 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4622 l2arc_thread_exit = 1; 4623 while (l2arc_thread_exit != 0) 4624 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4625 mutex_exit(&l2arc_feed_thr_lock); 4626} 4627
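/*
 * The round-robin rotor used by l2arc_dev_get_next() above can be illustrated
 * with a small stand-alone sketch.  This is a simplified userland
 * approximation (example_dev_t, its faulted flag and the device names are
 * invented for the example, and all locking is omitted); it only demonstrates
 * the "resume where the last sweep stopped, skip faulted devices, give up
 * after one full lap" behavior.
 */
#if 0	/* illustrative example only, never compiled into the driver */
#include <stdio.h>

typedef struct example_dev {
	const char	*ed_name;
	int		ed_faulted;
} example_dev_t;

/*
 * Return the index of the next usable device after 'last' (-1 if none has
 * been selected yet), or -1 if every device is faulted.
 */
static int
example_dev_get_next(example_dev_t *devs, int ndev, int last)
{
	int first = -1;
	int next = last;

	if (ndev == 0)
		return (-1);

	do {
		/* advance the rotor, wrapping around to the list head */
		next = (next + 1) % ndev;

		/* if we have come back to the start, bail out */
		if (first == -1)
			first = next;
		else if (next == first)
			break;
	} while (devs[next].ed_faulted);

	return (devs[next].ed_faulted ? -1 : next);
}

int
main(void)
{
	example_dev_t devs[] = {
		{ "cache0", 0 }, { "cache1", 1 }, { "cache2", 0 }
	};
	int ndev = sizeof (devs) / sizeof (devs[0]);
	int last = -1;
	int i;

	/* five feed intervals; the faulted cache1 is always skipped */
	for (i = 0; i < 5; i++) {
		last = example_dev_get_next(devs, ndev, last);
		printf("feed %d -> %s\n", i,
		    last == -1 ? "none" : devs[last].ed_name);
	}
	return (0);
}
#endif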