arc.c revision 268075
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2013 Nexenta Systems, Inc. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory. This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about. Our cache is not so simple. At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them. Blocks are only evictable
 * when there are no external references active. This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space. In these circumstances we are unable to adjust the cache
 * size. To prevent the cache from growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss. Our model has a variable sized cache. It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 * elements of the cache are therefore exactly the same size. So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict. In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes). We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_compress.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/trim_map.h>
#include <zfs_fletcher.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

#ifdef illumos
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif
#endif /* illumos */

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;
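
/*
 * For illustration, a minimal sketch of the lock-ordering rule described
 * above: code that already holds an arc list lock may only probe a hash
 * lock with mutex_tryenter() and must skip the buffer on failure, which
 * is the pattern the eviction paths use (and why arcstat_mutex_miss
 * exists):
 *
 *	hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;			(skip this buffer)
 *	}
 *
 * Taking the hash lock unconditionally there could deadlock against
 * arc_read(), which acquires the locks in the opposite order.
 */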
/*
 * The number of iterations through arc_evict_*() before we
 * drop & reacquire the lock.
 */
int arc_evict_iterations = 100;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

/*
 * If this percent of memory is free, don't throttle.
 */
int arc_lotsfree_percent = 10;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;

TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
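
/*
 * For illustration, a simplified lifecycle of one cached block, assuming
 * it is read once, re-read while cached, evicted, and then read again:
 *
 *	arc_anon -> arc_mru		(first arc_read() fills the buffer)
 *	arc_mru -> arc_mfu		(second hit while in arc_mru)
 *	arc_mfu -> arc_mfu_ghost	(data evicted; header retained)
 *	arc_mfu_ghost -> arc_mfu	(ghost hit; data re-read, and the
 *					 target sizes arc_p/arc_c adapt)
 *
 * The transitions are performed by arc_access() and arc_change_state(),
 * defined later in this file.
 */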
#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
struct arcs_lock {
	kmutex_t	arcs_lock;
#ifdef _KERNEL
	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

/*
 * must be power of two for mask use to work
 */
#define	ARC_BUFC_NUMDATALISTS		16
#define	ARC_BUFC_NUMMETADATALISTS	16
#define	ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)

typedef struct arc_state {
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	list_t	arcs_lists[ARC_BUFC_NUMLISTS];	/* list of evictable buffers */
	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
} arc_state_t;

#define	ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_stolen;
	kstat_named_t arcstat_recycle_miss;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread. The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
	 */
	kstat_named_t arcstat_mutex_miss;
	/*
	 * Number of buffers skipped because they have I/O in progress, are
	 * indirect prefetch buffers that have not lived long enough, or are
	 * not from the spa we're trying to evict from.
	 */
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_asize;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_compress_successes;
	kstat_named_t arcstat_l2_compress_zeros;
	kstat_named_t arcstat_l2_compress_failures;
	kstat_named_t arcstat_l2_write_trylock_fail;
	kstat_named_t arcstat_l2_write_passed_headroom;
	kstat_named_t arcstat_l2_write_spa_mismatch;
	kstat_named_t arcstat_l2_write_in_l2;
	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
	kstat_named_t arcstat_l2_write_not_cacheable;
	kstat_named_t arcstat_l2_write_full;
	kstat_named_t arcstat_l2_write_buffer_iter;
	kstat_named_t arcstat_l2_write_pios;
	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
	kstat_named_t arcstat_l2_write_buffer_list_iter;
	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "allocated",			KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "stolen",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
"hash_collisions", KSTAT_DATA_UINT64 }, 394 { "hash_chains", KSTAT_DATA_UINT64 }, 395 { "hash_chain_max", KSTAT_DATA_UINT64 }, 396 { "p", KSTAT_DATA_UINT64 }, 397 { "c", KSTAT_DATA_UINT64 }, 398 { "c_min", KSTAT_DATA_UINT64 }, 399 { "c_max", KSTAT_DATA_UINT64 }, 400 { "size", KSTAT_DATA_UINT64 }, 401 { "hdr_size", KSTAT_DATA_UINT64 }, 402 { "data_size", KSTAT_DATA_UINT64 }, 403 { "other_size", KSTAT_DATA_UINT64 }, 404 { "l2_hits", KSTAT_DATA_UINT64 }, 405 { "l2_misses", KSTAT_DATA_UINT64 }, 406 { "l2_feeds", KSTAT_DATA_UINT64 }, 407 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 408 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 409 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 410 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 411 { "l2_writes_done", KSTAT_DATA_UINT64 }, 412 { "l2_writes_error", KSTAT_DATA_UINT64 }, 413 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 414 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 415 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 416 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 417 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 418 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 419 { "l2_io_error", KSTAT_DATA_UINT64 }, 420 { "l2_size", KSTAT_DATA_UINT64 }, 421 { "l2_asize", KSTAT_DATA_UINT64 }, 422 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 423 { "l2_compress_successes", KSTAT_DATA_UINT64 }, 424 { "l2_compress_zeros", KSTAT_DATA_UINT64 }, 425 { "l2_compress_failures", KSTAT_DATA_UINT64 }, 426 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 }, 427 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 }, 428 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 }, 429 { "l2_write_in_l2", KSTAT_DATA_UINT64 }, 430 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 }, 431 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 }, 432 { "l2_write_full", KSTAT_DATA_UINT64 }, 433 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 }, 434 { "l2_write_pios", KSTAT_DATA_UINT64 }, 435 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 }, 436 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 }, 437 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }, 438 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 439 { "duplicate_buffers", KSTAT_DATA_UINT64 }, 440 { "duplicate_buffers_size", KSTAT_DATA_UINT64 }, 441 { "duplicate_reads", KSTAT_DATA_UINT64 } 442}; 443 444#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 445 446#define ARCSTAT_INCR(stat, val) \ 447 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 448 449#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 450#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 451 452#define ARCSTAT_MAX(stat, val) { \ 453 uint64_t m; \ 454 while ((val) > (m = arc_stats.stat.value.ui64) && \ 455 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 456 continue; \ 457} 458 459#define ARCSTAT_MAXSTAT(stat) \ 460 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 461 462/* 463 * We define a macro to allow ARC hits/misses to be easily broken down by 464 * two separate conditions, giving a total of four different subtypes for 465 * each of hits and misses (so eight statistics total). 
/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

#define	L2ARC_IS_VALID_COMPRESS(_c_) \
	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
    "ARC metadata used");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
    "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_physdone;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
#ifdef illumos
static void arc_buf_watch(arc_buf_t *buf);
#endif /* illumos */

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags. These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	CACHE_LINE_SIZE

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];
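
/*
 * For illustration, the usual way a header's hash lock is taken (this is
 * the pattern arc_buf_freeze() uses later in this file):
 *
 *	kmutex_t *hash_lock = HDR_LOCK(buf->b_hdr);
 *	mutex_enter(hash_lock);
 *	... operate on fields protected by the hash lock ...
 *	mutex_exit(hash_lock);
 *
 * BUF_HASH_LOCK_NTRY() masks the hash index down to one of BUF_LOCKS
 * lock pads, so multiple headers intentionally share each lock.
 */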
/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2			/* num of writes */
/*
 * If we discover during ARC scan any buffers to be compressed, we boost
 * our headroom for the next scanning cycle by this percentage multiple.
 */
#define	L2ARC_HEADROOM_BOOST	200
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/* L2ARC Performance Tunables */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
    &l2arc_write_max, 0, "max write size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
    &l2arc_write_boost, 0, "extra write during warmup");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
    &l2arc_headroom, 0, "number of dev writes");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
    &l2arc_feed_secs, 0, "interval seconds");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
    &l2arc_feed_min_ms, 0, "min interval milliseconds");

SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
    &l2arc_feed_again, 0, "turbo warmup");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
    &l2arc_norw, 0, "no reads during writes");
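
/*
 * For illustration (FreeBSD): the tunables above are CTLFLAG_RW sysctls
 * and can be adjusted on a running system, e.g.:
 *
 *	# sysctl vfs.zfs.l2arc_write_max=16777216
 *	# sysctl vfs.zfs.l2arc_noprefetch=0
 *
 * whereas vfs.zfs.arc_max/arc_min earlier in this file are CTLFLAG_RDTUN
 * and must be set as loader(8) tunables before boot. (The values shown
 * are only examples.)
 */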
state"); 738SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD, 739 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state"); 740 741SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 742 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state"); 743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD, 744 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0, 745 "size of metadata in mfu ghost state"); 746SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD, 747 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0, 748 "size of data in mfu ghost state"); 749 750SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 751 &ARC_l2c_only.arcs_size, 0, "size of mru state"); 752 753/* 754 * L2ARC Internals 755 */ 756typedef struct l2arc_dev { 757 vdev_t *l2ad_vdev; /* vdev */ 758 spa_t *l2ad_spa; /* spa */ 759 uint64_t l2ad_hand; /* next write location */ 760 uint64_t l2ad_start; /* first addr on device */ 761 uint64_t l2ad_end; /* last addr on device */ 762 uint64_t l2ad_evict; /* last addr eviction reached */ 763 boolean_t l2ad_first; /* first sweep through */ 764 boolean_t l2ad_writing; /* currently writing */ 765 list_t *l2ad_buflist; /* buffer list */ 766 list_node_t l2ad_node; /* device list node */ 767} l2arc_dev_t; 768 769static list_t L2ARC_dev_list; /* device list */ 770static list_t *l2arc_dev_list; /* device list pointer */ 771static kmutex_t l2arc_dev_mtx; /* device list mutex */ 772static l2arc_dev_t *l2arc_dev_last; /* last device used */ 773static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 774static list_t L2ARC_free_on_write; /* free after write buf list */ 775static list_t *l2arc_free_on_write; /* free after write list ptr */ 776static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 777static uint64_t l2arc_ndev; /* number of devices */ 778 779typedef struct l2arc_read_callback { 780 arc_buf_t *l2rcb_buf; /* read buffer */ 781 spa_t *l2rcb_spa; /* spa */ 782 blkptr_t l2rcb_bp; /* original blkptr */ 783 zbookmark_t l2rcb_zb; /* original bookmark */ 784 int l2rcb_flags; /* original flags */ 785 enum zio_compress l2rcb_compress; /* applied compress */ 786} l2arc_read_callback_t; 787 788typedef struct l2arc_write_callback { 789 l2arc_dev_t *l2wcb_dev; /* device info */ 790 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 791} l2arc_write_callback_t; 792 793struct l2arc_buf_hdr { 794 /* protected by arc_buf_hdr mutex */ 795 l2arc_dev_t *b_dev; /* L2ARC device */ 796 uint64_t b_daddr; /* disk address, offset byte */ 797 /* compression applied to buffer data */ 798 enum zio_compress b_compress; 799 /* real alloc'd buffer size depending on b_compress applied */ 800 int b_asize; 801 /* temporary buffer holder for in-flight compressed data */ 802 void *b_tmp_cdata; 803}; 804 805typedef struct l2arc_data_free { 806 /* protected by l2arc_free_on_write_mtx */ 807 void *l2df_data; 808 size_t l2df_size; 809 void (*l2df_func)(void *, size_t); 810 list_node_t l2df_list_node; 811} l2arc_data_free_t; 812 813static kmutex_t l2arc_feed_thr_lock; 814static kcondvar_t l2arc_feed_thr_cv; 815static uint8_t l2arc_thread_exit; 816 817static void l2arc_read_done(zio_t *zio); 818static void l2arc_hdr_stat_add(void); 819static void l2arc_hdr_stat_remove(void); 820 821static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr); 822static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, 823 enum zio_compress c); 824static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab); 825 826static uint64_t 827buf_hash(uint64_t spa, const 
/*
 * CRC64 hash of the buffer identity (spa load guid, DVA and birth txg),
 * used to index both the hash table and its lock array.
 */
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_cksum0 == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
	const dva_t *dva = BP_IDENTITY(bp);
	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
	ASSERT(buf->b_birth != 0);
	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
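
/*
 * For illustration, a minimal sketch of how the table above is used by
 * the read path (see arc_read() later in the file): look the block up,
 * and keep the returned hash lock held while the header is examined.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *
 *	if (hdr != NULL) {
 *		... cache hit: hash_lock is held, use hdr ...
 *		mutex_exit(hash_lock);
 *	} else {
 *		... miss: allocate a header and buf_hash_insert() it ...
 *	}
 */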

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}
static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size. The table will take up
	 * totalmem*sizeof(void*)/64K (e.g., 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
#ifdef illumos
	arc_buf_watch(buf);
#endif /* illumos */
}

#ifdef illumos
#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = buf->b_hdr->b_size;
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
#endif /* illumos */
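
/*
 * For illustration: the checksum machinery above is only active with
 * ZFS_DEBUG_MODIFY set in zfs_flags (and the procfs watchpoints only in
 * illumos userland with ZFS_DEBUG=watch, per the comment near the top of
 * this file). A consumer that legitimately needs to modify a buffer
 * brackets the update with the thaw/freeze functions defined below:
 *
 *	arc_buf_thaw(buf);		(discard the frozen checksum)
 *	... modify buf->b_data ...
 *	arc_buf_freeze(buf);		(recompute it when debugging)
 *
 * Any modification while frozen trips the panic in arc_cksum_verify().
 */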
void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);

#ifdef illumos
	arc_buf_unwatch(buf);
#endif /* illumos */
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);
}

/*
 * Select the evictable-buffer list (and matching lock) a header belongs
 * to within a state: metadata and data hash into disjoint ranges of
 * arcs_lists[].
 */
static void
get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
{
	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);

	if (ab->b_type == ARC_BUFC_METADATA)
		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
	else {
		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
	}

	*list = &state->arcs_lists[buf_hashid];
	*lock = ARCS_LOCK(state, buf_hashid);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, ab->b_state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT0(ab->b_datacnt);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(lock);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(lock);
	}
	return (cnt);
}
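
/*
 * For illustration, how the two helpers above pair up in callers such as
 * arc_buf_add_ref() and arc_buf_free(): taking the first hold pulls the
 * header off its state's eviction list, and dropping the last hold puts
 * it back:
 *
 *	mutex_enter(hash_lock);
 *	add_reference(hdr, hash_lock, tag);		(now un-evictable)
 *	mutex_exit(hash_lock);
 *	...
 *	mutex_enter(hash_lock);
 *	(void) remove_reference(hdr, hash_lock, tag);	(evictable again)
 *	mutex_exit(hash_lock);
 */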
/*
 * Move the supplied buffer to the indicated state. The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;
	list_t *list;
	kmutex_t *lock;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT3P(new_state, !=, old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, old_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
		if (new_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, new_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			list_insert_head(list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_load_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}
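
/*
 * For illustration, the loan protocol from a consumer's point of view
 * (this is how the DMU uses it; error handling omitted):
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, size);
 *	... fill buf->b_data with dirty data ...
 *	arc_return_buf(buf, tag);	(must be returned before use/free)
 *
 * While on loan the bytes are tracked in arc_loaned_bytes rather than
 * as anonymous in-flight data by arc_tempreserve_space().
 */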
/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);

	/*
	 * This buffer already exists in the arc so create a duplicate
	 * copy for the caller. If the buffer is associated with user data
	 * then track the size and number of duplicates. These stats will be
	 * updated as duplicate buffers are created and destroyed.
	 */
	if (hdr->b_type == ARC_BUFC_DATA) {
		ARCSTAT_BUMP(arcstat_duplicate_buffers);
		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
	}
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted. Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	mutex_enter(&buf->b_evict_lock);
	if (buf->b_data == NULL) {
		mutex_exit(&buf->b_evict_lock);
		return;
	}
	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);
	hdr = buf->b_hdr;
	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
	mutex_exit(&buf->b_evict_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}
/*
 * Free the arc data buffer. If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = buf->b_data;
		df->l2df_size = hdr->b_size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(buf->b_data, hdr->b_size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
#ifdef illumos
		arc_buf_unwatch(buf);
#endif /* illumos */

		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf, zio_buf_free);
				arc_space_return(size, ARC_SPACE_DATA);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf, zio_data_buf_free);
				ARCSTAT_INCR(arcstat_data_size, -size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;

		/*
		 * If we're destroying a duplicate buffer make sure
		 * that the appropriate statistics are updated.
		 */
		if (buf->b_hdr->b_datacnt > 1 &&
		    buf->b_hdr->b_type == ARC_BUFC_DATA) {
			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
		}
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;
	buf->b_next = NULL;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}
1737 */ 1738 if (!buflist_held) { 1739 mutex_enter(&l2arc_buflist_mtx); 1740 l2hdr = hdr->b_l2hdr; 1741 } 1742 1743 if (l2hdr != NULL) { 1744 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr, 1745 hdr->b_size, 0); 1746 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1747 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1748 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 1749 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1750 if (hdr->b_state == arc_l2c_only) 1751 l2arc_hdr_stat_remove(); 1752 hdr->b_l2hdr = NULL; 1753 } 1754 1755 if (!buflist_held) 1756 mutex_exit(&l2arc_buflist_mtx); 1757 } 1758 1759 if (!BUF_EMPTY(hdr)) { 1760 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1761 buf_discard_identity(hdr); 1762 } 1763 while (hdr->b_buf) { 1764 arc_buf_t *buf = hdr->b_buf; 1765 1766 if (buf->b_efunc) { 1767 mutex_enter(&arc_eviction_mtx); 1768 mutex_enter(&buf->b_evict_lock); 1769 ASSERT(buf->b_hdr != NULL); 1770 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1771 hdr->b_buf = buf->b_next; 1772 buf->b_hdr = &arc_eviction_hdr; 1773 buf->b_next = arc_eviction_list; 1774 arc_eviction_list = buf; 1775 mutex_exit(&buf->b_evict_lock); 1776 mutex_exit(&arc_eviction_mtx); 1777 } else { 1778 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1779 } 1780 } 1781 if (hdr->b_freeze_cksum != NULL) { 1782 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1783 hdr->b_freeze_cksum = NULL; 1784 } 1785 if (hdr->b_thawed) { 1786 kmem_free(hdr->b_thawed, 1); 1787 hdr->b_thawed = NULL; 1788 } 1789 1790 ASSERT(!list_link_active(&hdr->b_arc_node)); 1791 ASSERT3P(hdr->b_hash_next, ==, NULL); 1792 ASSERT3P(hdr->b_acb, ==, NULL); 1793 kmem_cache_free(hdr_cache, hdr); 1794} 1795 1796void 1797arc_buf_free(arc_buf_t *buf, void *tag) 1798{ 1799 arc_buf_hdr_t *hdr = buf->b_hdr; 1800 int hashed = hdr->b_state != arc_anon; 1801 1802 ASSERT(buf->b_efunc == NULL); 1803 ASSERT(buf->b_data != NULL); 1804 1805 if (hashed) { 1806 kmutex_t *hash_lock = HDR_LOCK(hdr); 1807 1808 mutex_enter(hash_lock); 1809 hdr = buf->b_hdr; 1810 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1811 1812 (void) remove_reference(hdr, hash_lock, tag); 1813 if (hdr->b_datacnt > 1) { 1814 arc_buf_destroy(buf, FALSE, TRUE); 1815 } else { 1816 ASSERT(buf == hdr->b_buf); 1817 ASSERT(buf->b_efunc == NULL); 1818 hdr->b_flags |= ARC_BUF_AVAILABLE; 1819 } 1820 mutex_exit(hash_lock); 1821 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1822 int destroy_hdr; 1823 /* 1824 * We are in the middle of an async write. Don't destroy 1825 * this buffer unless the write completes before we finish 1826 * decrementing the reference count. 
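 * The HDR_IO_IN_PROGRESS recheck below is made under arc_eviction_mtx
 * so that dropping the last reference and deciding which side destroys
 * the header cannot race with the write-completion path.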
1827 */ 1828 mutex_enter(&arc_eviction_mtx); 1829 (void) remove_reference(hdr, NULL, tag); 1830 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1831 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1832 mutex_exit(&arc_eviction_mtx); 1833 if (destroy_hdr) 1834 arc_hdr_destroy(hdr); 1835 } else { 1836 if (remove_reference(hdr, NULL, tag) > 0) 1837 arc_buf_destroy(buf, FALSE, TRUE); 1838 else 1839 arc_hdr_destroy(hdr); 1840 } 1841} 1842 1843boolean_t 1844arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1845{ 1846 arc_buf_hdr_t *hdr = buf->b_hdr; 1847 kmutex_t *hash_lock = HDR_LOCK(hdr); 1848 boolean_t no_callback = (buf->b_efunc == NULL); 1849 1850 if (hdr->b_state == arc_anon) { 1851 ASSERT(hdr->b_datacnt == 1); 1852 arc_buf_free(buf, tag); 1853 return (no_callback); 1854 } 1855 1856 mutex_enter(hash_lock); 1857 hdr = buf->b_hdr; 1858 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1859 ASSERT(hdr->b_state != arc_anon); 1860 ASSERT(buf->b_data != NULL); 1861 1862 (void) remove_reference(hdr, hash_lock, tag); 1863 if (hdr->b_datacnt > 1) { 1864 if (no_callback) 1865 arc_buf_destroy(buf, FALSE, TRUE); 1866 } else if (no_callback) { 1867 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1868 ASSERT(buf->b_efunc == NULL); 1869 hdr->b_flags |= ARC_BUF_AVAILABLE; 1870 } 1871 ASSERT(no_callback || hdr->b_datacnt > 1 || 1872 refcount_is_zero(&hdr->b_refcnt)); 1873 mutex_exit(hash_lock); 1874 return (no_callback); 1875} 1876 1877int 1878arc_buf_size(arc_buf_t *buf) 1879{ 1880 return (buf->b_hdr->b_size); 1881} 1882 1883/* 1884 * Called from the DMU to determine if the current buffer should be 1885 * evicted. In order to ensure proper locking, the eviction must be initiated 1886 * from the DMU. Return true if the buffer is associated with user data and 1887 * duplicate buffers still exist. 1888 */ 1889boolean_t 1890arc_buf_eviction_needed(arc_buf_t *buf) 1891{ 1892 arc_buf_hdr_t *hdr; 1893 boolean_t evict_needed = B_FALSE; 1894 1895 if (zfs_disable_dup_eviction) 1896 return (B_FALSE); 1897 1898 mutex_enter(&buf->b_evict_lock); 1899 hdr = buf->b_hdr; 1900 if (hdr == NULL) { 1901 /* 1902 * We are in arc_do_user_evicts(); let that function 1903 * perform the eviction. 1904 */ 1905 ASSERT(buf->b_data == NULL); 1906 mutex_exit(&buf->b_evict_lock); 1907 return (B_FALSE); 1908 } else if (buf->b_data == NULL) { 1909 /* 1910 * We have already been added to the arc eviction list; 1911 * recommend eviction. 1912 */ 1913 ASSERT3P(hdr, ==, &arc_eviction_hdr); 1914 mutex_exit(&buf->b_evict_lock); 1915 return (B_TRUE); 1916 } 1917 1918 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA) 1919 evict_needed = B_TRUE; 1920 1921 mutex_exit(&buf->b_evict_lock); 1922 return (evict_needed); 1923} 1924 1925/* 1926 * Evict buffers from list until we've removed the specified number of 1927 * bytes. Move the removed buffers to the appropriate evict state. 1928 * If the recycle flag is set, then attempt to "recycle" a buffer: 1929 * - look for a buffer to evict that is `bytes' long. 1930 * - return the data block from this buffer rather than freeing it. 1931 * This flag is used by callers that are trying to make space for a 1932 * new buffer in a full arc cache. 1933 * 1934 * This function makes a "best effort". It skips over any buffers 1935 * it can't get a hash_lock on, and so may not catch all candidates. 1936 * It may also return without evicting as much space as requested. 
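 * For example, the cache-full allocation path in arc_get_data_buf()
 * below recycles an equal-sized block in a single step:
 *
 *	buf->b_data = arc_evict(state, 0, size, TRUE, type);
 *
 * On success, the evicted header's data block becomes the new buffer's
 * b_data directly, saving a free/alloc pair in the hot path.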
1937 */ 1938static void * 1939arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1940 arc_buf_contents_t type) 1941{ 1942 arc_state_t *evicted_state; 1943 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1944 int64_t bytes_remaining; 1945 arc_buf_hdr_t *ab, *ab_prev = NULL; 1946 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1947 kmutex_t *lock, *evicted_lock; 1948 kmutex_t *hash_lock; 1949 boolean_t have_lock; 1950 void *stolen = NULL; 1951 arc_buf_hdr_t marker = { 0 }; 1952 int count = 0; 1953 static int evict_metadata_offset, evict_data_offset; 1954 int i, idx, offset, list_count, lists; 1955 1956 ASSERT(state == arc_mru || state == arc_mfu); 1957 1958 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1959 1960 if (type == ARC_BUFC_METADATA) { 1961 offset = 0; 1962 list_count = ARC_BUFC_NUMMETADATALISTS; 1963 list_start = &state->arcs_lists[0]; 1964 evicted_list_start = &evicted_state->arcs_lists[0]; 1965 idx = evict_metadata_offset; 1966 } else { 1967 offset = ARC_BUFC_NUMMETADATALISTS; 1968 list_start = &state->arcs_lists[offset]; 1969 evicted_list_start = &evicted_state->arcs_lists[offset]; 1970 list_count = ARC_BUFC_NUMDATALISTS; 1971 idx = evict_data_offset; 1972 } 1973 bytes_remaining = evicted_state->arcs_lsize[type]; 1974 lists = 0; 1975 1976evict_start: 1977 list = &list_start[idx]; 1978 evicted_list = &evicted_list_start[idx]; 1979 lock = ARCS_LOCK(state, (offset + idx)); 1980 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1981 1982 mutex_enter(lock); 1983 mutex_enter(evicted_lock); 1984 1985 for (ab = list_tail(list); ab; ab = ab_prev) { 1986 ab_prev = list_prev(list, ab); 1987 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1988 /* prefetch buffers have a minimum lifespan */ 1989 if (HDR_IO_IN_PROGRESS(ab) || 1990 (spa && ab->b_spa != spa) || 1991 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1992 ddi_get_lbolt() - ab->b_arc_access < 1993 arc_min_prefetch_lifespan)) { 1994 skipped++; 1995 continue; 1996 } 1997 /* "lookahead" for better eviction candidate */ 1998 if (recycle && ab->b_size != bytes && 1999 ab_prev && ab_prev->b_size == bytes) 2000 continue; 2001 2002 /* ignore markers */ 2003 if (ab->b_spa == 0) 2004 continue; 2005 2006 /* 2007 * It may take a long time to evict all the bufs requested. 2008 * To avoid blocking all arc activity, periodically drop 2009 * the arcs_mtx and give other threads a chance to run 2010 * before reacquiring the lock. 2011 * 2012 * If we are looking for a buffer to recycle, we are in 2013 * the hot code path, so don't sleep. 
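 * The on-stack marker (zeroed, hence b_spa == 0) holds our place in
 * the list while the locks are dropped; both eviction scans skip
 * entries with b_spa == 0, so a marker can never itself be evicted.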
2014 */ 2015 if (!recycle && count++ > arc_evict_iterations) { 2016 list_insert_after(list, ab, &marker); 2017 mutex_exit(evicted_lock); 2018 mutex_exit(lock); 2019 kpreempt(KPREEMPT_SYNC); 2020 mutex_enter(lock); 2021 mutex_enter(evicted_lock); 2022 ab_prev = list_prev(list, &marker); 2023 list_remove(list, &marker); 2024 count = 0; 2025 continue; 2026 } 2027 2028 hash_lock = HDR_LOCK(ab); 2029 have_lock = MUTEX_HELD(hash_lock); 2030 if (have_lock || mutex_tryenter(hash_lock)) { 2031 ASSERT0(refcount_count(&ab->b_refcnt)); 2032 ASSERT(ab->b_datacnt > 0); 2033 while (ab->b_buf) { 2034 arc_buf_t *buf = ab->b_buf; 2035 if (!mutex_tryenter(&buf->b_evict_lock)) { 2036 missed += 1; 2037 break; 2038 } 2039 if (buf->b_data) { 2040 bytes_evicted += ab->b_size; 2041 if (recycle && ab->b_type == type && 2042 ab->b_size == bytes && 2043 !HDR_L2_WRITING(ab)) { 2044 stolen = buf->b_data; 2045 recycle = FALSE; 2046 } 2047 } 2048 if (buf->b_efunc) { 2049 mutex_enter(&arc_eviction_mtx); 2050 arc_buf_destroy(buf, 2051 buf->b_data == stolen, FALSE); 2052 ab->b_buf = buf->b_next; 2053 buf->b_hdr = &arc_eviction_hdr; 2054 buf->b_next = arc_eviction_list; 2055 arc_eviction_list = buf; 2056 mutex_exit(&arc_eviction_mtx); 2057 mutex_exit(&buf->b_evict_lock); 2058 } else { 2059 mutex_exit(&buf->b_evict_lock); 2060 arc_buf_destroy(buf, 2061 buf->b_data == stolen, TRUE); 2062 } 2063 } 2064 2065 if (ab->b_l2hdr) { 2066 ARCSTAT_INCR(arcstat_evict_l2_cached, 2067 ab->b_size); 2068 } else { 2069 if (l2arc_write_eligible(ab->b_spa, ab)) { 2070 ARCSTAT_INCR(arcstat_evict_l2_eligible, 2071 ab->b_size); 2072 } else { 2073 ARCSTAT_INCR( 2074 arcstat_evict_l2_ineligible, 2075 ab->b_size); 2076 } 2077 } 2078 2079 if (ab->b_datacnt == 0) { 2080 arc_change_state(evicted_state, ab, hash_lock); 2081 ASSERT(HDR_IN_HASH_TABLE(ab)); 2082 ab->b_flags |= ARC_IN_HASH_TABLE; 2083 ab->b_flags &= ~ARC_BUF_AVAILABLE; 2084 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 2085 } 2086 if (!have_lock) 2087 mutex_exit(hash_lock); 2088 if (bytes >= 0 && bytes_evicted >= bytes) 2089 break; 2090 if (bytes_remaining > 0) { 2091 mutex_exit(evicted_lock); 2092 mutex_exit(lock); 2093 idx = ((idx + 1) & (list_count - 1)); 2094 lists++; 2095 goto evict_start; 2096 } 2097 } else { 2098 missed += 1; 2099 } 2100 } 2101 2102 mutex_exit(evicted_lock); 2103 mutex_exit(lock); 2104 2105 idx = ((idx + 1) & (list_count - 1)); 2106 lists++; 2107 2108 if (bytes_evicted < bytes) { 2109 if (lists < list_count) 2110 goto evict_start; 2111 else 2112 dprintf("only evicted %lld bytes from %x", 2113 (longlong_t)bytes_evicted, state); 2114 } 2115 if (type == ARC_BUFC_METADATA) 2116 evict_metadata_offset = idx; 2117 else 2118 evict_data_offset = idx; 2119 2120 if (skipped) 2121 ARCSTAT_INCR(arcstat_evict_skip, skipped); 2122 2123 if (missed) 2124 ARCSTAT_INCR(arcstat_mutex_miss, missed); 2125 2126 /* 2127 * Note: we have just evicted some data into the ghost state, 2128 * potentially putting the ghost size over the desired size. Rather 2129 * than evicting from the ghost list in this hot code path, leave 2130 * this chore to the arc_reclaim_thread(). 2131 */ 2132 2133 if (stolen) 2134 ARCSTAT_BUMP(arcstat_stolen); 2135 return (stolen); 2136} 2137 2138/* 2139 * Remove buffers from list until we've removed the specified number of 2140 * bytes. Destroy the buffers that are removed.
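 * Ghost headers carry no data (b_buf is NULL), so "destroying" one
 * frees only the header itself -- unless the block is still cached in
 * the L2ARC, in which case the header is kept in the l2c_only state
 * to preserve its L2ARC metadata.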
2141 */ 2142static void 2143arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 2144{ 2145 arc_buf_hdr_t *ab, *ab_prev; 2146 arc_buf_hdr_t marker = { 0 }; 2147 list_t *list, *list_start; 2148 kmutex_t *hash_lock, *lock; 2149 uint64_t bytes_deleted = 0; 2150 uint64_t bufs_skipped = 0; 2151 int count = 0; 2152 static int evict_offset; 2153 int list_count, idx = evict_offset; 2154 int offset, lists = 0; 2155 2156 ASSERT(GHOST_STATE(state)); 2157 2158 /* 2159 * data lists come after metadata lists 2160 */ 2161 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 2162 list_count = ARC_BUFC_NUMDATALISTS; 2163 offset = ARC_BUFC_NUMMETADATALISTS; 2164 2165evict_start: 2166 list = &list_start[idx]; 2167 lock = ARCS_LOCK(state, idx + offset); 2168 2169 mutex_enter(lock); 2170 for (ab = list_tail(list); ab; ab = ab_prev) { 2171 ab_prev = list_prev(list, ab); 2172 if (ab->b_type > ARC_BUFC_NUMTYPES) 2173 panic("invalid ab=%p", (void *)ab); 2174 if (spa && ab->b_spa != spa) 2175 continue; 2176 2177 /* ignore markers */ 2178 if (ab->b_spa == 0) 2179 continue; 2180 2181 hash_lock = HDR_LOCK(ab); 2182 /* caller may be trying to modify this buffer, skip it */ 2183 if (MUTEX_HELD(hash_lock)) 2184 continue; 2185 2186 /* 2187 * It may take a long time to evict all the bufs requested. 2188 * To avoid blocking all arc activity, periodically drop 2189 * the arcs_mtx and give other threads a chance to run 2190 * before reacquiring the lock. 2191 */ 2192 if (count++ > arc_evict_iterations) { 2193 list_insert_after(list, ab, &marker); 2194 mutex_exit(lock); 2195 kpreempt(KPREEMPT_SYNC); 2196 mutex_enter(lock); 2197 ab_prev = list_prev(list, &marker); 2198 list_remove(list, &marker); 2199 count = 0; 2200 continue; 2201 } 2202 if (mutex_tryenter(hash_lock)) { 2203 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 2204 ASSERT(ab->b_buf == NULL); 2205 ARCSTAT_BUMP(arcstat_deleted); 2206 bytes_deleted += ab->b_size; 2207 2208 if (ab->b_l2hdr != NULL) { 2209 /* 2210 * This buffer is cached on the 2nd Level ARC; 2211 * don't destroy the header. 2212 */ 2213 arc_change_state(arc_l2c_only, ab, hash_lock); 2214 mutex_exit(hash_lock); 2215 } else { 2216 arc_change_state(arc_anon, ab, hash_lock); 2217 mutex_exit(hash_lock); 2218 arc_hdr_destroy(ab); 2219 } 2220 2221 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 2222 if (bytes >= 0 && bytes_deleted >= bytes) 2223 break; 2224 } else if (bytes < 0) { 2225 /* 2226 * Insert a list marker and then wait for the 2227 * hash lock to become available. Once it's 2228 * available, restart from where we left off.
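 * The mutex_enter()/mutex_exit() pair below takes the hash lock only
 * to block until its current holder drops it; nothing is done while
 * it is held. This is only worth doing when bytes < 0 (evict
 * everything), where skipping the buffer is not an option.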
2229 */ 2230 list_insert_after(list, ab, &marker); 2231 mutex_exit(lock); 2232 mutex_enter(hash_lock); 2233 mutex_exit(hash_lock); 2234 mutex_enter(lock); 2235 ab_prev = list_prev(list, &marker); 2236 list_remove(list, &marker); 2237 } else { 2238 bufs_skipped += 1; 2239 } 2240 2241 } 2242 mutex_exit(lock); 2243 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 2244 lists++; 2245 2246 if (lists < list_count) 2247 goto evict_start; 2248 2249 evict_offset = idx; 2250 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 2251 (bytes < 0 || bytes_deleted < bytes)) { 2252 list_start = &state->arcs_lists[0]; 2253 list_count = ARC_BUFC_NUMMETADATALISTS; 2254 offset = lists = 0; 2255 goto evict_start; 2256 } 2257 2258 if (bufs_skipped) { 2259 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 2260 ASSERT(bytes >= 0); 2261 } 2262 2263 if (bytes_deleted < bytes) 2264 dprintf("only deleted %lld bytes from %p", 2265 (longlong_t)bytes_deleted, state); 2266} 2267 2268static void 2269arc_adjust(void) 2270{ 2271 int64_t adjustment, delta; 2272 2273 /* 2274 * Adjust MRU size 2275 */ 2276 2277 adjustment = MIN((int64_t)(arc_size - arc_c), 2278 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 2279 arc_p)); 2280 2281 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 2282 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 2283 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA); 2284 adjustment -= delta; 2285 } 2286 2287 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2288 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 2289 (void) arc_evict(arc_mru, 0, delta, FALSE, 2290 ARC_BUFC_METADATA); 2291 } 2292 2293 /* 2294 * Adjust MFU size 2295 */ 2296 2297 adjustment = arc_size - arc_c; 2298 2299 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 2300 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 2301 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA); 2302 adjustment -= delta; 2303 } 2304 2305 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2306 int64_t delta = MIN(adjustment, 2307 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 2308 (void) arc_evict(arc_mfu, 0, delta, FALSE, 2309 ARC_BUFC_METADATA); 2310 } 2311 2312 /* 2313 * Adjust ghost lists 2314 */ 2315 2316 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2317 2318 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2319 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2320 arc_evict_ghost(arc_mru_ghost, 0, delta); 2321 } 2322 2323 adjustment = 2324 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2325 2326 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2327 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2328 arc_evict_ghost(arc_mfu_ghost, 0, delta); 2329 } 2330} 2331 2332static void 2333arc_do_user_evicts(void) 2334{ 2335 static arc_buf_t *tmp_arc_eviction_list; 2336 2337 /* 2338 * Move list over to avoid LOR 2339 */ 2340restart: 2341 mutex_enter(&arc_eviction_mtx); 2342 tmp_arc_eviction_list = arc_eviction_list; 2343 arc_eviction_list = NULL; 2344 mutex_exit(&arc_eviction_mtx); 2345 2346 while (tmp_arc_eviction_list != NULL) { 2347 arc_buf_t *buf = tmp_arc_eviction_list; 2348 tmp_arc_eviction_list = buf->b_next; 2349 mutex_enter(&buf->b_evict_lock); 2350 buf->b_hdr = NULL; 2351 mutex_exit(&buf->b_evict_lock); 2352 2353 if (buf->b_efunc != NULL) 2354 VERIFY(buf->b_efunc(buf) == 0); 2355 2356 buf->b_efunc = NULL; 2357 buf->b_private = NULL; 2358 
kmem_cache_free(buf_cache, buf); 2359 } 2360 2361 if (arc_eviction_list != NULL) 2362 goto restart; 2363} 2364 2365/* 2366 * Flush all *evictable* data from the cache for the given spa. 2367 * NOTE: this will not touch "active" (i.e. referenced) data. 2368 */ 2369void 2370arc_flush(spa_t *spa) 2371{ 2372 uint64_t guid = 0; 2373 2374 if (spa) 2375 guid = spa_load_guid(spa); 2376 2377 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2378 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2379 if (spa) 2380 break; 2381 } 2382 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2383 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2384 if (spa) 2385 break; 2386 } 2387 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2388 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2389 if (spa) 2390 break; 2391 } 2392 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2393 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2394 if (spa) 2395 break; 2396 } 2397 2398 arc_evict_ghost(arc_mru_ghost, guid, -1); 2399 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2400 2401 mutex_enter(&arc_reclaim_thr_lock); 2402 arc_do_user_evicts(); 2403 mutex_exit(&arc_reclaim_thr_lock); 2404 ASSERT(spa || arc_eviction_list == NULL); 2405} 2406 2407void 2408arc_shrink(void) 2409{ 2410 if (arc_c > arc_c_min) { 2411 uint64_t to_free; 2412 2413#ifdef _KERNEL 2414 to_free = arc_c >> arc_shrink_shift; 2415#else 2416 to_free = arc_c >> arc_shrink_shift; 2417#endif 2418 if (arc_c > arc_c_min + to_free) 2419 atomic_add_64(&arc_c, -to_free); 2420 else 2421 arc_c = arc_c_min; 2422 2423 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2424 if (arc_c > arc_size) 2425 arc_c = MAX(arc_size, arc_c_min); 2426 if (arc_p > arc_c) 2427 arc_p = (arc_c >> 1); 2428 ASSERT(arc_c >= arc_c_min); 2429 ASSERT((int64_t)arc_p >= 0); 2430 } 2431 2432 if (arc_size > arc_c) 2433 arc_adjust(); 2434} 2435 2436static int needfree = 0; 2437 2438static int 2439arc_reclaim_needed(void) 2440{ 2441 2442#ifdef _KERNEL 2443 2444 if (needfree) 2445 return (1); 2446 2447 /* 2448 * Cooperate with pagedaemon when it's time for it to scan 2449 * and reclaim some pages. 2450 */ 2451 if (vm_paging_needed()) 2452 return (1); 2453 2454#ifdef sun 2455 /* 2456 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2457 */ 2458 extra = desfree; 2459 2460 /* 2461 * check that we're out of range of the pageout scanner. It starts to 2462 * schedule paging if freemem is less than lotsfree and needfree. 2463 * lotsfree is the high-water mark for pageout, and needfree is the 2464 * number of needed free pages. We add extra pages here to make sure 2465 * the scanner doesn't start up while we're freeing memory. 2466 */ 2467 if (freemem < lotsfree + needfree + extra) 2468 return (1); 2469 2470 /* 2471 * check to make sure that swapfs has enough space so that anon 2472 * reservations can still succeed. anon_resvmem() checks that the 2473 * availrmem is greater than swapfs_minfree, and the number of reserved 2474 * swap pages. We also add a bit of extra here just to prevent 2475 * circumstances from getting really dire. 2476 */ 2477 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2478 return (1); 2479 2480#if defined(__i386) 2481 /* 2482 * If we're on an i386 platform, it's possible that we'll exhaust the 2483 * kernel heap space before we ever run out of available physical 2484 * memory. 
Most checks of the size of the heap_area compare against 2485 * tune.t_minarmem, which is the minimum available real memory that we 2486 * can have in the system. However, this is generally fixed at 25 pages 2487 * which is so low that it's useless. In this comparison, we seek to 2488 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2489 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2490 * free) 2491 */ 2492 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2493 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2494 return (1); 2495#endif 2496#else /* !sun */ 2497 if (kmem_used() > (kmem_size() * 3) / 4) 2498 return (1); 2499#endif /* sun */ 2500 2501#else 2502 if (spa_get_random(100) == 0) 2503 return (1); 2504#endif 2505 return (0); 2506} 2507 2508extern kmem_cache_t *zio_buf_cache[]; 2509extern kmem_cache_t *zio_data_buf_cache[]; 2510 2511static void 2512arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2513{ 2514 size_t i; 2515 kmem_cache_t *prev_cache = NULL; 2516 kmem_cache_t *prev_data_cache = NULL; 2517 2518#ifdef _KERNEL 2519 if (arc_meta_used >= arc_meta_limit) { 2520 /* 2521 * We are exceeding our meta-data cache limit. 2522 * Purge some DNLC entries to release holds on meta-data. 2523 */ 2524 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2525 } 2526#if defined(__i386) 2527 /* 2528 * Reclaim unused memory from all kmem caches. 2529 */ 2530 kmem_reap(); 2531#endif 2532#endif 2533 2534 /* 2535 * An aggressive reclamation will shrink the cache size as well as 2536 * reap free buffers from the arc kmem caches. 2537 */ 2538 if (strat == ARC_RECLAIM_AGGR) 2539 arc_shrink(); 2540 2541 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2542 if (zio_buf_cache[i] != prev_cache) { 2543 prev_cache = zio_buf_cache[i]; 2544 kmem_cache_reap_now(zio_buf_cache[i]); 2545 } 2546 if (zio_data_buf_cache[i] != prev_data_cache) { 2547 prev_data_cache = zio_data_buf_cache[i]; 2548 kmem_cache_reap_now(zio_data_buf_cache[i]); 2549 } 2550 } 2551 kmem_cache_reap_now(buf_cache); 2552 kmem_cache_reap_now(hdr_cache); 2553} 2554 2555static void 2556arc_reclaim_thread(void *dummy __unused) 2557{ 2558 clock_t growtime = 0; 2559 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2560 callb_cpr_t cpr; 2561 2562 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2563 2564 mutex_enter(&arc_reclaim_thr_lock); 2565 while (arc_thread_exit == 0) { 2566 if (arc_reclaim_needed()) { 2567 2568 if (arc_no_grow) { 2569 if (last_reclaim == ARC_RECLAIM_CONS) { 2570 last_reclaim = ARC_RECLAIM_AGGR; 2571 } else { 2572 last_reclaim = ARC_RECLAIM_CONS; 2573 } 2574 } else { 2575 arc_no_grow = TRUE; 2576 last_reclaim = ARC_RECLAIM_AGGR; 2577 membar_producer(); 2578 } 2579 2580 /* reset the growth delay for every reclaim */ 2581 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2582 2583 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2584 /* 2585 * If needfree is TRUE our vm_lowmem hook 2586 * was called and in that case we must free some 2587 * memory, so switch to aggressive mode. 
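 * Aggressive mode matters because arc_kmem_reap_now() above calls
 * arc_shrink() -- and thus actually lowers arc_c -- only for
 * ARC_RECLAIM_AGGR; a conservative pass merely reaps the kmem caches.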
2588 */ 2589 arc_no_grow = TRUE; 2590 last_reclaim = ARC_RECLAIM_AGGR; 2591 } 2592 arc_kmem_reap_now(last_reclaim); 2593 arc_warm = B_TRUE; 2594 2595 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2596 arc_no_grow = FALSE; 2597 } 2598 2599 arc_adjust(); 2600 2601 if (arc_eviction_list != NULL) 2602 arc_do_user_evicts(); 2603 2604#ifdef _KERNEL 2605 if (needfree) { 2606 needfree = 0; 2607 wakeup(&needfree); 2608 } 2609#endif 2610 2611 /* block until needed, or one second, whichever is shorter */ 2612 CALLB_CPR_SAFE_BEGIN(&cpr); 2613 (void) cv_timedwait(&arc_reclaim_thr_cv, 2614 &arc_reclaim_thr_lock, hz); 2615 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2616 } 2617 2618 arc_thread_exit = 0; 2619 cv_broadcast(&arc_reclaim_thr_cv); 2620 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2621 thread_exit(); 2622} 2623 2624/* 2625 * Adapt arc info given the number of bytes we are trying to add and 2626 * the state that we are coming from. This function is only called 2627 * when we are adding new content to the cache. 2628 */ 2629static void 2630arc_adapt(int bytes, arc_state_t *state) 2631{ 2632 int mult; 2633 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2634 2635 if (state == arc_l2c_only) 2636 return; 2637 2638 ASSERT(bytes > 0); 2639 /* 2640 * Adapt the target size of the MRU list: 2641 * - if we just hit in the MRU ghost list, then increase 2642 * the target size of the MRU list. 2643 * - if we just hit in the MFU ghost list, then increase 2644 * the target size of the MFU list by decreasing the 2645 * target size of the MRU list. 2646 */ 2647 if (state == arc_mru_ghost) { 2648 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2649 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2650 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2651 2652 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2653 } else if (state == arc_mfu_ghost) { 2654 uint64_t delta; 2655 2656 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2657 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2658 mult = MIN(mult, 10); 2659 2660 delta = MIN(bytes * mult, arc_p); 2661 arc_p = MAX(arc_p_min, arc_p - delta); 2662 } 2663 ASSERT((int64_t)arc_p >= 0); 2664 2665 if (arc_reclaim_needed()) { 2666 cv_signal(&arc_reclaim_thr_cv); 2667 return; 2668 } 2669 2670 if (arc_no_grow) 2671 return; 2672 2673 if (arc_c >= arc_c_max) 2674 return; 2675 2676 /* 2677 * If we're within (2 * maxblocksize) bytes of the target 2678 * cache size, increment the target cache size 2679 */ 2680 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2681 atomic_add_64(&arc_c, (int64_t)bytes); 2682 if (arc_c > arc_c_max) 2683 arc_c = arc_c_max; 2684 else if (state == arc_anon) 2685 atomic_add_64(&arc_p, (int64_t)bytes); 2686 if (arc_p > arc_c) 2687 arc_p = arc_c; 2688 } 2689 ASSERT((int64_t)arc_p >= 0); 2690} 2691 2692/* 2693 * Check if the cache has reached its limits and eviction is required 2694 * prior to insert. 2695 */ 2696static int 2697arc_evict_needed(arc_buf_contents_t type) 2698{ 2699 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2700 return (1); 2701 2702#ifdef sun 2703#ifdef _KERNEL 2704 /* 2705 * If zio data pages are being allocated out of a separate heap segment, 2706 * then enforce that the size of available vmem for this area remains 2707 * above about 1/32nd free.
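 * (The "1/32nd" is the VMEM_ALLOC >> 5 term in the check below;
 * exhausting the zio arena would stall zio allocations even when
 * plenty of ordinary kernel memory remains.)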
2708 */ 2709 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2710 vmem_size(zio_arena, VMEM_FREE) < 2711 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2712 return (1); 2713#endif 2714#endif /* sun */ 2715 2716 if (arc_reclaim_needed()) 2717 return (1); 2718 2719 return (arc_size > arc_c); 2720} 2721 2722/* 2723 * The buffer, supplied as the first argument, needs a data block. 2724 * So, if we are at cache max, determine which cache should be victimized. 2725 * We have the following cases: 2726 * 2727 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2728 * In this situation if we're out of space, but the resident size of the MFU is 2729 * under the limit, victimize the MFU cache to satisfy this insertion request. 2730 * 2731 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2732 * Here, we've used up all of the available space for the MRU, so we need to 2733 * evict from our own cache instead. Evict from the set of resident MRU 2734 * entries. 2735 * 2736 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2737 * c minus p represents the MFU space in the cache, since p is the size of the 2738 * cache that is dedicated to the MRU. In this situation there's still space on 2739 * the MFU side, so the MRU side needs to be victimized. 2740 * 2741 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2742 * MFU's resident set is consuming more space than it has been allotted. In 2743 * this situation, we must victimize our own cache, the MFU, for this insertion. 2744 */ 2745static void 2746arc_get_data_buf(arc_buf_t *buf) 2747{ 2748 arc_state_t *state = buf->b_hdr->b_state; 2749 uint64_t size = buf->b_hdr->b_size; 2750 arc_buf_contents_t type = buf->b_hdr->b_type; 2751 2752 arc_adapt(size, state); 2753 2754 /* 2755 * We have not yet reached cache maximum size, 2756 * just allocate a new buffer. 2757 */ 2758 if (!arc_evict_needed(type)) { 2759 if (type == ARC_BUFC_METADATA) { 2760 buf->b_data = zio_buf_alloc(size); 2761 arc_space_consume(size, ARC_SPACE_DATA); 2762 } else { 2763 ASSERT(type == ARC_BUFC_DATA); 2764 buf->b_data = zio_data_buf_alloc(size); 2765 ARCSTAT_INCR(arcstat_data_size, size); 2766 atomic_add_64(&arc_size, size); 2767 } 2768 goto out; 2769 } 2770 2771 /* 2772 * If we are prefetching from the mfu ghost list, this buffer 2773 * will end up on the mru list; so steal space from there. 2774 */ 2775 if (state == arc_mfu_ghost) 2776 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2777 else if (state == arc_mru_ghost) 2778 state = arc_mru; 2779 2780 if (state == arc_mru || state == arc_anon) { 2781 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2782 state = (arc_mfu->arcs_lsize[type] >= size && 2783 arc_p > mru_used) ? arc_mfu : arc_mru; 2784 } else { 2785 /* MFU cases */ 2786 uint64_t mfu_space = arc_c - arc_p; 2787 state = (arc_mru->arcs_lsize[type] >= size && 2788 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2789 } 2790 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) { 2791 if (type == ARC_BUFC_METADATA) { 2792 buf->b_data = zio_buf_alloc(size); 2793 arc_space_consume(size, ARC_SPACE_DATA); 2794 } else { 2795 ASSERT(type == ARC_BUFC_DATA); 2796 buf->b_data = zio_data_buf_alloc(size); 2797 ARCSTAT_INCR(arcstat_data_size, size); 2798 atomic_add_64(&arc_size, size); 2799 } 2800 ARCSTAT_BUMP(arcstat_recycle_miss); 2801 } 2802 ASSERT(buf->b_data != NULL); 2803out: 2804 /* 2805 * Update the state size. Note that ghost states have a 2806 * "ghost size" and so don't need to be updated. 
2807 */ 2808 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2809 arc_buf_hdr_t *hdr = buf->b_hdr; 2810 2811 atomic_add_64(&hdr->b_state->arcs_size, size); 2812 if (list_link_active(&hdr->b_arc_node)) { 2813 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2814 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2815 } 2816 /* 2817 * If we are growing the cache, and we are adding anonymous 2818 * data, and we have outgrown arc_p, update arc_p 2819 */ 2820 if (arc_size < arc_c && hdr->b_state == arc_anon && 2821 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2822 arc_p = MIN(arc_c, arc_p + size); 2823 } 2824 ARCSTAT_BUMP(arcstat_allocated); 2825} 2826 2827/* 2828 * This routine is called whenever a buffer is accessed. 2829 * NOTE: the hash lock is dropped in this function. 2830 */ 2831static void 2832arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2833{ 2834 clock_t now; 2835 2836 ASSERT(MUTEX_HELD(hash_lock)); 2837 2838 if (buf->b_state == arc_anon) { 2839 /* 2840 * This buffer is not in the cache, and does not 2841 * appear in our "ghost" list. Add the new buffer 2842 * to the MRU state. 2843 */ 2844 2845 ASSERT(buf->b_arc_access == 0); 2846 buf->b_arc_access = ddi_get_lbolt(); 2847 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2848 arc_change_state(arc_mru, buf, hash_lock); 2849 2850 } else if (buf->b_state == arc_mru) { 2851 now = ddi_get_lbolt(); 2852 2853 /* 2854 * If this buffer is here because of a prefetch, then either: 2855 * - clear the flag if this is a "referencing" read 2856 * (any subsequent access will bump this into the MFU state). 2857 * or 2858 * - move the buffer to the head of the list if this is 2859 * another prefetch (to make it less likely to be evicted). 2860 */ 2861 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2862 if (refcount_count(&buf->b_refcnt) == 0) { 2863 ASSERT(list_link_active(&buf->b_arc_node)); 2864 } else { 2865 buf->b_flags &= ~ARC_PREFETCH; 2866 ARCSTAT_BUMP(arcstat_mru_hits); 2867 } 2868 buf->b_arc_access = now; 2869 return; 2870 } 2871 2872 /* 2873 * This buffer has been "accessed" only once so far, 2874 * but it is still in the cache. Move it to the MFU 2875 * state. 2876 */ 2877 if (now > buf->b_arc_access + ARC_MINTIME) { 2878 /* 2879 * More than 125ms have passed since we 2880 * instantiated this buffer. Move it to the 2881 * most frequently used state. 2882 */ 2883 buf->b_arc_access = now; 2884 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2885 arc_change_state(arc_mfu, buf, hash_lock); 2886 } 2887 ARCSTAT_BUMP(arcstat_mru_hits); 2888 } else if (buf->b_state == arc_mru_ghost) { 2889 arc_state_t *new_state; 2890 /* 2891 * This buffer has been "accessed" recently, but 2892 * was evicted from the cache. Move it to the 2893 * MFU state. 2894 */ 2895 2896 if (buf->b_flags & ARC_PREFETCH) { 2897 new_state = arc_mru; 2898 if (refcount_count(&buf->b_refcnt) > 0) 2899 buf->b_flags &= ~ARC_PREFETCH; 2900 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2901 } else { 2902 new_state = arc_mfu; 2903 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2904 } 2905 2906 buf->b_arc_access = ddi_get_lbolt(); 2907 arc_change_state(new_state, buf, hash_lock); 2908 2909 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2910 } else if (buf->b_state == arc_mfu) { 2911 /* 2912 * This buffer has been accessed more than once and is 2913 * still in the cache. Keep it in the MFU state. 2914 * 2915 * NOTE: an add_reference() that occurred when we did 2916 * the arc_read() will have kicked this off the list. 
2917 * If it was a prefetch, we will explicitly move it to 2918 * the head of the list now. 2919 */ 2920 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2921 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2922 ASSERT(list_link_active(&buf->b_arc_node)); 2923 } 2924 ARCSTAT_BUMP(arcstat_mfu_hits); 2925 buf->b_arc_access = ddi_get_lbolt(); 2926 } else if (buf->b_state == arc_mfu_ghost) { 2927 arc_state_t *new_state = arc_mfu; 2928 /* 2929 * This buffer has been accessed more than once but has 2930 * been evicted from the cache. Move it back to the 2931 * MFU state. 2932 */ 2933 2934 if (buf->b_flags & ARC_PREFETCH) { 2935 /* 2936 * This is a prefetch access... 2937 * move this block back to the MRU state. 2938 */ 2939 ASSERT0(refcount_count(&buf->b_refcnt)); 2940 new_state = arc_mru; 2941 } 2942 2943 buf->b_arc_access = ddi_get_lbolt(); 2944 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2945 arc_change_state(new_state, buf, hash_lock); 2946 2947 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2948 } else if (buf->b_state == arc_l2c_only) { 2949 /* 2950 * This buffer is on the 2nd Level ARC. 2951 */ 2952 2953 buf->b_arc_access = ddi_get_lbolt(); 2954 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2955 arc_change_state(arc_mfu, buf, hash_lock); 2956 } else { 2957 ASSERT(!"invalid arc state"); 2958 } 2959} 2960 2961/* a generic arc_done_func_t which you can use */ 2962/* ARGSUSED */ 2963void 2964arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2965{ 2966 if (zio == NULL || zio->io_error == 0) 2967 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2968 VERIFY(arc_buf_remove_ref(buf, arg)); 2969} 2970 2971/* a generic arc_done_func_t */ 2972void 2973arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2974{ 2975 arc_buf_t **bufp = arg; 2976 if (zio && zio->io_error) { 2977 VERIFY(arc_buf_remove_ref(buf, arg)); 2978 *bufp = NULL; 2979 } else { 2980 *bufp = buf; 2981 ASSERT(buf->b_data); 2982 } 2983} 2984 2985static void 2986arc_read_done(zio_t *zio) 2987{ 2988 arc_buf_hdr_t *hdr; 2989 arc_buf_t *buf; 2990 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2991 kmutex_t *hash_lock = NULL; 2992 arc_callback_t *callback_list, *acb; 2993 int freeable = FALSE; 2994 2995 buf = zio->io_private; 2996 hdr = buf->b_hdr; 2997 2998 /* 2999 * The hdr was inserted into hash-table and removed from lists 3000 * prior to starting I/O. We should find this header, since 3001 * it's in the hash table, and it should be legit since it's 3002 * not possible to evict it during the I/O. The only possible 3003 * reason for it not to be found is if we were freed during the 3004 * read. 
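 * In the freed-during-read case buf_hash_find() returns NULL and
 * leaves hash_lock NULL; the ASSERT below admits exactly that
 * combination (together with HDR_FREED_IN_READ), and the tail of
 * this function then treats the header as anonymous and possibly
 * freeable.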
3005 */ 3006 if (HDR_IN_HASH_TABLE(hdr)) { 3007 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 3008 ASSERT3U(hdr->b_dva.dva_word[0], ==, 3009 BP_IDENTITY(zio->io_bp)->dva_word[0]); 3010 ASSERT3U(hdr->b_dva.dva_word[1], ==, 3011 BP_IDENTITY(zio->io_bp)->dva_word[1]); 3012 3013 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 3014 &hash_lock); 3015 3016 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && 3017 hash_lock == NULL) || 3018 (found == hdr && 3019 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 3020 (found == hdr && HDR_L2_READING(hdr))); 3021 } 3022 3023 hdr->b_flags &= ~ARC_L2_EVICTED; 3024 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 3025 hdr->b_flags &= ~ARC_L2CACHE; 3026 3027 /* byteswap if necessary */ 3028 callback_list = hdr->b_acb; 3029 ASSERT(callback_list != NULL); 3030 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 3031 dmu_object_byteswap_t bswap = 3032 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 3033 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 3034 byteswap_uint64_array : 3035 dmu_ot_byteswap[bswap].ob_func; 3036 func(buf->b_data, hdr->b_size); 3037 } 3038 3039 arc_cksum_compute(buf, B_FALSE); 3040#ifdef illumos 3041 arc_buf_watch(buf); 3042#endif /* illumos */ 3043 3044 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 3045 /* 3046 * Only call arc_access on anonymous buffers. This is because 3047 * if we've issued an I/O for an evicted buffer, we've already 3048 * called arc_access (to prevent any simultaneous readers from 3049 * getting confused). 3050 */ 3051 arc_access(hdr, hash_lock); 3052 } 3053 3054 /* create copies of the data buffer for the callers */ 3055 abuf = buf; 3056 for (acb = callback_list; acb; acb = acb->acb_next) { 3057 if (acb->acb_done) { 3058 if (abuf == NULL) { 3059 ARCSTAT_BUMP(arcstat_duplicate_reads); 3060 abuf = arc_buf_clone(buf); 3061 } 3062 acb->acb_buf = abuf; 3063 abuf = NULL; 3064 } 3065 } 3066 hdr->b_acb = NULL; 3067 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3068 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 3069 if (abuf == buf) { 3070 ASSERT(buf->b_efunc == NULL); 3071 ASSERT(hdr->b_datacnt == 1); 3072 hdr->b_flags |= ARC_BUF_AVAILABLE; 3073 } 3074 3075 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 3076 3077 if (zio->io_error != 0) { 3078 hdr->b_flags |= ARC_IO_ERROR; 3079 if (hdr->b_state != arc_anon) 3080 arc_change_state(arc_anon, hdr, hash_lock); 3081 if (HDR_IN_HASH_TABLE(hdr)) 3082 buf_hash_remove(hdr); 3083 freeable = refcount_is_zero(&hdr->b_refcnt); 3084 } 3085 3086 /* 3087 * Broadcast before we drop the hash_lock to avoid the possibility 3088 * that the hdr (and hence the cv) might be freed before we get to 3089 * the cv_broadcast(). 3090 */ 3091 cv_broadcast(&hdr->b_cv); 3092 3093 if (hash_lock) { 3094 mutex_exit(hash_lock); 3095 } else { 3096 /* 3097 * This block was freed while we waited for the read to 3098 * complete. It has been removed from the hash table and 3099 * moved to the anonymous state (so that it won't show up 3100 * in the cache). 
3101 */ 3102 ASSERT3P(hdr->b_state, ==, arc_anon); 3103 freeable = refcount_is_zero(&hdr->b_refcnt); 3104 } 3105 3106 /* execute each callback and free its structure */ 3107 while ((acb = callback_list) != NULL) { 3108 if (acb->acb_done) 3109 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 3110 3111 if (acb->acb_zio_dummy != NULL) { 3112 acb->acb_zio_dummy->io_error = zio->io_error; 3113 zio_nowait(acb->acb_zio_dummy); 3114 } 3115 3116 callback_list = acb->acb_next; 3117 kmem_free(acb, sizeof (arc_callback_t)); 3118 } 3119 3120 if (freeable) 3121 arc_hdr_destroy(hdr); 3122} 3123 3124/* 3125 * "Read" the block at the specified DVA (in bp) via the 3126 * cache. If the block is found in the cache, invoke the provided 3127 * callback immediately and return. Note that the `zio' parameter 3128 * in the callback will be NULL in this case, since no IO was 3129 * required. If the block is not in the cache pass the read request 3130 * on to the spa with a substitute callback function, so that the 3131 * requested block will be added to the cache. 3132 * 3133 * If a read request arrives for a block that has a read in-progress, 3134 * either wait for the in-progress read to complete (and return the 3135 * results); or, if this is a read with a "done" func, add a record 3136 * to the read to invoke the "done" func when the read completes, 3137 * and return; or just return. 3138 * 3139 * arc_read_done() will invoke all the requested "done" functions 3140 * for readers of this block. 3141 */ 3142int 3143arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 3144 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags, 3145 const zbookmark_t *zb) 3146{ 3147 arc_buf_hdr_t *hdr = NULL; 3148 arc_buf_t *buf = NULL; 3149 kmutex_t *hash_lock = NULL; 3150 zio_t *rzio; 3151 uint64_t guid = spa_load_guid(spa); 3152 3153 ASSERT(!BP_IS_EMBEDDED(bp) || 3154 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 3155 3156top: 3157 if (!BP_IS_EMBEDDED(bp)) { 3158 /* 3159 * Embedded BP's have no DVA and require no I/O to "read", so 3160 * skip the lookup; an anonymous arc buf below will back them. 3161 */ 3162 hdr = buf_hash_find(guid, bp, &hash_lock); 3163 } 3164 3165 if (hdr != NULL && hdr->b_datacnt > 0) { 3166 3167 *arc_flags |= ARC_CACHED; 3168 3169 if (HDR_IO_IN_PROGRESS(hdr)) { 3170 3171 if (*arc_flags & ARC_WAIT) { 3172 cv_wait(&hdr->b_cv, hash_lock); 3173 mutex_exit(hash_lock); 3174 goto top; 3175 } 3176 ASSERT(*arc_flags & ARC_NOWAIT); 3177 3178 if (done) { 3179 arc_callback_t *acb = NULL; 3180 3181 acb = kmem_zalloc(sizeof (arc_callback_t), 3182 KM_SLEEP); 3183 acb->acb_done = done; 3184 acb->acb_private = private; 3185 if (pio != NULL) 3186 acb->acb_zio_dummy = zio_null(pio, 3187 spa, NULL, NULL, NULL, zio_flags); 3188 3189 ASSERT(acb->acb_done != NULL); 3190 acb->acb_next = hdr->b_acb; 3191 hdr->b_acb = acb; 3192 add_reference(hdr, hash_lock, private); 3193 mutex_exit(hash_lock); 3194 return (0); 3195 } 3196 mutex_exit(hash_lock); 3197 return (0); 3198 } 3199 3200 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3201 3202 if (done) { 3203 add_reference(hdr, hash_lock, private); 3204 /* 3205 * If this block is already in use, create a new 3206 * copy of the data so that we will be guaranteed 3207 * that arc_release() will always succeed.
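 * If the header's buffer is unreferenced (ARC_BUF_AVAILABLE is set),
 * it can be handed out as-is once the flag is cleared; otherwise
 * arc_buf_clone() supplies this caller with a private copy.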
3208 */ 3209 buf = hdr->b_buf; 3210 ASSERT(buf); 3211 ASSERT(buf->b_data); 3212 if (HDR_BUF_AVAILABLE(hdr)) { 3213 ASSERT(buf->b_efunc == NULL); 3214 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3215 } else { 3216 buf = arc_buf_clone(buf); 3217 } 3218 3219 } else if (*arc_flags & ARC_PREFETCH && 3220 refcount_count(&hdr->b_refcnt) == 0) { 3221 hdr->b_flags |= ARC_PREFETCH; 3222 } 3223 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 3224 arc_access(hdr, hash_lock); 3225 if (*arc_flags & ARC_L2CACHE) 3226 hdr->b_flags |= ARC_L2CACHE; 3227 if (*arc_flags & ARC_L2COMPRESS) 3228 hdr->b_flags |= ARC_L2COMPRESS; 3229 mutex_exit(hash_lock); 3230 ARCSTAT_BUMP(arcstat_hits); 3231 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3232 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3233 data, metadata, hits); 3234 3235 if (done) 3236 done(NULL, buf, private); 3237 } else { 3238 uint64_t size = BP_GET_LSIZE(bp); 3239 arc_callback_t *acb; 3240 vdev_t *vd = NULL; 3241 uint64_t addr = 0; 3242 boolean_t devw = B_FALSE; 3243 enum zio_compress b_compress = ZIO_COMPRESS_OFF; 3244 uint64_t b_asize = 0; 3245 3246 if (hdr == NULL) { 3247 /* this block is not in the cache */ 3248 arc_buf_hdr_t *exists = NULL; 3249 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 3250 buf = arc_buf_alloc(spa, size, private, type); 3251 hdr = buf->b_hdr; 3252 if (!BP_IS_EMBEDDED(bp)) { 3253 hdr->b_dva = *BP_IDENTITY(bp); 3254 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 3255 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 3256 exists = buf_hash_insert(hdr, &hash_lock); 3257 } 3258 if (exists != NULL) { 3259 /* somebody beat us to the hash insert */ 3260 mutex_exit(hash_lock); 3261 buf_discard_identity(hdr); 3262 (void) arc_buf_remove_ref(buf, private); 3263 goto top; /* restart the IO request */ 3264 } 3265 /* if this is a prefetch, we don't have a reference */ 3266 if (*arc_flags & ARC_PREFETCH) { 3267 (void) remove_reference(hdr, hash_lock, 3268 private); 3269 hdr->b_flags |= ARC_PREFETCH; 3270 } 3271 if (*arc_flags & ARC_L2CACHE) 3272 hdr->b_flags |= ARC_L2CACHE; 3273 if (*arc_flags & ARC_L2COMPRESS) 3274 hdr->b_flags |= ARC_L2COMPRESS; 3275 if (BP_GET_LEVEL(bp) > 0) 3276 hdr->b_flags |= ARC_INDIRECT; 3277 } else { 3278 /* this block is in the ghost cache */ 3279 ASSERT(GHOST_STATE(hdr->b_state)); 3280 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3281 ASSERT0(refcount_count(&hdr->b_refcnt)); 3282 ASSERT(hdr->b_buf == NULL); 3283 3284 /* if this is a prefetch, we don't have a reference */ 3285 if (*arc_flags & ARC_PREFETCH) 3286 hdr->b_flags |= ARC_PREFETCH; 3287 else 3288 add_reference(hdr, hash_lock, private); 3289 if (*arc_flags & ARC_L2CACHE) 3290 hdr->b_flags |= ARC_L2CACHE; 3291 if (*arc_flags & ARC_L2COMPRESS) 3292 hdr->b_flags |= ARC_L2COMPRESS; 3293 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3294 buf->b_hdr = hdr; 3295 buf->b_data = NULL; 3296 buf->b_efunc = NULL; 3297 buf->b_private = NULL; 3298 buf->b_next = NULL; 3299 hdr->b_buf = buf; 3300 ASSERT(hdr->b_datacnt == 0); 3301 hdr->b_datacnt = 1; 3302 arc_get_data_buf(buf); 3303 arc_access(hdr, hash_lock); 3304 } 3305 3306 ASSERT(!GHOST_STATE(hdr->b_state)); 3307 3308 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 3309 acb->acb_done = done; 3310 acb->acb_private = private; 3311 3312 ASSERT(hdr->b_acb == NULL); 3313 hdr->b_acb = acb; 3314 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3315 3316 if (hdr->b_l2hdr != NULL && 3317 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 3318 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 3319 addr = hdr->b_l2hdr->b_daddr; 3320 b_compress = hdr->b_l2hdr->b_compress; 
3321 b_asize = hdr->b_l2hdr->b_asize; 3322 /* 3323 * Lock out device removal. 3324 */ 3325 if (vdev_is_dead(vd) || 3326 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3327 vd = NULL; 3328 } 3329 3330 if (hash_lock != NULL) 3331 mutex_exit(hash_lock); 3332 3333 /* 3334 * At this point, we have a level 1 cache miss. Try again in 3335 * L2ARC if possible. 3336 */ 3337 ASSERT3U(hdr->b_size, ==, size); 3338 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 3339 uint64_t, size, zbookmark_t *, zb); 3340 ARCSTAT_BUMP(arcstat_misses); 3341 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3342 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3343 data, metadata, misses); 3344#ifdef _KERNEL 3345 curthread->td_ru.ru_inblock++; 3346#endif 3347 3348 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3349 /* 3350 * Read from the L2ARC if the following are true: 3351 * 1. The L2ARC vdev was previously cached. 3352 * 2. This buffer still has L2ARC metadata. 3353 * 3. This buffer isn't currently writing to the L2ARC. 3354 * 4. The L2ARC entry wasn't evicted, which may 3355 * also have invalidated the vdev. 3356 * 5. This isn't a prefetch while l2arc_noprefetch is set. 3357 */ 3358 if (hdr->b_l2hdr != NULL && 3359 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3360 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3361 l2arc_read_callback_t *cb; 3362 3363 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3364 ARCSTAT_BUMP(arcstat_l2_hits); 3365 3366 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3367 KM_SLEEP); 3368 cb->l2rcb_buf = buf; 3369 cb->l2rcb_spa = spa; 3370 cb->l2rcb_bp = *bp; 3371 cb->l2rcb_zb = *zb; 3372 cb->l2rcb_flags = zio_flags; 3373 cb->l2rcb_compress = b_compress; 3374 3375 ASSERT(addr >= VDEV_LABEL_START_SIZE && 3376 addr + size < vd->vdev_psize - 3377 VDEV_LABEL_END_SIZE); 3378 3379 /* 3380 * l2arc read. The SCL_L2ARC lock will be 3381 * released by l2arc_read_done(). 3382 * Issue a null zio if the underlying buffer 3383 * was squashed to zero size by compression.
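 * (ZIO_COMPRESS_EMPTY denotes a buffer whose payload compressed away
 * entirely -- e.g. it was all zeroes -- so there is nothing to read
 * from the device; l2arc_read_done() reconstructs the data without
 * any device I/O.)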
3384 */ 3385 if (b_compress == ZIO_COMPRESS_EMPTY) { 3386 rzio = zio_null(pio, spa, vd, 3387 l2arc_read_done, cb, 3388 zio_flags | ZIO_FLAG_DONT_CACHE | 3389 ZIO_FLAG_CANFAIL | 3390 ZIO_FLAG_DONT_PROPAGATE | 3391 ZIO_FLAG_DONT_RETRY); 3392 } else { 3393 rzio = zio_read_phys(pio, vd, addr, 3394 b_asize, buf->b_data, 3395 ZIO_CHECKSUM_OFF, 3396 l2arc_read_done, cb, priority, 3397 zio_flags | ZIO_FLAG_DONT_CACHE | 3398 ZIO_FLAG_CANFAIL | 3399 ZIO_FLAG_DONT_PROPAGATE | 3400 ZIO_FLAG_DONT_RETRY, B_FALSE); 3401 } 3402 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3403 zio_t *, rzio); 3404 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize); 3405 3406 if (*arc_flags & ARC_NOWAIT) { 3407 zio_nowait(rzio); 3408 return (0); 3409 } 3410 3411 ASSERT(*arc_flags & ARC_WAIT); 3412 if (zio_wait(rzio) == 0) 3413 return (0); 3414 3415 /* l2arc read error; goto zio_read() */ 3416 } else { 3417 DTRACE_PROBE1(l2arc__miss, 3418 arc_buf_hdr_t *, hdr); 3419 ARCSTAT_BUMP(arcstat_l2_misses); 3420 if (HDR_L2_WRITING(hdr)) 3421 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3422 spa_config_exit(spa, SCL_L2ARC, vd); 3423 } 3424 } else { 3425 if (vd != NULL) 3426 spa_config_exit(spa, SCL_L2ARC, vd); 3427 if (l2arc_ndev != 0) { 3428 DTRACE_PROBE1(l2arc__miss, 3429 arc_buf_hdr_t *, hdr); 3430 ARCSTAT_BUMP(arcstat_l2_misses); 3431 } 3432 } 3433 3434 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3435 arc_read_done, buf, priority, zio_flags, zb); 3436 3437 if (*arc_flags & ARC_WAIT) 3438 return (zio_wait(rzio)); 3439 3440 ASSERT(*arc_flags & ARC_NOWAIT); 3441 zio_nowait(rzio); 3442 } 3443 return (0); 3444} 3445 3446void 3447arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3448{ 3449 ASSERT(buf->b_hdr != NULL); 3450 ASSERT(buf->b_hdr->b_state != arc_anon); 3451 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3452 ASSERT(buf->b_efunc == NULL); 3453 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3454 3455 buf->b_efunc = func; 3456 buf->b_private = private; 3457} 3458 3459/* 3460 * Notify the arc that a block was freed, and thus will never be used again. 3461 */ 3462void 3463arc_freed(spa_t *spa, const blkptr_t *bp) 3464{ 3465 arc_buf_hdr_t *hdr; 3466 kmutex_t *hash_lock; 3467 uint64_t guid = spa_load_guid(spa); 3468 3469 ASSERT(!BP_IS_EMBEDDED(bp)); 3470 3471 hdr = buf_hash_find(guid, bp, &hash_lock); 3472 if (hdr == NULL) 3473 return; 3474 if (HDR_BUF_AVAILABLE(hdr)) { 3475 arc_buf_t *buf = hdr->b_buf; 3476 add_reference(hdr, hash_lock, FTAG); 3477 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3478 mutex_exit(hash_lock); 3479 3480 arc_release(buf, FTAG); 3481 (void) arc_buf_remove_ref(buf, FTAG); 3482 } else { 3483 mutex_exit(hash_lock); 3484 } 3485 3486} 3487 3488/* 3489 * This is used by the DMU to let the ARC know that a buffer is 3490 * being evicted, so the ARC should clean up. If this arc buf 3491 * is not yet in the evicted state, it will be put there. 3492 */ 3493int 3494arc_buf_evict(arc_buf_t *buf) 3495{ 3496 arc_buf_hdr_t *hdr; 3497 kmutex_t *hash_lock; 3498 arc_buf_t **bufp; 3499 list_t *list, *evicted_list; 3500 kmutex_t *lock, *evicted_lock; 3501 3502 mutex_enter(&buf->b_evict_lock); 3503 hdr = buf->b_hdr; 3504 if (hdr == NULL) { 3505 /* 3506 * We are in arc_do_user_evicts(). 3507 */ 3508 ASSERT(buf->b_data == NULL); 3509 mutex_exit(&buf->b_evict_lock); 3510 return (0); 3511 } else if (buf->b_data == NULL) { 3512 arc_buf_t copy = *buf; /* structure assignment */ 3513 /* 3514 * We are on the eviction list; process this buffer now 3515 * but let arc_do_user_evicts() do the reaping. 
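 * The structure assignment above exists because, once b_evict_lock
 * is dropped, arc_do_user_evicts() may free the original arc_buf_t
 * at any time; the eviction callback is therefore invoked on the
 * stack-local copy.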
3516 */ 3517 buf->b_efunc = NULL; 3518 mutex_exit(&buf->b_evict_lock); 3519 VERIFY(copy.b_efunc(&copy) == 0); 3520 return (1); 3521 } 3522 hash_lock = HDR_LOCK(hdr); 3523 mutex_enter(hash_lock); 3524 hdr = buf->b_hdr; 3525 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3526 3527 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3528 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3529 3530 /* 3531 * Pull this buffer off of the hdr 3532 */ 3533 bufp = &hdr->b_buf; 3534 while (*bufp != buf) 3535 bufp = &(*bufp)->b_next; 3536 *bufp = buf->b_next; 3537 3538 ASSERT(buf->b_data != NULL); 3539 arc_buf_destroy(buf, FALSE, FALSE); 3540 3541 if (hdr->b_datacnt == 0) { 3542 arc_state_t *old_state = hdr->b_state; 3543 arc_state_t *evicted_state; 3544 3545 ASSERT(hdr->b_buf == NULL); 3546 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3547 3548 evicted_state = 3549 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3550 3551 get_buf_info(hdr, old_state, &list, &lock); 3552 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3553 mutex_enter(lock); 3554 mutex_enter(evicted_lock); 3555 3556 arc_change_state(evicted_state, hdr, hash_lock); 3557 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3558 hdr->b_flags |= ARC_IN_HASH_TABLE; 3559 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3560 3561 mutex_exit(evicted_lock); 3562 mutex_exit(lock); 3563 } 3564 mutex_exit(hash_lock); 3565 mutex_exit(&buf->b_evict_lock); 3566 3567 VERIFY(buf->b_efunc(buf) == 0); 3568 buf->b_efunc = NULL; 3569 buf->b_private = NULL; 3570 buf->b_hdr = NULL; 3571 buf->b_next = NULL; 3572 kmem_cache_free(buf_cache, buf); 3573 return (1); 3574} 3575 3576/* 3577 * Release this buffer from the cache, making it an anonymous buffer. This 3578 * must be done after a read and prior to modifying the buffer contents. 3579 * If the buffer has more than one reference, we must make 3580 * a new hdr for the buffer. 3581 */ 3582void 3583arc_release(arc_buf_t *buf, void *tag) 3584{ 3585 arc_buf_hdr_t *hdr; 3586 kmutex_t *hash_lock = NULL; 3587 l2arc_buf_hdr_t *l2hdr; 3588 uint64_t buf_size; 3589 3590 /* 3591 * It would be nice to assert that if it's DMU metadata (level > 3592 * 0 || it's the dnode file), then it must be syncing context. 3593 * But we don't know that information at this level. 3594 */ 3595 3596 mutex_enter(&buf->b_evict_lock); 3597 hdr = buf->b_hdr; 3598 3599 /* this buffer is not on any list */ 3600 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3601 3602 if (hdr->b_state == arc_anon) { 3603 /* this buffer is already released */ 3604 ASSERT(buf->b_efunc == NULL); 3605 } else { 3606 hash_lock = HDR_LOCK(hdr); 3607 mutex_enter(hash_lock); 3608 hdr = buf->b_hdr; 3609 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3610 } 3611 3612 l2hdr = hdr->b_l2hdr; 3613 if (l2hdr) { 3614 mutex_enter(&l2arc_buflist_mtx); 3615 hdr->b_l2hdr = NULL; 3616 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3617 } 3618 buf_size = hdr->b_size; 3619 3620 /* 3621 * Do we have more than one buf? 3622 */ 3623 if (hdr->b_datacnt > 1) { 3624 arc_buf_hdr_t *nhdr; 3625 arc_buf_t **bufp; 3626 uint64_t blksz = hdr->b_size; 3627 uint64_t spa = hdr->b_spa; 3628 arc_buf_contents_t type = hdr->b_type; 3629 uint32_t flags = hdr->b_flags; 3630 3631 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3632 /* 3633 * Pull the data off of this hdr and attach it to 3634 * a new anonymous hdr.
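 * The remaining buffers keep sharing the old header and its on-disk
 * identity; only the released buffer moves to the fresh anonymous
 * header, where it can be dirtied and rewritten under a new identity
 * without disturbing the other references.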
3635 */ 3636 (void) remove_reference(hdr, hash_lock, tag); 3637 bufp = &hdr->b_buf; 3638 while (*bufp != buf) 3639 bufp = &(*bufp)->b_next; 3640 *bufp = buf->b_next; 3641 buf->b_next = NULL; 3642 3643 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3644 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3645 if (refcount_is_zero(&hdr->b_refcnt)) { 3646 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3647 ASSERT3U(*size, >=, hdr->b_size); 3648 atomic_add_64(size, -hdr->b_size); 3649 } 3650 3651 /* 3652 * We're releasing a duplicate user data buffer, update 3653 * our statistics accordingly. 3654 */ 3655 if (hdr->b_type == ARC_BUFC_DATA) { 3656 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 3657 ARCSTAT_INCR(arcstat_duplicate_buffers_size, 3658 -hdr->b_size); 3659 } 3660 hdr->b_datacnt -= 1; 3661 arc_cksum_verify(buf); 3662#ifdef illumos 3663 arc_buf_unwatch(buf); 3664#endif /* illumos */ 3665 3666 mutex_exit(hash_lock); 3667 3668 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 3669 nhdr->b_size = blksz; 3670 nhdr->b_spa = spa; 3671 nhdr->b_type = type; 3672 nhdr->b_buf = buf; 3673 nhdr->b_state = arc_anon; 3674 nhdr->b_arc_access = 0; 3675 nhdr->b_flags = flags & ARC_L2_WRITING; 3676 nhdr->b_l2hdr = NULL; 3677 nhdr->b_datacnt = 1; 3678 nhdr->b_freeze_cksum = NULL; 3679 (void) refcount_add(&nhdr->b_refcnt, tag); 3680 buf->b_hdr = nhdr; 3681 mutex_exit(&buf->b_evict_lock); 3682 atomic_add_64(&arc_anon->arcs_size, blksz); 3683 } else { 3684 mutex_exit(&buf->b_evict_lock); 3685 ASSERT(refcount_count(&hdr->b_refcnt) == 1); 3686 ASSERT(!list_link_active(&hdr->b_arc_node)); 3687 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3688 if (hdr->b_state != arc_anon) 3689 arc_change_state(arc_anon, hdr, hash_lock); 3690 hdr->b_arc_access = 0; 3691 if (hash_lock) 3692 mutex_exit(hash_lock); 3693 3694 buf_discard_identity(hdr); 3695 arc_buf_thaw(buf); 3696 } 3697 buf->b_efunc = NULL; 3698 buf->b_private = NULL; 3699 3700 if (l2hdr) { 3701 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize); 3702 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr, 3703 hdr->b_size, 0); 3704 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3705 ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3706 mutex_exit(&l2arc_buflist_mtx); 3707 } 3708} 3709 3710int 3711arc_released(arc_buf_t *buf) 3712{ 3713 int released; 3714 3715 mutex_enter(&buf->b_evict_lock); 3716 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3717 mutex_exit(&buf->b_evict_lock); 3718 return (released); 3719} 3720 3721int 3722arc_has_callback(arc_buf_t *buf) 3723{ 3724 int callback; 3725 3726 mutex_enter(&buf->b_evict_lock); 3727 callback = (buf->b_efunc != NULL); 3728 mutex_exit(&buf->b_evict_lock); 3729 return (callback); 3730} 3731 3732#ifdef ZFS_DEBUG 3733int 3734arc_referenced(arc_buf_t *buf) 3735{ 3736 int referenced; 3737 3738 mutex_enter(&buf->b_evict_lock); 3739 referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3740 mutex_exit(&buf->b_evict_lock); 3741 return (referenced); 3742} 3743#endif 3744 3745static void 3746arc_write_ready(zio_t *zio) 3747{ 3748 arc_write_callback_t *callback = zio->io_private; 3749 arc_buf_t *buf = callback->awcb_buf; 3750 arc_buf_hdr_t *hdr = buf->b_hdr; 3751 3752 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3753 callback->awcb_ready(zio, buf, callback->awcb_private); 3754 3755 /* 3756 * If the IO is already in progress, then this is a re-write 3757 * attempt, so we need to thaw and re-compute the cksum. 3758 * It is the responsibility of the callback to handle the 3759 * accounting for any re-write attempt. 
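 * (Discarding b_freeze_cksum here is what "thaws" the buffer; the
 * arc_cksum_compute() call below then re-freezes it over the new
 * contents.)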
3760 */ 3761 if (HDR_IO_IN_PROGRESS(hdr)) { 3762 mutex_enter(&hdr->b_freeze_lock); 3763 if (hdr->b_freeze_cksum != NULL) { 3764 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3765 hdr->b_freeze_cksum = NULL; 3766 } 3767 mutex_exit(&hdr->b_freeze_lock); 3768 } 3769 arc_cksum_compute(buf, B_FALSE); 3770 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3771} 3772 3773/* 3774 * The SPA calls this callback for each physical write that happens on behalf 3775 * of a logical write. See the comment in dbuf_write_physdone() for details. 3776 */ 3777static void 3778arc_write_physdone(zio_t *zio) 3779{ 3780 arc_write_callback_t *cb = zio->io_private; 3781 if (cb->awcb_physdone != NULL) 3782 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 3783} 3784 3785static void 3786arc_write_done(zio_t *zio) 3787{ 3788 arc_write_callback_t *callback = zio->io_private; 3789 arc_buf_t *buf = callback->awcb_buf; 3790 arc_buf_hdr_t *hdr = buf->b_hdr; 3791 3792 ASSERT(hdr->b_acb == NULL); 3793 3794 if (zio->io_error == 0) { 3795 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 3796 buf_discard_identity(hdr); 3797 } else { 3798 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3799 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 3800 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3801 } 3802 } else { 3803 ASSERT(BUF_EMPTY(hdr)); 3804 } 3805 3806 /* 3807 * If the block to be written was all-zero or compressed enough to be 3808 * embedded in the BP, no write was performed so there will be no 3809 * dva/birth/checksum. The buffer must therefore remain anonymous 3810 * (and uncached). 3811 */ 3812 if (!BUF_EMPTY(hdr)) { 3813 arc_buf_hdr_t *exists; 3814 kmutex_t *hash_lock; 3815 3816 ASSERT(zio->io_error == 0); 3817 3818 arc_cksum_verify(buf); 3819 3820 exists = buf_hash_insert(hdr, &hash_lock); 3821 if (exists) { 3822 /* 3823 * This can only happen if we overwrite for 3824 * sync-to-convergence, because we remove 3825 * buffers from the hash table when we arc_free(). 
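 * (That is, a block rewritten in place during sync can produce a
 * header whose DVA and birth txg hash to an existing entry; the stale
 * entry is destroyed below and the insert is then retried.)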
3826 */ 3827 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 3828 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3829 panic("bad overwrite, hdr=%p exists=%p", 3830 (void *)hdr, (void *)exists); 3831 ASSERT(refcount_is_zero(&exists->b_refcnt)); 3832 arc_change_state(arc_anon, exists, hash_lock); 3833 mutex_exit(hash_lock); 3834 arc_hdr_destroy(exists); 3835 exists = buf_hash_insert(hdr, &hash_lock); 3836 ASSERT3P(exists, ==, NULL); 3837 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 3838 /* nopwrite */ 3839 ASSERT(zio->io_prop.zp_nopwrite); 3840 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 3841 panic("bad nopwrite, hdr=%p exists=%p", 3842 (void *)hdr, (void *)exists); 3843 } else { 3844 /* Dedup */ 3845 ASSERT(hdr->b_datacnt == 1); 3846 ASSERT(hdr->b_state == arc_anon); 3847 ASSERT(BP_GET_DEDUP(zio->io_bp)); 3848 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 3849 } 3850 } 3851 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3852 /* if it's not anon, we are doing a scrub */ 3853 if (!exists && hdr->b_state == arc_anon) 3854 arc_access(hdr, hash_lock); 3855 mutex_exit(hash_lock); 3856 } else { 3857 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3858 } 3859 3860 ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3861 callback->awcb_done(zio, buf, callback->awcb_private); 3862 3863 kmem_free(callback, sizeof (arc_write_callback_t)); 3864} 3865 3866zio_t * 3867arc_write(zio_t *pio, spa_t *spa, uint64_t txg, 3868 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress, 3869 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone, 3870 arc_done_func_t *done, void *private, zio_priority_t priority, 3871 int zio_flags, const zbookmark_t *zb) 3872{ 3873 arc_buf_hdr_t *hdr = buf->b_hdr; 3874 arc_write_callback_t *callback; 3875 zio_t *zio; 3876 3877 ASSERT(ready != NULL); 3878 ASSERT(done != NULL); 3879 ASSERT(!HDR_IO_ERROR(hdr)); 3880 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3881 ASSERT(hdr->b_acb == NULL); 3882 if (l2arc) 3883 hdr->b_flags |= ARC_L2CACHE; 3884 if (l2arc_compress) 3885 hdr->b_flags |= ARC_L2COMPRESS; 3886 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3887 callback->awcb_ready = ready; 3888 callback->awcb_physdone = physdone; 3889 callback->awcb_done = done; 3890 callback->awcb_private = private; 3891 callback->awcb_buf = buf; 3892 3893 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp, 3894 arc_write_ready, arc_write_physdone, arc_write_done, callback, 3895 priority, zio_flags, zb); 3896 3897 return (zio); 3898} 3899 3900static int 3901arc_memory_throttle(uint64_t reserve, uint64_t txg) 3902{ 3903#ifdef _KERNEL 3904 uint64_t available_memory = 3905 ptoa((uintmax_t)vm_cnt.v_free_count + vm_cnt.v_cache_count); 3906 static uint64_t page_load = 0; 3907 static uint64_t last_txg = 0; 3908 3909#ifdef sun 3910#if defined(__i386) 3911 available_memory = 3912 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3913#endif 3914#endif /* sun */ 3915 3916 if (vm_cnt.v_free_count + vm_cnt.v_cache_count > 3917 (uint64_t)physmem * arc_lotsfree_percent / 100) 3918 return (0); 3919 3920 if (txg > last_txg) { 3921 last_txg = txg; 3922 page_load = 0; 3923 } 3924 /* 3925 * If we are in pageout, we know that memory is already tight, 3926 * the arc is already going to be evicting, so we just want to 3927 * continue to let page writes occur as quickly as possible. 
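 * (For example, with 1GB of available memory the pageout process is
 * only throttled once its deflated page_load exceeds 256MB, i.e. the
 * available_memory / 4 test below; any other thread gets EAGAIN as
 * soon as page_load is non-zero while arc_reclaim_needed() is true.)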
3928 */ 3929 if (curproc == pageproc) { 3930 if (page_load > available_memory / 4) 3931 return (SET_ERROR(ERESTART)); 3932 /* Note: reserve is inflated, so we deflate */ 3933 page_load += reserve / 8; 3934 return (0); 3935 } else if (page_load > 0 && arc_reclaim_needed()) { 3936 /* memory is low, delay before restarting */ 3937 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3938 return (SET_ERROR(EAGAIN)); 3939 } 3940 page_load = 0; 3941#endif 3942 return (0); 3943} 3944 3945void 3946arc_tempreserve_clear(uint64_t reserve) 3947{ 3948 atomic_add_64(&arc_tempreserve, -reserve); 3949 ASSERT((int64_t)arc_tempreserve >= 0); 3950} 3951 3952int 3953arc_tempreserve_space(uint64_t reserve, uint64_t txg) 3954{ 3955 int error; 3956 uint64_t anon_size; 3957 3958 if (reserve > arc_c/4 && !arc_no_grow) 3959 arc_c = MIN(arc_c_max, reserve * 4); 3960 if (reserve > arc_c) 3961 return (SET_ERROR(ENOMEM)); 3962 3963 /* 3964 * Don't count loaned bufs as in flight dirty data to prevent long 3965 * network delays from blocking transactions that are ready to be 3966 * assigned to a txg. 3967 */ 3968 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0); 3969 3970 /* 3971 * Writes will, almost always, require additional memory allocations 3972 * in order to compress/encrypt/etc the data. We therefore need to 3973 * make sure that there is sufficient available memory for this. 3974 */ 3975 error = arc_memory_throttle(reserve, txg); 3976 if (error != 0) 3977 return (error); 3978 3979 /* 3980 * Throttle writes when the amount of dirty data in the cache 3981 * gets too large. We try to keep the cache less than half full 3982 * of dirty blocks so that our sync times don't grow too large. 3983 * Note: if two requests come in concurrently, we might let them 3984 * both succeed, when one of them should fail. Not a huge deal. 3985 */ 3986 3987 if (reserve + arc_tempreserve + anon_size > arc_c / 2 && 3988 anon_size > arc_c / 4) { 3989 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3990 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3991 arc_tempreserve>>10, 3992 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3993 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3994 reserve>>10, arc_c>>10); 3995 return (SET_ERROR(ERESTART)); 3996 } 3997 atomic_add_64(&arc_tempreserve, reserve); 3998 return (0); 3999} 4000 4001static kmutex_t arc_lowmem_lock; 4002#ifdef _KERNEL 4003static eventhandler_tag arc_event_lowmem = NULL; 4004 4005static void 4006arc_lowmem(void *arg __unused, int howto __unused) 4007{ 4008 4009 /* Serialize access via arc_lowmem_lock. */ 4010 mutex_enter(&arc_lowmem_lock); 4011 mutex_enter(&arc_reclaim_thr_lock); 4012 needfree = 1; 4013 cv_signal(&arc_reclaim_thr_cv); 4014 4015 /* 4016 * It is unsafe to block here in arbitrary threads, because we can come 4017 * here from ARC itself and may hold ARC locks and thus risk a deadlock 4018 * with ARC reclaim thread. 
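 * (Hence only the pageout process blocks below, waiting for the
 * reclaim thread to clear needfree; all other threads simply signal
 * the reclaim thread and return.)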
4019 */ 4020 if (curproc == pageproc) { 4021 while (needfree) 4022 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0); 4023 } 4024 mutex_exit(&arc_reclaim_thr_lock); 4025 mutex_exit(&arc_lowmem_lock); 4026} 4027#endif 4028 4029void 4030arc_init(void) 4031{ 4032 int i, prefetch_tunable_set = 0; 4033 4034 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4035 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 4036 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 4037 4038 /* Convert seconds to clock ticks */ 4039 arc_min_prefetch_lifespan = 1 * hz; 4040 4041 /* Start out with 1/8 of all memory */ 4042 arc_c = kmem_size() / 8; 4043 4044#ifdef sun 4045#ifdef _KERNEL 4046 /* 4047 * On architectures where the physical memory can be larger 4048 * than the addressable space (Intel in 32-bit mode), we may 4049 * need to limit the cache to 1/8 of VM size. 4050 */ 4051 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 4052#endif 4053#endif /* sun */ 4054 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 4055 arc_c_min = MAX(arc_c / 4, 64<<18); 4056 /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 4057 if (arc_c * 8 >= 1<<30) 4058 arc_c_max = (arc_c * 8) - (1<<30); 4059 else 4060 arc_c_max = arc_c_min; 4061 arc_c_max = MAX(arc_c * 5, arc_c_max); 4062 4063#ifdef _KERNEL 4064 /* 4065 * Allow the tunables to override our calculations if they are 4066 * reasonable (i.e. over 16MB) 4067 */ 4068 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size()) 4069 arc_c_max = zfs_arc_max; 4070 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max) 4071 arc_c_min = zfs_arc_min; 4072#endif 4073 4074 arc_c = arc_c_max; 4075 arc_p = (arc_c >> 1); 4076 4077 /* limit meta-data to 1/4 of the arc capacity */ 4078 arc_meta_limit = arc_c_max / 4; 4079 4080 /* Allow the tunable to override if it is reasonable */ 4081 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 4082 arc_meta_limit = zfs_arc_meta_limit; 4083 4084 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 4085 arc_c_min = arc_meta_limit / 2; 4086 4087 if (zfs_arc_grow_retry > 0) 4088 arc_grow_retry = zfs_arc_grow_retry; 4089 4090 if (zfs_arc_shrink_shift > 0) 4091 arc_shrink_shift = zfs_arc_shrink_shift; 4092 4093 if (zfs_arc_p_min_shift > 0) 4094 arc_p_min_shift = zfs_arc_p_min_shift; 4095 4096 /* if kmem_flags are set, let's try to use less memory */ 4097 if (kmem_debugging()) 4098 arc_c = arc_c / 2; 4099 if (arc_c < arc_c_min) 4100 arc_c = arc_c_min; 4101 4102 zfs_arc_min = arc_c_min; 4103 zfs_arc_max = arc_c_max; 4104 4105 arc_anon = &ARC_anon; 4106 arc_mru = &ARC_mru; 4107 arc_mru_ghost = &ARC_mru_ghost; 4108 arc_mfu = &ARC_mfu; 4109 arc_mfu_ghost = &ARC_mfu_ghost; 4110 arc_l2c_only = &ARC_l2c_only; 4111 arc_size = 0; 4112 4113 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 4114 mutex_init(&arc_anon->arcs_locks[i].arcs_lock, 4115 NULL, MUTEX_DEFAULT, NULL); 4116 mutex_init(&arc_mru->arcs_locks[i].arcs_lock, 4117 NULL, MUTEX_DEFAULT, NULL); 4118 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock, 4119 NULL, MUTEX_DEFAULT, NULL); 4120 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock, 4121 NULL, MUTEX_DEFAULT, NULL); 4122 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock, 4123 NULL, MUTEX_DEFAULT, NULL); 4124 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock, 4125 NULL, MUTEX_DEFAULT, NULL); 4126 4127 list_create(&arc_mru->arcs_lists[i], 4128 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4129 list_create(&arc_mru_ghost->arcs_lists[i], 4130 sizeof
(arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4131 list_create(&arc_mfu->arcs_lists[i], 4132 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4133 list_create(&arc_mfu_ghost->arcs_lists[i], 4134 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4137 list_create(&arc_l2c_only->arcs_lists[i], 4138 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 4139 } 4140 4141 buf_init(); 4142 4143 arc_thread_exit = 0; 4144 arc_eviction_list = NULL; 4145 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 4146 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 4147 4148 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 4149 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 4150 4151 if (arc_ksp != NULL) { 4152 arc_ksp->ks_data = &arc_stats; 4153 kstat_install(arc_ksp); 4154 } 4155 4156 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 4157 TS_RUN, minclsyspri); 4158 4159#ifdef _KERNEL 4160 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 4161 EVENTHANDLER_PRI_FIRST); 4162#endif 4163 4164 arc_dead = FALSE; 4165 arc_warm = B_FALSE; 4166 4167 /* 4168 * Calculate maximum amount of dirty data per pool. 4169 * 4170 * If it has been set by /etc/system, take that. 4171 * Otherwise, use a percentage of physical memory defined by 4172 * zfs_dirty_data_max_percent (default 10%) with a cap at 4173 * zfs_dirty_data_max_max (default 4GB). 4174 */ 4175 if (zfs_dirty_data_max == 0) { 4176 zfs_dirty_data_max = ptob(physmem) * 4177 zfs_dirty_data_max_percent / 100; 4178 zfs_dirty_data_max = MIN(zfs_dirty_data_max, 4179 zfs_dirty_data_max_max); 4180 } 4181 4182#ifdef _KERNEL 4183 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 4184 prefetch_tunable_set = 1; 4185 4186#ifdef __i386__ 4187 if (prefetch_tunable_set == 0) { 4188 printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 4189 "-- to enable,\n"); 4190 printf(" add \"vfs.zfs.prefetch_disable=0\" " 4191 "to /boot/loader.conf.\n"); 4192 zfs_prefetch_disable = 1; 4193 } 4194#else 4195 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 4196 prefetch_tunable_set == 0) { 4197 printf("ZFS NOTICE: Prefetch is disabled by default if less " 4198 "than 4GB of RAM is present;\n" 4199 " to enable, add \"vfs.zfs.prefetch_disable=0\" " 4200 "to /boot/loader.conf.\n"); 4201 zfs_prefetch_disable = 1; 4202 } 4203#endif 4204 /* Warn about ZFS memory and address space requirements.
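 * (The physical-memory test below sums 256MB + 128MB + 64MB = 448MB,
 * just under the 512MB recommended in the warning message.)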
*/ 4205 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 4206 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 4207 "expect unstable behavior.\n"); 4208 } 4209 if (kmem_size() < 512 * (1 << 20)) { 4210 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 4211 "expect unstable behavior.\n"); 4212 printf(" Consider tuning vm.kmem_size and " 4213 "vm.kmem_size_max\n"); 4214 printf(" in /boot/loader.conf.\n"); 4215 } 4216#endif 4217} 4218 4219void 4220arc_fini(void) 4221{ 4222 int i; 4223 4224 mutex_enter(&arc_reclaim_thr_lock); 4225 arc_thread_exit = 1; 4226 cv_signal(&arc_reclaim_thr_cv); 4227 while (arc_thread_exit != 0) 4228 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 4229 mutex_exit(&arc_reclaim_thr_lock); 4230 4231 arc_flush(NULL); 4232 4233 arc_dead = TRUE; 4234 4235 if (arc_ksp != NULL) { 4236 kstat_delete(arc_ksp); 4237 arc_ksp = NULL; 4238 } 4239 4240 mutex_destroy(&arc_eviction_mtx); 4241 mutex_destroy(&arc_reclaim_thr_lock); 4242 cv_destroy(&arc_reclaim_thr_cv); 4243 4244 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) { 4245 list_destroy(&arc_mru->arcs_lists[i]); 4246 list_destroy(&arc_mru_ghost->arcs_lists[i]); 4247 list_destroy(&arc_mfu->arcs_lists[i]); 4248 list_destroy(&arc_mfu_ghost->arcs_lists[i]); 4249 list_destroy(&arc_l2c_only->arcs_lists[i]); 4250 4251 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock); 4252 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock); 4253 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock); 4254 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock); 4255 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock); 4256 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock); 4257 } 4258 4259 buf_fini(); 4260 4261 ASSERT(arc_loaned_bytes == 0); 4262 4263 mutex_destroy(&arc_lowmem_lock); 4264#ifdef _KERNEL 4265 if (arc_event_lowmem != NULL) 4266 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 4267#endif 4268} 4269 4270/* 4271 * Level 2 ARC 4272 * 4273 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 4274 * It uses dedicated storage devices to hold cached data, which are populated 4275 * using large infrequent writes. The main role of this cache is to boost 4276 * the performance of random read workloads. The intended L2ARC devices 4277 * include short-stroked disks, solid state disks, and other media with 4278 * substantially faster read latency than disk. 4279 * 4280 * +-----------------------+ 4281 * | ARC | 4282 * +-----------------------+ 4283 * | ^ ^ 4284 * | | | 4285 * l2arc_feed_thread() arc_read() 4286 * | | | 4287 * | l2arc read | 4288 * V | | 4289 * +---------------+ | 4290 * | L2ARC | | 4291 * +---------------+ | 4292 * | ^ | 4293 * l2arc_write() | | 4294 * | | | 4295 * V | | 4296 * +-------+ +-------+ 4297 * | vdev | | vdev | 4298 * | cache | | cache | 4299 * +-------+ +-------+ 4300 * +=========+ .-----. 4301 * : L2ARC : |-_____-| 4302 * : devices : | Disks | 4303 * +=========+ `-_____-' 4304 * 4305 * Read requests are satisfied from the following sources, in order: 4306 * 4307 * 1) ARC 4308 * 2) vdev cache of L2ARC devices 4309 * 3) L2ARC devices 4310 * 4) vdev cache of disks 4311 * 5) disks 4312 * 4313 * Some L2ARC device types exhibit extremely slow write performance. 4314 * To accommodate for this there are some significant differences between 4315 * the L2ARC and traditional cache design: 4316 * 4317 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 4318 * the ARC behave as usual, freeing buffers and placing headers on ghost 4319 * lists. 
The ARC does not send buffers to the L2ARC during eviction as 4320 * this would add inflated write latencies for all ARC memory pressure. 4321 * 4322 * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 4323 * It does this by periodically scanning buffers from the eviction-end of 4324 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 4325 * not already there. It scans until a headroom of buffers is satisfied, 4326 * which itself is a buffer for ARC eviction. If a compressible buffer is 4327 * found during scanning and selected for writing to an L2ARC device, we 4328 * temporarily boost scanning headroom during the next scan cycle to make 4329 * sure we adapt to compression effects (which might significantly reduce 4330 * the data volume we write to L2ARC). The thread that does this is 4331 * l2arc_feed_thread(), illustrated below; example sizes are included to 4332 * provide a better sense of ratio than this diagram: 4333 * 4334 * head --> tail 4335 * +---------------------+----------+ 4336 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 4337 * +---------------------+----------+ | o L2ARC eligible 4338 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 4339 * +---------------------+----------+ | 4340 * 15.9 Gbytes ^ 32 Mbytes | 4341 * headroom | 4342 * l2arc_feed_thread() 4343 * | 4344 * l2arc write hand <--[oooo]--' 4345 * | 8 Mbyte 4346 * | write max 4347 * V 4348 * +==============================+ 4349 * L2ARC dev |####|#|###|###| |####| ... | 4350 * +==============================+ 4351 * 32 Gbytes 4352 * 4353 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 4354 * evicted, then the L2ARC has cached a buffer much sooner than it probably 4355 * needed to, potentially wasting L2ARC device bandwidth and storage. It is 4356 * safe to say that this is an uncommon case, since buffers at the end of 4357 * the ARC lists have moved there due to inactivity. 4358 * 4359 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 4360 * then the L2ARC simply misses copying some buffers. This serves as a 4361 * pressure valve to prevent heavy read workloads from both stalling the ARC 4362 * with waits and clogging the L2ARC with writes. This also helps prevent 4363 * the potential for the L2ARC to churn if it attempts to cache content too 4364 * quickly, such as during backups of the entire pool. 4365 * 4366 * 5. After system boot and before the ARC has filled main memory, there are 4367 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 4368 * lists can remain mostly static. Instead of searching from tail of these 4369 * lists as pictured, the l2arc_feed_thread() will search from the list heads 4370 * for eligible buffers, greatly increasing its chance of finding them. 4371 * 4372 * The L2ARC device write speed is also boosted during this time so that 4373 * the L2ARC warms up faster. Since there have been no ARC evictions yet, 4374 * there are no L2ARC reads, and no fear of degrading read performance 4375 * through increased writes. 4376 * 4377 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 4378 * the vdev queue can aggregate them into larger and fewer writes. Each 4379 * device is written to in a rotor fashion, sweeping writes through 4380 * available space then repeating. 4381 * 4382 * 7. The L2ARC does not store dirty content. It never needs to flush 4383 * write buffers back to disk based storage. 4384 * 4385 * 8. 
If an ARC buffer is written (and dirtied) which also exists in the 4386 * L2ARC, the now stale L2ARC buffer is immediately dropped. 4387 * 4388 * The performance of the L2ARC can be tweaked by a number of tunables, which 4389 * may be necessary for different workloads: 4390 * 4391 * l2arc_write_max max write bytes per interval 4392 * l2arc_write_boost extra write bytes during device warmup 4393 * l2arc_noprefetch skip caching prefetched buffers 4394 * l2arc_headroom number of max device writes to precache 4395 * l2arc_headroom_boost when we find compressed buffers during ARC 4396 * scanning, we multiply headroom by this 4397 * percentage factor for the next scan cycle, 4398 * since more compressed buffers are likely to 4399 * be present 4400 * l2arc_feed_secs seconds between L2ARC writing 4401 * 4402 * Tunables may be removed or added as future performance improvements are 4403 * integrated, and also may become zpool properties. 4404 * 4405 * There are three key functions that control how the L2ARC warms up: 4406 * 4407 * l2arc_write_eligible() check if a buffer is eligible to cache 4408 * l2arc_write_size() calculate how much to write 4409 * l2arc_write_interval() calculate sleep delay between writes 4410 * 4411 * These three functions determine what to write, how much, and how quickly 4412 * to send writes. 4413 */ 4414 4415static boolean_t 4416l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab) 4417{ 4418 /* 4419 * A buffer is *not* eligible for the L2ARC if it: 4420 * 1. belongs to a different spa. 4421 * 2. is already cached on the L2ARC. 4422 * 3. has an I/O in progress (it may be an incomplete read). 4423 * 4. is flagged not eligible (zfs property). 4424 */ 4425 if (ab->b_spa != spa_guid) { 4426 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch); 4427 return (B_FALSE); 4428 } 4429 if (ab->b_l2hdr != NULL) { 4430 ARCSTAT_BUMP(arcstat_l2_write_in_l2); 4431 return (B_FALSE); 4432 } 4433 if (HDR_IO_IN_PROGRESS(ab)) { 4434 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress); 4435 return (B_FALSE); 4436 } 4437 if (!HDR_L2CACHE(ab)) { 4438 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable); 4439 return (B_FALSE); 4440 } 4441 4442 return (B_TRUE); 4443} 4444 4445static uint64_t 4446l2arc_write_size(void) 4447{ 4448 uint64_t size; 4449 4450 /* 4451 * Make sure our globals have meaningful values in case the user 4452 * altered them. 4453 */ 4454 size = l2arc_write_max; 4455 if (size == 0) { 4456 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must " 4457 "be greater than zero, resetting it to the default (%d)", 4458 L2ARC_WRITE_SIZE); 4459 size = l2arc_write_max = L2ARC_WRITE_SIZE; 4460 } 4461 4462 if (arc_warm == B_FALSE) 4463 size += l2arc_write_boost; 4464 4465 return (size); 4466 4467} 4468 4469static clock_t 4470l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote) 4471{ 4472 clock_t interval, next, now; 4473 4474 /* 4475 * If the ARC lists are busy, increase our write rate; if the 4476 * lists are stale, idle back. This is achieved by checking 4477 * how much we previously wrote - if it was more than half of 4478 * what we wanted, schedule the next write much sooner. 
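 * For example, assuming hz = 1000, l2arc_feed_secs = 1 and
 * l2arc_feed_min_ms = 200 (example values only): writing more than
 * half of the wanted bytes gives an interval of (1000 * 200) / 1000 =
 * 200 ticks instead of the full 1000; the deadline below is then
 * clamped so it never falls in the past nor more than one interval
 * past 'began'.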
4479 */ 4480 if (l2arc_feed_again && wrote > (wanted / 2)) 4481 interval = (hz * l2arc_feed_min_ms) / 1000; 4482 else 4483 interval = hz * l2arc_feed_secs; 4484 4485 now = ddi_get_lbolt(); 4486 next = MAX(now, MIN(now + interval, began + interval)); 4487 4488 return (next); 4489} 4490 4491static void 4492l2arc_hdr_stat_add(void) 4493{ 4494 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 4495 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 4496} 4497 4498static void 4499l2arc_hdr_stat_remove(void) 4500{ 4501 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 4502 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 4503} 4504 4505/* 4506 * Cycle through L2ARC devices. This is how L2ARC load balances. 4507 * If a device is returned, this also returns holding the spa config lock. 4508 */ 4509static l2arc_dev_t * 4510l2arc_dev_get_next(void) 4511{ 4512 l2arc_dev_t *first, *next = NULL; 4513 4514 /* 4515 * Lock out the removal of spas (spa_namespace_lock), then removal 4516 * of cache devices (l2arc_dev_mtx). Once a device has been selected, 4517 * both locks will be dropped and a spa config lock held instead. 4518 */ 4519 mutex_enter(&spa_namespace_lock); 4520 mutex_enter(&l2arc_dev_mtx); 4521 4522 /* if there are no vdevs, there is nothing to do */ 4523 if (l2arc_ndev == 0) 4524 goto out; 4525 4526 first = NULL; 4527 next = l2arc_dev_last; 4528 do { 4529 /* loop around the list looking for a non-faulted vdev */ 4530 if (next == NULL) { 4531 next = list_head(l2arc_dev_list); 4532 } else { 4533 next = list_next(l2arc_dev_list, next); 4534 if (next == NULL) 4535 next = list_head(l2arc_dev_list); 4536 } 4537 4538 /* if we have come back to the start, bail out */ 4539 if (first == NULL) 4540 first = next; 4541 else if (next == first) 4542 break; 4543 4544 } while (vdev_is_dead(next->l2ad_vdev)); 4545 4546 /* if we were unable to find any usable vdevs, return NULL */ 4547 if (vdev_is_dead(next->l2ad_vdev)) 4548 next = NULL; 4549 4550 l2arc_dev_last = next; 4551 4552out: 4553 mutex_exit(&l2arc_dev_mtx); 4554 4555 /* 4556 * Grab the config lock to prevent the 'next' device from being 4557 * removed while we are writing to it. 4558 */ 4559 if (next != NULL) 4560 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 4561 mutex_exit(&spa_namespace_lock); 4562 4563 return (next); 4564} 4565 4566/* 4567 * Free buffers that were tagged for destruction. 4568 */ 4569static void 4570l2arc_do_free_on_write() 4571{ 4572 list_t *buflist; 4573 l2arc_data_free_t *df, *df_prev; 4574 4575 mutex_enter(&l2arc_free_on_write_mtx); 4576 buflist = l2arc_free_on_write; 4577 4578 for (df = list_tail(buflist); df; df = df_prev) { 4579 df_prev = list_prev(buflist, df); 4580 ASSERT(df->l2df_data != NULL); 4581 ASSERT(df->l2df_func != NULL); 4582 df->l2df_func(df->l2df_data, df->l2df_size); 4583 list_remove(buflist, df); 4584 kmem_free(df, sizeof (l2arc_data_free_t)); 4585 } 4586 4587 mutex_exit(&l2arc_free_on_write_mtx); 4588} 4589 4590/* 4591 * A write to a cache device has completed. Update all headers to allow 4592 * reads from these buffers to begin. 
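 * (Headers whose hash lock cannot be taken below keep ARC_L2_WRITING
 * set and thus remain unreadable from L2ARC; data buffers freed while
 * the write was in flight are released afterwards through
 * l2arc_do_free_on_write().)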
4593 */ 4594static void 4595l2arc_write_done(zio_t *zio) 4596{ 4597 l2arc_write_callback_t *cb; 4598 l2arc_dev_t *dev; 4599 list_t *buflist; 4600 arc_buf_hdr_t *head, *ab, *ab_prev; 4601 l2arc_buf_hdr_t *abl2; 4602 kmutex_t *hash_lock; 4603 4604 cb = zio->io_private; 4605 ASSERT(cb != NULL); 4606 dev = cb->l2wcb_dev; 4607 ASSERT(dev != NULL); 4608 head = cb->l2wcb_head; 4609 ASSERT(head != NULL); 4610 buflist = dev->l2ad_buflist; 4611 ASSERT(buflist != NULL); 4612 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 4613 l2arc_write_callback_t *, cb); 4614 4615 if (zio->io_error != 0) 4616 ARCSTAT_BUMP(arcstat_l2_writes_error); 4617 4618 mutex_enter(&l2arc_buflist_mtx); 4619 4620 /* 4621 * All writes completed, or an error was hit. 4622 */ 4623 for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 4624 ab_prev = list_prev(buflist, ab); 4625 abl2 = ab->b_l2hdr; 4626 4627 /* 4628 * Release the temporary compressed buffer as soon as possible. 4629 */ 4630 if (abl2->b_compress != ZIO_COMPRESS_OFF) 4631 l2arc_release_cdata_buf(ab); 4632 4633 hash_lock = HDR_LOCK(ab); 4634 if (!mutex_tryenter(hash_lock)) { 4635 /* 4636 * This buffer misses out. It may be in a stage 4637 * of eviction. Its ARC_L2_WRITING flag will be 4638 * left set, denying reads to this buffer. 4639 */ 4640 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 4641 continue; 4642 } 4643 4644 if (zio->io_error != 0) { 4645 /* 4646 * Error - drop L2ARC entry. 4647 */ 4648 list_remove(buflist, ab); 4649 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 4650 ab->b_l2hdr = NULL; 4651 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr, 4652 ab->b_size, 0); 4653 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4654 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4655 } 4656 4657 /* 4658 * Allow ARC to begin reads to this L2ARC entry. 4659 */ 4660 ab->b_flags &= ~ARC_L2_WRITING; 4661 4662 mutex_exit(hash_lock); 4663 } 4664 4665 atomic_inc_64(&l2arc_writes_done); 4666 list_remove(buflist, head); 4667 kmem_cache_free(hdr_cache, head); 4668 mutex_exit(&l2arc_buflist_mtx); 4669 4670 l2arc_do_free_on_write(); 4671 4672 kmem_free(cb, sizeof (l2arc_write_callback_t)); 4673} 4674 4675/* 4676 * A read to a cache device completed. Validate buffer contents before 4677 * handing over to the regular ARC routines. 4678 */ 4679static void 4680l2arc_read_done(zio_t *zio) 4681{ 4682 l2arc_read_callback_t *cb; 4683 arc_buf_hdr_t *hdr; 4684 arc_buf_t *buf; 4685 kmutex_t *hash_lock; 4686 int equal; 4687 4688 ASSERT(zio->io_vd != NULL); 4689 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 4690 4691 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 4692 4693 cb = zio->io_private; 4694 ASSERT(cb != NULL); 4695 buf = cb->l2rcb_buf; 4696 ASSERT(buf != NULL); 4697 4698 hash_lock = HDR_LOCK(buf->b_hdr); 4699 mutex_enter(hash_lock); 4700 hdr = buf->b_hdr; 4701 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 4702 4703 /* 4704 * If the buffer was compressed, decompress it first. 4705 */ 4706 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF) 4707 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress); 4708 ASSERT(zio->io_data != NULL); 4709 4710 /* 4711 * Check this survived the L2ARC journey. 4712 */ 4713 equal = arc_cksum_equal(buf); 4714 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 4715 mutex_exit(hash_lock); 4716 zio->io_private = buf; 4717 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 4718 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 4719 arc_read_done(zio); 4720 } else { 4721 mutex_exit(hash_lock); 4722 /* 4723 * Buffer didn't survive caching. 
Increment stats and 4724 * reissue to the original storage device. 4725 */ 4726 if (zio->io_error != 0) { 4727 ARCSTAT_BUMP(arcstat_l2_io_error); 4728 } else { 4729 zio->io_error = SET_ERROR(EIO); 4730 } 4731 if (!equal) 4732 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 4733 4734 /* 4735 * If there's no waiter, issue an async i/o to the primary 4736 * storage now. If there *is* a waiter, the caller must 4737 * issue the i/o in a context where it's OK to block. 4738 */ 4739 if (zio->io_waiter == NULL) { 4740 zio_t *pio = zio_unique_parent(zio); 4741 4742 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 4743 4744 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp, 4745 buf->b_data, zio->io_size, arc_read_done, buf, 4746 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4747 } 4748 } 4749 4750 kmem_free(cb, sizeof (l2arc_read_callback_t)); 4751} 4752 4753/* 4754 * This is the list priority from which the L2ARC will search for pages to 4755 * cache. This is used within loops (0..2 * ARC_BUFC_NUMLISTS - 1) to cycle through lists in the 4756 * desired order. This order can have a significant effect on cache 4757 * performance. 4758 * 4759 * Currently the metadata lists are hit first, MFU then MRU, followed by 4760 * the data lists. This function returns a locked list, and also returns 4761 * the lock pointer. 4762 */ 4763static list_t * 4764l2arc_list_locked(int list_num, kmutex_t **lock) 4765{ 4766 list_t *list = NULL; 4767 int idx; 4768 4769 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS); 4770 4771 if (list_num < ARC_BUFC_NUMMETADATALISTS) { 4772 idx = list_num; 4773 list = &arc_mfu->arcs_lists[idx]; 4774 *lock = ARCS_LOCK(arc_mfu, idx); 4775 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) { 4776 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4777 list = &arc_mru->arcs_lists[idx]; 4778 *lock = ARCS_LOCK(arc_mru, idx); 4779 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 + 4780 ARC_BUFC_NUMDATALISTS)) { 4781 idx = list_num - ARC_BUFC_NUMMETADATALISTS; 4782 list = &arc_mfu->arcs_lists[idx]; 4783 *lock = ARCS_LOCK(arc_mfu, idx); 4784 } else { 4785 idx = list_num - ARC_BUFC_NUMLISTS; 4786 list = &arc_mru->arcs_lists[idx]; 4787 *lock = ARCS_LOCK(arc_mru, idx); 4788 } 4789 4790 ASSERT(!(MUTEX_HELD(*lock))); 4791 mutex_enter(*lock); 4792 return (list); 4793} 4794 4795/* 4796 * Evict buffers from the device write hand to the distance specified in 4797 * bytes. This distance may span populated buffers, or it may span nothing. 4798 * This is clearing a region on the L2ARC device ready for writing. 4799 * If the 'all' boolean is set, every buffer is evicted. 4800 */ 4801static void 4802l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4803{ 4804 list_t *buflist; 4805 l2arc_buf_hdr_t *abl2; 4806 arc_buf_hdr_t *ab, *ab_prev; 4807 kmutex_t *hash_lock; 4808 uint64_t taddr; 4809 4810 buflist = dev->l2ad_buflist; 4811 4812 if (buflist == NULL) 4813 return; 4814 4815 if (!all && dev->l2ad_first) { 4816 /* 4817 * This is the first sweep through the device. There is 4818 * nothing to evict. 4819 */ 4820 return; 4821 } 4822 4823 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4824 /* 4825 * When nearing the end of the device, evict to the end 4826 * before the device write hand jumps to the start.
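 * (For example, on a 32GB device with the write hand at 30GB and a
 * 1GB distance, 30GB >= 32GB - 2GB holds, so taddr becomes l2ad_end
 * rather than 31GB.)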
4827 */ 4828 taddr = dev->l2ad_end; 4829 } else { 4830 taddr = dev->l2ad_hand + distance; 4831 } 4832 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4833 uint64_t, taddr, boolean_t, all); 4834 4835top: 4836 mutex_enter(&l2arc_buflist_mtx); 4837 for (ab = list_tail(buflist); ab; ab = ab_prev) { 4838 ab_prev = list_prev(buflist, ab); 4839 4840 hash_lock = HDR_LOCK(ab); 4841 if (!mutex_tryenter(hash_lock)) { 4842 /* 4843 * Missed the hash lock. Retry. 4844 */ 4845 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4846 mutex_exit(&l2arc_buflist_mtx); 4847 mutex_enter(hash_lock); 4848 mutex_exit(hash_lock); 4849 goto top; 4850 } 4851 4852 if (HDR_L2_WRITE_HEAD(ab)) { 4853 /* 4854 * We hit a write head node. Leave it for 4855 * l2arc_write_done(). 4856 */ 4857 list_remove(buflist, ab); 4858 mutex_exit(hash_lock); 4859 continue; 4860 } 4861 4862 if (!all && ab->b_l2hdr != NULL && 4863 (ab->b_l2hdr->b_daddr > taddr || 4864 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4865 /* 4866 * We've evicted to the target address, 4867 * or the end of the device. 4868 */ 4869 mutex_exit(hash_lock); 4870 break; 4871 } 4872 4873 if (HDR_FREE_IN_PROGRESS(ab)) { 4874 /* 4875 * Already on the path to destruction. 4876 */ 4877 mutex_exit(hash_lock); 4878 continue; 4879 } 4880 4881 if (ab->b_state == arc_l2c_only) { 4882 ASSERT(!HDR_L2_READING(ab)); 4883 /* 4884 * This doesn't exist in the ARC. Destroy. 4885 * arc_hdr_destroy() will call list_remove() 4886 * and decrement arcstat_l2_size. 4887 */ 4888 arc_change_state(arc_anon, ab, hash_lock); 4889 arc_hdr_destroy(ab); 4890 } else { 4891 /* 4892 * Invalidate issued or about to be issued 4893 * reads, since we may be about to write 4894 * over this location. 4895 */ 4896 if (HDR_L2_READING(ab)) { 4897 ARCSTAT_BUMP(arcstat_l2_evict_reading); 4898 ab->b_flags |= ARC_L2_EVICTED; 4899 } 4900 4901 /* 4902 * Tell ARC this no longer exists in L2ARC. 4903 */ 4904 if (ab->b_l2hdr != NULL) { 4905 abl2 = ab->b_l2hdr; 4906 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize); 4907 ab->b_l2hdr = NULL; 4908 kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4909 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4910 } 4911 list_remove(buflist, ab); 4912 4913 /* 4914 * This may have been leftover after a 4915 * failed write. 4916 */ 4917 ab->b_flags &= ~ARC_L2_WRITING; 4918 } 4919 mutex_exit(hash_lock); 4920 } 4921 mutex_exit(&l2arc_buflist_mtx); 4922 4923 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0); 4924 dev->l2ad_evict = taddr; 4925} 4926 4927/* 4928 * Find and write ARC buffers to the L2ARC device. 4929 * 4930 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4931 * for reading until they have completed writing. 4932 * The headroom_boost is an in-out parameter used to maintain headroom boost 4933 * state between calls to this function. 4934 * 4935 * Returns the number of bytes actually written (which may be smaller than 4936 * the delta by which the device hand has changed due to alignment). 
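 * (For example, a buffer whose payload compressed to 5KB on a vdev
 * with 4KB sectors advances the write hand by vdev_psize_to_asize() =
 * 8KB while write_asize grows by only 5KB.)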
4937 */ 4938static uint64_t 4939l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz, 4940 boolean_t *headroom_boost) 4941{ 4942 arc_buf_hdr_t *ab, *ab_prev, *head; 4943 list_t *list; 4944 uint64_t write_asize, write_psize, write_sz, headroom, 4945 buf_compress_minsz; 4946 void *buf_data; 4947 kmutex_t *list_lock; 4948 boolean_t full; 4949 l2arc_write_callback_t *cb; 4950 zio_t *pio, *wzio; 4951 uint64_t guid = spa_load_guid(spa); 4952 const boolean_t do_headroom_boost = *headroom_boost; 4953 int try; 4954 4955 ASSERT(dev->l2ad_vdev != NULL); 4956 4957 /* Lower the flag now, we might want to raise it again later. */ 4958 *headroom_boost = B_FALSE; 4959 4960 pio = NULL; 4961 write_sz = write_asize = write_psize = 0; 4962 full = B_FALSE; 4963 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4964 head->b_flags |= ARC_L2_WRITE_HEAD; 4965 4966 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 4967 /* 4968 * We will want to try to compress buffers that are at least 2x the 4969 * device sector size. 4970 */ 4971 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift; 4972 4973 /* 4974 * Copy buffers for L2ARC writing. 4975 */ 4976 mutex_enter(&l2arc_buflist_mtx); 4977 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) { 4978 uint64_t passed_sz = 0; 4979 4980 list = l2arc_list_locked(try, &list_lock); 4981 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 4982 4983 /* 4984 * L2ARC fast warmup. 4985 * 4986 * Until the ARC is warm and starts to evict, read from the 4987 * head of the ARC lists rather than the tail. 4988 */ 4989 if (arc_warm == B_FALSE) 4990 ab = list_head(list); 4991 else 4992 ab = list_tail(list); 4993 if (ab == NULL) 4994 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 4995 4996 headroom = target_sz * l2arc_headroom; 4997 if (do_headroom_boost) 4998 headroom = (headroom * l2arc_headroom_boost) / 100; 4999 5000 for (; ab; ab = ab_prev) { 5001 l2arc_buf_hdr_t *l2hdr; 5002 kmutex_t *hash_lock; 5003 uint64_t buf_sz; 5004 5005 if (arc_warm == B_FALSE) 5006 ab_prev = list_next(list, ab); 5007 else 5008 ab_prev = list_prev(list, ab); 5009 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size); 5010 5011 hash_lock = HDR_LOCK(ab); 5012 if (!mutex_tryenter(hash_lock)) { 5013 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 5014 /* 5015 * Skip this buffer rather than waiting. 5016 */ 5017 continue; 5018 } 5019 5020 passed_sz += ab->b_size; 5021 if (passed_sz > headroom) { 5022 /* 5023 * Searched too far. 5024 */ 5025 mutex_exit(hash_lock); 5026 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 5027 break; 5028 } 5029 5030 if (!l2arc_write_eligible(guid, ab)) { 5031 mutex_exit(hash_lock); 5032 continue; 5033 } 5034 5035 if ((write_sz + ab->b_size) > target_sz) { 5036 full = B_TRUE; 5037 mutex_exit(hash_lock); 5038 ARCSTAT_BUMP(arcstat_l2_write_full); 5039 break; 5040 } 5041 5042 if (pio == NULL) { 5043 /* 5044 * Insert a dummy header on the buflist so 5045 * l2arc_write_done() can find where the 5046 * write buffers begin without searching. 5047 */ 5048 list_insert_head(dev->l2ad_buflist, head); 5049 5050 cb = kmem_alloc( 5051 sizeof (l2arc_write_callback_t), KM_SLEEP); 5052 cb->l2wcb_dev = dev; 5053 cb->l2wcb_head = head; 5054 pio = zio_root(spa, l2arc_write_done, cb, 5055 ZIO_FLAG_CANFAIL); 5056 ARCSTAT_BUMP(arcstat_l2_write_pios); 5057 } 5058 5059 /* 5060 * Create and add a new L2ARC header. 
5061 */ 5062 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 5063 l2hdr->b_dev = dev; 5064 ab->b_flags |= ARC_L2_WRITING; 5065 5066 /* 5067 * Temporarily stash the data buffer in b_tmp_cdata. 5068 * The subsequent write step will pick it up from 5069 * there. This is because we can't access ab->b_buf 5070 * without holding the hash_lock, which we in turn 5071 * can't access without holding the ARC list locks 5072 * (which we want to avoid during compression/writing). 5073 */ 5074 l2hdr->b_compress = ZIO_COMPRESS_OFF; 5075 l2hdr->b_asize = ab->b_size; 5076 l2hdr->b_tmp_cdata = ab->b_buf->b_data; 5077 5078 buf_sz = ab->b_size; 5079 ab->b_l2hdr = l2hdr; 5080 5081 list_insert_head(dev->l2ad_buflist, ab); 5082 5083 /* 5084 * Compute and store the buffer cksum before 5085 * writing. On debug the cksum is verified first. 5086 */ 5087 arc_cksum_verify(ab->b_buf); 5088 arc_cksum_compute(ab->b_buf, B_TRUE); 5089 5090 mutex_exit(hash_lock); 5091 5092 write_sz += buf_sz; 5093 } 5094 5095 mutex_exit(list_lock); 5096 5097 if (full == B_TRUE) 5098 break; 5099 } 5100 5101 /* No buffers selected for writing? */ 5102 if (pio == NULL) { 5103 ASSERT0(write_sz); 5104 mutex_exit(&l2arc_buflist_mtx); 5105 kmem_cache_free(hdr_cache, head); 5106 return (0); 5107 } 5108 5109 /* 5110 * Now start writing the buffers. We're starting at the write head 5111 * and work backwards, retracing the course of the buffer selector 5112 * loop above. 5113 */ 5114 for (ab = list_prev(dev->l2ad_buflist, head); ab; 5115 ab = list_prev(dev->l2ad_buflist, ab)) { 5116 l2arc_buf_hdr_t *l2hdr; 5117 uint64_t buf_sz; 5118 5119 /* 5120 * We shouldn't need to lock the buffer here, since we flagged 5121 * it as ARC_L2_WRITING in the previous step, but we must take 5122 * care to only access its L2 cache parameters. In particular, 5123 * ab->b_buf may be invalid by now due to ARC eviction. 5124 */ 5125 l2hdr = ab->b_l2hdr; 5126 l2hdr->b_daddr = dev->l2ad_hand; 5127 5128 if ((ab->b_flags & ARC_L2COMPRESS) && 5129 l2hdr->b_asize >= buf_compress_minsz) { 5130 if (l2arc_compress_buf(l2hdr)) { 5131 /* 5132 * If compression succeeded, enable headroom 5133 * boost on the next scan cycle. 5134 */ 5135 *headroom_boost = B_TRUE; 5136 } 5137 } 5138 5139 /* 5140 * Pick up the buffer data we had previously stashed away 5141 * (and now potentially also compressed). 5142 */ 5143 buf_data = l2hdr->b_tmp_cdata; 5144 buf_sz = l2hdr->b_asize; 5145 5146 /* Compression may have squashed the buffer to zero length. */ 5147 if (buf_sz != 0) { 5148 uint64_t buf_p_sz; 5149 5150 wzio = zio_write_phys(pio, dev->l2ad_vdev, 5151 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 5152 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 5153 ZIO_FLAG_CANFAIL, B_FALSE); 5154 5155 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 5156 zio_t *, wzio); 5157 (void) zio_nowait(wzio); 5158 5159 write_asize += buf_sz; 5160 /* 5161 * Keep the clock hand suitably device-aligned. 5162 */ 5163 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 5164 write_psize += buf_p_sz; 5165 dev->l2ad_hand += buf_p_sz; 5166 } 5167 } 5168 5169 mutex_exit(&l2arc_buflist_mtx); 5170 5171 ASSERT3U(write_asize, <=, target_sz); 5172 ARCSTAT_BUMP(arcstat_l2_writes_sent); 5173 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize); 5174 ARCSTAT_INCR(arcstat_l2_size, write_sz); 5175 ARCSTAT_INCR(arcstat_l2_asize, write_asize); 5176 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0); 5177 5178 /* 5179 * Bump device hand to the device start if it is approaching the end.
5180 * l2arc_evict() will already have evicted ahead for this case. 5181 */ 5182 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 5183 vdev_space_update(dev->l2ad_vdev, 5184 dev->l2ad_end - dev->l2ad_hand, 0, 0); 5185 dev->l2ad_hand = dev->l2ad_start; 5186 dev->l2ad_evict = dev->l2ad_start; 5187 dev->l2ad_first = B_FALSE; 5188 } 5189 5190 dev->l2ad_writing = B_TRUE; 5191 (void) zio_wait(pio); 5192 dev->l2ad_writing = B_FALSE; 5193 5194 return (write_asize); 5195} 5196 5197/* 5198 * Compresses an L2ARC buffer. 5199 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its 5200 * size in l2hdr->b_asize. This routine tries to compress the data and 5201 * depending on the compression result there are three possible outcomes: 5202 * *) The buffer was incompressible. The original l2hdr contents were left 5203 * untouched and are ready for writing to an L2 device. 5204 * *) The buffer was all-zeros, so there is no need to write it to an L2 5205 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is 5206 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY. 5207 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary 5208 * data buffer which holds the compressed data to be written, and b_asize 5209 * tells us how much data there is. b_compress is set to the appropriate 5210 * compression algorithm. Once writing is done, invoke 5211 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer. 5212 * 5213 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the 5214 * buffer was incompressible). 5215 */ 5216static boolean_t 5217l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr) 5218{ 5219 void *cdata; 5220 size_t csize, len, rounded; 5221 5222 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF); 5223 ASSERT(l2hdr->b_tmp_cdata != NULL); 5224 5225 len = l2hdr->b_asize; 5226 cdata = zio_data_buf_alloc(len); 5227 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata, 5228 cdata, l2hdr->b_asize, (size_t)(1ULL << l2hdr->b_dev->l2ad_vdev->vdev_ashift)); 5229 5230 rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE); 5231 if (rounded > csize) { 5232 bzero((char *)cdata + csize, rounded - csize); 5233 csize = rounded; 5234 } 5235 5236 if (csize == 0) { 5237 /* zero block, indicate that there's nothing to write */ 5238 zio_data_buf_free(cdata, len); 5239 l2hdr->b_compress = ZIO_COMPRESS_EMPTY; 5240 l2hdr->b_asize = 0; 5241 l2hdr->b_tmp_cdata = NULL; 5242 ARCSTAT_BUMP(arcstat_l2_compress_zeros); 5243 return (B_TRUE); 5244 } else if (csize > 0 && csize < len) { 5245 /* 5246 * Compression succeeded, we'll keep the cdata around for 5247 * writing and release it afterwards. 5248 */ 5249 l2hdr->b_compress = ZIO_COMPRESS_LZ4; 5250 l2hdr->b_asize = csize; 5251 l2hdr->b_tmp_cdata = cdata; 5252 ARCSTAT_BUMP(arcstat_l2_compress_successes); 5253 return (B_TRUE); 5254 } else { 5255 /* 5256 * Compression failed, release the compressed buffer. 5257 * l2hdr will be left unmodified. 5258 */ 5259 zio_data_buf_free(cdata, len); 5260 ARCSTAT_BUMP(arcstat_l2_compress_failures); 5261 return (B_FALSE); 5262 } 5263} 5264 5265/* 5266 * Decompresses a zio read back from an l2arc device. On success, the 5267 * underlying zio's io_data buffer is overwritten by the uncompressed 5268 * version. On decompression error (corrupt compressed stream), the 5269 * zio->io_error value is set to signal an I/O error. 
5270 * 5271 * Please note that the compressed data stream is not checksummed, so 5272 * if the underlying device is experiencing data corruption, we may feed 5273 * corrupt data to the decompressor; the decompressor therefore needs to 5274 * be able to handle this situation (LZ4 does). 5275 */ 5276static void 5277l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c) 5278{ 5279 ASSERT(L2ARC_IS_VALID_COMPRESS(c)); 5280 5281 if (zio->io_error != 0) { 5282 /* 5283 * An io error has occurred; just restore the original io 5284 * size in preparation for a main pool read. 5285 */ 5286 zio->io_orig_size = zio->io_size = hdr->b_size; 5287 return; 5288 } 5289 5290 if (c == ZIO_COMPRESS_EMPTY) { 5291 /* 5292 * An empty buffer results in a null zio, which means we 5293 * need to fill its io_data after we're done restoring the 5294 * buffer's contents. 5295 */ 5296 ASSERT(hdr->b_buf != NULL); 5297 bzero(hdr->b_buf->b_data, hdr->b_size); 5298 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data; 5299 } else { 5300 ASSERT(zio->io_data != NULL); 5301 /* 5302 * We copy the compressed data from the start of the arc buffer 5303 * (the zio_read will have pulled in only what we need, the 5304 * rest is garbage which we will overwrite at decompression) 5305 * and then decompress back to the ARC data buffer. This way we 5306 * can minimize copying by simply decompressing back over the 5307 * original compressed data (rather than decompressing to an 5308 * aux buffer and then copying back the uncompressed buffer, 5309 * which is likely to be much larger). 5310 */ 5311 uint64_t csize; 5312 void *cdata; 5313 5314 csize = zio->io_size; 5315 cdata = zio_data_buf_alloc(csize); 5316 bcopy(zio->io_data, cdata, csize); 5317 if (zio_decompress_data(c, cdata, zio->io_data, csize, 5318 hdr->b_size) != 0) 5319 zio->io_error = EIO; 5320 zio_data_buf_free(cdata, csize); 5321 } 5322 5323 /* Restore the expected uncompressed IO size. */ 5324 zio->io_orig_size = zio->io_size = hdr->b_size; 5325} 5326 5327/* 5328 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure. 5329 * This buffer serves as a temporary holder of compressed data while 5330 * the buffer entry is being written to an l2arc device. Once that is 5331 * done, we can dispose of it. 5332 */ 5333static void 5334l2arc_release_cdata_buf(arc_buf_hdr_t *ab) 5335{ 5336 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr; 5337 5338 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) { 5339 /* 5340 * If the data was compressed, then we've allocated a 5341 * temporary buffer for it, so now we need to release it. 5342 */ 5343 ASSERT(l2hdr->b_tmp_cdata != NULL); 5344 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size); 5345 } 5346 l2hdr->b_tmp_cdata = NULL; 5347} 5348 5349/* 5350 * This thread feeds the L2ARC at regular intervals. This is the beating 5351 * heart of the L2ARC. 5352 */ 5353static void 5354l2arc_feed_thread(void *dummy __unused) 5355{ 5356 callb_cpr_t cpr; 5357 l2arc_dev_t *dev; 5358 spa_t *spa; 5359 uint64_t size, wrote; 5360 clock_t begin, next = ddi_get_lbolt(); 5361 boolean_t headroom_boost = B_FALSE; 5362 5363 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 5364 5365 mutex_enter(&l2arc_feed_thr_lock); 5366 5367 while (l2arc_thread_exit == 0) { 5368 CALLB_CPR_SAFE_BEGIN(&cpr); 5369 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 5370 next - ddi_get_lbolt()); 5371 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 5372 next = ddi_get_lbolt() + hz; 5373 5374 /* 5375 * Quick check for L2ARC devices.
5376 */ 5377 mutex_enter(&l2arc_dev_mtx); 5378 if (l2arc_ndev == 0) { 5379 mutex_exit(&l2arc_dev_mtx); 5380 continue; 5381 } 5382 mutex_exit(&l2arc_dev_mtx); 5383 begin = ddi_get_lbolt(); 5384 5385 /* 5386 * This selects the next l2arc device to write to, and in 5387 * doing so the next spa to feed from: dev->l2ad_spa. This 5388 * will return NULL if there are now no l2arc devices or if 5389 * they are all faulted. 5390 * 5391 * If a device is returned, its spa's config lock is also 5392 * held to prevent device removal. l2arc_dev_get_next() 5393 * will grab and release l2arc_dev_mtx. 5394 */ 5395 if ((dev = l2arc_dev_get_next()) == NULL) 5396 continue; 5397 5398 spa = dev->l2ad_spa; 5399 ASSERT(spa != NULL); 5400 5401 /* 5402 * If the pool is read-only then force the feed thread to 5403 * sleep a little longer. 5404 */ 5405 if (!spa_writeable(spa)) { 5406 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 5407 spa_config_exit(spa, SCL_L2ARC, dev); 5408 continue; 5409 } 5410 5411 /* 5412 * Avoid contributing to memory pressure. 5413 */ 5414 if (arc_reclaim_needed()) { 5415 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 5416 spa_config_exit(spa, SCL_L2ARC, dev); 5417 continue; 5418 } 5419 5420 ARCSTAT_BUMP(arcstat_l2_feeds); 5421 5422 size = l2arc_write_size(); 5423 5424 /* 5425 * Evict L2ARC buffers that will be overwritten. 5426 */ 5427 l2arc_evict(dev, size, B_FALSE); 5428 5429 /* 5430 * Write ARC buffers. 5431 */ 5432 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost); 5433 5434 /* 5435 * Calculate interval between writes. 5436 */ 5437 next = l2arc_write_interval(begin, size, wrote); 5438 spa_config_exit(spa, SCL_L2ARC, dev); 5439 } 5440 5441 l2arc_thread_exit = 0; 5442 cv_broadcast(&l2arc_feed_thr_cv); 5443 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 5444 thread_exit(); 5445} 5446 5447boolean_t 5448l2arc_vdev_present(vdev_t *vd) 5449{ 5450 l2arc_dev_t *dev; 5451 5452 mutex_enter(&l2arc_dev_mtx); 5453 for (dev = list_head(l2arc_dev_list); dev != NULL; 5454 dev = list_next(l2arc_dev_list, dev)) { 5455 if (dev->l2ad_vdev == vd) 5456 break; 5457 } 5458 mutex_exit(&l2arc_dev_mtx); 5459 5460 return (dev != NULL); 5461} 5462 5463/* 5464 * Add a vdev for use by the L2ARC. By this point the spa has already 5465 * validated the vdev and opened it. 5466 */ 5467void 5468l2arc_add_vdev(spa_t *spa, vdev_t *vd) 5469{ 5470 l2arc_dev_t *adddev; 5471 5472 ASSERT(!l2arc_vdev_present(vd)); 5473 5474 vdev_ashift_optimize(vd); 5475 5476 /* 5477 * Create a new l2arc device entry. 5478 */ 5479 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 5480 adddev->l2ad_spa = spa; 5481 adddev->l2ad_vdev = vd; 5482 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 5483 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 5484 adddev->l2ad_hand = adddev->l2ad_start; 5485 adddev->l2ad_evict = adddev->l2ad_start; 5486 adddev->l2ad_first = B_TRUE; 5487 adddev->l2ad_writing = B_FALSE; 5488 5489 /* 5490 * This is a list of all ARC buffers that are still valid on the 5491 * device. 5492 */ 5493 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 5494 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 5495 offsetof(arc_buf_hdr_t, b_l2node)); 5496 5497 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 5498 5499 /* 5500 * Add device to global list 5501 */ 5502 mutex_enter(&l2arc_dev_mtx); 5503 list_insert_head(l2arc_dev_list, adddev); 5504 atomic_inc_64(&l2arc_ndev); 5505 mutex_exit(&l2arc_dev_mtx); 5506} 5507 5508/* 5509 * Remove a vdev from the L2ARC. 
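 * The device is unlinked from the global list, then its buflist is
 * flushed via l2arc_evict(remdev, 0, B_TRUE) and freed.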
5510 */ 5511void 5512l2arc_remove_vdev(vdev_t *vd) 5513{ 5514 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 5515 5516 /* 5517 * Find the device by vdev 5518 */ 5519 mutex_enter(&l2arc_dev_mtx); 5520 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 5521 nextdev = list_next(l2arc_dev_list, dev); 5522 if (vd == dev->l2ad_vdev) { 5523 remdev = dev; 5524 break; 5525 } 5526 } 5527 ASSERT(remdev != NULL); 5528 5529 /* 5530 * Remove device from global list 5531 */ 5532 list_remove(l2arc_dev_list, remdev); 5533 l2arc_dev_last = NULL; /* may have been invalidated */ 5534 atomic_dec_64(&l2arc_ndev); 5535 mutex_exit(&l2arc_dev_mtx); 5536 5537 /* 5538 * Clear all buflists and ARC references. L2ARC device flush. 5539 */ 5540 l2arc_evict(remdev, 0, B_TRUE); 5541 list_destroy(remdev->l2ad_buflist); 5542 kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 5543 kmem_free(remdev, sizeof (l2arc_dev_t)); 5544} 5545 5546void 5547l2arc_init(void) 5548{ 5549 l2arc_thread_exit = 0; 5550 l2arc_ndev = 0; 5551 l2arc_writes_sent = 0; 5552 l2arc_writes_done = 0; 5553 5554 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 5555 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 5556 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 5557 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 5558 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 5559 5560 l2arc_dev_list = &L2ARC_dev_list; 5561 l2arc_free_on_write = &L2ARC_free_on_write; 5562 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 5563 offsetof(l2arc_dev_t, l2ad_node)); 5564 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 5565 offsetof(l2arc_data_free_t, l2df_list_node)); 5566} 5567 5568void 5569l2arc_fini(void) 5570{ 5571 /* 5572 * This is called from dmu_fini(), which is called from spa_fini(); 5573 * Because of this, we can assume that all l2arc devices have 5574 * already been removed when the pools themselves were removed. 5575 */ 5576 5577 l2arc_do_free_on_write(); 5578 5579 mutex_destroy(&l2arc_feed_thr_lock); 5580 cv_destroy(&l2arc_feed_thr_cv); 5581 mutex_destroy(&l2arc_dev_mtx); 5582 mutex_destroy(&l2arc_buflist_mtx); 5583 mutex_destroy(&l2arc_free_on_write_mtx); 5584 5585 list_destroy(l2arc_dev_list); 5586 list_destroy(l2arc_free_on_write); 5587} 5588 5589void 5590l2arc_start(void) 5591{ 5592 if (!(spa_mode_global & FWRITE)) 5593 return; 5594 5595 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 5596 TS_RUN, minclsyspri); 5597} 5598 5599void 5600l2arc_stop(void) 5601{ 5602 if (!(spa_mode_global & FWRITE)) 5603 return; 5604 5605 mutex_enter(&l2arc_feed_thr_lock); 5606 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 5607 l2arc_thread_exit = 1; 5608 while (l2arc_thread_exit != 0) 5609 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 5610 mutex_exit(&l2arc_feed_thr_lock); 5611} 5612
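
/*
 * A minimal usage sketch (illustrative only, not part of the driver):
 * the relative order in which the L2ARC entry points above are
 * exercised over the life of a pool that has a cache vdev; 'spa' and
 * 'vd' stand for an imported pool and its cache vdev.
 *
 *	l2arc_init();			set up locks and device lists
 *	l2arc_start();			spawn l2arc_feed_thread()
 *	l2arc_add_vdev(spa, vd);	after the spa validated/opened vd
 *	...				feed thread evicts ahead, writes
 *	l2arc_remove_vdev(vd);		on vdev removal or pool destroy
 *	l2arc_stop();			join the feed thread
 *	l2arc_fini();			tear down locks and lists
 */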