arc.c revision 185029
1168404Spjd/* 2168404Spjd * CDDL HEADER START 3168404Spjd * 4168404Spjd * The contents of this file are subject to the terms of the 5168404Spjd * Common Development and Distribution License (the "License"). 6168404Spjd * You may not use this file except in compliance with the License. 7168404Spjd * 8168404Spjd * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE 9168404Spjd * or http://www.opensolaris.org/os/licensing. 10168404Spjd * See the License for the specific language governing permissions 11168404Spjd * and limitations under the License. 12168404Spjd * 13168404Spjd * When distributing Covered Code, include this CDDL HEADER in each 14168404Spjd * file and include the License file at usr/src/OPENSOLARIS.LICENSE. 15168404Spjd * If applicable, add the following below this CDDL HEADER, with the 16168404Spjd * fields enclosed by brackets "[]" replaced with your own identifying 17168404Spjd * information: Portions Copyright [yyyy] [name of copyright owner] 18168404Spjd * 19168404Spjd * CDDL HEADER END 20168404Spjd */ 21168404Spjd/* 22185029Spjd * Copyright 2008 Sun Microsystems, Inc. All rights reserved. 23168404Spjd * Use is subject to license terms. 24168404Spjd */ 25168404Spjd 26168404Spjd/* 27168404Spjd * DVA-based Adjustable Replacement Cache 28168404Spjd * 29168404Spjd * While much of the theory of operation used here is 30168404Spjd * based on the self-tuning, low overhead replacement cache 31168404Spjd * presented by Megiddo and Modha at FAST 2003, there are some 32168404Spjd * significant differences: 33168404Spjd * 34168404Spjd * 1. The Megiddo and Modha model assumes any page is evictable. 35168404Spjd * Pages in its cache cannot be "locked" into memory. This makes 36168404Spjd * the eviction algorithm simple: evict the last page in the list. 37168404Spjd * This also make the performance characteristics easy to reason 38168404Spjd * about. Our cache is not so simple. At any given moment, some 39168404Spjd * subset of the blocks in the cache are un-evictable because we 40168404Spjd * have handed out a reference to them. Blocks are only evictable 41168404Spjd * when there are no external references active. This makes 42168404Spjd * eviction far more problematic: we choose to evict the evictable 43168404Spjd * blocks that are the "lowest" in the list. 44168404Spjd * 45168404Spjd * There are times when it is not possible to evict the requested 46168404Spjd * space. In these circumstances we are unable to adjust the cache 47168404Spjd * size. To prevent the cache growing unbounded at these times we 48185029Spjd * implement a "cache throttle" that slows the flow of new data 49185029Spjd * into the cache until we can make space available. 50168404Spjd * 51168404Spjd * 2. The Megiddo and Modha model assumes a fixed cache size. 52168404Spjd * Pages are evicted when the cache is full and there is a cache 53168404Spjd * miss. Our model has a variable sized cache. It grows with 54185029Spjd * high use, but also tries to react to memory pressure from the 55168404Spjd * operating system: decreasing its size when system memory is 56168404Spjd * tight. 57168404Spjd * 58168404Spjd * 3. The Megiddo and Modha model assumes a fixed page size. All 59168404Spjd * elements of the cache are therefor exactly the same size. So 60168404Spjd * when adjusting the cache size following a cache miss, its simply 61168404Spjd * a matter of choosing a single page to evict. In our model, we 62168404Spjd * have variable sized cache blocks (rangeing from 512 bytes to 63168404Spjd * 128K bytes). We therefor choose a set of blocks to evict to make 64168404Spjd * space for a cache miss that approximates as closely as possible 65168404Spjd * the space used by the new block. 66168404Spjd * 67168404Spjd * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache" 68168404Spjd * by N. Megiddo & D. Modha, FAST 2003 69168404Spjd */ 70168404Spjd 71168404Spjd/* 72168404Spjd * The locking model: 73168404Spjd * 74168404Spjd * A new reference to a cache buffer can be obtained in two 75168404Spjd * ways: 1) via a hash table lookup using the DVA as a key, 76185029Spjd * or 2) via one of the ARC lists. The arc_read() interface 77168404Spjd * uses method 1, while the internal arc algorithms for 78168404Spjd * adjusting the cache use method 2. We therefor provide two 79168404Spjd * types of locks: 1) the hash table lock array, and 2) the 80168404Spjd * arc list locks. 81168404Spjd * 82168404Spjd * Buffers do not have their own mutexs, rather they rely on the 83168404Spjd * hash table mutexs for the bulk of their protection (i.e. most 84168404Spjd * fields in the arc_buf_hdr_t are protected by these mutexs). 85168404Spjd * 86168404Spjd * buf_hash_find() returns the appropriate mutex (held) when it 87168404Spjd * locates the requested buffer in the hash table. It returns 88168404Spjd * NULL for the mutex if the buffer was not in the table. 89168404Spjd * 90168404Spjd * buf_hash_remove() expects the appropriate hash mutex to be 91168404Spjd * already held before it is invoked. 92168404Spjd * 93168404Spjd * Each arc state also has a mutex which is used to protect the 94168404Spjd * buffer list associated with the state. When attempting to 95168404Spjd * obtain a hash table lock while holding an arc list lock you 96168404Spjd * must use: mutex_tryenter() to avoid deadlock. Also note that 97168404Spjd * the active state mutex must be held before the ghost state mutex. 98168404Spjd * 99168404Spjd * Arc buffers may have an associated eviction callback function. 100168404Spjd * This function will be invoked prior to removing the buffer (e.g. 101168404Spjd * in arc_do_user_evicts()). Note however that the data associated 102168404Spjd * with the buffer may be evicted prior to the callback. The callback 103168404Spjd * must be made with *no locks held* (to prevent deadlock). Additionally, 104168404Spjd * the users of callbacks must ensure that their private data is 105168404Spjd * protected from simultaneous callbacks from arc_buf_evict() 106168404Spjd * and arc_do_user_evicts(). 107168404Spjd * 108168404Spjd * Note that the majority of the performance stats are manipulated 109168404Spjd * with atomic operations. 110185029Spjd * 111185029Spjd * The L2ARC uses the l2arc_buflist_mtx global mutex for the following: 112185029Spjd * 113185029Spjd * - L2ARC buflist creation 114185029Spjd * - L2ARC buflist eviction 115185029Spjd * - L2ARC write completion, which walks L2ARC buflists 116185029Spjd * - ARC header destruction, as it removes from L2ARC buflists 117185029Spjd * - ARC header release, as it removes from L2ARC buflists 118168404Spjd */ 119168404Spjd 120168404Spjd#include <sys/spa.h> 121168404Spjd#include <sys/zio.h> 122168404Spjd#include <sys/zio_checksum.h> 123168404Spjd#include <sys/zfs_context.h> 124168404Spjd#include <sys/arc.h> 125168404Spjd#include <sys/refcount.h> 126185029Spjd#include <sys/vdev.h> 127168404Spjd#ifdef _KERNEL 128168404Spjd#include <sys/dnlc.h> 129168404Spjd#endif 130168404Spjd#include <sys/callb.h> 131168404Spjd#include <sys/kstat.h> 132168404Spjd#include <sys/sdt.h> 133168404Spjd 134168404Spjdstatic kmutex_t arc_reclaim_thr_lock; 135168404Spjdstatic kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */ 136168404Spjdstatic uint8_t arc_thread_exit; 137168404Spjd 138185029Spjdextern int zfs_write_limit_shift; 139185029Spjdextern uint64_t zfs_write_limit_max; 140185029Spjdextern kmutex_t zfs_write_limit_lock; 141185029Spjd 142168404Spjd#define ARC_REDUCE_DNLC_PERCENT 3 143168404Spjduint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT; 144168404Spjd 145168404Spjdtypedef enum arc_reclaim_strategy { 146168404Spjd ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */ 147168404Spjd ARC_RECLAIM_CONS /* Conservative reclaim strategy */ 148168404Spjd} arc_reclaim_strategy_t; 149168404Spjd 150168404Spjd/* number of seconds before growing cache again */ 151168404Spjdstatic int arc_grow_retry = 60; 152168404Spjd 153168404Spjd/* 154168404Spjd * minimum lifespan of a prefetch block in clock ticks 155168404Spjd * (initialized in arc_init()) 156168404Spjd */ 157168404Spjdstatic int arc_min_prefetch_lifespan; 158168404Spjd 159168404Spjdstatic int arc_dead; 160168404Spjd 161168404Spjd/* 162185029Spjd * The arc has filled available memory and has now warmed up. 163185029Spjd */ 164185029Spjdstatic boolean_t arc_warm; 165185029Spjd 166185029Spjd/* 167168404Spjd * These tunables are for performance analysis. 168168404Spjd */ 169185029Spjduint64_t zfs_arc_max; 170185029Spjduint64_t zfs_arc_min; 171185029Spjduint64_t zfs_arc_meta_limit = 0; 172185029Spjdint zfs_mdcomp_disable = 0; 173185029Spjd 174185029SpjdTUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max); 175185029SpjdTUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min); 176185029SpjdTUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit); 177185029SpjdTUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable); 178168473SpjdSYSCTL_DECL(_vfs_zfs); 179185029SpjdSYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0, 180168473Spjd "Maximum ARC size"); 181185029SpjdSYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0, 182168473Spjd "Minimum ARC size"); 183185029SpjdSYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN, 184185029Spjd &zfs_mdcomp_disable, 0, "Disable metadata compression"); 185168404Spjd 186168404Spjd/* 187185029Spjd * Note that buffers can be in one of 6 states: 188168404Spjd * ARC_anon - anonymous (discussed below) 189168404Spjd * ARC_mru - recently used, currently cached 190168404Spjd * ARC_mru_ghost - recentely used, no longer in cache 191168404Spjd * ARC_mfu - frequently used, currently cached 192168404Spjd * ARC_mfu_ghost - frequently used, no longer in cache 193185029Spjd * ARC_l2c_only - exists in L2ARC but not other states 194185029Spjd * When there are no active references to the buffer, they are 195185029Spjd * are linked onto a list in one of these arc states. These are 196185029Spjd * the only buffers that can be evicted or deleted. Within each 197185029Spjd * state there are multiple lists, one for meta-data and one for 198185029Spjd * non-meta-data. Meta-data (indirect blocks, blocks of dnodes, 199185029Spjd * etc.) is tracked separately so that it can be managed more 200185029Spjd * explicitly: favored over data, limited explicitly. 201168404Spjd * 202168404Spjd * Anonymous buffers are buffers that are not associated with 203168404Spjd * a DVA. These are buffers that hold dirty block copies 204168404Spjd * before they are written to stable storage. By definition, 205168404Spjd * they are "ref'd" and are considered part of arc_mru 206168404Spjd * that cannot be freed. Generally, they will aquire a DVA 207168404Spjd * as they are written and migrate onto the arc_mru list. 208185029Spjd * 209185029Spjd * The ARC_l2c_only state is for buffers that are in the second 210185029Spjd * level ARC but no longer in any of the ARC_m* lists. The second 211185029Spjd * level ARC itself may also contain buffers that are in any of 212185029Spjd * the ARC_m* states - meaning that a buffer can exist in two 213185029Spjd * places. The reason for the ARC_l2c_only state is to keep the 214185029Spjd * buffer header in the hash table, so that reads that hit the 215185029Spjd * second level ARC benefit from these fast lookups. 216168404Spjd */ 217168404Spjd 218168404Spjdtypedef struct arc_state { 219185029Spjd list_t arcs_list[ARC_BUFC_NUMTYPES]; /* list of evictable buffers */ 220185029Spjd uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */ 221185029Spjd uint64_t arcs_size; /* total amount of data in this state */ 222168404Spjd kmutex_t arcs_mtx; 223168404Spjd} arc_state_t; 224168404Spjd 225185029Spjd/* The 6 states: */ 226168404Spjdstatic arc_state_t ARC_anon; 227168404Spjdstatic arc_state_t ARC_mru; 228168404Spjdstatic arc_state_t ARC_mru_ghost; 229168404Spjdstatic arc_state_t ARC_mfu; 230168404Spjdstatic arc_state_t ARC_mfu_ghost; 231185029Spjdstatic arc_state_t ARC_l2c_only; 232168404Spjd 233168404Spjdtypedef struct arc_stats { 234168404Spjd kstat_named_t arcstat_hits; 235168404Spjd kstat_named_t arcstat_misses; 236168404Spjd kstat_named_t arcstat_demand_data_hits; 237168404Spjd kstat_named_t arcstat_demand_data_misses; 238168404Spjd kstat_named_t arcstat_demand_metadata_hits; 239168404Spjd kstat_named_t arcstat_demand_metadata_misses; 240168404Spjd kstat_named_t arcstat_prefetch_data_hits; 241168404Spjd kstat_named_t arcstat_prefetch_data_misses; 242168404Spjd kstat_named_t arcstat_prefetch_metadata_hits; 243168404Spjd kstat_named_t arcstat_prefetch_metadata_misses; 244168404Spjd kstat_named_t arcstat_mru_hits; 245168404Spjd kstat_named_t arcstat_mru_ghost_hits; 246168404Spjd kstat_named_t arcstat_mfu_hits; 247168404Spjd kstat_named_t arcstat_mfu_ghost_hits; 248168404Spjd kstat_named_t arcstat_deleted; 249168404Spjd kstat_named_t arcstat_recycle_miss; 250168404Spjd kstat_named_t arcstat_mutex_miss; 251168404Spjd kstat_named_t arcstat_evict_skip; 252168404Spjd kstat_named_t arcstat_hash_elements; 253168404Spjd kstat_named_t arcstat_hash_elements_max; 254168404Spjd kstat_named_t arcstat_hash_collisions; 255168404Spjd kstat_named_t arcstat_hash_chains; 256168404Spjd kstat_named_t arcstat_hash_chain_max; 257168404Spjd kstat_named_t arcstat_p; 258168404Spjd kstat_named_t arcstat_c; 259168404Spjd kstat_named_t arcstat_c_min; 260168404Spjd kstat_named_t arcstat_c_max; 261168404Spjd kstat_named_t arcstat_size; 262185029Spjd kstat_named_t arcstat_hdr_size; 263185029Spjd kstat_named_t arcstat_l2_hits; 264185029Spjd kstat_named_t arcstat_l2_misses; 265185029Spjd kstat_named_t arcstat_l2_feeds; 266185029Spjd kstat_named_t arcstat_l2_rw_clash; 267185029Spjd kstat_named_t arcstat_l2_writes_sent; 268185029Spjd kstat_named_t arcstat_l2_writes_done; 269185029Spjd kstat_named_t arcstat_l2_writes_error; 270185029Spjd kstat_named_t arcstat_l2_writes_hdr_miss; 271185029Spjd kstat_named_t arcstat_l2_evict_lock_retry; 272185029Spjd kstat_named_t arcstat_l2_evict_reading; 273185029Spjd kstat_named_t arcstat_l2_free_on_write; 274185029Spjd kstat_named_t arcstat_l2_abort_lowmem; 275185029Spjd kstat_named_t arcstat_l2_cksum_bad; 276185029Spjd kstat_named_t arcstat_l2_io_error; 277185029Spjd kstat_named_t arcstat_l2_size; 278185029Spjd kstat_named_t arcstat_l2_hdr_size; 279185029Spjd kstat_named_t arcstat_memory_throttle_count; 280168404Spjd} arc_stats_t; 281168404Spjd 282168404Spjdstatic arc_stats_t arc_stats = { 283168404Spjd { "hits", KSTAT_DATA_UINT64 }, 284168404Spjd { "misses", KSTAT_DATA_UINT64 }, 285168404Spjd { "demand_data_hits", KSTAT_DATA_UINT64 }, 286168404Spjd { "demand_data_misses", KSTAT_DATA_UINT64 }, 287168404Spjd { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 288168404Spjd { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 289168404Spjd { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 290168404Spjd { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 291168404Spjd { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 292168404Spjd { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 293168404Spjd { "mru_hits", KSTAT_DATA_UINT64 }, 294168404Spjd { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 295168404Spjd { "mfu_hits", KSTAT_DATA_UINT64 }, 296168404Spjd { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 297168404Spjd { "deleted", KSTAT_DATA_UINT64 }, 298168404Spjd { "recycle_miss", KSTAT_DATA_UINT64 }, 299168404Spjd { "mutex_miss", KSTAT_DATA_UINT64 }, 300168404Spjd { "evict_skip", KSTAT_DATA_UINT64 }, 301168404Spjd { "hash_elements", KSTAT_DATA_UINT64 }, 302168404Spjd { "hash_elements_max", KSTAT_DATA_UINT64 }, 303168404Spjd { "hash_collisions", KSTAT_DATA_UINT64 }, 304168404Spjd { "hash_chains", KSTAT_DATA_UINT64 }, 305168404Spjd { "hash_chain_max", KSTAT_DATA_UINT64 }, 306168404Spjd { "p", KSTAT_DATA_UINT64 }, 307168404Spjd { "c", KSTAT_DATA_UINT64 }, 308168404Spjd { "c_min", KSTAT_DATA_UINT64 }, 309168404Spjd { "c_max", KSTAT_DATA_UINT64 }, 310185029Spjd { "size", KSTAT_DATA_UINT64 }, 311185029Spjd { "hdr_size", KSTAT_DATA_UINT64 }, 312185029Spjd { "l2_hits", KSTAT_DATA_UINT64 }, 313185029Spjd { "l2_misses", KSTAT_DATA_UINT64 }, 314185029Spjd { "l2_feeds", KSTAT_DATA_UINT64 }, 315185029Spjd { "l2_rw_clash", KSTAT_DATA_UINT64 }, 316185029Spjd { "l2_writes_sent", KSTAT_DATA_UINT64 }, 317185029Spjd { "l2_writes_done", KSTAT_DATA_UINT64 }, 318185029Spjd { "l2_writes_error", KSTAT_DATA_UINT64 }, 319185029Spjd { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 320185029Spjd { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 321185029Spjd { "l2_evict_reading", KSTAT_DATA_UINT64 }, 322185029Spjd { "l2_free_on_write", KSTAT_DATA_UINT64 }, 323185029Spjd { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 324185029Spjd { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 325185029Spjd { "l2_io_error", KSTAT_DATA_UINT64 }, 326185029Spjd { "l2_size", KSTAT_DATA_UINT64 }, 327185029Spjd { "l2_hdr_size", KSTAT_DATA_UINT64 }, 328185029Spjd { "memory_throttle_count", KSTAT_DATA_UINT64 } 329168404Spjd}; 330168404Spjd 331168404Spjd#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 332168404Spjd 333168404Spjd#define ARCSTAT_INCR(stat, val) \ 334168404Spjd atomic_add_64(&arc_stats.stat.value.ui64, (val)); 335168404Spjd 336168404Spjd#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 337168404Spjd#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 338168404Spjd 339168404Spjd#define ARCSTAT_MAX(stat, val) { \ 340168404Spjd uint64_t m; \ 341168404Spjd while ((val) > (m = arc_stats.stat.value.ui64) && \ 342168404Spjd (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 343168404Spjd continue; \ 344168404Spjd} 345168404Spjd 346168404Spjd#define ARCSTAT_MAXSTAT(stat) \ 347168404Spjd ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 348168404Spjd 349168404Spjd/* 350168404Spjd * We define a macro to allow ARC hits/misses to be easily broken down by 351168404Spjd * two separate conditions, giving a total of four different subtypes for 352168404Spjd * each of hits and misses (so eight statistics total). 353168404Spjd */ 354168404Spjd#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 355168404Spjd if (cond1) { \ 356168404Spjd if (cond2) { \ 357168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 358168404Spjd } else { \ 359168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 360168404Spjd } \ 361168404Spjd } else { \ 362168404Spjd if (cond2) { \ 363168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 364168404Spjd } else { \ 365168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 366168404Spjd } \ 367168404Spjd } 368168404Spjd 369168404Spjdkstat_t *arc_ksp; 370168404Spjdstatic arc_state_t *arc_anon; 371168404Spjdstatic arc_state_t *arc_mru; 372168404Spjdstatic arc_state_t *arc_mru_ghost; 373168404Spjdstatic arc_state_t *arc_mfu; 374168404Spjdstatic arc_state_t *arc_mfu_ghost; 375185029Spjdstatic arc_state_t *arc_l2c_only; 376168404Spjd 377168404Spjd/* 378168404Spjd * There are several ARC variables that are critical to export as kstats -- 379168404Spjd * but we don't want to have to grovel around in the kstat whenever we wish to 380168404Spjd * manipulate them. For these variables, we therefore define them to be in 381168404Spjd * terms of the statistic variable. This assures that we are not introducing 382168404Spjd * the possibility of inconsistency by having shadow copies of the variables, 383168404Spjd * while still allowing the code to be readable. 384168404Spjd */ 385168404Spjd#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */ 386168404Spjd#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 387168404Spjd#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 388168404Spjd#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 389168404Spjd#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 390168404Spjd 391168404Spjdstatic int arc_no_grow; /* Don't try to grow cache size */ 392168404Spjdstatic uint64_t arc_tempreserve; 393185029Spjdstatic uint64_t arc_meta_used; 394185029Spjdstatic uint64_t arc_meta_limit; 395185029Spjdstatic uint64_t arc_meta_max = 0; 396185029SpjdSYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN, 397185029Spjd &arc_meta_used, 0, "ARC metadata used"); 398185029SpjdSYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN, 399185029Spjd &arc_meta_limit, 0, "ARC metadata limit"); 400168404Spjd 401185029Spjdtypedef struct l2arc_buf_hdr l2arc_buf_hdr_t; 402185029Spjd 403168404Spjdtypedef struct arc_callback arc_callback_t; 404168404Spjd 405168404Spjdstruct arc_callback { 406168404Spjd void *acb_private; 407168404Spjd arc_done_func_t *acb_done; 408168404Spjd arc_buf_t *acb_buf; 409168404Spjd zio_t *acb_zio_dummy; 410168404Spjd arc_callback_t *acb_next; 411168404Spjd}; 412168404Spjd 413168404Spjdtypedef struct arc_write_callback arc_write_callback_t; 414168404Spjd 415168404Spjdstruct arc_write_callback { 416168404Spjd void *awcb_private; 417168404Spjd arc_done_func_t *awcb_ready; 418168404Spjd arc_done_func_t *awcb_done; 419168404Spjd arc_buf_t *awcb_buf; 420168404Spjd}; 421168404Spjd 422168404Spjdstruct arc_buf_hdr { 423168404Spjd /* protected by hash lock */ 424168404Spjd dva_t b_dva; 425168404Spjd uint64_t b_birth; 426168404Spjd uint64_t b_cksum0; 427168404Spjd 428168404Spjd kmutex_t b_freeze_lock; 429168404Spjd zio_cksum_t *b_freeze_cksum; 430168404Spjd 431168404Spjd arc_buf_hdr_t *b_hash_next; 432168404Spjd arc_buf_t *b_buf; 433168404Spjd uint32_t b_flags; 434168404Spjd uint32_t b_datacnt; 435168404Spjd 436168404Spjd arc_callback_t *b_acb; 437168404Spjd kcondvar_t b_cv; 438168404Spjd 439168404Spjd /* immutable */ 440168404Spjd arc_buf_contents_t b_type; 441168404Spjd uint64_t b_size; 442168404Spjd spa_t *b_spa; 443168404Spjd 444168404Spjd /* protected by arc state mutex */ 445168404Spjd arc_state_t *b_state; 446168404Spjd list_node_t b_arc_node; 447168404Spjd 448168404Spjd /* updated atomically */ 449168404Spjd clock_t b_arc_access; 450168404Spjd 451168404Spjd /* self protecting */ 452168404Spjd refcount_t b_refcnt; 453185029Spjd 454185029Spjd l2arc_buf_hdr_t *b_l2hdr; 455185029Spjd list_node_t b_l2node; 456168404Spjd}; 457168404Spjd 458168404Spjdstatic arc_buf_t *arc_eviction_list; 459168404Spjdstatic kmutex_t arc_eviction_mtx; 460168404Spjdstatic arc_buf_hdr_t arc_eviction_hdr; 461168404Spjdstatic void arc_get_data_buf(arc_buf_t *buf); 462168404Spjdstatic void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock); 463185029Spjdstatic int arc_evict_needed(arc_buf_contents_t type); 464185029Spjdstatic void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes); 465168404Spjd 466168404Spjd#define GHOST_STATE(state) \ 467185029Spjd ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 468185029Spjd (state) == arc_l2c_only) 469168404Spjd 470168404Spjd/* 471168404Spjd * Private ARC flags. These flags are private ARC only flags that will show up 472168404Spjd * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can 473168404Spjd * be passed in as arc_flags in things like arc_read. However, these flags 474168404Spjd * should never be passed and should only be set by ARC code. When adding new 475168404Spjd * public flags, make sure not to smash the private ones. 476168404Spjd */ 477168404Spjd 478168404Spjd#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */ 479168404Spjd#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */ 480168404Spjd#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */ 481168404Spjd#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */ 482168404Spjd#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */ 483168404Spjd#define ARC_INDIRECT (1 << 14) /* this is an indirect block */ 484185029Spjd#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */ 485185029Spjd#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */ 486185029Spjd#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */ 487185029Spjd#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */ 488185029Spjd#define ARC_STORED (1 << 19) /* has been store()d to */ 489168404Spjd 490168404Spjd#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE) 491168404Spjd#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS) 492168404Spjd#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR) 493168404Spjd#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ) 494168404Spjd#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE) 495185029Spjd#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS) 496185029Spjd#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE) 497185029Spjd#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \ 498185029Spjd (hdr)->b_l2hdr != NULL) 499185029Spjd#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING) 500185029Spjd#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED) 501185029Spjd#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD) 502168404Spjd 503168404Spjd/* 504185029Spjd * Other sizes 505185029Spjd */ 506185029Spjd 507185029Spjd#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 508185029Spjd#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t)) 509185029Spjd 510185029Spjd/* 511168404Spjd * Hash table routines 512168404Spjd */ 513168404Spjd 514168404Spjd#define HT_LOCK_PAD 128 515168404Spjd 516168404Spjdstruct ht_lock { 517168404Spjd kmutex_t ht_lock; 518168404Spjd#ifdef _KERNEL 519168404Spjd unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 520168404Spjd#endif 521168404Spjd}; 522168404Spjd 523168404Spjd#define BUF_LOCKS 256 524168404Spjdtypedef struct buf_hash_table { 525168404Spjd uint64_t ht_mask; 526168404Spjd arc_buf_hdr_t **ht_table; 527168404Spjd struct ht_lock ht_locks[BUF_LOCKS]; 528168404Spjd} buf_hash_table_t; 529168404Spjd 530168404Spjdstatic buf_hash_table_t buf_hash_table; 531168404Spjd 532168404Spjd#define BUF_HASH_INDEX(spa, dva, birth) \ 533168404Spjd (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 534168404Spjd#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 535168404Spjd#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 536168404Spjd#define HDR_LOCK(buf) \ 537168404Spjd (BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth))) 538168404Spjd 539168404Spjduint64_t zfs_crc64_table[256]; 540168404Spjd 541185029Spjd/* 542185029Spjd * Level 2 ARC 543185029Spjd */ 544185029Spjd 545185029Spjd#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 546185029Spjd#define L2ARC_HEADROOM 4 /* num of writes */ 547185029Spjd#define L2ARC_FEED_SECS 1 /* caching interval */ 548185029Spjd 549185029Spjd#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 550185029Spjd#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 551185029Spjd 552185029Spjd/* 553185029Spjd * L2ARC Performance Tunables 554185029Spjd */ 555185029Spjduint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 556185029Spjduint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 557185029Spjduint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 558185029Spjduint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 559185029Spjdboolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 560185029Spjd 561185029Spjd/* 562185029Spjd * L2ARC Internals 563185029Spjd */ 564185029Spjdtypedef struct l2arc_dev { 565185029Spjd vdev_t *l2ad_vdev; /* vdev */ 566185029Spjd spa_t *l2ad_spa; /* spa */ 567185029Spjd uint64_t l2ad_hand; /* next write location */ 568185029Spjd uint64_t l2ad_write; /* desired write size, bytes */ 569185029Spjd uint64_t l2ad_boost; /* warmup write boost, bytes */ 570185029Spjd uint64_t l2ad_start; /* first addr on device */ 571185029Spjd uint64_t l2ad_end; /* last addr on device */ 572185029Spjd uint64_t l2ad_evict; /* last addr eviction reached */ 573185029Spjd boolean_t l2ad_first; /* first sweep through */ 574185029Spjd list_t *l2ad_buflist; /* buffer list */ 575185029Spjd list_node_t l2ad_node; /* device list node */ 576185029Spjd} l2arc_dev_t; 577185029Spjd 578185029Spjdstatic list_t L2ARC_dev_list; /* device list */ 579185029Spjdstatic list_t *l2arc_dev_list; /* device list pointer */ 580185029Spjdstatic kmutex_t l2arc_dev_mtx; /* device list mutex */ 581185029Spjdstatic l2arc_dev_t *l2arc_dev_last; /* last device used */ 582185029Spjdstatic kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */ 583185029Spjdstatic list_t L2ARC_free_on_write; /* free after write buf list */ 584185029Spjdstatic list_t *l2arc_free_on_write; /* free after write list ptr */ 585185029Spjdstatic kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 586185029Spjdstatic uint64_t l2arc_ndev; /* number of devices */ 587185029Spjd 588185029Spjdtypedef struct l2arc_read_callback { 589185029Spjd arc_buf_t *l2rcb_buf; /* read buffer */ 590185029Spjd spa_t *l2rcb_spa; /* spa */ 591185029Spjd blkptr_t l2rcb_bp; /* original blkptr */ 592185029Spjd zbookmark_t l2rcb_zb; /* original bookmark */ 593185029Spjd int l2rcb_flags; /* original flags */ 594185029Spjd} l2arc_read_callback_t; 595185029Spjd 596185029Spjdtypedef struct l2arc_write_callback { 597185029Spjd l2arc_dev_t *l2wcb_dev; /* device info */ 598185029Spjd arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 599185029Spjd} l2arc_write_callback_t; 600185029Spjd 601185029Spjdstruct l2arc_buf_hdr { 602185029Spjd /* protected by arc_buf_hdr mutex */ 603185029Spjd l2arc_dev_t *b_dev; /* L2ARC device */ 604185029Spjd daddr_t b_daddr; /* disk address, offset byte */ 605185029Spjd}; 606185029Spjd 607185029Spjdtypedef struct l2arc_data_free { 608185029Spjd /* protected by l2arc_free_on_write_mtx */ 609185029Spjd void *l2df_data; 610185029Spjd size_t l2df_size; 611185029Spjd void (*l2df_func)(void *, size_t); 612185029Spjd list_node_t l2df_list_node; 613185029Spjd} l2arc_data_free_t; 614185029Spjd 615185029Spjdstatic kmutex_t l2arc_feed_thr_lock; 616185029Spjdstatic kcondvar_t l2arc_feed_thr_cv; 617185029Spjdstatic uint8_t l2arc_thread_exit; 618185029Spjd 619185029Spjdstatic void l2arc_read_done(zio_t *zio); 620185029Spjdstatic void l2arc_hdr_stat_add(void); 621185029Spjdstatic void l2arc_hdr_stat_remove(void); 622185029Spjd 623168404Spjdstatic uint64_t 624185029Spjdbuf_hash(spa_t *spa, const dva_t *dva, uint64_t birth) 625168404Spjd{ 626168404Spjd uintptr_t spav = (uintptr_t)spa; 627168404Spjd uint8_t *vdva = (uint8_t *)dva; 628168404Spjd uint64_t crc = -1ULL; 629168404Spjd int i; 630168404Spjd 631168404Spjd ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY); 632168404Spjd 633168404Spjd for (i = 0; i < sizeof (dva_t); i++) 634168404Spjd crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF]; 635168404Spjd 636168404Spjd crc ^= (spav>>8) ^ birth; 637168404Spjd 638168404Spjd return (crc); 639168404Spjd} 640168404Spjd 641168404Spjd#define BUF_EMPTY(buf) \ 642168404Spjd ((buf)->b_dva.dva_word[0] == 0 && \ 643168404Spjd (buf)->b_dva.dva_word[1] == 0 && \ 644168404Spjd (buf)->b_birth == 0) 645168404Spjd 646168404Spjd#define BUF_EQUAL(spa, dva, birth, buf) \ 647168404Spjd ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 648168404Spjd ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 649168404Spjd ((buf)->b_birth == birth) && ((buf)->b_spa == spa) 650168404Spjd 651168404Spjdstatic arc_buf_hdr_t * 652185029Spjdbuf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp) 653168404Spjd{ 654168404Spjd uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 655168404Spjd kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 656168404Spjd arc_buf_hdr_t *buf; 657168404Spjd 658168404Spjd mutex_enter(hash_lock); 659168404Spjd for (buf = buf_hash_table.ht_table[idx]; buf != NULL; 660168404Spjd buf = buf->b_hash_next) { 661168404Spjd if (BUF_EQUAL(spa, dva, birth, buf)) { 662168404Spjd *lockp = hash_lock; 663168404Spjd return (buf); 664168404Spjd } 665168404Spjd } 666168404Spjd mutex_exit(hash_lock); 667168404Spjd *lockp = NULL; 668168404Spjd return (NULL); 669168404Spjd} 670168404Spjd 671168404Spjd/* 672168404Spjd * Insert an entry into the hash table. If there is already an element 673168404Spjd * equal to elem in the hash table, then the already existing element 674168404Spjd * will be returned and the new element will not be inserted. 675168404Spjd * Otherwise returns NULL. 676168404Spjd */ 677168404Spjdstatic arc_buf_hdr_t * 678168404Spjdbuf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp) 679168404Spjd{ 680168404Spjd uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 681168404Spjd kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 682168404Spjd arc_buf_hdr_t *fbuf; 683168404Spjd uint32_t i; 684168404Spjd 685168404Spjd ASSERT(!HDR_IN_HASH_TABLE(buf)); 686168404Spjd *lockp = hash_lock; 687168404Spjd mutex_enter(hash_lock); 688168404Spjd for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL; 689168404Spjd fbuf = fbuf->b_hash_next, i++) { 690168404Spjd if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf)) 691168404Spjd return (fbuf); 692168404Spjd } 693168404Spjd 694168404Spjd buf->b_hash_next = buf_hash_table.ht_table[idx]; 695168404Spjd buf_hash_table.ht_table[idx] = buf; 696168404Spjd buf->b_flags |= ARC_IN_HASH_TABLE; 697168404Spjd 698168404Spjd /* collect some hash table performance data */ 699168404Spjd if (i > 0) { 700168404Spjd ARCSTAT_BUMP(arcstat_hash_collisions); 701168404Spjd if (i == 1) 702168404Spjd ARCSTAT_BUMP(arcstat_hash_chains); 703168404Spjd 704168404Spjd ARCSTAT_MAX(arcstat_hash_chain_max, i); 705168404Spjd } 706168404Spjd 707168404Spjd ARCSTAT_BUMP(arcstat_hash_elements); 708168404Spjd ARCSTAT_MAXSTAT(arcstat_hash_elements); 709168404Spjd 710168404Spjd return (NULL); 711168404Spjd} 712168404Spjd 713168404Spjdstatic void 714168404Spjdbuf_hash_remove(arc_buf_hdr_t *buf) 715168404Spjd{ 716168404Spjd arc_buf_hdr_t *fbuf, **bufp; 717168404Spjd uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth); 718168404Spjd 719168404Spjd ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 720168404Spjd ASSERT(HDR_IN_HASH_TABLE(buf)); 721168404Spjd 722168404Spjd bufp = &buf_hash_table.ht_table[idx]; 723168404Spjd while ((fbuf = *bufp) != buf) { 724168404Spjd ASSERT(fbuf != NULL); 725168404Spjd bufp = &fbuf->b_hash_next; 726168404Spjd } 727168404Spjd *bufp = buf->b_hash_next; 728168404Spjd buf->b_hash_next = NULL; 729168404Spjd buf->b_flags &= ~ARC_IN_HASH_TABLE; 730168404Spjd 731168404Spjd /* collect some hash table performance data */ 732168404Spjd ARCSTAT_BUMPDOWN(arcstat_hash_elements); 733168404Spjd 734168404Spjd if (buf_hash_table.ht_table[idx] && 735168404Spjd buf_hash_table.ht_table[idx]->b_hash_next == NULL) 736168404Spjd ARCSTAT_BUMPDOWN(arcstat_hash_chains); 737168404Spjd} 738168404Spjd 739168404Spjd/* 740168404Spjd * Global data structures and functions for the buf kmem cache. 741168404Spjd */ 742168404Spjdstatic kmem_cache_t *hdr_cache; 743168404Spjdstatic kmem_cache_t *buf_cache; 744168404Spjd 745168404Spjdstatic void 746168404Spjdbuf_fini(void) 747168404Spjd{ 748168404Spjd int i; 749168404Spjd 750168404Spjd kmem_free(buf_hash_table.ht_table, 751168404Spjd (buf_hash_table.ht_mask + 1) * sizeof (void *)); 752168404Spjd for (i = 0; i < BUF_LOCKS; i++) 753168404Spjd mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 754168404Spjd kmem_cache_destroy(hdr_cache); 755168404Spjd kmem_cache_destroy(buf_cache); 756168404Spjd} 757168404Spjd 758168404Spjd/* 759168404Spjd * Constructor callback - called when the cache is empty 760168404Spjd * and a new buf is requested. 761168404Spjd */ 762168404Spjd/* ARGSUSED */ 763168404Spjdstatic int 764168404Spjdhdr_cons(void *vbuf, void *unused, int kmflag) 765168404Spjd{ 766168404Spjd arc_buf_hdr_t *buf = vbuf; 767168404Spjd 768168404Spjd bzero(buf, sizeof (arc_buf_hdr_t)); 769168404Spjd refcount_create(&buf->b_refcnt); 770168404Spjd cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL); 771185029Spjd mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 772185029Spjd 773185029Spjd ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 774168404Spjd return (0); 775168404Spjd} 776168404Spjd 777185029Spjd/* ARGSUSED */ 778185029Spjdstatic int 779185029Spjdbuf_cons(void *vbuf, void *unused, int kmflag) 780185029Spjd{ 781185029Spjd arc_buf_t *buf = vbuf; 782185029Spjd 783185029Spjd bzero(buf, sizeof (arc_buf_t)); 784185029Spjd rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL); 785185029Spjd return (0); 786185029Spjd} 787185029Spjd 788168404Spjd/* 789168404Spjd * Destructor callback - called when a cached buf is 790168404Spjd * no longer required. 791168404Spjd */ 792168404Spjd/* ARGSUSED */ 793168404Spjdstatic void 794168404Spjdhdr_dest(void *vbuf, void *unused) 795168404Spjd{ 796168404Spjd arc_buf_hdr_t *buf = vbuf; 797168404Spjd 798168404Spjd refcount_destroy(&buf->b_refcnt); 799168404Spjd cv_destroy(&buf->b_cv); 800185029Spjd mutex_destroy(&buf->b_freeze_lock); 801185029Spjd 802185029Spjd ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 803168404Spjd} 804168404Spjd 805185029Spjd/* ARGSUSED */ 806185029Spjdstatic void 807185029Spjdbuf_dest(void *vbuf, void *unused) 808185029Spjd{ 809185029Spjd arc_buf_t *buf = vbuf; 810185029Spjd 811185029Spjd rw_destroy(&buf->b_lock); 812185029Spjd} 813185029Spjd 814168404Spjd/* 815168404Spjd * Reclaim callback -- invoked when memory is low. 816168404Spjd */ 817168404Spjd/* ARGSUSED */ 818168404Spjdstatic void 819168404Spjdhdr_recl(void *unused) 820168404Spjd{ 821168404Spjd dprintf("hdr_recl called\n"); 822168404Spjd /* 823168404Spjd * umem calls the reclaim func when we destroy the buf cache, 824168404Spjd * which is after we do arc_fini(). 825168404Spjd */ 826168404Spjd if (!arc_dead) 827168404Spjd cv_signal(&arc_reclaim_thr_cv); 828168404Spjd} 829168404Spjd 830168404Spjdstatic void 831168404Spjdbuf_init(void) 832168404Spjd{ 833168404Spjd uint64_t *ct; 834168404Spjd uint64_t hsize = 1ULL << 12; 835168404Spjd int i, j; 836168404Spjd 837168404Spjd /* 838168404Spjd * The hash table is big enough to fill all of physical memory 839168404Spjd * with an average 64K block size. The table will take up 840168404Spjd * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers). 841168404Spjd */ 842168696Spjd while (hsize * 65536 < (uint64_t)physmem * PAGESIZE) 843168404Spjd hsize <<= 1; 844168404Spjdretry: 845168404Spjd buf_hash_table.ht_mask = hsize - 1; 846168404Spjd buf_hash_table.ht_table = 847168404Spjd kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 848168404Spjd if (buf_hash_table.ht_table == NULL) { 849168404Spjd ASSERT(hsize > (1ULL << 8)); 850168404Spjd hsize >>= 1; 851168404Spjd goto retry; 852168404Spjd } 853168404Spjd 854168404Spjd hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t), 855168404Spjd 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0); 856168404Spjd buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 857185029Spjd 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 858168404Spjd 859168404Spjd for (i = 0; i < 256; i++) 860168404Spjd for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 861168404Spjd *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 862168404Spjd 863168404Spjd for (i = 0; i < BUF_LOCKS; i++) { 864168404Spjd mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 865168404Spjd NULL, MUTEX_DEFAULT, NULL); 866168404Spjd } 867168404Spjd} 868168404Spjd 869168404Spjd#define ARC_MINTIME (hz>>4) /* 62 ms */ 870168404Spjd 871168404Spjdstatic void 872168404Spjdarc_cksum_verify(arc_buf_t *buf) 873168404Spjd{ 874168404Spjd zio_cksum_t zc; 875168404Spjd 876168404Spjd if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 877168404Spjd return; 878168404Spjd 879168404Spjd mutex_enter(&buf->b_hdr->b_freeze_lock); 880168404Spjd if (buf->b_hdr->b_freeze_cksum == NULL || 881168404Spjd (buf->b_hdr->b_flags & ARC_IO_ERROR)) { 882168404Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 883168404Spjd return; 884168404Spjd } 885168404Spjd fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 886168404Spjd if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc)) 887168404Spjd panic("buffer modified while frozen!"); 888168404Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 889168404Spjd} 890168404Spjd 891185029Spjdstatic int 892185029Spjdarc_cksum_equal(arc_buf_t *buf) 893185029Spjd{ 894185029Spjd zio_cksum_t zc; 895185029Spjd int equal; 896185029Spjd 897185029Spjd mutex_enter(&buf->b_hdr->b_freeze_lock); 898185029Spjd fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc); 899185029Spjd equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc); 900185029Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 901185029Spjd 902185029Spjd return (equal); 903185029Spjd} 904185029Spjd 905168404Spjdstatic void 906185029Spjdarc_cksum_compute(arc_buf_t *buf, boolean_t force) 907168404Spjd{ 908185029Spjd if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY)) 909168404Spjd return; 910168404Spjd 911168404Spjd mutex_enter(&buf->b_hdr->b_freeze_lock); 912168404Spjd if (buf->b_hdr->b_freeze_cksum != NULL) { 913168404Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 914168404Spjd return; 915168404Spjd } 916168404Spjd buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP); 917168404Spjd fletcher_2_native(buf->b_data, buf->b_hdr->b_size, 918168404Spjd buf->b_hdr->b_freeze_cksum); 919168404Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 920168404Spjd} 921168404Spjd 922168404Spjdvoid 923168404Spjdarc_buf_thaw(arc_buf_t *buf) 924168404Spjd{ 925185029Spjd if (zfs_flags & ZFS_DEBUG_MODIFY) { 926185029Spjd if (buf->b_hdr->b_state != arc_anon) 927185029Spjd panic("modifying non-anon buffer!"); 928185029Spjd if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS) 929185029Spjd panic("modifying buffer while i/o in progress!"); 930185029Spjd arc_cksum_verify(buf); 931185029Spjd } 932168404Spjd 933168404Spjd mutex_enter(&buf->b_hdr->b_freeze_lock); 934168404Spjd if (buf->b_hdr->b_freeze_cksum != NULL) { 935168404Spjd kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 936168404Spjd buf->b_hdr->b_freeze_cksum = NULL; 937168404Spjd } 938168404Spjd mutex_exit(&buf->b_hdr->b_freeze_lock); 939168404Spjd} 940168404Spjd 941168404Spjdvoid 942168404Spjdarc_buf_freeze(arc_buf_t *buf) 943168404Spjd{ 944168404Spjd if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 945168404Spjd return; 946168404Spjd 947168404Spjd ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 948168404Spjd buf->b_hdr->b_state == arc_anon); 949185029Spjd arc_cksum_compute(buf, B_FALSE); 950168404Spjd} 951168404Spjd 952168404Spjdstatic void 953168404Spjdadd_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 954168404Spjd{ 955168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 956168404Spjd 957168404Spjd if ((refcount_add(&ab->b_refcnt, tag) == 1) && 958168404Spjd (ab->b_state != arc_anon)) { 959168404Spjd uint64_t delta = ab->b_size * ab->b_datacnt; 960185029Spjd list_t *list = &ab->b_state->arcs_list[ab->b_type]; 961185029Spjd uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type]; 962168404Spjd 963168404Spjd ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 964168404Spjd mutex_enter(&ab->b_state->arcs_mtx); 965168404Spjd ASSERT(list_link_active(&ab->b_arc_node)); 966185029Spjd list_remove(list, ab); 967168404Spjd if (GHOST_STATE(ab->b_state)) { 968168404Spjd ASSERT3U(ab->b_datacnt, ==, 0); 969168404Spjd ASSERT3P(ab->b_buf, ==, NULL); 970168404Spjd delta = ab->b_size; 971168404Spjd } 972168404Spjd ASSERT(delta > 0); 973185029Spjd ASSERT3U(*size, >=, delta); 974185029Spjd atomic_add_64(size, -delta); 975168404Spjd mutex_exit(&ab->b_state->arcs_mtx); 976185029Spjd /* remove the prefetch flag if we get a reference */ 977168404Spjd if (ab->b_flags & ARC_PREFETCH) 978168404Spjd ab->b_flags &= ~ARC_PREFETCH; 979168404Spjd } 980168404Spjd} 981168404Spjd 982168404Spjdstatic int 983168404Spjdremove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 984168404Spjd{ 985168404Spjd int cnt; 986168404Spjd arc_state_t *state = ab->b_state; 987168404Spjd 988168404Spjd ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 989168404Spjd ASSERT(!GHOST_STATE(state)); 990168404Spjd 991168404Spjd if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 992168404Spjd (state != arc_anon)) { 993185029Spjd uint64_t *size = &state->arcs_lsize[ab->b_type]; 994185029Spjd 995168404Spjd ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 996168404Spjd mutex_enter(&state->arcs_mtx); 997168404Spjd ASSERT(!list_link_active(&ab->b_arc_node)); 998185029Spjd list_insert_head(&state->arcs_list[ab->b_type], ab); 999168404Spjd ASSERT(ab->b_datacnt > 0); 1000185029Spjd atomic_add_64(size, ab->b_size * ab->b_datacnt); 1001168404Spjd mutex_exit(&state->arcs_mtx); 1002168404Spjd } 1003168404Spjd return (cnt); 1004168404Spjd} 1005168404Spjd 1006168404Spjd/* 1007168404Spjd * Move the supplied buffer to the indicated state. The mutex 1008168404Spjd * for the buffer must be held by the caller. 1009168404Spjd */ 1010168404Spjdstatic void 1011168404Spjdarc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 1012168404Spjd{ 1013168404Spjd arc_state_t *old_state = ab->b_state; 1014168404Spjd int64_t refcnt = refcount_count(&ab->b_refcnt); 1015168404Spjd uint64_t from_delta, to_delta; 1016168404Spjd 1017168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 1018168404Spjd ASSERT(new_state != old_state); 1019168404Spjd ASSERT(refcnt == 0 || ab->b_datacnt > 0); 1020168404Spjd ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 1021168404Spjd 1022168404Spjd from_delta = to_delta = ab->b_datacnt * ab->b_size; 1023168404Spjd 1024168404Spjd /* 1025168404Spjd * If this buffer is evictable, transfer it from the 1026168404Spjd * old state list to the new state list. 1027168404Spjd */ 1028168404Spjd if (refcnt == 0) { 1029168404Spjd if (old_state != arc_anon) { 1030168404Spjd int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx); 1031185029Spjd uint64_t *size = &old_state->arcs_lsize[ab->b_type]; 1032168404Spjd 1033168404Spjd if (use_mutex) 1034168404Spjd mutex_enter(&old_state->arcs_mtx); 1035168404Spjd 1036168404Spjd ASSERT(list_link_active(&ab->b_arc_node)); 1037185029Spjd list_remove(&old_state->arcs_list[ab->b_type], ab); 1038168404Spjd 1039168404Spjd /* 1040168404Spjd * If prefetching out of the ghost cache, 1041168404Spjd * we will have a non-null datacnt. 1042168404Spjd */ 1043168404Spjd if (GHOST_STATE(old_state) && ab->b_datacnt == 0) { 1044168404Spjd /* ghost elements have a ghost size */ 1045168404Spjd ASSERT(ab->b_buf == NULL); 1046168404Spjd from_delta = ab->b_size; 1047168404Spjd } 1048185029Spjd ASSERT3U(*size, >=, from_delta); 1049185029Spjd atomic_add_64(size, -from_delta); 1050168404Spjd 1051168404Spjd if (use_mutex) 1052168404Spjd mutex_exit(&old_state->arcs_mtx); 1053168404Spjd } 1054168404Spjd if (new_state != arc_anon) { 1055168404Spjd int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx); 1056185029Spjd uint64_t *size = &new_state->arcs_lsize[ab->b_type]; 1057168404Spjd 1058168404Spjd if (use_mutex) 1059168404Spjd mutex_enter(&new_state->arcs_mtx); 1060168404Spjd 1061185029Spjd list_insert_head(&new_state->arcs_list[ab->b_type], ab); 1062168404Spjd 1063168404Spjd /* ghost elements have a ghost size */ 1064168404Spjd if (GHOST_STATE(new_state)) { 1065168404Spjd ASSERT(ab->b_datacnt == 0); 1066168404Spjd ASSERT(ab->b_buf == NULL); 1067168404Spjd to_delta = ab->b_size; 1068168404Spjd } 1069185029Spjd atomic_add_64(size, to_delta); 1070168404Spjd 1071168404Spjd if (use_mutex) 1072168404Spjd mutex_exit(&new_state->arcs_mtx); 1073168404Spjd } 1074168404Spjd } 1075168404Spjd 1076168404Spjd ASSERT(!BUF_EMPTY(ab)); 1077185029Spjd if (new_state == arc_anon) { 1078168404Spjd buf_hash_remove(ab); 1079168404Spjd } 1080168404Spjd 1081168404Spjd /* adjust state sizes */ 1082168404Spjd if (to_delta) 1083168404Spjd atomic_add_64(&new_state->arcs_size, to_delta); 1084168404Spjd if (from_delta) { 1085168404Spjd ASSERT3U(old_state->arcs_size, >=, from_delta); 1086168404Spjd atomic_add_64(&old_state->arcs_size, -from_delta); 1087168404Spjd } 1088168404Spjd ab->b_state = new_state; 1089185029Spjd 1090185029Spjd /* adjust l2arc hdr stats */ 1091185029Spjd if (new_state == arc_l2c_only) 1092185029Spjd l2arc_hdr_stat_add(); 1093185029Spjd else if (old_state == arc_l2c_only) 1094185029Spjd l2arc_hdr_stat_remove(); 1095168404Spjd} 1096168404Spjd 1097185029Spjdvoid 1098185029Spjdarc_space_consume(uint64_t space) 1099185029Spjd{ 1100185029Spjd atomic_add_64(&arc_meta_used, space); 1101185029Spjd atomic_add_64(&arc_size, space); 1102185029Spjd} 1103185029Spjd 1104185029Spjdvoid 1105185029Spjdarc_space_return(uint64_t space) 1106185029Spjd{ 1107185029Spjd ASSERT(arc_meta_used >= space); 1108185029Spjd if (arc_meta_max < arc_meta_used) 1109185029Spjd arc_meta_max = arc_meta_used; 1110185029Spjd atomic_add_64(&arc_meta_used, -space); 1111185029Spjd ASSERT(arc_size >= space); 1112185029Spjd atomic_add_64(&arc_size, -space); 1113185029Spjd} 1114185029Spjd 1115185029Spjdvoid * 1116185029Spjdarc_data_buf_alloc(uint64_t size) 1117185029Spjd{ 1118185029Spjd if (arc_evict_needed(ARC_BUFC_DATA)) 1119185029Spjd cv_signal(&arc_reclaim_thr_cv); 1120185029Spjd atomic_add_64(&arc_size, size); 1121185029Spjd return (zio_data_buf_alloc(size)); 1122185029Spjd} 1123185029Spjd 1124185029Spjdvoid 1125185029Spjdarc_data_buf_free(void *buf, uint64_t size) 1126185029Spjd{ 1127185029Spjd zio_data_buf_free(buf, size); 1128185029Spjd ASSERT(arc_size >= size); 1129185029Spjd atomic_add_64(&arc_size, -size); 1130185029Spjd} 1131185029Spjd 1132168404Spjdarc_buf_t * 1133168404Spjdarc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type) 1134168404Spjd{ 1135168404Spjd arc_buf_hdr_t *hdr; 1136168404Spjd arc_buf_t *buf; 1137168404Spjd 1138168404Spjd ASSERT3U(size, >, 0); 1139185029Spjd hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 1140168404Spjd ASSERT(BUF_EMPTY(hdr)); 1141168404Spjd hdr->b_size = size; 1142168404Spjd hdr->b_type = type; 1143168404Spjd hdr->b_spa = spa; 1144168404Spjd hdr->b_state = arc_anon; 1145168404Spjd hdr->b_arc_access = 0; 1146185029Spjd buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1147168404Spjd buf->b_hdr = hdr; 1148168404Spjd buf->b_data = NULL; 1149168404Spjd buf->b_efunc = NULL; 1150168404Spjd buf->b_private = NULL; 1151168404Spjd buf->b_next = NULL; 1152168404Spjd hdr->b_buf = buf; 1153168404Spjd arc_get_data_buf(buf); 1154168404Spjd hdr->b_datacnt = 1; 1155168404Spjd hdr->b_flags = 0; 1156168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1157168404Spjd (void) refcount_add(&hdr->b_refcnt, tag); 1158168404Spjd 1159168404Spjd return (buf); 1160168404Spjd} 1161168404Spjd 1162168404Spjdstatic arc_buf_t * 1163168404Spjdarc_buf_clone(arc_buf_t *from) 1164168404Spjd{ 1165168404Spjd arc_buf_t *buf; 1166168404Spjd arc_buf_hdr_t *hdr = from->b_hdr; 1167168404Spjd uint64_t size = hdr->b_size; 1168168404Spjd 1169185029Spjd buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 1170168404Spjd buf->b_hdr = hdr; 1171168404Spjd buf->b_data = NULL; 1172168404Spjd buf->b_efunc = NULL; 1173168404Spjd buf->b_private = NULL; 1174168404Spjd buf->b_next = hdr->b_buf; 1175168404Spjd hdr->b_buf = buf; 1176168404Spjd arc_get_data_buf(buf); 1177168404Spjd bcopy(from->b_data, buf->b_data, size); 1178168404Spjd hdr->b_datacnt += 1; 1179168404Spjd return (buf); 1180168404Spjd} 1181168404Spjd 1182168404Spjdvoid 1183168404Spjdarc_buf_add_ref(arc_buf_t *buf, void* tag) 1184168404Spjd{ 1185168404Spjd arc_buf_hdr_t *hdr; 1186168404Spjd kmutex_t *hash_lock; 1187168404Spjd 1188168404Spjd /* 1189185029Spjd * Check to see if this buffer is evicted. Callers 1190185029Spjd * must verify b_data != NULL to know if the add_ref 1191185029Spjd * was successful. 1192168404Spjd */ 1193185029Spjd rw_enter(&buf->b_lock, RW_READER); 1194185029Spjd if (buf->b_data == NULL) { 1195185029Spjd rw_exit(&buf->b_lock); 1196168404Spjd return; 1197168404Spjd } 1198185029Spjd hdr = buf->b_hdr; 1199185029Spjd ASSERT(hdr != NULL); 1200168404Spjd hash_lock = HDR_LOCK(hdr); 1201168404Spjd mutex_enter(hash_lock); 1202185029Spjd rw_exit(&buf->b_lock); 1203168404Spjd 1204168404Spjd ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1205168404Spjd add_reference(hdr, hash_lock, tag); 1206168404Spjd arc_access(hdr, hash_lock); 1207168404Spjd mutex_exit(hash_lock); 1208168404Spjd ARCSTAT_BUMP(arcstat_hits); 1209168404Spjd ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1210168404Spjd demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1211168404Spjd data, metadata, hits); 1212168404Spjd} 1213168404Spjd 1214185029Spjd/* 1215185029Spjd * Free the arc data buffer. If it is an l2arc write in progress, 1216185029Spjd * the buffer is placed on l2arc_free_on_write to be freed later. 1217185029Spjd */ 1218168404Spjdstatic void 1219185029Spjdarc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t), 1220185029Spjd void *data, size_t size) 1221185029Spjd{ 1222185029Spjd if (HDR_L2_WRITING(hdr)) { 1223185029Spjd l2arc_data_free_t *df; 1224185029Spjd df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1225185029Spjd df->l2df_data = data; 1226185029Spjd df->l2df_size = size; 1227185029Spjd df->l2df_func = free_func; 1228185029Spjd mutex_enter(&l2arc_free_on_write_mtx); 1229185029Spjd list_insert_head(l2arc_free_on_write, df); 1230185029Spjd mutex_exit(&l2arc_free_on_write_mtx); 1231185029Spjd ARCSTAT_BUMP(arcstat_l2_free_on_write); 1232185029Spjd } else { 1233185029Spjd free_func(data, size); 1234185029Spjd } 1235185029Spjd} 1236185029Spjd 1237185029Spjdstatic void 1238168404Spjdarc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1239168404Spjd{ 1240168404Spjd arc_buf_t **bufp; 1241168404Spjd 1242168404Spjd /* free up data associated with the buf */ 1243168404Spjd if (buf->b_data) { 1244168404Spjd arc_state_t *state = buf->b_hdr->b_state; 1245168404Spjd uint64_t size = buf->b_hdr->b_size; 1246168404Spjd arc_buf_contents_t type = buf->b_hdr->b_type; 1247168404Spjd 1248168404Spjd arc_cksum_verify(buf); 1249168404Spjd if (!recycle) { 1250168404Spjd if (type == ARC_BUFC_METADATA) { 1251185029Spjd arc_buf_data_free(buf->b_hdr, zio_buf_free, 1252185029Spjd buf->b_data, size); 1253185029Spjd arc_space_return(size); 1254168404Spjd } else { 1255168404Spjd ASSERT(type == ARC_BUFC_DATA); 1256185029Spjd arc_buf_data_free(buf->b_hdr, 1257185029Spjd zio_data_buf_free, buf->b_data, size); 1258185029Spjd atomic_add_64(&arc_size, -size); 1259168404Spjd } 1260168404Spjd } 1261168404Spjd if (list_link_active(&buf->b_hdr->b_arc_node)) { 1262185029Spjd uint64_t *cnt = &state->arcs_lsize[type]; 1263185029Spjd 1264168404Spjd ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1265168404Spjd ASSERT(state != arc_anon); 1266185029Spjd 1267185029Spjd ASSERT3U(*cnt, >=, size); 1268185029Spjd atomic_add_64(cnt, -size); 1269168404Spjd } 1270168404Spjd ASSERT3U(state->arcs_size, >=, size); 1271168404Spjd atomic_add_64(&state->arcs_size, -size); 1272168404Spjd buf->b_data = NULL; 1273168404Spjd ASSERT(buf->b_hdr->b_datacnt > 0); 1274168404Spjd buf->b_hdr->b_datacnt -= 1; 1275168404Spjd } 1276168404Spjd 1277168404Spjd /* only remove the buf if requested */ 1278168404Spjd if (!all) 1279168404Spjd return; 1280168404Spjd 1281168404Spjd /* remove the buf from the hdr list */ 1282168404Spjd for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1283168404Spjd continue; 1284168404Spjd *bufp = buf->b_next; 1285168404Spjd 1286168404Spjd ASSERT(buf->b_efunc == NULL); 1287168404Spjd 1288168404Spjd /* clean up the buf */ 1289168404Spjd buf->b_hdr = NULL; 1290168404Spjd kmem_cache_free(buf_cache, buf); 1291168404Spjd} 1292168404Spjd 1293168404Spjdstatic void 1294168404Spjdarc_hdr_destroy(arc_buf_hdr_t *hdr) 1295168404Spjd{ 1296168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1297168404Spjd ASSERT3P(hdr->b_state, ==, arc_anon); 1298168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1299185029Spjd ASSERT(!(hdr->b_flags & ARC_STORED)); 1300168404Spjd 1301185029Spjd if (hdr->b_l2hdr != NULL) { 1302185029Spjd if (!MUTEX_HELD(&l2arc_buflist_mtx)) { 1303185029Spjd /* 1304185029Spjd * To prevent arc_free() and l2arc_evict() from 1305185029Spjd * attempting to free the same buffer at the same time, 1306185029Spjd * a FREE_IN_PROGRESS flag is given to arc_free() to 1307185029Spjd * give it priority. l2arc_evict() can't destroy this 1308185029Spjd * header while we are waiting on l2arc_buflist_mtx. 1309185029Spjd * 1310185029Spjd * The hdr may be removed from l2ad_buflist before we 1311185029Spjd * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1312185029Spjd */ 1313185029Spjd mutex_enter(&l2arc_buflist_mtx); 1314185029Spjd if (hdr->b_l2hdr != NULL) { 1315185029Spjd list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, 1316185029Spjd hdr); 1317185029Spjd } 1318185029Spjd mutex_exit(&l2arc_buflist_mtx); 1319185029Spjd } else { 1320185029Spjd list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr); 1321185029Spjd } 1322185029Spjd ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1323185029Spjd kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t)); 1324185029Spjd if (hdr->b_state == arc_l2c_only) 1325185029Spjd l2arc_hdr_stat_remove(); 1326185029Spjd hdr->b_l2hdr = NULL; 1327185029Spjd } 1328185029Spjd 1329168404Spjd if (!BUF_EMPTY(hdr)) { 1330168404Spjd ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1331168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 1332168404Spjd hdr->b_birth = 0; 1333168404Spjd hdr->b_cksum0 = 0; 1334168404Spjd } 1335168404Spjd while (hdr->b_buf) { 1336168404Spjd arc_buf_t *buf = hdr->b_buf; 1337168404Spjd 1338168404Spjd if (buf->b_efunc) { 1339168404Spjd mutex_enter(&arc_eviction_mtx); 1340185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 1341168404Spjd ASSERT(buf->b_hdr != NULL); 1342168404Spjd arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1343168404Spjd hdr->b_buf = buf->b_next; 1344168404Spjd buf->b_hdr = &arc_eviction_hdr; 1345168404Spjd buf->b_next = arc_eviction_list; 1346168404Spjd arc_eviction_list = buf; 1347185029Spjd rw_exit(&buf->b_lock); 1348168404Spjd mutex_exit(&arc_eviction_mtx); 1349168404Spjd } else { 1350168404Spjd arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1351168404Spjd } 1352168404Spjd } 1353168404Spjd if (hdr->b_freeze_cksum != NULL) { 1354168404Spjd kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1355168404Spjd hdr->b_freeze_cksum = NULL; 1356168404Spjd } 1357168404Spjd 1358168404Spjd ASSERT(!list_link_active(&hdr->b_arc_node)); 1359168404Spjd ASSERT3P(hdr->b_hash_next, ==, NULL); 1360168404Spjd ASSERT3P(hdr->b_acb, ==, NULL); 1361168404Spjd kmem_cache_free(hdr_cache, hdr); 1362168404Spjd} 1363168404Spjd 1364168404Spjdvoid 1365168404Spjdarc_buf_free(arc_buf_t *buf, void *tag) 1366168404Spjd{ 1367168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 1368168404Spjd int hashed = hdr->b_state != arc_anon; 1369168404Spjd 1370168404Spjd ASSERT(buf->b_efunc == NULL); 1371168404Spjd ASSERT(buf->b_data != NULL); 1372168404Spjd 1373168404Spjd if (hashed) { 1374168404Spjd kmutex_t *hash_lock = HDR_LOCK(hdr); 1375168404Spjd 1376168404Spjd mutex_enter(hash_lock); 1377168404Spjd (void) remove_reference(hdr, hash_lock, tag); 1378168404Spjd if (hdr->b_datacnt > 1) 1379168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1380168404Spjd else 1381168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 1382168404Spjd mutex_exit(hash_lock); 1383168404Spjd } else if (HDR_IO_IN_PROGRESS(hdr)) { 1384168404Spjd int destroy_hdr; 1385168404Spjd /* 1386168404Spjd * We are in the middle of an async write. Don't destroy 1387168404Spjd * this buffer unless the write completes before we finish 1388168404Spjd * decrementing the reference count. 1389168404Spjd */ 1390168404Spjd mutex_enter(&arc_eviction_mtx); 1391168404Spjd (void) remove_reference(hdr, NULL, tag); 1392168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1393168404Spjd destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1394168404Spjd mutex_exit(&arc_eviction_mtx); 1395168404Spjd if (destroy_hdr) 1396168404Spjd arc_hdr_destroy(hdr); 1397168404Spjd } else { 1398168404Spjd if (remove_reference(hdr, NULL, tag) > 0) { 1399168404Spjd ASSERT(HDR_IO_ERROR(hdr)); 1400168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1401168404Spjd } else { 1402168404Spjd arc_hdr_destroy(hdr); 1403168404Spjd } 1404168404Spjd } 1405168404Spjd} 1406168404Spjd 1407168404Spjdint 1408168404Spjdarc_buf_remove_ref(arc_buf_t *buf, void* tag) 1409168404Spjd{ 1410168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 1411168404Spjd kmutex_t *hash_lock = HDR_LOCK(hdr); 1412168404Spjd int no_callback = (buf->b_efunc == NULL); 1413168404Spjd 1414168404Spjd if (hdr->b_state == arc_anon) { 1415168404Spjd arc_buf_free(buf, tag); 1416168404Spjd return (no_callback); 1417168404Spjd } 1418168404Spjd 1419168404Spjd mutex_enter(hash_lock); 1420168404Spjd ASSERT(hdr->b_state != arc_anon); 1421168404Spjd ASSERT(buf->b_data != NULL); 1422168404Spjd 1423168404Spjd (void) remove_reference(hdr, hash_lock, tag); 1424168404Spjd if (hdr->b_datacnt > 1) { 1425168404Spjd if (no_callback) 1426168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1427168404Spjd } else if (no_callback) { 1428168404Spjd ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1429168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 1430168404Spjd } 1431168404Spjd ASSERT(no_callback || hdr->b_datacnt > 1 || 1432168404Spjd refcount_is_zero(&hdr->b_refcnt)); 1433168404Spjd mutex_exit(hash_lock); 1434168404Spjd return (no_callback); 1435168404Spjd} 1436168404Spjd 1437168404Spjdint 1438168404Spjdarc_buf_size(arc_buf_t *buf) 1439168404Spjd{ 1440168404Spjd return (buf->b_hdr->b_size); 1441168404Spjd} 1442168404Spjd 1443168404Spjd/* 1444168404Spjd * Evict buffers from list until we've removed the specified number of 1445168404Spjd * bytes. Move the removed buffers to the appropriate evict state. 1446168404Spjd * If the recycle flag is set, then attempt to "recycle" a buffer: 1447168404Spjd * - look for a buffer to evict that is `bytes' long. 1448168404Spjd * - return the data block from this buffer rather than freeing it. 1449168404Spjd * This flag is used by callers that are trying to make space for a 1450168404Spjd * new buffer in a full arc cache. 1451185029Spjd * 1452185029Spjd * This function makes a "best effort". It skips over any buffers 1453185029Spjd * it can't get a hash_lock on, and so may not catch all candidates. 1454185029Spjd * It may also return without evicting as much space as requested. 1455168404Spjd */ 1456168404Spjdstatic void * 1457185029Spjdarc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle, 1458168404Spjd arc_buf_contents_t type) 1459168404Spjd{ 1460168404Spjd arc_state_t *evicted_state; 1461168404Spjd uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1462168404Spjd arc_buf_hdr_t *ab, *ab_prev = NULL; 1463185029Spjd list_t *list = &state->arcs_list[type]; 1464168404Spjd kmutex_t *hash_lock; 1465168404Spjd boolean_t have_lock; 1466168404Spjd void *stolen = NULL; 1467168404Spjd 1468168404Spjd ASSERT(state == arc_mru || state == arc_mfu); 1469168404Spjd 1470168404Spjd evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1471168404Spjd 1472168404Spjd mutex_enter(&state->arcs_mtx); 1473168404Spjd mutex_enter(&evicted_state->arcs_mtx); 1474168404Spjd 1475185029Spjd for (ab = list_tail(list); ab; ab = ab_prev) { 1476185029Spjd ab_prev = list_prev(list, ab); 1477168404Spjd /* prefetch buffers have a minimum lifespan */ 1478168404Spjd if (HDR_IO_IN_PROGRESS(ab) || 1479185029Spjd (spa && ab->b_spa != spa) || 1480168404Spjd (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1481174049Sjb LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) { 1482168404Spjd skipped++; 1483168404Spjd continue; 1484168404Spjd } 1485168404Spjd /* "lookahead" for better eviction candidate */ 1486168404Spjd if (recycle && ab->b_size != bytes && 1487168404Spjd ab_prev && ab_prev->b_size == bytes) 1488168404Spjd continue; 1489168404Spjd hash_lock = HDR_LOCK(ab); 1490168404Spjd have_lock = MUTEX_HELD(hash_lock); 1491168404Spjd if (have_lock || mutex_tryenter(hash_lock)) { 1492168404Spjd ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0); 1493168404Spjd ASSERT(ab->b_datacnt > 0); 1494168404Spjd while (ab->b_buf) { 1495168404Spjd arc_buf_t *buf = ab->b_buf; 1496185029Spjd if (!rw_tryenter(&buf->b_lock, RW_WRITER)) { 1497185029Spjd missed += 1; 1498185029Spjd break; 1499185029Spjd } 1500168404Spjd if (buf->b_data) { 1501168404Spjd bytes_evicted += ab->b_size; 1502168404Spjd if (recycle && ab->b_type == type && 1503185029Spjd ab->b_size == bytes && 1504185029Spjd !HDR_L2_WRITING(ab)) { 1505168404Spjd stolen = buf->b_data; 1506168404Spjd recycle = FALSE; 1507168404Spjd } 1508168404Spjd } 1509168404Spjd if (buf->b_efunc) { 1510168404Spjd mutex_enter(&arc_eviction_mtx); 1511168404Spjd arc_buf_destroy(buf, 1512168404Spjd buf->b_data == stolen, FALSE); 1513168404Spjd ab->b_buf = buf->b_next; 1514168404Spjd buf->b_hdr = &arc_eviction_hdr; 1515168404Spjd buf->b_next = arc_eviction_list; 1516168404Spjd arc_eviction_list = buf; 1517168404Spjd mutex_exit(&arc_eviction_mtx); 1518185029Spjd rw_exit(&buf->b_lock); 1519168404Spjd } else { 1520185029Spjd rw_exit(&buf->b_lock); 1521168404Spjd arc_buf_destroy(buf, 1522168404Spjd buf->b_data == stolen, TRUE); 1523168404Spjd } 1524168404Spjd } 1525185029Spjd if (ab->b_datacnt == 0) { 1526185029Spjd arc_change_state(evicted_state, ab, hash_lock); 1527185029Spjd ASSERT(HDR_IN_HASH_TABLE(ab)); 1528185029Spjd ab->b_flags |= ARC_IN_HASH_TABLE; 1529185029Spjd ab->b_flags &= ~ARC_BUF_AVAILABLE; 1530185029Spjd DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 1531185029Spjd } 1532168404Spjd if (!have_lock) 1533168404Spjd mutex_exit(hash_lock); 1534168404Spjd if (bytes >= 0 && bytes_evicted >= bytes) 1535168404Spjd break; 1536168404Spjd } else { 1537168404Spjd missed += 1; 1538168404Spjd } 1539168404Spjd } 1540168404Spjd 1541168404Spjd mutex_exit(&evicted_state->arcs_mtx); 1542168404Spjd mutex_exit(&state->arcs_mtx); 1543168404Spjd 1544168404Spjd if (bytes_evicted < bytes) 1545168404Spjd dprintf("only evicted %lld bytes from %x", 1546168404Spjd (longlong_t)bytes_evicted, state); 1547168404Spjd 1548168404Spjd if (skipped) 1549168404Spjd ARCSTAT_INCR(arcstat_evict_skip, skipped); 1550168404Spjd 1551168404Spjd if (missed) 1552168404Spjd ARCSTAT_INCR(arcstat_mutex_miss, missed); 1553168404Spjd 1554185029Spjd /* 1555185029Spjd * We have just evicted some date into the ghost state, make 1556185029Spjd * sure we also adjust the ghost state size if necessary. 1557185029Spjd */ 1558185029Spjd if (arc_no_grow && 1559185029Spjd arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 1560185029Spjd int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 1561185029Spjd arc_mru_ghost->arcs_size - arc_c; 1562185029Spjd 1563185029Spjd if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 1564185029Spjd int64_t todelete = 1565185029Spjd MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 1566185029Spjd arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1567185029Spjd } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 1568185029Spjd int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 1569185029Spjd arc_mru_ghost->arcs_size + 1570185029Spjd arc_mfu_ghost->arcs_size - arc_c); 1571185029Spjd arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1572185029Spjd } 1573185029Spjd } 1574185029Spjd 1575168404Spjd return (stolen); 1576168404Spjd} 1577168404Spjd 1578168404Spjd/* 1579168404Spjd * Remove buffers from list until we've removed the specified number of 1580168404Spjd * bytes. Destroy the buffers that are removed. 1581168404Spjd */ 1582168404Spjdstatic void 1583185029Spjdarc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes) 1584168404Spjd{ 1585168404Spjd arc_buf_hdr_t *ab, *ab_prev; 1586185029Spjd list_t *list = &state->arcs_list[ARC_BUFC_DATA]; 1587168404Spjd kmutex_t *hash_lock; 1588168404Spjd uint64_t bytes_deleted = 0; 1589168404Spjd uint64_t bufs_skipped = 0; 1590168404Spjd 1591168404Spjd ASSERT(GHOST_STATE(state)); 1592168404Spjdtop: 1593168404Spjd mutex_enter(&state->arcs_mtx); 1594185029Spjd for (ab = list_tail(list); ab; ab = ab_prev) { 1595185029Spjd ab_prev = list_prev(list, ab); 1596185029Spjd if (spa && ab->b_spa != spa) 1597185029Spjd continue; 1598168404Spjd hash_lock = HDR_LOCK(ab); 1599168404Spjd if (mutex_tryenter(hash_lock)) { 1600168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(ab)); 1601168404Spjd ASSERT(ab->b_buf == NULL); 1602168404Spjd ARCSTAT_BUMP(arcstat_deleted); 1603168404Spjd bytes_deleted += ab->b_size; 1604185029Spjd 1605185029Spjd if (ab->b_l2hdr != NULL) { 1606185029Spjd /* 1607185029Spjd * This buffer is cached on the 2nd Level ARC; 1608185029Spjd * don't destroy the header. 1609185029Spjd */ 1610185029Spjd arc_change_state(arc_l2c_only, ab, hash_lock); 1611185029Spjd mutex_exit(hash_lock); 1612185029Spjd } else { 1613185029Spjd arc_change_state(arc_anon, ab, hash_lock); 1614185029Spjd mutex_exit(hash_lock); 1615185029Spjd arc_hdr_destroy(ab); 1616185029Spjd } 1617185029Spjd 1618168404Spjd DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 1619168404Spjd if (bytes >= 0 && bytes_deleted >= bytes) 1620168404Spjd break; 1621168404Spjd } else { 1622168404Spjd if (bytes < 0) { 1623168404Spjd mutex_exit(&state->arcs_mtx); 1624168404Spjd mutex_enter(hash_lock); 1625168404Spjd mutex_exit(hash_lock); 1626168404Spjd goto top; 1627168404Spjd } 1628168404Spjd bufs_skipped += 1; 1629168404Spjd } 1630168404Spjd } 1631168404Spjd mutex_exit(&state->arcs_mtx); 1632168404Spjd 1633185029Spjd if (list == &state->arcs_list[ARC_BUFC_DATA] && 1634185029Spjd (bytes < 0 || bytes_deleted < bytes)) { 1635185029Spjd list = &state->arcs_list[ARC_BUFC_METADATA]; 1636185029Spjd goto top; 1637185029Spjd } 1638185029Spjd 1639168404Spjd if (bufs_skipped) { 1640168404Spjd ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 1641168404Spjd ASSERT(bytes >= 0); 1642168404Spjd } 1643168404Spjd 1644168404Spjd if (bytes_deleted < bytes) 1645168404Spjd dprintf("only deleted %lld bytes from %p", 1646168404Spjd (longlong_t)bytes_deleted, state); 1647168404Spjd} 1648168404Spjd 1649168404Spjdstatic void 1650168404Spjdarc_adjust(void) 1651168404Spjd{ 1652168404Spjd int64_t top_sz, mru_over, arc_over, todelete; 1653168404Spjd 1654185029Spjd top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used; 1655168404Spjd 1656185029Spjd if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 1657185029Spjd int64_t toevict = 1658185029Spjd MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p); 1659185029Spjd (void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA); 1660168404Spjd top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1661168404Spjd } 1662168404Spjd 1663185029Spjd if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1664185029Spjd int64_t toevict = 1665185029Spjd MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p); 1666185029Spjd (void) arc_evict(arc_mru, NULL, toevict, FALSE, 1667185029Spjd ARC_BUFC_METADATA); 1668185029Spjd top_sz = arc_anon->arcs_size + arc_mru->arcs_size; 1669185029Spjd } 1670185029Spjd 1671168404Spjd mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c; 1672168404Spjd 1673168404Spjd if (mru_over > 0) { 1674185029Spjd if (arc_mru_ghost->arcs_size > 0) { 1675185029Spjd todelete = MIN(arc_mru_ghost->arcs_size, mru_over); 1676185029Spjd arc_evict_ghost(arc_mru_ghost, NULL, todelete); 1677168404Spjd } 1678168404Spjd } 1679168404Spjd 1680168404Spjd if ((arc_over = arc_size - arc_c) > 0) { 1681168404Spjd int64_t tbl_over; 1682168404Spjd 1683185029Spjd if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 1684185029Spjd int64_t toevict = 1685185029Spjd MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over); 1686185029Spjd (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1687185029Spjd ARC_BUFC_DATA); 1688185029Spjd arc_over = arc_size - arc_c; 1689168404Spjd } 1690168404Spjd 1691185029Spjd if (arc_over > 0 && 1692185029Spjd arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 1693185029Spjd int64_t toevict = 1694185029Spjd MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA], 1695185029Spjd arc_over); 1696185029Spjd (void) arc_evict(arc_mfu, NULL, toevict, FALSE, 1697185029Spjd ARC_BUFC_METADATA); 1698185029Spjd } 1699168404Spjd 1700185029Spjd tbl_over = arc_size + arc_mru_ghost->arcs_size + 1701185029Spjd arc_mfu_ghost->arcs_size - arc_c * 2; 1702185029Spjd 1703185029Spjd if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) { 1704185029Spjd todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over); 1705185029Spjd arc_evict_ghost(arc_mfu_ghost, NULL, todelete); 1706168404Spjd } 1707168404Spjd } 1708168404Spjd} 1709168404Spjd 1710168404Spjdstatic void 1711168404Spjdarc_do_user_evicts(void) 1712168404Spjd{ 1713168404Spjd mutex_enter(&arc_eviction_mtx); 1714168404Spjd while (arc_eviction_list != NULL) { 1715168404Spjd arc_buf_t *buf = arc_eviction_list; 1716168404Spjd arc_eviction_list = buf->b_next; 1717185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 1718168404Spjd buf->b_hdr = NULL; 1719185029Spjd rw_exit(&buf->b_lock); 1720168404Spjd mutex_exit(&arc_eviction_mtx); 1721168404Spjd 1722168404Spjd if (buf->b_efunc != NULL) 1723168404Spjd VERIFY(buf->b_efunc(buf) == 0); 1724168404Spjd 1725168404Spjd buf->b_efunc = NULL; 1726168404Spjd buf->b_private = NULL; 1727168404Spjd kmem_cache_free(buf_cache, buf); 1728168404Spjd mutex_enter(&arc_eviction_mtx); 1729168404Spjd } 1730168404Spjd mutex_exit(&arc_eviction_mtx); 1731168404Spjd} 1732168404Spjd 1733168404Spjd/* 1734185029Spjd * Flush all *evictable* data from the cache for the given spa. 1735168404Spjd * NOTE: this will not touch "active" (i.e. referenced) data. 1736168404Spjd */ 1737168404Spjdvoid 1738185029Spjdarc_flush(spa_t *spa) 1739168404Spjd{ 1740185029Spjd while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) { 1741185029Spjd (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA); 1742185029Spjd if (spa) 1743185029Spjd break; 1744185029Spjd } 1745185029Spjd while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) { 1746185029Spjd (void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA); 1747185029Spjd if (spa) 1748185029Spjd break; 1749185029Spjd } 1750185029Spjd while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) { 1751185029Spjd (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA); 1752185029Spjd if (spa) 1753185029Spjd break; 1754185029Spjd } 1755185029Spjd while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) { 1756185029Spjd (void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA); 1757185029Spjd if (spa) 1758185029Spjd break; 1759185029Spjd } 1760168404Spjd 1761185029Spjd arc_evict_ghost(arc_mru_ghost, spa, -1); 1762185029Spjd arc_evict_ghost(arc_mfu_ghost, spa, -1); 1763168404Spjd 1764168404Spjd mutex_enter(&arc_reclaim_thr_lock); 1765168404Spjd arc_do_user_evicts(); 1766168404Spjd mutex_exit(&arc_reclaim_thr_lock); 1767185029Spjd ASSERT(spa || arc_eviction_list == NULL); 1768168404Spjd} 1769168404Spjd 1770168404Spjdint arc_shrink_shift = 5; /* log2(fraction of arc to reclaim) */ 1771168404Spjd 1772168404Spjdvoid 1773168404Spjdarc_shrink(void) 1774168404Spjd{ 1775168404Spjd if (arc_c > arc_c_min) { 1776168404Spjd uint64_t to_free; 1777168404Spjd 1778168404Spjd#ifdef _KERNEL 1779168404Spjd to_free = arc_c >> arc_shrink_shift; 1780168404Spjd#else 1781168404Spjd to_free = arc_c >> arc_shrink_shift; 1782168404Spjd#endif 1783168404Spjd if (arc_c > arc_c_min + to_free) 1784168404Spjd atomic_add_64(&arc_c, -to_free); 1785168404Spjd else 1786168404Spjd arc_c = arc_c_min; 1787168404Spjd 1788168404Spjd atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 1789168404Spjd if (arc_c > arc_size) 1790168404Spjd arc_c = MAX(arc_size, arc_c_min); 1791168404Spjd if (arc_p > arc_c) 1792168404Spjd arc_p = (arc_c >> 1); 1793168404Spjd ASSERT(arc_c >= arc_c_min); 1794168404Spjd ASSERT((int64_t)arc_p >= 0); 1795168404Spjd } 1796168404Spjd 1797168404Spjd if (arc_size > arc_c) 1798168404Spjd arc_adjust(); 1799168404Spjd} 1800168404Spjd 1801185029Spjdstatic int needfree = 0; 1802168404Spjd 1803168404Spjdstatic int 1804168404Spjdarc_reclaim_needed(void) 1805168404Spjd{ 1806168404Spjd#if 0 1807168404Spjd uint64_t extra; 1808168404Spjd#endif 1809168404Spjd 1810168404Spjd#ifdef _KERNEL 1811168404Spjd 1812185029Spjd if (needfree) 1813168404Spjd return (1); 1814168404Spjd 1815168404Spjd#if 0 1816168404Spjd /* 1817185029Spjd * take 'desfree' extra pages, so we reclaim sooner, rather than later 1818185029Spjd */ 1819185029Spjd extra = desfree; 1820185029Spjd 1821185029Spjd /* 1822185029Spjd * check that we're out of range of the pageout scanner. It starts to 1823185029Spjd * schedule paging if freemem is less than lotsfree and needfree. 1824185029Spjd * lotsfree is the high-water mark for pageout, and needfree is the 1825185029Spjd * number of needed free pages. We add extra pages here to make sure 1826185029Spjd * the scanner doesn't start up while we're freeing memory. 1827185029Spjd */ 1828185029Spjd if (freemem < lotsfree + needfree + extra) 1829185029Spjd return (1); 1830185029Spjd 1831185029Spjd /* 1832168404Spjd * check to make sure that swapfs has enough space so that anon 1833185029Spjd * reservations can still succeed. anon_resvmem() checks that the 1834168404Spjd * availrmem is greater than swapfs_minfree, and the number of reserved 1835168404Spjd * swap pages. We also add a bit of extra here just to prevent 1836168404Spjd * circumstances from getting really dire. 1837168404Spjd */ 1838168404Spjd if (availrmem < swapfs_minfree + swapfs_reserve + extra) 1839168404Spjd return (1); 1840168404Spjd 1841168404Spjd#if defined(__i386) 1842168404Spjd /* 1843168404Spjd * If we're on an i386 platform, it's possible that we'll exhaust the 1844168404Spjd * kernel heap space before we ever run out of available physical 1845168404Spjd * memory. Most checks of the size of the heap_area compare against 1846168404Spjd * tune.t_minarmem, which is the minimum available real memory that we 1847168404Spjd * can have in the system. However, this is generally fixed at 25 pages 1848168404Spjd * which is so low that it's useless. In this comparison, we seek to 1849168404Spjd * calculate the total heap-size, and reclaim if more than 3/4ths of the 1850185029Spjd * heap is allocated. (Or, in the calculation, if less than 1/4th is 1851168404Spjd * free) 1852168404Spjd */ 1853168404Spjd if (btop(vmem_size(heap_arena, VMEM_FREE)) < 1854168404Spjd (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 1855168404Spjd return (1); 1856168404Spjd#endif 1857168404Spjd#else 1858175633Spjd if (kmem_used() > (kmem_size() * 3) / 4) 1859168404Spjd return (1); 1860168404Spjd#endif 1861168404Spjd 1862168404Spjd#else 1863168404Spjd if (spa_get_random(100) == 0) 1864168404Spjd return (1); 1865168404Spjd#endif 1866168404Spjd return (0); 1867168404Spjd} 1868168404Spjd 1869168404Spjdstatic void 1870168404Spjdarc_kmem_reap_now(arc_reclaim_strategy_t strat) 1871168404Spjd{ 1872168404Spjd#ifdef ZIO_USE_UMA 1873168404Spjd size_t i; 1874168404Spjd kmem_cache_t *prev_cache = NULL; 1875168404Spjd kmem_cache_t *prev_data_cache = NULL; 1876168404Spjd extern kmem_cache_t *zio_buf_cache[]; 1877168404Spjd extern kmem_cache_t *zio_data_buf_cache[]; 1878168404Spjd#endif 1879168404Spjd 1880168404Spjd#ifdef _KERNEL 1881185029Spjd if (arc_meta_used >= arc_meta_limit) { 1882185029Spjd /* 1883185029Spjd * We are exceeding our meta-data cache limit. 1884185029Spjd * Purge some DNLC entries to release holds on meta-data. 1885185029Spjd */ 1886185029Spjd dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 1887185029Spjd } 1888168404Spjd#if defined(__i386) 1889168404Spjd /* 1890168404Spjd * Reclaim unused memory from all kmem caches. 1891168404Spjd */ 1892168404Spjd kmem_reap(); 1893168404Spjd#endif 1894168404Spjd#endif 1895168404Spjd 1896168404Spjd /* 1897185029Spjd * An aggressive reclamation will shrink the cache size as well as 1898168404Spjd * reap free buffers from the arc kmem caches. 1899168404Spjd */ 1900168404Spjd if (strat == ARC_RECLAIM_AGGR) 1901168404Spjd arc_shrink(); 1902168404Spjd 1903168404Spjd#ifdef ZIO_USE_UMA 1904168404Spjd for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1905168404Spjd if (zio_buf_cache[i] != prev_cache) { 1906168404Spjd prev_cache = zio_buf_cache[i]; 1907168404Spjd kmem_cache_reap_now(zio_buf_cache[i]); 1908168404Spjd } 1909168404Spjd if (zio_data_buf_cache[i] != prev_data_cache) { 1910168404Spjd prev_data_cache = zio_data_buf_cache[i]; 1911168404Spjd kmem_cache_reap_now(zio_data_buf_cache[i]); 1912168404Spjd } 1913168404Spjd } 1914168404Spjd#endif 1915168404Spjd kmem_cache_reap_now(buf_cache); 1916168404Spjd kmem_cache_reap_now(hdr_cache); 1917168404Spjd} 1918168404Spjd 1919168404Spjdstatic void 1920168404Spjdarc_reclaim_thread(void *dummy __unused) 1921168404Spjd{ 1922168404Spjd clock_t growtime = 0; 1923168404Spjd arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1924168404Spjd callb_cpr_t cpr; 1925168404Spjd 1926168404Spjd CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1927168404Spjd 1928168404Spjd mutex_enter(&arc_reclaim_thr_lock); 1929168404Spjd while (arc_thread_exit == 0) { 1930168404Spjd if (arc_reclaim_needed()) { 1931168404Spjd 1932168404Spjd if (arc_no_grow) { 1933168404Spjd if (last_reclaim == ARC_RECLAIM_CONS) { 1934168404Spjd last_reclaim = ARC_RECLAIM_AGGR; 1935168404Spjd } else { 1936168404Spjd last_reclaim = ARC_RECLAIM_CONS; 1937168404Spjd } 1938168404Spjd } else { 1939168404Spjd arc_no_grow = TRUE; 1940168404Spjd last_reclaim = ARC_RECLAIM_AGGR; 1941168404Spjd membar_producer(); 1942168404Spjd } 1943168404Spjd 1944168404Spjd /* reset the growth delay for every reclaim */ 1945174049Sjb growtime = LBOLT + (arc_grow_retry * hz); 1946168404Spjd 1947185029Spjd if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 1948168404Spjd /* 1949185029Spjd * If needfree is TRUE our vm_lowmem hook 1950168404Spjd * was called and in that case we must free some 1951168404Spjd * memory, so switch to aggressive mode. 1952168404Spjd */ 1953168404Spjd arc_no_grow = TRUE; 1954168404Spjd last_reclaim = ARC_RECLAIM_AGGR; 1955168404Spjd } 1956168404Spjd arc_kmem_reap_now(last_reclaim); 1957185029Spjd arc_warm = B_TRUE; 1958185029Spjd 1959185029Spjd } else if (arc_no_grow && LBOLT >= growtime) { 1960168404Spjd arc_no_grow = FALSE; 1961168404Spjd } 1962168404Spjd 1963185029Spjd if (needfree || 1964168404Spjd (2 * arc_c < arc_size + 1965168404Spjd arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size)) 1966168404Spjd arc_adjust(); 1967168404Spjd 1968168404Spjd if (arc_eviction_list != NULL) 1969168404Spjd arc_do_user_evicts(); 1970168404Spjd 1971168404Spjd if (arc_reclaim_needed()) { 1972185029Spjd needfree = 0; 1973168404Spjd#ifdef _KERNEL 1974185029Spjd wakeup(&needfree); 1975168404Spjd#endif 1976168404Spjd } 1977168404Spjd 1978168404Spjd /* block until needed, or one second, whichever is shorter */ 1979168404Spjd CALLB_CPR_SAFE_BEGIN(&cpr); 1980168404Spjd (void) cv_timedwait(&arc_reclaim_thr_cv, 1981168404Spjd &arc_reclaim_thr_lock, hz); 1982168404Spjd CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 1983168404Spjd } 1984168404Spjd 1985168404Spjd arc_thread_exit = 0; 1986168404Spjd cv_broadcast(&arc_reclaim_thr_cv); 1987168404Spjd CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 1988168404Spjd thread_exit(); 1989168404Spjd} 1990168404Spjd 1991168404Spjd/* 1992168404Spjd * Adapt arc info given the number of bytes we are trying to add and 1993168404Spjd * the state that we are comming from. This function is only called 1994168404Spjd * when we are adding new content to the cache. 1995168404Spjd */ 1996168404Spjdstatic void 1997168404Spjdarc_adapt(int bytes, arc_state_t *state) 1998168404Spjd{ 1999168404Spjd int mult; 2000168404Spjd 2001185029Spjd if (state == arc_l2c_only) 2002185029Spjd return; 2003185029Spjd 2004168404Spjd ASSERT(bytes > 0); 2005168404Spjd /* 2006168404Spjd * Adapt the target size of the MRU list: 2007168404Spjd * - if we just hit in the MRU ghost list, then increase 2008168404Spjd * the target size of the MRU list. 2009168404Spjd * - if we just hit in the MFU ghost list, then increase 2010168404Spjd * the target size of the MFU list by decreasing the 2011168404Spjd * target size of the MRU list. 2012168404Spjd */ 2013168404Spjd if (state == arc_mru_ghost) { 2014168404Spjd mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2015168404Spjd 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2016168404Spjd 2017168404Spjd arc_p = MIN(arc_c, arc_p + bytes * mult); 2018168404Spjd } else if (state == arc_mfu_ghost) { 2019168404Spjd mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2020168404Spjd 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2021168404Spjd 2022168404Spjd arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 2023168404Spjd } 2024168404Spjd ASSERT((int64_t)arc_p >= 0); 2025168404Spjd 2026168404Spjd if (arc_reclaim_needed()) { 2027168404Spjd cv_signal(&arc_reclaim_thr_cv); 2028168404Spjd return; 2029168404Spjd } 2030168404Spjd 2031168404Spjd if (arc_no_grow) 2032168404Spjd return; 2033168404Spjd 2034168404Spjd if (arc_c >= arc_c_max) 2035168404Spjd return; 2036168404Spjd 2037168404Spjd /* 2038168404Spjd * If we're within (2 * maxblocksize) bytes of the target 2039168404Spjd * cache size, increment the target cache size 2040168404Spjd */ 2041168404Spjd if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2042168404Spjd atomic_add_64(&arc_c, (int64_t)bytes); 2043168404Spjd if (arc_c > arc_c_max) 2044168404Spjd arc_c = arc_c_max; 2045168404Spjd else if (state == arc_anon) 2046168404Spjd atomic_add_64(&arc_p, (int64_t)bytes); 2047168404Spjd if (arc_p > arc_c) 2048168404Spjd arc_p = arc_c; 2049168404Spjd } 2050168404Spjd ASSERT((int64_t)arc_p >= 0); 2051168404Spjd} 2052168404Spjd 2053168404Spjd/* 2054168404Spjd * Check if the cache has reached its limits and eviction is required 2055168404Spjd * prior to insert. 2056168404Spjd */ 2057168404Spjdstatic int 2058185029Spjdarc_evict_needed(arc_buf_contents_t type) 2059168404Spjd{ 2060185029Spjd if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2061185029Spjd return (1); 2062185029Spjd 2063185029Spjd#if 0 2064185029Spjd#ifdef _KERNEL 2065185029Spjd /* 2066185029Spjd * If zio data pages are being allocated out of a separate heap segment, 2067185029Spjd * then enforce that the size of available vmem for this area remains 2068185029Spjd * above about 1/32nd free. 2069185029Spjd */ 2070185029Spjd if (type == ARC_BUFC_DATA && zio_arena != NULL && 2071185029Spjd vmem_size(zio_arena, VMEM_FREE) < 2072185029Spjd (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2073185029Spjd return (1); 2074185029Spjd#endif 2075185029Spjd#endif 2076185029Spjd 2077168404Spjd if (arc_reclaim_needed()) 2078168404Spjd return (1); 2079168404Spjd 2080168404Spjd return (arc_size > arc_c); 2081168404Spjd} 2082168404Spjd 2083168404Spjd/* 2084168404Spjd * The buffer, supplied as the first argument, needs a data block. 2085168404Spjd * So, if we are at cache max, determine which cache should be victimized. 2086168404Spjd * We have the following cases: 2087168404Spjd * 2088168404Spjd * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2089168404Spjd * In this situation if we're out of space, but the resident size of the MFU is 2090168404Spjd * under the limit, victimize the MFU cache to satisfy this insertion request. 2091168404Spjd * 2092168404Spjd * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2093168404Spjd * Here, we've used up all of the available space for the MRU, so we need to 2094168404Spjd * evict from our own cache instead. Evict from the set of resident MRU 2095168404Spjd * entries. 2096168404Spjd * 2097168404Spjd * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2098168404Spjd * c minus p represents the MFU space in the cache, since p is the size of the 2099168404Spjd * cache that is dedicated to the MRU. In this situation there's still space on 2100168404Spjd * the MFU side, so the MRU side needs to be victimized. 2101168404Spjd * 2102168404Spjd * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2103168404Spjd * MFU's resident set is consuming more space than it has been allotted. In 2104168404Spjd * this situation, we must victimize our own cache, the MFU, for this insertion. 2105168404Spjd */ 2106168404Spjdstatic void 2107168404Spjdarc_get_data_buf(arc_buf_t *buf) 2108168404Spjd{ 2109168404Spjd arc_state_t *state = buf->b_hdr->b_state; 2110168404Spjd uint64_t size = buf->b_hdr->b_size; 2111168404Spjd arc_buf_contents_t type = buf->b_hdr->b_type; 2112168404Spjd 2113168404Spjd arc_adapt(size, state); 2114168404Spjd 2115168404Spjd /* 2116168404Spjd * We have not yet reached cache maximum size, 2117168404Spjd * just allocate a new buffer. 2118168404Spjd */ 2119185029Spjd if (!arc_evict_needed(type)) { 2120168404Spjd if (type == ARC_BUFC_METADATA) { 2121168404Spjd buf->b_data = zio_buf_alloc(size); 2122185029Spjd arc_space_consume(size); 2123168404Spjd } else { 2124168404Spjd ASSERT(type == ARC_BUFC_DATA); 2125168404Spjd buf->b_data = zio_data_buf_alloc(size); 2126185029Spjd atomic_add_64(&arc_size, size); 2127168404Spjd } 2128168404Spjd goto out; 2129168404Spjd } 2130168404Spjd 2131168404Spjd /* 2132168404Spjd * If we are prefetching from the mfu ghost list, this buffer 2133168404Spjd * will end up on the mru list; so steal space from there. 2134168404Spjd */ 2135168404Spjd if (state == arc_mfu_ghost) 2136168404Spjd state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2137168404Spjd else if (state == arc_mru_ghost) 2138168404Spjd state = arc_mru; 2139168404Spjd 2140168404Spjd if (state == arc_mru || state == arc_anon) { 2141168404Spjd uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2142185029Spjd state = (arc_mfu->arcs_lsize[type] > 0 && 2143185029Spjd arc_p > mru_used) ? arc_mfu : arc_mru; 2144168404Spjd } else { 2145168404Spjd /* MFU cases */ 2146168404Spjd uint64_t mfu_space = arc_c - arc_p; 2147185029Spjd state = (arc_mru->arcs_lsize[type] > 0 && 2148185029Spjd mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2149168404Spjd } 2150185029Spjd if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) { 2151168404Spjd if (type == ARC_BUFC_METADATA) { 2152168404Spjd buf->b_data = zio_buf_alloc(size); 2153185029Spjd arc_space_consume(size); 2154168404Spjd } else { 2155168404Spjd ASSERT(type == ARC_BUFC_DATA); 2156168404Spjd buf->b_data = zio_data_buf_alloc(size); 2157185029Spjd atomic_add_64(&arc_size, size); 2158168404Spjd } 2159168404Spjd ARCSTAT_BUMP(arcstat_recycle_miss); 2160168404Spjd } 2161168404Spjd ASSERT(buf->b_data != NULL); 2162168404Spjdout: 2163168404Spjd /* 2164168404Spjd * Update the state size. Note that ghost states have a 2165168404Spjd * "ghost size" and so don't need to be updated. 2166168404Spjd */ 2167168404Spjd if (!GHOST_STATE(buf->b_hdr->b_state)) { 2168168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 2169168404Spjd 2170168404Spjd atomic_add_64(&hdr->b_state->arcs_size, size); 2171168404Spjd if (list_link_active(&hdr->b_arc_node)) { 2172168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2173185029Spjd atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2174168404Spjd } 2175168404Spjd /* 2176168404Spjd * If we are growing the cache, and we are adding anonymous 2177168404Spjd * data, and we have outgrown arc_p, update arc_p 2178168404Spjd */ 2179168404Spjd if (arc_size < arc_c && hdr->b_state == arc_anon && 2180168404Spjd arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2181168404Spjd arc_p = MIN(arc_c, arc_p + size); 2182168404Spjd } 2183168404Spjd} 2184168404Spjd 2185168404Spjd/* 2186168404Spjd * This routine is called whenever a buffer is accessed. 2187168404Spjd * NOTE: the hash lock is dropped in this function. 2188168404Spjd */ 2189168404Spjdstatic void 2190168404Spjdarc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2191168404Spjd{ 2192168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 2193168404Spjd 2194168404Spjd if (buf->b_state == arc_anon) { 2195168404Spjd /* 2196168404Spjd * This buffer is not in the cache, and does not 2197168404Spjd * appear in our "ghost" list. Add the new buffer 2198168404Spjd * to the MRU state. 2199168404Spjd */ 2200168404Spjd 2201168404Spjd ASSERT(buf->b_arc_access == 0); 2202174049Sjb buf->b_arc_access = LBOLT; 2203168404Spjd DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2204168404Spjd arc_change_state(arc_mru, buf, hash_lock); 2205168404Spjd 2206168404Spjd } else if (buf->b_state == arc_mru) { 2207168404Spjd /* 2208168404Spjd * If this buffer is here because of a prefetch, then either: 2209168404Spjd * - clear the flag if this is a "referencing" read 2210168404Spjd * (any subsequent access will bump this into the MFU state). 2211168404Spjd * or 2212168404Spjd * - move the buffer to the head of the list if this is 2213168404Spjd * another prefetch (to make it less likely to be evicted). 2214168404Spjd */ 2215168404Spjd if ((buf->b_flags & ARC_PREFETCH) != 0) { 2216168404Spjd if (refcount_count(&buf->b_refcnt) == 0) { 2217168404Spjd ASSERT(list_link_active(&buf->b_arc_node)); 2218168404Spjd } else { 2219168404Spjd buf->b_flags &= ~ARC_PREFETCH; 2220168404Spjd ARCSTAT_BUMP(arcstat_mru_hits); 2221168404Spjd } 2222174049Sjb buf->b_arc_access = LBOLT; 2223168404Spjd return; 2224168404Spjd } 2225168404Spjd 2226168404Spjd /* 2227168404Spjd * This buffer has been "accessed" only once so far, 2228168404Spjd * but it is still in the cache. Move it to the MFU 2229168404Spjd * state. 2230168404Spjd */ 2231174049Sjb if (LBOLT > buf->b_arc_access + ARC_MINTIME) { 2232168404Spjd /* 2233168404Spjd * More than 125ms have passed since we 2234168404Spjd * instantiated this buffer. Move it to the 2235168404Spjd * most frequently used state. 2236168404Spjd */ 2237174049Sjb buf->b_arc_access = LBOLT; 2238168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2239168404Spjd arc_change_state(arc_mfu, buf, hash_lock); 2240168404Spjd } 2241168404Spjd ARCSTAT_BUMP(arcstat_mru_hits); 2242168404Spjd } else if (buf->b_state == arc_mru_ghost) { 2243168404Spjd arc_state_t *new_state; 2244168404Spjd /* 2245168404Spjd * This buffer has been "accessed" recently, but 2246168404Spjd * was evicted from the cache. Move it to the 2247168404Spjd * MFU state. 2248168404Spjd */ 2249168404Spjd 2250168404Spjd if (buf->b_flags & ARC_PREFETCH) { 2251168404Spjd new_state = arc_mru; 2252168404Spjd if (refcount_count(&buf->b_refcnt) > 0) 2253168404Spjd buf->b_flags &= ~ARC_PREFETCH; 2254168404Spjd DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2255168404Spjd } else { 2256168404Spjd new_state = arc_mfu; 2257168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2258168404Spjd } 2259168404Spjd 2260174049Sjb buf->b_arc_access = LBOLT; 2261168404Spjd arc_change_state(new_state, buf, hash_lock); 2262168404Spjd 2263168404Spjd ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2264168404Spjd } else if (buf->b_state == arc_mfu) { 2265168404Spjd /* 2266168404Spjd * This buffer has been accessed more than once and is 2267168404Spjd * still in the cache. Keep it in the MFU state. 2268168404Spjd * 2269168404Spjd * NOTE: an add_reference() that occurred when we did 2270168404Spjd * the arc_read() will have kicked this off the list. 2271168404Spjd * If it was a prefetch, we will explicitly move it to 2272168404Spjd * the head of the list now. 2273168404Spjd */ 2274168404Spjd if ((buf->b_flags & ARC_PREFETCH) != 0) { 2275168404Spjd ASSERT(refcount_count(&buf->b_refcnt) == 0); 2276168404Spjd ASSERT(list_link_active(&buf->b_arc_node)); 2277168404Spjd } 2278168404Spjd ARCSTAT_BUMP(arcstat_mfu_hits); 2279174049Sjb buf->b_arc_access = LBOLT; 2280168404Spjd } else if (buf->b_state == arc_mfu_ghost) { 2281168404Spjd arc_state_t *new_state = arc_mfu; 2282168404Spjd /* 2283168404Spjd * This buffer has been accessed more than once but has 2284168404Spjd * been evicted from the cache. Move it back to the 2285168404Spjd * MFU state. 2286168404Spjd */ 2287168404Spjd 2288168404Spjd if (buf->b_flags & ARC_PREFETCH) { 2289168404Spjd /* 2290168404Spjd * This is a prefetch access... 2291168404Spjd * move this block back to the MRU state. 2292168404Spjd */ 2293168404Spjd ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 2294168404Spjd new_state = arc_mru; 2295168404Spjd } 2296168404Spjd 2297174049Sjb buf->b_arc_access = LBOLT; 2298168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2299168404Spjd arc_change_state(new_state, buf, hash_lock); 2300168404Spjd 2301168404Spjd ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2302185029Spjd } else if (buf->b_state == arc_l2c_only) { 2303185029Spjd /* 2304185029Spjd * This buffer is on the 2nd Level ARC. 2305185029Spjd */ 2306185029Spjd 2307185029Spjd buf->b_arc_access = LBOLT; 2308185029Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2309185029Spjd arc_change_state(arc_mfu, buf, hash_lock); 2310168404Spjd } else { 2311168404Spjd ASSERT(!"invalid arc state"); 2312168404Spjd } 2313168404Spjd} 2314168404Spjd 2315168404Spjd/* a generic arc_done_func_t which you can use */ 2316168404Spjd/* ARGSUSED */ 2317168404Spjdvoid 2318168404Spjdarc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2319168404Spjd{ 2320168404Spjd bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2321168404Spjd VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2322168404Spjd} 2323168404Spjd 2324185029Spjd/* a generic arc_done_func_t */ 2325168404Spjdvoid 2326168404Spjdarc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2327168404Spjd{ 2328168404Spjd arc_buf_t **bufp = arg; 2329168404Spjd if (zio && zio->io_error) { 2330168404Spjd VERIFY(arc_buf_remove_ref(buf, arg) == 1); 2331168404Spjd *bufp = NULL; 2332168404Spjd } else { 2333168404Spjd *bufp = buf; 2334168404Spjd } 2335168404Spjd} 2336168404Spjd 2337168404Spjdstatic void 2338168404Spjdarc_read_done(zio_t *zio) 2339168404Spjd{ 2340168404Spjd arc_buf_hdr_t *hdr, *found; 2341168404Spjd arc_buf_t *buf; 2342168404Spjd arc_buf_t *abuf; /* buffer we're assigning to callback */ 2343168404Spjd kmutex_t *hash_lock; 2344168404Spjd arc_callback_t *callback_list, *acb; 2345168404Spjd int freeable = FALSE; 2346168404Spjd 2347168404Spjd buf = zio->io_private; 2348168404Spjd hdr = buf->b_hdr; 2349168404Spjd 2350168404Spjd /* 2351168404Spjd * The hdr was inserted into hash-table and removed from lists 2352168404Spjd * prior to starting I/O. We should find this header, since 2353168404Spjd * it's in the hash table, and it should be legit since it's 2354168404Spjd * not possible to evict it during the I/O. The only possible 2355168404Spjd * reason for it not to be found is if we were freed during the 2356168404Spjd * read. 2357168404Spjd */ 2358168404Spjd found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 2359168404Spjd &hash_lock); 2360168404Spjd 2361168404Spjd ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2362185029Spjd (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2363185029Spjd (found == hdr && HDR_L2_READING(hdr))); 2364168404Spjd 2365185029Spjd hdr->b_flags &= ~ARC_L2_EVICTED; 2366185029Spjd if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2367185029Spjd hdr->b_flags &= ~ARC_L2CACHE; 2368185029Spjd 2369168404Spjd /* byteswap if necessary */ 2370168404Spjd callback_list = hdr->b_acb; 2371168404Spjd ASSERT(callback_list != NULL); 2372185029Spjd if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 2373185029Spjd arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 2374185029Spjd byteswap_uint64_array : 2375185029Spjd dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap; 2376185029Spjd func(buf->b_data, hdr->b_size); 2377185029Spjd } 2378168404Spjd 2379185029Spjd arc_cksum_compute(buf, B_FALSE); 2380168404Spjd 2381168404Spjd /* create copies of the data buffer for the callers */ 2382168404Spjd abuf = buf; 2383168404Spjd for (acb = callback_list; acb; acb = acb->acb_next) { 2384168404Spjd if (acb->acb_done) { 2385168404Spjd if (abuf == NULL) 2386168404Spjd abuf = arc_buf_clone(buf); 2387168404Spjd acb->acb_buf = abuf; 2388168404Spjd abuf = NULL; 2389168404Spjd } 2390168404Spjd } 2391168404Spjd hdr->b_acb = NULL; 2392168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2393168404Spjd ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2394168404Spjd if (abuf == buf) 2395168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 2396168404Spjd 2397168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2398168404Spjd 2399168404Spjd if (zio->io_error != 0) { 2400168404Spjd hdr->b_flags |= ARC_IO_ERROR; 2401168404Spjd if (hdr->b_state != arc_anon) 2402168404Spjd arc_change_state(arc_anon, hdr, hash_lock); 2403168404Spjd if (HDR_IN_HASH_TABLE(hdr)) 2404168404Spjd buf_hash_remove(hdr); 2405168404Spjd freeable = refcount_is_zero(&hdr->b_refcnt); 2406168404Spjd } 2407168404Spjd 2408168404Spjd /* 2409168404Spjd * Broadcast before we drop the hash_lock to avoid the possibility 2410168404Spjd * that the hdr (and hence the cv) might be freed before we get to 2411168404Spjd * the cv_broadcast(). 2412168404Spjd */ 2413168404Spjd cv_broadcast(&hdr->b_cv); 2414168404Spjd 2415168404Spjd if (hash_lock) { 2416168404Spjd /* 2417168404Spjd * Only call arc_access on anonymous buffers. This is because 2418168404Spjd * if we've issued an I/O for an evicted buffer, we've already 2419168404Spjd * called arc_access (to prevent any simultaneous readers from 2420168404Spjd * getting confused). 2421168404Spjd */ 2422168404Spjd if (zio->io_error == 0 && hdr->b_state == arc_anon) 2423168404Spjd arc_access(hdr, hash_lock); 2424168404Spjd mutex_exit(hash_lock); 2425168404Spjd } else { 2426168404Spjd /* 2427168404Spjd * This block was freed while we waited for the read to 2428168404Spjd * complete. It has been removed from the hash table and 2429168404Spjd * moved to the anonymous state (so that it won't show up 2430168404Spjd * in the cache). 2431168404Spjd */ 2432168404Spjd ASSERT3P(hdr->b_state, ==, arc_anon); 2433168404Spjd freeable = refcount_is_zero(&hdr->b_refcnt); 2434168404Spjd } 2435168404Spjd 2436168404Spjd /* execute each callback and free its structure */ 2437168404Spjd while ((acb = callback_list) != NULL) { 2438168404Spjd if (acb->acb_done) 2439168404Spjd acb->acb_done(zio, acb->acb_buf, acb->acb_private); 2440168404Spjd 2441168404Spjd if (acb->acb_zio_dummy != NULL) { 2442168404Spjd acb->acb_zio_dummy->io_error = zio->io_error; 2443168404Spjd zio_nowait(acb->acb_zio_dummy); 2444168404Spjd } 2445168404Spjd 2446168404Spjd callback_list = acb->acb_next; 2447168404Spjd kmem_free(acb, sizeof (arc_callback_t)); 2448168404Spjd } 2449168404Spjd 2450168404Spjd if (freeable) 2451168404Spjd arc_hdr_destroy(hdr); 2452168404Spjd} 2453168404Spjd 2454168404Spjd/* 2455168404Spjd * "Read" the block block at the specified DVA (in bp) via the 2456168404Spjd * cache. If the block is found in the cache, invoke the provided 2457168404Spjd * callback immediately and return. Note that the `zio' parameter 2458168404Spjd * in the callback will be NULL in this case, since no IO was 2459168404Spjd * required. If the block is not in the cache pass the read request 2460168404Spjd * on to the spa with a substitute callback function, so that the 2461168404Spjd * requested block will be added to the cache. 2462168404Spjd * 2463168404Spjd * If a read request arrives for a block that has a read in-progress, 2464168404Spjd * either wait for the in-progress read to complete (and return the 2465168404Spjd * results); or, if this is a read with a "done" func, add a record 2466168404Spjd * to the read to invoke the "done" func when the read completes, 2467168404Spjd * and return; or just return. 2468168404Spjd * 2469168404Spjd * arc_read_done() will invoke all the requested "done" functions 2470168404Spjd * for readers of this block. 2471185029Spjd * 2472185029Spjd * Normal callers should use arc_read and pass the arc buffer and offset 2473185029Spjd * for the bp. But if you know you don't need locking, you can use 2474185029Spjd * arc_read_bp. 2475168404Spjd */ 2476168404Spjdint 2477185029Spjdarc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf, 2478185029Spjd arc_done_func_t *done, void *private, int priority, int zio_flags, 2479185029Spjd uint32_t *arc_flags, const zbookmark_t *zb) 2480168404Spjd{ 2481185029Spjd int err; 2482185029Spjd arc_buf_hdr_t *hdr = pbuf->b_hdr; 2483185029Spjd 2484185029Spjd ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt)); 2485185029Spjd ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size); 2486185029Spjd rw_enter(&pbuf->b_lock, RW_READER); 2487185029Spjd 2488185029Spjd err = arc_read_nolock(pio, spa, bp, done, private, priority, 2489185029Spjd zio_flags, arc_flags, zb); 2490185029Spjd 2491185029Spjd ASSERT3P(hdr, ==, pbuf->b_hdr); 2492185029Spjd rw_exit(&pbuf->b_lock); 2493185029Spjd return (err); 2494185029Spjd} 2495185029Spjd 2496185029Spjdint 2497185029Spjdarc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp, 2498185029Spjd arc_done_func_t *done, void *private, int priority, int zio_flags, 2499185029Spjd uint32_t *arc_flags, const zbookmark_t *zb) 2500185029Spjd{ 2501168404Spjd arc_buf_hdr_t *hdr; 2502168404Spjd arc_buf_t *buf; 2503168404Spjd kmutex_t *hash_lock; 2504185029Spjd zio_t *rzio; 2505168404Spjd 2506168404Spjdtop: 2507168404Spjd hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2508168404Spjd if (hdr && hdr->b_datacnt > 0) { 2509168404Spjd 2510168404Spjd *arc_flags |= ARC_CACHED; 2511168404Spjd 2512168404Spjd if (HDR_IO_IN_PROGRESS(hdr)) { 2513168404Spjd 2514168404Spjd if (*arc_flags & ARC_WAIT) { 2515168404Spjd cv_wait(&hdr->b_cv, hash_lock); 2516168404Spjd mutex_exit(hash_lock); 2517168404Spjd goto top; 2518168404Spjd } 2519168404Spjd ASSERT(*arc_flags & ARC_NOWAIT); 2520168404Spjd 2521168404Spjd if (done) { 2522168404Spjd arc_callback_t *acb = NULL; 2523168404Spjd 2524168404Spjd acb = kmem_zalloc(sizeof (arc_callback_t), 2525168404Spjd KM_SLEEP); 2526168404Spjd acb->acb_done = done; 2527168404Spjd acb->acb_private = private; 2528168404Spjd if (pio != NULL) 2529168404Spjd acb->acb_zio_dummy = zio_null(pio, 2530185029Spjd spa, NULL, NULL, zio_flags); 2531168404Spjd 2532168404Spjd ASSERT(acb->acb_done != NULL); 2533168404Spjd acb->acb_next = hdr->b_acb; 2534168404Spjd hdr->b_acb = acb; 2535168404Spjd add_reference(hdr, hash_lock, private); 2536168404Spjd mutex_exit(hash_lock); 2537168404Spjd return (0); 2538168404Spjd } 2539168404Spjd mutex_exit(hash_lock); 2540168404Spjd return (0); 2541168404Spjd } 2542168404Spjd 2543168404Spjd ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2544168404Spjd 2545168404Spjd if (done) { 2546168404Spjd add_reference(hdr, hash_lock, private); 2547168404Spjd /* 2548168404Spjd * If this block is already in use, create a new 2549168404Spjd * copy of the data so that we will be guaranteed 2550168404Spjd * that arc_release() will always succeed. 2551168404Spjd */ 2552168404Spjd buf = hdr->b_buf; 2553168404Spjd ASSERT(buf); 2554168404Spjd ASSERT(buf->b_data); 2555168404Spjd if (HDR_BUF_AVAILABLE(hdr)) { 2556168404Spjd ASSERT(buf->b_efunc == NULL); 2557168404Spjd hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2558168404Spjd } else { 2559168404Spjd buf = arc_buf_clone(buf); 2560168404Spjd } 2561168404Spjd } else if (*arc_flags & ARC_PREFETCH && 2562168404Spjd refcount_count(&hdr->b_refcnt) == 0) { 2563168404Spjd hdr->b_flags |= ARC_PREFETCH; 2564168404Spjd } 2565168404Spjd DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2566168404Spjd arc_access(hdr, hash_lock); 2567185029Spjd if (*arc_flags & ARC_L2CACHE) 2568185029Spjd hdr->b_flags |= ARC_L2CACHE; 2569168404Spjd mutex_exit(hash_lock); 2570168404Spjd ARCSTAT_BUMP(arcstat_hits); 2571168404Spjd ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2572168404Spjd demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2573168404Spjd data, metadata, hits); 2574168404Spjd 2575168404Spjd if (done) 2576168404Spjd done(NULL, buf, private); 2577168404Spjd } else { 2578168404Spjd uint64_t size = BP_GET_LSIZE(bp); 2579168404Spjd arc_callback_t *acb; 2580185029Spjd vdev_t *vd = NULL; 2581185029Spjd daddr_t addr; 2582168404Spjd 2583168404Spjd if (hdr == NULL) { 2584168404Spjd /* this block is not in the cache */ 2585168404Spjd arc_buf_hdr_t *exists; 2586168404Spjd arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2587168404Spjd buf = arc_buf_alloc(spa, size, private, type); 2588168404Spjd hdr = buf->b_hdr; 2589168404Spjd hdr->b_dva = *BP_IDENTITY(bp); 2590168404Spjd hdr->b_birth = bp->blk_birth; 2591168404Spjd hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2592168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 2593168404Spjd if (exists) { 2594168404Spjd /* somebody beat us to the hash insert */ 2595168404Spjd mutex_exit(hash_lock); 2596168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 2597168404Spjd hdr->b_birth = 0; 2598168404Spjd hdr->b_cksum0 = 0; 2599168404Spjd (void) arc_buf_remove_ref(buf, private); 2600168404Spjd goto top; /* restart the IO request */ 2601168404Spjd } 2602168404Spjd /* if this is a prefetch, we don't have a reference */ 2603168404Spjd if (*arc_flags & ARC_PREFETCH) { 2604168404Spjd (void) remove_reference(hdr, hash_lock, 2605168404Spjd private); 2606168404Spjd hdr->b_flags |= ARC_PREFETCH; 2607168404Spjd } 2608185029Spjd if (*arc_flags & ARC_L2CACHE) 2609185029Spjd hdr->b_flags |= ARC_L2CACHE; 2610168404Spjd if (BP_GET_LEVEL(bp) > 0) 2611168404Spjd hdr->b_flags |= ARC_INDIRECT; 2612168404Spjd } else { 2613168404Spjd /* this block is in the ghost cache */ 2614168404Spjd ASSERT(GHOST_STATE(hdr->b_state)); 2615168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2616168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2617168404Spjd ASSERT(hdr->b_buf == NULL); 2618168404Spjd 2619168404Spjd /* if this is a prefetch, we don't have a reference */ 2620168404Spjd if (*arc_flags & ARC_PREFETCH) 2621168404Spjd hdr->b_flags |= ARC_PREFETCH; 2622168404Spjd else 2623168404Spjd add_reference(hdr, hash_lock, private); 2624185029Spjd if (*arc_flags & ARC_L2CACHE) 2625185029Spjd hdr->b_flags |= ARC_L2CACHE; 2626185029Spjd buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2627168404Spjd buf->b_hdr = hdr; 2628168404Spjd buf->b_data = NULL; 2629168404Spjd buf->b_efunc = NULL; 2630168404Spjd buf->b_private = NULL; 2631168404Spjd buf->b_next = NULL; 2632168404Spjd hdr->b_buf = buf; 2633168404Spjd arc_get_data_buf(buf); 2634168404Spjd ASSERT(hdr->b_datacnt == 0); 2635168404Spjd hdr->b_datacnt = 1; 2636168404Spjd 2637168404Spjd } 2638168404Spjd 2639168404Spjd acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2640168404Spjd acb->acb_done = done; 2641168404Spjd acb->acb_private = private; 2642168404Spjd 2643168404Spjd ASSERT(hdr->b_acb == NULL); 2644168404Spjd hdr->b_acb = acb; 2645168404Spjd hdr->b_flags |= ARC_IO_IN_PROGRESS; 2646168404Spjd 2647168404Spjd /* 2648168404Spjd * If the buffer has been evicted, migrate it to a present state 2649168404Spjd * before issuing the I/O. Once we drop the hash-table lock, 2650168404Spjd * the header will be marked as I/O in progress and have an 2651168404Spjd * attached buffer. At this point, anybody who finds this 2652168404Spjd * buffer ought to notice that it's legit but has a pending I/O. 2653168404Spjd */ 2654168404Spjd 2655168404Spjd if (GHOST_STATE(hdr->b_state)) 2656168404Spjd arc_access(hdr, hash_lock); 2657185029Spjd 2658185029Spjd if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 2659185029Spjd (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 2660185029Spjd addr = hdr->b_l2hdr->b_daddr; 2661185029Spjd /* 2662185029Spjd * Lock out device removal. 2663185029Spjd */ 2664185029Spjd if (vdev_is_dead(vd) || 2665185029Spjd !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 2666185029Spjd vd = NULL; 2667185029Spjd } 2668185029Spjd 2669168404Spjd mutex_exit(hash_lock); 2670168404Spjd 2671168404Spjd ASSERT3U(hdr->b_size, ==, size); 2672168404Spjd DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2673168404Spjd zbookmark_t *, zb); 2674168404Spjd ARCSTAT_BUMP(arcstat_misses); 2675168404Spjd ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2676168404Spjd demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2677168404Spjd data, metadata, misses); 2678168404Spjd 2679185029Spjd if (vd != NULL) { 2680185029Spjd /* 2681185029Spjd * Read from the L2ARC if the following are true: 2682185029Spjd * 1. The L2ARC vdev was previously cached. 2683185029Spjd * 2. This buffer still has L2ARC metadata. 2684185029Spjd * 3. This buffer isn't currently writing to the L2ARC. 2685185029Spjd * 4. The L2ARC entry wasn't evicted, which may 2686185029Spjd * also have invalidated the vdev. 2687185029Spjd */ 2688185029Spjd if (hdr->b_l2hdr != NULL && 2689185029Spjd !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) { 2690185029Spjd l2arc_read_callback_t *cb; 2691185029Spjd 2692185029Spjd DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 2693185029Spjd ARCSTAT_BUMP(arcstat_l2_hits); 2694185029Spjd 2695185029Spjd cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 2696185029Spjd KM_SLEEP); 2697185029Spjd cb->l2rcb_buf = buf; 2698185029Spjd cb->l2rcb_spa = spa; 2699185029Spjd cb->l2rcb_bp = *bp; 2700185029Spjd cb->l2rcb_zb = *zb; 2701185029Spjd cb->l2rcb_flags = zio_flags; 2702185029Spjd 2703185029Spjd /* 2704185029Spjd * l2arc read. The SCL_L2ARC lock will be 2705185029Spjd * released by l2arc_read_done(). 2706185029Spjd */ 2707185029Spjd rzio = zio_read_phys(pio, vd, addr, size, 2708185029Spjd buf->b_data, ZIO_CHECKSUM_OFF, 2709185029Spjd l2arc_read_done, cb, priority, zio_flags | 2710185029Spjd ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 2711185029Spjd ZIO_FLAG_DONT_PROPAGATE | 2712185029Spjd ZIO_FLAG_DONT_RETRY, B_FALSE); 2713185029Spjd DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 2714185029Spjd zio_t *, rzio); 2715185029Spjd 2716185029Spjd if (*arc_flags & ARC_NOWAIT) { 2717185029Spjd zio_nowait(rzio); 2718185029Spjd return (0); 2719185029Spjd } 2720185029Spjd 2721185029Spjd ASSERT(*arc_flags & ARC_WAIT); 2722185029Spjd if (zio_wait(rzio) == 0) 2723185029Spjd return (0); 2724185029Spjd 2725185029Spjd /* l2arc read error; goto zio_read() */ 2726185029Spjd } else { 2727185029Spjd DTRACE_PROBE1(l2arc__miss, 2728185029Spjd arc_buf_hdr_t *, hdr); 2729185029Spjd ARCSTAT_BUMP(arcstat_l2_misses); 2730185029Spjd if (HDR_L2_WRITING(hdr)) 2731185029Spjd ARCSTAT_BUMP(arcstat_l2_rw_clash); 2732185029Spjd spa_config_exit(spa, SCL_L2ARC, vd); 2733185029Spjd } 2734185029Spjd } 2735185029Spjd 2736168404Spjd rzio = zio_read(pio, spa, bp, buf->b_data, size, 2737185029Spjd arc_read_done, buf, priority, zio_flags, zb); 2738168404Spjd 2739168404Spjd if (*arc_flags & ARC_WAIT) 2740168404Spjd return (zio_wait(rzio)); 2741168404Spjd 2742168404Spjd ASSERT(*arc_flags & ARC_NOWAIT); 2743168404Spjd zio_nowait(rzio); 2744168404Spjd } 2745168404Spjd return (0); 2746168404Spjd} 2747168404Spjd 2748168404Spjd/* 2749168404Spjd * arc_read() variant to support pool traversal. If the block is already 2750168404Spjd * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2751168404Spjd * The idea is that we don't want pool traversal filling up memory, but 2752168404Spjd * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2753168404Spjd */ 2754168404Spjdint 2755168404Spjdarc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2756168404Spjd{ 2757168404Spjd arc_buf_hdr_t *hdr; 2758168404Spjd kmutex_t *hash_mtx; 2759168404Spjd int rc = 0; 2760168404Spjd 2761168404Spjd hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2762168404Spjd 2763168404Spjd if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2764168404Spjd arc_buf_t *buf = hdr->b_buf; 2765168404Spjd 2766168404Spjd ASSERT(buf); 2767168404Spjd while (buf->b_data == NULL) { 2768168404Spjd buf = buf->b_next; 2769168404Spjd ASSERT(buf); 2770168404Spjd } 2771168404Spjd bcopy(buf->b_data, data, hdr->b_size); 2772168404Spjd } else { 2773168404Spjd rc = ENOENT; 2774168404Spjd } 2775168404Spjd 2776168404Spjd if (hash_mtx) 2777168404Spjd mutex_exit(hash_mtx); 2778168404Spjd 2779168404Spjd return (rc); 2780168404Spjd} 2781168404Spjd 2782168404Spjdvoid 2783168404Spjdarc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2784168404Spjd{ 2785168404Spjd ASSERT(buf->b_hdr != NULL); 2786168404Spjd ASSERT(buf->b_hdr->b_state != arc_anon); 2787168404Spjd ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2788168404Spjd buf->b_efunc = func; 2789168404Spjd buf->b_private = private; 2790168404Spjd} 2791168404Spjd 2792168404Spjd/* 2793168404Spjd * This is used by the DMU to let the ARC know that a buffer is 2794168404Spjd * being evicted, so the ARC should clean up. If this arc buf 2795168404Spjd * is not yet in the evicted state, it will be put there. 2796168404Spjd */ 2797168404Spjdint 2798168404Spjdarc_buf_evict(arc_buf_t *buf) 2799168404Spjd{ 2800168404Spjd arc_buf_hdr_t *hdr; 2801168404Spjd kmutex_t *hash_lock; 2802168404Spjd arc_buf_t **bufp; 2803168404Spjd 2804185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 2805168404Spjd hdr = buf->b_hdr; 2806168404Spjd if (hdr == NULL) { 2807168404Spjd /* 2808168404Spjd * We are in arc_do_user_evicts(). 2809168404Spjd */ 2810168404Spjd ASSERT(buf->b_data == NULL); 2811185029Spjd rw_exit(&buf->b_lock); 2812168404Spjd return (0); 2813185029Spjd } else if (buf->b_data == NULL) { 2814185029Spjd arc_buf_t copy = *buf; /* structure assignment */ 2815185029Spjd /* 2816185029Spjd * We are on the eviction list; process this buffer now 2817185029Spjd * but let arc_do_user_evicts() do the reaping. 2818185029Spjd */ 2819185029Spjd buf->b_efunc = NULL; 2820185029Spjd rw_exit(&buf->b_lock); 2821185029Spjd VERIFY(copy.b_efunc(©) == 0); 2822185029Spjd return (1); 2823168404Spjd } 2824168404Spjd hash_lock = HDR_LOCK(hdr); 2825168404Spjd mutex_enter(hash_lock); 2826168404Spjd 2827168404Spjd ASSERT(buf->b_hdr == hdr); 2828168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2829168404Spjd ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2830168404Spjd 2831168404Spjd /* 2832168404Spjd * Pull this buffer off of the hdr 2833168404Spjd */ 2834168404Spjd bufp = &hdr->b_buf; 2835168404Spjd while (*bufp != buf) 2836168404Spjd bufp = &(*bufp)->b_next; 2837168404Spjd *bufp = buf->b_next; 2838168404Spjd 2839168404Spjd ASSERT(buf->b_data != NULL); 2840168404Spjd arc_buf_destroy(buf, FALSE, FALSE); 2841168404Spjd 2842168404Spjd if (hdr->b_datacnt == 0) { 2843168404Spjd arc_state_t *old_state = hdr->b_state; 2844168404Spjd arc_state_t *evicted_state; 2845168404Spjd 2846168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2847168404Spjd 2848168404Spjd evicted_state = 2849168404Spjd (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 2850168404Spjd 2851168404Spjd mutex_enter(&old_state->arcs_mtx); 2852168404Spjd mutex_enter(&evicted_state->arcs_mtx); 2853168404Spjd 2854168404Spjd arc_change_state(evicted_state, hdr, hash_lock); 2855168404Spjd ASSERT(HDR_IN_HASH_TABLE(hdr)); 2856185029Spjd hdr->b_flags |= ARC_IN_HASH_TABLE; 2857185029Spjd hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2858168404Spjd 2859168404Spjd mutex_exit(&evicted_state->arcs_mtx); 2860168404Spjd mutex_exit(&old_state->arcs_mtx); 2861168404Spjd } 2862168404Spjd mutex_exit(hash_lock); 2863185029Spjd rw_exit(&buf->b_lock); 2864168404Spjd 2865168404Spjd VERIFY(buf->b_efunc(buf) == 0); 2866168404Spjd buf->b_efunc = NULL; 2867168404Spjd buf->b_private = NULL; 2868168404Spjd buf->b_hdr = NULL; 2869168404Spjd kmem_cache_free(buf_cache, buf); 2870168404Spjd return (1); 2871168404Spjd} 2872168404Spjd 2873168404Spjd/* 2874168404Spjd * Release this buffer from the cache. This must be done 2875168404Spjd * after a read and prior to modifying the buffer contents. 2876168404Spjd * If the buffer has more than one reference, we must make 2877185029Spjd * a new hdr for the buffer. 2878168404Spjd */ 2879168404Spjdvoid 2880168404Spjdarc_release(arc_buf_t *buf, void *tag) 2881168404Spjd{ 2882185029Spjd arc_buf_hdr_t *hdr; 2883185029Spjd kmutex_t *hash_lock; 2884185029Spjd l2arc_buf_hdr_t *l2hdr; 2885185029Spjd uint64_t buf_size; 2886168404Spjd 2887185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 2888185029Spjd hdr = buf->b_hdr; 2889185029Spjd 2890168404Spjd /* this buffer is not on any list */ 2891168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2892185029Spjd ASSERT(!(hdr->b_flags & ARC_STORED)); 2893168404Spjd 2894168404Spjd if (hdr->b_state == arc_anon) { 2895168404Spjd /* this buffer is already released */ 2896168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2897168404Spjd ASSERT(BUF_EMPTY(hdr)); 2898168404Spjd ASSERT(buf->b_efunc == NULL); 2899168404Spjd arc_buf_thaw(buf); 2900185029Spjd rw_exit(&buf->b_lock); 2901168404Spjd return; 2902168404Spjd } 2903168404Spjd 2904185029Spjd hash_lock = HDR_LOCK(hdr); 2905168404Spjd mutex_enter(hash_lock); 2906168404Spjd 2907185029Spjd l2hdr = hdr->b_l2hdr; 2908185029Spjd if (l2hdr) { 2909185029Spjd mutex_enter(&l2arc_buflist_mtx); 2910185029Spjd hdr->b_l2hdr = NULL; 2911185029Spjd buf_size = hdr->b_size; 2912185029Spjd } 2913185029Spjd 2914168404Spjd /* 2915168404Spjd * Do we have more than one buf? 2916168404Spjd */ 2917185029Spjd if (hdr->b_datacnt > 1) { 2918168404Spjd arc_buf_hdr_t *nhdr; 2919168404Spjd arc_buf_t **bufp; 2920168404Spjd uint64_t blksz = hdr->b_size; 2921168404Spjd spa_t *spa = hdr->b_spa; 2922168404Spjd arc_buf_contents_t type = hdr->b_type; 2923185029Spjd uint32_t flags = hdr->b_flags; 2924168404Spjd 2925185029Spjd ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 2926168404Spjd /* 2927168404Spjd * Pull the data off of this buf and attach it to 2928168404Spjd * a new anonymous buf. 2929168404Spjd */ 2930168404Spjd (void) remove_reference(hdr, hash_lock, tag); 2931168404Spjd bufp = &hdr->b_buf; 2932168404Spjd while (*bufp != buf) 2933168404Spjd bufp = &(*bufp)->b_next; 2934168404Spjd *bufp = (*bufp)->b_next; 2935168404Spjd buf->b_next = NULL; 2936168404Spjd 2937168404Spjd ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2938168404Spjd atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2939168404Spjd if (refcount_is_zero(&hdr->b_refcnt)) { 2940185029Spjd uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2941185029Spjd ASSERT3U(*size, >=, hdr->b_size); 2942185029Spjd atomic_add_64(size, -hdr->b_size); 2943168404Spjd } 2944168404Spjd hdr->b_datacnt -= 1; 2945168404Spjd arc_cksum_verify(buf); 2946168404Spjd 2947168404Spjd mutex_exit(hash_lock); 2948168404Spjd 2949185029Spjd nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2950168404Spjd nhdr->b_size = blksz; 2951168404Spjd nhdr->b_spa = spa; 2952168404Spjd nhdr->b_type = type; 2953168404Spjd nhdr->b_buf = buf; 2954168404Spjd nhdr->b_state = arc_anon; 2955168404Spjd nhdr->b_arc_access = 0; 2956185029Spjd nhdr->b_flags = flags & ARC_L2_WRITING; 2957185029Spjd nhdr->b_l2hdr = NULL; 2958168404Spjd nhdr->b_datacnt = 1; 2959168404Spjd nhdr->b_freeze_cksum = NULL; 2960168404Spjd (void) refcount_add(&nhdr->b_refcnt, tag); 2961168404Spjd buf->b_hdr = nhdr; 2962185029Spjd rw_exit(&buf->b_lock); 2963168404Spjd atomic_add_64(&arc_anon->arcs_size, blksz); 2964168404Spjd } else { 2965185029Spjd rw_exit(&buf->b_lock); 2966168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2967168404Spjd ASSERT(!list_link_active(&hdr->b_arc_node)); 2968168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2969168404Spjd arc_change_state(arc_anon, hdr, hash_lock); 2970168404Spjd hdr->b_arc_access = 0; 2971168404Spjd mutex_exit(hash_lock); 2972185029Spjd 2973168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 2974168404Spjd hdr->b_birth = 0; 2975168404Spjd hdr->b_cksum0 = 0; 2976168404Spjd arc_buf_thaw(buf); 2977168404Spjd } 2978168404Spjd buf->b_efunc = NULL; 2979168404Spjd buf->b_private = NULL; 2980185029Spjd 2981185029Spjd if (l2hdr) { 2982185029Spjd list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 2983185029Spjd kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 2984185029Spjd ARCSTAT_INCR(arcstat_l2_size, -buf_size); 2985185029Spjd mutex_exit(&l2arc_buflist_mtx); 2986185029Spjd } 2987168404Spjd} 2988168404Spjd 2989168404Spjdint 2990168404Spjdarc_released(arc_buf_t *buf) 2991168404Spjd{ 2992185029Spjd int released; 2993185029Spjd 2994185029Spjd rw_enter(&buf->b_lock, RW_READER); 2995185029Spjd released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 2996185029Spjd rw_exit(&buf->b_lock); 2997185029Spjd return (released); 2998168404Spjd} 2999168404Spjd 3000168404Spjdint 3001168404Spjdarc_has_callback(arc_buf_t *buf) 3002168404Spjd{ 3003185029Spjd int callback; 3004185029Spjd 3005185029Spjd rw_enter(&buf->b_lock, RW_READER); 3006185029Spjd callback = (buf->b_efunc != NULL); 3007185029Spjd rw_exit(&buf->b_lock); 3008185029Spjd return (callback); 3009168404Spjd} 3010168404Spjd 3011168404Spjd#ifdef ZFS_DEBUG 3012168404Spjdint 3013168404Spjdarc_referenced(arc_buf_t *buf) 3014168404Spjd{ 3015185029Spjd int referenced; 3016185029Spjd 3017185029Spjd rw_enter(&buf->b_lock, RW_READER); 3018185029Spjd referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3019185029Spjd rw_exit(&buf->b_lock); 3020185029Spjd return (referenced); 3021168404Spjd} 3022168404Spjd#endif 3023168404Spjd 3024168404Spjdstatic void 3025168404Spjdarc_write_ready(zio_t *zio) 3026168404Spjd{ 3027168404Spjd arc_write_callback_t *callback = zio->io_private; 3028168404Spjd arc_buf_t *buf = callback->awcb_buf; 3029185029Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3030168404Spjd 3031185029Spjd ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3032185029Spjd callback->awcb_ready(zio, buf, callback->awcb_private); 3033185029Spjd 3034185029Spjd /* 3035185029Spjd * If the IO is already in progress, then this is a re-write 3036185029Spjd * attempt, so we need to thaw and re-compute the cksum. 3037185029Spjd * It is the responsibility of the callback to handle the 3038185029Spjd * accounting for any re-write attempt. 3039185029Spjd */ 3040185029Spjd if (HDR_IO_IN_PROGRESS(hdr)) { 3041185029Spjd mutex_enter(&hdr->b_freeze_lock); 3042185029Spjd if (hdr->b_freeze_cksum != NULL) { 3043185029Spjd kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3044185029Spjd hdr->b_freeze_cksum = NULL; 3045185029Spjd } 3046185029Spjd mutex_exit(&hdr->b_freeze_lock); 3047168404Spjd } 3048185029Spjd arc_cksum_compute(buf, B_FALSE); 3049185029Spjd hdr->b_flags |= ARC_IO_IN_PROGRESS; 3050168404Spjd} 3051168404Spjd 3052168404Spjdstatic void 3053168404Spjdarc_write_done(zio_t *zio) 3054168404Spjd{ 3055168404Spjd arc_write_callback_t *callback = zio->io_private; 3056168404Spjd arc_buf_t *buf = callback->awcb_buf; 3057168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3058168404Spjd 3059168404Spjd hdr->b_acb = NULL; 3060168404Spjd 3061168404Spjd hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3062168404Spjd hdr->b_birth = zio->io_bp->blk_birth; 3063168404Spjd hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3064168404Spjd /* 3065168404Spjd * If the block to be written was all-zero, we may have 3066168404Spjd * compressed it away. In this case no write was performed 3067168404Spjd * so there will be no dva/birth-date/checksum. The buffer 3068168404Spjd * must therefor remain anonymous (and uncached). 3069168404Spjd */ 3070168404Spjd if (!BUF_EMPTY(hdr)) { 3071168404Spjd arc_buf_hdr_t *exists; 3072168404Spjd kmutex_t *hash_lock; 3073168404Spjd 3074168404Spjd arc_cksum_verify(buf); 3075168404Spjd 3076168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 3077168404Spjd if (exists) { 3078168404Spjd /* 3079168404Spjd * This can only happen if we overwrite for 3080168404Spjd * sync-to-convergence, because we remove 3081168404Spjd * buffers from the hash table when we arc_free(). 3082168404Spjd */ 3083185029Spjd ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); 3084168404Spjd ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 3085168404Spjd BP_IDENTITY(zio->io_bp))); 3086168404Spjd ASSERT3U(zio->io_bp_orig.blk_birth, ==, 3087168404Spjd zio->io_bp->blk_birth); 3088168404Spjd 3089168404Spjd ASSERT(refcount_is_zero(&exists->b_refcnt)); 3090168404Spjd arc_change_state(arc_anon, exists, hash_lock); 3091168404Spjd mutex_exit(hash_lock); 3092168404Spjd arc_hdr_destroy(exists); 3093168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 3094168404Spjd ASSERT3P(exists, ==, NULL); 3095168404Spjd } 3096168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3097185029Spjd /* if it's not anon, we are doing a scrub */ 3098185029Spjd if (hdr->b_state == arc_anon) 3099185029Spjd arc_access(hdr, hash_lock); 3100168404Spjd mutex_exit(hash_lock); 3101168404Spjd } else if (callback->awcb_done == NULL) { 3102168404Spjd int destroy_hdr; 3103168404Spjd /* 3104168404Spjd * This is an anonymous buffer with no user callback, 3105168404Spjd * destroy it if there are no active references. 3106168404Spjd */ 3107168404Spjd mutex_enter(&arc_eviction_mtx); 3108168404Spjd destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3109168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3110168404Spjd mutex_exit(&arc_eviction_mtx); 3111168404Spjd if (destroy_hdr) 3112168404Spjd arc_hdr_destroy(hdr); 3113168404Spjd } else { 3114168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3115168404Spjd } 3116185029Spjd hdr->b_flags &= ~ARC_STORED; 3117168404Spjd 3118168404Spjd if (callback->awcb_done) { 3119168404Spjd ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3120168404Spjd callback->awcb_done(zio, buf, callback->awcb_private); 3121168404Spjd } 3122168404Spjd 3123168404Spjd kmem_free(callback, sizeof (arc_write_callback_t)); 3124168404Spjd} 3125168404Spjd 3126185029Spjdstatic void 3127185029Spjdwrite_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) 3128185029Spjd{ 3129185029Spjd boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); 3130185029Spjd 3131185029Spjd /* Determine checksum setting */ 3132185029Spjd if (ismd) { 3133185029Spjd /* 3134185029Spjd * Metadata always gets checksummed. If the data 3135185029Spjd * checksum is multi-bit correctable, and it's not a 3136185029Spjd * ZBT-style checksum, then it's suitable for metadata 3137185029Spjd * as well. Otherwise, the metadata checksum defaults 3138185029Spjd * to fletcher4. 3139185029Spjd */ 3140185029Spjd if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && 3141185029Spjd !zio_checksum_table[wp->wp_oschecksum].ci_zbt) 3142185029Spjd zp->zp_checksum = wp->wp_oschecksum; 3143185029Spjd else 3144185029Spjd zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; 3145185029Spjd } else { 3146185029Spjd zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, 3147185029Spjd wp->wp_oschecksum); 3148185029Spjd } 3149185029Spjd 3150185029Spjd /* Determine compression setting */ 3151185029Spjd if (ismd) { 3152185029Spjd /* 3153185029Spjd * XXX -- we should design a compression algorithm 3154185029Spjd * that specializes in arrays of bps. 3155185029Spjd */ 3156185029Spjd zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY : 3157185029Spjd ZIO_COMPRESS_LZJB; 3158185029Spjd } else { 3159185029Spjd zp->zp_compress = zio_compress_select(wp->wp_dncompress, 3160185029Spjd wp->wp_oscompress); 3161185029Spjd } 3162185029Spjd 3163185029Spjd zp->zp_type = wp->wp_type; 3164185029Spjd zp->zp_level = wp->wp_level; 3165185029Spjd zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); 3166185029Spjd} 3167185029Spjd 3168168404Spjdzio_t * 3169185029Spjdarc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, 3170185029Spjd boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3171168404Spjd arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3172185029Spjd int zio_flags, const zbookmark_t *zb) 3173168404Spjd{ 3174168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3175168404Spjd arc_write_callback_t *callback; 3176185029Spjd zio_t *zio; 3177185029Spjd zio_prop_t zp; 3178168404Spjd 3179185029Spjd ASSERT(ready != NULL); 3180168404Spjd ASSERT(!HDR_IO_ERROR(hdr)); 3181168404Spjd ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3182168404Spjd ASSERT(hdr->b_acb == 0); 3183185029Spjd if (l2arc) 3184185029Spjd hdr->b_flags |= ARC_L2CACHE; 3185168404Spjd callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3186168404Spjd callback->awcb_ready = ready; 3187168404Spjd callback->awcb_done = done; 3188168404Spjd callback->awcb_private = private; 3189168404Spjd callback->awcb_buf = buf; 3190168404Spjd 3191185029Spjd write_policy(spa, wp, &zp); 3192185029Spjd zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, 3193185029Spjd arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3194185029Spjd 3195168404Spjd return (zio); 3196168404Spjd} 3197168404Spjd 3198168404Spjdint 3199168404Spjdarc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3200168404Spjd zio_done_func_t *done, void *private, uint32_t arc_flags) 3201168404Spjd{ 3202168404Spjd arc_buf_hdr_t *ab; 3203168404Spjd kmutex_t *hash_lock; 3204168404Spjd zio_t *zio; 3205168404Spjd 3206168404Spjd /* 3207168404Spjd * If this buffer is in the cache, release it, so it 3208168404Spjd * can be re-used. 3209168404Spjd */ 3210168404Spjd ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3211168404Spjd if (ab != NULL) { 3212168404Spjd /* 3213168404Spjd * The checksum of blocks to free is not always 3214168404Spjd * preserved (eg. on the deadlist). However, if it is 3215168404Spjd * nonzero, it should match what we have in the cache. 3216168404Spjd */ 3217168404Spjd ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3218185029Spjd bp->blk_cksum.zc_word[0] == ab->b_cksum0 || 3219185029Spjd bp->blk_fill == BLK_FILL_ALREADY_FREED); 3220185029Spjd 3221168404Spjd if (ab->b_state != arc_anon) 3222168404Spjd arc_change_state(arc_anon, ab, hash_lock); 3223168404Spjd if (HDR_IO_IN_PROGRESS(ab)) { 3224168404Spjd /* 3225168404Spjd * This should only happen when we prefetch. 3226168404Spjd */ 3227168404Spjd ASSERT(ab->b_flags & ARC_PREFETCH); 3228168404Spjd ASSERT3U(ab->b_datacnt, ==, 1); 3229168404Spjd ab->b_flags |= ARC_FREED_IN_READ; 3230168404Spjd if (HDR_IN_HASH_TABLE(ab)) 3231168404Spjd buf_hash_remove(ab); 3232168404Spjd ab->b_arc_access = 0; 3233168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 3234168404Spjd ab->b_birth = 0; 3235168404Spjd ab->b_cksum0 = 0; 3236168404Spjd ab->b_buf->b_efunc = NULL; 3237168404Spjd ab->b_buf->b_private = NULL; 3238168404Spjd mutex_exit(hash_lock); 3239168404Spjd } else if (refcount_is_zero(&ab->b_refcnt)) { 3240185029Spjd ab->b_flags |= ARC_FREE_IN_PROGRESS; 3241168404Spjd mutex_exit(hash_lock); 3242168404Spjd arc_hdr_destroy(ab); 3243168404Spjd ARCSTAT_BUMP(arcstat_deleted); 3244168404Spjd } else { 3245168404Spjd /* 3246168404Spjd * We still have an active reference on this 3247168404Spjd * buffer. This can happen, e.g., from 3248168404Spjd * dbuf_unoverride(). 3249168404Spjd */ 3250168404Spjd ASSERT(!HDR_IN_HASH_TABLE(ab)); 3251168404Spjd ab->b_arc_access = 0; 3252168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 3253168404Spjd ab->b_birth = 0; 3254168404Spjd ab->b_cksum0 = 0; 3255168404Spjd ab->b_buf->b_efunc = NULL; 3256168404Spjd ab->b_buf->b_private = NULL; 3257168404Spjd mutex_exit(hash_lock); 3258168404Spjd } 3259168404Spjd } 3260168404Spjd 3261185029Spjd zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); 3262168404Spjd 3263168404Spjd if (arc_flags & ARC_WAIT) 3264168404Spjd return (zio_wait(zio)); 3265168404Spjd 3266168404Spjd ASSERT(arc_flags & ARC_NOWAIT); 3267168404Spjd zio_nowait(zio); 3268168404Spjd 3269168404Spjd return (0); 3270168404Spjd} 3271168404Spjd 3272185029Spjdstatic int 3273185029Spjdarc_memory_throttle(uint64_t reserve, uint64_t txg) 3274185029Spjd{ 3275185029Spjd#ifdef _KERNEL 3276185029Spjd uint64_t inflight_data = arc_anon->arcs_size; 3277185029Spjd uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count); 3278185029Spjd static uint64_t page_load = 0; 3279185029Spjd static uint64_t last_txg = 0; 3280185029Spjd 3281185029Spjd#if 0 3282185029Spjd#if defined(__i386) 3283185029Spjd available_memory = 3284185029Spjd MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3285185029Spjd#endif 3286185029Spjd#endif 3287185029Spjd if (available_memory >= zfs_write_limit_max) 3288185029Spjd return (0); 3289185029Spjd 3290185029Spjd if (txg > last_txg) { 3291185029Spjd last_txg = txg; 3292185029Spjd page_load = 0; 3293185029Spjd } 3294185029Spjd /* 3295185029Spjd * If we are in pageout, we know that memory is already tight, 3296185029Spjd * the arc is already going to be evicting, so we just want to 3297185029Spjd * continue to let page writes occur as quickly as possible. 3298185029Spjd */ 3299185029Spjd if (curproc == pageproc) { 3300185029Spjd if (page_load > available_memory / 4) 3301185029Spjd return (ERESTART); 3302185029Spjd /* Note: reserve is inflated, so we deflate */ 3303185029Spjd page_load += reserve / 8; 3304185029Spjd return (0); 3305185029Spjd } else if (page_load > 0 && arc_reclaim_needed()) { 3306185029Spjd /* memory is low, delay before restarting */ 3307185029Spjd ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3308185029Spjd return (EAGAIN); 3309185029Spjd } 3310185029Spjd page_load = 0; 3311185029Spjd 3312185029Spjd if (arc_size > arc_c_min) { 3313185029Spjd uint64_t evictable_memory = 3314185029Spjd arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3315185029Spjd arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3316185029Spjd arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3317185029Spjd arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3318185029Spjd available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3319185029Spjd } 3320185029Spjd 3321185029Spjd if (inflight_data > available_memory / 4) { 3322185029Spjd ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3323185029Spjd return (ERESTART); 3324185029Spjd } 3325185029Spjd#endif 3326185029Spjd return (0); 3327185029Spjd} 3328185029Spjd 3329168404Spjdvoid 3330185029Spjdarc_tempreserve_clear(uint64_t reserve) 3331168404Spjd{ 3332185029Spjd atomic_add_64(&arc_tempreserve, -reserve); 3333168404Spjd ASSERT((int64_t)arc_tempreserve >= 0); 3334168404Spjd} 3335168404Spjd 3336168404Spjdint 3337185029Spjdarc_tempreserve_space(uint64_t reserve, uint64_t txg) 3338168404Spjd{ 3339185029Spjd int error; 3340185029Spjd 3341168404Spjd#ifdef ZFS_DEBUG 3342168404Spjd /* 3343168404Spjd * Once in a while, fail for no reason. Everything should cope. 3344168404Spjd */ 3345168404Spjd if (spa_get_random(10000) == 0) { 3346168404Spjd dprintf("forcing random failure\n"); 3347168404Spjd return (ERESTART); 3348168404Spjd } 3349168404Spjd#endif 3350185029Spjd if (reserve > arc_c/4 && !arc_no_grow) 3351185029Spjd arc_c = MIN(arc_c_max, reserve * 4); 3352185029Spjd if (reserve > arc_c) 3353168404Spjd return (ENOMEM); 3354168404Spjd 3355168404Spjd /* 3356185029Spjd * Writes will, almost always, require additional memory allocations 3357185029Spjd * in order to compress/encrypt/etc the data. We therefor need to 3358185029Spjd * make sure that there is sufficient available memory for this. 3359185029Spjd */ 3360185029Spjd if (error = arc_memory_throttle(reserve, txg)) 3361185029Spjd return (error); 3362185029Spjd 3363185029Spjd /* 3364168404Spjd * Throttle writes when the amount of dirty data in the cache 3365168404Spjd * gets too large. We try to keep the cache less than half full 3366168404Spjd * of dirty blocks so that our sync times don't grow too large. 3367168404Spjd * Note: if two requests come in concurrently, we might let them 3368168404Spjd * both succeed, when one of them should fail. Not a huge deal. 3369168404Spjd */ 3370185029Spjd if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3371185029Spjd arc_anon->arcs_size > arc_c / 4) { 3372185029Spjd dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3373185029Spjd "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3374185029Spjd arc_tempreserve>>10, 3375185029Spjd arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3376185029Spjd arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3377185029Spjd reserve>>10, arc_c>>10); 3378168404Spjd return (ERESTART); 3379168404Spjd } 3380185029Spjd atomic_add_64(&arc_tempreserve, reserve); 3381168404Spjd return (0); 3382168404Spjd} 3383168404Spjd 3384168582Spjdstatic kmutex_t arc_lowmem_lock; 3385168404Spjd#ifdef _KERNEL 3386168566Spjdstatic eventhandler_tag arc_event_lowmem = NULL; 3387168404Spjd 3388168404Spjdstatic void 3389168566Spjdarc_lowmem(void *arg __unused, int howto __unused) 3390168404Spjd{ 3391168404Spjd 3392168566Spjd /* Serialize access via arc_lowmem_lock. */ 3393168566Spjd mutex_enter(&arc_lowmem_lock); 3394185029Spjd needfree = 1; 3395168404Spjd cv_signal(&arc_reclaim_thr_cv); 3396185029Spjd while (needfree) 3397185029Spjd tsleep(&needfree, 0, "zfs:lowmem", hz / 5); 3398168566Spjd mutex_exit(&arc_lowmem_lock); 3399168404Spjd} 3400168404Spjd#endif 3401168404Spjd 3402168404Spjdvoid 3403168404Spjdarc_init(void) 3404168404Spjd{ 3405168404Spjd mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3406168404Spjd cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3407168566Spjd mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3408168404Spjd 3409168404Spjd /* Convert seconds to clock ticks */ 3410168404Spjd arc_min_prefetch_lifespan = 1 * hz; 3411168404Spjd 3412168404Spjd /* Start out with 1/8 of all memory */ 3413168566Spjd arc_c = kmem_size() / 8; 3414168404Spjd#if 0 3415168404Spjd#ifdef _KERNEL 3416168404Spjd /* 3417168404Spjd * On architectures where the physical memory can be larger 3418168404Spjd * than the addressable space (intel in 32-bit mode), we may 3419168404Spjd * need to limit the cache to 1/8 of VM size. 3420168404Spjd */ 3421168404Spjd arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3422168404Spjd#endif 3423168404Spjd#endif 3424168566Spjd /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3425168566Spjd arc_c_min = MAX(arc_c / 4, 64<<18); 3426168566Spjd /* set max to 1/2 of all memory, or all but 1GB, whichever is more */ 3427168404Spjd if (arc_c * 8 >= 1<<30) 3428168404Spjd arc_c_max = (arc_c * 8) - (1<<30); 3429168404Spjd else 3430168404Spjd arc_c_max = arc_c_min; 3431175633Spjd arc_c_max = MAX(arc_c * 5, arc_c_max); 3432168481Spjd#ifdef _KERNEL 3433168404Spjd /* 3434168404Spjd * Allow the tunables to override our calculations if they are 3435168566Spjd * reasonable (ie. over 16MB) 3436168404Spjd */ 3437168566Spjd if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size()) 3438168404Spjd arc_c_max = zfs_arc_max; 3439168566Spjd if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max) 3440168404Spjd arc_c_min = zfs_arc_min; 3441168481Spjd#endif 3442168404Spjd arc_c = arc_c_max; 3443168404Spjd arc_p = (arc_c >> 1); 3444168404Spjd 3445185029Spjd /* limit meta-data to 1/4 of the arc capacity */ 3446185029Spjd arc_meta_limit = arc_c_max / 4; 3447185029Spjd 3448185029Spjd /* Allow the tunable to override if it is reasonable */ 3449185029Spjd if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3450185029Spjd arc_meta_limit = zfs_arc_meta_limit; 3451185029Spjd 3452185029Spjd if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3453185029Spjd arc_c_min = arc_meta_limit / 2; 3454185029Spjd 3455168404Spjd /* if kmem_flags are set, lets try to use less memory */ 3456168404Spjd if (kmem_debugging()) 3457168404Spjd arc_c = arc_c / 2; 3458168404Spjd if (arc_c < arc_c_min) 3459168404Spjd arc_c = arc_c_min; 3460168404Spjd 3461168473Spjd zfs_arc_min = arc_c_min; 3462168473Spjd zfs_arc_max = arc_c_max; 3463168473Spjd 3464168404Spjd arc_anon = &ARC_anon; 3465168404Spjd arc_mru = &ARC_mru; 3466168404Spjd arc_mru_ghost = &ARC_mru_ghost; 3467168404Spjd arc_mfu = &ARC_mfu; 3468168404Spjd arc_mfu_ghost = &ARC_mfu_ghost; 3469185029Spjd arc_l2c_only = &ARC_l2c_only; 3470168404Spjd arc_size = 0; 3471168404Spjd 3472168404Spjd mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3473168404Spjd mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3474168404Spjd mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3475168404Spjd mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3476168404Spjd mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3477185029Spjd mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3478168404Spjd 3479185029Spjd list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3480185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3481185029Spjd list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3482185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3483185029Spjd list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3484185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3485185029Spjd list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3486185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3487185029Spjd list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3488185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3489185029Spjd list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3490185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3491185029Spjd list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3492185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3493185029Spjd list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3494185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3495185029Spjd list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3496185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3497185029Spjd list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3498185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3499168404Spjd 3500168404Spjd buf_init(); 3501168404Spjd 3502168404Spjd arc_thread_exit = 0; 3503168404Spjd arc_eviction_list = NULL; 3504168404Spjd mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3505168404Spjd bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3506168404Spjd 3507168404Spjd arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3508168404Spjd sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3509168404Spjd 3510168404Spjd if (arc_ksp != NULL) { 3511168404Spjd arc_ksp->ks_data = &arc_stats; 3512168404Spjd kstat_install(arc_ksp); 3513168404Spjd } 3514168404Spjd 3515168404Spjd (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3516168404Spjd TS_RUN, minclsyspri); 3517168404Spjd 3518168404Spjd#ifdef _KERNEL 3519168566Spjd arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3520168404Spjd EVENTHANDLER_PRI_FIRST); 3521168404Spjd#endif 3522168404Spjd 3523168404Spjd arc_dead = FALSE; 3524185029Spjd arc_warm = B_FALSE; 3525168566Spjd 3526185029Spjd if (zfs_write_limit_max == 0) 3527185029Spjd zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3528185029Spjd else 3529185029Spjd zfs_write_limit_shift = 0; 3530185029Spjd mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3531185029Spjd 3532168566Spjd#ifdef _KERNEL 3533175633Spjd /* Warn about ZFS memory and address space requirements. */ 3534168696Spjd if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3535168987Sbmah printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3536168987Sbmah "expect unstable behavior.\n"); 3537175633Spjd } 3538175633Spjd if (kmem_size() < 512 * (1 << 20)) { 3539173419Spjd printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3540168987Sbmah "expect unstable behavior.\n"); 3541185029Spjd printf(" Consider tuning vm.kmem_size and " 3542173419Spjd "vm.kmem_size_max\n"); 3543185029Spjd printf(" in /boot/loader.conf.\n"); 3544168566Spjd } 3545168566Spjd#endif 3546168404Spjd} 3547168404Spjd 3548168404Spjdvoid 3549168404Spjdarc_fini(void) 3550168404Spjd{ 3551185029Spjd 3552168404Spjd mutex_enter(&arc_reclaim_thr_lock); 3553168404Spjd arc_thread_exit = 1; 3554168404Spjd cv_signal(&arc_reclaim_thr_cv); 3555168404Spjd while (arc_thread_exit != 0) 3556168404Spjd cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3557168404Spjd mutex_exit(&arc_reclaim_thr_lock); 3558168404Spjd 3559185029Spjd arc_flush(NULL); 3560168404Spjd 3561168404Spjd arc_dead = TRUE; 3562168404Spjd 3563168404Spjd if (arc_ksp != NULL) { 3564168404Spjd kstat_delete(arc_ksp); 3565168404Spjd arc_ksp = NULL; 3566168404Spjd } 3567168404Spjd 3568168404Spjd mutex_destroy(&arc_eviction_mtx); 3569168404Spjd mutex_destroy(&arc_reclaim_thr_lock); 3570168404Spjd cv_destroy(&arc_reclaim_thr_cv); 3571168404Spjd 3572185029Spjd list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3573185029Spjd list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3574185029Spjd list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3575185029Spjd list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3576185029Spjd list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3577185029Spjd list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3578185029Spjd list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3579185029Spjd list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3580168404Spjd 3581168404Spjd mutex_destroy(&arc_anon->arcs_mtx); 3582168404Spjd mutex_destroy(&arc_mru->arcs_mtx); 3583168404Spjd mutex_destroy(&arc_mru_ghost->arcs_mtx); 3584168404Spjd mutex_destroy(&arc_mfu->arcs_mtx); 3585168404Spjd mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3586168404Spjd 3587185029Spjd mutex_destroy(&zfs_write_limit_lock); 3588185029Spjd 3589168404Spjd buf_fini(); 3590168404Spjd 3591168582Spjd mutex_destroy(&arc_lowmem_lock); 3592168404Spjd#ifdef _KERNEL 3593168566Spjd if (arc_event_lowmem != NULL) 3594168566Spjd EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 3595168404Spjd#endif 3596168404Spjd} 3597185029Spjd 3598185029Spjd/* 3599185029Spjd * Level 2 ARC 3600185029Spjd * 3601185029Spjd * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3602185029Spjd * It uses dedicated storage devices to hold cached data, which are populated 3603185029Spjd * using large infrequent writes. The main role of this cache is to boost 3604185029Spjd * the performance of random read workloads. The intended L2ARC devices 3605185029Spjd * include short-stroked disks, solid state disks, and other media with 3606185029Spjd * substantially faster read latency than disk. 3607185029Spjd * 3608185029Spjd * +-----------------------+ 3609185029Spjd * | ARC | 3610185029Spjd * +-----------------------+ 3611185029Spjd * | ^ ^ 3612185029Spjd * | | | 3613185029Spjd * l2arc_feed_thread() arc_read() 3614185029Spjd * | | | 3615185029Spjd * | l2arc read | 3616185029Spjd * V | | 3617185029Spjd * +---------------+ | 3618185029Spjd * | L2ARC | | 3619185029Spjd * +---------------+ | 3620185029Spjd * | ^ | 3621185029Spjd * l2arc_write() | | 3622185029Spjd * | | | 3623185029Spjd * V | | 3624185029Spjd * +-------+ +-------+ 3625185029Spjd * | vdev | | vdev | 3626185029Spjd * | cache | | cache | 3627185029Spjd * +-------+ +-------+ 3628185029Spjd * +=========+ .-----. 3629185029Spjd * : L2ARC : |-_____-| 3630185029Spjd * : devices : | Disks | 3631185029Spjd * +=========+ `-_____-' 3632185029Spjd * 3633185029Spjd * Read requests are satisfied from the following sources, in order: 3634185029Spjd * 3635185029Spjd * 1) ARC 3636185029Spjd * 2) vdev cache of L2ARC devices 3637185029Spjd * 3) L2ARC devices 3638185029Spjd * 4) vdev cache of disks 3639185029Spjd * 5) disks 3640185029Spjd * 3641185029Spjd * Some L2ARC device types exhibit extremely slow write performance. 3642185029Spjd * To accommodate for this there are some significant differences between 3643185029Spjd * the L2ARC and traditional cache design: 3644185029Spjd * 3645185029Spjd * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3646185029Spjd * the ARC behave as usual, freeing buffers and placing headers on ghost 3647185029Spjd * lists. The ARC does not send buffers to the L2ARC during eviction as 3648185029Spjd * this would add inflated write latencies for all ARC memory pressure. 3649185029Spjd * 3650185029Spjd * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3651185029Spjd * It does this by periodically scanning buffers from the eviction-end of 3652185029Spjd * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3653185029Spjd * not already there. It scans until a headroom of buffers is satisfied, 3654185029Spjd * which itself is a buffer for ARC eviction. The thread that does this is 3655185029Spjd * l2arc_feed_thread(), illustrated below; example sizes are included to 3656185029Spjd * provide a better sense of ratio than this diagram: 3657185029Spjd * 3658185029Spjd * head --> tail 3659185029Spjd * +---------------------+----------+ 3660185029Spjd * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3661185029Spjd * +---------------------+----------+ | o L2ARC eligible 3662185029Spjd * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3663185029Spjd * +---------------------+----------+ | 3664185029Spjd * 15.9 Gbytes ^ 32 Mbytes | 3665185029Spjd * headroom | 3666185029Spjd * l2arc_feed_thread() 3667185029Spjd * | 3668185029Spjd * l2arc write hand <--[oooo]--' 3669185029Spjd * | 8 Mbyte 3670185029Spjd * | write max 3671185029Spjd * V 3672185029Spjd * +==============================+ 3673185029Spjd * L2ARC dev |####|#|###|###| |####| ... | 3674185029Spjd * +==============================+ 3675185029Spjd * 32 Gbytes 3676185029Spjd * 3677185029Spjd * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3678185029Spjd * evicted, then the L2ARC has cached a buffer much sooner than it probably 3679185029Spjd * needed to, potentially wasting L2ARC device bandwidth and storage. It is 3680185029Spjd * safe to say that this is an uncommon case, since buffers at the end of 3681185029Spjd * the ARC lists have moved there due to inactivity. 3682185029Spjd * 3683185029Spjd * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3684185029Spjd * then the L2ARC simply misses copying some buffers. This serves as a 3685185029Spjd * pressure valve to prevent heavy read workloads from both stalling the ARC 3686185029Spjd * with waits and clogging the L2ARC with writes. This also helps prevent 3687185029Spjd * the potential for the L2ARC to churn if it attempts to cache content too 3688185029Spjd * quickly, such as during backups of the entire pool. 3689185029Spjd * 3690185029Spjd * 5. After system boot and before the ARC has filled main memory, there are 3691185029Spjd * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 3692185029Spjd * lists can remain mostly static. Instead of searching from tail of these 3693185029Spjd * lists as pictured, the l2arc_feed_thread() will search from the list heads 3694185029Spjd * for eligible buffers, greatly increasing its chance of finding them. 3695185029Spjd * 3696185029Spjd * The L2ARC device write speed is also boosted during this time so that 3697185029Spjd * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3698185029Spjd * there are no L2ARC reads, and no fear of degrading read performance 3699185029Spjd * through increased writes. 3700185029Spjd * 3701185029Spjd * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3702185029Spjd * the vdev queue can aggregate them into larger and fewer writes. Each 3703185029Spjd * device is written to in a rotor fashion, sweeping writes through 3704185029Spjd * available space then repeating. 3705185029Spjd * 3706185029Spjd * 7. The L2ARC does not store dirty content. It never needs to flush 3707185029Spjd * write buffers back to disk based storage. 3708185029Spjd * 3709185029Spjd * 8. If an ARC buffer is written (and dirtied) which also exists in the 3710185029Spjd * L2ARC, the now stale L2ARC buffer is immediately dropped. 3711185029Spjd * 3712185029Spjd * The performance of the L2ARC can be tweaked by a number of tunables, which 3713185029Spjd * may be necessary for different workloads: 3714185029Spjd * 3715185029Spjd * l2arc_write_max max write bytes per interval 3716185029Spjd * l2arc_write_boost extra write bytes during device warmup 3717185029Spjd * l2arc_noprefetch skip caching prefetched buffers 3718185029Spjd * l2arc_headroom number of max device writes to precache 3719185029Spjd * l2arc_feed_secs seconds between L2ARC writing 3720185029Spjd * 3721185029Spjd * Tunables may be removed or added as future performance improvements are 3722185029Spjd * integrated, and also may become zpool properties. 3723185029Spjd */ 3724185029Spjd 3725185029Spjdstatic void 3726185029Spjdl2arc_hdr_stat_add(void) 3727185029Spjd{ 3728185029Spjd ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3729185029Spjd ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3730185029Spjd} 3731185029Spjd 3732185029Spjdstatic void 3733185029Spjdl2arc_hdr_stat_remove(void) 3734185029Spjd{ 3735185029Spjd ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3736185029Spjd ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3737185029Spjd} 3738185029Spjd 3739185029Spjd/* 3740185029Spjd * Cycle through L2ARC devices. This is how L2ARC load balances. 3741185029Spjd * If a device is returned, this also returns holding the spa config lock. 3742185029Spjd */ 3743185029Spjdstatic l2arc_dev_t * 3744185029Spjdl2arc_dev_get_next(void) 3745185029Spjd{ 3746185029Spjd l2arc_dev_t *first, *next = NULL; 3747185029Spjd 3748185029Spjd /* 3749185029Spjd * Lock out the removal of spas (spa_namespace_lock), then removal 3750185029Spjd * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3751185029Spjd * both locks will be dropped and a spa config lock held instead. 3752185029Spjd */ 3753185029Spjd mutex_enter(&spa_namespace_lock); 3754185029Spjd mutex_enter(&l2arc_dev_mtx); 3755185029Spjd 3756185029Spjd /* if there are no vdevs, there is nothing to do */ 3757185029Spjd if (l2arc_ndev == 0) 3758185029Spjd goto out; 3759185029Spjd 3760185029Spjd first = NULL; 3761185029Spjd next = l2arc_dev_last; 3762185029Spjd do { 3763185029Spjd /* loop around the list looking for a non-faulted vdev */ 3764185029Spjd if (next == NULL) { 3765185029Spjd next = list_head(l2arc_dev_list); 3766185029Spjd } else { 3767185029Spjd next = list_next(l2arc_dev_list, next); 3768185029Spjd if (next == NULL) 3769185029Spjd next = list_head(l2arc_dev_list); 3770185029Spjd } 3771185029Spjd 3772185029Spjd /* if we have come back to the start, bail out */ 3773185029Spjd if (first == NULL) 3774185029Spjd first = next; 3775185029Spjd else if (next == first) 3776185029Spjd break; 3777185029Spjd 3778185029Spjd } while (vdev_is_dead(next->l2ad_vdev)); 3779185029Spjd 3780185029Spjd /* if we were unable to find any usable vdevs, return NULL */ 3781185029Spjd if (vdev_is_dead(next->l2ad_vdev)) 3782185029Spjd next = NULL; 3783185029Spjd 3784185029Spjd l2arc_dev_last = next; 3785185029Spjd 3786185029Spjdout: 3787185029Spjd mutex_exit(&l2arc_dev_mtx); 3788185029Spjd 3789185029Spjd /* 3790185029Spjd * Grab the config lock to prevent the 'next' device from being 3791185029Spjd * removed while we are writing to it. 3792185029Spjd */ 3793185029Spjd if (next != NULL) 3794185029Spjd spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3795185029Spjd mutex_exit(&spa_namespace_lock); 3796185029Spjd 3797185029Spjd return (next); 3798185029Spjd} 3799185029Spjd 3800185029Spjd/* 3801185029Spjd * Free buffers that were tagged for destruction. 3802185029Spjd */ 3803185029Spjdstatic void 3804185029Spjdl2arc_do_free_on_write() 3805185029Spjd{ 3806185029Spjd list_t *buflist; 3807185029Spjd l2arc_data_free_t *df, *df_prev; 3808185029Spjd 3809185029Spjd mutex_enter(&l2arc_free_on_write_mtx); 3810185029Spjd buflist = l2arc_free_on_write; 3811185029Spjd 3812185029Spjd for (df = list_tail(buflist); df; df = df_prev) { 3813185029Spjd df_prev = list_prev(buflist, df); 3814185029Spjd ASSERT(df->l2df_data != NULL); 3815185029Spjd ASSERT(df->l2df_func != NULL); 3816185029Spjd df->l2df_func(df->l2df_data, df->l2df_size); 3817185029Spjd list_remove(buflist, df); 3818185029Spjd kmem_free(df, sizeof (l2arc_data_free_t)); 3819185029Spjd } 3820185029Spjd 3821185029Spjd mutex_exit(&l2arc_free_on_write_mtx); 3822185029Spjd} 3823185029Spjd 3824185029Spjd/* 3825185029Spjd * A write to a cache device has completed. Update all headers to allow 3826185029Spjd * reads from these buffers to begin. 3827185029Spjd */ 3828185029Spjdstatic void 3829185029Spjdl2arc_write_done(zio_t *zio) 3830185029Spjd{ 3831185029Spjd l2arc_write_callback_t *cb; 3832185029Spjd l2arc_dev_t *dev; 3833185029Spjd list_t *buflist; 3834185029Spjd arc_buf_hdr_t *head, *ab, *ab_prev; 3835185029Spjd l2arc_buf_hdr_t *abl2; 3836185029Spjd kmutex_t *hash_lock; 3837185029Spjd 3838185029Spjd cb = zio->io_private; 3839185029Spjd ASSERT(cb != NULL); 3840185029Spjd dev = cb->l2wcb_dev; 3841185029Spjd ASSERT(dev != NULL); 3842185029Spjd head = cb->l2wcb_head; 3843185029Spjd ASSERT(head != NULL); 3844185029Spjd buflist = dev->l2ad_buflist; 3845185029Spjd ASSERT(buflist != NULL); 3846185029Spjd DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3847185029Spjd l2arc_write_callback_t *, cb); 3848185029Spjd 3849185029Spjd if (zio->io_error != 0) 3850185029Spjd ARCSTAT_BUMP(arcstat_l2_writes_error); 3851185029Spjd 3852185029Spjd mutex_enter(&l2arc_buflist_mtx); 3853185029Spjd 3854185029Spjd /* 3855185029Spjd * All writes completed, or an error was hit. 3856185029Spjd */ 3857185029Spjd for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3858185029Spjd ab_prev = list_prev(buflist, ab); 3859185029Spjd 3860185029Spjd hash_lock = HDR_LOCK(ab); 3861185029Spjd if (!mutex_tryenter(hash_lock)) { 3862185029Spjd /* 3863185029Spjd * This buffer misses out. It may be in a stage 3864185029Spjd * of eviction. Its ARC_L2_WRITING flag will be 3865185029Spjd * left set, denying reads to this buffer. 3866185029Spjd */ 3867185029Spjd ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3868185029Spjd continue; 3869185029Spjd } 3870185029Spjd 3871185029Spjd if (zio->io_error != 0) { 3872185029Spjd /* 3873185029Spjd * Error - drop L2ARC entry. 3874185029Spjd */ 3875185029Spjd list_remove(buflist, ab); 3876185029Spjd abl2 = ab->b_l2hdr; 3877185029Spjd ab->b_l2hdr = NULL; 3878185029Spjd kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3879185029Spjd ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3880185029Spjd } 3881185029Spjd 3882185029Spjd /* 3883185029Spjd * Allow ARC to begin reads to this L2ARC entry. 3884185029Spjd */ 3885185029Spjd ab->b_flags &= ~ARC_L2_WRITING; 3886185029Spjd 3887185029Spjd mutex_exit(hash_lock); 3888185029Spjd } 3889185029Spjd 3890185029Spjd atomic_inc_64(&l2arc_writes_done); 3891185029Spjd list_remove(buflist, head); 3892185029Spjd kmem_cache_free(hdr_cache, head); 3893185029Spjd mutex_exit(&l2arc_buflist_mtx); 3894185029Spjd 3895185029Spjd l2arc_do_free_on_write(); 3896185029Spjd 3897185029Spjd kmem_free(cb, sizeof (l2arc_write_callback_t)); 3898185029Spjd} 3899185029Spjd 3900185029Spjd/* 3901185029Spjd * A read to a cache device completed. Validate buffer contents before 3902185029Spjd * handing over to the regular ARC routines. 3903185029Spjd */ 3904185029Spjdstatic void 3905185029Spjdl2arc_read_done(zio_t *zio) 3906185029Spjd{ 3907185029Spjd l2arc_read_callback_t *cb; 3908185029Spjd arc_buf_hdr_t *hdr; 3909185029Spjd arc_buf_t *buf; 3910185029Spjd kmutex_t *hash_lock; 3911185029Spjd int equal; 3912185029Spjd 3913185029Spjd ASSERT(zio->io_vd != NULL); 3914185029Spjd ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 3915185029Spjd 3916185029Spjd spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 3917185029Spjd 3918185029Spjd cb = zio->io_private; 3919185029Spjd ASSERT(cb != NULL); 3920185029Spjd buf = cb->l2rcb_buf; 3921185029Spjd ASSERT(buf != NULL); 3922185029Spjd hdr = buf->b_hdr; 3923185029Spjd ASSERT(hdr != NULL); 3924185029Spjd 3925185029Spjd hash_lock = HDR_LOCK(hdr); 3926185029Spjd mutex_enter(hash_lock); 3927185029Spjd 3928185029Spjd /* 3929185029Spjd * Check this survived the L2ARC journey. 3930185029Spjd */ 3931185029Spjd equal = arc_cksum_equal(buf); 3932185029Spjd if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3933185029Spjd mutex_exit(hash_lock); 3934185029Spjd zio->io_private = buf; 3935185029Spjd zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 3936185029Spjd zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 3937185029Spjd arc_read_done(zio); 3938185029Spjd } else { 3939185029Spjd mutex_exit(hash_lock); 3940185029Spjd /* 3941185029Spjd * Buffer didn't survive caching. Increment stats and 3942185029Spjd * reissue to the original storage device. 3943185029Spjd */ 3944185029Spjd if (zio->io_error != 0) { 3945185029Spjd ARCSTAT_BUMP(arcstat_l2_io_error); 3946185029Spjd } else { 3947185029Spjd zio->io_error = EIO; 3948185029Spjd } 3949185029Spjd if (!equal) 3950185029Spjd ARCSTAT_BUMP(arcstat_l2_cksum_bad); 3951185029Spjd 3952185029Spjd /* 3953185029Spjd * If there's no waiter, issue an async i/o to the primary 3954185029Spjd * storage now. If there *is* a waiter, the caller must 3955185029Spjd * issue the i/o in a context where it's OK to block. 3956185029Spjd */ 3957185029Spjd if (zio->io_waiter == NULL) 3958185029Spjd zio_nowait(zio_read(zio->io_parent, 3959185029Spjd cb->l2rcb_spa, &cb->l2rcb_bp, 3960185029Spjd buf->b_data, zio->io_size, arc_read_done, buf, 3961185029Spjd zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 3962185029Spjd } 3963185029Spjd 3964185029Spjd kmem_free(cb, sizeof (l2arc_read_callback_t)); 3965185029Spjd} 3966185029Spjd 3967185029Spjd/* 3968185029Spjd * This is the list priority from which the L2ARC will search for pages to 3969185029Spjd * cache. This is used within loops (0..3) to cycle through lists in the 3970185029Spjd * desired order. This order can have a significant effect on cache 3971185029Spjd * performance. 3972185029Spjd * 3973185029Spjd * Currently the metadata lists are hit first, MFU then MRU, followed by 3974185029Spjd * the data lists. This function returns a locked list, and also returns 3975185029Spjd * the lock pointer. 3976185029Spjd */ 3977185029Spjdstatic list_t * 3978185029Spjdl2arc_list_locked(int list_num, kmutex_t **lock) 3979185029Spjd{ 3980185029Spjd list_t *list; 3981185029Spjd 3982185029Spjd ASSERT(list_num >= 0 && list_num <= 3); 3983185029Spjd 3984185029Spjd switch (list_num) { 3985185029Spjd case 0: 3986185029Spjd list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 3987185029Spjd *lock = &arc_mfu->arcs_mtx; 3988185029Spjd break; 3989185029Spjd case 1: 3990185029Spjd list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 3991185029Spjd *lock = &arc_mru->arcs_mtx; 3992185029Spjd break; 3993185029Spjd case 2: 3994185029Spjd list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 3995185029Spjd *lock = &arc_mfu->arcs_mtx; 3996185029Spjd break; 3997185029Spjd case 3: 3998185029Spjd list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 3999185029Spjd *lock = &arc_mru->arcs_mtx; 4000185029Spjd break; 4001185029Spjd } 4002185029Spjd 4003185029Spjd ASSERT(!(MUTEX_HELD(*lock))); 4004185029Spjd mutex_enter(*lock); 4005185029Spjd return (list); 4006185029Spjd} 4007185029Spjd 4008185029Spjd/* 4009185029Spjd * Evict buffers from the device write hand to the distance specified in 4010185029Spjd * bytes. This distance may span populated buffers, it may span nothing. 4011185029Spjd * This is clearing a region on the L2ARC device ready for writing. 4012185029Spjd * If the 'all' boolean is set, every buffer is evicted. 4013185029Spjd */ 4014185029Spjdstatic void 4015185029Spjdl2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4016185029Spjd{ 4017185029Spjd list_t *buflist; 4018185029Spjd l2arc_buf_hdr_t *abl2; 4019185029Spjd arc_buf_hdr_t *ab, *ab_prev; 4020185029Spjd kmutex_t *hash_lock; 4021185029Spjd uint64_t taddr; 4022185029Spjd 4023185029Spjd buflist = dev->l2ad_buflist; 4024185029Spjd 4025185029Spjd if (buflist == NULL) 4026185029Spjd return; 4027185029Spjd 4028185029Spjd if (!all && dev->l2ad_first) { 4029185029Spjd /* 4030185029Spjd * This is the first sweep through the device. There is 4031185029Spjd * nothing to evict. 4032185029Spjd */ 4033185029Spjd return; 4034185029Spjd } 4035185029Spjd 4036185029Spjd if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4037185029Spjd /* 4038185029Spjd * When nearing the end of the device, evict to the end 4039185029Spjd * before the device write hand jumps to the start. 4040185029Spjd */ 4041185029Spjd taddr = dev->l2ad_end; 4042185029Spjd } else { 4043185029Spjd taddr = dev->l2ad_hand + distance; 4044185029Spjd } 4045185029Spjd DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4046185029Spjd uint64_t, taddr, boolean_t, all); 4047185029Spjd 4048185029Spjdtop: 4049185029Spjd mutex_enter(&l2arc_buflist_mtx); 4050185029Spjd for (ab = list_tail(buflist); ab; ab = ab_prev) { 4051185029Spjd ab_prev = list_prev(buflist, ab); 4052185029Spjd 4053185029Spjd hash_lock = HDR_LOCK(ab); 4054185029Spjd if (!mutex_tryenter(hash_lock)) { 4055185029Spjd /* 4056185029Spjd * Missed the hash lock. Retry. 4057185029Spjd */ 4058185029Spjd ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4059185029Spjd mutex_exit(&l2arc_buflist_mtx); 4060185029Spjd mutex_enter(hash_lock); 4061185029Spjd mutex_exit(hash_lock); 4062185029Spjd goto top; 4063185029Spjd } 4064185029Spjd 4065185029Spjd if (HDR_L2_WRITE_HEAD(ab)) { 4066185029Spjd /* 4067185029Spjd * We hit a write head node. Leave it for 4068185029Spjd * l2arc_write_done(). 4069185029Spjd */ 4070185029Spjd list_remove(buflist, ab); 4071185029Spjd mutex_exit(hash_lock); 4072185029Spjd continue; 4073185029Spjd } 4074185029Spjd 4075185029Spjd if (!all && ab->b_l2hdr != NULL && 4076185029Spjd (ab->b_l2hdr->b_daddr > taddr || 4077185029Spjd ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4078185029Spjd /* 4079185029Spjd * We've evicted to the target address, 4080185029Spjd * or the end of the device. 4081185029Spjd */ 4082185029Spjd mutex_exit(hash_lock); 4083185029Spjd break; 4084185029Spjd } 4085185029Spjd 4086185029Spjd if (HDR_FREE_IN_PROGRESS(ab)) { 4087185029Spjd /* 4088185029Spjd * Already on the path to destruction. 4089185029Spjd */ 4090185029Spjd mutex_exit(hash_lock); 4091185029Spjd continue; 4092185029Spjd } 4093185029Spjd 4094185029Spjd if (ab->b_state == arc_l2c_only) { 4095185029Spjd ASSERT(!HDR_L2_READING(ab)); 4096185029Spjd /* 4097185029Spjd * This doesn't exist in the ARC. Destroy. 4098185029Spjd * arc_hdr_destroy() will call list_remove() 4099185029Spjd * and decrement arcstat_l2_size. 4100185029Spjd */ 4101185029Spjd arc_change_state(arc_anon, ab, hash_lock); 4102185029Spjd arc_hdr_destroy(ab); 4103185029Spjd } else { 4104185029Spjd /* 4105185029Spjd * Invalidate issued or about to be issued 4106185029Spjd * reads, since we may be about to write 4107185029Spjd * over this location. 4108185029Spjd */ 4109185029Spjd if (HDR_L2_READING(ab)) { 4110185029Spjd ARCSTAT_BUMP(arcstat_l2_evict_reading); 4111185029Spjd ab->b_flags |= ARC_L2_EVICTED; 4112185029Spjd } 4113185029Spjd 4114185029Spjd /* 4115185029Spjd * Tell ARC this no longer exists in L2ARC. 4116185029Spjd */ 4117185029Spjd if (ab->b_l2hdr != NULL) { 4118185029Spjd abl2 = ab->b_l2hdr; 4119185029Spjd ab->b_l2hdr = NULL; 4120185029Spjd kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4121185029Spjd ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4122185029Spjd } 4123185029Spjd list_remove(buflist, ab); 4124185029Spjd 4125185029Spjd /* 4126185029Spjd * This may have been leftover after a 4127185029Spjd * failed write. 4128185029Spjd */ 4129185029Spjd ab->b_flags &= ~ARC_L2_WRITING; 4130185029Spjd } 4131185029Spjd mutex_exit(hash_lock); 4132185029Spjd } 4133185029Spjd mutex_exit(&l2arc_buflist_mtx); 4134185029Spjd 4135185029Spjd spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 4136185029Spjd dev->l2ad_evict = taddr; 4137185029Spjd} 4138185029Spjd 4139185029Spjd/* 4140185029Spjd * Find and write ARC buffers to the L2ARC device. 4141185029Spjd * 4142185029Spjd * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4143185029Spjd * for reading until they have completed writing. 4144185029Spjd */ 4145185029Spjdstatic void 4146185029Spjdl2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4147185029Spjd{ 4148185029Spjd arc_buf_hdr_t *ab, *ab_prev, *head; 4149185029Spjd l2arc_buf_hdr_t *hdrl2; 4150185029Spjd list_t *list; 4151185029Spjd uint64_t passed_sz, write_sz, buf_sz, headroom; 4152185029Spjd void *buf_data; 4153185029Spjd kmutex_t *hash_lock, *list_lock; 4154185029Spjd boolean_t have_lock, full; 4155185029Spjd l2arc_write_callback_t *cb; 4156185029Spjd zio_t *pio, *wzio; 4157185029Spjd int try; 4158185029Spjd 4159185029Spjd ASSERT(dev->l2ad_vdev != NULL); 4160185029Spjd 4161185029Spjd pio = NULL; 4162185029Spjd write_sz = 0; 4163185029Spjd full = B_FALSE; 4164185029Spjd head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4165185029Spjd head->b_flags |= ARC_L2_WRITE_HEAD; 4166185029Spjd 4167185029Spjd /* 4168185029Spjd * Copy buffers for L2ARC writing. 4169185029Spjd */ 4170185029Spjd mutex_enter(&l2arc_buflist_mtx); 4171185029Spjd for (try = 0; try <= 3; try++) { 4172185029Spjd list = l2arc_list_locked(try, &list_lock); 4173185029Spjd passed_sz = 0; 4174185029Spjd 4175185029Spjd /* 4176185029Spjd * L2ARC fast warmup. 4177185029Spjd * 4178185029Spjd * Until the ARC is warm and starts to evict, read from the 4179185029Spjd * head of the ARC lists rather than the tail. 4180185029Spjd */ 4181185029Spjd headroom = target_sz * l2arc_headroom; 4182185029Spjd if (arc_warm == B_FALSE) 4183185029Spjd ab = list_head(list); 4184185029Spjd else 4185185029Spjd ab = list_tail(list); 4186185029Spjd 4187185029Spjd for (; ab; ab = ab_prev) { 4188185029Spjd if (arc_warm == B_FALSE) 4189185029Spjd ab_prev = list_next(list, ab); 4190185029Spjd else 4191185029Spjd ab_prev = list_prev(list, ab); 4192185029Spjd 4193185029Spjd hash_lock = HDR_LOCK(ab); 4194185029Spjd have_lock = MUTEX_HELD(hash_lock); 4195185029Spjd if (!have_lock && !mutex_tryenter(hash_lock)) { 4196185029Spjd /* 4197185029Spjd * Skip this buffer rather than waiting. 4198185029Spjd */ 4199185029Spjd continue; 4200185029Spjd } 4201185029Spjd 4202185029Spjd passed_sz += ab->b_size; 4203185029Spjd if (passed_sz > headroom) { 4204185029Spjd /* 4205185029Spjd * Searched too far. 4206185029Spjd */ 4207185029Spjd mutex_exit(hash_lock); 4208185029Spjd break; 4209185029Spjd } 4210185029Spjd 4211185029Spjd if (ab->b_spa != spa) { 4212185029Spjd mutex_exit(hash_lock); 4213185029Spjd continue; 4214185029Spjd } 4215185029Spjd 4216185029Spjd if (ab->b_l2hdr != NULL) { 4217185029Spjd /* 4218185029Spjd * Already in L2ARC. 4219185029Spjd */ 4220185029Spjd mutex_exit(hash_lock); 4221185029Spjd continue; 4222185029Spjd } 4223185029Spjd 4224185029Spjd if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) { 4225185029Spjd mutex_exit(hash_lock); 4226185029Spjd continue; 4227185029Spjd } 4228185029Spjd 4229185029Spjd if ((write_sz + ab->b_size) > target_sz) { 4230185029Spjd full = B_TRUE; 4231185029Spjd mutex_exit(hash_lock); 4232185029Spjd break; 4233185029Spjd } 4234185029Spjd 4235185029Spjd if (ab->b_buf == NULL) { 4236185029Spjd DTRACE_PROBE1(l2arc__buf__null, void *, ab); 4237185029Spjd mutex_exit(hash_lock); 4238185029Spjd continue; 4239185029Spjd } 4240185029Spjd 4241185029Spjd if (pio == NULL) { 4242185029Spjd /* 4243185029Spjd * Insert a dummy header on the buflist so 4244185029Spjd * l2arc_write_done() can find where the 4245185029Spjd * write buffers begin without searching. 4246185029Spjd */ 4247185029Spjd list_insert_head(dev->l2ad_buflist, head); 4248185029Spjd 4249185029Spjd cb = kmem_alloc( 4250185029Spjd sizeof (l2arc_write_callback_t), KM_SLEEP); 4251185029Spjd cb->l2wcb_dev = dev; 4252185029Spjd cb->l2wcb_head = head; 4253185029Spjd pio = zio_root(spa, l2arc_write_done, cb, 4254185029Spjd ZIO_FLAG_CANFAIL); 4255185029Spjd } 4256185029Spjd 4257185029Spjd /* 4258185029Spjd * Create and add a new L2ARC header. 4259185029Spjd */ 4260185029Spjd hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP); 4261185029Spjd hdrl2->b_dev = dev; 4262185029Spjd hdrl2->b_daddr = dev->l2ad_hand; 4263185029Spjd 4264185029Spjd ab->b_flags |= ARC_L2_WRITING; 4265185029Spjd ab->b_l2hdr = hdrl2; 4266185029Spjd list_insert_head(dev->l2ad_buflist, ab); 4267185029Spjd buf_data = ab->b_buf->b_data; 4268185029Spjd buf_sz = ab->b_size; 4269185029Spjd 4270185029Spjd /* 4271185029Spjd * Compute and store the buffer cksum before 4272185029Spjd * writing. On debug the cksum is verified first. 4273185029Spjd */ 4274185029Spjd arc_cksum_verify(ab->b_buf); 4275185029Spjd arc_cksum_compute(ab->b_buf, B_TRUE); 4276185029Spjd 4277185029Spjd mutex_exit(hash_lock); 4278185029Spjd 4279185029Spjd wzio = zio_write_phys(pio, dev->l2ad_vdev, 4280185029Spjd dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF, 4281185029Spjd NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE, 4282185029Spjd ZIO_FLAG_CANFAIL, B_FALSE); 4283185029Spjd 4284185029Spjd DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 4285185029Spjd zio_t *, wzio); 4286185029Spjd (void) zio_nowait(wzio); 4287185029Spjd 4288185029Spjd /* 4289185029Spjd * Keep the clock hand suitably device-aligned. 4290185029Spjd */ 4291185029Spjd buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz); 4292185029Spjd 4293185029Spjd write_sz += buf_sz; 4294185029Spjd dev->l2ad_hand += buf_sz; 4295185029Spjd } 4296185029Spjd 4297185029Spjd mutex_exit(list_lock); 4298185029Spjd 4299185029Spjd if (full == B_TRUE) 4300185029Spjd break; 4301185029Spjd } 4302185029Spjd mutex_exit(&l2arc_buflist_mtx); 4303185029Spjd 4304185029Spjd if (pio == NULL) { 4305185029Spjd ASSERT3U(write_sz, ==, 0); 4306185029Spjd kmem_cache_free(hdr_cache, head); 4307185029Spjd return; 4308185029Spjd } 4309185029Spjd 4310185029Spjd ASSERT3U(write_sz, <=, target_sz); 4311185029Spjd ARCSTAT_BUMP(arcstat_l2_writes_sent); 4312185029Spjd ARCSTAT_INCR(arcstat_l2_size, write_sz); 4313185029Spjd spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz); 4314185029Spjd 4315185029Spjd /* 4316185029Spjd * Bump device hand to the device start if it is approaching the end. 4317185029Spjd * l2arc_evict() will already have evicted ahead for this case. 4318185029Spjd */ 4319185029Spjd if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 4320185029Spjd spa_l2cache_space_update(dev->l2ad_vdev, 0, 4321185029Spjd dev->l2ad_end - dev->l2ad_hand); 4322185029Spjd dev->l2ad_hand = dev->l2ad_start; 4323185029Spjd dev->l2ad_evict = dev->l2ad_start; 4324185029Spjd dev->l2ad_first = B_FALSE; 4325185029Spjd } 4326185029Spjd 4327185029Spjd (void) zio_wait(pio); 4328185029Spjd} 4329185029Spjd 4330185029Spjd/* 4331185029Spjd * This thread feeds the L2ARC at regular intervals. This is the beating 4332185029Spjd * heart of the L2ARC. 4333185029Spjd */ 4334185029Spjdstatic void 4335185029Spjdl2arc_feed_thread(void *dummy __unused) 4336185029Spjd{ 4337185029Spjd callb_cpr_t cpr; 4338185029Spjd l2arc_dev_t *dev; 4339185029Spjd spa_t *spa; 4340185029Spjd uint64_t size; 4341185029Spjd 4342185029Spjd CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 4343185029Spjd 4344185029Spjd mutex_enter(&l2arc_feed_thr_lock); 4345185029Spjd 4346185029Spjd while (l2arc_thread_exit == 0) { 4347185029Spjd /* 4348185029Spjd * Pause for l2arc_feed_secs seconds between writes. 4349185029Spjd */ 4350185029Spjd CALLB_CPR_SAFE_BEGIN(&cpr); 4351185029Spjd (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 4352185029Spjd hz * l2arc_feed_secs); 4353185029Spjd CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 4354185029Spjd 4355185029Spjd /* 4356185029Spjd * Quick check for L2ARC devices. 4357185029Spjd */ 4358185029Spjd mutex_enter(&l2arc_dev_mtx); 4359185029Spjd if (l2arc_ndev == 0) { 4360185029Spjd mutex_exit(&l2arc_dev_mtx); 4361185029Spjd continue; 4362185029Spjd } 4363185029Spjd mutex_exit(&l2arc_dev_mtx); 4364185029Spjd 4365185029Spjd /* 4366185029Spjd * This selects the next l2arc device to write to, and in 4367185029Spjd * doing so the next spa to feed from: dev->l2ad_spa. This 4368185029Spjd * will return NULL if there are now no l2arc devices or if 4369185029Spjd * they are all faulted. 4370185029Spjd * 4371185029Spjd * If a device is returned, its spa's config lock is also 4372185029Spjd * held to prevent device removal. l2arc_dev_get_next() 4373185029Spjd * will grab and release l2arc_dev_mtx. 4374185029Spjd */ 4375185029Spjd if ((dev = l2arc_dev_get_next()) == NULL) 4376185029Spjd continue; 4377185029Spjd 4378185029Spjd spa = dev->l2ad_spa; 4379185029Spjd ASSERT(spa != NULL); 4380185029Spjd 4381185029Spjd /* 4382185029Spjd * Avoid contributing to memory pressure. 4383185029Spjd */ 4384185029Spjd if (arc_reclaim_needed()) { 4385185029Spjd ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 4386185029Spjd spa_config_exit(spa, SCL_L2ARC, dev); 4387185029Spjd continue; 4388185029Spjd } 4389185029Spjd 4390185029Spjd ARCSTAT_BUMP(arcstat_l2_feeds); 4391185029Spjd 4392185029Spjd size = dev->l2ad_write; 4393185029Spjd if (arc_warm == B_FALSE) 4394185029Spjd size += dev->l2ad_boost; 4395185029Spjd 4396185029Spjd /* 4397185029Spjd * Evict L2ARC buffers that will be overwritten. 4398185029Spjd */ 4399185029Spjd l2arc_evict(dev, size, B_FALSE); 4400185029Spjd 4401185029Spjd /* 4402185029Spjd * Write ARC buffers. 4403185029Spjd */ 4404185029Spjd l2arc_write_buffers(spa, dev, size); 4405185029Spjd spa_config_exit(spa, SCL_L2ARC, dev); 4406185029Spjd } 4407185029Spjd 4408185029Spjd l2arc_thread_exit = 0; 4409185029Spjd cv_broadcast(&l2arc_feed_thr_cv); 4410185029Spjd CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 4411185029Spjd thread_exit(); 4412185029Spjd} 4413185029Spjd 4414185029Spjdboolean_t 4415185029Spjdl2arc_vdev_present(vdev_t *vd) 4416185029Spjd{ 4417185029Spjd l2arc_dev_t *dev; 4418185029Spjd 4419185029Spjd mutex_enter(&l2arc_dev_mtx); 4420185029Spjd for (dev = list_head(l2arc_dev_list); dev != NULL; 4421185029Spjd dev = list_next(l2arc_dev_list, dev)) { 4422185029Spjd if (dev->l2ad_vdev == vd) 4423185029Spjd break; 4424185029Spjd } 4425185029Spjd mutex_exit(&l2arc_dev_mtx); 4426185029Spjd 4427185029Spjd return (dev != NULL); 4428185029Spjd} 4429185029Spjd 4430185029Spjd/* 4431185029Spjd * Add a vdev for use by the L2ARC. By this point the spa has already 4432185029Spjd * validated the vdev and opened it. 4433185029Spjd */ 4434185029Spjdvoid 4435185029Spjdl2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end) 4436185029Spjd{ 4437185029Spjd l2arc_dev_t *adddev; 4438185029Spjd 4439185029Spjd ASSERT(!l2arc_vdev_present(vd)); 4440185029Spjd 4441185029Spjd /* 4442185029Spjd * Create a new l2arc device entry. 4443185029Spjd */ 4444185029Spjd adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 4445185029Spjd adddev->l2ad_spa = spa; 4446185029Spjd adddev->l2ad_vdev = vd; 4447185029Spjd adddev->l2ad_write = l2arc_write_max; 4448185029Spjd adddev->l2ad_boost = l2arc_write_boost; 4449185029Spjd adddev->l2ad_start = start; 4450185029Spjd adddev->l2ad_end = end; 4451185029Spjd adddev->l2ad_hand = adddev->l2ad_start; 4452185029Spjd adddev->l2ad_evict = adddev->l2ad_start; 4453185029Spjd adddev->l2ad_first = B_TRUE; 4454185029Spjd ASSERT3U(adddev->l2ad_write, >, 0); 4455185029Spjd 4456185029Spjd /* 4457185029Spjd * This is a list of all ARC buffers that are still valid on the 4458185029Spjd * device. 4459185029Spjd */ 4460185029Spjd adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP); 4461185029Spjd list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 4462185029Spjd offsetof(arc_buf_hdr_t, b_l2node)); 4463185029Spjd 4464185029Spjd spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0); 4465185029Spjd 4466185029Spjd /* 4467185029Spjd * Add device to global list 4468185029Spjd */ 4469185029Spjd mutex_enter(&l2arc_dev_mtx); 4470185029Spjd list_insert_head(l2arc_dev_list, adddev); 4471185029Spjd atomic_inc_64(&l2arc_ndev); 4472185029Spjd mutex_exit(&l2arc_dev_mtx); 4473185029Spjd} 4474185029Spjd 4475185029Spjd/* 4476185029Spjd * Remove a vdev from the L2ARC. 4477185029Spjd */ 4478185029Spjdvoid 4479185029Spjdl2arc_remove_vdev(vdev_t *vd) 4480185029Spjd{ 4481185029Spjd l2arc_dev_t *dev, *nextdev, *remdev = NULL; 4482185029Spjd 4483185029Spjd /* 4484185029Spjd * Find the device by vdev 4485185029Spjd */ 4486185029Spjd mutex_enter(&l2arc_dev_mtx); 4487185029Spjd for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 4488185029Spjd nextdev = list_next(l2arc_dev_list, dev); 4489185029Spjd if (vd == dev->l2ad_vdev) { 4490185029Spjd remdev = dev; 4491185029Spjd break; 4492185029Spjd } 4493185029Spjd } 4494185029Spjd ASSERT(remdev != NULL); 4495185029Spjd 4496185029Spjd /* 4497185029Spjd * Remove device from global list 4498185029Spjd */ 4499185029Spjd list_remove(l2arc_dev_list, remdev); 4500185029Spjd l2arc_dev_last = NULL; /* may have been invalidated */ 4501185029Spjd atomic_dec_64(&l2arc_ndev); 4502185029Spjd mutex_exit(&l2arc_dev_mtx); 4503185029Spjd 4504185029Spjd /* 4505185029Spjd * Clear all buflists and ARC references. L2ARC device flush. 4506185029Spjd */ 4507185029Spjd l2arc_evict(remdev, 0, B_TRUE); 4508185029Spjd list_destroy(remdev->l2ad_buflist); 4509185029Spjd kmem_free(remdev->l2ad_buflist, sizeof (list_t)); 4510185029Spjd kmem_free(remdev, sizeof (l2arc_dev_t)); 4511185029Spjd} 4512185029Spjd 4513185029Spjdvoid 4514185029Spjdl2arc_init(void) 4515185029Spjd{ 4516185029Spjd l2arc_thread_exit = 0; 4517185029Spjd l2arc_ndev = 0; 4518185029Spjd l2arc_writes_sent = 0; 4519185029Spjd l2arc_writes_done = 0; 4520185029Spjd 4521185029Spjd mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 4522185029Spjd cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 4523185029Spjd mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 4524185029Spjd mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL); 4525185029Spjd mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 4526185029Spjd 4527185029Spjd l2arc_dev_list = &L2ARC_dev_list; 4528185029Spjd l2arc_free_on_write = &L2ARC_free_on_write; 4529185029Spjd list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 4530185029Spjd offsetof(l2arc_dev_t, l2ad_node)); 4531185029Spjd list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 4532185029Spjd offsetof(l2arc_data_free_t, l2df_list_node)); 4533185029Spjd} 4534185029Spjd 4535185029Spjdvoid 4536185029Spjdl2arc_fini(void) 4537185029Spjd{ 4538185029Spjd /* 4539185029Spjd * This is called from dmu_fini(), which is called from spa_fini(); 4540185029Spjd * Because of this, we can assume that all l2arc devices have 4541185029Spjd * already been removed when the pools themselves were removed. 4542185029Spjd */ 4543185029Spjd 4544185029Spjd l2arc_do_free_on_write(); 4545185029Spjd 4546185029Spjd mutex_destroy(&l2arc_feed_thr_lock); 4547185029Spjd cv_destroy(&l2arc_feed_thr_cv); 4548185029Spjd mutex_destroy(&l2arc_dev_mtx); 4549185029Spjd mutex_destroy(&l2arc_buflist_mtx); 4550185029Spjd mutex_destroy(&l2arc_free_on_write_mtx); 4551185029Spjd 4552185029Spjd list_destroy(l2arc_dev_list); 4553185029Spjd list_destroy(l2arc_free_on_write); 4554185029Spjd} 4555185029Spjd 4556185029Spjdvoid 4557185029Spjdl2arc_start(void) 4558185029Spjd{ 4559185029Spjd if (!(spa_mode & FWRITE)) 4560185029Spjd return; 4561185029Spjd 4562185029Spjd (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 4563185029Spjd TS_RUN, minclsyspri); 4564185029Spjd} 4565185029Spjd 4566185029Spjdvoid 4567185029Spjdl2arc_stop(void) 4568185029Spjd{ 4569185029Spjd if (!(spa_mode & FWRITE)) 4570185029Spjd return; 4571185029Spjd 4572185029Spjd mutex_enter(&l2arc_feed_thr_lock); 4573185029Spjd cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 4574185029Spjd l2arc_thread_exit = 1; 4575185029Spjd while (l2arc_thread_exit != 0) 4576185029Spjd cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 4577185029Spjd mutex_exit(&l2arc_feed_thr_lock); 4578185029Spjd} 4579