/* arc.c, revision 196941 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic:  we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.  In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes).  We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */
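
/*
 * Illustrative sketch (not part of the original file): the variable
 * block size policy in point 3 above amounts to walking the eviction
 * list and accumulating evictable blocks until the requested space is
 * covered.  next_evictable() and evict_block() are hypothetical
 * helpers standing in for the real logic in arc_evict() below:
 *
 *	uint64_t freed = 0;
 *	arc_buf_hdr_t *ab;
 *
 *	while (freed < needed && (ab = next_evictable(list)) != NULL)
 *		freed += evict_block(ab);
 */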

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

extern int zfs_prefetch_disable;
static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
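/*
 * Example (illustrative, not from the original file): on FreeBSD the
 * read-only tunables declared below can be set at boot time from
 * /boot/loader.conf; values are in bytes:
 *
 *	vfs.zfs.arc_max="536870912"
 *	vfs.zfs.arc_min="134217728"
 */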
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN,
    &zfs_mdcomp_disable, 0, "Disable metadata compression");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are the
 * only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
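
/*
 * Illustrative lifetime sketch (an assumption drawn from the state
 * descriptions above, not text from the original file): a buffer
 * typically enters via ARC_anon or ARC_mru, is promoted to ARC_mfu on
 * repeated use, and leaves a "ghost" trace behind when evicted:
 *
 *	ARC_anon -> ARC_mru -> ARC_mfu
 *	                |          |
 *	                v          v
 *	        ARC_mru_ghost   ARC_mfu_ghost
 */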

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

/*
 * Lock-free update of a running maximum: retry the compare-and-swap
 * until either our value is no longer the maximum or the swap succeeds.
 */
#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN,
    &arc_meta_used, 0, "ARC metadata used");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN,
    &arc_meta_limit, 0, "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_hdr_buf_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	128

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}
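
/*
 * Illustrative caller pattern (a sketch, not code from the original
 * file): buf_hash_insert() doubles as a lookup and always hands back
 * the bucket lock held, so a caller that may race with another
 * inserter checks the return value and eventually drops that lock:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *exists = buf_hash_insert(hdr, &hash_lock);
 *
 *	if (exists != NULL) {
 *		... another thread inserted first; use 'exists' ...
 *	}
 *	mutex_exit(hash_lock);
 */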

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
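	/*
	 * Worked example (illustrative, not from the original file):
	 * with 4 GB of physical memory the loop below stops at
	 * hsize = 4 GB / 64 KB = 65536 buckets, so the table costs
	 * 65536 * 8 bytes = 512 KB, matching the 128 KB/GB estimate.
	 */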
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}
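
/*
 * Illustrative protocol (a sketch, not code from the original file):
 * under ZFS_DEBUG_MODIFY a caller that legitimately needs to change an
 * anonymous buffer brackets the modification with thaw and freeze, so
 * that arc_cksum_verify() only fires on unexpected changes:
 *
 *	arc_buf_thaw(buf);		discard the stale checksum
 *	... modify buf->b_data ...
 *	arc_buf_freeze(buf);		recompute the checksum
 */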

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}
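
/*
 * Illustrative usage (a sketch, not code from the original file): a
 * consumer allocates an anonymous buffer with a tag identifying the
 * reference holder, fills it, and drops its reference when done.
 * FTAG is the usual zfs_context.h convention for a function-local tag.
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, SPA_MINBLOCKSIZE, FTAG,
 *	    ARC_BUFC_DATA);
 *
 *	... fill buf->b_data ...
 *	(void) arc_buf_remove_ref(buf, FTAG);
 */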

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 *
			 * The hdr may be removed from l2ad_buflist before we
			 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			if (hdr->b_l2hdr != NULL) {
				list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
				    hdr);
			}
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
1374168404Spjd ASSERT(buf->b_data != NULL); 1375168404Spjd 1376168404Spjd if (hashed) { 1377168404Spjd kmutex_t *hash_lock = HDR_LOCK(hdr); 1378168404Spjd 1379168404Spjd mutex_enter(hash_lock); 1380168404Spjd (void) remove_reference(hdr, hash_lock, tag); 1381168404Spjd if (hdr->b_datacnt > 1) 1382168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1383168404Spjd else 1384168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 1385168404Spjd mutex_exit(hash_lock); 1386168404Spjd } else if (HDR_IO_IN_PROGRESS(hdr)) { 1387168404Spjd int destroy_hdr; 1388168404Spjd /* 1389168404Spjd * We are in the middle of an async write. Don't destroy 1390168404Spjd * this buffer unless the write completes before we finish 1391168404Spjd * decrementing the reference count. 1392168404Spjd */ 1393168404Spjd mutex_enter(&arc_eviction_mtx); 1394168404Spjd (void) remove_reference(hdr, NULL, tag); 1395168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1396168404Spjd destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1397168404Spjd mutex_exit(&arc_eviction_mtx); 1398168404Spjd if (destroy_hdr) 1399168404Spjd arc_hdr_destroy(hdr); 1400168404Spjd } else { 1401168404Spjd if (remove_reference(hdr, NULL, tag) > 0) { 1402168404Spjd ASSERT(HDR_IO_ERROR(hdr)); 1403168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1404168404Spjd } else { 1405168404Spjd arc_hdr_destroy(hdr); 1406168404Spjd } 1407168404Spjd } 1408168404Spjd} 1409168404Spjd 1410168404Spjdint 1411168404Spjdarc_buf_remove_ref(arc_buf_t *buf, void* tag) 1412168404Spjd{ 1413168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 1414168404Spjd kmutex_t *hash_lock = HDR_LOCK(hdr); 1415168404Spjd int no_callback = (buf->b_efunc == NULL); 1416168404Spjd 1417168404Spjd if (hdr->b_state == arc_anon) { 1418168404Spjd arc_buf_free(buf, tag); 1419168404Spjd return (no_callback); 1420168404Spjd } 1421168404Spjd 1422168404Spjd mutex_enter(hash_lock); 1423168404Spjd ASSERT(hdr->b_state != arc_anon); 1424168404Spjd ASSERT(buf->b_data != NULL); 1425168404Spjd 1426168404Spjd (void) remove_reference(hdr, hash_lock, tag); 1427168404Spjd if (hdr->b_datacnt > 1) { 1428168404Spjd if (no_callback) 1429168404Spjd arc_buf_destroy(buf, FALSE, TRUE); 1430168404Spjd } else if (no_callback) { 1431168404Spjd ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1432168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 1433168404Spjd } 1434168404Spjd ASSERT(no_callback || hdr->b_datacnt > 1 || 1435168404Spjd refcount_is_zero(&hdr->b_refcnt)); 1436168404Spjd mutex_exit(hash_lock); 1437168404Spjd return (no_callback); 1438168404Spjd} 1439168404Spjd 1440168404Spjdint 1441168404Spjdarc_buf_size(arc_buf_t *buf) 1442168404Spjd{ 1443168404Spjd return (buf->b_hdr->b_size); 1444168404Spjd} 1445168404Spjd 1446168404Spjd/* 1447168404Spjd * Evict buffers from list until we've removed the specified number of 1448168404Spjd * bytes. Move the removed buffers to the appropriate evict state. 1449168404Spjd * If the recycle flag is set, then attempt to "recycle" a buffer: 1450168404Spjd * - look for a buffer to evict that is `bytes' long. 1451168404Spjd * - return the data block from this buffer rather than freeing it. 1452168404Spjd * This flag is used by callers that are trying to make space for a 1453168404Spjd * new buffer in a full arc cache. 1454185029Spjd * 1455185029Spjd * This function makes a "best effort". It skips over any buffers 1456185029Spjd * it can't get a hash_lock on, and so may not catch all candidates. 1457185029Spjd * It may also return without evicting as much space as requested. 
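 *
 * For example (as arc_get_data_buf() does below), a caller trying to
 * recycle a data block of exactly `bytes' bytes may call:
 *
 *	buf->b_data = arc_evict(state, NULL, size, TRUE, type);
 *
 * and fall back to a fresh allocation if NULL is returned.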
 */
static void *
arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					rw_exit(&buf->b_lock);
				} else {
					rw_exit(&buf->b_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state; make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
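				 * Move it to the arc_l2c_only state
				 * instead, so only the L2ARC-resident
				 * copy remains tracked.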
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used;

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE,
		    ARC_BUFC_METADATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_size > 0) {
			todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_DATA);
			arc_over = arc_size - arc_c;
		}

		if (arc_over > 0 &&
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
			    arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_METADATA);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_size +
		    arc_mfu_ghost->arcs_size - arc_c * 2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	static arc_buf_t *tmp_arc_eviction_list;

	/*
	 * Move list over to avoid LOR
	 */
restart:
	mutex_enter(&arc_eviction_mtx);
	tmp_arc_eviction_list = arc_eviction_list;
	arc_eviction_list = NULL;
	mutex_exit(&arc_eviction_mtx);

	while (tmp_arc_eviction_list != NULL) {
		arc_buf_t *buf = tmp_arc_eviction_list;
		tmp_arc_eviction_list = buf->b_next;
		rw_enter(&buf->b_lock, RW_WRITER);
		buf->b_hdr = NULL;
		rw_exit(&buf->b_lock);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
	}

	if (arc_eviction_list != NULL)
		goto restart;
}

/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, spa, -1);
	arc_evict_ghost(arc_mfu_ghost, spa, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = arc_c >> arc_shrink_shift;
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int needfree = 0;

static int
arc_reclaim_needed(void)
{
#if 0
	uint64_t extra;
#endif

#ifdef _KERNEL

	/*
	 * If pages are needed or we're within 2048 pages
	 * of needing to page, we need to reclaim.
	 */
	if (vm_pages_needed || (vm_paging_target() > -2048))
		return (1);

	if (needfree)
		return (1);

#if 0
	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif
#else
	if (kmem_used() > (kmem_size() * 3) / 4)
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
#ifdef ZIO_USE_UMA
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];
#endif

#ifdef _KERNEL
	if (arc_meta_used >= arc_meta_limit) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}
#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

#ifdef ZIO_USE_UMA
	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
#endif
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void *dummy __unused)
{
	clock_t growtime = 0;
	arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = LBOLT + (arc_grow_retry * hz);

			if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
				/*
				 * If needfree is TRUE, our vm_lowmem hook
				 * was called; in that case we must free some
				 * memory, so switch to aggressive mode.
				 */
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
			}
			arc_kmem_reap_now(last_reclaim);
			arc_warm = B_TRUE;

		} else if (arc_no_grow && LBOLT >= growtime) {
			arc_no_grow = FALSE;
		}

		if (needfree ||
		    (2 * arc_c < arc_size +
		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size))
			arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		if (arc_reclaim_needed()) {
			needfree = 0;
#ifdef _KERNEL
			wakeup(&needfree);
#endif
		}

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, hz);
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));

		arc_p = MIN(arc_c, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));

		arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc_no_grow)
		return;

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}

/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
		return (1);

#if 0
#ifdef _KERNEL
	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this area remains
	 * above about 1/32nd free.
	 */
	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
	    vmem_size(zio_arena, VMEM_FREE) <
	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
		return (1);
#endif
#endif

	if (arc_reclaim_needed())
		return (1);

	return (arc_size > arc_c);
}

/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t *state = buf->b_hdr->b_state;
	uint64_t size = buf->b_hdr->b_size;
	arc_buf_contents_t type = buf->b_hdr->b_type;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed(type)) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			atomic_add_64(&arc_size, size);
		}
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc_mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
	else if (state == arc_mru_ghost)
		state = arc_mru;

	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		state = (arc_mfu->arcs_lsize[type] > 0 &&
		    arc_p > mru_used) ? arc_mfu : arc_mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc_c - arc_p;
		state = (arc_mru->arcs_lsize[type] > 0 &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
	}
	if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			atomic_add_64(&arc_size, size);
		}
		ARCSTAT_BUMP(arcstat_recycle_miss);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->arcs_size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
		}
		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (arc_size < arc_c && hdr->b_state == arc_anon &&
		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
			arc_p = MIN(arc_c, arc_p + size);
	}
}

/*
 * This routine is called whenever a buffer is accessed.
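 * It implements the state transitions of the replacement policy:
 * anonymous buffers enter the MRU state; an MRU buffer referenced
 * again after ARC_MINTIME moves to the MFU state; and a hit in either
 * ghost state pulls the buffer back into MRU or MFU as appropriate.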
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mru, buf, hash_lock);

	} else if (buf->b_state == arc_mru) {
		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			buf->b_arc_access = LBOLT;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache.  Move it to the MFU
		 * state.
		 */
		if (LBOLT > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = LBOLT;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc_mfu, buf, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (buf->b_state == arc_mru_ghost) {
		arc_state_t *new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc_mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = LBOLT;
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (buf->b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		buf->b_arc_access = LBOLT;
	} else if (buf->b_state == arc_mfu_ghost) {
		arc_state_t *new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
			new_state = arc_mru;
		}

		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (buf->b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mfu, buf, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}

static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t *hdr, *found;
	arc_buf_t *buf;
	arc_buf_t *abuf;	/* buffer we're assigning to callback */
	kmutex_t *hash_lock;
	arc_callback_t *callback_list, *acb;
	int freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
	    (found == hdr && HDR_L2_READING(hdr)));

	hdr->b_flags &= ~ARC_L2_EVICTED;
	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
		hdr->b_flags &= ~ARC_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
		    byteswap_uint64_array :
		    dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
		func(buf->b_data, hdr->b_size);
	}

	arc_cksum_compute(buf, B_FALSE);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 *
 * Normal callers should use arc_read and pass the arc buffer and offset
 * for the bp.  But if you know you don't need locking, you can use
 * arc_read_nolock.
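 *
 * An illustrative call, assuming a hypothetical done callback
 * "my_done" and bookmark "zb" (this sketch is not part of the
 * original source):
 *
 *	uint32_t flags = ARC_WAIT;
 *	(void) arc_read(pio, spa, bp, pbuf, my_done, NULL,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &flags, &zb);
 *
 * With ARC_WAIT the call blocks until the read completes; with
 * ARC_NOWAIT it returns at once and the callback fires later.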
 */
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	int err;
	arc_buf_hdr_t *hdr = pbuf->b_hdr;

	ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
	ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
	rw_enter(&pbuf->b_lock, RW_READER);

	err = arc_read_nolock(pio, spa, bp, done, private, priority,
	    zio_flags, arc_flags, zb);

	ASSERT3P(hdr, ==, pbuf->b_hdr);
	rw_exit(&pbuf->b_lock);
	return (err);
}

int
arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	zio_t *rzio;

top:
	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t *acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, zio_flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}
		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		if (*arc_flags & ARC_L2CACHE)
			hdr->b_flags |= ARC_L2CACHE;
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t *acb;
		vdev_t *vd = NULL;
		daddr_t addr;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t *exists;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			buf = arc_buf_alloc(spa, size, private, type);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			arc_get_data_buf(buf);
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;

		}

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		/*
		 * If the buffer has been evicted, migrate it to a present state
		 * before issuing the I/O.  Once we drop the hash-table lock,
		 * the header will be marked as I/O in progress and have an
		 * attached buffer.  At this point, anybody who finds this
		 * buffer ought to notice that it's legit but has a pending I/O.
		 */

		if (GHOST_STATE(hdr->b_state))
			arc_access(hdr, hash_lock);

		if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
		    (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
			addr = hdr->b_l2hdr->b_daddr;
			/*
			 * Lock out device removal.
			 */
			if (vdev_is_dead(vd) ||
			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
				vd = NULL;
		}

		mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
		    zbookmark_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, misses);

		if (vd != NULL) {
			/*
			 * Read from the L2ARC if the following are true:
			 * 1. The L2ARC vdev was previously cached.
			 * 2. This buffer still has L2ARC metadata.
			 * 3. This buffer isn't currently writing to the L2ARC.
			 * 4. The L2ARC entry wasn't evicted, which may
			 *    also have invalidated the vdev.
			 */
			if (hdr->b_l2hdr != NULL &&
			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) {
				l2arc_read_callback_t *cb;

				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_hits);

				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
				    KM_SLEEP);
				cb->l2rcb_buf = buf;
				cb->l2rcb_spa = spa;
				cb->l2rcb_bp = *bp;
				cb->l2rcb_zb = *zb;
				cb->l2rcb_flags = zio_flags;

				/*
				 * l2arc read.  The SCL_L2ARC lock will be
				 * released by l2arc_read_done().
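				 * If this read fails, we fall back to
				 * the regular zio_read() issued below.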
				 */
				rzio = zio_read_phys(pio, vd, addr, size,
				    buf->b_data, ZIO_CHECKSUM_OFF,
				    l2arc_read_done, cb, priority, zio_flags |
				    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
				    ZIO_FLAG_DONT_PROPAGATE |
				    ZIO_FLAG_DONT_RETRY, B_FALSE);
				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
				    zio_t *, rzio);

				if (*arc_flags & ARC_NOWAIT) {
					zio_nowait(rzio);
					return (0);
				}

				ASSERT(*arc_flags & ARC_WAIT);
				if (zio_wait(rzio) == 0)
					return (0);

				/* l2arc read error; goto zio_read() */
			} else {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
				if (HDR_L2_WRITING(hdr))
					ARCSTAT_BUMP(arcstat_l2_rw_clash);
				spa_config_exit(spa, SCL_L2ARC, vd);
			}
		}

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, zio_flags, zb);

		if (*arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}

/*
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}

void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
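 * Returns 1 if the buffer was evicted here, or 0 if it is already
 * being reaped by arc_do_user_evicts().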
 2816168404Spjd */ 2817168404Spjdint 2818168404Spjdarc_buf_evict(arc_buf_t *buf) 2819168404Spjd{ 2820168404Spjd arc_buf_hdr_t *hdr; 2821168404Spjd kmutex_t *hash_lock; 2822168404Spjd arc_buf_t **bufp; 2823168404Spjd 2824185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 2825168404Spjd hdr = buf->b_hdr; 2826168404Spjd if (hdr == NULL) { 2827168404Spjd /* 2828168404Spjd * We are in arc_do_user_evicts(). 2829168404Spjd */ 2830168404Spjd ASSERT(buf->b_data == NULL); 2831185029Spjd rw_exit(&buf->b_lock); 2832168404Spjd return (0); 2833185029Spjd } else if (buf->b_data == NULL) { 2834185029Spjd arc_buf_t copy = *buf; /* structure assignment */ 2835185029Spjd /* 2836185029Spjd * We are on the eviction list; process this buffer now 2837185029Spjd * but let arc_do_user_evicts() do the reaping. 2838185029Spjd */ 2839185029Spjd buf->b_efunc = NULL; 2840185029Spjd rw_exit(&buf->b_lock); 2841185029Spjd VERIFY(copy.b_efunc(&copy) == 0); 2842185029Spjd return (1); 2843168404Spjd } 2844168404Spjd hash_lock = HDR_LOCK(hdr); 2845168404Spjd mutex_enter(hash_lock); 2846168404Spjd 2847168404Spjd ASSERT(buf->b_hdr == hdr); 2848168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 2849168404Spjd ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2850168404Spjd 2851168404Spjd /* 2852168404Spjd * Pull this buffer off of the hdr 2853168404Spjd */ 2854168404Spjd bufp = &hdr->b_buf; 2855168404Spjd while (*bufp != buf) 2856168404Spjd bufp = &(*bufp)->b_next; 2857168404Spjd *bufp = buf->b_next; 2858168404Spjd 2859168404Spjd ASSERT(buf->b_data != NULL); 2860168404Spjd arc_buf_destroy(buf, FALSE, FALSE); 2861168404Spjd 2862168404Spjd if (hdr->b_datacnt == 0) { 2863168404Spjd arc_state_t *old_state = hdr->b_state; 2864168404Spjd arc_state_t *evicted_state; 2865168404Spjd 2866168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2867168404Spjd 2868168404Spjd evicted_state = 2869168404Spjd (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 2870168404Spjd 2871168404Spjd mutex_enter(&old_state->arcs_mtx); 2872168404Spjd mutex_enter(&evicted_state->arcs_mtx); 2873168404Spjd 2874168404Spjd arc_change_state(evicted_state, hdr, hash_lock); 2875168404Spjd ASSERT(HDR_IN_HASH_TABLE(hdr)); 2876185029Spjd hdr->b_flags |= ARC_IN_HASH_TABLE; 2877185029Spjd hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2878168404Spjd 2879168404Spjd mutex_exit(&evicted_state->arcs_mtx); 2880168404Spjd mutex_exit(&old_state->arcs_mtx); 2881168404Spjd } 2882168404Spjd mutex_exit(hash_lock); 2883185029Spjd rw_exit(&buf->b_lock); 2884168404Spjd 2885168404Spjd VERIFY(buf->b_efunc(buf) == 0); 2886168404Spjd buf->b_efunc = NULL; 2887168404Spjd buf->b_private = NULL; 2888168404Spjd buf->b_hdr = NULL; 2889168404Spjd kmem_cache_free(buf_cache, buf); 2890168404Spjd return (1); 2891168404Spjd} 2892168404Spjd 2893168404Spjd/* 2894168404Spjd * Release this buffer from the cache. This must be done 2895168404Spjd * after a read and prior to modifying the buffer contents. 2896168404Spjd * If the buffer has more than one reference, we must make 2897185029Spjd * a new hdr for the buffer.
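 *
 * A minimal usage sketch; tag and new_data are hypothetical names,
 * and the caller must already hold a reference on buf under tag:
 *
 *	arc_release(buf, tag);
 *	ASSERT(arc_released(buf));
 *	bcopy(new_data, buf->b_data, buf->b_hdr->b_size);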
2898168404Spjd */ 2899168404Spjdvoid 2900168404Spjdarc_release(arc_buf_t *buf, void *tag) 2901168404Spjd{ 2902185029Spjd arc_buf_hdr_t *hdr; 2903185029Spjd kmutex_t *hash_lock; 2904185029Spjd l2arc_buf_hdr_t *l2hdr; 2905185029Spjd uint64_t buf_size; 2906168404Spjd 2907185029Spjd rw_enter(&buf->b_lock, RW_WRITER); 2908185029Spjd hdr = buf->b_hdr; 2909185029Spjd 2910168404Spjd /* this buffer is not on any list */ 2911168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2912185029Spjd ASSERT(!(hdr->b_flags & ARC_STORED)); 2913168404Spjd 2914168404Spjd if (hdr->b_state == arc_anon) { 2915168404Spjd /* this buffer is already released */ 2916168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2917168404Spjd ASSERT(BUF_EMPTY(hdr)); 2918168404Spjd ASSERT(buf->b_efunc == NULL); 2919168404Spjd arc_buf_thaw(buf); 2920185029Spjd rw_exit(&buf->b_lock); 2921168404Spjd return; 2922168404Spjd } 2923168404Spjd 2924185029Spjd hash_lock = HDR_LOCK(hdr); 2925168404Spjd mutex_enter(hash_lock); 2926168404Spjd 2927185029Spjd l2hdr = hdr->b_l2hdr; 2928185029Spjd if (l2hdr) { 2929185029Spjd mutex_enter(&l2arc_buflist_mtx); 2930185029Spjd hdr->b_l2hdr = NULL; 2931185029Spjd buf_size = hdr->b_size; 2932185029Spjd } 2933185029Spjd 2934168404Spjd /* 2935168404Spjd * Do we have more than one buf? 2936168404Spjd */ 2937185029Spjd if (hdr->b_datacnt > 1) { 2938168404Spjd arc_buf_hdr_t *nhdr; 2939168404Spjd arc_buf_t **bufp; 2940168404Spjd uint64_t blksz = hdr->b_size; 2941168404Spjd spa_t *spa = hdr->b_spa; 2942168404Spjd arc_buf_contents_t type = hdr->b_type; 2943185029Spjd uint32_t flags = hdr->b_flags; 2944168404Spjd 2945185029Spjd ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 2946168404Spjd /* 2947168404Spjd * Pull the data off of this buf and attach it to 2948168404Spjd * a new anonymous buf. 
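 *
 * The unlink below uses the classic pointer-to-pointer idiom:
 * advance bufp along the b_next links until *bufp is the target,
 * then splice it out with a single store. In generic form
 * (node_t, head and target are illustrative names):
 *
 *	node_t **pp = &head;
 *	while (*pp != target)
 *		pp = &(*pp)->next;
 *	*pp = target->next;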
2949168404Spjd */ 2950168404Spjd (void) remove_reference(hdr, hash_lock, tag); 2951168404Spjd bufp = &hdr->b_buf; 2952168404Spjd while (*bufp != buf) 2953168404Spjd bufp = &(*bufp)->b_next; 2954168404Spjd *bufp = (*bufp)->b_next; 2955168404Spjd buf->b_next = NULL; 2956168404Spjd 2957168404Spjd ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2958168404Spjd atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2959168404Spjd if (refcount_is_zero(&hdr->b_refcnt)) { 2960185029Spjd uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 2961185029Spjd ASSERT3U(*size, >=, hdr->b_size); 2962185029Spjd atomic_add_64(size, -hdr->b_size); 2963168404Spjd } 2964168404Spjd hdr->b_datacnt -= 1; 2965168404Spjd arc_cksum_verify(buf); 2966168404Spjd 2967168404Spjd mutex_exit(hash_lock); 2968168404Spjd 2969185029Spjd nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 2970168404Spjd nhdr->b_size = blksz; 2971168404Spjd nhdr->b_spa = spa; 2972168404Spjd nhdr->b_type = type; 2973168404Spjd nhdr->b_buf = buf; 2974168404Spjd nhdr->b_state = arc_anon; 2975168404Spjd nhdr->b_arc_access = 0; 2976185029Spjd nhdr->b_flags = flags & ARC_L2_WRITING; 2977185029Spjd nhdr->b_l2hdr = NULL; 2978168404Spjd nhdr->b_datacnt = 1; 2979168404Spjd nhdr->b_freeze_cksum = NULL; 2980168404Spjd (void) refcount_add(&nhdr->b_refcnt, tag); 2981168404Spjd buf->b_hdr = nhdr; 2982185029Spjd rw_exit(&buf->b_lock); 2983168404Spjd atomic_add_64(&arc_anon->arcs_size, blksz); 2984168404Spjd } else { 2985185029Spjd rw_exit(&buf->b_lock); 2986168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2987168404Spjd ASSERT(!list_link_active(&hdr->b_arc_node)); 2988168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2989168404Spjd arc_change_state(arc_anon, hdr, hash_lock); 2990168404Spjd hdr->b_arc_access = 0; 2991168404Spjd mutex_exit(hash_lock); 2992185029Spjd 2993168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 2994168404Spjd hdr->b_birth = 0; 2995168404Spjd hdr->b_cksum0 = 0; 2996168404Spjd arc_buf_thaw(buf); 2997168404Spjd } 2998168404Spjd buf->b_efunc = NULL; 2999168404Spjd buf->b_private = NULL; 3000185029Spjd 3001185029Spjd if (l2hdr) { 3002185029Spjd list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 3003185029Spjd kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 3004185029Spjd ARCSTAT_INCR(arcstat_l2_size, -buf_size); 3005185029Spjd mutex_exit(&l2arc_buflist_mtx); 3006185029Spjd } 3007168404Spjd} 3008168404Spjd 3009168404Spjdint 3010168404Spjdarc_released(arc_buf_t *buf) 3011168404Spjd{ 3012185029Spjd int released; 3013185029Spjd 3014185029Spjd rw_enter(&buf->b_lock, RW_READER); 3015185029Spjd released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 3016185029Spjd rw_exit(&buf->b_lock); 3017185029Spjd return (released); 3018168404Spjd} 3019168404Spjd 3020168404Spjdint 3021168404Spjdarc_has_callback(arc_buf_t *buf) 3022168404Spjd{ 3023185029Spjd int callback; 3024185029Spjd 3025185029Spjd rw_enter(&buf->b_lock, RW_READER); 3026185029Spjd callback = (buf->b_efunc != NULL); 3027185029Spjd rw_exit(&buf->b_lock); 3028185029Spjd return (callback); 3029168404Spjd} 3030168404Spjd 3031168404Spjd#ifdef ZFS_DEBUG 3032168404Spjdint 3033168404Spjdarc_referenced(arc_buf_t *buf) 3034168404Spjd{ 3035185029Spjd int referenced; 3036185029Spjd 3037185029Spjd rw_enter(&buf->b_lock, RW_READER); 3038185029Spjd referenced = (refcount_count(&buf->b_hdr->b_refcnt)); 3039185029Spjd rw_exit(&buf->b_lock); 3040185029Spjd return (referenced); 3041168404Spjd} 3042168404Spjd#endif 3043168404Spjd 3044168404Spjdstatic void 3045168404Spjdarc_write_ready(zio_t *zio) 
3046168404Spjd{ 3047168404Spjd arc_write_callback_t *callback = zio->io_private; 3048168404Spjd arc_buf_t *buf = callback->awcb_buf; 3049185029Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3050168404Spjd 3051185029Spjd ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt)); 3052185029Spjd callback->awcb_ready(zio, buf, callback->awcb_private); 3053185029Spjd 3054185029Spjd /* 3055185029Spjd * If the IO is already in progress, then this is a re-write 3056185029Spjd * attempt, so we need to thaw and re-compute the cksum. 3057185029Spjd * It is the responsibility of the callback to handle the 3058185029Spjd * accounting for any re-write attempt. 3059185029Spjd */ 3060185029Spjd if (HDR_IO_IN_PROGRESS(hdr)) { 3061185029Spjd mutex_enter(&hdr->b_freeze_lock); 3062185029Spjd if (hdr->b_freeze_cksum != NULL) { 3063185029Spjd kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 3064185029Spjd hdr->b_freeze_cksum = NULL; 3065185029Spjd } 3066185029Spjd mutex_exit(&hdr->b_freeze_lock); 3067168404Spjd } 3068185029Spjd arc_cksum_compute(buf, B_FALSE); 3069185029Spjd hdr->b_flags |= ARC_IO_IN_PROGRESS; 3070168404Spjd} 3071168404Spjd 3072168404Spjdstatic void 3073168404Spjdarc_write_done(zio_t *zio) 3074168404Spjd{ 3075168404Spjd arc_write_callback_t *callback = zio->io_private; 3076168404Spjd arc_buf_t *buf = callback->awcb_buf; 3077168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3078168404Spjd 3079168404Spjd hdr->b_acb = NULL; 3080168404Spjd 3081168404Spjd hdr->b_dva = *BP_IDENTITY(zio->io_bp); 3082168404Spjd hdr->b_birth = zio->io_bp->blk_birth; 3083168404Spjd hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0]; 3084168404Spjd /* 3085168404Spjd * If the block to be written was all-zero, we may have 3086168404Spjd * compressed it away. In this case no write was performed 3087168404Spjd * so there will be no dva/birth-date/checksum. The buffer 3088168404Spjd * must therefore remain anonymous (and uncached). 3089168404Spjd */ 3090168404Spjd if (!BUF_EMPTY(hdr)) { 3091168404Spjd arc_buf_hdr_t *exists; 3092168404Spjd kmutex_t *hash_lock; 3093168404Spjd 3094168404Spjd arc_cksum_verify(buf); 3095168404Spjd 3096168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 3097168404Spjd if (exists) { 3098168404Spjd /* 3099168404Spjd * This can only happen if we overwrite for 3100168404Spjd * sync-to-convergence, because we remove 3101168404Spjd * buffers from the hash table when we arc_free(). 3102168404Spjd */ 3103185029Spjd ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE); 3104168404Spjd ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig), 3105168404Spjd BP_IDENTITY(zio->io_bp))); 3106168404Spjd ASSERT3U(zio->io_bp_orig.blk_birth, ==, 3107168404Spjd zio->io_bp->blk_birth); 3108168404Spjd 3109168404Spjd ASSERT(refcount_is_zero(&exists->b_refcnt)); 3110168404Spjd arc_change_state(arc_anon, exists, hash_lock); 3111168404Spjd mutex_exit(hash_lock); 3112168404Spjd arc_hdr_destroy(exists); 3113168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 3114168404Spjd ASSERT3P(exists, ==, NULL); 3115168404Spjd } 3116168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3117185029Spjd /* if it's not anon, we are doing a scrub */ 3118185029Spjd if (hdr->b_state == arc_anon) 3119185029Spjd arc_access(hdr, hash_lock); 3120168404Spjd mutex_exit(hash_lock); 3121168404Spjd } else if (callback->awcb_done == NULL) { 3122168404Spjd int destroy_hdr; 3123168404Spjd /* 3124168404Spjd * This is an anonymous buffer with no user callback, 3125168404Spjd * destroy it if there are no active references.
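 *
 * This case arises when the write was issued through arc_write()
 * below with done == NULL, e.g. in a hypothetical fire-and-forget
 * write (my_ready is a placeholder; arc_write() asserts that the
 * ready callback is non-NULL):
 *
 *	zio = arc_write(pio, spa, wp, B_FALSE, txg, bp, buf,
 *	    my_ready, NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
 *	    ZIO_FLAG_CANFAIL, zb);
 *	zio_nowait(zio);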
3126168404Spjd */ 3127168404Spjd mutex_enter(&arc_eviction_mtx); 3128168404Spjd destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 3129168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3130168404Spjd mutex_exit(&arc_eviction_mtx); 3131168404Spjd if (destroy_hdr) 3132168404Spjd arc_hdr_destroy(hdr); 3133168404Spjd } else { 3134168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 3135168404Spjd } 3136185029Spjd hdr->b_flags &= ~ARC_STORED; 3137168404Spjd 3138168404Spjd if (callback->awcb_done) { 3139168404Spjd ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 3140168404Spjd callback->awcb_done(zio, buf, callback->awcb_private); 3141168404Spjd } 3142168404Spjd 3143168404Spjd kmem_free(callback, sizeof (arc_write_callback_t)); 3144168404Spjd} 3145168404Spjd 3146185029Spjdstatic void 3147185029Spjdwrite_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp) 3148185029Spjd{ 3149185029Spjd boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata); 3150185029Spjd 3151185029Spjd /* Determine checksum setting */ 3152185029Spjd if (ismd) { 3153185029Spjd /* 3154185029Spjd * Metadata always gets checksummed. If the data 3155185029Spjd * checksum is multi-bit correctable, and it's not a 3156185029Spjd * ZBT-style checksum, then it's suitable for metadata 3157185029Spjd * as well. Otherwise, the metadata checksum defaults 3158185029Spjd * to fletcher4. 3159185029Spjd */ 3160185029Spjd if (zio_checksum_table[wp->wp_oschecksum].ci_correctable && 3161185029Spjd !zio_checksum_table[wp->wp_oschecksum].ci_zbt) 3162185029Spjd zp->zp_checksum = wp->wp_oschecksum; 3163185029Spjd else 3164185029Spjd zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4; 3165185029Spjd } else { 3166185029Spjd zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum, 3167185029Spjd wp->wp_oschecksum); 3168185029Spjd } 3169185029Spjd 3170185029Spjd /* Determine compression setting */ 3171185029Spjd if (ismd) { 3172185029Spjd /* 3173185029Spjd * XXX -- we should design a compression algorithm 3174185029Spjd * that specializes in arrays of bps. 3175185029Spjd */ 3176185029Spjd zp->zp_compress = zfs_mdcomp_disable ? 
ZIO_COMPRESS_EMPTY : 3177185029Spjd ZIO_COMPRESS_LZJB; 3178185029Spjd } else { 3179185029Spjd zp->zp_compress = zio_compress_select(wp->wp_dncompress, 3180185029Spjd wp->wp_oscompress); 3181185029Spjd } 3182185029Spjd 3183185029Spjd zp->zp_type = wp->wp_type; 3184185029Spjd zp->zp_level = wp->wp_level; 3185185029Spjd zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa)); 3186185029Spjd} 3187185029Spjd 3188168404Spjdzio_t * 3189185029Spjdarc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp, 3190185029Spjd boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 3191168404Spjd arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 3192185029Spjd int zio_flags, const zbookmark_t *zb) 3193168404Spjd{ 3194168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 3195168404Spjd arc_write_callback_t *callback; 3196185029Spjd zio_t *zio; 3197185029Spjd zio_prop_t zp; 3198168404Spjd 3199185029Spjd ASSERT(ready != NULL); 3200168404Spjd ASSERT(!HDR_IO_ERROR(hdr)); 3201168404Spjd ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 3202168404Spjd ASSERT(hdr->b_acb == 0); 3203185029Spjd if (l2arc) 3204185029Spjd hdr->b_flags |= ARC_L2CACHE; 3205168404Spjd callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 3206168404Spjd callback->awcb_ready = ready; 3207168404Spjd callback->awcb_done = done; 3208168404Spjd callback->awcb_private = private; 3209168404Spjd callback->awcb_buf = buf; 3210168404Spjd 3211185029Spjd write_policy(spa, wp, &zp); 3212185029Spjd zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp, 3213185029Spjd arc_write_ready, arc_write_done, callback, priority, zio_flags, zb); 3214185029Spjd 3215168404Spjd return (zio); 3216168404Spjd} 3217168404Spjd 3218168404Spjdint 3219168404Spjdarc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 3220168404Spjd zio_done_func_t *done, void *private, uint32_t arc_flags) 3221168404Spjd{ 3222168404Spjd arc_buf_hdr_t *ab; 3223168404Spjd kmutex_t *hash_lock; 3224168404Spjd zio_t *zio; 3225168404Spjd 3226168404Spjd /* 3227168404Spjd * If this buffer is in the cache, release it, so it 3228168404Spjd * can be re-used. 3229168404Spjd */ 3230168404Spjd ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 3231168404Spjd if (ab != NULL) { 3232168404Spjd /* 3233168404Spjd * The checksum of blocks to free is not always 3234168404Spjd * preserved (eg. on the deadlist). However, if it is 3235168404Spjd * nonzero, it should match what we have in the cache. 3236168404Spjd */ 3237168404Spjd ASSERT(bp->blk_cksum.zc_word[0] == 0 || 3238185029Spjd bp->blk_cksum.zc_word[0] == ab->b_cksum0 || 3239185029Spjd bp->blk_fill == BLK_FILL_ALREADY_FREED); 3240185029Spjd 3241168404Spjd if (ab->b_state != arc_anon) 3242168404Spjd arc_change_state(arc_anon, ab, hash_lock); 3243168404Spjd if (HDR_IO_IN_PROGRESS(ab)) { 3244168404Spjd /* 3245168404Spjd * This should only happen when we prefetch. 
3246168404Spjd */ 3247168404Spjd ASSERT(ab->b_flags & ARC_PREFETCH); 3248168404Spjd ASSERT3U(ab->b_datacnt, ==, 1); 3249168404Spjd ab->b_flags |= ARC_FREED_IN_READ; 3250168404Spjd if (HDR_IN_HASH_TABLE(ab)) 3251168404Spjd buf_hash_remove(ab); 3252168404Spjd ab->b_arc_access = 0; 3253168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 3254168404Spjd ab->b_birth = 0; 3255168404Spjd ab->b_cksum0 = 0; 3256168404Spjd ab->b_buf->b_efunc = NULL; 3257168404Spjd ab->b_buf->b_private = NULL; 3258168404Spjd mutex_exit(hash_lock); 3259168404Spjd } else if (refcount_is_zero(&ab->b_refcnt)) { 3260185029Spjd ab->b_flags |= ARC_FREE_IN_PROGRESS; 3261168404Spjd mutex_exit(hash_lock); 3262168404Spjd arc_hdr_destroy(ab); 3263168404Spjd ARCSTAT_BUMP(arcstat_deleted); 3264168404Spjd } else { 3265168404Spjd /* 3266168404Spjd * We still have an active reference on this 3267168404Spjd * buffer. This can happen, e.g., from 3268168404Spjd * dbuf_unoverride(). 3269168404Spjd */ 3270168404Spjd ASSERT(!HDR_IN_HASH_TABLE(ab)); 3271168404Spjd ab->b_arc_access = 0; 3272168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 3273168404Spjd ab->b_birth = 0; 3274168404Spjd ab->b_cksum0 = 0; 3275168404Spjd ab->b_buf->b_efunc = NULL; 3276168404Spjd ab->b_buf->b_private = NULL; 3277168404Spjd mutex_exit(hash_lock); 3278168404Spjd } 3279168404Spjd } 3280168404Spjd 3281185029Spjd zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED); 3282168404Spjd 3283168404Spjd if (arc_flags & ARC_WAIT) 3284168404Spjd return (zio_wait(zio)); 3285168404Spjd 3286168404Spjd ASSERT(arc_flags & ARC_NOWAIT); 3287168404Spjd zio_nowait(zio); 3288168404Spjd 3289168404Spjd return (0); 3290168404Spjd} 3291168404Spjd 3292185029Spjdstatic int 3293185029Spjdarc_memory_throttle(uint64_t reserve, uint64_t txg) 3294185029Spjd{ 3295185029Spjd#ifdef _KERNEL 3296185029Spjd uint64_t inflight_data = arc_anon->arcs_size; 3297185029Spjd uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count); 3298185029Spjd static uint64_t page_load = 0; 3299185029Spjd static uint64_t last_txg = 0; 3300185029Spjd 3301185029Spjd#if 0 3302185029Spjd#if defined(__i386) 3303185029Spjd available_memory = 3304185029Spjd MIN(available_memory, vmem_size(heap_arena, VMEM_FREE)); 3305185029Spjd#endif 3306185029Spjd#endif 3307185029Spjd if (available_memory >= zfs_write_limit_max) 3308185029Spjd return (0); 3309185029Spjd 3310185029Spjd if (txg > last_txg) { 3311185029Spjd last_txg = txg; 3312185029Spjd page_load = 0; 3313185029Spjd } 3314185029Spjd /* 3315185029Spjd * If we are in pageout, we know that memory is already tight, 3316185029Spjd * the arc is already going to be evicting, so we just want to 3317185029Spjd * continue to let page writes occur as quickly as possible. 
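 *
 * A worked example of the pageout check below, with illustrative
 * numbers: given 1 GB free, available_memory / 4 is 256 MB. The
 * reserve is inflated and deflated here by a factor of 8, so
 * page_load grows by reserve / 8 per call, and pageout can queue
 * roughly 2 GB of inflated reserves before ERESTART is returned.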
 3318185029Spjd */ 3319185029Spjd if (curproc == pageproc) { 3320185029Spjd if (page_load > available_memory / 4) 3321185029Spjd return (ERESTART); 3322185029Spjd /* Note: reserve is inflated, so we deflate */ 3323185029Spjd page_load += reserve / 8; 3324185029Spjd return (0); 3325185029Spjd } else if (page_load > 0 && arc_reclaim_needed()) { 3326185029Spjd /* memory is low, delay before restarting */ 3327185029Spjd ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3328185029Spjd return (EAGAIN); 3329185029Spjd } 3330185029Spjd page_load = 0; 3331185029Spjd 3332185029Spjd if (arc_size > arc_c_min) { 3333185029Spjd uint64_t evictable_memory = 3334185029Spjd arc_mru->arcs_lsize[ARC_BUFC_DATA] + 3335185029Spjd arc_mru->arcs_lsize[ARC_BUFC_METADATA] + 3336185029Spjd arc_mfu->arcs_lsize[ARC_BUFC_DATA] + 3337185029Spjd arc_mfu->arcs_lsize[ARC_BUFC_METADATA]; 3338185029Spjd available_memory += MIN(evictable_memory, arc_size - arc_c_min); 3339185029Spjd } 3340185029Spjd 3341185029Spjd if (inflight_data > available_memory / 4) { 3342185029Spjd ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 3343185029Spjd return (ERESTART); 3344185029Spjd } 3345185029Spjd#endif 3346185029Spjd return (0); 3347185029Spjd} 3348185029Spjd 3349168404Spjdvoid 3350185029Spjdarc_tempreserve_clear(uint64_t reserve) 3351168404Spjd{ 3352185029Spjd atomic_add_64(&arc_tempreserve, -reserve); 3353168404Spjd ASSERT((int64_t)arc_tempreserve >= 0); 3354168404Spjd} 3355168404Spjd 3356168404Spjdint 3357185029Spjdarc_tempreserve_space(uint64_t reserve, uint64_t txg) 3358168404Spjd{ 3359185029Spjd int error; 3360185029Spjd 3361168404Spjd#ifdef ZFS_DEBUG 3362168404Spjd /* 3363168404Spjd * Once in a while, fail for no reason. Everything should cope. 3364168404Spjd */ 3365168404Spjd if (spa_get_random(10000) == 0) { 3366168404Spjd dprintf("forcing random failure\n"); 3367168404Spjd return (ERESTART); 3368168404Spjd } 3369168404Spjd#endif 3370185029Spjd if (reserve > arc_c/4 && !arc_no_grow) 3371185029Spjd arc_c = MIN(arc_c_max, reserve * 4); 3372185029Spjd if (reserve > arc_c) 3373168404Spjd return (ENOMEM); 3374168404Spjd 3375168404Spjd /* 3376185029Spjd * Writes will almost always require additional memory allocations 3377185029Spjd * in order to compress/encrypt/etc. the data. We therefore need to 3378185029Spjd * make sure that there is sufficient available memory for this. 3379185029Spjd */ 3380185029Spjd if (error = arc_memory_throttle(reserve, txg)) 3381185029Spjd return (error); 3382185029Spjd 3383185029Spjd /* 3384168404Spjd * Throttle writes when the amount of dirty data in the cache 3385168404Spjd * gets too large. We try to keep the cache less than half full 3386168404Spjd * of dirty blocks so that our sync times don't grow too large. 3387168404Spjd * Note: if two requests come in concurrently, we might let them 3388168404Spjd * both succeed, when one of them should fail. Not a huge deal.
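 *
 * A worked example of the test below, taking arc_c = 1 GB as an
 * illustrative value: the reservation fails with ERESTART only if
 * reserve + arc_tempreserve + anonymous data would exceed 512 MB
 * (arc_c / 2) *and* anonymous data alone already exceeds 256 MB
 * (arc_c / 4); otherwise it is granted.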
 3389168404Spjd */ 3390185029Spjd if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 && 3391185029Spjd arc_anon->arcs_size > arc_c / 4) { 3392185029Spjd dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 3393185029Spjd "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 3394185029Spjd arc_tempreserve>>10, 3395185029Spjd arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10, 3396185029Spjd arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10, 3397185029Spjd reserve>>10, arc_c>>10); 3398168404Spjd return (ERESTART); 3399168404Spjd } 3400185029Spjd atomic_add_64(&arc_tempreserve, reserve); 3401168404Spjd return (0); 3402168404Spjd} 3403168404Spjd 3404168582Spjdstatic kmutex_t arc_lowmem_lock; 3405168404Spjd#ifdef _KERNEL 3406168566Spjdstatic eventhandler_tag arc_event_lowmem = NULL; 3407168404Spjd 3408168404Spjdstatic void 3409168566Spjdarc_lowmem(void *arg __unused, int howto __unused) 3410168404Spjd{ 3411168404Spjd 3412168566Spjd /* Serialize access via arc_lowmem_lock. */ 3413168566Spjd mutex_enter(&arc_lowmem_lock); 3414185029Spjd needfree = 1; 3415168404Spjd cv_signal(&arc_reclaim_thr_cv); 3416185029Spjd while (needfree) 3417185029Spjd tsleep(&needfree, 0, "zfs:lowmem", hz / 5); 3418168566Spjd mutex_exit(&arc_lowmem_lock); 3419168404Spjd} 3420168404Spjd#endif 3421168404Spjd 3422168404Spjdvoid 3423168404Spjdarc_init(void) 3424168404Spjd{ 3425193953Skmacy int prefetch_tunable_set = 0; 3426193953Skmacy 3427168404Spjd mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL); 3428168404Spjd cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL); 3429168566Spjd mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL); 3430168404Spjd 3431168404Spjd /* Convert seconds to clock ticks */ 3432168404Spjd arc_min_prefetch_lifespan = 1 * hz; 3433168404Spjd 3434168404Spjd /* Start out with 1/8 of all memory */ 3435168566Spjd arc_c = kmem_size() / 8; 3436192360Skmacy#if 0 3437192360Skmacy#ifdef _KERNEL 3438192360Skmacy /* 3439192360Skmacy * On architectures where the physical memory can be larger 3440192360Skmacy * than the addressable space (intel in 32-bit mode), we may 3441192360Skmacy * need to limit the cache to 1/8 of VM size. 3442192360Skmacy */ 3443192360Skmacy arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8); 3444192360Skmacy#endif 3445192360Skmacy#endif 3446168566Spjd /* set min cache to 1/32 of all memory, or 16MB, whichever is more */ 3447168566Spjd arc_c_min = MAX(arc_c / 4, 64<<18); 3448168566Spjd /* set max to 5/8 of all memory, or all but 1GB, whichever is more */ 3449168404Spjd if (arc_c * 8 >= 1<<30) 3450168404Spjd arc_c_max = (arc_c * 8) - (1<<30); 3451168404Spjd else 3452168404Spjd arc_c_max = arc_c_min; 3453175633Spjd arc_c_max = MAX(arc_c * 5, arc_c_max); 3454168481Spjd#ifdef _KERNEL 3455168404Spjd /* 3456168404Spjd * Allow the tunables to override our calculations if they are 3457168566Spjd * reasonable (i.e.
over 16MB) 3458168404Spjd */ 3459168566Spjd if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size()) 3460168404Spjd arc_c_max = zfs_arc_max; 3461168566Spjd if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max) 3462168404Spjd arc_c_min = zfs_arc_min; 3463168481Spjd#endif 3464168404Spjd arc_c = arc_c_max; 3465168404Spjd arc_p = (arc_c >> 1); 3466168404Spjd 3467185029Spjd /* limit meta-data to 1/4 of the arc capacity */ 3468185029Spjd arc_meta_limit = arc_c_max / 4; 3469185029Spjd 3470185029Spjd /* Allow the tunable to override if it is reasonable */ 3471185029Spjd if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 3472185029Spjd arc_meta_limit = zfs_arc_meta_limit; 3473185029Spjd 3474185029Spjd if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 3475185029Spjd arc_c_min = arc_meta_limit / 2; 3476185029Spjd 3477168404Spjd /* if kmem_flags are set, lets try to use less memory */ 3478168404Spjd if (kmem_debugging()) 3479168404Spjd arc_c = arc_c / 2; 3480168404Spjd if (arc_c < arc_c_min) 3481168404Spjd arc_c = arc_c_min; 3482168404Spjd 3483168473Spjd zfs_arc_min = arc_c_min; 3484168473Spjd zfs_arc_max = arc_c_max; 3485168473Spjd 3486168404Spjd arc_anon = &ARC_anon; 3487168404Spjd arc_mru = &ARC_mru; 3488168404Spjd arc_mru_ghost = &ARC_mru_ghost; 3489168404Spjd arc_mfu = &ARC_mfu; 3490168404Spjd arc_mfu_ghost = &ARC_mfu_ghost; 3491185029Spjd arc_l2c_only = &ARC_l2c_only; 3492168404Spjd arc_size = 0; 3493168404Spjd 3494168404Spjd mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3495168404Spjd mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3496168404Spjd mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3497168404Spjd mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3498168404Spjd mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3499185029Spjd mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 3500168404Spjd 3501185029Spjd list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA], 3502185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3503185029Spjd list_create(&arc_mru->arcs_list[ARC_BUFC_DATA], 3504185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3505185029Spjd list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA], 3506185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3507185029Spjd list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA], 3508185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3509185029Spjd list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA], 3510185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3511185029Spjd list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA], 3512185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3513185029Spjd list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA], 3514185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3515185029Spjd list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA], 3516185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3517185029Spjd list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA], 3518185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3519185029Spjd list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA], 3520185029Spjd sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node)); 3521168404Spjd 3522168404Spjd buf_init(); 3523168404Spjd 3524168404Spjd arc_thread_exit = 0; 3525168404Spjd arc_eviction_list = NULL; 3526168404Spjd 
mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 3527168404Spjd bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 3528168404Spjd 3529168404Spjd arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 3530168404Spjd sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 3531168404Spjd 3532168404Spjd if (arc_ksp != NULL) { 3533168404Spjd arc_ksp->ks_data = &arc_stats; 3534168404Spjd kstat_install(arc_ksp); 3535168404Spjd } 3536168404Spjd 3537168404Spjd (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 3538168404Spjd TS_RUN, minclsyspri); 3539168404Spjd 3540168404Spjd#ifdef _KERNEL 3541168566Spjd arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL, 3542168404Spjd EVENTHANDLER_PRI_FIRST); 3543168404Spjd#endif 3544168404Spjd 3545168404Spjd arc_dead = FALSE; 3546185029Spjd arc_warm = B_FALSE; 3547168566Spjd 3548185029Spjd if (zfs_write_limit_max == 0) 3549185029Spjd zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift; 3550185029Spjd else 3551185029Spjd zfs_write_limit_shift = 0; 3552185029Spjd mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL); 3553185029Spjd 3554168566Spjd#ifdef _KERNEL 3555194043Skmacy if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable)) 3556193953Skmacy prefetch_tunable_set = 1; 3557193953Skmacy 3558193878Skmacy#ifdef __i386__ 3559193953Skmacy if (prefetch_tunable_set == 0) { 3560196863Strasz printf("ZFS NOTICE: Prefetch is disabled by default on i386 " 3561196863Strasz "-- to enable,\n"); 3562196863Strasz printf(" add \"vfs.zfs.prefetch_disable=0\" " 3563196863Strasz "to /boot/loader.conf.\n"); 3564194043Skmacy zfs_prefetch_disable=1; 3565193878Skmacy } 3566193953Skmacy#else 3567193878Skmacy if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) && 3568193953Skmacy prefetch_tunable_set == 0) { 3569196863Strasz printf("ZFS NOTICE: Prefetch is disabled by default if less " 3570196941Strasz "than 4GB of RAM is present;\n" 3571196863Strasz " to enable, add \"vfs.zfs.prefetch_disable=0\" " 3572196863Strasz "to /boot/loader.conf.\n"); 3573194043Skmacy zfs_prefetch_disable=1; 3574193878Skmacy } 3575193953Skmacy#endif 3576175633Spjd /* Warn about ZFS memory and address space requirements. 
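 * The tuning suggested by the messages below would take a form
 * like this in /boot/loader.conf (values are examples only,
 * following the 512MB recommendation):
 *
 *	vm.kmem_size="512M"
 *	vm.kmem_size_max="512M"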
*/ 3577168696Spjd if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) { 3578168987Sbmah printf("ZFS WARNING: Recommended minimum RAM size is 512MB; " 3579168987Sbmah "expect unstable behavior.\n"); 3580175633Spjd } 3581175633Spjd if (kmem_size() < 512 * (1 << 20)) { 3582173419Spjd printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; " 3583168987Sbmah "expect unstable behavior.\n"); 3584185029Spjd printf(" Consider tuning vm.kmem_size and " 3585173419Spjd "vm.kmem_size_max\n"); 3586185029Spjd printf(" in /boot/loader.conf.\n"); 3587168566Spjd } 3588168566Spjd#endif 3589168404Spjd} 3590168404Spjd 3591168404Spjdvoid 3592168404Spjdarc_fini(void) 3593168404Spjd{ 3594185029Spjd 3595168404Spjd mutex_enter(&arc_reclaim_thr_lock); 3596168404Spjd arc_thread_exit = 1; 3597168404Spjd cv_signal(&arc_reclaim_thr_cv); 3598168404Spjd while (arc_thread_exit != 0) 3599168404Spjd cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 3600168404Spjd mutex_exit(&arc_reclaim_thr_lock); 3601168404Spjd 3602185029Spjd arc_flush(NULL); 3603168404Spjd 3604168404Spjd arc_dead = TRUE; 3605168404Spjd 3606168404Spjd if (arc_ksp != NULL) { 3607168404Spjd kstat_delete(arc_ksp); 3608168404Spjd arc_ksp = NULL; 3609168404Spjd } 3610168404Spjd 3611168404Spjd mutex_destroy(&arc_eviction_mtx); 3612168404Spjd mutex_destroy(&arc_reclaim_thr_lock); 3613168404Spjd cv_destroy(&arc_reclaim_thr_cv); 3614168404Spjd 3615185029Spjd list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]); 3616185029Spjd list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 3617185029Spjd list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]); 3618185029Spjd list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 3619185029Spjd list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]); 3620185029Spjd list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 3621185029Spjd list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]); 3622185029Spjd list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 3623168404Spjd 3624168404Spjd mutex_destroy(&arc_anon->arcs_mtx); 3625168404Spjd mutex_destroy(&arc_mru->arcs_mtx); 3626168404Spjd mutex_destroy(&arc_mru_ghost->arcs_mtx); 3627168404Spjd mutex_destroy(&arc_mfu->arcs_mtx); 3628168404Spjd mutex_destroy(&arc_mfu_ghost->arcs_mtx); 3629168404Spjd 3630185029Spjd mutex_destroy(&zfs_write_limit_lock); 3631185029Spjd 3632168404Spjd buf_fini(); 3633168404Spjd 3634168582Spjd mutex_destroy(&arc_lowmem_lock); 3635168404Spjd#ifdef _KERNEL 3636168566Spjd if (arc_event_lowmem != NULL) 3637168566Spjd EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem); 3638168404Spjd#endif 3639168404Spjd} 3640185029Spjd 3641185029Spjd/* 3642185029Spjd * Level 2 ARC 3643185029Spjd * 3644185029Spjd * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk. 3645185029Spjd * It uses dedicated storage devices to hold cached data, which are populated 3646185029Spjd * using large infrequent writes. The main role of this cache is to boost 3647185029Spjd * the performance of random read workloads. The intended L2ARC devices 3648185029Spjd * include short-stroked disks, solid state disks, and other media with 3649185029Spjd * substantially faster read latency than disk. 
3650185029Spjd * 3651185029Spjd * +-----------------------+ 3652185029Spjd * | ARC | 3653185029Spjd * +-----------------------+ 3654185029Spjd * | ^ ^ 3655185029Spjd * | | | 3656185029Spjd * l2arc_feed_thread() arc_read() 3657185029Spjd * | | | 3658185029Spjd * | l2arc read | 3659185029Spjd * V | | 3660185029Spjd * +---------------+ | 3661185029Spjd * | L2ARC | | 3662185029Spjd * +---------------+ | 3663185029Spjd * | ^ | 3664185029Spjd * l2arc_write() | | 3665185029Spjd * | | | 3666185029Spjd * V | | 3667185029Spjd * +-------+ +-------+ 3668185029Spjd * | vdev | | vdev | 3669185029Spjd * | cache | | cache | 3670185029Spjd * +-------+ +-------+ 3671185029Spjd * +=========+ .-----. 3672185029Spjd * : L2ARC : |-_____-| 3673185029Spjd * : devices : | Disks | 3674185029Spjd * +=========+ `-_____-' 3675185029Spjd * 3676185029Spjd * Read requests are satisfied from the following sources, in order: 3677185029Spjd * 3678185029Spjd * 1) ARC 3679185029Spjd * 2) vdev cache of L2ARC devices 3680185029Spjd * 3) L2ARC devices 3681185029Spjd * 4) vdev cache of disks 3682185029Spjd * 5) disks 3683185029Spjd * 3684185029Spjd * Some L2ARC device types exhibit extremely slow write performance. 3685185029Spjd * To accommodate for this there are some significant differences between 3686185029Spjd * the L2ARC and traditional cache design: 3687185029Spjd * 3688185029Spjd * 1. There is no eviction path from the ARC to the L2ARC. Evictions from 3689185029Spjd * the ARC behave as usual, freeing buffers and placing headers on ghost 3690185029Spjd * lists. The ARC does not send buffers to the L2ARC during eviction as 3691185029Spjd * this would add inflated write latencies for all ARC memory pressure. 3692185029Spjd * 3693185029Spjd * 2. The L2ARC attempts to cache data from the ARC before it is evicted. 3694185029Spjd * It does this by periodically scanning buffers from the eviction-end of 3695185029Spjd * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are 3696185029Spjd * not already there. It scans until a headroom of buffers is satisfied, 3697185029Spjd * which itself is a buffer for ARC eviction. The thread that does this is 3698185029Spjd * l2arc_feed_thread(), illustrated below; example sizes are included to 3699185029Spjd * provide a better sense of ratio than this diagram: 3700185029Spjd * 3701185029Spjd * head --> tail 3702185029Spjd * +---------------------+----------+ 3703185029Spjd * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC 3704185029Spjd * +---------------------+----------+ | o L2ARC eligible 3705185029Spjd * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer 3706185029Spjd * +---------------------+----------+ | 3707185029Spjd * 15.9 Gbytes ^ 32 Mbytes | 3708185029Spjd * headroom | 3709185029Spjd * l2arc_feed_thread() 3710185029Spjd * | 3711185029Spjd * l2arc write hand <--[oooo]--' 3712185029Spjd * | 8 Mbyte 3713185029Spjd * | write max 3714185029Spjd * V 3715185029Spjd * +==============================+ 3716185029Spjd * L2ARC dev |####|#|###|###| |####| ... | 3717185029Spjd * +==============================+ 3718185029Spjd * 32 Gbytes 3719185029Spjd * 3720185029Spjd * 3. If an ARC buffer is copied to the L2ARC but then hit instead of 3721185029Spjd * evicted, then the L2ARC has cached a buffer much sooner than it probably 3722185029Spjd * needed to, potentially wasting L2ARC device bandwidth and storage. 
It is 3723185029Spjd * safe to say that this is an uncommon case, since buffers at the end of 3724185029Spjd * the ARC lists have moved there due to inactivity. 3725185029Spjd * 3726185029Spjd * 4. If the ARC evicts faster than the L2ARC can maintain a headroom, 3727185029Spjd * then the L2ARC simply misses copying some buffers. This serves as a 3728185029Spjd * pressure valve to prevent heavy read workloads from both stalling the ARC 3729185029Spjd * with waits and clogging the L2ARC with writes. This also helps prevent 3730185029Spjd * the potential for the L2ARC to churn if it attempts to cache content too 3731185029Spjd * quickly, such as during backups of the entire pool. 3732185029Spjd * 3733185029Spjd * 5. After system boot and before the ARC has filled main memory, there are 3734185029Spjd * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru 3735185029Spjd * lists can remain mostly static. Instead of searching from tail of these 3736185029Spjd * lists as pictured, the l2arc_feed_thread() will search from the list heads 3737185029Spjd * for eligible buffers, greatly increasing its chance of finding them. 3738185029Spjd * 3739185029Spjd * The L2ARC device write speed is also boosted during this time so that 3740185029Spjd * the L2ARC warms up faster. Since there have been no ARC evictions yet, 3741185029Spjd * there are no L2ARC reads, and no fear of degrading read performance 3742185029Spjd * through increased writes. 3743185029Spjd * 3744185029Spjd * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that 3745185029Spjd * the vdev queue can aggregate them into larger and fewer writes. Each 3746185029Spjd * device is written to in a rotor fashion, sweeping writes through 3747185029Spjd * available space then repeating. 3748185029Spjd * 3749185029Spjd * 7. The L2ARC does not store dirty content. It never needs to flush 3750185029Spjd * write buffers back to disk based storage. 3751185029Spjd * 3752185029Spjd * 8. If an ARC buffer is written (and dirtied) which also exists in the 3753185029Spjd * L2ARC, the now stale L2ARC buffer is immediately dropped. 3754185029Spjd * 3755185029Spjd * The performance of the L2ARC can be tweaked by a number of tunables, which 3756185029Spjd * may be necessary for different workloads: 3757185029Spjd * 3758185029Spjd * l2arc_write_max max write bytes per interval 3759185029Spjd * l2arc_write_boost extra write bytes during device warmup 3760185029Spjd * l2arc_noprefetch skip caching prefetched buffers 3761185029Spjd * l2arc_headroom number of max device writes to precache 3762185029Spjd * l2arc_feed_secs seconds between L2ARC writing 3763185029Spjd * 3764185029Spjd * Tunables may be removed or added as future performance improvements are 3765185029Spjd * integrated, and also may become zpool properties. 3766185029Spjd */ 3767185029Spjd 3768185029Spjdstatic void 3769185029Spjdl2arc_hdr_stat_add(void) 3770185029Spjd{ 3771185029Spjd ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE); 3772185029Spjd ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE); 3773185029Spjd} 3774185029Spjd 3775185029Spjdstatic void 3776185029Spjdl2arc_hdr_stat_remove(void) 3777185029Spjd{ 3778185029Spjd ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE)); 3779185029Spjd ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE); 3780185029Spjd} 3781185029Spjd 3782185029Spjd/* 3783185029Spjd * Cycle through L2ARC devices. This is how L2ARC load balances. 3784185029Spjd * If a device is returned, this also returns holding the spa config lock. 
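 *
 * A minimal sketch of the expected caller pattern (the feed thread
 * below works this way; the config lock taken here is dropped by
 * the caller once writing is done):
 *
 *	dev = l2arc_dev_get_next();
 *	if (dev != NULL) {
 *		... issue L2ARC writes to dev ...
 *		spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
 *	}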
3785185029Spjd */ 3786185029Spjdstatic l2arc_dev_t * 3787185029Spjdl2arc_dev_get_next(void) 3788185029Spjd{ 3789185029Spjd l2arc_dev_t *first, *next = NULL; 3790185029Spjd 3791185029Spjd /* 3792185029Spjd * Lock out the removal of spas (spa_namespace_lock), then removal 3793185029Spjd * of cache devices (l2arc_dev_mtx). Once a device has been selected, 3794185029Spjd * both locks will be dropped and a spa config lock held instead. 3795185029Spjd */ 3796185029Spjd mutex_enter(&spa_namespace_lock); 3797185029Spjd mutex_enter(&l2arc_dev_mtx); 3798185029Spjd 3799185029Spjd /* if there are no vdevs, there is nothing to do */ 3800185029Spjd if (l2arc_ndev == 0) 3801185029Spjd goto out; 3802185029Spjd 3803185029Spjd first = NULL; 3804185029Spjd next = l2arc_dev_last; 3805185029Spjd do { 3806185029Spjd /* loop around the list looking for a non-faulted vdev */ 3807185029Spjd if (next == NULL) { 3808185029Spjd next = list_head(l2arc_dev_list); 3809185029Spjd } else { 3810185029Spjd next = list_next(l2arc_dev_list, next); 3811185029Spjd if (next == NULL) 3812185029Spjd next = list_head(l2arc_dev_list); 3813185029Spjd } 3814185029Spjd 3815185029Spjd /* if we have come back to the start, bail out */ 3816185029Spjd if (first == NULL) 3817185029Spjd first = next; 3818185029Spjd else if (next == first) 3819185029Spjd break; 3820185029Spjd 3821185029Spjd } while (vdev_is_dead(next->l2ad_vdev)); 3822185029Spjd 3823185029Spjd /* if we were unable to find any usable vdevs, return NULL */ 3824185029Spjd if (vdev_is_dead(next->l2ad_vdev)) 3825185029Spjd next = NULL; 3826185029Spjd 3827185029Spjd l2arc_dev_last = next; 3828185029Spjd 3829185029Spjdout: 3830185029Spjd mutex_exit(&l2arc_dev_mtx); 3831185029Spjd 3832185029Spjd /* 3833185029Spjd * Grab the config lock to prevent the 'next' device from being 3834185029Spjd * removed while we are writing to it. 3835185029Spjd */ 3836185029Spjd if (next != NULL) 3837185029Spjd spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 3838185029Spjd mutex_exit(&spa_namespace_lock); 3839185029Spjd 3840185029Spjd return (next); 3841185029Spjd} 3842185029Spjd 3843185029Spjd/* 3844185029Spjd * Free buffers that were tagged for destruction. 3845185029Spjd */ 3846185029Spjdstatic void 3847185029Spjdl2arc_do_free_on_write() 3848185029Spjd{ 3849185029Spjd list_t *buflist; 3850185029Spjd l2arc_data_free_t *df, *df_prev; 3851185029Spjd 3852185029Spjd mutex_enter(&l2arc_free_on_write_mtx); 3853185029Spjd buflist = l2arc_free_on_write; 3854185029Spjd 3855185029Spjd for (df = list_tail(buflist); df; df = df_prev) { 3856185029Spjd df_prev = list_prev(buflist, df); 3857185029Spjd ASSERT(df->l2df_data != NULL); 3858185029Spjd ASSERT(df->l2df_func != NULL); 3859185029Spjd df->l2df_func(df->l2df_data, df->l2df_size); 3860185029Spjd list_remove(buflist, df); 3861185029Spjd kmem_free(df, sizeof (l2arc_data_free_t)); 3862185029Spjd } 3863185029Spjd 3864185029Spjd mutex_exit(&l2arc_free_on_write_mtx); 3865185029Spjd} 3866185029Spjd 3867185029Spjd/* 3868185029Spjd * A write to a cache device has completed. Update all headers to allow 3869185029Spjd * reads from these buffers to begin. 
3870185029Spjd */ 3871185029Spjdstatic void 3872185029Spjdl2arc_write_done(zio_t *zio) 3873185029Spjd{ 3874185029Spjd l2arc_write_callback_t *cb; 3875185029Spjd l2arc_dev_t *dev; 3876185029Spjd list_t *buflist; 3877185029Spjd arc_buf_hdr_t *head, *ab, *ab_prev; 3878185029Spjd l2arc_buf_hdr_t *abl2; 3879185029Spjd kmutex_t *hash_lock; 3880185029Spjd 3881185029Spjd cb = zio->io_private; 3882185029Spjd ASSERT(cb != NULL); 3883185029Spjd dev = cb->l2wcb_dev; 3884185029Spjd ASSERT(dev != NULL); 3885185029Spjd head = cb->l2wcb_head; 3886185029Spjd ASSERT(head != NULL); 3887185029Spjd buflist = dev->l2ad_buflist; 3888185029Spjd ASSERT(buflist != NULL); 3889185029Spjd DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 3890185029Spjd l2arc_write_callback_t *, cb); 3891185029Spjd 3892185029Spjd if (zio->io_error != 0) 3893185029Spjd ARCSTAT_BUMP(arcstat_l2_writes_error); 3894185029Spjd 3895185029Spjd mutex_enter(&l2arc_buflist_mtx); 3896185029Spjd 3897185029Spjd /* 3898185029Spjd * All writes completed, or an error was hit. 3899185029Spjd */ 3900185029Spjd for (ab = list_prev(buflist, head); ab; ab = ab_prev) { 3901185029Spjd ab_prev = list_prev(buflist, ab); 3902185029Spjd 3903185029Spjd hash_lock = HDR_LOCK(ab); 3904185029Spjd if (!mutex_tryenter(hash_lock)) { 3905185029Spjd /* 3906185029Spjd * This buffer misses out. It may be in a stage 3907185029Spjd * of eviction. Its ARC_L2_WRITING flag will be 3908185029Spjd * left set, denying reads to this buffer. 3909185029Spjd */ 3910185029Spjd ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss); 3911185029Spjd continue; 3912185029Spjd } 3913185029Spjd 3914185029Spjd if (zio->io_error != 0) { 3915185029Spjd /* 3916185029Spjd * Error - drop L2ARC entry. 3917185029Spjd */ 3918185029Spjd list_remove(buflist, ab); 3919185029Spjd abl2 = ab->b_l2hdr; 3920185029Spjd ab->b_l2hdr = NULL; 3921185029Spjd kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 3922185029Spjd ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 3923185029Spjd } 3924185029Spjd 3925185029Spjd /* 3926185029Spjd * Allow ARC to begin reads to this L2ARC entry. 3927185029Spjd */ 3928185029Spjd ab->b_flags &= ~ARC_L2_WRITING; 3929185029Spjd 3930185029Spjd mutex_exit(hash_lock); 3931185029Spjd } 3932185029Spjd 3933185029Spjd atomic_inc_64(&l2arc_writes_done); 3934185029Spjd list_remove(buflist, head); 3935185029Spjd kmem_cache_free(hdr_cache, head); 3936185029Spjd mutex_exit(&l2arc_buflist_mtx); 3937185029Spjd 3938185029Spjd l2arc_do_free_on_write(); 3939185029Spjd 3940185029Spjd kmem_free(cb, sizeof (l2arc_write_callback_t)); 3941185029Spjd} 3942185029Spjd 3943185029Spjd/* 3944185029Spjd * A read to a cache device completed. Validate buffer contents before 3945185029Spjd * handing over to the regular ARC routines. 
3946185029Spjd */ 3947185029Spjdstatic void 3948185029Spjdl2arc_read_done(zio_t *zio) 3949185029Spjd{ 3950185029Spjd l2arc_read_callback_t *cb; 3951185029Spjd arc_buf_hdr_t *hdr; 3952185029Spjd arc_buf_t *buf; 3953185029Spjd kmutex_t *hash_lock; 3954185029Spjd int equal; 3955185029Spjd 3956185029Spjd ASSERT(zio->io_vd != NULL); 3957185029Spjd ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 3958185029Spjd 3959185029Spjd spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 3960185029Spjd 3961185029Spjd cb = zio->io_private; 3962185029Spjd ASSERT(cb != NULL); 3963185029Spjd buf = cb->l2rcb_buf; 3964185029Spjd ASSERT(buf != NULL); 3965185029Spjd hdr = buf->b_hdr; 3966185029Spjd ASSERT(hdr != NULL); 3967185029Spjd 3968185029Spjd hash_lock = HDR_LOCK(hdr); 3969185029Spjd mutex_enter(hash_lock); 3970185029Spjd 3971185029Spjd /* 3972185029Spjd * Check this survived the L2ARC journey. 3973185029Spjd */ 3974185029Spjd equal = arc_cksum_equal(buf); 3975185029Spjd if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 3976185029Spjd mutex_exit(hash_lock); 3977185029Spjd zio->io_private = buf; 3978185029Spjd zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 3979185029Spjd zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 3980185029Spjd arc_read_done(zio); 3981185029Spjd } else { 3982185029Spjd mutex_exit(hash_lock); 3983185029Spjd /* 3984185029Spjd * Buffer didn't survive caching. Increment stats and 3985185029Spjd * reissue to the original storage device. 3986185029Spjd */ 3987185029Spjd if (zio->io_error != 0) { 3988185029Spjd ARCSTAT_BUMP(arcstat_l2_io_error); 3989185029Spjd } else { 3990185029Spjd zio->io_error = EIO; 3991185029Spjd } 3992185029Spjd if (!equal) 3993185029Spjd ARCSTAT_BUMP(arcstat_l2_cksum_bad); 3994185029Spjd 3995185029Spjd /* 3996185029Spjd * If there's no waiter, issue an async i/o to the primary 3997185029Spjd * storage now. If there *is* a waiter, the caller must 3998185029Spjd * issue the i/o in a context where it's OK to block. 3999185029Spjd */ 4000185029Spjd if (zio->io_waiter == NULL) 4001185029Spjd zio_nowait(zio_read(zio->io_parent, 4002185029Spjd cb->l2rcb_spa, &cb->l2rcb_bp, 4003185029Spjd buf->b_data, zio->io_size, arc_read_done, buf, 4004185029Spjd zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb)); 4005185029Spjd } 4006185029Spjd 4007185029Spjd kmem_free(cb, sizeof (l2arc_read_callback_t)); 4008185029Spjd} 4009185029Spjd 4010185029Spjd/* 4011185029Spjd * This is the list priority from which the L2ARC will search for pages to 4012185029Spjd * cache. This is used within loops (0..3) to cycle through lists in the 4013185029Spjd * desired order. This order can have a significant effect on cache 4014185029Spjd * performance. 4015185029Spjd * 4016185029Spjd * Currently the metadata lists are hit first, MFU then MRU, followed by 4017185029Spjd * the data lists. This function returns a locked list, and also returns 4018185029Spjd * the lock pointer. 
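 *
 * A minimal sketch of the caller pattern, as driven by
 * l2arc_write_buffers() below:
 *
 *	for (try = 0; try <= 3; try++) {
 *		list = l2arc_list_locked(try, &list_lock);
 *		... scan candidate buffers on list ...
 *		mutex_exit(list_lock);
 *	}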
4019185029Spjd */ 4020185029Spjdstatic list_t * 4021185029Spjdl2arc_list_locked(int list_num, kmutex_t **lock) 4022185029Spjd{ 4023185029Spjd list_t *list; 4024185029Spjd 4025185029Spjd ASSERT(list_num >= 0 && list_num <= 3); 4026185029Spjd 4027185029Spjd switch (list_num) { 4028185029Spjd case 0: 4029185029Spjd list = &arc_mfu->arcs_list[ARC_BUFC_METADATA]; 4030185029Spjd *lock = &arc_mfu->arcs_mtx; 4031185029Spjd break; 4032185029Spjd case 1: 4033185029Spjd list = &arc_mru->arcs_list[ARC_BUFC_METADATA]; 4034185029Spjd *lock = &arc_mru->arcs_mtx; 4035185029Spjd break; 4036185029Spjd case 2: 4037185029Spjd list = &arc_mfu->arcs_list[ARC_BUFC_DATA]; 4038185029Spjd *lock = &arc_mfu->arcs_mtx; 4039185029Spjd break; 4040185029Spjd case 3: 4041185029Spjd list = &arc_mru->arcs_list[ARC_BUFC_DATA]; 4042185029Spjd *lock = &arc_mru->arcs_mtx; 4043185029Spjd break; 4044185029Spjd } 4045185029Spjd 4046185029Spjd ASSERT(!(MUTEX_HELD(*lock))); 4047185029Spjd mutex_enter(*lock); 4048185029Spjd return (list); 4049185029Spjd} 4050185029Spjd 4051185029Spjd/* 4052185029Spjd * Evict buffers from the device write hand to the distance specified in 4053185029Spjd * bytes. This distance may span populated buffers, it may span nothing. 4054185029Spjd * This is clearing a region on the L2ARC device ready for writing. 4055185029Spjd * If the 'all' boolean is set, every buffer is evicted. 4056185029Spjd */ 4057185029Spjdstatic void 4058185029Spjdl2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 4059185029Spjd{ 4060185029Spjd list_t *buflist; 4061185029Spjd l2arc_buf_hdr_t *abl2; 4062185029Spjd arc_buf_hdr_t *ab, *ab_prev; 4063185029Spjd kmutex_t *hash_lock; 4064185029Spjd uint64_t taddr; 4065185029Spjd 4066185029Spjd buflist = dev->l2ad_buflist; 4067185029Spjd 4068185029Spjd if (buflist == NULL) 4069185029Spjd return; 4070185029Spjd 4071185029Spjd if (!all && dev->l2ad_first) { 4072185029Spjd /* 4073185029Spjd * This is the first sweep through the device. There is 4074185029Spjd * nothing to evict. 4075185029Spjd */ 4076185029Spjd return; 4077185029Spjd } 4078185029Spjd 4079185029Spjd if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) { 4080185029Spjd /* 4081185029Spjd * When nearing the end of the device, evict to the end 4082185029Spjd * before the device write hand jumps to the start. 4083185029Spjd */ 4084185029Spjd taddr = dev->l2ad_end; 4085185029Spjd } else { 4086185029Spjd taddr = dev->l2ad_hand + distance; 4087185029Spjd } 4088185029Spjd DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist, 4089185029Spjd uint64_t, taddr, boolean_t, all); 4090185029Spjd 4091185029Spjdtop: 4092185029Spjd mutex_enter(&l2arc_buflist_mtx); 4093185029Spjd for (ab = list_tail(buflist); ab; ab = ab_prev) { 4094185029Spjd ab_prev = list_prev(buflist, ab); 4095185029Spjd 4096185029Spjd hash_lock = HDR_LOCK(ab); 4097185029Spjd if (!mutex_tryenter(hash_lock)) { 4098185029Spjd /* 4099185029Spjd * Missed the hash lock. Retry. 4100185029Spjd */ 4101185029Spjd ARCSTAT_BUMP(arcstat_l2_evict_lock_retry); 4102185029Spjd mutex_exit(&l2arc_buflist_mtx); 4103185029Spjd mutex_enter(hash_lock); 4104185029Spjd mutex_exit(hash_lock); 4105185029Spjd goto top; 4106185029Spjd } 4107185029Spjd 4108185029Spjd if (HDR_L2_WRITE_HEAD(ab)) { 4109185029Spjd /* 4110185029Spjd * We hit a write head node. Leave it for 4111185029Spjd * l2arc_write_done(). 
4112185029Spjd */ 4113185029Spjd list_remove(buflist, ab); 4114185029Spjd mutex_exit(hash_lock); 4115185029Spjd continue; 4116185029Spjd } 4117185029Spjd 4118185029Spjd if (!all && ab->b_l2hdr != NULL && 4119185029Spjd (ab->b_l2hdr->b_daddr > taddr || 4120185029Spjd ab->b_l2hdr->b_daddr < dev->l2ad_hand)) { 4121185029Spjd /* 4122185029Spjd * We've evicted to the target address, 4123185029Spjd * or the end of the device. 4124185029Spjd */ 4125185029Spjd mutex_exit(hash_lock); 4126185029Spjd break; 4127185029Spjd } 4128185029Spjd 4129185029Spjd if (HDR_FREE_IN_PROGRESS(ab)) { 4130185029Spjd /* 4131185029Spjd * Already on the path to destruction. 4132185029Spjd */ 4133185029Spjd mutex_exit(hash_lock); 4134185029Spjd continue; 4135185029Spjd } 4136185029Spjd 4137185029Spjd if (ab->b_state == arc_l2c_only) { 4138185029Spjd ASSERT(!HDR_L2_READING(ab)); 4139185029Spjd /* 4140185029Spjd * This doesn't exist in the ARC. Destroy. 4141185029Spjd * arc_hdr_destroy() will call list_remove() 4142185029Spjd * and decrement arcstat_l2_size. 4143185029Spjd */ 4144185029Spjd arc_change_state(arc_anon, ab, hash_lock); 4145185029Spjd arc_hdr_destroy(ab); 4146185029Spjd } else { 4147185029Spjd /* 4148185029Spjd * Invalidate issued or about to be issued 4149185029Spjd * reads, since we may be about to write 4150185029Spjd * over this location. 4151185029Spjd */ 4152185029Spjd if (HDR_L2_READING(ab)) { 4153185029Spjd ARCSTAT_BUMP(arcstat_l2_evict_reading); 4154185029Spjd ab->b_flags |= ARC_L2_EVICTED; 4155185029Spjd } 4156185029Spjd 4157185029Spjd /* 4158185029Spjd * Tell ARC this no longer exists in L2ARC. 4159185029Spjd */ 4160185029Spjd if (ab->b_l2hdr != NULL) { 4161185029Spjd abl2 = ab->b_l2hdr; 4162185029Spjd ab->b_l2hdr = NULL; 4163185029Spjd kmem_free(abl2, sizeof (l2arc_buf_hdr_t)); 4164185029Spjd ARCSTAT_INCR(arcstat_l2_size, -ab->b_size); 4165185029Spjd } 4166185029Spjd list_remove(buflist, ab); 4167185029Spjd 4168185029Spjd /* 4169185029Spjd * This may have been leftover after a 4170185029Spjd * failed write. 4171185029Spjd */ 4172185029Spjd ab->b_flags &= ~ARC_L2_WRITING; 4173185029Spjd } 4174185029Spjd mutex_exit(hash_lock); 4175185029Spjd } 4176185029Spjd mutex_exit(&l2arc_buflist_mtx); 4177185029Spjd 4178185029Spjd spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict)); 4179185029Spjd dev->l2ad_evict = taddr; 4180185029Spjd} 4181185029Spjd 4182185029Spjd/* 4183185029Spjd * Find and write ARC buffers to the L2ARC device. 4184185029Spjd * 4185185029Spjd * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid 4186185029Spjd * for reading until they have completed writing. 4187185029Spjd */ 4188185029Spjdstatic void 4189185029Spjdl2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 4190185029Spjd{ 4191185029Spjd arc_buf_hdr_t *ab, *ab_prev, *head; 4192185029Spjd l2arc_buf_hdr_t *hdrl2; 4193185029Spjd list_t *list; 4194185029Spjd uint64_t passed_sz, write_sz, buf_sz, headroom; 4195185029Spjd void *buf_data; 4196185029Spjd kmutex_t *hash_lock, *list_lock; 4197185029Spjd boolean_t have_lock, full; 4198185029Spjd l2arc_write_callback_t *cb; 4199185029Spjd zio_t *pio, *wzio; 4200185029Spjd int try; 4201185029Spjd 4202185029Spjd ASSERT(dev->l2ad_vdev != NULL); 4203185029Spjd 4204185029Spjd pio = NULL; 4205185029Spjd write_sz = 0; 4206185029Spjd full = B_FALSE; 4207185029Spjd head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE); 4208185029Spjd head->b_flags |= ARC_L2_WRITE_HEAD; 4209185029Spjd 4210185029Spjd /* 4211185029Spjd * Copy buffers for L2ARC writing. 

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static void
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	int try;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_spa != spa) {
				mutex_exit(hash_lock);
				continue;
			}

			if (ab->b_l2hdr != NULL) {
				/*
				 * Already in L2ARC.
				 */
				mutex_exit(hash_lock);
				continue;
			}

			if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_buf == NULL) {
				DTRACE_PROBE1(l2arc__buf__null, void *, ab);
				mutex_exit(hash_lock);
				continue;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			hdrl2->b_dev = dev;
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing. In debug builds the cksum is verified
			 * first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return;
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		spa_l2cache_space_update(dev->l2ad_vdev, 0,
		    dev->l2ad_end - dev->l2ad_hand);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	(void) zio_wait(pio);
}
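
/*
 * Headroom example for the scan above: with target_sz = 8MB and
 * l2arc_headroom = 2 (the usual default for that tunable), passed_sz
 * may reach at most 16MB in each of the four lists before the search
 * moves on, which bounds the time spent holding any one list lock.
 */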

/*
 * This thread feeds the L2ARC at regular intervals. This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *dummy __unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		/*
		 * Pause for l2arc_feed_secs seconds between writes.
		 */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    hz * l2arc_feed_secs);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa. This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal. l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = dev->l2ad_write;
		if (arc_warm == B_FALSE)
			size += dev->l2ad_boost;

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		l2arc_write_buffers(spa, dev, size);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}
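
/*
 * Rough steady-state arithmetic for the feed loop above: each pass
 * writes at most dev->l2ad_write bytes (plus dev->l2ad_boost while
 * arc_warm is false) every l2arc_feed_secs seconds. Assuming the
 * historical defaults of an 8MB write maximum and a 1 second interval,
 * a single cache device warms at roughly 8MB/s once the ARC is warm,
 * and faster during the initial boost phase.
 */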

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC. By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = start;
	adddev->l2ad_end = end;
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}
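
/*
 * Typical path to l2arc_add_vdev(): an administrator attaches a cache
 * device (e.g. "zpool add tank cache da0"), and the spa code, after
 * validating and opening the vdev, passes in the usable [start, end)
 * byte range; the range is expected to skip the vdev labels at the
 * front of the device.
 */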

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references. L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}
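
/*
 * l2arc_init() above and l2arc_fini() below bracket the module
 * lifetime: init sets up the locks and the global device and
 * free-on-write lists before any pool can add a cache device, and
 * fini tears them down after all pools, and therefore all cache
 * devices, are gone.
 */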

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini().
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}
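
/*
 * Note that l2arc_start() and l2arc_stop() are no-ops unless spa_mode
 * includes FWRITE: a read-only import never starts the feed thread, so
 * the L2ARC is never populated in that mode. l2arc_stop() blocks until
 * the feed thread has observed l2arc_thread_exit and exited.
 */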