arc.c revision 192360
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */
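
/*
 * To make the lock ordering above concrete, here is a sketch of the
 * pattern the eviction path uses (cf. arc_evict() below).  The names
 * are illustrative locals, not part of this file:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for each candidate header ab on the list:
 *		hash_lock = HDR_LOCK(ab);
 *		if (mutex_tryenter(hash_lock)) {
 *			... evict or recycle ab ...
 *			mutex_exit(hash_lock);
 *		} else {
 *			... skip ab; blocking here while holding
 *			    arcs_mtx could deadlock against a thread
 *			    that holds the hash lock and wants the
 *			    list lock ...
 *		}
 *	mutex_exit(&state->arcs_mtx);
 */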

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_mdcomp_disable = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
TUNABLE_INT("vfs.zfs.mdcomp_disable", &zfs_mdcomp_disable);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");
SYSCTL_INT(_vfs_zfs, OID_AUTO, mdcomp_disable, CTLFLAG_RDTUN,
    &zfs_mdcomp_disable, 0, "Disable metadata compression");
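
/*
 * As a usage note: CTLFLAG_RDTUN means these can only be set as
 * boot-time tunables, e.g. from /boot/loader.conf (the byte values
 * below are examples only, not recommendations):
 *
 *	vfs.zfs.arc_max="1073741824"
 *	vfs.zfs.arc_min="134217728"
 *	vfs.zfs.mdcomp_disable="1"
 */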

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */
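
/*
 * A sketch of the typical lifecycle implied above (simplified; see
 * arc_access() for the authoritative transitions):
 *
 *	arc_buf_alloc()			-> ARC_anon (dirty, ref'd, no DVA)
 *	write completes, DVA assigned	-> ARC_mru
 *	accessed again while cached	-> ARC_mfu
 *	evicted (data dropped)		-> ARC_mru_ghost / ARC_mfu_ghost
 *	header survives only in L2ARC	-> ARC_l2c_only
 */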

typedef struct arc_state {
	list_t	arcs_list[ARC_BUFC_NUMTYPES];	/* list of evictable buffers */
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t	arcstat_hits;
	kstat_named_t	arcstat_misses;
	kstat_named_t	arcstat_demand_data_hits;
	kstat_named_t	arcstat_demand_data_misses;
	kstat_named_t	arcstat_demand_metadata_hits;
	kstat_named_t	arcstat_demand_metadata_misses;
	kstat_named_t	arcstat_prefetch_data_hits;
	kstat_named_t	arcstat_prefetch_data_misses;
	kstat_named_t	arcstat_prefetch_metadata_hits;
	kstat_named_t	arcstat_prefetch_metadata_misses;
	kstat_named_t	arcstat_mru_hits;
	kstat_named_t	arcstat_mru_ghost_hits;
	kstat_named_t	arcstat_mfu_hits;
	kstat_named_t	arcstat_mfu_ghost_hits;
	kstat_named_t	arcstat_deleted;
	kstat_named_t	arcstat_recycle_miss;
	kstat_named_t	arcstat_mutex_miss;
	kstat_named_t	arcstat_evict_skip;
	kstat_named_t	arcstat_hash_elements;
	kstat_named_t	arcstat_hash_elements_max;
	kstat_named_t	arcstat_hash_collisions;
	kstat_named_t	arcstat_hash_chains;
	kstat_named_t	arcstat_hash_chain_max;
	kstat_named_t	arcstat_p;
	kstat_named_t	arcstat_c;
	kstat_named_t	arcstat_c_min;
	kstat_named_t	arcstat_c_max;
	kstat_named_t	arcstat_size;
	kstat_named_t	arcstat_hdr_size;
	kstat_named_t	arcstat_l2_hits;
	kstat_named_t	arcstat_l2_misses;
	kstat_named_t	arcstat_l2_feeds;
	kstat_named_t	arcstat_l2_rw_clash;
	kstat_named_t	arcstat_l2_writes_sent;
	kstat_named_t	arcstat_l2_writes_done;
	kstat_named_t	arcstat_l2_writes_error;
	kstat_named_t	arcstat_l2_writes_hdr_miss;
	kstat_named_t	arcstat_l2_evict_lock_retry;
	kstat_named_t	arcstat_l2_evict_reading;
	kstat_named_t	arcstat_l2_free_on_write;
	kstat_named_t	arcstat_l2_abort_lowmem;
	kstat_named_t	arcstat_l2_cksum_bad;
	kstat_named_t	arcstat_l2_io_error;
	kstat_named_t	arcstat_l2_size;
	kstat_named_t	arcstat_l2_hdr_size;
	kstat_named_t	arcstat_memory_throttle_count;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 }
};
"prefetch_data_misses", KSTAT_DATA_UINT64 }, 293117395Skan { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 294117395Skan { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 295132718Skan { "mru_hits", KSTAT_DATA_UINT64 }, 296132718Skan { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 297117395Skan { "mfu_hits", KSTAT_DATA_UINT64 }, 298117395Skan { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 299117395Skan { "deleted", KSTAT_DATA_UINT64 }, 300117395Skan { "recycle_miss", KSTAT_DATA_UINT64 }, 301117395Skan { "mutex_miss", KSTAT_DATA_UINT64 }, 302117395Skan { "evict_skip", KSTAT_DATA_UINT64 }, 303117395Skan { "hash_elements", KSTAT_DATA_UINT64 }, 304117395Skan { "hash_elements_max", KSTAT_DATA_UINT64 }, 305117395Skan { "hash_collisions", KSTAT_DATA_UINT64 }, 306117395Skan { "hash_chains", KSTAT_DATA_UINT64 }, 307117395Skan { "hash_chain_max", KSTAT_DATA_UINT64 }, 308132718Skan { "p", KSTAT_DATA_UINT64 }, 309132718Skan { "c", KSTAT_DATA_UINT64 }, 310132718Skan { "c_min", KSTAT_DATA_UINT64 }, 311132718Skan { "c_max", KSTAT_DATA_UINT64 }, 312117395Skan { "size", KSTAT_DATA_UINT64 }, 313117395Skan { "hdr_size", KSTAT_DATA_UINT64 }, 314117395Skan { "l2_hits", KSTAT_DATA_UINT64 }, 315117395Skan { "l2_misses", KSTAT_DATA_UINT64 }, 316117395Skan { "l2_feeds", KSTAT_DATA_UINT64 }, 317117395Skan { "l2_rw_clash", KSTAT_DATA_UINT64 }, 318117395Skan { "l2_writes_sent", KSTAT_DATA_UINT64 }, 319117395Skan { "l2_writes_done", KSTAT_DATA_UINT64 }, 320169699Skan { "l2_writes_error", KSTAT_DATA_UINT64 }, 321169699Skan { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 }, 322169699Skan { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 323169699Skan { "l2_evict_reading", KSTAT_DATA_UINT64 }, 324132718Skan { "l2_free_on_write", KSTAT_DATA_UINT64 }, 325132718Skan { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 326169699Skan { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 327132718Skan { "l2_io_error", KSTAT_DATA_UINT64 }, 328132718Skan { "l2_size", KSTAT_DATA_UINT64 }, 329132718Skan { "l2_hdr_size", KSTAT_DATA_UINT64 }, 330132718Skan { "memory_throttle_count", KSTAT_DATA_UINT64 } 331132718Skan}; 332132718Skan 333169699Skan#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 334132718Skan 335132718Skan#define ARCSTAT_INCR(stat, val) \ 336132718Skan atomic_add_64(&arc_stats.stat.value.ui64, (val)); 337117395Skan 338117395Skan#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 339117395Skan#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 340117395Skan 341117395Skan#define ARCSTAT_MAX(stat, val) { \ 342117395Skan uint64_t m; \ 343117395Skan while ((val) > (m = arc_stats.stat.value.ui64) && \ 344132718Skan (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 345117395Skan continue; \ 346117395Skan} 347117395Skan 348117395Skan#define ARCSTAT_MAXSTAT(stat) \ 349117395Skan ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 350117395Skan 351117395Skan/* 352117395Skan * We define a macro to allow ARC hits/misses to be easily broken down by 353117395Skan * two separate conditions, giving a total of four different subtypes for 354117395Skan * each of hits and misses (so eight statistics total). 

kstat_t			*arc_ksp;
static arc_state_t 	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
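
/*
 * Thus, for example, a plain
 *
 *	atomic_add_64(&arc_size, size);
 *
 * (as in arc_data_buf_alloc() below) updates the exported arcstats
 * kstat directly; there is no separate shadow variable to keep in sync.
 */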

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RDTUN,
    &arc_meta_used, 0, "ARC metadata used");
SYSCTL_QUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RDTUN,
    &arc_meta_limit, 0, "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
#define	ARC_STORED		(1 << 19)	/* has been store()d to */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	128

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		4		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
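
/*
 * Illustrative effect of these tunables (an approximation; the feed
 * thread itself is the authority): every l2arc_feed_secs seconds the
 * feed thread may write up to
 *
 *	l2arc_write_max				bytes (steady state), or
 *	l2arc_write_max + l2arc_write_boost	bytes (until the ARC is warm),
 *
 * and it scans roughly l2arc_headroom write-sizes ahead of the
 * eviction hand when looking for buffers to cache.
 */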

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t			*l2ad_vdev;	/* vdev */
	spa_t			*l2ad_spa;	/* spa */
	uint64_t		l2ad_hand;	/* next write location */
	uint64_t		l2ad_write;	/* desired write size, bytes */
	uint64_t		l2ad_boost;	/* warmup write boost, bytes */
	uint64_t		l2ad_start;	/* first addr on device */
	uint64_t		l2ad_end;	/* last addr on device */
	uint64_t		l2ad_evict;	/* last addr eviction reached */
	boolean_t		l2ad_first;	/* first sweep through */
	list_t			*l2ad_buflist;	/* buffer list */
	list_node_t		l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	daddr_t		b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(spa_t *spa, const dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to the new one in the hash table, then the already existing
 * element will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}
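
/*
 * A sketch of how a caller is expected to use the above (cf. the read
 * path elsewhere in this file); "hdr" and "exists" are hypothetical
 * locals:
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... somebody raced us in; operate on "exists" and
 *		    discard "hdr" ...
 *	}
 *	... hash_lock is returned held in either case; drop it
 *	    when done ...
 *	mutex_exit(hash_lock);
 */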

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);

	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	rw_init(&buf->b_lock, NULL, RW_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);

	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	rw_destroy(&buf->b_lock);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
}
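
/*
 * Summarizing the freeze/thaw protocol above (checks are only active
 * with ZFS_DEBUG_MODIFY set in zfs_flags); the caller shown here is
 * hypothetical:
 *
 *	arc_buf_thaw(buf);	... about to modify an anon buffer ...
 *	... fill in buf->b_data ...
 *	arc_buf_freeze(buf);	... done; checksum the contents ...
 *
 * After the freeze, arc_cksum_verify() will panic if the frozen
 * contents change underneath us.
 */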

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		list_t *list = &ab->b_state->arcs_list[ab->b_type];
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];

		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list[ab->b_type], ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list[ab->b_type], ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list[ab->b_type], ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space)
{
	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space)
{
	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
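
/*
 * A minimal usage sketch for the allocation interface above; "spa",
 * "size" and "tag" are the caller's (hypothetical here):
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	... fill buf->b_data; the buffer starts out anonymous and
 *	    referenced by "tag" ...
 *	(void) arc_buf_remove_ref(buf, tag);
 *
 * For a still-anonymous buffer, arc_buf_remove_ref() simply frees it
 * (see arc_buf_free() below).
 */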

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted.  Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
	 */
	rw_enter(&buf->b_lock, RW_READER);
	if (buf->b_data == NULL) {
		rw_exit(&buf->b_lock);
		return;
	}
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);
	rw_exit(&buf->b_lock);

	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

/*
 * Free the arc data buffer.  If it is an l2arc write in progress,
 * the buffer is placed on l2arc_free_on_write to be freed later.
 */
static void
arc_buf_data_free(arc_buf_hdr_t *hdr, void (*free_func)(void *, size_t),
    void *data, size_t size)
{
	if (HDR_L2_WRITING(hdr)) {
		l2arc_data_free_t *df;
		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
		df->l2df_data = data;
		df->l2df_size = size;
		df->l2df_func = free_func;
		mutex_enter(&l2arc_free_on_write_mtx);
		list_insert_head(l2arc_free_on_write, df);
		mutex_exit(&l2arc_free_on_write_mtx);
		ARCSTAT_BUMP(arcstat_l2_free_on_write);
	} else {
		free_func(data, size);
	}
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				arc_buf_data_free(buf->b_hdr, zio_buf_free,
				    buf->b_data, size);
				arc_space_return(size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				arc_buf_data_free(buf->b_hdr,
				    zio_data_buf_free, buf->b_data, size);
				atomic_add_64(&arc_size, -size);
			}
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			uint64_t *cnt = &state->arcs_lsize[type];

			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);

			ASSERT3U(*cnt, >=, size);
			atomic_add_64(cnt, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_l2hdr != NULL) {
		if (!MUTEX_HELD(&l2arc_buflist_mtx)) {
			/*
			 * To prevent arc_free() and l2arc_evict() from
			 * attempting to free the same buffer at the same time,
			 * a FREE_IN_PROGRESS flag is given to arc_free() to
			 * give it priority.  l2arc_evict() can't destroy this
			 * header while we are waiting on l2arc_buflist_mtx.
			 *
			 * The hdr may be removed from l2ad_buflist before we
			 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
			 */
			mutex_enter(&l2arc_buflist_mtx);
			if (hdr->b_l2hdr != NULL) {
				list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist,
				    hdr);
			}
			mutex_exit(&l2arc_buflist_mtx);
		} else {
			list_remove(hdr->b_l2hdr->b_dev->l2ad_buflist, hdr);
		}
		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
		kmem_free(hdr->b_l2hdr, sizeof (l2arc_buf_hdr_t));
		if (hdr->b_state == arc_l2c_only)
			l2arc_hdr_stat_remove();
		hdr->b_l2hdr = NULL;
	}

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			rw_enter(&buf->b_lock, RW_WRITER);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			rw_exit(&buf->b_lock);
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 *
 * This function makes a "best effort".  It skips over any buffers
 * it can't get a hash_lock on, and so may not catch all candidates.
 * It may also return without evicting as much space as requested.
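 *
 * For illustration, the recycle path as arc_get_data_buf() below uses
 * it (a sketch of existing usage, not an additional API):
 *
 *	void *data = arc_evict(state, NULL, size, TRUE, type);
 *	if (data == NULL)
 *		data = zio_buf_alloc(size);	(or zio_data_buf_alloc())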
 */
static void *
arc_evict(arc_state_t *state, spa_t *spa, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	list_t *list = &state->arcs_list[type];
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (spa && ab->b_spa != spa) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    LBOLT - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (!rw_tryenter(&buf->b_lock, RW_WRITER)) {
					missed += 1;
					break;
				}
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes &&
					    !HDR_L2_WRITING(ab)) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
					rw_exit(&buf->b_lock);
				} else {
					rw_exit(&buf->b_lock);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			if (ab->b_datacnt == 0) {
				arc_change_state(evicted_state, ab, hash_lock);
				ASSERT(HDR_IN_HASH_TABLE(ab));
				ab->b_flags |= ARC_IN_HASH_TABLE;
				ab->b_flags &= ~ARC_BUF_AVAILABLE;
				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			}
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	/*
	 * We have just evicted some data into the ghost state, make
	 * sure we also adjust the ghost state size if necessary.
	 */
	if (arc_no_grow &&
	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
		int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
		    arc_mru_ghost->arcs_size - arc_c;

		if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
			int64_t todelete =
			    MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		} else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
			int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
			    arc_mru_ghost->arcs_size +
			    arc_mfu_ghost->arcs_size - arc_c);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}

	return (stolen);
}

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, spa_t *spa, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	list_t *list = &state->arcs_list[ARC_BUFC_DATA];
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(list); ab; ab = ab_prev) {
		ab_prev = list_prev(list, ab);
		if (spa && ab->b_spa != spa)
			continue;
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;

			if (ab->b_l2hdr != NULL) {
				/*
				 * This buffer is cached on the 2nd Level ARC;
				 * don't destroy the header.
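				 * It is moved to the arc_l2c_only state
				 * below, so the L2ARC copy can still be
				 * found by a later read.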
				 */
				arc_change_state(arc_l2c_only, ab, hash_lock);
				mutex_exit(hash_lock);
			} else {
				arc_change_state(arc_anon, ab, hash_lock);
				mutex_exit(hash_lock);
				arc_hdr_destroy(ab);
			}

			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (list == &state->arcs_list[ARC_BUFC_DATA] &&
	    (bytes < 0 || bytes_deleted < bytes)) {
		list = &state->arcs_list[ARC_BUFC_METADATA];
		goto top;
	}

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used;

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE, ARC_BUFC_DATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	if (top_sz > arc_p && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
		int64_t toevict =
		    MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], top_sz - arc_p);
		(void) arc_evict(arc_mru, NULL, toevict, FALSE,
		    ARC_BUFC_METADATA);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_size > 0) {
			todelete = MIN(arc_mru_ghost->arcs_size, mru_over);
			arc_evict_ghost(arc_mru_ghost, NULL, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_DATA], arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_DATA);
			arc_over = arc_size - arc_c;
		}

		if (arc_over > 0 &&
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
			int64_t toevict =
			    MIN(arc_mfu->arcs_lsize[ARC_BUFC_METADATA],
			    arc_over);
			(void) arc_evict(arc_mfu, NULL, toevict, FALSE,
			    ARC_BUFC_METADATA);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_size +
		    arc_mfu_ghost->arcs_size - arc_c * 2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_size > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_size, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, NULL, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	static arc_buf_t *tmp_arc_eviction_list;

	/*
	 * Move the list aside so that the callbacks run without holding
	 * arc_eviction_mtx (avoids a lock order reversal).
	 */
restart:
	mutex_enter(&arc_eviction_mtx);
	tmp_arc_eviction_list = arc_eviction_list;
	arc_eviction_list = NULL;
	mutex_exit(&arc_eviction_mtx);

	while (tmp_arc_eviction_list != NULL) {
		arc_buf_t *buf = tmp_arc_eviction_list;
		tmp_arc_eviction_list = buf->b_next;
		rw_enter(&buf->b_lock, RW_WRITER);
		buf->b_hdr = NULL;
		rw_exit(&buf->b_lock);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
	}

	if (arc_eviction_list != NULL)
		goto restart;
}

/*
 * Flush all *evictable* data from the cache for the given spa.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(spa_t *spa)
{
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mru->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mru, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_DATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_DATA);
		if (spa)
			break;
	}
	while (list_head(&arc_mfu->arcs_list[ARC_BUFC_METADATA])) {
		(void) arc_evict(arc_mfu, spa, -1, FALSE, ARC_BUFC_METADATA);
		if (spa)
			break;
	}

	arc_evict_ghost(arc_mru_ghost, spa, -1);
	arc_evict_ghost(arc_mfu_ghost, spa, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(spa || arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = arc_c >> arc_shrink_shift;
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int needfree = 0;

static int
arc_reclaim_needed(void)
{
#if 0
	uint64_t extra;
#endif

#ifdef _KERNEL

	/*
	 * If pages are needed or we're within 2048 pages
	 * of needing to page, we need to reclaim.
	 */
	if (vm_pages_needed || (vm_paging_target() > -2048))
		return (1);

	if (needfree)
		return (1);

#if 0
	/*
	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
	 */
	extra = desfree;

	/*
	 * check that we're out of range of the pageout scanner.  It starts to
	 * schedule paging if freemem is less than lotsfree and needfree.
	 * lotsfree is the high-water mark for pageout, and needfree is the
	 * number of needed free pages.  We add extra pages here to make sure
	 * the scanner doesn't start up while we're freeing memory.
	 */
	if (freemem < lotsfree + needfree + extra)
		return (1);

	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed.  anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.
	 */
	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
		return (1);

#if defined(__i386)
	/*
	 * If we're on an i386 platform, it's possible that we'll exhaust the
	 * kernel heap space before we ever run out of available physical
	 * memory.  Most checks of the size of the heap_area compare against
	 * tune.t_minarmem, which is the minimum available real memory that we
	 * can have in the system.  However, this is generally fixed at 25 pages
	 * which is so low that it's useless.  In this comparison, we seek to
	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
	 * free)
	 */
	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
		return (1);
#endif
#else
	if (kmem_used() > (kmem_size() * 3) / 4)
		return (1);
#endif

#else
	if (spa_get_random(100) == 0)
		return (1);
#endif
	return (0);
}

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
#ifdef ZIO_USE_UMA
	size_t i;
	kmem_cache_t *prev_cache = NULL;
	kmem_cache_t *prev_data_cache = NULL;
	extern kmem_cache_t *zio_buf_cache[];
	extern kmem_cache_t *zio_data_buf_cache[];
#endif

#ifdef _KERNEL
	if (arc_meta_used >= arc_meta_limit) {
		/*
		 * We are exceeding our meta-data cache limit.
		 * Purge some DNLC entries to release holds on meta-data.
		 */
		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
	}
#if defined(__i386)
	/*
	 * Reclaim unused memory from all kmem caches.
	 */
	kmem_reap();
#endif
#endif

	/*
	 * An aggressive reclamation will shrink the cache size as well as
	 * reap free buffers from the arc kmem caches.
	 */
	if (strat == ARC_RECLAIM_AGGR)
		arc_shrink();

#ifdef ZIO_USE_UMA
	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
		if (zio_buf_cache[i] != prev_cache) {
			prev_cache = zio_buf_cache[i];
			kmem_cache_reap_now(zio_buf_cache[i]);
		}
		if (zio_data_buf_cache[i] != prev_data_cache) {
			prev_data_cache = zio_data_buf_cache[i];
			kmem_cache_reap_now(zio_data_buf_cache[i]);
		}
	}
#endif
	kmem_cache_reap_now(buf_cache);
	kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void *dummy __unused)
{
	clock_t			growtime = 0;
	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
	callb_cpr_t		cpr;

	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&arc_reclaim_thr_lock);
	while (arc_thread_exit == 0) {
		if (arc_reclaim_needed()) {

			if (arc_no_grow) {
				if (last_reclaim == ARC_RECLAIM_CONS) {
					last_reclaim = ARC_RECLAIM_AGGR;
				} else {
					last_reclaim = ARC_RECLAIM_CONS;
				}
			} else {
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
				membar_producer();
			}

			/* reset the growth delay for every reclaim */
			growtime = LBOLT + (arc_grow_retry * hz);

			if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
				/*
				 * If needfree is TRUE our vm_lowmem hook
				 * was called and in that case we must free some
				 * memory, so switch to aggressive mode.
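				 * (needfree is set by the lowmem event
				 * handler, arc_lowmem(), near the end of
				 * this file.)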
				 */
				arc_no_grow = TRUE;
				last_reclaim = ARC_RECLAIM_AGGR;
			}
			arc_kmem_reap_now(last_reclaim);
			arc_warm = B_TRUE;

		} else if (arc_no_grow && LBOLT >= growtime) {
			arc_no_grow = FALSE;
		}

		if (needfree ||
		    (2 * arc_c < arc_size +
		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size))
			arc_adjust();

		if (arc_eviction_list != NULL)
			arc_do_user_evicts();

		if (arc_reclaim_needed()) {
			needfree = 0;
#ifdef _KERNEL
			wakeup(&needfree);
#endif
		}

		/* block until needed, or one second, whichever is shorter */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&arc_reclaim_thr_cv,
		    &arc_reclaim_thr_lock, hz);
		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
	}

	arc_thread_exit = 0;
	cv_broadcast(&arc_reclaim_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
	thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from.  This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
	int mult;

	if (state == arc_l2c_only)
		return;

	ASSERT(bytes > 0);
	/*
	 * Adapt the target size of the MRU list:
	 *	- if we just hit in the MRU ghost list, then increase
	 *	  the target size of the MRU list.
	 *	- if we just hit in the MFU ghost list, then increase
	 *	  the target size of the MFU list by decreasing the
	 *	  target size of the MRU list.
	 */
	if (state == arc_mru_ghost) {
		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));

		arc_p = MIN(arc_c, arc_p + bytes * mult);
	} else if (state == arc_mfu_ghost) {
		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));

		arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
	}
	ASSERT((int64_t)arc_p >= 0);

	if (arc_reclaim_needed()) {
		cv_signal(&arc_reclaim_thr_cv);
		return;
	}

	if (arc_no_grow)
		return;

	if (arc_c >= arc_c_max)
		return;

	/*
	 * If we're within (2 * maxblocksize) bytes of the target
	 * cache size, increment the target cache size
	 */
	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
		atomic_add_64(&arc_c, (int64_t)bytes);
		if (arc_c > arc_c_max)
			arc_c = arc_c_max;
		else if (state == arc_anon)
			atomic_add_64(&arc_p, (int64_t)bytes);
		if (arc_p > arc_c)
			arc_p = arc_c;
	}
	ASSERT((int64_t)arc_p >= 0);
}

/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(arc_buf_contents_t type)
{
	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
		return (1);

#if 0
#ifdef _KERNEL
	/*
	 * If zio data pages are being allocated out of a separate heap segment,
	 * then enforce that the size of available vmem for this area remains
	 * above about 1/32nd free.
	 */
	if (type == ARC_BUFC_DATA && zio_arena != NULL &&
	    vmem_size(zio_arena, VMEM_FREE) <
	    (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
		return (1);
#endif
#endif

	if (arc_reclaim_needed())
		return (1);

	return (arc_size > arc_c);
}

/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
	arc_state_t		*state = buf->b_hdr->b_state;
	uint64_t		size = buf->b_hdr->b_size;
	arc_buf_contents_t	type = buf->b_hdr->b_type;

	arc_adapt(size, state);

	/*
	 * We have not yet reached cache maximum size,
	 * just allocate a new buffer.
	 */
	if (!arc_evict_needed(type)) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			atomic_add_64(&arc_size, size);
		}
		goto out;
	}

	/*
	 * If we are prefetching from the mfu ghost list, this buffer
	 * will end up on the mru list; so steal space from there.
	 */
	if (state == arc_mfu_ghost)
		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
	else if (state == arc_mru_ghost)
		state = arc_mru;

	if (state == arc_mru || state == arc_anon) {
		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
		state = (arc_mfu->arcs_lsize[type] > 0 &&
		    arc_p > mru_used) ? arc_mfu : arc_mru;
	} else {
		/* MFU cases */
		uint64_t mfu_space = arc_c - arc_p;
		state = (arc_mru->arcs_lsize[type] > 0 &&
		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
	}
	if ((buf->b_data = arc_evict(state, NULL, size, TRUE, type)) == NULL) {
		if (type == ARC_BUFC_METADATA) {
			buf->b_data = zio_buf_alloc(size);
			arc_space_consume(size);
		} else {
			ASSERT(type == ARC_BUFC_DATA);
			buf->b_data = zio_data_buf_alloc(size);
			atomic_add_64(&arc_size, size);
		}
		ARCSTAT_BUMP(arcstat_recycle_miss);
	}
	ASSERT(buf->b_data != NULL);
out:
	/*
	 * Update the state size.  Note that ghost states have a
	 * "ghost size" and so don't need to be updated.
	 */
	if (!GHOST_STATE(buf->b_hdr->b_state)) {
		arc_buf_hdr_t *hdr = buf->b_hdr;

		atomic_add_64(&hdr->b_state->arcs_size, size);
		if (list_link_active(&hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&hdr->b_refcnt));
			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
		}
		/*
		 * If we are growing the cache, and we are adding anonymous
		 * data, and we have outgrown arc_p, update arc_p
		 */
		if (arc_size < arc_c && hdr->b_state == arc_anon &&
		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
			arc_p = MIN(arc_c, arc_p + size);
	}
}

/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if (buf->b_state == arc_anon) {
		/*
		 * This buffer is not in the cache, and does not
		 * appear in our "ghost" list.  Add the new buffer
		 * to the MRU state.
		 */

		ASSERT(buf->b_arc_access == 0);
		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mru, buf, hash_lock);

	} else if (buf->b_state == arc_mru) {
		/*
		 * If this buffer is here because of a prefetch, then either:
		 * - clear the flag if this is a "referencing" read
		 *   (any subsequent access will bump this into the MFU state).
		 * or
		 * - move the buffer to the head of the list if this is
		 *   another prefetch (to make it less likely to be evicted).
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			if (refcount_count(&buf->b_refcnt) == 0) {
				ASSERT(list_link_active(&buf->b_arc_node));
			} else {
				buf->b_flags &= ~ARC_PREFETCH;
				ARCSTAT_BUMP(arcstat_mru_hits);
			}
			buf->b_arc_access = LBOLT;
			return;
		}

		/*
		 * This buffer has been "accessed" only once so far,
		 * but it is still in the cache.  Move it to the MFU
		 * state.
		 */
		if (LBOLT > buf->b_arc_access + ARC_MINTIME) {
			/*
			 * More than 125ms have passed since we
			 * instantiated this buffer.  Move it to the
			 * most frequently used state.
			 */
			buf->b_arc_access = LBOLT;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
			arc_change_state(arc_mfu, buf, hash_lock);
		}
		ARCSTAT_BUMP(arcstat_mru_hits);
	} else if (buf->b_state == arc_mru_ghost) {
		arc_state_t	*new_state;
		/*
		 * This buffer has been "accessed" recently, but
		 * was evicted from the cache.  Move it to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			new_state = arc_mru;
			if (refcount_count(&buf->b_refcnt) > 0)
				buf->b_flags &= ~ARC_PREFETCH;
			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
		} else {
			new_state = arc_mfu;
			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		}

		buf->b_arc_access = LBOLT;
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
	} else if (buf->b_state == arc_mfu) {
		/*
		 * This buffer has been accessed more than once and is
		 * still in the cache.  Keep it in the MFU state.
		 *
		 * NOTE: an add_reference() that occurred when we did
		 * the arc_read() will have kicked this off the list.
		 * If it was a prefetch, we will explicitly move it to
		 * the head of the list now.
		 */
		if ((buf->b_flags & ARC_PREFETCH) != 0) {
			ASSERT(refcount_count(&buf->b_refcnt) == 0);
			ASSERT(list_link_active(&buf->b_arc_node));
		}
		ARCSTAT_BUMP(arcstat_mfu_hits);
		buf->b_arc_access = LBOLT;
	} else if (buf->b_state == arc_mfu_ghost) {
		arc_state_t	*new_state = arc_mfu;
		/*
		 * This buffer has been accessed more than once but has
		 * been evicted from the cache.  Move it back to the
		 * MFU state.
		 */

		if (buf->b_flags & ARC_PREFETCH) {
			/*
			 * This is a prefetch access...
			 * move this block back to the MRU state.
			 */
			ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
			new_state = arc_mru;
		}

		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(new_state, buf, hash_lock);

		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
	} else if (buf->b_state == arc_l2c_only) {
		/*
		 * This buffer is on the 2nd Level ARC.
		 */

		buf->b_arc_access = LBOLT;
		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
		arc_change_state(arc_mfu, buf, hash_lock);
	} else {
		ASSERT(!"invalid arc state");
	}
}

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	bcopy(buf->b_data, arg, buf->b_hdr->b_size);
	VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
	arc_buf_t **bufp = arg;
	if (zio && zio->io_error) {
		VERIFY(arc_buf_remove_ref(buf, arg) == 1);
		*bufp = NULL;
	} else {
		*bufp = buf;
	}
}

static void
arc_read_done(zio_t *zio)
{
	arc_buf_hdr_t	*hdr, *found;
	arc_buf_t	*buf;
	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
	kmutex_t	*hash_lock;
	arc_callback_t	*callback_list, *acb;
	int		freeable = FALSE;

	buf = zio->io_private;
	hdr = buf->b_hdr;

	/*
	 * The hdr was inserted into hash-table and removed from lists
	 * prior to starting I/O.  We should find this header, since
	 * it's in the hash table, and it should be legit since it's
	 * not possible to evict it during the I/O.  The only possible
	 * reason for it not to be found is if we were freed during the
	 * read.
	 */
	found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
	    &hash_lock);

	ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
	    (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
	    (found == hdr && HDR_L2_READING(hdr)));

	hdr->b_flags &= ~ARC_L2_EVICTED;
	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
		hdr->b_flags &= ~ARC_L2CACHE;

	/* byteswap if necessary */
	callback_list = hdr->b_acb;
	ASSERT(callback_list != NULL);
	if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
		    byteswap_uint64_array :
		    dmu_ot[BP_GET_TYPE(zio->io_bp)].ot_byteswap;
		func(buf->b_data, hdr->b_size);
	}

	arc_cksum_compute(buf, B_FALSE);

	/* create copies of the data buffer for the callers */
	abuf = buf;
	for (acb = callback_list; acb; acb = acb->acb_next) {
		if (acb->acb_done) {
			if (abuf == NULL)
				abuf = arc_buf_clone(buf);
			acb->acb_buf = abuf;
			abuf = NULL;
		}
	}
	hdr->b_acb = NULL;
	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	ASSERT(!HDR_BUF_AVAILABLE(hdr));
	if (abuf == buf)
		hdr->b_flags |= ARC_BUF_AVAILABLE;

	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

	if (zio->io_error != 0) {
		hdr->b_flags |= ARC_IO_ERROR;
		if (hdr->b_state != arc_anon)
			arc_change_state(arc_anon, hdr, hash_lock);
		if (HDR_IN_HASH_TABLE(hdr))
			buf_hash_remove(hdr);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/*
	 * Broadcast before we drop the hash_lock to avoid the possibility
	 * that the hdr (and hence the cv) might be freed before we get to
	 * the cv_broadcast().
	 */
	cv_broadcast(&hdr->b_cv);

	if (hash_lock) {
		/*
		 * Only call arc_access on anonymous buffers.  This is because
		 * if we've issued an I/O for an evicted buffer, we've already
		 * called arc_access (to prevent any simultaneous readers from
		 * getting confused).
		 */
		if (zio->io_error == 0 && hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else {
		/*
		 * This block was freed while we waited for the read to
		 * complete.  It has been removed from the hash table and
		 * moved to the anonymous state (so that it won't show up
		 * in the cache).
		 */
		ASSERT3P(hdr->b_state, ==, arc_anon);
		freeable = refcount_is_zero(&hdr->b_refcnt);
	}

	/* execute each callback and free its structure */
	while ((acb = callback_list) != NULL) {
		if (acb->acb_done)
			acb->acb_done(zio, acb->acb_buf, acb->acb_private);

		if (acb->acb_zio_dummy != NULL) {
			acb->acb_zio_dummy->io_error = zio->io_error;
			zio_nowait(acb->acb_zio_dummy);
		}

		callback_list = acb->acb_next;
		kmem_free(acb, sizeof (arc_callback_t));
	}

	if (freeable)
		arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 *
 * Normal callers should use arc_read and pass the arc buffer and offset
 * for the bp.  But if you know you don't need locking, you can use
 * arc_read_bp.
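 *
 * An illustrative call (the done callback and its cookie are
 * hypothetical; `pbuf' is the already-held arc buffer containing bp):
 *
 *	uint32_t flags = ARC_WAIT;
 *	(void) arc_read(NULL, spa, bp, pbuf, my_done_func, my_arg,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &flags, &zb);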
 */
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_buf_t *pbuf,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	int err;
	arc_buf_hdr_t *hdr = pbuf->b_hdr;

	ASSERT(!refcount_is_zero(&pbuf->b_hdr->b_refcnt));
	ASSERT3U((char *)bp - (char *)pbuf->b_data, <, pbuf->b_hdr->b_size);
	rw_enter(&pbuf->b_lock, RW_READER);

	err = arc_read_nolock(pio, spa, bp, done, private, priority,
	    zio_flags, arc_flags, zb);

	ASSERT3P(hdr, ==, pbuf->b_hdr);
	rw_exit(&pbuf->b_lock);
	return (err);
}

int
arc_read_nolock(zio_t *pio, spa_t *spa, blkptr_t *bp,
    arc_done_func_t *done, void *private, int priority, int zio_flags,
    uint32_t *arc_flags, const zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	zio_t *rzio;

top:
	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (hdr && hdr->b_datacnt > 0) {

		*arc_flags |= ARC_CACHED;

		if (HDR_IO_IN_PROGRESS(hdr)) {

			if (*arc_flags & ARC_WAIT) {
				cv_wait(&hdr->b_cv, hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			ASSERT(*arc_flags & ARC_NOWAIT);

			if (done) {
				arc_callback_t	*acb = NULL;

				acb = kmem_zalloc(sizeof (arc_callback_t),
				    KM_SLEEP);
				acb->acb_done = done;
				acb->acb_private = private;
				if (pio != NULL)
					acb->acb_zio_dummy = zio_null(pio,
					    spa, NULL, NULL, zio_flags);

				ASSERT(acb->acb_done != NULL);
				acb->acb_next = hdr->b_acb;
				hdr->b_acb = acb;
				add_reference(hdr, hash_lock, private);
				mutex_exit(hash_lock);
				return (0);
			}
			mutex_exit(hash_lock);
			return (0);
		}

		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

		if (done) {
			add_reference(hdr, hash_lock, private);
			/*
			 * If this block is already in use, create a new
			 * copy of the data so that we will be guaranteed
			 * that arc_release() will always succeed.
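			 * (arc_buf_clone() below supplies that private
			 * copy.)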
			 */
			buf = hdr->b_buf;
			ASSERT(buf);
			ASSERT(buf->b_data);
			if (HDR_BUF_AVAILABLE(hdr)) {
				ASSERT(buf->b_efunc == NULL);
				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
			} else {
				buf = arc_buf_clone(buf);
			}
		} else if (*arc_flags & ARC_PREFETCH &&
		    refcount_count(&hdr->b_refcnt) == 0) {
			hdr->b_flags |= ARC_PREFETCH;
		}
		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
		arc_access(hdr, hash_lock);
		if (*arc_flags & ARC_L2CACHE)
			hdr->b_flags |= ARC_L2CACHE;
		mutex_exit(hash_lock);
		ARCSTAT_BUMP(arcstat_hits);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, hits);

		if (done)
			done(NULL, buf, private);
	} else {
		uint64_t size = BP_GET_LSIZE(bp);
		arc_callback_t	*acb;
		vdev_t *vd = NULL;
		daddr_t addr;

		if (hdr == NULL) {
			/* this block is not in the cache */
			arc_buf_hdr_t	*exists;
			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
			buf = arc_buf_alloc(spa, size, private, type);
			hdr = buf->b_hdr;
			hdr->b_dva = *BP_IDENTITY(bp);
			hdr->b_birth = bp->blk_birth;
			hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
			exists = buf_hash_insert(hdr, &hash_lock);
			if (exists) {
				/* somebody beat us to the hash insert */
				mutex_exit(hash_lock);
				bzero(&hdr->b_dva, sizeof (dva_t));
				hdr->b_birth = 0;
				hdr->b_cksum0 = 0;
				(void) arc_buf_remove_ref(buf, private);
				goto top; /* restart the IO request */
			}
			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH) {
				(void) remove_reference(hdr, hash_lock,
				    private);
				hdr->b_flags |= ARC_PREFETCH;
			}
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			if (BP_GET_LEVEL(bp) > 0)
				hdr->b_flags |= ARC_INDIRECT;
		} else {
			/* this block is in the ghost cache */
			ASSERT(GHOST_STATE(hdr->b_state));
			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
			ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
			ASSERT(hdr->b_buf == NULL);

			/* if this is a prefetch, we don't have a reference */
			if (*arc_flags & ARC_PREFETCH)
				hdr->b_flags |= ARC_PREFETCH;
			else
				add_reference(hdr, hash_lock, private);
			if (*arc_flags & ARC_L2CACHE)
				hdr->b_flags |= ARC_L2CACHE;
			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
			buf->b_hdr = hdr;
			buf->b_data = NULL;
			buf->b_efunc = NULL;
			buf->b_private = NULL;
			buf->b_next = NULL;
			hdr->b_buf = buf;
			arc_get_data_buf(buf);
			ASSERT(hdr->b_datacnt == 0);
			hdr->b_datacnt = 1;

		}

		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
		acb->acb_done = done;
		acb->acb_private = private;

		ASSERT(hdr->b_acb == NULL);
		hdr->b_acb = acb;
		hdr->b_flags |= ARC_IO_IN_PROGRESS;

		/*
		 * If the buffer has been evicted, migrate it to a present state
		 * before issuing the I/O.  Once we drop the hash-table lock,
		 * the header will be marked as I/O in progress and have an
		 * attached buffer.  At this point, anybody who finds this
		 * buffer ought to notice that it's legit but has a pending I/O.
		 */

		if (GHOST_STATE(hdr->b_state))
			arc_access(hdr, hash_lock);

		if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
		    (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
			addr = hdr->b_l2hdr->b_daddr;
			/*
			 * Lock out device removal.
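			 * If the vdev is dead or the SCL_L2ARC lock
			 * cannot be taken, vd is cleared and the read
			 * falls through to the regular pool I/O path.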
			 */
			if (vdev_is_dead(vd) ||
			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
				vd = NULL;
		}

		mutex_exit(hash_lock);

		ASSERT3U(hdr->b_size, ==, size);
		DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
		    zbookmark_t *, zb);
		ARCSTAT_BUMP(arcstat_misses);
		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
		    data, metadata, misses);

		if (vd != NULL) {
			/*
			 * Read from the L2ARC if the following are true:
			 * 1. The L2ARC vdev was previously cached.
			 * 2. This buffer still has L2ARC metadata.
			 * 3. This buffer isn't currently writing to the L2ARC.
			 * 4. The L2ARC entry wasn't evicted, which may
			 *    also have invalidated the vdev.
			 */
			if (hdr->b_l2hdr != NULL &&
			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr)) {
				l2arc_read_callback_t *cb;

				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_hits);

				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
				    KM_SLEEP);
				cb->l2rcb_buf = buf;
				cb->l2rcb_spa = spa;
				cb->l2rcb_bp = *bp;
				cb->l2rcb_zb = *zb;
				cb->l2rcb_flags = zio_flags;

				/*
				 * l2arc read.  The SCL_L2ARC lock will be
				 * released by l2arc_read_done().
				 */
				rzio = zio_read_phys(pio, vd, addr, size,
				    buf->b_data, ZIO_CHECKSUM_OFF,
				    l2arc_read_done, cb, priority, zio_flags |
				    ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
				    ZIO_FLAG_DONT_PROPAGATE |
				    ZIO_FLAG_DONT_RETRY, B_FALSE);
				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
				    zio_t *, rzio);

				if (*arc_flags & ARC_NOWAIT) {
					zio_nowait(rzio);
					return (0);
				}

				ASSERT(*arc_flags & ARC_WAIT);
				if (zio_wait(rzio) == 0)
					return (0);

				/* l2arc read error; goto zio_read() */
			} else {
				DTRACE_PROBE1(l2arc__miss,
				    arc_buf_hdr_t *, hdr);
				ARCSTAT_BUMP(arcstat_l2_misses);
				if (HDR_L2_WRITING(hdr))
					ARCSTAT_BUMP(arcstat_l2_rw_clash);
				spa_config_exit(spa, SCL_L2ARC, vd);
			}
		}

		rzio = zio_read(pio, spa, bp, buf->b_data, size,
		    arc_read_done, buf, priority, zio_flags, zb);

		if (*arc_flags & ARC_WAIT)
			return (zio_wait(rzio));

		ASSERT(*arc_flags & ARC_NOWAIT);
		zio_nowait(rzio);
	}
	return (0);
}

/*
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
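 *
 * Sketch of the intended calling pattern (`data' is a caller-owned
 * buffer of at least BP_GET_LSIZE(bp) bytes):
 *
 *	if (arc_tryread(spa, bp, data) == ENOENT)
 *		(not cached; the traverser issues the read itself)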
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_mtx;
	int rc = 0;

	hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

	if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
		arc_buf_t *buf = hdr->b_buf;

		ASSERT(buf);
		while (buf->b_data == NULL) {
			buf = buf->b_next;
			ASSERT(buf);
		}
		bcopy(buf->b_data, data, hdr->b_size);
	} else {
		rc = ENOENT;
	}

	if (hash_mtx)
		mutex_exit(hash_mtx);

	return (rc);
}

void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
	ASSERT(buf->b_hdr != NULL);
	ASSERT(buf->b_hdr->b_state != arc_anon);
	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
	buf->b_efunc = func;
	buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	arc_buf_t **bufp;

	rw_enter(&buf->b_lock, RW_WRITER);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		/*
		 * We are in arc_do_user_evicts().
		 */
		ASSERT(buf->b_data == NULL);
		rw_exit(&buf->b_lock);
		return (0);
	} else if (buf->b_data == NULL) {
		arc_buf_t copy = *buf; /* structure assignment */
		/*
		 * We are on the eviction list; process this buffer now
		 * but let arc_do_user_evicts() do the reaping.
		 */
		buf->b_efunc = NULL;
		rw_exit(&buf->b_lock);
		VERIFY(copy.b_efunc(&copy) == 0);
		return (1);
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr == hdr);
	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

	/*
	 * Pull this buffer off of the hdr
	 */
	bufp = &hdr->b_buf;
	while (*bufp != buf)
		bufp = &(*bufp)->b_next;
	*bufp = buf->b_next;

	ASSERT(buf->b_data != NULL);
	arc_buf_destroy(buf, FALSE, FALSE);

	if (hdr->b_datacnt == 0) {
		arc_state_t *old_state = hdr->b_state;
		arc_state_t *evicted_state;

		ASSERT(refcount_is_zero(&hdr->b_refcnt));

		evicted_state =
		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

		mutex_enter(&old_state->arcs_mtx);
		mutex_enter(&evicted_state->arcs_mtx);

		arc_change_state(evicted_state, hdr, hash_lock);
		ASSERT(HDR_IN_HASH_TABLE(hdr));
		hdr->b_flags |= ARC_IN_HASH_TABLE;
		hdr->b_flags &= ~ARC_BUF_AVAILABLE;

		mutex_exit(&evicted_state->arcs_mtx);
		mutex_exit(&old_state->arcs_mtx);
	}
	mutex_exit(hash_lock);
	rw_exit(&buf->b_lock);

	VERIFY(buf->b_efunc(buf) == 0);
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
	return (1);
}

/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
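 *
 * Typical sequence (sketch): arc_read() the block, arc_release(buf, tag)
 * before dirtying buf->b_data, then arc_write() to push out the new
 * contents.  Releasing detaches the buffer into the anonymous state.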
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;
	l2arc_buf_hdr_t *l2hdr;
	uint64_t buf_size;

	rw_enter(&buf->b_lock, RW_WRITER);
	hdr = buf->b_hdr;

	/* this buffer is not on any list */
	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
	ASSERT(!(hdr->b_flags & ARC_STORED));

	if (hdr->b_state == arc_anon) {
		/* this buffer is already released */
		ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
		ASSERT(BUF_EMPTY(hdr));
		ASSERT(buf->b_efunc == NULL);
		arc_buf_thaw(buf);
		rw_exit(&buf->b_lock);
		return;
	}

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	l2hdr = hdr->b_l2hdr;
	if (l2hdr) {
		mutex_enter(&l2arc_buflist_mtx);
		hdr->b_l2hdr = NULL;
		buf_size = hdr->b_size;
	}

	/*
	 * Do we have more than one buf?
	 */
	if (hdr->b_datacnt > 1) {
		arc_buf_hdr_t *nhdr;
		arc_buf_t **bufp;
		uint64_t blksz = hdr->b_size;
		spa_t *spa = hdr->b_spa;
		arc_buf_contents_t type = hdr->b_type;
		uint32_t flags = hdr->b_flags;

		ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
		/*
		 * Pull the data off of this buf and attach it to
		 * a new anonymous buf.
		 */
		(void) remove_reference(hdr, hash_lock, tag);
		bufp = &hdr->b_buf;
		while (*bufp != buf)
			bufp = &(*bufp)->b_next;
		*bufp = (*bufp)->b_next;
		buf->b_next = NULL;

		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
		if (refcount_is_zero(&hdr->b_refcnt)) {
			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
			ASSERT3U(*size, >=, hdr->b_size);
			atomic_add_64(size, -hdr->b_size);
		}
		hdr->b_datacnt -= 1;
		arc_cksum_verify(buf);

		mutex_exit(hash_lock);

		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
		nhdr->b_size = blksz;
		nhdr->b_spa = spa;
		nhdr->b_type = type;
		nhdr->b_buf = buf;
		nhdr->b_state = arc_anon;
		nhdr->b_arc_access = 0;
		nhdr->b_flags = flags & ARC_L2_WRITING;
		nhdr->b_l2hdr = NULL;
		nhdr->b_datacnt = 1;
		nhdr->b_freeze_cksum = NULL;
		(void) refcount_add(&nhdr->b_refcnt, tag);
		buf->b_hdr = nhdr;
		rw_exit(&buf->b_lock);
		atomic_add_64(&arc_anon->arcs_size, blksz);
	} else {
		rw_exit(&buf->b_lock);
		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
		ASSERT(!list_link_active(&hdr->b_arc_node));
		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
		arc_change_state(arc_anon, hdr, hash_lock);
		hdr->b_arc_access = 0;
		mutex_exit(hash_lock);

		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
		arc_buf_thaw(buf);
	}
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	if (l2hdr) {
		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
		mutex_exit(&l2arc_buflist_mtx);
	}
}

int
arc_released(arc_buf_t *buf)
{
	int released;

	rw_enter(&buf->b_lock, RW_READER);
	released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
	rw_exit(&buf->b_lock);
	return (released);
}

int
arc_has_callback(arc_buf_t *buf)
{
	int callback;

	rw_enter(&buf->b_lock, RW_READER);
	callback = (buf->b_efunc != NULL);
	rw_exit(&buf->b_lock);
	return (callback);
}

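/*
 * Illustrative sketch (not compiled): how a consumer might pair
 * arc_set_callback() with the eviction machinery above.  The example_*
 * names are hypothetical; the DMU's dbuf layer is the real consumer.
 */
#if 0
static int
example_evict_func(arc_buf_t *buf)
{
	/* tear down any consumer state attached via arc_set_callback() */
	void *private = buf->b_private;

	(void) private;
	return (0);
}

static void
example_register(arc_buf_t *buf, void *private)
{
	/* the ARC will invoke example_evict_func() when evicting buf */
	arc_set_callback(buf, example_evict_func, private);
}
#endif
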
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
	int referenced;

	rw_enter(&buf->b_lock, RW_READER);
	referenced = (refcount_count(&buf->b_hdr->b_refcnt));
	rw_exit(&buf->b_lock);
	return (referenced);
}
#endif

static void
arc_write_ready(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
	callback->awcb_ready(zio, buf, callback->awcb_private);

	/*
	 * If the IO is already in progress, then this is a re-write
	 * attempt, so we need to thaw and re-compute the cksum.
	 * It is the responsibility of the callback to handle the
	 * accounting for any re-write attempt.
	 */
	if (HDR_IO_IN_PROGRESS(hdr)) {
		mutex_enter(&hdr->b_freeze_lock);
		if (hdr->b_freeze_cksum != NULL) {
			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
			hdr->b_freeze_cksum = NULL;
		}
		mutex_exit(&hdr->b_freeze_lock);
	}
	arc_cksum_compute(buf, B_FALSE);
	hdr->b_flags |= ARC_IO_IN_PROGRESS;
}

static void
arc_write_done(zio_t *zio)
{
	arc_write_callback_t *callback = zio->io_private;
	arc_buf_t *buf = callback->awcb_buf;
	arc_buf_hdr_t *hdr = buf->b_hdr;

	hdr->b_acb = NULL;

	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
	hdr->b_birth = zio->io_bp->blk_birth;
	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
	/*
	 * If the block to be written was all-zero, we may have
	 * compressed it away.  In this case no write was performed
	 * so there will be no dva/birth-date/checksum.  The buffer
	 * must therefore remain anonymous (and uncached).
	 */
	if (!BUF_EMPTY(hdr)) {
		arc_buf_hdr_t *exists;
		kmutex_t *hash_lock;

		arc_cksum_verify(buf);

		exists = buf_hash_insert(hdr, &hash_lock);
		if (exists) {
			/*
			 * This can only happen if we overwrite for
			 * sync-to-convergence, because we remove
			 * buffers from the hash table when we arc_free().
			 */
			ASSERT(zio->io_flags & ZIO_FLAG_IO_REWRITE);
			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
			    BP_IDENTITY(zio->io_bp)));
			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
			    zio->io_bp->blk_birth);

			ASSERT(refcount_is_zero(&exists->b_refcnt));
			arc_change_state(arc_anon, exists, hash_lock);
			mutex_exit(hash_lock);
			arc_hdr_destroy(exists);
			exists = buf_hash_insert(hdr, &hash_lock);
			ASSERT3P(exists, ==, NULL);
		}
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		/* if it's not anon, we are doing a scrub */
		if (hdr->b_state == arc_anon)
			arc_access(hdr, hash_lock);
		mutex_exit(hash_lock);
	} else if (callback->awcb_done == NULL) {
		int destroy_hdr;
		/*
		 * This is an anonymous buffer with no user callback,
		 * destroy it if there are no active references.
		 */
		mutex_enter(&arc_eviction_mtx);
		destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
	}
	hdr->b_flags &= ~ARC_STORED;

	if (callback->awcb_done) {
		ASSERT(!refcount_is_zero(&hdr->b_refcnt));
		callback->awcb_done(zio, buf, callback->awcb_private);
	}

	kmem_free(callback, sizeof (arc_write_callback_t));
}

static void
write_policy(spa_t *spa, const writeprops_t *wp, zio_prop_t *zp)
{
	boolean_t ismd = (wp->wp_level > 0 || dmu_ot[wp->wp_type].ot_metadata);

	/* Determine checksum setting */
	if (ismd) {
		/*
		 * Metadata always gets checksummed.  If the data
		 * checksum is multi-bit correctable, and it's not a
		 * ZBT-style checksum, then it's suitable for metadata
		 * as well.  Otherwise, the metadata checksum defaults
		 * to fletcher4.
		 */
		if (zio_checksum_table[wp->wp_oschecksum].ci_correctable &&
		    !zio_checksum_table[wp->wp_oschecksum].ci_zbt)
			zp->zp_checksum = wp->wp_oschecksum;
		else
			zp->zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
	} else {
		zp->zp_checksum = zio_checksum_select(wp->wp_dnchecksum,
		    wp->wp_oschecksum);
	}

	/* Determine compression setting */
	if (ismd) {
		/*
		 * XXX -- we should design a compression algorithm
		 * that specializes in arrays of bps.
		 */
		zp->zp_compress = zfs_mdcomp_disable ? ZIO_COMPRESS_EMPTY :
		    ZIO_COMPRESS_LZJB;
	} else {
		zp->zp_compress = zio_compress_select(wp->wp_dncompress,
		    wp->wp_oscompress);
	}

	zp->zp_type = wp->wp_type;
	zp->zp_level = wp->wp_level;
	zp->zp_ndvas = MIN(wp->wp_copies + ismd, spa_max_replication(spa));
}

zio_t *
arc_write(zio_t *pio, spa_t *spa, const writeprops_t *wp,
    boolean_t l2arc, uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
    int zio_flags, const zbookmark_t *zb)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	arc_write_callback_t *callback;
	zio_t	*zio;
	zio_prop_t zp;

	ASSERT(ready != NULL);
	ASSERT(!HDR_IO_ERROR(hdr));
	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
	ASSERT(hdr->b_acb == 0);
	if (l2arc)
		hdr->b_flags |= ARC_L2CACHE;
	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
	callback->awcb_ready = ready;
	callback->awcb_done = done;
	callback->awcb_private = private;
	callback->awcb_buf = buf;

	write_policy(spa, wp, &zp);
	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, &zp,
	    arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);

	return (zio);
}

int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	arc_buf_hdr_t *ab;
	kmutex_t *hash_lock;
	zio_t	*zio;

	/*
	 * If this buffer is in the cache, release it, so it
	 * can be re-used.
	 */
	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (ab != NULL) {
		/*
		 * The checksum of blocks to free is not always
		 * preserved (e.g. on the deadlist).  However, if it is
		 * nonzero, it should match what we have in the cache.

int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
	arc_buf_hdr_t *ab;
	kmutex_t *hash_lock;
	zio_t *zio;

	/*
	 * If this buffer is in the cache, release it, so it
	 * can be re-used.
	 */
	ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
	if (ab != NULL) {
		/*
		 * The checksum of blocks to free is not always
		 * preserved (e.g. on the deadlist).  However, if it is
		 * nonzero, it should match what we have in the cache.
		 */
		ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
		    bp->blk_cksum.zc_word[0] == ab->b_cksum0 ||
		    bp->blk_fill == BLK_FILL_ALREADY_FREED);

		if (ab->b_state != arc_anon)
			arc_change_state(arc_anon, ab, hash_lock);
		if (HDR_IO_IN_PROGRESS(ab)) {
			/*
			 * This should only happen when we prefetch.
			 */
			ASSERT(ab->b_flags & ARC_PREFETCH);
			ASSERT3U(ab->b_datacnt, ==, 1);
			ab->b_flags |= ARC_FREED_IN_READ;
			if (HDR_IN_HASH_TABLE(ab))
				buf_hash_remove(ab);
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		} else if (refcount_is_zero(&ab->b_refcnt)) {
			ab->b_flags |= ARC_FREE_IN_PROGRESS;
			mutex_exit(hash_lock);
			arc_hdr_destroy(ab);
			ARCSTAT_BUMP(arcstat_deleted);
		} else {
			/*
			 * We still have an active reference on this
			 * buffer.  This can happen, e.g., from
			 * dbuf_unoverride().
			 */
			ASSERT(!HDR_IN_HASH_TABLE(ab));
			ab->b_arc_access = 0;
			bzero(&ab->b_dva, sizeof (dva_t));
			ab->b_birth = 0;
			ab->b_cksum0 = 0;
			ab->b_buf->b_efunc = NULL;
			ab->b_buf->b_private = NULL;
			mutex_exit(hash_lock);
		}
	}

	zio = zio_free(pio, spa, txg, bp, done, private, ZIO_FLAG_MUSTSUCCEED);

	if (arc_flags & ARC_WAIT)
		return (zio_wait(zio));

	ASSERT(arc_flags & ARC_NOWAIT);
	zio_nowait(zio);

	return (0);
}
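
/*
 * Editor's sketch (not part of the original source): arc_free() callers
 * choose blocking semantics with the ARC_WAIT/ARC_NOWAIT flags.  The
 * surrounding variables are hypothetical.
 */
#if 0
	/* Synchronous free: returns once the zio has completed. */
	(void) arc_free(pio, spa, txg, bp, NULL, NULL, ARC_WAIT);

	/* Asynchronous free: fire and forget under the parent zio. */
	(void) arc_free(pio, spa, txg, bp, NULL, NULL, ARC_NOWAIT);
#endif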

static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
	uint64_t inflight_data = arc_anon->arcs_size;
	uint64_t available_memory = ptoa((uintmax_t)cnt.v_free_count);
	static uint64_t page_load = 0;
	static uint64_t last_txg = 0;

#if 0
#if defined(__i386)
	available_memory =
	    MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif
#endif
	if (available_memory >= zfs_write_limit_max)
		return (0);

	if (txg > last_txg) {
		last_txg = txg;
		page_load = 0;
	}
	/*
	 * If we are in pageout, we know that memory is already tight,
	 * the arc is already going to be evicting, so we just want to
	 * continue to let page writes occur as quickly as possible.
	 */
	if (curproc == pageproc) {
		if (page_load > available_memory / 4)
			return (ERESTART);
		/* Note: reserve is inflated, so we deflate */
		page_load += reserve / 8;
		return (0);
	} else if (page_load > 0 && arc_reclaim_needed()) {
		/* memory is low, delay before restarting */
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (EAGAIN);
	}
	page_load = 0;

	if (arc_size > arc_c_min) {
		uint64_t evictable_memory =
		    arc_mru->arcs_lsize[ARC_BUFC_DATA] +
		    arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
		    arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
		available_memory += MIN(evictable_memory, arc_size - arc_c_min);
	}

	if (inflight_data > available_memory / 4) {
		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
		return (ERESTART);
	}
#endif
	return (0);
}
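
/*
 * Editor's note (not part of the original source): the pageout-path
 * arithmetic above, worked with assumed numbers.  With 1GB free, the
 * pageout process is throttled once page_load exceeds 256MB; each
 * (inflated) reserve contributes 1/8 of its size, so a 64MB reserve
 * adds 8MB to page_load.
 */
#if 0
static int
arc_memory_throttle_example(void)
{
	uint64_t available_memory = 1ULL << 30;	/* assume 1GB free */
	uint64_t reserve = 64ULL << 20;		/* assume a 64MB reserve */
	static uint64_t page_load = 0;

	if (page_load > available_memory / 4)	/* throttle past 256MB */
		return (ERESTART);
	page_load += reserve / 8;		/* this call adds 8MB */
	return (0);
}
#endif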

void
arc_tempreserve_clear(uint64_t reserve)
{
	atomic_add_64(&arc_tempreserve, -reserve);
	ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
	int error;

#ifdef ZFS_DEBUG
	/*
	 * Once in a while, fail for no reason.  Everything should cope.
	 */
	if (spa_get_random(10000) == 0) {
		dprintf("forcing random failure\n");
		return (ERESTART);
	}
#endif
	if (reserve > arc_c/4 && !arc_no_grow)
		arc_c = MIN(arc_c_max, reserve * 4);
	if (reserve > arc_c)
		return (ENOMEM);

	/*
	 * Writes will, almost always, require additional memory allocations
	 * in order to compress/encrypt/etc the data.  We therefore need to
	 * make sure that there is sufficient available memory for this.
	 */
	if (error = arc_memory_throttle(reserve, txg))
		return (error);

	/*
	 * Throttle writes when the amount of dirty data in the cache
	 * gets too large.  We try to keep the cache less than half full
	 * of dirty blocks so that our sync times don't grow too large.
	 * Note: if two requests come in concurrently, we might let them
	 * both succeed, when one of them should fail.  Not a huge deal.
	 */
	if (reserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
	    arc_anon->arcs_size > arc_c / 4) {
		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
		    arc_tempreserve>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
		    reserve>>10, arc_c>>10);
		return (ERESTART);
	}
	atomic_add_64(&arc_tempreserve, reserve);
	return (0);
}
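
/*
 * Editor's sketch (not part of the original source): how a reservation
 * is typically paired with its release.  In the real code the DMU's
 * transaction layer makes these calls; the retry policy shown here is a
 * simplified assumption.
 */
#if 0
static int
example_reserve(uint64_t lsize, uint64_t txg)
{
	int error;

	/* ERESTART/EAGAIN mean "back off and retry", not hard failure. */
	while ((error = arc_tempreserve_space(lsize, txg)) != 0) {
		if (error != ERESTART && error != EAGAIN)
			return (error);		/* e.g. ENOMEM */
		delay(1);
	}

	/* ... dirty the data ... */

	arc_tempreserve_clear(lsize);
	return (0);
}
#endif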

static kmutex_t arc_lowmem_lock;
#ifdef _KERNEL
static eventhandler_tag arc_event_lowmem = NULL;

static void
arc_lowmem(void *arg __unused, int howto __unused)
{

	/* Serialize access via arc_lowmem_lock. */
	mutex_enter(&arc_lowmem_lock);
	needfree = 1;
	cv_signal(&arc_reclaim_thr_cv);
	while (needfree)
		tsleep(&needfree, 0, "zfs:lowmem", hz / 5);
	mutex_exit(&arc_lowmem_lock);
}
#endif

void
arc_init(void)
{
	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);

	/* Convert seconds to clock ticks */
	arc_min_prefetch_lifespan = 1 * hz;

	/* Start out with 1/8 of all memory */
	arc_c = kmem_size() / 8;
#if 0
#ifdef _KERNEL
	/*
	 * On architectures where the physical memory can be larger
	 * than the addressable space (intel in 32-bit mode), we may
	 * need to limit the cache to 1/8 of VM size.
	 */
	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif
#endif
	/* set min cache to 1/32 of all memory, or 16MB, whichever is more */
	arc_c_min = MAX(arc_c / 4, 64<<18);
	/* set max to 1/2 of all memory, or all but 1GB, whichever is more */
	if (arc_c * 8 >= 1<<30)
		arc_c_max = (arc_c * 8) - (1<<30);
	else
		arc_c_max = arc_c_min;
	arc_c_max = MAX(arc_c * 5, arc_c_max);
#ifdef _KERNEL
	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable (i.e. over 16MB)
	 */
	if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size())
		arc_c_max = zfs_arc_max;
	if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;
#endif
	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	zfs_arc_min = arc_c_min;
	zfs_arc_max = arc_c_max;

	arc_anon = &ARC_anon;
	arc_mru = &ARC_mru;
	arc_mru_ghost = &ARC_mru_ghost;
	arc_mfu = &ARC_mfu;
	arc_mfu_ghost = &ARC_mfu_ghost;
	arc_l2c_only = &ARC_l2c_only;
	arc_size = 0;

	mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&arc_l2c_only->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

	list_create(&arc_mru->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
	list_create(&arc_l2c_only->arcs_list[ARC_BUFC_DATA],
	    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));

	buf_init();

	arc_thread_exit = 0;
	arc_eviction_list = NULL;
	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		kstat_install(arc_ksp);
	}

	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

#ifdef _KERNEL
	arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
	    EVENTHANDLER_PRI_FIRST);
#endif

	arc_dead = FALSE;
	arc_warm = B_FALSE;

	if (zfs_write_limit_max == 0)
		zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
	else
		zfs_write_limit_shift = 0;
	mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);

#ifdef _KERNEL
	/* Warn about ZFS memory and address space requirements. */
	if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
		printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
		    "expect unstable behavior.\n");
	}
	if (kmem_size() < 512 * (1 << 20)) {
		printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
		    "expect unstable behavior.\n");
		printf("             Consider tuning vm.kmem_size and "
		    "vm.kmem_size_max\n");
		printf("             in /boot/loader.conf.\n");
	}
#endif
}
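
/*
 * Editor's note (not part of the original source): the sizing logic
 * above, worked for an assumed kmem_size() of 4GB with no tunables set:
 *
 *	arc_c      = 4GB / 8                 = 512MB
 *	arc_c_min  = MAX(512MB / 4, 16MB)    = 128MB
 *	arc_c * 8  = 4GB >= 1GB, so
 *	arc_c_max  = 4GB - 1GB               = 3GB
 *	arc_c_max  = MAX(512MB * 5, 3GB)     = 3GB
 *	arc_c      = arc_c_max               = 3GB, arc_p = 1.5GB
 *	arc_meta_limit = 3GB / 4             = 768MB
 *	arc_c_min  = MAX(128MB, 768MB / 2)   = 384MB
 */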

void
arc_fini(void)
{

	mutex_enter(&arc_reclaim_thr_lock);
	arc_thread_exit = 1;
	cv_signal(&arc_reclaim_thr_cv);
	while (arc_thread_exit != 0)
		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
	mutex_exit(&arc_reclaim_thr_lock);

	arc_flush(NULL);

	arc_dead = TRUE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	mutex_destroy(&arc_eviction_mtx);
	mutex_destroy(&arc_reclaim_thr_lock);
	cv_destroy(&arc_reclaim_thr_cv);

	list_destroy(&arc_mru->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
	list_destroy(&arc_mru->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu->arcs_list[ARC_BUFC_DATA]);
	list_destroy(&arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);

	mutex_destroy(&arc_anon->arcs_mtx);
	mutex_destroy(&arc_mru->arcs_mtx);
	mutex_destroy(&arc_mru_ghost->arcs_mtx);
	mutex_destroy(&arc_mfu->arcs_mtx);
	mutex_destroy(&arc_mfu_ghost->arcs_mtx);

	mutex_destroy(&zfs_write_limit_lock);

	buf_fini();

	mutex_destroy(&arc_lowmem_lock);
#ifdef _KERNEL
	if (arc_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
#endif
}

/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   :   ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         V
 *	          +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *	                     32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the list
 * heads for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk-based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 */
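
/*
 * Editor's sketch (not part of the original source): one feed cycle,
 * condensed from l2arc_feed_thread() below.  Error handling and locking
 * are elided; see the real thread for the full sequence.
 */
#if 0
	l2arc_dev_t *dev = l2arc_dev_get_next();	/* rotor + config lock */
	uint64_t size = dev->l2ad_write;		/* l2arc_write_max */

	if (arc_warm == B_FALSE)
		size += dev->l2ad_boost;		/* faster warmup */
	l2arc_evict(dev, size, B_FALSE);		/* clear space ahead */
	l2arc_write_buffers(dev->l2ad_spa, dev, size);	/* copy from ARC */
	spa_config_exit(dev->l2ad_spa, SCL_L2ARC, dev);
#endif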

static void
l2arc_hdr_stat_add(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
	 */
	mutex_enter(&spa_namespace_lock);
	mutex_enter(&l2arc_dev_mtx);

	/* if there are no vdevs, there is nothing to do */
	if (l2arc_ndev == 0)
		goto out;

	first = NULL;
	next = l2arc_dev_last;
	do {
		/* loop around the list looking for a non-faulted vdev */
		if (next == NULL) {
			next = list_head(l2arc_dev_list);
		} else {
			next = list_next(l2arc_dev_list, next);
			if (next == NULL)
				next = list_head(l2arc_dev_list);
		}

		/* if we have come back to the start, bail out */
		if (first == NULL)
			first = next;
		else if (next == first)
			break;

	} while (vdev_is_dead(next->l2ad_vdev));

	/* if we were unable to find any usable vdevs, return NULL */
	if (vdev_is_dead(next->l2ad_vdev))
		next = NULL;

	l2arc_dev_last = next;

out:
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Grab the config lock to prevent the 'next' device from being
	 * removed while we are writing to it.
	 */
	if (next != NULL)
		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
	mutex_exit(&spa_namespace_lock);

	return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write()
{
	list_t *buflist;
	l2arc_data_free_t *df, *df_prev;

	mutex_enter(&l2arc_free_on_write_mtx);
	buflist = l2arc_free_on_write;

	for (df = list_tail(buflist); df; df = df_prev) {
		df_prev = list_prev(buflist, df);
		ASSERT(df->l2df_data != NULL);
		ASSERT(df->l2df_func != NULL);
		df->l2df_func(df->l2df_data, df->l2df_size);
		list_remove(buflist, df);
		kmem_free(df, sizeof (l2arc_data_free_t));
	}

	mutex_exit(&l2arc_free_on_write_mtx);
}

/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
	l2arc_write_callback_t *cb;
	l2arc_dev_t *dev;
	list_t *buflist;
	arc_buf_hdr_t *head, *ab, *ab_prev;
	l2arc_buf_hdr_t *abl2;
	kmutex_t *hash_lock;

	cb = zio->io_private;
	ASSERT(cb != NULL);
	dev = cb->l2wcb_dev;
	ASSERT(dev != NULL);
	head = cb->l2wcb_head;
	ASSERT(head != NULL);
	buflist = dev->l2ad_buflist;
	ASSERT(buflist != NULL);
	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
	    l2arc_write_callback_t *, cb);

	if (zio->io_error != 0)
		ARCSTAT_BUMP(arcstat_l2_writes_error);

	mutex_enter(&l2arc_buflist_mtx);

	/*
	 * All writes completed, or an error was hit.
	 */
	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * This buffer misses out.  It may be in a stage
			 * of eviction.  Its ARC_L2_WRITING flag will be
			 * left set, denying reads to this buffer.
			 */
			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
			continue;
		}

		if (zio->io_error != 0) {
			/*
			 * Error - drop L2ARC entry.
			 */
			list_remove(buflist, ab);
			abl2 = ab->b_l2hdr;
			ab->b_l2hdr = NULL;
			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
		}

		/*
		 * Allow ARC to begin reads to this L2ARC entry.
		 */
		ab->b_flags &= ~ARC_L2_WRITING;

		mutex_exit(hash_lock);
	}

	atomic_inc_64(&l2arc_writes_done);
	list_remove(buflist, head);
	kmem_cache_free(hdr_cache, head);
	mutex_exit(&l2arc_buflist_mtx);

	l2arc_do_free_on_write();

	kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
	l2arc_read_callback_t *cb;
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;
	kmutex_t *hash_lock;
	int equal;

	ASSERT(zio->io_vd != NULL);
	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

	cb = zio->io_private;
	ASSERT(cb != NULL);
	buf = cb->l2rcb_buf;
	ASSERT(buf != NULL);
	hdr = buf->b_hdr;
	ASSERT(hdr != NULL);

	hash_lock = HDR_LOCK(hdr);
	mutex_enter(hash_lock);

	/*
	 * Check that this survived the L2ARC journey.
	 */
	equal = arc_cksum_equal(buf);
	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
		mutex_exit(hash_lock);
		zio->io_private = buf;
		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
		arc_read_done(zio);
	} else {
		mutex_exit(hash_lock);
		/*
		 * Buffer didn't survive caching.  Increment stats and
		 * reissue to the original storage device.
		 */
		if (zio->io_error != 0) {
			ARCSTAT_BUMP(arcstat_l2_io_error);
		} else {
			zio->io_error = EIO;
		}
		if (!equal)
			ARCSTAT_BUMP(arcstat_l2_cksum_bad);

		/*
		 * If there's no waiter, issue an async i/o to the primary
		 * storage now.  If there *is* a waiter, the caller must
		 * issue the i/o in a context where it's OK to block.
		 */
		if (zio->io_waiter == NULL)
			zio_nowait(zio_read(zio->io_parent,
			    cb->l2rcb_spa, &cb->l2rcb_bp,
			    buf->b_data, zio->io_size, arc_read_done, buf,
			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
	}

	kmem_free(cb, sizeof (l2arc_read_callback_t));
}

/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0..3) to cycle through lists in the
 * desired order.  This order can have a significant effect on cache
 * performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
	list_t *list;

	ASSERT(list_num >= 0 && list_num <= 3);

	switch (list_num) {
	case 0:
		list = &arc_mfu->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 1:
		list = &arc_mru->arcs_list[ARC_BUFC_METADATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	case 2:
		list = &arc_mfu->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mfu->arcs_mtx;
		break;
	case 3:
		list = &arc_mru->arcs_list[ARC_BUFC_DATA];
		*lock = &arc_mru->arcs_mtx;
		break;
	}

	ASSERT(!(MUTEX_HELD(*lock)));
	mutex_enter(*lock);
	return (list);
}
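
/*
 * Editor's sketch (not part of the original source): the caller pattern
 * for l2arc_list_locked(), as used by l2arc_write_buffers() below --
 * walk the four lists in priority order, dropping each list lock before
 * moving on.
 */
#if 0
	list_t *list;
	kmutex_t *list_lock;
	int try;

	for (try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		/* ... scan buffers from the head or tail of 'list' ... */
		mutex_exit(list_lock);
	}
#endif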

/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, or it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
	list_t *buflist;
	l2arc_buf_hdr_t *abl2;
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t taddr;

	buflist = dev->l2ad_buflist;

	if (buflist == NULL)
		return;

	if (!all && dev->l2ad_first) {
		/*
		 * This is the first sweep through the device.  There is
		 * nothing to evict.
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&l2arc_buflist_mtx);
	for (ab = list_tail(buflist); ab; ab = ab_prev) {
		ab_prev = list_prev(buflist, ab);

		hash_lock = HDR_LOCK(ab);
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&l2arc_buflist_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		if (HDR_L2_WRITE_HEAD(ab)) {
			/*
			 * We hit a write head node.  Leave it for
			 * l2arc_write_done().
			 */
			list_remove(buflist, ab);
			mutex_exit(hash_lock);
			continue;
		}

		if (!all && ab->b_l2hdr != NULL &&
		    (ab->b_l2hdr->b_daddr > taddr ||
		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (HDR_FREE_IN_PROGRESS(ab)) {
			/*
			 * Already on the path to destruction.
			 */
			mutex_exit(hash_lock);
			continue;
		}

		if (ab->b_state == arc_l2c_only) {
			ASSERT(!HDR_L2_READING(ab));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_size.
			 */
			arc_change_state(arc_anon, ab, hash_lock);
			arc_hdr_destroy(ab);
		} else {
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(ab)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				ab->b_flags |= ARC_L2_EVICTED;
			}

			/*
			 * Tell ARC this no longer exists in L2ARC.
			 */
			if (ab->b_l2hdr != NULL) {
				abl2 = ab->b_l2hdr;
				ab->b_l2hdr = NULL;
				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
			}
			list_remove(buflist, ab);

			/*
			 * This may have been leftover after a
			 * failed write.
			 */
			ab->b_flags &= ~ARC_L2_WRITING;
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&l2arc_buflist_mtx);

	spa_l2cache_space_update(dev->l2ad_vdev, 0, -(taddr - dev->l2ad_evict));
	dev->l2ad_evict = taddr;
}

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static void
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
	arc_buf_hdr_t *ab, *ab_prev, *head;
	l2arc_buf_hdr_t *hdrl2;
	list_t *list;
	uint64_t passed_sz, write_sz, buf_sz, headroom;
	void *buf_data;
	kmutex_t *hash_lock, *list_lock;
	boolean_t have_lock, full;
	l2arc_write_callback_t *cb;
	zio_t *pio, *wzio;
	int try;

	ASSERT(dev->l2ad_vdev != NULL);

	pio = NULL;
	write_sz = 0;
	full = B_FALSE;
	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	head->b_flags |= ARC_L2_WRITE_HEAD;

	/*
	 * Copy buffers for L2ARC writing.
	 */
	mutex_enter(&l2arc_buflist_mtx);
	for (try = 0; try <= 3; try++) {
		list = l2arc_list_locked(try, &list_lock);
		passed_sz = 0;

		/*
		 * L2ARC fast warmup.
		 *
		 * Until the ARC is warm and starts to evict, read from the
		 * head of the ARC lists rather than the tail.
		 */
		headroom = target_sz * l2arc_headroom;
		if (arc_warm == B_FALSE)
			ab = list_head(list);
		else
			ab = list_tail(list);

		for (; ab; ab = ab_prev) {
			if (arc_warm == B_FALSE)
				ab_prev = list_next(list, ab);
			else
				ab_prev = list_prev(list, ab);

			hash_lock = HDR_LOCK(ab);
			have_lock = MUTEX_HELD(hash_lock);
			if (!have_lock && !mutex_tryenter(hash_lock)) {
				/*
				 * Skip this buffer rather than waiting.
				 */
				continue;
			}

			passed_sz += ab->b_size;
			if (passed_sz > headroom) {
				/*
				 * Searched too far.
				 */
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_spa != spa) {
				mutex_exit(hash_lock);
				continue;
			}

			if (ab->b_l2hdr != NULL) {
				/*
				 * Already in L2ARC.
				 */
				mutex_exit(hash_lock);
				continue;
			}

			if (HDR_IO_IN_PROGRESS(ab) || !HDR_L2CACHE(ab)) {
				mutex_exit(hash_lock);
				continue;
			}

			if ((write_sz + ab->b_size) > target_sz) {
				full = B_TRUE;
				mutex_exit(hash_lock);
				break;
			}

			if (ab->b_buf == NULL) {
				DTRACE_PROBE1(l2arc__buf__null, void *, ab);
				mutex_exit(hash_lock);
				continue;
			}

			if (pio == NULL) {
				/*
				 * Insert a dummy header on the buflist so
				 * l2arc_write_done() can find where the
				 * write buffers begin without searching.
				 */
				list_insert_head(dev->l2ad_buflist, head);

				cb = kmem_alloc(
				    sizeof (l2arc_write_callback_t), KM_SLEEP);
				cb->l2wcb_dev = dev;
				cb->l2wcb_head = head;
				pio = zio_root(spa, l2arc_write_done, cb,
				    ZIO_FLAG_CANFAIL);
			}

			/*
			 * Create and add a new L2ARC header.
			 */
			hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
			hdrl2->b_dev = dev;
			hdrl2->b_daddr = dev->l2ad_hand;

			ab->b_flags |= ARC_L2_WRITING;
			ab->b_l2hdr = hdrl2;
			list_insert_head(dev->l2ad_buflist, ab);
			buf_data = ab->b_buf->b_data;
			buf_sz = ab->b_size;

			/*
			 * Compute and store the buffer cksum before
			 * writing.  On debug the cksum is verified first.
			 */
			arc_cksum_verify(ab->b_buf);
			arc_cksum_compute(ab->b_buf, B_TRUE);

			mutex_exit(hash_lock);

			wzio = zio_write_phys(pio, dev->l2ad_vdev,
			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
			    ZIO_FLAG_CANFAIL, B_FALSE);

			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
			    zio_t *, wzio);
			(void) zio_nowait(wzio);

			/*
			 * Keep the clock hand suitably device-aligned.
			 */
			buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

			write_sz += buf_sz;
			dev->l2ad_hand += buf_sz;
		}

		mutex_exit(list_lock);

		if (full == B_TRUE)
			break;
	}
	mutex_exit(&l2arc_buflist_mtx);

	if (pio == NULL) {
		ASSERT3U(write_sz, ==, 0);
		kmem_cache_free(hdr_cache, head);
		return;
	}

	ASSERT3U(write_sz, <=, target_sz);
	ARCSTAT_BUMP(arcstat_l2_writes_sent);
	ARCSTAT_INCR(arcstat_l2_size, write_sz);
	spa_l2cache_space_update(dev->l2ad_vdev, 0, write_sz);

	/*
	 * Bump device hand to the device start if it is approaching the end.
	 * l2arc_evict() will already have evicted ahead for this case.
	 */
	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
		spa_l2cache_space_update(dev->l2ad_vdev, 0,
		    dev->l2ad_end - dev->l2ad_hand);
		dev->l2ad_hand = dev->l2ad_start;
		dev->l2ad_evict = dev->l2ad_start;
		dev->l2ad_first = B_FALSE;
	}

	(void) zio_wait(pio);
}

/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *dummy __unused)
{
	callb_cpr_t cpr;
	l2arc_dev_t *dev;
	spa_t *spa;
	uint64_t size;

	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

	mutex_enter(&l2arc_feed_thr_lock);

	while (l2arc_thread_exit == 0) {
		/*
		 * Pause for l2arc_feed_secs seconds between writes.
		 */
		CALLB_CPR_SAFE_BEGIN(&cpr);
		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
		    hz * l2arc_feed_secs);
		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);

		/*
		 * Quick check for L2ARC devices.
		 */
		mutex_enter(&l2arc_dev_mtx);
		if (l2arc_ndev == 0) {
			mutex_exit(&l2arc_dev_mtx);
			continue;
		}
		mutex_exit(&l2arc_dev_mtx);

		/*
		 * This selects the next l2arc device to write to, and in
		 * doing so the next spa to feed from: dev->l2ad_spa.  This
		 * will return NULL if there are now no l2arc devices or if
		 * they are all faulted.
		 *
		 * If a device is returned, its spa's config lock is also
		 * held to prevent device removal.  l2arc_dev_get_next()
		 * will grab and release l2arc_dev_mtx.
		 */
		if ((dev = l2arc_dev_get_next()) == NULL)
			continue;

		spa = dev->l2ad_spa;
		ASSERT(spa != NULL);

		/*
		 * Avoid contributing to memory pressure.
		 */
		if (arc_reclaim_needed()) {
			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
			spa_config_exit(spa, SCL_L2ARC, dev);
			continue;
		}

		ARCSTAT_BUMP(arcstat_l2_feeds);

		size = dev->l2ad_write;
		if (arc_warm == B_FALSE)
			size += dev->l2ad_boost;

		/*
		 * Evict L2ARC buffers that will be overwritten.
		 */
		l2arc_evict(dev, size, B_FALSE);

		/*
		 * Write ARC buffers.
		 */
		l2arc_write_buffers(spa, dev, size);
		spa_config_exit(spa, SCL_L2ARC, dev);
	}

	l2arc_thread_exit = 0;
	cv_broadcast(&l2arc_feed_thr_cv);
	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
	thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
	l2arc_dev_t *dev;

	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev != NULL;
	    dev = list_next(l2arc_dev_list, dev)) {
		if (dev->l2ad_vdev == vd)
			break;
	}
	mutex_exit(&l2arc_dev_mtx);

	return (dev != NULL);
}

/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd, uint64_t start, uint64_t end)
{
	l2arc_dev_t *adddev;

	ASSERT(!l2arc_vdev_present(vd));

	/*
	 * Create a new l2arc device entry.
	 */
	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
	adddev->l2ad_spa = spa;
	adddev->l2ad_vdev = vd;
	adddev->l2ad_write = l2arc_write_max;
	adddev->l2ad_boost = l2arc_write_boost;
	adddev->l2ad_start = start;
	adddev->l2ad_end = end;
	adddev->l2ad_hand = adddev->l2ad_start;
	adddev->l2ad_evict = adddev->l2ad_start;
	adddev->l2ad_first = B_TRUE;
	ASSERT3U(adddev->l2ad_write, >, 0);

	/*
	 * This is a list of all ARC buffers that are still valid on the
	 * device.
	 */
	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
	    offsetof(arc_buf_hdr_t, b_l2node));

	spa_l2cache_space_update(vd, adddev->l2ad_end - adddev->l2ad_hand, 0);

	/*
	 * Add device to global list
	 */
	mutex_enter(&l2arc_dev_mtx);
	list_insert_head(l2arc_dev_list, adddev);
	atomic_inc_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
	l2arc_dev_t *dev, *nextdev, *remdev = NULL;

	/*
	 * Find the device by vdev
	 */
	mutex_enter(&l2arc_dev_mtx);
	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
		nextdev = list_next(l2arc_dev_list, dev);
		if (vd == dev->l2ad_vdev) {
			remdev = dev;
			break;
		}
	}
	ASSERT(remdev != NULL);

	/*
	 * Remove device from global list
	 */
	list_remove(l2arc_dev_list, remdev);
	l2arc_dev_last = NULL;		/* may have been invalidated */
	atomic_dec_64(&l2arc_ndev);
	mutex_exit(&l2arc_dev_mtx);

	/*
	 * Clear all buflists and ARC references.  L2ARC device flush.
	 */
	l2arc_evict(remdev, 0, B_TRUE);
	list_destroy(remdev->l2ad_buflist);
	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
	kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
	l2arc_thread_exit = 0;
	l2arc_ndev = 0;
	l2arc_writes_sent = 0;
	l2arc_writes_done = 0;

	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

	l2arc_dev_list = &L2ARC_dev_list;
	l2arc_free_on_write = &L2ARC_free_on_write;
	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
	    offsetof(l2arc_dev_t, l2ad_node));
	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
	    offsetof(l2arc_data_free_t, l2df_list_node));
}

void
l2arc_fini(void)
{
	/*
	 * This is called from dmu_fini(), which is called from spa_fini();
	 * Because of this, we can assume that all l2arc devices have
	 * already been removed when the pools themselves were removed.
	 */

	l2arc_do_free_on_write();

	mutex_destroy(&l2arc_feed_thr_lock);
	cv_destroy(&l2arc_feed_thr_cv);
	mutex_destroy(&l2arc_dev_mtx);
	mutex_destroy(&l2arc_buflist_mtx);
	mutex_destroy(&l2arc_free_on_write_mtx);

	list_destroy(l2arc_dev_list);
	list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
	if (!(spa_mode & FWRITE))
		return;

	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
	if (!(spa_mode & FWRITE))
		return;

	mutex_enter(&l2arc_feed_thr_lock);
	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
	l2arc_thread_exit = 1;
	while (l2arc_thread_exit != 0)
		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
	mutex_exit(&l2arc_feed_thr_lock);
}