arc.c revision 168404
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic: we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
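/*
 * Illustrative sketch (not part of the original file): the lock-ordering
 * rule above in practice.  A thread that already holds an arc list lock
 * may only *try* to take a hash lock, skipping the buffer on failure,
 * as arc_evict() below does:
 *
 *	mutex_enter(&state->arcs_mtx);
 *	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
 *		ab_prev = list_prev(&state->arcs_list, ab);
 *		hash_lock = HDR_LOCK(ab);
 *		if (!mutex_tryenter(hash_lock))
 *			continue;	// taking it outright could deadlock
 *		// ... safe to examine or evict *ab here ...
 *		mutex_exit(hash_lock);
 *	}
 *	mutex_exit(&state->arcs_mtx);
 */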

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

#define	ARC_FREE_AT_ONCE	4194304

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they
 * are linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */
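/*
 * A rough sketch of the state transitions (an assumption drawn from the
 * standard ARC design; the authoritative logic lives in arc_access(),
 * which is outside this excerpt):
 *
 *	new buffer ------------------> ARC_anon
 *	anon acquires DVA (write) ---> ARC_mru
 *	mru hit (re-referenced) -----> ARC_mfu
 *	mru evicted -----------------> ARC_mru_ghost (header only)
 *	mru_ghost hit ---------------> ARC_mfu (data re-read)
 *	mfu evicted -----------------> ARC_mfu_ghost (header only)
 *	mfu_ghost hit ---------------> ARC_mfu (data re-read)
 */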

typedef struct arc_state {
	list_t	arcs_list;	/* linked list of evictable buffers in state */
	uint64_t arcs_lsize;	/* total size of buffers in the linked list */
	uint64_t arcs_size;	/* total size of all buffers in this state */
	kmutex_t arcs_mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
"c_min", KSTAT_DATA_UINT64 }, 249168404Spjd { "c_max", KSTAT_DATA_UINT64 }, 250168404Spjd { "size", KSTAT_DATA_UINT64 } 251168404Spjd}; 252168404Spjd 253168404Spjd#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 254168404Spjd 255168404Spjd#define ARCSTAT_INCR(stat, val) \ 256168404Spjd atomic_add_64(&arc_stats.stat.value.ui64, (val)); 257168404Spjd 258168404Spjd#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 259168404Spjd#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 260168404Spjd 261168404Spjd#define ARCSTAT_MAX(stat, val) { \ 262168404Spjd uint64_t m; \ 263168404Spjd while ((val) > (m = arc_stats.stat.value.ui64) && \ 264168404Spjd (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 265168404Spjd continue; \ 266168404Spjd} 267168404Spjd 268168404Spjd#define ARCSTAT_MAXSTAT(stat) \ 269168404Spjd ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 270168404Spjd 271168404Spjd/* 272168404Spjd * We define a macro to allow ARC hits/misses to be easily broken down by 273168404Spjd * two separate conditions, giving a total of four different subtypes for 274168404Spjd * each of hits and misses (so eight statistics total). 275168404Spjd */ 276168404Spjd#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 277168404Spjd if (cond1) { \ 278168404Spjd if (cond2) { \ 279168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 280168404Spjd } else { \ 281168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 282168404Spjd } \ 283168404Spjd } else { \ 284168404Spjd if (cond2) { \ 285168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 286168404Spjd } else { \ 287168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 288168404Spjd } \ 289168404Spjd } 290168404Spjd 291168404Spjdkstat_t *arc_ksp; 292168404Spjdstatic arc_state_t *arc_anon; 293168404Spjdstatic arc_state_t *arc_mru; 294168404Spjdstatic arc_state_t *arc_mru_ghost; 295168404Spjdstatic arc_state_t *arc_mfu; 296168404Spjdstatic arc_state_t *arc_mfu_ghost; 297168404Spjd 298168404Spjd/* 299168404Spjd * There are several ARC variables that are critical to export as kstats -- 300168404Spjd * but we don't want to have to grovel around in the kstat whenever we wish to 301168404Spjd * manipulate them. For these variables, we therefore define them to be in 302168404Spjd * terms of the statistic variable. This assures that we are not introducing 303168404Spjd * the possibility of inconsistency by having shadow copies of the variables, 304168404Spjd * while still allowing the code to be readable. 

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	128

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
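/*
 * Illustrative sketch (not part of the original file): the lookup/lock
 * hand-off convention described in the locking model above.
 * buf_hash_find() returns with the bucket's hash mutex held on a hit,
 * so the caller owns the unlock:
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, BP_IDENTITY(bp),
 *	    bp->blk_birth, &hash_lock);
 *	if (hdr != NULL) {
 *		// hdr fields are stable while hash_lock is held
 *		mutex_exit(hash_lock);
 *	}
 */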

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}
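/*
 * Worked example (an illustration, not part of the original file) of the
 * hash table sizing loop in buf_init() below: with 4GB of physical memory
 * (physmem * PAGESIZE == 2^32), hsize doubles from 2^12 until
 * hsize * 65536 >= 2^32, i.e. hsize == 65536 buckets.  At 8 bytes per
 * bucket pointer that is a 512KB table -- the advertised 128KB per GB.
 */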

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
	while (hsize * 65536 < physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static void
arc_cksum_compute(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (buf->b_hdr->b_state != arc_anon)
		panic("modifying non-anon buffer!");
	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
		panic("modifying buffer while i/o in progress!");
	arc_cksum_verify(buf);
	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf);
}
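/*
 * Illustrative sketch (not part of the original file): the intended
 * freeze/thaw protocol under ZFS_DEBUG_MODIFY.  A writer thaws an
 * anonymous buffer before dirtying it and freezes it again afterwards;
 * any modification of frozen data trips the panic in arc_cksum_verify():
 *
 *	arc_buf_thaw(buf);			// drop the old checksum
 *	bcopy(new_data, buf->b_data, arc_buf_size(buf));
 *	arc_buf_freeze(buf);			// record a fresh checksum
 */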

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;

		ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx));
		mutex_enter(&ab->b_state->arcs_mtx);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(&ab->b_state->arcs_list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT3U(ab->b_datacnt, ==, 0);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(ab->b_state->arcs_lsize, >=, delta);
		atomic_add_64(&ab->b_state->arcs_lsize, -delta);
		mutex_exit(&ab->b_state->arcs_mtx);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		ASSERT(!MUTEX_HELD(&state->arcs_mtx));
		mutex_enter(&state->arcs_mtx);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(&state->arcs_list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt);
		ASSERT3U(state->arcs_size, >=, state->arcs_lsize);
		mutex_exit(&state->arcs_mtx);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->arcs_lsize, >=, from_delta);
			atomic_add_64(&old_state->arcs_lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->arcs_lsize, to_delta);
			ASSERT3U(new_state->arcs_size + to_delta, >=,
			    new_state->arcs_lsize);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && old_state != arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
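/*
 * Illustrative sketch (not part of the original file): a typical
 * allocate/release pairing.  arc_buf_alloc() returns an anonymous,
 * tag-referenced buffer; the same tag releases it:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, SPA_MINBLOCKSIZE, FTAG,
 *	    ARC_BUFC_DATA);
 *	// ... fill buf->b_data ...
 *	arc_buf_free(buf, FTAG);	// anon buffers are freed outright
 */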

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				zio_buf_free(buf->b_data, size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				zio_data_buf_free(buf->b_data, size);
			}
			atomic_add_64(&arc_size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);
			ASSERT3U(state->arcs_lsize, >=, size);
			atomic_add_64(&state->arcs_lsize, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	mutex_destroy(&hdr->b_freeze_lock);

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	return (stolen);
}
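/*
 * Illustrative sketch (not part of the original file): how a caller
 * filling a full cache might use the recycle flag to reuse an evicted
 * buffer's data block instead of a free-then-allocate round trip:
 *
 *	void *data = arc_evict(arc_mru, size, TRUE, type);
 *	if (data == NULL)
 *		data = zio_buf_alloc(size);	// nothing suitable to recycle
 */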

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc_anon, ab, hash_lock);
			mutex_exit(hash_lock);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;

	if (top_sz > arc_p && arc_mru->arcs_lsize > 0) {
		int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p);
		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
			arc_evict_ghost(arc_mru_ghost, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize > 0) {
			int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over);
			(void) arc_evict(arc_mfu, toevict, FALSE,
			    ARC_BUFC_UNDEF);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_lsize +
		    arc_mfu_ghost->arcs_lsize - arc_c*2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, todelete);
		}
	}
}
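/*
 * Worked example (an illustration, not part of the original file) of the
 * first adjustment in arc_adjust() above: with arc_p = 400MB,
 * arc_anon->arcs_size = 100MB and arc_mru->arcs_size = 500MB, top_sz is
 * 600MB, so arc_evict() is asked for MIN(arc_mru->arcs_lsize, 200MB)
 * bytes from arc_mru -- i.e. we evict only down to the target arc_p,
 * and only what is currently evictable.
 */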

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc_mru->arcs_list))
		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
	while (list_head(&arc_mfu->arcs_list))
		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);

	arc_evict_ghost(arc_mru_ghost, -1);
	arc_evict_ghost(arc_mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = arc_c >> arc_shrink_shift;
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int zfs_needfree = 0;
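/*
 * Worked example (an illustration, not part of the original file):
 * with the default arc_shrink_shift of 5, each arc_shrink() call cuts
 * the targets by 1/32nd -- e.g. arc_c = 1024MB drops by 32MB to 992MB
 * (clamped at arc_c_min), and arc_p shrinks proportionally.
 */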
1399168404Spjd	 */
1400168404Spjd	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
1401168404Spjd		return (1);
1402168404Spjd
1403168404Spjd	/*
1404168404Spjd	 * If zio data pages are being allocated out of a separate heap segment,
1405168404Spjd	 * then check that the size of available vmem for this area remains
1406168404Spjd	 * above 1/4th free.  This needs to be done when the size of the
1407168404Spjd	 * non-default segment is smaller than physical memory, so we could
1408168404Spjd	 * conceivably run out of VA in that segment before running out of
1409168404Spjd	 * physical memory.
1410168404Spjd	 */
1411168404Spjd	if (zio_arena != NULL) {
1412168404Spjd		size_t arc_ziosize =
1413168404Spjd		    btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC));
1414168404Spjd
1415168404Spjd		if ((physmem > arc_ziosize) &&
1416168404Spjd		    (btop(vmem_size(zio_arena, VMEM_FREE)) < arc_ziosize >> 2))
1417168404Spjd			return (1);
1418168404Spjd	}
1419168404Spjd
1420168404Spjd#if defined(__i386)
1421168404Spjd	/*
1422168404Spjd	 * If we're on an i386 platform, it's possible that we'll exhaust the
1423168404Spjd	 * kernel heap space before we ever run out of available physical
1424168404Spjd	 * memory.  Most checks of the size of the heap_area compare against
1425168404Spjd	 * tune.t_minarmem, which is the minimum available real memory that we
1426168404Spjd	 * can have in the system.  However, this is generally fixed at 25 pages
1427168404Spjd	 * which is so low that it's useless.  In this comparison, we seek to
1428168404Spjd	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
1429168404Spjd	 * heap is allocated.  (Or, in the calculation, if less than 1/4th
1430168404Spjd	 * is free.)
1431168404Spjd	 */
1432168404Spjd	if (btop(vmem_size(heap_arena, VMEM_FREE)) <
1433168404Spjd	    (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
1434168404Spjd		return (1);
1435168404Spjd#endif
1436168404Spjd#else
1437168404Spjd	if (kmem_map->size > (vm_kmem_size * 3) / 4)
1438168404Spjd		return (1);
1439168404Spjd#endif
1440168404Spjd
1441168404Spjd#else
1442168404Spjd	if (spa_get_random(100) == 0)
1443168404Spjd		return (1);
1444168404Spjd#endif
1445168404Spjd	return (0);
1446168404Spjd}
1447168404Spjd
1448168404Spjdstatic void
1449168404Spjdarc_kmem_reap_now(arc_reclaim_strategy_t strat)
1450168404Spjd{
1451168404Spjd#ifdef ZIO_USE_UMA
1452168404Spjd	size_t i;
1453168404Spjd	kmem_cache_t *prev_cache = NULL;
1454168404Spjd	kmem_cache_t *prev_data_cache = NULL;
1455168404Spjd	extern kmem_cache_t *zio_buf_cache[];
1456168404Spjd	extern kmem_cache_t *zio_data_buf_cache[];
1457168404Spjd#endif
1458168404Spjd
1459168404Spjd#ifdef _KERNEL
1460168404Spjd	/*
1461168404Spjd	 * First purge some DNLC entries, in case the DNLC is using
1462168404Spjd	 * up too much memory.
1463168404Spjd	 */
1464168404Spjd	dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
1465168404Spjd
1466168404Spjd#if defined(__i386)
1467168404Spjd	/*
1468168404Spjd	 * Reclaim unused memory from all kmem caches.
1469168404Spjd	 */
1470168404Spjd	kmem_reap();
1471168404Spjd#endif
1472168404Spjd#endif
1473168404Spjd
1474168404Spjd	/*
1475168404Spjd	 * An aggressive reclamation will shrink the cache size as well as
1476168404Spjd	 * reap free buffers from the arc kmem caches.
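	 *
	 * [Editor's note, not in the original: a conservative pass
	 * (ARC_RECLAIM_CONS) only reaps the kmem caches below, while an
	 * aggressive pass also calls arc_shrink(), which with the default
	 * arc_shrink_shift of 5 lowers the target arc_c by arc_c/32 (and
	 * arc_p proportionally).]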
1477168404Spjd */ 1478168404Spjd if (strat == ARC_RECLAIM_AGGR) 1479168404Spjd arc_shrink(); 1480168404Spjd 1481168404Spjd#ifdef ZIO_USE_UMA 1482168404Spjd for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 1483168404Spjd if (zio_buf_cache[i] != prev_cache) { 1484168404Spjd prev_cache = zio_buf_cache[i]; 1485168404Spjd kmem_cache_reap_now(zio_buf_cache[i]); 1486168404Spjd } 1487168404Spjd if (zio_data_buf_cache[i] != prev_data_cache) { 1488168404Spjd prev_data_cache = zio_data_buf_cache[i]; 1489168404Spjd kmem_cache_reap_now(zio_data_buf_cache[i]); 1490168404Spjd } 1491168404Spjd } 1492168404Spjd#endif 1493168404Spjd kmem_cache_reap_now(buf_cache); 1494168404Spjd kmem_cache_reap_now(hdr_cache); 1495168404Spjd} 1496168404Spjd 1497168404Spjdstatic void 1498168404Spjdarc_reclaim_thread(void *dummy __unused) 1499168404Spjd{ 1500168404Spjd clock_t growtime = 0; 1501168404Spjd arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 1502168404Spjd callb_cpr_t cpr; 1503168404Spjd 1504168404Spjd CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 1505168404Spjd 1506168404Spjd mutex_enter(&arc_reclaim_thr_lock); 1507168404Spjd while (arc_thread_exit == 0) { 1508168404Spjd if (arc_reclaim_needed()) { 1509168404Spjd 1510168404Spjd if (arc_no_grow) { 1511168404Spjd if (last_reclaim == ARC_RECLAIM_CONS) { 1512168404Spjd last_reclaim = ARC_RECLAIM_AGGR; 1513168404Spjd } else { 1514168404Spjd last_reclaim = ARC_RECLAIM_CONS; 1515168404Spjd } 1516168404Spjd } else { 1517168404Spjd arc_no_grow = TRUE; 1518168404Spjd last_reclaim = ARC_RECLAIM_AGGR; 1519168404Spjd membar_producer(); 1520168404Spjd } 1521168404Spjd 1522168404Spjd /* reset the growth delay for every reclaim */ 1523168404Spjd growtime = lbolt + (arc_grow_retry * hz); 1524168404Spjd ASSERT(growtime > 0); 1525168404Spjd 1526168404Spjd if (zfs_needfree && last_reclaim == ARC_RECLAIM_CONS) { 1527168404Spjd /* 1528168404Spjd * If zfs_needfree is TRUE our vm_lowmem hook 1529168404Spjd * was called and in that case we must free some 1530168404Spjd * memory, so switch to aggressive mode. 
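				 *
				 * [Editor's note, not in the original: the
				 * handshake is with zfs_lowmem() later in
				 * this file -- the hook sets zfs_needfree,
				 * cv_signal()s this thread, and tsleep()s
				 * on the flag; after a reclaim pass the
				 * loop below clears the flag and wakeup()s
				 * the sleepers.]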
1531168404Spjd				 */
1532168404Spjd				arc_no_grow = TRUE;
1533168404Spjd				last_reclaim = ARC_RECLAIM_AGGR;
1534168404Spjd			}
1535168404Spjd			arc_kmem_reap_now(last_reclaim);
1536168404Spjd		} else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
1537168404Spjd			arc_no_grow = FALSE;
1538168404Spjd		}
1539168404Spjd
1540168404Spjd		if (zfs_needfree ||
1541168404Spjd		    (2 * arc_c < arc_size +
1542168404Spjd		    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size))
1543168404Spjd			arc_adjust();
1544168404Spjd
1545168404Spjd		if (arc_eviction_list != NULL)
1546168404Spjd			arc_do_user_evicts();
1547168404Spjd
1548168404Spjd		if (arc_reclaim_needed()) {
1549168404Spjd			zfs_needfree = 0;
1550168404Spjd#ifdef _KERNEL
1551168404Spjd			wakeup(&zfs_needfree);
1552168404Spjd#endif
1553168404Spjd		}
1554168404Spjd
1555168404Spjd		/* block until needed, or one second, whichever is shorter */
1556168404Spjd		CALLB_CPR_SAFE_BEGIN(&cpr);
1557168404Spjd		(void) cv_timedwait(&arc_reclaim_thr_cv,
1558168404Spjd		    &arc_reclaim_thr_lock, hz);
1559168404Spjd		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
1560168404Spjd	}
1561168404Spjd
1562168404Spjd	arc_thread_exit = 0;
1563168404Spjd	cv_broadcast(&arc_reclaim_thr_cv);
1564168404Spjd	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
1565168404Spjd	thread_exit();
1566168404Spjd}
1567168404Spjd
1568168404Spjd/*
1569168404Spjd * Adapt arc info given the number of bytes we are trying to add and
1570168404Spjd * the state that we are coming from.  This function is only called
1571168404Spjd * when we are adding new content to the cache.
1572168404Spjd */
1573168404Spjdstatic void
1574168404Spjdarc_adapt(int bytes, arc_state_t *state)
1575168404Spjd{
1576168404Spjd	int mult;
1577168404Spjd
1578168404Spjd	ASSERT(bytes > 0);
1579168404Spjd	/*
1580168404Spjd	 * Adapt the target size of the MRU list:
1581168404Spjd	 *	- if we just hit in the MRU ghost list, then increase
1582168404Spjd	 *	  the target size of the MRU list.
1583168404Spjd	 *	- if we just hit in the MFU ghost list, then increase
1584168404Spjd	 *	  the target size of the MFU list by decreasing the
1585168404Spjd	 *	  target size of the MRU list.
1586168404Spjd	 */
1587168404Spjd	if (state == arc_mru_ghost) {
1588168404Spjd		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
1589168404Spjd		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
1590168404Spjd
1591168404Spjd		arc_p = MIN(arc_c, arc_p + bytes * mult);
1592168404Spjd	} else if (state == arc_mfu_ghost) {
1593168404Spjd		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
1594168404Spjd 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 1595168404Spjd 1596168404Spjd arc_p = MAX(0, (int64_t)arc_p - bytes * mult); 1597168404Spjd } 1598168404Spjd ASSERT((int64_t)arc_p >= 0); 1599168404Spjd 1600168404Spjd if (arc_reclaim_needed()) { 1601168404Spjd cv_signal(&arc_reclaim_thr_cv); 1602168404Spjd return; 1603168404Spjd } 1604168404Spjd 1605168404Spjd if (arc_no_grow) 1606168404Spjd return; 1607168404Spjd 1608168404Spjd if (arc_c >= arc_c_max) 1609168404Spjd return; 1610168404Spjd 1611168404Spjd /* 1612168404Spjd * If we're within (2 * maxblocksize) bytes of the target 1613168404Spjd * cache size, increment the target cache size 1614168404Spjd */ 1615168404Spjd if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 1616168404Spjd atomic_add_64(&arc_c, (int64_t)bytes); 1617168404Spjd if (arc_c > arc_c_max) 1618168404Spjd arc_c = arc_c_max; 1619168404Spjd else if (state == arc_anon) 1620168404Spjd atomic_add_64(&arc_p, (int64_t)bytes); 1621168404Spjd if (arc_p > arc_c) 1622168404Spjd arc_p = arc_c; 1623168404Spjd } 1624168404Spjd ASSERT((int64_t)arc_p >= 0); 1625168404Spjd} 1626168404Spjd 1627168404Spjd/* 1628168404Spjd * Check if the cache has reached its limits and eviction is required 1629168404Spjd * prior to insert. 1630168404Spjd */ 1631168404Spjdstatic int 1632168404Spjdarc_evict_needed() 1633168404Spjd{ 1634168404Spjd if (arc_reclaim_needed()) 1635168404Spjd return (1); 1636168404Spjd 1637168404Spjd return (arc_size > arc_c); 1638168404Spjd} 1639168404Spjd 1640168404Spjd/* 1641168404Spjd * The buffer, supplied as the first argument, needs a data block. 1642168404Spjd * So, if we are at cache max, determine which cache should be victimized. 1643168404Spjd * We have the following cases: 1644168404Spjd * 1645168404Spjd * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 1646168404Spjd * In this situation if we're out of space, but the resident size of the MFU is 1647168404Spjd * under the limit, victimize the MFU cache to satisfy this insertion request. 1648168404Spjd * 1649168404Spjd * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 1650168404Spjd * Here, we've used up all of the available space for the MRU, so we need to 1651168404Spjd * evict from our own cache instead. Evict from the set of resident MRU 1652168404Spjd * entries. 1653168404Spjd * 1654168404Spjd * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 1655168404Spjd * c minus p represents the MFU space in the cache, since p is the size of the 1656168404Spjd * cache that is dedicated to the MRU. In this situation there's still space on 1657168404Spjd * the MFU side, so the MRU side needs to be victimized. 1658168404Spjd * 1659168404Spjd * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 1660168404Spjd * MFU's resident set is consuming more space than it has been allotted. In 1661168404Spjd * this situation, we must victimize our own cache, the MFU, for this insertion. 1662168404Spjd */ 1663168404Spjdstatic void 1664168404Spjdarc_get_data_buf(arc_buf_t *buf) 1665168404Spjd{ 1666168404Spjd arc_state_t *state = buf->b_hdr->b_state; 1667168404Spjd uint64_t size = buf->b_hdr->b_size; 1668168404Spjd arc_buf_contents_t type = buf->b_hdr->b_type; 1669168404Spjd 1670168404Spjd arc_adapt(size, state); 1671168404Spjd 1672168404Spjd /* 1673168404Spjd * We have not yet reached cache maximum size, 1674168404Spjd * just allocate a new buffer. 
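	 *
	 * [Editor's worked example of the victimization cases above, not in
	 * the original: with arc_c = 1000MB and arc_p = 400MB, an MRU-side
	 * insert steals from the MFU while arc_anon + arc_mru is still under
	 * 400MB (case 1) and from the MRU itself once it is over (case 2);
	 * an MFU-side insert steals from the MRU while the resident MFU set
	 * is under arc_c - arc_p = 600MB (case 3) and from the MFU itself
	 * once it exceeds that (case 4).]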
1675168404Spjd */ 1676168404Spjd if (!arc_evict_needed()) { 1677168404Spjd if (type == ARC_BUFC_METADATA) { 1678168404Spjd buf->b_data = zio_buf_alloc(size); 1679168404Spjd } else { 1680168404Spjd ASSERT(type == ARC_BUFC_DATA); 1681168404Spjd buf->b_data = zio_data_buf_alloc(size); 1682168404Spjd } 1683168404Spjd atomic_add_64(&arc_size, size); 1684168404Spjd goto out; 1685168404Spjd } 1686168404Spjd 1687168404Spjd /* 1688168404Spjd * If we are prefetching from the mfu ghost list, this buffer 1689168404Spjd * will end up on the mru list; so steal space from there. 1690168404Spjd */ 1691168404Spjd if (state == arc_mfu_ghost) 1692168404Spjd state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 1693168404Spjd else if (state == arc_mru_ghost) 1694168404Spjd state = arc_mru; 1695168404Spjd 1696168404Spjd if (state == arc_mru || state == arc_anon) { 1697168404Spjd uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 1698168404Spjd state = (arc_p > mru_used) ? arc_mfu : arc_mru; 1699168404Spjd } else { 1700168404Spjd /* MFU cases */ 1701168404Spjd uint64_t mfu_space = arc_c - arc_p; 1702168404Spjd state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 1703168404Spjd } 1704168404Spjd if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) { 1705168404Spjd if (type == ARC_BUFC_METADATA) { 1706168404Spjd buf->b_data = zio_buf_alloc(size); 1707168404Spjd } else { 1708168404Spjd ASSERT(type == ARC_BUFC_DATA); 1709168404Spjd buf->b_data = zio_data_buf_alloc(size); 1710168404Spjd } 1711168404Spjd atomic_add_64(&arc_size, size); 1712168404Spjd ARCSTAT_BUMP(arcstat_recycle_miss); 1713168404Spjd } 1714168404Spjd ASSERT(buf->b_data != NULL); 1715168404Spjdout: 1716168404Spjd /* 1717168404Spjd * Update the state size. Note that ghost states have a 1718168404Spjd * "ghost size" and so don't need to be updated. 1719168404Spjd */ 1720168404Spjd if (!GHOST_STATE(buf->b_hdr->b_state)) { 1721168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 1722168404Spjd 1723168404Spjd atomic_add_64(&hdr->b_state->arcs_size, size); 1724168404Spjd if (list_link_active(&hdr->b_arc_node)) { 1725168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1726168404Spjd atomic_add_64(&hdr->b_state->arcs_lsize, size); 1727168404Spjd } 1728168404Spjd /* 1729168404Spjd * If we are growing the cache, and we are adding anonymous 1730168404Spjd * data, and we have outgrown arc_p, update arc_p 1731168404Spjd */ 1732168404Spjd if (arc_size < arc_c && hdr->b_state == arc_anon && 1733168404Spjd arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 1734168404Spjd arc_p = MIN(arc_c, arc_p + size); 1735168404Spjd } 1736168404Spjd} 1737168404Spjd 1738168404Spjd/* 1739168404Spjd * This routine is called whenever a buffer is accessed. 1740168404Spjd * NOTE: the hash lock is dropped in this function. 1741168404Spjd */ 1742168404Spjdstatic void 1743168404Spjdarc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 1744168404Spjd{ 1745168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 1746168404Spjd 1747168404Spjd if (buf->b_state == arc_anon) { 1748168404Spjd /* 1749168404Spjd * This buffer is not in the cache, and does not 1750168404Spjd * appear in our "ghost" list. Add the new buffer 1751168404Spjd * to the MRU state. 
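		 *
		 * [Editor's summary of the transitions handled below, not in
		 * the original source:
		 *
		 *	anon      -> mru	(first access)
		 *	mru       -> mfu	(re-accessed after ARC_MINTIME)
		 *	mru_ghost -> mfu	(or mru if it is a prefetch)
		 *	mfu       -> mfu	(stays put)
		 *	mfu_ghost -> mfu	(or mru if it is a prefetch)]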
1752168404Spjd */ 1753168404Spjd 1754168404Spjd ASSERT(buf->b_arc_access == 0); 1755168404Spjd buf->b_arc_access = lbolt; 1756168404Spjd DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1757168404Spjd arc_change_state(arc_mru, buf, hash_lock); 1758168404Spjd 1759168404Spjd } else if (buf->b_state == arc_mru) { 1760168404Spjd /* 1761168404Spjd * If this buffer is here because of a prefetch, then either: 1762168404Spjd * - clear the flag if this is a "referencing" read 1763168404Spjd * (any subsequent access will bump this into the MFU state). 1764168404Spjd * or 1765168404Spjd * - move the buffer to the head of the list if this is 1766168404Spjd * another prefetch (to make it less likely to be evicted). 1767168404Spjd */ 1768168404Spjd if ((buf->b_flags & ARC_PREFETCH) != 0) { 1769168404Spjd if (refcount_count(&buf->b_refcnt) == 0) { 1770168404Spjd ASSERT(list_link_active(&buf->b_arc_node)); 1771168404Spjd mutex_enter(&arc_mru->arcs_mtx); 1772168404Spjd list_remove(&arc_mru->arcs_list, buf); 1773168404Spjd list_insert_head(&arc_mru->arcs_list, buf); 1774168404Spjd mutex_exit(&arc_mru->arcs_mtx); 1775168404Spjd } else { 1776168404Spjd buf->b_flags &= ~ARC_PREFETCH; 1777168404Spjd ARCSTAT_BUMP(arcstat_mru_hits); 1778168404Spjd } 1779168404Spjd buf->b_arc_access = lbolt; 1780168404Spjd return; 1781168404Spjd } 1782168404Spjd 1783168404Spjd /* 1784168404Spjd * This buffer has been "accessed" only once so far, 1785168404Spjd * but it is still in the cache. Move it to the MFU 1786168404Spjd * state. 1787168404Spjd */ 1788168404Spjd if (lbolt > buf->b_arc_access + ARC_MINTIME) { 1789168404Spjd /* 1790168404Spjd * More than 125ms have passed since we 1791168404Spjd * instantiated this buffer. Move it to the 1792168404Spjd * most frequently used state. 1793168404Spjd */ 1794168404Spjd buf->b_arc_access = lbolt; 1795168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1796168404Spjd arc_change_state(arc_mfu, buf, hash_lock); 1797168404Spjd } 1798168404Spjd ARCSTAT_BUMP(arcstat_mru_hits); 1799168404Spjd } else if (buf->b_state == arc_mru_ghost) { 1800168404Spjd arc_state_t *new_state; 1801168404Spjd /* 1802168404Spjd * This buffer has been "accessed" recently, but 1803168404Spjd * was evicted from the cache. Move it to the 1804168404Spjd * MFU state. 1805168404Spjd */ 1806168404Spjd 1807168404Spjd if (buf->b_flags & ARC_PREFETCH) { 1808168404Spjd new_state = arc_mru; 1809168404Spjd if (refcount_count(&buf->b_refcnt) > 0) 1810168404Spjd buf->b_flags &= ~ARC_PREFETCH; 1811168404Spjd DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 1812168404Spjd } else { 1813168404Spjd new_state = arc_mfu; 1814168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1815168404Spjd } 1816168404Spjd 1817168404Spjd buf->b_arc_access = lbolt; 1818168404Spjd arc_change_state(new_state, buf, hash_lock); 1819168404Spjd 1820168404Spjd ARCSTAT_BUMP(arcstat_mru_ghost_hits); 1821168404Spjd } else if (buf->b_state == arc_mfu) { 1822168404Spjd /* 1823168404Spjd * This buffer has been accessed more than once and is 1824168404Spjd * still in the cache. Keep it in the MFU state. 1825168404Spjd * 1826168404Spjd * NOTE: an add_reference() that occurred when we did 1827168404Spjd * the arc_read() will have kicked this off the list. 1828168404Spjd * If it was a prefetch, we will explicitly move it to 1829168404Spjd * the head of the list now. 
1830168404Spjd */ 1831168404Spjd if ((buf->b_flags & ARC_PREFETCH) != 0) { 1832168404Spjd ASSERT(refcount_count(&buf->b_refcnt) == 0); 1833168404Spjd ASSERT(list_link_active(&buf->b_arc_node)); 1834168404Spjd mutex_enter(&arc_mfu->arcs_mtx); 1835168404Spjd list_remove(&arc_mfu->arcs_list, buf); 1836168404Spjd list_insert_head(&arc_mfu->arcs_list, buf); 1837168404Spjd mutex_exit(&arc_mfu->arcs_mtx); 1838168404Spjd } 1839168404Spjd ARCSTAT_BUMP(arcstat_mfu_hits); 1840168404Spjd buf->b_arc_access = lbolt; 1841168404Spjd } else if (buf->b_state == arc_mfu_ghost) { 1842168404Spjd arc_state_t *new_state = arc_mfu; 1843168404Spjd /* 1844168404Spjd * This buffer has been accessed more than once but has 1845168404Spjd * been evicted from the cache. Move it back to the 1846168404Spjd * MFU state. 1847168404Spjd */ 1848168404Spjd 1849168404Spjd if (buf->b_flags & ARC_PREFETCH) { 1850168404Spjd /* 1851168404Spjd * This is a prefetch access... 1852168404Spjd * move this block back to the MRU state. 1853168404Spjd */ 1854168404Spjd ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0); 1855168404Spjd new_state = arc_mru; 1856168404Spjd } 1857168404Spjd 1858168404Spjd buf->b_arc_access = lbolt; 1859168404Spjd DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 1860168404Spjd arc_change_state(new_state, buf, hash_lock); 1861168404Spjd 1862168404Spjd ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 1863168404Spjd } else { 1864168404Spjd ASSERT(!"invalid arc state"); 1865168404Spjd } 1866168404Spjd} 1867168404Spjd 1868168404Spjd/* a generic arc_done_func_t which you can use */ 1869168404Spjd/* ARGSUSED */ 1870168404Spjdvoid 1871168404Spjdarc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 1872168404Spjd{ 1873168404Spjd bcopy(buf->b_data, arg, buf->b_hdr->b_size); 1874168404Spjd VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1875168404Spjd} 1876168404Spjd 1877168404Spjd/* a generic arc_done_func_t which you can use */ 1878168404Spjdvoid 1879168404Spjdarc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 1880168404Spjd{ 1881168404Spjd arc_buf_t **bufp = arg; 1882168404Spjd if (zio && zio->io_error) { 1883168404Spjd VERIFY(arc_buf_remove_ref(buf, arg) == 1); 1884168404Spjd *bufp = NULL; 1885168404Spjd } else { 1886168404Spjd *bufp = buf; 1887168404Spjd } 1888168404Spjd} 1889168404Spjd 1890168404Spjdstatic void 1891168404Spjdarc_read_done(zio_t *zio) 1892168404Spjd{ 1893168404Spjd arc_buf_hdr_t *hdr, *found; 1894168404Spjd arc_buf_t *buf; 1895168404Spjd arc_buf_t *abuf; /* buffer we're assigning to callback */ 1896168404Spjd kmutex_t *hash_lock; 1897168404Spjd arc_callback_t *callback_list, *acb; 1898168404Spjd int freeable = FALSE; 1899168404Spjd 1900168404Spjd buf = zio->io_private; 1901168404Spjd hdr = buf->b_hdr; 1902168404Spjd 1903168404Spjd /* 1904168404Spjd * The hdr was inserted into hash-table and removed from lists 1905168404Spjd * prior to starting I/O. We should find this header, since 1906168404Spjd * it's in the hash table, and it should be legit since it's 1907168404Spjd * not possible to evict it during the I/O. The only possible 1908168404Spjd * reason for it not to be found is if we were freed during the 1909168404Spjd * read. 
1910168404Spjd */ 1911168404Spjd found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth, 1912168404Spjd &hash_lock); 1913168404Spjd 1914168404Spjd ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 1915168404Spjd (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp)))); 1916168404Spjd 1917168404Spjd /* byteswap if necessary */ 1918168404Spjd callback_list = hdr->b_acb; 1919168404Spjd ASSERT(callback_list != NULL); 1920168404Spjd if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap) 1921168404Spjd callback_list->acb_byteswap(buf->b_data, hdr->b_size); 1922168404Spjd 1923168404Spjd arc_cksum_compute(buf); 1924168404Spjd 1925168404Spjd /* create copies of the data buffer for the callers */ 1926168404Spjd abuf = buf; 1927168404Spjd for (acb = callback_list; acb; acb = acb->acb_next) { 1928168404Spjd if (acb->acb_done) { 1929168404Spjd if (abuf == NULL) 1930168404Spjd abuf = arc_buf_clone(buf); 1931168404Spjd acb->acb_buf = abuf; 1932168404Spjd abuf = NULL; 1933168404Spjd } 1934168404Spjd } 1935168404Spjd hdr->b_acb = NULL; 1936168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 1937168404Spjd ASSERT(!HDR_BUF_AVAILABLE(hdr)); 1938168404Spjd if (abuf == buf) 1939168404Spjd hdr->b_flags |= ARC_BUF_AVAILABLE; 1940168404Spjd 1941168404Spjd ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 1942168404Spjd 1943168404Spjd if (zio->io_error != 0) { 1944168404Spjd hdr->b_flags |= ARC_IO_ERROR; 1945168404Spjd if (hdr->b_state != arc_anon) 1946168404Spjd arc_change_state(arc_anon, hdr, hash_lock); 1947168404Spjd if (HDR_IN_HASH_TABLE(hdr)) 1948168404Spjd buf_hash_remove(hdr); 1949168404Spjd freeable = refcount_is_zero(&hdr->b_refcnt); 1950168404Spjd /* convert checksum errors into IO errors */ 1951168404Spjd if (zio->io_error == ECKSUM) 1952168404Spjd zio->io_error = EIO; 1953168404Spjd } 1954168404Spjd 1955168404Spjd /* 1956168404Spjd * Broadcast before we drop the hash_lock to avoid the possibility 1957168404Spjd * that the hdr (and hence the cv) might be freed before we get to 1958168404Spjd * the cv_broadcast(). 1959168404Spjd */ 1960168404Spjd cv_broadcast(&hdr->b_cv); 1961168404Spjd 1962168404Spjd if (hash_lock) { 1963168404Spjd /* 1964168404Spjd * Only call arc_access on anonymous buffers. This is because 1965168404Spjd * if we've issued an I/O for an evicted buffer, we've already 1966168404Spjd * called arc_access (to prevent any simultaneous readers from 1967168404Spjd * getting confused). 1968168404Spjd */ 1969168404Spjd if (zio->io_error == 0 && hdr->b_state == arc_anon) 1970168404Spjd arc_access(hdr, hash_lock); 1971168404Spjd mutex_exit(hash_lock); 1972168404Spjd } else { 1973168404Spjd /* 1974168404Spjd * This block was freed while we waited for the read to 1975168404Spjd * complete. It has been removed from the hash table and 1976168404Spjd * moved to the anonymous state (so that it won't show up 1977168404Spjd * in the cache). 
1978168404Spjd		 */
1979168404Spjd		ASSERT3P(hdr->b_state, ==, arc_anon);
1980168404Spjd		freeable = refcount_is_zero(&hdr->b_refcnt);
1981168404Spjd	}
1982168404Spjd
1983168404Spjd	/* execute each callback and free its structure */
1984168404Spjd	while ((acb = callback_list) != NULL) {
1985168404Spjd		if (acb->acb_done)
1986168404Spjd			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
1987168404Spjd
1988168404Spjd		if (acb->acb_zio_dummy != NULL) {
1989168404Spjd			acb->acb_zio_dummy->io_error = zio->io_error;
1990168404Spjd			zio_nowait(acb->acb_zio_dummy);
1991168404Spjd		}
1992168404Spjd
1993168404Spjd		callback_list = acb->acb_next;
1994168404Spjd		kmem_free(acb, sizeof (arc_callback_t));
1995168404Spjd	}
1996168404Spjd
1997168404Spjd	if (freeable)
1998168404Spjd		arc_hdr_destroy(hdr);
1999168404Spjd}
2000168404Spjd
2001168404Spjd/*
2002168404Spjd * "Read" the block at the specified DVA (in bp) via the
2003168404Spjd * cache.  If the block is found in the cache, invoke the provided
2004168404Spjd * callback immediately and return.  Note that the `zio' parameter
2005168404Spjd * in the callback will be NULL in this case, since no IO was
2006168404Spjd * required.  If the block is not in the cache, pass the read request
2007168404Spjd * on to the spa with a substitute callback function, so that the
2008168404Spjd * requested block will be added to the cache.
2009168404Spjd *
2010168404Spjd * If a read request arrives for a block that has a read in-progress,
2011168404Spjd * either wait for the in-progress read to complete (and return the
2012168404Spjd * results); or, if this is a read with a "done" func, add a record
2013168404Spjd * to the read to invoke the "done" func when the read completes,
2014168404Spjd * and return; or just return.
2015168404Spjd *
2016168404Spjd * arc_read_done() will invoke all the requested "done" functions
2017168404Spjd * for readers of this block.
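 *
 * [Editor's sketch of a hypothetical caller, not part of this file; the
 * priority and flag names are assumptions from zio.h.  A synchronous
 * read might look like:
 *
 *	uint32_t aflags = ARC_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, swapfunc, arc_getbuf_func,
 *	    &abuf, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *
 * An asynchronous caller would instead set aflags = ARC_NOWAIT and pass
 * its own "done" callback, which fires from arc_read_done().]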
2018168404Spjd */ 2019168404Spjdint 2020168404Spjdarc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap, 2021168404Spjd arc_done_func_t *done, void *private, int priority, int flags, 2022168404Spjd uint32_t *arc_flags, zbookmark_t *zb) 2023168404Spjd{ 2024168404Spjd arc_buf_hdr_t *hdr; 2025168404Spjd arc_buf_t *buf; 2026168404Spjd kmutex_t *hash_lock; 2027168404Spjd zio_t *rzio; 2028168404Spjd 2029168404Spjdtop: 2030168404Spjd hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2031168404Spjd if (hdr && hdr->b_datacnt > 0) { 2032168404Spjd 2033168404Spjd *arc_flags |= ARC_CACHED; 2034168404Spjd 2035168404Spjd if (HDR_IO_IN_PROGRESS(hdr)) { 2036168404Spjd 2037168404Spjd if (*arc_flags & ARC_WAIT) { 2038168404Spjd cv_wait(&hdr->b_cv, hash_lock); 2039168404Spjd mutex_exit(hash_lock); 2040168404Spjd goto top; 2041168404Spjd } 2042168404Spjd ASSERT(*arc_flags & ARC_NOWAIT); 2043168404Spjd 2044168404Spjd if (done) { 2045168404Spjd arc_callback_t *acb = NULL; 2046168404Spjd 2047168404Spjd acb = kmem_zalloc(sizeof (arc_callback_t), 2048168404Spjd KM_SLEEP); 2049168404Spjd acb->acb_done = done; 2050168404Spjd acb->acb_private = private; 2051168404Spjd acb->acb_byteswap = swap; 2052168404Spjd if (pio != NULL) 2053168404Spjd acb->acb_zio_dummy = zio_null(pio, 2054168404Spjd spa, NULL, NULL, flags); 2055168404Spjd 2056168404Spjd ASSERT(acb->acb_done != NULL); 2057168404Spjd acb->acb_next = hdr->b_acb; 2058168404Spjd hdr->b_acb = acb; 2059168404Spjd add_reference(hdr, hash_lock, private); 2060168404Spjd mutex_exit(hash_lock); 2061168404Spjd return (0); 2062168404Spjd } 2063168404Spjd mutex_exit(hash_lock); 2064168404Spjd return (0); 2065168404Spjd } 2066168404Spjd 2067168404Spjd ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 2068168404Spjd 2069168404Spjd if (done) { 2070168404Spjd add_reference(hdr, hash_lock, private); 2071168404Spjd /* 2072168404Spjd * If this block is already in use, create a new 2073168404Spjd * copy of the data so that we will be guaranteed 2074168404Spjd * that arc_release() will always succeed. 
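			 *
			 * [Editor's note, not in the original: because every
			 * reader gets its own b_data this way, arc_release()
			 * never has to copy data out from under another
			 * active reference.]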
2075168404Spjd */ 2076168404Spjd buf = hdr->b_buf; 2077168404Spjd ASSERT(buf); 2078168404Spjd ASSERT(buf->b_data); 2079168404Spjd if (HDR_BUF_AVAILABLE(hdr)) { 2080168404Spjd ASSERT(buf->b_efunc == NULL); 2081168404Spjd hdr->b_flags &= ~ARC_BUF_AVAILABLE; 2082168404Spjd } else { 2083168404Spjd buf = arc_buf_clone(buf); 2084168404Spjd } 2085168404Spjd } else if (*arc_flags & ARC_PREFETCH && 2086168404Spjd refcount_count(&hdr->b_refcnt) == 0) { 2087168404Spjd hdr->b_flags |= ARC_PREFETCH; 2088168404Spjd } 2089168404Spjd DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 2090168404Spjd arc_access(hdr, hash_lock); 2091168404Spjd mutex_exit(hash_lock); 2092168404Spjd ARCSTAT_BUMP(arcstat_hits); 2093168404Spjd ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2094168404Spjd demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2095168404Spjd data, metadata, hits); 2096168404Spjd 2097168404Spjd if (done) 2098168404Spjd done(NULL, buf, private); 2099168404Spjd } else { 2100168404Spjd uint64_t size = BP_GET_LSIZE(bp); 2101168404Spjd arc_callback_t *acb; 2102168404Spjd 2103168404Spjd if (hdr == NULL) { 2104168404Spjd /* this block is not in the cache */ 2105168404Spjd arc_buf_hdr_t *exists; 2106168404Spjd arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 2107168404Spjd buf = arc_buf_alloc(spa, size, private, type); 2108168404Spjd hdr = buf->b_hdr; 2109168404Spjd hdr->b_dva = *BP_IDENTITY(bp); 2110168404Spjd hdr->b_birth = bp->blk_birth; 2111168404Spjd hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 2112168404Spjd exists = buf_hash_insert(hdr, &hash_lock); 2113168404Spjd if (exists) { 2114168404Spjd /* somebody beat us to the hash insert */ 2115168404Spjd mutex_exit(hash_lock); 2116168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 2117168404Spjd hdr->b_birth = 0; 2118168404Spjd hdr->b_cksum0 = 0; 2119168404Spjd (void) arc_buf_remove_ref(buf, private); 2120168404Spjd goto top; /* restart the IO request */ 2121168404Spjd } 2122168404Spjd /* if this is a prefetch, we don't have a reference */ 2123168404Spjd if (*arc_flags & ARC_PREFETCH) { 2124168404Spjd (void) remove_reference(hdr, hash_lock, 2125168404Spjd private); 2126168404Spjd hdr->b_flags |= ARC_PREFETCH; 2127168404Spjd } 2128168404Spjd if (BP_GET_LEVEL(bp) > 0) 2129168404Spjd hdr->b_flags |= ARC_INDIRECT; 2130168404Spjd } else { 2131168404Spjd /* this block is in the ghost cache */ 2132168404Spjd ASSERT(GHOST_STATE(hdr->b_state)); 2133168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2134168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0); 2135168404Spjd ASSERT(hdr->b_buf == NULL); 2136168404Spjd 2137168404Spjd /* if this is a prefetch, we don't have a reference */ 2138168404Spjd if (*arc_flags & ARC_PREFETCH) 2139168404Spjd hdr->b_flags |= ARC_PREFETCH; 2140168404Spjd else 2141168404Spjd add_reference(hdr, hash_lock, private); 2142168404Spjd buf = kmem_cache_alloc(buf_cache, KM_SLEEP); 2143168404Spjd buf->b_hdr = hdr; 2144168404Spjd buf->b_data = NULL; 2145168404Spjd buf->b_efunc = NULL; 2146168404Spjd buf->b_private = NULL; 2147168404Spjd buf->b_next = NULL; 2148168404Spjd hdr->b_buf = buf; 2149168404Spjd arc_get_data_buf(buf); 2150168404Spjd ASSERT(hdr->b_datacnt == 0); 2151168404Spjd hdr->b_datacnt = 1; 2152168404Spjd 2153168404Spjd } 2154168404Spjd 2155168404Spjd acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 2156168404Spjd acb->acb_done = done; 2157168404Spjd acb->acb_private = private; 2158168404Spjd acb->acb_byteswap = swap; 2159168404Spjd 2160168404Spjd ASSERT(hdr->b_acb == NULL); 2161168404Spjd hdr->b_acb = acb; 2162168404Spjd hdr->b_flags |= 
ARC_IO_IN_PROGRESS; 2163168404Spjd 2164168404Spjd /* 2165168404Spjd * If the buffer has been evicted, migrate it to a present state 2166168404Spjd * before issuing the I/O. Once we drop the hash-table lock, 2167168404Spjd * the header will be marked as I/O in progress and have an 2168168404Spjd * attached buffer. At this point, anybody who finds this 2169168404Spjd * buffer ought to notice that it's legit but has a pending I/O. 2170168404Spjd */ 2171168404Spjd 2172168404Spjd if (GHOST_STATE(hdr->b_state)) 2173168404Spjd arc_access(hdr, hash_lock); 2174168404Spjd mutex_exit(hash_lock); 2175168404Spjd 2176168404Spjd ASSERT3U(hdr->b_size, ==, size); 2177168404Spjd DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size, 2178168404Spjd zbookmark_t *, zb); 2179168404Spjd ARCSTAT_BUMP(arcstat_misses); 2180168404Spjd ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 2181168404Spjd demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 2182168404Spjd data, metadata, misses); 2183168404Spjd 2184168404Spjd rzio = zio_read(pio, spa, bp, buf->b_data, size, 2185168404Spjd arc_read_done, buf, priority, flags, zb); 2186168404Spjd 2187168404Spjd if (*arc_flags & ARC_WAIT) 2188168404Spjd return (zio_wait(rzio)); 2189168404Spjd 2190168404Spjd ASSERT(*arc_flags & ARC_NOWAIT); 2191168404Spjd zio_nowait(rzio); 2192168404Spjd } 2193168404Spjd return (0); 2194168404Spjd} 2195168404Spjd 2196168404Spjd/* 2197168404Spjd * arc_read() variant to support pool traversal. If the block is already 2198168404Spjd * in the ARC, make a copy of it; otherwise, the caller will do the I/O. 2199168404Spjd * The idea is that we don't want pool traversal filling up memory, but 2200168404Spjd * if the ARC already has the data anyway, we shouldn't pay for the I/O. 2201168404Spjd */ 2202168404Spjdint 2203168404Spjdarc_tryread(spa_t *spa, blkptr_t *bp, void *data) 2204168404Spjd{ 2205168404Spjd arc_buf_hdr_t *hdr; 2206168404Spjd kmutex_t *hash_mtx; 2207168404Spjd int rc = 0; 2208168404Spjd 2209168404Spjd hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx); 2210168404Spjd 2211168404Spjd if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) { 2212168404Spjd arc_buf_t *buf = hdr->b_buf; 2213168404Spjd 2214168404Spjd ASSERT(buf); 2215168404Spjd while (buf->b_data == NULL) { 2216168404Spjd buf = buf->b_next; 2217168404Spjd ASSERT(buf); 2218168404Spjd } 2219168404Spjd bcopy(buf->b_data, data, hdr->b_size); 2220168404Spjd } else { 2221168404Spjd rc = ENOENT; 2222168404Spjd } 2223168404Spjd 2224168404Spjd if (hash_mtx) 2225168404Spjd mutex_exit(hash_mtx); 2226168404Spjd 2227168404Spjd return (rc); 2228168404Spjd} 2229168404Spjd 2230168404Spjdvoid 2231168404Spjdarc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 2232168404Spjd{ 2233168404Spjd ASSERT(buf->b_hdr != NULL); 2234168404Spjd ASSERT(buf->b_hdr->b_state != arc_anon); 2235168404Spjd ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 2236168404Spjd buf->b_efunc = func; 2237168404Spjd buf->b_private = private; 2238168404Spjd} 2239168404Spjd 2240168404Spjd/* 2241168404Spjd * This is used by the DMU to let the ARC know that a buffer is 2242168404Spjd * being evicted, so the ARC should clean up. If this arc buf 2243168404Spjd * is not yet in the evicted state, it will be put there. 
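 *
 * [Editor's sketch, not in the original: a client that wants this
 * notification registers it after taking a hold, e.g.
 *
 *	arc_set_callback(buf, my_evict_cb, my_private);
 *
 * where my_evict_cb and my_private are hypothetical names.  The
 * callback receives the arc_buf_t, must release the client's hold,
 * and must return 0 -- the VERIFY() calls below enforce that.]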
2244168404Spjd */
2245168404Spjdint
2246168404Spjdarc_buf_evict(arc_buf_t *buf)
2247168404Spjd{
2248168404Spjd	arc_buf_hdr_t *hdr;
2249168404Spjd	kmutex_t *hash_lock;
2250168404Spjd	arc_buf_t **bufp;
2251168404Spjd
2252168404Spjd	mutex_enter(&arc_eviction_mtx);
2253168404Spjd	hdr = buf->b_hdr;
2254168404Spjd	if (hdr == NULL) {
2255168404Spjd		/*
2256168404Spjd		 * We are in arc_do_user_evicts().
2257168404Spjd		 */
2258168404Spjd		ASSERT(buf->b_data == NULL);
2259168404Spjd		mutex_exit(&arc_eviction_mtx);
2260168404Spjd		return (0);
2261168404Spjd	}
2262168404Spjd	hash_lock = HDR_LOCK(hdr);
2263168404Spjd	mutex_exit(&arc_eviction_mtx);
2264168404Spjd
2265168404Spjd	mutex_enter(hash_lock);
2266168404Spjd
2267168404Spjd	if (buf->b_data == NULL) {
2268168404Spjd		/*
2269168404Spjd		 * We are on the eviction list.
2270168404Spjd		 */
2271168404Spjd		mutex_exit(hash_lock);
2272168404Spjd		mutex_enter(&arc_eviction_mtx);
2273168404Spjd		if (buf->b_hdr == NULL) {
2274168404Spjd			/*
2275168404Spjd			 * We are already in arc_do_user_evicts().
2276168404Spjd			 */
2277168404Spjd			mutex_exit(&arc_eviction_mtx);
2278168404Spjd			return (0);
2279168404Spjd		} else {
2280168404Spjd			arc_buf_t copy = *buf; /* structure assignment */
2281168404Spjd			/*
2282168404Spjd			 * Process this buffer now
2283168404Spjd			 * but let arc_do_user_evicts() do the reaping.
2284168404Spjd			 */
2285168404Spjd			buf->b_efunc = NULL;
2286168404Spjd			mutex_exit(&arc_eviction_mtx);
2287168404Spjd			VERIFY(copy.b_efunc(&copy) == 0);
2288168404Spjd			return (1);
2289168404Spjd		}
2290168404Spjd	}
2291168404Spjd
2292168404Spjd	ASSERT(buf->b_hdr == hdr);
2293168404Spjd	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
2294168404Spjd	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
2295168404Spjd
2296168404Spjd	/*
2297168404Spjd	 * Pull this buffer off of the hdr
2298168404Spjd	 */
2299168404Spjd	bufp = &hdr->b_buf;
2300168404Spjd	while (*bufp != buf)
2301168404Spjd		bufp = &(*bufp)->b_next;
2302168404Spjd	*bufp = buf->b_next;
2303168404Spjd
2304168404Spjd	ASSERT(buf->b_data != NULL);
2305168404Spjd	arc_buf_destroy(buf, FALSE, FALSE);
2306168404Spjd
2307168404Spjd	if (hdr->b_datacnt == 0) {
2308168404Spjd		arc_state_t *old_state = hdr->b_state;
2309168404Spjd		arc_state_t *evicted_state;
2310168404Spjd
2311168404Spjd		ASSERT(refcount_is_zero(&hdr->b_refcnt));
2312168404Spjd
2313168404Spjd		evicted_state =
2314168404Spjd		    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2315168404Spjd
2316168404Spjd		mutex_enter(&old_state->arcs_mtx);
2317168404Spjd		mutex_enter(&evicted_state->arcs_mtx);
2318168404Spjd
2319168404Spjd		arc_change_state(evicted_state, hdr, hash_lock);
2320168404Spjd		ASSERT(HDR_IN_HASH_TABLE(hdr));
2321168404Spjd		hdr->b_flags = ARC_IN_HASH_TABLE;
2322168404Spjd
2323168404Spjd		mutex_exit(&evicted_state->arcs_mtx);
2324168404Spjd		mutex_exit(&old_state->arcs_mtx);
2325168404Spjd	}
2326168404Spjd	mutex_exit(hash_lock);
2327168404Spjd
2328168404Spjd	VERIFY(buf->b_efunc(buf) == 0);
2329168404Spjd	buf->b_efunc = NULL;
2330168404Spjd	buf->b_private = NULL;
2331168404Spjd	buf->b_hdr = NULL;
2332168404Spjd	kmem_cache_free(buf_cache, buf);
2333168404Spjd	return (1);
2334168404Spjd}
2335168404Spjd
2336168404Spjd/*
2337168404Spjd * Release this buffer from the cache.  This must be done
2338168404Spjd * after a read and prior to modifying the buffer contents.
2339168404Spjd * If the buffer has more than one reference, we must make
2340168404Spjd * a new hdr for the buffer.
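 *
 * [Editor's note on the two paths below, not in the original: when the
 * hdr is shared (hdr->b_buf != buf || buf->b_next != NULL), the buffer
 * is unlinked and attached to a freshly allocated anonymous hdr;
 * otherwise the existing hdr simply moves to arc_anon and its
 * dva/birth/cksum identity is cleared so the block can be rewritten.]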
2341168404Spjd */ 2342168404Spjdvoid 2343168404Spjdarc_release(arc_buf_t *buf, void *tag) 2344168404Spjd{ 2345168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 2346168404Spjd kmutex_t *hash_lock = HDR_LOCK(hdr); 2347168404Spjd 2348168404Spjd /* this buffer is not on any list */ 2349168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) > 0); 2350168404Spjd 2351168404Spjd if (hdr->b_state == arc_anon) { 2352168404Spjd /* this buffer is already released */ 2353168404Spjd ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1); 2354168404Spjd ASSERT(BUF_EMPTY(hdr)); 2355168404Spjd ASSERT(buf->b_efunc == NULL); 2356168404Spjd arc_buf_thaw(buf); 2357168404Spjd return; 2358168404Spjd } 2359168404Spjd 2360168404Spjd mutex_enter(hash_lock); 2361168404Spjd 2362168404Spjd /* 2363168404Spjd * Do we have more than one buf? 2364168404Spjd */ 2365168404Spjd if (hdr->b_buf != buf || buf->b_next != NULL) { 2366168404Spjd arc_buf_hdr_t *nhdr; 2367168404Spjd arc_buf_t **bufp; 2368168404Spjd uint64_t blksz = hdr->b_size; 2369168404Spjd spa_t *spa = hdr->b_spa; 2370168404Spjd arc_buf_contents_t type = hdr->b_type; 2371168404Spjd 2372168404Spjd ASSERT(hdr->b_datacnt > 1); 2373168404Spjd /* 2374168404Spjd * Pull the data off of this buf and attach it to 2375168404Spjd * a new anonymous buf. 2376168404Spjd */ 2377168404Spjd (void) remove_reference(hdr, hash_lock, tag); 2378168404Spjd bufp = &hdr->b_buf; 2379168404Spjd while (*bufp != buf) 2380168404Spjd bufp = &(*bufp)->b_next; 2381168404Spjd *bufp = (*bufp)->b_next; 2382168404Spjd buf->b_next = NULL; 2383168404Spjd 2384168404Spjd ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 2385168404Spjd atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 2386168404Spjd if (refcount_is_zero(&hdr->b_refcnt)) { 2387168404Spjd ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size); 2388168404Spjd atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size); 2389168404Spjd } 2390168404Spjd hdr->b_datacnt -= 1; 2391168404Spjd arc_cksum_verify(buf); 2392168404Spjd 2393168404Spjd mutex_exit(hash_lock); 2394168404Spjd 2395168404Spjd nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP); 2396168404Spjd nhdr->b_size = blksz; 2397168404Spjd nhdr->b_spa = spa; 2398168404Spjd nhdr->b_type = type; 2399168404Spjd nhdr->b_buf = buf; 2400168404Spjd nhdr->b_state = arc_anon; 2401168404Spjd nhdr->b_arc_access = 0; 2402168404Spjd nhdr->b_flags = 0; 2403168404Spjd nhdr->b_datacnt = 1; 2404168404Spjd nhdr->b_freeze_cksum = NULL; 2405168404Spjd (void) refcount_add(&nhdr->b_refcnt, tag); 2406168404Spjd buf->b_hdr = nhdr; 2407168404Spjd atomic_add_64(&arc_anon->arcs_size, blksz); 2408168404Spjd 2409168404Spjd hdr = nhdr; 2410168404Spjd } else { 2411168404Spjd ASSERT(refcount_count(&hdr->b_refcnt) == 1); 2412168404Spjd ASSERT(!list_link_active(&hdr->b_arc_node)); 2413168404Spjd ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2414168404Spjd arc_change_state(arc_anon, hdr, hash_lock); 2415168404Spjd hdr->b_arc_access = 0; 2416168404Spjd mutex_exit(hash_lock); 2417168404Spjd bzero(&hdr->b_dva, sizeof (dva_t)); 2418168404Spjd hdr->b_birth = 0; 2419168404Spjd hdr->b_cksum0 = 0; 2420168404Spjd arc_buf_thaw(buf); 2421168404Spjd } 2422168404Spjd buf->b_efunc = NULL; 2423168404Spjd buf->b_private = NULL; 2424168404Spjd} 2425168404Spjd 2426168404Spjdint 2427168404Spjdarc_released(arc_buf_t *buf) 2428168404Spjd{ 2429168404Spjd return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon); 2430168404Spjd} 2431168404Spjd 2432168404Spjdint 2433168404Spjdarc_has_callback(arc_buf_t *buf) 2434168404Spjd{ 2435168404Spjd return (buf->b_efunc != NULL); 
2436168404Spjd}
2437168404Spjd
2438168404Spjd#ifdef ZFS_DEBUG
2439168404Spjdint
2440168404Spjdarc_referenced(arc_buf_t *buf)
2441168404Spjd{
2442168404Spjd	return (refcount_count(&buf->b_hdr->b_refcnt));
2443168404Spjd}
2444168404Spjd#endif
2445168404Spjd
2446168404Spjdstatic void
2447168404Spjdarc_write_ready(zio_t *zio)
2448168404Spjd{
2449168404Spjd	arc_write_callback_t *callback = zio->io_private;
2450168404Spjd	arc_buf_t *buf = callback->awcb_buf;
2451168404Spjd
2452168404Spjd	if (callback->awcb_ready) {
2453168404Spjd		ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
2454168404Spjd		callback->awcb_ready(zio, buf, callback->awcb_private);
2455168404Spjd	}
2456168404Spjd	arc_cksum_compute(buf);
2457168404Spjd}
2458168404Spjd
2459168404Spjdstatic void
2460168404Spjdarc_write_done(zio_t *zio)
2461168404Spjd{
2462168404Spjd	arc_write_callback_t *callback = zio->io_private;
2463168404Spjd	arc_buf_t *buf = callback->awcb_buf;
2464168404Spjd	arc_buf_hdr_t *hdr = buf->b_hdr;
2465168404Spjd
2466168404Spjd	hdr->b_acb = NULL;
2467168404Spjd
2468168404Spjd	/* this buffer is on no lists and is not in the hash table */
2469168404Spjd	ASSERT3P(hdr->b_state, ==, arc_anon);
2470168404Spjd
2471168404Spjd	hdr->b_dva = *BP_IDENTITY(zio->io_bp);
2472168404Spjd	hdr->b_birth = zio->io_bp->blk_birth;
2473168404Spjd	hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
2474168404Spjd	/*
2475168404Spjd	 * If the block to be written was all-zero, we may have
2476168404Spjd	 * compressed it away.  In this case no write was performed
2477168404Spjd	 * so there will be no dva/birth-date/checksum.  The buffer
2478168404Spjd	 * must therefore remain anonymous (and uncached).
2479168404Spjd	 */
2480168404Spjd	if (!BUF_EMPTY(hdr)) {
2481168404Spjd		arc_buf_hdr_t *exists;
2482168404Spjd		kmutex_t *hash_lock;
2483168404Spjd
2484168404Spjd		arc_cksum_verify(buf);
2485168404Spjd
2486168404Spjd		exists = buf_hash_insert(hdr, &hash_lock);
2487168404Spjd		if (exists) {
2488168404Spjd			/*
2489168404Spjd			 * This can only happen if we overwrite for
2490168404Spjd			 * sync-to-convergence, because we remove
2491168404Spjd			 * buffers from the hash table when we arc_free().
2492168404Spjd			 */
2493168404Spjd			ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
2494168404Spjd			    BP_IDENTITY(zio->io_bp)));
2495168404Spjd			ASSERT3U(zio->io_bp_orig.blk_birth, ==,
2496168404Spjd			    zio->io_bp->blk_birth);
2497168404Spjd
2498168404Spjd			ASSERT(refcount_is_zero(&exists->b_refcnt));
2499168404Spjd			arc_change_state(arc_anon, exists, hash_lock);
2500168404Spjd			mutex_exit(hash_lock);
2501168404Spjd			arc_hdr_destroy(exists);
2502168404Spjd			exists = buf_hash_insert(hdr, &hash_lock);
2503168404Spjd			ASSERT3P(exists, ==, NULL);
2504168404Spjd		}
2505168404Spjd		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
2506168404Spjd		arc_access(hdr, hash_lock);
2507168404Spjd		mutex_exit(hash_lock);
2508168404Spjd	} else if (callback->awcb_done == NULL) {
2509168404Spjd		int destroy_hdr;
2510168404Spjd		/*
2511168404Spjd		 * This is an anonymous buffer with no user callback,
2512168404Spjd		 * destroy it if there are no active references.
2513168404Spjd */ 2514168404Spjd mutex_enter(&arc_eviction_mtx); 2515168404Spjd destroy_hdr = refcount_is_zero(&hdr->b_refcnt); 2516168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2517168404Spjd mutex_exit(&arc_eviction_mtx); 2518168404Spjd if (destroy_hdr) 2519168404Spjd arc_hdr_destroy(hdr); 2520168404Spjd } else { 2521168404Spjd hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2522168404Spjd } 2523168404Spjd 2524168404Spjd if (callback->awcb_done) { 2525168404Spjd ASSERT(!refcount_is_zero(&hdr->b_refcnt)); 2526168404Spjd callback->awcb_done(zio, buf, callback->awcb_private); 2527168404Spjd } 2528168404Spjd 2529168404Spjd kmem_free(callback, sizeof (arc_write_callback_t)); 2530168404Spjd} 2531168404Spjd 2532168404Spjdzio_t * 2533168404Spjdarc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies, 2534168404Spjd uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 2535168404Spjd arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority, 2536168404Spjd int flags, zbookmark_t *zb) 2537168404Spjd{ 2538168404Spjd arc_buf_hdr_t *hdr = buf->b_hdr; 2539168404Spjd arc_write_callback_t *callback; 2540168404Spjd zio_t *zio; 2541168404Spjd 2542168404Spjd /* this is a private buffer - no locking required */ 2543168404Spjd ASSERT3P(hdr->b_state, ==, arc_anon); 2544168404Spjd ASSERT(BUF_EMPTY(hdr)); 2545168404Spjd ASSERT(!HDR_IO_ERROR(hdr)); 2546168404Spjd ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0); 2547168404Spjd ASSERT(hdr->b_acb == 0); 2548168404Spjd callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 2549168404Spjd callback->awcb_ready = ready; 2550168404Spjd callback->awcb_done = done; 2551168404Spjd callback->awcb_private = private; 2552168404Spjd callback->awcb_buf = buf; 2553168404Spjd hdr->b_flags |= ARC_IO_IN_PROGRESS; 2554168404Spjd zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp, 2555168404Spjd buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback, 2556168404Spjd priority, flags, zb); 2557168404Spjd 2558168404Spjd return (zio); 2559168404Spjd} 2560168404Spjd 2561168404Spjdint 2562168404Spjdarc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, 2563168404Spjd zio_done_func_t *done, void *private, uint32_t arc_flags) 2564168404Spjd{ 2565168404Spjd arc_buf_hdr_t *ab; 2566168404Spjd kmutex_t *hash_lock; 2567168404Spjd zio_t *zio; 2568168404Spjd 2569168404Spjd /* 2570168404Spjd * If this buffer is in the cache, release it, so it 2571168404Spjd * can be re-used. 2572168404Spjd */ 2573168404Spjd ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock); 2574168404Spjd if (ab != NULL) { 2575168404Spjd /* 2576168404Spjd * The checksum of blocks to free is not always 2577168404Spjd * preserved (eg. on the deadlist). However, if it is 2578168404Spjd * nonzero, it should match what we have in the cache. 2579168404Spjd */ 2580168404Spjd ASSERT(bp->blk_cksum.zc_word[0] == 0 || 2581168404Spjd ab->b_cksum0 == bp->blk_cksum.zc_word[0]); 2582168404Spjd if (ab->b_state != arc_anon) 2583168404Spjd arc_change_state(arc_anon, ab, hash_lock); 2584168404Spjd if (HDR_IO_IN_PROGRESS(ab)) { 2585168404Spjd /* 2586168404Spjd * This should only happen when we prefetch. 
2587168404Spjd */ 2588168404Spjd ASSERT(ab->b_flags & ARC_PREFETCH); 2589168404Spjd ASSERT3U(ab->b_datacnt, ==, 1); 2590168404Spjd ab->b_flags |= ARC_FREED_IN_READ; 2591168404Spjd if (HDR_IN_HASH_TABLE(ab)) 2592168404Spjd buf_hash_remove(ab); 2593168404Spjd ab->b_arc_access = 0; 2594168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 2595168404Spjd ab->b_birth = 0; 2596168404Spjd ab->b_cksum0 = 0; 2597168404Spjd ab->b_buf->b_efunc = NULL; 2598168404Spjd ab->b_buf->b_private = NULL; 2599168404Spjd mutex_exit(hash_lock); 2600168404Spjd } else if (refcount_is_zero(&ab->b_refcnt)) { 2601168404Spjd mutex_exit(hash_lock); 2602168404Spjd arc_hdr_destroy(ab); 2603168404Spjd ARCSTAT_BUMP(arcstat_deleted); 2604168404Spjd } else { 2605168404Spjd /* 2606168404Spjd * We still have an active reference on this 2607168404Spjd * buffer. This can happen, e.g., from 2608168404Spjd * dbuf_unoverride(). 2609168404Spjd */ 2610168404Spjd ASSERT(!HDR_IN_HASH_TABLE(ab)); 2611168404Spjd ab->b_arc_access = 0; 2612168404Spjd bzero(&ab->b_dva, sizeof (dva_t)); 2613168404Spjd ab->b_birth = 0; 2614168404Spjd ab->b_cksum0 = 0; 2615168404Spjd ab->b_buf->b_efunc = NULL; 2616168404Spjd ab->b_buf->b_private = NULL; 2617168404Spjd mutex_exit(hash_lock); 2618168404Spjd } 2619168404Spjd } 2620168404Spjd 2621168404Spjd zio = zio_free(pio, spa, txg, bp, done, private); 2622168404Spjd 2623168404Spjd if (arc_flags & ARC_WAIT) 2624168404Spjd return (zio_wait(zio)); 2625168404Spjd 2626168404Spjd ASSERT(arc_flags & ARC_NOWAIT); 2627168404Spjd zio_nowait(zio); 2628168404Spjd 2629168404Spjd return (0); 2630168404Spjd} 2631168404Spjd 2632168404Spjdvoid 2633168404Spjdarc_tempreserve_clear(uint64_t tempreserve) 2634168404Spjd{ 2635168404Spjd atomic_add_64(&arc_tempreserve, -tempreserve); 2636168404Spjd ASSERT((int64_t)arc_tempreserve >= 0); 2637168404Spjd} 2638168404Spjd 2639168404Spjdint 2640168404Spjdarc_tempreserve_space(uint64_t tempreserve) 2641168404Spjd{ 2642168404Spjd#ifdef ZFS_DEBUG 2643168404Spjd /* 2644168404Spjd * Once in a while, fail for no reason. Everything should cope. 2645168404Spjd */ 2646168404Spjd if (spa_get_random(10000) == 0) { 2647168404Spjd dprintf("forcing random failure\n"); 2648168404Spjd return (ERESTART); 2649168404Spjd } 2650168404Spjd#endif 2651168404Spjd if (tempreserve > arc_c/4 && !arc_no_grow) 2652168404Spjd arc_c = MIN(arc_c_max, tempreserve * 4); 2653168404Spjd if (tempreserve > arc_c) 2654168404Spjd return (ENOMEM); 2655168404Spjd 2656168404Spjd /* 2657168404Spjd * Throttle writes when the amount of dirty data in the cache 2658168404Spjd * gets too large. We try to keep the cache less than half full 2659168404Spjd * of dirty blocks so that our sync times don't grow too large. 2660168404Spjd * Note: if two requests come in concurrently, we might let them 2661168404Spjd * both succeed, when one of them should fail. Not a huge deal. 2662168404Spjd * 2663168404Spjd * XXX The limit should be adjusted dynamically to keep the time 2664168404Spjd * to sync a dataset fixed (around 1-5 seconds?). 
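	 *
	 * [Editor's worked example, not in the original: with arc_c = 1GB,
	 * the check below returns ERESTART once the proposed reservation
	 * plus outstanding reservations plus anonymous data would exceed
	 * 512MB, provided the outstanding reservations plus anonymous data
	 * alone already exceed 256MB.]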
2665168404Spjd	 */
2666168404Spjd
2667168404Spjd	if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
2668168404Spjd	    arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
2669168404Spjd		dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
2670168404Spjd		    "tempreserve=%lluK arc_c=%lluK\n",
2671168404Spjd		    arc_tempreserve>>10, arc_anon->arcs_lsize>>10,
2672168404Spjd		    tempreserve>>10, arc_c>>10);
2673168404Spjd		return (ERESTART);
2674168404Spjd	}
2675168404Spjd	atomic_add_64(&arc_tempreserve, tempreserve);
2676168404Spjd	return (0);
2677168404Spjd}
2678168404Spjd
2679168404Spjd#ifdef _KERNEL
2680168404Spjdstatic eventhandler_tag zfs_event_lowmem = NULL;
2681168404Spjd
2682168404Spjdstatic void
2683168404Spjdzfs_lowmem(void *arg __unused, int howto __unused)
2684168404Spjd{
2685168404Spjd
2686168404Spjd	zfs_needfree = 1;
2687168404Spjd	cv_signal(&arc_reclaim_thr_cv);
2688168404Spjd	while (zfs_needfree)
2689168404Spjd		tsleep(&zfs_needfree, 0, "zfs:lowmem", hz / 5);
2690168404Spjd}
2691168404Spjd#endif
2692168404Spjd
2693168404Spjdvoid
2694168404Spjdarc_init(void)
2695168404Spjd{
2696168404Spjd	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
2697168404Spjd	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
2698168404Spjd
2699168404Spjd	/* Convert seconds to clock ticks */
2700168404Spjd	arc_min_prefetch_lifespan = 1 * hz;
2701168404Spjd
2702168404Spjd	/* Start out with 1/8 of all memory */
2703168404Spjd	arc_c = physmem * PAGESIZE / 8;
2704168404Spjd#if 0
2705168404Spjd#ifdef _KERNEL
2706168404Spjd	/*
2707168404Spjd	 * On architectures where the physical memory can be larger
2708168404Spjd	 * than the addressable space (intel in 32-bit mode), we may
2709168404Spjd	 * need to limit the cache to 1/8 of VM size.
2710168404Spjd	 */
2711168404Spjd	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
2712168404Spjd#endif
2713168404Spjd#endif
2714168404Spjd	/* set min cache to 1/32 of all memory, or 64MB, whichever is more */
2715168404Spjd	arc_c_min = MAX(arc_c / 4, 64<<20);
2716168404Spjd	/* set max to 3/4 of all memory, or all but 1GB, whichever is more */
2717168404Spjd	if (arc_c * 8 >= 1<<30)
2718168404Spjd		arc_c_max = (arc_c * 8) - (1<<30);
2719168404Spjd	else
2720168404Spjd		arc_c_max = arc_c_min;
2721168404Spjd	arc_c_max = MAX(arc_c * 6, arc_c_max);
2722168404Spjd#ifdef notyet
2723168404Spjd	/*
2724168404Spjd	 * Allow the tunables to override our calculations if they are
2725168404Spjd	 * reasonable (i.e.
over 64MB) 2726168404Spjd */ 2727168404Spjd if (zfs_arc_max > 64<<20 && zfs_arc_max < physmem * PAGESIZE) 2728168404Spjd arc_c_max = zfs_arc_max; 2729168404Spjd if (zfs_arc_min > 64<<20 && zfs_arc_min <= arc_c_max) 2730168404Spjd arc_c_min = zfs_arc_min; 2731168404Spjd#endif 2732168404Spjd arc_c = arc_c_max; 2733168404Spjd arc_p = (arc_c >> 1); 2734168404Spjd 2735168404Spjd /* if kmem_flags are set, lets try to use less memory */ 2736168404Spjd if (kmem_debugging()) 2737168404Spjd arc_c = arc_c / 2; 2738168404Spjd if (arc_c < arc_c_min) 2739168404Spjd arc_c = arc_c_min; 2740168404Spjd 2741168404Spjd arc_anon = &ARC_anon; 2742168404Spjd arc_mru = &ARC_mru; 2743168404Spjd arc_mru_ghost = &ARC_mru_ghost; 2744168404Spjd arc_mfu = &ARC_mfu; 2745168404Spjd arc_mfu_ghost = &ARC_mfu_ghost; 2746168404Spjd arc_size = 0; 2747168404Spjd 2748168404Spjd mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2749168404Spjd mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2750168404Spjd mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2751168404Spjd mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2752168404Spjd mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL); 2753168404Spjd 2754168404Spjd list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t), 2755168404Spjd offsetof(arc_buf_hdr_t, b_arc_node)); 2756168404Spjd list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2757168404Spjd offsetof(arc_buf_hdr_t, b_arc_node)); 2758168404Spjd list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t), 2759168404Spjd offsetof(arc_buf_hdr_t, b_arc_node)); 2760168404Spjd list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t), 2761168404Spjd offsetof(arc_buf_hdr_t, b_arc_node)); 2762168404Spjd 2763168404Spjd buf_init(); 2764168404Spjd 2765168404Spjd arc_thread_exit = 0; 2766168404Spjd arc_eviction_list = NULL; 2767168404Spjd mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL); 2768168404Spjd bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t)); 2769168404Spjd 2770168404Spjd arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED, 2771168404Spjd sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL); 2772168404Spjd 2773168404Spjd if (arc_ksp != NULL) { 2774168404Spjd arc_ksp->ks_data = &arc_stats; 2775168404Spjd kstat_install(arc_ksp); 2776168404Spjd } 2777168404Spjd 2778168404Spjd (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0, 2779168404Spjd TS_RUN, minclsyspri); 2780168404Spjd 2781168404Spjd#ifdef _KERNEL 2782168404Spjd zfs_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, zfs_lowmem, NULL, 2783168404Spjd EVENTHANDLER_PRI_FIRST); 2784168404Spjd#endif 2785168404Spjd 2786168404Spjd arc_dead = FALSE; 2787168404Spjd} 2788168404Spjd 2789168404Spjdvoid 2790168404Spjdarc_fini(void) 2791168404Spjd{ 2792168404Spjd mutex_enter(&arc_reclaim_thr_lock); 2793168404Spjd arc_thread_exit = 1; 2794168404Spjd cv_signal(&arc_reclaim_thr_cv); 2795168404Spjd while (arc_thread_exit != 0) 2796168404Spjd cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock); 2797168404Spjd mutex_exit(&arc_reclaim_thr_lock); 2798168404Spjd 2799168404Spjd arc_flush(); 2800168404Spjd 2801168404Spjd arc_dead = TRUE; 2802168404Spjd 2803168404Spjd if (arc_ksp != NULL) { 2804168404Spjd kstat_delete(arc_ksp); 2805168404Spjd arc_ksp = NULL; 2806168404Spjd } 2807168404Spjd 2808168404Spjd mutex_destroy(&arc_eviction_mtx); 2809168404Spjd mutex_destroy(&arc_reclaim_thr_lock); 2810168404Spjd cv_destroy(&arc_reclaim_thr_cv); 2811168404Spjd 2812168404Spjd 
list_destroy(&arc_mru->arcs_list); 2813168404Spjd list_destroy(&arc_mru_ghost->arcs_list); 2814168404Spjd list_destroy(&arc_mfu->arcs_list); 2815168404Spjd list_destroy(&arc_mfu_ghost->arcs_list); 2816168404Spjd 2817168404Spjd mutex_destroy(&arc_anon->arcs_mtx); 2818168404Spjd mutex_destroy(&arc_mru->arcs_mtx); 2819168404Spjd mutex_destroy(&arc_mru_ghost->arcs_mtx); 2820168404Spjd mutex_destroy(&arc_mfu->arcs_mtx); 2821168404Spjd mutex_destroy(&arc_mfu_ghost->arcs_mtx); 2822168404Spjd 2823168404Spjd buf_fini(); 2824168404Spjd 2825168404Spjd#ifdef _KERNEL 2826168404Spjd if (zfs_event_lowmem != NULL) 2827168404Spjd EVENTHANDLER_DEREGISTER(vm_lowmem, zfs_event_lowmem); 2828168404Spjd#endif 2829168404Spjd} 2830
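
/*
 * [Editor's worked example of the sizing policy in arc_init(), not part
 * of the original file: with 8GB of physical memory, arc_c starts at
 * 8GB/8 = 1GB, giving arc_c_min = MAX(1GB/4, 64MB) = 256MB and
 * arc_c_max = MAX(6 * 1GB, 8GB - 1GB) = 7GB; arc_c is then raised to
 * arc_c_max and arc_p starts at arc_c / 2.  The "notyet" zfs_arc_max /
 * zfs_arc_min tunables would override these bounds when they fall in a
 * sane range (i.e. over 64MB and within physical memory).]
 */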