arc.c revision 168696
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory.  This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about.  Our cache is not so simple.  At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them.  Blocks are only evictable
 *    when there are no external references active.  This makes
 *    eviction far more problematic:  we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space.  In these circumstances we are unable to adjust the cache
 *    size.  To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss.  Our model has a variable sized cache.  It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 *    elements of the cache are therefore exactly the same size.  So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict.
 *    In our model, we have variable sized cache blocks (ranging from
 *    512 bytes to 128K bytes).  We therefore choose a set of blocks to
 *    evict to make space for a cache miss that approximates as closely
 *    as possible the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists.  The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2.  We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table.  It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state.  When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use: mutex_tryenter() to avoid deadlock.  Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()).  Note however that the data associated
 * with the buffer may be evicted prior to the callback.  The callback
 * must be made with *no locks held* (to prevent deadlock).  Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
 */
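
/*
 * Illustrative sketch of the lock ordering described above (not part of
 * the original code; "state" and "ab" stand for any arc_state_t and
 * arc_buf_hdr_t):
 *
 *	mutex_enter(&state->arcs_mtx);		arc list lock first
 *	hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock))		never block here: a thread
 *		continue;			already holding the hash lock
 *	...					may be waiting for arcs_mtx
 *	mutex_exit(hash_lock);
 *	mutex_exit(&state->arcs_mtx);
 *
 * arc_evict() below follows exactly this pattern.
 */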

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/sdt.h>

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;

/*
 * These tunables are for performance analysis.
 */
u_long zfs_arc_max;
u_long zfs_arc_min;
TUNABLE_ULONG("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_ULONG("vfs.zfs.arc_min", &zfs_arc_min);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RD, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_ULONG(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RD, &zfs_arc_min, 0,
    "Minimum ARC size");

/*
 * Note that buffers can be in one of 5 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 * When there are no active references to the buffer, they
 * are linked onto one of the lists in arc.  These are the
 * only buffers that can be evicted or deleted.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 */
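
/*
 * A sketch of the common lifecycle implied above (illustrative only;
 * the transitions are performed by arc_access(), which is declared
 * below but defined outside this excerpt):
 *
 *	arc_anon --(write, gets DVA)--> arc_mru
 *	arc_mru  --(second access)----> arc_mfu
 *	arc_mru  --(evicted)----------> arc_mru_ghost --(hit)--> arc_mfu
 *	arc_mfu  --(evicted)----------> arc_mfu_ghost --(hit)--> arc_mfu
 */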

typedef struct arc_state {
	list_t		arcs_list;	/* linked list of evictable buffers in state */
	uint64_t	arcs_lsize;	/* total size of buffers in the linked list */
	uint64_t	arcs_size;	/* total size of all buffers in this state */
	kmutex_t	arcs_mtx;
} arc_state_t;

/* The 5 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
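
/*
 * Illustrative note: ARCSTAT_MAX above is a lock-free maximum update.
 * If two threads race to publish values 7 and 9, each rereads the stat
 * and retries its compare-and-swap only while its own value is still
 * the larger, so the stat converges to 9 under any interleaving.
 */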
"c_min", KSTAT_DATA_UINT64 }, 254168404Spjd { "c_max", KSTAT_DATA_UINT64 }, 255168404Spjd { "size", KSTAT_DATA_UINT64 } 256168404Spjd}; 257168404Spjd 258168404Spjd#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 259168404Spjd 260168404Spjd#define ARCSTAT_INCR(stat, val) \ 261168404Spjd atomic_add_64(&arc_stats.stat.value.ui64, (val)); 262168404Spjd 263168404Spjd#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 264168404Spjd#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 265168404Spjd 266168404Spjd#define ARCSTAT_MAX(stat, val) { \ 267168404Spjd uint64_t m; \ 268168404Spjd while ((val) > (m = arc_stats.stat.value.ui64) && \ 269168404Spjd (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 270168404Spjd continue; \ 271168404Spjd} 272168404Spjd 273168404Spjd#define ARCSTAT_MAXSTAT(stat) \ 274168404Spjd ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 275168404Spjd 276168404Spjd/* 277168404Spjd * We define a macro to allow ARC hits/misses to be easily broken down by 278168404Spjd * two separate conditions, giving a total of four different subtypes for 279168404Spjd * each of hits and misses (so eight statistics total). 280168404Spjd */ 281168404Spjd#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 282168404Spjd if (cond1) { \ 283168404Spjd if (cond2) { \ 284168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 285168404Spjd } else { \ 286168404Spjd ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 287168404Spjd } \ 288168404Spjd } else { \ 289168404Spjd if (cond2) { \ 290168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 291168404Spjd } else { \ 292168404Spjd ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 293168404Spjd } \ 294168404Spjd } 295168404Spjd 296168404Spjdkstat_t *arc_ksp; 297168404Spjdstatic arc_state_t *arc_anon; 298168404Spjdstatic arc_state_t *arc_mru; 299168404Spjdstatic arc_state_t *arc_mru_ghost; 300168404Spjdstatic arc_state_t *arc_mfu; 301168404Spjdstatic arc_state_t *arc_mfu_ghost; 302168404Spjd 303168404Spjd/* 304168404Spjd * There are several ARC variables that are critical to export as kstats -- 305168404Spjd * but we don't want to have to grovel around in the kstat whenever we wish to 306168404Spjd * manipulate them. For these variables, we therefore define them to be in 307168404Spjd * terms of the statistic variable. This assures that we are not introducing 308168404Spjd * the possibility of inconsistency by having shadow copies of the variables, 309168404Spjd * while still allowing the code to be readable. 

kstat_t			*arc_ksp;
static arc_state_t 	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them.  For these variables, we therefore define them to be in
 * terms of the statistic variable.  This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_byteswap_func_t	*acb_byteswap;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;
	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	spa_t			*b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost)

/*
 * Private ARC flags.  These flags are private ARC only flags that will show up
 * in b_flags in the arc_buf_hdr_t.  Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read.  However, these flags
 * should never be passed and should only be set by ARC code.  When adding new
 * public flags, make sure not to smash the private ones.
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	128

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(buf) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth)))

uint64_t zfs_crc64_table[256];

static uint64_t
buf_hash(spa_t *spa, dva_t *dva, uint64_t birth)
{
	uintptr_t spav = (uintptr_t)spa;
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spav>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static arc_buf_hdr_t *
buf_hash_find(spa_t *spa, dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
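
/*
 * Usage sketch (illustrative): a lookup holds the returned hash mutex
 * across any use of the header, e.g.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(spa, BP_IDENTITY(bp),
 *	    bp->blk_birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... hdr cannot be evicted or freed here ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * This is how arc_read() (outside this excerpt) distinguishes a cache
 * hit from a miss.
 */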

/*
 * Insert an entry into the hash table.  If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size.  The table will take up
	 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
	 */
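	/*
	 * Worked example (illustrative): with 4GB of physical memory and
	 * 4KB pages, physmem * PAGESIZE == 2^32, so the loop below stops
	 * at hsize == 2^16.  That is 65536 buckets, or 512KB of pointers
	 * at 8 bytes each -- the 128KB per GB quoted above.
	 */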
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, NULL, NULL, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static void
arc_cksum_compute(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	if (buf->b_hdr->b_state != arc_anon)
		panic("modifying non-anon buffer!");
	if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
		panic("modifying buffer while i/o in progress!");
	arc_cksum_verify(buf);
	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

ASSERT(buf->b_hdr->b_freeze_cksum != NULL || 719168404Spjd buf->b_hdr->b_state == arc_anon); 720168404Spjd arc_cksum_compute(buf); 721168404Spjd} 722168404Spjd 723168404Spjdstatic void 724168404Spjdadd_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 725168404Spjd{ 726168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 727168404Spjd 728168404Spjd if ((refcount_add(&ab->b_refcnt, tag) == 1) && 729168404Spjd (ab->b_state != arc_anon)) { 730168404Spjd uint64_t delta = ab->b_size * ab->b_datacnt; 731168404Spjd 732168404Spjd ASSERT(!MUTEX_HELD(&ab->b_state->arcs_mtx)); 733168404Spjd mutex_enter(&ab->b_state->arcs_mtx); 734168404Spjd ASSERT(list_link_active(&ab->b_arc_node)); 735168404Spjd list_remove(&ab->b_state->arcs_list, ab); 736168404Spjd if (GHOST_STATE(ab->b_state)) { 737168404Spjd ASSERT3U(ab->b_datacnt, ==, 0); 738168404Spjd ASSERT3P(ab->b_buf, ==, NULL); 739168404Spjd delta = ab->b_size; 740168404Spjd } 741168404Spjd ASSERT(delta > 0); 742168404Spjd ASSERT3U(ab->b_state->arcs_lsize, >=, delta); 743168404Spjd atomic_add_64(&ab->b_state->arcs_lsize, -delta); 744168404Spjd mutex_exit(&ab->b_state->arcs_mtx); 745168404Spjd /* remove the prefetch flag is we get a reference */ 746168404Spjd if (ab->b_flags & ARC_PREFETCH) 747168404Spjd ab->b_flags &= ~ARC_PREFETCH; 748168404Spjd } 749168404Spjd} 750168404Spjd 751168404Spjdstatic int 752168404Spjdremove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag) 753168404Spjd{ 754168404Spjd int cnt; 755168404Spjd arc_state_t *state = ab->b_state; 756168404Spjd 757168404Spjd ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 758168404Spjd ASSERT(!GHOST_STATE(state)); 759168404Spjd 760168404Spjd if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) && 761168404Spjd (state != arc_anon)) { 762168404Spjd ASSERT(!MUTEX_HELD(&state->arcs_mtx)); 763168404Spjd mutex_enter(&state->arcs_mtx); 764168404Spjd ASSERT(!list_link_active(&ab->b_arc_node)); 765168404Spjd list_insert_head(&state->arcs_list, ab); 766168404Spjd ASSERT(ab->b_datacnt > 0); 767168404Spjd atomic_add_64(&state->arcs_lsize, ab->b_size * ab->b_datacnt); 768168404Spjd ASSERT3U(state->arcs_size, >=, state->arcs_lsize); 769168404Spjd mutex_exit(&state->arcs_mtx); 770168404Spjd } 771168404Spjd return (cnt); 772168404Spjd} 773168404Spjd 774168404Spjd/* 775168404Spjd * Move the supplied buffer to the indicated state. The mutex 776168404Spjd * for the buffer must be held by the caller. 777168404Spjd */ 778168404Spjdstatic void 779168404Spjdarc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock) 780168404Spjd{ 781168404Spjd arc_state_t *old_state = ab->b_state; 782168404Spjd int64_t refcnt = refcount_count(&ab->b_refcnt); 783168404Spjd uint64_t from_delta, to_delta; 784168404Spjd 785168404Spjd ASSERT(MUTEX_HELD(hash_lock)); 786168404Spjd ASSERT(new_state != old_state); 787168404Spjd ASSERT(refcnt == 0 || ab->b_datacnt > 0); 788168404Spjd ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state)); 789168404Spjd 790168404Spjd from_delta = to_delta = ab->b_datacnt * ab->b_size; 791168404Spjd 792168404Spjd /* 793168404Spjd * If this buffer is evictable, transfer it from the 794168404Spjd * old state list to the new state list. 

/*
 * Move the supplied buffer to the indicated state.  The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&old_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&old_state->arcs_mtx);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(&old_state->arcs_list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-null datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(old_state->arcs_lsize, >=, from_delta);
			atomic_add_64(&old_state->arcs_lsize, -from_delta);

			if (use_mutex)
				mutex_exit(&old_state->arcs_mtx);
		}
		if (new_state != arc_anon) {
			int use_mutex = !MUTEX_HELD(&new_state->arcs_mtx);

			if (use_mutex)
				mutex_enter(&new_state->arcs_mtx);

			list_insert_head(&new_state->arcs_list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(&new_state->arcs_lsize, to_delta);
			ASSERT3U(new_state->arcs_size + to_delta, >=,
			    new_state->arcs_lsize);

			if (use_mutex)
				mutex_exit(&new_state->arcs_mtx);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && old_state != arc_anon) {
		buf_hash_remove(ab);
	}

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa;
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	mutex_init(&hdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}
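
/*
 * Usage sketch (illustrative): a caller creates an anonymous buffer for
 * new dirty data and later drops its hold with the same tag:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	bcopy(src, buf->b_data, size);
 *	...
 *	(void) arc_buf_remove_ref(buf, tag);
 *
 * The buffer starts in arc_anon and acquires a DVA (and a hash table
 * entry) once it is written out.
 */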

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is currently being evicted via
	 * arc_do_user_evicts().
	 */
	mutex_enter(&arc_eviction_mtx);
	hdr = buf->b_hdr;
	if (hdr == NULL) {
		mutex_exit(&arc_eviction_mtx);
		return;
	}
	hash_lock = HDR_LOCK(hdr);
	mutex_exit(&arc_eviction_mtx);

	mutex_enter(hash_lock);
	if (buf->b_data == NULL) {
		/*
		 * This buffer is evicted.
		 */
		mutex_exit(hash_lock);
		return;
	}

	ASSERT(buf->b_hdr == hdr);
	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
	add_reference(hdr, hash_lock, tag);
	arc_access(hdr, hash_lock);
	mutex_exit(hash_lock);
	ARCSTAT_BUMP(arcstat_hits);
	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
	    data, metadata, hits);
}

static void
arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
{
	arc_buf_t **bufp;

	/* free up data associated with the buf */
	if (buf->b_data) {
		arc_state_t *state = buf->b_hdr->b_state;
		uint64_t size = buf->b_hdr->b_size;
		arc_buf_contents_t type = buf->b_hdr->b_type;

		arc_cksum_verify(buf);
		if (!recycle) {
			if (type == ARC_BUFC_METADATA) {
				zio_buf_free(buf->b_data, size);
			} else {
				ASSERT(type == ARC_BUFC_DATA);
				zio_data_buf_free(buf->b_data, size);
			}
			atomic_add_64(&arc_size, -size);
		}
		if (list_link_active(&buf->b_hdr->b_arc_node)) {
			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
			ASSERT(state != arc_anon);
			ASSERT3U(state->arcs_lsize, >=, size);
			atomic_add_64(&state->arcs_lsize, -size);
		}
		ASSERT3U(state->arcs_size, >=, size);
		atomic_add_64(&state->arcs_size, -size);
		buf->b_data = NULL;
		ASSERT(buf->b_hdr->b_datacnt > 0);
		buf->b_hdr->b_datacnt -= 1;
	}

	/* only remove the buf if requested */
	if (!all)
		return;

	/* remove the buf from the hdr list */
	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
		continue;
	*bufp = buf->b_next;

	ASSERT(buf->b_efunc == NULL);

	/* clean up the buf */
	buf->b_hdr = NULL;
	kmem_cache_free(buf_cache, buf);
}

static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	ASSERT3P(hdr->b_state, ==, arc_anon);
	ASSERT(!HDR_IO_IN_PROGRESS(hdr));

	if (!BUF_EMPTY(hdr)) {
		ASSERT(!HDR_IN_HASH_TABLE(hdr));
		bzero(&hdr->b_dva, sizeof (dva_t));
		hdr->b_birth = 0;
		hdr->b_cksum0 = 0;
	}
	while (hdr->b_buf) {
		arc_buf_t *buf = hdr->b_buf;

		if (buf->b_efunc) {
			mutex_enter(&arc_eviction_mtx);
			ASSERT(buf->b_hdr != NULL);
			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
			hdr->b_buf = buf->b_next;
			buf->b_hdr = &arc_eviction_hdr;
			buf->b_next = arc_eviction_list;
			arc_eviction_list = buf;
			mutex_exit(&arc_eviction_mtx);
		} else {
			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
		}
	}
	if (hdr->b_freeze_cksum != NULL) {
		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		hdr->b_freeze_cksum = NULL;
	}
	mutex_destroy(&hdr->b_freeze_lock);

	ASSERT(!list_link_active(&hdr->b_arc_node));
	ASSERT3P(hdr->b_hash_next, ==, NULL);
	ASSERT3P(hdr->b_acb, ==, NULL);
	kmem_cache_free(hdr_cache, hdr);
}

void
arc_buf_free(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	int hashed = hdr->b_state != arc_anon;

	ASSERT(buf->b_efunc == NULL);
	ASSERT(buf->b_data != NULL);

	if (hashed) {
		kmutex_t *hash_lock = HDR_LOCK(hdr);

		mutex_enter(hash_lock);
		(void) remove_reference(hdr, hash_lock, tag);
		if (hdr->b_datacnt > 1)
			arc_buf_destroy(buf, FALSE, TRUE);
		else
			hdr->b_flags |= ARC_BUF_AVAILABLE;
		mutex_exit(hash_lock);
	} else if (HDR_IO_IN_PROGRESS(hdr)) {
		int destroy_hdr;
		/*
		 * We are in the middle of an async write.  Don't destroy
		 * this buffer unless the write completes before we finish
		 * decrementing the reference count.
		 */
		mutex_enter(&arc_eviction_mtx);
		(void) remove_reference(hdr, NULL, tag);
		ASSERT(refcount_is_zero(&hdr->b_refcnt));
		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
		mutex_exit(&arc_eviction_mtx);
		if (destroy_hdr)
			arc_hdr_destroy(hdr);
	} else {
		if (remove_reference(hdr, NULL, tag) > 0) {
			ASSERT(HDR_IO_ERROR(hdr));
			arc_buf_destroy(buf, FALSE, TRUE);
		} else {
			arc_hdr_destroy(hdr);
		}
	}
}

int
arc_buf_remove_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;
	kmutex_t *hash_lock = HDR_LOCK(hdr);
	int no_callback = (buf->b_efunc == NULL);

	if (hdr->b_state == arc_anon) {
		arc_buf_free(buf, tag);
		return (no_callback);
	}

	mutex_enter(hash_lock);
	ASSERT(hdr->b_state != arc_anon);
	ASSERT(buf->b_data != NULL);

	(void) remove_reference(hdr, hash_lock, tag);
	if (hdr->b_datacnt > 1) {
		if (no_callback)
			arc_buf_destroy(buf, FALSE, TRUE);
	} else if (no_callback) {
		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
		hdr->b_flags |= ARC_BUF_AVAILABLE;
	}
	ASSERT(no_callback || hdr->b_datacnt > 1 ||
	    refcount_is_zero(&hdr->b_refcnt));
	mutex_exit(hash_lock);
	return (no_callback);
}

int
arc_buf_size(arc_buf_t *buf)
{
	return (buf->b_hdr->b_size);
}

/*
 * Evict buffers from list until we've removed the specified number of
 * bytes.  Move the removed buffers to the appropriate evict state.
 * If the recycle flag is set, then attempt to "recycle" a buffer:
 * - look for a buffer to evict that is `bytes' long.
 * - return the data block from this buffer rather than freeing it.
 * This flag is used by callers that are trying to make space for a
 * new buffer in a full arc cache.
 */
static void *
arc_evict(arc_state_t *state, int64_t bytes, boolean_t recycle,
    arc_buf_contents_t type)
{
	arc_state_t *evicted_state;
	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
	arc_buf_hdr_t *ab, *ab_prev = NULL;
	kmutex_t *hash_lock;
	boolean_t have_lock;
	void *stolen = NULL;

	ASSERT(state == arc_mru || state == arc_mfu);

	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

	mutex_enter(&state->arcs_mtx);
	mutex_enter(&evicted_state->arcs_mtx);

	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		/* prefetch buffers have a minimum lifespan */
		if (HDR_IO_IN_PROGRESS(ab) ||
		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
		    lbolt - ab->b_arc_access < arc_min_prefetch_lifespan)) {
			skipped++;
			continue;
		}
		/* "lookahead" for better eviction candidate */
		if (recycle && ab->b_size != bytes &&
		    ab_prev && ab_prev->b_size == bytes)
			continue;
		hash_lock = HDR_LOCK(ab);
		have_lock = MUTEX_HELD(hash_lock);
		if (have_lock || mutex_tryenter(hash_lock)) {
			ASSERT3U(refcount_count(&ab->b_refcnt), ==, 0);
			ASSERT(ab->b_datacnt > 0);
			while (ab->b_buf) {
				arc_buf_t *buf = ab->b_buf;
				if (buf->b_data) {
					bytes_evicted += ab->b_size;
					if (recycle && ab->b_type == type &&
					    ab->b_size == bytes) {
						stolen = buf->b_data;
						recycle = FALSE;
					}
				}
				if (buf->b_efunc) {
					mutex_enter(&arc_eviction_mtx);
					arc_buf_destroy(buf,
					    buf->b_data == stolen, FALSE);
					ab->b_buf = buf->b_next;
					buf->b_hdr = &arc_eviction_hdr;
					buf->b_next = arc_eviction_list;
					arc_eviction_list = buf;
					mutex_exit(&arc_eviction_mtx);
				} else {
					arc_buf_destroy(buf,
					    buf->b_data == stolen, TRUE);
				}
			}
			ASSERT(ab->b_datacnt == 0);
			arc_change_state(evicted_state, ab, hash_lock);
			ASSERT(HDR_IN_HASH_TABLE(ab));
			ab->b_flags = ARC_IN_HASH_TABLE;
			DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
			if (!have_lock)
				mutex_exit(hash_lock);
			if (bytes >= 0 && bytes_evicted >= bytes)
				break;
		} else {
			missed += 1;
		}
	}

	mutex_exit(&evicted_state->arcs_mtx);
	mutex_exit(&state->arcs_mtx);

	if (bytes_evicted < bytes)
		dprintf("only evicted %lld bytes from %x",
		    (longlong_t)bytes_evicted, state);

	if (skipped)
		ARCSTAT_INCR(arcstat_evict_skip, skipped);

	if (missed)
		ARCSTAT_INCR(arcstat_mutex_miss, missed);

	return (stolen);
}
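
/*
 * Usage sketch (illustrative): a caller such as arc_get_data_buf()
 * (declared above, defined outside this excerpt) filling a full cache
 * can try to recycle an evicted block of the same size instead of
 * freeing it and allocating a new one, roughly:
 *
 *	buf->b_data = arc_evict(state, size, TRUE, type);
 *	if (buf->b_data == NULL)
 *		buf->b_data = zio_buf_alloc(size);
 *
 * This is the role of the "lookahead" test in the loop above: skip a
 * non-matching buffer when the next candidate is an exact size match.
 */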

/*
 * Remove buffers from list until we've removed the specified number of
 * bytes.  Destroy the buffers that are removed.
 */
static void
arc_evict_ghost(arc_state_t *state, int64_t bytes)
{
	arc_buf_hdr_t *ab, *ab_prev;
	kmutex_t *hash_lock;
	uint64_t bytes_deleted = 0;
	uint64_t bufs_skipped = 0;

	ASSERT(GHOST_STATE(state));
top:
	mutex_enter(&state->arcs_mtx);
	for (ab = list_tail(&state->arcs_list); ab; ab = ab_prev) {
		ab_prev = list_prev(&state->arcs_list, ab);
		hash_lock = HDR_LOCK(ab);
		if (mutex_tryenter(hash_lock)) {
			ASSERT(!HDR_IO_IN_PROGRESS(ab));
			ASSERT(ab->b_buf == NULL);
			arc_change_state(arc_anon, ab, hash_lock);
			mutex_exit(hash_lock);
			ARCSTAT_BUMP(arcstat_deleted);
			bytes_deleted += ab->b_size;
			arc_hdr_destroy(ab);
			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
			if (bytes >= 0 && bytes_deleted >= bytes)
				break;
		} else {
			if (bytes < 0) {
				mutex_exit(&state->arcs_mtx);
				mutex_enter(hash_lock);
				mutex_exit(hash_lock);
				goto top;
			}
			bufs_skipped += 1;
		}
	}
	mutex_exit(&state->arcs_mtx);

	if (bufs_skipped) {
		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
		ASSERT(bytes >= 0);
	}

	if (bytes_deleted < bytes)
		dprintf("only deleted %lld bytes from %p",
		    (longlong_t)bytes_deleted, state);
}

static void
arc_adjust(void)
{
	int64_t top_sz, mru_over, arc_over, todelete;

	top_sz = arc_anon->arcs_size + arc_mru->arcs_size;

	if (top_sz > arc_p && arc_mru->arcs_lsize > 0) {
		int64_t toevict = MIN(arc_mru->arcs_lsize, top_sz - arc_p);
		(void) arc_evict(arc_mru, toevict, FALSE, ARC_BUFC_UNDEF);
		top_sz = arc_anon->arcs_size + arc_mru->arcs_size;
	}

	mru_over = top_sz + arc_mru_ghost->arcs_size - arc_c;

	if (mru_over > 0) {
		if (arc_mru_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mru_ghost->arcs_lsize, mru_over);
			arc_evict_ghost(arc_mru_ghost, todelete);
		}
	}

	if ((arc_over = arc_size - arc_c) > 0) {
		int64_t tbl_over;

		if (arc_mfu->arcs_lsize > 0) {
			int64_t toevict = MIN(arc_mfu->arcs_lsize, arc_over);
			(void) arc_evict(arc_mfu, toevict, FALSE,
			    ARC_BUFC_UNDEF);
		}

		tbl_over = arc_size + arc_mru_ghost->arcs_lsize +
		    arc_mfu_ghost->arcs_lsize - arc_c*2;

		if (tbl_over > 0 && arc_mfu_ghost->arcs_lsize > 0) {
			todelete = MIN(arc_mfu_ghost->arcs_lsize, tbl_over);
			arc_evict_ghost(arc_mfu_ghost, todelete);
		}
	}
}

static void
arc_do_user_evicts(void)
{
	mutex_enter(&arc_eviction_mtx);
	while (arc_eviction_list != NULL) {
		arc_buf_t *buf = arc_eviction_list;
		arc_eviction_list = buf->b_next;
		buf->b_hdr = NULL;
		mutex_exit(&arc_eviction_mtx);

		if (buf->b_efunc != NULL)
			VERIFY(buf->b_efunc(buf) == 0);

		buf->b_efunc = NULL;
		buf->b_private = NULL;
		kmem_cache_free(buf_cache, buf);
		mutex_enter(&arc_eviction_mtx);
	}
	mutex_exit(&arc_eviction_mtx);
}

/*
 * Flush all *evictable* data from the cache.
 * NOTE: this will not touch "active" (i.e. referenced) data.
 */
void
arc_flush(void)
{
	while (list_head(&arc_mru->arcs_list))
		(void) arc_evict(arc_mru, -1, FALSE, ARC_BUFC_UNDEF);
	while (list_head(&arc_mfu->arcs_list))
		(void) arc_evict(arc_mfu, -1, FALSE, ARC_BUFC_UNDEF);

	arc_evict_ghost(arc_mru_ghost, -1);
	arc_evict_ghost(arc_mfu_ghost, -1);

	mutex_enter(&arc_reclaim_thr_lock);
	arc_do_user_evicts();
	mutex_exit(&arc_reclaim_thr_lock);
	ASSERT(arc_eviction_list == NULL);
}

int arc_shrink_shift = 5;		/* log2(fraction of arc to reclaim) */
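
/*
 * Worked example (illustrative): with the default arc_shrink_shift of 5,
 * one call to arc_shrink() below lowers the target arc_c by arc_c >> 5,
 * i.e. 1/32nd (about 3%), and lowers arc_p by the same fraction of
 * itself, bounded below by arc_c_min.
 */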

void
arc_shrink(void)
{
	if (arc_c > arc_c_min) {
		uint64_t to_free;

#ifdef _KERNEL
		to_free = arc_c >> arc_shrink_shift;
#else
		to_free = arc_c >> arc_shrink_shift;
#endif
		if (arc_c > arc_c_min + to_free)
			atomic_add_64(&arc_c, -to_free);
		else
			arc_c = arc_c_min;

		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
		if (arc_c > arc_size)
			arc_c = MAX(arc_size, arc_c_min);
		if (arc_p > arc_c)
			arc_p = (arc_c >> 1);
		ASSERT(arc_c >= arc_c_min);
		ASSERT((int64_t)arc_p >= 0);
	}

	if (arc_size > arc_c)
		arc_adjust();
}

static int zfs_needfree = 0;

static int
arc_reclaim_needed(void)
{
#if 0
	uint64_t extra;
#endif

#ifdef _KERNEL

	if (zfs_needfree)
		return (1);

#if 0
	/*
	 * check to make sure that swapfs has enough space so that anon
	 * reservations can still succeed. anon_resvmem() checks that the
	 * availrmem is greater than swapfs_minfree, and the number of reserved
	 * swap pages.  We also add a bit of extra here just to prevent
	 * circumstances from getting really dire.

static int zfs_needfree = 0;

static int
arc_reclaim_needed(void)
{
#if 0
        uint64_t extra;
#endif

#ifdef _KERNEL

        if (zfs_needfree)
                return (1);

#if 0
        /*
         * check to make sure that swapfs has enough space so that anon
         * reservations can still succeed. anon_resvmem() checks that the
         * availrmem is greater than swapfs_minfree, and the number of reserved
         * swap pages. We also add a bit of extra here just to prevent
         * circumstances from getting really dire.
         */
        if (availrmem < swapfs_minfree + swapfs_reserve + extra)
                return (1);

        /*
         * If zio data pages are being allocated out of a separate heap segment,
         * then check that the size of available vmem for this area remains
         * above 1/4th free. This needs to be done when the size of the
         * non-default segment is smaller than physical memory, so we could
         * conceivably run out of VA in that segment before running out of
         * physical memory.
         */
        if (zio_arena != NULL) {
                size_t arc_ziosize =
                    btop(vmem_size(zio_arena, VMEM_FREE | VMEM_ALLOC));

                if ((physmem > arc_ziosize) &&
                    (btop(vmem_size(zio_arena, VMEM_FREE)) < arc_ziosize >> 2))
                        return (1);
        }

#if defined(__i386)
        /*
         * If we're on an i386 platform, it's possible that we'll exhaust the
         * kernel heap space before we ever run out of available physical
         * memory. Most checks of the size of the heap_area compare against
         * tune.t_minarmem, which is the minimum available real memory that we
         * can have in the system. However, this is generally fixed at 25 pages
         * which is so low that it's useless. In this comparison, we seek to
         * calculate the total heap-size, and reclaim if more than 3/4ths of the
         * heap is allocated. (Or, in the calculation, if less than 1/4th is
         * free.)
         */
        if (btop(vmem_size(heap_arena, VMEM_FREE)) <
            (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
                return (1);
#endif
#else
        if (kmem_used() > kmem_size() / 2)
                return (1);
#endif

#else
        if (spa_get_random(100) == 0)
                return (1);
#endif
        return (0);
}
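
/*
 * Worked example for the i386 heap check above (illustrative only):
 * with a 512MB kernel heap of which 100MB is free, free (100MB) is less
 * than total >> 2 (128MB), i.e. more than 3/4 of the heap is allocated,
 * so arc_reclaim_needed() returns 1 and a reclaim pass is triggered.
 */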

static void
arc_kmem_reap_now(arc_reclaim_strategy_t strat)
{
#ifdef ZIO_USE_UMA
        size_t i;
        kmem_cache_t *prev_cache = NULL;
        kmem_cache_t *prev_data_cache = NULL;
        extern kmem_cache_t *zio_buf_cache[];
        extern kmem_cache_t *zio_data_buf_cache[];
#endif

#ifdef _KERNEL
        /*
         * First purge some DNLC entries, in case the DNLC is using
         * up too much memory.
         */
        dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);

#if defined(__i386)
        /*
         * Reclaim unused memory from all kmem caches.
         */
        kmem_reap();
#endif
#endif

        /*
         * An aggressive reclamation will shrink the cache size as well as
         * reap free buffers from the arc kmem caches.
         */
        if (strat == ARC_RECLAIM_AGGR)
                arc_shrink();

#ifdef ZIO_USE_UMA
        for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
                if (zio_buf_cache[i] != prev_cache) {
                        prev_cache = zio_buf_cache[i];
                        kmem_cache_reap_now(zio_buf_cache[i]);
                }
                if (zio_data_buf_cache[i] != prev_data_cache) {
                        prev_data_cache = zio_data_buf_cache[i];
                        kmem_cache_reap_now(zio_data_buf_cache[i]);
                }
        }
#endif
        kmem_cache_reap_now(buf_cache);
        kmem_cache_reap_now(hdr_cache);
}

static void
arc_reclaim_thread(void *dummy __unused)
{
        clock_t growtime = 0;
        arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
        callb_cpr_t cpr;

        CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);

        mutex_enter(&arc_reclaim_thr_lock);
        while (arc_thread_exit == 0) {
                if (arc_reclaim_needed()) {

                        if (arc_no_grow) {
                                if (last_reclaim == ARC_RECLAIM_CONS) {
                                        last_reclaim = ARC_RECLAIM_AGGR;
                                } else {
                                        last_reclaim = ARC_RECLAIM_CONS;
                                }
                        } else {
                                arc_no_grow = TRUE;
                                last_reclaim = ARC_RECLAIM_AGGR;
                                membar_producer();
                        }

                        /* reset the growth delay for every reclaim */
                        growtime = lbolt + (arc_grow_retry * hz);
                        ASSERT(growtime > 0);

                        if (zfs_needfree && last_reclaim == ARC_RECLAIM_CONS) {
                                /*
                                 * If zfs_needfree is TRUE our vm_lowmem hook
                                 * was called and in that case we must free some
                                 * memory, so switch to aggressive mode.
                                 */
                                arc_no_grow = TRUE;
                                last_reclaim = ARC_RECLAIM_AGGR;
                        }
                        arc_kmem_reap_now(last_reclaim);
                } else if ((growtime > 0) && ((growtime - lbolt) <= 0)) {
                        arc_no_grow = FALSE;
                }

                if (zfs_needfree ||
                    (2 * arc_c < arc_size +
                    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size))
                        arc_adjust();

                if (arc_eviction_list != NULL)
                        arc_do_user_evicts();

                if (arc_reclaim_needed()) {
                        zfs_needfree = 0;
#ifdef _KERNEL
                        wakeup(&zfs_needfree);
#endif
                }

                /* block until needed, or one second, whichever is shorter */
                CALLB_CPR_SAFE_BEGIN(&cpr);
                (void) cv_timedwait(&arc_reclaim_thr_cv,
                    &arc_reclaim_thr_lock, hz);
                CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
        }

        arc_thread_exit = 0;
        cv_broadcast(&arc_reclaim_thr_cv);
        CALLB_CPR_EXIT(&cpr);	/* drops arc_reclaim_thr_lock */
        thread_exit();
}

/*
 * Adapt arc info given the number of bytes we are trying to add and
 * the state that we are coming from. This function is only called
 * when we are adding new content to the cache.
 */
static void
arc_adapt(int bytes, arc_state_t *state)
{
        int mult;

        ASSERT(bytes > 0);
        /*
         * Adapt the target size of the MRU list:
         *	- if we just hit in the MRU ghost list, then increase
         *	  the target size of the MRU list.
         *	- if we just hit in the MFU ghost list, then increase
         *	  the target size of the MFU list by decreasing the
         *	  target size of the MRU list.
         */
        if (state == arc_mru_ghost) {
                mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
                    1 : (arc_mfu_ghost->arcs_size / arc_mru_ghost->arcs_size));

                arc_p = MIN(arc_c, arc_p + bytes * mult);
        } else if (state == arc_mfu_ghost) {
                mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
                    1 : (arc_mru_ghost->arcs_size / arc_mfu_ghost->arcs_size));

                arc_p = MAX(0, (int64_t)arc_p - bytes * mult);
        }
        ASSERT((int64_t)arc_p >= 0);

        if (arc_reclaim_needed()) {
                cv_signal(&arc_reclaim_thr_cv);
                return;
        }

        if (arc_no_grow)
                return;

        if (arc_c >= arc_c_max)
                return;

        /*
         * If we're within (2 * maxblocksize) bytes of the target
         * cache size, increment the target cache size
         */
        if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
                atomic_add_64(&arc_c, (int64_t)bytes);
                if (arc_c > arc_c_max)
                        arc_c = arc_c_max;
                else if (state == arc_anon)
                        atomic_add_64(&arc_p, (int64_t)bytes);
                if (arc_p > arc_c)
                        arc_p = arc_c;
        }
        ASSERT((int64_t)arc_p >= 0);
}
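
/*
 * Worked example (illustrative only): on a hit in the MRU ghost list
 * for a 16K buffer, with arc_mru_ghost->arcs_size = 100MB and
 * arc_mfu_ghost->arcs_size = 300MB, mult = 300 / 100 = 3, so arc_p
 * grows by 48K (capped at arc_c).  A hit in the smaller ghost list
 * thus moves the target faster, since it is stronger evidence that
 * the balance between recency and frequency is wrong.
 */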

/*
 * Check if the cache has reached its limits and eviction is required
 * prior to insert.
 */
static int
arc_evict_needed(void)
{
        if (arc_reclaim_needed())
                return (1);

        return (arc_size > arc_c);
}

/*
 * The buffer, supplied as the first argument, needs a data block.
 * So, if we are at cache max, determine which cache should be victimized.
 * We have the following cases:
 *
 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
 * In this situation if we're out of space, but the resident size of the MFU is
 * under the limit, victimize the MFU cache to satisfy this insertion request.
 *
 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
 * Here, we've used up all of the available space for the MRU, so we need to
 * evict from our own cache instead.  Evict from the set of resident MRU
 * entries.
 *
 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
 * c minus p represents the MFU space in the cache, since p is the size of the
 * cache that is dedicated to the MRU.  In this situation there's still space on
 * the MFU side, so the MRU side needs to be victimized.
 *
 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
 * MFU's resident set is consuming more space than it has been allotted.  In
 * this situation, we must victimize our own cache, the MFU, for this insertion.
 */
static void
arc_get_data_buf(arc_buf_t *buf)
{
        arc_state_t *state = buf->b_hdr->b_state;
        uint64_t size = buf->b_hdr->b_size;
        arc_buf_contents_t type = buf->b_hdr->b_type;

        arc_adapt(size, state);

        /*
         * We have not yet reached cache maximum size,
         * just allocate a new buffer.
         */
        if (!arc_evict_needed()) {
                if (type == ARC_BUFC_METADATA) {
                        buf->b_data = zio_buf_alloc(size);
                } else {
                        ASSERT(type == ARC_BUFC_DATA);
                        buf->b_data = zio_data_buf_alloc(size);
                }
                atomic_add_64(&arc_size, size);
                goto out;
        }

        /*
         * If we are prefetching from the mfu ghost list, this buffer
         * will end up on the mru list; so steal space from there.
         */
        if (state == arc_mfu_ghost)
                state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
        else if (state == arc_mru_ghost)
                state = arc_mru;

        if (state == arc_mru || state == arc_anon) {
                uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
                state = (arc_p > mru_used) ? arc_mfu : arc_mru;
        } else {
                /* MFU cases */
                uint64_t mfu_space = arc_c - arc_p;
                state = (mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
        }
        if ((buf->b_data = arc_evict(state, size, TRUE, type)) == NULL) {
                if (type == ARC_BUFC_METADATA) {
                        buf->b_data = zio_buf_alloc(size);
                } else {
                        ASSERT(type == ARC_BUFC_DATA);
                        buf->b_data = zio_data_buf_alloc(size);
                }
                atomic_add_64(&arc_size, size);
                ARCSTAT_BUMP(arcstat_recycle_miss);
        }
        ASSERT(buf->b_data != NULL);
out:
        /*
         * Update the state size.  Note that ghost states have a
         * "ghost size" and so don't need to be updated.
         */
        if (!GHOST_STATE(buf->b_hdr->b_state)) {
                arc_buf_hdr_t *hdr = buf->b_hdr;

                atomic_add_64(&hdr->b_state->arcs_size, size);
                if (list_link_active(&hdr->b_arc_node)) {
                        ASSERT(refcount_is_zero(&hdr->b_refcnt));
                        atomic_add_64(&hdr->b_state->arcs_lsize, size);
                }
                /*
                 * If we are growing the cache, and we are adding anonymous
                 * data, and we have outgrown arc_p, update arc_p
                 */
                if (arc_size < arc_c && hdr->b_state == arc_anon &&
                    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
                        arc_p = MIN(arc_c, arc_p + size);
        }
}
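
/*
 * Worked example for the four cases above (illustrative only): with
 * arc_c = 1024MB and arc_p = 256MB, an insert destined for the MRU
 * while anon + MRU = 200MB (< arc_p) recycles space from the MFU
 * (case 1); once anon + MRU reaches 300MB (> arc_p) the MRU must
 * victimize itself (case 2).  For an MFU insert, the MFU may occupy
 * up to arc_c - arc_p = 768MB before it must victimize itself
 * (cases 3 and 4).
 */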

/*
 * This routine is called whenever a buffer is accessed.
 * NOTE: the hash lock is dropped in this function.
 */
static void
arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
{
        ASSERT(MUTEX_HELD(hash_lock));

        if (buf->b_state == arc_anon) {
                /*
                 * This buffer is not in the cache, and does not
                 * appear in our "ghost" list.  Add the new buffer
                 * to the MRU state.
                 */

                ASSERT(buf->b_arc_access == 0);
                buf->b_arc_access = lbolt;
                DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
                arc_change_state(arc_mru, buf, hash_lock);

        } else if (buf->b_state == arc_mru) {
                /*
                 * If this buffer is here because of a prefetch, then either:
                 * - clear the flag if this is a "referencing" read
                 *   (any subsequent access will bump this into the MFU state).
                 * or
                 * - move the buffer to the head of the list if this is
                 *   another prefetch (to make it less likely to be evicted).
                 */
                if ((buf->b_flags & ARC_PREFETCH) != 0) {
                        if (refcount_count(&buf->b_refcnt) == 0) {
                                ASSERT(list_link_active(&buf->b_arc_node));
                                mutex_enter(&arc_mru->arcs_mtx);
                                list_remove(&arc_mru->arcs_list, buf);
                                list_insert_head(&arc_mru->arcs_list, buf);
                                mutex_exit(&arc_mru->arcs_mtx);
                        } else {
                                buf->b_flags &= ~ARC_PREFETCH;
                                ARCSTAT_BUMP(arcstat_mru_hits);
                        }
                        buf->b_arc_access = lbolt;
                        return;
                }

                /*
                 * This buffer has been "accessed" only once so far,
                 * but it is still in the cache.  Move it to the MFU
                 * state.
                 */
                if (lbolt > buf->b_arc_access + ARC_MINTIME) {
                        /*
                         * More than 125ms have passed since we
                         * instantiated this buffer.  Move it to the
                         * most frequently used state.
                         */
                        buf->b_arc_access = lbolt;
                        DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
                        arc_change_state(arc_mfu, buf, hash_lock);
                }
                ARCSTAT_BUMP(arcstat_mru_hits);
        } else if (buf->b_state == arc_mru_ghost) {
                arc_state_t *new_state;
                /*
                 * This buffer has been "accessed" recently, but
                 * was evicted from the cache.  Move it to the
                 * MFU state.
                 */

                if (buf->b_flags & ARC_PREFETCH) {
                        new_state = arc_mru;
                        if (refcount_count(&buf->b_refcnt) > 0)
                                buf->b_flags &= ~ARC_PREFETCH;
                        DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
                } else {
                        new_state = arc_mfu;
                        DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
                }

                buf->b_arc_access = lbolt;
                arc_change_state(new_state, buf, hash_lock);

                ARCSTAT_BUMP(arcstat_mru_ghost_hits);
        } else if (buf->b_state == arc_mfu) {
                /*
                 * This buffer has been accessed more than once and is
                 * still in the cache.  Keep it in the MFU state.
                 *
                 * NOTE: an add_reference() that occurred when we did
                 * the arc_read() will have kicked this off the list.
                 * If it was a prefetch, we will explicitly move it to
                 * the head of the list now.
                 */
                if ((buf->b_flags & ARC_PREFETCH) != 0) {
                        ASSERT(refcount_count(&buf->b_refcnt) == 0);
                        ASSERT(list_link_active(&buf->b_arc_node));
                        mutex_enter(&arc_mfu->arcs_mtx);
                        list_remove(&arc_mfu->arcs_list, buf);
                        list_insert_head(&arc_mfu->arcs_list, buf);
                        mutex_exit(&arc_mfu->arcs_mtx);
                }
                ARCSTAT_BUMP(arcstat_mfu_hits);
                buf->b_arc_access = lbolt;
        } else if (buf->b_state == arc_mfu_ghost) {
                arc_state_t *new_state = arc_mfu;
                /*
                 * This buffer has been accessed more than once but has
                 * been evicted from the cache.  Move it back to the
                 * MFU state.
                 */

                if (buf->b_flags & ARC_PREFETCH) {
                        /*
                         * This is a prefetch access...
                         * move this block back to the MRU state.
                         */
                        ASSERT3U(refcount_count(&buf->b_refcnt), ==, 0);
                        new_state = arc_mru;
                }

                buf->b_arc_access = lbolt;
                DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
                arc_change_state(new_state, buf, hash_lock);

                ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
        } else {
                ASSERT(!"invalid arc state");
        }
}
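
/*
 * Summary of the state transitions implemented by arc_access() above
 * (derived from the code, for reference; not part of the original):
 *
 *	anon      -> mru	first access
 *	mru       -> mru	prefetch, or re-access within ARC_MINTIME
 *	mru       -> mfu	demand re-access after ARC_MINTIME
 *	mru ghost -> mru	prefetch access after eviction
 *	mru ghost -> mfu	demand access after eviction
 *	mfu       -> mfu	any access
 *	mfu ghost -> mru	prefetch access after eviction
 *	mfu ghost -> mfu	demand access after eviction
 */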

/* a generic arc_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
        bcopy(buf->b_data, arg, buf->b_hdr->b_size);
        VERIFY(arc_buf_remove_ref(buf, arg) == 1);
}

/* a generic arc_done_func_t which you can use */
void
arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
{
        arc_buf_t **bufp = arg;
        if (zio && zio->io_error) {
                VERIFY(arc_buf_remove_ref(buf, arg) == 1);
                *bufp = NULL;
        } else {
                *bufp = buf;
        }
}

static void
arc_read_done(zio_t *zio)
{
        arc_buf_hdr_t *hdr, *found;
        arc_buf_t *buf;
        arc_buf_t *abuf;	/* buffer we're assigning to callback */
        kmutex_t *hash_lock;
        arc_callback_t *callback_list, *acb;
        int freeable = FALSE;

        buf = zio->io_private;
        hdr = buf->b_hdr;

        /*
         * The hdr was inserted into the hash table and removed from lists
         * prior to starting I/O.  We should find this header, since
         * it's in the hash table, and it should be legit since it's
         * not possible to evict it during the I/O.  The only possible
         * reason for it not to be found is if we were freed during the
         * read.
         */
        found = buf_hash_find(zio->io_spa, &hdr->b_dva, hdr->b_birth,
            &hash_lock);

        ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
            (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))));

        /* byteswap if necessary */
        callback_list = hdr->b_acb;
        ASSERT(callback_list != NULL);
        if (BP_SHOULD_BYTESWAP(zio->io_bp) && callback_list->acb_byteswap)
                callback_list->acb_byteswap(buf->b_data, hdr->b_size);

        arc_cksum_compute(buf);

        /* create copies of the data buffer for the callers */
        abuf = buf;
        for (acb = callback_list; acb; acb = acb->acb_next) {
                if (acb->acb_done) {
                        if (abuf == NULL)
                                abuf = arc_buf_clone(buf);
                        acb->acb_buf = abuf;
                        abuf = NULL;
                }
        }
        hdr->b_acb = NULL;
        hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
        ASSERT(!HDR_BUF_AVAILABLE(hdr));
        if (abuf == buf)
                hdr->b_flags |= ARC_BUF_AVAILABLE;

        ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);

        if (zio->io_error != 0) {
                hdr->b_flags |= ARC_IO_ERROR;
                if (hdr->b_state != arc_anon)
                        arc_change_state(arc_anon, hdr, hash_lock);
                if (HDR_IN_HASH_TABLE(hdr))
                        buf_hash_remove(hdr);
                freeable = refcount_is_zero(&hdr->b_refcnt);
                /* convert checksum errors into IO errors */
                if (zio->io_error == ECKSUM)
                        zio->io_error = EIO;
        }

        /*
         * Broadcast before we drop the hash_lock to avoid the possibility
         * that the hdr (and hence the cv) might be freed before we get to
         * the cv_broadcast().
         */
        cv_broadcast(&hdr->b_cv);

        if (hash_lock) {
                /*
                 * Only call arc_access on anonymous buffers.  This is because
                 * if we've issued an I/O for an evicted buffer, we've already
                 * called arc_access (to prevent any simultaneous readers from
                 * getting confused).
                 */
                if (zio->io_error == 0 && hdr->b_state == arc_anon)
                        arc_access(hdr, hash_lock);
                mutex_exit(hash_lock);
        } else {
                /*
                 * This block was freed while we waited for the read to
                 * complete.  It has been removed from the hash table and
                 * moved to the anonymous state (so that it won't show up
                 * in the cache).
                 */
                ASSERT3P(hdr->b_state, ==, arc_anon);
                freeable = refcount_is_zero(&hdr->b_refcnt);
        }

        /* execute each callback and free its structure */
        while ((acb = callback_list) != NULL) {
                if (acb->acb_done)
                        acb->acb_done(zio, acb->acb_buf, acb->acb_private);

                if (acb->acb_zio_dummy != NULL) {
                        acb->acb_zio_dummy->io_error = zio->io_error;
                        zio_nowait(acb->acb_zio_dummy);
                }

                callback_list = acb->acb_next;
                kmem_free(acb, sizeof (arc_callback_t));
        }

        if (freeable)
                arc_hdr_destroy(hdr);
}

/*
 * "Read" the block at the specified DVA (in bp) via the
 * cache.  If the block is found in the cache, invoke the provided
 * callback immediately and return.  Note that the `zio' parameter
 * in the callback will be NULL in this case, since no IO was
 * required.  If the block is not in the cache pass the read request
 * on to the spa with a substitute callback function, so that the
 * requested block will be added to the cache.
 *
 * If a read request arrives for a block that has a read in-progress,
 * either wait for the in-progress read to complete (and return the
 * results); or, if this is a read with a "done" func, add a record
 * to the read to invoke the "done" func when the read completes,
 * and return; or just return.
 *
 * arc_read_done() will invoke all the requested "done" functions
 * for readers of this block.
 */
int
arc_read(zio_t *pio, spa_t *spa, blkptr_t *bp, arc_byteswap_func_t *swap,
    arc_done_func_t *done, void *private, int priority, int flags,
    uint32_t *arc_flags, zbookmark_t *zb)
{
        arc_buf_hdr_t *hdr;
        arc_buf_t *buf;
        kmutex_t *hash_lock;
        zio_t *rzio;

top:
        hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
        if (hdr && hdr->b_datacnt > 0) {

                *arc_flags |= ARC_CACHED;

                if (HDR_IO_IN_PROGRESS(hdr)) {

                        if (*arc_flags & ARC_WAIT) {
                                cv_wait(&hdr->b_cv, hash_lock);
                                mutex_exit(hash_lock);
                                goto top;
                        }
                        ASSERT(*arc_flags & ARC_NOWAIT);

                        if (done) {
                                arc_callback_t *acb = NULL;

                                acb = kmem_zalloc(sizeof (arc_callback_t),
                                    KM_SLEEP);
                                acb->acb_done = done;
                                acb->acb_private = private;
                                acb->acb_byteswap = swap;
                                if (pio != NULL)
                                        acb->acb_zio_dummy = zio_null(pio,
                                            spa, NULL, NULL, flags);

                                ASSERT(acb->acb_done != NULL);
                                acb->acb_next = hdr->b_acb;
                                hdr->b_acb = acb;
                                add_reference(hdr, hash_lock, private);
                                mutex_exit(hash_lock);
                                return (0);
                        }
                        mutex_exit(hash_lock);
                        return (0);
                }

                ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

                if (done) {
                        add_reference(hdr, hash_lock, private);
                        /*
                         * If this block is already in use, create a new
                         * copy of the data so that we will be guaranteed
                         * that arc_release() will always succeed.
                         */
                        buf = hdr->b_buf;
                        ASSERT(buf);
                        ASSERT(buf->b_data);
                        if (HDR_BUF_AVAILABLE(hdr)) {
                                ASSERT(buf->b_efunc == NULL);
                                hdr->b_flags &= ~ARC_BUF_AVAILABLE;
                        } else {
                                buf = arc_buf_clone(buf);
                        }
                } else if (*arc_flags & ARC_PREFETCH &&
                    refcount_count(&hdr->b_refcnt) == 0) {
                        hdr->b_flags |= ARC_PREFETCH;
                }
                DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
                arc_access(hdr, hash_lock);
                mutex_exit(hash_lock);
                ARCSTAT_BUMP(arcstat_hits);
                ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
                    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
                    data, metadata, hits);

                if (done)
                        done(NULL, buf, private);
        } else {
                uint64_t size = BP_GET_LSIZE(bp);
                arc_callback_t *acb;

                if (hdr == NULL) {
                        /* this block is not in the cache */
                        arc_buf_hdr_t *exists;
                        arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
                        buf = arc_buf_alloc(spa, size, private, type);
                        hdr = buf->b_hdr;
                        hdr->b_dva = *BP_IDENTITY(bp);
                        hdr->b_birth = bp->blk_birth;
                        hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
                        exists = buf_hash_insert(hdr, &hash_lock);
                        if (exists) {
                                /* somebody beat us to the hash insert */
                                mutex_exit(hash_lock);
                                bzero(&hdr->b_dva, sizeof (dva_t));
                                hdr->b_birth = 0;
                                hdr->b_cksum0 = 0;
                                (void) arc_buf_remove_ref(buf, private);
                                goto top;	/* restart the IO request */
                        }
                        /* if this is a prefetch, we don't have a reference */
                        if (*arc_flags & ARC_PREFETCH) {
                                (void) remove_reference(hdr, hash_lock,
                                    private);
                                hdr->b_flags |= ARC_PREFETCH;
                        }
                        if (BP_GET_LEVEL(bp) > 0)
                                hdr->b_flags |= ARC_INDIRECT;
                } else {
                        /* this block is in the ghost cache */
                        ASSERT(GHOST_STATE(hdr->b_state));
                        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
                        ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 0);
                        ASSERT(hdr->b_buf == NULL);

                        /* if this is a prefetch, we don't have a reference */
                        if (*arc_flags & ARC_PREFETCH)
                                hdr->b_flags |= ARC_PREFETCH;
                        else
                                add_reference(hdr, hash_lock, private);
                        buf = kmem_cache_alloc(buf_cache, KM_SLEEP);
                        buf->b_hdr = hdr;
                        buf->b_data = NULL;
                        buf->b_efunc = NULL;
                        buf->b_private = NULL;
                        buf->b_next = NULL;
                        hdr->b_buf = buf;
                        arc_get_data_buf(buf);
                        ASSERT(hdr->b_datacnt == 0);
                        hdr->b_datacnt = 1;
                }

                acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
                acb->acb_done = done;
                acb->acb_private = private;
                acb->acb_byteswap = swap;

                ASSERT(hdr->b_acb == NULL);
                hdr->b_acb = acb;
                hdr->b_flags |= ARC_IO_IN_PROGRESS;

                /*
                 * If the buffer has been evicted, migrate it to a present state
                 * before issuing the I/O.  Once we drop the hash-table lock,
                 * the header will be marked as I/O in progress and have an
                 * attached buffer.  At this point, anybody who finds this
                 * buffer ought to notice that it's legit but has a pending I/O.
                 */

                if (GHOST_STATE(hdr->b_state))
                        arc_access(hdr, hash_lock);
                mutex_exit(hash_lock);

                ASSERT3U(hdr->b_size, ==, size);
                DTRACE_PROBE3(arc__miss, blkptr_t *, bp, uint64_t, size,
                    zbookmark_t *, zb);
                ARCSTAT_BUMP(arcstat_misses);
                ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
                    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
                    data, metadata, misses);

                rzio = zio_read(pio, spa, bp, buf->b_data, size,
                    arc_read_done, buf, priority, flags, zb);

                if (*arc_flags & ARC_WAIT)
                        return (zio_wait(rzio));

                ASSERT(*arc_flags & ARC_NOWAIT);
                zio_nowait(rzio);
        }
        return (0);
}
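
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * zio priority/flag constants and the caller-supplied spa/bp/zb are
 * assumed): a synchronous cached read using the generic
 * arc_getbuf_func() callback defined above.
 */
#if 0
static int
example_sync_read(spa_t *spa, blkptr_t *bp, zbookmark_t *zb)
{
        arc_buf_t *abuf = NULL;
        uint32_t aflags = ARC_WAIT;
        int err;

        /* Blocks until the read completes; no byteswap function needed. */
        err = arc_read(NULL, spa, bp, NULL, arc_getbuf_func, &abuf,
            ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
        if (err == 0 && abuf != NULL) {
                /* ... consume abuf->b_data ... */
                VERIFY(arc_buf_remove_ref(abuf, &abuf) == 1);
        }
        return (err);
}
#endif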

/*
 * arc_read() variant to support pool traversal.  If the block is already
 * in the ARC, make a copy of it; otherwise, the caller will do the I/O.
 * The idea is that we don't want pool traversal filling up memory, but
 * if the ARC already has the data anyway, we shouldn't pay for the I/O.
 */
int
arc_tryread(spa_t *spa, blkptr_t *bp, void *data)
{
        arc_buf_hdr_t *hdr;
        kmutex_t *hash_mtx;
        int rc = 0;

        hdr = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_mtx);

        if (hdr && hdr->b_datacnt > 0 && !HDR_IO_IN_PROGRESS(hdr)) {
                arc_buf_t *buf = hdr->b_buf;

                ASSERT(buf);
                while (buf->b_data == NULL) {
                        buf = buf->b_next;
                        ASSERT(buf);
                }
                bcopy(buf->b_data, data, hdr->b_size);
        } else {
                rc = ENOENT;
        }

        if (hash_mtx)
                mutex_exit(hash_mtx);

        return (rc);
}

void
arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
{
        ASSERT(buf->b_hdr != NULL);
        ASSERT(buf->b_hdr->b_state != arc_anon);
        ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
        buf->b_efunc = func;
        buf->b_private = private;
}

/*
 * This is used by the DMU to let the ARC know that a buffer is
 * being evicted, so the ARC should clean up.  If this arc buf
 * is not yet in the evicted state, it will be put there.
 */
int
arc_buf_evict(arc_buf_t *buf)
{
        arc_buf_hdr_t *hdr;
        kmutex_t *hash_lock;
        arc_buf_t **bufp;

        mutex_enter(&arc_eviction_mtx);
        hdr = buf->b_hdr;
        if (hdr == NULL) {
                /*
                 * We are in arc_do_user_evicts().
                 */
                ASSERT(buf->b_data == NULL);
                mutex_exit(&arc_eviction_mtx);
                return (0);
        }
        hash_lock = HDR_LOCK(hdr);
        mutex_exit(&arc_eviction_mtx);

        mutex_enter(hash_lock);

        if (buf->b_data == NULL) {
                /*
                 * We are on the eviction list.
                 */
                mutex_exit(hash_lock);
                mutex_enter(&arc_eviction_mtx);
                if (buf->b_hdr == NULL) {
                        /*
                         * We are already in arc_do_user_evicts().
                         */
                        mutex_exit(&arc_eviction_mtx);
                        return (0);
                } else {
                        arc_buf_t copy = *buf;	/* structure assignment */
                        /*
                         * Process this buffer now
                         * but let arc_do_user_evicts() do the reaping.
                         */
                        buf->b_efunc = NULL;
                        mutex_exit(&arc_eviction_mtx);
                        VERIFY(copy.b_efunc(&copy) == 0);
                        return (1);
                }
        }

        ASSERT(buf->b_hdr == hdr);
        ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
        ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);

        /*
         * Pull this buffer off of the hdr
         */
        bufp = &hdr->b_buf;
        while (*bufp != buf)
                bufp = &(*bufp)->b_next;
        *bufp = buf->b_next;

        ASSERT(buf->b_data != NULL);
        arc_buf_destroy(buf, FALSE, FALSE);

        if (hdr->b_datacnt == 0) {
                arc_state_t *old_state = hdr->b_state;
                arc_state_t *evicted_state;

                ASSERT(refcount_is_zero(&hdr->b_refcnt));

                evicted_state =
                    (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;

                mutex_enter(&old_state->arcs_mtx);
                mutex_enter(&evicted_state->arcs_mtx);

                arc_change_state(evicted_state, hdr, hash_lock);
                ASSERT(HDR_IN_HASH_TABLE(hdr));
                hdr->b_flags = ARC_IN_HASH_TABLE;

                mutex_exit(&evicted_state->arcs_mtx);
                mutex_exit(&old_state->arcs_mtx);
        }
        mutex_exit(hash_lock);

        VERIFY(buf->b_efunc(buf) == 0);
        buf->b_efunc = NULL;
        buf->b_private = NULL;
        buf->b_hdr = NULL;
        kmem_cache_free(buf_cache, buf);
        return (1);
}
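
/*
 * Usage sketch for the eviction-callback contract (illustrative only;
 * "my_buf_evict" and "my_cache_entry" are hypothetical DMU-side names):
 * the function registered with arc_set_callback() is invoked by
 * arc_buf_evict() or arc_do_user_evicts() with no locks held, and its
 * return value is VERIFYed to be zero by the callers above.
 */
#if 0
struct my_cache_entry {
        arc_buf_t *mce_buf;
};

static int
my_buf_evict(arc_buf_t *buf)
{
        struct my_cache_entry *mce = buf->b_private;

        /* Called with no locks held; drop our pointer to the buffer. */
        mce->mce_buf = NULL;
        return (0);	/* callers VERIFY a zero return */
}

/* Registration, after the buffer has been read and referenced: */
/*	arc_set_callback(buf, my_buf_evict, mce);		    */
#endif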

/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
        arc_buf_hdr_t *hdr = buf->b_hdr;
        kmutex_t *hash_lock = HDR_LOCK(hdr);

        /* this buffer is not on any list */
        ASSERT(refcount_count(&hdr->b_refcnt) > 0);

        if (hdr->b_state == arc_anon) {
                /* this buffer is already released */
                ASSERT3U(refcount_count(&hdr->b_refcnt), ==, 1);
                ASSERT(BUF_EMPTY(hdr));
                ASSERT(buf->b_efunc == NULL);
                arc_buf_thaw(buf);
                return;
        }

        mutex_enter(hash_lock);

        /*
         * Do we have more than one buf?
         */
        if (hdr->b_buf != buf || buf->b_next != NULL) {
                arc_buf_hdr_t *nhdr;
                arc_buf_t **bufp;
                uint64_t blksz = hdr->b_size;
                spa_t *spa = hdr->b_spa;
                arc_buf_contents_t type = hdr->b_type;

                ASSERT(hdr->b_datacnt > 1);
                /*
                 * Pull the data off of this buf and attach it to
                 * a new anonymous buf.
                 */
                (void) remove_reference(hdr, hash_lock, tag);
                bufp = &hdr->b_buf;
                while (*bufp != buf)
                        bufp = &(*bufp)->b_next;
                *bufp = (*bufp)->b_next;
                buf->b_next = NULL;

                ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
                atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
                if (refcount_is_zero(&hdr->b_refcnt)) {
                        ASSERT3U(hdr->b_state->arcs_lsize, >=, hdr->b_size);
                        atomic_add_64(&hdr->b_state->arcs_lsize, -hdr->b_size);
                }
                hdr->b_datacnt -= 1;
                arc_cksum_verify(buf);

                mutex_exit(hash_lock);

                nhdr = kmem_cache_alloc(hdr_cache, KM_SLEEP);
                nhdr->b_size = blksz;
                nhdr->b_spa = spa;
                nhdr->b_type = type;
                nhdr->b_buf = buf;
                nhdr->b_state = arc_anon;
                nhdr->b_arc_access = 0;
                nhdr->b_flags = 0;
                nhdr->b_datacnt = 1;
                nhdr->b_freeze_cksum = NULL;
                mutex_init(&nhdr->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
                (void) refcount_add(&nhdr->b_refcnt, tag);
                buf->b_hdr = nhdr;
                atomic_add_64(&arc_anon->arcs_size, blksz);

                hdr = nhdr;
        } else {
                ASSERT(refcount_count(&hdr->b_refcnt) == 1);
                ASSERT(!list_link_active(&hdr->b_arc_node));
                ASSERT(!HDR_IO_IN_PROGRESS(hdr));
                arc_change_state(arc_anon, hdr, hash_lock);
                hdr->b_arc_access = 0;
                mutex_exit(hash_lock);
                bzero(&hdr->b_dva, sizeof (dva_t));
                hdr->b_birth = 0;
                hdr->b_cksum0 = 0;
                arc_buf_thaw(buf);
        }
        buf->b_efunc = NULL;
        buf->b_private = NULL;
}

int
arc_released(arc_buf_t *buf)
{
        return (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
}

int
arc_has_callback(arc_buf_t *buf)
{
        return (buf->b_efunc != NULL);
}
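
/*
 * Typical call sequence (illustrative only, not part of the original
 * file): a writer that obtained a buffer via arc_read() calls
 * arc_release(buf, tag) before dirtying buf->b_data; afterwards
 * arc_released(buf) is true and the buffer is anonymous, so in-place
 * modification cannot be observed by other cached readers.
 */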

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
        return (refcount_count(&buf->b_hdr->b_refcnt));
}
#endif

static void
arc_write_ready(zio_t *zio)
{
        arc_write_callback_t *callback = zio->io_private;
        arc_buf_t *buf = callback->awcb_buf;

        if (callback->awcb_ready) {
                ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
                callback->awcb_ready(zio, buf, callback->awcb_private);
        }
        arc_cksum_compute(buf);
}

static void
arc_write_done(zio_t *zio)
{
        arc_write_callback_t *callback = zio->io_private;
        arc_buf_t *buf = callback->awcb_buf;
        arc_buf_hdr_t *hdr = buf->b_hdr;

        hdr->b_acb = NULL;

        /* this buffer is on no lists and is not in the hash table */
        ASSERT3P(hdr->b_state, ==, arc_anon);

        hdr->b_dva = *BP_IDENTITY(zio->io_bp);
        hdr->b_birth = zio->io_bp->blk_birth;
        hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
        /*
         * If the block to be written was all-zero, we may have
         * compressed it away.  In this case no write was performed
         * so there will be no dva/birth-date/checksum.  The buffer
         * must therefore remain anonymous (and uncached).
         */
        if (!BUF_EMPTY(hdr)) {
                arc_buf_hdr_t *exists;
                kmutex_t *hash_lock;

                arc_cksum_verify(buf);

                exists = buf_hash_insert(hdr, &hash_lock);
                if (exists) {
                        /*
                         * This can only happen if we overwrite for
                         * sync-to-convergence, because we remove
                         * buffers from the hash table when we arc_free().
                         */
                        ASSERT(DVA_EQUAL(BP_IDENTITY(&zio->io_bp_orig),
                            BP_IDENTITY(zio->io_bp)));
                        ASSERT3U(zio->io_bp_orig.blk_birth, ==,
                            zio->io_bp->blk_birth);

                        ASSERT(refcount_is_zero(&exists->b_refcnt));
                        arc_change_state(arc_anon, exists, hash_lock);
                        mutex_exit(hash_lock);
                        arc_hdr_destroy(exists);
                        exists = buf_hash_insert(hdr, &hash_lock);
                        ASSERT3P(exists, ==, NULL);
                }
                hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
                arc_access(hdr, hash_lock);
                mutex_exit(hash_lock);
        } else if (callback->awcb_done == NULL) {
                int destroy_hdr;
                /*
                 * This is an anonymous buffer with no user callback,
                 * destroy it if there are no active references.
                 */
                mutex_enter(&arc_eviction_mtx);
                destroy_hdr = refcount_is_zero(&hdr->b_refcnt);
                hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
                mutex_exit(&arc_eviction_mtx);
                if (destroy_hdr)
                        arc_hdr_destroy(hdr);
        } else {
                hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
        }

        if (callback->awcb_done) {
                ASSERT(!refcount_is_zero(&hdr->b_refcnt));
                callback->awcb_done(zio, buf, callback->awcb_private);
        }

        kmem_free(callback, sizeof (arc_write_callback_t));
}

zio_t *
arc_write(zio_t *pio, spa_t *spa, int checksum, int compress, int ncopies,
    uint64_t txg, blkptr_t *bp, arc_buf_t *buf,
    arc_done_func_t *ready, arc_done_func_t *done, void *private, int priority,
    int flags, zbookmark_t *zb)
{
        arc_buf_hdr_t *hdr = buf->b_hdr;
        arc_write_callback_t *callback;
        zio_t *zio;

        /* this is a private buffer - no locking required */
        ASSERT3P(hdr->b_state, ==, arc_anon);
        ASSERT(BUF_EMPTY(hdr));
        ASSERT(!HDR_IO_ERROR(hdr));
        ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
        ASSERT(hdr->b_acb == 0);
        callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
        callback->awcb_ready = ready;
        callback->awcb_done = done;
        callback->awcb_private = private;
        callback->awcb_buf = buf;
        hdr->b_flags |= ARC_IO_IN_PROGRESS;
        zio = zio_write(pio, spa, checksum, compress, ncopies, txg, bp,
            buf->b_data, hdr->b_size, arc_write_ready, arc_write_done, callback,
            priority, flags, zb);

        return (zio);
}
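
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * checksum/compression constants and the caller-supplied pio, txg, bp
 * and zb are assumptions): issuing a synchronous write of a released
 * (anonymous) buffer with no ready/done callbacks.
 */
#if 0
static void
example_sync_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, zbookmark_t *zb)
{
        zio_t *wzio;

        ASSERT(arc_released(buf));	/* buffer must be anonymous */
        wzio = arc_write(pio, spa, ZIO_CHECKSUM_FLETCHER_2,
            ZIO_COMPRESS_OFF, 1, txg, bp, buf, NULL, NULL, NULL,
            ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_CANFAIL, zb);
        (void) zio_wait(wzio);
}
#endif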

int
arc_free(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    zio_done_func_t *done, void *private, uint32_t arc_flags)
{
        arc_buf_hdr_t *ab;
        kmutex_t *hash_lock;
        zio_t *zio;

        /*
         * If this buffer is in the cache, release it, so it
         * can be re-used.
         */
        ab = buf_hash_find(spa, BP_IDENTITY(bp), bp->blk_birth, &hash_lock);
        if (ab != NULL) {
                /*
                 * The checksum of blocks to free is not always
                 * preserved (e.g. on the deadlist).  However, if it is
                 * nonzero, it should match what we have in the cache.
                 */
                ASSERT(bp->blk_cksum.zc_word[0] == 0 ||
                    ab->b_cksum0 == bp->blk_cksum.zc_word[0]);
                if (ab->b_state != arc_anon)
                        arc_change_state(arc_anon, ab, hash_lock);
                if (HDR_IO_IN_PROGRESS(ab)) {
                        /*
                         * This should only happen when we prefetch.
                         */
                        ASSERT(ab->b_flags & ARC_PREFETCH);
                        ASSERT3U(ab->b_datacnt, ==, 1);
                        ab->b_flags |= ARC_FREED_IN_READ;
                        if (HDR_IN_HASH_TABLE(ab))
                                buf_hash_remove(ab);
                        ab->b_arc_access = 0;
                        bzero(&ab->b_dva, sizeof (dva_t));
                        ab->b_birth = 0;
                        ab->b_cksum0 = 0;
                        ab->b_buf->b_efunc = NULL;
                        ab->b_buf->b_private = NULL;
                        mutex_exit(hash_lock);
                } else if (refcount_is_zero(&ab->b_refcnt)) {
                        mutex_exit(hash_lock);
                        arc_hdr_destroy(ab);
                        ARCSTAT_BUMP(arcstat_deleted);
                } else {
                        /*
                         * We still have an active reference on this
                         * buffer.  This can happen, e.g., from
                         * dbuf_unoverride().
                         */
                        ASSERT(!HDR_IN_HASH_TABLE(ab));
                        ab->b_arc_access = 0;
                        bzero(&ab->b_dva, sizeof (dva_t));
                        ab->b_birth = 0;
                        ab->b_cksum0 = 0;
                        ab->b_buf->b_efunc = NULL;
                        ab->b_buf->b_private = NULL;
                        mutex_exit(hash_lock);
                }
        }

        zio = zio_free(pio, spa, txg, bp, done, private);

        if (arc_flags & ARC_WAIT)
                return (zio_wait(zio));

        ASSERT(arc_flags & ARC_NOWAIT);
        zio_nowait(zio);

        return (0);
}

void
arc_tempreserve_clear(uint64_t tempreserve)
{
        atomic_add_64(&arc_tempreserve, -tempreserve);
        ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t tempreserve)
{
#ifdef ZFS_DEBUG
        /*
         * Once in a while, fail for no reason.  Everything should cope.
         */
        if (spa_get_random(10000) == 0) {
                dprintf("forcing random failure\n");
                return (ERESTART);
        }
#endif
        if (tempreserve > arc_c / 4 && !arc_no_grow)
                arc_c = MIN(arc_c_max, tempreserve * 4);
        if (tempreserve > arc_c)
                return (ENOMEM);

        /*
         * Throttle writes when the amount of dirty data in the cache
         * gets too large.  We try to keep the cache less than half full
         * of dirty blocks so that our sync times don't grow too large.
         * Note: if two requests come in concurrently, we might let them
         * both succeed, when one of them should fail.  Not a huge deal.
         *
         * XXX The limit should be adjusted dynamically to keep the time
         * to sync a dataset fixed (around 1-5 seconds?).
         */

        if (tempreserve + arc_tempreserve + arc_anon->arcs_size > arc_c / 2 &&
            arc_tempreserve + arc_anon->arcs_size > arc_c / 4) {
                dprintf("failing, arc_tempreserve=%lluK anon=%lluK "
                    "tempreserve=%lluK arc_c=%lluK\n",
                    arc_tempreserve>>10, arc_anon->arcs_lsize>>10,
                    tempreserve>>10, arc_c>>10);
                return (ERESTART);
        }
        atomic_add_64(&arc_tempreserve, tempreserve);
        return (0);
}
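
/*
 * Usage sketch (illustrative only, not part of the original file; the
 * fragment and its reservation size are hypothetical): callers reserve
 * space for dirty data before modifying buffers and always clear the
 * same amount when the transaction completes or fails.
 */
#if 0
        uint64_t resv = 128 * 1024;	/* hypothetical dirty-data estimate */
        int err;

        if ((err = arc_tempreserve_space(resv)) != 0)
                return (err);	/* ERESTART/ENOMEM: caller should back off */
        /* ... dirty anonymous buffers covered by the reservation ... */
        arc_tempreserve_clear(resv);
#endif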

static kmutex_t arc_lowmem_lock;
#ifdef _KERNEL
static eventhandler_tag arc_event_lowmem = NULL;

static void
arc_lowmem(void *arg __unused, int howto __unused)
{

        /* Serialize access via arc_lowmem_lock. */
        mutex_enter(&arc_lowmem_lock);
        zfs_needfree = 1;
        cv_signal(&arc_reclaim_thr_cv);
        while (zfs_needfree)
                tsleep(&zfs_needfree, 0, "zfs:lowmem", hz / 5);
        mutex_exit(&arc_lowmem_lock);
}
#endif

void
arc_init(void)
{
        mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
        cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
        mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);

        /* Convert seconds to clock ticks */
        arc_min_prefetch_lifespan = 1 * hz;

        /* Start out with 1/8 of all memory */
        arc_c = kmem_size() / 8;
#if 0
#ifdef _KERNEL
        /*
         * On architectures where the physical memory can be larger
         * than the addressable space (intel in 32-bit mode), we may
         * need to limit the cache to 1/8 of VM size.
         */
        arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif
#endif
        /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
        arc_c_min = MAX(arc_c / 4, 64<<18);
        /* set max to 1/2 of all memory, or all but 1GB, whichever is more */
        if (arc_c * 8 >= 1<<30)
                arc_c_max = (arc_c * 8) - (1<<30);
        else
                arc_c_max = arc_c_min;
        arc_c_max = MAX(arc_c * 4, arc_c_max);
#ifdef _KERNEL
        /*
         * Allow the tunables to override our calculations if they are
         * reasonable (i.e. over 16MB).
         */
        if (zfs_arc_max >= 64<<18 && zfs_arc_max < kmem_size())
                arc_c_max = zfs_arc_max;
        if (zfs_arc_min >= 64<<18 && zfs_arc_min <= arc_c_max)
                arc_c_min = zfs_arc_min;
#endif
        arc_c = arc_c_max;
        arc_p = (arc_c >> 1);

        /* if kmem_flags are set, let's try to use less memory */
        if (kmem_debugging())
                arc_c = arc_c / 2;
        if (arc_c < arc_c_min)
                arc_c = arc_c_min;

        zfs_arc_min = arc_c_min;
        zfs_arc_max = arc_c_max;

        arc_anon = &ARC_anon;
        arc_mru = &ARC_mru;
        arc_mru_ghost = &ARC_mru_ghost;
        arc_mfu = &ARC_mfu;
        arc_mfu_ghost = &ARC_mfu_ghost;
        arc_size = 0;

        mutex_init(&arc_anon->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mru->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mru_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mfu->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mfu_ghost->arcs_mtx, NULL, MUTEX_DEFAULT, NULL);

        list_create(&arc_mru->arcs_list, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mru_ghost->arcs_list, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mfu->arcs_list, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mfu_ghost->arcs_list, sizeof (arc_buf_hdr_t),
            offsetof(arc_buf_hdr_t, b_arc_node));

        buf_init();

        arc_thread_exit = 0;
        arc_eviction_list = NULL;
        mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
        bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

        arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
            sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

        if (arc_ksp != NULL) {
                arc_ksp->ks_data = &arc_stats;
                kstat_install(arc_ksp);
        }

        (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
            TS_RUN, minclsyspri);

#ifdef _KERNEL
        arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
            EVENTHANDLER_PRI_FIRST);
#endif

        arc_dead = FALSE;

#ifdef _KERNEL
        /* Warn about ZFS memory requirements. */
        if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
                printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
                    "expect unstable behaviour.\n");
        } else if (kmem_size() < 256 * (1 << 20)) {
                printf("ZFS WARNING: Recommended minimum kmem_map size is "
                    "256MB; expect unstable behaviour.\n");
                printf("             Consider tuning vm.kmem_size and "
                    "vm.kmem_size_max in /boot/loader.conf.\n");
        }
#endif
}
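
/*
 * Worked example of the sizing logic in arc_init() above (illustrative
 * only): with kmem_size() = 1GB and no tunables set, arc_c starts at
 * 128MB and arc_c_min = MAX(32MB, 16MB) = 32MB.  Since arc_c * 8 = 1GB
 * >= 1GB, the "all but 1GB" rule yields arc_c_max = 0, which the
 * MAX(arc_c * 4, ...) clause raises to 512MB, i.e. half of kmem.
 * arc_c then begins at arc_c_max with arc_p at half of it (256MB).
 */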

void
arc_fini(void)
{
        mutex_enter(&arc_reclaim_thr_lock);
        arc_thread_exit = 1;
        cv_signal(&arc_reclaim_thr_cv);
        while (arc_thread_exit != 0)
                cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
        mutex_exit(&arc_reclaim_thr_lock);

        arc_flush();

        arc_dead = TRUE;

        if (arc_ksp != NULL) {
                kstat_delete(arc_ksp);
                arc_ksp = NULL;
        }

        mutex_destroy(&arc_eviction_mtx);
        mutex_destroy(&arc_reclaim_thr_lock);
        cv_destroy(&arc_reclaim_thr_cv);

        list_destroy(&arc_mru->arcs_list);
        list_destroy(&arc_mru_ghost->arcs_list);
        list_destroy(&arc_mfu->arcs_list);
        list_destroy(&arc_mfu_ghost->arcs_list);

        mutex_destroy(&arc_anon->arcs_mtx);
        mutex_destroy(&arc_mru->arcs_mtx);
        mutex_destroy(&arc_mru_ghost->arcs_mtx);
        mutex_destroy(&arc_mfu->arcs_mtx);
        mutex_destroy(&arc_mfu_ghost->arcs_mtx);

        buf_fini();

        mutex_destroy(&arc_lowmem_lock);
#ifdef _KERNEL
        if (arc_event_lowmem != NULL)
                EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
#endif
}