arc.c revision 248572
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2011 by Delphix. All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 *    Pages in its cache cannot be "locked" into memory. This makes
 *    the eviction algorithm simple: evict the last page in the list.
 *    This also makes the performance characteristics easy to reason
 *    about. Our cache is not so simple. At any given moment, some
 *    subset of the blocks in the cache are un-evictable because we
 *    have handed out a reference to them. Blocks are only evictable
 *    when there are no external references active. This makes
 *    eviction far more problematic: we choose to evict the evictable
 *    blocks that are the "lowest" in the list.
 *
 *    There are times when it is not possible to evict the requested
 *    space. In these circumstances we are unable to adjust the cache
 *    size. To prevent the cache growing unbounded at these times we
 *    implement a "cache throttle" that slows the flow of new data
 *    into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 *    Pages are evicted when the cache is full and there is a cache
 *    miss. Our model has a variable sized cache. It grows with
 *    high use, but also tries to react to memory pressure from the
 *    operating system: decreasing its size when system memory is
 *    tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size. All
 *    elements of the cache are therefore exactly the same size. So
 *    when adjusting the cache size following a cache miss, it's simply
 *    a matter of choosing a single page to evict. In our model, we
 *    have variable sized cache blocks (ranging from 512 bytes to
 *    128K bytes). We therefore choose a set of blocks to evict to make
 *    space for a cache miss that approximates as closely as possible
 *    the space used by the new block.
 *
 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D. Modha, FAST 2003
 */

/*
 * The locking model:
 *
 * A new reference to a cache buffer can be obtained in two
 * ways: 1) via a hash table lookup using the DVA as a key,
 * or 2) via one of the ARC lists. The arc_read() interface
 * uses method 1, while the internal arc algorithms for
 * adjusting the cache use method 2. We therefore provide two
 * types of locks: 1) the hash table lock array, and 2) the
 * arc list locks.
 *
 * Buffers do not have their own mutexes, rather they rely on the
 * hash table mutexes for the bulk of their protection (i.e. most
 * fields in the arc_buf_hdr_t are protected by these mutexes).
 *
 * buf_hash_find() returns the appropriate mutex (held) when it
 * locates the requested buffer in the hash table. It returns
 * NULL for the mutex if the buffer was not in the table.
 *
 * buf_hash_remove() expects the appropriate hash mutex to be
 * already held before it is invoked.
 *
 * Each arc state also has a mutex which is used to protect the
 * buffer list associated with the state. When attempting to
 * obtain a hash table lock while holding an arc list lock you
 * must use mutex_tryenter() to avoid deadlock. Also note that
 * the active state mutex must be held before the ghost state mutex.
 *
 * Arc buffers may have an associated eviction callback function.
 * This function will be invoked prior to removing the buffer (e.g.
 * in arc_do_user_evicts()). Note however that the data associated
 * with the buffer may be evicted prior to the callback. The callback
 * must be made with *no locks held* (to prevent deadlock). Additionally,
 * the users of callbacks must ensure that their private data is
 * protected from simultaneous callbacks from arc_buf_evict()
 * and arc_do_user_evicts().
 *
 * Note that the majority of the performance stats are manipulated
 * with atomic operations.
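 *
 * To illustrate the mutex_tryenter() rule above (a sketch, not a
 * verbatim excerpt from this file): eviction code that already holds
 * an arc list lock and needs a buffer's hash lock must not block on
 * it, but should instead skip the buffer and record the missed lock:
 *
 *	if (MUTEX_HELD(hash_lock) || mutex_tryenter(hash_lock)) {
 *		...do the work, then mutex_exit(hash_lock)...
 *	} else {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *	}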
 *
 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
 *
 *	- L2ARC buflist creation
 *	- L2ARC buflist eviction
 *	- L2ARC write completion, which walks L2ARC buflists
 *	- ARC header destruction, as it removes from L2ARC buflists
 *	- ARC header release, as it removes from L2ARC buflists
 */

#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#ifdef _KERNEL
#include <sys/dnlc.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/trim_map.h>
#include <zfs_fletcher.h>
#include <sys/sdt.h>

#include <vm/vm_pageout.h>

#ifdef illumos
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
int arc_procfd;
#endif
#endif /* illumos */

static kmutex_t		arc_reclaim_thr_lock;
static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
static uint8_t		arc_thread_exit;

extern int zfs_write_limit_shift;
extern uint64_t zfs_write_limit_max;
extern kmutex_t zfs_write_limit_lock;

#define	ARC_REDUCE_DNLC_PERCENT	3
uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;

typedef enum arc_reclaim_strategy {
	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
} arc_reclaim_strategy_t;

/* number of seconds before growing cache again */
static int		arc_grow_retry = 60;

/* shift of arc_c for calculating both min and max arc_p */
static int		arc_p_min_shift = 4;

/* log2(fraction of arc to reclaim) */
static int		arc_shrink_shift = 5;

/*
 * minimum lifespan of a prefetch block in clock ticks
 * (initialized in arc_init())
 */
static int		arc_min_prefetch_lifespan;

static int arc_dead;
extern int zfs_prefetch_disable;

/*
 * The arc has filled available memory and has now warmed up.
 */
static boolean_t arc_warm;

/*
 * These tunables are for performance analysis.
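 * A value of zero means "use the default computed in arc_init()";
 * these variables only override the defaults when set non-zero (for
 * arc_max, arc_min and arc_meta_limit typically via the loader
 * tunables declared just below).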
 */
uint64_t zfs_arc_max;
uint64_t zfs_arc_min;
uint64_t zfs_arc_meta_limit = 0;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_disable_dup_eviction = 0;

TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
SYSCTL_DECL(_vfs_zfs);
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
    "Maximum ARC size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
    "Minimum ARC size");

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states. These are
 * the only buffers that can be evicted or deleted. Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA. These are buffers that hold dirty block copies
 * before they are written to stable storage. By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed. Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists. The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places. The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
struct arcs_lock {
	kmutex_t	arcs_lock;
#ifdef _KERNEL
	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

/*
 * must be power of two for mask use to work
 */
#define	ARC_BUFC_NUMDATALISTS		16
#define	ARC_BUFC_NUMMETADATALISTS	16
#define	ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)

typedef struct arc_state {
	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
	uint64_t arcs_size;	/* total amount of data in this state */
	list_t	arcs_lists[ARC_BUFC_NUMLISTS];	/* list of evictable buffers */
	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
} arc_state_t;

#define	ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	kstat_named_t arcstat_stolen;
	kstat_named_t arcstat_recycle_miss;
	kstat_named_t arcstat_mutex_miss;
	kstat_named_t arcstat_evict_skip;
	kstat_named_t arcstat_evict_l2_cached;
	kstat_named_t arcstat_evict_l2_eligible;
	kstat_named_t arcstat_evict_l2_ineligible;
	kstat_named_t arcstat_hash_elements;
	kstat_named_t arcstat_hash_elements_max;
	kstat_named_t arcstat_hash_collisions;
	kstat_named_t arcstat_hash_chains;
	kstat_named_t arcstat_hash_chain_max;
	kstat_named_t arcstat_p;
	kstat_named_t arcstat_c;
	kstat_named_t arcstat_c_min;
	kstat_named_t arcstat_c_max;
	kstat_named_t arcstat_size;
	kstat_named_t arcstat_hdr_size;
	kstat_named_t arcstat_data_size;
	kstat_named_t arcstat_other_size;
	kstat_named_t arcstat_l2_hits;
	kstat_named_t arcstat_l2_misses;
	kstat_named_t arcstat_l2_feeds;
	kstat_named_t arcstat_l2_rw_clash;
	kstat_named_t arcstat_l2_read_bytes;
	kstat_named_t arcstat_l2_write_bytes;
	kstat_named_t arcstat_l2_writes_sent;
	kstat_named_t arcstat_l2_writes_done;
	kstat_named_t arcstat_l2_writes_error;
	kstat_named_t arcstat_l2_writes_hdr_miss;
	kstat_named_t arcstat_l2_evict_lock_retry;
	kstat_named_t arcstat_l2_evict_reading;
	kstat_named_t arcstat_l2_free_on_write;
	kstat_named_t arcstat_l2_abort_lowmem;
	kstat_named_t arcstat_l2_cksum_bad;
	kstat_named_t arcstat_l2_io_error;
	kstat_named_t arcstat_l2_size;
	kstat_named_t arcstat_l2_hdr_size;
	kstat_named_t arcstat_l2_write_trylock_fail;
	kstat_named_t arcstat_l2_write_passed_headroom;
	kstat_named_t arcstat_l2_write_spa_mismatch;
	kstat_named_t arcstat_l2_write_in_l2;
	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
	kstat_named_t arcstat_l2_write_not_cacheable;
	kstat_named_t arcstat_l2_write_full;
	kstat_named_t arcstat_l2_write_buffer_iter;
	kstat_named_t arcstat_l2_write_pios;
	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
	kstat_named_t arcstat_l2_write_buffer_list_iter;
	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
	kstat_named_t arcstat_memory_throttle_count;
	kstat_named_t arcstat_duplicate_buffers;
	kstat_named_t arcstat_duplicate_buffers_size;
	kstat_named_t arcstat_duplicate_reads;
} arc_stats_t;

static arc_stats_t arc_stats = {
	{ "hits",			KSTAT_DATA_UINT64 },
	{ "misses",			KSTAT_DATA_UINT64 },
	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
	{ "mru_hits",			KSTAT_DATA_UINT64 },
	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "mfu_hits",			KSTAT_DATA_UINT64 },
	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
	{ "allocated",			KSTAT_DATA_UINT64 },
	{ "deleted",			KSTAT_DATA_UINT64 },
	{ "stolen",			KSTAT_DATA_UINT64 },
	{ "recycle_miss",		KSTAT_DATA_UINT64 },
	{ "mutex_miss",			KSTAT_DATA_UINT64 },
	{ "evict_skip",			KSTAT_DATA_UINT64 },
	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
	{ "hash_elements",		KSTAT_DATA_UINT64 },
	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
	{ "hash_collisions",		KSTAT_DATA_UINT64 },
	{ "hash_chains",		KSTAT_DATA_UINT64 },
	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
	{ "p",				KSTAT_DATA_UINT64 },
	{ "c",				KSTAT_DATA_UINT64 },
	{ "c_min",			KSTAT_DATA_UINT64 },
	{ "c_max",			KSTAT_DATA_UINT64 },
	{ "size",			KSTAT_DATA_UINT64 },
	{ "hdr_size",			KSTAT_DATA_UINT64 },
	{ "data_size",			KSTAT_DATA_UINT64 },
	{ "other_size",			KSTAT_DATA_UINT64 },
	{ "l2_hits",			KSTAT_DATA_UINT64 },
	{ "l2_misses",			KSTAT_DATA_UINT64 },
	{ "l2_feeds",			KSTAT_DATA_UINT64 },
	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
	{ "l2_io_error",		KSTAT_DATA_UINT64 },
	{ "l2_size",			KSTAT_DATA_UINT64 },
	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
	{ "l2_write_trylock_fail",	KSTAT_DATA_UINT64 },
	{ "l2_write_passed_headroom",	KSTAT_DATA_UINT64 },
	{ "l2_write_spa_mismatch",	KSTAT_DATA_UINT64 },
	{ "l2_write_in_l2",		KSTAT_DATA_UINT64 },
	{ "l2_write_io_in_progress",	KSTAT_DATA_UINT64 },
	{ "l2_write_not_cacheable",	KSTAT_DATA_UINT64 },
	{ "l2_write_full",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_pios",		KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_iter",	KSTAT_DATA_UINT64 },
	{ "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
	{ "duplicate_reads",		KSTAT_DATA_UINT64 }
};

#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)

#define	ARCSTAT_INCR(stat, val) \
	atomic_add_64(&arc_stats.stat.value.ui64, (val));

#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)

#define	ARCSTAT_MAX(stat, val) {					\
	uint64_t m;							\
	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
		continue;						\
}

#define	ARCSTAT_MAXSTAT(stat) \
	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)

/*
 * We define a macro to allow ARC hits/misses to be easily broken down by
 * two separate conditions, giving a total of four different subtypes for
 * each of hits and misses (so eight statistics total).
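 *
 * For example, the hit accounting in arc_buf_add_ref() below uses it as
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.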
 */
#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
	if (cond1) {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
		}							\
	} else {							\
		if (cond2) {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
		} else {						\
			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
		}							\
	}

kstat_t			*arc_ksp;
static arc_state_t	*arc_anon;
static arc_state_t	*arc_mru;
static arc_state_t	*arc_mru_ghost;
static arc_state_t	*arc_mfu;
static arc_state_t	*arc_mfu_ghost;
static arc_state_t	*arc_l2c_only;

/*
 * There are several ARC variables that are critical to export as kstats --
 * but we don't want to have to grovel around in the kstat whenever we wish to
 * manipulate them. For these variables, we therefore define them to be in
 * terms of the statistic variable. This assures that we are not introducing
 * the possibility of inconsistency by having shadow copies of the variables,
 * while still allowing the code to be readable.
 */
#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */

static int		arc_no_grow;	/* Don't try to grow cache size */
static uint64_t		arc_tempreserve;
static uint64_t		arc_loaned_bytes;
static uint64_t		arc_meta_used;
static uint64_t		arc_meta_limit;
static uint64_t		arc_meta_max = 0;
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
    "ARC metadata used");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
    "ARC metadata limit");

typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;

typedef struct arc_callback arc_callback_t;

struct arc_callback {
	void			*acb_private;
	arc_done_func_t		*acb_done;
	arc_buf_t		*acb_buf;
	zio_t			*acb_zio_dummy;
	arc_callback_t		*acb_next;
};

typedef struct arc_write_callback arc_write_callback_t;

struct arc_write_callback {
	void		*awcb_private;
	arc_done_func_t	*awcb_ready;
	arc_done_func_t	*awcb_done;
	arc_buf_t	*awcb_buf;
};

struct arc_buf_hdr {
	/* protected by hash lock */
	dva_t			b_dva;
	uint64_t		b_birth;
	uint64_t		b_cksum0;

	kmutex_t		b_freeze_lock;
	zio_cksum_t		*b_freeze_cksum;
	void			*b_thawed;

	arc_buf_hdr_t		*b_hash_next;
	arc_buf_t		*b_buf;

	uint32_t		b_flags;
	uint32_t		b_datacnt;

	arc_callback_t		*b_acb;
	kcondvar_t		b_cv;

	/* immutable */
	arc_buf_contents_t	b_type;
	uint64_t		b_size;
	uint64_t		b_spa;

	/* protected by arc state mutex */
	arc_state_t		*b_state;
	list_node_t		b_arc_node;

	/* updated atomically */
	clock_t			b_arc_access;

	/* self protecting */
	refcount_t		b_refcnt;

	l2arc_buf_hdr_t		*b_l2hdr;
	list_node_t		b_l2node;
};

static arc_buf_t *arc_eviction_list;
static kmutex_t arc_eviction_mtx;
static arc_buf_hdr_t arc_eviction_hdr;
static void arc_get_data_buf(arc_buf_t *buf);
static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
static int arc_evict_needed(arc_buf_contents_t type);
static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
#ifdef illumos
static void arc_buf_watch(arc_buf_t *buf);
#endif /* illumos */

static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);

#define	GHOST_STATE(state)	\
	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
	(state) == arc_l2c_only)

/*
 * Private ARC flags. These are internal to the ARC and will show up
 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
 * be passed in as arc_flags in things like arc_read. However, these flags
 * should never be passed and should only be set by ARC code. When adding new
 * public flags, make sure not to smash the private ones.
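 *
 * (The publicly declared flags -- e.g. ARC_PREFETCH and ARC_L2CACHE,
 * tested by the HDR_* macros below -- come from arc.h and occupy the
 * low-order bits, which is why the private flags start at (1 << 9).)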
 */

#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */

#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
				    (hdr)->b_l2hdr != NULL)
#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)

/*
 * Other sizes
 */

#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))

/*
 * Hash table routines
 */

#define	HT_LOCK_PAD	CACHE_LINE_SIZE

struct ht_lock {
	kmutex_t	ht_lock;
#ifdef _KERNEL
	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
#endif
};

#define	BUF_LOCKS 256
typedef struct buf_hash_table {
	uint64_t ht_mask;
	arc_buf_hdr_t **ht_table;
	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
} buf_hash_table_t;

static buf_hash_table_t buf_hash_table;

#define	BUF_HASH_INDEX(spa, dva, birth) \
	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define	HDR_LOCK(hdr) \
	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))

uint64_t zfs_crc64_table[256];

/*
 * Level 2 ARC
 */

#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
#define	L2ARC_HEADROOM		2		/* num of writes */
#define	L2ARC_FEED_SECS		1		/* caching interval secs */
#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */

#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)

/*
 * L2ARC Performance Tunables
 */
uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
    &l2arc_write_max, 0, "max write size");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
    &l2arc_write_boost, 0, "extra write during warmup");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
    &l2arc_headroom, 0, "number of dev writes");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
    &l2arc_feed_secs, 0, "interval seconds");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
    &l2arc_feed_min_ms, 0, "min interval milliseconds");

SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
    &l2arc_feed_again, 0, "turbo warmup");
SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
    &l2arc_norw, 0, "no reads during writes");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
    &ARC_anon.arcs_size, 0, "size of anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in anonymous state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
    &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in anonymous state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
    &ARC_mru.arcs_size, 0, "size of mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mru state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
    &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mru ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mru ghost state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
    &ARC_mfu.arcs_size, 0, "size of mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mfu state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
    &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
    "size of metadata in mfu ghost state");
SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
    "size of data in mfu ghost state");

SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
    &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");

/*
 * L2ARC Internals
 */
typedef struct l2arc_dev {
	vdev_t		*l2ad_vdev;	/* vdev */
	spa_t		*l2ad_spa;	/* spa */
	uint64_t	l2ad_hand;	/* next write location */
	uint64_t	l2ad_write;	/* desired write size, bytes */
	uint64_t	l2ad_boost;	/* warmup write boost, bytes */
	uint64_t	l2ad_start;	/* first addr on device */
	uint64_t	l2ad_end;	/* last addr on device */
	uint64_t	l2ad_evict;	/* last addr eviction reached */
	boolean_t	l2ad_first;	/* first sweep through */
	boolean_t	l2ad_writing;	/* currently writing */
	list_t		*l2ad_buflist;	/* buffer list */
	list_node_t	l2ad_node;	/* device list node */
} l2arc_dev_t;

static list_t L2ARC_dev_list;			/* device list */
static list_t *l2arc_dev_list;			/* device list pointer */
static kmutex_t l2arc_dev_mtx;			/* device list mutex */
static l2arc_dev_t *l2arc_dev_last;		/* last device used */
static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
static list_t L2ARC_free_on_write;		/* free after write buf list */
static list_t *l2arc_free_on_write;		/* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
static uint64_t l2arc_ndev;			/* number of devices */

typedef struct l2arc_read_callback {
	arc_buf_t	*l2rcb_buf;		/* read buffer */
	spa_t		*l2rcb_spa;		/* spa */
	blkptr_t	l2rcb_bp;		/* original blkptr */
	zbookmark_t	l2rcb_zb;		/* original bookmark */
	int		l2rcb_flags;		/* original flags */
} l2arc_read_callback_t;

typedef struct l2arc_write_callback {
	l2arc_dev_t	*l2wcb_dev;		/* device info */
	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
} l2arc_write_callback_t;

struct l2arc_buf_hdr {
	/* protected by arc_buf_hdr mutex */
	l2arc_dev_t	*b_dev;			/* L2ARC device */
	uint64_t	b_daddr;		/* disk address, offset byte */
};

typedef struct l2arc_data_free {
	/* protected by l2arc_free_on_write_mtx */
	void		*l2df_data;
	size_t		l2df_size;
	void		(*l2df_func)(void *, size_t);
	list_node_t	l2df_list_node;
} l2arc_data_free_t;

static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;

static void l2arc_read_done(zio_t *zio);
static void l2arc_hdr_stat_add(void);
static void l2arc_hdr_stat_remove(void);

static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
	uint8_t *vdva = (uint8_t *)dva;
	uint64_t crc = -1ULL;
	int i;

	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

	for (i = 0; i < sizeof (dva_t); i++)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];

	crc ^= (spa>>8) ^ birth;

	return (crc);
}

#define	BUF_EMPTY(buf)						\
	((buf)->b_dva.dva_word[0] == 0 &&			\
	(buf)->b_dva.dva_word[1] == 0 &&			\
	(buf)->b_birth == 0)

#define	BUF_EQUAL(spa, dva, birth, buf)				\
	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
	((buf)->b_birth == birth) && ((buf)->b_spa == spa)

static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
	hdr->b_dva.dva_word[0] = 0;
	hdr->b_dva.dva_word[1] = 0;
	hdr->b_birth = 0;
	hdr->b_cksum0 = 0;
}

static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *buf;

	mutex_enter(hash_lock);
	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
	    buf = buf->b_hash_next) {
		if (BUF_EQUAL(spa, dva, birth, buf)) {
			*lockp = hash_lock;
			return (buf);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}

/*
 * Insert an entry into the hash table. If there is already an element
 * equal to elem in the hash table, then the already existing element
 * will be returned and the new element will not be inserted.
 * Otherwise returns NULL.
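 * In either case the bucket's hash lock is acquired and returned held
 * via *lockp; the caller is responsible for dropping it.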
 */
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
{
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *fbuf;
	uint32_t i;

	ASSERT(!HDR_IN_HASH_TABLE(buf));
	*lockp = hash_lock;
	mutex_enter(hash_lock);
	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
	    fbuf = fbuf->b_hash_next, i++) {
		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
			return (fbuf);
	}

	buf->b_hash_next = buf_hash_table.ht_table[idx];
	buf_hash_table.ht_table[idx] = buf;
	buf->b_flags |= ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	if (i > 0) {
		ARCSTAT_BUMP(arcstat_hash_collisions);
		if (i == 1)
			ARCSTAT_BUMP(arcstat_hash_chains);

		ARCSTAT_MAX(arcstat_hash_chain_max, i);
	}

	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);

	return (NULL);
}

static void
buf_hash_remove(arc_buf_hdr_t *buf)
{
	arc_buf_hdr_t *fbuf, **bufp;
	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);

	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
	ASSERT(HDR_IN_HASH_TABLE(buf));

	bufp = &buf_hash_table.ht_table[idx];
	while ((fbuf = *bufp) != buf) {
		ASSERT(fbuf != NULL);
		bufp = &fbuf->b_hash_next;
	}
	*bufp = buf->b_hash_next;
	buf->b_hash_next = NULL;
	buf->b_flags &= ~ARC_IN_HASH_TABLE;

	/* collect some hash table performance data */
	ARCSTAT_BUMPDOWN(arcstat_hash_elements);

	if (buf_hash_table.ht_table[idx] &&
	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}

/*
 * Global data structures and functions for the buf kmem cache.
 */
static kmem_cache_t *hdr_cache;
static kmem_cache_t *buf_cache;

static void
buf_fini(void)
{
	int i;

	kmem_free(buf_hash_table.ht_table,
	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
	for (i = 0; i < BUF_LOCKS; i++)
		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
	kmem_cache_destroy(hdr_cache);
	kmem_cache_destroy(buf_cache);
}

/*
 * Constructor callback - called when the cache is empty
 * and a new buf is requested.
 */
/* ARGSUSED */
static int
hdr_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_hdr_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_hdr_t));
	refcount_create(&buf->b_refcnt);
	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);

	return (0);
}

/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
	arc_buf_t *buf = vbuf;

	bzero(buf, sizeof (arc_buf_t));
	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);

	return (0);
}

/*
 * Destructor callback - called when a cached buf is
 * no longer required.
 */
/* ARGSUSED */
static void
hdr_dest(void *vbuf, void *unused)
{
	arc_buf_hdr_t *buf = vbuf;

	ASSERT(BUF_EMPTY(buf));
	refcount_destroy(&buf->b_refcnt);
	cv_destroy(&buf->b_cv);
	mutex_destroy(&buf->b_freeze_lock);
	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
}

/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
	arc_buf_t *buf = vbuf;

	mutex_destroy(&buf->b_evict_lock);
	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}

/*
 * Reclaim callback -- invoked when memory is low.
 */
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
	dprintf("hdr_recl called\n");
	/*
	 * umem calls the reclaim func when we destroy the buf cache,
	 * which is after we do arc_fini().
	 */
	if (!arc_dead)
		cv_signal(&arc_reclaim_thr_cv);
}

static void
buf_init(void)
{
	uint64_t *ct;
	uint64_t hsize = 1ULL << 12;
	int i, j;

	/*
	 * The hash table is big enough to fill all of physical memory
	 * with an average 64K block size. The table will take up
	 * totalmem * sizeof (void*) / 64K (e.g. 128KB/GB with 8-byte
	 * pointers).
	 */
	while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
		hsize <<= 1;
retry:
	buf_hash_table.ht_mask = hsize - 1;
	buf_hash_table.ht_table =
	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
	if (buf_hash_table.ht_table == NULL) {
		ASSERT(hsize > (1ULL << 8));
		hsize >>= 1;
		goto retry;
	}

	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);

	for (i = 0; i < 256; i++)
		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);

	for (i = 0; i < BUF_LOCKS; i++) {
		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
		    NULL, MUTEX_DEFAULT, NULL);
	}
}

#define	ARC_MINTIME	(hz>>4) /* 62 ms */

static void
arc_cksum_verify(arc_buf_t *buf)
{
	zio_cksum_t zc;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum == NULL ||
	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
		panic("buffer modified while frozen!");
	mutex_exit(&buf->b_hdr->b_freeze_lock);
}

static int
arc_cksum_equal(arc_buf_t *buf)
{
	zio_cksum_t zc;
	int equal;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
	mutex_exit(&buf->b_hdr->b_freeze_lock);

	return (equal);
}

static void
arc_cksum_compute(arc_buf_t *buf, boolean_t force)
{
	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		mutex_exit(&buf->b_hdr->b_freeze_lock);
		return;
	}
	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
	    buf->b_hdr->b_freeze_cksum);
	mutex_exit(&buf->b_hdr->b_freeze_lock);
#ifdef illumos
	arc_buf_watch(buf);
#endif /* illumos */
}

#ifdef illumos
#ifndef _KERNEL
typedef struct procctl {
	long cmd;
	prwatch_t prwatch;
} procctl_t;
#endif

/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = 0;
		ctl.prwatch.pr_wflags = 0;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}

/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
	if (arc_watch) {
		int result;
		procctl_t ctl;
		ctl.cmd = PCWATCH;
		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
		ctl.prwatch.pr_size = buf->b_hdr->b_size;
		ctl.prwatch.pr_wflags = WA_WRITE;
		result = write(arc_procfd, &ctl, sizeof (ctl));
		ASSERT3U(result, ==, sizeof (ctl));
	}
#endif
}
#endif /* illumos */

void
arc_buf_thaw(arc_buf_t *buf)
{
	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_state != arc_anon)
			panic("modifying non-anon buffer!");
		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
			panic("modifying buffer while i/o in progress!");
		arc_cksum_verify(buf);
	}

	mutex_enter(&buf->b_hdr->b_freeze_lock);
	if (buf->b_hdr->b_freeze_cksum != NULL) {
		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
		buf->b_hdr->b_freeze_cksum = NULL;
	}

	if (zfs_flags & ZFS_DEBUG_MODIFY) {
		if (buf->b_hdr->b_thawed)
			kmem_free(buf->b_hdr->b_thawed, 1);
		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
	}

	mutex_exit(&buf->b_hdr->b_freeze_lock);

#ifdef illumos
	arc_buf_unwatch(buf);
#endif /* illumos */
}

void
arc_buf_freeze(arc_buf_t *buf)
{
	kmutex_t *hash_lock;

	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
		return;

	hash_lock = HDR_LOCK(buf->b_hdr);
	mutex_enter(hash_lock);

	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
	    buf->b_hdr->b_state == arc_anon);
	arc_cksum_compute(buf, B_FALSE);
	mutex_exit(hash_lock);
}

static void
get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list,
    kmutex_t **lock)
{
	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);

	if (ab->b_type == ARC_BUFC_METADATA)
		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
	else {
		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
	}

	*list = &state->arcs_lists[buf_hashid];
	*lock = ARCS_LOCK(state, buf_hashid);
}

static void
add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	ASSERT(MUTEX_HELD(hash_lock));

	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
	    (ab->b_state != arc_anon)) {
		uint64_t delta = ab->b_size * ab->b_datacnt;
		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, ab->b_state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(list_link_active(&ab->b_arc_node));
		list_remove(list, ab);
		if (GHOST_STATE(ab->b_state)) {
			ASSERT0(ab->b_datacnt);
			ASSERT3P(ab->b_buf, ==, NULL);
			delta = ab->b_size;
		}
		ASSERT(delta > 0);
		ASSERT3U(*size, >=, delta);
		atomic_add_64(size, -delta);
		mutex_exit(lock);
		/* remove the prefetch flag if we get a reference */
		if (ab->b_flags & ARC_PREFETCH)
			ab->b_flags &= ~ARC_PREFETCH;
	}
}

static int
remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
{
	int cnt;
	arc_state_t *state = ab->b_state;

	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
	ASSERT(!GHOST_STATE(state));

	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
	    (state != arc_anon)) {
		uint64_t *size = &state->arcs_lsize[ab->b_type];
		list_t *list;
		kmutex_t *lock;

		get_buf_info(ab, state, &list, &lock);
		ASSERT(!MUTEX_HELD(lock));
		mutex_enter(lock);
		ASSERT(!list_link_active(&ab->b_arc_node));
		list_insert_head(list, ab);
		ASSERT(ab->b_datacnt > 0);
		atomic_add_64(size, ab->b_size * ab->b_datacnt);
		mutex_exit(lock);
	}
	return (cnt);
}

/*
 * Move the supplied buffer to the indicated state. The mutex
 * for the buffer must be held by the caller.
 */
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
{
	arc_state_t *old_state = ab->b_state;
	int64_t refcnt = refcount_count(&ab->b_refcnt);
	uint64_t from_delta, to_delta;
	list_t *list;
	kmutex_t *lock;

	ASSERT(MUTEX_HELD(hash_lock));
	ASSERT(new_state != old_state);
	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);

	from_delta = to_delta = ab->b_datacnt * ab->b_size;

	/*
	 * If this buffer is evictable, transfer it from the
	 * old state list to the new state list.
	 */
	if (refcnt == 0) {
		if (old_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &old_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, old_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			ASSERT(list_link_active(&ab->b_arc_node));
			list_remove(list, ab);

			/*
			 * If prefetching out of the ghost cache,
			 * we will have a non-zero datacnt.
			 */
			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
				/* ghost elements have a ghost size */
				ASSERT(ab->b_buf == NULL);
				from_delta = ab->b_size;
			}
			ASSERT3U(*size, >=, from_delta);
			atomic_add_64(size, -from_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
		if (new_state != arc_anon) {
			int use_mutex;
			uint64_t *size = &new_state->arcs_lsize[ab->b_type];

			get_buf_info(ab, new_state, &list, &lock);
			use_mutex = !MUTEX_HELD(lock);
			if (use_mutex)
				mutex_enter(lock);

			list_insert_head(list, ab);

			/* ghost elements have a ghost size */
			if (GHOST_STATE(new_state)) {
				ASSERT(ab->b_datacnt == 0);
				ASSERT(ab->b_buf == NULL);
				to_delta = ab->b_size;
			}
			atomic_add_64(size, to_delta);

			if (use_mutex)
				mutex_exit(lock);
		}
	}

	ASSERT(!BUF_EMPTY(ab));
	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
		buf_hash_remove(ab);

	/* adjust state sizes */
	if (to_delta)
		atomic_add_64(&new_state->arcs_size, to_delta);
	if (from_delta) {
		ASSERT3U(old_state->arcs_size, >=, from_delta);
		atomic_add_64(&old_state->arcs_size, -from_delta);
	}
	ab->b_state = new_state;

	/* adjust l2arc hdr stats */
	if (new_state == arc_l2c_only)
		l2arc_hdr_stat_add();
	else if (old_state == arc_l2c_only)
		l2arc_hdr_stat_remove();
}

void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
		break;
	}

	atomic_add_64(&arc_meta_used, space);
	atomic_add_64(&arc_size, space);
}

void
arc_space_return(uint64_t space, arc_space_type_t type)
{
	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);

	switch (type) {
	case ARC_SPACE_DATA:
		ARCSTAT_INCR(arcstat_data_size, -space);
		break;
	case ARC_SPACE_OTHER:
		ARCSTAT_INCR(arcstat_other_size, -space);
		break;
	case ARC_SPACE_HDRS:
		ARCSTAT_INCR(arcstat_hdr_size, -space);
		break;
	case ARC_SPACE_L2HDRS:
		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
		break;
	}

	ASSERT(arc_meta_used >= space);
	if (arc_meta_max < arc_meta_used)
		arc_meta_max = arc_meta_used;
	atomic_add_64(&arc_meta_used, -space);
	ASSERT(arc_size >= space);
	atomic_add_64(&arc_size, -space);
}

void *
arc_data_buf_alloc(uint64_t size)
{
	if (arc_evict_needed(ARC_BUFC_DATA))
		cv_signal(&arc_reclaim_thr_cv);
	atomic_add_64(&arc_size, size);
	return (zio_data_buf_alloc(size));
}

void
arc_data_buf_free(void *buf, uint64_t size)
{
	zio_data_buf_free(buf, size);
	ASSERT(arc_size >= size);
	atomic_add_64(&arc_size, -size);
}

arc_buf_t *
arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
{
	arc_buf_hdr_t *hdr;
	arc_buf_t *buf;

	ASSERT3U(size, >, 0);
	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
	ASSERT(BUF_EMPTY(hdr));
	hdr->b_size = size;
	hdr->b_type = type;
	hdr->b_spa = spa_load_guid(spa);
	hdr->b_state = arc_anon;
	hdr->b_arc_access = 0;
	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = NULL;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	hdr->b_datacnt = 1;
	hdr->b_flags = 0;
	ASSERT(refcount_is_zero(&hdr->b_refcnt));
	(void) refcount_add(&hdr->b_refcnt, tag);

	return (buf);
}

static char *arc_onloan_tag = "onloan";

/*
 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
 * flight data by arc_tempreserve_space() until they are "returned". Loaned
 * buffers must be returned to the arc before they can be used by the DMU or
 * freed.
 */
arc_buf_t *
arc_loan_buf(spa_t *spa, int size)
{
	arc_buf_t *buf;

	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);

	atomic_add_64(&arc_loaned_bytes, size);
	return (buf);
}

/*
 * Return a loaned arc buffer to the arc.
 */
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr = buf->b_hdr;

	ASSERT(buf->b_data != NULL);
	(void) refcount_add(&hdr->b_refcnt, tag);
	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);

	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
}

/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
	arc_buf_hdr_t *hdr;

	ASSERT(buf->b_data != NULL);
	hdr = buf->b_hdr;
	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
	(void) refcount_remove(&hdr->b_refcnt, tag);
	buf->b_efunc = NULL;
	buf->b_private = NULL;

	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
}

static arc_buf_t *
arc_buf_clone(arc_buf_t *from)
{
	arc_buf_t *buf;
	arc_buf_hdr_t *hdr = from->b_hdr;
	uint64_t size = hdr->b_size;

	ASSERT(hdr->b_state != arc_anon);

	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
	buf->b_hdr = hdr;
	buf->b_data = NULL;
	buf->b_efunc = NULL;
	buf->b_private = NULL;
	buf->b_next = hdr->b_buf;
	hdr->b_buf = buf;
	arc_get_data_buf(buf);
	bcopy(from->b_data, buf->b_data, size);

	/*
	 * This buffer already exists in the arc so create a duplicate
	 * copy for the caller. If the buffer is associated with user data
	 * then track the size and number of duplicates. These stats will be
	 * updated as duplicate buffers are created and destroyed.
	 */
	if (hdr->b_type == ARC_BUFC_DATA) {
		ARCSTAT_BUMP(arcstat_duplicate_buffers);
		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
	}
	hdr->b_datacnt += 1;
	return (buf);
}

void
arc_buf_add_ref(arc_buf_t *buf, void* tag)
{
	arc_buf_hdr_t *hdr;
	kmutex_t *hash_lock;

	/*
	 * Check to see if this buffer is evicted. Callers
	 * must verify b_data != NULL to know if the add_ref
	 * was successful.
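	 *
	 * A caller sketch (illustrative only, not taken from this
	 * file):
	 *
	 *	arc_buf_add_ref(buf, tag);
	 *	if (buf->b_data == NULL) {
	 *		...evicted; fall back to arc_read()...
	 *	}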
1552 */ 1553 mutex_enter(&buf->b_evict_lock); 1554 if (buf->b_data == NULL) { 1555 mutex_exit(&buf->b_evict_lock); 1556 return; 1557 } 1558 hash_lock = HDR_LOCK(buf->b_hdr); 1559 mutex_enter(hash_lock); 1560 hdr = buf->b_hdr; 1561 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1562 mutex_exit(&buf->b_evict_lock); 1563 1564 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 1565 add_reference(hdr, hash_lock, tag); 1566 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 1567 arc_access(hdr, hash_lock); 1568 mutex_exit(hash_lock); 1569 ARCSTAT_BUMP(arcstat_hits); 1570 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 1571 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 1572 data, metadata, hits); 1573} 1574 1575/* 1576 * Free the arc data buffer. If it is an l2arc write in progress, 1577 * the buffer is placed on l2arc_free_on_write to be freed later. 1578 */ 1579static void 1580arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t)) 1581{ 1582 arc_buf_hdr_t *hdr = buf->b_hdr; 1583 1584 if (HDR_L2_WRITING(hdr)) { 1585 l2arc_data_free_t *df; 1586 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP); 1587 df->l2df_data = buf->b_data; 1588 df->l2df_size = hdr->b_size; 1589 df->l2df_func = free_func; 1590 mutex_enter(&l2arc_free_on_write_mtx); 1591 list_insert_head(l2arc_free_on_write, df); 1592 mutex_exit(&l2arc_free_on_write_mtx); 1593 ARCSTAT_BUMP(arcstat_l2_free_on_write); 1594 } else { 1595 free_func(buf->b_data, hdr->b_size); 1596 } 1597} 1598 1599static void 1600arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all) 1601{ 1602 arc_buf_t **bufp; 1603 1604 /* free up data associated with the buf */ 1605 if (buf->b_data) { 1606 arc_state_t *state = buf->b_hdr->b_state; 1607 uint64_t size = buf->b_hdr->b_size; 1608 arc_buf_contents_t type = buf->b_hdr->b_type; 1609 1610 arc_cksum_verify(buf); 1611#ifdef illumos 1612 arc_buf_unwatch(buf); 1613#endif /* illumos */ 1614 1615 if (!recycle) { 1616 if (type == ARC_BUFC_METADATA) { 1617 arc_buf_data_free(buf, zio_buf_free); 1618 arc_space_return(size, ARC_SPACE_DATA); 1619 } else { 1620 ASSERT(type == ARC_BUFC_DATA); 1621 arc_buf_data_free(buf, zio_data_buf_free); 1622 ARCSTAT_INCR(arcstat_data_size, -size); 1623 atomic_add_64(&arc_size, -size); 1624 } 1625 } 1626 if (list_link_active(&buf->b_hdr->b_arc_node)) { 1627 uint64_t *cnt = &state->arcs_lsize[type]; 1628 1629 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt)); 1630 ASSERT(state != arc_anon); 1631 1632 ASSERT3U(*cnt, >=, size); 1633 atomic_add_64(cnt, -size); 1634 } 1635 ASSERT3U(state->arcs_size, >=, size); 1636 atomic_add_64(&state->arcs_size, -size); 1637 buf->b_data = NULL; 1638 1639 /* 1640 * If we're destroying a duplicate buffer make sure 1641 * that the appropriate statistics are updated. 
1642 */ 1643 if (buf->b_hdr->b_datacnt > 1 && 1644 buf->b_hdr->b_type == ARC_BUFC_DATA) { 1645 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers); 1646 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size); 1647 } 1648 ASSERT(buf->b_hdr->b_datacnt > 0); 1649 buf->b_hdr->b_datacnt -= 1; 1650 } 1651 1652 /* only remove the buf if requested */ 1653 if (!all) 1654 return; 1655 1656 /* remove the buf from the hdr list */ 1657 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next) 1658 continue; 1659 *bufp = buf->b_next; 1660 buf->b_next = NULL; 1661 1662 ASSERT(buf->b_efunc == NULL); 1663 1664 /* clean up the buf */ 1665 buf->b_hdr = NULL; 1666 kmem_cache_free(buf_cache, buf); 1667} 1668 1669static void 1670arc_hdr_destroy(arc_buf_hdr_t *hdr) 1671{ 1672 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1673 ASSERT3P(hdr->b_state, ==, arc_anon); 1674 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 1675 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr; 1676 1677 if (l2hdr != NULL) { 1678 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx); 1679 /* 1680 * To prevent arc_free() and l2arc_evict() from 1681 * attempting to free the same buffer at the same time, 1682 * a FREE_IN_PROGRESS flag is given to arc_free() to 1683 * give it priority. l2arc_evict() can't destroy this 1684 * header while we are waiting on l2arc_buflist_mtx. 1685 * 1686 * The hdr may be removed from l2ad_buflist before we 1687 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked. 1688 */ 1689 if (!buflist_held) { 1690 mutex_enter(&l2arc_buflist_mtx); 1691 l2hdr = hdr->b_l2hdr; 1692 } 1693 1694 if (l2hdr != NULL) { 1695 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr, 1696 hdr->b_size); 1697 list_remove(l2hdr->b_dev->l2ad_buflist, hdr); 1698 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size); 1699 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t)); 1700 if (hdr->b_state == arc_l2c_only) 1701 l2arc_hdr_stat_remove(); 1702 hdr->b_l2hdr = NULL; 1703 } 1704 1705 if (!buflist_held) 1706 mutex_exit(&l2arc_buflist_mtx); 1707 } 1708 1709 if (!BUF_EMPTY(hdr)) { 1710 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1711 buf_discard_identity(hdr); 1712 } 1713 while (hdr->b_buf) { 1714 arc_buf_t *buf = hdr->b_buf; 1715 1716 if (buf->b_efunc) { 1717 mutex_enter(&arc_eviction_mtx); 1718 mutex_enter(&buf->b_evict_lock); 1719 ASSERT(buf->b_hdr != NULL); 1720 arc_buf_destroy(hdr->b_buf, FALSE, FALSE); 1721 hdr->b_buf = buf->b_next; 1722 buf->b_hdr = &arc_eviction_hdr; 1723 buf->b_next = arc_eviction_list; 1724 arc_eviction_list = buf; 1725 mutex_exit(&buf->b_evict_lock); 1726 mutex_exit(&arc_eviction_mtx); 1727 } else { 1728 arc_buf_destroy(hdr->b_buf, FALSE, TRUE); 1729 } 1730 } 1731 if (hdr->b_freeze_cksum != NULL) { 1732 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t)); 1733 hdr->b_freeze_cksum = NULL; 1734 } 1735 if (hdr->b_thawed) { 1736 kmem_free(hdr->b_thawed, 1); 1737 hdr->b_thawed = NULL; 1738 } 1739 1740 ASSERT(!list_link_active(&hdr->b_arc_node)); 1741 ASSERT3P(hdr->b_hash_next, ==, NULL); 1742 ASSERT3P(hdr->b_acb, ==, NULL); 1743 kmem_cache_free(hdr_cache, hdr); 1744} 1745 1746void 1747arc_buf_free(arc_buf_t *buf, void *tag) 1748{ 1749 arc_buf_hdr_t *hdr = buf->b_hdr; 1750 int hashed = hdr->b_state != arc_anon; 1751 1752 ASSERT(buf->b_efunc == NULL); 1753 ASSERT(buf->b_data != NULL); 1754 1755 if (hashed) { 1756 kmutex_t *hash_lock = HDR_LOCK(hdr); 1757 1758 mutex_enter(hash_lock); 1759 hdr = buf->b_hdr; 1760 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1761 1762 (void) remove_reference(hdr, hash_lock, tag); 1763 if (hdr->b_datacnt > 1) { 1764 arc_buf_destroy(buf, FALSE, TRUE); 1765 } 
else { 1766 ASSERT(buf == hdr->b_buf); 1767 ASSERT(buf->b_efunc == NULL); 1768 hdr->b_flags |= ARC_BUF_AVAILABLE; 1769 } 1770 mutex_exit(hash_lock); 1771 } else if (HDR_IO_IN_PROGRESS(hdr)) { 1772 int destroy_hdr; 1773 /* 1774 * We are in the middle of an async write. Don't destroy 1775 * this buffer unless the write completes before we finish 1776 * decrementing the reference count. 1777 */ 1778 mutex_enter(&arc_eviction_mtx); 1779 (void) remove_reference(hdr, NULL, tag); 1780 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 1781 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr); 1782 mutex_exit(&arc_eviction_mtx); 1783 if (destroy_hdr) 1784 arc_hdr_destroy(hdr); 1785 } else { 1786 if (remove_reference(hdr, NULL, tag) > 0) 1787 arc_buf_destroy(buf, FALSE, TRUE); 1788 else 1789 arc_hdr_destroy(hdr); 1790 } 1791} 1792 1793boolean_t 1794arc_buf_remove_ref(arc_buf_t *buf, void* tag) 1795{ 1796 arc_buf_hdr_t *hdr = buf->b_hdr; 1797 kmutex_t *hash_lock = HDR_LOCK(hdr); 1798 boolean_t no_callback = (buf->b_efunc == NULL); 1799 1800 if (hdr->b_state == arc_anon) { 1801 ASSERT(hdr->b_datacnt == 1); 1802 arc_buf_free(buf, tag); 1803 return (no_callback); 1804 } 1805 1806 mutex_enter(hash_lock); 1807 hdr = buf->b_hdr; 1808 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 1809 ASSERT(hdr->b_state != arc_anon); 1810 ASSERT(buf->b_data != NULL); 1811 1812 (void) remove_reference(hdr, hash_lock, tag); 1813 if (hdr->b_datacnt > 1) { 1814 if (no_callback) 1815 arc_buf_destroy(buf, FALSE, TRUE); 1816 } else if (no_callback) { 1817 ASSERT(hdr->b_buf == buf && buf->b_next == NULL); 1818 ASSERT(buf->b_efunc == NULL); 1819 hdr->b_flags |= ARC_BUF_AVAILABLE; 1820 } 1821 ASSERT(no_callback || hdr->b_datacnt > 1 || 1822 refcount_is_zero(&hdr->b_refcnt)); 1823 mutex_exit(hash_lock); 1824 return (no_callback); 1825} 1826 1827int 1828arc_buf_size(arc_buf_t *buf) 1829{ 1830 return (buf->b_hdr->b_size); 1831} 1832 1833/* 1834 * Called from the DMU to determine if the current buffer should be 1835 * evicted. In order to ensure proper locking, the eviction must be initiated 1836 * from the DMU. Return true if the buffer is associated with user data and 1837 * duplicate buffers still exist. 1838 */ 1839boolean_t 1840arc_buf_eviction_needed(arc_buf_t *buf) 1841{ 1842 arc_buf_hdr_t *hdr; 1843 boolean_t evict_needed = B_FALSE; 1844 1845 if (zfs_disable_dup_eviction) 1846 return (B_FALSE); 1847 1848 mutex_enter(&buf->b_evict_lock); 1849 hdr = buf->b_hdr; 1850 if (hdr == NULL) { 1851 /* 1852 * We are in arc_do_user_evicts(); let that function 1853 * perform the eviction. 1854 */ 1855 ASSERT(buf->b_data == NULL); 1856 mutex_exit(&buf->b_evict_lock); 1857 return (B_FALSE); 1858 } else if (buf->b_data == NULL) { 1859 /* 1860 * We have already been added to the arc eviction list; 1861 * recommend eviction. 1862 */ 1863 ASSERT3P(hdr, ==, &arc_eviction_hdr); 1864 mutex_exit(&buf->b_evict_lock); 1865 return (B_TRUE); 1866 } 1867 1868 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA) 1869 evict_needed = B_TRUE; 1870 1871 mutex_exit(&buf->b_evict_lock); 1872 return (evict_needed); 1873} 1874 1875/* 1876 * Evict buffers from list until we've removed the specified number of 1877 * bytes. Move the removed buffers to the appropriate evict state. 1878 * If the recycle flag is set, then attempt to "recycle" a buffer: 1879 * - look for a buffer to evict that is `bytes' long. 1880 * - return the data block from this buffer rather than freeing it. 1881 * This flag is used by callers that are trying to make space for a 1882 * new buffer in a full arc cache. 
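 * For example, arc_get_data_buf() below uses the recycle path,
 * effectively:
 *
 *	buf->b_data = arc_evict(state, 0, size, TRUE, type);
 *
 * so the data block of a same-sized evicted buffer is handed straight
 * to the new buffer instead of being freed and reallocated.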
1883 * 1884 * This function makes a "best effort". It skips over any buffers 1885 * it can't get a hash_lock on, and so may not catch all candidates. 1886 * It may also return without evicting as much space as requested. 1887 */ 1888static void * 1889arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle, 1890 arc_buf_contents_t type) 1891{ 1892 arc_state_t *evicted_state; 1893 uint64_t bytes_evicted = 0, skipped = 0, missed = 0; 1894 int64_t bytes_remaining; 1895 arc_buf_hdr_t *ab, *ab_prev = NULL; 1896 list_t *evicted_list, *list, *evicted_list_start, *list_start; 1897 kmutex_t *lock, *evicted_lock; 1898 kmutex_t *hash_lock; 1899 boolean_t have_lock; 1900 void *stolen = NULL; 1901 static int evict_metadata_offset, evict_data_offset; 1902 int i, idx, offset, list_count, count; 1903 1904 ASSERT(state == arc_mru || state == arc_mfu); 1905 1906 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 1907 1908 if (type == ARC_BUFC_METADATA) { 1909 offset = 0; 1910 list_count = ARC_BUFC_NUMMETADATALISTS; 1911 list_start = &state->arcs_lists[0]; 1912 evicted_list_start = &evicted_state->arcs_lists[0]; 1913 idx = evict_metadata_offset; 1914 } else { 1915 offset = ARC_BUFC_NUMMETADATALISTS; 1916 list_start = &state->arcs_lists[offset]; 1917 evicted_list_start = &evicted_state->arcs_lists[offset]; 1918 list_count = ARC_BUFC_NUMDATALISTS; 1919 idx = evict_data_offset; 1920 } 1921 bytes_remaining = evicted_state->arcs_lsize[type]; 1922 count = 0; 1923 1924evict_start: 1925 list = &list_start[idx]; 1926 evicted_list = &evicted_list_start[idx]; 1927 lock = ARCS_LOCK(state, (offset + idx)); 1928 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx)); 1929 1930 mutex_enter(lock); 1931 mutex_enter(evicted_lock); 1932 1933 for (ab = list_tail(list); ab; ab = ab_prev) { 1934 ab_prev = list_prev(list, ab); 1935 bytes_remaining -= (ab->b_size * ab->b_datacnt); 1936 /* prefetch buffers have a minimum lifespan */ 1937 if (HDR_IO_IN_PROGRESS(ab) || 1938 (spa && ab->b_spa != spa) || 1939 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) && 1940 ddi_get_lbolt() - ab->b_arc_access < 1941 arc_min_prefetch_lifespan)) { 1942 skipped++; 1943 continue; 1944 } 1945 /* "lookahead" for better eviction candidate */ 1946 if (recycle && ab->b_size != bytes && 1947 ab_prev && ab_prev->b_size == bytes) 1948 continue; 1949 hash_lock = HDR_LOCK(ab); 1950 have_lock = MUTEX_HELD(hash_lock); 1951 if (have_lock || mutex_tryenter(hash_lock)) { 1952 ASSERT0(refcount_count(&ab->b_refcnt)); 1953 ASSERT(ab->b_datacnt > 0); 1954 while (ab->b_buf) { 1955 arc_buf_t *buf = ab->b_buf; 1956 if (!mutex_tryenter(&buf->b_evict_lock)) { 1957 missed += 1; 1958 break; 1959 } 1960 if (buf->b_data) { 1961 bytes_evicted += ab->b_size; 1962 if (recycle && ab->b_type == type && 1963 ab->b_size == bytes && 1964 !HDR_L2_WRITING(ab)) { 1965 stolen = buf->b_data; 1966 recycle = FALSE; 1967 } 1968 } 1969 if (buf->b_efunc) { 1970 mutex_enter(&arc_eviction_mtx); 1971 arc_buf_destroy(buf, 1972 buf->b_data == stolen, FALSE); 1973 ab->b_buf = buf->b_next; 1974 buf->b_hdr = &arc_eviction_hdr; 1975 buf->b_next = arc_eviction_list; 1976 arc_eviction_list = buf; 1977 mutex_exit(&arc_eviction_mtx); 1978 mutex_exit(&buf->b_evict_lock); 1979 } else { 1980 mutex_exit(&buf->b_evict_lock); 1981 arc_buf_destroy(buf, 1982 buf->b_data == stolen, TRUE); 1983 } 1984 } 1985 1986 if (ab->b_l2hdr) { 1987 ARCSTAT_INCR(arcstat_evict_l2_cached, 1988 ab->b_size); 1989 } else { 1990 if (l2arc_write_eligible(ab->b_spa, ab)) { 1991 
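/* eligible for the L2ARC, but not (or no longer) cached there */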
ARCSTAT_INCR(arcstat_evict_l2_eligible, 1992 ab->b_size); 1993 } else { 1994 ARCSTAT_INCR( 1995 arcstat_evict_l2_ineligible, 1996 ab->b_size); 1997 } 1998 } 1999 2000 if (ab->b_datacnt == 0) { 2001 arc_change_state(evicted_state, ab, hash_lock); 2002 ASSERT(HDR_IN_HASH_TABLE(ab)); 2003 ab->b_flags |= ARC_IN_HASH_TABLE; 2004 ab->b_flags &= ~ARC_BUF_AVAILABLE; 2005 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab); 2006 } 2007 if (!have_lock) 2008 mutex_exit(hash_lock); 2009 if (bytes >= 0 && bytes_evicted >= bytes) 2010 break; 2011 if (bytes_remaining > 0) { 2012 mutex_exit(evicted_lock); 2013 mutex_exit(lock); 2014 idx = ((idx + 1) & (list_count - 1)); 2015 count++; 2016 goto evict_start; 2017 } 2018 } else { 2019 missed += 1; 2020 } 2021 } 2022 2023 mutex_exit(evicted_lock); 2024 mutex_exit(lock); 2025 2026 idx = ((idx + 1) & (list_count - 1)); 2027 count++; 2028 2029 if (bytes_evicted < bytes) { 2030 if (count < list_count) 2031 goto evict_start; 2032 else 2033 dprintf("only evicted %lld bytes from %x", 2034 (longlong_t)bytes_evicted, state); 2035 } 2036 if (type == ARC_BUFC_METADATA) 2037 evict_metadata_offset = idx; 2038 else 2039 evict_data_offset = idx; 2040 2041 if (skipped) 2042 ARCSTAT_INCR(arcstat_evict_skip, skipped); 2043 2044 if (missed) 2045 ARCSTAT_INCR(arcstat_mutex_miss, missed); 2046 2047 /* 2048 * We have just evicted some data into the ghost state, make 2049 * sure we also adjust the ghost state size if necessary. 2050 */ 2051 if (arc_no_grow && 2052 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) { 2053 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size + 2054 arc_mru_ghost->arcs_size - arc_c; 2055 2056 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) { 2057 int64_t todelete = 2058 MIN(arc_mru_ghost->arcs_lsize[type], mru_over); 2059 arc_evict_ghost(arc_mru_ghost, 0, todelete); 2060 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) { 2061 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type], 2062 arc_mru_ghost->arcs_size + 2063 arc_mfu_ghost->arcs_size - arc_c); 2064 arc_evict_ghost(arc_mfu_ghost, 0, todelete); 2065 } 2066 } 2067 if (stolen) 2068 ARCSTAT_BUMP(arcstat_stolen); 2069 2070 return (stolen); 2071} 2072 2073/* 2074 * Remove buffers from list until we've removed the specified number of 2075 * bytes. Destroy the buffers that are removed. 
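 * A negative `bytes' count (e.g. the -1 passed by arc_flush()) means
 * "evict everything evictable"; in that case busy hash locks are
 * waited on via list markers instead of being skipped.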
2076 */ 2077 static void 2078 arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes) 2079 { 2080 arc_buf_hdr_t *ab, *ab_prev; 2081 arc_buf_hdr_t marker = { 0 }; 2082 list_t *list, *list_start; 2083 kmutex_t *hash_lock, *lock; 2084 uint64_t bytes_deleted = 0; 2085 uint64_t bufs_skipped = 0; 2086 static int evict_offset; 2087 int list_count, idx = evict_offset; 2088 int offset, count = 0; 2089 2090 ASSERT(GHOST_STATE(state)); 2091 2092 /* 2093 * data lists come after metadata lists 2094 */ 2095 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS]; 2096 list_count = ARC_BUFC_NUMDATALISTS; 2097 offset = ARC_BUFC_NUMMETADATALISTS; 2098 2099evict_start: 2100 list = &list_start[idx]; 2101 lock = ARCS_LOCK(state, idx + offset); 2102 2103 mutex_enter(lock); 2104 for (ab = list_tail(list); ab; ab = ab_prev) { 2105 ab_prev = list_prev(list, ab); 2106 if (spa && ab->b_spa != spa) 2107 continue; 2108 2109 /* ignore markers */ 2110 if (ab->b_spa == 0) 2111 continue; 2112 2113 hash_lock = HDR_LOCK(ab); 2114 /* caller may be trying to modify this buffer, skip it */ 2115 if (MUTEX_HELD(hash_lock)) 2116 continue; 2117 if (mutex_tryenter(hash_lock)) { 2118 ASSERT(!HDR_IO_IN_PROGRESS(ab)); 2119 ASSERT(ab->b_buf == NULL); 2120 ARCSTAT_BUMP(arcstat_deleted); 2121 bytes_deleted += ab->b_size; 2122 2123 if (ab->b_l2hdr != NULL) { 2124 /* 2125 * This buffer is cached on the 2nd Level ARC; 2126 * don't destroy the header. 2127 */ 2128 arc_change_state(arc_l2c_only, ab, hash_lock); 2129 mutex_exit(hash_lock); 2130 } else { 2131 arc_change_state(arc_anon, ab, hash_lock); 2132 mutex_exit(hash_lock); 2133 arc_hdr_destroy(ab); 2134 } 2135 2136 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab); 2137 if (bytes >= 0 && bytes_deleted >= bytes) 2138 break; 2139 } else if (bytes < 0) { 2140 /* 2141 * Insert a list marker and then wait for the 2142 * hash lock to become available. Once it's 2143 * available, restart from where we left off. 
2144 */ 2145 list_insert_after(list, ab, &marker); 2146 mutex_exit(lock); 2147 mutex_enter(hash_lock); 2148 mutex_exit(hash_lock); 2149 mutex_enter(lock); 2150 ab_prev = list_prev(list, &marker); 2151 list_remove(list, &marker); 2152 } else 2153 bufs_skipped += 1; 2154 } 2155 mutex_exit(lock); 2156 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1)); 2157 count++; 2158 2159 if (count < list_count) 2160 goto evict_start; 2161 2162 evict_offset = idx; 2163 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] && 2164 (bytes < 0 || bytes_deleted < bytes)) { 2165 list_start = &state->arcs_lists[0]; 2166 list_count = ARC_BUFC_NUMMETADATALISTS; 2167 offset = count = 0; 2168 goto evict_start; 2169 } 2170 2171 if (bufs_skipped) { 2172 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped); 2173 ASSERT(bytes >= 0); 2174 } 2175 2176 if (bytes_deleted < bytes) 2177 dprintf("only deleted %lld bytes from %p", 2178 (longlong_t)bytes_deleted, state); 2179} 2180 2181static void 2182arc_adjust(void) 2183{ 2184 int64_t adjustment, delta; 2185 2186 /* 2187 * Adjust MRU size 2188 */ 2189 2190 adjustment = MIN((int64_t)(arc_size - arc_c), 2191 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used - 2192 arc_p)); 2193 2194 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) { 2195 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment); 2196 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA); 2197 adjustment -= delta; 2198 } 2199 2200 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2201 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment); 2202 (void) arc_evict(arc_mru, 0, delta, FALSE, 2203 ARC_BUFC_METADATA); 2204 } 2205 2206 /* 2207 * Adjust MFU size 2208 */ 2209 2210 adjustment = arc_size - arc_c; 2211 2212 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) { 2213 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]); 2214 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA); 2215 adjustment -= delta; 2216 } 2217 2218 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) { 2219 int64_t delta = MIN(adjustment, 2220 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]); 2221 (void) arc_evict(arc_mfu, 0, delta, FALSE, 2222 ARC_BUFC_METADATA); 2223 } 2224 2225 /* 2226 * Adjust ghost lists 2227 */ 2228 2229 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c; 2230 2231 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) { 2232 delta = MIN(arc_mru_ghost->arcs_size, adjustment); 2233 arc_evict_ghost(arc_mru_ghost, 0, delta); 2234 } 2235 2236 adjustment = 2237 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c; 2238 2239 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) { 2240 delta = MIN(arc_mfu_ghost->arcs_size, adjustment); 2241 arc_evict_ghost(arc_mfu_ghost, 0, delta); 2242 } 2243} 2244 2245static void 2246arc_do_user_evicts(void) 2247{ 2248 static arc_buf_t *tmp_arc_eviction_list; 2249 2250 /* 2251 * Move list over to avoid LOR 2252 */ 2253restart: 2254 mutex_enter(&arc_eviction_mtx); 2255 tmp_arc_eviction_list = arc_eviction_list; 2256 arc_eviction_list = NULL; 2257 mutex_exit(&arc_eviction_mtx); 2258 2259 while (tmp_arc_eviction_list != NULL) { 2260 arc_buf_t *buf = tmp_arc_eviction_list; 2261 tmp_arc_eviction_list = buf->b_next; 2262 mutex_enter(&buf->b_evict_lock); 2263 buf->b_hdr = NULL; 2264 mutex_exit(&buf->b_evict_lock); 2265 2266 if (buf->b_efunc != NULL) 2267 VERIFY(buf->b_efunc(buf) == 0); 2268 2269 buf->b_efunc = NULL; 2270 buf->b_private = NULL; 2271 
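/* Any eviction callback has run and the buffer is fully detached; return it to buf_cache. */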
kmem_cache_free(buf_cache, buf); 2272 } 2273 2274 if (arc_eviction_list != NULL) 2275 goto restart; 2276} 2277 2278/* 2279 * Flush all *evictable* data from the cache for the given spa. 2280 * NOTE: this will not touch "active" (i.e. referenced) data. 2281 */ 2282void 2283arc_flush(spa_t *spa) 2284{ 2285 uint64_t guid = 0; 2286 2287 if (spa) 2288 guid = spa_load_guid(spa); 2289 2290 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) { 2291 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA); 2292 if (spa) 2293 break; 2294 } 2295 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) { 2296 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA); 2297 if (spa) 2298 break; 2299 } 2300 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) { 2301 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA); 2302 if (spa) 2303 break; 2304 } 2305 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) { 2306 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA); 2307 if (spa) 2308 break; 2309 } 2310 2311 arc_evict_ghost(arc_mru_ghost, guid, -1); 2312 arc_evict_ghost(arc_mfu_ghost, guid, -1); 2313 2314 mutex_enter(&arc_reclaim_thr_lock); 2315 arc_do_user_evicts(); 2316 mutex_exit(&arc_reclaim_thr_lock); 2317 ASSERT(spa || arc_eviction_list == NULL); 2318} 2319 2320void 2321arc_shrink(void) 2322{ 2323 if (arc_c > arc_c_min) { 2324 uint64_t to_free; 2325 2326#ifdef _KERNEL 2327 to_free = arc_c >> arc_shrink_shift; 2328#else 2329 to_free = arc_c >> arc_shrink_shift; 2330#endif 2331 if (arc_c > arc_c_min + to_free) 2332 atomic_add_64(&arc_c, -to_free); 2333 else 2334 arc_c = arc_c_min; 2335 2336 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 2337 if (arc_c > arc_size) 2338 arc_c = MAX(arc_size, arc_c_min); 2339 if (arc_p > arc_c) 2340 arc_p = (arc_c >> 1); 2341 ASSERT(arc_c >= arc_c_min); 2342 ASSERT((int64_t)arc_p >= 0); 2343 } 2344 2345 if (arc_size > arc_c) 2346 arc_adjust(); 2347} 2348 2349static int needfree = 0; 2350 2351static int 2352arc_reclaim_needed(void) 2353{ 2354 2355#ifdef _KERNEL 2356 2357 if (needfree) 2358 return (1); 2359 2360 /* 2361 * Cooperate with pagedaemon when it's time for it to scan 2362 * and reclaim some pages. 2363 */ 2364 if (vm_paging_needed()) 2365 return (1); 2366 2367#ifdef sun 2368 /* 2369 * take 'desfree' extra pages, so we reclaim sooner, rather than later 2370 */ 2371 extra = desfree; 2372 2373 /* 2374 * check that we're out of range of the pageout scanner. It starts to 2375 * schedule paging if freemem is less than lotsfree and needfree. 2376 * lotsfree is the high-water mark for pageout, and needfree is the 2377 * number of needed free pages. We add extra pages here to make sure 2378 * the scanner doesn't start up while we're freeing memory. 2379 */ 2380 if (freemem < lotsfree + needfree + extra) 2381 return (1); 2382 2383 /* 2384 * check to make sure that swapfs has enough space so that anon 2385 * reservations can still succeed. anon_resvmem() checks that the 2386 * availrmem is greater than swapfs_minfree, and the number of reserved 2387 * swap pages. We also add a bit of extra here just to prevent 2388 * circumstances from getting really dire. 2389 */ 2390 if (availrmem < swapfs_minfree + swapfs_reserve + extra) 2391 return (1); 2392 2393#if defined(__i386) 2394 /* 2395 * If we're on an i386 platform, it's possible that we'll exhaust the 2396 * kernel heap space before we ever run out of available physical 2397 * memory. 
Most checks of the size of the heap_area compare against 2398 * tune.t_minarmem, which is the minimum available real memory that we 2399 * can have in the system. However, this is generally fixed at 25 pages 2400 * which is so low that it's useless. In this comparison, we seek to 2401 * calculate the total heap-size, and reclaim if more than 3/4ths of the 2402 * heap is allocated. (Or, in the calculation, if less than 1/4th is 2403 * free) 2404 */ 2405 if (btop(vmem_size(heap_arena, VMEM_FREE)) < 2406 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2)) 2407 return (1); 2408#endif 2409#else /* !sun */ 2410 if (kmem_used() > (kmem_size() * 3) / 4) 2411 return (1); 2412#endif /* sun */ 2413 2414#else 2415 if (spa_get_random(100) == 0) 2416 return (1); 2417#endif 2418 return (0); 2419} 2420 2421extern kmem_cache_t *zio_buf_cache[]; 2422extern kmem_cache_t *zio_data_buf_cache[]; 2423 2424static void 2425arc_kmem_reap_now(arc_reclaim_strategy_t strat) 2426{ 2427 size_t i; 2428 kmem_cache_t *prev_cache = NULL; 2429 kmem_cache_t *prev_data_cache = NULL; 2430 2431#ifdef _KERNEL 2432 if (arc_meta_used >= arc_meta_limit) { 2433 /* 2434 * We are exceeding our meta-data cache limit. 2435 * Purge some DNLC entries to release holds on meta-data. 2436 */ 2437 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 2438 } 2439#if defined(__i386) 2440 /* 2441 * Reclaim unused memory from all kmem caches. 2442 */ 2443 kmem_reap(); 2444#endif 2445#endif 2446 2447 /* 2448 * An aggressive reclamation will shrink the cache size as well as 2449 * reap free buffers from the arc kmem caches. 2450 */ 2451 if (strat == ARC_RECLAIM_AGGR) 2452 arc_shrink(); 2453 2454 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 2455 if (zio_buf_cache[i] != prev_cache) { 2456 prev_cache = zio_buf_cache[i]; 2457 kmem_cache_reap_now(zio_buf_cache[i]); 2458 } 2459 if (zio_data_buf_cache[i] != prev_data_cache) { 2460 prev_data_cache = zio_data_buf_cache[i]; 2461 kmem_cache_reap_now(zio_data_buf_cache[i]); 2462 } 2463 } 2464 kmem_cache_reap_now(buf_cache); 2465 kmem_cache_reap_now(hdr_cache); 2466} 2467 2468static void 2469arc_reclaim_thread(void *dummy __unused) 2470{ 2471 clock_t growtime = 0; 2472 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS; 2473 callb_cpr_t cpr; 2474 2475 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG); 2476 2477 mutex_enter(&arc_reclaim_thr_lock); 2478 while (arc_thread_exit == 0) { 2479 if (arc_reclaim_needed()) { 2480 2481 if (arc_no_grow) { 2482 if (last_reclaim == ARC_RECLAIM_CONS) { 2483 last_reclaim = ARC_RECLAIM_AGGR; 2484 } else { 2485 last_reclaim = ARC_RECLAIM_CONS; 2486 } 2487 } else { 2488 arc_no_grow = TRUE; 2489 last_reclaim = ARC_RECLAIM_AGGR; 2490 membar_producer(); 2491 } 2492 2493 /* reset the growth delay for every reclaim */ 2494 growtime = ddi_get_lbolt() + (arc_grow_retry * hz); 2495 2496 if (needfree && last_reclaim == ARC_RECLAIM_CONS) { 2497 /* 2498 * If needfree is TRUE our vm_lowmem hook 2499 * was called and in that case we must free some 2500 * memory, so switch to aggressive mode. 
2501 */ 2502 arc_no_grow = TRUE; 2503 last_reclaim = ARC_RECLAIM_AGGR; 2504 } 2505 arc_kmem_reap_now(last_reclaim); 2506 arc_warm = B_TRUE; 2507 2508 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) { 2509 arc_no_grow = FALSE; 2510 } 2511 2512 arc_adjust(); 2513 2514 if (arc_eviction_list != NULL) 2515 arc_do_user_evicts(); 2516 2517#ifdef _KERNEL 2518 if (needfree) { 2519 needfree = 0; 2520 wakeup(&needfree); 2521 } 2522#endif 2523 2524 /* block until needed, or one second, whichever is shorter */ 2525 CALLB_CPR_SAFE_BEGIN(&cpr); 2526 (void) cv_timedwait(&arc_reclaim_thr_cv, 2527 &arc_reclaim_thr_lock, hz); 2528 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock); 2529 } 2530 2531 arc_thread_exit = 0; 2532 cv_broadcast(&arc_reclaim_thr_cv); 2533 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */ 2534 thread_exit(); 2535} 2536 2537/* 2538 * Adapt arc info given the number of bytes we are trying to add and 2539 * the state that we are coming from. This function is only called 2540 * when we are adding new content to the cache. 2541 */ 2542static void 2543arc_adapt(int bytes, arc_state_t *state) 2544{ 2545 int mult; 2546 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 2547 2548 if (state == arc_l2c_only) 2549 return; 2550 2551 ASSERT(bytes > 0); 2552 /* 2553 * Adapt the target size of the MRU list: 2554 * - if we just hit in the MRU ghost list, then increase 2555 * the target size of the MRU list. 2556 * - if we just hit in the MFU ghost list, then increase 2557 * the target size of the MFU list by decreasing the 2558 * target size of the MRU list. 2559 */ 2560 if (state == arc_mru_ghost) { 2561 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ? 2562 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size)); 2563 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 2564 2565 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 2566 } else if (state == arc_mfu_ghost) { 2567 uint64_t delta; 2568 2569 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ? 2570 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size)); 2571 mult = MIN(mult, 10); 2572 2573 delta = MIN(bytes * mult, arc_p); 2574 arc_p = MAX(arc_p_min, arc_p - delta); 2575 } 2576 ASSERT((int64_t)arc_p >= 0); 2577 2578 if (arc_reclaim_needed()) { 2579 cv_signal(&arc_reclaim_thr_cv); 2580 return; 2581 } 2582 2583 if (arc_no_grow) 2584 return; 2585 2586 if (arc_c >= arc_c_max) 2587 return; 2588 2589 /* 2590 * If we're within (2 * maxblocksize) bytes of the target 2591 * cache size, increment the target cache size 2592 */ 2593 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) { 2594 atomic_add_64(&arc_c, (int64_t)bytes); 2595 if (arc_c > arc_c_max) 2596 arc_c = arc_c_max; 2597 else if (state == arc_anon) 2598 atomic_add_64(&arc_p, (int64_t)bytes); 2599 if (arc_p > arc_c) 2600 arc_p = arc_c; 2601 } 2602 ASSERT((int64_t)arc_p >= 0); 2603} 2604 2605/* 2606 * Check if the cache has reached its limits and eviction is required 2607 * prior to insert. 2608 */ 2609static int 2610arc_evict_needed(arc_buf_contents_t type) 2611{ 2612 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit) 2613 return (1); 2614 2615#ifdef sun 2616#ifdef _KERNEL 2617 /* 2618 * If zio data pages are being allocated out of a separate heap segment, 2619 * then enforce that the size of available vmem for this area remains 2620 * above about 1/32nd free. 
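 * That is, eviction is required once VMEM_FREE in the zio_arena
 * drops below VMEM_ALLOC / 32, which is what the >> 5 below checks.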
2621 */ 2622 if (type == ARC_BUFC_DATA && zio_arena != NULL && 2623 vmem_size(zio_arena, VMEM_FREE) < 2624 (vmem_size(zio_arena, VMEM_ALLOC) >> 5)) 2625 return (1); 2626#endif 2627#endif /* sun */ 2628 2629 if (arc_reclaim_needed()) 2630 return (1); 2631 2632 return (arc_size > arc_c); 2633} 2634 2635/* 2636 * The buffer, supplied as the first argument, needs a data block. 2637 * So, if we are at cache max, determine which cache should be victimized. 2638 * We have the following cases: 2639 * 2640 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) -> 2641 * In this situation if we're out of space, but the resident size of the MFU is 2642 * under the limit, victimize the MFU cache to satisfy this insertion request. 2643 * 2644 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) -> 2645 * Here, we've used up all of the available space for the MRU, so we need to 2646 * evict from our own cache instead. Evict from the set of resident MRU 2647 * entries. 2648 * 2649 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) -> 2650 * c minus p represents the MFU space in the cache, since p is the size of the 2651 * cache that is dedicated to the MRU. In this situation there's still space on 2652 * the MFU side, so the MRU side needs to be victimized. 2653 * 2654 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) -> 2655 * MFU's resident set is consuming more space than it has been allotted. In 2656 * this situation, we must victimize our own cache, the MFU, for this insertion. 2657 */ 2658static void 2659arc_get_data_buf(arc_buf_t *buf) 2660{ 2661 arc_state_t *state = buf->b_hdr->b_state; 2662 uint64_t size = buf->b_hdr->b_size; 2663 arc_buf_contents_t type = buf->b_hdr->b_type; 2664 2665 arc_adapt(size, state); 2666 2667 /* 2668 * We have not yet reached cache maximum size, 2669 * just allocate a new buffer. 2670 */ 2671 if (!arc_evict_needed(type)) { 2672 if (type == ARC_BUFC_METADATA) { 2673 buf->b_data = zio_buf_alloc(size); 2674 arc_space_consume(size, ARC_SPACE_DATA); 2675 } else { 2676 ASSERT(type == ARC_BUFC_DATA); 2677 buf->b_data = zio_data_buf_alloc(size); 2678 ARCSTAT_INCR(arcstat_data_size, size); 2679 atomic_add_64(&arc_size, size); 2680 } 2681 goto out; 2682 } 2683 2684 /* 2685 * If we are prefetching from the mfu ghost list, this buffer 2686 * will end up on the mru list; so steal space from there. 2687 */ 2688 if (state == arc_mfu_ghost) 2689 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu; 2690 else if (state == arc_mru_ghost) 2691 state = arc_mru; 2692 2693 if (state == arc_mru || state == arc_anon) { 2694 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size; 2695 state = (arc_mfu->arcs_lsize[type] >= size && 2696 arc_p > mru_used) ? arc_mfu : arc_mru; 2697 } else { 2698 /* MFU cases */ 2699 uint64_t mfu_space = arc_c - arc_p; 2700 state = (arc_mru->arcs_lsize[type] >= size && 2701 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu; 2702 } 2703 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) { 2704 if (type == ARC_BUFC_METADATA) { 2705 buf->b_data = zio_buf_alloc(size); 2706 arc_space_consume(size, ARC_SPACE_DATA); 2707 } else { 2708 ASSERT(type == ARC_BUFC_DATA); 2709 buf->b_data = zio_data_buf_alloc(size); 2710 ARCSTAT_INCR(arcstat_data_size, size); 2711 atomic_add_64(&arc_size, size); 2712 } 2713 ARCSTAT_BUMP(arcstat_recycle_miss); 2714 } 2715 ASSERT(buf->b_data != NULL); 2716out: 2717 /* 2718 * Update the state size. Note that ghost states have a 2719 * "ghost size" and so don't need to be updated. 
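 * (Ghost states hold only headers; their arcs_size is maintained
 * from the headers' b_size in arc_change_state() instead.)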
2720 */ 2721 if (!GHOST_STATE(buf->b_hdr->b_state)) { 2722 arc_buf_hdr_t *hdr = buf->b_hdr; 2723 2724 atomic_add_64(&hdr->b_state->arcs_size, size); 2725 if (list_link_active(&hdr->b_arc_node)) { 2726 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 2727 atomic_add_64(&hdr->b_state->arcs_lsize[type], size); 2728 } 2729 /* 2730 * If we are growing the cache, and we are adding anonymous 2731 * data, and we have outgrown arc_p, update arc_p 2732 */ 2733 if (arc_size < arc_c && hdr->b_state == arc_anon && 2734 arc_anon->arcs_size + arc_mru->arcs_size > arc_p) 2735 arc_p = MIN(arc_c, arc_p + size); 2736 } 2737 ARCSTAT_BUMP(arcstat_allocated); 2738} 2739 2740/* 2741 * This routine is called whenever a buffer is accessed. 2742 * NOTE: the hash lock is dropped in this function. 2743 */ 2744static void 2745arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock) 2746{ 2747 clock_t now; 2748 2749 ASSERT(MUTEX_HELD(hash_lock)); 2750 2751 if (buf->b_state == arc_anon) { 2752 /* 2753 * This buffer is not in the cache, and does not 2754 * appear in our "ghost" list. Add the new buffer 2755 * to the MRU state. 2756 */ 2757 2758 ASSERT(buf->b_arc_access == 0); 2759 buf->b_arc_access = ddi_get_lbolt(); 2760 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2761 arc_change_state(arc_mru, buf, hash_lock); 2762 2763 } else if (buf->b_state == arc_mru) { 2764 now = ddi_get_lbolt(); 2765 2766 /* 2767 * If this buffer is here because of a prefetch, then either: 2768 * - clear the flag if this is a "referencing" read 2769 * (any subsequent access will bump this into the MFU state). 2770 * or 2771 * - move the buffer to the head of the list if this is 2772 * another prefetch (to make it less likely to be evicted). 2773 */ 2774 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2775 if (refcount_count(&buf->b_refcnt) == 0) { 2776 ASSERT(list_link_active(&buf->b_arc_node)); 2777 } else { 2778 buf->b_flags &= ~ARC_PREFETCH; 2779 ARCSTAT_BUMP(arcstat_mru_hits); 2780 } 2781 buf->b_arc_access = now; 2782 return; 2783 } 2784 2785 /* 2786 * This buffer has been "accessed" only once so far, 2787 * but it is still in the cache. Move it to the MFU 2788 * state. 2789 */ 2790 if (now > buf->b_arc_access + ARC_MINTIME) { 2791 /* 2792 * More than 125ms have passed since we 2793 * instantiated this buffer. Move it to the 2794 * most frequently used state. 2795 */ 2796 buf->b_arc_access = now; 2797 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2798 arc_change_state(arc_mfu, buf, hash_lock); 2799 } 2800 ARCSTAT_BUMP(arcstat_mru_hits); 2801 } else if (buf->b_state == arc_mru_ghost) { 2802 arc_state_t *new_state; 2803 /* 2804 * This buffer has been "accessed" recently, but 2805 * was evicted from the cache. Move it to the 2806 * MFU state. 2807 */ 2808 2809 if (buf->b_flags & ARC_PREFETCH) { 2810 new_state = arc_mru; 2811 if (refcount_count(&buf->b_refcnt) > 0) 2812 buf->b_flags &= ~ARC_PREFETCH; 2813 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf); 2814 } else { 2815 new_state = arc_mfu; 2816 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2817 } 2818 2819 buf->b_arc_access = ddi_get_lbolt(); 2820 arc_change_state(new_state, buf, hash_lock); 2821 2822 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 2823 } else if (buf->b_state == arc_mfu) { 2824 /* 2825 * This buffer has been accessed more than once and is 2826 * still in the cache. Keep it in the MFU state. 2827 * 2828 * NOTE: an add_reference() that occurred when we did 2829 * the arc_read() will have kicked this off the list. 
2830 * If it was a prefetch, we will explicitly move it to 2831 * the head of the list now. 2832 */ 2833 if ((buf->b_flags & ARC_PREFETCH) != 0) { 2834 ASSERT(refcount_count(&buf->b_refcnt) == 0); 2835 ASSERT(list_link_active(&buf->b_arc_node)); 2836 } 2837 ARCSTAT_BUMP(arcstat_mfu_hits); 2838 buf->b_arc_access = ddi_get_lbolt(); 2839 } else if (buf->b_state == arc_mfu_ghost) { 2840 arc_state_t *new_state = arc_mfu; 2841 /* 2842 * This buffer has been accessed more than once but has 2843 * been evicted from the cache. Move it back to the 2844 * MFU state. 2845 */ 2846 2847 if (buf->b_flags & ARC_PREFETCH) { 2848 /* 2849 * This is a prefetch access... 2850 * move this block back to the MRU state. 2851 */ 2852 ASSERT0(refcount_count(&buf->b_refcnt)); 2853 new_state = arc_mru; 2854 } 2855 2856 buf->b_arc_access = ddi_get_lbolt(); 2857 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2858 arc_change_state(new_state, buf, hash_lock); 2859 2860 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 2861 } else if (buf->b_state == arc_l2c_only) { 2862 /* 2863 * This buffer is on the 2nd Level ARC. 2864 */ 2865 2866 buf->b_arc_access = ddi_get_lbolt(); 2867 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf); 2868 arc_change_state(arc_mfu, buf, hash_lock); 2869 } else { 2870 ASSERT(!"invalid arc state"); 2871 } 2872} 2873 2874/* a generic arc_done_func_t which you can use */ 2875/* ARGSUSED */ 2876void 2877arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg) 2878{ 2879 if (zio == NULL || zio->io_error == 0) 2880 bcopy(buf->b_data, arg, buf->b_hdr->b_size); 2881 VERIFY(arc_buf_remove_ref(buf, arg)); 2882} 2883 2884/* a generic arc_done_func_t */ 2885void 2886arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg) 2887{ 2888 arc_buf_t **bufp = arg; 2889 if (zio && zio->io_error) { 2890 VERIFY(arc_buf_remove_ref(buf, arg)); 2891 *bufp = NULL; 2892 } else { 2893 *bufp = buf; 2894 ASSERT(buf->b_data); 2895 } 2896} 2897 2898static void 2899arc_read_done(zio_t *zio) 2900{ 2901 arc_buf_hdr_t *hdr, *found; 2902 arc_buf_t *buf; 2903 arc_buf_t *abuf; /* buffer we're assigning to callback */ 2904 kmutex_t *hash_lock; 2905 arc_callback_t *callback_list, *acb; 2906 int freeable = FALSE; 2907 2908 buf = zio->io_private; 2909 hdr = buf->b_hdr; 2910 2911 /* 2912 * The hdr was inserted into hash-table and removed from lists 2913 * prior to starting I/O. We should find this header, since 2914 * it's in the hash table, and it should be legit since it's 2915 * not possible to evict it during the I/O. The only possible 2916 * reason for it not to be found is if we were freed during the 2917 * read. 2918 */ 2919 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth, 2920 &hash_lock); 2921 2922 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) || 2923 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 2924 (found == hdr && HDR_L2_READING(hdr))); 2925 2926 hdr->b_flags &= ~ARC_L2_EVICTED; 2927 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH)) 2928 hdr->b_flags &= ~ARC_L2CACHE; 2929 2930 /* byteswap if necessary */ 2931 callback_list = hdr->b_acb; 2932 ASSERT(callback_list != NULL); 2933 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) { 2934 dmu_object_byteswap_t bswap = 2935 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 2936 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ? 
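/* an indirect block (level > 0) is an array of blkptr_t, byteswapped as a raw uint64 array */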
2937 byteswap_uint64_array : 2938 dmu_ot_byteswap[bswap].ob_func; 2939 func(buf->b_data, hdr->b_size); 2940 } 2941 2942 arc_cksum_compute(buf, B_FALSE); 2943#ifdef illumos 2944 arc_buf_watch(buf); 2945#endif /* illumos */ 2946 2947 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) { 2948 /* 2949 * Only call arc_access on anonymous buffers. This is because 2950 * if we've issued an I/O for an evicted buffer, we've already 2951 * called arc_access (to prevent any simultaneous readers from 2952 * getting confused). 2953 */ 2954 arc_access(hdr, hash_lock); 2955 } 2956 2957 /* create copies of the data buffer for the callers */ 2958 abuf = buf; 2959 for (acb = callback_list; acb; acb = acb->acb_next) { 2960 if (acb->acb_done) { 2961 if (abuf == NULL) { 2962 ARCSTAT_BUMP(arcstat_duplicate_reads); 2963 abuf = arc_buf_clone(buf); 2964 } 2965 acb->acb_buf = abuf; 2966 abuf = NULL; 2967 } 2968 } 2969 hdr->b_acb = NULL; 2970 hdr->b_flags &= ~ARC_IO_IN_PROGRESS; 2971 ASSERT(!HDR_BUF_AVAILABLE(hdr)); 2972 if (abuf == buf) { 2973 ASSERT(buf->b_efunc == NULL); 2974 ASSERT(hdr->b_datacnt == 1); 2975 hdr->b_flags |= ARC_BUF_AVAILABLE; 2976 } 2977 2978 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL); 2979 2980 if (zio->io_error != 0) { 2981 hdr->b_flags |= ARC_IO_ERROR; 2982 if (hdr->b_state != arc_anon) 2983 arc_change_state(arc_anon, hdr, hash_lock); 2984 if (HDR_IN_HASH_TABLE(hdr)) 2985 buf_hash_remove(hdr); 2986 freeable = refcount_is_zero(&hdr->b_refcnt); 2987 } 2988 2989 /* 2990 * Broadcast before we drop the hash_lock to avoid the possibility 2991 * that the hdr (and hence the cv) might be freed before we get to 2992 * the cv_broadcast(). 2993 */ 2994 cv_broadcast(&hdr->b_cv); 2995 2996 if (hash_lock) { 2997 mutex_exit(hash_lock); 2998 } else { 2999 /* 3000 * This block was freed while we waited for the read to 3001 * complete. It has been removed from the hash table and 3002 * moved to the anonymous state (so that it won't show up 3003 * in the cache). 3004 */ 3005 ASSERT3P(hdr->b_state, ==, arc_anon); 3006 freeable = refcount_is_zero(&hdr->b_refcnt); 3007 } 3008 3009 /* execute each callback and free its structure */ 3010 while ((acb = callback_list) != NULL) { 3011 if (acb->acb_done) 3012 acb->acb_done(zio, acb->acb_buf, acb->acb_private); 3013 3014 if (acb->acb_zio_dummy != NULL) { 3015 acb->acb_zio_dummy->io_error = zio->io_error; 3016 zio_nowait(acb->acb_zio_dummy); 3017 } 3018 3019 callback_list = acb->acb_next; 3020 kmem_free(acb, sizeof (arc_callback_t)); 3021 } 3022 3023 if (freeable) 3024 arc_hdr_destroy(hdr); 3025} 3026 3027/* 3028 * "Read" the block at the specified DVA (in bp) via the 3029 * cache. If the block is found in the cache, invoke the provided 3030 * callback immediately and return. Note that the `zio' parameter 3031 * in the callback will be NULL in this case, since no IO was 3032 * required. If the block is not in the cache, pass the read request 3033 * on to the spa with a substitute callback function, so that the 3034 * requested block will be added to the cache. 3035 * 3036 * If a read request arrives for a block that has a read in-progress, 3037 * either wait for the in-progress read to complete (and return the 3038 * results); or, if this is a read with a "done" func, add a record 3039 * to the read to invoke the "done" func when the read completes, 3040 * and return; or just return. 3041 * 3042 * arc_read_done() will invoke all the requested "done" functions 3043 * for readers of this block. 
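 * An asynchronous caller looks roughly like this (illustrative
 * sketch only; "my_done_func" and "my_private" are hypothetical
 * names, not consumers in this file):
 *
 *	uint32_t flags = ARC_NOWAIT;
 *	(void) arc_read(pio, spa, bp, my_done_func, my_private,
 *	    ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
 *
 * On a cache hit "done" runs immediately with a NULL zio; otherwise
 * arc_read_done() invokes it once the I/O completes.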
3044 */ 3045int 3046arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done, 3047 void *private, int priority, int zio_flags, uint32_t *arc_flags, 3048 const zbookmark_t *zb) 3049{ 3050 arc_buf_hdr_t *hdr; 3051 arc_buf_t *buf = NULL; 3052 kmutex_t *hash_lock; 3053 zio_t *rzio; 3054 uint64_t guid = spa_load_guid(spa); 3055 3056top: 3057 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp), 3058 &hash_lock); 3059 if (hdr && hdr->b_datacnt > 0) { 3060 3061 *arc_flags |= ARC_CACHED; 3062 3063 if (HDR_IO_IN_PROGRESS(hdr)) { 3064 3065 if (*arc_flags & ARC_WAIT) { 3066 cv_wait(&hdr->b_cv, hash_lock); 3067 mutex_exit(hash_lock); 3068 goto top; 3069 } 3070 ASSERT(*arc_flags & ARC_NOWAIT); 3071 3072 if (done) { 3073 arc_callback_t *acb = NULL; 3074 3075 acb = kmem_zalloc(sizeof (arc_callback_t), 3076 KM_SLEEP); 3077 acb->acb_done = done; 3078 acb->acb_private = private; 3079 if (pio != NULL) 3080 acb->acb_zio_dummy = zio_null(pio, 3081 spa, NULL, NULL, NULL, zio_flags); 3082 3083 ASSERT(acb->acb_done != NULL); 3084 acb->acb_next = hdr->b_acb; 3085 hdr->b_acb = acb; 3086 add_reference(hdr, hash_lock, private); 3087 mutex_exit(hash_lock); 3088 return (0); 3089 } 3090 mutex_exit(hash_lock); 3091 return (0); 3092 } 3093 3094 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3095 3096 if (done) { 3097 add_reference(hdr, hash_lock, private); 3098 /* 3099 * If this block is already in use, create a new 3100 * copy of the data so that we will be guaranteed 3101 * that arc_release() will always succeed. 3102 */ 3103 buf = hdr->b_buf; 3104 ASSERT(buf); 3105 ASSERT(buf->b_data); 3106 if (HDR_BUF_AVAILABLE(hdr)) { 3107 ASSERT(buf->b_efunc == NULL); 3108 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3109 } else { 3110 buf = arc_buf_clone(buf); 3111 } 3112 3113 } else if (*arc_flags & ARC_PREFETCH && 3114 refcount_count(&hdr->b_refcnt) == 0) { 3115 hdr->b_flags |= ARC_PREFETCH; 3116 } 3117 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 3118 arc_access(hdr, hash_lock); 3119 if (*arc_flags & ARC_L2CACHE) 3120 hdr->b_flags |= ARC_L2CACHE; 3121 mutex_exit(hash_lock); 3122 ARCSTAT_BUMP(arcstat_hits); 3123 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3124 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3125 data, metadata, hits); 3126 3127 if (done) 3128 done(NULL, buf, private); 3129 } else { 3130 uint64_t size = BP_GET_LSIZE(bp); 3131 arc_callback_t *acb; 3132 vdev_t *vd = NULL; 3133 uint64_t addr = 0; 3134 boolean_t devw = B_FALSE; 3135 3136 if (hdr == NULL) { 3137 /* this block is not in the cache */ 3138 arc_buf_hdr_t *exists; 3139 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 3140 buf = arc_buf_alloc(spa, size, private, type); 3141 hdr = buf->b_hdr; 3142 hdr->b_dva = *BP_IDENTITY(bp); 3143 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 3144 hdr->b_cksum0 = bp->blk_cksum.zc_word[0]; 3145 exists = buf_hash_insert(hdr, &hash_lock); 3146 if (exists) { 3147 /* somebody beat us to the hash insert */ 3148 mutex_exit(hash_lock); 3149 buf_discard_identity(hdr); 3150 (void) arc_buf_remove_ref(buf, private); 3151 goto top; /* restart the IO request */ 3152 } 3153 /* if this is a prefetch, we don't have a reference */ 3154 if (*arc_flags & ARC_PREFETCH) { 3155 (void) remove_reference(hdr, hash_lock, 3156 private); 3157 hdr->b_flags |= ARC_PREFETCH; 3158 } 3159 if (*arc_flags & ARC_L2CACHE) 3160 hdr->b_flags |= ARC_L2CACHE; 3161 if (BP_GET_LEVEL(bp) > 0) 3162 hdr->b_flags |= ARC_INDIRECT; 3163 } else { 3164 /* this block is in the ghost cache */ 3165 ASSERT(GHOST_STATE(hdr->b_state)); 3166 
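/* Ghost hit: the header survived eviction but its data is gone; rebuild an arc_buf for it and re-read the block below. */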
ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3167 ASSERT0(refcount_count(&hdr->b_refcnt)); 3168 ASSERT(hdr->b_buf == NULL); 3169 3170 /* if this is a prefetch, we don't have a reference */ 3171 if (*arc_flags & ARC_PREFETCH) 3172 hdr->b_flags |= ARC_PREFETCH; 3173 else 3174 add_reference(hdr, hash_lock, private); 3175 if (*arc_flags & ARC_L2CACHE) 3176 hdr->b_flags |= ARC_L2CACHE; 3177 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 3178 buf->b_hdr = hdr; 3179 buf->b_data = NULL; 3180 buf->b_efunc = NULL; 3181 buf->b_private = NULL; 3182 buf->b_next = NULL; 3183 hdr->b_buf = buf; 3184 ASSERT(hdr->b_datacnt == 0); 3185 hdr->b_datacnt = 1; 3186 arc_get_data_buf(buf); 3187 arc_access(hdr, hash_lock); 3188 } 3189 3190 ASSERT(!GHOST_STATE(hdr->b_state)); 3191 3192 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 3193 acb->acb_done = done; 3194 acb->acb_private = private; 3195 3196 ASSERT(hdr->b_acb == NULL); 3197 hdr->b_acb = acb; 3198 hdr->b_flags |= ARC_IO_IN_PROGRESS; 3199 3200 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL && 3201 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) { 3202 devw = hdr->b_l2hdr->b_dev->l2ad_writing; 3203 addr = hdr->b_l2hdr->b_daddr; 3204 /* 3205 * Lock out device removal. 3206 */ 3207 if (vdev_is_dead(vd) || 3208 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 3209 vd = NULL; 3210 } 3211 3212 mutex_exit(hash_lock); 3213 3214 ASSERT3U(hdr->b_size, ==, size); 3215 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 3216 uint64_t, size, zbookmark_t *, zb); 3217 ARCSTAT_BUMP(arcstat_misses); 3218 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH), 3219 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA, 3220 data, metadata, misses); 3221#ifdef _KERNEL 3222 curthread->td_ru.ru_inblock++; 3223#endif 3224 3225 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 3226 /* 3227 * Read from the L2ARC if the following are true: 3228 * 1. The L2ARC vdev was previously cached. 3229 * 2. This buffer still has L2ARC metadata. 3230 * 3. This buffer isn't currently writing to the L2ARC. 3231 * 4. The L2ARC entry wasn't evicted, which may 3232 * also have invalidated the vdev. 3233 * 5. This isn't prefetch and l2arc_noprefetch is set. 3234 */ 3235 if (hdr->b_l2hdr != NULL && 3236 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 3237 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 3238 l2arc_read_callback_t *cb; 3239 3240 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 3241 ARCSTAT_BUMP(arcstat_l2_hits); 3242 3243 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 3244 KM_SLEEP); 3245 cb->l2rcb_buf = buf; 3246 cb->l2rcb_spa = spa; 3247 cb->l2rcb_bp = *bp; 3248 cb->l2rcb_zb = *zb; 3249 cb->l2rcb_flags = zio_flags; 3250 3251 ASSERT(addr >= VDEV_LABEL_START_SIZE && 3252 addr + size < vd->vdev_psize - 3253 VDEV_LABEL_END_SIZE); 3254 3255 /* 3256 * l2arc read. The SCL_L2ARC lock will be 3257 * released by l2arc_read_done(). 
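 * The physical read is issued with ZIO_FLAG_CANFAIL and
 * ZIO_FLAG_DONT_RETRY, so an L2ARC device error simply falls back
 * to the regular zio_read() of the pool copy below.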
3258 */ 3259 rzio = zio_read_phys(pio, vd, addr, size, 3260 buf->b_data, ZIO_CHECKSUM_OFF, 3261 l2arc_read_done, cb, priority, zio_flags | 3262 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL | 3263 ZIO_FLAG_DONT_PROPAGATE | 3264 ZIO_FLAG_DONT_RETRY, B_FALSE); 3265 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 3266 zio_t *, rzio); 3267 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 3268 3269 if (*arc_flags & ARC_NOWAIT) { 3270 zio_nowait(rzio); 3271 return (0); 3272 } 3273 3274 ASSERT(*arc_flags & ARC_WAIT); 3275 if (zio_wait(rzio) == 0) 3276 return (0); 3277 3278 /* l2arc read error; goto zio_read() */ 3279 } else { 3280 DTRACE_PROBE1(l2arc__miss, 3281 arc_buf_hdr_t *, hdr); 3282 ARCSTAT_BUMP(arcstat_l2_misses); 3283 if (HDR_L2_WRITING(hdr)) 3284 ARCSTAT_BUMP(arcstat_l2_rw_clash); 3285 spa_config_exit(spa, SCL_L2ARC, vd); 3286 } 3287 } else { 3288 if (vd != NULL) 3289 spa_config_exit(spa, SCL_L2ARC, vd); 3290 if (l2arc_ndev != 0) { 3291 DTRACE_PROBE1(l2arc__miss, 3292 arc_buf_hdr_t *, hdr); 3293 ARCSTAT_BUMP(arcstat_l2_misses); 3294 } 3295 } 3296 3297 rzio = zio_read(pio, spa, bp, buf->b_data, size, 3298 arc_read_done, buf, priority, zio_flags, zb); 3299 3300 if (*arc_flags & ARC_WAIT) 3301 return (zio_wait(rzio)); 3302 3303 ASSERT(*arc_flags & ARC_NOWAIT); 3304 zio_nowait(rzio); 3305 } 3306 return (0); 3307} 3308 3309void 3310arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private) 3311{ 3312 ASSERT(buf->b_hdr != NULL); 3313 ASSERT(buf->b_hdr->b_state != arc_anon); 3314 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL); 3315 ASSERT(buf->b_efunc == NULL); 3316 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr)); 3317 3318 buf->b_efunc = func; 3319 buf->b_private = private; 3320} 3321 3322/* 3323 * This is used by the DMU to let the ARC know that a buffer is 3324 * being evicted, so the ARC should clean up. If this arc buf 3325 * is not yet in the evicted state, it will be put there. 3326 */ 3327int 3328arc_buf_evict(arc_buf_t *buf) 3329{ 3330 arc_buf_hdr_t *hdr; 3331 kmutex_t *hash_lock; 3332 arc_buf_t **bufp; 3333 list_t *list, *evicted_list; 3334 kmutex_t *lock, *evicted_lock; 3335 3336 mutex_enter(&buf->b_evict_lock); 3337 hdr = buf->b_hdr; 3338 if (hdr == NULL) { 3339 /* 3340 * We are in arc_do_user_evicts(). 3341 */ 3342 ASSERT(buf->b_data == NULL); 3343 mutex_exit(&buf->b_evict_lock); 3344 return (0); 3345 } else if (buf->b_data == NULL) { 3346 arc_buf_t copy = *buf; /* structure assignment */ 3347 /* 3348 * We are on the eviction list; process this buffer now 3349 * but let arc_do_user_evicts() do the reaping. 3350 */ 3351 buf->b_efunc = NULL; 3352 mutex_exit(&buf->b_evict_lock); 3353 VERIFY(copy.b_efunc(&copy) == 0); 3354 return (1); 3355 } 3356 hash_lock = HDR_LOCK(hdr); 3357 mutex_enter(hash_lock); 3358 hdr = buf->b_hdr; 3359 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3360 3361 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt); 3362 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu); 3363 3364 /* 3365 * Pull this buffer off of the hdr 3366 */ 3367 bufp = &hdr->b_buf; 3368 while (*bufp != buf) 3369 bufp = &(*bufp)->b_next; 3370 *bufp = buf->b_next; 3371 3372 ASSERT(buf->b_data != NULL); 3373 arc_buf_destroy(buf, FALSE, FALSE); 3374 3375 if (hdr->b_datacnt == 0) { 3376 arc_state_t *old_state = hdr->b_state; 3377 arc_state_t *evicted_state; 3378 3379 ASSERT(hdr->b_buf == NULL); 3380 ASSERT(refcount_is_zero(&hdr->b_refcnt)); 3381 3382 evicted_state = 3383 (old_state == arc_mru) ? 
arc_mru_ghost : arc_mfu_ghost; 3384 3385 get_buf_info(hdr, old_state, &list, &lock); 3386 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock); 3387 mutex_enter(lock); 3388 mutex_enter(evicted_lock); 3389 3390 arc_change_state(evicted_state, hdr, hash_lock); 3391 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3392 hdr->b_flags |= ARC_IN_HASH_TABLE; 3393 hdr->b_flags &= ~ARC_BUF_AVAILABLE; 3394 3395 mutex_exit(evicted_lock); 3396 mutex_exit(lock); 3397 } 3398 mutex_exit(hash_lock); 3399 mutex_exit(&buf->b_evict_lock); 3400 3401 VERIFY(buf->b_efunc(buf) == 0); 3402 buf->b_efunc = NULL; 3403 buf->b_private = NULL; 3404 buf->b_hdr = NULL; 3405 buf->b_next = NULL; 3406 kmem_cache_free(buf_cache, buf); 3407 return (1); 3408} 3409 3410/* 3411 * Release this buffer from the cache. This must be done 3412 * after a read and prior to modifying the buffer contents. 3413 * If the buffer has more than one reference, we must make 3414 * a new hdr for the buffer. 3415 */ 3416void 3417arc_release(arc_buf_t *buf, void *tag) 3418{ 3419 arc_buf_hdr_t *hdr; 3420 kmutex_t *hash_lock = NULL; 3421 l2arc_buf_hdr_t *l2hdr; 3422 uint64_t buf_size; 3423 3424 /* 3425 * It would be nice to assert that if it's DMU metadata (level > 3426 * 0 || it's the dnode file), then it must be syncing context. 3427 * But we don't know that information at this level. 3428 */ 3429 3430 mutex_enter(&buf->b_evict_lock); 3431 hdr = buf->b_hdr; 3432 3433 /* this buffer is not on any list */ 3434 ASSERT(refcount_count(&hdr->b_refcnt) > 0); 3435 3436 if (hdr->b_state == arc_anon) { 3437 /* this buffer is already released */ 3438 ASSERT(buf->b_efunc == NULL); 3439 } else { 3440 hash_lock = HDR_LOCK(hdr); 3441 mutex_enter(hash_lock); 3442 hdr = buf->b_hdr; 3443 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3444 } 3445 3446 l2hdr = hdr->b_l2hdr; 3447 if (l2hdr) { 3448 mutex_enter(&l2arc_buflist_mtx); 3449 hdr->b_l2hdr = NULL; 3450 } 3451 buf_size = hdr->b_size; 3452 3453 /* 3454 * Do we have more than one buf? 3455 */ 3456 if (hdr->b_datacnt > 1) { 3457 arc_buf_hdr_t *nhdr; 3458 arc_buf_t **bufp; 3459 uint64_t blksz = hdr->b_size; 3460 uint64_t spa = hdr->b_spa; 3461 arc_buf_contents_t type = hdr->b_type; 3462 uint32_t flags = hdr->b_flags; 3463 3464 ASSERT(hdr->b_buf != buf || buf->b_next != NULL); 3465 /* 3466 * Pull the data off of this hdr and attach it to 3467 * a new anonymous hdr. 3468 */ 3469 (void) remove_reference(hdr, hash_lock, tag); 3470 bufp = &hdr->b_buf; 3471 while (*bufp != buf) 3472 bufp = &(*bufp)->b_next; 3473 *bufp = buf->b_next; 3474 buf->b_next = NULL; 3475 3476 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size); 3477 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size); 3478 if (refcount_is_zero(&hdr->b_refcnt)) { 3479 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type]; 3480 ASSERT3U(*size, >=, hdr->b_size); 3481 atomic_add_64(size, -hdr->b_size); 3482 } 3483 3484 /* 3485 * We're releasing a duplicate user data buffer, update 3486 * our statistics accordingly. 
/*
 * Release this buffer from the cache.  This must be done
 * after a read and prior to modifying the buffer contents.
 * If the buffer has more than one reference, we must make
 * a new hdr for the buffer.
 */
void
arc_release(arc_buf_t *buf, void *tag)
{
    arc_buf_hdr_t *hdr;
    kmutex_t *hash_lock = NULL;
    l2arc_buf_hdr_t *l2hdr;
    uint64_t buf_size;

    /*
     * It would be nice to assert that if it's DMU metadata (level >
     * 0 || it's the dnode file), then it must be syncing context.
     * But we don't know that information at this level.
     */

    mutex_enter(&buf->b_evict_lock);
    hdr = buf->b_hdr;

    /* this buffer is not on any list */
    ASSERT(refcount_count(&hdr->b_refcnt) > 0);

    if (hdr->b_state == arc_anon) {
        /* this buffer is already released */
        ASSERT(buf->b_efunc == NULL);
    } else {
        hash_lock = HDR_LOCK(hdr);
        mutex_enter(hash_lock);
        hdr = buf->b_hdr;
        ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
    }

    l2hdr = hdr->b_l2hdr;
    if (l2hdr) {
        mutex_enter(&l2arc_buflist_mtx);
        hdr->b_l2hdr = NULL;
    }
    buf_size = hdr->b_size;

    /*
     * Do we have more than one buf?
     */
    if (hdr->b_datacnt > 1) {
        arc_buf_hdr_t *nhdr;
        arc_buf_t **bufp;
        uint64_t blksz = hdr->b_size;
        uint64_t spa = hdr->b_spa;
        arc_buf_contents_t type = hdr->b_type;
        uint32_t flags = hdr->b_flags;

        ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
        /*
         * Pull the data off of this hdr and attach it to
         * a new anonymous hdr.
         */
        (void) remove_reference(hdr, hash_lock, tag);
        bufp = &hdr->b_buf;
        while (*bufp != buf)
            bufp = &(*bufp)->b_next;
        *bufp = buf->b_next;
        buf->b_next = NULL;

        ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
        atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
        if (refcount_is_zero(&hdr->b_refcnt)) {
            uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
            ASSERT3U(*size, >=, hdr->b_size);
            atomic_add_64(size, -hdr->b_size);
        }

        /*
         * We're releasing a duplicate user data buffer, update
         * our statistics accordingly.
         */
        if (hdr->b_type == ARC_BUFC_DATA) {
            ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
            ARCSTAT_INCR(arcstat_duplicate_buffers_size,
                -hdr->b_size);
        }
        hdr->b_datacnt -= 1;
        arc_cksum_verify(buf);
#ifdef illumos
        arc_buf_unwatch(buf);
#endif /* illumos */

        mutex_exit(hash_lock);

        nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
        nhdr->b_size = blksz;
        nhdr->b_spa = spa;
        nhdr->b_type = type;
        nhdr->b_buf = buf;
        nhdr->b_state = arc_anon;
        nhdr->b_arc_access = 0;
        nhdr->b_flags = flags & ARC_L2_WRITING;
        nhdr->b_l2hdr = NULL;
        nhdr->b_datacnt = 1;
        nhdr->b_freeze_cksum = NULL;
        (void) refcount_add(&nhdr->b_refcnt, tag);
        buf->b_hdr = nhdr;
        mutex_exit(&buf->b_evict_lock);
        atomic_add_64(&arc_anon->arcs_size, blksz);
    } else {
        mutex_exit(&buf->b_evict_lock);
        ASSERT(refcount_count(&hdr->b_refcnt) == 1);
        ASSERT(!list_link_active(&hdr->b_arc_node));
        ASSERT(!HDR_IO_IN_PROGRESS(hdr));
        if (hdr->b_state != arc_anon)
            arc_change_state(arc_anon, hdr, hash_lock);
        hdr->b_arc_access = 0;
        if (hash_lock)
            mutex_exit(hash_lock);

        buf_discard_identity(hdr);
        arc_buf_thaw(buf);
    }
    buf->b_efunc = NULL;
    buf->b_private = NULL;

    if (l2hdr) {
        trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
            hdr->b_size);
        list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
        kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
        ARCSTAT_INCR(arcstat_l2_size, -buf_size);
        mutex_exit(&l2arc_buflist_mtx);
    }
}
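/*
 * Illustrative sketch (not compiled): the intended arc_release() calling
 * pattern, per the comment above.  The read call and the modify step are
 * hypothetical; the point is only the ordering - read, release, and only
 * then write to b_data.
 *
 *	(void) arc_read(...);			// buf is now cached and referenced
 *	arc_release(buf, tag);			// buf is now anonymous...
 *	ASSERT(arc_released(buf));		// ...as arc_released() confirms
 *	bcopy(new_data, buf->b_data, size);	// safe to modify only now
 */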
int
arc_released(arc_buf_t *buf)
{
    int released;

    mutex_enter(&buf->b_evict_lock);
    released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
    mutex_exit(&buf->b_evict_lock);
    return (released);
}

int
arc_has_callback(arc_buf_t *buf)
{
    int callback;

    mutex_enter(&buf->b_evict_lock);
    callback = (buf->b_efunc != NULL);
    mutex_exit(&buf->b_evict_lock);
    return (callback);
}

#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
    int referenced;

    mutex_enter(&buf->b_evict_lock);
    referenced = (refcount_count(&buf->b_hdr->b_refcnt));
    mutex_exit(&buf->b_evict_lock);
    return (referenced);
}
#endif

static void
arc_write_ready(zio_t *zio)
{
    arc_write_callback_t *callback = zio->io_private;
    arc_buf_t *buf = callback->awcb_buf;
    arc_buf_hdr_t *hdr = buf->b_hdr;

    ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
    callback->awcb_ready(zio, buf, callback->awcb_private);

    /*
     * If the IO is already in progress, then this is a re-write
     * attempt, so we need to thaw and re-compute the cksum.
     * It is the responsibility of the callback to handle the
     * accounting for any re-write attempt.
     */
    if (HDR_IO_IN_PROGRESS(hdr)) {
        mutex_enter(&hdr->b_freeze_lock);
        if (hdr->b_freeze_cksum != NULL) {
            kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
            hdr->b_freeze_cksum = NULL;
        }
        mutex_exit(&hdr->b_freeze_lock);
    }
    arc_cksum_compute(buf, B_FALSE);
    hdr->b_flags |= ARC_IO_IN_PROGRESS;
}

static void
arc_write_done(zio_t *zio)
{
    arc_write_callback_t *callback = zio->io_private;
    arc_buf_t *buf = callback->awcb_buf;
    arc_buf_hdr_t *hdr = buf->b_hdr;

    ASSERT(hdr->b_acb == NULL);

    if (zio->io_error == 0) {
        hdr->b_dva = *BP_IDENTITY(zio->io_bp);
        hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
        hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
    } else {
        ASSERT(BUF_EMPTY(hdr));
    }

    /*
     * If the block to be written was all-zero, we may have
     * compressed it away.  In this case no write was performed
     * so there will be no dva/birth/checksum.  The buffer must
     * therefore remain anonymous (and uncached).
     */
    if (!BUF_EMPTY(hdr)) {
        arc_buf_hdr_t *exists;
        kmutex_t *hash_lock;

        ASSERT(zio->io_error == 0);

        arc_cksum_verify(buf);

        exists = buf_hash_insert(hdr, &hash_lock);
        if (exists) {
            /*
             * This can only happen if we overwrite for
             * sync-to-convergence, because we remove
             * buffers from the hash table when we arc_free().
             */
            if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
                if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
                    panic("bad overwrite, hdr=%p exists=%p",
                        (void *)hdr, (void *)exists);
                ASSERT(refcount_is_zero(&exists->b_refcnt));
                arc_change_state(arc_anon, exists, hash_lock);
                mutex_exit(hash_lock);
                arc_hdr_destroy(exists);
                exists = buf_hash_insert(hdr, &hash_lock);
                ASSERT3P(exists, ==, NULL);
            } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
                /* nopwrite */
                ASSERT(zio->io_prop.zp_nopwrite);
                if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
                    panic("bad nopwrite, hdr=%p exists=%p",
                        (void *)hdr, (void *)exists);
            } else {
                /* Dedup */
                ASSERT(hdr->b_datacnt == 1);
                ASSERT(hdr->b_state == arc_anon);
                ASSERT(BP_GET_DEDUP(zio->io_bp));
                ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
            }
        }
        hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
        /* if it's not anon, we are doing a scrub */
        if (!exists && hdr->b_state == arc_anon)
            arc_access(hdr, hash_lock);
        mutex_exit(hash_lock);
    } else {
        hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
    }

    ASSERT(!refcount_is_zero(&hdr->b_refcnt));
    callback->awcb_done(zio, buf, callback->awcb_private);

    kmem_free(callback, sizeof (arc_write_callback_t));
}

zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
    arc_done_func_t *ready, arc_done_func_t *done, void *private,
    int priority, int zio_flags, const zbookmark_t *zb)
{
    arc_buf_hdr_t *hdr = buf->b_hdr;
    arc_write_callback_t *callback;
    zio_t *zio;

    ASSERT(ready != NULL);
    ASSERT(done != NULL);
    ASSERT(!HDR_IO_ERROR(hdr));
    ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
    ASSERT(hdr->b_acb == NULL);
    if (l2arc)
        hdr->b_flags |= ARC_L2CACHE;
    callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
    callback->awcb_ready = ready;
    callback->awcb_done = done;
    callback->awcb_private = private;
    callback->awcb_buf = buf;

    zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
        arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);

    return (zio);
}
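/*
 * Illustrative sketch (not compiled): wiring the ready/done callbacks
 * through arc_write().  The callback names and argument are hypothetical;
 * the flow is the one implemented above - awcb_ready fires from
 * arc_write_ready() while the pipeline may still modify the data, and
 * awcb_done fires from arc_write_done() after the block has (or, for an
 * all-zero block, has not) been assigned a DVA.
 *
 *	zio = arc_write(pio, spa, txg, bp, buf, B_TRUE, &zp,
 *	    my_ready_cb, my_done_cb, my_arg,	// hypothetical callbacks
 *	    ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
 *	(void) zio_nowait(zio);
 */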
static int
arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
{
#ifdef _KERNEL
    uint64_t available_memory =
        ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
    static uint64_t page_load = 0;
    static uint64_t last_txg = 0;

#ifdef sun
#if defined(__i386)
    available_memory =
        MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif
#endif /* sun */
    if (available_memory >= zfs_write_limit_max)
        return (0);

    if (txg > last_txg) {
        last_txg = txg;
        page_load = 0;
    }
    /*
     * If we are in pageout, we know that memory is already tight and
     * the ARC is already going to be evicting, so we just want to
     * continue to let page writes occur as quickly as possible.
     */
    if (curproc == pageproc) {
        if (page_load > available_memory / 4)
            return (ERESTART);
        /* Note: reserve is inflated, so we deflate */
        page_load += reserve / 8;
        return (0);
    } else if (page_load > 0 && arc_reclaim_needed()) {
        /* memory is low, delay before restarting */
        ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
        return (EAGAIN);
    }
    page_load = 0;

    if (arc_size > arc_c_min) {
        uint64_t evictable_memory =
            arc_mru->arcs_lsize[ARC_BUFC_DATA] +
            arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
            arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
            arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
        available_memory += MIN(evictable_memory, arc_size - arc_c_min);
    }

    if (inflight_data > available_memory / 4) {
        ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
        return (ERESTART);
    }
#endif
    return (0);
}

void
arc_tempreserve_clear(uint64_t reserve)
{
    atomic_add_64(&arc_tempreserve, -reserve);
    ASSERT((int64_t)arc_tempreserve >= 0);
}

int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
    int error;
    uint64_t anon_size;

#ifdef ZFS_DEBUG
    /*
     * Once in a while, fail for no reason.  Everything should cope.
     */
    if (spa_get_random(10000) == 0) {
        dprintf("forcing random failure\n");
        return (ERESTART);
    }
#endif
    if (reserve > arc_c/4 && !arc_no_grow)
        arc_c = MIN(arc_c_max, reserve * 4);
    if (reserve > arc_c)
        return (ENOMEM);

    /*
     * Don't count loaned bufs as in flight dirty data to prevent long
     * network delays from blocking transactions that are ready to be
     * assigned to a txg.
     */
    anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);

    /*
     * Writes will, almost always, require additional memory allocations
     * in order to compress/encrypt/etc the data.  We therefore need to
     * make sure that there is sufficient available memory for this.
     */
    if ((error = arc_memory_throttle(reserve, anon_size, txg)) != 0)
        return (error);

    /*
     * Throttle writes when the amount of dirty data in the cache
     * gets too large.  We try to keep the cache less than half full
     * of dirty blocks so that our sync times don't grow too large.
     * Note: if two requests come in concurrently, we might let them
     * both succeed, when one of them should fail.  Not a huge deal.
     */
    if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
        anon_size > arc_c / 4) {
        dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
            "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
            arc_tempreserve >> 10,
            arc_anon->arcs_lsize[ARC_BUFC_METADATA] >> 10,
            arc_anon->arcs_lsize[ARC_BUFC_DATA] >> 10,
            reserve >> 10, arc_c >> 10);
        return (ERESTART);
    }
    atomic_add_64(&arc_tempreserve, reserve);
    return (0);
}
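/*
 * Worked example of the dirty-data throttle above (illustrative numbers):
 * with arc_c at 1 GB, arc_tempreserve_space() returns ERESTART once
 * reserve + arc_tempreserve + anon_size exceeds 512 MB (arc_c / 2) *and*
 * anon_size alone exceeds 256 MB (arc_c / 4).  A caller seeing ERESTART is
 * expected to back off and retry, giving sync context a chance to drain
 * the anonymous (dirty) buffers first.
 */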
static kmutex_t arc_lowmem_lock;
#ifdef _KERNEL
static eventhandler_tag arc_event_lowmem = NULL;

static void
arc_lowmem(void *arg __unused, int howto __unused)
{

    /* Serialize access via arc_lowmem_lock. */
    mutex_enter(&arc_lowmem_lock);
    mutex_enter(&arc_reclaim_thr_lock);
    needfree = 1;
    cv_signal(&arc_reclaim_thr_cv);

    /*
     * It is unsafe to block here in arbitrary threads, because we can come
     * here from ARC itself and may hold ARC locks and thus risk a deadlock
     * with the ARC reclaim thread.
     */
    if (curproc == pageproc) {
        while (needfree)
            msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
    }
    mutex_exit(&arc_reclaim_thr_lock);
    mutex_exit(&arc_lowmem_lock);
}
#endif
void
arc_init(void)
{
    int i, prefetch_tunable_set = 0;

    mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
    mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);

    /* Convert seconds to clock ticks */
    arc_min_prefetch_lifespan = 1 * hz;

    /* Start out with 1/8 of all memory */
    arc_c = kmem_size() / 8;

#ifdef sun
#ifdef _KERNEL
    /*
     * On architectures where the physical memory can be larger
     * than the addressable space (intel in 32-bit mode), we may
     * need to limit the cache to 1/8 of VM size.
     */
    arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
#endif
#endif /* sun */
    /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
    arc_c_min = MAX(arc_c / 4, 64<<18);
    /* set max to 5/8 of all memory, or all but 1GB, whichever is more */
    if (arc_c * 8 >= 1<<30)
        arc_c_max = (arc_c * 8) - (1<<30);
    else
        arc_c_max = arc_c_min;
    arc_c_max = MAX(arc_c * 5, arc_c_max);

#ifdef _KERNEL
    /*
     * Allow the tunables to override our calculations if they are
     * reasonable (i.e. over 16MB).
     */
    if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
        arc_c_max = zfs_arc_max;
    if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
        arc_c_min = zfs_arc_min;
#endif

    arc_c = arc_c_max;
    arc_p = (arc_c >> 1);

    /* limit meta-data to 1/4 of the arc capacity */
    arc_meta_limit = arc_c_max / 4;

    /* Allow the tunable to override if it is reasonable */
    if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
        arc_meta_limit = zfs_arc_meta_limit;

    if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
        arc_c_min = arc_meta_limit / 2;

    if (zfs_arc_grow_retry > 0)
        arc_grow_retry = zfs_arc_grow_retry;

    if (zfs_arc_shrink_shift > 0)
        arc_shrink_shift = zfs_arc_shrink_shift;

    if (zfs_arc_p_min_shift > 0)
        arc_p_min_shift = zfs_arc_p_min_shift;

    /* if kmem_flags are set, lets try to use less memory */
    if (kmem_debugging())
        arc_c = arc_c / 2;
    if (arc_c < arc_c_min)
        arc_c = arc_c_min;

    zfs_arc_min = arc_c_min;
    zfs_arc_max = arc_c_max;

    arc_anon = &ARC_anon;
    arc_mru = &ARC_mru;
    arc_mru_ghost = &ARC_mru_ghost;
    arc_mfu = &ARC_mfu;
    arc_mfu_ghost = &ARC_mfu_ghost;
    arc_l2c_only = &ARC_l2c_only;
    arc_size = 0;

    for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
        mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);
        mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
            NULL, MUTEX_DEFAULT, NULL);

        list_create(&arc_mru->arcs_lists[i],
            sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mru_ghost->arcs_lists[i],
            sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mfu->arcs_lists[i],
            sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_mfu_ghost->arcs_lists[i],
            sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
        list_create(&arc_l2c_only->arcs_lists[i],
            sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
    }

    buf_init();

    arc_thread_exit = 0;
    arc_eviction_list = NULL;
    mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
    bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));

    arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
        sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

    if (arc_ksp != NULL) {
        arc_ksp->ks_data = &arc_stats;
        kstat_install(arc_ksp);
    }

    (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
        TS_RUN, minclsyspri);

#ifdef _KERNEL
    arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
        EVENTHANDLER_PRI_FIRST);
#endif

    arc_dead = FALSE;
    arc_warm = B_FALSE;

    if (zfs_write_limit_max == 0)
        zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
    else
        zfs_write_limit_shift = 0;
    mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);

#ifdef _KERNEL
    if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
        prefetch_tunable_set = 1;

#ifdef __i386__
    if (prefetch_tunable_set == 0) {
        printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
            "-- to enable,\n");
        printf("            add \"vfs.zfs.prefetch_disable=0\" "
            "to /boot/loader.conf.\n");
        zfs_prefetch_disable = 1;
    }
#else
    if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
        prefetch_tunable_set == 0) {
        printf("ZFS NOTICE: Prefetch is disabled by default if less "
            "than 4GB of RAM is present;\n"
            "            to enable, add \"vfs.zfs.prefetch_disable=0\" "
            "to /boot/loader.conf.\n");
        zfs_prefetch_disable = 1;
    }
#endif
    /* Warn about ZFS memory and address space requirements. */
    if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
        printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
            "expect unstable behavior.\n");
    }
    if (kmem_size() < 512 * (1 << 20)) {
        printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
            "expect unstable behavior.\n");
        printf("             Consider tuning vm.kmem_size and "
            "vm.kmem_size_max\n");
        printf("             in /boot/loader.conf.\n");
    }
#endif
}
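/*
 * Worked example of the sizing above (illustrative, assuming the tunables
 * are left at zero): on a machine where kmem_size() is 16 GB, arc_c starts
 * at 2 GB, arc_c_min becomes MAX(512 MB, 16 MB) = 512 MB, and arc_c_max
 * becomes MAX(2 GB * 5, 16 GB - 1 GB) = 15 GB.  arc_c is then raised to
 * arc_c_max, arc_p starts at half of that (7.5 GB), arc_meta_limit defaults
 * to a quarter of arc_c_max (3.75 GB), and, since arc_c_min is below half
 * the meta limit, arc_c_min is raised to 1.875 GB.
 */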
void
arc_fini(void)
{
    int i;

    mutex_enter(&arc_reclaim_thr_lock);
    arc_thread_exit = 1;
    cv_signal(&arc_reclaim_thr_cv);
    while (arc_thread_exit != 0)
        cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
    mutex_exit(&arc_reclaim_thr_lock);

    arc_flush(NULL);

    arc_dead = TRUE;

    if (arc_ksp != NULL) {
        kstat_delete(arc_ksp);
        arc_ksp = NULL;
    }

    mutex_destroy(&arc_eviction_mtx);
    mutex_destroy(&arc_reclaim_thr_lock);
    cv_destroy(&arc_reclaim_thr_cv);

    for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
        list_destroy(&arc_mru->arcs_lists[i]);
        list_destroy(&arc_mru_ghost->arcs_lists[i]);
        list_destroy(&arc_mfu->arcs_lists[i]);
        list_destroy(&arc_mfu_ghost->arcs_lists[i]);
        list_destroy(&arc_l2c_only->arcs_lists[i]);

        mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
        mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
        mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
        mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
        mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
        mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
    }

    mutex_destroy(&zfs_write_limit_lock);

    buf_fini();

    ASSERT(arc_loaned_bytes == 0);

    mutex_destroy(&arc_lowmem_lock);
#ifdef _KERNEL
    if (arc_event_lowmem != NULL)
        EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
#endif
}
/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this, there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *	       head -->                        tail
 *	        +---------------------+----------+
 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *	        +---------------------+----------+   |   o L2ARC eligible
 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *	        +---------------------+----------+   |
 *	             15.9 Gbytes      ^ 32 Mbytes    |
 *	                           headroom          |
 *	                                      l2arc_feed_thread()
 *	                                             |
 *	                 l2arc write hand <--[oooo]--'
 *	                         |           8 Mbyte
 *	                         |          write max
 *	                         V
 *	          +==============================+
 *	L2ARC dev |####|#|###|###|    |####| ... |
 *	          +==============================+
 *	                     32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the
 * list heads for eligible buffers, greatly increasing its chance of finding
 * them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
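/*
 * To make the headroom in the diagram above concrete (illustrative,
 * assuming the usual defaults of l2arc_write_max = 8 MB, l2arc_write_boost
 * = 8 MB and l2arc_headroom = 2): while arc_warm is false a feed cycle
 * targets 8 + 8 = 16 MB, so each list scan gives up after passing
 * 16 MB * 2 = 32 MB of buffers - the "32 Mbytes" of headroom pictured.
 * Once the ARC is warm the target drops back to 8 MB per interval.
 */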
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
{
    /*
     * A buffer is *not* eligible for the L2ARC if it:
     * 1. belongs to a different spa.
     * 2. is already cached on the L2ARC.
     * 3. has an I/O in progress (it may be an incomplete read).
     * 4. is flagged not eligible (zfs property).
     */
    if (ab->b_spa != spa_guid) {
        ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
        return (B_FALSE);
    }
    if (ab->b_l2hdr != NULL) {
        ARCSTAT_BUMP(arcstat_l2_write_in_l2);
        return (B_FALSE);
    }
    if (HDR_IO_IN_PROGRESS(ab)) {
        ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
        return (B_FALSE);
    }
    if (!HDR_L2CACHE(ab)) {
        ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
        return (B_FALSE);
    }

    return (B_TRUE);
}

static uint64_t
l2arc_write_size(l2arc_dev_t *dev)
{
    uint64_t size;

    size = dev->l2ad_write;

    if (arc_warm == B_FALSE)
        size += dev->l2ad_boost;

    return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
    clock_t interval, next, now;

    /*
     * If the ARC lists are busy, increase our write rate; if the
     * lists are stale, idle back.  This is achieved by checking
     * how much we previously wrote - if it was more than half of
     * what we wanted, schedule the next write much sooner.
     */
    if (l2arc_feed_again && wrote > (wanted / 2))
        interval = (hz * l2arc_feed_min_ms) / 1000;
    else
        interval = hz * l2arc_feed_secs;

    now = ddi_get_lbolt();
    next = MAX(now, MIN(now + interval, began + interval));

    return (next);
}
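/*
 * Worked example (illustrative, assuming the default l2arc_feed_secs = 1
 * and l2arc_feed_min_ms = 200): if a cycle wanted 16 MB and wrote 10 MB,
 * then wrote > wanted / 2 and the next write is scheduled 200 ms after the
 * previous one began; if it wrote only 2 MB, the thread idles back to the
 * full one-second interval.  The MAX/MIN clamp above just keeps the wakeup
 * in the future while anchoring it to when the previous cycle started.
 */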
static void
l2arc_hdr_stat_add(void)
{
    ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
    ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
}

static void
l2arc_hdr_stat_remove(void)
{
    ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
    ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
    l2arc_dev_t *first, *next = NULL;

    /*
     * Lock out the removal of spas (spa_namespace_lock), then removal
     * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
     * both locks will be dropped and a spa config lock held instead.
     */
    mutex_enter(&spa_namespace_lock);
    mutex_enter(&l2arc_dev_mtx);

    /* if there are no vdevs, there is nothing to do */
    if (l2arc_ndev == 0)
        goto out;

    first = NULL;
    next = l2arc_dev_last;
    do {
        /* loop around the list looking for a non-faulted vdev */
        if (next == NULL) {
            next = list_head(l2arc_dev_list);
        } else {
            next = list_next(l2arc_dev_list, next);
            if (next == NULL)
                next = list_head(l2arc_dev_list);
        }

        /* if we have come back to the start, bail out */
        if (first == NULL)
            first = next;
        else if (next == first)
            break;

    } while (vdev_is_dead(next->l2ad_vdev));

    /* if we were unable to find any usable vdevs, return NULL */
    if (vdev_is_dead(next->l2ad_vdev))
        next = NULL;

    l2arc_dev_last = next;

out:
    mutex_exit(&l2arc_dev_mtx);

    /*
     * Grab the config lock to prevent the 'next' device from being
     * removed while we are writing to it.
     */
    if (next != NULL)
        spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
    mutex_exit(&spa_namespace_lock);

    return (next);
}

/*
 * Free buffers that were tagged for destruction.
 */
static void
l2arc_do_free_on_write(void)
{
    list_t *buflist;
    l2arc_data_free_t *df, *df_prev;

    mutex_enter(&l2arc_free_on_write_mtx);
    buflist = l2arc_free_on_write;

    for (df = list_tail(buflist); df; df = df_prev) {
        df_prev = list_prev(buflist, df);
        ASSERT(df->l2df_data != NULL);
        ASSERT(df->l2df_func != NULL);
        df->l2df_func(df->l2df_data, df->l2df_size);
        list_remove(buflist, df);
        kmem_free(df, sizeof (l2arc_data_free_t));
    }

    mutex_exit(&l2arc_free_on_write_mtx);
}
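/*
 * Illustrative walk of the rotor in l2arc_dev_get_next() above: with cache
 * devices A, B and C on the list and l2arc_dev_last pointing at B, the next
 * call hands out C; if C's vdev is dead the scan wraps to A, and only when
 * the scan comes all the way back to where it started is NULL returned.
 * Whatever device is returned comes back with its spa's SCL_L2ARC config
 * lock held for the caller.
 */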
/*
 * A write to a cache device has completed.  Update all headers to allow
 * reads from these buffers to begin.
 */
static void
l2arc_write_done(zio_t *zio)
{
    l2arc_write_callback_t *cb;
    l2arc_dev_t *dev;
    list_t *buflist;
    arc_buf_hdr_t *head, *ab, *ab_prev;
    l2arc_buf_hdr_t *abl2;
    kmutex_t *hash_lock;

    cb = zio->io_private;
    ASSERT(cb != NULL);
    dev = cb->l2wcb_dev;
    ASSERT(dev != NULL);
    head = cb->l2wcb_head;
    ASSERT(head != NULL);
    buflist = dev->l2ad_buflist;
    ASSERT(buflist != NULL);
    DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
        l2arc_write_callback_t *, cb);

    if (zio->io_error != 0)
        ARCSTAT_BUMP(arcstat_l2_writes_error);

    mutex_enter(&l2arc_buflist_mtx);

    /*
     * All writes completed, or an error was hit.
     */
    for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
        ab_prev = list_prev(buflist, ab);

        hash_lock = HDR_LOCK(ab);
        if (!mutex_tryenter(hash_lock)) {
            /*
             * This buffer misses out.  It may be in a stage
             * of eviction.  Its ARC_L2_WRITING flag will be
             * left set, denying reads to this buffer.
             */
            ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
            continue;
        }

        if (zio->io_error != 0) {
            /*
             * Error - drop L2ARC entry.
             */
            list_remove(buflist, ab);
            abl2 = ab->b_l2hdr;
            ab->b_l2hdr = NULL;
            trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
                ab->b_size);
            kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
            ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
        }

        /*
         * Allow ARC to begin reads to this L2ARC entry.
         */
        ab->b_flags &= ~ARC_L2_WRITING;

        mutex_exit(hash_lock);
    }

    atomic_inc_64(&l2arc_writes_done);
    list_remove(buflist, head);
    kmem_cache_free(hdr_cache, head);
    mutex_exit(&l2arc_buflist_mtx);

    l2arc_do_free_on_write();

    kmem_free(cb, sizeof (l2arc_write_callback_t));
}

/*
 * A read to a cache device completed.  Validate buffer contents before
 * handing over to the regular ARC routines.
 */
static void
l2arc_read_done(zio_t *zio)
{
    l2arc_read_callback_t *cb;
    arc_buf_hdr_t *hdr;
    arc_buf_t *buf;
    kmutex_t *hash_lock;
    int equal;

    ASSERT(zio->io_vd != NULL);
    ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);

    spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);

    cb = zio->io_private;
    ASSERT(cb != NULL);
    buf = cb->l2rcb_buf;
    ASSERT(buf != NULL);

    hash_lock = HDR_LOCK(buf->b_hdr);
    mutex_enter(hash_lock);
    hdr = buf->b_hdr;
    ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));

    /*
     * Check this survived the L2ARC journey.
     */
    equal = arc_cksum_equal(buf);
    if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
        mutex_exit(hash_lock);
        zio->io_private = buf;
        zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
        zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
        arc_read_done(zio);
    } else {
        mutex_exit(hash_lock);
        /*
         * Buffer didn't survive caching.  Increment stats and
         * reissue to the original storage device.
         */
        if (zio->io_error != 0) {
            ARCSTAT_BUMP(arcstat_l2_io_error);
        } else {
            zio->io_error = EIO;
        }
        if (!equal)
            ARCSTAT_BUMP(arcstat_l2_cksum_bad);

        /*
         * If there's no waiter, issue an async i/o to the primary
         * storage now.  If there *is* a waiter, the caller must
         * issue the i/o in a context where it's OK to block.
         */
        if (zio->io_waiter == NULL) {
            zio_t *pio = zio_unique_parent(zio);

            ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);

            zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
                buf->b_data, zio->io_size, arc_read_done, buf,
                zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
        }
    }

    kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
 * This is the list priority from which the L2ARC will search for pages to
 * cache.  This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to
 * cycle through lists in the desired order.  This order can have a
 * significant effect on cache performance.
 *
 * Currently the metadata lists are hit first, MFU then MRU, followed by
 * the data lists.  This function returns a locked list, and also returns
 * the lock pointer.
 */
static list_t *
l2arc_list_locked(int list_num, kmutex_t **lock)
{
    list_t *list = NULL;
    int idx;

    ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);

    if (list_num < ARC_BUFC_NUMMETADATALISTS) {
        idx = list_num;
        list = &arc_mfu->arcs_lists[idx];
        *lock = ARCS_LOCK(arc_mfu, idx);
    } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
        idx = list_num - ARC_BUFC_NUMMETADATALISTS;
        list = &arc_mru->arcs_lists[idx];
        *lock = ARCS_LOCK(arc_mru, idx);
    } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
        ARC_BUFC_NUMDATALISTS)) {
        idx = list_num - ARC_BUFC_NUMMETADATALISTS;
        list = &arc_mfu->arcs_lists[idx];
        *lock = ARCS_LOCK(arc_mfu, idx);
    } else {
        idx = list_num - ARC_BUFC_NUMLISTS;
        list = &arc_mru->arcs_lists[idx];
        *lock = ARCS_LOCK(arc_mru, idx);
    }

    ASSERT(!(MUTEX_HELD(*lock)));
    mutex_enter(*lock);
    return (list);
}
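/*
 * The resulting search order, spelled out (indices are into the
 * 0 .. 2 * ARC_BUFC_NUMLISTS - 1 cycle used by l2arc_write_buffers(), and
 * NUMMETA abbreviates ARC_BUFC_NUMMETADATALISTS):
 *
 *	[0, NUMMETA)			MFU metadata lists
 *	[NUMMETA, 2 * NUMMETA)		MRU metadata lists
 *	[2 * NUMMETA, + NUMDATALISTS)	MFU data lists
 *	remainder			MRU data lists
 *
 * so metadata is offered to the L2ARC first in every feed cycle.
 */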
/*
 * Evict buffers from the device write hand to the distance specified in
 * bytes.  This distance may span populated buffers, it may span nothing.
 * This is clearing a region on the L2ARC device ready for writing.
 * If the 'all' boolean is set, every buffer is evicted.
 */
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
    list_t *buflist;
    l2arc_buf_hdr_t *abl2;
    arc_buf_hdr_t *ab, *ab_prev;
    kmutex_t *hash_lock;
    uint64_t taddr;

    buflist = dev->l2ad_buflist;

    if (buflist == NULL)
        return;

    if (!all && dev->l2ad_first) {
        /*
         * This is the first sweep through the device.  There is
         * nothing to evict.
         */
        return;
    }

    if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
        /*
         * When nearing the end of the device, evict to the end
         * before the device write hand jumps to the start.
         */
        taddr = dev->l2ad_end;
    } else {
        taddr = dev->l2ad_hand + distance;
    }
    DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
        uint64_t, taddr, boolean_t, all);

top:
    mutex_enter(&l2arc_buflist_mtx);
    for (ab = list_tail(buflist); ab; ab = ab_prev) {
        ab_prev = list_prev(buflist, ab);

        hash_lock = HDR_LOCK(ab);
        if (!mutex_tryenter(hash_lock)) {
            /*
             * Missed the hash lock.  Retry.
             */
            ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
            mutex_exit(&l2arc_buflist_mtx);
            mutex_enter(hash_lock);
            mutex_exit(hash_lock);
            goto top;
        }

        if (HDR_L2_WRITE_HEAD(ab)) {
            /*
             * We hit a write head node.  Leave it for
             * l2arc_write_done().
             */
            list_remove(buflist, ab);
            mutex_exit(hash_lock);
            continue;
        }

        if (!all && ab->b_l2hdr != NULL &&
            (ab->b_l2hdr->b_daddr > taddr ||
            ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
            /*
             * We've evicted to the target address,
             * or the end of the device.
             */
            mutex_exit(hash_lock);
            break;
        }

        if (HDR_FREE_IN_PROGRESS(ab)) {
            /*
             * Already on the path to destruction.
             */
            mutex_exit(hash_lock);
            continue;
        }

        if (ab->b_state == arc_l2c_only) {
            ASSERT(!HDR_L2_READING(ab));
            /*
             * This doesn't exist in the ARC.  Destroy.
             * arc_hdr_destroy() will call list_remove()
             * and decrement arcstat_l2_size.
             */
            arc_change_state(arc_anon, ab, hash_lock);
            arc_hdr_destroy(ab);
        } else {
            /*
             * Invalidate issued or about to be issued
             * reads, since we may be about to write
             * over this location.
             */
            if (HDR_L2_READING(ab)) {
                ARCSTAT_BUMP(arcstat_l2_evict_reading);
                ab->b_flags |= ARC_L2_EVICTED;
            }

            /*
             * Tell ARC this no longer exists in L2ARC.
             */
            if (ab->b_l2hdr != NULL) {
                abl2 = ab->b_l2hdr;
                ab->b_l2hdr = NULL;
                kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
                ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
            }
            list_remove(buflist, ab);

            /*
             * This may have been leftover after a
             * failed write.
             */
            ab->b_flags &= ~ARC_L2_WRITING;
        }
        mutex_exit(hash_lock);
    }
    mutex_exit(&l2arc_buflist_mtx);

    vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
    dev->l2ad_evict = taddr;
}
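/*
 * Worked example of the target address selection above (illustrative
 * numbers): on a device with l2ad_end = 32 GB, a write hand at 31.99 GB
 * and a 16 MB distance, the hand is within 2 * distance of the end, so
 * taddr becomes l2ad_end and the whole tail region is cleared before the
 * hand wraps to l2ad_start.  In the common mid-device case the eviction
 * simply clears [l2ad_hand, l2ad_hand + distance).
 */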
/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
 * for reading until they have completed writing.
 */
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
    arc_buf_hdr_t *ab, *ab_prev, *head;
    l2arc_buf_hdr_t *hdrl2;
    list_t *list;
    uint64_t passed_sz, write_sz, buf_sz, headroom;
    void *buf_data;
    kmutex_t *hash_lock, *list_lock;
    boolean_t have_lock, full;
    l2arc_write_callback_t *cb;
    zio_t *pio, *wzio;
    uint64_t guid = spa_load_guid(spa);
    int try;

    ASSERT(dev->l2ad_vdev != NULL);

    pio = NULL;
    write_sz = 0;
    full = B_FALSE;
    head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
    head->b_flags |= ARC_L2_WRITE_HEAD;

    ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
    /*
     * Copy buffers for L2ARC writing.
     */
    mutex_enter(&l2arc_buflist_mtx);
    for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
        list = l2arc_list_locked(try, &list_lock);
        passed_sz = 0;
        ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);

        /*
         * L2ARC fast warmup.
         *
         * Until the ARC is warm and starts to evict, read from the
         * head of the ARC lists rather than the tail.
         */
        headroom = target_sz * l2arc_headroom;
        if (arc_warm == B_FALSE)
            ab = list_head(list);
        else
            ab = list_tail(list);
        if (ab == NULL)
            ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);

        for (; ab; ab = ab_prev) {
            if (arc_warm == B_FALSE)
                ab_prev = list_next(list, ab);
            else
                ab_prev = list_prev(list, ab);
            ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);

            hash_lock = HDR_LOCK(ab);
            have_lock = MUTEX_HELD(hash_lock);
            if (!have_lock && !mutex_tryenter(hash_lock)) {
                ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
                /*
                 * Skip this buffer rather than waiting.
                 */
                continue;
            }

            passed_sz += ab->b_size;
            if (passed_sz > headroom) {
                /*
                 * Searched too far.
                 */
                mutex_exit(hash_lock);
                ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
                break;
            }

            if (!l2arc_write_eligible(guid, ab)) {
                mutex_exit(hash_lock);
                continue;
            }

            if ((write_sz + ab->b_size) > target_sz) {
                full = B_TRUE;
                mutex_exit(hash_lock);
                ARCSTAT_BUMP(arcstat_l2_write_full);
                break;
            }

            if (pio == NULL) {
                /*
                 * Insert a dummy header on the buflist so
                 * l2arc_write_done() can find where the
                 * write buffers begin without searching.
                 */
                list_insert_head(dev->l2ad_buflist, head);

                cb = kmem_alloc(
                    sizeof (l2arc_write_callback_t), KM_SLEEP);
                cb->l2wcb_dev = dev;
                cb->l2wcb_head = head;
                pio = zio_root(spa, l2arc_write_done, cb,
                    ZIO_FLAG_CANFAIL);
                ARCSTAT_BUMP(arcstat_l2_write_pios);
            }

            /*
             * Create and add a new L2ARC header.
             */
            hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
            hdrl2->b_dev = dev;
            hdrl2->b_daddr = dev->l2ad_hand;

            ab->b_flags |= ARC_L2_WRITING;
            ab->b_l2hdr = hdrl2;
            list_insert_head(dev->l2ad_buflist, ab);
            buf_data = ab->b_buf->b_data;
            buf_sz = ab->b_size;

            /*
             * Compute and store the buffer cksum before
             * writing.  On debug the cksum is verified first.
             */
            arc_cksum_verify(ab->b_buf);
            arc_cksum_compute(ab->b_buf, B_TRUE);

            mutex_exit(hash_lock);

            wzio = zio_write_phys(pio, dev->l2ad_vdev,
                dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
                NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
                ZIO_FLAG_CANFAIL, B_FALSE);

            DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
                zio_t *, wzio);
            (void) zio_nowait(wzio);

            /*
             * Keep the clock hand suitably device-aligned.
             */
            buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);

            write_sz += buf_sz;
            dev->l2ad_hand += buf_sz;
        }

        mutex_exit(list_lock);

        if (full == B_TRUE)
            break;
    }
    mutex_exit(&l2arc_buflist_mtx);

    if (pio == NULL) {
        ASSERT0(write_sz);
        kmem_cache_free(hdr_cache, head);
        return (0);
    }

    ASSERT3U(write_sz, <=, target_sz);
    ARCSTAT_BUMP(arcstat_l2_writes_sent);
    ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
    ARCSTAT_INCR(arcstat_l2_size, write_sz);
    vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);

    /*
     * Bump device hand to the device start if it is approaching the end.
     * l2arc_evict() will already have evicted ahead for this case.
     */
    if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
        vdev_space_update(dev->l2ad_vdev,
            dev->l2ad_end - dev->l2ad_hand, 0, 0);
        dev->l2ad_hand = dev->l2ad_start;
        dev->l2ad_evict = dev->l2ad_start;
        dev->l2ad_first = B_FALSE;
    }

    dev->l2ad_writing = B_TRUE;
    (void) zio_wait(pio);
    dev->l2ad_writing = B_FALSE;

    return (write_sz);
}
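/*
 * A note on the vdev_psize_to_asize() rounding above (illustrative): on a
 * cache device with a 4 KB sector size (ashift 12), a 512-byte buffer still
 * advances the write hand by a full 4 KB, so write_sz accounts for the
 * space the device actually consumes rather than the logical bytes queued.
 */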
/*
 * This thread feeds the L2ARC at regular intervals.  This is the beating
 * heart of the L2ARC.
 */
static void
l2arc_feed_thread(void *dummy __unused)
{
    callb_cpr_t cpr;
    l2arc_dev_t *dev;
    spa_t *spa;
    uint64_t size, wrote;
    clock_t begin, next = ddi_get_lbolt();

    CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);

    mutex_enter(&l2arc_feed_thr_lock);

    while (l2arc_thread_exit == 0) {
        CALLB_CPR_SAFE_BEGIN(&cpr);
        (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
            next - ddi_get_lbolt());
        CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
        next = ddi_get_lbolt() + hz;

        /*
         * Quick check for L2ARC devices.
         */
        mutex_enter(&l2arc_dev_mtx);
        if (l2arc_ndev == 0) {
            mutex_exit(&l2arc_dev_mtx);
            continue;
        }
        mutex_exit(&l2arc_dev_mtx);
        begin = ddi_get_lbolt();

        /*
         * This selects the next l2arc device to write to, and in
         * doing so the next spa to feed from: dev->l2ad_spa.  This
         * will return NULL if there are now no l2arc devices or if
         * they are all faulted.
         *
         * If a device is returned, its spa's config lock is also
         * held to prevent device removal.  l2arc_dev_get_next()
         * will grab and release l2arc_dev_mtx.
         */
        if ((dev = l2arc_dev_get_next()) == NULL)
            continue;

        spa = dev->l2ad_spa;
        ASSERT(spa != NULL);

        /*
         * If the pool is read-only then force the feed thread to
         * sleep a little longer.
         */
        if (!spa_writeable(spa)) {
            next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
            spa_config_exit(spa, SCL_L2ARC, dev);
            continue;
        }

        /*
         * Avoid contributing to memory pressure.
         */
        if (arc_reclaim_needed()) {
            ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
            spa_config_exit(spa, SCL_L2ARC, dev);
            continue;
        }

        ARCSTAT_BUMP(arcstat_l2_feeds);

        size = l2arc_write_size(dev);

        /*
         * Evict L2ARC buffers that will be overwritten.
         */
        l2arc_evict(dev, size, B_FALSE);

        /*
         * Write ARC buffers.
         */
        wrote = l2arc_write_buffers(spa, dev, size);

        /*
         * Calculate interval between writes.
         */
        next = l2arc_write_interval(begin, size, wrote);
        spa_config_exit(spa, SCL_L2ARC, dev);
    }

    l2arc_thread_exit = 0;
    cv_broadcast(&l2arc_feed_thr_cv);
    CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
    thread_exit();
}

boolean_t
l2arc_vdev_present(vdev_t *vd)
{
    l2arc_dev_t *dev;

    mutex_enter(&l2arc_dev_mtx);
    for (dev = list_head(l2arc_dev_list); dev != NULL;
        dev = list_next(l2arc_dev_list, dev)) {
        if (dev->l2ad_vdev == vd)
            break;
    }
    mutex_exit(&l2arc_dev_mtx);

    return (dev != NULL);
}
/*
 * Add a vdev for use by the L2ARC.  By this point the spa has already
 * validated the vdev and opened it.
 */
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
    l2arc_dev_t *adddev;

    ASSERT(!l2arc_vdev_present(vd));

    /*
     * Create a new l2arc device entry.
     */
    adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
    adddev->l2ad_spa = spa;
    adddev->l2ad_vdev = vd;
    adddev->l2ad_write = l2arc_write_max;
    adddev->l2ad_boost = l2arc_write_boost;
    adddev->l2ad_start = VDEV_LABEL_START_SIZE;
    adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
    adddev->l2ad_hand = adddev->l2ad_start;
    adddev->l2ad_evict = adddev->l2ad_start;
    adddev->l2ad_first = B_TRUE;
    adddev->l2ad_writing = B_FALSE;
    ASSERT3U(adddev->l2ad_write, >, 0);

    /*
     * This is a list of all ARC buffers that are still valid on the
     * device.
     */
    adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
    list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
        offsetof(arc_buf_hdr_t, b_l2node));

    vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);

    /*
     * Add device to global list
     */
    mutex_enter(&l2arc_dev_mtx);
    list_insert_head(l2arc_dev_list, adddev);
    atomic_inc_64(&l2arc_ndev);
    mutex_exit(&l2arc_dev_mtx);
}

/*
 * Remove a vdev from the L2ARC.
 */
void
l2arc_remove_vdev(vdev_t *vd)
{
    l2arc_dev_t *dev, *nextdev, *remdev = NULL;

    /*
     * Find the device by vdev
     */
    mutex_enter(&l2arc_dev_mtx);
    for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
        nextdev = list_next(l2arc_dev_list, dev);
        if (vd == dev->l2ad_vdev) {
            remdev = dev;
            break;
        }
    }
    ASSERT(remdev != NULL);

    /*
     * Remove device from global list
     */
    list_remove(l2arc_dev_list, remdev);
    l2arc_dev_last = NULL;		/* may have been invalidated */
    atomic_dec_64(&l2arc_ndev);
    mutex_exit(&l2arc_dev_mtx);

    /*
     * Clear all buflists and ARC references.  L2ARC device flush.
     */
    l2arc_evict(remdev, 0, B_TRUE);
    list_destroy(remdev->l2ad_buflist);
    kmem_free(remdev->l2ad_buflist, sizeof (list_t));
    kmem_free(remdev, sizeof (l2arc_dev_t));
}

void
l2arc_init(void)
{
    l2arc_thread_exit = 0;
    l2arc_ndev = 0;
    l2arc_writes_sent = 0;
    l2arc_writes_done = 0;

    mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
    cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
    mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
    mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);

    l2arc_dev_list = &L2ARC_dev_list;
    l2arc_free_on_write = &L2ARC_free_on_write;
    list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
        offsetof(l2arc_dev_t, l2ad_node));
    list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
        offsetof(l2arc_data_free_t, l2df_list_node));
}
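/*
 * Illustrative layout note for l2arc_add_vdev() above: if
 * VDEV_LABEL_START_SIZE is 4 MB (two 256 KB front labels plus the boot
 * block region - the usual value, though it is an assumption here), the
 * device's usable range is [4 MB, 4 MB + vdev_get_min_asize(vd)), and the
 * write hand, evict pointer and rotor all stay inside that window.
 */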
void
l2arc_fini(void)
{
    /*
     * This is called from dmu_fini(), which is called from spa_fini();
     * Because of this, we can assume that all l2arc devices have
     * already been removed when the pools themselves were removed.
     */

    l2arc_do_free_on_write();

    mutex_destroy(&l2arc_feed_thr_lock);
    cv_destroy(&l2arc_feed_thr_cv);
    mutex_destroy(&l2arc_dev_mtx);
    mutex_destroy(&l2arc_buflist_mtx);
    mutex_destroy(&l2arc_free_on_write_mtx);

    list_destroy(l2arc_dev_list);
    list_destroy(l2arc_free_on_write);
}

void
l2arc_start(void)
{
    if (!(spa_mode_global & FWRITE))
        return;

    (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
        TS_RUN, minclsyspri);
}

void
l2arc_stop(void)
{
    if (!(spa_mode_global & FWRITE))
        return;

    mutex_enter(&l2arc_feed_thr_lock);
    cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
    l2arc_thread_exit = 1;
    while (l2arc_thread_exit != 0)
        cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
    mutex_exit(&l2arc_feed_thr_lock);
}