/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright (c) 2018, Joyent, Inc.
 * Copyright (c) 2011, 2018 by Delphix. All rights reserved.
 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
 * Copyright 2017 Nexenta Systems, Inc.  All rights reserved.
 */

/*
 * DVA-based Adjustable Replacement Cache
 *
 * While much of the theory of operation used here is
 * based on the self-tuning, low overhead replacement cache
 * presented by Megiddo and Modha at FAST 2003, there are some
 * significant differences:
 *
 * 1. The Megiddo and Modha model assumes any page is evictable.
 * Pages in its cache cannot be "locked" into memory.  This makes
 * the eviction algorithm simple: evict the last page in the list.
 * This also makes the performance characteristics easy to reason
 * about.  Our cache is not so simple.  At any given moment, some
 * subset of the blocks in the cache are un-evictable because we
 * have handed out a reference to them.  Blocks are only evictable
 * when there are no external references active.  This makes
 * eviction far more problematic:  we choose to evict the evictable
 * blocks that are the "lowest" in the list.
 *
 * There are times when it is not possible to evict the requested
 * space.  In these circumstances we are unable to adjust the cache
 * size.  To prevent the cache growing unbounded at these times we
 * implement a "cache throttle" that slows the flow of new data
 * into the cache until we can make space available.
 *
 * 2. The Megiddo and Modha model assumes a fixed cache size.
 * Pages are evicted when the cache is full and there is a cache
 * miss.  Our model has a variable sized cache.  It grows with
 * high use, but also tries to react to memory pressure from the
 * operating system: decreasing its size when system memory is
 * tight.
 *
 * 3. The Megiddo and Modha model assumes a fixed page size.  All
 * elements of the cache are therefore exactly the same size.  So
 * when adjusting the cache size following a cache miss, it's simply
 * a matter of choosing a single page to evict.  In our model, we
 * have variable sized cache blocks (ranging from 512 bytes to
 * 128K bytes).  We therefore choose a set of blocks to evict to make
 * space for a cache miss that approximates as closely as possible
 * the space used by the new block.
 *
 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
 * by N. Megiddo & D.
Modha, FAST 2003 72 */ 73 74/* 75 * The locking model: 76 * 77 * A new reference to a cache buffer can be obtained in two 78 * ways: 1) via a hash table lookup using the DVA as a key, 79 * or 2) via one of the ARC lists. The arc_read() interface 80 * uses method 1, while the internal ARC algorithms for 81 * adjusting the cache use method 2. We therefore provide two 82 * types of locks: 1) the hash table lock array, and 2) the 83 * ARC list locks. 84 * 85 * Buffers do not have their own mutexes, rather they rely on the 86 * hash table mutexes for the bulk of their protection (i.e. most 87 * fields in the arc_buf_hdr_t are protected by these mutexes). 88 * 89 * buf_hash_find() returns the appropriate mutex (held) when it 90 * locates the requested buffer in the hash table. It returns 91 * NULL for the mutex if the buffer was not in the table. 92 * 93 * buf_hash_remove() expects the appropriate hash mutex to be 94 * already held before it is invoked. 95 * 96 * Each ARC state also has a mutex which is used to protect the 97 * buffer list associated with the state. When attempting to 98 * obtain a hash table lock while holding an ARC list lock you 99 * must use: mutex_tryenter() to avoid deadlock. Also note that 100 * the active state mutex must be held before the ghost state mutex. 101 * 102 * Note that the majority of the performance stats are manipulated 103 * with atomic operations. 104 * 105 * The L2ARC uses the l2ad_mtx on each vdev for the following: 106 * 107 * - L2ARC buflist creation 108 * - L2ARC buflist eviction 109 * - L2ARC write completion, which walks L2ARC buflists 110 * - ARC header destruction, as it removes from L2ARC buflists 111 * - ARC header release, as it removes from L2ARC buflists 112 */ 113 114/* 115 * ARC operation: 116 * 117 * Every block that is in the ARC is tracked by an arc_buf_hdr_t structure. 118 * This structure can point either to a block that is still in the cache or to 119 * one that is only accessible in an L2 ARC device, or it can provide 120 * information about a block that was recently evicted. If a block is 121 * only accessible in the L2ARC, then the arc_buf_hdr_t only has enough 122 * information to retrieve it from the L2ARC device. This information is 123 * stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block 124 * that is in this state cannot access the data directly. 125 * 126 * Blocks that are actively being referenced or have not been evicted 127 * are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within 128 * the arc_buf_hdr_t that will point to the data block in memory. A block can 129 * only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC 130 * caches data in two ways -- in a list of ARC buffers (arc_buf_t) and 131 * also in the arc_buf_hdr_t's private physical data block pointer (b_pabd). 132 * 133 * The L1ARC's data pointer may or may not be uncompressed. The ARC has the 134 * ability to store the physical data (b_pabd) associated with the DVA of the 135 * arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block, 136 * it will match its on-disk compression characteristics. This behavior can be 137 * disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the 138 * compressed ARC functionality is disabled, the b_pabd will point to an 139 * uncompressed version of the on-disk data. 140 * 141 * Data in the L1ARC is not accessed by consumers of the ARC directly. Each 142 * arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it. 
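 *
 * (Tying this back to the locking model described above: the following is a
 * minimal, illustrative lookup sketch, not a quote of any routine in this
 * file.
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *	if (hdr != NULL) {
 *		... most arc_buf_hdr_t fields may be examined here ...
 *		mutex_exit(hash_lock);
 *	}
 *
 * Here 'guid' and 'bp' stand in for the pool guid and block pointer of
 * interest.  Code that already holds an ARC list lock must instead use
 * mutex_tryenter() on the hash lock to avoid deadlock.)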
143 * Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC 144 * consumer. The ARC will provide references to this data and will keep it 145 * cached until it is no longer in use. The ARC caches only the L1ARC's physical 146 * data block and will evict any arc_buf_t that is no longer referenced. The 147 * amount of memory consumed by the arc_buf_ts' data buffers can be seen via the 148 * "overhead_size" kstat. 149 * 150 * Depending on the consumer, an arc_buf_t can be requested in uncompressed or 151 * compressed form. The typical case is that consumers will want uncompressed 152 * data, and when that happens a new data buffer is allocated where the data is 153 * decompressed for them to use. Currently the only consumer who wants 154 * compressed arc_buf_t's is "zfs send", when it streams data exactly as it 155 * exists on disk. When this happens, the arc_buf_t's data buffer is shared 156 * with the arc_buf_hdr_t. 157 * 158 * Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The 159 * first one is owned by a compressed send consumer (and therefore references 160 * the same compressed data buffer as the arc_buf_hdr_t) and the second could be 161 * used by any other consumer (and has its own uncompressed copy of the data 162 * buffer). 163 * 164 * arc_buf_hdr_t 165 * +-----------+ 166 * | fields | 167 * | common to | 168 * | L1- and | 169 * | L2ARC | 170 * +-----------+ 171 * | l2arc_buf_hdr_t 172 * | | 173 * +-----------+ 174 * | l1arc_buf_hdr_t 175 * | | arc_buf_t 176 * | b_buf +------------>+-----------+ arc_buf_t 177 * | b_pabd +-+ |b_next +---->+-----------+ 178 * +-----------+ | |-----------| |b_next +-->NULL 179 * | |b_comp = T | +-----------+ 180 * | |b_data +-+ |b_comp = F | 181 * | +-----------+ | |b_data +-+ 182 * +->+------+ | +-----------+ | 183 * compressed | | | | 184 * data | |<--------------+ | uncompressed 185 * +------+ compressed, | data 186 * shared +-->+------+ 187 * data | | 188 * | | 189 * +------+ 190 * 191 * When a consumer reads a block, the ARC must first look to see if the 192 * arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new 193 * arc_buf_t and either copies uncompressed data into a new data buffer from an 194 * existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a 195 * new data buffer, or shares the hdr's b_pabd buffer, depending on whether the 196 * hdr is compressed and the desired compression characteristics of the 197 * arc_buf_t consumer. If the arc_buf_t ends up sharing data with the 198 * arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be 199 * the last buffer in the hdr's b_buf list, however a shared compressed buf can 200 * be anywhere in the hdr's list. 
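 *
 * Sketched as pseudocode (a simplification of the behavior just described,
 * not a copy of any routine in this file), the per-read decision is roughly:
 *
 *	if (consumer wants compressed data and the hdr's b_pabd is compressed)
 *		share b_pabd with the new arc_buf_t
 *	else if (an uncompressed arc_buf_t already exists for this hdr)
 *		copy its b_data into the new buffer
 *	else
 *		decompress (or copy) b_pabd into the new buffer, possibly
 *		sharing it when both hdr and buf are uncompressed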
201 * 202 * The diagram below shows an example of an uncompressed ARC hdr that is 203 * sharing its data with an arc_buf_t (note that the shared uncompressed buf is 204 * the last element in the buf list): 205 * 206 * arc_buf_hdr_t 207 * +-----------+ 208 * | | 209 * | | 210 * | | 211 * +-----------+ 212 * l2arc_buf_hdr_t| | 213 * | | 214 * +-----------+ 215 * l1arc_buf_hdr_t| | 216 * | | arc_buf_t (shared) 217 * | b_buf +------------>+---------+ arc_buf_t 218 * | | |b_next +---->+---------+ 219 * | b_pabd +-+ |---------| |b_next +-->NULL 220 * +-----------+ | | | +---------+ 221 * | |b_data +-+ | | 222 * | +---------+ | |b_data +-+ 223 * +->+------+ | +---------+ | 224 * | | | | 225 * uncompressed | | | | 226 * data +------+ | | 227 * ^ +->+------+ | 228 * | uncompressed | | | 229 * | data | | | 230 * | +------+ | 231 * +---------------------------------+ 232 * 233 * Writing to the ARC requires that the ARC first discard the hdr's b_pabd 234 * since the physical block is about to be rewritten. The new data contents 235 * will be contained in the arc_buf_t. As the I/O pipeline performs the write, 236 * it may compress the data before writing it to disk. The ARC will be called 237 * with the transformed data and will bcopy the transformed on-disk block into 238 * a newly allocated b_pabd. Writes are always done into buffers which have 239 * either been loaned (and hence are new and don't have other readers) or 240 * buffers which have been released (and hence have their own hdr, if there 241 * were originally other readers of the buf's original hdr). This ensures that 242 * the ARC only needs to update a single buf and its hdr after a write occurs. 243 * 244 * When the L2ARC is in use, it will also take advantage of the b_pabd. The 245 * L2ARC will always write the contents of b_pabd to the L2ARC. This means 246 * that when compressed ARC is enabled that the L2ARC blocks are identical 247 * to the on-disk block in the main data pool. This provides a significant 248 * advantage since the ARC can leverage the bp's checksum when reading from the 249 * L2ARC to determine if the contents are valid. However, if the compressed 250 * ARC is disabled, then the L2ARC's block must be transformed to look 251 * like the physical block in the main data pool before comparing the 252 * checksum and determining its validity. 253 */ 254 255#include <sys/spa.h> 256#include <sys/zio.h> 257#include <sys/spa_impl.h> 258#include <sys/zio_compress.h> 259#include <sys/zio_checksum.h> 260#include <sys/zfs_context.h> 261#include <sys/arc.h> 262#include <sys/refcount.h> 263#include <sys/vdev.h> 264#include <sys/vdev_impl.h> 265#include <sys/dsl_pool.h> 266#include <sys/zio_checksum.h> 267#include <sys/multilist.h> 268#include <sys/abd.h> 269#ifdef _KERNEL 270#include <sys/dnlc.h> 271#include <sys/racct.h> 272#endif 273#include <sys/callb.h> 274#include <sys/kstat.h> 275#include <sys/trim_map.h> 276#include <sys/zthr.h> 277#include <zfs_fletcher.h> 278#include <sys/sdt.h> 279#include <sys/aggsum.h> 280#include <sys/cityhash.h> 281 282#include <machine/vmparam.h> 283 284#ifdef illumos 285#ifndef _KERNEL 286/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */ 287boolean_t arc_watch = B_FALSE; 288int arc_procfd; 289#endif 290#endif /* illumos */ 291 292/* 293 * This thread's job is to keep enough free memory in the system, by 294 * calling arc_kmem_reap_now() plus arc_shrink(), which improves 295 * arc_available_memory(). 
296 */ 297static zthr_t *arc_reap_zthr; 298 299/* 300 * This thread's job is to keep arc_size under arc_c, by calling 301 * arc_adjust(), which improves arc_is_overflowing(). 302 */ 303static zthr_t *arc_adjust_zthr; 304 305static kmutex_t arc_adjust_lock; 306static kcondvar_t arc_adjust_waiters_cv; 307static boolean_t arc_adjust_needed = B_FALSE; 308 309static kmutex_t arc_dnlc_evicts_lock; 310static kcondvar_t arc_dnlc_evicts_cv; 311static boolean_t arc_dnlc_evicts_thread_exit; 312 313uint_t arc_reduce_dnlc_percent = 3; 314 315/* 316 * The number of headers to evict in arc_evict_state_impl() before 317 * dropping the sublist lock and evicting from another sublist. A lower 318 * value means we're more likely to evict the "correct" header (i.e. the 319 * oldest header in the arc state), but comes with higher overhead 320 * (i.e. more invocations of arc_evict_state_impl()). 321 */ 322int zfs_arc_evict_batch_limit = 10; 323 324/* number of seconds before growing cache again */ 325int arc_grow_retry = 60; 326 327/* 328 * Minimum time between calls to arc_kmem_reap_soon(). Note that this will 329 * be converted to ticks, so with the default hz=100, a setting of 15 ms 330 * will actually wait 2 ticks, or 20ms. 331 */ 332int arc_kmem_cache_reap_retry_ms = 1000; 333 334/* shift of arc_c for calculating overflow limit in arc_get_data_impl */ 335int zfs_arc_overflow_shift = 8; 336 337/* shift of arc_c for calculating both min and max arc_p */ 338int arc_p_min_shift = 4; 339 340/* log2(fraction of arc to reclaim) */ 341int arc_shrink_shift = 7; 342 343/* 344 * log2(fraction of ARC which must be free to allow growing). 345 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory, 346 * when reading a new block into the ARC, we will evict an equal-sized block 347 * from the ARC. 348 * 349 * This must be less than arc_shrink_shift, so that when we shrink the ARC, 350 * we will still not allow it to grow. 351 */ 352int arc_no_grow_shift = 5; 353 354 355/* 356 * minimum lifespan of a prefetch block in clock ticks 357 * (initialized in arc_init()) 358 */ 359static int zfs_arc_min_prefetch_ms = 1; 360static int zfs_arc_min_prescient_prefetch_ms = 6; 361 362/* 363 * If this percent of memory is free, don't throttle. 364 */ 365int arc_lotsfree_percent = 10; 366 367static boolean_t arc_initialized; 368extern boolean_t zfs_prefetch_disable; 369 370/* 371 * The arc has filled available memory and has now warmed up. 372 */ 373static boolean_t arc_warm; 374 375/* 376 * log2 fraction of the zio arena to keep free. 377 */ 378int arc_zio_arena_free_shift = 2; 379 380/* 381 * These tunables are for performance analysis. 382 */ 383uint64_t zfs_arc_max; 384uint64_t zfs_arc_min; 385uint64_t zfs_arc_meta_limit = 0; 386uint64_t zfs_arc_meta_min = 0; 387int zfs_arc_grow_retry = 0; 388int zfs_arc_shrink_shift = 0; 389int zfs_arc_no_grow_shift = 0; 390int zfs_arc_p_min_shift = 0; 391uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */ 392u_int zfs_arc_free_target = 0; 393 394/* Absolute min for arc min / max is 16MB. 
*/ 395static uint64_t arc_abs_min = 16 << 20; 396 397/* 398 * ARC dirty data constraints for arc_tempreserve_space() throttle 399 */ 400uint_t zfs_arc_dirty_limit_percent = 50; /* total dirty data limit */ 401uint_t zfs_arc_anon_limit_percent = 25; /* anon block dirty limit */ 402uint_t zfs_arc_pool_dirty_percent = 20; /* each pool's anon allowance */ 403 404boolean_t zfs_compressed_arc_enabled = B_TRUE; 405 406static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS); 407static int sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS); 408static int sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS); 409static int sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS); 410static int sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS); 411 412#if defined(__FreeBSD__) && defined(_KERNEL) 413static void 414arc_free_target_init(void *unused __unused) 415{ 416 417 zfs_arc_free_target = vm_pageout_wakeup_thresh; 418} 419SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY, 420 arc_free_target_init, NULL); 421 422TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit); 423TUNABLE_QUAD("vfs.zfs.arc_meta_min", &zfs_arc_meta_min); 424TUNABLE_INT("vfs.zfs.arc_shrink_shift", &zfs_arc_shrink_shift); 425TUNABLE_INT("vfs.zfs.arc_grow_retry", &zfs_arc_grow_retry); 426TUNABLE_INT("vfs.zfs.arc_no_grow_shift", &zfs_arc_no_grow_shift); 427SYSCTL_DECL(_vfs_zfs); 428SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_max, CTLTYPE_U64 | CTLFLAG_RWTUN, 429 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_max, "QU", "Maximum ARC size"); 430SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_min, CTLTYPE_U64 | CTLFLAG_RWTUN, 431 0, sizeof(uint64_t), sysctl_vfs_zfs_arc_min, "QU", "Minimum ARC size"); 432SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_no_grow_shift, CTLTYPE_U32 | CTLFLAG_RWTUN, 433 0, sizeof(uint32_t), sysctl_vfs_zfs_arc_no_grow_shift, "U", 434 "log2(fraction of ARC which must be free to allow growing)"); 435SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN, 436 &zfs_arc_average_blocksize, 0, 437 "ARC average blocksize"); 438SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW, 439 &arc_shrink_shift, 0, 440 "log2(fraction of arc to reclaim)"); 441SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_grow_retry, CTLFLAG_RW, 442 &arc_grow_retry, 0, 443 "Wait in seconds before considering growing ARC"); 444SYSCTL_INT(_vfs_zfs, OID_AUTO, compressed_arc_enabled, CTLFLAG_RDTUN, 445 &zfs_compressed_arc_enabled, 0, 446 "Enable compressed ARC"); 447SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_kmem_cache_reap_retry_ms, CTLFLAG_RWTUN, 448 &arc_kmem_cache_reap_retry_ms, 0, 449 "Interval between ARC kmem_cache reapings"); 450 451/* 452 * We don't have a tunable for arc_free_target due to the dependency on 453 * pagedaemon initialisation. 454 */ 455SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target, 456 CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int), 457 sysctl_vfs_zfs_arc_free_target, "IU", 458 "Desired number of free pages below which ARC triggers reclaim"); 459 460static int 461sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS) 462{ 463 u_int val; 464 int err; 465 466 val = zfs_arc_free_target; 467 err = sysctl_handle_int(oidp, &val, 0, req); 468 if (err != 0 || req->newptr == NULL) 469 return (err); 470 471 if (val < minfree) 472 return (EINVAL); 473 if (val > vm_cnt.v_page_count) 474 return (EINVAL); 475 476 zfs_arc_free_target = val; 477 478 return (0); 479} 480 481/* 482 * Must be declared here, before the definition of corresponding kstat 483 * macro which uses the same names will confuse the compiler. 
 */
SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_meta_limit,
    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
    sysctl_vfs_zfs_arc_meta_limit, "QU",
    "ARC metadata limit");
#endif

/*
 * Note that buffers can be in one of 6 states:
 *	ARC_anon	- anonymous (discussed below)
 *	ARC_mru		- recently used, currently cached
 *	ARC_mru_ghost	- recently used, no longer in cache
 *	ARC_mfu		- frequently used, currently cached
 *	ARC_mfu_ghost	- frequently used, no longer in cache
 *	ARC_l2c_only	- exists in L2ARC but not other states
 * When there are no active references to the buffer, they are
 * linked onto a list in one of these arc states.  These are
 * the only buffers that can be evicted or deleted.  Within each
 * state there are multiple lists, one for meta-data and one for
 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
 * etc.) is tracked separately so that it can be managed more
 * explicitly: favored over data, limited explicitly.
 *
 * Anonymous buffers are buffers that are not associated with
 * a DVA.  These are buffers that hold dirty block copies
 * before they are written to stable storage.  By definition,
 * they are "ref'd" and are considered part of arc_mru
 * that cannot be freed.  Generally, they will acquire a DVA
 * as they are written and migrate onto the arc_mru list.
 *
 * The ARC_l2c_only state is for buffers that are in the second
 * level ARC but no longer in any of the ARC_m* lists.  The second
 * level ARC itself may also contain buffers that are in any of
 * the ARC_m* states - meaning that a buffer can exist in two
 * places.  The reason for the ARC_l2c_only state is to keep the
 * buffer header in the hash table, so that reads that hit the
 * second level ARC benefit from these fast lookups.
 */

typedef struct arc_state {
	/*
	 * list of evictable buffers
	 */
	multilist_t *arcs_list[ARC_BUFC_NUMTYPES];
	/*
	 * total amount of evictable data in this state
	 */
	refcount_t arcs_esize[ARC_BUFC_NUMTYPES];
	/*
	 * total amount of data in this state; this includes: evictable,
	 * non-evictable, ARC_BUFC_DATA, and ARC_BUFC_METADATA.
	 */
	refcount_t arcs_size;
} arc_state_t;

/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;

typedef struct arc_stats {
	kstat_named_t arcstat_hits;
	kstat_named_t arcstat_misses;
	kstat_named_t arcstat_demand_data_hits;
	kstat_named_t arcstat_demand_data_misses;
	kstat_named_t arcstat_demand_metadata_hits;
	kstat_named_t arcstat_demand_metadata_misses;
	kstat_named_t arcstat_prefetch_data_hits;
	kstat_named_t arcstat_prefetch_data_misses;
	kstat_named_t arcstat_prefetch_metadata_hits;
	kstat_named_t arcstat_prefetch_metadata_misses;
	kstat_named_t arcstat_mru_hits;
	kstat_named_t arcstat_mru_ghost_hits;
	kstat_named_t arcstat_mfu_hits;
	kstat_named_t arcstat_mfu_ghost_hits;
	kstat_named_t arcstat_allocated;
	kstat_named_t arcstat_deleted;
	/*
	 * Number of buffers that could not be evicted because the hash lock
	 * was held by another thread.  The lock may not necessarily be held
	 * by something using the same buffer, since hash locks are shared
	 * by multiple buffers.
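	 *
	 * Illustratively (a sketch of the pattern, not a quote of the
	 * eviction code), the eviction path reacts to a contended hash
	 * lock roughly like this:
	 *
	 *	if (!mutex_tryenter(hash_lock)) {
	 *		ARCSTAT_BUMP(arcstat_mutex_miss);
	 *		continue;	// skip this header for now
	 *	}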
569 */ 570 kstat_named_t arcstat_mutex_miss; 571 /* 572 * Number of buffers skipped when updating the access state due to the 573 * header having already been released after acquiring the hash lock. 574 */ 575 kstat_named_t arcstat_access_skip; 576 /* 577 * Number of buffers skipped because they have I/O in progress, are 578 * indirect prefetch buffers that have not lived long enough, or are 579 * not from the spa we're trying to evict from. 580 */ 581 kstat_named_t arcstat_evict_skip; 582 /* 583 * Number of times arc_evict_state() was unable to evict enough 584 * buffers to reach it's target amount. 585 */ 586 kstat_named_t arcstat_evict_not_enough; 587 kstat_named_t arcstat_evict_l2_cached; 588 kstat_named_t arcstat_evict_l2_eligible; 589 kstat_named_t arcstat_evict_l2_ineligible; 590 kstat_named_t arcstat_evict_l2_skip; 591 kstat_named_t arcstat_hash_elements; 592 kstat_named_t arcstat_hash_elements_max; 593 kstat_named_t arcstat_hash_collisions; 594 kstat_named_t arcstat_hash_chains; 595 kstat_named_t arcstat_hash_chain_max; 596 kstat_named_t arcstat_p; 597 kstat_named_t arcstat_c; 598 kstat_named_t arcstat_c_min; 599 kstat_named_t arcstat_c_max; 600 /* Not updated directly; only synced in arc_kstat_update. */ 601 kstat_named_t arcstat_size; 602 /* 603 * Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd. 604 * Note that the compressed bytes may match the uncompressed bytes 605 * if the block is either not compressed or compressed arc is disabled. 606 */ 607 kstat_named_t arcstat_compressed_size; 608 /* 609 * Uncompressed size of the data stored in b_pabd. If compressed 610 * arc is disabled then this value will be identical to the stat 611 * above. 612 */ 613 kstat_named_t arcstat_uncompressed_size; 614 /* 615 * Number of bytes stored in all the arc_buf_t's. This is classified 616 * as "overhead" since this data is typically short-lived and will 617 * be evicted from the arc when it becomes unreferenced unless the 618 * zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level 619 * values have been set (see comment in dbuf.c for more information). 620 */ 621 kstat_named_t arcstat_overhead_size; 622 /* 623 * Number of bytes consumed by internal ARC structures necessary 624 * for tracking purposes; these structures are not actually 625 * backed by ARC buffers. This includes arc_buf_hdr_t structures 626 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only 627 * caches), and arc_buf_t structures (allocated via arc_buf_t 628 * cache). 629 * Not updated directly; only synced in arc_kstat_update. 630 */ 631 kstat_named_t arcstat_hdr_size; 632 /* 633 * Number of bytes consumed by ARC buffers of type equal to 634 * ARC_BUFC_DATA. This is generally consumed by buffers backing 635 * on disk user data (e.g. plain file contents). 636 * Not updated directly; only synced in arc_kstat_update. 637 */ 638 kstat_named_t arcstat_data_size; 639 /* 640 * Number of bytes consumed by ARC buffers of type equal to 641 * ARC_BUFC_METADATA. This is generally consumed by buffers 642 * backing on disk data that is used for internal ZFS 643 * structures (e.g. ZAP, dnode, indirect blocks, etc). 644 * Not updated directly; only synced in arc_kstat_update. 645 */ 646 kstat_named_t arcstat_metadata_size; 647 /* 648 * Number of bytes consumed by various buffers and structures 649 * not actually backed with ARC buffers. 
This includes bonus 650 * buffers (allocated directly via zio_buf_* functions), 651 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t 652 * cache), and dnode_t structures (allocated via dnode_t cache). 653 * Not updated directly; only synced in arc_kstat_update. 654 */ 655 kstat_named_t arcstat_other_size; 656 /* 657 * Total number of bytes consumed by ARC buffers residing in the 658 * arc_anon state. This includes *all* buffers in the arc_anon 659 * state; e.g. data, metadata, evictable, and unevictable buffers 660 * are all included in this value. 661 * Not updated directly; only synced in arc_kstat_update. 662 */ 663 kstat_named_t arcstat_anon_size; 664 /* 665 * Number of bytes consumed by ARC buffers that meet the 666 * following criteria: backing buffers of type ARC_BUFC_DATA, 667 * residing in the arc_anon state, and are eligible for eviction 668 * (e.g. have no outstanding holds on the buffer). 669 * Not updated directly; only synced in arc_kstat_update. 670 */ 671 kstat_named_t arcstat_anon_evictable_data; 672 /* 673 * Number of bytes consumed by ARC buffers that meet the 674 * following criteria: backing buffers of type ARC_BUFC_METADATA, 675 * residing in the arc_anon state, and are eligible for eviction 676 * (e.g. have no outstanding holds on the buffer). 677 * Not updated directly; only synced in arc_kstat_update. 678 */ 679 kstat_named_t arcstat_anon_evictable_metadata; 680 /* 681 * Total number of bytes consumed by ARC buffers residing in the 682 * arc_mru state. This includes *all* buffers in the arc_mru 683 * state; e.g. data, metadata, evictable, and unevictable buffers 684 * are all included in this value. 685 * Not updated directly; only synced in arc_kstat_update. 686 */ 687 kstat_named_t arcstat_mru_size; 688 /* 689 * Number of bytes consumed by ARC buffers that meet the 690 * following criteria: backing buffers of type ARC_BUFC_DATA, 691 * residing in the arc_mru state, and are eligible for eviction 692 * (e.g. have no outstanding holds on the buffer). 693 * Not updated directly; only synced in arc_kstat_update. 694 */ 695 kstat_named_t arcstat_mru_evictable_data; 696 /* 697 * Number of bytes consumed by ARC buffers that meet the 698 * following criteria: backing buffers of type ARC_BUFC_METADATA, 699 * residing in the arc_mru state, and are eligible for eviction 700 * (e.g. have no outstanding holds on the buffer). 701 * Not updated directly; only synced in arc_kstat_update. 702 */ 703 kstat_named_t arcstat_mru_evictable_metadata; 704 /* 705 * Total number of bytes that *would have been* consumed by ARC 706 * buffers in the arc_mru_ghost state. The key thing to note 707 * here, is the fact that this size doesn't actually indicate 708 * RAM consumption. The ghost lists only consist of headers and 709 * don't actually have ARC buffers linked off of these headers. 710 * Thus, *if* the headers had associated ARC buffers, these 711 * buffers *would have* consumed this number of bytes. 712 * Not updated directly; only synced in arc_kstat_update. 713 */ 714 kstat_named_t arcstat_mru_ghost_size; 715 /* 716 * Number of bytes that *would have been* consumed by ARC 717 * buffers that are eligible for eviction, of type 718 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state. 719 * Not updated directly; only synced in arc_kstat_update. 
720 */ 721 kstat_named_t arcstat_mru_ghost_evictable_data; 722 /* 723 * Number of bytes that *would have been* consumed by ARC 724 * buffers that are eligible for eviction, of type 725 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state. 726 * Not updated directly; only synced in arc_kstat_update. 727 */ 728 kstat_named_t arcstat_mru_ghost_evictable_metadata; 729 /* 730 * Total number of bytes consumed by ARC buffers residing in the 731 * arc_mfu state. This includes *all* buffers in the arc_mfu 732 * state; e.g. data, metadata, evictable, and unevictable buffers 733 * are all included in this value. 734 * Not updated directly; only synced in arc_kstat_update. 735 */ 736 kstat_named_t arcstat_mfu_size; 737 /* 738 * Number of bytes consumed by ARC buffers that are eligible for 739 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu 740 * state. 741 * Not updated directly; only synced in arc_kstat_update. 742 */ 743 kstat_named_t arcstat_mfu_evictable_data; 744 /* 745 * Number of bytes consumed by ARC buffers that are eligible for 746 * eviction, of type ARC_BUFC_METADATA, and reside in the 747 * arc_mfu state. 748 * Not updated directly; only synced in arc_kstat_update. 749 */ 750 kstat_named_t arcstat_mfu_evictable_metadata; 751 /* 752 * Total number of bytes that *would have been* consumed by ARC 753 * buffers in the arc_mfu_ghost state. See the comment above 754 * arcstat_mru_ghost_size for more details. 755 * Not updated directly; only synced in arc_kstat_update. 756 */ 757 kstat_named_t arcstat_mfu_ghost_size; 758 /* 759 * Number of bytes that *would have been* consumed by ARC 760 * buffers that are eligible for eviction, of type 761 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state. 762 * Not updated directly; only synced in arc_kstat_update. 763 */ 764 kstat_named_t arcstat_mfu_ghost_evictable_data; 765 /* 766 * Number of bytes that *would have been* consumed by ARC 767 * buffers that are eligible for eviction, of type 768 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state. 769 * Not updated directly; only synced in arc_kstat_update. 770 */ 771 kstat_named_t arcstat_mfu_ghost_evictable_metadata; 772 kstat_named_t arcstat_l2_hits; 773 kstat_named_t arcstat_l2_misses; 774 kstat_named_t arcstat_l2_feeds; 775 kstat_named_t arcstat_l2_rw_clash; 776 kstat_named_t arcstat_l2_read_bytes; 777 kstat_named_t arcstat_l2_write_bytes; 778 kstat_named_t arcstat_l2_writes_sent; 779 kstat_named_t arcstat_l2_writes_done; 780 kstat_named_t arcstat_l2_writes_error; 781 kstat_named_t arcstat_l2_writes_lock_retry; 782 kstat_named_t arcstat_l2_evict_lock_retry; 783 kstat_named_t arcstat_l2_evict_reading; 784 kstat_named_t arcstat_l2_evict_l1cached; 785 kstat_named_t arcstat_l2_free_on_write; 786 kstat_named_t arcstat_l2_abort_lowmem; 787 kstat_named_t arcstat_l2_cksum_bad; 788 kstat_named_t arcstat_l2_io_error; 789 kstat_named_t arcstat_l2_lsize; 790 kstat_named_t arcstat_l2_psize; 791 /* Not updated directly; only synced in arc_kstat_update. 
*/ 792 kstat_named_t arcstat_l2_hdr_size; 793 kstat_named_t arcstat_l2_write_trylock_fail; 794 kstat_named_t arcstat_l2_write_passed_headroom; 795 kstat_named_t arcstat_l2_write_spa_mismatch; 796 kstat_named_t arcstat_l2_write_in_l2; 797 kstat_named_t arcstat_l2_write_hdr_io_in_progress; 798 kstat_named_t arcstat_l2_write_not_cacheable; 799 kstat_named_t arcstat_l2_write_full; 800 kstat_named_t arcstat_l2_write_buffer_iter; 801 kstat_named_t arcstat_l2_write_pios; 802 kstat_named_t arcstat_l2_write_buffer_bytes_scanned; 803 kstat_named_t arcstat_l2_write_buffer_list_iter; 804 kstat_named_t arcstat_l2_write_buffer_list_null_iter; 805 kstat_named_t arcstat_memory_throttle_count; 806 /* Not updated directly; only synced in arc_kstat_update. */ 807 kstat_named_t arcstat_meta_used; 808 kstat_named_t arcstat_meta_limit; 809 kstat_named_t arcstat_meta_max; 810 kstat_named_t arcstat_meta_min; 811 kstat_named_t arcstat_async_upgrade_sync; 812 kstat_named_t arcstat_demand_hit_predictive_prefetch; 813 kstat_named_t arcstat_demand_hit_prescient_prefetch; 814} arc_stats_t; 815 816static arc_stats_t arc_stats = { 817 { "hits", KSTAT_DATA_UINT64 }, 818 { "misses", KSTAT_DATA_UINT64 }, 819 { "demand_data_hits", KSTAT_DATA_UINT64 }, 820 { "demand_data_misses", KSTAT_DATA_UINT64 }, 821 { "demand_metadata_hits", KSTAT_DATA_UINT64 }, 822 { "demand_metadata_misses", KSTAT_DATA_UINT64 }, 823 { "prefetch_data_hits", KSTAT_DATA_UINT64 }, 824 { "prefetch_data_misses", KSTAT_DATA_UINT64 }, 825 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 }, 826 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 }, 827 { "mru_hits", KSTAT_DATA_UINT64 }, 828 { "mru_ghost_hits", KSTAT_DATA_UINT64 }, 829 { "mfu_hits", KSTAT_DATA_UINT64 }, 830 { "mfu_ghost_hits", KSTAT_DATA_UINT64 }, 831 { "allocated", KSTAT_DATA_UINT64 }, 832 { "deleted", KSTAT_DATA_UINT64 }, 833 { "mutex_miss", KSTAT_DATA_UINT64 }, 834 { "access_skip", KSTAT_DATA_UINT64 }, 835 { "evict_skip", KSTAT_DATA_UINT64 }, 836 { "evict_not_enough", KSTAT_DATA_UINT64 }, 837 { "evict_l2_cached", KSTAT_DATA_UINT64 }, 838 { "evict_l2_eligible", KSTAT_DATA_UINT64 }, 839 { "evict_l2_ineligible", KSTAT_DATA_UINT64 }, 840 { "evict_l2_skip", KSTAT_DATA_UINT64 }, 841 { "hash_elements", KSTAT_DATA_UINT64 }, 842 { "hash_elements_max", KSTAT_DATA_UINT64 }, 843 { "hash_collisions", KSTAT_DATA_UINT64 }, 844 { "hash_chains", KSTAT_DATA_UINT64 }, 845 { "hash_chain_max", KSTAT_DATA_UINT64 }, 846 { "p", KSTAT_DATA_UINT64 }, 847 { "c", KSTAT_DATA_UINT64 }, 848 { "c_min", KSTAT_DATA_UINT64 }, 849 { "c_max", KSTAT_DATA_UINT64 }, 850 { "size", KSTAT_DATA_UINT64 }, 851 { "compressed_size", KSTAT_DATA_UINT64 }, 852 { "uncompressed_size", KSTAT_DATA_UINT64 }, 853 { "overhead_size", KSTAT_DATA_UINT64 }, 854 { "hdr_size", KSTAT_DATA_UINT64 }, 855 { "data_size", KSTAT_DATA_UINT64 }, 856 { "metadata_size", KSTAT_DATA_UINT64 }, 857 { "other_size", KSTAT_DATA_UINT64 }, 858 { "anon_size", KSTAT_DATA_UINT64 }, 859 { "anon_evictable_data", KSTAT_DATA_UINT64 }, 860 { "anon_evictable_metadata", KSTAT_DATA_UINT64 }, 861 { "mru_size", KSTAT_DATA_UINT64 }, 862 { "mru_evictable_data", KSTAT_DATA_UINT64 }, 863 { "mru_evictable_metadata", KSTAT_DATA_UINT64 }, 864 { "mru_ghost_size", KSTAT_DATA_UINT64 }, 865 { "mru_ghost_evictable_data", KSTAT_DATA_UINT64 }, 866 { "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 867 { "mfu_size", KSTAT_DATA_UINT64 }, 868 { "mfu_evictable_data", KSTAT_DATA_UINT64 }, 869 { "mfu_evictable_metadata", KSTAT_DATA_UINT64 }, 870 { "mfu_ghost_size", KSTAT_DATA_UINT64 }, 871 { 
"mfu_ghost_evictable_data", KSTAT_DATA_UINT64 }, 872 { "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 }, 873 { "l2_hits", KSTAT_DATA_UINT64 }, 874 { "l2_misses", KSTAT_DATA_UINT64 }, 875 { "l2_feeds", KSTAT_DATA_UINT64 }, 876 { "l2_rw_clash", KSTAT_DATA_UINT64 }, 877 { "l2_read_bytes", KSTAT_DATA_UINT64 }, 878 { "l2_write_bytes", KSTAT_DATA_UINT64 }, 879 { "l2_writes_sent", KSTAT_DATA_UINT64 }, 880 { "l2_writes_done", KSTAT_DATA_UINT64 }, 881 { "l2_writes_error", KSTAT_DATA_UINT64 }, 882 { "l2_writes_lock_retry", KSTAT_DATA_UINT64 }, 883 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 }, 884 { "l2_evict_reading", KSTAT_DATA_UINT64 }, 885 { "l2_evict_l1cached", KSTAT_DATA_UINT64 }, 886 { "l2_free_on_write", KSTAT_DATA_UINT64 }, 887 { "l2_abort_lowmem", KSTAT_DATA_UINT64 }, 888 { "l2_cksum_bad", KSTAT_DATA_UINT64 }, 889 { "l2_io_error", KSTAT_DATA_UINT64 }, 890 { "l2_size", KSTAT_DATA_UINT64 }, 891 { "l2_asize", KSTAT_DATA_UINT64 }, 892 { "l2_hdr_size", KSTAT_DATA_UINT64 }, 893 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 }, 894 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 }, 895 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 }, 896 { "l2_write_in_l2", KSTAT_DATA_UINT64 }, 897 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 }, 898 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 }, 899 { "l2_write_full", KSTAT_DATA_UINT64 }, 900 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 }, 901 { "l2_write_pios", KSTAT_DATA_UINT64 }, 902 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 }, 903 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 }, 904 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 }, 905 { "memory_throttle_count", KSTAT_DATA_UINT64 }, 906 { "arc_meta_used", KSTAT_DATA_UINT64 }, 907 { "arc_meta_limit", KSTAT_DATA_UINT64 }, 908 { "arc_meta_max", KSTAT_DATA_UINT64 }, 909 { "arc_meta_min", KSTAT_DATA_UINT64 }, 910 { "async_upgrade_sync", KSTAT_DATA_UINT64 }, 911 { "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 }, 912 { "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 }, 913}; 914 915#define ARCSTAT(stat) (arc_stats.stat.value.ui64) 916 917#define ARCSTAT_INCR(stat, val) \ 918 atomic_add_64(&arc_stats.stat.value.ui64, (val)) 919 920#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1) 921#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1) 922 923#define ARCSTAT_MAX(stat, val) { \ 924 uint64_t m; \ 925 while ((val) > (m = arc_stats.stat.value.ui64) && \ 926 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \ 927 continue; \ 928} 929 930#define ARCSTAT_MAXSTAT(stat) \ 931 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64) 932 933/* 934 * We define a macro to allow ARC hits/misses to be easily broken down by 935 * two separate conditions, giving a total of four different subtypes for 936 * each of hits and misses (so eight statistics total). 
937 */ 938#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \ 939 if (cond1) { \ 940 if (cond2) { \ 941 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \ 942 } else { \ 943 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \ 944 } \ 945 } else { \ 946 if (cond2) { \ 947 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \ 948 } else { \ 949 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\ 950 } \ 951 } 952 953kstat_t *arc_ksp; 954static arc_state_t *arc_anon; 955static arc_state_t *arc_mru; 956static arc_state_t *arc_mru_ghost; 957static arc_state_t *arc_mfu; 958static arc_state_t *arc_mfu_ghost; 959static arc_state_t *arc_l2c_only; 960 961/* 962 * There are several ARC variables that are critical to export as kstats -- 963 * but we don't want to have to grovel around in the kstat whenever we wish to 964 * manipulate them. For these variables, we therefore define them to be in 965 * terms of the statistic variable. This assures that we are not introducing 966 * the possibility of inconsistency by having shadow copies of the variables, 967 * while still allowing the code to be readable. 968 */ 969#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */ 970#define arc_c ARCSTAT(arcstat_c) /* target size of cache */ 971#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */ 972#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */ 973#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */ 974#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */ 975#define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */ 976 977/* compressed size of entire arc */ 978#define arc_compressed_size ARCSTAT(arcstat_compressed_size) 979/* uncompressed size of entire arc */ 980#define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size) 981/* number of bytes in the arc from arc_buf_t's */ 982#define arc_overhead_size ARCSTAT(arcstat_overhead_size) 983 984/* 985 * There are also some ARC variables that we want to export, but that are 986 * updated so often that having the canonical representation be the statistic 987 * variable causes a performance bottleneck. We want to use aggsum_t's for these 988 * instead, but still be able to export the kstat in the same way as before. 989 * The solution is to always use the aggsum version, except in the kstat update 990 * callback. 
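 *
 * Illustrative sketch of the pattern (hedged; the actual call sites live in
 * the allocation and kstat code elsewhere in this file):
 *
 *	aggsum_add(&arc_size, space);		// cheap update on hot paths
 *	...
 *	// kstat update callback only:
 *	ARCSTAT(arcstat_size) = aggsum_value(&arc_size);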
991 */ 992aggsum_t arc_size; 993aggsum_t arc_meta_used; 994aggsum_t astat_data_size; 995aggsum_t astat_metadata_size; 996aggsum_t astat_hdr_size; 997aggsum_t astat_other_size; 998aggsum_t astat_l2_hdr_size; 999 1000static int arc_no_grow; /* Don't try to grow cache size */ 1001static hrtime_t arc_growtime; 1002static uint64_t arc_tempreserve; 1003static uint64_t arc_loaned_bytes; 1004 1005typedef struct arc_callback arc_callback_t; 1006 1007struct arc_callback { 1008 void *acb_private; 1009 arc_read_done_func_t *acb_done; 1010 arc_buf_t *acb_buf; 1011 boolean_t acb_compressed; 1012 zio_t *acb_zio_dummy; 1013 zio_t *acb_zio_head; 1014 arc_callback_t *acb_next; 1015}; 1016 1017typedef struct arc_write_callback arc_write_callback_t; 1018 1019struct arc_write_callback { 1020 void *awcb_private; 1021 arc_write_done_func_t *awcb_ready; 1022 arc_write_done_func_t *awcb_children_ready; 1023 arc_write_done_func_t *awcb_physdone; 1024 arc_write_done_func_t *awcb_done; 1025 arc_buf_t *awcb_buf; 1026}; 1027 1028/* 1029 * ARC buffers are separated into multiple structs as a memory saving measure: 1030 * - Common fields struct, always defined, and embedded within it: 1031 * - L2-only fields, always allocated but undefined when not in L2ARC 1032 * - L1-only fields, only allocated when in L1ARC 1033 * 1034 * Buffer in L1 Buffer only in L2 1035 * +------------------------+ +------------------------+ 1036 * | arc_buf_hdr_t | | arc_buf_hdr_t | 1037 * | | | | 1038 * | | | | 1039 * | | | | 1040 * +------------------------+ +------------------------+ 1041 * | l2arc_buf_hdr_t | | l2arc_buf_hdr_t | 1042 * | (undefined if L1-only) | | | 1043 * +------------------------+ +------------------------+ 1044 * | l1arc_buf_hdr_t | 1045 * | | 1046 * | | 1047 * | | 1048 * | | 1049 * +------------------------+ 1050 * 1051 * Because it's possible for the L2ARC to become extremely large, we can wind 1052 * up eating a lot of memory in L2ARC buffer headers, so the size of a header 1053 * is minimized by only allocating the fields necessary for an L1-cached buffer 1054 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and 1055 * l2arc_buf_hdr) are embedded rather than allocated separately to save a couple 1056 * words in pointers. arc_hdr_realloc() is used to switch a header between 1057 * these two allocation states. 1058 */ 1059typedef struct l1arc_buf_hdr { 1060 kmutex_t b_freeze_lock; 1061 zio_cksum_t *b_freeze_cksum; 1062#ifdef ZFS_DEBUG 1063 /* 1064 * Used for debugging with kmem_flags - by allocating and freeing 1065 * b_thawed when the buffer is thawed, we get a record of the stack 1066 * trace that thawed it. 
1067 */ 1068 void *b_thawed; 1069#endif 1070 1071 arc_buf_t *b_buf; 1072 uint32_t b_bufcnt; 1073 /* for waiting on writes to complete */ 1074 kcondvar_t b_cv; 1075 uint8_t b_byteswap; 1076 1077 /* protected by arc state mutex */ 1078 arc_state_t *b_state; 1079 multilist_node_t b_arc_node; 1080 1081 /* updated atomically */ 1082 clock_t b_arc_access; 1083 1084 /* self protecting */ 1085 refcount_t b_refcnt; 1086 1087 arc_callback_t *b_acb; 1088 abd_t *b_pabd; 1089} l1arc_buf_hdr_t; 1090 1091typedef struct l2arc_dev l2arc_dev_t; 1092 1093typedef struct l2arc_buf_hdr { 1094 /* protected by arc_buf_hdr mutex */ 1095 l2arc_dev_t *b_dev; /* L2ARC device */ 1096 uint64_t b_daddr; /* disk address, offset byte */ 1097 1098 list_node_t b_l2node; 1099} l2arc_buf_hdr_t; 1100 1101struct arc_buf_hdr { 1102 /* protected by hash lock */ 1103 dva_t b_dva; 1104 uint64_t b_birth; 1105 1106 arc_buf_contents_t b_type; 1107 arc_buf_hdr_t *b_hash_next; 1108 arc_flags_t b_flags; 1109 1110 /* 1111 * This field stores the size of the data buffer after 1112 * compression, and is set in the arc's zio completion handlers. 1113 * It is in units of SPA_MINBLOCKSIZE (e.g. 1 == 512 bytes). 1114 * 1115 * While the block pointers can store up to 32MB in their psize 1116 * field, we can only store up to 32MB minus 512B. This is due 1117 * to the bp using a bias of 1, whereas we use a bias of 0 (i.e. 1118 * a field of zeros represents 512B in the bp). We can't use a 1119 * bias of 1 since we need to reserve a psize of zero, here, to 1120 * represent holes and embedded blocks. 1121 * 1122 * This isn't a problem in practice, since the maximum size of a 1123 * buffer is limited to 16MB, so we never need to store 32MB in 1124 * this field. Even in the upstream illumos code base, the 1125 * maximum size of a buffer is limited to 16MB. 1126 */ 1127 uint16_t b_psize; 1128 1129 /* 1130 * This field stores the size of the data buffer before 1131 * compression, and cannot change once set. It is in units 1132 * of SPA_MINBLOCKSIZE (e.g. 2 == 1024 bytes) 1133 */ 1134 uint16_t b_lsize; /* immutable */ 1135 uint64_t b_spa; /* immutable */ 1136 1137 /* L2ARC fields. Undefined when not in L2ARC. */ 1138 l2arc_buf_hdr_t b_l2hdr; 1139 /* L1ARC fields. 
Undefined when in l2arc_only state */ 1140 l1arc_buf_hdr_t b_l1hdr; 1141}; 1142 1143#if defined(__FreeBSD__) && defined(_KERNEL) 1144static int 1145sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS) 1146{ 1147 uint64_t val; 1148 int err; 1149 1150 val = arc_meta_limit; 1151 err = sysctl_handle_64(oidp, &val, 0, req); 1152 if (err != 0 || req->newptr == NULL) 1153 return (err); 1154 1155 if (val <= 0 || val > arc_c_max) 1156 return (EINVAL); 1157 1158 arc_meta_limit = val; 1159 return (0); 1160} 1161 1162static int 1163sysctl_vfs_zfs_arc_no_grow_shift(SYSCTL_HANDLER_ARGS) 1164{ 1165 uint32_t val; 1166 int err; 1167 1168 val = arc_no_grow_shift; 1169 err = sysctl_handle_32(oidp, &val, 0, req); 1170 if (err != 0 || req->newptr == NULL) 1171 return (err); 1172 1173 if (val >= arc_shrink_shift) 1174 return (EINVAL); 1175 1176 arc_no_grow_shift = val; 1177 return (0); 1178} 1179 1180static int 1181sysctl_vfs_zfs_arc_max(SYSCTL_HANDLER_ARGS) 1182{ 1183 uint64_t val; 1184 int err; 1185 1186 val = zfs_arc_max; 1187 err = sysctl_handle_64(oidp, &val, 0, req); 1188 if (err != 0 || req->newptr == NULL) 1189 return (err); 1190 1191 if (zfs_arc_max == 0) { 1192 /* Loader tunable so blindly set */ 1193 zfs_arc_max = val; 1194 return (0); 1195 } 1196 1197 if (val < arc_abs_min || val > kmem_size()) 1198 return (EINVAL); 1199 if (val < arc_c_min) 1200 return (EINVAL); 1201 if (zfs_arc_meta_limit > 0 && val < zfs_arc_meta_limit) 1202 return (EINVAL); 1203 1204 arc_c_max = val; 1205 1206 arc_c = arc_c_max; 1207 arc_p = (arc_c >> 1); 1208 1209 if (zfs_arc_meta_limit == 0) { 1210 /* limit meta-data to 1/4 of the arc capacity */ 1211 arc_meta_limit = arc_c_max / 4; 1212 } 1213 1214 /* if kmem_flags are set, lets try to use less memory */ 1215 if (kmem_debugging()) 1216 arc_c = arc_c / 2; 1217 1218 zfs_arc_max = arc_c; 1219 1220 return (0); 1221} 1222 1223static int 1224sysctl_vfs_zfs_arc_min(SYSCTL_HANDLER_ARGS) 1225{ 1226 uint64_t val; 1227 int err; 1228 1229 val = zfs_arc_min; 1230 err = sysctl_handle_64(oidp, &val, 0, req); 1231 if (err != 0 || req->newptr == NULL) 1232 return (err); 1233 1234 if (zfs_arc_min == 0) { 1235 /* Loader tunable so blindly set */ 1236 zfs_arc_min = val; 1237 return (0); 1238 } 1239 1240 if (val < arc_abs_min || val > arc_c_max) 1241 return (EINVAL); 1242 1243 arc_c_min = val; 1244 1245 if (zfs_arc_meta_min == 0) 1246 arc_meta_min = arc_c_min / 2; 1247 1248 if (arc_c < arc_c_min) 1249 arc_c = arc_c_min; 1250 1251 zfs_arc_min = arc_c_min; 1252 1253 return (0); 1254} 1255#endif 1256 1257#define GHOST_STATE(state) \ 1258 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \ 1259 (state) == arc_l2c_only) 1260 1261#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE) 1262#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) 1263#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR) 1264#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH) 1265#define HDR_PRESCIENT_PREFETCH(hdr) \ 1266 ((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) 1267#define HDR_COMPRESSION_ENABLED(hdr) \ 1268 ((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC) 1269 1270#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE) 1271#define HDR_L2_READING(hdr) \ 1272 (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \ 1273 ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)) 1274#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING) 1275#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED) 1276#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & 
ARC_FLAG_L2_WRITE_HEAD) 1277#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA) 1278 1279#define HDR_ISTYPE_METADATA(hdr) \ 1280 ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA) 1281#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr)) 1282 1283#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR) 1284#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR) 1285 1286/* For storing compression mode in b_flags */ 1287#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1) 1288 1289#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \ 1290 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS)) 1291#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \ 1292 HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp)); 1293 1294#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL) 1295#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED) 1296#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED) 1297 1298/* 1299 * Other sizes 1300 */ 1301 1302#define HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t)) 1303#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr)) 1304 1305/* 1306 * Hash table routines 1307 */ 1308 1309#define HT_LOCK_PAD CACHE_LINE_SIZE 1310 1311struct ht_lock { 1312 kmutex_t ht_lock; 1313#ifdef _KERNEL 1314 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))]; 1315#endif 1316}; 1317 1318#define BUF_LOCKS 256 1319typedef struct buf_hash_table { 1320 uint64_t ht_mask; 1321 arc_buf_hdr_t **ht_table; 1322 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE); 1323} buf_hash_table_t; 1324 1325static buf_hash_table_t buf_hash_table; 1326 1327#define BUF_HASH_INDEX(spa, dva, birth) \ 1328 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask) 1329#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)]) 1330#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock)) 1331#define HDR_LOCK(hdr) \ 1332 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth))) 1333 1334uint64_t zfs_crc64_table[256]; 1335 1336/* 1337 * Level 2 ARC 1338 */ 1339 1340#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */ 1341#define L2ARC_HEADROOM 2 /* num of writes */ 1342/* 1343 * If we discover during ARC scan any buffers to be compressed, we boost 1344 * our headroom for the next scanning cycle by this percentage multiple. 
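 *
 * As a worked example (defaults assumed, formula paraphrased from the L2ARC
 * write path rather than quoted): with l2arc_write_max at 8 MB and
 * l2arc_headroom at 2, each feed cycle scans roughly 2 * 8 MB = 16 MB ahead
 * of the device hand; once compressed buffers have been seen, the 200%
 * boost raises that to 16 MB * 200 / 100 = 32 MB for the next cycle.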
1345 */ 1346#define L2ARC_HEADROOM_BOOST 200 1347#define L2ARC_FEED_SECS 1 /* caching interval secs */ 1348#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */ 1349 1350#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent) 1351#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done) 1352 1353/* L2ARC Performance Tunables */ 1354uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */ 1355uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */ 1356uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */ 1357uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST; 1358uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */ 1359uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */ 1360boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */ 1361boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */ 1362boolean_t l2arc_norw = B_TRUE; /* no reads during writes */ 1363 1364SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RWTUN, 1365 &l2arc_write_max, 0, "max write size"); 1366SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RWTUN, 1367 &l2arc_write_boost, 0, "extra write during warmup"); 1368SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RWTUN, 1369 &l2arc_headroom, 0, "number of dev writes"); 1370SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RWTUN, 1371 &l2arc_feed_secs, 0, "interval seconds"); 1372SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RWTUN, 1373 &l2arc_feed_min_ms, 0, "min interval milliseconds"); 1374 1375SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RWTUN, 1376 &l2arc_noprefetch, 0, "don't cache prefetch bufs"); 1377SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RWTUN, 1378 &l2arc_feed_again, 0, "turbo warmup"); 1379SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RWTUN, 1380 &l2arc_norw, 0, "no reads during writes"); 1381 1382SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD, 1383 &ARC_anon.arcs_size.rc_count, 0, "size of anonymous state"); 1384SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_esize, CTLFLAG_RD, 1385 &ARC_anon.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1386 "size of anonymous state"); 1387SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_esize, CTLFLAG_RD, 1388 &ARC_anon.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1389 "size of anonymous state"); 1390 1391SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD, 1392 &ARC_mru.arcs_size.rc_count, 0, "size of mru state"); 1393SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_esize, CTLFLAG_RD, 1394 &ARC_mru.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1395 "size of metadata in mru state"); 1396SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_esize, CTLFLAG_RD, 1397 &ARC_mru.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1398 "size of data in mru state"); 1399 1400SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD, 1401 &ARC_mru_ghost.arcs_size.rc_count, 0, "size of mru ghost state"); 1402SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_esize, CTLFLAG_RD, 1403 &ARC_mru_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1404 "size of metadata in mru ghost state"); 1405SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_esize, CTLFLAG_RD, 1406 &ARC_mru_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1407 "size of data in mru ghost state"); 1408 1409SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD, 1410 &ARC_mfu.arcs_size.rc_count, 0, "size of mfu state"); 1411SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_esize, CTLFLAG_RD, 1412 
&ARC_mfu.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1413 "size of metadata in mfu state"); 1414SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_esize, CTLFLAG_RD, 1415 &ARC_mfu.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1416 "size of data in mfu state"); 1417 1418SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD, 1419 &ARC_mfu_ghost.arcs_size.rc_count, 0, "size of mfu ghost state"); 1420SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_esize, CTLFLAG_RD, 1421 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_METADATA].rc_count, 0, 1422 "size of metadata in mfu ghost state"); 1423SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_esize, CTLFLAG_RD, 1424 &ARC_mfu_ghost.arcs_esize[ARC_BUFC_DATA].rc_count, 0, 1425 "size of data in mfu ghost state"); 1426 1427SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD, 1428 &ARC_l2c_only.arcs_size.rc_count, 0, "size of mru state"); 1429 1430SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_min_prefetch_ms, CTLFLAG_RW, 1431 &zfs_arc_min_prefetch_ms, 0, "Min life of prefetch block in ms"); 1432SYSCTL_UINT(_vfs_zfs, OID_AUTO, arc_min_prescient_prefetch_ms, CTLFLAG_RW, 1433 &zfs_arc_min_prescient_prefetch_ms, 0, "Min life of prescient prefetched block in ms"); 1434 1435/* 1436 * L2ARC Internals 1437 */ 1438struct l2arc_dev { 1439 vdev_t *l2ad_vdev; /* vdev */ 1440 spa_t *l2ad_spa; /* spa */ 1441 uint64_t l2ad_hand; /* next write location */ 1442 uint64_t l2ad_start; /* first addr on device */ 1443 uint64_t l2ad_end; /* last addr on device */ 1444 boolean_t l2ad_first; /* first sweep through */ 1445 boolean_t l2ad_writing; /* currently writing */ 1446 kmutex_t l2ad_mtx; /* lock for buffer list */ 1447 list_t l2ad_buflist; /* buffer list */ 1448 list_node_t l2ad_node; /* device list node */ 1449 refcount_t l2ad_alloc; /* allocated bytes */ 1450}; 1451 1452static list_t L2ARC_dev_list; /* device list */ 1453static list_t *l2arc_dev_list; /* device list pointer */ 1454static kmutex_t l2arc_dev_mtx; /* device list mutex */ 1455static l2arc_dev_t *l2arc_dev_last; /* last device used */ 1456static list_t L2ARC_free_on_write; /* free after write buf list */ 1457static list_t *l2arc_free_on_write; /* free after write list ptr */ 1458static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */ 1459static uint64_t l2arc_ndev; /* number of devices */ 1460 1461typedef struct l2arc_read_callback { 1462 arc_buf_hdr_t *l2rcb_hdr; /* read header */ 1463 blkptr_t l2rcb_bp; /* original blkptr */ 1464 zbookmark_phys_t l2rcb_zb; /* original bookmark */ 1465 int l2rcb_flags; /* original flags */ 1466 abd_t *l2rcb_abd; /* temporary buffer */ 1467} l2arc_read_callback_t; 1468 1469typedef struct l2arc_write_callback { 1470 l2arc_dev_t *l2wcb_dev; /* device info */ 1471 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */ 1472} l2arc_write_callback_t; 1473 1474typedef struct l2arc_data_free { 1475 /* protected by l2arc_free_on_write_mtx */ 1476 abd_t *l2df_abd; 1477 size_t l2df_size; 1478 arc_buf_contents_t l2df_type; 1479 list_node_t l2df_list_node; 1480} l2arc_data_free_t; 1481 1482static kmutex_t l2arc_feed_thr_lock; 1483static kcondvar_t l2arc_feed_thr_cv; 1484static uint8_t l2arc_thread_exit; 1485 1486static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *, boolean_t); 1487static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *); 1488static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *, boolean_t); 1489static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *); 1490static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *); 1491static void 
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag); 1492static void arc_hdr_free_pabd(arc_buf_hdr_t *); 1493static void arc_hdr_alloc_pabd(arc_buf_hdr_t *, boolean_t); 1494static void arc_access(arc_buf_hdr_t *, kmutex_t *); 1495static boolean_t arc_is_overflowing(); 1496static void arc_buf_watch(arc_buf_t *); 1497 1498static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *); 1499static uint32_t arc_bufc_to_flags(arc_buf_contents_t); 1500static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); 1501static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags); 1502 1503static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *); 1504static void l2arc_read_done(zio_t *); 1505 1506static void 1507l2arc_trim(const arc_buf_hdr_t *hdr) 1508{ 1509 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 1510 1511 ASSERT(HDR_HAS_L2HDR(hdr)); 1512 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 1513 1514 if (HDR_GET_PSIZE(hdr) != 0) { 1515 trim_map_free(dev->l2ad_vdev, hdr->b_l2hdr.b_daddr, 1516 HDR_GET_PSIZE(hdr), 0); 1517 } 1518} 1519 1520/* 1521 * We use Cityhash for this. It's fast, and has good hash properties without 1522 * requiring any large static buffers. 1523 */ 1524static uint64_t 1525buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth) 1526{ 1527 return (cityhash4(spa, dva->dva_word[0], dva->dva_word[1], birth)); 1528} 1529 1530#define HDR_EMPTY(hdr) \ 1531 ((hdr)->b_dva.dva_word[0] == 0 && \ 1532 (hdr)->b_dva.dva_word[1] == 0) 1533 1534#define HDR_EQUAL(spa, dva, birth, hdr) \ 1535 ((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \ 1536 ((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \ 1537 ((hdr)->b_birth == birth) && ((hdr)->b_spa == spa) 1538 1539static void 1540buf_discard_identity(arc_buf_hdr_t *hdr) 1541{ 1542 hdr->b_dva.dva_word[0] = 0; 1543 hdr->b_dva.dva_word[1] = 0; 1544 hdr->b_birth = 0; 1545} 1546 1547static arc_buf_hdr_t * 1548buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp) 1549{ 1550 const dva_t *dva = BP_IDENTITY(bp); 1551 uint64_t birth = BP_PHYSICAL_BIRTH(bp); 1552 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth); 1553 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1554 arc_buf_hdr_t *hdr; 1555 1556 mutex_enter(hash_lock); 1557 for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL; 1558 hdr = hdr->b_hash_next) { 1559 if (HDR_EQUAL(spa, dva, birth, hdr)) { 1560 *lockp = hash_lock; 1561 return (hdr); 1562 } 1563 } 1564 mutex_exit(hash_lock); 1565 *lockp = NULL; 1566 return (NULL); 1567} 1568 1569/* 1570 * Insert an entry into the hash table. If there is already an element 1571 * equal to elem in the hash table, then the already existing element 1572 * will be returned and the new element will not be inserted. 1573 * Otherwise returns NULL. 1574 * If lockp == NULL, the caller is assumed to already hold the hash lock. 
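 *
 * An illustrative sketch (not new code) of the usual find-or-insert
 * pattern built on these two routines; the guid and bp are assumed to
 * come from the caller:
 *
 *	kmutex_t *hash_lock = NULL;
 *	arc_buf_hdr_t *hdr = buf_hash_find(guid, bp, &hash_lock);
 *
 *	if (hdr == NULL) {
 *		hdr = arc_hdr_alloc(...);
 *		... set the identity (b_dva, b_birth, b_spa) ...
 *		exists = buf_hash_insert(hdr, &hash_lock);
 *		if (exists != NULL)
 *			... lost the race, use 'exists' instead ...
 *	}
 *	... work on the header ...
 *	mutex_exit(hash_lock);
 *
 * buf_hash_find() returns with the chain's mutex held on a hit, and
 * buf_hash_insert() enters it when lockp is non-NULL, so either way the
 * caller owns the final mutex_exit().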
1575 */ 1576static arc_buf_hdr_t * 1577buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp) 1578{ 1579 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1580 kmutex_t *hash_lock = BUF_HASH_LOCK(idx); 1581 arc_buf_hdr_t *fhdr; 1582 uint32_t i; 1583 1584 ASSERT(!DVA_IS_EMPTY(&hdr->b_dva)); 1585 ASSERT(hdr->b_birth != 0); 1586 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 1587 1588 if (lockp != NULL) { 1589 *lockp = hash_lock; 1590 mutex_enter(hash_lock); 1591 } else { 1592 ASSERT(MUTEX_HELD(hash_lock)); 1593 } 1594 1595 for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL; 1596 fhdr = fhdr->b_hash_next, i++) { 1597 if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr)) 1598 return (fhdr); 1599 } 1600 1601 hdr->b_hash_next = buf_hash_table.ht_table[idx]; 1602 buf_hash_table.ht_table[idx] = hdr; 1603 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1604 1605 /* collect some hash table performance data */ 1606 if (i > 0) { 1607 ARCSTAT_BUMP(arcstat_hash_collisions); 1608 if (i == 1) 1609 ARCSTAT_BUMP(arcstat_hash_chains); 1610 1611 ARCSTAT_MAX(arcstat_hash_chain_max, i); 1612 } 1613 1614 ARCSTAT_BUMP(arcstat_hash_elements); 1615 ARCSTAT_MAXSTAT(arcstat_hash_elements); 1616 1617 return (NULL); 1618} 1619 1620static void 1621buf_hash_remove(arc_buf_hdr_t *hdr) 1622{ 1623 arc_buf_hdr_t *fhdr, **hdrp; 1624 uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth); 1625 1626 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx))); 1627 ASSERT(HDR_IN_HASH_TABLE(hdr)); 1628 1629 hdrp = &buf_hash_table.ht_table[idx]; 1630 while ((fhdr = *hdrp) != hdr) { 1631 ASSERT3P(fhdr, !=, NULL); 1632 hdrp = &fhdr->b_hash_next; 1633 } 1634 *hdrp = hdr->b_hash_next; 1635 hdr->b_hash_next = NULL; 1636 arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 1637 1638 /* collect some hash table performance data */ 1639 ARCSTAT_BUMPDOWN(arcstat_hash_elements); 1640 1641 if (buf_hash_table.ht_table[idx] && 1642 buf_hash_table.ht_table[idx]->b_hash_next == NULL) 1643 ARCSTAT_BUMPDOWN(arcstat_hash_chains); 1644} 1645 1646/* 1647 * Global data structures and functions for the buf kmem cache. 1648 */ 1649static kmem_cache_t *hdr_full_cache; 1650static kmem_cache_t *hdr_l2only_cache; 1651static kmem_cache_t *buf_cache; 1652 1653static void 1654buf_fini(void) 1655{ 1656 int i; 1657 1658 kmem_free(buf_hash_table.ht_table, 1659 (buf_hash_table.ht_mask + 1) * sizeof (void *)); 1660 for (i = 0; i < BUF_LOCKS; i++) 1661 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock); 1662 kmem_cache_destroy(hdr_full_cache); 1663 kmem_cache_destroy(hdr_l2only_cache); 1664 kmem_cache_destroy(buf_cache); 1665} 1666 1667/* 1668 * Constructor callback - called when the cache is empty 1669 * and a new buf is requested. 
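 *
 * Objects handed out by these caches are already constructed, so callers
 * such as arc_hdr_alloc() and arc_hdr_realloc() simply do, for example:
 *
 *	hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
 *	...
 *	kmem_cache_free(hdr_full_cache, hdr);
 *
 * and rely on the constructor/destructor pairs below to set up and tear
 * down the embedded condvar, refcount and freeze lock.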
1670 */ 1671/* ARGSUSED */ 1672static int 1673hdr_full_cons(void *vbuf, void *unused, int kmflag) 1674{ 1675 arc_buf_hdr_t *hdr = vbuf; 1676 1677 bzero(hdr, HDR_FULL_SIZE); 1678 cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL); 1679 refcount_create(&hdr->b_l1hdr.b_refcnt); 1680 mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL); 1681 multilist_link_init(&hdr->b_l1hdr.b_arc_node); 1682 arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1683 1684 return (0); 1685} 1686 1687/* ARGSUSED */ 1688static int 1689hdr_l2only_cons(void *vbuf, void *unused, int kmflag) 1690{ 1691 arc_buf_hdr_t *hdr = vbuf; 1692 1693 bzero(hdr, HDR_L2ONLY_SIZE); 1694 arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1695 1696 return (0); 1697} 1698 1699/* ARGSUSED */ 1700static int 1701buf_cons(void *vbuf, void *unused, int kmflag) 1702{ 1703 arc_buf_t *buf = vbuf; 1704 1705 bzero(buf, sizeof (arc_buf_t)); 1706 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL); 1707 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1708 1709 return (0); 1710} 1711 1712/* 1713 * Destructor callback - called when a cached buf is 1714 * no longer required. 1715 */ 1716/* ARGSUSED */ 1717static void 1718hdr_full_dest(void *vbuf, void *unused) 1719{ 1720 arc_buf_hdr_t *hdr = vbuf; 1721 1722 ASSERT(HDR_EMPTY(hdr)); 1723 cv_destroy(&hdr->b_l1hdr.b_cv); 1724 refcount_destroy(&hdr->b_l1hdr.b_refcnt); 1725 mutex_destroy(&hdr->b_l1hdr.b_freeze_lock); 1726 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 1727 arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS); 1728} 1729 1730/* ARGSUSED */ 1731static void 1732hdr_l2only_dest(void *vbuf, void *unused) 1733{ 1734 arc_buf_hdr_t *hdr = vbuf; 1735 1736 ASSERT(HDR_EMPTY(hdr)); 1737 arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS); 1738} 1739 1740/* ARGSUSED */ 1741static void 1742buf_dest(void *vbuf, void *unused) 1743{ 1744 arc_buf_t *buf = vbuf; 1745 1746 mutex_destroy(&buf->b_evict_lock); 1747 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS); 1748} 1749 1750/* 1751 * Reclaim callback -- invoked when memory is low. 1752 */ 1753/* ARGSUSED */ 1754static void 1755hdr_recl(void *unused) 1756{ 1757 dprintf("hdr_recl called\n"); 1758 /* 1759 * umem calls the reclaim func when we destroy the buf cache, 1760 * which is after we do arc_fini(). 1761 */ 1762 if (arc_initialized) 1763 zthr_wakeup(arc_reap_zthr); 1764} 1765 1766static void 1767buf_init(void) 1768{ 1769 uint64_t *ct; 1770 uint64_t hsize = 1ULL << 12; 1771 int i, j; 1772 1773 /* 1774 * The hash table is big enough to fill all of physical memory 1775 * with an average block size of zfs_arc_average_blocksize (default 8K). 1776 * By default, the table will take up 1777 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers). 
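	 *
	 * For example, with 16GB of physical memory and the default 8K
	 * zfs_arc_average_blocksize, hsize starts at 2^12 and doubles
	 * until hsize * 8K >= 16GB, i.e. hsize = 2^21 buckets; at 8 bytes
	 * per bucket the table occupies 16MB, the advertised 1MB per GB.
	 * If the KM_NOSLEEP allocation fails, the retry path below simply
	 * halves hsize and tries again.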
1778 */ 1779 while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE) 1780 hsize <<= 1; 1781retry: 1782 buf_hash_table.ht_mask = hsize - 1; 1783 buf_hash_table.ht_table = 1784 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP); 1785 if (buf_hash_table.ht_table == NULL) { 1786 ASSERT(hsize > (1ULL << 8)); 1787 hsize >>= 1; 1788 goto retry; 1789 } 1790 1791 hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE, 1792 0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0); 1793 hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only", 1794 HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl, 1795 NULL, NULL, 0); 1796 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t), 1797 0, buf_cons, buf_dest, NULL, NULL, NULL, 0); 1798 1799 for (i = 0; i < 256; i++) 1800 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--) 1801 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY); 1802 1803 for (i = 0; i < BUF_LOCKS; i++) { 1804 mutex_init(&buf_hash_table.ht_locks[i].ht_lock, 1805 NULL, MUTEX_DEFAULT, NULL); 1806 } 1807} 1808 1809/* 1810 * This is the size that the buf occupies in memory. If the buf is compressed, 1811 * it will correspond to the compressed size. You should use this method of 1812 * getting the buf size unless you explicitly need the logical size. 1813 */ 1814int32_t 1815arc_buf_size(arc_buf_t *buf) 1816{ 1817 return (ARC_BUF_COMPRESSED(buf) ? 1818 HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr)); 1819} 1820 1821int32_t 1822arc_buf_lsize(arc_buf_t *buf) 1823{ 1824 return (HDR_GET_LSIZE(buf->b_hdr)); 1825} 1826 1827enum zio_compress 1828arc_get_compression(arc_buf_t *buf) 1829{ 1830 return (ARC_BUF_COMPRESSED(buf) ? 1831 HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF); 1832} 1833 1834#define ARC_MINTIME (hz>>4) /* 62 ms */ 1835 1836static inline boolean_t 1837arc_buf_is_shared(arc_buf_t *buf) 1838{ 1839 boolean_t shared = (buf->b_data != NULL && 1840 buf->b_hdr->b_l1hdr.b_pabd != NULL && 1841 abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) && 1842 buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd)); 1843 IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr)); 1844 IMPLY(shared, ARC_BUF_SHARED(buf)); 1845 IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf)); 1846 1847 /* 1848 * It would be nice to assert arc_can_share() too, but the "hdr isn't 1849 * already being shared" requirement prevents us from doing that. 1850 */ 1851 1852 return (shared); 1853} 1854 1855/* 1856 * Free the checksum associated with this header. If there is no checksum, this 1857 * is a no-op. 1858 */ 1859static inline void 1860arc_cksum_free(arc_buf_hdr_t *hdr) 1861{ 1862 ASSERT(HDR_HAS_L1HDR(hdr)); 1863 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1864 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 1865 kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t)); 1866 hdr->b_l1hdr.b_freeze_cksum = NULL; 1867 } 1868 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1869} 1870 1871/* 1872 * Return true iff at least one of the bufs on hdr is not compressed. 1873 */ 1874static boolean_t 1875arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr) 1876{ 1877 for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) { 1878 if (!ARC_BUF_COMPRESSED(b)) { 1879 return (B_TRUE); 1880 } 1881 } 1882 return (B_FALSE); 1883} 1884 1885/* 1886 * If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data 1887 * matches the checksum that is stored in the hdr. If there is no checksum, 1888 * or if the buf is compressed, this is a no-op. 
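 *
 * Roughly, the debug-checksum lifecycle looks like this (all of it is a
 * no-op unless ZFS_DEBUG_MODIFY is set in zfs_flags):
 *
 *	arc_cksum_compute(buf)	record a fletcher-2 checksum of b_data
 *				in b_freeze_cksum
 *	arc_cksum_verify(buf)	panic if b_data no longer matches the
 *				recorded checksum
 *	arc_buf_thaw(buf)	verify, then discard the checksum so the
 *				consumer may legitimately modify the buf
 *	arc_buf_freeze(buf)	recompute the checksum once the consumer
 *				is done modifying it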
1889 */ 1890static void 1891arc_cksum_verify(arc_buf_t *buf) 1892{ 1893 arc_buf_hdr_t *hdr = buf->b_hdr; 1894 zio_cksum_t zc; 1895 1896 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 1897 return; 1898 1899 if (ARC_BUF_COMPRESSED(buf)) { 1900 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 1901 arc_hdr_has_uncompressed_buf(hdr)); 1902 return; 1903 } 1904 1905 ASSERT(HDR_HAS_L1HDR(hdr)); 1906 1907 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 1908 if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) { 1909 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1910 return; 1911 } 1912 1913 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc); 1914 if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc)) 1915 panic("buffer modified while frozen!"); 1916 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 1917} 1918 1919static boolean_t 1920arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio) 1921{ 1922 enum zio_compress compress = BP_GET_COMPRESS(zio->io_bp); 1923 boolean_t valid_cksum; 1924 1925 ASSERT(!BP_IS_EMBEDDED(zio->io_bp)); 1926 VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr)); 1927 1928 /* 1929 * We rely on the blkptr's checksum to determine if the block 1930 * is valid or not. When compressed arc is enabled, the l2arc 1931 * writes the block to the l2arc just as it appears in the pool. 1932 * This allows us to use the blkptr's checksum to validate the 1933 * data that we just read off of the l2arc without having to store 1934 * a separate checksum in the arc_buf_hdr_t. However, if compressed 1935 * arc is disabled, then the data written to the l2arc is always 1936 * uncompressed and won't match the block as it exists in the main 1937 * pool. When this is the case, we must first compress it if it is 1938 * compressed on the main pool before we can validate the checksum. 1939 */ 1940 if (!HDR_COMPRESSION_ENABLED(hdr) && compress != ZIO_COMPRESS_OFF) { 1941 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 1942 uint64_t lsize = HDR_GET_LSIZE(hdr); 1943 uint64_t csize; 1944 1945 abd_t *cdata = abd_alloc_linear(HDR_GET_PSIZE(hdr), B_TRUE); 1946 csize = zio_compress_data(compress, zio->io_abd, 1947 abd_to_buf(cdata), lsize); 1948 1949 ASSERT3U(csize, <=, HDR_GET_PSIZE(hdr)); 1950 if (csize < HDR_GET_PSIZE(hdr)) { 1951 /* 1952 * Compressed blocks are always a multiple of the 1953 * smallest ashift in the pool. Ideally, we would 1954 * like to round up the csize to the next 1955 * spa_min_ashift but that value may have changed 1956 * since the block was last written. Instead, 1957 * we rely on the fact that the hdr's psize 1958 * was set to the psize of the block when it was 1959 * last written. We set the csize to that value 1960 * and zero out any part that should not contain 1961 * data. 1962 */ 1963 abd_zero_off(cdata, csize, HDR_GET_PSIZE(hdr) - csize); 1964 csize = HDR_GET_PSIZE(hdr); 1965 } 1966 zio_push_transform(zio, cdata, csize, HDR_GET_PSIZE(hdr), NULL); 1967 } 1968 1969 /* 1970 * Block pointers always store the checksum for the logical data. 1971 * If the block pointer has the gang bit set, then the checksum 1972 * it represents is for the reconstituted data and not for an 1973 * individual gang member. The zio pipeline, however, must be able to 1974 * determine the checksum of each of the gang constituents so it 1975 * treats the checksum comparison differently than what we need 1976 * for l2arc blocks. This prevents us from using the 1977 * zio_checksum_error() interface directly. 
Instead we must call the 1978 * zio_checksum_error_impl() so that we can ensure the checksum is 1979 * generated using the correct checksum algorithm and accounts for the 1980 * logical I/O size and not just a gang fragment. 1981 */ 1982 valid_cksum = (zio_checksum_error_impl(zio->io_spa, zio->io_bp, 1983 BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size, 1984 zio->io_offset, NULL) == 0); 1985 zio_pop_transforms(zio); 1986 return (valid_cksum); 1987} 1988 1989/* 1990 * Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a 1991 * checksum and attaches it to the buf's hdr so that we can ensure that the buf 1992 * isn't modified later on. If buf is compressed or there is already a checksum 1993 * on the hdr, this is a no-op (we only checksum uncompressed bufs). 1994 */ 1995static void 1996arc_cksum_compute(arc_buf_t *buf) 1997{ 1998 arc_buf_hdr_t *hdr = buf->b_hdr; 1999 2000 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 2001 return; 2002 2003 ASSERT(HDR_HAS_L1HDR(hdr)); 2004 2005 mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock); 2006 if (hdr->b_l1hdr.b_freeze_cksum != NULL) { 2007 ASSERT(arc_hdr_has_uncompressed_buf(hdr)); 2008 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 2009 return; 2010 } else if (ARC_BUF_COMPRESSED(buf)) { 2011 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 2012 return; 2013 } 2014 2015 ASSERT(!ARC_BUF_COMPRESSED(buf)); 2016 hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), 2017 KM_SLEEP); 2018 fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, 2019 hdr->b_l1hdr.b_freeze_cksum); 2020 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 2021#ifdef illumos 2022 arc_buf_watch(buf); 2023#endif 2024} 2025 2026#ifdef illumos 2027#ifndef _KERNEL 2028typedef struct procctl { 2029 long cmd; 2030 prwatch_t prwatch; 2031} procctl_t; 2032#endif 2033 2034/* ARGSUSED */ 2035static void 2036arc_buf_unwatch(arc_buf_t *buf) 2037{ 2038#ifndef _KERNEL 2039 if (arc_watch) { 2040 int result; 2041 procctl_t ctl; 2042 ctl.cmd = PCWATCH; 2043 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 2044 ctl.prwatch.pr_size = 0; 2045 ctl.prwatch.pr_wflags = 0; 2046 result = write(arc_procfd, &ctl, sizeof (ctl)); 2047 ASSERT3U(result, ==, sizeof (ctl)); 2048 } 2049#endif 2050} 2051 2052/* ARGSUSED */ 2053static void 2054arc_buf_watch(arc_buf_t *buf) 2055{ 2056#ifndef _KERNEL 2057 if (arc_watch) { 2058 int result; 2059 procctl_t ctl; 2060 ctl.cmd = PCWATCH; 2061 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data; 2062 ctl.prwatch.pr_size = arc_buf_size(buf); 2063 ctl.prwatch.pr_wflags = WA_WRITE; 2064 result = write(arc_procfd, &ctl, sizeof (ctl)); 2065 ASSERT3U(result, ==, sizeof (ctl)); 2066 } 2067#endif 2068} 2069#endif /* illumos */ 2070 2071static arc_buf_contents_t 2072arc_buf_type(arc_buf_hdr_t *hdr) 2073{ 2074 arc_buf_contents_t type; 2075 if (HDR_ISTYPE_METADATA(hdr)) { 2076 type = ARC_BUFC_METADATA; 2077 } else { 2078 type = ARC_BUFC_DATA; 2079 } 2080 VERIFY3U(hdr->b_type, ==, type); 2081 return (type); 2082} 2083 2084boolean_t 2085arc_is_metadata(arc_buf_t *buf) 2086{ 2087 return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0); 2088} 2089 2090static uint32_t 2091arc_bufc_to_flags(arc_buf_contents_t type) 2092{ 2093 switch (type) { 2094 case ARC_BUFC_DATA: 2095 /* metadata field is 0 if buffer contains normal data */ 2096 return (0); 2097 case ARC_BUFC_METADATA: 2098 return (ARC_FLAG_BUFC_METADATA); 2099 default: 2100 break; 2101 } 2102 panic("undefined ARC buffer type!"); 2103 return ((uint32_t)-1); 2104} 2105 2106void 2107arc_buf_thaw(arc_buf_t *buf) 2108{ 2109 arc_buf_hdr_t *hdr = buf->b_hdr; 2110 
2111 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 2112 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 2113 2114 arc_cksum_verify(buf); 2115 2116 /* 2117 * Compressed buffers do not manipulate the b_freeze_cksum or 2118 * allocate b_thawed. 2119 */ 2120 if (ARC_BUF_COMPRESSED(buf)) { 2121 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 2122 arc_hdr_has_uncompressed_buf(hdr)); 2123 return; 2124 } 2125 2126 ASSERT(HDR_HAS_L1HDR(hdr)); 2127 arc_cksum_free(hdr); 2128 2129 mutex_enter(&hdr->b_l1hdr.b_freeze_lock); 2130#ifdef ZFS_DEBUG 2131 if (zfs_flags & ZFS_DEBUG_MODIFY) { 2132 if (hdr->b_l1hdr.b_thawed != NULL) 2133 kmem_free(hdr->b_l1hdr.b_thawed, 1); 2134 hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP); 2135 } 2136#endif 2137 2138 mutex_exit(&hdr->b_l1hdr.b_freeze_lock); 2139 2140#ifdef illumos 2141 arc_buf_unwatch(buf); 2142#endif 2143} 2144 2145void 2146arc_buf_freeze(arc_buf_t *buf) 2147{ 2148 arc_buf_hdr_t *hdr = buf->b_hdr; 2149 kmutex_t *hash_lock; 2150 2151 if (!(zfs_flags & ZFS_DEBUG_MODIFY)) 2152 return; 2153 2154 if (ARC_BUF_COMPRESSED(buf)) { 2155 ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL || 2156 arc_hdr_has_uncompressed_buf(hdr)); 2157 return; 2158 } 2159 2160 hash_lock = HDR_LOCK(hdr); 2161 mutex_enter(hash_lock); 2162 2163 ASSERT(HDR_HAS_L1HDR(hdr)); 2164 ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL || 2165 hdr->b_l1hdr.b_state == arc_anon); 2166 arc_cksum_compute(buf); 2167 mutex_exit(hash_lock); 2168} 2169 2170/* 2171 * The arc_buf_hdr_t's b_flags should never be modified directly. Instead, 2172 * the following functions should be used to ensure that the flags are 2173 * updated in a thread-safe way. When manipulating the flags either 2174 * the hash_lock must be held or the hdr must be undiscoverable. This 2175 * ensures that we're not racing with any other threads when updating 2176 * the flags. 2177 */ 2178static inline void 2179arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2180{ 2181 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2182 hdr->b_flags |= flags; 2183} 2184 2185static inline void 2186arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags) 2187{ 2188 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2189 hdr->b_flags &= ~flags; 2190} 2191 2192/* 2193 * Setting the compression bits in the arc_buf_hdr_t's b_flags is 2194 * done in a special way since we have to clear and set bits 2195 * at the same time. Consumers that wish to set the compression bits 2196 * must use this function to ensure that the flags are updated in 2197 * thread-safe manner. 2198 */ 2199static void 2200arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp) 2201{ 2202 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2203 2204 /* 2205 * Holes and embedded blocks will always have a psize = 0 so 2206 * we ignore the compression of the blkptr and set the 2207 * arc_buf_hdr_t's compression to ZIO_COMPRESS_OFF. 2208 * Holes and embedded blocks remain anonymous so we don't 2209 * want to uncompress them. Mark them as uncompressed. 
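	 *
	 * For example (illustrative): a hole or embedded blkptr has a
	 * psize of 0, so the hdr ends up with ZIO_COMPRESS_OFF and the
	 * COMPRESSED_ARC flag cleared regardless of the 'cmp' passed in,
	 * whereas an lz4-compressed block read with
	 * zfs_compressed_arc_enabled keeps cmp (lz4) and sets
	 * ARC_FLAG_COMPRESSED_ARC.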
2210 */ 2211 if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) { 2212 arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2213 HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF); 2214 ASSERT(!HDR_COMPRESSION_ENABLED(hdr)); 2215 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 2216 } else { 2217 arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC); 2218 HDR_SET_COMPRESS(hdr, cmp); 2219 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp); 2220 ASSERT(HDR_COMPRESSION_ENABLED(hdr)); 2221 } 2222} 2223 2224/* 2225 * Looks for another buf on the same hdr which has the data decompressed, copies 2226 * from it, and returns true. If no such buf exists, returns false. 2227 */ 2228static boolean_t 2229arc_buf_try_copy_decompressed_data(arc_buf_t *buf) 2230{ 2231 arc_buf_hdr_t *hdr = buf->b_hdr; 2232 boolean_t copied = B_FALSE; 2233 2234 ASSERT(HDR_HAS_L1HDR(hdr)); 2235 ASSERT3P(buf->b_data, !=, NULL); 2236 ASSERT(!ARC_BUF_COMPRESSED(buf)); 2237 2238 for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL; 2239 from = from->b_next) { 2240 /* can't use our own data buffer */ 2241 if (from == buf) { 2242 continue; 2243 } 2244 2245 if (!ARC_BUF_COMPRESSED(from)) { 2246 bcopy(from->b_data, buf->b_data, arc_buf_size(buf)); 2247 copied = B_TRUE; 2248 break; 2249 } 2250 } 2251 2252 /* 2253 * There were no decompressed bufs, so there should not be a 2254 * checksum on the hdr either. 2255 */ 2256 EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL); 2257 2258 return (copied); 2259} 2260 2261/* 2262 * Given a buf that has a data buffer attached to it, this function will 2263 * efficiently fill the buf with data of the specified compression setting from 2264 * the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr 2265 * are already sharing a data buf, no copy is performed. 2266 * 2267 * If the buf is marked as compressed but uncompressed data was requested, this 2268 * will allocate a new data buffer for the buf, remove that flag, and fill the 2269 * buf with uncompressed data. You can't request a compressed buf on a hdr with 2270 * uncompressed data, and (since we haven't added support for it yet) if you 2271 * want compressed data your buf must already be marked as compressed and have 2272 * the correct-sized data buffer. 2273 */ 2274static int 2275arc_buf_fill(arc_buf_t *buf, boolean_t compressed) 2276{ 2277 arc_buf_hdr_t *hdr = buf->b_hdr; 2278 boolean_t hdr_compressed = (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF); 2279 dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap; 2280 2281 ASSERT3P(buf->b_data, !=, NULL); 2282 IMPLY(compressed, hdr_compressed); 2283 IMPLY(compressed, ARC_BUF_COMPRESSED(buf)); 2284 2285 if (hdr_compressed == compressed) { 2286 if (!arc_buf_is_shared(buf)) { 2287 abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd, 2288 arc_buf_size(buf)); 2289 } 2290 } else { 2291 ASSERT(hdr_compressed); 2292 ASSERT(!compressed); 2293 ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr)); 2294 2295 /* 2296 * If the buf is sharing its data with the hdr, unlink it and 2297 * allocate a new data buffer for the buf. 
2298 */ 2299 if (arc_buf_is_shared(buf)) { 2300 ASSERT(ARC_BUF_COMPRESSED(buf)); 2301 2302 /* We need to give the buf it's own b_data */ 2303 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 2304 buf->b_data = 2305 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2306 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 2307 2308 /* Previously overhead was 0; just add new overhead */ 2309 ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr)); 2310 } else if (ARC_BUF_COMPRESSED(buf)) { 2311 /* We need to reallocate the buf's b_data */ 2312 arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr), 2313 buf); 2314 buf->b_data = 2315 arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf); 2316 2317 /* We increased the size of b_data; update overhead */ 2318 ARCSTAT_INCR(arcstat_overhead_size, 2319 HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr)); 2320 } 2321 2322 /* 2323 * Regardless of the buf's previous compression settings, it 2324 * should not be compressed at the end of this function. 2325 */ 2326 buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED; 2327 2328 /* 2329 * Try copying the data from another buf which already has a 2330 * decompressed version. If that's not possible, it's time to 2331 * bite the bullet and decompress the data from the hdr. 2332 */ 2333 if (arc_buf_try_copy_decompressed_data(buf)) { 2334 /* Skip byteswapping and checksumming (already done) */ 2335 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL); 2336 return (0); 2337 } else { 2338 int error = zio_decompress_data(HDR_GET_COMPRESS(hdr), 2339 hdr->b_l1hdr.b_pabd, buf->b_data, 2340 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 2341 2342 /* 2343 * Absent hardware errors or software bugs, this should 2344 * be impossible, but log it anyway so we can debug it. 2345 */ 2346 if (error != 0) { 2347 zfs_dbgmsg( 2348 "hdr %p, compress %d, psize %d, lsize %d", 2349 hdr, HDR_GET_COMPRESS(hdr), 2350 HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr)); 2351 return (SET_ERROR(EIO)); 2352 } 2353 } 2354 } 2355 2356 /* Byteswap the buf's data if necessary */ 2357 if (bswap != DMU_BSWAP_NUMFUNCS) { 2358 ASSERT(!HDR_SHARED_DATA(hdr)); 2359 ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS); 2360 dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr)); 2361 } 2362 2363 /* Compute the hdr's checksum if necessary */ 2364 arc_cksum_compute(buf); 2365 2366 return (0); 2367} 2368 2369int 2370arc_decompress(arc_buf_t *buf) 2371{ 2372 return (arc_buf_fill(buf, B_FALSE)); 2373} 2374 2375/* 2376 * Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t. 2377 */ 2378static uint64_t 2379arc_hdr_size(arc_buf_hdr_t *hdr) 2380{ 2381 uint64_t size; 2382 2383 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF && 2384 HDR_GET_PSIZE(hdr) > 0) { 2385 size = HDR_GET_PSIZE(hdr); 2386 } else { 2387 ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0); 2388 size = HDR_GET_LSIZE(hdr); 2389 } 2390 return (size); 2391} 2392 2393/* 2394 * Increment the amount of evictable space in the arc_state_t's refcount. 2395 * We account for the space used by the hdr and the arc buf individually 2396 * so that we can add and remove them from the refcount individually. 
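 *
 * As a concrete (illustrative) example, a header in arc_mru holding a
 * 32K compressed b_pabd (psize = 32K, lsize = 128K) plus one non-shared
 * uncompressed buf contributes:
 *
 *	arcs_esize[type] += arc_hdr_size(hdr);		32K for b_pabd
 *	arcs_esize[type] += arc_buf_size(buf);		128K for the buf
 *
 * A buf that shares its b_data with b_pabd is skipped, since that memory
 * is already accounted for via the hdr.  Ghost-state headers carry no
 * data, so only HDR_GET_LSIZE(hdr) is charged.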
2397 */ 2398static void 2399arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state) 2400{ 2401 arc_buf_contents_t type = arc_buf_type(hdr); 2402 2403 ASSERT(HDR_HAS_L1HDR(hdr)); 2404 2405 if (GHOST_STATE(state)) { 2406 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2407 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2408 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2409 (void) refcount_add_many(&state->arcs_esize[type], 2410 HDR_GET_LSIZE(hdr), hdr); 2411 return; 2412 } 2413 2414 ASSERT(!GHOST_STATE(state)); 2415 if (hdr->b_l1hdr.b_pabd != NULL) { 2416 (void) refcount_add_many(&state->arcs_esize[type], 2417 arc_hdr_size(hdr), hdr); 2418 } 2419 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2420 buf = buf->b_next) { 2421 if (arc_buf_is_shared(buf)) 2422 continue; 2423 (void) refcount_add_many(&state->arcs_esize[type], 2424 arc_buf_size(buf), buf); 2425 } 2426} 2427 2428/* 2429 * Decrement the amount of evictable space in the arc_state_t's refcount. 2430 * We account for the space used by the hdr and the arc buf individually 2431 * so that we can add and remove them from the refcount individually. 2432 */ 2433static void 2434arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state) 2435{ 2436 arc_buf_contents_t type = arc_buf_type(hdr); 2437 2438 ASSERT(HDR_HAS_L1HDR(hdr)); 2439 2440 if (GHOST_STATE(state)) { 2441 ASSERT0(hdr->b_l1hdr.b_bufcnt); 2442 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2443 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2444 (void) refcount_remove_many(&state->arcs_esize[type], 2445 HDR_GET_LSIZE(hdr), hdr); 2446 return; 2447 } 2448 2449 ASSERT(!GHOST_STATE(state)); 2450 if (hdr->b_l1hdr.b_pabd != NULL) { 2451 (void) refcount_remove_many(&state->arcs_esize[type], 2452 arc_hdr_size(hdr), hdr); 2453 } 2454 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2455 buf = buf->b_next) { 2456 if (arc_buf_is_shared(buf)) 2457 continue; 2458 (void) refcount_remove_many(&state->arcs_esize[type], 2459 arc_buf_size(buf), buf); 2460 } 2461} 2462 2463/* 2464 * Add a reference to this hdr indicating that someone is actively 2465 * referencing that memory. When the refcount transitions from 0 to 1, 2466 * we remove it from the respective arc_state_t list to indicate that 2467 * it is not evictable. 2468 */ 2469static void 2470add_reference(arc_buf_hdr_t *hdr, void *tag) 2471{ 2472 ASSERT(HDR_HAS_L1HDR(hdr)); 2473 if (!MUTEX_HELD(HDR_LOCK(hdr))) { 2474 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 2475 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2476 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2477 } 2478 2479 arc_state_t *state = hdr->b_l1hdr.b_state; 2480 2481 if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) && 2482 (state != arc_anon)) { 2483 /* We don't use the L2-only state list. */ 2484 if (state != arc_l2c_only) { 2485 multilist_remove(state->arcs_list[arc_buf_type(hdr)], 2486 hdr); 2487 arc_evictable_space_decrement(hdr, state); 2488 } 2489 /* remove the prefetch flag if we get a reference */ 2490 arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH); 2491 } 2492} 2493 2494/* 2495 * Remove a reference from this hdr. When the reference transitions from 2496 * 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's 2497 * list making it eligible for eviction. 
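 *
 * Together with add_reference() above, the refcount drives evictability.
 * Schematically:
 *
 *	add_reference(hdr, tag);		0 -> 1: hdr leaves its
 *						state's multilist and is
 *						no longer evictable
 *	...
 *	remove_reference(hdr, hash_lock, tag);	1 -> 0: hdr is reinserted
 *						and is evictable again
 *
 * Anonymous headers are never on a list, so they are exempt from both
 * transitions.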
2498 */ 2499static int 2500remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag) 2501{ 2502 int cnt; 2503 arc_state_t *state = hdr->b_l1hdr.b_state; 2504 2505 ASSERT(HDR_HAS_L1HDR(hdr)); 2506 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock)); 2507 ASSERT(!GHOST_STATE(state)); 2508 2509 /* 2510 * arc_l2c_only counts as a ghost state so we don't need to explicitly 2511 * check to prevent usage of the arc_l2c_only list. 2512 */ 2513 if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) && 2514 (state != arc_anon)) { 2515 multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr); 2516 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 2517 arc_evictable_space_increment(hdr, state); 2518 } 2519 return (cnt); 2520} 2521 2522/* 2523 * Move the supplied buffer to the indicated state. The hash lock 2524 * for the buffer must be held by the caller. 2525 */ 2526static void 2527arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr, 2528 kmutex_t *hash_lock) 2529{ 2530 arc_state_t *old_state; 2531 int64_t refcnt; 2532 uint32_t bufcnt; 2533 boolean_t update_old, update_new; 2534 arc_buf_contents_t buftype = arc_buf_type(hdr); 2535 2536 /* 2537 * We almost always have an L1 hdr here, since we call arc_hdr_realloc() 2538 * in arc_read() when bringing a buffer out of the L2ARC. However, the 2539 * L1 hdr doesn't always exist when we change state to arc_anon before 2540 * destroying a header, in which case reallocating to add the L1 hdr is 2541 * pointless. 2542 */ 2543 if (HDR_HAS_L1HDR(hdr)) { 2544 old_state = hdr->b_l1hdr.b_state; 2545 refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt); 2546 bufcnt = hdr->b_l1hdr.b_bufcnt; 2547 update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL); 2548 } else { 2549 old_state = arc_l2c_only; 2550 refcnt = 0; 2551 bufcnt = 0; 2552 update_old = B_FALSE; 2553 } 2554 update_new = update_old; 2555 2556 ASSERT(MUTEX_HELD(hash_lock)); 2557 ASSERT3P(new_state, !=, old_state); 2558 ASSERT(!GHOST_STATE(new_state) || bufcnt == 0); 2559 ASSERT(old_state != arc_anon || bufcnt <= 1); 2560 2561 /* 2562 * If this buffer is evictable, transfer it from the 2563 * old state list to the new state list. 2564 */ 2565 if (refcnt == 0) { 2566 if (old_state != arc_anon && old_state != arc_l2c_only) { 2567 ASSERT(HDR_HAS_L1HDR(hdr)); 2568 multilist_remove(old_state->arcs_list[buftype], hdr); 2569 2570 if (GHOST_STATE(old_state)) { 2571 ASSERT0(bufcnt); 2572 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2573 update_old = B_TRUE; 2574 } 2575 arc_evictable_space_decrement(hdr, old_state); 2576 } 2577 if (new_state != arc_anon && new_state != arc_l2c_only) { 2578 2579 /* 2580 * An L1 header always exists here, since if we're 2581 * moving to some L1-cached state (i.e. not l2c_only or 2582 * anonymous), we realloc the header to add an L1hdr 2583 * beforehand. 2584 */ 2585 ASSERT(HDR_HAS_L1HDR(hdr)); 2586 multilist_insert(new_state->arcs_list[buftype], hdr); 2587 2588 if (GHOST_STATE(new_state)) { 2589 ASSERT0(bufcnt); 2590 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 2591 update_new = B_TRUE; 2592 } 2593 arc_evictable_space_increment(hdr, new_state); 2594 } 2595 } 2596 2597 ASSERT(!HDR_EMPTY(hdr)); 2598 if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr)) 2599 buf_hash_remove(hdr); 2600 2601 /* adjust state sizes (ignore arc_l2c_only) */ 2602 2603 if (update_new && new_state != arc_l2c_only) { 2604 ASSERT(HDR_HAS_L1HDR(hdr)); 2605 if (GHOST_STATE(new_state)) { 2606 ASSERT0(bufcnt); 2607 2608 /* 2609 * When moving a header to a ghost state, we first 2610 * remove all arc buffers. 
Thus, we'll have a 2611 * bufcnt of zero, and no arc buffer to use for 2612 * the reference. As a result, we use the arc 2613 * header pointer for the reference. 2614 */ 2615 (void) refcount_add_many(&new_state->arcs_size, 2616 HDR_GET_LSIZE(hdr), hdr); 2617 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2618 } else { 2619 uint32_t buffers = 0; 2620 2621 /* 2622 * Each individual buffer holds a unique reference, 2623 * thus we must remove each of these references one 2624 * at a time. 2625 */ 2626 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2627 buf = buf->b_next) { 2628 ASSERT3U(bufcnt, !=, 0); 2629 buffers++; 2630 2631 /* 2632 * When the arc_buf_t is sharing the data 2633 * block with the hdr, the owner of the 2634 * reference belongs to the hdr. Only 2635 * add to the refcount if the arc_buf_t is 2636 * not shared. 2637 */ 2638 if (arc_buf_is_shared(buf)) 2639 continue; 2640 2641 (void) refcount_add_many(&new_state->arcs_size, 2642 arc_buf_size(buf), buf); 2643 } 2644 ASSERT3U(bufcnt, ==, buffers); 2645 2646 if (hdr->b_l1hdr.b_pabd != NULL) { 2647 (void) refcount_add_many(&new_state->arcs_size, 2648 arc_hdr_size(hdr), hdr); 2649 } else { 2650 ASSERT(GHOST_STATE(old_state)); 2651 } 2652 } 2653 } 2654 2655 if (update_old && old_state != arc_l2c_only) { 2656 ASSERT(HDR_HAS_L1HDR(hdr)); 2657 if (GHOST_STATE(old_state)) { 2658 ASSERT0(bufcnt); 2659 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 2660 2661 /* 2662 * When moving a header off of a ghost state, 2663 * the header will not contain any arc buffers. 2664 * We use the arc header pointer for the reference 2665 * which is exactly what we did when we put the 2666 * header on the ghost state. 2667 */ 2668 2669 (void) refcount_remove_many(&old_state->arcs_size, 2670 HDR_GET_LSIZE(hdr), hdr); 2671 } else { 2672 uint32_t buffers = 0; 2673 2674 /* 2675 * Each individual buffer holds a unique reference, 2676 * thus we must remove each of these references one 2677 * at a time. 2678 */ 2679 for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL; 2680 buf = buf->b_next) { 2681 ASSERT3U(bufcnt, !=, 0); 2682 buffers++; 2683 2684 /* 2685 * When the arc_buf_t is sharing the data 2686 * block with the hdr, the owner of the 2687 * reference belongs to the hdr. Only 2688 * add to the refcount if the arc_buf_t is 2689 * not shared. 2690 */ 2691 if (arc_buf_is_shared(buf)) 2692 continue; 2693 2694 (void) refcount_remove_many( 2695 &old_state->arcs_size, arc_buf_size(buf), 2696 buf); 2697 } 2698 ASSERT3U(bufcnt, ==, buffers); 2699 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 2700 (void) refcount_remove_many( 2701 &old_state->arcs_size, arc_hdr_size(hdr), hdr); 2702 } 2703 } 2704 2705 if (HDR_HAS_L1HDR(hdr)) 2706 hdr->b_l1hdr.b_state = new_state; 2707 2708 /* 2709 * L2 headers should never be on the L2 state list since they don't 2710 * have L1 headers allocated. 
2711 */ 2712 ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) && 2713 multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA])); 2714} 2715 2716void 2717arc_space_consume(uint64_t space, arc_space_type_t type) 2718{ 2719 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2720 2721 switch (type) { 2722 case ARC_SPACE_DATA: 2723 aggsum_add(&astat_data_size, space); 2724 break; 2725 case ARC_SPACE_META: 2726 aggsum_add(&astat_metadata_size, space); 2727 break; 2728 case ARC_SPACE_OTHER: 2729 aggsum_add(&astat_other_size, space); 2730 break; 2731 case ARC_SPACE_HDRS: 2732 aggsum_add(&astat_hdr_size, space); 2733 break; 2734 case ARC_SPACE_L2HDRS: 2735 aggsum_add(&astat_l2_hdr_size, space); 2736 break; 2737 } 2738 2739 if (type != ARC_SPACE_DATA) 2740 aggsum_add(&arc_meta_used, space); 2741 2742 aggsum_add(&arc_size, space); 2743} 2744 2745void 2746arc_space_return(uint64_t space, arc_space_type_t type) 2747{ 2748 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES); 2749 2750 switch (type) { 2751 case ARC_SPACE_DATA: 2752 aggsum_add(&astat_data_size, -space); 2753 break; 2754 case ARC_SPACE_META: 2755 aggsum_add(&astat_metadata_size, -space); 2756 break; 2757 case ARC_SPACE_OTHER: 2758 aggsum_add(&astat_other_size, -space); 2759 break; 2760 case ARC_SPACE_HDRS: 2761 aggsum_add(&astat_hdr_size, -space); 2762 break; 2763 case ARC_SPACE_L2HDRS: 2764 aggsum_add(&astat_l2_hdr_size, -space); 2765 break; 2766 } 2767 2768 if (type != ARC_SPACE_DATA) { 2769 ASSERT(aggsum_compare(&arc_meta_used, space) >= 0); 2770 /* 2771 * We use the upper bound here rather than the precise value 2772 * because the arc_meta_max value doesn't need to be 2773 * precise. It's only consumed by humans via arcstats. 2774 */ 2775 if (arc_meta_max < aggsum_upper_bound(&arc_meta_used)) 2776 arc_meta_max = aggsum_upper_bound(&arc_meta_used); 2777 aggsum_add(&arc_meta_used, -space); 2778 } 2779 2780 ASSERT(aggsum_compare(&arc_size, space) >= 0); 2781 aggsum_add(&arc_size, -space); 2782} 2783 2784/* 2785 * Given a hdr and a buf, returns whether that buf can share its b_data buffer 2786 * with the hdr's b_pabd. 2787 */ 2788static boolean_t 2789arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf) 2790{ 2791 /* 2792 * The criteria for sharing a hdr's data are: 2793 * 1. the hdr's compression matches the buf's compression 2794 * 2. the hdr doesn't need to be byteswapped 2795 * 3. the hdr isn't already being shared 2796 * 4. the buf is either compressed or it is the last buf in the hdr list 2797 * 2798 * Criterion #4 maintains the invariant that shared uncompressed 2799 * bufs must be the final buf in the hdr's b_buf list. Reading this, you 2800 * might ask, "if a compressed buf is allocated first, won't that be the 2801 * last thing in the list?", but in that case it's impossible to create 2802 * a shared uncompressed buf anyway (because the hdr must be compressed 2803 * to have the compressed buf). You might also think that #3 is 2804 * sufficient to make this guarantee, however it's possible 2805 * (specifically in the rare L2ARC write race mentioned in 2806 * arc_buf_alloc_impl()) there will be an existing uncompressed buf that 2807 * is sharable, but wasn't at the time of its allocation. Rather than 2808 * allow a new shared uncompressed buf to be created and then shuffle 2809 * the list around to make it the last element, this simply disallows 2810 * sharing if the new buf isn't the first to be added. 
2811 */ 2812 ASSERT3P(buf->b_hdr, ==, hdr); 2813 boolean_t hdr_compressed = HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF; 2814 boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0; 2815 return (buf_compressed == hdr_compressed && 2816 hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS && 2817 !HDR_SHARED_DATA(hdr) && 2818 (ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf))); 2819} 2820 2821/* 2822 * Allocate a buf for this hdr. If you care about the data that's in the hdr, 2823 * or if you want a compressed buffer, pass those flags in. Returns 0 if the 2824 * copy was made successfully, or an error code otherwise. 2825 */ 2826static int 2827arc_buf_alloc_impl(arc_buf_hdr_t *hdr, void *tag, boolean_t compressed, 2828 boolean_t fill, arc_buf_t **ret) 2829{ 2830 arc_buf_t *buf; 2831 2832 ASSERT(HDR_HAS_L1HDR(hdr)); 2833 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 2834 VERIFY(hdr->b_type == ARC_BUFC_DATA || 2835 hdr->b_type == ARC_BUFC_METADATA); 2836 ASSERT3P(ret, !=, NULL); 2837 ASSERT3P(*ret, ==, NULL); 2838 2839 buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE); 2840 buf->b_hdr = hdr; 2841 buf->b_data = NULL; 2842 buf->b_next = hdr->b_l1hdr.b_buf; 2843 buf->b_flags = 0; 2844 2845 add_reference(hdr, tag); 2846 2847 /* 2848 * We're about to change the hdr's b_flags. We must either 2849 * hold the hash_lock or be undiscoverable. 2850 */ 2851 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 2852 2853 /* 2854 * Only honor requests for compressed bufs if the hdr is actually 2855 * compressed. 2856 */ 2857 if (compressed && HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) 2858 buf->b_flags |= ARC_BUF_FLAG_COMPRESSED; 2859 2860 /* 2861 * If the hdr's data can be shared then we share the data buffer and 2862 * set the appropriate bit in the hdr's b_flags to indicate the hdr is 2863 * sharing it's b_pabd with the arc_buf_t. Otherwise, we allocate a new 2864 * buffer to store the buf's data. 2865 * 2866 * There are two additional restrictions here because we're sharing 2867 * hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be 2868 * actively involved in an L2ARC write, because if this buf is used by 2869 * an arc_write() then the hdr's data buffer will be released when the 2870 * write completes, even though the L2ARC write might still be using it. 2871 * Second, the hdr's ABD must be linear so that the buf's user doesn't 2872 * need to be ABD-aware. 2873 */ 2874 boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) && 2875 abd_is_linear(hdr->b_l1hdr.b_pabd); 2876 2877 /* Set up b_data and sharing */ 2878 if (can_share) { 2879 buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd); 2880 buf->b_flags |= ARC_BUF_FLAG_SHARED; 2881 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 2882 } else { 2883 buf->b_data = 2884 arc_get_data_buf(hdr, arc_buf_size(buf), buf); 2885 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 2886 } 2887 VERIFY3P(buf->b_data, !=, NULL); 2888 2889 hdr->b_l1hdr.b_buf = buf; 2890 hdr->b_l1hdr.b_bufcnt += 1; 2891 2892 /* 2893 * If the user wants the data from the hdr, we need to either copy or 2894 * decompress the data. 
2895 */ 2896 if (fill) { 2897 return (arc_buf_fill(buf, ARC_BUF_COMPRESSED(buf) != 0)); 2898 } 2899 2900 return (0); 2901} 2902 2903static char *arc_onloan_tag = "onloan"; 2904 2905static inline void 2906arc_loaned_bytes_update(int64_t delta) 2907{ 2908 atomic_add_64(&arc_loaned_bytes, delta); 2909 2910 /* assert that it did not wrap around */ 2911 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 2912} 2913 2914/* 2915 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in 2916 * flight data by arc_tempreserve_space() until they are "returned". Loaned 2917 * buffers must be returned to the arc before they can be used by the DMU or 2918 * freed. 2919 */ 2920arc_buf_t * 2921arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size) 2922{ 2923 arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag, 2924 is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size); 2925 2926 arc_loaned_bytes_update(arc_buf_size(buf)); 2927 2928 return (buf); 2929} 2930 2931arc_buf_t * 2932arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize, 2933 enum zio_compress compression_type) 2934{ 2935 arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag, 2936 psize, lsize, compression_type); 2937 2938 arc_loaned_bytes_update(arc_buf_size(buf)); 2939 2940 return (buf); 2941} 2942 2943 2944/* 2945 * Return a loaned arc buffer to the arc. 2946 */ 2947void 2948arc_return_buf(arc_buf_t *buf, void *tag) 2949{ 2950 arc_buf_hdr_t *hdr = buf->b_hdr; 2951 2952 ASSERT3P(buf->b_data, !=, NULL); 2953 ASSERT(HDR_HAS_L1HDR(hdr)); 2954 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag); 2955 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2956 2957 arc_loaned_bytes_update(-arc_buf_size(buf)); 2958} 2959 2960/* Detach an arc_buf from a dbuf (tag) */ 2961void 2962arc_loan_inuse_buf(arc_buf_t *buf, void *tag) 2963{ 2964 arc_buf_hdr_t *hdr = buf->b_hdr; 2965 2966 ASSERT3P(buf->b_data, !=, NULL); 2967 ASSERT(HDR_HAS_L1HDR(hdr)); 2968 (void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag); 2969 (void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag); 2970 2971 arc_loaned_bytes_update(arc_buf_size(buf)); 2972} 2973 2974static void 2975l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type) 2976{ 2977 l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP); 2978 2979 df->l2df_abd = abd; 2980 df->l2df_size = size; 2981 df->l2df_type = type; 2982 mutex_enter(&l2arc_free_on_write_mtx); 2983 list_insert_head(l2arc_free_on_write, df); 2984 mutex_exit(&l2arc_free_on_write_mtx); 2985} 2986 2987static void 2988arc_hdr_free_on_write(arc_buf_hdr_t *hdr) 2989{ 2990 arc_state_t *state = hdr->b_l1hdr.b_state; 2991 arc_buf_contents_t type = arc_buf_type(hdr); 2992 uint64_t size = arc_hdr_size(hdr); 2993 2994 /* protected by hash lock, if in the hash table */ 2995 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 2996 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 2997 ASSERT(state != arc_anon && state != arc_l2c_only); 2998 2999 (void) refcount_remove_many(&state->arcs_esize[type], 3000 size, hdr); 3001 } 3002 (void) refcount_remove_many(&state->arcs_size, size, hdr); 3003 if (type == ARC_BUFC_METADATA) { 3004 arc_space_return(size, ARC_SPACE_META); 3005 } else { 3006 ASSERT(type == ARC_BUFC_DATA); 3007 arc_space_return(size, ARC_SPACE_DATA); 3008 } 3009 3010 l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type); 3011} 3012 3013/* 3014 * Share the arc_buf_t's data with the hdr. 
Whenever we are sharing the 3015 * data buffer, we transfer the refcount ownership to the hdr and update 3016 * the appropriate kstats. 3017 */ 3018static void 3019arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3020{ 3021 arc_state_t *state = hdr->b_l1hdr.b_state; 3022 3023 ASSERT(arc_can_share(hdr, buf)); 3024 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3025 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3026 3027 /* 3028 * Start sharing the data buffer. We transfer the 3029 * refcount ownership to the hdr since it always owns 3030 * the refcount whenever an arc_buf_t is shared. 3031 */ 3032 refcount_transfer_ownership(&state->arcs_size, buf, hdr); 3033 hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf)); 3034 abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd, 3035 HDR_ISTYPE_METADATA(hdr)); 3036 arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA); 3037 buf->b_flags |= ARC_BUF_FLAG_SHARED; 3038 3039 /* 3040 * Since we've transferred ownership to the hdr we need 3041 * to increment its compressed and uncompressed kstats and 3042 * decrement the overhead size. 3043 */ 3044 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 3045 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 3046 ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf)); 3047} 3048 3049static void 3050arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3051{ 3052 arc_state_t *state = hdr->b_l1hdr.b_state; 3053 3054 ASSERT(arc_buf_is_shared(buf)); 3055 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3056 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3057 3058 /* 3059 * We are no longer sharing this buffer so we need 3060 * to transfer its ownership to the rightful owner. 3061 */ 3062 refcount_transfer_ownership(&state->arcs_size, hdr, buf); 3063 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 3064 abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd); 3065 abd_put(hdr->b_l1hdr.b_pabd); 3066 hdr->b_l1hdr.b_pabd = NULL; 3067 buf->b_flags &= ~ARC_BUF_FLAG_SHARED; 3068 3069 /* 3070 * Since the buffer is no longer shared between 3071 * the arc buf and the hdr, count it as overhead. 3072 */ 3073 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 3074 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 3075 ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf)); 3076} 3077 3078/* 3079 * Remove an arc_buf_t from the hdr's buf list and return the last 3080 * arc_buf_t on the list. If no buffers remain on the list then return 3081 * NULL. 3082 */ 3083static arc_buf_t * 3084arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf) 3085{ 3086 ASSERT(HDR_HAS_L1HDR(hdr)); 3087 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3088 3089 arc_buf_t **bufp = &hdr->b_l1hdr.b_buf; 3090 arc_buf_t *lastbuf = NULL; 3091 3092 /* 3093 * Remove the buf from the hdr list and locate the last 3094 * remaining buffer on the list. 3095 */ 3096 while (*bufp != NULL) { 3097 if (*bufp == buf) 3098 *bufp = buf->b_next; 3099 3100 /* 3101 * If we've removed a buffer in the middle of 3102 * the list then update the lastbuf and update 3103 * bufp. 
3104 */ 3105 if (*bufp != NULL) { 3106 lastbuf = *bufp; 3107 bufp = &(*bufp)->b_next; 3108 } 3109 } 3110 buf->b_next = NULL; 3111 ASSERT3P(lastbuf, !=, buf); 3112 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL); 3113 IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL); 3114 IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf)); 3115 3116 return (lastbuf); 3117} 3118 3119/* 3120 * Free up buf->b_data and pull the arc_buf_t off of the the arc_buf_hdr_t's 3121 * list and free it. 3122 */ 3123static void 3124arc_buf_destroy_impl(arc_buf_t *buf) 3125{ 3126 arc_buf_hdr_t *hdr = buf->b_hdr; 3127 3128 /* 3129 * Free up the data associated with the buf but only if we're not 3130 * sharing this with the hdr. If we are sharing it with the hdr, the 3131 * hdr is responsible for doing the free. 3132 */ 3133 if (buf->b_data != NULL) { 3134 /* 3135 * We're about to change the hdr's b_flags. We must either 3136 * hold the hash_lock or be undiscoverable. 3137 */ 3138 ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr)); 3139 3140 arc_cksum_verify(buf); 3141#ifdef illumos 3142 arc_buf_unwatch(buf); 3143#endif 3144 3145 if (arc_buf_is_shared(buf)) { 3146 arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA); 3147 } else { 3148 uint64_t size = arc_buf_size(buf); 3149 arc_free_data_buf(hdr, buf->b_data, size, buf); 3150 ARCSTAT_INCR(arcstat_overhead_size, -size); 3151 } 3152 buf->b_data = NULL; 3153 3154 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 3155 hdr->b_l1hdr.b_bufcnt -= 1; 3156 } 3157 3158 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); 3159 3160 if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) { 3161 /* 3162 * If the current arc_buf_t is sharing its data buffer with the 3163 * hdr, then reassign the hdr's b_pabd to share it with the new 3164 * buffer at the end of the list. The shared buffer is always 3165 * the last one on the hdr's buffer list. 3166 * 3167 * There is an equivalent case for compressed bufs, but since 3168 * they aren't guaranteed to be the last buf in the list and 3169 * that is an exceedingly rare case, we just allow that space be 3170 * wasted temporarily. 3171 */ 3172 if (lastbuf != NULL) { 3173 /* Only one buf can be shared at once */ 3174 VERIFY(!arc_buf_is_shared(lastbuf)); 3175 /* hdr is uncompressed so can't have compressed buf */ 3176 VERIFY(!ARC_BUF_COMPRESSED(lastbuf)); 3177 3178 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3179 arc_hdr_free_pabd(hdr); 3180 3181 /* 3182 * We must setup a new shared block between the 3183 * last buffer and the hdr. The data would have 3184 * been allocated by the arc buf so we need to transfer 3185 * ownership to the hdr since it's now being shared. 3186 */ 3187 arc_share_buf(hdr, lastbuf); 3188 } 3189 } else if (HDR_SHARED_DATA(hdr)) { 3190 /* 3191 * Uncompressed shared buffers are always at the end 3192 * of the list. Compressed buffers don't have the 3193 * same requirements. This makes it hard to 3194 * simply assert that the lastbuf is shared so 3195 * we rely on the hdr's compression flags to determine 3196 * if we have a compressed, shared buffer. 3197 */ 3198 ASSERT3P(lastbuf, !=, NULL); 3199 ASSERT(arc_buf_is_shared(lastbuf) || 3200 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF); 3201 } 3202 3203 /* 3204 * Free the checksum if we're removing the last uncompressed buf from 3205 * this hdr. 
3206 */ 3207 if (!arc_hdr_has_uncompressed_buf(hdr)) { 3208 arc_cksum_free(hdr); 3209 } 3210 3211 /* clean up the buf */ 3212 buf->b_hdr = NULL; 3213 kmem_cache_free(buf_cache, buf); 3214} 3215 3216static void 3217arc_hdr_alloc_pabd(arc_buf_hdr_t *hdr, boolean_t do_adapt) 3218{ 3219 ASSERT3U(HDR_GET_LSIZE(hdr), >, 0); 3220 ASSERT(HDR_HAS_L1HDR(hdr)); 3221 ASSERT(!HDR_SHARED_DATA(hdr)); 3222 3223 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3224 hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr, do_adapt); 3225 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 3226 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3227 3228 ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr)); 3229 ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr)); 3230} 3231 3232static void 3233arc_hdr_free_pabd(arc_buf_hdr_t *hdr) 3234{ 3235 ASSERT(HDR_HAS_L1HDR(hdr)); 3236 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 3237 3238 /* 3239 * If the hdr is currently being written to the l2arc then 3240 * we defer freeing the data by adding it to the l2arc_free_on_write 3241 * list. The l2arc will free the data once it's finished 3242 * writing it to the l2arc device. 3243 */ 3244 if (HDR_L2_WRITING(hdr)) { 3245 arc_hdr_free_on_write(hdr); 3246 ARCSTAT_BUMP(arcstat_l2_free_on_write); 3247 } else { 3248 arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, 3249 arc_hdr_size(hdr), hdr); 3250 } 3251 hdr->b_l1hdr.b_pabd = NULL; 3252 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 3253 3254 ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr)); 3255 ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr)); 3256} 3257 3258static arc_buf_hdr_t * 3259arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize, 3260 enum zio_compress compression_type, arc_buf_contents_t type) 3261{ 3262 arc_buf_hdr_t *hdr; 3263 3264 VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA); 3265 3266 hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE); 3267 ASSERT(HDR_EMPTY(hdr)); 3268 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3269 ASSERT3P(hdr->b_l1hdr.b_thawed, ==, NULL); 3270 HDR_SET_PSIZE(hdr, psize); 3271 HDR_SET_LSIZE(hdr, lsize); 3272 hdr->b_spa = spa; 3273 hdr->b_type = type; 3274 hdr->b_flags = 0; 3275 arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR); 3276 arc_hdr_set_compress(hdr, compression_type); 3277 3278 hdr->b_l1hdr.b_state = arc_anon; 3279 hdr->b_l1hdr.b_arc_access = 0; 3280 hdr->b_l1hdr.b_bufcnt = 0; 3281 hdr->b_l1hdr.b_buf = NULL; 3282 3283 /* 3284 * Allocate the hdr's buffer. This will contain either 3285 * the compressed or uncompressed data depending on the block 3286 * it references and compressed arc enablement. 3287 */ 3288 arc_hdr_alloc_pabd(hdr, B_TRUE); 3289 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3290 3291 return (hdr); 3292} 3293 3294/* 3295 * Transition between the two allocation states for the arc_buf_hdr struct. 3296 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without 3297 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller 3298 * version is used when a cache buffer is only in the L2ARC in order to reduce 3299 * memory usage. 
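 *
 * The two directions look like this (illustrative):
 *
 *	nhdr = arc_hdr_realloc(hdr, hdr_full_cache, hdr_l2only_cache);
 *		demote: the data is gone from the L1 cache but still
 *		lives on an L2ARC device, so only the l2arc_buf_hdr_t
 *		is kept
 *
 *	nhdr = arc_hdr_realloc(hdr, hdr_l2only_cache, hdr_full_cache);
 *		promote: e.g. arc_read() bringing a block back in from
 *		the L2ARC re-grows the header before attaching L1 state
 *
 * In both cases the old header is freed, so the caller must hold the
 * hash lock and switch to the returned pointer.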
3300 */ 3301static arc_buf_hdr_t * 3302arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new) 3303{ 3304 ASSERT(HDR_HAS_L2HDR(hdr)); 3305 3306 arc_buf_hdr_t *nhdr; 3307 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3308 3309 ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) || 3310 (old == hdr_l2only_cache && new == hdr_full_cache)); 3311 3312 nhdr = kmem_cache_alloc(new, KM_PUSHPAGE); 3313 3314 ASSERT(MUTEX_HELD(HDR_LOCK(hdr))); 3315 buf_hash_remove(hdr); 3316 3317 bcopy(hdr, nhdr, HDR_L2ONLY_SIZE); 3318 3319 if (new == hdr_full_cache) { 3320 arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR); 3321 /* 3322 * arc_access and arc_change_state need to be aware that a 3323 * header has just come out of L2ARC, so we set its state to 3324 * l2c_only even though it's about to change. 3325 */ 3326 nhdr->b_l1hdr.b_state = arc_l2c_only; 3327 3328 /* Verify previous threads set to NULL before freeing */ 3329 ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL); 3330 } else { 3331 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 3332 ASSERT0(hdr->b_l1hdr.b_bufcnt); 3333 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3334 3335 /* 3336 * If we've reached here, We must have been called from 3337 * arc_evict_hdr(), as such we should have already been 3338 * removed from any ghost list we were previously on 3339 * (which protects us from racing with arc_evict_state), 3340 * thus no locking is needed during this check. 3341 */ 3342 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3343 3344 /* 3345 * A buffer must not be moved into the arc_l2c_only 3346 * state if it's not finished being written out to the 3347 * l2arc device. Otherwise, the b_l1hdr.b_pabd field 3348 * might try to be accessed, even though it was removed. 3349 */ 3350 VERIFY(!HDR_L2_WRITING(hdr)); 3351 VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3352 3353#ifdef ZFS_DEBUG 3354 if (hdr->b_l1hdr.b_thawed != NULL) { 3355 kmem_free(hdr->b_l1hdr.b_thawed, 1); 3356 hdr->b_l1hdr.b_thawed = NULL; 3357 } 3358#endif 3359 3360 arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR); 3361 } 3362 /* 3363 * The header has been reallocated so we need to re-insert it into any 3364 * lists it was on. 3365 */ 3366 (void) buf_hash_insert(nhdr, NULL); 3367 3368 ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node)); 3369 3370 mutex_enter(&dev->l2ad_mtx); 3371 3372 /* 3373 * We must place the realloc'ed header back into the list at 3374 * the same spot. Otherwise, if it's placed earlier in the list, 3375 * l2arc_write_buffers() could find it during the function's 3376 * write phase, and try to write it out to the l2arc. 3377 */ 3378 list_insert_after(&dev->l2ad_buflist, hdr, nhdr); 3379 list_remove(&dev->l2ad_buflist, hdr); 3380 3381 mutex_exit(&dev->l2ad_mtx); 3382 3383 /* 3384 * Since we're using the pointer address as the tag when 3385 * incrementing and decrementing the l2ad_alloc refcount, we 3386 * must remove the old pointer (that we're about to destroy) and 3387 * add the new pointer to the refcount. Otherwise we'd remove 3388 * the wrong pointer address when calling arc_hdr_destroy() later. 3389 */ 3390 3391 (void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr); 3392 (void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr); 3393 3394 buf_discard_identity(hdr); 3395 kmem_cache_free(old, hdr); 3396 3397 return (nhdr); 3398} 3399 3400/* 3401 * Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller. 3402 * The buf is returned thawed since we expect the consumer to modify it. 
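 *
 * A minimal usage sketch (hypothetical caller; 'src' and 'size' are
 * stand-ins):
 *
 *	arc_buf_t *buf = arc_alloc_buf(spa, FTAG, ARC_BUFC_DATA, size);
 *	bcopy(src, buf->b_data, size);	(thawed, so it may be modified)
 *	...
 *	arc_buf_destroy(buf, FTAG);	(release the reference when done)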
3403 */ 3404arc_buf_t * 3405arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size) 3406{ 3407 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size, 3408 ZIO_COMPRESS_OFF, type); 3409 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3410 3411 arc_buf_t *buf = NULL; 3412 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_FALSE, B_FALSE, &buf)); 3413 arc_buf_thaw(buf); 3414 3415 return (buf); 3416} 3417 3418/* 3419 * Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this 3420 * for bufs containing metadata. 3421 */ 3422arc_buf_t * 3423arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize, 3424 enum zio_compress compression_type) 3425{ 3426 ASSERT3U(lsize, >, 0); 3427 ASSERT3U(lsize, >=, psize); 3428 ASSERT(compression_type > ZIO_COMPRESS_OFF); 3429 ASSERT(compression_type < ZIO_COMPRESS_FUNCTIONS); 3430 3431 arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 3432 compression_type, ARC_BUFC_DATA); 3433 ASSERT(!MUTEX_HELD(HDR_LOCK(hdr))); 3434 3435 arc_buf_t *buf = NULL; 3436 VERIFY0(arc_buf_alloc_impl(hdr, tag, B_TRUE, B_FALSE, &buf)); 3437 arc_buf_thaw(buf); 3438 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 3439 3440 if (!arc_buf_is_shared(buf)) { 3441 /* 3442 * To ensure that the hdr has the correct data in it if we call 3443 * arc_decompress() on this buf before it's been written to 3444 * disk, it's easiest if we just set up sharing between the 3445 * buf and the hdr. 3446 */ 3447 ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd)); 3448 arc_hdr_free_pabd(hdr); 3449 arc_share_buf(hdr, buf); 3450 } 3451 3452 return (buf); 3453} 3454 3455static void 3456arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr) 3457{ 3458 l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr; 3459 l2arc_dev_t *dev = l2hdr->b_dev; 3460 uint64_t psize = arc_hdr_size(hdr); 3461 3462 ASSERT(MUTEX_HELD(&dev->l2ad_mtx)); 3463 ASSERT(HDR_HAS_L2HDR(hdr)); 3464 3465 list_remove(&dev->l2ad_buflist, hdr); 3466 3467 ARCSTAT_INCR(arcstat_l2_psize, -psize); 3468 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 3469 3470 vdev_space_update(dev->l2ad_vdev, -psize, 0, 0); 3471 3472 (void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr); 3473 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 3474} 3475 3476static void 3477arc_hdr_destroy(arc_buf_hdr_t *hdr) 3478{ 3479 if (HDR_HAS_L1HDR(hdr)) { 3480 ASSERT(hdr->b_l1hdr.b_buf == NULL || 3481 hdr->b_l1hdr.b_bufcnt > 0); 3482 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 3483 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 3484 } 3485 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3486 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 3487 3488 if (!HDR_EMPTY(hdr)) 3489 buf_discard_identity(hdr); 3490 3491 if (HDR_HAS_L2HDR(hdr)) { 3492 l2arc_dev_t *dev = hdr->b_l2hdr.b_dev; 3493 boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx); 3494 3495 if (!buflist_held) 3496 mutex_enter(&dev->l2ad_mtx); 3497 3498 /* 3499 * Even though we checked this conditional above, we 3500 * need to check this again now that we have the 3501 * l2ad_mtx. This is because we could be racing with 3502 * another thread calling l2arc_evict() which might have 3503 * destroyed this header's L2 portion as we were waiting 3504 * to acquire the l2ad_mtx. If that happens, we don't 3505 * want to re-destroy the header's L2 portion. 
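 *
 * In outline this is the usual unlocked-check / lock / re-check pattern
 * (a condensed sketch of the code below, omitting l2arc_trim() and the
 * buflist_held bookkeeping):
 *
 *	if (HDR_HAS_L2HDR(hdr)) {
 *		mutex_enter(&dev->l2ad_mtx);
 *		if (HDR_HAS_L2HDR(hdr))
 *			arc_hdr_l2hdr_destroy(hdr);
 *		mutex_exit(&dev->l2ad_mtx);
 *	}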
3506 */ 3507 if (HDR_HAS_L2HDR(hdr)) { 3508 l2arc_trim(hdr); 3509 arc_hdr_l2hdr_destroy(hdr); 3510 } 3511 3512 if (!buflist_held) 3513 mutex_exit(&dev->l2ad_mtx); 3514 } 3515 3516 if (HDR_HAS_L1HDR(hdr)) { 3517 arc_cksum_free(hdr); 3518 3519 while (hdr->b_l1hdr.b_buf != NULL) 3520 arc_buf_destroy_impl(hdr->b_l1hdr.b_buf); 3521 3522#ifdef ZFS_DEBUG 3523 if (hdr->b_l1hdr.b_thawed != NULL) { 3524 kmem_free(hdr->b_l1hdr.b_thawed, 1); 3525 hdr->b_l1hdr.b_thawed = NULL; 3526 } 3527#endif 3528 3529 if (hdr->b_l1hdr.b_pabd != NULL) { 3530 arc_hdr_free_pabd(hdr); 3531 } 3532 } 3533 3534 ASSERT3P(hdr->b_hash_next, ==, NULL); 3535 if (HDR_HAS_L1HDR(hdr)) { 3536 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 3537 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 3538 kmem_cache_free(hdr_full_cache, hdr); 3539 } else { 3540 kmem_cache_free(hdr_l2only_cache, hdr); 3541 } 3542} 3543 3544void 3545arc_buf_destroy(arc_buf_t *buf, void* tag) 3546{ 3547 arc_buf_hdr_t *hdr = buf->b_hdr; 3548 kmutex_t *hash_lock = HDR_LOCK(hdr); 3549 3550 if (hdr->b_l1hdr.b_state == arc_anon) { 3551 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 3552 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3553 VERIFY0(remove_reference(hdr, NULL, tag)); 3554 arc_hdr_destroy(hdr); 3555 return; 3556 } 3557 3558 mutex_enter(hash_lock); 3559 ASSERT3P(hdr, ==, buf->b_hdr); 3560 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 3561 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 3562 ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon); 3563 ASSERT3P(buf->b_data, !=, NULL); 3564 3565 (void) remove_reference(hdr, hash_lock, tag); 3566 arc_buf_destroy_impl(buf); 3567 mutex_exit(hash_lock); 3568} 3569 3570/* 3571 * Evict the arc_buf_hdr that is provided as a parameter. The resultant 3572 * state of the header is dependent on it's state prior to entering this 3573 * function. The following transitions are possible: 3574 * 3575 * - arc_mru -> arc_mru_ghost 3576 * - arc_mfu -> arc_mfu_ghost 3577 * - arc_mru_ghost -> arc_l2c_only 3578 * - arc_mru_ghost -> deleted 3579 * - arc_mfu_ghost -> arc_l2c_only 3580 * - arc_mfu_ghost -> deleted 3581 */ 3582static int64_t 3583arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 3584{ 3585 arc_state_t *evicted_state, *state; 3586 int64_t bytes_evicted = 0; 3587 int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ? 3588 zfs_arc_min_prescient_prefetch_ms : zfs_arc_min_prefetch_ms; 3589 3590 ASSERT(MUTEX_HELD(hash_lock)); 3591 ASSERT(HDR_HAS_L1HDR(hdr)); 3592 3593 state = hdr->b_l1hdr.b_state; 3594 if (GHOST_STATE(state)) { 3595 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 3596 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 3597 3598 /* 3599 * l2arc_write_buffers() relies on a header's L1 portion 3600 * (i.e. its b_pabd field) during it's write phase. 3601 * Thus, we cannot push a header onto the arc_l2c_only 3602 * state (removing it's L1 piece) until the header is 3603 * done being written to the l2arc. 3604 */ 3605 if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) { 3606 ARCSTAT_BUMP(arcstat_evict_l2_skip); 3607 return (bytes_evicted); 3608 } 3609 3610 ARCSTAT_BUMP(arcstat_deleted); 3611 bytes_evicted += HDR_GET_LSIZE(hdr); 3612 3613 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr); 3614 3615 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 3616 if (HDR_HAS_L2HDR(hdr)) { 3617 /* 3618 * This buffer is cached on the 2nd Level ARC; 3619 * don't destroy the header. 3620 */ 3621 arc_change_state(arc_l2c_only, hdr, hash_lock); 3622 /* 3623 * dropping from L1+L2 cached to L2-only, 3624 * realloc to remove the L1 header. 
3625 */ 3626 hdr = arc_hdr_realloc(hdr, hdr_full_cache, 3627 hdr_l2only_cache); 3628 } else { 3629 arc_change_state(arc_anon, hdr, hash_lock); 3630 arc_hdr_destroy(hdr); 3631 } 3632 return (bytes_evicted); 3633 } 3634 3635 ASSERT(state == arc_mru || state == arc_mfu); 3636 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost; 3637 3638 /* prefetch buffers have a minimum lifespan */ 3639 if (HDR_IO_IN_PROGRESS(hdr) || 3640 ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) && 3641 ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access < min_lifetime * hz)) { 3642 ARCSTAT_BUMP(arcstat_evict_skip); 3643 return (bytes_evicted); 3644 } 3645 3646 ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt)); 3647 while (hdr->b_l1hdr.b_buf) { 3648 arc_buf_t *buf = hdr->b_l1hdr.b_buf; 3649 if (!mutex_tryenter(&buf->b_evict_lock)) { 3650 ARCSTAT_BUMP(arcstat_mutex_miss); 3651 break; 3652 } 3653 if (buf->b_data != NULL) 3654 bytes_evicted += HDR_GET_LSIZE(hdr); 3655 mutex_exit(&buf->b_evict_lock); 3656 arc_buf_destroy_impl(buf); 3657 } 3658 3659 if (HDR_HAS_L2HDR(hdr)) { 3660 ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr)); 3661 } else { 3662 if (l2arc_write_eligible(hdr->b_spa, hdr)) { 3663 ARCSTAT_INCR(arcstat_evict_l2_eligible, 3664 HDR_GET_LSIZE(hdr)); 3665 } else { 3666 ARCSTAT_INCR(arcstat_evict_l2_ineligible, 3667 HDR_GET_LSIZE(hdr)); 3668 } 3669 } 3670 3671 if (hdr->b_l1hdr.b_bufcnt == 0) { 3672 arc_cksum_free(hdr); 3673 3674 bytes_evicted += arc_hdr_size(hdr); 3675 3676 /* 3677 * If this hdr is being evicted and has a compressed 3678 * buffer then we discard it here before we change states. 3679 * This ensures that the accounting is updated correctly 3680 * in arc_free_data_impl(). 3681 */ 3682 arc_hdr_free_pabd(hdr); 3683 3684 arc_change_state(evicted_state, hdr, hash_lock); 3685 ASSERT(HDR_IN_HASH_TABLE(hdr)); 3686 arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE); 3687 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr); 3688 } 3689 3690 return (bytes_evicted); 3691} 3692 3693static uint64_t 3694arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker, 3695 uint64_t spa, int64_t bytes) 3696{ 3697 multilist_sublist_t *mls; 3698 uint64_t bytes_evicted = 0; 3699 arc_buf_hdr_t *hdr; 3700 kmutex_t *hash_lock; 3701 int evict_count = 0; 3702 3703 ASSERT3P(marker, !=, NULL); 3704 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3705 3706 mls = multilist_sublist_lock(ml, idx); 3707 3708 for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL; 3709 hdr = multilist_sublist_prev(mls, marker)) { 3710 if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) || 3711 (evict_count >= zfs_arc_evict_batch_limit)) 3712 break; 3713 3714 /* 3715 * To keep our iteration location, move the marker 3716 * forward. Since we're not holding hdr's hash lock, we 3717 * must be very careful and not remove 'hdr' from the 3718 * sublist. Otherwise, other consumers might mistake the 3719 * 'hdr' as not being on a sublist when they call the 3720 * multilist_link_active() function (they all rely on 3721 * the hash lock protecting concurrent insertions and 3722 * removals). multilist_sublist_move_forward() was 3723 * specifically implemented to ensure this is the case 3724 * (only 'marker' will be removed and re-inserted). 3725 */ 3726 multilist_sublist_move_forward(mls, marker); 3727 3728 /* 3729 * The only case where the b_spa field should ever be 3730 * zero, is the marker headers inserted by 3731 * arc_evict_state(). It's possible for multiple threads 3732 * to be calling arc_evict_state() concurrently (e.g. 
3733 * dsl_pool_close() and zio_inject_fault()), so we must 3734 * skip any markers we see from these other threads. 3735 */ 3736 if (hdr->b_spa == 0) 3737 continue; 3738 3739 /* we're only interested in evicting buffers of a certain spa */ 3740 if (spa != 0 && hdr->b_spa != spa) { 3741 ARCSTAT_BUMP(arcstat_evict_skip); 3742 continue; 3743 } 3744 3745 hash_lock = HDR_LOCK(hdr); 3746 3747 /* 3748 * We aren't calling this function from any code path 3749 * that would already be holding a hash lock, so we're 3750 * asserting on this assumption to be defensive in case 3751 * this ever changes. Without this check, it would be 3752 * possible to incorrectly increment arcstat_mutex_miss 3753 * below (e.g. if the code changed such that we called 3754 * this function with a hash lock held). 3755 */ 3756 ASSERT(!MUTEX_HELD(hash_lock)); 3757 3758 if (mutex_tryenter(hash_lock)) { 3759 uint64_t evicted = arc_evict_hdr(hdr, hash_lock); 3760 mutex_exit(hash_lock); 3761 3762 bytes_evicted += evicted; 3763 3764 /* 3765 * If evicted is zero, arc_evict_hdr() must have 3766 * decided to skip this header, don't increment 3767 * evict_count in this case. 3768 */ 3769 if (evicted != 0) 3770 evict_count++; 3771 3772 /* 3773 * If arc_size isn't overflowing, signal any 3774 * threads that might happen to be waiting. 3775 * 3776 * For each header evicted, we wake up a single 3777 * thread. If we used cv_broadcast, we could 3778 * wake up "too many" threads causing arc_size 3779 * to significantly overflow arc_c; since 3780 * arc_get_data_impl() doesn't check for overflow 3781 * when it's woken up (it doesn't because it's 3782 * possible for the ARC to be overflowing while 3783 * full of un-evictable buffers, and the 3784 * function should proceed in this case). 3785 * 3786 * If threads are left sleeping, due to not 3787 * using cv_broadcast here, they will be woken 3788 * up via cv_broadcast in arc_adjust_cb() just 3789 * before arc_adjust_zthr sleeps. 3790 */ 3791 mutex_enter(&arc_adjust_lock); 3792 if (!arc_is_overflowing()) 3793 cv_signal(&arc_adjust_waiters_cv); 3794 mutex_exit(&arc_adjust_lock); 3795 } else { 3796 ARCSTAT_BUMP(arcstat_mutex_miss); 3797 } 3798 } 3799 3800 multilist_sublist_unlock(mls); 3801 3802 return (bytes_evicted); 3803} 3804 3805/* 3806 * Evict buffers from the given arc state, until we've removed the 3807 * specified number of bytes. Move the removed buffers to the 3808 * appropriate evict state. 3809 * 3810 * This function makes a "best effort". It skips over any buffers 3811 * it can't get a hash_lock on, and so, may not catch all candidates. 3812 * It may also return without evicting as much space as requested. 3813 * 3814 * If bytes is specified using the special value ARC_EVICT_ALL, this 3815 * will evict all available (i.e. unlocked and evictable) buffers from 3816 * the given arc state; which is used by arc_flush(). 3817 */ 3818static uint64_t 3819arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes, 3820 arc_buf_contents_t type) 3821{ 3822 uint64_t total_evicted = 0; 3823 multilist_t *ml = state->arcs_list[type]; 3824 int num_sublists; 3825 arc_buf_hdr_t **markers; 3826 3827 IMPLY(bytes < 0, bytes == ARC_EVICT_ALL); 3828 3829 num_sublists = multilist_get_num_sublists(ml); 3830 3831 /* 3832 * If we've tried to evict from each sublist, made some 3833 * progress, but still have not hit the target number of bytes 3834 * to evict, we want to keep trying. 
The markers allow us to 3835 * pick up where we left off for each individual sublist, rather 3836 * than starting from the tail each time. 3837 */ 3838 markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP); 3839 for (int i = 0; i < num_sublists; i++) { 3840 markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP); 3841 3842 /* 3843 * A b_spa of 0 is used to indicate that this header is 3844 * a marker. This fact is used in arc_adjust_type() and 3845 * arc_evict_state_impl(). 3846 */ 3847 markers[i]->b_spa = 0; 3848 3849 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3850 multilist_sublist_insert_tail(mls, markers[i]); 3851 multilist_sublist_unlock(mls); 3852 } 3853 3854 /* 3855 * While we haven't hit our target number of bytes to evict, or 3856 * we're evicting all available buffers. 3857 */ 3858 while (total_evicted < bytes || bytes == ARC_EVICT_ALL) { 3859 /* 3860 * Start eviction using a randomly selected sublist, 3861 * this is to try and evenly balance eviction across all 3862 * sublists. Always starting at the same sublist 3863 * (e.g. index 0) would cause evictions to favor certain 3864 * sublists over others. 3865 */ 3866 int sublist_idx = multilist_get_random_index(ml); 3867 uint64_t scan_evicted = 0; 3868 3869 for (int i = 0; i < num_sublists; i++) { 3870 uint64_t bytes_remaining; 3871 uint64_t bytes_evicted; 3872 3873 if (bytes == ARC_EVICT_ALL) 3874 bytes_remaining = ARC_EVICT_ALL; 3875 else if (total_evicted < bytes) 3876 bytes_remaining = bytes - total_evicted; 3877 else 3878 break; 3879 3880 bytes_evicted = arc_evict_state_impl(ml, sublist_idx, 3881 markers[sublist_idx], spa, bytes_remaining); 3882 3883 scan_evicted += bytes_evicted; 3884 total_evicted += bytes_evicted; 3885 3886 /* we've reached the end, wrap to the beginning */ 3887 if (++sublist_idx >= num_sublists) 3888 sublist_idx = 0; 3889 } 3890 3891 /* 3892 * If we didn't evict anything during this scan, we have 3893 * no reason to believe we'll evict more during another 3894 * scan, so break the loop. 3895 */ 3896 if (scan_evicted == 0) { 3897 /* This isn't possible, let's make that obvious */ 3898 ASSERT3S(bytes, !=, 0); 3899 3900 /* 3901 * When bytes is ARC_EVICT_ALL, the only way to 3902 * break the loop is when scan_evicted is zero. 3903 * In that case, we actually have evicted enough, 3904 * so we don't want to increment the kstat. 3905 */ 3906 if (bytes != ARC_EVICT_ALL) { 3907 ASSERT3S(total_evicted, <, bytes); 3908 ARCSTAT_BUMP(arcstat_evict_not_enough); 3909 } 3910 3911 break; 3912 } 3913 } 3914 3915 for (int i = 0; i < num_sublists; i++) { 3916 multilist_sublist_t *mls = multilist_sublist_lock(ml, i); 3917 multilist_sublist_remove(mls, markers[i]); 3918 multilist_sublist_unlock(mls); 3919 3920 kmem_cache_free(hdr_full_cache, markers[i]); 3921 } 3922 kmem_free(markers, sizeof (*markers) * num_sublists); 3923 3924 return (total_evicted); 3925} 3926 3927/* 3928 * Flush all "evictable" data of the given type from the arc state 3929 * specified. This will not evict any "active" buffers (i.e. referenced). 3930 * 3931 * When 'retry' is set to B_FALSE, the function will make a single pass 3932 * over the state and evict any buffers that it can. Since it doesn't 3933 * continually retry the eviction, it might end up leaving some buffers 3934 * in the ARC due to lock misses. 3935 * 3936 * When 'retry' is set to B_TRUE, the function will continually retry the 3937 * eviction until *all* evictable buffers have been removed from the 3938 * state. 
As a result, if concurrent insertions into the state are 3939 * allowed (e.g. if the ARC isn't shutting down), this function might 3940 * wind up in an infinite loop, continually trying to evict buffers. 3941 */ 3942static uint64_t 3943arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type, 3944 boolean_t retry) 3945{ 3946 uint64_t evicted = 0; 3947 3948 while (refcount_count(&state->arcs_esize[type]) != 0) { 3949 evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type); 3950 3951 if (!retry) 3952 break; 3953 } 3954 3955 return (evicted); 3956} 3957 3958/* 3959 * Evict the specified number of bytes from the state specified, 3960 * restricting eviction to the spa and type given. This function 3961 * prevents us from trying to evict more from a state's list than 3962 * is "evictable", and to skip evicting altogether when passed a 3963 * negative value for "bytes". In contrast, arc_evict_state() will 3964 * evict everything it can, when passed a negative value for "bytes". 3965 */ 3966static uint64_t 3967arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes, 3968 arc_buf_contents_t type) 3969{ 3970 int64_t delta; 3971 3972 if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) { 3973 delta = MIN(refcount_count(&state->arcs_esize[type]), bytes); 3974 return (arc_evict_state(state, spa, delta, type)); 3975 } 3976 3977 return (0); 3978} 3979 3980/* 3981 * Evict metadata buffers from the cache, such that arc_meta_used is 3982 * capped by the arc_meta_limit tunable. 3983 */ 3984static uint64_t 3985arc_adjust_meta(uint64_t meta_used) 3986{ 3987 uint64_t total_evicted = 0; 3988 int64_t target; 3989 3990 /* 3991 * If we're over the meta limit, we want to evict enough 3992 * metadata to get back under the meta limit. We don't want to 3993 * evict so much that we drop the MRU below arc_p, though. If 3994 * we're over the meta limit more than we're over arc_p, we 3995 * evict some from the MRU here, and some from the MFU below. 3996 */ 3997 target = MIN((int64_t)(meta_used - arc_meta_limit), 3998 (int64_t)(refcount_count(&arc_anon->arcs_size) + 3999 refcount_count(&arc_mru->arcs_size) - arc_p)); 4000 4001 total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4002 4003 /* 4004 * Similar to the above, we want to evict enough bytes to get us 4005 * below the meta limit, but not so much as to drop us below the 4006 * space allotted to the MFU (which is defined as arc_c - arc_p). 4007 */ 4008 target = MIN((int64_t)(meta_used - arc_meta_limit), 4009 (int64_t)(refcount_count(&arc_mfu->arcs_size) - 4010 (arc_c - arc_p))); 4011 4012 total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4013 4014 return (total_evicted); 4015} 4016 4017/* 4018 * Return the type of the oldest buffer in the given arc state 4019 * 4020 * This function will select a random sublist of type ARC_BUFC_DATA and 4021 * a random sublist of type ARC_BUFC_METADATA. The tail of each sublist 4022 * is compared, and the type which contains the "older" buffer will be 4023 * returned. 
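 *
 * For example (hypothetical b_arc_access values): if the tail data
 * header was last touched at lbolt 1000 and the tail metadata header at
 * lbolt 4000, the data header is older, so ARC_BUFC_DATA is returned
 * and data buffers are evicted first.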
4024 */ 4025static arc_buf_contents_t 4026arc_adjust_type(arc_state_t *state) 4027{ 4028 multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA]; 4029 multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA]; 4030 int data_idx = multilist_get_random_index(data_ml); 4031 int meta_idx = multilist_get_random_index(meta_ml); 4032 multilist_sublist_t *data_mls; 4033 multilist_sublist_t *meta_mls; 4034 arc_buf_contents_t type; 4035 arc_buf_hdr_t *data_hdr; 4036 arc_buf_hdr_t *meta_hdr; 4037 4038 /* 4039 * We keep the sublist lock until we're finished, to prevent 4040 * the headers from being destroyed via arc_evict_state(). 4041 */ 4042 data_mls = multilist_sublist_lock(data_ml, data_idx); 4043 meta_mls = multilist_sublist_lock(meta_ml, meta_idx); 4044 4045 /* 4046 * These two loops are to ensure we skip any markers that 4047 * might be at the tail of the lists due to arc_evict_state(). 4048 */ 4049 4050 for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL; 4051 data_hdr = multilist_sublist_prev(data_mls, data_hdr)) { 4052 if (data_hdr->b_spa != 0) 4053 break; 4054 } 4055 4056 for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL; 4057 meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) { 4058 if (meta_hdr->b_spa != 0) 4059 break; 4060 } 4061 4062 if (data_hdr == NULL && meta_hdr == NULL) { 4063 type = ARC_BUFC_DATA; 4064 } else if (data_hdr == NULL) { 4065 ASSERT3P(meta_hdr, !=, NULL); 4066 type = ARC_BUFC_METADATA; 4067 } else if (meta_hdr == NULL) { 4068 ASSERT3P(data_hdr, !=, NULL); 4069 type = ARC_BUFC_DATA; 4070 } else { 4071 ASSERT3P(data_hdr, !=, NULL); 4072 ASSERT3P(meta_hdr, !=, NULL); 4073 4074 /* The headers can't be on the sublist without an L1 header */ 4075 ASSERT(HDR_HAS_L1HDR(data_hdr)); 4076 ASSERT(HDR_HAS_L1HDR(meta_hdr)); 4077 4078 if (data_hdr->b_l1hdr.b_arc_access < 4079 meta_hdr->b_l1hdr.b_arc_access) { 4080 type = ARC_BUFC_DATA; 4081 } else { 4082 type = ARC_BUFC_METADATA; 4083 } 4084 } 4085 4086 multilist_sublist_unlock(meta_mls); 4087 multilist_sublist_unlock(data_mls); 4088 4089 return (type); 4090} 4091 4092/* 4093 * Evict buffers from the cache, such that arc_size is capped by arc_c. 4094 */ 4095static uint64_t 4096arc_adjust(void) 4097{ 4098 uint64_t total_evicted = 0; 4099 uint64_t bytes; 4100 int64_t target; 4101 uint64_t asize = aggsum_value(&arc_size); 4102 uint64_t ameta = aggsum_value(&arc_meta_used); 4103 4104 /* 4105 * If we're over arc_meta_limit, we want to correct that before 4106 * potentially evicting data buffers below. 4107 */ 4108 total_evicted += arc_adjust_meta(ameta); 4109 4110 /* 4111 * Adjust MRU size 4112 * 4113 * If we're over the target cache size, we want to evict enough 4114 * from the list to get back to our target size. We don't want 4115 * to evict too much from the MRU, such that it drops below 4116 * arc_p. So, if we're over our target cache size more than 4117 * the MRU is over arc_p, we'll evict enough to get back to 4118 * arc_p here, and then evict more from the MFU below. 4119 */ 4120 target = MIN((int64_t)(asize - arc_c), 4121 (int64_t)(refcount_count(&arc_anon->arcs_size) + 4122 refcount_count(&arc_mru->arcs_size) + ameta - arc_p)); 4123 4124 /* 4125 * If we're below arc_meta_min, always prefer to evict data. 4126 * Otherwise, try to satisfy the requested number of bytes to 4127 * evict from the type which contains older buffers; in an 4128 * effort to keep newer buffers in the cache regardless of their 4129 * type. 
If we cannot satisfy the number of bytes from this 4130 * type, spill over into the next type. 4131 */ 4132 if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA && 4133 ameta > arc_meta_min) { 4134 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4135 total_evicted += bytes; 4136 4137 /* 4138 * If we couldn't evict our target number of bytes from 4139 * metadata, we try to get the rest from data. 4140 */ 4141 target -= bytes; 4142 4143 total_evicted += 4144 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 4145 } else { 4146 bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA); 4147 total_evicted += bytes; 4148 4149 /* 4150 * If we couldn't evict our target number of bytes from 4151 * data, we try to get the rest from metadata. 4152 */ 4153 target -= bytes; 4154 4155 total_evicted += 4156 arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA); 4157 } 4158 4159 /* 4160 * Re-sum ARC stats after the first round of evictions. 4161 */ 4162 asize = aggsum_value(&arc_size); 4163 ameta = aggsum_value(&arc_meta_used); 4164 4165 /* 4166 * Adjust MFU size 4167 * 4168 * Now that we've tried to evict enough from the MRU to get its 4169 * size back to arc_p, if we're still above the target cache 4170 * size, we evict the rest from the MFU. 4171 */ 4172 target = asize - arc_c; 4173 4174 if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA && 4175 ameta > arc_meta_min) { 4176 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4177 total_evicted += bytes; 4178 4179 /* 4180 * If we couldn't evict our target number of bytes from 4181 * metadata, we try to get the rest from data. 4182 */ 4183 target -= bytes; 4184 4185 total_evicted += 4186 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 4187 } else { 4188 bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA); 4189 total_evicted += bytes; 4190 4191 /* 4192 * If we couldn't evict our target number of bytes from 4193 * data, we try to get the rest from metadata. 4194 */ 4195 target -= bytes; 4196 4197 total_evicted += 4198 arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA); 4199 } 4200 4201 /* 4202 * Adjust ghost lists 4203 * 4204 * In addition to the above, the ARC also defines target values 4205 * for the ghost lists. The sum of the mru list and mru ghost 4206 * list should never exceed the target size of the cache, and 4207 * the sum of the mru list, mfu list, mru ghost list, and mfu 4208 * ghost list should never exceed twice the target size of the 4209 * cache. The following logic enforces these limits on the ghost 4210 * caches, and evicts from them as needed.
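 *
 * A worked example with made-up numbers: if arc_c is 1000 MB, the mru
 * list holds 600 MB, and the mru ghost list holds 700 MB, then
 *
 *	target = 600 MB + 700 MB - 1000 MB = 300 MB
 *
 * so roughly 300 MB is trimmed from the mru ghost list to restore
 * mru + mru ghost <= arc_c; the mfu ghost list is then trimmed the same
 * way against mru ghost + mfu ghost <= arc_c.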
4211 */ 4212 target = refcount_count(&arc_mru->arcs_size) + 4213 refcount_count(&arc_mru_ghost->arcs_size) - arc_c; 4214 4215 bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA); 4216 total_evicted += bytes; 4217 4218 target -= bytes; 4219 4220 total_evicted += 4221 arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA); 4222 4223 /* 4224 * We assume the sum of the mru list and mfu list is less than 4225 * or equal to arc_c (we enforced this above), which means we 4226 * can use the simpler of the two equations below: 4227 * 4228 * mru + mfu + mru ghost + mfu ghost <= 2 * arc_c 4229 * mru ghost + mfu ghost <= arc_c 4230 */ 4231 target = refcount_count(&arc_mru_ghost->arcs_size) + 4232 refcount_count(&arc_mfu_ghost->arcs_size) - arc_c; 4233 4234 bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA); 4235 total_evicted += bytes; 4236 4237 target -= bytes; 4238 4239 total_evicted += 4240 arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA); 4241 4242 return (total_evicted); 4243} 4244 4245void 4246arc_flush(spa_t *spa, boolean_t retry) 4247{ 4248 uint64_t guid = 0; 4249 4250 /* 4251 * If retry is B_TRUE, a spa must not be specified since we have 4252 * no good way to determine if all of a spa's buffers have been 4253 * evicted from an arc state. 4254 */ 4255 ASSERT(!retry || spa == 0); 4256 4257 if (spa != NULL) 4258 guid = spa_load_guid(spa); 4259 4260 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry); 4261 (void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry); 4262 4263 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry); 4264 (void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry); 4265 4266 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry); 4267 (void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry); 4268 4269 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry); 4270 (void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry); 4271} 4272 4273static void 4274arc_reduce_target_size(int64_t to_free) 4275{ 4276 uint64_t asize = aggsum_value(&arc_size); 4277 if (arc_c > arc_c_min) { 4278 DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t, 4279 arc_c_min, uint64_t, arc_p, uint64_t, to_free); 4280 if (arc_c > arc_c_min + to_free) 4281 atomic_add_64(&arc_c, -to_free); 4282 else 4283 arc_c = arc_c_min; 4284 4285 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift)); 4286 if (asize < arc_c) 4287 arc_c = MAX(asize, arc_c_min); 4288 if (arc_p > arc_c) 4289 arc_p = (arc_c >> 1); 4290 4291 DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t, 4292 arc_p); 4293 4294 ASSERT(arc_c >= arc_c_min); 4295 ASSERT((int64_t)arc_p >= 0); 4296 } 4297 4298 if (asize > arc_c) { 4299 DTRACE_PROBE2(arc__shrink_adjust, uint64_t, asize, 4300 uint64_t, arc_c); 4301 /* See comment in arc_adjust_cb_check() on why lock+flag */ 4302 mutex_enter(&arc_adjust_lock); 4303 arc_adjust_needed = B_TRUE; 4304 mutex_exit(&arc_adjust_lock); 4305 zthr_wakeup(arc_adjust_zthr); 4306 } 4307} 4308 4309typedef enum free_memory_reason_t { 4310 FMR_UNKNOWN, 4311 FMR_NEEDFREE, 4312 FMR_LOTSFREE, 4313 FMR_SWAPFS_MINFREE, 4314 FMR_PAGES_PP_MAXIMUM, 4315 FMR_HEAP_ARENA, 4316 FMR_ZIO_ARENA, 4317 FMR_ZIO_FRAG, 4318} free_memory_reason_t; 4319 4320int64_t last_free_memory; 4321free_memory_reason_t last_free_reason; 4322 4323/* 4324 * Additional reserve of pages for pp_reserve. 4325 */ 4326int64_t arc_pages_pp_reserve = 64; 4327 4328/* 4329 * Additional reserve of pages for swapfs. 
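 *
 * Like arc_pages_pp_reserve above, this feeds the swapfs headroom check
 * in arc_available_memory(), roughly (a sketch of the expression used
 * there):
 *
 *	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
 *	    desfree - arc_swapfs_reserve);
 *
 * so a larger reserve makes the ARC start reclaiming sooner.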
4330 */ 4331int64_t arc_swapfs_reserve = 64; 4332 4333/* 4334 * Return the amount of memory that can be consumed before reclaim will be 4335 * needed. Positive if there is sufficient free memory, negative indicates 4336 * the amount of memory that needs to be freed up. 4337 */ 4338static int64_t 4339arc_available_memory(void) 4340{ 4341 int64_t lowest = INT64_MAX; 4342 int64_t n; 4343 free_memory_reason_t r = FMR_UNKNOWN; 4344 4345#ifdef _KERNEL 4346#ifdef __FreeBSD__ 4347 /* 4348 * Cooperate with pagedaemon when it's time for it to scan 4349 * and reclaim some pages. 4350 */ 4351 n = PAGESIZE * ((int64_t)freemem - zfs_arc_free_target); 4352 if (n < lowest) { 4353 lowest = n; 4354 r = FMR_LOTSFREE; 4355 } 4356 4357#else 4358 if (needfree > 0) { 4359 n = PAGESIZE * (-needfree); 4360 if (n < lowest) { 4361 lowest = n; 4362 r = FMR_NEEDFREE; 4363 } 4364 } 4365 4366 /* 4367 * check that we're out of range of the pageout scanner. It starts to 4368 * schedule paging if freemem is less than lotsfree and needfree. 4369 * lotsfree is the high-water mark for pageout, and needfree is the 4370 * number of needed free pages. We add extra pages here to make sure 4371 * the scanner doesn't start up while we're freeing memory. 4372 */ 4373 n = PAGESIZE * (freemem - lotsfree - needfree - desfree); 4374 if (n < lowest) { 4375 lowest = n; 4376 r = FMR_LOTSFREE; 4377 } 4378 4379 /* 4380 * check to make sure that swapfs has enough space so that anon 4381 * reservations can still succeed. anon_resvmem() checks that the 4382 * availrmem is greater than swapfs_minfree, and the number of reserved 4383 * swap pages. We also add a bit of extra here just to prevent 4384 * circumstances from getting really dire. 4385 */ 4386 n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve - 4387 desfree - arc_swapfs_reserve); 4388 if (n < lowest) { 4389 lowest = n; 4390 r = FMR_SWAPFS_MINFREE; 4391 } 4392 4393 4394 /* 4395 * Check that we have enough availrmem that memory locking (e.g., via 4396 * mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum 4397 * stores the number of pages that cannot be locked; when availrmem 4398 * drops below pages_pp_maximum, page locking mechanisms such as 4399 * page_pp_lock() will fail.) 4400 */ 4401 n = PAGESIZE * (availrmem - pages_pp_maximum - 4402 arc_pages_pp_reserve); 4403 if (n < lowest) { 4404 lowest = n; 4405 r = FMR_PAGES_PP_MAXIMUM; 4406 } 4407 4408#endif /* __FreeBSD__ */ 4409#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC) 4410 /* 4411 * If we're on an i386 platform, it's possible that we'll exhaust the 4412 * kernel heap space before we ever run out of available physical 4413 * memory. Most checks of the size of the heap_area compare against 4414 * tune.t_minarmem, which is the minimum available real memory that we 4415 * can have in the system. However, this is generally fixed at 25 pages 4416 * which is so low that it's useless. In this comparison, we seek to 4417 * calculate the total heap-size, and reclaim if more than 3/4ths of the 4418 * heap is allocated. 
(Or, in the calculation, if less than 1/4th is 4419 * free) 4420 */ 4421 n = (int64_t)vmem_size(heap_arena, VMEM_FREE) - 4422 (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2); 4423 if (n < lowest) { 4424 lowest = n; 4425 r = FMR_HEAP_ARENA; 4426 } 4427#define zio_arena NULL 4428#else 4429#define zio_arena heap_arena 4430#endif 4431 4432 /* 4433 * If zio data pages are being allocated out of a separate heap segment, 4434 * then enforce that the size of available vmem for this arena remains 4435 * above about 1/4th (1/(2^arc_zio_arena_free_shift)) free. 4436 * 4437 * Note that reducing the arc_zio_arena_free_shift keeps more virtual 4438 * memory (in the zio_arena) free, which can avoid memory 4439 * fragmentation issues. 4440 */ 4441 if (zio_arena != NULL) { 4442 n = (int64_t)vmem_size(zio_arena, VMEM_FREE) - 4443 (vmem_size(zio_arena, VMEM_ALLOC) >> 4444 arc_zio_arena_free_shift); 4445 if (n < lowest) { 4446 lowest = n; 4447 r = FMR_ZIO_ARENA; 4448 } 4449 } 4450 4451 /* 4452 * Above limits know nothing about real level of KVA fragmentation. 4453 * Start aggressive reclamation if too little sequential KVA left. 4454 */ 4455 if (lowest > 0) { 4456 n = (vmem_size(heap_arena, VMEM_MAXFREE) < SPA_MAXBLOCKSIZE) ? 4457 -((int64_t)vmem_size(heap_arena, VMEM_ALLOC) >> 4) : 4458 INT64_MAX; 4459 if (n < lowest) { 4460 lowest = n; 4461 r = FMR_ZIO_FRAG; 4462 } 4463 } 4464 4465#else /* _KERNEL */ 4466 /* Every 100 calls, free a small amount */ 4467 if (spa_get_random(100) == 0) 4468 lowest = -1024; 4469#endif /* _KERNEL */ 4470 4471 last_free_memory = lowest; 4472 last_free_reason = r; 4473 DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r); 4474 return (lowest); 4475} 4476 4477 4478/* 4479 * Determine if the system is under memory pressure and is asking 4480 * to reclaim memory. A return value of B_TRUE indicates that the system 4481 * is under memory pressure and that the arc should adjust accordingly. 4482 */ 4483static boolean_t 4484arc_reclaim_needed(void) 4485{ 4486 return (arc_available_memory() < 0); 4487} 4488 4489extern kmem_cache_t *zio_buf_cache[]; 4490extern kmem_cache_t *zio_data_buf_cache[]; 4491extern kmem_cache_t *range_seg_cache; 4492extern kmem_cache_t *abd_chunk_cache; 4493 4494static __noinline void 4495arc_kmem_reap_soon(void) 4496{ 4497 size_t i; 4498 kmem_cache_t *prev_cache = NULL; 4499 kmem_cache_t *prev_data_cache = NULL; 4500 4501 DTRACE_PROBE(arc__kmem_reap_start); 4502#ifdef _KERNEL 4503 if (aggsum_compare(&arc_meta_used, arc_meta_limit) >= 0) { 4504 /* 4505 * We are exceeding our meta-data cache limit. 4506 * Purge some DNLC entries to release holds on meta-data. 4507 */ 4508 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent); 4509 } 4510#if defined(__i386) 4511 /* 4512 * Reclaim unused memory from all kmem caches. 
4513 */ 4514 kmem_reap(); 4515#endif 4516#endif 4517 4518 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) { 4519 if (zio_buf_cache[i] != prev_cache) { 4520 prev_cache = zio_buf_cache[i]; 4521 kmem_cache_reap_soon(zio_buf_cache[i]); 4522 } 4523 if (zio_data_buf_cache[i] != prev_data_cache) { 4524 prev_data_cache = zio_data_buf_cache[i]; 4525 kmem_cache_reap_soon(zio_data_buf_cache[i]); 4526 } 4527 } 4528 kmem_cache_reap_soon(abd_chunk_cache); 4529 kmem_cache_reap_soon(buf_cache); 4530 kmem_cache_reap_soon(hdr_full_cache); 4531 kmem_cache_reap_soon(hdr_l2only_cache); 4532 kmem_cache_reap_soon(range_seg_cache); 4533 4534#ifdef illumos 4535 if (zio_arena != NULL) { 4536 /* 4537 * Ask the vmem arena to reclaim unused memory from its 4538 * quantum caches. 4539 */ 4540 vmem_qcache_reap(zio_arena); 4541 } 4542#endif 4543 DTRACE_PROBE(arc__kmem_reap_end); 4544} 4545 4546/* ARGSUSED */ 4547static boolean_t 4548arc_adjust_cb_check(void *arg, zthr_t *zthr) 4549{ 4550 /* 4551 * This is necessary in order for the mdb ::arc dcmd to 4552 * show up to date information. Since the ::arc command 4553 * does not call the kstat's update function, without 4554 * this call, the command may show stale stats for the 4555 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even 4556 * with this change, the data might be up to 1 second 4557 * out of date(the arc_adjust_zthr has a maximum sleep 4558 * time of 1 second); but that should suffice. The 4559 * arc_state_t structures can be queried directly if more 4560 * accurate information is needed. 4561 */ 4562 if (arc_ksp != NULL) 4563 arc_ksp->ks_update(arc_ksp, KSTAT_READ); 4564 4565 /* 4566 * We have to rely on arc_get_data_impl() to tell us when to adjust, 4567 * rather than checking if we are overflowing here, so that we are 4568 * sure to not leave arc_get_data_impl() waiting on 4569 * arc_adjust_waiters_cv. If we have become "not overflowing" since 4570 * arc_get_data_impl() checked, we need to wake it up. We could 4571 * broadcast the CV here, but arc_get_data_impl() may have not yet 4572 * gone to sleep. We would need to use a mutex to ensure that this 4573 * function doesn't broadcast until arc_get_data_impl() has gone to 4574 * sleep (e.g. the arc_adjust_lock). However, the lock ordering of 4575 * such a lock would necessarily be incorrect with respect to the 4576 * zthr_lock, which is held before this function is called, and is 4577 * held by arc_get_data_impl() when it calls zthr_wakeup(). 4578 */ 4579 return (arc_adjust_needed); 4580} 4581 4582/* 4583 * Keep arc_size under arc_c by running arc_adjust which evicts data 4584 * from the ARC. */ 4585/* ARGSUSED */ 4586static int 4587arc_adjust_cb(void *arg, zthr_t *zthr) 4588{ 4589 uint64_t evicted = 0; 4590 4591 /* Evict from cache */ 4592 evicted = arc_adjust(); 4593 4594 /* 4595 * If evicted is zero, we couldn't evict anything 4596 * via arc_adjust(). This could be due to hash lock 4597 * collisions, but more likely due to the majority of 4598 * arc buffers being unevictable. Therefore, even if 4599 * arc_size is above arc_c, another pass is unlikely to 4600 * be helpful and could potentially cause us to enter an 4601 * infinite loop. Additionally, zthr_iscancelled() is 4602 * checked here so that if the arc is shutting down, the 4603 * broadcast will wake any remaining arc adjust waiters. 
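 *
 * Concretely (an informal restatement of the assignment below): another
 * pass is requested only when all three conditions hold -- the zthr has
 * not been cancelled, this pass evicted something, and arc_size is
 * still above arc_c; otherwise arc_adjust_needed is cleared and the
 * waiters are woken.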
4604 */ 4605 mutex_enter(&arc_adjust_lock); 4606 arc_adjust_needed = !zthr_iscancelled(arc_adjust_zthr) && 4607 evicted > 0 && aggsum_compare(&arc_size, arc_c) > 0; 4608 if (!arc_adjust_needed) { 4609 /* 4610 * We're either no longer overflowing, or we 4611 * can't evict anything more, so we should wake 4612 * up any waiters. 4613 */ 4614 cv_broadcast(&arc_adjust_waiters_cv); 4615 } 4616 mutex_exit(&arc_adjust_lock); 4617 4618 return (0); 4619} 4620 4621/* ARGSUSED */ 4622static boolean_t 4623arc_reap_cb_check(void *arg, zthr_t *zthr) 4624{ 4625 int64_t free_memory = arc_available_memory(); 4626 4627 /* 4628 * If a kmem reap is already active, don't schedule more. We must 4629 * check for this because kmem_cache_reap_soon() won't actually 4630 * block on the cache being reaped (this is to prevent callers from 4631 * becoming implicitly blocked by a system-wide kmem reap -- which, 4632 * on a system with many, many full magazines, can take minutes). 4633 */ 4634 if (!kmem_cache_reap_active() && 4635 free_memory < 0) { 4636 arc_no_grow = B_TRUE; 4637 arc_warm = B_TRUE; 4638 /* 4639 * Wait at least zfs_grow_retry (default 60) seconds 4640 * before considering growing. 4641 */ 4642 arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry); 4643 return (B_TRUE); 4644 } else if (free_memory < arc_c >> arc_no_grow_shift) { 4645 arc_no_grow = B_TRUE; 4646 } else if (gethrtime() >= arc_growtime) { 4647 arc_no_grow = B_FALSE; 4648 } 4649 4650 return (B_FALSE); 4651} 4652 4653/* 4654 * Keep enough free memory in the system by reaping the ARC's kmem 4655 * caches. To cause more slabs to be reapable, we may reduce the 4656 * target size of the cache (arc_c), causing the arc_adjust_cb() 4657 * to free more buffers. 4658 */ 4659/* ARGSUSED */ 4660static int 4661arc_reap_cb(void *arg, zthr_t *zthr) 4662{ 4663 int64_t free_memory; 4664 4665 /* 4666 * Kick off asynchronous kmem_reap()'s of all our caches. 4667 */ 4668 arc_kmem_reap_soon(); 4669 4670 /* 4671 * Wait at least arc_kmem_cache_reap_retry_ms between 4672 * arc_kmem_reap_soon() calls. Without this check it is possible to 4673 * end up in a situation where we spend lots of time reaping 4674 * caches, while we're near arc_c_min. Waiting here also gives the 4675 * subsequent free memory check a chance of finding that the 4676 * asynchronous reap has already freed enough memory, and we don't 4677 * need to call arc_reduce_target_size(). 4678 */ 4679 delay((hz * arc_kmem_cache_reap_retry_ms + 999) / 1000); 4680 4681 /* 4682 * Reduce the target size as needed to maintain the amount of free 4683 * memory in the system at a fraction of the arc_size (1/128th by 4684 * default). If oversubscribed (free_memory < 0) then reduce the 4685 * target arc_size by the deficit amount plus the fractional 4686 * amount. If free memory is positive but less then the fractional 4687 * amount, reduce by what is needed to hit the fractional amount. 
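 *
 * Worked example (hypothetical numbers, taking arc_shrink_shift = 7,
 * the 1/128th default mentioned above): with arc_c = 4 GB the
 * fractional amount is arc_c >> 7 = 32 MB, and the computation below
 * gives
 *
 *	free_memory = -100 MB  ->  to_free = 32 MB + 100 MB = 132 MB
 *	free_memory =   10 MB  ->  to_free = 32 MB -  10 MB =  22 MB
 *	free_memory =  200 MB  ->  to_free <= 0, the target is left alone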
4688 */ 4689 free_memory = arc_available_memory(); 4690 4691 int64_t to_free = 4692 (arc_c >> arc_shrink_shift) - free_memory; 4693 if (to_free > 0) { 4694#ifdef _KERNEL 4695#ifdef illumos 4696 to_free = MAX(to_free, ptob(needfree)); 4697#endif 4698#endif 4699 arc_reduce_target_size(to_free); 4700 } 4701 4702 return (0); 4703} 4704 4705static u_int arc_dnlc_evicts_arg; 4706extern struct vfsops zfs_vfsops; 4707 4708static void 4709arc_dnlc_evicts_thread(void *dummy __unused) 4710{ 4711 callb_cpr_t cpr; 4712 u_int percent; 4713 4714 CALLB_CPR_INIT(&cpr, &arc_dnlc_evicts_lock, callb_generic_cpr, FTAG); 4715 4716 mutex_enter(&arc_dnlc_evicts_lock); 4717 while (!arc_dnlc_evicts_thread_exit) { 4718 CALLB_CPR_SAFE_BEGIN(&cpr); 4719 (void) cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock); 4720 CALLB_CPR_SAFE_END(&cpr, &arc_dnlc_evicts_lock); 4721 if (arc_dnlc_evicts_arg != 0) { 4722 percent = arc_dnlc_evicts_arg; 4723 mutex_exit(&arc_dnlc_evicts_lock); 4724#ifdef _KERNEL 4725 vnlru_free(desiredvnodes * percent / 100, &zfs_vfsops); 4726#endif 4727 mutex_enter(&arc_dnlc_evicts_lock); 4728 /* 4729 * Clear our token only after vnlru_free() 4730 * pass is done, to avoid false queueing of 4731 * the requests. 4732 */ 4733 arc_dnlc_evicts_arg = 0; 4734 } 4735 } 4736 arc_dnlc_evicts_thread_exit = FALSE; 4737 cv_broadcast(&arc_dnlc_evicts_cv); 4738 CALLB_CPR_EXIT(&cpr); 4739 thread_exit(); 4740} 4741 4742void 4743dnlc_reduce_cache(void *arg) 4744{ 4745 u_int percent; 4746 4747 percent = (u_int)(uintptr_t)arg; 4748 mutex_enter(&arc_dnlc_evicts_lock); 4749 if (arc_dnlc_evicts_arg == 0) { 4750 arc_dnlc_evicts_arg = percent; 4751 cv_broadcast(&arc_dnlc_evicts_cv); 4752 } 4753 mutex_exit(&arc_dnlc_evicts_lock); 4754} 4755 4756/* 4757 * Adapt arc info given the number of bytes we are trying to add and 4758 * the state that we are comming from. This function is only called 4759 * when we are adding new content to the cache. 4760 */ 4761static void 4762arc_adapt(int bytes, arc_state_t *state) 4763{ 4764 int mult; 4765 uint64_t arc_p_min = (arc_c >> arc_p_min_shift); 4766 int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size); 4767 int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size); 4768 4769 if (state == arc_l2c_only) 4770 return; 4771 4772 ASSERT(bytes > 0); 4773 /* 4774 * Adapt the target size of the MRU list: 4775 * - if we just hit in the MRU ghost list, then increase 4776 * the target size of the MRU list. 4777 * - if we just hit in the MFU ghost list, then increase 4778 * the target size of the MFU list by decreasing the 4779 * target size of the MRU list. 4780 */ 4781 if (state == arc_mru_ghost) { 4782 mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size); 4783 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */ 4784 4785 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult); 4786 } else if (state == arc_mfu_ghost) { 4787 uint64_t delta; 4788 4789 mult = (mfug_size >= mrug_size) ? 
1 : (mrug_size / mfug_size); 4790 mult = MIN(mult, 10); 4791 4792 delta = MIN(bytes * mult, arc_p); 4793 arc_p = MAX(arc_p_min, arc_p - delta); 4794 } 4795 ASSERT((int64_t)arc_p >= 0); 4796 4797 /* 4798 * Wake reap thread if we do not have any available memory 4799 */ 4800 if (arc_reclaim_needed()) { 4801 zthr_wakeup(arc_reap_zthr); 4802 return; 4803 } 4804 4805 if (arc_no_grow) 4806 return; 4807 4808 if (arc_c >= arc_c_max) 4809 return; 4810 4811 /* 4812 * If we're within (2 * maxblocksize) bytes of the target 4813 * cache size, increment the target cache size 4814 */ 4815 if (aggsum_compare(&arc_size, arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) > 4816 0) { 4817 DTRACE_PROBE1(arc__inc_adapt, int, bytes); 4818 atomic_add_64(&arc_c, (int64_t)bytes); 4819 if (arc_c > arc_c_max) 4820 arc_c = arc_c_max; 4821 else if (state == arc_anon) 4822 atomic_add_64(&arc_p, (int64_t)bytes); 4823 if (arc_p > arc_c) 4824 arc_p = arc_c; 4825 } 4826 ASSERT((int64_t)arc_p >= 0); 4827} 4828 4829/* 4830 * Check if arc_size has grown past our upper threshold, determined by 4831 * zfs_arc_overflow_shift. 4832 */ 4833static boolean_t 4834arc_is_overflowing(void) 4835{ 4836 /* Always allow at least one block of overflow */ 4837 uint64_t overflow = MAX(SPA_MAXBLOCKSIZE, 4838 arc_c >> zfs_arc_overflow_shift); 4839 4840 /* 4841 * We just compare the lower bound here for performance reasons. Our 4842 * primary goals are to make sure that the arc never grows without 4843 * bound, and that it can reach its maximum size. This check 4844 * accomplishes both goals. The maximum amount we could run over by is 4845 * 2 * aggsum_borrow_multiplier * NUM_CPUS * the average size of a block 4846 * in the ARC. In practice, that's in the tens of MB, which is low 4847 * enough to be safe. 4848 */ 4849 return (aggsum_lower_bound(&arc_size) >= arc_c + overflow); 4850} 4851 4852static abd_t * 4853arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag, boolean_t do_adapt) 4854{ 4855 arc_buf_contents_t type = arc_buf_type(hdr); 4856 4857 arc_get_data_impl(hdr, size, tag, do_adapt); 4858 if (type == ARC_BUFC_METADATA) { 4859 return (abd_alloc(size, B_TRUE)); 4860 } else { 4861 ASSERT(type == ARC_BUFC_DATA); 4862 return (abd_alloc(size, B_FALSE)); 4863 } 4864} 4865 4866static void * 4867arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 4868{ 4869 arc_buf_contents_t type = arc_buf_type(hdr); 4870 4871 arc_get_data_impl(hdr, size, tag, B_TRUE); 4872 if (type == ARC_BUFC_METADATA) { 4873 return (zio_buf_alloc(size)); 4874 } else { 4875 ASSERT(type == ARC_BUFC_DATA); 4876 return (zio_data_buf_alloc(size)); 4877 } 4878} 4879 4880/* 4881 * Allocate a block and return it to the caller. If we are hitting the 4882 * hard limit for the cache size, we must sleep, waiting for the eviction 4883 * thread to catch up. If we're past the target size but below the hard 4884 * limit, we'll only signal the reclaim thread and continue on. 4885 */ 4886static void 4887arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag, boolean_t do_adapt) 4888{ 4889 arc_state_t *state = hdr->b_l1hdr.b_state; 4890 arc_buf_contents_t type = arc_buf_type(hdr); 4891 4892 if (do_adapt) 4893 arc_adapt(size, state); 4894 4895 /* 4896 * If arc_size is currently overflowing, and has grown past our 4897 * upper limit, we must be adding data faster than the evict 4898 * thread can evict. 
Thus, to ensure we don't compound the 4899 * problem by adding more data and forcing arc_size to grow even 4900 * further past it's target size, we halt and wait for the 4901 * eviction thread to catch up. 4902 * 4903 * It's also possible that the reclaim thread is unable to evict 4904 * enough buffers to get arc_size below the overflow limit (e.g. 4905 * due to buffers being un-evictable, or hash lock collisions). 4906 * In this case, we want to proceed regardless if we're 4907 * overflowing; thus we don't use a while loop here. 4908 */ 4909 if (arc_is_overflowing()) { 4910 mutex_enter(&arc_adjust_lock); 4911 4912 /* 4913 * Now that we've acquired the lock, we may no longer be 4914 * over the overflow limit, lets check. 4915 * 4916 * We're ignoring the case of spurious wake ups. If that 4917 * were to happen, it'd let this thread consume an ARC 4918 * buffer before it should have (i.e. before we're under 4919 * the overflow limit and were signalled by the reclaim 4920 * thread). As long as that is a rare occurrence, it 4921 * shouldn't cause any harm. 4922 */ 4923 if (arc_is_overflowing()) { 4924 arc_adjust_needed = B_TRUE; 4925 zthr_wakeup(arc_adjust_zthr); 4926 (void) cv_wait(&arc_adjust_waiters_cv, 4927 &arc_adjust_lock); 4928 } 4929 mutex_exit(&arc_adjust_lock); 4930 } 4931 4932 VERIFY3U(hdr->b_type, ==, type); 4933 if (type == ARC_BUFC_METADATA) { 4934 arc_space_consume(size, ARC_SPACE_META); 4935 } else { 4936 arc_space_consume(size, ARC_SPACE_DATA); 4937 } 4938 4939 /* 4940 * Update the state size. Note that ghost states have a 4941 * "ghost size" and so don't need to be updated. 4942 */ 4943 if (!GHOST_STATE(state)) { 4944 4945 (void) refcount_add_many(&state->arcs_size, size, tag); 4946 4947 /* 4948 * If this is reached via arc_read, the link is 4949 * protected by the hash lock. If reached via 4950 * arc_buf_alloc, the header should not be accessed by 4951 * any other thread. And, if reached via arc_read_done, 4952 * the hash lock will protect it if it's found in the 4953 * hash table; otherwise no other thread should be 4954 * trying to [add|remove]_reference it. 4955 */ 4956 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 4957 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 4958 (void) refcount_add_many(&state->arcs_esize[type], 4959 size, tag); 4960 } 4961 4962 /* 4963 * If we are growing the cache, and we are adding anonymous 4964 * data, and we have outgrown arc_p, update arc_p 4965 */ 4966 if (aggsum_compare(&arc_size, arc_c) < 0 && 4967 hdr->b_l1hdr.b_state == arc_anon && 4968 (refcount_count(&arc_anon->arcs_size) + 4969 refcount_count(&arc_mru->arcs_size) > arc_p)) 4970 arc_p = MIN(arc_c, arc_p + size); 4971 } 4972 ARCSTAT_BUMP(arcstat_allocated); 4973} 4974 4975static void 4976arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag) 4977{ 4978 arc_free_data_impl(hdr, size, tag); 4979 abd_free(abd); 4980} 4981 4982static void 4983arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag) 4984{ 4985 arc_buf_contents_t type = arc_buf_type(hdr); 4986 4987 arc_free_data_impl(hdr, size, tag); 4988 if (type == ARC_BUFC_METADATA) { 4989 zio_buf_free(buf, size); 4990 } else { 4991 ASSERT(type == ARC_BUFC_DATA); 4992 zio_data_buf_free(buf, size); 4993 } 4994} 4995 4996/* 4997 * Free the arc data buffer. 
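 *
 * Together with the arc_free_data_buf()/arc_free_data_abd() wrappers
 * above, this is the teardown half of arc_get_data_buf() and
 * arc_get_data_abd(); a sketch of the expected pairing (hypothetical
 * caller):
 *
 *	void *data = arc_get_data_buf(hdr, size, tag);
 *	...
 *	arc_free_data_buf(hdr, data, size, tag);	(same size and tag)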
4998 */ 4999static void 5000arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag) 5001{ 5002 arc_state_t *state = hdr->b_l1hdr.b_state; 5003 arc_buf_contents_t type = arc_buf_type(hdr); 5004 5005 /* protected by hash lock, if in the hash table */ 5006 if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) { 5007 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5008 ASSERT(state != arc_anon && state != arc_l2c_only); 5009 5010 (void) refcount_remove_many(&state->arcs_esize[type], 5011 size, tag); 5012 } 5013 (void) refcount_remove_many(&state->arcs_size, size, tag); 5014 5015 VERIFY3U(hdr->b_type, ==, type); 5016 if (type == ARC_BUFC_METADATA) { 5017 arc_space_return(size, ARC_SPACE_META); 5018 } else { 5019 ASSERT(type == ARC_BUFC_DATA); 5020 arc_space_return(size, ARC_SPACE_DATA); 5021 } 5022} 5023 5024/* 5025 * This routine is called whenever a buffer is accessed. 5026 * NOTE: the hash lock is dropped in this function. 5027 */ 5028static void 5029arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock) 5030{ 5031 clock_t now; 5032 5033 ASSERT(MUTEX_HELD(hash_lock)); 5034 ASSERT(HDR_HAS_L1HDR(hdr)); 5035 5036 if (hdr->b_l1hdr.b_state == arc_anon) { 5037 /* 5038 * This buffer is not in the cache, and does not 5039 * appear in our "ghost" list. Add the new buffer 5040 * to the MRU state. 5041 */ 5042 5043 ASSERT0(hdr->b_l1hdr.b_arc_access); 5044 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5045 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 5046 arc_change_state(arc_mru, hdr, hash_lock); 5047 5048 } else if (hdr->b_l1hdr.b_state == arc_mru) { 5049 now = ddi_get_lbolt(); 5050 5051 /* 5052 * If this buffer is here because of a prefetch, then either: 5053 * - clear the flag if this is a "referencing" read 5054 * (any subsequent access will bump this into the MFU state). 5055 * or 5056 * - move the buffer to the head of the list if this is 5057 * another prefetch (to make it less likely to be evicted). 5058 */ 5059 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5060 if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 5061 /* link protected by hash lock */ 5062 ASSERT(multilist_link_active( 5063 &hdr->b_l1hdr.b_arc_node)); 5064 } else { 5065 arc_hdr_clear_flags(hdr, 5066 ARC_FLAG_PREFETCH | 5067 ARC_FLAG_PRESCIENT_PREFETCH); 5068 ARCSTAT_BUMP(arcstat_mru_hits); 5069 } 5070 hdr->b_l1hdr.b_arc_access = now; 5071 return; 5072 } 5073 5074 /* 5075 * This buffer has been "accessed" only once so far, 5076 * but it is still in the cache. Move it to the MFU 5077 * state. 5078 */ 5079 if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) { 5080 /* 5081 * More than 125ms have passed since we 5082 * instantiated this buffer. Move it to the 5083 * most frequently used state. 5084 */ 5085 hdr->b_l1hdr.b_arc_access = now; 5086 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5087 arc_change_state(arc_mfu, hdr, hash_lock); 5088 } 5089 ARCSTAT_BUMP(arcstat_mru_hits); 5090 } else if (hdr->b_l1hdr.b_state == arc_mru_ghost) { 5091 arc_state_t *new_state; 5092 /* 5093 * This buffer has been "accessed" recently, but 5094 * was evicted from the cache. Move it to the 5095 * MFU state. 
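 *
 * Summarizing the checks below for a hit in the mru ghost list:
 *
 *	demand access    ->  arc_mfu
 *	prefetch access  ->  arc_mru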
5096 */ 5097 5098 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5099 new_state = arc_mru; 5100 if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) { 5101 arc_hdr_clear_flags(hdr, 5102 ARC_FLAG_PREFETCH | 5103 ARC_FLAG_PRESCIENT_PREFETCH); 5104 } 5105 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr); 5106 } else { 5107 new_state = arc_mfu; 5108 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5109 } 5110 5111 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5112 arc_change_state(new_state, hdr, hash_lock); 5113 5114 ARCSTAT_BUMP(arcstat_mru_ghost_hits); 5115 } else if (hdr->b_l1hdr.b_state == arc_mfu) { 5116 /* 5117 * This buffer has been accessed more than once and is 5118 * still in the cache. Keep it in the MFU state. 5119 * 5120 * NOTE: an add_reference() that occurred when we did 5121 * the arc_read() will have kicked this off the list. 5122 * If it was a prefetch, we will explicitly move it to 5123 * the head of the list now. 5124 */ 5125 5126 ARCSTAT_BUMP(arcstat_mfu_hits); 5127 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5128 } else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) { 5129 arc_state_t *new_state = arc_mfu; 5130 /* 5131 * This buffer has been accessed more than once but has 5132 * been evicted from the cache. Move it back to the 5133 * MFU state. 5134 */ 5135 5136 if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) { 5137 /* 5138 * This is a prefetch access... 5139 * move this block back to the MRU state. 5140 */ 5141 new_state = arc_mru; 5142 } 5143 5144 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5145 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5146 arc_change_state(new_state, hdr, hash_lock); 5147 5148 ARCSTAT_BUMP(arcstat_mfu_ghost_hits); 5149 } else if (hdr->b_l1hdr.b_state == arc_l2c_only) { 5150 /* 5151 * This buffer is on the 2nd Level ARC. 5152 */ 5153 5154 hdr->b_l1hdr.b_arc_access = ddi_get_lbolt(); 5155 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr); 5156 arc_change_state(arc_mfu, hdr, hash_lock); 5157 } else { 5158 ASSERT(!"invalid arc state"); 5159 } 5160} 5161 5162/* 5163 * This routine is called by dbuf_hold() to update the arc_access() state 5164 * which otherwise would be skipped for entries in the dbuf cache. 5165 */ 5166void 5167arc_buf_access(arc_buf_t *buf) 5168{ 5169 mutex_enter(&buf->b_evict_lock); 5170 arc_buf_hdr_t *hdr = buf->b_hdr; 5171 5172 /* 5173 * Avoid taking the hash_lock when possible as an optimization. 5174 * The header must be checked again under the hash_lock in order 5175 * to handle the case where it is concurrently being released. 
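 * This is a double-checked pattern: the unlocked test below can race
 * with a concurrent arc_release(), so the same anon/HDR_EMPTY test is
 * repeated once the hash lock is held.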
5176 */ 5177 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { 5178 mutex_exit(&buf->b_evict_lock); 5179 ARCSTAT_BUMP(arcstat_access_skip); 5180 return; 5181 } 5182 5183 kmutex_t *hash_lock = HDR_LOCK(hdr); 5184 mutex_enter(hash_lock); 5185 5186 if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) { 5187 mutex_exit(hash_lock); 5188 mutex_exit(&buf->b_evict_lock); 5189 ARCSTAT_BUMP(arcstat_access_skip); 5190 return; 5191 } 5192 5193 mutex_exit(&buf->b_evict_lock); 5194 5195 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 5196 hdr->b_l1hdr.b_state == arc_mfu); 5197 5198 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 5199 arc_access(hdr, hash_lock); 5200 mutex_exit(hash_lock); 5201 5202 ARCSTAT_BUMP(arcstat_hits); 5203 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5204 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits); 5205} 5206 5207/* a generic arc_read_done_func_t which you can use */ 5208/* ARGSUSED */ 5209void 5210arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 5211 arc_buf_t *buf, void *arg) 5212{ 5213 if (buf == NULL) 5214 return; 5215 5216 bcopy(buf->b_data, arg, arc_buf_size(buf)); 5217 arc_buf_destroy(buf, arg); 5218} 5219 5220/* a generic arc_read_done_func_t */ 5221/* ARGSUSED */ 5222void 5223arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp, 5224 arc_buf_t *buf, void *arg) 5225{ 5226 arc_buf_t **bufp = arg; 5227 if (buf == NULL) { 5228 ASSERT(zio == NULL || zio->io_error != 0); 5229 *bufp = NULL; 5230 } else { 5231 ASSERT(zio == NULL || zio->io_error == 0); 5232 *bufp = buf; 5233 ASSERT(buf->b_data != NULL); 5234 } 5235} 5236 5237static void 5238arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp) 5239{ 5240 if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) { 5241 ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0); 5242 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF); 5243 } else { 5244 if (HDR_COMPRESSION_ENABLED(hdr)) { 5245 ASSERT3U(HDR_GET_COMPRESS(hdr), ==, 5246 BP_GET_COMPRESS(bp)); 5247 } 5248 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp)); 5249 ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp)); 5250 } 5251} 5252 5253static void 5254arc_read_done(zio_t *zio) 5255{ 5256 arc_buf_hdr_t *hdr = zio->io_private; 5257 kmutex_t *hash_lock = NULL; 5258 arc_callback_t *callback_list; 5259 arc_callback_t *acb; 5260 boolean_t freeable = B_FALSE; 5261 boolean_t no_zio_error = (zio->io_error == 0); 5262 5263 /* 5264 * The hdr was inserted into hash-table and removed from lists 5265 * prior to starting I/O. We should find this header, since 5266 * it's in the hash table, and it should be legit since it's 5267 * not possible to evict it during the I/O. The only possible 5268 * reason for it not to be found is if we were freed during the 5269 * read. 
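 * In that case HDR_IN_HASH_TABLE() is clear, hash_lock stays NULL, and
 * the tail of this function uses the NULL hash_lock to recognize the
 * freed-while-reading case.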
5270 */ 5271 if (HDR_IN_HASH_TABLE(hdr)) { 5272 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp)); 5273 ASSERT3U(hdr->b_dva.dva_word[0], ==, 5274 BP_IDENTITY(zio->io_bp)->dva_word[0]); 5275 ASSERT3U(hdr->b_dva.dva_word[1], ==, 5276 BP_IDENTITY(zio->io_bp)->dva_word[1]); 5277 5278 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp, 5279 &hash_lock); 5280 5281 ASSERT((found == hdr && 5282 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) || 5283 (found == hdr && HDR_L2_READING(hdr))); 5284 ASSERT3P(hash_lock, !=, NULL); 5285 } 5286 5287 if (no_zio_error) { 5288 /* byteswap if necessary */ 5289 if (BP_SHOULD_BYTESWAP(zio->io_bp)) { 5290 if (BP_GET_LEVEL(zio->io_bp) > 0) { 5291 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64; 5292 } else { 5293 hdr->b_l1hdr.b_byteswap = 5294 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp)); 5295 } 5296 } else { 5297 hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS; 5298 } 5299 } 5300 5301 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED); 5302 if (l2arc_noprefetch && HDR_PREFETCH(hdr)) 5303 arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE); 5304 5305 callback_list = hdr->b_l1hdr.b_acb; 5306 ASSERT3P(callback_list, !=, NULL); 5307 5308 if (hash_lock && no_zio_error && hdr->b_l1hdr.b_state == arc_anon) { 5309 /* 5310 * Only call arc_access on anonymous buffers. This is because 5311 * if we've issued an I/O for an evicted buffer, we've already 5312 * called arc_access (to prevent any simultaneous readers from 5313 * getting confused). 5314 */ 5315 arc_access(hdr, hash_lock); 5316 } 5317 5318 /* 5319 * If a read request has a callback (i.e. acb_done is not NULL), then we 5320 * make a buf containing the data according to the parameters which were 5321 * passed in. The implementation of arc_buf_alloc_impl() ensures that we 5322 * aren't needlessly decompressing the data multiple times. 5323 */ 5324 int callback_cnt = 0; 5325 for (acb = callback_list; acb != NULL; acb = acb->acb_next) { 5326 if (!acb->acb_done) 5327 continue; 5328 5329 callback_cnt++; 5330 5331 if (no_zio_error) { 5332 int error = arc_buf_alloc_impl(hdr, acb->acb_private, 5333 acb->acb_compressed, zio->io_error == 0, 5334 &acb->acb_buf); 5335 if (error != 0) { 5336 /* 5337 * Decompression failed. Set io_error 5338 * so that when we call acb_done (below), 5339 * we will indicate that the read failed. 5340 * Note that in the unusual case where one 5341 * callback is compressed and another 5342 * uncompressed, we will mark all of them 5343 * as failed, even though the uncompressed 5344 * one can't actually fail. In this case, 5345 * the hdr will not be anonymous, because 5346 * if there are multiple callbacks, it's 5347 * because multiple threads found the same 5348 * arc buf in the hash table. 5349 */ 5350 zio->io_error = error; 5351 } 5352 } 5353 } 5354 /* 5355 * If there are multiple callbacks, we must have the hash lock, 5356 * because the only way for multiple threads to find this hdr is 5357 * in the hash table. This ensures that if there are multiple 5358 * callbacks, the hdr is not anonymous. If it were anonymous, 5359 * we couldn't use arc_buf_destroy() in the error case below. 
5360 */ 5361 ASSERT(callback_cnt < 2 || hash_lock != NULL); 5362 5363 hdr->b_l1hdr.b_acb = NULL; 5364 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5365 if (callback_cnt == 0) { 5366 ASSERT(HDR_PREFETCH(hdr)); 5367 ASSERT0(hdr->b_l1hdr.b_bufcnt); 5368 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 5369 } 5370 5371 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) || 5372 callback_list != NULL); 5373 5374 if (no_zio_error) { 5375 arc_hdr_verify(hdr, zio->io_bp); 5376 } else { 5377 arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR); 5378 if (hdr->b_l1hdr.b_state != arc_anon) 5379 arc_change_state(arc_anon, hdr, hash_lock); 5380 if (HDR_IN_HASH_TABLE(hdr)) 5381 buf_hash_remove(hdr); 5382 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 5383 } 5384 5385 /* 5386 * Broadcast before we drop the hash_lock to avoid the possibility 5387 * that the hdr (and hence the cv) might be freed before we get to 5388 * the cv_broadcast(). 5389 */ 5390 cv_broadcast(&hdr->b_l1hdr.b_cv); 5391 5392 if (hash_lock != NULL) { 5393 mutex_exit(hash_lock); 5394 } else { 5395 /* 5396 * This block was freed while we waited for the read to 5397 * complete. It has been removed from the hash table and 5398 * moved to the anonymous state (so that it won't show up 5399 * in the cache). 5400 */ 5401 ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon); 5402 freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt); 5403 } 5404 5405 /* execute each callback and free its structure */ 5406 while ((acb = callback_list) != NULL) { 5407 if (acb->acb_done != NULL) { 5408 if (zio->io_error != 0 && acb->acb_buf != NULL) { 5409 /* 5410 * If arc_buf_alloc_impl() fails during 5411 * decompression, the buf will still be 5412 * allocated, and needs to be freed here. 5413 */ 5414 arc_buf_destroy(acb->acb_buf, acb->acb_private); 5415 acb->acb_buf = NULL; 5416 } 5417 acb->acb_done(zio, &zio->io_bookmark, zio->io_bp, 5418 acb->acb_buf, acb->acb_private); 5419 } 5420 5421 if (acb->acb_zio_dummy != NULL) { 5422 acb->acb_zio_dummy->io_error = zio->io_error; 5423 zio_nowait(acb->acb_zio_dummy); 5424 } 5425 5426 callback_list = acb->acb_next; 5427 kmem_free(acb, sizeof (arc_callback_t)); 5428 } 5429 5430 if (freeable) 5431 arc_hdr_destroy(hdr); 5432} 5433 5434/* 5435 * "Read" the block at the specified DVA (in bp) via the 5436 * cache. If the block is found in the cache, invoke the provided 5437 * callback immediately and return. Note that the `zio' parameter 5438 * in the callback will be NULL in this case, since no IO was 5439 * required. If the block is not in the cache pass the read request 5440 * on to the spa with a substitute callback function, so that the 5441 * requested block will be added to the cache. 5442 * 5443 * If a read request arrives for a block that has a read in-progress, 5444 * either wait for the in-progress read to complete (and return the 5445 * results); or, if this is a read with a "done" func, add a record 5446 * to the read to invoke the "done" func when the read completes, 5447 * and return; or just return. 5448 * 5449 * arc_read_done() will invoke all the requested "done" functions 5450 * for readers of this block. 
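 *
 * A rough sketch of a simple synchronous, failable read through the
 * ARC (illustrative only; the spa, bp, and bookmark are assumed to be
 * in hand, and real callers choose their own flags and error handling):
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (err == 0 && abuf != NULL) {
 *		... use abuf->b_data ...
 *		arc_buf_destroy(abuf, &abuf);
 *	}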
5451 */ 5452int 5453arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_read_done_func_t *done, 5454 void *private, zio_priority_t priority, int zio_flags, 5455 arc_flags_t *arc_flags, const zbookmark_phys_t *zb) 5456{ 5457 arc_buf_hdr_t *hdr = NULL; 5458 kmutex_t *hash_lock = NULL; 5459 zio_t *rzio; 5460 uint64_t guid = spa_load_guid(spa); 5461 boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW) != 0; 5462 int rc = 0; 5463 5464 ASSERT(!BP_IS_EMBEDDED(bp) || 5465 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA); 5466 5467top: 5468 if (!BP_IS_EMBEDDED(bp)) { 5469 /* 5470 * Embedded BP's have no DVA and require no I/O to "read". 5471 * Create an anonymous arc buf to back it. 5472 */ 5473 hdr = buf_hash_find(guid, bp, &hash_lock); 5474 } 5475 5476 if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_pabd != NULL) { 5477 arc_buf_t *buf = NULL; 5478 *arc_flags |= ARC_FLAG_CACHED; 5479 5480 if (HDR_IO_IN_PROGRESS(hdr)) { 5481 zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head; 5482 5483 ASSERT3P(head_zio, !=, NULL); 5484 if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) && 5485 priority == ZIO_PRIORITY_SYNC_READ) { 5486 /* 5487 * This is a sync read that needs to wait for 5488 * an in-flight async read. Request that the 5489 * zio have its priority upgraded. 5490 */ 5491 zio_change_priority(head_zio, priority); 5492 DTRACE_PROBE1(arc__async__upgrade__sync, 5493 arc_buf_hdr_t *, hdr); 5494 ARCSTAT_BUMP(arcstat_async_upgrade_sync); 5495 } 5496 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 5497 arc_hdr_clear_flags(hdr, 5498 ARC_FLAG_PREDICTIVE_PREFETCH); 5499 } 5500 5501 if (*arc_flags & ARC_FLAG_WAIT) { 5502 cv_wait(&hdr->b_l1hdr.b_cv, hash_lock); 5503 mutex_exit(hash_lock); 5504 goto top; 5505 } 5506 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 5507 5508 if (done) { 5509 arc_callback_t *acb = NULL; 5510 5511 acb = kmem_zalloc(sizeof (arc_callback_t), 5512 KM_SLEEP); 5513 acb->acb_done = done; 5514 acb->acb_private = private; 5515 acb->acb_compressed = compressed_read; 5516 if (pio != NULL) 5517 acb->acb_zio_dummy = zio_null(pio, 5518 spa, NULL, NULL, NULL, zio_flags); 5519 5520 ASSERT3P(acb->acb_done, !=, NULL); 5521 acb->acb_zio_head = head_zio; 5522 acb->acb_next = hdr->b_l1hdr.b_acb; 5523 hdr->b_l1hdr.b_acb = acb; 5524 mutex_exit(hash_lock); 5525 return (0); 5526 } 5527 mutex_exit(hash_lock); 5528 return (0); 5529 } 5530 5531 ASSERT(hdr->b_l1hdr.b_state == arc_mru || 5532 hdr->b_l1hdr.b_state == arc_mfu); 5533 5534 if (done) { 5535 if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) { 5536 /* 5537 * This is a demand read which does not have to 5538 * wait for i/o because we did a predictive 5539 * prefetch i/o for it, which has completed. 5540 */ 5541 DTRACE_PROBE1( 5542 arc__demand__hit__predictive__prefetch, 5543 arc_buf_hdr_t *, hdr); 5544 ARCSTAT_BUMP( 5545 arcstat_demand_hit_predictive_prefetch); 5546 arc_hdr_clear_flags(hdr, 5547 ARC_FLAG_PREDICTIVE_PREFETCH); 5548 } 5549 5550 if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) { 5551 ARCSTAT_BUMP( 5552 arcstat_demand_hit_prescient_prefetch); 5553 arc_hdr_clear_flags(hdr, 5554 ARC_FLAG_PRESCIENT_PREFETCH); 5555 } 5556 5557 ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp)); 5558 /* Get a buf with the desired data in it. 
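 * If arc_buf_alloc_impl() fails (e.g. decompression of the cached
 * copy fails), the buf is destroyed and the done callback below is
 * invoked with a NULL buf.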
*/ 5559 rc = arc_buf_alloc_impl(hdr, private, 5560 compressed_read, B_TRUE, &buf); 5561 if (rc != 0) { 5562 arc_buf_destroy(buf, private); 5563 buf = NULL; 5564 } 5565 ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) || 5566 rc == 0 || rc != ENOENT); 5567 } else if (*arc_flags & ARC_FLAG_PREFETCH && 5568 refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) { 5569 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 5570 } 5571 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr); 5572 arc_access(hdr, hash_lock); 5573 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) 5574 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); 5575 if (*arc_flags & ARC_FLAG_L2CACHE) 5576 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 5577 mutex_exit(hash_lock); 5578 ARCSTAT_BUMP(arcstat_hits); 5579 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5580 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 5581 data, metadata, hits); 5582 5583 if (done) 5584 done(NULL, zb, bp, buf, private); 5585 } else { 5586 uint64_t lsize = BP_GET_LSIZE(bp); 5587 uint64_t psize = BP_GET_PSIZE(bp); 5588 arc_callback_t *acb; 5589 vdev_t *vd = NULL; 5590 uint64_t addr = 0; 5591 boolean_t devw = B_FALSE; 5592 uint64_t size; 5593 5594 if (hdr == NULL) { 5595 /* this block is not in the cache */ 5596 arc_buf_hdr_t *exists = NULL; 5597 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp); 5598 hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, 5599 BP_GET_COMPRESS(bp), type); 5600 5601 if (!BP_IS_EMBEDDED(bp)) { 5602 hdr->b_dva = *BP_IDENTITY(bp); 5603 hdr->b_birth = BP_PHYSICAL_BIRTH(bp); 5604 exists = buf_hash_insert(hdr, &hash_lock); 5605 } 5606 if (exists != NULL) { 5607 /* somebody beat us to the hash insert */ 5608 mutex_exit(hash_lock); 5609 buf_discard_identity(hdr); 5610 arc_hdr_destroy(hdr); 5611 goto top; /* restart the IO request */ 5612 } 5613 } else { 5614 /* 5615 * This block is in the ghost cache. If it was L2-only 5616 * (and thus didn't have an L1 hdr), we realloc the 5617 * header to add an L1 hdr. 5618 */ 5619 if (!HDR_HAS_L1HDR(hdr)) { 5620 hdr = arc_hdr_realloc(hdr, hdr_l2only_cache, 5621 hdr_full_cache); 5622 } 5623 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 5624 ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state)); 5625 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5626 ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 5627 ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL); 5628 ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL); 5629 5630 /* 5631 * This is a delicate dance that we play here. 5632 * This hdr is in the ghost list so we access it 5633 * to move it out of the ghost list before we 5634 * initiate the read. If it's a prefetch then 5635 * it won't have a callback so we'll remove the 5636 * reference that arc_buf_alloc_impl() created. We 5637 * do this after we've called arc_access() to 5638 * avoid hitting an assert in remove_reference(). 5639 */ 5640 arc_adapt(arc_hdr_size(hdr), hdr->b_l1hdr.b_state); 5641 arc_access(hdr, hash_lock); 5642 arc_hdr_alloc_pabd(hdr, B_FALSE); 5643 } 5644 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 5645 size = arc_hdr_size(hdr); 5646 5647 /* 5648 * If compression is enabled on the hdr, then will do 5649 * RAW I/O and will store the compressed data in the hdr's 5650 * data block. Otherwise, the hdr's data block will contain 5651 * the uncompressed data. 
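 * (ZIO_FLAG_RAW, set below, is what makes the zio deliver the on-disk,
 * possibly compressed, bytes directly into b_pabd.)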
5652 */ 5653 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 5654 zio_flags |= ZIO_FLAG_RAW; 5655 } 5656 5657 if (*arc_flags & ARC_FLAG_PREFETCH) 5658 arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH); 5659 if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH) 5660 arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH); 5661 5662 if (*arc_flags & ARC_FLAG_L2CACHE) 5663 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 5664 if (BP_GET_LEVEL(bp) > 0) 5665 arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT); 5666 if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH) 5667 arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH); 5668 ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state)); 5669 5670 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP); 5671 acb->acb_done = done; 5672 acb->acb_private = private; 5673 acb->acb_compressed = compressed_read; 5674 5675 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 5676 hdr->b_l1hdr.b_acb = acb; 5677 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 5678 5679 if (HDR_HAS_L2HDR(hdr) && 5680 (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) { 5681 devw = hdr->b_l2hdr.b_dev->l2ad_writing; 5682 addr = hdr->b_l2hdr.b_daddr; 5683 /* 5684 * Lock out L2ARC device removal. 5685 */ 5686 if (vdev_is_dead(vd) || 5687 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER)) 5688 vd = NULL; 5689 } 5690 5691 /* 5692 * We count both async reads and scrub IOs as asynchronous so 5693 * that both can be upgraded in the event of a cache hit while 5694 * the read IO is still in-flight. 5695 */ 5696 if (priority == ZIO_PRIORITY_ASYNC_READ || 5697 priority == ZIO_PRIORITY_SCRUB) 5698 arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5699 else 5700 arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ); 5701 5702 /* 5703 * At this point, we have a level 1 cache miss. Try again in 5704 * L2ARC if possible. 5705 */ 5706 ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize); 5707 5708 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp, 5709 uint64_t, lsize, zbookmark_phys_t *, zb); 5710 ARCSTAT_BUMP(arcstat_misses); 5711 ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), 5712 demand, prefetch, !HDR_ISTYPE_METADATA(hdr), 5713 data, metadata, misses); 5714#ifdef _KERNEL 5715#ifdef RACCT 5716 if (racct_enable) { 5717 PROC_LOCK(curproc); 5718 racct_add_force(curproc, RACCT_READBPS, size); 5719 racct_add_force(curproc, RACCT_READIOPS, 1); 5720 PROC_UNLOCK(curproc); 5721 } 5722#endif /* RACCT */ 5723 curthread->td_ru.ru_inblock++; 5724#endif 5725 5726 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) { 5727 /* 5728 * Read from the L2ARC if the following are true: 5729 * 1. The L2ARC vdev was previously cached. 5730 * 2. This buffer still has L2ARC metadata. 5731 * 3. This buffer isn't currently writing to the L2ARC. 5732 * 4. The L2ARC entry wasn't evicted, which may 5733 * also have invalidated the vdev. 5734 * 5. This isn't prefetch and l2arc_noprefetch is set. 
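 * (The check below implements item 5 as an exclusion: the L2ARC is
 * bypassed only when both l2arc_noprefetch is set and this read is a
 * prefetch.)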
5735 */ 5736 if (HDR_HAS_L2HDR(hdr) && 5737 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) && 5738 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) { 5739 l2arc_read_callback_t *cb; 5740 abd_t *abd; 5741 uint64_t asize; 5742 5743 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr); 5744 ARCSTAT_BUMP(arcstat_l2_hits); 5745 5746 cb = kmem_zalloc(sizeof (l2arc_read_callback_t), 5747 KM_SLEEP); 5748 cb->l2rcb_hdr = hdr; 5749 cb->l2rcb_bp = *bp; 5750 cb->l2rcb_zb = *zb; 5751 cb->l2rcb_flags = zio_flags; 5752 5753 asize = vdev_psize_to_asize(vd, size); 5754 if (asize != size) { 5755 abd = abd_alloc_for_io(asize, 5756 HDR_ISTYPE_METADATA(hdr)); 5757 cb->l2rcb_abd = abd; 5758 } else { 5759 abd = hdr->b_l1hdr.b_pabd; 5760 } 5761 5762 ASSERT(addr >= VDEV_LABEL_START_SIZE && 5763 addr + asize <= vd->vdev_psize - 5764 VDEV_LABEL_END_SIZE); 5765 5766 /* 5767 * l2arc read. The SCL_L2ARC lock will be 5768 * released by l2arc_read_done(). 5769 * Issue a null zio if the underlying buffer 5770 * was squashed to zero size by compression. 5771 */ 5772 ASSERT3U(HDR_GET_COMPRESS(hdr), !=, 5773 ZIO_COMPRESS_EMPTY); 5774 rzio = zio_read_phys(pio, vd, addr, 5775 asize, abd, 5776 ZIO_CHECKSUM_OFF, 5777 l2arc_read_done, cb, priority, 5778 zio_flags | ZIO_FLAG_DONT_CACHE | 5779 ZIO_FLAG_CANFAIL | 5780 ZIO_FLAG_DONT_PROPAGATE | 5781 ZIO_FLAG_DONT_RETRY, B_FALSE); 5782 acb->acb_zio_head = rzio; 5783 5784 if (hash_lock != NULL) 5785 mutex_exit(hash_lock); 5786 5787 DTRACE_PROBE2(l2arc__read, vdev_t *, vd, 5788 zio_t *, rzio); 5789 ARCSTAT_INCR(arcstat_l2_read_bytes, size); 5790 5791 if (*arc_flags & ARC_FLAG_NOWAIT) { 5792 zio_nowait(rzio); 5793 return (0); 5794 } 5795 5796 ASSERT(*arc_flags & ARC_FLAG_WAIT); 5797 if (zio_wait(rzio) == 0) 5798 return (0); 5799 5800 /* l2arc read error; goto zio_read() */ 5801 if (hash_lock != NULL) 5802 mutex_enter(hash_lock); 5803 } else { 5804 DTRACE_PROBE1(l2arc__miss, 5805 arc_buf_hdr_t *, hdr); 5806 ARCSTAT_BUMP(arcstat_l2_misses); 5807 if (HDR_L2_WRITING(hdr)) 5808 ARCSTAT_BUMP(arcstat_l2_rw_clash); 5809 spa_config_exit(spa, SCL_L2ARC, vd); 5810 } 5811 } else { 5812 if (vd != NULL) 5813 spa_config_exit(spa, SCL_L2ARC, vd); 5814 if (l2arc_ndev != 0) { 5815 DTRACE_PROBE1(l2arc__miss, 5816 arc_buf_hdr_t *, hdr); 5817 ARCSTAT_BUMP(arcstat_l2_misses); 5818 } 5819 } 5820 5821 rzio = zio_read(pio, spa, bp, hdr->b_l1hdr.b_pabd, size, 5822 arc_read_done, hdr, priority, zio_flags, zb); 5823 acb->acb_zio_head = rzio; 5824 5825 if (hash_lock != NULL) 5826 mutex_exit(hash_lock); 5827 5828 if (*arc_flags & ARC_FLAG_WAIT) 5829 return (zio_wait(rzio)); 5830 5831 ASSERT(*arc_flags & ARC_FLAG_NOWAIT); 5832 zio_nowait(rzio); 5833 } 5834 return (0); 5835} 5836 5837/* 5838 * Notify the arc that a block was freed, and thus will never be used again. 5839 */ 5840void 5841arc_freed(spa_t *spa, const blkptr_t *bp) 5842{ 5843 arc_buf_hdr_t *hdr; 5844 kmutex_t *hash_lock; 5845 uint64_t guid = spa_load_guid(spa); 5846 5847 ASSERT(!BP_IS_EMBEDDED(bp)); 5848 5849 hdr = buf_hash_find(guid, bp, &hash_lock); 5850 if (hdr == NULL) 5851 return; 5852 5853 /* 5854 * We might be trying to free a block that is still doing I/O 5855 * (i.e. prefetch) or has a reference (i.e. a dedup-ed, 5856 * dmu_sync-ed block). If this block is being prefetched, then it 5857 * would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr 5858 * until the I/O completes. A block may also have a reference if it is 5859 * part of a dedup-ed, dmu_synced write. 
The dmu_sync() function would 5860 * have written the new block to its final resting place on disk but 5861 * without the dedup flag set. This would have left the hdr in the MRU 5862 * state and discoverable. When the txg finally syncs it detects that 5863 * the block was overridden in open context and issues an override I/O. 5864 * Since this is a dedup block, the override I/O will determine if the 5865 * block is already in the DDT. If so, then it will replace the io_bp 5866 * with the bp from the DDT and allow the I/O to finish. When the I/O 5867 * reaches the done callback, dbuf_write_override_done, it will 5868 * check to see if the io_bp and io_bp_override are identical. 5869 * If they are not, then it indicates that the bp was replaced with 5870 * the bp in the DDT and the override bp is freed. This allows 5871 * us to arrive here with a reference on a block that is being 5872 * freed. So if we have an I/O in progress, or a reference to 5873 * this hdr, then we don't destroy the hdr. 5874 */ 5875 if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) && 5876 refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) { 5877 arc_change_state(arc_anon, hdr, hash_lock); 5878 arc_hdr_destroy(hdr); 5879 mutex_exit(hash_lock); 5880 } else { 5881 mutex_exit(hash_lock); 5882 } 5883 5884} 5885 5886/* 5887 * Release this buffer from the cache, making it an anonymous buffer. This 5888 * must be done after a read and prior to modifying the buffer contents. 5889 * If the buffer has more than one reference, we must make 5890 * a new hdr for the buffer. 5891 */ 5892void 5893arc_release(arc_buf_t *buf, void *tag) 5894{ 5895 arc_buf_hdr_t *hdr = buf->b_hdr; 5896 5897 /* 5898 * It would be nice to assert that if it's DMU metadata (level > 5899 * 0 || it's the dnode file), then it must be syncing context. 5900 * But we don't know that information at this level. 5901 */ 5902 5903 mutex_enter(&buf->b_evict_lock); 5904 5905 ASSERT(HDR_HAS_L1HDR(hdr)); 5906 5907 /* 5908 * We don't grab the hash lock prior to this check, because if 5909 * the buffer's header is in the arc_anon state, it won't be 5910 * linked into the hash table. 5911 */ 5912 if (hdr->b_l1hdr.b_state == arc_anon) { 5913 mutex_exit(&buf->b_evict_lock); 5914 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 5915 ASSERT(!HDR_IN_HASH_TABLE(hdr)); 5916 ASSERT(!HDR_HAS_L2HDR(hdr)); 5917 ASSERT(HDR_EMPTY(hdr)); 5918 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 5919 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1); 5920 ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node)); 5921 5922 hdr->b_l1hdr.b_arc_access = 0; 5923 5924 /* 5925 * If the buf is being overridden then it may already 5926 * have a hdr that is not empty. 5927 */ 5928 buf_discard_identity(hdr); 5929 arc_buf_thaw(buf); 5930 5931 return; 5932 } 5933 5934 kmutex_t *hash_lock = HDR_LOCK(hdr); 5935 mutex_enter(hash_lock); 5936 5937 /* 5938 * This assignment is only valid as long as the hash_lock is 5939 * held, we must be careful not to reference state or the 5940 * b_state field after dropping the lock. 
5941 */ 5942 arc_state_t *state = hdr->b_l1hdr.b_state; 5943 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 5944 ASSERT3P(state, !=, arc_anon); 5945 5946 /* this buffer is not on any list */ 5947 ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0); 5948 5949 if (HDR_HAS_L2HDR(hdr)) { 5950 mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5951 5952 /* 5953 * We have to recheck this conditional again now that 5954 * we're holding the l2ad_mtx to prevent a race with 5955 * another thread which might be concurrently calling 5956 * l2arc_evict(). In that case, l2arc_evict() might have 5957 * destroyed the header's L2 portion as we were waiting 5958 * to acquire the l2ad_mtx. 5959 */ 5960 if (HDR_HAS_L2HDR(hdr)) { 5961 l2arc_trim(hdr); 5962 arc_hdr_l2hdr_destroy(hdr); 5963 } 5964 5965 mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx); 5966 } 5967 5968 /* 5969 * Do we have more than one buf? 5970 */ 5971 if (hdr->b_l1hdr.b_bufcnt > 1) { 5972 arc_buf_hdr_t *nhdr; 5973 uint64_t spa = hdr->b_spa; 5974 uint64_t psize = HDR_GET_PSIZE(hdr); 5975 uint64_t lsize = HDR_GET_LSIZE(hdr); 5976 enum zio_compress compress = HDR_GET_COMPRESS(hdr); 5977 arc_buf_contents_t type = arc_buf_type(hdr); 5978 VERIFY3U(hdr->b_type, ==, type); 5979 5980 ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL); 5981 (void) remove_reference(hdr, hash_lock, tag); 5982 5983 if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) { 5984 ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf); 5985 ASSERT(ARC_BUF_LAST(buf)); 5986 } 5987 5988 /* 5989 * Pull the data off of this hdr and attach it to 5990 * a new anonymous hdr. Also find the last buffer 5991 * in the hdr's buffer list. 5992 */ 5993 arc_buf_t *lastbuf = arc_buf_remove(hdr, buf); 5994 ASSERT3P(lastbuf, !=, NULL); 5995 5996 /* 5997 * If the current arc_buf_t and the hdr are sharing their data 5998 * buffer, then we must stop sharing that block. 5999 */ 6000 if (arc_buf_is_shared(buf)) { 6001 VERIFY(!arc_buf_is_shared(lastbuf)); 6002 6003 /* 6004 * First, sever the block sharing relationship between 6005 * buf and the arc_buf_hdr_t. 6006 */ 6007 arc_unshare_buf(hdr, buf); 6008 6009 /* 6010 * Now we need to recreate the hdr's b_pabd. Since we 6011 * have lastbuf handy, we try to share with it, but if 6012 * we can't then we allocate a new b_pabd and copy the 6013 * data from buf into it. 6014 */ 6015 if (arc_can_share(hdr, lastbuf)) { 6016 arc_share_buf(hdr, lastbuf); 6017 } else { 6018 arc_hdr_alloc_pabd(hdr, B_TRUE); 6019 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, 6020 buf->b_data, psize); 6021 } 6022 VERIFY3P(lastbuf->b_data, !=, NULL); 6023 } else if (HDR_SHARED_DATA(hdr)) { 6024 /* 6025 * Uncompressed shared buffers are always at the end 6026 * of the list. Compressed buffers don't have the 6027 * same requirements. This makes it hard to 6028 * simply assert that the lastbuf is shared so 6029 * we rely on the hdr's compression flags to determine 6030 * if we have a compressed, shared buffer. 
6031 */ 6032 ASSERT(arc_buf_is_shared(lastbuf) || 6033 HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF); 6034 ASSERT(!ARC_BUF_SHARED(buf)); 6035 } 6036 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 6037 ASSERT3P(state, !=, arc_l2c_only); 6038 6039 (void) refcount_remove_many(&state->arcs_size, 6040 arc_buf_size(buf), buf); 6041 6042 if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) { 6043 ASSERT3P(state, !=, arc_l2c_only); 6044 (void) refcount_remove_many(&state->arcs_esize[type], 6045 arc_buf_size(buf), buf); 6046 } 6047 6048 hdr->b_l1hdr.b_bufcnt -= 1; 6049 arc_cksum_verify(buf); 6050#ifdef illumos 6051 arc_buf_unwatch(buf); 6052#endif 6053 6054 mutex_exit(hash_lock); 6055 6056 /* 6057 * Allocate a new hdr. The new hdr will contain a b_pabd 6058 * buffer which will be freed in arc_write(). 6059 */ 6060 nhdr = arc_hdr_alloc(spa, psize, lsize, compress, type); 6061 ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL); 6062 ASSERT0(nhdr->b_l1hdr.b_bufcnt); 6063 ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt)); 6064 VERIFY3U(nhdr->b_type, ==, type); 6065 ASSERT(!HDR_SHARED_DATA(nhdr)); 6066 6067 nhdr->b_l1hdr.b_buf = buf; 6068 nhdr->b_l1hdr.b_bufcnt = 1; 6069 (void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag); 6070 buf->b_hdr = nhdr; 6071 6072 mutex_exit(&buf->b_evict_lock); 6073 (void) refcount_add_many(&arc_anon->arcs_size, 6074 arc_buf_size(buf), buf); 6075 } else { 6076 mutex_exit(&buf->b_evict_lock); 6077 ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1); 6078 /* protected by hash lock, or hdr is on arc_anon */ 6079 ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node)); 6080 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 6081 arc_change_state(arc_anon, hdr, hash_lock); 6082 hdr->b_l1hdr.b_arc_access = 0; 6083 mutex_exit(hash_lock); 6084 6085 buf_discard_identity(hdr); 6086 arc_buf_thaw(buf); 6087 } 6088} 6089 6090int 6091arc_released(arc_buf_t *buf) 6092{ 6093 int released; 6094 6095 mutex_enter(&buf->b_evict_lock); 6096 released = (buf->b_data != NULL && 6097 buf->b_hdr->b_l1hdr.b_state == arc_anon); 6098 mutex_exit(&buf->b_evict_lock); 6099 return (released); 6100} 6101 6102#ifdef ZFS_DEBUG 6103int 6104arc_referenced(arc_buf_t *buf) 6105{ 6106 int referenced; 6107 6108 mutex_enter(&buf->b_evict_lock); 6109 referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt)); 6110 mutex_exit(&buf->b_evict_lock); 6111 return (referenced); 6112} 6113#endif 6114 6115static void 6116arc_write_ready(zio_t *zio) 6117{ 6118 arc_write_callback_t *callback = zio->io_private; 6119 arc_buf_t *buf = callback->awcb_buf; 6120 arc_buf_hdr_t *hdr = buf->b_hdr; 6121 uint64_t psize = BP_IS_HOLE(zio->io_bp) ? 0 : BP_GET_PSIZE(zio->io_bp); 6122 6123 ASSERT(HDR_HAS_L1HDR(hdr)); 6124 ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt)); 6125 ASSERT(hdr->b_l1hdr.b_bufcnt > 0); 6126 6127 /* 6128 * If we're reexecuting this zio because the pool suspended, then 6129 * cleanup any state that was previously set the first time the 6130 * callback was invoked. 
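 * Concretely, the code below drops any freeze checksum and releases
 * (or un-shares) the hdr's b_pabd so that both can be rebuilt from
 * the new zio contents further down.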
6131 */ 6132 if (zio->io_flags & ZIO_FLAG_REEXECUTED) { 6133 arc_cksum_free(hdr); 6134#ifdef illumos 6135 arc_buf_unwatch(buf); 6136#endif 6137 if (hdr->b_l1hdr.b_pabd != NULL) { 6138 if (arc_buf_is_shared(buf)) { 6139 arc_unshare_buf(hdr, buf); 6140 } else { 6141 arc_hdr_free_pabd(hdr); 6142 } 6143 } 6144 } 6145 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 6146 ASSERT(!HDR_SHARED_DATA(hdr)); 6147 ASSERT(!arc_buf_is_shared(buf)); 6148 6149 callback->awcb_ready(zio, buf, callback->awcb_private); 6150 6151 if (HDR_IO_IN_PROGRESS(hdr)) 6152 ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED); 6153 6154 arc_cksum_compute(buf); 6155 arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6156 6157 enum zio_compress compress; 6158 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 6159 compress = ZIO_COMPRESS_OFF; 6160 } else { 6161 ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(zio->io_bp)); 6162 compress = BP_GET_COMPRESS(zio->io_bp); 6163 } 6164 HDR_SET_PSIZE(hdr, psize); 6165 arc_hdr_set_compress(hdr, compress); 6166 6167 6168 /* 6169 * Fill the hdr with data. If the hdr is compressed, the data we want 6170 * is available from the zio, otherwise we can take it from the buf. 6171 * 6172 * We might be able to share the buf's data with the hdr here. However, 6173 * doing so would cause the ARC to be full of linear ABDs if we write a 6174 * lot of shareable data. As a compromise, we check whether scattered 6175 * ABDs are allowed, and assume that if they are then the user wants 6176 * the ARC to be primarily filled with them regardless of the data being 6177 * written. Therefore, if they're allowed then we allocate one and copy 6178 * the data into it; otherwise, we share the data directly if we can. 6179 */ 6180 if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) { 6181 arc_hdr_alloc_pabd(hdr, B_TRUE); 6182 6183 /* 6184 * Ideally, we would always copy the io_abd into b_pabd, but the 6185 * user may have disabled compressed ARC, thus we must check the 6186 * hdr's compression setting rather than the io_bp's. 6187 */ 6188 if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF) { 6189 ASSERT3U(BP_GET_COMPRESS(zio->io_bp), !=, 6190 ZIO_COMPRESS_OFF); 6191 ASSERT3U(psize, >, 0); 6192 6193 abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize); 6194 } else { 6195 ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr)); 6196 6197 abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data, 6198 arc_buf_size(buf)); 6199 } 6200 } else { 6201 ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd)); 6202 ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf)); 6203 ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1); 6204 6205 arc_share_buf(hdr, buf); 6206 } 6207 6208 arc_hdr_verify(hdr, zio->io_bp); 6209} 6210 6211static void 6212arc_write_children_ready(zio_t *zio) 6213{ 6214 arc_write_callback_t *callback = zio->io_private; 6215 arc_buf_t *buf = callback->awcb_buf; 6216 6217 callback->awcb_children_ready(zio, buf, callback->awcb_private); 6218} 6219 6220/* 6221 * The SPA calls this callback for each physical write that happens on behalf 6222 * of a logical write. See the comment in dbuf_write_physdone() for details. 
6223 */ 6224static void 6225arc_write_physdone(zio_t *zio) 6226{ 6227 arc_write_callback_t *cb = zio->io_private; 6228 if (cb->awcb_physdone != NULL) 6229 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private); 6230} 6231 6232static void 6233arc_write_done(zio_t *zio) 6234{ 6235 arc_write_callback_t *callback = zio->io_private; 6236 arc_buf_t *buf = callback->awcb_buf; 6237 arc_buf_hdr_t *hdr = buf->b_hdr; 6238 6239 ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL); 6240 6241 if (zio->io_error == 0) { 6242 arc_hdr_verify(hdr, zio->io_bp); 6243 6244 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) { 6245 buf_discard_identity(hdr); 6246 } else { 6247 hdr->b_dva = *BP_IDENTITY(zio->io_bp); 6248 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp); 6249 } 6250 } else { 6251 ASSERT(HDR_EMPTY(hdr)); 6252 } 6253 6254 /* 6255 * If the block to be written was all-zero or compressed enough to be 6256 * embedded in the BP, no write was performed so there will be no 6257 * dva/birth/checksum. The buffer must therefore remain anonymous 6258 * (and uncached). 6259 */ 6260 if (!HDR_EMPTY(hdr)) { 6261 arc_buf_hdr_t *exists; 6262 kmutex_t *hash_lock; 6263 6264 ASSERT3U(zio->io_error, ==, 0); 6265 6266 arc_cksum_verify(buf); 6267 6268 exists = buf_hash_insert(hdr, &hash_lock); 6269 if (exists != NULL) { 6270 /* 6271 * This can only happen if we overwrite for 6272 * sync-to-convergence, because we remove 6273 * buffers from the hash table when we arc_free(). 6274 */ 6275 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { 6276 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 6277 panic("bad overwrite, hdr=%p exists=%p", 6278 (void *)hdr, (void *)exists); 6279 ASSERT(refcount_is_zero( 6280 &exists->b_l1hdr.b_refcnt)); 6281 arc_change_state(arc_anon, exists, hash_lock); 6282 mutex_exit(hash_lock); 6283 arc_hdr_destroy(exists); 6284 exists = buf_hash_insert(hdr, &hash_lock); 6285 ASSERT3P(exists, ==, NULL); 6286 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) { 6287 /* nopwrite */ 6288 ASSERT(zio->io_prop.zp_nopwrite); 6289 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp)) 6290 panic("bad nopwrite, hdr=%p exists=%p", 6291 (void *)hdr, (void *)exists); 6292 } else { 6293 /* Dedup */ 6294 ASSERT(hdr->b_l1hdr.b_bufcnt == 1); 6295 ASSERT(hdr->b_l1hdr.b_state == arc_anon); 6296 ASSERT(BP_GET_DEDUP(zio->io_bp)); 6297 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0); 6298 } 6299 } 6300 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6301 /* if it's not anon, we are doing a scrub */ 6302 if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon) 6303 arc_access(hdr, hash_lock); 6304 mutex_exit(hash_lock); 6305 } else { 6306 arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS); 6307 } 6308 6309 ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt)); 6310 callback->awcb_done(zio, buf, callback->awcb_private); 6311 6312 abd_put(zio->io_abd); 6313 kmem_free(callback, sizeof (arc_write_callback_t)); 6314} 6315 6316zio_t * 6317arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, arc_buf_t *buf, 6318 boolean_t l2arc, const zio_prop_t *zp, arc_write_done_func_t *ready, 6319 arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone, 6320 arc_write_done_func_t *done, void *private, zio_priority_t priority, 6321 int zio_flags, const zbookmark_phys_t *zb) 6322{ 6323 arc_buf_hdr_t *hdr = buf->b_hdr; 6324 arc_write_callback_t *callback; 6325 zio_t *zio; 6326 zio_prop_t localprop = *zp; 6327 6328 ASSERT3P(ready, !=, NULL); 6329 ASSERT3P(done, !=, NULL); 6330 ASSERT(!HDR_IO_ERROR(hdr)); 6331 ASSERT(!HDR_IO_IN_PROGRESS(hdr)); 6332 ASSERT3P(hdr->b_l1hdr.b_acb, 
==, NULL); 6333 ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0); 6334 if (l2arc) 6335 arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE); 6336 if (ARC_BUF_COMPRESSED(buf)) { 6337 /* 6338 * We're writing a pre-compressed buffer. Make the 6339 * compression algorithm requested by the zio_prop_t match 6340 * the pre-compressed buffer's compression algorithm. 6341 */ 6342 localprop.zp_compress = HDR_GET_COMPRESS(hdr); 6343 6344 ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf)); 6345 zio_flags |= ZIO_FLAG_RAW; 6346 } 6347 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP); 6348 callback->awcb_ready = ready; 6349 callback->awcb_children_ready = children_ready; 6350 callback->awcb_physdone = physdone; 6351 callback->awcb_done = done; 6352 callback->awcb_private = private; 6353 callback->awcb_buf = buf; 6354 6355 /* 6356 * The hdr's b_pabd is now stale, free it now. A new data block 6357 * will be allocated when the zio pipeline calls arc_write_ready(). 6358 */ 6359 if (hdr->b_l1hdr.b_pabd != NULL) { 6360 /* 6361 * If the buf is currently sharing the data block with 6362 * the hdr then we need to break that relationship here. 6363 * The hdr will remain with a NULL data pointer and the 6364 * buf will take sole ownership of the block. 6365 */ 6366 if (arc_buf_is_shared(buf)) { 6367 arc_unshare_buf(hdr, buf); 6368 } else { 6369 arc_hdr_free_pabd(hdr); 6370 } 6371 VERIFY3P(buf->b_data, !=, NULL); 6372 arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF); 6373 } 6374 ASSERT(!arc_buf_is_shared(buf)); 6375 ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL); 6376 6377 zio = zio_write(pio, spa, txg, bp, 6378 abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)), 6379 HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready, 6380 (children_ready != NULL) ? arc_write_children_ready : NULL, 6381 arc_write_physdone, arc_write_done, callback, 6382 priority, zio_flags, zb); 6383 6384 return (zio); 6385} 6386 6387static int 6388arc_memory_throttle(spa_t *spa, uint64_t reserve, uint64_t txg) 6389{ 6390#ifdef _KERNEL 6391 uint64_t available_memory = ptob(freemem); 6392 6393#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC) 6394 available_memory = 6395 MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE))); 6396#endif 6397 6398 if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100) 6399 return (0); 6400 6401 if (txg > spa->spa_lowmem_last_txg) { 6402 spa->spa_lowmem_last_txg = txg; 6403 spa->spa_lowmem_page_load = 0; 6404 } 6405 /* 6406 * If we are in pageout, we know that memory is already tight, 6407 * the arc is already going to be evicting, so we just want to 6408 * continue to let page writes occur as quickly as possible. 
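 * Even so, the check below returns ERESTART if the accumulated
 * page_load grows too large; threads other than pageout are instead
 * asked to back off with EAGAIN once memory is low and the ARC needs
 * to reclaim.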
6409 */ 6410 if (curproc == pageproc) { 6411 if (spa->spa_lowmem_page_load > 6412 MAX(ptob(minfree), available_memory) / 4) 6413 return (SET_ERROR(ERESTART)); 6414 /* Note: reserve is inflated, so we deflate */ 6415 atomic_add_64(&spa->spa_lowmem_page_load, reserve / 8); 6416 return (0); 6417 } else if (spa->spa_lowmem_page_load > 0 && arc_reclaim_needed()) { 6418 /* memory is low, delay before restarting */ 6419 ARCSTAT_INCR(arcstat_memory_throttle_count, 1); 6420 return (SET_ERROR(EAGAIN)); 6421 } 6422 spa->spa_lowmem_page_load = 0; 6423#endif /* _KERNEL */ 6424 return (0); 6425} 6426 6427void 6428arc_tempreserve_clear(uint64_t reserve) 6429{ 6430 atomic_add_64(&arc_tempreserve, -reserve); 6431 ASSERT((int64_t)arc_tempreserve >= 0); 6432} 6433 6434int 6435arc_tempreserve_space(spa_t *spa, uint64_t reserve, uint64_t txg) 6436{ 6437 int error; 6438 uint64_t anon_size; 6439 6440 if (reserve > arc_c/4 && !arc_no_grow) { 6441 arc_c = MIN(arc_c_max, reserve * 4); 6442 DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c); 6443 } 6444 if (reserve > arc_c) 6445 return (SET_ERROR(ENOMEM)); 6446 6447 /* 6448 * Don't count loaned bufs as in flight dirty data to prevent long 6449 * network delays from blocking transactions that are ready to be 6450 * assigned to a txg. 6451 */ 6452 6453 /* assert that it has not wrapped around */ 6454 ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0); 6455 6456 anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) - 6457 arc_loaned_bytes), 0); 6458 6459 /* 6460 * Writes will, almost always, require additional memory allocations 6461 * in order to compress/encrypt/etc the data. We therefore need to 6462 * make sure that there is sufficient available memory for this. 6463 */ 6464 error = arc_memory_throttle(spa, reserve, txg); 6465 if (error != 0) 6466 return (error); 6467 6468 /* 6469 * Throttle writes when the amount of dirty data in the cache 6470 * gets too large. We try to keep the cache less than half full 6471 * of dirty blocks so that our sync times don't grow too large. 6472 * 6473 * In the case of one pool being built on another pool, we want 6474 * to make sure we don't end up throttling the lower (backing) 6475 * pool when the upper pool is the majority contributor to dirty 6476 * data. To insure we make forward progress during throttling, we 6477 * also check the current pool's net dirty data and only throttle 6478 * if it exceeds zfs_arc_pool_dirty_percent of the anonymous dirty 6479 * data in the cache. 6480 * 6481 * Note: if two requests come in concurrently, we might let them 6482 * both succeed, when one of them should fail. Not a huge deal. 
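 * In short, the request is throttled (ERESTART) only when all three
 * of the checks below hold: total dirty data exceeds its share of
 * arc_c, anonymous data exceeds its share of arc_c, and this pool's
 * own dirty data exceeds its allowed fraction of the anonymous data.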
6483 */ 6484 uint64_t total_dirty = reserve + arc_tempreserve + anon_size; 6485 uint64_t spa_dirty_anon = spa_dirty_data(spa); 6486 6487 if (total_dirty > arc_c * zfs_arc_dirty_limit_percent / 100 && 6488 anon_size > arc_c * zfs_arc_anon_limit_percent / 100 && 6489 spa_dirty_anon > anon_size * zfs_arc_pool_dirty_percent / 100) { 6490 uint64_t meta_esize = 6491 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 6492 uint64_t data_esize = 6493 refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 6494 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK " 6495 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n", 6496 arc_tempreserve >> 10, meta_esize >> 10, 6497 data_esize >> 10, reserve >> 10, arc_c >> 10); 6498 return (SET_ERROR(ERESTART)); 6499 } 6500 atomic_add_64(&arc_tempreserve, reserve); 6501 return (0); 6502} 6503 6504static void 6505arc_kstat_update_state(arc_state_t *state, kstat_named_t *size, 6506 kstat_named_t *evict_data, kstat_named_t *evict_metadata) 6507{ 6508 size->value.ui64 = refcount_count(&state->arcs_size); 6509 evict_data->value.ui64 = 6510 refcount_count(&state->arcs_esize[ARC_BUFC_DATA]); 6511 evict_metadata->value.ui64 = 6512 refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]); 6513} 6514 6515static int 6516arc_kstat_update(kstat_t *ksp, int rw) 6517{ 6518 arc_stats_t *as = ksp->ks_data; 6519 6520 if (rw == KSTAT_WRITE) { 6521 return (EACCES); 6522 } else { 6523 arc_kstat_update_state(arc_anon, 6524 &as->arcstat_anon_size, 6525 &as->arcstat_anon_evictable_data, 6526 &as->arcstat_anon_evictable_metadata); 6527 arc_kstat_update_state(arc_mru, 6528 &as->arcstat_mru_size, 6529 &as->arcstat_mru_evictable_data, 6530 &as->arcstat_mru_evictable_metadata); 6531 arc_kstat_update_state(arc_mru_ghost, 6532 &as->arcstat_mru_ghost_size, 6533 &as->arcstat_mru_ghost_evictable_data, 6534 &as->arcstat_mru_ghost_evictable_metadata); 6535 arc_kstat_update_state(arc_mfu, 6536 &as->arcstat_mfu_size, 6537 &as->arcstat_mfu_evictable_data, 6538 &as->arcstat_mfu_evictable_metadata); 6539 arc_kstat_update_state(arc_mfu_ghost, 6540 &as->arcstat_mfu_ghost_size, 6541 &as->arcstat_mfu_ghost_evictable_data, 6542 &as->arcstat_mfu_ghost_evictable_metadata); 6543 6544 ARCSTAT(arcstat_size) = aggsum_value(&arc_size); 6545 ARCSTAT(arcstat_meta_used) = aggsum_value(&arc_meta_used); 6546 ARCSTAT(arcstat_data_size) = aggsum_value(&astat_data_size); 6547 ARCSTAT(arcstat_metadata_size) = 6548 aggsum_value(&astat_metadata_size); 6549 ARCSTAT(arcstat_hdr_size) = aggsum_value(&astat_hdr_size); 6550 ARCSTAT(arcstat_other_size) = aggsum_value(&astat_other_size); 6551 ARCSTAT(arcstat_l2_hdr_size) = aggsum_value(&astat_l2_hdr_size); 6552 } 6553 6554 return (0); 6555} 6556 6557/* 6558 * This function *must* return indices evenly distributed between all 6559 * sublists of the multilist. This is needed due to how the ARC eviction 6560 * code is laid out; arc_evict_state() assumes ARC buffers are evenly 6561 * distributed between all sublists and uses this assumption when 6562 * deciding which sublist to evict from and how much to evict from it. 6563 */ 6564unsigned int 6565arc_state_multilist_index_func(multilist_t *ml, void *obj) 6566{ 6567 arc_buf_hdr_t *hdr = obj; 6568 6569 /* 6570 * We rely on b_dva to generate evenly distributed index 6571 * numbers using buf_hash below. So, as an added precaution, 6572 * let's make sure we never add empty buffers to the arc lists. 
6573 */ 6574 ASSERT(!HDR_EMPTY(hdr)); 6575 6576 /* 6577 * The assumption here, is the hash value for a given 6578 * arc_buf_hdr_t will remain constant throughout it's lifetime 6579 * (i.e. it's b_spa, b_dva, and b_birth fields don't change). 6580 * Thus, we don't need to store the header's sublist index 6581 * on insertion, as this index can be recalculated on removal. 6582 * 6583 * Also, the low order bits of the hash value are thought to be 6584 * distributed evenly. Otherwise, in the case that the multilist 6585 * has a power of two number of sublists, each sublists' usage 6586 * would not be evenly distributed. 6587 */ 6588 return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) % 6589 multilist_get_num_sublists(ml)); 6590} 6591 6592#ifdef _KERNEL 6593static eventhandler_tag arc_event_lowmem = NULL; 6594 6595static void 6596arc_lowmem(void *arg __unused, int howto __unused) 6597{ 6598 int64_t free_memory, to_free; 6599 6600 arc_no_grow = B_TRUE; 6601 arc_warm = B_TRUE; 6602 arc_growtime = gethrtime() + SEC2NSEC(arc_grow_retry); 6603 free_memory = arc_available_memory(); 6604 to_free = (arc_c >> arc_shrink_shift) - MIN(free_memory, 0); 6605 DTRACE_PROBE2(arc__needfree, int64_t, free_memory, int64_t, to_free); 6606 arc_reduce_target_size(to_free); 6607 6608 mutex_enter(&arc_adjust_lock); 6609 arc_adjust_needed = B_TRUE; 6610 zthr_wakeup(arc_adjust_zthr); 6611 6612 /* 6613 * It is unsafe to block here in arbitrary threads, because we can come 6614 * here from ARC itself and may hold ARC locks and thus risk a deadlock 6615 * with ARC reclaim thread. 6616 */ 6617 if (curproc == pageproc) 6618 (void) cv_wait(&arc_adjust_waiters_cv, &arc_adjust_lock); 6619 mutex_exit(&arc_adjust_lock); 6620} 6621#endif 6622 6623static void 6624arc_state_init(void) 6625{ 6626 arc_anon = &ARC_anon; 6627 arc_mru = &ARC_mru; 6628 arc_mru_ghost = &ARC_mru_ghost; 6629 arc_mfu = &ARC_mfu; 6630 arc_mfu_ghost = &ARC_mfu_ghost; 6631 arc_l2c_only = &ARC_l2c_only; 6632 6633 arc_mru->arcs_list[ARC_BUFC_METADATA] = 6634 multilist_create(sizeof (arc_buf_hdr_t), 6635 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6636 arc_state_multilist_index_func); 6637 arc_mru->arcs_list[ARC_BUFC_DATA] = 6638 multilist_create(sizeof (arc_buf_hdr_t), 6639 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6640 arc_state_multilist_index_func); 6641 arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] = 6642 multilist_create(sizeof (arc_buf_hdr_t), 6643 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6644 arc_state_multilist_index_func); 6645 arc_mru_ghost->arcs_list[ARC_BUFC_DATA] = 6646 multilist_create(sizeof (arc_buf_hdr_t), 6647 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6648 arc_state_multilist_index_func); 6649 arc_mfu->arcs_list[ARC_BUFC_METADATA] = 6650 multilist_create(sizeof (arc_buf_hdr_t), 6651 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6652 arc_state_multilist_index_func); 6653 arc_mfu->arcs_list[ARC_BUFC_DATA] = 6654 multilist_create(sizeof (arc_buf_hdr_t), 6655 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6656 arc_state_multilist_index_func); 6657 arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] = 6658 multilist_create(sizeof (arc_buf_hdr_t), 6659 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6660 arc_state_multilist_index_func); 6661 arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] = 6662 multilist_create(sizeof (arc_buf_hdr_t), 6663 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6664 arc_state_multilist_index_func); 6665 arc_l2c_only->arcs_list[ARC_BUFC_METADATA] = 6666 multilist_create(sizeof (arc_buf_hdr_t), 6667 offsetof(arc_buf_hdr_t, 
b_l1hdr.b_arc_node), 6668 arc_state_multilist_index_func); 6669 arc_l2c_only->arcs_list[ARC_BUFC_DATA] = 6670 multilist_create(sizeof (arc_buf_hdr_t), 6671 offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node), 6672 arc_state_multilist_index_func); 6673 6674 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 6675 refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 6676 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 6677 refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 6678 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 6679 refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 6680 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 6681 refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 6682 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 6683 refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 6684 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 6685 refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 6686 6687 refcount_create(&arc_anon->arcs_size); 6688 refcount_create(&arc_mru->arcs_size); 6689 refcount_create(&arc_mru_ghost->arcs_size); 6690 refcount_create(&arc_mfu->arcs_size); 6691 refcount_create(&arc_mfu_ghost->arcs_size); 6692 refcount_create(&arc_l2c_only->arcs_size); 6693 6694 aggsum_init(&arc_meta_used, 0); 6695 aggsum_init(&arc_size, 0); 6696 aggsum_init(&astat_data_size, 0); 6697 aggsum_init(&astat_metadata_size, 0); 6698 aggsum_init(&astat_hdr_size, 0); 6699 aggsum_init(&astat_other_size, 0); 6700 aggsum_init(&astat_l2_hdr_size, 0); 6701} 6702 6703static void 6704arc_state_fini(void) 6705{ 6706 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]); 6707 refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]); 6708 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]); 6709 refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]); 6710 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]); 6711 refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]); 6712 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]); 6713 refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]); 6714 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]); 6715 refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]); 6716 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]); 6717 refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]); 6718 6719 refcount_destroy(&arc_anon->arcs_size); 6720 refcount_destroy(&arc_mru->arcs_size); 6721 refcount_destroy(&arc_mru_ghost->arcs_size); 6722 refcount_destroy(&arc_mfu->arcs_size); 6723 refcount_destroy(&arc_mfu_ghost->arcs_size); 6724 refcount_destroy(&arc_l2c_only->arcs_size); 6725 6726 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]); 6727 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]); 6728 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]); 6729 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]); 6730 multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]); 6731 multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]); 6732 multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]); 6733 multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]); 6734} 6735 6736uint64_t 6737arc_max_bytes(void) 6738{ 6739 return (arc_c_max); 6740} 6741 6742void 6743arc_init(void) 6744{ 6745 int i, prefetch_tunable_set = 0; 6746 6747 /* 6748 * allmem is "all memory that we could possibly use". 
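 * For example (a sketch only, ignoring tunables and later clamping):
 * with allmem = 16 GiB, the default arc_c_max computed below is
 * MAX(16 GiB - 1 GiB, 5/8 * 16 GiB) = 15 GiB.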
6749 */ 6750#ifdef illumos 6751#ifdef _KERNEL 6752 uint64_t allmem = ptob(physmem - swapfs_minfree); 6753#else 6754 uint64_t allmem = (physmem * PAGESIZE) / 2; 6755#endif 6756#else 6757 uint64_t allmem = kmem_size(); 6758#endif 6759 mutex_init(&arc_adjust_lock, NULL, MUTEX_DEFAULT, NULL); 6760 cv_init(&arc_adjust_waiters_cv, NULL, CV_DEFAULT, NULL); 6761 6762 mutex_init(&arc_dnlc_evicts_lock, NULL, MUTEX_DEFAULT, NULL); 6763 cv_init(&arc_dnlc_evicts_cv, NULL, CV_DEFAULT, NULL); 6764 6765 /* set min cache to 1/32 of all memory, or arc_abs_min, whichever is more */ 6766 arc_c_min = MAX(allmem / 32, arc_abs_min); 6767 /* set max to 5/8 of all memory, or all but 1GB, whichever is more */ 6768 if (allmem >= 1 << 30) 6769 arc_c_max = allmem - (1 << 30); 6770 else 6771 arc_c_max = arc_c_min; 6772 arc_c_max = MAX(allmem * 5 / 8, arc_c_max); 6773 6774 /* 6775 * In userland, there's only the memory pressure that we artificially 6776 * create (see arc_available_memory()). Don't let arc_c get too 6777 * small, because it can cause transactions to be larger than 6778 * arc_c, causing arc_tempreserve_space() to fail. 6779 */ 6780#ifndef _KERNEL 6781 arc_c_min = arc_c_max / 2; 6782#endif 6783 6784#ifdef _KERNEL 6785 /* 6786 * Allow the tunables to override our calculations if they are 6787 * reasonable. 6788 */ 6789 if (zfs_arc_max > arc_abs_min && zfs_arc_max < allmem) { 6790 arc_c_max = zfs_arc_max; 6791 arc_c_min = MIN(arc_c_min, arc_c_max); 6792 } 6793 if (zfs_arc_min > arc_abs_min && zfs_arc_min <= arc_c_max) 6794 arc_c_min = zfs_arc_min; 6795#endif 6796 6797 arc_c = arc_c_max; 6798 arc_p = (arc_c >> 1); 6799 6800 /* limit meta-data to 1/4 of the arc capacity */ 6801 arc_meta_limit = arc_c_max / 4; 6802 6803#ifdef _KERNEL 6804 /* 6805 * Metadata is stored in the kernel's heap. Don't let us 6806 * use more than half the heap for the ARC. 6807 */ 6808 arc_meta_limit = MIN(arc_meta_limit, 6809 vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2); 6810#endif 6811 6812 /* Allow the tunable to override if it is reasonable */ 6813 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max) 6814 arc_meta_limit = zfs_arc_meta_limit; 6815 6816 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0) 6817 arc_c_min = arc_meta_limit / 2; 6818 6819 if (zfs_arc_meta_min > 0) { 6820 arc_meta_min = zfs_arc_meta_min; 6821 } else { 6822 arc_meta_min = arc_c_min / 2; 6823 } 6824 6825 if (zfs_arc_grow_retry > 0) 6826 arc_grow_retry = zfs_arc_grow_retry; 6827 6828 if (zfs_arc_shrink_shift > 0) 6829 arc_shrink_shift = zfs_arc_shrink_shift; 6830 6831 if (zfs_arc_no_grow_shift > 0) 6832 arc_no_grow_shift = zfs_arc_no_grow_shift; 6833 /* 6834 * Ensure that arc_no_grow_shift is less than arc_shrink_shift. 6835 */ 6836 if (arc_no_grow_shift >= arc_shrink_shift) 6837 arc_no_grow_shift = arc_shrink_shift - 1; 6838 6839 if (zfs_arc_p_min_shift > 0) 6840 arc_p_min_shift = zfs_arc_p_min_shift; 6841 6842 /* if kmem_flags are set, lets try to use less memory */ 6843 if (kmem_debugging()) 6844 arc_c = arc_c / 2; 6845 if (arc_c < arc_c_min) 6846 arc_c = arc_c_min; 6847 6848 zfs_arc_min = arc_c_min; 6849 zfs_arc_max = arc_c_max; 6850 6851 arc_state_init(); 6852 6853 /* 6854 * The arc must be "uninitialized", so that hdr_recl() (which is 6855 * registered by buf_init()) will not access arc_reap_zthr before 6856 * it is created. 

	/*
	 * In userland, there's only the memory pressure that we artificially
	 * create (see arc_available_memory()).  Don't let arc_c get too
	 * small, because it can cause transactions to be larger than
	 * arc_c, causing arc_tempreserve_space() to fail.
	 */
#ifndef _KERNEL
	arc_c_min = arc_c_max / 2;
#endif

#ifdef _KERNEL
	/*
	 * Allow the tunables to override our calculations if they are
	 * reasonable.
	 */
	if (zfs_arc_max > arc_abs_min && zfs_arc_max < allmem) {
		arc_c_max = zfs_arc_max;
		arc_c_min = MIN(arc_c_min, arc_c_max);
	}
	if (zfs_arc_min > arc_abs_min && zfs_arc_min <= arc_c_max)
		arc_c_min = zfs_arc_min;
#endif

	arc_c = arc_c_max;
	arc_p = (arc_c >> 1);

	/* limit meta-data to 1/4 of the arc capacity */
	arc_meta_limit = arc_c_max / 4;

#ifdef _KERNEL
	/*
	 * Metadata is stored in the kernel's heap.  Don't let us
	 * use more than half the heap for the ARC.
	 */
	arc_meta_limit = MIN(arc_meta_limit,
	    vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 2);
#endif

	/* Allow the tunable to override if it is reasonable */
	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
		arc_meta_limit = zfs_arc_meta_limit;

	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
		arc_c_min = arc_meta_limit / 2;

	if (zfs_arc_meta_min > 0) {
		arc_meta_min = zfs_arc_meta_min;
	} else {
		arc_meta_min = arc_c_min / 2;
	}

	if (zfs_arc_grow_retry > 0)
		arc_grow_retry = zfs_arc_grow_retry;

	if (zfs_arc_shrink_shift > 0)
		arc_shrink_shift = zfs_arc_shrink_shift;

	if (zfs_arc_no_grow_shift > 0)
		arc_no_grow_shift = zfs_arc_no_grow_shift;
	/*
	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
	 */
	if (arc_no_grow_shift >= arc_shrink_shift)
		arc_no_grow_shift = arc_shrink_shift - 1;

	if (zfs_arc_p_min_shift > 0)
		arc_p_min_shift = zfs_arc_p_min_shift;

	/* if kmem_flags are set, let's try to use less memory */
	if (kmem_debugging())
		arc_c = arc_c / 2;
	if (arc_c < arc_c_min)
		arc_c = arc_c_min;

	zfs_arc_min = arc_c_min;
	zfs_arc_max = arc_c_max;

	arc_state_init();

	/*
	 * The arc must be "uninitialized", so that hdr_recl() (which is
	 * registered by buf_init()) will not access arc_reap_zthr before
	 * it is created.
	 */
	ASSERT(!arc_initialized);
	buf_init();

	arc_dnlc_evicts_thread_exit = FALSE;

	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

	if (arc_ksp != NULL) {
		arc_ksp->ks_data = &arc_stats;
		arc_ksp->ks_update = arc_kstat_update;
		kstat_install(arc_ksp);
	}

	arc_adjust_zthr = zthr_create_timer(arc_adjust_cb_check,
	    arc_adjust_cb, NULL, SEC2NSEC(1));
	arc_reap_zthr = zthr_create_timer(arc_reap_cb_check,
	    arc_reap_cb, NULL, SEC2NSEC(1));

#ifdef _KERNEL
	arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
	    EVENTHANDLER_PRI_FIRST);
#endif

	(void) thread_create(NULL, 0, arc_dnlc_evicts_thread, NULL, 0, &p0,
	    TS_RUN, minclsyspri);

	arc_initialized = B_TRUE;
	arc_warm = B_FALSE;

	/*
	 * Calculate maximum amount of dirty data per pool.
	 *
	 * If it has been set by /etc/system, take that.
	 * Otherwise, use a percentage of physical memory defined by
	 * zfs_dirty_data_max_percent (default 10%) with a cap at
	 * zfs_dirty_data_max_max (default 4GB).
	 */
	if (zfs_dirty_data_max == 0) {
		zfs_dirty_data_max = ptob(physmem) *
		    zfs_dirty_data_max_percent / 100;
		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
		    zfs_dirty_data_max_max);
	}

#ifdef _KERNEL
	if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
		prefetch_tunable_set = 1;

#ifdef __i386__
	if (prefetch_tunable_set == 0) {
		printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
		    "-- to enable,\n");
		printf("            add \"vfs.zfs.prefetch_disable=0\" "
		    "to /boot/loader.conf.\n");
		zfs_prefetch_disable = 1;
	}
#else
	if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
	    prefetch_tunable_set == 0) {
		printf("ZFS NOTICE: Prefetch is disabled by default if less "
		    "than 4GB of RAM is present;\n"
		    "            to enable, add \"vfs.zfs.prefetch_disable=0\" "
		    "to /boot/loader.conf.\n");
		zfs_prefetch_disable = 1;
	}
#endif
	/* Warn about ZFS memory and address space requirements. */
	if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
		printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
		    "expect unstable behavior.\n");
	}
	if (allmem < 512 * (1 << 20)) {
		printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
		    "expect unstable behavior.\n");
		printf("             Consider tuning vm.kmem_size and "
		    "vm.kmem_size_max\n");
		printf("             in /boot/loader.conf.\n");
	}
#endif
}

void
arc_fini(void)
{
#ifdef _KERNEL
	if (arc_event_lowmem != NULL)
		EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
#endif

	/* Use B_TRUE to ensure *all* buffers are evicted */
	arc_flush(NULL, B_TRUE);

	mutex_enter(&arc_dnlc_evicts_lock);
	arc_dnlc_evicts_thread_exit = TRUE;
	/*
	 * The dnlc evicts thread will set arc_dnlc_evicts_thread_exit
	 * to FALSE when it has finished exiting; we're waiting for that.
	 */
	while (arc_dnlc_evicts_thread_exit) {
		cv_signal(&arc_dnlc_evicts_cv);
		cv_wait(&arc_dnlc_evicts_cv, &arc_dnlc_evicts_lock);
	}
	mutex_exit(&arc_dnlc_evicts_lock);

	arc_initialized = B_FALSE;

	if (arc_ksp != NULL) {
		kstat_delete(arc_ksp);
		arc_ksp = NULL;
	}

	(void) zthr_cancel(arc_adjust_zthr);
	zthr_destroy(arc_adjust_zthr);

	mutex_destroy(&arc_dnlc_evicts_lock);
	cv_destroy(&arc_dnlc_evicts_cv);

	(void) zthr_cancel(arc_reap_zthr);
	zthr_destroy(arc_reap_zthr);

	mutex_destroy(&arc_adjust_lock);
	cv_destroy(&arc_adjust_waiters_cv);

	arc_state_fini();
	buf_fini();

	ASSERT0(arc_loaned_bytes);
}

/*
 * Level 2 ARC
 *
 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
 * It uses dedicated storage devices to hold cached data, which are populated
 * using large infrequent writes.  The main role of this cache is to boost
 * the performance of random read workloads.  The intended L2ARC devices
 * include short-stroked disks, solid state disks, and other media with
 * substantially faster read latency than disk.
 *
 *                 +-----------------------+
 *                 |         ARC           |
 *                 +-----------------------+
 *                    |         ^     ^
 *                    |         |     |
 *      l2arc_feed_thread()    arc_read()
 *                    |         |     |
 *                    |  l2arc read   |
 *                    V         |     |
 *               +---------------+    |
 *               |     L2ARC     |    |
 *               +---------------+    |
 *                   |    ^           |
 *          l2arc_write() |           |
 *                   |    |           |
 *                   V    |           |
 *                 +-------+      +-------+
 *                 | vdev  |      | vdev  |
 *                 | cache |      | cache |
 *                 +-------+      +-------+
 *                 +=========+     .-----.
 *                 :  L2ARC  :    |-_____-|
 *                 : devices :    | Disks |
 *                 +=========+    `-_____-'
 *
 * Read requests are satisfied from the following sources, in order:
 *
 *	1) ARC
 *	2) vdev cache of L2ARC devices
 *	3) L2ARC devices
 *	4) vdev cache of disks
 *	5) disks
 *
 * Some L2ARC device types exhibit extremely slow write performance.
 * To accommodate this, there are some significant differences between
 * the L2ARC and traditional cache design:
 *
 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
 * the ARC behave as usual, freeing buffers and placing headers on ghost
 * lists.  The ARC does not send buffers to the L2ARC during eviction as
 * this would add inflated write latencies for all ARC memory pressure.
 *
 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
 * It does this by periodically scanning buffers from the eviction-end of
 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
 * not already there.  It scans until a headroom of buffers is satisfied,
 * which itself is a buffer for ARC eviction.  If a compressible buffer is
 * found during scanning and selected for writing to an L2ARC device, we
 * temporarily boost scanning headroom during the next scan cycle to make
 * sure we adapt to compression effects (which might significantly reduce
 * the data volume we write to L2ARC).  The thread that does this is
 * l2arc_feed_thread(), illustrated below; example sizes are included to
 * provide a better sense of ratio than this diagram:
 *
 *                head -->                        tail
 *               +---------------------+----------+
 *    ARC_mfu    |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
 *               +---------------------+----------+   |   o L2ARC eligible
 *    ARC_mru    |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
 *               +---------------------+----------+   |
 *                      15.9 Gbytes    ^ 32 Mbytes    |
 *                                  headroom          |
 *                                           l2arc_feed_thread()
 *                                                     |
 *                        l2arc write hand <--[oooo]--'
 *                                      |    8 Mbyte
 *                                      |    write max
 *                                      V
 *               +==============================+
 *    L2ARC dev  |####|#|###|###|    |####| ... |
 *               +==============================+
 *                          32 Gbytes
 *
 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
 * evicted, then the L2ARC has cached a buffer much sooner than it probably
 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
 * safe to say that this is an uncommon case, since buffers at the end of
 * the ARC lists have moved there due to inactivity.
 *
 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
 * then the L2ARC simply misses copying some buffers.  This serves as a
 * pressure valve to prevent heavy read workloads from both stalling the ARC
 * with waits and clogging the L2ARC with writes.  This also helps prevent
 * the potential for the L2ARC to churn if it attempts to cache content too
 * quickly, such as during backups of the entire pool.
 *
 * 5. After system boot and before the ARC has filled main memory, there are
 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
 * lists can remain mostly static.  Instead of searching from the tail of
 * these lists as pictured, the l2arc_feed_thread() will search from the list
 * heads for eligible buffers, greatly increasing its chance of finding them.
 *
 * The L2ARC device write speed is also boosted during this time so that
 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
 * there are no L2ARC reads, and no fear of degrading read performance
 * through increased writes.
 *
 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
 * the vdev queue can aggregate them into larger and fewer writes.  Each
 * device is written to in a rotor fashion, sweeping writes through
 * available space then repeating.
 *
 * 7. The L2ARC does not store dirty content.  It never needs to flush
 * write buffers back to disk-based storage.
 *
 * 8. If an ARC buffer is written (and dirtied) which also exists in the
 * L2ARC, the now stale L2ARC buffer is immediately dropped.
 *
 * The performance of the L2ARC can be tweaked by a number of tunables, which
 * may be necessary for different workloads:
 *
 *	l2arc_write_max		max write bytes per interval
 *	l2arc_write_boost	extra write bytes during device warmup
 *	l2arc_noprefetch	skip caching prefetched buffers
 *	l2arc_headroom		number of max device writes to precache
 *	l2arc_headroom_boost	when we find compressed buffers during ARC
 *				scanning, we multiply headroom by this
 *				percentage factor for the next scan cycle,
 *				since more compressed buffers are likely to
 *				be present
 *	l2arc_feed_secs		seconds between L2ARC writing
 *
 * Tunables may be removed or added as future performance improvements are
 * integrated, and also may become zpool properties.
 *
 * There are three key functions that control how the L2ARC warms up:
 *
 *	l2arc_write_eligible()	check if a buffer is eligible to cache
 *	l2arc_write_size()	calculate how much to write
 *	l2arc_write_interval()	calculate sleep delay between writes
 *
 * These three functions determine what to write, how much, and how quickly
 * to send writes.
 */
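
/*
 * As an illustration of how the three key functions fit together, one feed
 * cycle looks roughly like this (a sketch mirroring the loop in
 * l2arc_feed_thread() further below):
 *
 *	size = l2arc_write_size();		l2arc_write_max, plus
 *						l2arc_write_boost while the
 *						ARC is still warming up
 *	l2arc_evict(dev, size, B_FALSE);	clear space ahead of the hand
 *	wrote = l2arc_write_buffers(spa, dev, size);
 *	next = l2arc_write_interval(begin, size, wrote);
 *						sooner if wrote > size / 2
 */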

static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
	/*
	 * A buffer is *not* eligible for the L2ARC if it:
	 * 1. belongs to a different spa.
	 * 2. is already cached on the L2ARC.
	 * 3. has an I/O in progress (it may be an incomplete read).
	 * 4. is flagged not eligible (zfs property).
	 */
	if (hdr->b_spa != spa_guid) {
		ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
		return (B_FALSE);
	}
	if (HDR_HAS_L2HDR(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_in_l2);
		return (B_FALSE);
	}
	if (HDR_IO_IN_PROGRESS(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
		return (B_FALSE);
	}
	if (!HDR_L2CACHE(hdr)) {
		ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
		return (B_FALSE);
	}

	return (B_TRUE);
}

static uint64_t
l2arc_write_size(void)
{
	uint64_t size;

	/*
	 * Make sure our globals have meaningful values in case the user
	 * altered them.
	 */
	size = l2arc_write_max;
	if (size == 0) {
		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
		    "be greater than zero, resetting it to the default (%d)",
		    L2ARC_WRITE_SIZE);
		size = l2arc_write_max = L2ARC_WRITE_SIZE;
	}

	if (arc_warm == B_FALSE)
		size += l2arc_write_boost;

	return (size);
}

static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
	clock_t interval, next, now;

	/*
	 * If the ARC lists are busy, increase our write rate; if the
	 * lists are stale, idle back.  This is achieved by checking
	 * how much we previously wrote - if it was more than half of
	 * what we wanted, schedule the next write much sooner.
	 */
	if (l2arc_feed_again && wrote > (wanted / 2))
		interval = (hz * l2arc_feed_min_ms) / 1000;
	else
		interval = hz * l2arc_feed_secs;

	now = ddi_get_lbolt();
	next = MAX(now, MIN(now + interval, began + interval));

	return (next);
}

/*
 * Cycle through L2ARC devices.  This is how L2ARC load balances.
 * If a device is returned, this also returns holding the spa config lock.
 */
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
	l2arc_dev_t *first, *next = NULL;

	/*
	 * Lock out the removal of spas (spa_namespace_lock), then removal
	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
	 * both locks will be dropped and a spa config lock held instead.
7223 */ 7224 mutex_enter(&spa_namespace_lock); 7225 mutex_enter(&l2arc_dev_mtx); 7226 7227 /* if there are no vdevs, there is nothing to do */ 7228 if (l2arc_ndev == 0) 7229 goto out; 7230 7231 first = NULL; 7232 next = l2arc_dev_last; 7233 do { 7234 /* loop around the list looking for a non-faulted vdev */ 7235 if (next == NULL) { 7236 next = list_head(l2arc_dev_list); 7237 } else { 7238 next = list_next(l2arc_dev_list, next); 7239 if (next == NULL) 7240 next = list_head(l2arc_dev_list); 7241 } 7242 7243 /* if we have come back to the start, bail out */ 7244 if (first == NULL) 7245 first = next; 7246 else if (next == first) 7247 break; 7248 7249 } while (vdev_is_dead(next->l2ad_vdev)); 7250 7251 /* if we were unable to find any usable vdevs, return NULL */ 7252 if (vdev_is_dead(next->l2ad_vdev)) 7253 next = NULL; 7254 7255 l2arc_dev_last = next; 7256 7257out: 7258 mutex_exit(&l2arc_dev_mtx); 7259 7260 /* 7261 * Grab the config lock to prevent the 'next' device from being 7262 * removed while we are writing to it. 7263 */ 7264 if (next != NULL) 7265 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER); 7266 mutex_exit(&spa_namespace_lock); 7267 7268 return (next); 7269} 7270 7271/* 7272 * Free buffers that were tagged for destruction. 7273 */ 7274static void 7275l2arc_do_free_on_write() 7276{ 7277 list_t *buflist; 7278 l2arc_data_free_t *df, *df_prev; 7279 7280 mutex_enter(&l2arc_free_on_write_mtx); 7281 buflist = l2arc_free_on_write; 7282 7283 for (df = list_tail(buflist); df; df = df_prev) { 7284 df_prev = list_prev(buflist, df); 7285 ASSERT3P(df->l2df_abd, !=, NULL); 7286 abd_free(df->l2df_abd); 7287 list_remove(buflist, df); 7288 kmem_free(df, sizeof (l2arc_data_free_t)); 7289 } 7290 7291 mutex_exit(&l2arc_free_on_write_mtx); 7292} 7293 7294/* 7295 * A write to a cache device has completed. Update all headers to allow 7296 * reads from these buffers to begin. 7297 */ 7298static void 7299l2arc_write_done(zio_t *zio) 7300{ 7301 l2arc_write_callback_t *cb; 7302 l2arc_dev_t *dev; 7303 list_t *buflist; 7304 arc_buf_hdr_t *head, *hdr, *hdr_prev; 7305 kmutex_t *hash_lock; 7306 int64_t bytes_dropped = 0; 7307 7308 cb = zio->io_private; 7309 ASSERT3P(cb, !=, NULL); 7310 dev = cb->l2wcb_dev; 7311 ASSERT3P(dev, !=, NULL); 7312 head = cb->l2wcb_head; 7313 ASSERT3P(head, !=, NULL); 7314 buflist = &dev->l2ad_buflist; 7315 ASSERT3P(buflist, !=, NULL); 7316 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio, 7317 l2arc_write_callback_t *, cb); 7318 7319 if (zio->io_error != 0) 7320 ARCSTAT_BUMP(arcstat_l2_writes_error); 7321 7322 /* 7323 * All writes completed, or an error was hit. 7324 */ 7325top: 7326 mutex_enter(&dev->l2ad_mtx); 7327 for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) { 7328 hdr_prev = list_prev(buflist, hdr); 7329 7330 hash_lock = HDR_LOCK(hdr); 7331 7332 /* 7333 * We cannot use mutex_enter or else we can deadlock 7334 * with l2arc_write_buffers (due to swapping the order 7335 * the hash lock and l2ad_mtx are taken). 7336 */ 7337 if (!mutex_tryenter(hash_lock)) { 7338 /* 7339 * Missed the hash lock. We must retry so we 7340 * don't leave the ARC_FLAG_L2_WRITING bit set. 7341 */ 7342 ARCSTAT_BUMP(arcstat_l2_writes_lock_retry); 7343 7344 /* 7345 * We don't want to rescan the headers we've 7346 * already marked as having been written out, so 7347 * we reinsert the head node so we can pick up 7348 * where we left off. 
7349 */ 7350 list_remove(buflist, head); 7351 list_insert_after(buflist, hdr, head); 7352 7353 mutex_exit(&dev->l2ad_mtx); 7354 7355 /* 7356 * We wait for the hash lock to become available 7357 * to try and prevent busy waiting, and increase 7358 * the chance we'll be able to acquire the lock 7359 * the next time around. 7360 */ 7361 mutex_enter(hash_lock); 7362 mutex_exit(hash_lock); 7363 goto top; 7364 } 7365 7366 /* 7367 * We could not have been moved into the arc_l2c_only 7368 * state while in-flight due to our ARC_FLAG_L2_WRITING 7369 * bit being set. Let's just ensure that's being enforced. 7370 */ 7371 ASSERT(HDR_HAS_L1HDR(hdr)); 7372 7373 if (zio->io_error != 0) { 7374 /* 7375 * Error - drop L2ARC entry. 7376 */ 7377 list_remove(buflist, hdr); 7378 l2arc_trim(hdr); 7379 arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR); 7380 7381 ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr)); 7382 ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr)); 7383 7384 bytes_dropped += arc_hdr_size(hdr); 7385 (void) refcount_remove_many(&dev->l2ad_alloc, 7386 arc_hdr_size(hdr), hdr); 7387 } 7388 7389 /* 7390 * Allow ARC to begin reads and ghost list evictions to 7391 * this L2ARC entry. 7392 */ 7393 arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING); 7394 7395 mutex_exit(hash_lock); 7396 } 7397 7398 atomic_inc_64(&l2arc_writes_done); 7399 list_remove(buflist, head); 7400 ASSERT(!HDR_HAS_L1HDR(head)); 7401 kmem_cache_free(hdr_l2only_cache, head); 7402 mutex_exit(&dev->l2ad_mtx); 7403 7404 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0); 7405 7406 l2arc_do_free_on_write(); 7407 7408 kmem_free(cb, sizeof (l2arc_write_callback_t)); 7409} 7410 7411/* 7412 * A read to a cache device completed. Validate buffer contents before 7413 * handing over to the regular ARC routines. 7414 */ 7415static void 7416l2arc_read_done(zio_t *zio) 7417{ 7418 l2arc_read_callback_t *cb; 7419 arc_buf_hdr_t *hdr; 7420 kmutex_t *hash_lock; 7421 boolean_t valid_cksum; 7422 7423 ASSERT3P(zio->io_vd, !=, NULL); 7424 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE); 7425 7426 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd); 7427 7428 cb = zio->io_private; 7429 ASSERT3P(cb, !=, NULL); 7430 hdr = cb->l2rcb_hdr; 7431 ASSERT3P(hdr, !=, NULL); 7432 7433 hash_lock = HDR_LOCK(hdr); 7434 mutex_enter(hash_lock); 7435 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr)); 7436 7437 /* 7438 * If the data was read into a temporary buffer, 7439 * move it and free the buffer. 7440 */ 7441 if (cb->l2rcb_abd != NULL) { 7442 ASSERT3U(arc_hdr_size(hdr), <, zio->io_size); 7443 if (zio->io_error == 0) { 7444 abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd, 7445 arc_hdr_size(hdr)); 7446 } 7447 7448 /* 7449 * The following must be done regardless of whether 7450 * there was an error: 7451 * - free the temporary buffer 7452 * - point zio to the real ARC buffer 7453 * - set zio size accordingly 7454 * These are required because zio is either re-used for 7455 * an I/O of the block in the case of the error 7456 * or the zio is passed to arc_read_done() and it 7457 * needs real data. 7458 */ 7459 abd_free(cb->l2rcb_abd); 7460 zio->io_size = zio->io_orig_size = arc_hdr_size(hdr); 7461 zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd; 7462 } 7463 7464 ASSERT3P(zio->io_abd, !=, NULL); 7465 7466 /* 7467 * Check this survived the L2ARC journey. 
7468 */ 7469 ASSERT3P(zio->io_abd, ==, hdr->b_l1hdr.b_pabd); 7470 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */ 7471 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */ 7472 7473 valid_cksum = arc_cksum_is_equal(hdr, zio); 7474 if (valid_cksum && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) { 7475 mutex_exit(hash_lock); 7476 zio->io_private = hdr; 7477 arc_read_done(zio); 7478 } else { 7479 /* 7480 * Buffer didn't survive caching. Increment stats and 7481 * reissue to the original storage device. 7482 */ 7483 if (zio->io_error != 0) { 7484 ARCSTAT_BUMP(arcstat_l2_io_error); 7485 } else { 7486 zio->io_error = SET_ERROR(EIO); 7487 } 7488 if (!valid_cksum) 7489 ARCSTAT_BUMP(arcstat_l2_cksum_bad); 7490 7491 /* 7492 * If there's no waiter, issue an async i/o to the primary 7493 * storage now. If there *is* a waiter, the caller must 7494 * issue the i/o in a context where it's OK to block. 7495 */ 7496 if (zio->io_waiter == NULL) { 7497 zio_t *pio = zio_unique_parent(zio); 7498 7499 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL); 7500 7501 zio = zio_read(pio, zio->io_spa, zio->io_bp, 7502 hdr->b_l1hdr.b_pabd, zio->io_size, arc_read_done, 7503 hdr, zio->io_priority, cb->l2rcb_flags, 7504 &cb->l2rcb_zb); 7505 for (struct arc_callback *acb = hdr->b_l1hdr.b_acb; 7506 acb != NULL; acb = acb->acb_next) 7507 acb->acb_zio_head = zio; 7508 mutex_exit(hash_lock); 7509 zio_nowait(zio); 7510 } else 7511 mutex_exit(hash_lock); 7512 } 7513 7514 kmem_free(cb, sizeof (l2arc_read_callback_t)); 7515} 7516 7517/* 7518 * This is the list priority from which the L2ARC will search for pages to 7519 * cache. This is used within loops (0..3) to cycle through lists in the 7520 * desired order. This order can have a significant effect on cache 7521 * performance. 7522 * 7523 * Currently the metadata lists are hit first, MFU then MRU, followed by 7524 * the data lists. This function returns a locked list, and also returns 7525 * the lock pointer. 7526 */ 7527static multilist_sublist_t * 7528l2arc_sublist_lock(int list_num) 7529{ 7530 multilist_t *ml = NULL; 7531 unsigned int idx; 7532 7533 ASSERT(list_num >= 0 && list_num <= 3); 7534 7535 switch (list_num) { 7536 case 0: 7537 ml = arc_mfu->arcs_list[ARC_BUFC_METADATA]; 7538 break; 7539 case 1: 7540 ml = arc_mru->arcs_list[ARC_BUFC_METADATA]; 7541 break; 7542 case 2: 7543 ml = arc_mfu->arcs_list[ARC_BUFC_DATA]; 7544 break; 7545 case 3: 7546 ml = arc_mru->arcs_list[ARC_BUFC_DATA]; 7547 break; 7548 } 7549 7550 /* 7551 * Return a randomly-selected sublist. This is acceptable 7552 * because the caller feeds only a little bit of data for each 7553 * call (8MB). Subsequent calls will result in different 7554 * sublists being selected. 7555 */ 7556 idx = multilist_get_random_index(ml); 7557 return (multilist_sublist_lock(ml, idx)); 7558} 7559 7560/* 7561 * Evict buffers from the device write hand to the distance specified in 7562 * bytes. This distance may span populated buffers, it may span nothing. 7563 * This is clearing a region on the L2ARC device ready for writing. 7564 * If the 'all' boolean is set, every buffer is evicted. 7565 */ 7566static void 7567l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all) 7568{ 7569 list_t *buflist; 7570 arc_buf_hdr_t *hdr, *hdr_prev; 7571 kmutex_t *hash_lock; 7572 uint64_t taddr; 7573 7574 buflist = &dev->l2ad_buflist; 7575 7576 if (!all && dev->l2ad_first) { 7577 /* 7578 * This is the first sweep through the device. There is 7579 * nothing to evict. 
		 */
		return;
	}

	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
		/*
		 * When nearing the end of the device, evict to the end
		 * before the device write hand jumps to the start.
		 */
		taddr = dev->l2ad_end;
	} else {
		taddr = dev->l2ad_hand + distance;
	}
	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
	    uint64_t, taddr, boolean_t, all);

top:
	mutex_enter(&dev->l2ad_mtx);
	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
		hdr_prev = list_prev(buflist, hdr);

		hash_lock = HDR_LOCK(hdr);

		/*
		 * We cannot use mutex_enter or else we can deadlock
		 * with l2arc_write_buffers (due to swapping the order
		 * the hash lock and l2ad_mtx are taken).
		 */
		if (!mutex_tryenter(hash_lock)) {
			/*
			 * Missed the hash lock.  Retry.
			 */
			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
			mutex_exit(&dev->l2ad_mtx);
			mutex_enter(hash_lock);
			mutex_exit(hash_lock);
			goto top;
		}

		/*
		 * A header can't be on this list if it doesn't have an
		 * L2 header.
		 */
		ASSERT(HDR_HAS_L2HDR(hdr));

		/* Ensure this header has finished being written. */
		ASSERT(!HDR_L2_WRITING(hdr));
		ASSERT(!HDR_L2_WRITE_HEAD(hdr));

		if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
			/*
			 * We've evicted to the target address,
			 * or the end of the device.
			 */
			mutex_exit(hash_lock);
			break;
		}

		if (!HDR_HAS_L1HDR(hdr)) {
			ASSERT(!HDR_L2_READING(hdr));
			/*
			 * This doesn't exist in the ARC.  Destroy.
			 * arc_hdr_destroy() will call list_remove()
			 * and decrement arcstat_l2_lsize.
			 */
			arc_change_state(arc_anon, hdr, hash_lock);
			arc_hdr_destroy(hdr);
		} else {
			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
			/*
			 * Invalidate issued or about to be issued
			 * reads, since we may be about to write
			 * over this location.
			 */
			if (HDR_L2_READING(hdr)) {
				ARCSTAT_BUMP(arcstat_l2_evict_reading);
				arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
			}

			arc_hdr_l2hdr_destroy(hdr);
		}
		mutex_exit(hash_lock);
	}
	mutex_exit(&dev->l2ad_mtx);
}

/*
 * Find and write ARC buffers to the L2ARC device.
 *
 * The ARC_FLAG_L2_WRITING flag is set on each header so that the L2ARC
 * buffers are not valid for reading until they have completed writing.
 * The scan headroom is derived from the l2arc_headroom and
 * l2arc_headroom_boost tunables.
 *
 * Returns the number of bytes actually written (which may be smaller than
 * the delta by which the device hand has changed due to alignment).
7677 */ 7678static uint64_t 7679l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz) 7680{ 7681 arc_buf_hdr_t *hdr, *hdr_prev, *head; 7682 uint64_t write_asize, write_psize, write_lsize, headroom; 7683 boolean_t full; 7684 l2arc_write_callback_t *cb; 7685 zio_t *pio, *wzio; 7686 uint64_t guid = spa_load_guid(spa); 7687 int try; 7688 7689 ASSERT3P(dev->l2ad_vdev, !=, NULL); 7690 7691 pio = NULL; 7692 write_lsize = write_asize = write_psize = 0; 7693 full = B_FALSE; 7694 head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE); 7695 arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR); 7696 7697 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter); 7698 /* 7699 * Copy buffers for L2ARC writing. 7700 */ 7701 for (try = 0; try <= 3; try++) { 7702 multilist_sublist_t *mls = l2arc_sublist_lock(try); 7703 uint64_t passed_sz = 0; 7704 7705 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter); 7706 7707 /* 7708 * L2ARC fast warmup. 7709 * 7710 * Until the ARC is warm and starts to evict, read from the 7711 * head of the ARC lists rather than the tail. 7712 */ 7713 if (arc_warm == B_FALSE) 7714 hdr = multilist_sublist_head(mls); 7715 else 7716 hdr = multilist_sublist_tail(mls); 7717 if (hdr == NULL) 7718 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter); 7719 7720 headroom = target_sz * l2arc_headroom; 7721 if (zfs_compressed_arc_enabled) 7722 headroom = (headroom * l2arc_headroom_boost) / 100; 7723 7724 for (; hdr; hdr = hdr_prev) { 7725 kmutex_t *hash_lock; 7726 7727 if (arc_warm == B_FALSE) 7728 hdr_prev = multilist_sublist_next(mls, hdr); 7729 else 7730 hdr_prev = multilist_sublist_prev(mls, hdr); 7731 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, 7732 HDR_GET_LSIZE(hdr)); 7733 7734 hash_lock = HDR_LOCK(hdr); 7735 if (!mutex_tryenter(hash_lock)) { 7736 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail); 7737 /* 7738 * Skip this buffer rather than waiting. 7739 */ 7740 continue; 7741 } 7742 7743 passed_sz += HDR_GET_LSIZE(hdr); 7744 if (passed_sz > headroom) { 7745 /* 7746 * Searched too far. 7747 */ 7748 mutex_exit(hash_lock); 7749 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom); 7750 break; 7751 } 7752 7753 if (!l2arc_write_eligible(guid, hdr)) { 7754 mutex_exit(hash_lock); 7755 continue; 7756 } 7757 7758 /* 7759 * We rely on the L1 portion of the header below, so 7760 * it's invalid for this header to have been evicted out 7761 * of the ghost cache, prior to being written out. The 7762 * ARC_FLAG_L2_WRITING bit ensures this won't happen. 7763 */ 7764 ASSERT(HDR_HAS_L1HDR(hdr)); 7765 7766 ASSERT3U(HDR_GET_PSIZE(hdr), >, 0); 7767 ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL); 7768 ASSERT3U(arc_hdr_size(hdr), >, 0); 7769 uint64_t psize = arc_hdr_size(hdr); 7770 uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev, 7771 psize); 7772 7773 if ((write_asize + asize) > target_sz) { 7774 full = B_TRUE; 7775 mutex_exit(hash_lock); 7776 ARCSTAT_BUMP(arcstat_l2_write_full); 7777 break; 7778 } 7779 7780 if (pio == NULL) { 7781 /* 7782 * Insert a dummy header on the buflist so 7783 * l2arc_write_done() can find where the 7784 * write buffers begin without searching. 
7785 */ 7786 mutex_enter(&dev->l2ad_mtx); 7787 list_insert_head(&dev->l2ad_buflist, head); 7788 mutex_exit(&dev->l2ad_mtx); 7789 7790 cb = kmem_alloc( 7791 sizeof (l2arc_write_callback_t), KM_SLEEP); 7792 cb->l2wcb_dev = dev; 7793 cb->l2wcb_head = head; 7794 pio = zio_root(spa, l2arc_write_done, cb, 7795 ZIO_FLAG_CANFAIL); 7796 ARCSTAT_BUMP(arcstat_l2_write_pios); 7797 } 7798 7799 hdr->b_l2hdr.b_dev = dev; 7800 hdr->b_l2hdr.b_daddr = dev->l2ad_hand; 7801 arc_hdr_set_flags(hdr, 7802 ARC_FLAG_L2_WRITING | ARC_FLAG_HAS_L2HDR); 7803 7804 mutex_enter(&dev->l2ad_mtx); 7805 list_insert_head(&dev->l2ad_buflist, hdr); 7806 mutex_exit(&dev->l2ad_mtx); 7807 7808 (void) refcount_add_many(&dev->l2ad_alloc, psize, hdr); 7809 7810 /* 7811 * Normally the L2ARC can use the hdr's data, but if 7812 * we're sharing data between the hdr and one of its 7813 * bufs, L2ARC needs its own copy of the data so that 7814 * the ZIO below can't race with the buf consumer. 7815 * Another case where we need to create a copy of the 7816 * data is when the buffer size is not device-aligned 7817 * and we need to pad the block to make it such. 7818 * That also keeps the clock hand suitably aligned. 7819 * 7820 * To ensure that the copy will be available for the 7821 * lifetime of the ZIO and be cleaned up afterwards, we 7822 * add it to the l2arc_free_on_write queue. 7823 */ 7824 abd_t *to_write; 7825 if (!HDR_SHARED_DATA(hdr) && psize == asize) { 7826 to_write = hdr->b_l1hdr.b_pabd; 7827 } else { 7828 to_write = abd_alloc_for_io(asize, 7829 HDR_ISTYPE_METADATA(hdr)); 7830 abd_copy(to_write, hdr->b_l1hdr.b_pabd, psize); 7831 if (asize != psize) { 7832 abd_zero_off(to_write, psize, 7833 asize - psize); 7834 } 7835 l2arc_free_abd_on_write(to_write, asize, 7836 arc_buf_type(hdr)); 7837 } 7838 wzio = zio_write_phys(pio, dev->l2ad_vdev, 7839 hdr->b_l2hdr.b_daddr, asize, to_write, 7840 ZIO_CHECKSUM_OFF, NULL, hdr, 7841 ZIO_PRIORITY_ASYNC_WRITE, 7842 ZIO_FLAG_CANFAIL, B_FALSE); 7843 7844 write_lsize += HDR_GET_LSIZE(hdr); 7845 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev, 7846 zio_t *, wzio); 7847 7848 write_psize += psize; 7849 write_asize += asize; 7850 dev->l2ad_hand += asize; 7851 7852 mutex_exit(hash_lock); 7853 7854 (void) zio_nowait(wzio); 7855 } 7856 7857 multilist_sublist_unlock(mls); 7858 7859 if (full == B_TRUE) 7860 break; 7861 } 7862 7863 /* No buffers selected for writing? */ 7864 if (pio == NULL) { 7865 ASSERT0(write_lsize); 7866 ASSERT(!HDR_HAS_L1HDR(head)); 7867 kmem_cache_free(hdr_l2only_cache, head); 7868 return (0); 7869 } 7870 7871 ASSERT3U(write_psize, <=, target_sz); 7872 ARCSTAT_BUMP(arcstat_l2_writes_sent); 7873 ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize); 7874 ARCSTAT_INCR(arcstat_l2_lsize, write_lsize); 7875 ARCSTAT_INCR(arcstat_l2_psize, write_psize); 7876 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0); 7877 7878 /* 7879 * Bump device hand to the device start if it is approaching the end. 7880 * l2arc_evict() will already have evicted ahead for this case. 7881 */ 7882 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) { 7883 dev->l2ad_hand = dev->l2ad_start; 7884 dev->l2ad_first = B_FALSE; 7885 } 7886 7887 dev->l2ad_writing = B_TRUE; 7888 (void) zio_wait(pio); 7889 dev->l2ad_writing = B_FALSE; 7890 7891 return (write_asize); 7892} 7893 7894/* 7895 * This thread feeds the L2ARC at regular intervals. This is the beating 7896 * heart of the L2ARC. 
7897 */ 7898/* ARGSUSED */ 7899static void 7900l2arc_feed_thread(void *unused __unused) 7901{ 7902 callb_cpr_t cpr; 7903 l2arc_dev_t *dev; 7904 spa_t *spa; 7905 uint64_t size, wrote; 7906 clock_t begin, next = ddi_get_lbolt(); 7907 7908 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG); 7909 7910 mutex_enter(&l2arc_feed_thr_lock); 7911 7912 while (l2arc_thread_exit == 0) { 7913 CALLB_CPR_SAFE_BEGIN(&cpr); 7914 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock, 7915 next - ddi_get_lbolt()); 7916 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock); 7917 next = ddi_get_lbolt() + hz; 7918 7919 /* 7920 * Quick check for L2ARC devices. 7921 */ 7922 mutex_enter(&l2arc_dev_mtx); 7923 if (l2arc_ndev == 0) { 7924 mutex_exit(&l2arc_dev_mtx); 7925 continue; 7926 } 7927 mutex_exit(&l2arc_dev_mtx); 7928 begin = ddi_get_lbolt(); 7929 7930 /* 7931 * This selects the next l2arc device to write to, and in 7932 * doing so the next spa to feed from: dev->l2ad_spa. This 7933 * will return NULL if there are now no l2arc devices or if 7934 * they are all faulted. 7935 * 7936 * If a device is returned, its spa's config lock is also 7937 * held to prevent device removal. l2arc_dev_get_next() 7938 * will grab and release l2arc_dev_mtx. 7939 */ 7940 if ((dev = l2arc_dev_get_next()) == NULL) 7941 continue; 7942 7943 spa = dev->l2ad_spa; 7944 ASSERT3P(spa, !=, NULL); 7945 7946 /* 7947 * If the pool is read-only then force the feed thread to 7948 * sleep a little longer. 7949 */ 7950 if (!spa_writeable(spa)) { 7951 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz; 7952 spa_config_exit(spa, SCL_L2ARC, dev); 7953 continue; 7954 } 7955 7956 /* 7957 * Avoid contributing to memory pressure. 7958 */ 7959 if (arc_reclaim_needed()) { 7960 ARCSTAT_BUMP(arcstat_l2_abort_lowmem); 7961 spa_config_exit(spa, SCL_L2ARC, dev); 7962 continue; 7963 } 7964 7965 ARCSTAT_BUMP(arcstat_l2_feeds); 7966 7967 size = l2arc_write_size(); 7968 7969 /* 7970 * Evict L2ARC buffers that will be overwritten. 7971 */ 7972 l2arc_evict(dev, size, B_FALSE); 7973 7974 /* 7975 * Write ARC buffers. 7976 */ 7977 wrote = l2arc_write_buffers(spa, dev, size); 7978 7979 /* 7980 * Calculate interval between writes. 7981 */ 7982 next = l2arc_write_interval(begin, size, wrote); 7983 spa_config_exit(spa, SCL_L2ARC, dev); 7984 } 7985 7986 l2arc_thread_exit = 0; 7987 cv_broadcast(&l2arc_feed_thr_cv); 7988 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */ 7989 thread_exit(); 7990} 7991 7992boolean_t 7993l2arc_vdev_present(vdev_t *vd) 7994{ 7995 l2arc_dev_t *dev; 7996 7997 mutex_enter(&l2arc_dev_mtx); 7998 for (dev = list_head(l2arc_dev_list); dev != NULL; 7999 dev = list_next(l2arc_dev_list, dev)) { 8000 if (dev->l2ad_vdev == vd) 8001 break; 8002 } 8003 mutex_exit(&l2arc_dev_mtx); 8004 8005 return (dev != NULL); 8006} 8007 8008/* 8009 * Add a vdev for use by the L2ARC. By this point the spa has already 8010 * validated the vdev and opened it. 8011 */ 8012void 8013l2arc_add_vdev(spa_t *spa, vdev_t *vd) 8014{ 8015 l2arc_dev_t *adddev; 8016 8017 ASSERT(!l2arc_vdev_present(vd)); 8018 8019 vdev_ashift_optimize(vd); 8020 8021 /* 8022 * Create a new l2arc device entry. 
8023 */ 8024 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP); 8025 adddev->l2ad_spa = spa; 8026 adddev->l2ad_vdev = vd; 8027 adddev->l2ad_start = VDEV_LABEL_START_SIZE; 8028 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd); 8029 adddev->l2ad_hand = adddev->l2ad_start; 8030 adddev->l2ad_first = B_TRUE; 8031 adddev->l2ad_writing = B_FALSE; 8032 8033 mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL); 8034 /* 8035 * This is a list of all ARC buffers that are still valid on the 8036 * device. 8037 */ 8038 list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t), 8039 offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node)); 8040 8041 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand); 8042 refcount_create(&adddev->l2ad_alloc); 8043 8044 /* 8045 * Add device to global list 8046 */ 8047 mutex_enter(&l2arc_dev_mtx); 8048 list_insert_head(l2arc_dev_list, adddev); 8049 atomic_inc_64(&l2arc_ndev); 8050 mutex_exit(&l2arc_dev_mtx); 8051} 8052 8053/* 8054 * Remove a vdev from the L2ARC. 8055 */ 8056void 8057l2arc_remove_vdev(vdev_t *vd) 8058{ 8059 l2arc_dev_t *dev, *nextdev, *remdev = NULL; 8060 8061 /* 8062 * Find the device by vdev 8063 */ 8064 mutex_enter(&l2arc_dev_mtx); 8065 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) { 8066 nextdev = list_next(l2arc_dev_list, dev); 8067 if (vd == dev->l2ad_vdev) { 8068 remdev = dev; 8069 break; 8070 } 8071 } 8072 ASSERT3P(remdev, !=, NULL); 8073 8074 /* 8075 * Remove device from global list 8076 */ 8077 list_remove(l2arc_dev_list, remdev); 8078 l2arc_dev_last = NULL; /* may have been invalidated */ 8079 atomic_dec_64(&l2arc_ndev); 8080 mutex_exit(&l2arc_dev_mtx); 8081 8082 /* 8083 * Clear all buflists and ARC references. L2ARC device flush. 8084 */ 8085 l2arc_evict(remdev, 0, B_TRUE); 8086 list_destroy(&remdev->l2ad_buflist); 8087 mutex_destroy(&remdev->l2ad_mtx); 8088 refcount_destroy(&remdev->l2ad_alloc); 8089 kmem_free(remdev, sizeof (l2arc_dev_t)); 8090} 8091 8092void 8093l2arc_init(void) 8094{ 8095 l2arc_thread_exit = 0; 8096 l2arc_ndev = 0; 8097 l2arc_writes_sent = 0; 8098 l2arc_writes_done = 0; 8099 8100 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL); 8101 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL); 8102 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL); 8103 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL); 8104 8105 l2arc_dev_list = &L2ARC_dev_list; 8106 l2arc_free_on_write = &L2ARC_free_on_write; 8107 list_create(l2arc_dev_list, sizeof (l2arc_dev_t), 8108 offsetof(l2arc_dev_t, l2ad_node)); 8109 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t), 8110 offsetof(l2arc_data_free_t, l2df_list_node)); 8111} 8112 8113void 8114l2arc_fini(void) 8115{ 8116 /* 8117 * This is called from dmu_fini(), which is called from spa_fini(); 8118 * Because of this, we can assume that all l2arc devices have 8119 * already been removed when the pools themselves were removed. 
8120 */ 8121 8122 l2arc_do_free_on_write(); 8123 8124 mutex_destroy(&l2arc_feed_thr_lock); 8125 cv_destroy(&l2arc_feed_thr_cv); 8126 mutex_destroy(&l2arc_dev_mtx); 8127 mutex_destroy(&l2arc_free_on_write_mtx); 8128 8129 list_destroy(l2arc_dev_list); 8130 list_destroy(l2arc_free_on_write); 8131} 8132 8133void 8134l2arc_start(void) 8135{ 8136 if (!(spa_mode_global & FWRITE)) 8137 return; 8138 8139 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0, 8140 TS_RUN, minclsyspri); 8141} 8142 8143void 8144l2arc_stop(void) 8145{ 8146 if (!(spa_mode_global & FWRITE)) 8147 return; 8148 8149 mutex_enter(&l2arc_feed_thr_lock); 8150 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */ 8151 l2arc_thread_exit = 1; 8152 while (l2arc_thread_exit != 0) 8153 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock); 8154 mutex_exit(&l2arc_feed_thr_lock); 8155} 8156