arc.c (251631) arc.c (254591)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 */
27
28/*
29 * DVA-based Adjustable Replacement Cache
30 *
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
35 *
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory. This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also makes the performance characteristics easy to reason
40 * about. Our cache is not so simple. At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them. Blocks are only evictable
43 * when there are no external references active. This makes
44 * eviction far more problematic: we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
46 *
47 * There are times when it is not possible to evict the requested
48 * space. In these circumstances we are unable to adjust the cache
49 * size. To prevent the cache from growing unbounded at these times, we
50 * implement a "cache throttle" that slows the flow of new data
51 * into the cache until we can make space available.
52 *
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss. Our model has a variable sized cache. It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is
58 * tight.
59 *
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size. So
62 * when adjusting the cache size following a cache miss, it's simply
63 * a matter of choosing a single page to evict. In our model, we
64 * have variable sized cache blocks (ranging from 512 bytes to
65 * 128K bytes). We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
68 *
69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
71 */
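
/*
 * Illustrative sketch of point 3 above -- not an excerpt of the eviction
 * code in this file.  The loop variables are hypothetical; the idea is
 * simply to walk evictable headers from the cold end of a list and stop
 * once the accumulated sizes approximate the space the new block needs:
 *
 *	for (ab = list_tail(list); ab != NULL && freed < needed;
 *	    ab = list_prev(list, ab)) {
 *		if (refcount_count(&ab->b_refcnt) != 0)
 *			continue;		(referenced: not evictable)
 *		freed += ab->b_size;		(variable-sized blocks add up)
 *	}
 */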
72
73/*
74 * The locking model:
75 *
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
83 *
84 * Buffers do not have their own mutexes; rather, they rely on the
85 * hash table mutexes for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexes).
87 *
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
91 *
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
94 *
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock, you
98 * must use mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
100 *
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_buf_evict()
108 * and arc_do_user_evicts().
109 *
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
112 *
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114 *
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
120 */
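
/*
 * Illustrative example of the lock-ordering rule above (it mirrors the
 * pattern used by the eviction code later in this file): while an arc
 * list lock is held, the hash lock may only be taken with
 * mutex_tryenter(), and the buffer is skipped on failure rather than
 * blocking:
 *
 *	hash_lock = HDR_LOCK(ab);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;
 *	}
 *	(evict or move the buffer, then)
 *	mutex_exit(hash_lock);
 */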
121
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#ifdef _KERNEL
131#include <sys/dnlc.h>
132#endif
133#include <sys/callb.h>
134#include <sys/kstat.h>
135#include <sys/trim_map.h>
136#include <zfs_fletcher.h>
137#include <sys/sdt.h>
138
139#include <vm/vm_pageout.h>
140
141#ifdef illumos
142#ifndef _KERNEL
143/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
144boolean_t arc_watch = B_FALSE;
145int arc_procfd;
146#endif
147#endif /* illumos */
148
149static kmutex_t arc_reclaim_thr_lock;
150static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
151static uint8_t arc_thread_exit;
152
153extern int zfs_write_limit_shift;
154extern uint64_t zfs_write_limit_max;
155extern kmutex_t zfs_write_limit_lock;
156
157#define ARC_REDUCE_DNLC_PERCENT 3
158uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
159
160typedef enum arc_reclaim_strategy {
161 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
162 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
163} arc_reclaim_strategy_t;
164
165/* number of seconds before growing cache again */
166static int arc_grow_retry = 60;
167
168/* shift of arc_c for calculating both min and max arc_p */
169static int arc_p_min_shift = 4;
170
171/* log2(fraction of arc to reclaim) */
172static int arc_shrink_shift = 5;
173
174/*
175 * minimum lifespan of a prefetch block in clock ticks
176 * (initialized in arc_init())
177 */
178static int arc_min_prefetch_lifespan;
179
180static int arc_dead;
181extern int zfs_prefetch_disable;
182
183/*
184 * The arc has filled available memory and has now warmed up.
185 */
186static boolean_t arc_warm;
187
188/*
189 * These tunables are for performance analysis.
190 */
191uint64_t zfs_arc_max;
192uint64_t zfs_arc_min;
193uint64_t zfs_arc_meta_limit = 0;
194int zfs_arc_grow_retry = 0;
195int zfs_arc_shrink_shift = 0;
196int zfs_arc_p_min_shift = 0;
197int zfs_disable_dup_eviction = 0;
198
199TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
200TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
201TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
202SYSCTL_DECL(_vfs_zfs);
203SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
204 "Maximum ARC size");
205SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
206 "Minimum ARC size");
207
208/*
209 * Note that buffers can be in one of 6 states:
210 * ARC_anon - anonymous (discussed below)
211 * ARC_mru - recently used, currently cached
212 * ARC_mru_ghost - recently used, no longer in cache
213 * ARC_mfu - frequently used, currently cached
214 * ARC_mfu_ghost - frequently used, no longer in cache
215 * ARC_l2c_only - exists in L2ARC but not other states
216 * When there are no active references to a buffer, it is
217 * linked onto a list in one of these arc states. These are
218 * the only buffers that can be evicted or deleted. Within each
219 * state there are multiple lists, one for meta-data and one for
220 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
221 * etc.) is tracked separately so that it can be managed more
222 * explicitly: favored over data, limited explicitly.
223 *
224 * Anonymous buffers are buffers that are not associated with
225 * a DVA. These are buffers that hold dirty block copies
226 * before they are written to stable storage. By definition,
227 * they are "ref'd" and are considered part of arc_mru
228 * that cannot be freed. Generally, they will acquire a DVA
229 * as they are written and migrate onto the arc_mru list.
230 *
231 * The ARC_l2c_only state is for buffers that are in the second
232 * level ARC but no longer in any of the ARC_m* lists. The second
233 * level ARC itself may also contain buffers that are in any of
234 * the ARC_m* states - meaning that a buffer can exist in two
235 * places. The reason for the ARC_l2c_only state is to keep the
236 * buffer header in the hash table, so that reads that hit the
237 * second level ARC benefit from these fast lookups.
238 */
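
/*
 * A minimal illustration of how transitions between these states are
 * expressed elsewhere in this file: promoting a buffer that took a
 * second hit from the MRU list to the MFU list is simply
 *
 *	arc_change_state(arc_mfu, ab, hash_lock);
 *
 * with the hash lock for 'ab' held by the caller (see arc_access()).
 */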
239
240#define ARCS_LOCK_PAD CACHE_LINE_SIZE
241struct arcs_lock {
242 kmutex_t arcs_lock;
243#ifdef _KERNEL
244 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
245#endif
246};
247
248/*
249 * must be a power of two for mask use to work
250 *
251 */
252#define ARC_BUFC_NUMDATALISTS 16
253#define ARC_BUFC_NUMMETADATALISTS 16
254#define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
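
/*
 * Because the list counts above are powers of two, a buffer is mapped to
 * a sublist with a mask rather than a modulo.  Illustrative only (this
 * mirrors get_buf_info() below):
 *
 *	idx = buf_hash(spa, dva, birth) & (ARC_BUFC_NUMDATALISTS - 1);
 */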
255
256typedef struct arc_state {
257 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
258 uint64_t arcs_size; /* total amount of data in this state */
259 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
260 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
261} arc_state_t;
262
263#define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock))
264
265/* The 6 states: */
266static arc_state_t ARC_anon;
267static arc_state_t ARC_mru;
268static arc_state_t ARC_mru_ghost;
269static arc_state_t ARC_mfu;
270static arc_state_t ARC_mfu_ghost;
271static arc_state_t ARC_l2c_only;
272
273typedef struct arc_stats {
274 kstat_named_t arcstat_hits;
275 kstat_named_t arcstat_misses;
276 kstat_named_t arcstat_demand_data_hits;
277 kstat_named_t arcstat_demand_data_misses;
278 kstat_named_t arcstat_demand_metadata_hits;
279 kstat_named_t arcstat_demand_metadata_misses;
280 kstat_named_t arcstat_prefetch_data_hits;
281 kstat_named_t arcstat_prefetch_data_misses;
282 kstat_named_t arcstat_prefetch_metadata_hits;
283 kstat_named_t arcstat_prefetch_metadata_misses;
284 kstat_named_t arcstat_mru_hits;
285 kstat_named_t arcstat_mru_ghost_hits;
286 kstat_named_t arcstat_mfu_hits;
287 kstat_named_t arcstat_mfu_ghost_hits;
288 kstat_named_t arcstat_allocated;
289 kstat_named_t arcstat_deleted;
290 kstat_named_t arcstat_stolen;
291 kstat_named_t arcstat_recycle_miss;
292 /*
293 * Number of buffers that could not be evicted because the hash lock
294 * was held by another thread. The lock may not necessarily be held
295 * by something using the same buffer, since hash locks are shared
296 * by multiple buffers.
297 */
298 kstat_named_t arcstat_mutex_miss;
299 /*
300 * Number of buffers skipped because they have I/O in progress, are
301 * indirect prefetch buffers that have not lived long enough, or are
302 * not from the spa we're trying to evict from.
303 */
304 kstat_named_t arcstat_evict_skip;
305 kstat_named_t arcstat_evict_l2_cached;
306 kstat_named_t arcstat_evict_l2_eligible;
307 kstat_named_t arcstat_evict_l2_ineligible;
308 kstat_named_t arcstat_hash_elements;
309 kstat_named_t arcstat_hash_elements_max;
310 kstat_named_t arcstat_hash_collisions;
311 kstat_named_t arcstat_hash_chains;
312 kstat_named_t arcstat_hash_chain_max;
313 kstat_named_t arcstat_p;
314 kstat_named_t arcstat_c;
315 kstat_named_t arcstat_c_min;
316 kstat_named_t arcstat_c_max;
317 kstat_named_t arcstat_size;
318 kstat_named_t arcstat_hdr_size;
319 kstat_named_t arcstat_data_size;
320 kstat_named_t arcstat_other_size;
321 kstat_named_t arcstat_l2_hits;
322 kstat_named_t arcstat_l2_misses;
323 kstat_named_t arcstat_l2_feeds;
324 kstat_named_t arcstat_l2_rw_clash;
325 kstat_named_t arcstat_l2_read_bytes;
326 kstat_named_t arcstat_l2_write_bytes;
327 kstat_named_t arcstat_l2_writes_sent;
328 kstat_named_t arcstat_l2_writes_done;
329 kstat_named_t arcstat_l2_writes_error;
330 kstat_named_t arcstat_l2_writes_hdr_miss;
331 kstat_named_t arcstat_l2_evict_lock_retry;
332 kstat_named_t arcstat_l2_evict_reading;
333 kstat_named_t arcstat_l2_free_on_write;
334 kstat_named_t arcstat_l2_abort_lowmem;
335 kstat_named_t arcstat_l2_cksum_bad;
336 kstat_named_t arcstat_l2_io_error;
337 kstat_named_t arcstat_l2_size;
338 kstat_named_t arcstat_l2_asize;
339 kstat_named_t arcstat_l2_hdr_size;
340 kstat_named_t arcstat_l2_compress_successes;
341 kstat_named_t arcstat_l2_compress_zeros;
342 kstat_named_t arcstat_l2_compress_failures;
343 kstat_named_t arcstat_l2_write_trylock_fail;
344 kstat_named_t arcstat_l2_write_passed_headroom;
345 kstat_named_t arcstat_l2_write_spa_mismatch;
346 kstat_named_t arcstat_l2_write_in_l2;
347 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
348 kstat_named_t arcstat_l2_write_not_cacheable;
349 kstat_named_t arcstat_l2_write_full;
350 kstat_named_t arcstat_l2_write_buffer_iter;
351 kstat_named_t arcstat_l2_write_pios;
352 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
353 kstat_named_t arcstat_l2_write_buffer_list_iter;
354 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
355 kstat_named_t arcstat_memory_throttle_count;
356 kstat_named_t arcstat_duplicate_buffers;
357 kstat_named_t arcstat_duplicate_buffers_size;
358 kstat_named_t arcstat_duplicate_reads;
359} arc_stats_t;
360
361static arc_stats_t arc_stats = {
362 { "hits", KSTAT_DATA_UINT64 },
363 { "misses", KSTAT_DATA_UINT64 },
364 { "demand_data_hits", KSTAT_DATA_UINT64 },
365 { "demand_data_misses", KSTAT_DATA_UINT64 },
366 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
367 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
368 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
369 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
370 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
371 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
372 { "mru_hits", KSTAT_DATA_UINT64 },
373 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
374 { "mfu_hits", KSTAT_DATA_UINT64 },
375 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
376 { "allocated", KSTAT_DATA_UINT64 },
377 { "deleted", KSTAT_DATA_UINT64 },
378 { "stolen", KSTAT_DATA_UINT64 },
379 { "recycle_miss", KSTAT_DATA_UINT64 },
380 { "mutex_miss", KSTAT_DATA_UINT64 },
381 { "evict_skip", KSTAT_DATA_UINT64 },
382 { "evict_l2_cached", KSTAT_DATA_UINT64 },
383 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
384 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
385 { "hash_elements", KSTAT_DATA_UINT64 },
386 { "hash_elements_max", KSTAT_DATA_UINT64 },
387 { "hash_collisions", KSTAT_DATA_UINT64 },
388 { "hash_chains", KSTAT_DATA_UINT64 },
389 { "hash_chain_max", KSTAT_DATA_UINT64 },
390 { "p", KSTAT_DATA_UINT64 },
391 { "c", KSTAT_DATA_UINT64 },
392 { "c_min", KSTAT_DATA_UINT64 },
393 { "c_max", KSTAT_DATA_UINT64 },
394 { "size", KSTAT_DATA_UINT64 },
395 { "hdr_size", KSTAT_DATA_UINT64 },
396 { "data_size", KSTAT_DATA_UINT64 },
397 { "other_size", KSTAT_DATA_UINT64 },
398 { "l2_hits", KSTAT_DATA_UINT64 },
399 { "l2_misses", KSTAT_DATA_UINT64 },
400 { "l2_feeds", KSTAT_DATA_UINT64 },
401 { "l2_rw_clash", KSTAT_DATA_UINT64 },
402 { "l2_read_bytes", KSTAT_DATA_UINT64 },
403 { "l2_write_bytes", KSTAT_DATA_UINT64 },
404 { "l2_writes_sent", KSTAT_DATA_UINT64 },
405 { "l2_writes_done", KSTAT_DATA_UINT64 },
406 { "l2_writes_error", KSTAT_DATA_UINT64 },
407 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
408 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
409 { "l2_evict_reading", KSTAT_DATA_UINT64 },
410 { "l2_free_on_write", KSTAT_DATA_UINT64 },
411 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
412 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
413 { "l2_io_error", KSTAT_DATA_UINT64 },
414 { "l2_size", KSTAT_DATA_UINT64 },
415 { "l2_asize", KSTAT_DATA_UINT64 },
416 { "l2_hdr_size", KSTAT_DATA_UINT64 },
417 { "l2_compress_successes", KSTAT_DATA_UINT64 },
418 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
419 { "l2_compress_failures", KSTAT_DATA_UINT64 },
420 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
421 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
422 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
423 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
424 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
425 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
426 { "l2_write_full", KSTAT_DATA_UINT64 },
427 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
428 { "l2_write_pios", KSTAT_DATA_UINT64 },
429 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
430 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
431 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
432 { "memory_throttle_count", KSTAT_DATA_UINT64 },
433 { "duplicate_buffers", KSTAT_DATA_UINT64 },
434 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
435 { "duplicate_reads", KSTAT_DATA_UINT64 }
436};
437
438#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
439
440#define ARCSTAT_INCR(stat, val) \
441 atomic_add_64(&arc_stats.stat.value.ui64, (val))
442
443#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
444#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
445
446#define ARCSTAT_MAX(stat, val) { \
447 uint64_t m; \
448 while ((val) > (m = arc_stats.stat.value.ui64) && \
449 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
450 continue; \
451}
452
453#define ARCSTAT_MAXSTAT(stat) \
454 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
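
/*
 * ARCSTAT_MAX() above is a lock-free maximum update: it re-reads the
 * current value and retries the compare-and-swap until either the stored
 * value is already >= val or the CAS succeeds.  Illustrative use (the
 * same call appears in buf_hash_insert() below):
 *
 *	ARCSTAT_MAX(arcstat_hash_chain_max, i);
 */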
455
456/*
457 * We define a macro to allow ARC hits/misses to be easily broken down by
458 * two separate conditions, giving a total of four different subtypes for
459 * each of hits and misses (so eight statistics total).
460 */
461#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
462 if (cond1) { \
463 if (cond2) { \
464 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
465 } else { \
466 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
467 } \
468 } else { \
469 if (cond2) { \
470 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
471 } else { \
472 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
473 } \
474 }
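
/*
 * Illustrative use of ARCSTAT_CONDSTAT() (this exact pattern appears in
 * the hit/miss accounting later in this file):
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * which bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits.
 */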
475
476kstat_t *arc_ksp;
477static arc_state_t *arc_anon;
478static arc_state_t *arc_mru;
479static arc_state_t *arc_mru_ghost;
480static arc_state_t *arc_mfu;
481static arc_state_t *arc_mfu_ghost;
482static arc_state_t *arc_l2c_only;
483
484/*
485 * There are several ARC variables that are critical to export as kstats --
486 * but we don't want to have to grovel around in the kstat whenever we wish to
487 * manipulate them. For these variables, we therefore define them to be in
488 * terms of the statistic variable. This assures that we are not introducing
489 * the possibility of inconsistency by having shadow copies of the variables,
490 * while still allowing the code to be readable.
491 */
492#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
493#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
494#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
495#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
496#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
497
498#define L2ARC_IS_VALID_COMPRESS(_c_) \
499 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
500
501static int arc_no_grow; /* Don't try to grow cache size */
502static uint64_t arc_tempreserve;
503static uint64_t arc_loaned_bytes;
504static uint64_t arc_meta_used;
505static uint64_t arc_meta_limit;
506static uint64_t arc_meta_max = 0;
507SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
508 "ARC metadata used");
509SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
510 "ARC metadata limit");
511
512typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
513
514typedef struct arc_callback arc_callback_t;
515
516struct arc_callback {
517 void *acb_private;
518 arc_done_func_t *acb_done;
519 arc_buf_t *acb_buf;
520 zio_t *acb_zio_dummy;
521 arc_callback_t *acb_next;
522};
523
524typedef struct arc_write_callback arc_write_callback_t;
525
526struct arc_write_callback {
527 void *awcb_private;
528 arc_done_func_t *awcb_ready;
529 arc_done_func_t *awcb_done;
530 arc_buf_t *awcb_buf;
531};
532
533struct arc_buf_hdr {
534 /* protected by hash lock */
535 dva_t b_dva;
536 uint64_t b_birth;
537 uint64_t b_cksum0;
538
539 kmutex_t b_freeze_lock;
540 zio_cksum_t *b_freeze_cksum;
541 void *b_thawed;
542
543 arc_buf_hdr_t *b_hash_next;
544 arc_buf_t *b_buf;
545 uint32_t b_flags;
546 uint32_t b_datacnt;
547
548 arc_callback_t *b_acb;
549 kcondvar_t b_cv;
550
551 /* immutable */
552 arc_buf_contents_t b_type;
553 uint64_t b_size;
554 uint64_t b_spa;
555
556 /* protected by arc state mutex */
557 arc_state_t *b_state;
558 list_node_t b_arc_node;
559
560 /* updated atomically */
561 clock_t b_arc_access;
562
563 /* self protecting */
564 refcount_t b_refcnt;
565
566 l2arc_buf_hdr_t *b_l2hdr;
567 list_node_t b_l2node;
568};
569
570static arc_buf_t *arc_eviction_list;
571static kmutex_t arc_eviction_mtx;
572static arc_buf_hdr_t arc_eviction_hdr;
573static void arc_get_data_buf(arc_buf_t *buf);
574static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
575static int arc_evict_needed(arc_buf_contents_t type);
576static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
577#ifdef illumos
578static void arc_buf_watch(arc_buf_t *buf);
579#endif /* illumos */
580
581static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
582
583#define GHOST_STATE(state) \
584 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
585 (state) == arc_l2c_only)
586
587/*
588 * Private ARC flags.  These are ARC-internal flags that show up in
589 * b_flags in the arc_buf_hdr_t.  Some flags are publicly declared and can
590 * be passed in as arc_flags to functions like arc_read(); the private flags
591 * below, however, should never be passed in and are only set by ARC code.
592 * When adding new public flags, make sure not to smash the private ones.
593 */
594
595#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
596#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
597#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
598#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
599#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
600#define ARC_INDIRECT (1 << 14) /* this is an indirect block */
601#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
602#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
603#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
604#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
605
606#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
607#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
608#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
609#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
610#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
611#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
612#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
613#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
614#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
615 (hdr)->b_l2hdr != NULL)
616#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
617#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
618#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
619
620/*
621 * Other sizes
622 */
623
624#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
625#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
626
627/*
628 * Hash table routines
629 */
630
631#define HT_LOCK_PAD CACHE_LINE_SIZE
632
633struct ht_lock {
634 kmutex_t ht_lock;
635#ifdef _KERNEL
636 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
637#endif
638};
639
640#define BUF_LOCKS 256
641typedef struct buf_hash_table {
642 uint64_t ht_mask;
643 arc_buf_hdr_t **ht_table;
644 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
645} buf_hash_table_t;
646
647static buf_hash_table_t buf_hash_table;
648
649#define BUF_HASH_INDEX(spa, dva, birth) \
650 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
651#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
652#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
653#define HDR_LOCK(hdr) \
654 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
655
656uint64_t zfs_crc64_table[256];
657
658/*
659 * Level 2 ARC
660 */
661
662#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
663#define L2ARC_HEADROOM 2 /* num of writes */
664/*
665 * If we discover during ARC scan any buffers to be compressed, we boost
666 * our headroom for the next scanning cycle by this percentage multiple.
667 */
668#define L2ARC_HEADROOM_BOOST 200
669#define L2ARC_FEED_SECS 1 /* caching interval secs */
670#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
671
672#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
673#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
674
675/* L2ARC Performance Tunables */
676uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
677uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
678uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
679uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
680uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
681uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
682boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
683boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
684boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
685
686SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
687 &l2arc_write_max, 0, "max write size");
688SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
689 &l2arc_write_boost, 0, "extra write during warmup");
690SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
691 &l2arc_headroom, 0, "number of dev writes");
692SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
693 &l2arc_feed_secs, 0, "interval seconds");
694SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
695 &l2arc_feed_min_ms, 0, "min interval milliseconds");
696
697SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
698 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
699SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
700 &l2arc_feed_again, 0, "turbo warmup");
701SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
702 &l2arc_norw, 0, "no reads during writes");
703
704SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
705 &ARC_anon.arcs_size, 0, "size of anonymous state");
706SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
707 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
708 SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
709 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
710
711SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
712 &ARC_mru.arcs_size, 0, "size of mru state");
713SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
714 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
715SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
716 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
717
718SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
719 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
720SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
721 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
722 "size of metadata in mru ghost state");
723SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
724 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
725 "size of data in mru ghost state");
726
727SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
728 &ARC_mfu.arcs_size, 0, "size of mfu state");
729SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
730 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
731SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
732 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
733
734SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
735 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
736SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
737 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
738 "size of metadata in mfu ghost state");
739SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
740 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
741 "size of data in mfu ghost state");
742
743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
744 &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
745
746/*
747 * L2ARC Internals
748 */
749typedef struct l2arc_dev {
750 vdev_t *l2ad_vdev; /* vdev */
751 spa_t *l2ad_spa; /* spa */
752 uint64_t l2ad_hand; /* next write location */
753 uint64_t l2ad_start; /* first addr on device */
754 uint64_t l2ad_end; /* last addr on device */
755 uint64_t l2ad_evict; /* last addr eviction reached */
756 boolean_t l2ad_first; /* first sweep through */
757 boolean_t l2ad_writing; /* currently writing */
758 list_t *l2ad_buflist; /* buffer list */
759 list_node_t l2ad_node; /* device list node */
760} l2arc_dev_t;
761
762static list_t L2ARC_dev_list; /* device list */
763static list_t *l2arc_dev_list; /* device list pointer */
764static kmutex_t l2arc_dev_mtx; /* device list mutex */
765static l2arc_dev_t *l2arc_dev_last; /* last device used */
766static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
767static list_t L2ARC_free_on_write; /* free after write buf list */
768static list_t *l2arc_free_on_write; /* free after write list ptr */
769static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
770static uint64_t l2arc_ndev; /* number of devices */
771
772typedef struct l2arc_read_callback {
773 arc_buf_t *l2rcb_buf; /* read buffer */
774 spa_t *l2rcb_spa; /* spa */
775 blkptr_t l2rcb_bp; /* original blkptr */
776 zbookmark_t l2rcb_zb; /* original bookmark */
777 int l2rcb_flags; /* original flags */
778 enum zio_compress l2rcb_compress; /* applied compress */
779} l2arc_read_callback_t;
780
781typedef struct l2arc_write_callback {
782 l2arc_dev_t *l2wcb_dev; /* device info */
783 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
784} l2arc_write_callback_t;
785
786struct l2arc_buf_hdr {
787 /* protected by arc_buf_hdr mutex */
788 l2arc_dev_t *b_dev; /* L2ARC device */
789 uint64_t b_daddr; /* disk address, offset byte */
790 /* compression applied to buffer data */
791 enum zio_compress b_compress;
792 /* real alloc'd buffer size depending on b_compress applied */
793 int b_asize;
794 /* temporary buffer holder for in-flight compressed data */
795 void *b_tmp_cdata;
796};
797
798typedef struct l2arc_data_free {
799 /* protected by l2arc_free_on_write_mtx */
800 void *l2df_data;
801 size_t l2df_size;
802 void (*l2df_func)(void *, size_t);
803 list_node_t l2df_list_node;
804} l2arc_data_free_t;
805
806static kmutex_t l2arc_feed_thr_lock;
807static kcondvar_t l2arc_feed_thr_cv;
808static uint8_t l2arc_thread_exit;
809
810static void l2arc_read_done(zio_t *zio);
811static void l2arc_hdr_stat_add(void);
812static void l2arc_hdr_stat_remove(void);
813
814static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
815static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
816 enum zio_compress c);
817static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
818
819static uint64_t
820buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
821{
822 uint8_t *vdva = (uint8_t *)dva;
823 uint64_t crc = -1ULL;
824 int i;
825
826 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
827
828 for (i = 0; i < sizeof (dva_t); i++)
829 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
830
831 crc ^= (spa>>8) ^ birth;
832
833 return (crc);
834}
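
/*
 * Illustrative only: the 64-bit hash above is reduced to a bucket index
 * and a lock index by masking, which is exactly what BUF_HASH_INDEX()
 * and BUF_HASH_LOCK_NTRY() do:
 *
 *	idx = buf_hash(spa, dva, birth) & buf_hash_table.ht_mask;
 *	lock = &buf_hash_table.ht_locks[idx & (BUF_LOCKS - 1)].ht_lock;
 */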
835
836#define BUF_EMPTY(buf) \
837 ((buf)->b_dva.dva_word[0] == 0 && \
838 (buf)->b_dva.dva_word[1] == 0 && \
839 (buf)->b_birth == 0)
840
841#define BUF_EQUAL(spa, dva, birth, buf) \
842 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
843 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
844 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
845
846static void
847buf_discard_identity(arc_buf_hdr_t *hdr)
848{
849 hdr->b_dva.dva_word[0] = 0;
850 hdr->b_dva.dva_word[1] = 0;
851 hdr->b_birth = 0;
852 hdr->b_cksum0 = 0;
853}
854
855static arc_buf_hdr_t *
856buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
857{
858 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
859 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
860 arc_buf_hdr_t *buf;
861
862 mutex_enter(hash_lock);
863 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
864 buf = buf->b_hash_next) {
865 if (BUF_EQUAL(spa, dva, birth, buf)) {
866 *lockp = hash_lock;
867 return (buf);
868 }
869 }
870 mutex_exit(hash_lock);
871 *lockp = NULL;
872 return (NULL);
873}
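
/*
 * Typical caller pattern for buf_hash_find() (illustrative; the read
 * path follows this shape): on a hit the hash lock is returned held and
 * must be dropped by the caller.
 *
 *	hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
 *	    &hash_lock);
 *	if (hdr != NULL) {
 *		(hdr is protected by hash_lock here)
 *		mutex_exit(hash_lock);
 *	}
 */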
874
875/*
876 * Insert an entry into the hash table.  If there is already an element
877 * equal to the new one in the hash table, the existing element is
878 * returned and the new element is not inserted.
879 * Otherwise, NULL is returned.
880 */
881static arc_buf_hdr_t *
882buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
883{
884 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
885 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
886 arc_buf_hdr_t *fbuf;
887 uint32_t i;
888
889 ASSERT(!HDR_IN_HASH_TABLE(buf));
890 *lockp = hash_lock;
891 mutex_enter(hash_lock);
892 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
893 fbuf = fbuf->b_hash_next, i++) {
894 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
895 return (fbuf);
896 }
897
898 buf->b_hash_next = buf_hash_table.ht_table[idx];
899 buf_hash_table.ht_table[idx] = buf;
900 buf->b_flags |= ARC_IN_HASH_TABLE;
901
902 /* collect some hash table performance data */
903 if (i > 0) {
904 ARCSTAT_BUMP(arcstat_hash_collisions);
905 if (i == 1)
906 ARCSTAT_BUMP(arcstat_hash_chains);
907
908 ARCSTAT_MAX(arcstat_hash_chain_max, i);
909 }
910
911 ARCSTAT_BUMP(arcstat_hash_elements);
912 ARCSTAT_MAXSTAT(arcstat_hash_elements);
913
914 return (NULL);
915}
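
/*
 * Illustrative caller pattern for buf_hash_insert(): a non-NULL return
 * means an equal header already existed and the new one was not
 * inserted; either way the hash lock is returned held.
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		(resolve the collision against 'exists')
 *	}
 *	mutex_exit(hash_lock);
 */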
916
917static void
918buf_hash_remove(arc_buf_hdr_t *buf)
919{
920 arc_buf_hdr_t *fbuf, **bufp;
921 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
922
923 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
924 ASSERT(HDR_IN_HASH_TABLE(buf));
925
926 bufp = &buf_hash_table.ht_table[idx];
927 while ((fbuf = *bufp) != buf) {
928 ASSERT(fbuf != NULL);
929 bufp = &fbuf->b_hash_next;
930 }
931 *bufp = buf->b_hash_next;
932 buf->b_hash_next = NULL;
933 buf->b_flags &= ~ARC_IN_HASH_TABLE;
934
935 /* collect some hash table performance data */
936 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
937
938 if (buf_hash_table.ht_table[idx] &&
939 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
940 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
941}
942
943/*
944 * Global data structures and functions for the buf kmem cache.
945 */
946static kmem_cache_t *hdr_cache;
947static kmem_cache_t *buf_cache;
948
949static void
950buf_fini(void)
951{
952 int i;
953
954 kmem_free(buf_hash_table.ht_table,
955 (buf_hash_table.ht_mask + 1) * sizeof (void *));
956 for (i = 0; i < BUF_LOCKS; i++)
957 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
958 kmem_cache_destroy(hdr_cache);
959 kmem_cache_destroy(buf_cache);
960}
961
962/*
963 * Constructor callback - called when the cache is empty
964 * and a new buf is requested.
965 */
966/* ARGSUSED */
967static int
968hdr_cons(void *vbuf, void *unused, int kmflag)
969{
970 arc_buf_hdr_t *buf = vbuf;
971
972 bzero(buf, sizeof (arc_buf_hdr_t));
973 refcount_create(&buf->b_refcnt);
974 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
975 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
976 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
977
978 return (0);
979}
980
981/* ARGSUSED */
982static int
983buf_cons(void *vbuf, void *unused, int kmflag)
984{
985 arc_buf_t *buf = vbuf;
986
987 bzero(buf, sizeof (arc_buf_t));
988 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
989 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
990
991 return (0);
992}
993
994/*
995 * Destructor callback - called when a cached buf is
996 * no longer required.
997 */
998/* ARGSUSED */
999static void
1000hdr_dest(void *vbuf, void *unused)
1001{
1002 arc_buf_hdr_t *buf = vbuf;
1003
1004 ASSERT(BUF_EMPTY(buf));
1005 refcount_destroy(&buf->b_refcnt);
1006 cv_destroy(&buf->b_cv);
1007 mutex_destroy(&buf->b_freeze_lock);
1008 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1009}
1010
1011/* ARGSUSED */
1012static void
1013buf_dest(void *vbuf, void *unused)
1014{
1015 arc_buf_t *buf = vbuf;
1016
1017 mutex_destroy(&buf->b_evict_lock);
1018 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1019}
1020
1021/*
1022 * Reclaim callback -- invoked when memory is low.
1023 */
1024/* ARGSUSED */
1025static void
1026hdr_recl(void *unused)
1027{
1028 dprintf("hdr_recl called\n");
1029 /*
1030 * umem calls the reclaim func when we destroy the buf cache,
1031 * which is after we do arc_fini().
1032 */
1033 if (!arc_dead)
1034 cv_signal(&arc_reclaim_thr_cv);
1035}
1036
1037static void
1038buf_init(void)
1039{
1040 uint64_t *ct;
1041 uint64_t hsize = 1ULL << 12;
1042 int i, j;
1043
1044 /*
1045 * The hash table is big enough to fill all of physical memory
1046 * with an average 64K block size. The table will take up
1047 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
1048 */
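	/*
	 * Worked example of the sizing rule above (illustrative): with
	 * 8GB of physical memory, 8GB / 64K = 128K buckets, so the loop
	 * below leaves hsize at 2^17 and the table consumes
	 * 128K * 8 bytes = 1MB.
	 */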
1049 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
1050 hsize <<= 1;
1051retry:
1052 buf_hash_table.ht_mask = hsize - 1;
1053 buf_hash_table.ht_table =
1054 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1055 if (buf_hash_table.ht_table == NULL) {
1056 ASSERT(hsize > (1ULL << 8));
1057 hsize >>= 1;
1058 goto retry;
1059 }
1060
1061 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1062 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1063 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1064 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1065
1066 for (i = 0; i < 256; i++)
1067 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1068 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1069
1070 for (i = 0; i < BUF_LOCKS; i++) {
1071 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1072 NULL, MUTEX_DEFAULT, NULL);
1073 }
1074}
1075
1076#define ARC_MINTIME (hz>>4) /* 62 ms */
1077
1078static void
1079arc_cksum_verify(arc_buf_t *buf)
1080{
1081 zio_cksum_t zc;
1082
1083 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1084 return;
1085
1086 mutex_enter(&buf->b_hdr->b_freeze_lock);
1087 if (buf->b_hdr->b_freeze_cksum == NULL ||
1088 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1089 mutex_exit(&buf->b_hdr->b_freeze_lock);
1090 return;
1091 }
1092 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1093 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1094 panic("buffer modified while frozen!");
1095 mutex_exit(&buf->b_hdr->b_freeze_lock);
1096}
1097
1098static int
1099arc_cksum_equal(arc_buf_t *buf)
1100{
1101 zio_cksum_t zc;
1102 int equal;
1103
1104 mutex_enter(&buf->b_hdr->b_freeze_lock);
1105 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1106 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1107 mutex_exit(&buf->b_hdr->b_freeze_lock);
1108
1109 return (equal);
1110}
1111
1112static void
1113arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1114{
1115 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1116 return;
1117
1118 mutex_enter(&buf->b_hdr->b_freeze_lock);
1119 if (buf->b_hdr->b_freeze_cksum != NULL) {
1120 mutex_exit(&buf->b_hdr->b_freeze_lock);
1121 return;
1122 }
1123 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1124 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1125 buf->b_hdr->b_freeze_cksum);
1126 mutex_exit(&buf->b_hdr->b_freeze_lock);
1127#ifdef illumos
1128 arc_buf_watch(buf);
1129#endif /* illumos */
1130}
1131
1132#ifdef illumos
1133#ifndef _KERNEL
1134typedef struct procctl {
1135 long cmd;
1136 prwatch_t prwatch;
1137} procctl_t;
1138#endif
1139
1140/* ARGSUSED */
1141static void
1142arc_buf_unwatch(arc_buf_t *buf)
1143{
1144#ifndef _KERNEL
1145 if (arc_watch) {
1146 int result;
1147 procctl_t ctl;
1148 ctl.cmd = PCWATCH;
1149 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1150 ctl.prwatch.pr_size = 0;
1151 ctl.prwatch.pr_wflags = 0;
1152 result = write(arc_procfd, &ctl, sizeof (ctl));
1153 ASSERT3U(result, ==, sizeof (ctl));
1154 }
1155#endif
1156}
1157
1158/* ARGSUSED */
1159static void
1160arc_buf_watch(arc_buf_t *buf)
1161{
1162#ifndef _KERNEL
1163 if (arc_watch) {
1164 int result;
1165 procctl_t ctl;
1166 ctl.cmd = PCWATCH;
1167 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1168 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1169 ctl.prwatch.pr_wflags = WA_WRITE;
1170 result = write(arc_procfd, &ctl, sizeof (ctl));
1171 ASSERT3U(result, ==, sizeof (ctl));
1172 }
1173#endif
1174}
1175#endif /* illumos */
1176
1177void
1178arc_buf_thaw(arc_buf_t *buf)
1179{
1180 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1181 if (buf->b_hdr->b_state != arc_anon)
1182 panic("modifying non-anon buffer!");
1183 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1184 panic("modifying buffer while i/o in progress!");
1185 arc_cksum_verify(buf);
1186 }
1187
1188 mutex_enter(&buf->b_hdr->b_freeze_lock);
1189 if (buf->b_hdr->b_freeze_cksum != NULL) {
1190 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1191 buf->b_hdr->b_freeze_cksum = NULL;
1192 }
1193
1194 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1195 if (buf->b_hdr->b_thawed)
1196 kmem_free(buf->b_hdr->b_thawed, 1);
1197 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1198 }
1199
1200 mutex_exit(&buf->b_hdr->b_freeze_lock);
1201
1202#ifdef illumos
1203 arc_buf_unwatch(buf);
1204#endif /* illumos */
1205}
1206
1207void
1208arc_buf_freeze(arc_buf_t *buf)
1209{
1210 kmutex_t *hash_lock;
1211
1212 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1213 return;
1214
1215 hash_lock = HDR_LOCK(buf->b_hdr);
1216 mutex_enter(hash_lock);
1217
1218 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1219 buf->b_hdr->b_state == arc_anon);
1220 arc_cksum_compute(buf, B_FALSE);
1221 mutex_exit(hash_lock);
1222
1223}
1224
1225static void
1226get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1227{
1228 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1229
1230 if (ab->b_type == ARC_BUFC_METADATA)
1231 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1232 else {
1233 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1234 buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1235 }
1236
1237 *list = &state->arcs_lists[buf_hashid];
1238 *lock = ARCS_LOCK(state, buf_hashid);
1239}
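
/*
 * Note (illustrative of the constants above): metadata buffers hash into
 * sublists [0, ARC_BUFC_NUMMETADATALISTS) while data buffers hash into
 * [ARC_BUFC_NUMMETADATALISTS, ARC_BUFC_NUMLISTS), so the two types never
 * contend for the same sublist lock.
 */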
1240
1241
1242static void
1243add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1244{
1245 ASSERT(MUTEX_HELD(hash_lock));
1246
1247 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1248 (ab->b_state != arc_anon)) {
1249 uint64_t delta = ab->b_size * ab->b_datacnt;
1250 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1251 list_t *list;
1252 kmutex_t *lock;
1253
1254 get_buf_info(ab, ab->b_state, &list, &lock);
1255 ASSERT(!MUTEX_HELD(lock));
1256 mutex_enter(lock);
1257 ASSERT(list_link_active(&ab->b_arc_node));
1258 list_remove(list, ab);
1259 if (GHOST_STATE(ab->b_state)) {
1260 ASSERT0(ab->b_datacnt);
1261 ASSERT3P(ab->b_buf, ==, NULL);
1262 delta = ab->b_size;
1263 }
1264 ASSERT(delta > 0);
1265 ASSERT3U(*size, >=, delta);
1266 atomic_add_64(size, -delta);
1267 mutex_exit(lock);
1268 /* remove the prefetch flag if we get a reference */
1269 if (ab->b_flags & ARC_PREFETCH)
1270 ab->b_flags &= ~ARC_PREFETCH;
1271 }
1272}
1273
1274static int
1275remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1276{
1277 int cnt;
1278 arc_state_t *state = ab->b_state;
1279
1280 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1281 ASSERT(!GHOST_STATE(state));
1282
1283 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1284 (state != arc_anon)) {
1285 uint64_t *size = &state->arcs_lsize[ab->b_type];
1286 list_t *list;
1287 kmutex_t *lock;
1288
1289 get_buf_info(ab, state, &list, &lock);
1290 ASSERT(!MUTEX_HELD(lock));
1291 mutex_enter(lock);
1292 ASSERT(!list_link_active(&ab->b_arc_node));
1293 list_insert_head(list, ab);
1294 ASSERT(ab->b_datacnt > 0);
1295 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1296 mutex_exit(lock);
1297 }
1298 return (cnt);
1299}
1300
1301/*
1302 * Move the supplied buffer to the indicated state. The mutex
1303 * for the buffer must be held by the caller.
1304 */
1305static void
1306arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1307{
1308 arc_state_t *old_state = ab->b_state;
1309 int64_t refcnt = refcount_count(&ab->b_refcnt);
1310 uint64_t from_delta, to_delta;
1311 list_t *list;
1312 kmutex_t *lock;
1313
1314 ASSERT(MUTEX_HELD(hash_lock));
1315 ASSERT(new_state != old_state);
1316 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1317 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1318 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1319
1320 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1321
1322 /*
1323 * If this buffer is evictable, transfer it from the
1324 * old state list to the new state list.
1325 */
1326 if (refcnt == 0) {
1327 if (old_state != arc_anon) {
1328 int use_mutex;
1329 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1330
1331 get_buf_info(ab, old_state, &list, &lock);
1332 use_mutex = !MUTEX_HELD(lock);
1333 if (use_mutex)
1334 mutex_enter(lock);
1335
1336 ASSERT(list_link_active(&ab->b_arc_node));
1337 list_remove(list, ab);
1338
1339 /*
1340 * If prefetching out of the ghost cache,
1341 * we will have a non-zero datacnt.
1342 */
1343 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1344 /* ghost elements have a ghost size */
1345 ASSERT(ab->b_buf == NULL);
1346 from_delta = ab->b_size;
1347 }
1348 ASSERT3U(*size, >=, from_delta);
1349 atomic_add_64(size, -from_delta);
1350
1351 if (use_mutex)
1352 mutex_exit(lock);
1353 }
1354 if (new_state != arc_anon) {
1355 int use_mutex;
1356 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1357
1358 get_buf_info(ab, new_state, &list, &lock);
1359 use_mutex = !MUTEX_HELD(lock);
1360 if (use_mutex)
1361 mutex_enter(lock);
1362
1363 list_insert_head(list, ab);
1364
1365 /* ghost elements have a ghost size */
1366 if (GHOST_STATE(new_state)) {
1367 ASSERT(ab->b_datacnt == 0);
1368 ASSERT(ab->b_buf == NULL);
1369 to_delta = ab->b_size;
1370 }
1371 atomic_add_64(size, to_delta);
1372
1373 if (use_mutex)
1374 mutex_exit(lock);
1375 }
1376 }
1377
1378 ASSERT(!BUF_EMPTY(ab));
1379 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1380 buf_hash_remove(ab);
1381
1382 /* adjust state sizes */
1383 if (to_delta)
1384 atomic_add_64(&new_state->arcs_size, to_delta);
1385 if (from_delta) {
1386 ASSERT3U(old_state->arcs_size, >=, from_delta);
1387 atomic_add_64(&old_state->arcs_size, -from_delta);
1388 }
1389 ab->b_state = new_state;
1390
1391 /* adjust l2arc hdr stats */
1392 if (new_state == arc_l2c_only)
1393 l2arc_hdr_stat_add();
1394 else if (old_state == arc_l2c_only)
1395 l2arc_hdr_stat_remove();
1396}
1397
1398void
1399arc_space_consume(uint64_t space, arc_space_type_t type)
1400{
1401 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1402
1403 switch (type) {
1404 case ARC_SPACE_DATA:
1405 ARCSTAT_INCR(arcstat_data_size, space);
1406 break;
1407 case ARC_SPACE_OTHER:
1408 ARCSTAT_INCR(arcstat_other_size, space);
1409 break;
1410 case ARC_SPACE_HDRS:
1411 ARCSTAT_INCR(arcstat_hdr_size, space);
1412 break;
1413 case ARC_SPACE_L2HDRS:
1414 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1415 break;
1416 }
1417
1418 atomic_add_64(&arc_meta_used, space);
1419 atomic_add_64(&arc_size, space);
1420}
1421
1422void
1423arc_space_return(uint64_t space, arc_space_type_t type)
1424{
1425 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1426
1427 switch (type) {
1428 case ARC_SPACE_DATA:
1429 ARCSTAT_INCR(arcstat_data_size, -space);
1430 break;
1431 case ARC_SPACE_OTHER:
1432 ARCSTAT_INCR(arcstat_other_size, -space);
1433 break;
1434 case ARC_SPACE_HDRS:
1435 ARCSTAT_INCR(arcstat_hdr_size, -space);
1436 break;
1437 case ARC_SPACE_L2HDRS:
1438 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1439 break;
1440 }
1441
1442 ASSERT(arc_meta_used >= space);
1443 if (arc_meta_max < arc_meta_used)
1444 arc_meta_max = arc_meta_used;
1445 atomic_add_64(&arc_meta_used, -space);
1446 ASSERT(arc_size >= space);
1447 atomic_add_64(&arc_size, -space);
1448}
1449
1450void *
1451arc_data_buf_alloc(uint64_t size)
1452{
1453 if (arc_evict_needed(ARC_BUFC_DATA))
1454 cv_signal(&arc_reclaim_thr_cv);
1455 atomic_add_64(&arc_size, size);
1456 return (zio_data_buf_alloc(size));
1457}
1458
1459void
1460arc_data_buf_free(void *buf, uint64_t size)
1461{
1462 zio_data_buf_free(buf, size);
1463 ASSERT(arc_size >= size);
1464 atomic_add_64(&arc_size, -size);
1465}
1466
1467arc_buf_t *
1468arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1469{
1470 arc_buf_hdr_t *hdr;
1471 arc_buf_t *buf;
1472
1473 ASSERT3U(size, >, 0);
1474 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1475 ASSERT(BUF_EMPTY(hdr));
1476 hdr->b_size = size;
1477 hdr->b_type = type;
1478 hdr->b_spa = spa_load_guid(spa);
1479 hdr->b_state = arc_anon;
1480 hdr->b_arc_access = 0;
1481 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1482 buf->b_hdr = hdr;
1483 buf->b_data = NULL;
1484 buf->b_efunc = NULL;
1485 buf->b_private = NULL;
1486 buf->b_next = NULL;
1487 hdr->b_buf = buf;
1488 arc_get_data_buf(buf);
1489 hdr->b_datacnt = 1;
1490 hdr->b_flags = 0;
1491 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1492 (void) refcount_add(&hdr->b_refcnt, tag);
1493
1494 return (buf);
1495}
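
/*
 * Illustrative allocation/release pair (the 'tag' value is whatever
 * pointer the caller uses for reference accounting; real callers release
 * with arc_buf_free() or arc_buf_remove_ref() depending on context):
 *
 *	buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *	(fill and use buf->b_data)
 *	arc_buf_free(buf, tag);
 */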
1496
1497static char *arc_onloan_tag = "onloan";
1498
1499/*
1500 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1501 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1502 * buffers must be returned to the arc before they can be used by the DMU or
1503 * freed.
1504 */
1505arc_buf_t *
1506arc_loan_buf(spa_t *spa, int size)
1507{
1508 arc_buf_t *buf;
1509
1510 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1511
1512 atomic_add_64(&arc_loaned_bytes, size);
1513 return (buf);
1514}
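
/*
 * Illustrative loan lifecycle (callers fill a loaned buffer outside of
 * normal arc accounting and must hand it back before it is used or
 * freed):
 *
 *	buf = arc_loan_buf(spa, size);
 *	(fill buf->b_data)
 *	arc_return_buf(buf, tag);
 */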
1515
1516/*
1517 * Return a loaned arc buffer to the arc.
1518 */
1519void
1520arc_return_buf(arc_buf_t *buf, void *tag)
1521{
1522 arc_buf_hdr_t *hdr = buf->b_hdr;
1523
1524 ASSERT(buf->b_data != NULL);
1525 (void) refcount_add(&hdr->b_refcnt, tag);
1526 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1527
1528 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1529}
1530
1531/* Detach an arc_buf from a dbuf (tag) */
1532void
1533arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1534{
1535 arc_buf_hdr_t *hdr;
1536
1537 ASSERT(buf->b_data != NULL);
1538 hdr = buf->b_hdr;
1539 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1540 (void) refcount_remove(&hdr->b_refcnt, tag);
1541 buf->b_efunc = NULL;
1542 buf->b_private = NULL;
1543
1544 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1545}
1546
1547static arc_buf_t *
1548arc_buf_clone(arc_buf_t *from)
1549{
1550 arc_buf_t *buf;
1551 arc_buf_hdr_t *hdr = from->b_hdr;
1552 uint64_t size = hdr->b_size;
1553
1554 ASSERT(hdr->b_state != arc_anon);
1555
1556 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1557 buf->b_hdr = hdr;
1558 buf->b_data = NULL;
1559 buf->b_efunc = NULL;
1560 buf->b_private = NULL;
1561 buf->b_next = hdr->b_buf;
1562 hdr->b_buf = buf;
1563 arc_get_data_buf(buf);
1564 bcopy(from->b_data, buf->b_data, size);
1565
1566 /*
1567 * This buffer already exists in the arc so create a duplicate
1568 * copy for the caller. If the buffer is associated with user data
1569 * then track the size and number of duplicates. These stats will be
1570 * updated as duplicate buffers are created and destroyed.
1571 */
1572 if (hdr->b_type == ARC_BUFC_DATA) {
1573 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1574 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1575 }
1576 hdr->b_datacnt += 1;
1577 return (buf);
1578}
1579
1580void
1581arc_buf_add_ref(arc_buf_t *buf, void* tag)
1582{
1583 arc_buf_hdr_t *hdr;
1584 kmutex_t *hash_lock;
1585
1586 /*
1587 * Check to see if this buffer is evicted. Callers
1588 * must verify b_data != NULL to know if the add_ref
1589 * was successful.
1590 */
1591 mutex_enter(&buf->b_evict_lock);
1592 if (buf->b_data == NULL) {
1593 mutex_exit(&buf->b_evict_lock);
1594 return;
1595 }
1596 hash_lock = HDR_LOCK(buf->b_hdr);
1597 mutex_enter(hash_lock);
1598 hdr = buf->b_hdr;
1599 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1600 mutex_exit(&buf->b_evict_lock);
1601
1602 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1603 add_reference(hdr, hash_lock, tag);
1604 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1605 arc_access(hdr, hash_lock);
1606 mutex_exit(hash_lock);
1607 ARCSTAT_BUMP(arcstat_hits);
1608 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1609 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1610 data, metadata, hits);
1611}
1612
1613/*
1614 * Free the arc data buffer.  If an l2arc write is currently in progress,
1615 * the buffer is placed on l2arc_free_on_write to be freed later.
1616 */
1617static void
1618arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1619{
1620 arc_buf_hdr_t *hdr = buf->b_hdr;
1621
1622 if (HDR_L2_WRITING(hdr)) {
1623 l2arc_data_free_t *df;
1624 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1625 df->l2df_data = buf->b_data;
1626 df->l2df_size = hdr->b_size;
1627 df->l2df_func = free_func;
1628 mutex_enter(&l2arc_free_on_write_mtx);
1629 list_insert_head(l2arc_free_on_write, df);
1630 mutex_exit(&l2arc_free_on_write_mtx);
1631 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1632 } else {
1633 free_func(buf->b_data, hdr->b_size);
1634 }
1635}
1636
1637static void
1638arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1639{
1640 arc_buf_t **bufp;
1641
1642 /* free up data associated with the buf */
1643 if (buf->b_data) {
1644 arc_state_t *state = buf->b_hdr->b_state;
1645 uint64_t size = buf->b_hdr->b_size;
1646 arc_buf_contents_t type = buf->b_hdr->b_type;
1647
1648 arc_cksum_verify(buf);
1649#ifdef illumos
1650 arc_buf_unwatch(buf);
1651#endif /* illumos */
1652
1653 if (!recycle) {
1654 if (type == ARC_BUFC_METADATA) {
1655 arc_buf_data_free(buf, zio_buf_free);
1656 arc_space_return(size, ARC_SPACE_DATA);
1657 } else {
1658 ASSERT(type == ARC_BUFC_DATA);
1659 arc_buf_data_free(buf, zio_data_buf_free);
1660 ARCSTAT_INCR(arcstat_data_size, -size);
1661 atomic_add_64(&arc_size, -size);
1662 }
1663 }
1664 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1665 uint64_t *cnt = &state->arcs_lsize[type];
1666
1667 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1668 ASSERT(state != arc_anon);
1669
1670 ASSERT3U(*cnt, >=, size);
1671 atomic_add_64(cnt, -size);
1672 }
1673 ASSERT3U(state->arcs_size, >=, size);
1674 atomic_add_64(&state->arcs_size, -size);
1675 buf->b_data = NULL;
1676
1677 /*
1678 * If we're destroying a duplicate buffer make sure
1679 * that the appropriate statistics are updated.
1680 */
1681 if (buf->b_hdr->b_datacnt > 1 &&
1682 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1683 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1684 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1685 }
1686 ASSERT(buf->b_hdr->b_datacnt > 0);
1687 buf->b_hdr->b_datacnt -= 1;
1688 }
1689
1690 /* only remove the buf if requested */
1691 if (!all)
1692 return;
1693
1694 /* remove the buf from the hdr list */
1695 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1696 continue;
1697 *bufp = buf->b_next;
1698 buf->b_next = NULL;
1699
1700 ASSERT(buf->b_efunc == NULL);
1701
1702 /* clean up the buf */
1703 buf->b_hdr = NULL;
1704 kmem_cache_free(buf_cache, buf);
1705}
1706
1707static void
1708arc_hdr_destroy(arc_buf_hdr_t *hdr)
1709{
1710 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1711 ASSERT3P(hdr->b_state, ==, arc_anon);
1712 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1713 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1714
1715 if (l2hdr != NULL) {
1716 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1717 /*
1718 * To prevent arc_free() and l2arc_evict() from
1719 * attempting to free the same buffer at the same time,
1720 * a FREE_IN_PROGRESS flag is given to arc_free() to
1721 * give it priority. l2arc_evict() can't destroy this
1722 * header while we are waiting on l2arc_buflist_mtx.
1723 *
1724 * The hdr may be removed from l2ad_buflist before we
1725 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1726 */
1727 if (!buflist_held) {
1728 mutex_enter(&l2arc_buflist_mtx);
1729 l2hdr = hdr->b_l2hdr;
1730 }
1731
1732 if (l2hdr != NULL) {
1733 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1734 hdr->b_size, 0);
1735 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1736 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1737 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1738 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1739 if (hdr->b_state == arc_l2c_only)
1740 l2arc_hdr_stat_remove();
1741 hdr->b_l2hdr = NULL;
1742 }
1743
1744 if (!buflist_held)
1745 mutex_exit(&l2arc_buflist_mtx);
1746 }
1747
1748 if (!BUF_EMPTY(hdr)) {
1749 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1750 buf_discard_identity(hdr);
1751 }
1752 while (hdr->b_buf) {
1753 arc_buf_t *buf = hdr->b_buf;
1754
1755 if (buf->b_efunc) {
1756 mutex_enter(&arc_eviction_mtx);
1757 mutex_enter(&buf->b_evict_lock);
1758 ASSERT(buf->b_hdr != NULL);
1759 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1760 hdr->b_buf = buf->b_next;
1761 buf->b_hdr = &arc_eviction_hdr;
1762 buf->b_next = arc_eviction_list;
1763 arc_eviction_list = buf;
1764 mutex_exit(&buf->b_evict_lock);
1765 mutex_exit(&arc_eviction_mtx);
1766 } else {
1767 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1768 }
1769 }
1770 if (hdr->b_freeze_cksum != NULL) {
1771 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1772 hdr->b_freeze_cksum = NULL;
1773 }
1774 if (hdr->b_thawed) {
1775 kmem_free(hdr->b_thawed, 1);
1776 hdr->b_thawed = NULL;
1777 }
1778
1779 ASSERT(!list_link_active(&hdr->b_arc_node));
1780 ASSERT3P(hdr->b_hash_next, ==, NULL);
1781 ASSERT3P(hdr->b_acb, ==, NULL);
1782 kmem_cache_free(hdr_cache, hdr);
1783}
1784
1785void
1786arc_buf_free(arc_buf_t *buf, void *tag)
1787{
1788 arc_buf_hdr_t *hdr = buf->b_hdr;
1789 int hashed = hdr->b_state != arc_anon;
1790
1791 ASSERT(buf->b_efunc == NULL);
1792 ASSERT(buf->b_data != NULL);
1793
1794 if (hashed) {
1795 kmutex_t *hash_lock = HDR_LOCK(hdr);
1796
1797 mutex_enter(hash_lock);
1798 hdr = buf->b_hdr;
1799 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1800
1801 (void) remove_reference(hdr, hash_lock, tag);
1802 if (hdr->b_datacnt > 1) {
1803 arc_buf_destroy(buf, FALSE, TRUE);
1804 } else {
1805 ASSERT(buf == hdr->b_buf);
1806 ASSERT(buf->b_efunc == NULL);
1807 hdr->b_flags |= ARC_BUF_AVAILABLE;
1808 }
1809 mutex_exit(hash_lock);
1810 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1811 int destroy_hdr;
1812 /*
1813 * We are in the middle of an async write. Don't destroy
1814 * this buffer unless the write completes before we finish
1815 * decrementing the reference count.
1816 */
1817 mutex_enter(&arc_eviction_mtx);
1818 (void) remove_reference(hdr, NULL, tag);
1819 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1820 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1821 mutex_exit(&arc_eviction_mtx);
1822 if (destroy_hdr)
1823 arc_hdr_destroy(hdr);
1824 } else {
1825 if (remove_reference(hdr, NULL, tag) > 0)
1826 arc_buf_destroy(buf, FALSE, TRUE);
1827 else
1828 arc_hdr_destroy(hdr);
1829 }
1830}
1831
1832boolean_t
1833arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1834{
1835 arc_buf_hdr_t *hdr = buf->b_hdr;
1836 kmutex_t *hash_lock = HDR_LOCK(hdr);
1837 boolean_t no_callback = (buf->b_efunc == NULL);
1838
1839 if (hdr->b_state == arc_anon) {
1840 ASSERT(hdr->b_datacnt == 1);
1841 arc_buf_free(buf, tag);
1842 return (no_callback);
1843 }
1844
1845 mutex_enter(hash_lock);
1846 hdr = buf->b_hdr;
1847 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1848 ASSERT(hdr->b_state != arc_anon);
1849 ASSERT(buf->b_data != NULL);
1850
1851 (void) remove_reference(hdr, hash_lock, tag);
1852 if (hdr->b_datacnt > 1) {
1853 if (no_callback)
1854 arc_buf_destroy(buf, FALSE, TRUE);
1855 } else if (no_callback) {
1856 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1857 ASSERT(buf->b_efunc == NULL);
1858 hdr->b_flags |= ARC_BUF_AVAILABLE;
1859 }
1860 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1861 refcount_is_zero(&hdr->b_refcnt));
1862 mutex_exit(hash_lock);
1863 return (no_callback);
1864}
1865
1866int
1867arc_buf_size(arc_buf_t *buf)
1868{
1869 return (buf->b_hdr->b_size);
1870}
1871
1872/*
1873 * Called from the DMU to determine if the current buffer should be
1874 * evicted. In order to ensure proper locking, the eviction must be initiated
1875 * from the DMU. Return true if the buffer is associated with user data and
1876 * duplicate buffers still exist.
1877 */
1878boolean_t
1879arc_buf_eviction_needed(arc_buf_t *buf)
1880{
1881 arc_buf_hdr_t *hdr;
1882 boolean_t evict_needed = B_FALSE;
1883
1884 if (zfs_disable_dup_eviction)
1885 return (B_FALSE);
1886
1887 mutex_enter(&buf->b_evict_lock);
1888 hdr = buf->b_hdr;
1889 if (hdr == NULL) {
1890 /*
1891 * We are in arc_do_user_evicts(); let that function
1892 * perform the eviction.
1893 */
1894 ASSERT(buf->b_data == NULL);
1895 mutex_exit(&buf->b_evict_lock);
1896 return (B_FALSE);
1897 } else if (buf->b_data == NULL) {
1898 /*
1899 * We have already been added to the arc eviction list;
1900 * recommend eviction.
1901 */
1902 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1903 mutex_exit(&buf->b_evict_lock);
1904 return (B_TRUE);
1905 }
1906
1907 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1908 evict_needed = B_TRUE;
1909
1910 mutex_exit(&buf->b_evict_lock);
1911 return (evict_needed);
1912}
1913
1914/*
1915 * Evict buffers from list until we've removed the specified number of
1916 * bytes. Move the removed buffers to the appropriate evict state.
1917 * If the recycle flag is set, then attempt to "recycle" a buffer:
1918 * - look for a buffer to evict that is `bytes' long.
1919 * - return the data block from this buffer rather than freeing it.
1920 * This flag is used by callers that are trying to make space for a
1921 * new buffer in a full arc cache.
1922 *
1923 * This function makes a "best effort". It skips over any buffers
1924 * it can't get a hash_lock on, and so may not catch all candidates.
1925 * It may also return without evicting as much space as requested.
1926 */
1927static void *
1928arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1929 arc_buf_contents_t type)
1930{
1931 arc_state_t *evicted_state;
1932 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1933 int64_t bytes_remaining;
1934 arc_buf_hdr_t *ab, *ab_prev = NULL;
1935 list_t *evicted_list, *list, *evicted_list_start, *list_start;
1936 kmutex_t *lock, *evicted_lock;
1937 kmutex_t *hash_lock;
1938 boolean_t have_lock;
1939 void *stolen = NULL;
1940 static int evict_metadata_offset, evict_data_offset;
1941 int i, idx, offset, list_count, count;
1942
1943 ASSERT(state == arc_mru || state == arc_mfu);
1944
1945 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1946
1947 if (type == ARC_BUFC_METADATA) {
1948 offset = 0;
1949 list_count = ARC_BUFC_NUMMETADATALISTS;
1950 list_start = &state->arcs_lists[0];
1951 evicted_list_start = &evicted_state->arcs_lists[0];
1952 idx = evict_metadata_offset;
1953 } else {
1954 offset = ARC_BUFC_NUMMETADATALISTS;
1955 list_start = &state->arcs_lists[offset];
1956 evicted_list_start = &evicted_state->arcs_lists[offset];
1957 list_count = ARC_BUFC_NUMDATALISTS;
1958 idx = evict_data_offset;
1959 }
1960 bytes_remaining = evicted_state->arcs_lsize[type];
1961 count = 0;
1962
1963evict_start:
1964 list = &list_start[idx];
1965 evicted_list = &evicted_list_start[idx];
1966 lock = ARCS_LOCK(state, (offset + idx));
1967 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
1968
1969 mutex_enter(lock);
1970 mutex_enter(evicted_lock);
1971
1972 for (ab = list_tail(list); ab; ab = ab_prev) {
1973 ab_prev = list_prev(list, ab);
1974 bytes_remaining -= (ab->b_size * ab->b_datacnt);
1975 /* prefetch buffers have a minimum lifespan */
1976 if (HDR_IO_IN_PROGRESS(ab) ||
1977 (spa && ab->b_spa != spa) ||
1978 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1979 ddi_get_lbolt() - ab->b_arc_access <
1980 arc_min_prefetch_lifespan)) {
1981 skipped++;
1982 continue;
1983 }
1984 /* "lookahead" for better eviction candidate */
1985 if (recycle && ab->b_size != bytes &&
1986 ab_prev && ab_prev->b_size == bytes)
1987 continue;
1988 hash_lock = HDR_LOCK(ab);
1989 have_lock = MUTEX_HELD(hash_lock);
1990 if (have_lock || mutex_tryenter(hash_lock)) {
1991 ASSERT0(refcount_count(&ab->b_refcnt));
1992 ASSERT(ab->b_datacnt > 0);
1993 while (ab->b_buf) {
1994 arc_buf_t *buf = ab->b_buf;
1995 if (!mutex_tryenter(&buf->b_evict_lock)) {
1996 missed += 1;
1997 break;
1998 }
1999 if (buf->b_data) {
2000 bytes_evicted += ab->b_size;
2001 if (recycle && ab->b_type == type &&
2002 ab->b_size == bytes &&
2003 !HDR_L2_WRITING(ab)) {
2004 stolen = buf->b_data;
2005 recycle = FALSE;
2006 }
2007 }
2008 if (buf->b_efunc) {
2009 mutex_enter(&arc_eviction_mtx);
2010 arc_buf_destroy(buf,
2011 buf->b_data == stolen, FALSE);
2012 ab->b_buf = buf->b_next;
2013 buf->b_hdr = &arc_eviction_hdr;
2014 buf->b_next = arc_eviction_list;
2015 arc_eviction_list = buf;
2016 mutex_exit(&arc_eviction_mtx);
2017 mutex_exit(&buf->b_evict_lock);
2018 } else {
2019 mutex_exit(&buf->b_evict_lock);
2020 arc_buf_destroy(buf,
2021 buf->b_data == stolen, TRUE);
2022 }
2023 }
2024
2025 if (ab->b_l2hdr) {
2026 ARCSTAT_INCR(arcstat_evict_l2_cached,
2027 ab->b_size);
2028 } else {
2029 if (l2arc_write_eligible(ab->b_spa, ab)) {
2030 ARCSTAT_INCR(arcstat_evict_l2_eligible,
2031 ab->b_size);
2032 } else {
2033 ARCSTAT_INCR(
2034 arcstat_evict_l2_ineligible,
2035 ab->b_size);
2036 }
2037 }
2038
2039 if (ab->b_datacnt == 0) {
2040 arc_change_state(evicted_state, ab, hash_lock);
2041 ASSERT(HDR_IN_HASH_TABLE(ab));
2042 ab->b_flags |= ARC_IN_HASH_TABLE;
2043 ab->b_flags &= ~ARC_BUF_AVAILABLE;
2044 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2045 }
2046 if (!have_lock)
2047 mutex_exit(hash_lock);
2048 if (bytes >= 0 && bytes_evicted >= bytes)
2049 break;
2050 if (bytes_remaining > 0) {
2051 mutex_exit(evicted_lock);
2052 mutex_exit(lock);
2053 idx = ((idx + 1) & (list_count - 1));
2054 count++;
2055 goto evict_start;
2056 }
2057 } else {
2058 missed += 1;
2059 }
2060 }
2061
2062 mutex_exit(evicted_lock);
2063 mutex_exit(lock);
2064
2065 idx = ((idx + 1) & (list_count - 1));
2066 count++;
2067
2068 if (bytes_evicted < bytes) {
2069 if (count < list_count)
2070 goto evict_start;
2071 else
2072 dprintf("only evicted %lld bytes from %x",
2073 (longlong_t)bytes_evicted, state);
2074 }
2075 if (type == ARC_BUFC_METADATA)
2076 evict_metadata_offset = idx;
2077 else
2078 evict_data_offset = idx;
2079
2080 if (skipped)
2081 ARCSTAT_INCR(arcstat_evict_skip, skipped);
2082
2083 if (missed)
2084 ARCSTAT_INCR(arcstat_mutex_miss, missed);
2085
2086 /*
2087	 * We have just evicted some data into the ghost state; make
2088 * sure we also adjust the ghost state size if necessary.
2089 */
2090 if (arc_no_grow &&
2091 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
2092 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
2093 arc_mru_ghost->arcs_size - arc_c;
2094
2095 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
2096 int64_t todelete =
2097 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
2098 arc_evict_ghost(arc_mru_ghost, 0, todelete);
2099 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
2100 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
2101 arc_mru_ghost->arcs_size +
2102 arc_mfu_ghost->arcs_size - arc_c);
2103 arc_evict_ghost(arc_mfu_ghost, 0, todelete);
2104 }
2105 }
2106 if (stolen)
2107 ARCSTAT_BUMP(arcstat_stolen);
2108
2109 return (stolen);
2110}
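
/*
 * Illustrative sketch (not part of the original source) of the
 * "recycle" contract described above arc_evict().  A caller that needs
 * a block of a specific size may ask arc_evict() to steal one of
 * exactly that size instead of freeing it; arc_get_data_buf() below is
 * the in-tree user of this path.
 */
#if 0	/* example only; not compiled */
static void *
arc_evict_recycle_example(arc_state_t *state, uint64_t size,
    arc_buf_contents_t type)
{
	/*
	 * With recycle set, a buffer of exactly `size' bytes is looked
	 * for and its data block returned for reuse; NULL means no such
	 * buffer was found and the caller must allocate a fresh block.
	 */
	return (arc_evict(state, 0, size, TRUE, type));
}
#endif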
2111
2112/*
2113 * Remove buffers from list until we've removed the specified number of
2114 * bytes. Destroy the buffers that are removed.
2115 */
2116static void
2117arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2118{
2119 arc_buf_hdr_t *ab, *ab_prev;
2120 arc_buf_hdr_t marker = { 0 };
2121 list_t *list, *list_start;
2122 kmutex_t *hash_lock, *lock;
2123 uint64_t bytes_deleted = 0;
2124 uint64_t bufs_skipped = 0;
2125 static int evict_offset;
2126 int list_count, idx = evict_offset;
2127 int offset, count = 0;
2128
2129 ASSERT(GHOST_STATE(state));
2130
2131 /*
2132 * data lists come after metadata lists
2133 */
2134 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2135 list_count = ARC_BUFC_NUMDATALISTS;
2136 offset = ARC_BUFC_NUMMETADATALISTS;
2137
2138evict_start:
2139 list = &list_start[idx];
2140 lock = ARCS_LOCK(state, idx + offset);
2141
2142 mutex_enter(lock);
2143 for (ab = list_tail(list); ab; ab = ab_prev) {
2144 ab_prev = list_prev(list, ab);
2145 if (spa && ab->b_spa != spa)
2146 continue;
2147
2148 /* ignore markers */
2149 if (ab->b_spa == 0)
2150 continue;
2151
2152 hash_lock = HDR_LOCK(ab);
2153 /* caller may be trying to modify this buffer, skip it */
2154 if (MUTEX_HELD(hash_lock))
2155 continue;
2156 if (mutex_tryenter(hash_lock)) {
2157 ASSERT(!HDR_IO_IN_PROGRESS(ab));
2158 ASSERT(ab->b_buf == NULL);
2159 ARCSTAT_BUMP(arcstat_deleted);
2160 bytes_deleted += ab->b_size;
2161
2162 if (ab->b_l2hdr != NULL) {
2163 /*
2164 * This buffer is cached on the 2nd Level ARC;
2165 * don't destroy the header.
2166 */
2167 arc_change_state(arc_l2c_only, ab, hash_lock);
2168 mutex_exit(hash_lock);
2169 } else {
2170 arc_change_state(arc_anon, ab, hash_lock);
2171 mutex_exit(hash_lock);
2172 arc_hdr_destroy(ab);
2173 }
2174
2175 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2176 if (bytes >= 0 && bytes_deleted >= bytes)
2177 break;
2178 } else if (bytes < 0) {
2179 /*
2180 * Insert a list marker and then wait for the
2181			 * hash lock to become available. Once it's
2182 * available, restart from where we left off.
2183 */
2184 list_insert_after(list, ab, &marker);
2185 mutex_exit(lock);
2186 mutex_enter(hash_lock);
2187 mutex_exit(hash_lock);
2188 mutex_enter(lock);
2189 ab_prev = list_prev(list, &marker);
2190 list_remove(list, &marker);
2191 } else
2192 bufs_skipped += 1;
2193 }
2194 mutex_exit(lock);
2195 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2196 count++;
2197
2198 if (count < list_count)
2199 goto evict_start;
2200
2201 evict_offset = idx;
2202 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2203 (bytes < 0 || bytes_deleted < bytes)) {
2204 list_start = &state->arcs_lists[0];
2205 list_count = ARC_BUFC_NUMMETADATALISTS;
2206 offset = count = 0;
2207 goto evict_start;
2208 }
2209
2210 if (bufs_skipped) {
2211 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2212 ASSERT(bytes >= 0);
2213 }
2214
2215 if (bytes_deleted < bytes)
2216 dprintf("only deleted %lld bytes from %p",
2217 (longlong_t)bytes_deleted, state);
2218}
2219
2220static void
2221arc_adjust(void)
2222{
2223 int64_t adjustment, delta;
2224
2225 /*
2226 * Adjust MRU size
2227 */
2228
2229 adjustment = MIN((int64_t)(arc_size - arc_c),
2230 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2231 arc_p));
2232
2233 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2234 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2235 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2236 adjustment -= delta;
2237 }
2238
2239 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2240 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2241 (void) arc_evict(arc_mru, 0, delta, FALSE,
2242 ARC_BUFC_METADATA);
2243 }
2244
2245 /*
2246 * Adjust MFU size
2247 */
2248
2249 adjustment = arc_size - arc_c;
2250
2251 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2252 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2253 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2254 adjustment -= delta;
2255 }
2256
2257 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2258 int64_t delta = MIN(adjustment,
2259 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2260 (void) arc_evict(arc_mfu, 0, delta, FALSE,
2261 ARC_BUFC_METADATA);
2262 }
2263
2264 /*
2265 * Adjust ghost lists
2266 */
2267
2268 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2269
2270 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2271 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2272 arc_evict_ghost(arc_mru_ghost, 0, delta);
2273 }
2274
2275 adjustment =
2276 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2277
2278 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2279 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2280 arc_evict_ghost(arc_mfu_ghost, 0, delta);
2281 }
2282}
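
/*
 * Worked example (not part of the original source) of the MRU pass at
 * the top of arc_adjust(), using hypothetical sizes: arc_size = 10 GB,
 * arc_c = 8 GB, arc_anon + arc_mru + arc_meta_used = 6 GB, arc_p = 5 GB.
 * The cache is 2 GB over target but the MRU side is only 1 GB over its
 * target arc_p, so 1 GB is evicted here; the rest of the overage is
 * handled by the MFU pass, whose adjustment is simply arc_size - arc_c.
 */
#if 0	/* example only; not compiled */
static int64_t
arc_adjust_mru_example(void)
{
	int64_t size = 10LL << 30;	/* hypothetical arc_size */
	int64_t c = 8LL << 30;		/* hypothetical arc_c */
	int64_t mru_side = 6LL << 30;	/* anon + mru + meta_used */
	int64_t p = 5LL << 30;		/* hypothetical arc_p */

	/* MIN(10 - 8, 6 - 5) GB == 1 GB evicted from the MRU lists */
	return (MIN(size - c, mru_side - p));
}
#endif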
2283
2284static void
2285arc_do_user_evicts(void)
2286{
2287 static arc_buf_t *tmp_arc_eviction_list;
2288
2289 /*
2290 * Move list over to avoid LOR
2291 */
2292restart:
2293 mutex_enter(&arc_eviction_mtx);
2294 tmp_arc_eviction_list = arc_eviction_list;
2295 arc_eviction_list = NULL;
2296 mutex_exit(&arc_eviction_mtx);
2297
2298 while (tmp_arc_eviction_list != NULL) {
2299 arc_buf_t *buf = tmp_arc_eviction_list;
2300 tmp_arc_eviction_list = buf->b_next;
2301 mutex_enter(&buf->b_evict_lock);
2302 buf->b_hdr = NULL;
2303 mutex_exit(&buf->b_evict_lock);
2304
2305 if (buf->b_efunc != NULL)
2306 VERIFY(buf->b_efunc(buf) == 0);
2307
2308 buf->b_efunc = NULL;
2309 buf->b_private = NULL;
2310 kmem_cache_free(buf_cache, buf);
2311 }
2312
2313 if (arc_eviction_list != NULL)
2314 goto restart;
2315}
2316
2317/*
2318 * Flush all *evictable* data from the cache for the given spa.
2319 * NOTE: this will not touch "active" (i.e. referenced) data.
2320 */
2321void
2322arc_flush(spa_t *spa)
2323{
2324 uint64_t guid = 0;
2325
2326 if (spa)
2327 guid = spa_load_guid(spa);
2328
2329 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2330 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2331 if (spa)
2332 break;
2333 }
2334 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2335 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2336 if (spa)
2337 break;
2338 }
2339 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2340 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2341 if (spa)
2342 break;
2343 }
2344 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2345 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2346 if (spa)
2347 break;
2348 }
2349
2350 arc_evict_ghost(arc_mru_ghost, guid, -1);
2351 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2352
2353 mutex_enter(&arc_reclaim_thr_lock);
2354 arc_do_user_evicts();
2355 mutex_exit(&arc_reclaim_thr_lock);
2356 ASSERT(spa || arc_eviction_list == NULL);
2357}
2358
2359void
2360arc_shrink(void)
2361{
2362 if (arc_c > arc_c_min) {
2363 uint64_t to_free;
2364
2365#ifdef _KERNEL
2366 to_free = arc_c >> arc_shrink_shift;
2367#else
2368 to_free = arc_c >> arc_shrink_shift;
2369#endif
2370 if (arc_c > arc_c_min + to_free)
2371 atomic_add_64(&arc_c, -to_free);
2372 else
2373 arc_c = arc_c_min;
2374
2375 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2376 if (arc_c > arc_size)
2377 arc_c = MAX(arc_size, arc_c_min);
2378 if (arc_p > arc_c)
2379 arc_p = (arc_c >> 1);
2380 ASSERT(arc_c >= arc_c_min);
2381 ASSERT((int64_t)arc_p >= 0);
2382 }
2383
2384 if (arc_size > arc_c)
2385 arc_adjust();
2386}
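
/*
 * Worked example (not part of the original source): with a hypothetical
 * arc_c of 8 GB and an arc_shrink_shift of 5 (the shift is defined
 * outside this excerpt), one pass of arc_shrink() drops the target size
 * by arc_c >> 5 == 256 MB and reduces arc_p by the same 1/32 fraction
 * of its own value, subject to the arc_c_min floor checked above.
 */
#if 0	/* example only; not compiled */
static void
arc_shrink_example(void)
{
	uint64_t c = 8ULL << 30;	/* hypothetical arc_c */
	int shift = 5;			/* hypothetical arc_shrink_shift */
	uint64_t to_free = c >> shift;	/* 256 MB off the target size */
	uint64_t p = 4ULL << 30;	/* hypothetical arc_p */

	c -= to_free;			/* new target: 7.75 GB */
	p -= p >> shift;		/* arc_p shrinks by 128 MB */
	(void) c;
	(void) p;
}
#endif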
2387
2388static int needfree = 0;
2389
2390static int
2391arc_reclaim_needed(void)
2392{
2393
2394#ifdef _KERNEL
2395
2396 if (needfree)
2397 return (1);
2398
2399 /*
2400 * Cooperate with pagedaemon when it's time for it to scan
2401 * and reclaim some pages.
2402 */
2403 if (vm_paging_needed())
2404 return (1);
2405
2406#ifdef sun
2407 /*
2408 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2409 */
2410 extra = desfree;
2411
2412 /*
2413 * check that we're out of range of the pageout scanner. It starts to
2414 * schedule paging if freemem is less than lotsfree and needfree.
2415 * lotsfree is the high-water mark for pageout, and needfree is the
2416 * number of needed free pages. We add extra pages here to make sure
2417 * the scanner doesn't start up while we're freeing memory.
2418 */
2419 if (freemem < lotsfree + needfree + extra)
2420 return (1);
2421
2422 /*
2423 * check to make sure that swapfs has enough space so that anon
2424 * reservations can still succeed. anon_resvmem() checks that the
2425 * availrmem is greater than swapfs_minfree, and the number of reserved
2426 * swap pages. We also add a bit of extra here just to prevent
2427 * circumstances from getting really dire.
2428 */
2429 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2430 return (1);
2431
2432#if defined(__i386)
2433 /*
2434 * If we're on an i386 platform, it's possible that we'll exhaust the
2435 * kernel heap space before we ever run out of available physical
2436 * memory. Most checks of the size of the heap_area compare against
2437 * tune.t_minarmem, which is the minimum available real memory that we
2438 * can have in the system. However, this is generally fixed at 25 pages
2439 * which is so low that it's useless. In this comparison, we seek to
2440 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2441 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2442 * free)
2443 */
2444 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2445 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2446 return (1);
2447#endif
2448#else /* !sun */
2449 if (kmem_used() > (kmem_size() * 3) / 4)
2450 return (1);
2451#endif /* sun */
2452
2453#else
2454 if (spa_get_random(100) == 0)
2455 return (1);
2456#endif
2457 return (0);
2458}
2459
2460extern kmem_cache_t *zio_buf_cache[];
2461extern kmem_cache_t *zio_data_buf_cache[];
2462
2463static void
2464arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2465{
2466 size_t i;
2467 kmem_cache_t *prev_cache = NULL;
2468 kmem_cache_t *prev_data_cache = NULL;
2469
2470#ifdef _KERNEL
2471 if (arc_meta_used >= arc_meta_limit) {
2472 /*
2473 * We are exceeding our meta-data cache limit.
2474 * Purge some DNLC entries to release holds on meta-data.
2475 */
2476 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2477 }
2478#if defined(__i386)
2479 /*
2480 * Reclaim unused memory from all kmem caches.
2481 */
2482 kmem_reap();
2483#endif
2484#endif
2485
2486 /*
2487 * An aggressive reclamation will shrink the cache size as well as
2488 * reap free buffers from the arc kmem caches.
2489 */
2490 if (strat == ARC_RECLAIM_AGGR)
2491 arc_shrink();
2492
2493 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2494 if (zio_buf_cache[i] != prev_cache) {
2495 prev_cache = zio_buf_cache[i];
2496 kmem_cache_reap_now(zio_buf_cache[i]);
2497 }
2498 if (zio_data_buf_cache[i] != prev_data_cache) {
2499 prev_data_cache = zio_data_buf_cache[i];
2500 kmem_cache_reap_now(zio_data_buf_cache[i]);
2501 }
2502 }
2503 kmem_cache_reap_now(buf_cache);
2504 kmem_cache_reap_now(hdr_cache);
2505}
2506
2507static void
2508arc_reclaim_thread(void *dummy __unused)
2509{
2510 clock_t growtime = 0;
2511 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2512 callb_cpr_t cpr;
2513
2514 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2515
2516 mutex_enter(&arc_reclaim_thr_lock);
2517 while (arc_thread_exit == 0) {
2518 if (arc_reclaim_needed()) {
2519
2520 if (arc_no_grow) {
2521 if (last_reclaim == ARC_RECLAIM_CONS) {
2522 last_reclaim = ARC_RECLAIM_AGGR;
2523 } else {
2524 last_reclaim = ARC_RECLAIM_CONS;
2525 }
2526 } else {
2527 arc_no_grow = TRUE;
2528 last_reclaim = ARC_RECLAIM_AGGR;
2529 membar_producer();
2530 }
2531
2532 /* reset the growth delay for every reclaim */
2533 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2534
2535 if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2536 /*
2537				 * If needfree is TRUE, our vm_lowmem hook
2538				 * was called; in that case we must free some
2539 * memory, so switch to aggressive mode.
2540 */
2541 arc_no_grow = TRUE;
2542 last_reclaim = ARC_RECLAIM_AGGR;
2543 }
2544 arc_kmem_reap_now(last_reclaim);
2545 arc_warm = B_TRUE;
2546
2547 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2548 arc_no_grow = FALSE;
2549 }
2550
2551 arc_adjust();
2552
2553 if (arc_eviction_list != NULL)
2554 arc_do_user_evicts();
2555
2556#ifdef _KERNEL
2557 if (needfree) {
2558 needfree = 0;
2559 wakeup(&needfree);
2560 }
2561#endif
2562
2563 /* block until needed, or one second, whichever is shorter */
2564 CALLB_CPR_SAFE_BEGIN(&cpr);
2565 (void) cv_timedwait(&arc_reclaim_thr_cv,
2566 &arc_reclaim_thr_lock, hz);
2567 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2568 }
2569
2570 arc_thread_exit = 0;
2571 cv_broadcast(&arc_reclaim_thr_cv);
2572 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2573 thread_exit();
2574}
2575
2576/*
2577 * Adapt arc info given the number of bytes we are trying to add and
2578 * the state that we are coming from. This function is only called
2579 * when we are adding new content to the cache.
2580 */
2581static void
2582arc_adapt(int bytes, arc_state_t *state)
2583{
2584 int mult;
2585 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2586
2587 if (state == arc_l2c_only)
2588 return;
2589
2590 ASSERT(bytes > 0);
2591 /*
2592 * Adapt the target size of the MRU list:
2593 * - if we just hit in the MRU ghost list, then increase
2594 * the target size of the MRU list.
2595 * - if we just hit in the MFU ghost list, then increase
2596 * the target size of the MFU list by decreasing the
2597 * target size of the MRU list.
2598 */
2599 if (state == arc_mru_ghost) {
2600 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2601 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2602 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2603
2604 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2605 } else if (state == arc_mfu_ghost) {
2606 uint64_t delta;
2607
2608 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2609 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2610 mult = MIN(mult, 10);
2611
2612 delta = MIN(bytes * mult, arc_p);
2613 arc_p = MAX(arc_p_min, arc_p - delta);
2614 }
2615 ASSERT((int64_t)arc_p >= 0);
2616
2617 if (arc_reclaim_needed()) {
2618 cv_signal(&arc_reclaim_thr_cv);
2619 return;
2620 }
2621
2622 if (arc_no_grow)
2623 return;
2624
2625 if (arc_c >= arc_c_max)
2626 return;
2627
2628 /*
2629 * If we're within (2 * maxblocksize) bytes of the target
2630 * cache size, increment the target cache size
2631 */
2632 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2633 atomic_add_64(&arc_c, (int64_t)bytes);
2634 if (arc_c > arc_c_max)
2635 arc_c = arc_c_max;
2636 else if (state == arc_anon)
2637 atomic_add_64(&arc_p, (int64_t)bytes);
2638 if (arc_p > arc_c)
2639 arc_p = arc_c;
2640 }
2641 ASSERT((int64_t)arc_p >= 0);
2642}
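
/*
 * Worked example (not part of the original source) of the arc_p
 * adaptation above, with hypothetical ghost-list sizes.  A hit in the
 * MRU ghost list while the MFU ghost list is four times larger gives
 * mult == 4, so arc_p grows by four times the block size, clamped to
 * arc_c - arc_p_min (and the multiplier itself is capped at 10).
 */
#if 0	/* example only; not compiled */
static uint64_t
arc_adapt_mru_ghost_example(uint64_t p, uint64_t c, int bytes)
{
	uint64_t mru_ghost = 1ULL << 30;	/* hypothetical: 1 GB */
	uint64_t mfu_ghost = 4ULL << 30;	/* hypothetical: 4 GB */
	uint64_t p_min = c >> arc_p_min_shift;
	int mult;

	mult = (mru_ghost >= mfu_ghost) ? 1 : (mfu_ghost / mru_ghost);
	mult = MIN(mult, 10);			/* mult == 4 here */

	return (MIN(c - p_min, p + bytes * mult));
}
#endif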
2643
2644/*
2645 * Check if the cache has reached its limits and eviction is required
2646 * prior to insert.
2647 */
2648static int
2649arc_evict_needed(arc_buf_contents_t type)
2650{
2651 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2652 return (1);
2653
2654#ifdef sun
2655#ifdef _KERNEL
2656 /*
2657 * If zio data pages are being allocated out of a separate heap segment,
2658	 * then enforce that the amount of free vmem in that arena remains
2659	 * above about 1/32nd of its allocated size.
2660 */
2661 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2662 vmem_size(zio_arena, VMEM_FREE) <
2663 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2664 return (1);
2665#endif
2666#endif /* sun */
2667
2668 if (arc_reclaim_needed())
2669 return (1);
2670
2671 return (arc_size > arc_c);
2672}
2673
2674/*
2675 * The buffer, supplied as the first argument, needs a data block.
2676 * So, if we are at cache max, determine which cache should be victimized.
2677 * We have the following cases:
2678 *
2679 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2680 * In this situation if we're out of space, but the resident size of the MFU is
2681 * under the limit, victimize the MFU cache to satisfy this insertion request.
2682 *
2683 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2684 * Here, we've used up all of the available space for the MRU, so we need to
2685 * evict from our own cache instead. Evict from the set of resident MRU
2686 * entries.
2687 *
2688 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2689 * c minus p represents the MFU space in the cache, since p is the size of the
2690 * cache that is dedicated to the MRU. In this situation there's still space on
2691 * the MFU side, so the MRU side needs to be victimized.
2692 *
2693 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2694 * MFU's resident set is consuming more space than it has been allotted. In
2695 * this situation, we must victimize our own cache, the MFU, for this insertion.
2696 */
2697static void
2698arc_get_data_buf(arc_buf_t *buf)
2699{
2700 arc_state_t *state = buf->b_hdr->b_state;
2701 uint64_t size = buf->b_hdr->b_size;
2702 arc_buf_contents_t type = buf->b_hdr->b_type;
2703
2704 arc_adapt(size, state);
2705
2706 /*
2707 * We have not yet reached cache maximum size,
2708 * just allocate a new buffer.
2709 */
2710 if (!arc_evict_needed(type)) {
2711 if (type == ARC_BUFC_METADATA) {
2712 buf->b_data = zio_buf_alloc(size);
2713 arc_space_consume(size, ARC_SPACE_DATA);
2714 } else {
2715 ASSERT(type == ARC_BUFC_DATA);
2716 buf->b_data = zio_data_buf_alloc(size);
2717 ARCSTAT_INCR(arcstat_data_size, size);
2718 atomic_add_64(&arc_size, size);
2719 }
2720 goto out;
2721 }
2722
2723 /*
2724 * If we are prefetching from the mfu ghost list, this buffer
2725	 * will end up on the mru list, so steal space from there.
2726 */
2727 if (state == arc_mfu_ghost)
2728 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2729 else if (state == arc_mru_ghost)
2730 state = arc_mru;
2731
2732 if (state == arc_mru || state == arc_anon) {
2733 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2734 state = (arc_mfu->arcs_lsize[type] >= size &&
2735 arc_p > mru_used) ? arc_mfu : arc_mru;
2736 } else {
2737 /* MFU cases */
2738 uint64_t mfu_space = arc_c - arc_p;
2739 state = (arc_mru->arcs_lsize[type] >= size &&
2740 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2741 }
2742 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2743 if (type == ARC_BUFC_METADATA) {
2744 buf->b_data = zio_buf_alloc(size);
2745 arc_space_consume(size, ARC_SPACE_DATA);
2746 } else {
2747 ASSERT(type == ARC_BUFC_DATA);
2748 buf->b_data = zio_data_buf_alloc(size);
2749 ARCSTAT_INCR(arcstat_data_size, size);
2750 atomic_add_64(&arc_size, size);
2751 }
2752 ARCSTAT_BUMP(arcstat_recycle_miss);
2753 }
2754 ASSERT(buf->b_data != NULL);
2755out:
2756 /*
2757 * Update the state size. Note that ghost states have a
2758 * "ghost size" and so don't need to be updated.
2759 */
2760 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2761 arc_buf_hdr_t *hdr = buf->b_hdr;
2762
2763 atomic_add_64(&hdr->b_state->arcs_size, size);
2764 if (list_link_active(&hdr->b_arc_node)) {
2765 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2766 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2767 }
2768 /*
2769 * If we are growing the cache, and we are adding anonymous
2770 * data, and we have outgrown arc_p, update arc_p
2771 */
2772 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2773 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2774 arc_p = MIN(arc_c, arc_p + size);
2775 }
2776 ARCSTAT_BUMP(arcstat_allocated);
2777}
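
/*
 * Worked example (not part of the original source) of the MFU cases in
 * the victimization rules above, with hypothetical sizes: arc_c = 8 GB
 * and arc_p = 5 GB leave 3 GB of MFU space (c - p).  If the resident
 * MFU holds 2 GB it is under its allotment, so the MRU is victimized
 * (case 3); if it already holds 4 GB it has overrun its allotment and
 * is victimized itself (case 4).  The sketch mirrors the MFU branch of
 * arc_get_data_buf().
 */
#if 0	/* example only; not compiled */
static arc_state_t *
arc_pick_mfu_victim_example(uint64_t size, arc_buf_contents_t type)
{
	uint64_t mfu_space = arc_c - arc_p;	/* e.g. 8 GB - 5 GB */

	return ((arc_mru->arcs_lsize[type] >= size &&
	    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu);
}
#endif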
2778
2779/*
2780 * This routine is called whenever a buffer is accessed.
2781 * NOTE: the hash lock is dropped in this function.
2782 */
2783static void
2784arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2785{
2786 clock_t now;
2787
2788 ASSERT(MUTEX_HELD(hash_lock));
2789
2790 if (buf->b_state == arc_anon) {
2791 /*
2792 * This buffer is not in the cache, and does not
2793 * appear in our "ghost" list. Add the new buffer
2794 * to the MRU state.
2795 */
2796
2797 ASSERT(buf->b_arc_access == 0);
2798 buf->b_arc_access = ddi_get_lbolt();
2799 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2800 arc_change_state(arc_mru, buf, hash_lock);
2801
2802 } else if (buf->b_state == arc_mru) {
2803 now = ddi_get_lbolt();
2804
2805 /*
2806 * If this buffer is here because of a prefetch, then either:
2807 * - clear the flag if this is a "referencing" read
2808 * (any subsequent access will bump this into the MFU state).
2809 * or
2810 * - move the buffer to the head of the list if this is
2811 * another prefetch (to make it less likely to be evicted).
2812 */
2813 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2814 if (refcount_count(&buf->b_refcnt) == 0) {
2815 ASSERT(list_link_active(&buf->b_arc_node));
2816 } else {
2817 buf->b_flags &= ~ARC_PREFETCH;
2818 ARCSTAT_BUMP(arcstat_mru_hits);
2819 }
2820 buf->b_arc_access = now;
2821 return;
2822 }
2823
2824 /*
2825 * This buffer has been "accessed" only once so far,
2826 * but it is still in the cache. Move it to the MFU
2827 * state.
2828 */
2829 if (now > buf->b_arc_access + ARC_MINTIME) {
2830 /*
2831 * More than 125ms have passed since we
2832 * instantiated this buffer. Move it to the
2833 * most frequently used state.
2834 */
2835 buf->b_arc_access = now;
2836 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2837 arc_change_state(arc_mfu, buf, hash_lock);
2838 }
2839 ARCSTAT_BUMP(arcstat_mru_hits);
2840 } else if (buf->b_state == arc_mru_ghost) {
2841 arc_state_t *new_state;
2842 /*
2843 * This buffer has been "accessed" recently, but
2844 * was evicted from the cache. Move it to the
2845 * MFU state.
2846 */
2847
2848 if (buf->b_flags & ARC_PREFETCH) {
2849 new_state = arc_mru;
2850 if (refcount_count(&buf->b_refcnt) > 0)
2851 buf->b_flags &= ~ARC_PREFETCH;
2852 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2853 } else {
2854 new_state = arc_mfu;
2855 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2856 }
2857
2858 buf->b_arc_access = ddi_get_lbolt();
2859 arc_change_state(new_state, buf, hash_lock);
2860
2861 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2862 } else if (buf->b_state == arc_mfu) {
2863 /*
2864 * This buffer has been accessed more than once and is
2865 * still in the cache. Keep it in the MFU state.
2866 *
2867 * NOTE: an add_reference() that occurred when we did
2868 * the arc_read() will have kicked this off the list.
2869 * If it was a prefetch, we will explicitly move it to
2870 * the head of the list now.
2871 */
2872 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2873 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2874 ASSERT(list_link_active(&buf->b_arc_node));
2875 }
2876 ARCSTAT_BUMP(arcstat_mfu_hits);
2877 buf->b_arc_access = ddi_get_lbolt();
2878 } else if (buf->b_state == arc_mfu_ghost) {
2879 arc_state_t *new_state = arc_mfu;
2880 /*
2881 * This buffer has been accessed more than once but has
2882 * been evicted from the cache. Move it back to the
2883 * MFU state.
2884 */
2885
2886 if (buf->b_flags & ARC_PREFETCH) {
2887 /*
2888 * This is a prefetch access...
2889 * move this block back to the MRU state.
2890 */
2891 ASSERT0(refcount_count(&buf->b_refcnt));
2892 new_state = arc_mru;
2893 }
2894
2895 buf->b_arc_access = ddi_get_lbolt();
2896 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2897 arc_change_state(new_state, buf, hash_lock);
2898
2899 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2900 } else if (buf->b_state == arc_l2c_only) {
2901 /*
2902 * This buffer is on the 2nd Level ARC.
2903 */
2904
2905 buf->b_arc_access = ddi_get_lbolt();
2906 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2907 arc_change_state(arc_mfu, buf, hash_lock);
2908 } else {
2909 ASSERT(!"invalid arc state");
2910 }
2911}
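
/*
 * Summary of the transitions implemented above (added for clarity; not
 * part of the original source):
 *
 *	arc_anon	-> arc_mru	first insertion into the cache
 *	arc_mru		-> arc_mru	prefetch hit, or demand hit within
 *					ARC_MINTIME of the last access
 *	arc_mru		-> arc_mfu	demand hit after ARC_MINTIME
 *	arc_mru_ghost	-> arc_mfu	demand hit (arc_mru for a prefetch)
 *	arc_mfu		-> arc_mfu	any hit
 *	arc_mfu_ghost	-> arc_mfu	demand hit (arc_mru for a prefetch)
 *	arc_l2c_only	-> arc_mfu	any hit
 */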
2912
2913/* a generic arc_done_func_t which you can use */
2914/* ARGSUSED */
2915void
2916arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2917{
2918 if (zio == NULL || zio->io_error == 0)
2919 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2920 VERIFY(arc_buf_remove_ref(buf, arg));
2921}
2922
2923/* a generic arc_done_func_t */
2924void
2925arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2926{
2927 arc_buf_t **bufp = arg;
2928 if (zio && zio->io_error) {
2929 VERIFY(arc_buf_remove_ref(buf, arg));
2930 *bufp = NULL;
2931 } else {
2932 *bufp = buf;
2933 ASSERT(buf->b_data);
2934 }
2935}
2936
2937static void
2938arc_read_done(zio_t *zio)
2939{
2940 arc_buf_hdr_t *hdr, *found;
2941 arc_buf_t *buf;
2942 arc_buf_t *abuf; /* buffer we're assigning to callback */
2943 kmutex_t *hash_lock;
2944 arc_callback_t *callback_list, *acb;
2945 int freeable = FALSE;
2946
2947 buf = zio->io_private;
2948 hdr = buf->b_hdr;
2949
2950 /*
2951 * The hdr was inserted into hash-table and removed from lists
2952 * prior to starting I/O. We should find this header, since
2953 * it's in the hash table, and it should be legit since it's
2954 * not possible to evict it during the I/O. The only possible
2955 * reason for it not to be found is if we were freed during the
2956 * read.
2957 */
2958 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2959 &hash_lock);
2960
2961 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2962 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2963 (found == hdr && HDR_L2_READING(hdr)));
2964
2965 hdr->b_flags &= ~ARC_L2_EVICTED;
2966 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2967 hdr->b_flags &= ~ARC_L2CACHE;
2968
2969 /* byteswap if necessary */
2970 callback_list = hdr->b_acb;
2971 ASSERT(callback_list != NULL);
2972 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2973 dmu_object_byteswap_t bswap =
2974 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2975 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2976 byteswap_uint64_array :
2977 dmu_ot_byteswap[bswap].ob_func;
2978 func(buf->b_data, hdr->b_size);
2979 }
2980
2981 arc_cksum_compute(buf, B_FALSE);
2982#ifdef illumos
2983 arc_buf_watch(buf);
2984#endif /* illumos */
2985
2986 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2987 /*
2988 * Only call arc_access on anonymous buffers. This is because
2989 * if we've issued an I/O for an evicted buffer, we've already
2990 * called arc_access (to prevent any simultaneous readers from
2991 * getting confused).
2992 */
2993 arc_access(hdr, hash_lock);
2994 }
2995
2996 /* create copies of the data buffer for the callers */
2997 abuf = buf;
2998 for (acb = callback_list; acb; acb = acb->acb_next) {
2999 if (acb->acb_done) {
3000 if (abuf == NULL) {
3001 ARCSTAT_BUMP(arcstat_duplicate_reads);
3002 abuf = arc_buf_clone(buf);
3003 }
3004 acb->acb_buf = abuf;
3005 abuf = NULL;
3006 }
3007 }
3008 hdr->b_acb = NULL;
3009 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3010 ASSERT(!HDR_BUF_AVAILABLE(hdr));
3011 if (abuf == buf) {
3012 ASSERT(buf->b_efunc == NULL);
3013 ASSERT(hdr->b_datacnt == 1);
3014 hdr->b_flags |= ARC_BUF_AVAILABLE;
3015 }
3016
3017 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3018
3019 if (zio->io_error != 0) {
3020 hdr->b_flags |= ARC_IO_ERROR;
3021 if (hdr->b_state != arc_anon)
3022 arc_change_state(arc_anon, hdr, hash_lock);
3023 if (HDR_IN_HASH_TABLE(hdr))
3024 buf_hash_remove(hdr);
3025 freeable = refcount_is_zero(&hdr->b_refcnt);
3026 }
3027
3028 /*
3029 * Broadcast before we drop the hash_lock to avoid the possibility
3030 * that the hdr (and hence the cv) might be freed before we get to
3031 * the cv_broadcast().
3032 */
3033 cv_broadcast(&hdr->b_cv);
3034
3035 if (hash_lock) {
3036 mutex_exit(hash_lock);
3037 } else {
3038 /*
3039 * This block was freed while we waited for the read to
3040 * complete. It has been removed from the hash table and
3041 * moved to the anonymous state (so that it won't show up
3042 * in the cache).
3043 */
3044 ASSERT3P(hdr->b_state, ==, arc_anon);
3045 freeable = refcount_is_zero(&hdr->b_refcnt);
3046 }
3047
3048 /* execute each callback and free its structure */
3049 while ((acb = callback_list) != NULL) {
3050 if (acb->acb_done)
3051 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3052
3053 if (acb->acb_zio_dummy != NULL) {
3054 acb->acb_zio_dummy->io_error = zio->io_error;
3055 zio_nowait(acb->acb_zio_dummy);
3056 }
3057
3058 callback_list = acb->acb_next;
3059 kmem_free(acb, sizeof (arc_callback_t));
3060 }
3061
3062 if (freeable)
3063 arc_hdr_destroy(hdr);
3064}
3065
3066/*
3067 * "Read" the block block at the specified DVA (in bp) via the
3068 * cache. If the block is found in the cache, invoke the provided
3069 * callback immediately and return. Note that the `zio' parameter
3070 * in the callback will be NULL in this case, since no IO was
3071 * required. If the block is not in the cache pass the read request
3072 * on to the spa with a substitute callback function, so that the
3073 * requested block will be added to the cache.
3074 *
3075 * If a read request arrives for a block that has a read in-progress,
3076 * either wait for the in-progress read to complete (and return the
3077 * results); or, if this is a read with a "done" func, add a record
3078 * to the read to invoke the "done" func when the read completes,
3079 * and return; or just return.
3080 *
3081 * arc_read_done() will invoke all the requested "done" functions
3082 * for readers of this block.
3083 */
3084int
3085arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3086 void *private, int priority, int zio_flags, uint32_t *arc_flags,
3087 const zbookmark_t *zb)
3088{
3089 arc_buf_hdr_t *hdr;
3090 arc_buf_t *buf = NULL;
3091 kmutex_t *hash_lock;
3092 zio_t *rzio;
3093 uint64_t guid = spa_load_guid(spa);
3094
3095top:
3096 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3097 &hash_lock);
3098 if (hdr && hdr->b_datacnt > 0) {
3099
3100 *arc_flags |= ARC_CACHED;
3101
3102 if (HDR_IO_IN_PROGRESS(hdr)) {
3103
3104 if (*arc_flags & ARC_WAIT) {
3105 cv_wait(&hdr->b_cv, hash_lock);
3106 mutex_exit(hash_lock);
3107 goto top;
3108 }
3109 ASSERT(*arc_flags & ARC_NOWAIT);
3110
3111 if (done) {
3112 arc_callback_t *acb = NULL;
3113
3114 acb = kmem_zalloc(sizeof (arc_callback_t),
3115 KM_SLEEP);
3116 acb->acb_done = done;
3117 acb->acb_private = private;
3118 if (pio != NULL)
3119 acb->acb_zio_dummy = zio_null(pio,
3120 spa, NULL, NULL, NULL, zio_flags);
3121
3122 ASSERT(acb->acb_done != NULL);
3123 acb->acb_next = hdr->b_acb;
3124 hdr->b_acb = acb;
3125 add_reference(hdr, hash_lock, private);
3126 mutex_exit(hash_lock);
3127 return (0);
3128 }
3129 mutex_exit(hash_lock);
3130 return (0);
3131 }
3132
3133 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3134
3135 if (done) {
3136 add_reference(hdr, hash_lock, private);
3137 /*
3138 * If this block is already in use, create a new
3139 * copy of the data so that we will be guaranteed
3140 * that arc_release() will always succeed.
3141 */
3142 buf = hdr->b_buf;
3143 ASSERT(buf);
3144 ASSERT(buf->b_data);
3145 if (HDR_BUF_AVAILABLE(hdr)) {
3146 ASSERT(buf->b_efunc == NULL);
3147 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3148 } else {
3149 buf = arc_buf_clone(buf);
3150 }
3151
3152 } else if (*arc_flags & ARC_PREFETCH &&
3153 refcount_count(&hdr->b_refcnt) == 0) {
3154 hdr->b_flags |= ARC_PREFETCH;
3155 }
3156 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3157 arc_access(hdr, hash_lock);
3158 if (*arc_flags & ARC_L2CACHE)
3159 hdr->b_flags |= ARC_L2CACHE;
3160 if (*arc_flags & ARC_L2COMPRESS)
3161 hdr->b_flags |= ARC_L2COMPRESS;
3162 mutex_exit(hash_lock);
3163 ARCSTAT_BUMP(arcstat_hits);
3164 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3165 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3166 data, metadata, hits);
3167
3168 if (done)
3169 done(NULL, buf, private);
3170 } else {
3171 uint64_t size = BP_GET_LSIZE(bp);
3172 arc_callback_t *acb;
3173 vdev_t *vd = NULL;
3174 uint64_t addr = 0;
3175 boolean_t devw = B_FALSE;
3176
3177 if (hdr == NULL) {
3178 /* this block is not in the cache */
3179 arc_buf_hdr_t *exists;
3180 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3181 buf = arc_buf_alloc(spa, size, private, type);
3182 hdr = buf->b_hdr;
3183 hdr->b_dva = *BP_IDENTITY(bp);
3184 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3185 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3186 exists = buf_hash_insert(hdr, &hash_lock);
3187 if (exists) {
3188 /* somebody beat us to the hash insert */
3189 mutex_exit(hash_lock);
3190 buf_discard_identity(hdr);
3191 (void) arc_buf_remove_ref(buf, private);
3192 goto top; /* restart the IO request */
3193 }
3194 /* if this is a prefetch, we don't have a reference */
3195 if (*arc_flags & ARC_PREFETCH) {
3196 (void) remove_reference(hdr, hash_lock,
3197 private);
3198 hdr->b_flags |= ARC_PREFETCH;
3199 }
3200 if (*arc_flags & ARC_L2CACHE)
3201 hdr->b_flags |= ARC_L2CACHE;
3202 if (*arc_flags & ARC_L2COMPRESS)
3203 hdr->b_flags |= ARC_L2COMPRESS;
3204 if (BP_GET_LEVEL(bp) > 0)
3205 hdr->b_flags |= ARC_INDIRECT;
3206 } else {
3207 /* this block is in the ghost cache */
3208 ASSERT(GHOST_STATE(hdr->b_state));
3209 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3210 ASSERT0(refcount_count(&hdr->b_refcnt));
3211 ASSERT(hdr->b_buf == NULL);
3212
3213 /* if this is a prefetch, we don't have a reference */
3214 if (*arc_flags & ARC_PREFETCH)
3215 hdr->b_flags |= ARC_PREFETCH;
3216 else
3217 add_reference(hdr, hash_lock, private);
3218 if (*arc_flags & ARC_L2CACHE)
3219 hdr->b_flags |= ARC_L2CACHE;
3220 if (*arc_flags & ARC_L2COMPRESS)
3221 hdr->b_flags |= ARC_L2COMPRESS;
3222 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3223 buf->b_hdr = hdr;
3224 buf->b_data = NULL;
3225 buf->b_efunc = NULL;
3226 buf->b_private = NULL;
3227 buf->b_next = NULL;
3228 hdr->b_buf = buf;
3229 ASSERT(hdr->b_datacnt == 0);
3230 hdr->b_datacnt = 1;
3231 arc_get_data_buf(buf);
3232 arc_access(hdr, hash_lock);
3233 }
3234
3235 ASSERT(!GHOST_STATE(hdr->b_state));
3236
3237 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3238 acb->acb_done = done;
3239 acb->acb_private = private;
3240
3241 ASSERT(hdr->b_acb == NULL);
3242 hdr->b_acb = acb;
3243 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3244
3245 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
3246 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3247 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3248 addr = hdr->b_l2hdr->b_daddr;
3249 /*
3250 * Lock out device removal.
3251 */
3252 if (vdev_is_dead(vd) ||
3253 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3254 vd = NULL;
3255 }
3256
3257 mutex_exit(hash_lock);
3258
3259 /*
3260 * At this point, we have a level 1 cache miss. Try again in
3261 * L2ARC if possible.
3262 */
3263 ASSERT3U(hdr->b_size, ==, size);
3264 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3265 uint64_t, size, zbookmark_t *, zb);
3266 ARCSTAT_BUMP(arcstat_misses);
3267 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3268 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3269 data, metadata, misses);
3270#ifdef _KERNEL
3271 curthread->td_ru.ru_inblock++;
3272#endif
3273
3274 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3275 /*
3276 * Read from the L2ARC if the following are true:
3277 * 1. The L2ARC vdev was previously cached.
3278 * 2. This buffer still has L2ARC metadata.
3279 * 3. This buffer isn't currently writing to the L2ARC.
3280 * 4. The L2ARC entry wasn't evicted, which may
3281 * also have invalidated the vdev.
3282			 * 5. This isn't a prefetch with l2arc_noprefetch set.
3283 */
3284 if (hdr->b_l2hdr != NULL &&
3285 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3286 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3287 l2arc_read_callback_t *cb;
3288
3289 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3290 ARCSTAT_BUMP(arcstat_l2_hits);
3291
3292 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3293 KM_SLEEP);
3294 cb->l2rcb_buf = buf;
3295 cb->l2rcb_spa = spa;
3296 cb->l2rcb_bp = *bp;
3297 cb->l2rcb_zb = *zb;
3298 cb->l2rcb_flags = zio_flags;
3299 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3300
3301 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3302 addr + size < vd->vdev_psize -
3303 VDEV_LABEL_END_SIZE);
3304
3305 /*
3306 * l2arc read. The SCL_L2ARC lock will be
3307 * released by l2arc_read_done().
3308 * Issue a null zio if the underlying buffer
3309 * was squashed to zero size by compression.
3310 */
3311 if (hdr->b_l2hdr->b_compress ==
3312 ZIO_COMPRESS_EMPTY) {
3313 rzio = zio_null(pio, spa, vd,
3314 l2arc_read_done, cb,
3315 zio_flags | ZIO_FLAG_DONT_CACHE |
3316 ZIO_FLAG_CANFAIL |
3317 ZIO_FLAG_DONT_PROPAGATE |
3318 ZIO_FLAG_DONT_RETRY);
3319 } else {
3320 rzio = zio_read_phys(pio, vd, addr,
3321 hdr->b_l2hdr->b_asize,
3322 buf->b_data, ZIO_CHECKSUM_OFF,
3323 l2arc_read_done, cb, priority,
3324 zio_flags | ZIO_FLAG_DONT_CACHE |
3325 ZIO_FLAG_CANFAIL |
3326 ZIO_FLAG_DONT_PROPAGATE |
3327 ZIO_FLAG_DONT_RETRY, B_FALSE);
3328 }
3329 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3330 zio_t *, rzio);
3331 ARCSTAT_INCR(arcstat_l2_read_bytes,
3332 hdr->b_l2hdr->b_asize);
3333
3334 if (*arc_flags & ARC_NOWAIT) {
3335 zio_nowait(rzio);
3336 return (0);
3337 }
3338
3339 ASSERT(*arc_flags & ARC_WAIT);
3340 if (zio_wait(rzio) == 0)
3341 return (0);
3342
3343 /* l2arc read error; goto zio_read() */
3344 } else {
3345 DTRACE_PROBE1(l2arc__miss,
3346 arc_buf_hdr_t *, hdr);
3347 ARCSTAT_BUMP(arcstat_l2_misses);
3348 if (HDR_L2_WRITING(hdr))
3349 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3350 spa_config_exit(spa, SCL_L2ARC, vd);
3351 }
3352 } else {
3353 if (vd != NULL)
3354 spa_config_exit(spa, SCL_L2ARC, vd);
3355 if (l2arc_ndev != 0) {
3356 DTRACE_PROBE1(l2arc__miss,
3357 arc_buf_hdr_t *, hdr);
3358 ARCSTAT_BUMP(arcstat_l2_misses);
3359 }
3360 }
3361
3362 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3363 arc_read_done, buf, priority, zio_flags, zb);
3364
3365 if (*arc_flags & ARC_WAIT)
3366 return (zio_wait(rzio));
3367
3368 ASSERT(*arc_flags & ARC_NOWAIT);
3369 zio_nowait(rzio);
3370 }
3371 return (0);
3372}
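
/*
 * Illustrative sketch (not part of the original source): a synchronous
 * caller of arc_read() using the generic arc_getbuf_func() callback
 * defined above.  The priority and zio flags shown are typical choices,
 * not requirements of the interface.
 */
#if 0	/* example only; not compiled */
static int
arc_read_sync_example(spa_t *spa, const blkptr_t *bp, const zbookmark_t *zb)
{
	arc_buf_t *abuf = NULL;
	uint32_t aflags = ARC_WAIT;
	int error;

	error = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
	if (error == 0 && abuf != NULL) {
		/* ... consume abuf->b_data (abuf->b_hdr->b_size bytes) ... */
		VERIFY(arc_buf_remove_ref(abuf, &abuf));
	}
	return (error);
}
#endif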
3373
3374void
3375arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3376{
3377 ASSERT(buf->b_hdr != NULL);
3378 ASSERT(buf->b_hdr->b_state != arc_anon);
3379 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3380 ASSERT(buf->b_efunc == NULL);
3381 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3382
3383 buf->b_efunc = func;
3384 buf->b_private = private;
3385}
3386
3387/*
3388 * Notify the arc that a block was freed, and thus will never be used again.
3389 */
3390void
3391arc_freed(spa_t *spa, const blkptr_t *bp)
3392{
3393 arc_buf_hdr_t *hdr;
3394 kmutex_t *hash_lock;
3395 uint64_t guid = spa_load_guid(spa);
3396
3397 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3398 &hash_lock);
3399 if (hdr == NULL)
3400 return;
3401 if (HDR_BUF_AVAILABLE(hdr)) {
3402 arc_buf_t *buf = hdr->b_buf;
3403 add_reference(hdr, hash_lock, FTAG);
3404 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3405 mutex_exit(hash_lock);
3406
3407 arc_release(buf, FTAG);
3408 (void) arc_buf_remove_ref(buf, FTAG);
3409 } else {
3410 mutex_exit(hash_lock);
3411 }
3412
3413}
3414
3415/*
3416 * This is used by the DMU to let the ARC know that a buffer is
3417 * being evicted, so the ARC should clean up. If this arc buf
3418 * is not yet in the evicted state, it will be put there.
3419 */
3420int
3421arc_buf_evict(arc_buf_t *buf)
3422{
3423 arc_buf_hdr_t *hdr;
3424 kmutex_t *hash_lock;
3425 arc_buf_t **bufp;
3426 list_t *list, *evicted_list;
3427 kmutex_t *lock, *evicted_lock;
3428
3429 mutex_enter(&buf->b_evict_lock);
3430 hdr = buf->b_hdr;
3431 if (hdr == NULL) {
3432 /*
3433 * We are in arc_do_user_evicts().
3434 */
3435 ASSERT(buf->b_data == NULL);
3436 mutex_exit(&buf->b_evict_lock);
3437 return (0);
3438 } else if (buf->b_data == NULL) {
3439 arc_buf_t copy = *buf; /* structure assignment */
3440 /*
3441 * We are on the eviction list; process this buffer now
3442 * but let arc_do_user_evicts() do the reaping.
3443 */
3444 buf->b_efunc = NULL;
3445 mutex_exit(&buf->b_evict_lock);
3446 VERIFY(copy.b_efunc(&copy) == 0);
3447 return (1);
3448 }
3449 hash_lock = HDR_LOCK(hdr);
3450 mutex_enter(hash_lock);
3451 hdr = buf->b_hdr;
3452 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3453
3454 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3455 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3456
3457 /*
3458 * Pull this buffer off of the hdr
3459 */
3460 bufp = &hdr->b_buf;
3461 while (*bufp != buf)
3462 bufp = &(*bufp)->b_next;
3463 *bufp = buf->b_next;
3464
3465 ASSERT(buf->b_data != NULL);
3466 arc_buf_destroy(buf, FALSE, FALSE);
3467
3468 if (hdr->b_datacnt == 0) {
3469 arc_state_t *old_state = hdr->b_state;
3470 arc_state_t *evicted_state;
3471
3472 ASSERT(hdr->b_buf == NULL);
3473 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3474
3475 evicted_state =
3476 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3477
3478 get_buf_info(hdr, old_state, &list, &lock);
3479 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock);
3480 mutex_enter(lock);
3481 mutex_enter(evicted_lock);
3482
3483 arc_change_state(evicted_state, hdr, hash_lock);
3484 ASSERT(HDR_IN_HASH_TABLE(hdr));
3485 hdr->b_flags |= ARC_IN_HASH_TABLE;
3486 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3487
3488 mutex_exit(evicted_lock);
3489 mutex_exit(lock);
3490 }
3491 mutex_exit(hash_lock);
3492 mutex_exit(&buf->b_evict_lock);
3493
3494 VERIFY(buf->b_efunc(buf) == 0);
3495 buf->b_efunc = NULL;
3496 buf->b_private = NULL;
3497 buf->b_hdr = NULL;
3498 buf->b_next = NULL;
3499 kmem_cache_free(buf_cache, buf);
3500 return (1);
3501}
3502
3503/*
3504 * Release this buffer from the cache, making it an anonymous buffer. This
3505 * must be done after a read and prior to modifying the buffer contents.
3506 * If the buffer has more than one reference, we must make
3507 * a new hdr for the buffer.
3508 */
3509void
3510arc_release(arc_buf_t *buf, void *tag)
3511{
3512 arc_buf_hdr_t *hdr;
3513 kmutex_t *hash_lock = NULL;
3514 l2arc_buf_hdr_t *l2hdr;
3515 uint64_t buf_size;
3516
3517 /*
3518 * It would be nice to assert that if it's DMU metadata (level >
3519 * 0 || it's the dnode file), then it must be syncing context.
3520 * But we don't know that information at this level.
3521 */
3522
3523 mutex_enter(&buf->b_evict_lock);
3524 hdr = buf->b_hdr;
3525
3526 /* this buffer is not on any list */
3527 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3528
3529 if (hdr->b_state == arc_anon) {
3530 /* this buffer is already released */
3531 ASSERT(buf->b_efunc == NULL);
3532 } else {
3533 hash_lock = HDR_LOCK(hdr);
3534 mutex_enter(hash_lock);
3535 hdr = buf->b_hdr;
3536 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3537 }
3538
3539 l2hdr = hdr->b_l2hdr;
3540 if (l2hdr) {
3541 mutex_enter(&l2arc_buflist_mtx);
3542 hdr->b_l2hdr = NULL;
3543 }
3544 buf_size = hdr->b_size;
3545
3546 /*
3547 * Do we have more than one buf?
3548 */
3549 if (hdr->b_datacnt > 1) {
3550 arc_buf_hdr_t *nhdr;
3551 arc_buf_t **bufp;
3552 uint64_t blksz = hdr->b_size;
3553 uint64_t spa = hdr->b_spa;
3554 arc_buf_contents_t type = hdr->b_type;
3555 uint32_t flags = hdr->b_flags;
3556
3557 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3558 /*
3559 * Pull the data off of this hdr and attach it to
3560 * a new anonymous hdr.
3561 */
3562 (void) remove_reference(hdr, hash_lock, tag);
3563 bufp = &hdr->b_buf;
3564 while (*bufp != buf)
3565 bufp = &(*bufp)->b_next;
3566 *bufp = buf->b_next;
3567 buf->b_next = NULL;
3568
3569 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3570 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3571 if (refcount_is_zero(&hdr->b_refcnt)) {
3572 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3573 ASSERT3U(*size, >=, hdr->b_size);
3574 atomic_add_64(size, -hdr->b_size);
3575 }
3576
3577 /*
3578		 * We're releasing a duplicate user data buffer; update
3579 * our statistics accordingly.
3580 */
3581 if (hdr->b_type == ARC_BUFC_DATA) {
3582 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3583 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3584 -hdr->b_size);
3585 }
3586 hdr->b_datacnt -= 1;
3587 arc_cksum_verify(buf);
3588#ifdef illumos
3589 arc_buf_unwatch(buf);
3590#endif /* illumos */
3591
3592 mutex_exit(hash_lock);
3593
3594 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3595 nhdr->b_size = blksz;
3596 nhdr->b_spa = spa;
3597 nhdr->b_type = type;
3598 nhdr->b_buf = buf;
3599 nhdr->b_state = arc_anon;
3600 nhdr->b_arc_access = 0;
3601 nhdr->b_flags = flags & ARC_L2_WRITING;
3602 nhdr->b_l2hdr = NULL;
3603 nhdr->b_datacnt = 1;
3604 nhdr->b_freeze_cksum = NULL;
3605 (void) refcount_add(&nhdr->b_refcnt, tag);
3606 buf->b_hdr = nhdr;
3607 mutex_exit(&buf->b_evict_lock);
3608 atomic_add_64(&arc_anon->arcs_size, blksz);
3609 } else {
3610 mutex_exit(&buf->b_evict_lock);
3611 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3612 ASSERT(!list_link_active(&hdr->b_arc_node));
3613 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3614 if (hdr->b_state != arc_anon)
3615 arc_change_state(arc_anon, hdr, hash_lock);
3616 hdr->b_arc_access = 0;
3617 if (hash_lock)
3618 mutex_exit(hash_lock);
3619
3620 buf_discard_identity(hdr);
3621 arc_buf_thaw(buf);
3622 }
3623 buf->b_efunc = NULL;
3624 buf->b_private = NULL;
3625
3626 if (l2hdr) {
3627 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3628 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3629 hdr->b_size, 0);
3630 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3631 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3632 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3633 mutex_exit(&l2arc_buflist_mtx);
3634 }
3635}
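/*
 * Illustrative sketch only (not part of the original file): the shape of a
 * hypothetical caller that detaches a cached buffer from its on-disk
 * identity before modifying it, as described above arc_release().  The
 * helper name and tag are made up for the example.
 */
#if 0
static void
example_modify_cached_buf(arc_buf_t *buf, void *tag)
{
	arc_release(buf, tag);		/* buffer is now anonymous */
	ASSERT(arc_released(buf));
	/* ... modify buf->b_data, then arc_write() it out later ... */
}
#endif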
3636
3637int
3638arc_released(arc_buf_t *buf)
3639{
3640 int released;
3641
3642 mutex_enter(&buf->b_evict_lock);
3643 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3644 mutex_exit(&buf->b_evict_lock);
3645 return (released);
3646}
3647
3648int
3649arc_has_callback(arc_buf_t *buf)
3650{
3651 int callback;
3652
3653 mutex_enter(&buf->b_evict_lock);
3654 callback = (buf->b_efunc != NULL);
3655 mutex_exit(&buf->b_evict_lock);
3656 return (callback);
3657}
3658
3659#ifdef ZFS_DEBUG
3660int
3661arc_referenced(arc_buf_t *buf)
3662{
3663 int referenced;
3664
3665 mutex_enter(&buf->b_evict_lock);
3666 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3667 mutex_exit(&buf->b_evict_lock);
3668 return (referenced);
3669}
3670#endif
3671
3672static void
3673arc_write_ready(zio_t *zio)
3674{
3675 arc_write_callback_t *callback = zio->io_private;
3676 arc_buf_t *buf = callback->awcb_buf;
3677 arc_buf_hdr_t *hdr = buf->b_hdr;
3678
3679 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3680 callback->awcb_ready(zio, buf, callback->awcb_private);
3681
3682 /*
3683 * If the IO is already in progress, then this is a re-write
3684 * attempt, so we need to thaw and re-compute the cksum.
3685 * It is the responsibility of the callback to handle the
3686 * accounting for any re-write attempt.
3687 */
3688 if (HDR_IO_IN_PROGRESS(hdr)) {
3689 mutex_enter(&hdr->b_freeze_lock);
3690 if (hdr->b_freeze_cksum != NULL) {
3691 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3692 hdr->b_freeze_cksum = NULL;
3693 }
3694 mutex_exit(&hdr->b_freeze_lock);
3695 }
3696 arc_cksum_compute(buf, B_FALSE);
3697 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3698}
3699
3700static void
3701arc_write_done(zio_t *zio)
3702{
3703 arc_write_callback_t *callback = zio->io_private;
3704 arc_buf_t *buf = callback->awcb_buf;
3705 arc_buf_hdr_t *hdr = buf->b_hdr;
3706
3707 ASSERT(hdr->b_acb == NULL);
3708
3709 if (zio->io_error == 0) {
3710 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3711 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3712 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3713 } else {
3714 ASSERT(BUF_EMPTY(hdr));
3715 }
3716
3717 /*
3718 * If the block to be written was all-zero, we may have
3719 * compressed it away. In this case no write was performed
3720 * so there will be no dva/birth/checksum. The buffer must
3721 * therefore remain anonymous (and uncached).
3722 */
3723 if (!BUF_EMPTY(hdr)) {
3724 arc_buf_hdr_t *exists;
3725 kmutex_t *hash_lock;
3726
3727 ASSERT(zio->io_error == 0);
3728
3729 arc_cksum_verify(buf);
3730
3731 exists = buf_hash_insert(hdr, &hash_lock);
3732 if (exists) {
3733 /*
3734 * This can only happen if we overwrite for
3735 * sync-to-convergence, because we remove
3736 * buffers from the hash table when we arc_free().
3737 */
3738 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3739 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3740 panic("bad overwrite, hdr=%p exists=%p",
3741 (void *)hdr, (void *)exists);
3742 ASSERT(refcount_is_zero(&exists->b_refcnt));
3743 arc_change_state(arc_anon, exists, hash_lock);
3744 mutex_exit(hash_lock);
3745 arc_hdr_destroy(exists);
3746 exists = buf_hash_insert(hdr, &hash_lock);
3747 ASSERT3P(exists, ==, NULL);
3748 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3749 /* nopwrite */
3750 ASSERT(zio->io_prop.zp_nopwrite);
3751 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3752 panic("bad nopwrite, hdr=%p exists=%p",
3753 (void *)hdr, (void *)exists);
3754 } else {
3755 /* Dedup */
3756 ASSERT(hdr->b_datacnt == 1);
3757 ASSERT(hdr->b_state == arc_anon);
3758 ASSERT(BP_GET_DEDUP(zio->io_bp));
3759 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3760 }
3761 }
3762 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3763 /* if it's not anon, we are doing a scrub */
3764 if (!exists && hdr->b_state == arc_anon)
3765 arc_access(hdr, hash_lock);
3766 mutex_exit(hash_lock);
3767 } else {
3768 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3769 }
3770
3771 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3772 callback->awcb_done(zio, buf, callback->awcb_private);
3773
3774 kmem_free(callback, sizeof (arc_write_callback_t));
3775}
3776
3777zio_t *
3778arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3779 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3780 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *done,
3781 void *private, int priority, int zio_flags, const zbookmark_t *zb)
3782{
3783 arc_buf_hdr_t *hdr = buf->b_hdr;
3784 arc_write_callback_t *callback;
3785 zio_t *zio;
3786
3787 ASSERT(ready != NULL);
3788 ASSERT(done != NULL);
3789 ASSERT(!HDR_IO_ERROR(hdr));
3790 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3791 ASSERT(hdr->b_acb == NULL);
3792 if (l2arc)
3793 hdr->b_flags |= ARC_L2CACHE;
3794 if (l2arc_compress)
3795 hdr->b_flags |= ARC_L2COMPRESS;
3796 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3797 callback->awcb_ready = ready;
3798 callback->awcb_done = done;
3799 callback->awcb_private = private;
3800 callback->awcb_buf = buf;
3801
3802 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3803 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3804
3805 return (zio);
3806}
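/*
 * Illustrative sketch only (not part of the original file): roughly how a
 * hypothetical caller would issue an ARC write through the interface above.
 * The example_* callback names and the argument variables are assumptions
 * made for the example, not taken from this file.
 */
#if 0
static zio_t *
example_issue_arc_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, const zio_prop_t *zp, void *cb_arg, const zbookmark_t *zb)
{
	/* example_ready_cb/example_done_cb are hypothetical arc_done_func_t's */
	return (arc_write(pio, spa, txg, bp, buf, B_TRUE /* l2arc */,
	    B_FALSE /* l2arc_compress */, zp, example_ready_cb,
	    example_done_cb, cb_arg, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_CANFAIL, zb));
}
#endif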
3807
3808static int
3809arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3810{
3811#ifdef _KERNEL
3812 uint64_t available_memory =
3813 ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
3814 static uint64_t page_load = 0;
3815 static uint64_t last_txg = 0;
3816
3817#ifdef sun
3818#if defined(__i386)
3819 available_memory =
3820 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3821#endif
3822#endif /* sun */
3823 if (available_memory >= zfs_write_limit_max)
3824 return (0);
3825
3826 if (txg > last_txg) {
3827 last_txg = txg;
3828 page_load = 0;
3829 }
3830 /*
3831	 * If we are in pageout, we know that memory is already tight and
3832	 * the arc is already going to be evicting, so we just want to
3833 * continue to let page writes occur as quickly as possible.
3834 */
3835 if (curproc == pageproc) {
3836 if (page_load > available_memory / 4)
3837 return (SET_ERROR(ERESTART));
3838 /* Note: reserve is inflated, so we deflate */
3839 page_load += reserve / 8;
3840 return (0);
3841 } else if (page_load > 0 && arc_reclaim_needed()) {
3842 /* memory is low, delay before restarting */
3843 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3844 return (SET_ERROR(EAGAIN));
3845 }
3846 page_load = 0;
3847
3848 if (arc_size > arc_c_min) {
3849 uint64_t evictable_memory =
3850 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3851 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3852 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3853 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3854 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3855 }
3856
3857 if (inflight_data > available_memory / 4) {
3858 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3859 return (SET_ERROR(ERESTART));
3860 }
3861#endif
3862 return (0);
3863}
3864
3865void
3866arc_tempreserve_clear(uint64_t reserve)
3867{
3868 atomic_add_64(&arc_tempreserve, -reserve);
3869 ASSERT((int64_t)arc_tempreserve >= 0);
3870}
3871
3872int
3873arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3874{
3875 int error;
3876 uint64_t anon_size;
3877
3878#ifdef ZFS_DEBUG
3879 /*
3880 * Once in a while, fail for no reason. Everything should cope.
3881 */
3882 if (spa_get_random(10000) == 0) {
3883 dprintf("forcing random failure\n");
3884 return (SET_ERROR(ERESTART));
3885 }
3886#endif
3887 if (reserve > arc_c/4 && !arc_no_grow)
3888 arc_c = MIN(arc_c_max, reserve * 4);
3889 if (reserve > arc_c)
3890 return (SET_ERROR(ENOMEM));
3891
3892 /*
3893 * Don't count loaned bufs as in flight dirty data to prevent long
3894 * network delays from blocking transactions that are ready to be
3895 * assigned to a txg.
3896 */
3897 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3898
3899 /*
3900 * Writes will, almost always, require additional memory allocations
3901 * in order to compress/encrypt/etc the data. We therefore need to
3902 * make sure that there is sufficient available memory for this.
3903 */
3904 if (error = arc_memory_throttle(reserve, anon_size, txg))
3905 return (error);
3906
3907 /*
3908 * Throttle writes when the amount of dirty data in the cache
3909 * gets too large. We try to keep the cache less than half full
3910 * of dirty blocks so that our sync times don't grow too large.
3911 * Note: if two requests come in concurrently, we might let them
3912 * both succeed, when one of them should fail. Not a huge deal.
3913 */
3914
3915 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3916 anon_size > arc_c / 4) {
3917 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3918 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3919 arc_tempreserve>>10,
3920 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3921 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3922 reserve>>10, arc_c>>10);
3923 return (SET_ERROR(ERESTART));
3924 }
3925 atomic_add_64(&arc_tempreserve, reserve);
3926 return (0);
3927}
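/*
 * Worked example of the dirty-data throttle above (illustrative numbers
 * only): with arc_c = 8GB, a reservation starts failing with ERESTART once
 * reserve + arc_tempreserve + anon_size exceeds 4GB (arc_c / 2) while
 * anonymous dirty data alone exceeds 2GB (arc_c / 4).
 */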
3928
3929static kmutex_t arc_lowmem_lock;
3930#ifdef _KERNEL
3931static eventhandler_tag arc_event_lowmem = NULL;
3932
3933static void
3934arc_lowmem(void *arg __unused, int howto __unused)
3935{
3936
3937 /* Serialize access via arc_lowmem_lock. */
3938 mutex_enter(&arc_lowmem_lock);
3939 mutex_enter(&arc_reclaim_thr_lock);
3940 needfree = 1;
3941 cv_signal(&arc_reclaim_thr_cv);
3942
3943 /*
3944 * It is unsafe to block here in arbitrary threads, because we can come
3945 * here from ARC itself and may hold ARC locks and thus risk a deadlock
3946 * with ARC reclaim thread.
3947 */
3948 if (curproc == pageproc) {
3949 while (needfree)
3950 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
3951 }
3952 mutex_exit(&arc_reclaim_thr_lock);
3953 mutex_exit(&arc_lowmem_lock);
3954}
3955#endif
3956
3957void
3958arc_init(void)
3959{
3960 int i, prefetch_tunable_set = 0;
3961
3962 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3963 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3964 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
3965
3966 /* Convert seconds to clock ticks */
3967 arc_min_prefetch_lifespan = 1 * hz;
3968
3969 /* Start out with 1/8 of all memory */
3970 arc_c = kmem_size() / 8;
3971
3972#ifdef sun
3973#ifdef _KERNEL
3974 /*
3975 * On architectures where the physical memory can be larger
3976 * than the addressable space (intel in 32-bit mode), we may
3977 * need to limit the cache to 1/8 of VM size.
3978 */
3979 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3980#endif
3981#endif /* sun */
3982 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
3983 arc_c_min = MAX(arc_c / 4, 64<<18);
3984	/* set max to 5/8 of all memory, or all but 1GB, whichever is more */
3985 if (arc_c * 8 >= 1<<30)
3986 arc_c_max = (arc_c * 8) - (1<<30);
3987 else
3988 arc_c_max = arc_c_min;
3989 arc_c_max = MAX(arc_c * 5, arc_c_max);
3990
3991#ifdef _KERNEL
3992 /*
3993 * Allow the tunables to override our calculations if they are
3994	 * reasonable (i.e. over 16MB)
3995 */
3996 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
3997 arc_c_max = zfs_arc_max;
3998 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
3999 arc_c_min = zfs_arc_min;
4000#endif
4001
4002 arc_c = arc_c_max;
4003 arc_p = (arc_c >> 1);
4004
4005 /* limit meta-data to 1/4 of the arc capacity */
4006 arc_meta_limit = arc_c_max / 4;
4007
4008 /* Allow the tunable to override if it is reasonable */
4009 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4010 arc_meta_limit = zfs_arc_meta_limit;
4011
4012 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4013 arc_c_min = arc_meta_limit / 2;
4014
4015 if (zfs_arc_grow_retry > 0)
4016 arc_grow_retry = zfs_arc_grow_retry;
4017
4018 if (zfs_arc_shrink_shift > 0)
4019 arc_shrink_shift = zfs_arc_shrink_shift;
4020
4021 if (zfs_arc_p_min_shift > 0)
4022 arc_p_min_shift = zfs_arc_p_min_shift;
4023
4024	/* if kmem_flags are set, let's try to use less memory */
4025 if (kmem_debugging())
4026 arc_c = arc_c / 2;
4027 if (arc_c < arc_c_min)
4028 arc_c = arc_c_min;
4029
4030 zfs_arc_min = arc_c_min;
4031 zfs_arc_max = arc_c_max;
4032
4033 arc_anon = &ARC_anon;
4034 arc_mru = &ARC_mru;
4035 arc_mru_ghost = &ARC_mru_ghost;
4036 arc_mfu = &ARC_mfu;
4037 arc_mfu_ghost = &ARC_mfu_ghost;
4038 arc_l2c_only = &ARC_l2c_only;
4039 arc_size = 0;
4040
4041 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4042 mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4043 NULL, MUTEX_DEFAULT, NULL);
4044 mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4045 NULL, MUTEX_DEFAULT, NULL);
4046 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4047 NULL, MUTEX_DEFAULT, NULL);
4048 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4049 NULL, MUTEX_DEFAULT, NULL);
4050 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4051 NULL, MUTEX_DEFAULT, NULL);
4052 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4053 NULL, MUTEX_DEFAULT, NULL);
4054
4055 list_create(&arc_mru->arcs_lists[i],
4056 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4057 list_create(&arc_mru_ghost->arcs_lists[i],
4058 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4059 list_create(&arc_mfu->arcs_lists[i],
4060 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4061 list_create(&arc_mfu_ghost->arcs_lists[i],
4062 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4065 list_create(&arc_l2c_only->arcs_lists[i],
4066 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4067 }
4068
4069 buf_init();
4070
4071 arc_thread_exit = 0;
4072 arc_eviction_list = NULL;
4073 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4074 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4075
4076 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4077 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4078
4079 if (arc_ksp != NULL) {
4080 arc_ksp->ks_data = &arc_stats;
4081 kstat_install(arc_ksp);
4082 }
4083
4084 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4085 TS_RUN, minclsyspri);
4086
4087#ifdef _KERNEL
4088 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4089 EVENTHANDLER_PRI_FIRST);
4090#endif
4091
4092 arc_dead = FALSE;
4093 arc_warm = B_FALSE;
4094
4095 if (zfs_write_limit_max == 0)
4096 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
4097 else
4098 zfs_write_limit_shift = 0;
4099 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
4100
4101#ifdef _KERNEL
4102 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4103 prefetch_tunable_set = 1;
4104
4105#ifdef __i386__
4106 if (prefetch_tunable_set == 0) {
4107 printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4108 "-- to enable,\n");
4109 printf(" add \"vfs.zfs.prefetch_disable=0\" "
4110 "to /boot/loader.conf.\n");
4111 zfs_prefetch_disable = 1;
4112 }
4113#else
4114 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4115 prefetch_tunable_set == 0) {
4116 printf("ZFS NOTICE: Prefetch is disabled by default if less "
4117 "than 4GB of RAM is present;\n"
4118 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
4119 "to /boot/loader.conf.\n");
4120 zfs_prefetch_disable = 1;
4121 }
4122#endif
4123 /* Warn about ZFS memory and address space requirements. */
4124 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4125 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4126 "expect unstable behavior.\n");
4127 }
4128 if (kmem_size() < 512 * (1 << 20)) {
4129 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4130 "expect unstable behavior.\n");
4131 printf(" Consider tuning vm.kmem_size and "
4132 "vm.kmem_size_max\n");
4133 printf(" in /boot/loader.conf.\n");
4134 }
4135#endif
4136}
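/*
 * Example only: the sizing computed above can be overridden at boot via
 * /boot/loader.conf, e.g. (values are illustrative, not recommendations):
 *
 *	vfs.zfs.arc_max="4G"
 *	vfs.zfs.arc_min="512M"
 *	vfs.zfs.arc_meta_limit="1G"
 *	vfs.zfs.prefetch_disable="0"
 */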
4137
4138void
4139arc_fini(void)
4140{
4141 int i;
4142
4143 mutex_enter(&arc_reclaim_thr_lock);
4144 arc_thread_exit = 1;
4145 cv_signal(&arc_reclaim_thr_cv);
4146 while (arc_thread_exit != 0)
4147 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4148 mutex_exit(&arc_reclaim_thr_lock);
4149
4150 arc_flush(NULL);
4151
4152 arc_dead = TRUE;
4153
4154 if (arc_ksp != NULL) {
4155 kstat_delete(arc_ksp);
4156 arc_ksp = NULL;
4157 }
4158
4159 mutex_destroy(&arc_eviction_mtx);
4160 mutex_destroy(&arc_reclaim_thr_lock);
4161 cv_destroy(&arc_reclaim_thr_cv);
4162
4163 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4164 list_destroy(&arc_mru->arcs_lists[i]);
4165 list_destroy(&arc_mru_ghost->arcs_lists[i]);
4166 list_destroy(&arc_mfu->arcs_lists[i]);
4167 list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4168 list_destroy(&arc_l2c_only->arcs_lists[i]);
4169
4170 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4171 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4172 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4173 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4174 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4175 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4176 }
4177
4178 mutex_destroy(&zfs_write_limit_lock);
4179
4180 buf_fini();
4181
4182 ASSERT(arc_loaned_bytes == 0);
4183
4184 mutex_destroy(&arc_lowmem_lock);
4185#ifdef _KERNEL
4186 if (arc_event_lowmem != NULL)
4187 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4188#endif
4189}
4190
4191/*
4192 * Level 2 ARC
4193 *
4194 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4195 * It uses dedicated storage devices to hold cached data, which are populated
4196 * using large infrequent writes. The main role of this cache is to boost
4197 * the performance of random read workloads. The intended L2ARC devices
4198 * include short-stroked disks, solid state disks, and other media with
4199 * substantially faster read latency than disk.
4200 *
4201 * +-----------------------+
4202 * | ARC |
4203 * +-----------------------+
4204 * | ^ ^
4205 * | | |
4206 * l2arc_feed_thread() arc_read()
4207 * | | |
4208 * | l2arc read |
4209 * V | |
4210 * +---------------+ |
4211 * | L2ARC | |
4212 * +---------------+ |
4213 * | ^ |
4214 * l2arc_write() | |
4215 * | | |
4216 * V | |
4217 * +-------+ +-------+
4218 * | vdev | | vdev |
4219 * | cache | | cache |
4220 * +-------+ +-------+
4221 * +=========+ .-----.
4222 * : L2ARC : |-_____-|
4223 * : devices : | Disks |
4224 * +=========+ `-_____-'
4225 *
4226 * Read requests are satisfied from the following sources, in order:
4227 *
4228 * 1) ARC
4229 * 2) vdev cache of L2ARC devices
4230 * 3) L2ARC devices
4231 * 4) vdev cache of disks
4232 * 5) disks
4233 *
4234 * Some L2ARC device types exhibit extremely slow write performance.
4235 * To accommodate this, there are some significant differences between
4236 * the L2ARC and traditional cache design:
4237 *
4238 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4239 * the ARC behave as usual, freeing buffers and placing headers on ghost
4240 * lists. The ARC does not send buffers to the L2ARC during eviction as
4241 * this would add inflated write latencies for all ARC memory pressure.
4242 *
4243 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4244 * It does this by periodically scanning buffers from the eviction-end of
4245 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4246 * not already there. It scans until a headroom of buffers is satisfied,
4247 * which itself is a buffer for ARC eviction. If a compressible buffer is
4248 * found during scanning and selected for writing to an L2ARC device, we
4249 * temporarily boost scanning headroom during the next scan cycle to make
4250 * sure we adapt to compression effects (which might significantly reduce
4251 * the data volume we write to L2ARC). The thread that does this is
4252 * l2arc_feed_thread(), illustrated below; example sizes are included to
4253 * provide a better sense of ratio than this diagram:
4254 *
4255 * head --> tail
4256 * +---------------------+----------+
4257 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4258 * +---------------------+----------+ | o L2ARC eligible
4259 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
4260 * +---------------------+----------+ |
4261 * 15.9 Gbytes ^ 32 Mbytes |
4262 * headroom |
4263 * l2arc_feed_thread()
4264 * |
4265 * l2arc write hand <--[oooo]--'
4266 * | 8 Mbyte
4267 * | write max
4268 * V
4269 * +==============================+
4270 * L2ARC dev |####|#|###|###| |####| ... |
4271 * +==============================+
4272 * 32 Gbytes
4273 *
4274 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4275 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4276 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
4277 * safe to say that this is an uncommon case, since buffers at the end of
4278 * the ARC lists have moved there due to inactivity.
4279 *
4280 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4281 * then the L2ARC simply misses copying some buffers. This serves as a
4282 * pressure valve to prevent heavy read workloads from both stalling the ARC
4283 * with waits and clogging the L2ARC with writes. This also helps prevent
4284 * the potential for the L2ARC to churn if it attempts to cache content too
4285 * quickly, such as during backups of the entire pool.
4286 *
4287 * 5. After system boot and before the ARC has filled main memory, there are
4288 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4289 * lists can remain mostly static. Instead of searching from the tail of these
4290 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4291 * for eligible buffers, greatly increasing its chance of finding them.
4292 *
4293 * The L2ARC device write speed is also boosted during this time so that
4294 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4295 * there are no L2ARC reads, and no fear of degrading read performance
4296 * through increased writes.
4297 *
4298 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4299 * the vdev queue can aggregate them into larger and fewer writes. Each
4300 * device is written to in a rotor fashion, sweeping writes through
4301 * available space then repeating.
4302 *
4303 * 7. The L2ARC does not store dirty content. It never needs to flush
4304 * write buffers back to disk based storage.
4305 *
4306 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4307 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4308 *
4309 * The performance of the L2ARC can be tweaked by a number of tunables, which
4310 * may be necessary for different workloads:
4311 *
4312 * l2arc_write_max max write bytes per interval
4313 * l2arc_write_boost extra write bytes during device warmup
4314 * l2arc_noprefetch skip caching prefetched buffers
4315 * l2arc_headroom number of max device writes to precache
4316 * l2arc_headroom_boost when we find compressed buffers during ARC
4317 * scanning, we multiply headroom by this
4318 * percentage factor for the next scan cycle,
4319 * since more compressed buffers are likely to
4320 * be present
4321 * l2arc_feed_secs seconds between L2ARC writing
4322 *
4323 * Tunables may be removed or added as future performance improvements are
4324 * integrated, and also may become zpool properties.
4325 *
4326 * There are three key functions that control how the L2ARC warms up:
4327 *
4328 * l2arc_write_eligible() check if a buffer is eligible to cache
4329 * l2arc_write_size() calculate how much to write
4330 * l2arc_write_interval() calculate sleep delay between writes
4331 *
4332 * These three functions determine what to write, how much, and how quickly
4333 * to send writes.
4334 */
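/*
 * The sketch below is illustrative only (not part of the original file).
 * It strings the three functions named above into the shape of a single
 * feed cycle to make the division of labor concrete; the real
 * l2arc_feed_thread() additionally handles cv_timedwait() pacing, the spa
 * config lock taken by l2arc_dev_get_next(), and the l2arc_evict() step
 * that clears space ahead of the write hand.
 */
#if 0
static void
example_feed_loop(void)
{
	boolean_t headroom_boost = B_FALSE;

	for (;;) {
		l2arc_dev_t *dev;
		uint64_t size, wrote;
		clock_t begin = ddi_get_lbolt();

		if ((dev = l2arc_dev_get_next()) == NULL)
			break;			/* the real thread just waits */

		size = l2arc_write_size();	/* how much to write */
		wrote = l2arc_write_buffers(dev->l2ad_spa, dev, size,
		    &headroom_boost);		/* what to write */
		(void) l2arc_write_interval(begin, size, wrote); /* how soon */
	}
}
#endif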
4335
4336static boolean_t
4337l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4338{
4339 /*
4340 * A buffer is *not* eligible for the L2ARC if it:
4341 * 1. belongs to a different spa.
4342 * 2. is already cached on the L2ARC.
4343 * 3. has an I/O in progress (it may be an incomplete read).
4344 * 4. is flagged not eligible (zfs property).
4345 */
4346 if (ab->b_spa != spa_guid) {
4347 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4348 return (B_FALSE);
4349 }
4350 if (ab->b_l2hdr != NULL) {
4351 ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4352 return (B_FALSE);
4353 }
4354 if (HDR_IO_IN_PROGRESS(ab)) {
4355 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4356 return (B_FALSE);
4357 }
4358 if (!HDR_L2CACHE(ab)) {
4359 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4360 return (B_FALSE);
4361 }
4362
4363 return (B_TRUE);
4364}
4365
4366static uint64_t
4367l2arc_write_size(void)
4368{
4369 uint64_t size;
4370
4371 /*
4372 * Make sure our globals have meaningful values in case the user
4373 * altered them.
4374 */
4375 size = l2arc_write_max;
4376 if (size == 0) {
4377 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4378 "be greater than zero, resetting it to the default (%d)",
4379 L2ARC_WRITE_SIZE);
4380 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4381 }
4382
4383 if (arc_warm == B_FALSE)
4384 size += l2arc_write_boost;
4385
4386 return (size);
4387
4388}
4389
4390static clock_t
4391l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4392{
4393 clock_t interval, next, now;
4394
4395 /*
4396 * If the ARC lists are busy, increase our write rate; if the
4397 * lists are stale, idle back. This is achieved by checking
4398 * how much we previously wrote - if it was more than half of
4399 * what we wanted, schedule the next write much sooner.
4400 */
4401 if (l2arc_feed_again && wrote > (wanted / 2))
4402 interval = (hz * l2arc_feed_min_ms) / 1000;
4403 else
4404 interval = hz * l2arc_feed_secs;
4405
4406 now = ddi_get_lbolt();
4407 next = MAX(now, MIN(now + interval, began + interval));
4408
4409 return (next);
4410}
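/*
 * Worked example (assumed tunable values, for illustration only): with
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200, a pass that wrote more
 * than half of what it wanted schedules the next write about 200ms out
 * (hz / 5 ticks), while a stale pass waits a full second (hz ticks).
 */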
4411
4412static void
4413l2arc_hdr_stat_add(void)
4414{
4415 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4416 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4417}
4418
4419static void
4420l2arc_hdr_stat_remove(void)
4421{
4422 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4423 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4424}
4425
4426/*
4427 * Cycle through L2ARC devices. This is how L2ARC load balances.
4428 * If a device is returned, this also returns holding the spa config lock.
4429 */
4430static l2arc_dev_t *
4431l2arc_dev_get_next(void)
4432{
4433 l2arc_dev_t *first, *next = NULL;
4434
4435 /*
4436 * Lock out the removal of spas (spa_namespace_lock), then removal
4437 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4438 * both locks will be dropped and a spa config lock held instead.
4439 */
4440 mutex_enter(&spa_namespace_lock);
4441 mutex_enter(&l2arc_dev_mtx);
4442
4443 /* if there are no vdevs, there is nothing to do */
4444 if (l2arc_ndev == 0)
4445 goto out;
4446
4447 first = NULL;
4448 next = l2arc_dev_last;
4449 do {
4450 /* loop around the list looking for a non-faulted vdev */
4451 if (next == NULL) {
4452 next = list_head(l2arc_dev_list);
4453 } else {
4454 next = list_next(l2arc_dev_list, next);
4455 if (next == NULL)
4456 next = list_head(l2arc_dev_list);
4457 }
4458
4459 /* if we have come back to the start, bail out */
4460 if (first == NULL)
4461 first = next;
4462 else if (next == first)
4463 break;
4464
4465 } while (vdev_is_dead(next->l2ad_vdev));
4466
4467 /* if we were unable to find any usable vdevs, return NULL */
4468 if (vdev_is_dead(next->l2ad_vdev))
4469 next = NULL;
4470
4471 l2arc_dev_last = next;
4472
4473out:
4474 mutex_exit(&l2arc_dev_mtx);
4475
4476 /*
4477 * Grab the config lock to prevent the 'next' device from being
4478 * removed while we are writing to it.
4479 */
4480 if (next != NULL)
4481 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4482 mutex_exit(&spa_namespace_lock);
4483
4484 return (next);
4485}
4486
4487/*
4488 * Free buffers that were tagged for destruction.
4489 */
4490static void
4491l2arc_do_free_on_write()
4492{
4493 list_t *buflist;
4494 l2arc_data_free_t *df, *df_prev;
4495
4496 mutex_enter(&l2arc_free_on_write_mtx);
4497 buflist = l2arc_free_on_write;
4498
4499 for (df = list_tail(buflist); df; df = df_prev) {
4500 df_prev = list_prev(buflist, df);
4501 ASSERT(df->l2df_data != NULL);
4502 ASSERT(df->l2df_func != NULL);
4503 df->l2df_func(df->l2df_data, df->l2df_size);
4504 list_remove(buflist, df);
4505 kmem_free(df, sizeof (l2arc_data_free_t));
4506 }
4507
4508 mutex_exit(&l2arc_free_on_write_mtx);
4509}
4510
4511/*
4512 * A write to a cache device has completed. Update all headers to allow
4513 * reads from these buffers to begin.
4514 */
4515static void
4516l2arc_write_done(zio_t *zio)
4517{
4518 l2arc_write_callback_t *cb;
4519 l2arc_dev_t *dev;
4520 list_t *buflist;
4521 arc_buf_hdr_t *head, *ab, *ab_prev;
4522 l2arc_buf_hdr_t *abl2;
4523 kmutex_t *hash_lock;
4524
4525 cb = zio->io_private;
4526 ASSERT(cb != NULL);
4527 dev = cb->l2wcb_dev;
4528 ASSERT(dev != NULL);
4529 head = cb->l2wcb_head;
4530 ASSERT(head != NULL);
4531 buflist = dev->l2ad_buflist;
4532 ASSERT(buflist != NULL);
4533 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4534 l2arc_write_callback_t *, cb);
4535
4536 if (zio->io_error != 0)
4537 ARCSTAT_BUMP(arcstat_l2_writes_error);
4538
4539 mutex_enter(&l2arc_buflist_mtx);
4540
4541 /*
4542 * All writes completed, or an error was hit.
4543 */
4544 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4545 ab_prev = list_prev(buflist, ab);
4546
4547 hash_lock = HDR_LOCK(ab);
4548 if (!mutex_tryenter(hash_lock)) {
4549 /*
4550 * This buffer misses out. It may be in a stage
4551 * of eviction. Its ARC_L2_WRITING flag will be
4552 * left set, denying reads to this buffer.
4553 */
4554 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4555 continue;
4556 }
4557
4558 abl2 = ab->b_l2hdr;
4559
4560 /*
4561 * Release the temporary compressed buffer as soon as possible.
4562 */
4563 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4564 l2arc_release_cdata_buf(ab);
4565
4566 if (zio->io_error != 0) {
4567 /*
4568 * Error - drop L2ARC entry.
4569 */
4570 list_remove(buflist, ab);
4571 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4572 ab->b_l2hdr = NULL;
4573 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4574 ab->b_size, 0);
4575 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4576 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4577 }
4578
4579 /*
4580 * Allow ARC to begin reads to this L2ARC entry.
4581 */
4582 ab->b_flags &= ~ARC_L2_WRITING;
4583
4584 mutex_exit(hash_lock);
4585 }
4586
4587 atomic_inc_64(&l2arc_writes_done);
4588 list_remove(buflist, head);
4589 kmem_cache_free(hdr_cache, head);
4590 mutex_exit(&l2arc_buflist_mtx);
4591
4592 l2arc_do_free_on_write();
4593
4594 kmem_free(cb, sizeof (l2arc_write_callback_t));
4595}
4596
4597/*
4598 * A read to a cache device completed. Validate buffer contents before
4599 * handing over to the regular ARC routines.
4600 */
4601static void
4602l2arc_read_done(zio_t *zio)
4603{
4604 l2arc_read_callback_t *cb;
4605 arc_buf_hdr_t *hdr;
4606 arc_buf_t *buf;
4607 kmutex_t *hash_lock;
4608 int equal;
4609
4610 ASSERT(zio->io_vd != NULL);
4611 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4612
4613 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4614
4615 cb = zio->io_private;
4616 ASSERT(cb != NULL);
4617 buf = cb->l2rcb_buf;
4618 ASSERT(buf != NULL);
4619
4620 hash_lock = HDR_LOCK(buf->b_hdr);
4621 mutex_enter(hash_lock);
4622 hdr = buf->b_hdr;
4623 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4624
4625 /*
4626 * If the buffer was compressed, decompress it first.
4627 */
4628 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4629 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4630 ASSERT(zio->io_data != NULL);
4631
4632 /*
4633 * Check this survived the L2ARC journey.
4634 */
4635 equal = arc_cksum_equal(buf);
4636 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4637 mutex_exit(hash_lock);
4638 zio->io_private = buf;
4639 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4640 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4641 arc_read_done(zio);
4642 } else {
4643 mutex_exit(hash_lock);
4644 /*
4645 * Buffer didn't survive caching. Increment stats and
4646 * reissue to the original storage device.
4647 */
4648 if (zio->io_error != 0) {
4649 ARCSTAT_BUMP(arcstat_l2_io_error);
4650 } else {
4651 zio->io_error = SET_ERROR(EIO);
4652 }
4653 if (!equal)
4654 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4655
4656 /*
4657 * If there's no waiter, issue an async i/o to the primary
4658 * storage now. If there *is* a waiter, the caller must
4659 * issue the i/o in a context where it's OK to block.
4660 */
4661 if (zio->io_waiter == NULL) {
4662 zio_t *pio = zio_unique_parent(zio);
4663
4664 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4665
4666 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4667 buf->b_data, zio->io_size, arc_read_done, buf,
4668 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4669 }
4670 }
4671
4672 kmem_free(cb, sizeof (l2arc_read_callback_t));
4673}
4674
4675/*
4676 * This is the list priority from which the L2ARC will search for pages to
4677 * cache. This is used within loops (0..3) to cycle through lists in the
4678 * desired order. This order can have a significant effect on cache
4679 * performance.
4680 *
4681 * Currently the metadata lists are hit first, MFU then MRU, followed by
4682 * the data lists. This function returns a locked list, and also returns
4683 * the lock pointer.
4684 */
4685static list_t *
4686l2arc_list_locked(int list_num, kmutex_t **lock)
4687{
4688 list_t *list = NULL;
4689 int idx;
4690
4691 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4692
4693 if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4694 idx = list_num;
4695 list = &arc_mfu->arcs_lists[idx];
4696 *lock = ARCS_LOCK(arc_mfu, idx);
4697 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4698 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4699 list = &arc_mru->arcs_lists[idx];
4700 *lock = ARCS_LOCK(arc_mru, idx);
4701 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4702 ARC_BUFC_NUMDATALISTS)) {
4703 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4704 list = &arc_mfu->arcs_lists[idx];
4705 *lock = ARCS_LOCK(arc_mfu, idx);
4706 } else {
4707 idx = list_num - ARC_BUFC_NUMLISTS;
4708 list = &arc_mru->arcs_lists[idx];
4709 *lock = ARCS_LOCK(arc_mru, idx);
4710 }
4711
4712 ASSERT(!(MUTEX_HELD(*lock)));
4713 mutex_enter(*lock);
4714 return (list);
4715}
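/*
 * Concrete mapping implied by the branches above, assuming the default of
 * 16 metadata and 16 data lists per state (illustration only):
 *
 *	list_num  0..15  ->  arc_mfu metadata lists (idx  0..15)
 *	list_num 16..31  ->  arc_mru metadata lists (idx  0..15)
 *	list_num 32..47  ->  arc_mfu data lists     (idx 16..31)
 *	list_num 48..63  ->  arc_mru data lists     (idx 16..31)
 */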
4716
4717/*
4718 * Evict buffers from the device write hand to the distance specified in
4719 * bytes. This distance may span populated buffers, or it may span nothing.
4720 * This is clearing a region on the L2ARC device ready for writing.
4721 * If the 'all' boolean is set, every buffer is evicted.
4722 */
4723static void
4724l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4725{
4726 list_t *buflist;
4727 l2arc_buf_hdr_t *abl2;
4728 arc_buf_hdr_t *ab, *ab_prev;
4729 kmutex_t *hash_lock;
4730 uint64_t taddr;
4731
4732 buflist = dev->l2ad_buflist;
4733
4734 if (buflist == NULL)
4735 return;
4736
4737 if (!all && dev->l2ad_first) {
4738 /*
4739 * This is the first sweep through the device. There is
4740 * nothing to evict.
4741 */
4742 return;
4743 }
4744
4745 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4746 /*
4747 * When nearing the end of the device, evict to the end
4748 * before the device write hand jumps to the start.
4749 */
4750 taddr = dev->l2ad_end;
4751 } else {
4752 taddr = dev->l2ad_hand + distance;
4753 }
4754 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4755 uint64_t, taddr, boolean_t, all);
4756
4757top:
4758 mutex_enter(&l2arc_buflist_mtx);
4759 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4760 ab_prev = list_prev(buflist, ab);
4761
4762 hash_lock = HDR_LOCK(ab);
4763 if (!mutex_tryenter(hash_lock)) {
4764 /*
4765 * Missed the hash lock. Retry.
4766 */
4767 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4768 mutex_exit(&l2arc_buflist_mtx);
4769 mutex_enter(hash_lock);
4770 mutex_exit(hash_lock);
4771 goto top;
4772 }
4773
4774 if (HDR_L2_WRITE_HEAD(ab)) {
4775 /*
4776 * We hit a write head node. Leave it for
4777 * l2arc_write_done().
4778 */
4779 list_remove(buflist, ab);
4780 mutex_exit(hash_lock);
4781 continue;
4782 }
4783
4784 if (!all && ab->b_l2hdr != NULL &&
4785 (ab->b_l2hdr->b_daddr > taddr ||
4786 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4787 /*
4788 * We've evicted to the target address,
4789 * or the end of the device.
4790 */
4791 mutex_exit(hash_lock);
4792 break;
4793 }
4794
4795 if (HDR_FREE_IN_PROGRESS(ab)) {
4796 /*
4797 * Already on the path to destruction.
4798 */
4799 mutex_exit(hash_lock);
4800 continue;
4801 }
4802
4803 if (ab->b_state == arc_l2c_only) {
4804 ASSERT(!HDR_L2_READING(ab));
4805 /*
4806 * This doesn't exist in the ARC. Destroy.
4807 * arc_hdr_destroy() will call list_remove()
4808 * and decrement arcstat_l2_size.
4809 */
4810 arc_change_state(arc_anon, ab, hash_lock);
4811 arc_hdr_destroy(ab);
4812 } else {
4813 /*
4814 * Invalidate issued or about to be issued
4815 * reads, since we may be about to write
4816 * over this location.
4817 */
4818 if (HDR_L2_READING(ab)) {
4819 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4820 ab->b_flags |= ARC_L2_EVICTED;
4821 }
4822
4823 /*
4824 * Tell ARC this no longer exists in L2ARC.
4825 */
4826 if (ab->b_l2hdr != NULL) {
4827 abl2 = ab->b_l2hdr;
4828 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4829 ab->b_l2hdr = NULL;
4830 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4831 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4832 }
4833 list_remove(buflist, ab);
4834
4835 /*
4836 * This may have been leftover after a
4837 * failed write.
4838 */
4839 ab->b_flags &= ~ARC_L2_WRITING;
4840 }
4841 mutex_exit(hash_lock);
4842 }
4843 mutex_exit(&l2arc_buflist_mtx);
4844
4845 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4846 dev->l2ad_evict = taddr;
4847}
4848
4849/*
4850 * Find and write ARC buffers to the L2ARC device.
4851 *
4852 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4853 * for reading until they have completed writing.
4854 * The headroom_boost is an in-out parameter used to maintain headroom boost
4855 * state between calls to this function.
4856 *
4857 * Returns the number of bytes actually written (which may be smaller than
4858 * the delta by which the device hand has changed due to alignment).
4859 */
4860static uint64_t
4861l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4862 boolean_t *headroom_boost)
4863{
4864 arc_buf_hdr_t *ab, *ab_prev, *head;
4865 list_t *list;
4866 uint64_t write_asize, write_psize, write_sz, headroom,
4867 buf_compress_minsz;
4868 void *buf_data;
4869 kmutex_t *list_lock;
4870 boolean_t full;
4871 l2arc_write_callback_t *cb;
4872 zio_t *pio, *wzio;
4873 uint64_t guid = spa_load_guid(spa);
4874 const boolean_t do_headroom_boost = *headroom_boost;
4875 int try;
4876
4877 ASSERT(dev->l2ad_vdev != NULL);
4878
4879 /* Lower the flag now, we might want to raise it again later. */
4880 *headroom_boost = B_FALSE;
4881
4882 pio = NULL;
4883 write_sz = write_asize = write_psize = 0;
4884 full = B_FALSE;
4885 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4886 head->b_flags |= ARC_L2_WRITE_HEAD;
4887
4888 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4889 /*
4890 * We will want to try to compress buffers that are at least 2x the
4891 * device sector size.
4892 */
4893 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4894
4895 /*
4896 * Copy buffers for L2ARC writing.
4897 */
4898 mutex_enter(&l2arc_buflist_mtx);
4899 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4900 uint64_t passed_sz = 0;
4901
4902 list = l2arc_list_locked(try, &list_lock);
4903 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4904
4905 /*
4906 * L2ARC fast warmup.
4907 *
4908 * Until the ARC is warm and starts to evict, read from the
4909 * head of the ARC lists rather than the tail.
4910 */
4911 if (arc_warm == B_FALSE)
4912 ab = list_head(list);
4913 else
4914 ab = list_tail(list);
4915 if (ab == NULL)
4916 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
4917
4918 headroom = target_sz * l2arc_headroom;
4919 if (do_headroom_boost)
4920 headroom = (headroom * l2arc_headroom_boost) / 100;
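		/*
		 * Worked example (assumed defaults, for illustration only):
		 * with target_sz = 8MB, l2arc_headroom = 2 and
		 * l2arc_headroom_boost = 200, scanning of each list stops
		 * after about 16MB of buffers, or 32MB when the boost is
		 * active.
		 */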
4921
4922 for (; ab; ab = ab_prev) {
4923 l2arc_buf_hdr_t *l2hdr;
4924 kmutex_t *hash_lock;
4925 uint64_t buf_sz;
4926
4927 if (arc_warm == B_FALSE)
4928 ab_prev = list_next(list, ab);
4929 else
4930 ab_prev = list_prev(list, ab);
4931 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
4932
4933 hash_lock = HDR_LOCK(ab);
4934 if (!mutex_tryenter(hash_lock)) {
4935 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
4936 /*
4937 * Skip this buffer rather than waiting.
4938 */
4939 continue;
4940 }
4941
4942 passed_sz += ab->b_size;
4943 if (passed_sz > headroom) {
4944 /*
4945 * Searched too far.
4946 */
4947 mutex_exit(hash_lock);
4948 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
4949 break;
4950 }
4951
4952 if (!l2arc_write_eligible(guid, ab)) {
4953 mutex_exit(hash_lock);
4954 continue;
4955 }
4956
4957 if ((write_sz + ab->b_size) > target_sz) {
4958 full = B_TRUE;
4959 mutex_exit(hash_lock);
4960 ARCSTAT_BUMP(arcstat_l2_write_full);
4961 break;
4962 }
4963
4964 if (pio == NULL) {
4965 /*
4966 * Insert a dummy header on the buflist so
4967 * l2arc_write_done() can find where the
4968 * write buffers begin without searching.
4969 */
4970 list_insert_head(dev->l2ad_buflist, head);
4971
4972 cb = kmem_alloc(
4973 sizeof (l2arc_write_callback_t), KM_SLEEP);
4974 cb->l2wcb_dev = dev;
4975 cb->l2wcb_head = head;
4976 pio = zio_root(spa, l2arc_write_done, cb,
4977 ZIO_FLAG_CANFAIL);
4978 ARCSTAT_BUMP(arcstat_l2_write_pios);
4979 }
4980
4981 /*
4982 * Create and add a new L2ARC header.
4983 */
4984 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4985 l2hdr->b_dev = dev;
4986 ab->b_flags |= ARC_L2_WRITING;
4987
4988 /*
4989 * Temporarily stash the data buffer in b_tmp_cdata.
4990 * The subsequent write step will pick it up from
4991			 * there. This is because we can't access ab->b_buf
4992 * without holding the hash_lock, which we in turn
4993 * can't access without holding the ARC list locks
4994 * (which we want to avoid during compression/writing).
4995 */
4996 l2hdr->b_compress = ZIO_COMPRESS_OFF;
4997 l2hdr->b_asize = ab->b_size;
4998 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4999
5000 buf_sz = ab->b_size;
5001 ab->b_l2hdr = l2hdr;
5002
5003 list_insert_head(dev->l2ad_buflist, ab);
5004
5005 /*
5006 * Compute and store the buffer cksum before
5007 * writing. On debug the cksum is verified first.
5008 */
5009 arc_cksum_verify(ab->b_buf);
5010 arc_cksum_compute(ab->b_buf, B_TRUE);
5011
5012 mutex_exit(hash_lock);
5013
5014 write_sz += buf_sz;
5015 }
5016
5017 mutex_exit(list_lock);
5018
5019 if (full == B_TRUE)
5020 break;
5021 }
5022
5023 /* No buffers selected for writing? */
5024 if (pio == NULL) {
5025 ASSERT0(write_sz);
5026 mutex_exit(&l2arc_buflist_mtx);
5027 kmem_cache_free(hdr_cache, head);
5028 return (0);
5029 }
5030
5031 /*
5032 * Now start writing the buffers. We're starting at the write head
5033 * and work backwards, retracing the course of the buffer selector
5034 * loop above.
5035 */
5036 for (ab = list_prev(dev->l2ad_buflist, head); ab;
5037 ab = list_prev(dev->l2ad_buflist, ab)) {
5038 l2arc_buf_hdr_t *l2hdr;
5039 uint64_t buf_sz;
5040
5041 /*
5042 * We shouldn't need to lock the buffer here, since we flagged
5043 * it as ARC_L2_WRITING in the previous step, but we must take
5044 * care to only access its L2 cache parameters. In particular,
5045 * ab->b_buf may be invalid by now due to ARC eviction.
5046 */
5047 l2hdr = ab->b_l2hdr;
5048 l2hdr->b_daddr = dev->l2ad_hand;
5049
5050 if ((ab->b_flags & ARC_L2COMPRESS) &&
5051 l2hdr->b_asize >= buf_compress_minsz) {
5052 if (l2arc_compress_buf(l2hdr)) {
5053 /*
5054 * If compression succeeded, enable headroom
5055 * boost on the next scan cycle.
5056 */
5057 *headroom_boost = B_TRUE;
5058 }
5059 }
5060
5061 /*
5062 * Pick up the buffer data we had previously stashed away
5063 * (and now potentially also compressed).
5064 */
5065 buf_data = l2hdr->b_tmp_cdata;
5066 buf_sz = l2hdr->b_asize;
5067
5068 /* Compression may have squashed the buffer to zero length. */
5069 if (buf_sz != 0) {
5070 uint64_t buf_p_sz;
5071
5072 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5073 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5074 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5075 ZIO_FLAG_CANFAIL, B_FALSE);
5076
5077 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5078 zio_t *, wzio);
5079 (void) zio_nowait(wzio);
5080
5081 write_asize += buf_sz;
5082 /*
5083 * Keep the clock hand suitably device-aligned.
5084 */
5085 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5086 write_psize += buf_p_sz;
5087 dev->l2ad_hand += buf_p_sz;
5088 }
5089 }
5090
5091 mutex_exit(&l2arc_buflist_mtx);
5092
5093 ASSERT3U(write_asize, <=, target_sz);
5094 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5095 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5096 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5097 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5098 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
5099
5100 /*
5101 * Bump device hand to the device start if it is approaching the end.
5102 * l2arc_evict() will already have evicted ahead for this case.
5103 */
5104 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5105 vdev_space_update(dev->l2ad_vdev,
5106 dev->l2ad_end - dev->l2ad_hand, 0, 0);
5107 dev->l2ad_hand = dev->l2ad_start;
5108 dev->l2ad_evict = dev->l2ad_start;
5109 dev->l2ad_first = B_FALSE;
5110 }
5111
5112 dev->l2ad_writing = B_TRUE;
5113 (void) zio_wait(pio);
5114 dev->l2ad_writing = B_FALSE;
5115
5116 return (write_asize);
5117}
5118
5119/*
5120 * Compresses an L2ARC buffer.
5121 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5122 * size in l2hdr->b_asize. This routine tries to compress the data and
5123 * depending on the compression result there are three possible outcomes:
5124 * *) The buffer was incompressible. The original l2hdr contents were left
5125 * untouched and are ready for writing to an L2 device.
5126 * *) The buffer was all-zeros, so there is no need to write it to an L2
5127 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5128 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5129 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5130 * data buffer which holds the compressed data to be written, and b_asize
5131 * tells us how much data there is. b_compress is set to the appropriate
5132 * compression algorithm. Once writing is done, invoke
5133 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5134 *
5135 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5136 * buffer was incompressible).
5137 */
5138static boolean_t
5139l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5140{
5141 void *cdata;
5142 size_t csize, len;
5143
5144 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5145 ASSERT(l2hdr->b_tmp_cdata != NULL);
5146
5147 len = l2hdr->b_asize;
5148 cdata = zio_data_buf_alloc(len);
5149	csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5150	    cdata, l2hdr->b_asize);
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
83 *
84 * Buffers do not have their own mutexs, rather they rely on the
85 * hash table mutexs for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexs).
87 *
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
91 *
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
94 *
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock you
98 * must use: mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
100 *
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_buf_evict()
108 * and arc_do_user_evicts().
109 *
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
112 *
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114 *
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
120 */
121
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#ifdef _KERNEL
131#include <sys/dnlc.h>
132#endif
133#include <sys/callb.h>
134#include <sys/kstat.h>
135#include <sys/trim_map.h>
136#include <zfs_fletcher.h>
137#include <sys/sdt.h>
138
139#include <vm/vm_pageout.h>
140
141#ifdef illumos
142#ifndef _KERNEL
143/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
144boolean_t arc_watch = B_FALSE;
145int arc_procfd;
146#endif
147#endif /* illumos */
148
149static kmutex_t arc_reclaim_thr_lock;
150static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
151static uint8_t arc_thread_exit;
152
153extern int zfs_write_limit_shift;
154extern uint64_t zfs_write_limit_max;
155extern kmutex_t zfs_write_limit_lock;
156
157#define ARC_REDUCE_DNLC_PERCENT 3
158uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
159
160typedef enum arc_reclaim_strategy {
161 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
162 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
163} arc_reclaim_strategy_t;
164
165/* number of seconds before growing cache again */
166static int arc_grow_retry = 60;
167
168/* shift of arc_c for calculating both min and max arc_p */
169static int arc_p_min_shift = 4;
170
171/* log2(fraction of arc to reclaim) */
172static int arc_shrink_shift = 5;
173
174/*
175 * minimum lifespan of a prefetch block in clock ticks
176 * (initialized in arc_init())
177 */
178static int arc_min_prefetch_lifespan;
179
180static int arc_dead;
181extern int zfs_prefetch_disable;
182
183/*
184 * The arc has filled available memory and has now warmed up.
185 */
186static boolean_t arc_warm;
187
188/*
189 * These tunables are for performance analysis.
190 */
191uint64_t zfs_arc_max;
192uint64_t zfs_arc_min;
193uint64_t zfs_arc_meta_limit = 0;
194int zfs_arc_grow_retry = 0;
195int zfs_arc_shrink_shift = 0;
196int zfs_arc_p_min_shift = 0;
197int zfs_disable_dup_eviction = 0;
198
199TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
200TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
201TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
202SYSCTL_DECL(_vfs_zfs);
203SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
204 "Maximum ARC size");
205SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
206 "Minimum ARC size");
207
208/*
209 * Note that buffers can be in one of 6 states:
210 * ARC_anon - anonymous (discussed below)
211 * ARC_mru - recently used, currently cached
212 * ARC_mru_ghost - recentely used, no longer in cache
213 * ARC_mfu - frequently used, currently cached
214 * ARC_mfu_ghost - frequently used, no longer in cache
215 * ARC_l2c_only - exists in L2ARC but not other states
216 * When there are no active references to the buffer, they are
217 * linked onto a list in one of these arc states. These are
218 * the only buffers that can be evicted or deleted. Within each
219 * state there are multiple lists, one for meta-data and one for
220 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
221 * etc.) is tracked separately so that it can be managed more
222 * explicitly: favored over data, and limited by arc_meta_limit.
223 *
224 * Anonymous buffers are buffers that are not associated with
225 * a DVA. These are buffers that hold dirty block copies
226 * before they are written to stable storage. By definition,
227 * they are "ref'd" and are considered part of arc_mru
228 * that cannot be freed. Generally, they will acquire a DVA
229 * as they are written and migrate onto the arc_mru list.
230 *
231 * The ARC_l2c_only state is for buffers that are in the second
232 * level ARC but no longer in any of the ARC_m* lists. The second
233 * level ARC itself may also contain buffers that are in any of
234 * the ARC_m* states - meaning that a buffer can exist in two
235 * places. The reason for the ARC_l2c_only state is to keep the
236 * buffer header in the hash table, so that reads that hit the
237 * second level ARC benefit from these fast lookups.
238 */
239
240#define ARCS_LOCK_PAD CACHE_LINE_SIZE
241struct arcs_lock {
242 kmutex_t arcs_lock;
243#ifdef _KERNEL
244 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
245#endif
246};
247
248/*
249 * must be a power of two for the mask arithmetic to work
250 *
251 */
252#define ARC_BUFC_NUMDATALISTS 16
253#define ARC_BUFC_NUMMETADATALISTS 16
254#define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
255
256typedef struct arc_state {
257 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
258 uint64_t arcs_size; /* total amount of data in this state */
259 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
260 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
261} arc_state_t;
262
263#define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock))
264
265/* The 6 states: */
266static arc_state_t ARC_anon;
267static arc_state_t ARC_mru;
268static arc_state_t ARC_mru_ghost;
269static arc_state_t ARC_mfu;
270static arc_state_t ARC_mfu_ghost;
271static arc_state_t ARC_l2c_only;
272
273typedef struct arc_stats {
274 kstat_named_t arcstat_hits;
275 kstat_named_t arcstat_misses;
276 kstat_named_t arcstat_demand_data_hits;
277 kstat_named_t arcstat_demand_data_misses;
278 kstat_named_t arcstat_demand_metadata_hits;
279 kstat_named_t arcstat_demand_metadata_misses;
280 kstat_named_t arcstat_prefetch_data_hits;
281 kstat_named_t arcstat_prefetch_data_misses;
282 kstat_named_t arcstat_prefetch_metadata_hits;
283 kstat_named_t arcstat_prefetch_metadata_misses;
284 kstat_named_t arcstat_mru_hits;
285 kstat_named_t arcstat_mru_ghost_hits;
286 kstat_named_t arcstat_mfu_hits;
287 kstat_named_t arcstat_mfu_ghost_hits;
288 kstat_named_t arcstat_allocated;
289 kstat_named_t arcstat_deleted;
290 kstat_named_t arcstat_stolen;
291 kstat_named_t arcstat_recycle_miss;
292 /*
293 * Number of buffers that could not be evicted because the hash lock
294 * was held by another thread. The lock may not necessarily be held
295 * by something using the same buffer, since hash locks are shared
296 * by multiple buffers.
297 */
298 kstat_named_t arcstat_mutex_miss;
299 /*
300 * Number of buffers skipped because they have I/O in progress, are
301 * indirect prefetch buffers that have not lived long enough, or are
302 * not from the spa we're trying to evict from.
303 */
304 kstat_named_t arcstat_evict_skip;
305 kstat_named_t arcstat_evict_l2_cached;
306 kstat_named_t arcstat_evict_l2_eligible;
307 kstat_named_t arcstat_evict_l2_ineligible;
308 kstat_named_t arcstat_hash_elements;
309 kstat_named_t arcstat_hash_elements_max;
310 kstat_named_t arcstat_hash_collisions;
311 kstat_named_t arcstat_hash_chains;
312 kstat_named_t arcstat_hash_chain_max;
313 kstat_named_t arcstat_p;
314 kstat_named_t arcstat_c;
315 kstat_named_t arcstat_c_min;
316 kstat_named_t arcstat_c_max;
317 kstat_named_t arcstat_size;
318 kstat_named_t arcstat_hdr_size;
319 kstat_named_t arcstat_data_size;
320 kstat_named_t arcstat_other_size;
321 kstat_named_t arcstat_l2_hits;
322 kstat_named_t arcstat_l2_misses;
323 kstat_named_t arcstat_l2_feeds;
324 kstat_named_t arcstat_l2_rw_clash;
325 kstat_named_t arcstat_l2_read_bytes;
326 kstat_named_t arcstat_l2_write_bytes;
327 kstat_named_t arcstat_l2_writes_sent;
328 kstat_named_t arcstat_l2_writes_done;
329 kstat_named_t arcstat_l2_writes_error;
330 kstat_named_t arcstat_l2_writes_hdr_miss;
331 kstat_named_t arcstat_l2_evict_lock_retry;
332 kstat_named_t arcstat_l2_evict_reading;
333 kstat_named_t arcstat_l2_free_on_write;
334 kstat_named_t arcstat_l2_abort_lowmem;
335 kstat_named_t arcstat_l2_cksum_bad;
336 kstat_named_t arcstat_l2_io_error;
337 kstat_named_t arcstat_l2_size;
338 kstat_named_t arcstat_l2_asize;
339 kstat_named_t arcstat_l2_hdr_size;
340 kstat_named_t arcstat_l2_compress_successes;
341 kstat_named_t arcstat_l2_compress_zeros;
342 kstat_named_t arcstat_l2_compress_failures;
343 kstat_named_t arcstat_l2_write_trylock_fail;
344 kstat_named_t arcstat_l2_write_passed_headroom;
345 kstat_named_t arcstat_l2_write_spa_mismatch;
346 kstat_named_t arcstat_l2_write_in_l2;
347 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
348 kstat_named_t arcstat_l2_write_not_cacheable;
349 kstat_named_t arcstat_l2_write_full;
350 kstat_named_t arcstat_l2_write_buffer_iter;
351 kstat_named_t arcstat_l2_write_pios;
352 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
353 kstat_named_t arcstat_l2_write_buffer_list_iter;
354 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
355 kstat_named_t arcstat_memory_throttle_count;
356 kstat_named_t arcstat_duplicate_buffers;
357 kstat_named_t arcstat_duplicate_buffers_size;
358 kstat_named_t arcstat_duplicate_reads;
359} arc_stats_t;
360
361static arc_stats_t arc_stats = {
362 { "hits", KSTAT_DATA_UINT64 },
363 { "misses", KSTAT_DATA_UINT64 },
364 { "demand_data_hits", KSTAT_DATA_UINT64 },
365 { "demand_data_misses", KSTAT_DATA_UINT64 },
366 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
367 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
368 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
369 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
370 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
371 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
372 { "mru_hits", KSTAT_DATA_UINT64 },
373 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
374 { "mfu_hits", KSTAT_DATA_UINT64 },
375 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
376 { "allocated", KSTAT_DATA_UINT64 },
377 { "deleted", KSTAT_DATA_UINT64 },
378 { "stolen", KSTAT_DATA_UINT64 },
379 { "recycle_miss", KSTAT_DATA_UINT64 },
380 { "mutex_miss", KSTAT_DATA_UINT64 },
381 { "evict_skip", KSTAT_DATA_UINT64 },
382 { "evict_l2_cached", KSTAT_DATA_UINT64 },
383 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
384 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
385 { "hash_elements", KSTAT_DATA_UINT64 },
386 { "hash_elements_max", KSTAT_DATA_UINT64 },
387 { "hash_collisions", KSTAT_DATA_UINT64 },
388 { "hash_chains", KSTAT_DATA_UINT64 },
389 { "hash_chain_max", KSTAT_DATA_UINT64 },
390 { "p", KSTAT_DATA_UINT64 },
391 { "c", KSTAT_DATA_UINT64 },
392 { "c_min", KSTAT_DATA_UINT64 },
393 { "c_max", KSTAT_DATA_UINT64 },
394 { "size", KSTAT_DATA_UINT64 },
395 { "hdr_size", KSTAT_DATA_UINT64 },
396 { "data_size", KSTAT_DATA_UINT64 },
397 { "other_size", KSTAT_DATA_UINT64 },
398 { "l2_hits", KSTAT_DATA_UINT64 },
399 { "l2_misses", KSTAT_DATA_UINT64 },
400 { "l2_feeds", KSTAT_DATA_UINT64 },
401 { "l2_rw_clash", KSTAT_DATA_UINT64 },
402 { "l2_read_bytes", KSTAT_DATA_UINT64 },
403 { "l2_write_bytes", KSTAT_DATA_UINT64 },
404 { "l2_writes_sent", KSTAT_DATA_UINT64 },
405 { "l2_writes_done", KSTAT_DATA_UINT64 },
406 { "l2_writes_error", KSTAT_DATA_UINT64 },
407 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
408 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
409 { "l2_evict_reading", KSTAT_DATA_UINT64 },
410 { "l2_free_on_write", KSTAT_DATA_UINT64 },
411 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
412 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
413 { "l2_io_error", KSTAT_DATA_UINT64 },
414 { "l2_size", KSTAT_DATA_UINT64 },
415 { "l2_asize", KSTAT_DATA_UINT64 },
416 { "l2_hdr_size", KSTAT_DATA_UINT64 },
417 { "l2_compress_successes", KSTAT_DATA_UINT64 },
418 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
419 { "l2_compress_failures", KSTAT_DATA_UINT64 },
420 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
421 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
422 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
423 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
424 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
425 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
426 { "l2_write_full", KSTAT_DATA_UINT64 },
427 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
428 { "l2_write_pios", KSTAT_DATA_UINT64 },
429 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
430 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
431 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
432 { "memory_throttle_count", KSTAT_DATA_UINT64 },
433 { "duplicate_buffers", KSTAT_DATA_UINT64 },
434 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
435 { "duplicate_reads", KSTAT_DATA_UINT64 }
436};
437
438#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
439
440#define ARCSTAT_INCR(stat, val) \
441 atomic_add_64(&arc_stats.stat.value.ui64, (val))
442
443#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
444#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
445
446#define ARCSTAT_MAX(stat, val) { \
447 uint64_t m; \
448 while ((val) > (m = arc_stats.stat.value.ui64) && \
449 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
450 continue; \
451}
452
453#define ARCSTAT_MAXSTAT(stat) \
454 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
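/*
 * For example, recording a cache hit and tracking the high-water mark of
 * hash table occupancy (as done in buf_hash_insert() below) reduce to
 * plain atomics on the kstat values:
 *
 *	ARCSTAT_BUMP(arcstat_hits);
 *		=> atomic_add_64(&arc_stats.arcstat_hits.value.ui64, 1);
 *
 *	ARCSTAT_MAXSTAT(arcstat_hash_elements);
 *		=> ARCSTAT_MAX(arcstat_hash_elements_max,
 *		    arc_stats.arcstat_hash_elements.value.ui64);
 */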
455
456/*
457 * We define a macro to allow ARC hits/misses to be easily broken down by
458 * two separate conditions, giving a total of four different subtypes for
459 * each of hits and misses (so eight statistics total).
460 */
461#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
462 if (cond1) { \
463 if (cond2) { \
464 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
465 } else { \
466 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
467 } \
468 } else { \
469 if (cond2) { \
470 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
471 } else { \
472 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
473 } \
474 }
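/*
 * For instance, the call in arc_buf_add_ref() below,
 *
 *	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
 *	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
 *	    data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits,
 * arcstat_demand_metadata_hits, arcstat_prefetch_data_hits or
 * arcstat_prefetch_metadata_hits, depending on whether the access was a
 * demand or prefetch read and whether the buffer holds data or metadata.
 */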
475
476kstat_t *arc_ksp;
477static arc_state_t *arc_anon;
478static arc_state_t *arc_mru;
479static arc_state_t *arc_mru_ghost;
480static arc_state_t *arc_mfu;
481static arc_state_t *arc_mfu_ghost;
482static arc_state_t *arc_l2c_only;
483
484/*
485 * There are several ARC variables that are critical to export as kstats --
486 * but we don't want to have to grovel around in the kstat whenever we wish to
487 * manipulate them. For these variables, we therefore define them to be in
488 * terms of the statistic variable. This assures that we are not introducing
489 * the possibility of inconsistency by having shadow copies of the variables,
490 * while still allowing the code to be readable.
491 */
492#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
493#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
494#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
495#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
496#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
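/*
 * For example, arc_space_consume() below grows both the metadata
 * accounting and the total via
 *
 *	atomic_add_64(&arc_meta_used, space);
 *	atomic_add_64(&arc_size, space);
 *
 * and the second call updates arc_stats.arcstat_size directly, since
 * arc_size is just an alias for that kstat value.
 */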
497
498#define L2ARC_IS_VALID_COMPRESS(_c_) \
499 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
500
501static int arc_no_grow; /* Don't try to grow cache size */
502static uint64_t arc_tempreserve;
503static uint64_t arc_loaned_bytes;
504static uint64_t arc_meta_used;
505static uint64_t arc_meta_limit;
506static uint64_t arc_meta_max = 0;
507SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
508 "ARC metadata used");
509SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
510 "ARC metadata limit");
511
512typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
513
514typedef struct arc_callback arc_callback_t;
515
516struct arc_callback {
517 void *acb_private;
518 arc_done_func_t *acb_done;
519 arc_buf_t *acb_buf;
520 zio_t *acb_zio_dummy;
521 arc_callback_t *acb_next;
522};
523
524typedef struct arc_write_callback arc_write_callback_t;
525
526struct arc_write_callback {
527 void *awcb_private;
528 arc_done_func_t *awcb_ready;
529 arc_done_func_t *awcb_done;
530 arc_buf_t *awcb_buf;
531};
532
533struct arc_buf_hdr {
534 /* protected by hash lock */
535 dva_t b_dva;
536 uint64_t b_birth;
537 uint64_t b_cksum0;
538
539 kmutex_t b_freeze_lock;
540 zio_cksum_t *b_freeze_cksum;
541 void *b_thawed;
542
543 arc_buf_hdr_t *b_hash_next;
544 arc_buf_t *b_buf;
545 uint32_t b_flags;
546 uint32_t b_datacnt;
547
548 arc_callback_t *b_acb;
549 kcondvar_t b_cv;
550
551 /* immutable */
552 arc_buf_contents_t b_type;
553 uint64_t b_size;
554 uint64_t b_spa;
555
556 /* protected by arc state mutex */
557 arc_state_t *b_state;
558 list_node_t b_arc_node;
559
560 /* updated atomically */
561 clock_t b_arc_access;
562
563 /* self protecting */
564 refcount_t b_refcnt;
565
566 l2arc_buf_hdr_t *b_l2hdr;
567 list_node_t b_l2node;
568};
569
570static arc_buf_t *arc_eviction_list;
571static kmutex_t arc_eviction_mtx;
572static arc_buf_hdr_t arc_eviction_hdr;
573static void arc_get_data_buf(arc_buf_t *buf);
574static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
575static int arc_evict_needed(arc_buf_contents_t type);
576static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
577#ifdef illumos
578static void arc_buf_watch(arc_buf_t *buf);
579#endif /* illumos */
580
581static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
582
583#define GHOST_STATE(state) \
584 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
585 (state) == arc_l2c_only)
586
587/*
588 * Private ARC flags. These are ARC-internal flags that show up
589 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
590 * be passed in as arc_flags in things like arc_read. However, these private
591 * flags should never be passed in by callers and should only be set by ARC
592 * code. When adding new public flags, make sure not to smash the private ones.
593 */
594
595#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
596#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
597#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
598#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
599#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
600#define ARC_INDIRECT (1 << 14) /* this is an indirect block */
601#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
602#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
603#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
604#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
605
606#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
607#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
608#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
609#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
610#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
611#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
612#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
613#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
614#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
615 (hdr)->b_l2hdr != NULL)
616#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
617#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
618#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
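/*
 * Flags are set and cleared directly on b_flags (under the appropriate
 * lock) and tested through the wrappers above; for example, hashing a
 * header marks it with
 *
 *	buf->b_flags |= ARC_IN_HASH_TABLE;
 *
 * and later code checks HDR_IN_HASH_TABLE(hdr) rather than open-coding
 * the bit test.
 */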
619
620/*
621 * Other sizes
622 */
623
624#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
625#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
626
627/*
628 * Hash table routines
629 */
630
631#define HT_LOCK_PAD CACHE_LINE_SIZE
632
633struct ht_lock {
634 kmutex_t ht_lock;
635#ifdef _KERNEL
636 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
637#endif
638};
639
640#define BUF_LOCKS 256
641typedef struct buf_hash_table {
642 uint64_t ht_mask;
643 arc_buf_hdr_t **ht_table;
644 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
645} buf_hash_table_t;
646
647static buf_hash_table_t buf_hash_table;
648
649#define BUF_HASH_INDEX(spa, dva, birth) \
650 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
651#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
652#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
653#define HDR_LOCK(hdr) \
654 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
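/*
 * The usual locking pattern for a hashed header is to derive the lock
 * from the header's identity and hold it across any state manipulation,
 * as arc_buf_freeze() and arc_buf_free() do:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(buf->b_hdr);
 *
 *	mutex_enter(hash_lock);
 *	... examine or update the header ...
 *	mutex_exit(hash_lock);
 *
 * Note that BUF_LOCKS is smaller than the hash table size, so one hash
 * lock covers many hash buckets.
 */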
655
656uint64_t zfs_crc64_table[256];
657
658/*
659 * Level 2 ARC
660 */
661
662#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
663#define L2ARC_HEADROOM 2 /* num of writes */
664/*
665 * If, during an ARC scan, we discover any buffers to be compressed, we boost
666 * our headroom for the next scanning cycle by this percentage multiplier.
667 */
668#define L2ARC_HEADROOM_BOOST 200
669#define L2ARC_FEED_SECS 1 /* caching interval secs */
670#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
671
672#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
673#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
674
675/* L2ARC Performance Tunables */
676uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
677uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
678uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
679uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
680uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
681uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
682boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
683boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
684boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
685
686SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
687 &l2arc_write_max, 0, "max write size");
688SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
689 &l2arc_write_boost, 0, "extra write during warmup");
690SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
691 &l2arc_headroom, 0, "number of dev writes");
692SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
693 &l2arc_feed_secs, 0, "interval seconds");
694SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
695 &l2arc_feed_min_ms, 0, "min interval milliseconds");
696
697SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
698 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
699SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
700 &l2arc_feed_again, 0, "turbo warmup");
701SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
702 &l2arc_norw, 0, "no reads during writes");
703
704SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
705 &ARC_anon.arcs_size, 0, "size of anonymous state");
706SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
707 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
708SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
709 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
710
711SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
712 &ARC_mru.arcs_size, 0, "size of mru state");
713SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
714 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
715SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
716 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
717
718SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
719 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
720SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
721 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
722 "size of metadata in mru ghost state");
723SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
724 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
725 "size of data in mru ghost state");
726
727SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
728 &ARC_mfu.arcs_size, 0, "size of mfu state");
729SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
730 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
731SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
732 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
733
734SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
735 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
736SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
737 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
738 "size of metadata in mfu ghost state");
739SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
740 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
741 "size of data in mfu ghost state");
742
743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
744 &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
745
746/*
747 * L2ARC Internals
748 */
749typedef struct l2arc_dev {
750 vdev_t *l2ad_vdev; /* vdev */
751 spa_t *l2ad_spa; /* spa */
752 uint64_t l2ad_hand; /* next write location */
753 uint64_t l2ad_start; /* first addr on device */
754 uint64_t l2ad_end; /* last addr on device */
755 uint64_t l2ad_evict; /* last addr eviction reached */
756 boolean_t l2ad_first; /* first sweep through */
757 boolean_t l2ad_writing; /* currently writing */
758 list_t *l2ad_buflist; /* buffer list */
759 list_node_t l2ad_node; /* device list node */
760} l2arc_dev_t;
761
762static list_t L2ARC_dev_list; /* device list */
763static list_t *l2arc_dev_list; /* device list pointer */
764static kmutex_t l2arc_dev_mtx; /* device list mutex */
765static l2arc_dev_t *l2arc_dev_last; /* last device used */
766static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
767static list_t L2ARC_free_on_write; /* free after write buf list */
768static list_t *l2arc_free_on_write; /* free after write list ptr */
769static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
770static uint64_t l2arc_ndev; /* number of devices */
771
772typedef struct l2arc_read_callback {
773 arc_buf_t *l2rcb_buf; /* read buffer */
774 spa_t *l2rcb_spa; /* spa */
775 blkptr_t l2rcb_bp; /* original blkptr */
776 zbookmark_t l2rcb_zb; /* original bookmark */
777 int l2rcb_flags; /* original flags */
778 enum zio_compress l2rcb_compress; /* applied compress */
779} l2arc_read_callback_t;
780
781typedef struct l2arc_write_callback {
782 l2arc_dev_t *l2wcb_dev; /* device info */
783 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
784} l2arc_write_callback_t;
785
786struct l2arc_buf_hdr {
787 /* protected by arc_buf_hdr mutex */
788 l2arc_dev_t *b_dev; /* L2ARC device */
789 uint64_t b_daddr; /* disk address, offset byte */
790 /* compression applied to buffer data */
791 enum zio_compress b_compress;
792 /* real alloc'd buffer size depending on b_compress applied */
793 int b_asize;
794 /* temporary buffer holder for in-flight compressed data */
795 void *b_tmp_cdata;
796};
797
798typedef struct l2arc_data_free {
799 /* protected by l2arc_free_on_write_mtx */
800 void *l2df_data;
801 size_t l2df_size;
802 void (*l2df_func)(void *, size_t);
803 list_node_t l2df_list_node;
804} l2arc_data_free_t;
805
806static kmutex_t l2arc_feed_thr_lock;
807static kcondvar_t l2arc_feed_thr_cv;
808static uint8_t l2arc_thread_exit;
809
810static void l2arc_read_done(zio_t *zio);
811static void l2arc_hdr_stat_add(void);
812static void l2arc_hdr_stat_remove(void);
813
814static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
815static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
816 enum zio_compress c);
817static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
818
819static uint64_t
820buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
821{
822 uint8_t *vdva = (uint8_t *)dva;
823 uint64_t crc = -1ULL;
824 int i;
825
826 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
827
828 for (i = 0; i < sizeof (dva_t); i++)
829 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
830
831 crc ^= (spa>>8) ^ birth;
832
833 return (crc);
834}
835
836#define BUF_EMPTY(buf) \
837 ((buf)->b_dva.dva_word[0] == 0 && \
838 (buf)->b_dva.dva_word[1] == 0 && \
839 (buf)->b_birth == 0)
840
841#define BUF_EQUAL(spa, dva, birth, buf) \
842 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
843 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
844 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
845
846static void
847buf_discard_identity(arc_buf_hdr_t *hdr)
848{
849 hdr->b_dva.dva_word[0] = 0;
850 hdr->b_dva.dva_word[1] = 0;
851 hdr->b_birth = 0;
852 hdr->b_cksum0 = 0;
853}
854
855static arc_buf_hdr_t *
856buf_hash_find(uint64_t spa, const dva_t *dva, uint64_t birth, kmutex_t **lockp)
857{
858 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
859 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
860 arc_buf_hdr_t *buf;
861
862 mutex_enter(hash_lock);
863 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
864 buf = buf->b_hash_next) {
865 if (BUF_EQUAL(spa, dva, birth, buf)) {
866 *lockp = hash_lock;
867 return (buf);
868 }
869 }
870 mutex_exit(hash_lock);
871 *lockp = NULL;
872 return (NULL);
873}
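/*
 * On a hit, buf_hash_find() returns with the hash lock held and hands it
 * back through *lockp; the caller is responsible for dropping it. A
 * minimal caller sketch (spa guid, dva and birth supplied by the caller):
 *
 *	kmutex_t *hash_lock;
 *	arc_buf_hdr_t *hdr;
 *
 *	hdr = buf_hash_find(guid, dva, birth, &hash_lock);
 *	if (hdr != NULL) {
 *		... use hdr while the lock is held ...
 *		mutex_exit(hash_lock);
 *	}
 */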
874
875/*
876 * Insert an entry into the hash table. If there is already an element
877 * equal to the new one in the hash table, then the already existing
878 * element will be returned and the new element will not be inserted.
879 * Otherwise returns NULL.
880 */
881static arc_buf_hdr_t *
882buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
883{
884 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
885 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
886 arc_buf_hdr_t *fbuf;
887 uint32_t i;
888
889 ASSERT(!HDR_IN_HASH_TABLE(buf));
890 *lockp = hash_lock;
891 mutex_enter(hash_lock);
892 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
893 fbuf = fbuf->b_hash_next, i++) {
894 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
895 return (fbuf);
896 }
897
898 buf->b_hash_next = buf_hash_table.ht_table[idx];
899 buf_hash_table.ht_table[idx] = buf;
900 buf->b_flags |= ARC_IN_HASH_TABLE;
901
902 /* collect some hash table performance data */
903 if (i > 0) {
904 ARCSTAT_BUMP(arcstat_hash_collisions);
905 if (i == 1)
906 ARCSTAT_BUMP(arcstat_hash_chains);
907
908 ARCSTAT_MAX(arcstat_hash_chain_max, i);
909 }
910
911 ARCSTAT_BUMP(arcstat_hash_elements);
912 ARCSTAT_MAXSTAT(arcstat_hash_elements);
913
914 return (NULL);
915}
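/*
 * Callers must treat a non-NULL return as "somebody beat us to it": the
 * new header was not inserted and the existing one should be used (or the
 * race resolved) instead. Roughly:
 *
 *	arc_buf_hdr_t *exists;
 *	kmutex_t *hash_lock;
 *
 *	exists = buf_hash_insert(hdr, &hash_lock);
 *	if (exists != NULL) {
 *		... another thread inserted an equal header; back out ...
 *	}
 *	mutex_exit(hash_lock);
 */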
916
917static void
918buf_hash_remove(arc_buf_hdr_t *buf)
919{
920 arc_buf_hdr_t *fbuf, **bufp;
921 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
922
923 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
924 ASSERT(HDR_IN_HASH_TABLE(buf));
925
926 bufp = &buf_hash_table.ht_table[idx];
927 while ((fbuf = *bufp) != buf) {
928 ASSERT(fbuf != NULL);
929 bufp = &fbuf->b_hash_next;
930 }
931 *bufp = buf->b_hash_next;
932 buf->b_hash_next = NULL;
933 buf->b_flags &= ~ARC_IN_HASH_TABLE;
934
935 /* collect some hash table performance data */
936 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
937
938 if (buf_hash_table.ht_table[idx] &&
939 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
940 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
941}
942
943/*
944 * Global data structures and functions for the buf kmem cache.
945 */
946static kmem_cache_t *hdr_cache;
947static kmem_cache_t *buf_cache;
948
949static void
950buf_fini(void)
951{
952 int i;
953
954 kmem_free(buf_hash_table.ht_table,
955 (buf_hash_table.ht_mask + 1) * sizeof (void *));
956 for (i = 0; i < BUF_LOCKS; i++)
957 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
958 kmem_cache_destroy(hdr_cache);
959 kmem_cache_destroy(buf_cache);
960}
961
962/*
963 * Constructor callback - called when the cache is empty
964 * and a new buf is requested.
965 */
966/* ARGSUSED */
967static int
968hdr_cons(void *vbuf, void *unused, int kmflag)
969{
970 arc_buf_hdr_t *buf = vbuf;
971
972 bzero(buf, sizeof (arc_buf_hdr_t));
973 refcount_create(&buf->b_refcnt);
974 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
975 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
976 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
977
978 return (0);
979}
980
981/* ARGSUSED */
982static int
983buf_cons(void *vbuf, void *unused, int kmflag)
984{
985 arc_buf_t *buf = vbuf;
986
987 bzero(buf, sizeof (arc_buf_t));
988 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
989 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
990
991 return (0);
992}
993
994/*
995 * Destructor callback - called when a cached buf is
996 * no longer required.
997 */
998/* ARGSUSED */
999static void
1000hdr_dest(void *vbuf, void *unused)
1001{
1002 arc_buf_hdr_t *buf = vbuf;
1003
1004 ASSERT(BUF_EMPTY(buf));
1005 refcount_destroy(&buf->b_refcnt);
1006 cv_destroy(&buf->b_cv);
1007 mutex_destroy(&buf->b_freeze_lock);
1008 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1009}
1010
1011/* ARGSUSED */
1012static void
1013buf_dest(void *vbuf, void *unused)
1014{
1015 arc_buf_t *buf = vbuf;
1016
1017 mutex_destroy(&buf->b_evict_lock);
1018 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1019}
1020
1021/*
1022 * Reclaim callback -- invoked when memory is low.
1023 */
1024/* ARGSUSED */
1025static void
1026hdr_recl(void *unused)
1027{
1028 dprintf("hdr_recl called\n");
1029 /*
1030 * umem calls the reclaim func when we destroy the buf cache,
1031 * which is after we do arc_fini().
1032 */
1033 if (!arc_dead)
1034 cv_signal(&arc_reclaim_thr_cv);
1035}
1036
1037static void
1038buf_init(void)
1039{
1040 uint64_t *ct;
1041 uint64_t hsize = 1ULL << 12;
1042 int i, j;
1043
1044 /*
1045 * The hash table is big enough to fill all of physical memory
1046 * with an average 64K block size. The table will take up
1047 * totalmem*sizeof(void*)/64K (e.g. 128KB/GB with 8-byte pointers).
1048 */
1049 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
1050 hsize <<= 1;
1051retry:
1052 buf_hash_table.ht_mask = hsize - 1;
1053 buf_hash_table.ht_table =
1054 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1055 if (buf_hash_table.ht_table == NULL) {
1056 ASSERT(hsize > (1ULL << 8));
1057 hsize >>= 1;
1058 goto retry;
1059 }
1060
1061 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1062 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1063 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1064 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1065
1066 for (i = 0; i < 256; i++)
1067 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1068 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1069
1070 for (i = 0; i < BUF_LOCKS; i++) {
1071 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1072 NULL, MUTEX_DEFAULT, NULL);
1073 }
1074}
1075
1076#define ARC_MINTIME (hz>>4) /* 62 ms */
1077
1078static void
1079arc_cksum_verify(arc_buf_t *buf)
1080{
1081 zio_cksum_t zc;
1082
1083 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1084 return;
1085
1086 mutex_enter(&buf->b_hdr->b_freeze_lock);
1087 if (buf->b_hdr->b_freeze_cksum == NULL ||
1088 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1089 mutex_exit(&buf->b_hdr->b_freeze_lock);
1090 return;
1091 }
1092 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1093 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1094 panic("buffer modified while frozen!");
1095 mutex_exit(&buf->b_hdr->b_freeze_lock);
1096}
1097
1098static int
1099arc_cksum_equal(arc_buf_t *buf)
1100{
1101 zio_cksum_t zc;
1102 int equal;
1103
1104 mutex_enter(&buf->b_hdr->b_freeze_lock);
1105 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1106 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1107 mutex_exit(&buf->b_hdr->b_freeze_lock);
1108
1109 return (equal);
1110}
1111
1112static void
1113arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1114{
1115 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1116 return;
1117
1118 mutex_enter(&buf->b_hdr->b_freeze_lock);
1119 if (buf->b_hdr->b_freeze_cksum != NULL) {
1120 mutex_exit(&buf->b_hdr->b_freeze_lock);
1121 return;
1122 }
1123 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1124 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1125 buf->b_hdr->b_freeze_cksum);
1126 mutex_exit(&buf->b_hdr->b_freeze_lock);
1127#ifdef illumos
1128 arc_buf_watch(buf);
1129#endif /* illumos */
1130}
1131
1132#ifdef illumos
1133#ifndef _KERNEL
1134typedef struct procctl {
1135 long cmd;
1136 prwatch_t prwatch;
1137} procctl_t;
1138#endif
1139
1140/* ARGSUSED */
1141static void
1142arc_buf_unwatch(arc_buf_t *buf)
1143{
1144#ifndef _KERNEL
1145 if (arc_watch) {
1146 int result;
1147 procctl_t ctl;
1148 ctl.cmd = PCWATCH;
1149 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1150 ctl.prwatch.pr_size = 0;
1151 ctl.prwatch.pr_wflags = 0;
1152 result = write(arc_procfd, &ctl, sizeof (ctl));
1153 ASSERT3U(result, ==, sizeof (ctl));
1154 }
1155#endif
1156}
1157
1158/* ARGSUSED */
1159static void
1160arc_buf_watch(arc_buf_t *buf)
1161{
1162#ifndef _KERNEL
1163 if (arc_watch) {
1164 int result;
1165 procctl_t ctl;
1166 ctl.cmd = PCWATCH;
1167 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1168 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1169 ctl.prwatch.pr_wflags = WA_WRITE;
1170 result = write(arc_procfd, &ctl, sizeof (ctl));
1171 ASSERT3U(result, ==, sizeof (ctl));
1172 }
1173#endif
1174}
1175#endif /* illumos */
1176
1177void
1178arc_buf_thaw(arc_buf_t *buf)
1179{
1180 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1181 if (buf->b_hdr->b_state != arc_anon)
1182 panic("modifying non-anon buffer!");
1183 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1184 panic("modifying buffer while i/o in progress!");
1185 arc_cksum_verify(buf);
1186 }
1187
1188 mutex_enter(&buf->b_hdr->b_freeze_lock);
1189 if (buf->b_hdr->b_freeze_cksum != NULL) {
1190 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1191 buf->b_hdr->b_freeze_cksum = NULL;
1192 }
1193
1194 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1195 if (buf->b_hdr->b_thawed)
1196 kmem_free(buf->b_hdr->b_thawed, 1);
1197 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1198 }
1199
1200 mutex_exit(&buf->b_hdr->b_freeze_lock);
1201
1202#ifdef illumos
1203 arc_buf_unwatch(buf);
1204#endif /* illumos */
1205}
1206
1207void
1208arc_buf_freeze(arc_buf_t *buf)
1209{
1210 kmutex_t *hash_lock;
1211
1212 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1213 return;
1214
1215 hash_lock = HDR_LOCK(buf->b_hdr);
1216 mutex_enter(hash_lock);
1217
1218 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1219 buf->b_hdr->b_state == arc_anon);
1220 arc_cksum_compute(buf, B_FALSE);
1221 mutex_exit(hash_lock);
1222
1223}
1224
1225static void
1226get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1227{
1228 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1229
1230 if (ab->b_type == ARC_BUFC_METADATA)
1231 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1232 else {
1233 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1234 buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1235 }
1236
1237 *list = &state->arcs_lists[buf_hashid];
1238 *lock = ARCS_LOCK(state, buf_hashid);
1239}
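/*
 * In other words, a metadata header always maps to one of
 * arcs_lists[0 .. ARC_BUFC_NUMMETADATALISTS - 1] and a data header to one
 * of the remaining ARC_BUFC_NUMDATALISTS lists, each guarded by the
 * matching ARCS_LOCK. The callers below follow the pattern:
 *
 *	list_t *list;
 *	kmutex_t *lock;
 *
 *	get_buf_info(ab, ab->b_state, &list, &lock);
 *	mutex_enter(lock);
 *	... add or remove ab on 'list' ...
 *	mutex_exit(lock);
 */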
1240
1241
1242static void
1243add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1244{
1245 ASSERT(MUTEX_HELD(hash_lock));
1246
1247 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1248 (ab->b_state != arc_anon)) {
1249 uint64_t delta = ab->b_size * ab->b_datacnt;
1250 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1251 list_t *list;
1252 kmutex_t *lock;
1253
1254 get_buf_info(ab, ab->b_state, &list, &lock);
1255 ASSERT(!MUTEX_HELD(lock));
1256 mutex_enter(lock);
1257 ASSERT(list_link_active(&ab->b_arc_node));
1258 list_remove(list, ab);
1259 if (GHOST_STATE(ab->b_state)) {
1260 ASSERT0(ab->b_datacnt);
1261 ASSERT3P(ab->b_buf, ==, NULL);
1262 delta = ab->b_size;
1263 }
1264 ASSERT(delta > 0);
1265 ASSERT3U(*size, >=, delta);
1266 atomic_add_64(size, -delta);
1267 mutex_exit(lock);
1268 /* remove the prefetch flag if we get a reference */
1269 if (ab->b_flags & ARC_PREFETCH)
1270 ab->b_flags &= ~ARC_PREFETCH;
1271 }
1272}
1273
1274static int
1275remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1276{
1277 int cnt;
1278 arc_state_t *state = ab->b_state;
1279
1280 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1281 ASSERT(!GHOST_STATE(state));
1282
1283 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1284 (state != arc_anon)) {
1285 uint64_t *size = &state->arcs_lsize[ab->b_type];
1286 list_t *list;
1287 kmutex_t *lock;
1288
1289 get_buf_info(ab, state, &list, &lock);
1290 ASSERT(!MUTEX_HELD(lock));
1291 mutex_enter(lock);
1292 ASSERT(!list_link_active(&ab->b_arc_node));
1293 list_insert_head(list, ab);
1294 ASSERT(ab->b_datacnt > 0);
1295 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1296 mutex_exit(lock);
1297 }
1298 return (cnt);
1299}
1300
1301/*
1302 * Move the supplied buffer to the indicated state. The mutex
1303 * for the buffer must be held by the caller.
1304 */
1305static void
1306arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1307{
1308 arc_state_t *old_state = ab->b_state;
1309 int64_t refcnt = refcount_count(&ab->b_refcnt);
1310 uint64_t from_delta, to_delta;
1311 list_t *list;
1312 kmutex_t *lock;
1313
1314 ASSERT(MUTEX_HELD(hash_lock));
1315 ASSERT(new_state != old_state);
1316 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1317 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1318 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1319
1320 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1321
1322 /*
1323 * If this buffer is evictable, transfer it from the
1324 * old state list to the new state list.
1325 */
1326 if (refcnt == 0) {
1327 if (old_state != arc_anon) {
1328 int use_mutex;
1329 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1330
1331 get_buf_info(ab, old_state, &list, &lock);
1332 use_mutex = !MUTEX_HELD(lock);
1333 if (use_mutex)
1334 mutex_enter(lock);
1335
1336 ASSERT(list_link_active(&ab->b_arc_node));
1337 list_remove(list, ab);
1338
1339 /*
1340 * If prefetching out of the ghost cache,
1341 * we will have a non-zero datacnt.
1342 */
1343 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1344 /* ghost elements have a ghost size */
1345 ASSERT(ab->b_buf == NULL);
1346 from_delta = ab->b_size;
1347 }
1348 ASSERT3U(*size, >=, from_delta);
1349 atomic_add_64(size, -from_delta);
1350
1351 if (use_mutex)
1352 mutex_exit(lock);
1353 }
1354 if (new_state != arc_anon) {
1355 int use_mutex;
1356 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1357
1358 get_buf_info(ab, new_state, &list, &lock);
1359 use_mutex = !MUTEX_HELD(lock);
1360 if (use_mutex)
1361 mutex_enter(lock);
1362
1363 list_insert_head(list, ab);
1364
1365 /* ghost elements have a ghost size */
1366 if (GHOST_STATE(new_state)) {
1367 ASSERT(ab->b_datacnt == 0);
1368 ASSERT(ab->b_buf == NULL);
1369 to_delta = ab->b_size;
1370 }
1371 atomic_add_64(size, to_delta);
1372
1373 if (use_mutex)
1374 mutex_exit(lock);
1375 }
1376 }
1377
1378 ASSERT(!BUF_EMPTY(ab));
1379 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1380 buf_hash_remove(ab);
1381
1382 /* adjust state sizes */
1383 if (to_delta)
1384 atomic_add_64(&new_state->arcs_size, to_delta);
1385 if (from_delta) {
1386 ASSERT3U(old_state->arcs_size, >=, from_delta);
1387 atomic_add_64(&old_state->arcs_size, -from_delta);
1388 }
1389 ab->b_state = new_state;
1390
1391 /* adjust l2arc hdr stats */
1392 if (new_state == arc_l2c_only)
1393 l2arc_hdr_stat_add();
1394 else if (old_state == arc_l2c_only)
1395 l2arc_hdr_stat_remove();
1396}
1397
1398void
1399arc_space_consume(uint64_t space, arc_space_type_t type)
1400{
1401 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1402
1403 switch (type) {
1404 case ARC_SPACE_DATA:
1405 ARCSTAT_INCR(arcstat_data_size, space);
1406 break;
1407 case ARC_SPACE_OTHER:
1408 ARCSTAT_INCR(arcstat_other_size, space);
1409 break;
1410 case ARC_SPACE_HDRS:
1411 ARCSTAT_INCR(arcstat_hdr_size, space);
1412 break;
1413 case ARC_SPACE_L2HDRS:
1414 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1415 break;
1416 }
1417
1418 atomic_add_64(&arc_meta_used, space);
1419 atomic_add_64(&arc_size, space);
1420}
1421
1422void
1423arc_space_return(uint64_t space, arc_space_type_t type)
1424{
1425 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1426
1427 switch (type) {
1428 case ARC_SPACE_DATA:
1429 ARCSTAT_INCR(arcstat_data_size, -space);
1430 break;
1431 case ARC_SPACE_OTHER:
1432 ARCSTAT_INCR(arcstat_other_size, -space);
1433 break;
1434 case ARC_SPACE_HDRS:
1435 ARCSTAT_INCR(arcstat_hdr_size, -space);
1436 break;
1437 case ARC_SPACE_L2HDRS:
1438 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1439 break;
1440 }
1441
1442 ASSERT(arc_meta_used >= space);
1443 if (arc_meta_max < arc_meta_used)
1444 arc_meta_max = arc_meta_used;
1445 atomic_add_64(&arc_meta_used, -space);
1446 ASSERT(arc_size >= space);
1447 atomic_add_64(&arc_size, -space);
1448}
1449
1450void *
1451arc_data_buf_alloc(uint64_t size)
1452{
1453 if (arc_evict_needed(ARC_BUFC_DATA))
1454 cv_signal(&arc_reclaim_thr_cv);
1455 atomic_add_64(&arc_size, size);
1456 return (zio_data_buf_alloc(size));
1457}
1458
1459void
1460arc_data_buf_free(void *buf, uint64_t size)
1461{
1462 zio_data_buf_free(buf, size);
1463 ASSERT(arc_size >= size);
1464 atomic_add_64(&arc_size, -size);
1465}
1466
1467arc_buf_t *
1468arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1469{
1470 arc_buf_hdr_t *hdr;
1471 arc_buf_t *buf;
1472
1473 ASSERT3U(size, >, 0);
1474 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1475 ASSERT(BUF_EMPTY(hdr));
1476 hdr->b_size = size;
1477 hdr->b_type = type;
1478 hdr->b_spa = spa_load_guid(spa);
1479 hdr->b_state = arc_anon;
1480 hdr->b_arc_access = 0;
1481 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1482 buf->b_hdr = hdr;
1483 buf->b_data = NULL;
1484 buf->b_efunc = NULL;
1485 buf->b_private = NULL;
1486 buf->b_next = NULL;
1487 hdr->b_buf = buf;
1488 arc_get_data_buf(buf);
1489 hdr->b_datacnt = 1;
1490 hdr->b_flags = 0;
1491 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1492 (void) refcount_add(&hdr->b_refcnt, tag);
1493
1494 return (buf);
1495}
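/*
 * A freshly allocated buffer is anonymous and referenced by 'tag'; the
 * simplest lifecycle, assuming no eviction callback is registered, is:
 *
 *	arc_buf_t *buf = arc_buf_alloc(spa, size, tag, ARC_BUFC_DATA);
 *
 *	... fill buf->b_data ...
 *
 *	(void) arc_buf_remove_ref(buf, tag);
 *
 * which frees the buffer immediately while it is still in arc_anon.
 */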
1496
1497static char *arc_onloan_tag = "onloan";
1498
1499/*
1500 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1501 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1502 * buffers must be returned to the arc before they can be used by the DMU or
1503 * freed.
1504 */
1505arc_buf_t *
1506arc_loan_buf(spa_t *spa, int size)
1507{
1508 arc_buf_t *buf;
1509
1510 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1511
1512 atomic_add_64(&arc_loaned_bytes, size);
1513 return (buf);
1514}
1515
1516/*
1517 * Return a loaned arc buffer to the arc.
1518 */
1519void
1520arc_return_buf(arc_buf_t *buf, void *tag)
1521{
1522 arc_buf_hdr_t *hdr = buf->b_hdr;
1523
1524 ASSERT(buf->b_data != NULL);
1525 (void) refcount_add(&hdr->b_refcnt, tag);
1526 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1527
1528 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1529}
1530
1531/* Detach an arc_buf from a dbuf (tag) */
1532void
1533arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1534{
1535 arc_buf_hdr_t *hdr;
1536
1537 ASSERT(buf->b_data != NULL);
1538 hdr = buf->b_hdr;
1539 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1540 (void) refcount_remove(&hdr->b_refcnt, tag);
1541 buf->b_efunc = NULL;
1542 buf->b_private = NULL;
1543
1544 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1545}
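/*
 * A hypothetical loan/return round trip (the DMU is the real consumer of
 * this interface):
 *
 *	arc_buf_t *buf = arc_loan_buf(spa, size);
 *
 *	... fill buf->b_data; arc_loaned_bytes now includes 'size' ...
 *
 *	arc_return_buf(buf, tag);
 *
 * After arc_return_buf() the reference is held by 'tag' instead of the
 * internal arc_onloan_tag, and arc_loaned_bytes shrinks again.
 */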
1546
1547static arc_buf_t *
1548arc_buf_clone(arc_buf_t *from)
1549{
1550 arc_buf_t *buf;
1551 arc_buf_hdr_t *hdr = from->b_hdr;
1552 uint64_t size = hdr->b_size;
1553
1554 ASSERT(hdr->b_state != arc_anon);
1555
1556 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1557 buf->b_hdr = hdr;
1558 buf->b_data = NULL;
1559 buf->b_efunc = NULL;
1560 buf->b_private = NULL;
1561 buf->b_next = hdr->b_buf;
1562 hdr->b_buf = buf;
1563 arc_get_data_buf(buf);
1564 bcopy(from->b_data, buf->b_data, size);
1565
1566 /*
1567 * This buffer already exists in the arc so create a duplicate
1568 * copy for the caller. If the buffer is associated with user data
1569 * then track the size and number of duplicates. These stats will be
1570 * updated as duplicate buffers are created and destroyed.
1571 */
1572 if (hdr->b_type == ARC_BUFC_DATA) {
1573 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1574 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1575 }
1576 hdr->b_datacnt += 1;
1577 return (buf);
1578}
1579
1580void
1581arc_buf_add_ref(arc_buf_t *buf, void* tag)
1582{
1583 arc_buf_hdr_t *hdr;
1584 kmutex_t *hash_lock;
1585
1586 /*
1587 * Check to see if this buffer is evicted. Callers
1588 * must verify b_data != NULL to know if the add_ref
1589 * was successful.
1590 */
1591 mutex_enter(&buf->b_evict_lock);
1592 if (buf->b_data == NULL) {
1593 mutex_exit(&buf->b_evict_lock);
1594 return;
1595 }
1596 hash_lock = HDR_LOCK(buf->b_hdr);
1597 mutex_enter(hash_lock);
1598 hdr = buf->b_hdr;
1599 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1600 mutex_exit(&buf->b_evict_lock);
1601
1602 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1603 add_reference(hdr, hash_lock, tag);
1604 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1605 arc_access(hdr, hash_lock);
1606 mutex_exit(hash_lock);
1607 ARCSTAT_BUMP(arcstat_hits);
1608 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1609 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1610 data, metadata, hits);
1611}
1612
1613/*
1614 * Free the arc data buffer. If it is an l2arc write in progress,
1615 * the buffer is placed on l2arc_free_on_write to be freed later.
1616 */
1617static void
1618arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1619{
1620 arc_buf_hdr_t *hdr = buf->b_hdr;
1621
1622 if (HDR_L2_WRITING(hdr)) {
1623 l2arc_data_free_t *df;
1624 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1625 df->l2df_data = buf->b_data;
1626 df->l2df_size = hdr->b_size;
1627 df->l2df_func = free_func;
1628 mutex_enter(&l2arc_free_on_write_mtx);
1629 list_insert_head(l2arc_free_on_write, df);
1630 mutex_exit(&l2arc_free_on_write_mtx);
1631 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1632 } else {
1633 free_func(buf->b_data, hdr->b_size);
1634 }
1635}
1636
1637static void
1638arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1639{
1640 arc_buf_t **bufp;
1641
1642 /* free up data associated with the buf */
1643 if (buf->b_data) {
1644 arc_state_t *state = buf->b_hdr->b_state;
1645 uint64_t size = buf->b_hdr->b_size;
1646 arc_buf_contents_t type = buf->b_hdr->b_type;
1647
1648 arc_cksum_verify(buf);
1649#ifdef illumos
1650 arc_buf_unwatch(buf);
1651#endif /* illumos */
1652
1653 if (!recycle) {
1654 if (type == ARC_BUFC_METADATA) {
1655 arc_buf_data_free(buf, zio_buf_free);
1656 arc_space_return(size, ARC_SPACE_DATA);
1657 } else {
1658 ASSERT(type == ARC_BUFC_DATA);
1659 arc_buf_data_free(buf, zio_data_buf_free);
1660 ARCSTAT_INCR(arcstat_data_size, -size);
1661 atomic_add_64(&arc_size, -size);
1662 }
1663 }
1664 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1665 uint64_t *cnt = &state->arcs_lsize[type];
1666
1667 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1668 ASSERT(state != arc_anon);
1669
1670 ASSERT3U(*cnt, >=, size);
1671 atomic_add_64(cnt, -size);
1672 }
1673 ASSERT3U(state->arcs_size, >=, size);
1674 atomic_add_64(&state->arcs_size, -size);
1675 buf->b_data = NULL;
1676
1677 /*
1678 * If we're destroying a duplicate buffer make sure
1679 * that the appropriate statistics are updated.
1680 */
1681 if (buf->b_hdr->b_datacnt > 1 &&
1682 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1683 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1684 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1685 }
1686 ASSERT(buf->b_hdr->b_datacnt > 0);
1687 buf->b_hdr->b_datacnt -= 1;
1688 }
1689
1690 /* only remove the buf if requested */
1691 if (!all)
1692 return;
1693
1694 /* remove the buf from the hdr list */
1695 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1696 continue;
1697 *bufp = buf->b_next;
1698 buf->b_next = NULL;
1699
1700 ASSERT(buf->b_efunc == NULL);
1701
1702 /* clean up the buf */
1703 buf->b_hdr = NULL;
1704 kmem_cache_free(buf_cache, buf);
1705}
1706
1707static void
1708arc_hdr_destroy(arc_buf_hdr_t *hdr)
1709{
1710 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1711 ASSERT3P(hdr->b_state, ==, arc_anon);
1712 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1713 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1714
1715 if (l2hdr != NULL) {
1716 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1717 /*
1718 * To prevent arc_free() and l2arc_evict() from
1719 * attempting to free the same buffer at the same time,
1720 * a FREE_IN_PROGRESS flag is given to arc_free() to
1721 * give it priority. l2arc_evict() can't destroy this
1722 * header while we are waiting on l2arc_buflist_mtx.
1723 *
1724 * The hdr may be removed from l2ad_buflist before we
1725 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1726 */
1727 if (!buflist_held) {
1728 mutex_enter(&l2arc_buflist_mtx);
1729 l2hdr = hdr->b_l2hdr;
1730 }
1731
1732 if (l2hdr != NULL) {
1733 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1734 hdr->b_size, 0);
1735 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1736 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1737 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1738 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1739 if (hdr->b_state == arc_l2c_only)
1740 l2arc_hdr_stat_remove();
1741 hdr->b_l2hdr = NULL;
1742 }
1743
1744 if (!buflist_held)
1745 mutex_exit(&l2arc_buflist_mtx);
1746 }
1747
1748 if (!BUF_EMPTY(hdr)) {
1749 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1750 buf_discard_identity(hdr);
1751 }
1752 while (hdr->b_buf) {
1753 arc_buf_t *buf = hdr->b_buf;
1754
1755 if (buf->b_efunc) {
1756 mutex_enter(&arc_eviction_mtx);
1757 mutex_enter(&buf->b_evict_lock);
1758 ASSERT(buf->b_hdr != NULL);
1759 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1760 hdr->b_buf = buf->b_next;
1761 buf->b_hdr = &arc_eviction_hdr;
1762 buf->b_next = arc_eviction_list;
1763 arc_eviction_list = buf;
1764 mutex_exit(&buf->b_evict_lock);
1765 mutex_exit(&arc_eviction_mtx);
1766 } else {
1767 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1768 }
1769 }
1770 if (hdr->b_freeze_cksum != NULL) {
1771 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1772 hdr->b_freeze_cksum = NULL;
1773 }
1774 if (hdr->b_thawed) {
1775 kmem_free(hdr->b_thawed, 1);
1776 hdr->b_thawed = NULL;
1777 }
1778
1779 ASSERT(!list_link_active(&hdr->b_arc_node));
1780 ASSERT3P(hdr->b_hash_next, ==, NULL);
1781 ASSERT3P(hdr->b_acb, ==, NULL);
1782 kmem_cache_free(hdr_cache, hdr);
1783}
1784
1785void
1786arc_buf_free(arc_buf_t *buf, void *tag)
1787{
1788 arc_buf_hdr_t *hdr = buf->b_hdr;
1789 int hashed = hdr->b_state != arc_anon;
1790
1791 ASSERT(buf->b_efunc == NULL);
1792 ASSERT(buf->b_data != NULL);
1793
1794 if (hashed) {
1795 kmutex_t *hash_lock = HDR_LOCK(hdr);
1796
1797 mutex_enter(hash_lock);
1798 hdr = buf->b_hdr;
1799 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1800
1801 (void) remove_reference(hdr, hash_lock, tag);
1802 if (hdr->b_datacnt > 1) {
1803 arc_buf_destroy(buf, FALSE, TRUE);
1804 } else {
1805 ASSERT(buf == hdr->b_buf);
1806 ASSERT(buf->b_efunc == NULL);
1807 hdr->b_flags |= ARC_BUF_AVAILABLE;
1808 }
1809 mutex_exit(hash_lock);
1810 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1811 int destroy_hdr;
1812 /*
1813 * We are in the middle of an async write. Don't destroy
1814 * this buffer unless the write completes before we finish
1815 * decrementing the reference count.
1816 */
1817 mutex_enter(&arc_eviction_mtx);
1818 (void) remove_reference(hdr, NULL, tag);
1819 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1820 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1821 mutex_exit(&arc_eviction_mtx);
1822 if (destroy_hdr)
1823 arc_hdr_destroy(hdr);
1824 } else {
1825 if (remove_reference(hdr, NULL, tag) > 0)
1826 arc_buf_destroy(buf, FALSE, TRUE);
1827 else
1828 arc_hdr_destroy(hdr);
1829 }
1830}
1831
1832boolean_t
1833arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1834{
1835 arc_buf_hdr_t *hdr = buf->b_hdr;
1836 kmutex_t *hash_lock = HDR_LOCK(hdr);
1837 boolean_t no_callback = (buf->b_efunc == NULL);
1838
1839 if (hdr->b_state == arc_anon) {
1840 ASSERT(hdr->b_datacnt == 1);
1841 arc_buf_free(buf, tag);
1842 return (no_callback);
1843 }
1844
1845 mutex_enter(hash_lock);
1846 hdr = buf->b_hdr;
1847 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1848 ASSERT(hdr->b_state != arc_anon);
1849 ASSERT(buf->b_data != NULL);
1850
1851 (void) remove_reference(hdr, hash_lock, tag);
1852 if (hdr->b_datacnt > 1) {
1853 if (no_callback)
1854 arc_buf_destroy(buf, FALSE, TRUE);
1855 } else if (no_callback) {
1856 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1857 ASSERT(buf->b_efunc == NULL);
1858 hdr->b_flags |= ARC_BUF_AVAILABLE;
1859 }
1860 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1861 refcount_is_zero(&hdr->b_refcnt));
1862 mutex_exit(hash_lock);
1863 return (no_callback);
1864}
1865
1866int
1867arc_buf_size(arc_buf_t *buf)
1868{
1869 return (buf->b_hdr->b_size);
1870}
1871
1872/*
1873 * Called from the DMU to determine if the current buffer should be
1874 * evicted. In order to ensure proper locking, the eviction must be initiated
1875 * from the DMU. Return true if the buffer is associated with user data and
1876 * duplicate buffers still exist.
1877 */
1878boolean_t
1879arc_buf_eviction_needed(arc_buf_t *buf)
1880{
1881 arc_buf_hdr_t *hdr;
1882 boolean_t evict_needed = B_FALSE;
1883
1884 if (zfs_disable_dup_eviction)
1885 return (B_FALSE);
1886
1887 mutex_enter(&buf->b_evict_lock);
1888 hdr = buf->b_hdr;
1889 if (hdr == NULL) {
1890 /*
1891 * We are in arc_do_user_evicts(); let that function
1892 * perform the eviction.
1893 */
1894 ASSERT(buf->b_data == NULL);
1895 mutex_exit(&buf->b_evict_lock);
1896 return (B_FALSE);
1897 } else if (buf->b_data == NULL) {
1898 /*
1899 * We have already been added to the arc eviction list;
1900 * recommend eviction.
1901 */
1902 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1903 mutex_exit(&buf->b_evict_lock);
1904 return (B_TRUE);
1905 }
1906
1907 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1908 evict_needed = B_TRUE;
1909
1910 mutex_exit(&buf->b_evict_lock);
1911 return (evict_needed);
1912}
1913
1914/*
1915 * Evict buffers from list until we've removed the specified number of
1916 * bytes. Move the removed buffers to the appropriate evict state.
1917 * If the recycle flag is set, then attempt to "recycle" a buffer:
1918 * - look for a buffer to evict that is `bytes' long.
1919 * - return the data block from this buffer rather than freeing it.
1920 * This flag is used by callers that are trying to make space for a
1921 * new buffer in a full arc cache.
1922 *
1923 * This function makes a "best effort". It skips over any buffers
1924 * it can't get a hash_lock on, and so may not catch all candidates.
1925 * It may also return without evicting as much space as requested.
1926 */
1927static void *
1928arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1929 arc_buf_contents_t type)
1930{
1931 arc_state_t *evicted_state;
1932 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1933 int64_t bytes_remaining;
1934 arc_buf_hdr_t *ab, *ab_prev = NULL;
1935 list_t *evicted_list, *list, *evicted_list_start, *list_start;
1936 kmutex_t *lock, *evicted_lock;
1937 kmutex_t *hash_lock;
1938 boolean_t have_lock;
1939 void *stolen = NULL;
1940 static int evict_metadata_offset, evict_data_offset;
1941 int i, idx, offset, list_count, count;
1942
1943 ASSERT(state == arc_mru || state == arc_mfu);
1944
1945 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1946
1947 if (type == ARC_BUFC_METADATA) {
1948 offset = 0;
1949 list_count = ARC_BUFC_NUMMETADATALISTS;
1950 list_start = &state->arcs_lists[0];
1951 evicted_list_start = &evicted_state->arcs_lists[0];
1952 idx = evict_metadata_offset;
1953 } else {
1954 offset = ARC_BUFC_NUMMETADATALISTS;
1955 list_start = &state->arcs_lists[offset];
1956 evicted_list_start = &evicted_state->arcs_lists[offset];
1957 list_count = ARC_BUFC_NUMDATALISTS;
1958 idx = evict_data_offset;
1959 }
1960 bytes_remaining = evicted_state->arcs_lsize[type];
1961 count = 0;
1962
1963evict_start:
1964 list = &list_start[idx];
1965 evicted_list = &evicted_list_start[idx];
1966 lock = ARCS_LOCK(state, (offset + idx));
1967 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
1968
1969 mutex_enter(lock);
1970 mutex_enter(evicted_lock);
1971
1972 for (ab = list_tail(list); ab; ab = ab_prev) {
1973 ab_prev = list_prev(list, ab);
1974 bytes_remaining -= (ab->b_size * ab->b_datacnt);
1975 /* prefetch buffers have a minimum lifespan */
1976 if (HDR_IO_IN_PROGRESS(ab) ||
1977 (spa && ab->b_spa != spa) ||
1978 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1979 ddi_get_lbolt() - ab->b_arc_access <
1980 arc_min_prefetch_lifespan)) {
1981 skipped++;
1982 continue;
1983 }
1984 /* "lookahead" for better eviction candidate */
1985 if (recycle && ab->b_size != bytes &&
1986 ab_prev && ab_prev->b_size == bytes)
1987 continue;
1988 hash_lock = HDR_LOCK(ab);
1989 have_lock = MUTEX_HELD(hash_lock);
1990 if (have_lock || mutex_tryenter(hash_lock)) {
1991 ASSERT0(refcount_count(&ab->b_refcnt));
1992 ASSERT(ab->b_datacnt > 0);
1993 while (ab->b_buf) {
1994 arc_buf_t *buf = ab->b_buf;
1995 if (!mutex_tryenter(&buf->b_evict_lock)) {
1996 missed += 1;
1997 break;
1998 }
1999 if (buf->b_data) {
2000 bytes_evicted += ab->b_size;
2001 if (recycle && ab->b_type == type &&
2002 ab->b_size == bytes &&
2003 !HDR_L2_WRITING(ab)) {
2004 stolen = buf->b_data;
2005 recycle = FALSE;
2006 }
2007 }
2008 if (buf->b_efunc) {
2009 mutex_enter(&arc_eviction_mtx);
2010 arc_buf_destroy(buf,
2011 buf->b_data == stolen, FALSE);
2012 ab->b_buf = buf->b_next;
2013 buf->b_hdr = &arc_eviction_hdr;
2014 buf->b_next = arc_eviction_list;
2015 arc_eviction_list = buf;
2016 mutex_exit(&arc_eviction_mtx);
2017 mutex_exit(&buf->b_evict_lock);
2018 } else {
2019 mutex_exit(&buf->b_evict_lock);
2020 arc_buf_destroy(buf,
2021 buf->b_data == stolen, TRUE);
2022 }
2023 }
2024
2025 if (ab->b_l2hdr) {
2026 ARCSTAT_INCR(arcstat_evict_l2_cached,
2027 ab->b_size);
2028 } else {
2029 if (l2arc_write_eligible(ab->b_spa, ab)) {
2030 ARCSTAT_INCR(arcstat_evict_l2_eligible,
2031 ab->b_size);
2032 } else {
2033 ARCSTAT_INCR(
2034 arcstat_evict_l2_ineligible,
2035 ab->b_size);
2036 }
2037 }
2038
2039 if (ab->b_datacnt == 0) {
2040 arc_change_state(evicted_state, ab, hash_lock);
2041 ASSERT(HDR_IN_HASH_TABLE(ab));
2042 ab->b_flags |= ARC_IN_HASH_TABLE;
2043 ab->b_flags &= ~ARC_BUF_AVAILABLE;
2044 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2045 }
2046 if (!have_lock)
2047 mutex_exit(hash_lock);
2048 if (bytes >= 0 && bytes_evicted >= bytes)
2049 break;
2050 if (bytes_remaining > 0) {
2051 mutex_exit(evicted_lock);
2052 mutex_exit(lock);
2053 idx = ((idx + 1) & (list_count - 1));
2054 count++;
2055 goto evict_start;
2056 }
2057 } else {
2058 missed += 1;
2059 }
2060 }
2061
2062 mutex_exit(evicted_lock);
2063 mutex_exit(lock);
2064
2065 idx = ((idx + 1) & (list_count - 1));
2066 count++;
2067
2068 if (bytes_evicted < bytes) {
2069 if (count < list_count)
2070 goto evict_start;
2071 else
2072 dprintf("only evicted %lld bytes from %x",
2073 (longlong_t)bytes_evicted, state);
2074 }
2075 if (type == ARC_BUFC_METADATA)
2076 evict_metadata_offset = idx;
2077 else
2078 evict_data_offset = idx;
2079
2080 if (skipped)
2081 ARCSTAT_INCR(arcstat_evict_skip, skipped);
2082
2083 if (missed)
2084 ARCSTAT_INCR(arcstat_mutex_miss, missed);
2085
2086 /*
2087 * We have just evicted some data into the ghost state, make
2088 * sure we also adjust the ghost state size if necessary.
2089 */
2090 if (arc_no_grow &&
2091 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size > arc_c) {
2092 int64_t mru_over = arc_anon->arcs_size + arc_mru->arcs_size +
2093 arc_mru_ghost->arcs_size - arc_c;
2094
2095 if (mru_over > 0 && arc_mru_ghost->arcs_lsize[type] > 0) {
2096 int64_t todelete =
2097 MIN(arc_mru_ghost->arcs_lsize[type], mru_over);
2098 arc_evict_ghost(arc_mru_ghost, 0, todelete);
2099 } else if (arc_mfu_ghost->arcs_lsize[type] > 0) {
2100 int64_t todelete = MIN(arc_mfu_ghost->arcs_lsize[type],
2101 arc_mru_ghost->arcs_size +
2102 arc_mfu_ghost->arcs_size - arc_c);
2103 arc_evict_ghost(arc_mfu_ghost, 0, todelete);
2104 }
2105 }
2106 if (stolen)
2107 ARCSTAT_BUMP(arcstat_stolen);
2108
2109 return (stolen);
2110}
2111
2112/*
2113 * Remove buffers from list until we've removed the specified number of
2114 * bytes. Destroy the buffers that are removed.
2115 */
2116static void
2117arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2118{
2119 arc_buf_hdr_t *ab, *ab_prev;
2120 arc_buf_hdr_t marker = { 0 };
2121 list_t *list, *list_start;
2122 kmutex_t *hash_lock, *lock;
2123 uint64_t bytes_deleted = 0;
2124 uint64_t bufs_skipped = 0;
2125 static int evict_offset;
2126 int list_count, idx = evict_offset;
2127 int offset, count = 0;
2128
2129 ASSERT(GHOST_STATE(state));
2130
2131 /*
2132 * data lists come after metadata lists
2133 */
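	/*
	 * Sketch of the arcs_lists layout assumed here and in arc_evict():
	 *
	 *	arcs_lists[0 .. ARC_BUFC_NUMMETADATALISTS - 1]	metadata
	 *	arcs_lists[ARC_BUFC_NUMMETADATALISTS .. ]	data
	 *
	 * so a data list at local index `idx' pairs with the lock
	 * ARCS_LOCK(state, idx + offset), and the round-robin step
	 * `idx = (idx + 1) & (list_count - 1)' relies on the list counts
	 * being powers of two.
	 */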
2134 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2135 list_count = ARC_BUFC_NUMDATALISTS;
2136 offset = ARC_BUFC_NUMMETADATALISTS;
2137
2138evict_start:
2139 list = &list_start[idx];
2140 lock = ARCS_LOCK(state, idx + offset);
2141
2142 mutex_enter(lock);
2143 for (ab = list_tail(list); ab; ab = ab_prev) {
2144 ab_prev = list_prev(list, ab);
2145 if (spa && ab->b_spa != spa)
2146 continue;
2147
2148 /* ignore markers */
2149 if (ab->b_spa == 0)
2150 continue;
2151
2152 hash_lock = HDR_LOCK(ab);
2153 /* caller may be trying to modify this buffer, skip it */
2154 if (MUTEX_HELD(hash_lock))
2155 continue;
2156 if (mutex_tryenter(hash_lock)) {
2157 ASSERT(!HDR_IO_IN_PROGRESS(ab));
2158 ASSERT(ab->b_buf == NULL);
2159 ARCSTAT_BUMP(arcstat_deleted);
2160 bytes_deleted += ab->b_size;
2161
2162 if (ab->b_l2hdr != NULL) {
2163 /*
2164 * This buffer is cached on the 2nd Level ARC;
2165 * don't destroy the header.
2166 */
2167 arc_change_state(arc_l2c_only, ab, hash_lock);
2168 mutex_exit(hash_lock);
2169 } else {
2170 arc_change_state(arc_anon, ab, hash_lock);
2171 mutex_exit(hash_lock);
2172 arc_hdr_destroy(ab);
2173 }
2174
2175 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2176 if (bytes >= 0 && bytes_deleted >= bytes)
2177 break;
2178 } else if (bytes < 0) {
2179 /*
2180 * Insert a list marker and then wait for the
2181			 * hash lock to become available. Once it's
2182 * available, restart from where we left off.
2183 */
2184 list_insert_after(list, ab, &marker);
2185 mutex_exit(lock);
2186 mutex_enter(hash_lock);
2187 mutex_exit(hash_lock);
2188 mutex_enter(lock);
2189 ab_prev = list_prev(list, &marker);
2190 list_remove(list, &marker);
2191 } else
2192 bufs_skipped += 1;
2193 }
2194 mutex_exit(lock);
2195 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2196 count++;
2197
2198 if (count < list_count)
2199 goto evict_start;
2200
2201 evict_offset = idx;
2202 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2203 (bytes < 0 || bytes_deleted < bytes)) {
2204 list_start = &state->arcs_lists[0];
2205 list_count = ARC_BUFC_NUMMETADATALISTS;
2206 offset = count = 0;
2207 goto evict_start;
2208 }
2209
2210 if (bufs_skipped) {
2211 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2212 ASSERT(bytes >= 0);
2213 }
2214
2215 if (bytes_deleted < bytes)
2216 dprintf("only deleted %lld bytes from %p",
2217 (longlong_t)bytes_deleted, state);
2218}
2219
2220static void
2221arc_adjust(void)
2222{
2223 int64_t adjustment, delta;
2224
2225 /*
2226 * Adjust MRU size
2227 */
2228
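	/*
	 * Sketch of the target computed below: trim the MRU side
	 * (anonymous + MRU + metadata overhead) down toward arc_p, but
	 * never plan to evict more than the overall overshoot:
	 *
	 *	adjustment = MIN(arc_size - arc_c,
	 *	    anon + mru + arc_meta_used - arc_p)
	 *
	 * Data buffers are evicted before metadata buffers.
	 */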
2229 adjustment = MIN((int64_t)(arc_size - arc_c),
2230 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2231 arc_p));
2232
2233 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2234 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2235 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2236 adjustment -= delta;
2237 }
2238
2239 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2240 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2241 (void) arc_evict(arc_mru, 0, delta, FALSE,
2242 ARC_BUFC_METADATA);
2243 }
2244
2245 /*
2246 * Adjust MFU size
2247 */
2248
2249 adjustment = arc_size - arc_c;
2250
2251 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2252 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2253 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2254 adjustment -= delta;
2255 }
2256
2257 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2258 int64_t delta = MIN(adjustment,
2259 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2260 (void) arc_evict(arc_mfu, 0, delta, FALSE,
2261 ARC_BUFC_METADATA);
2262 }
2263
2264 /*
2265 * Adjust ghost lists
2266 */
2267
2268 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2269
2270 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2271 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2272 arc_evict_ghost(arc_mru_ghost, 0, delta);
2273 }
2274
2275 adjustment =
2276 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2277
2278 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2279 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2280 arc_evict_ghost(arc_mfu_ghost, 0, delta);
2281 }
2282}
2283
2284static void
2285arc_do_user_evicts(void)
2286{
2287 static arc_buf_t *tmp_arc_eviction_list;
2288
2289 /*
2290	 * Move the list aside to avoid a lock order reversal (LOR)
2291 */
2292restart:
2293 mutex_enter(&arc_eviction_mtx);
2294 tmp_arc_eviction_list = arc_eviction_list;
2295 arc_eviction_list = NULL;
2296 mutex_exit(&arc_eviction_mtx);
2297
2298 while (tmp_arc_eviction_list != NULL) {
2299 arc_buf_t *buf = tmp_arc_eviction_list;
2300 tmp_arc_eviction_list = buf->b_next;
2301 mutex_enter(&buf->b_evict_lock);
2302 buf->b_hdr = NULL;
2303 mutex_exit(&buf->b_evict_lock);
2304
2305 if (buf->b_efunc != NULL)
2306 VERIFY(buf->b_efunc(buf) == 0);
2307
2308 buf->b_efunc = NULL;
2309 buf->b_private = NULL;
2310 kmem_cache_free(buf_cache, buf);
2311 }
2312
2313 if (arc_eviction_list != NULL)
2314 goto restart;
2315}
2316
2317/*
2318 * Flush all *evictable* data from the cache for the given spa.
2319 * NOTE: this will not touch "active" (i.e. referenced) data.
2320 */
2321void
2322arc_flush(spa_t *spa)
2323{
2324 uint64_t guid = 0;
2325
2326 if (spa)
2327 guid = spa_load_guid(spa);
2328
2329 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2330 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2331 if (spa)
2332 break;
2333 }
2334 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2335 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2336 if (spa)
2337 break;
2338 }
2339 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2340 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2341 if (spa)
2342 break;
2343 }
2344 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2345 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2346 if (spa)
2347 break;
2348 }
2349
2350 arc_evict_ghost(arc_mru_ghost, guid, -1);
2351 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2352
2353 mutex_enter(&arc_reclaim_thr_lock);
2354 arc_do_user_evicts();
2355 mutex_exit(&arc_reclaim_thr_lock);
2356 ASSERT(spa || arc_eviction_list == NULL);
2357}
2358
2359void
2360arc_shrink(void)
2361{
2362 if (arc_c > arc_c_min) {
2363 uint64_t to_free;
2364
2365#ifdef _KERNEL
2366 to_free = arc_c >> arc_shrink_shift;
2367#else
2368 to_free = arc_c >> arc_shrink_shift;
2369#endif
2370 if (arc_c > arc_c_min + to_free)
2371 atomic_add_64(&arc_c, -to_free);
2372 else
2373 arc_c = arc_c_min;
2374
2375 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2376 if (arc_c > arc_size)
2377 arc_c = MAX(arc_size, arc_c_min);
2378 if (arc_p > arc_c)
2379 arc_p = (arc_c >> 1);
2380 ASSERT(arc_c >= arc_c_min);
2381 ASSERT((int64_t)arc_p >= 0);
2382 }
2383
2384 if (arc_size > arc_c)
2385 arc_adjust();
2386}
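/*
 * Worked example of the shrink step above, assuming the default
 * arc_shrink_shift of 5 (i.e. free 1/32 of the target per call): with
 * arc_c at 32GB a single arc_shrink() lowers the target by 1GB and pulls
 * arc_p down by 1/32 of its current value, after which arc_adjust()
 * evicts whatever now exceeds the new target.
 */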
2387
2388static int needfree = 0;
2389
2390static int
2391arc_reclaim_needed(void)
2392{
2393
2394#ifdef _KERNEL
2395
2396 if (needfree)
2397 return (1);
2398
2399 /*
2400 * Cooperate with pagedaemon when it's time for it to scan
2401 * and reclaim some pages.
2402 */
2403 if (vm_paging_needed())
2404 return (1);
2405
2406#ifdef sun
2407 /*
2408 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2409 */
2410 extra = desfree;
2411
2412 /*
2413 * check that we're out of range of the pageout scanner. It starts to
2414 * schedule paging if freemem is less than lotsfree and needfree.
2415 * lotsfree is the high-water mark for pageout, and needfree is the
2416 * number of needed free pages. We add extra pages here to make sure
2417 * the scanner doesn't start up while we're freeing memory.
2418 */
2419 if (freemem < lotsfree + needfree + extra)
2420 return (1);
2421
2422 /*
2423 * check to make sure that swapfs has enough space so that anon
2424 * reservations can still succeed. anon_resvmem() checks that the
2425 * availrmem is greater than swapfs_minfree, and the number of reserved
2426 * swap pages. We also add a bit of extra here just to prevent
2427 * circumstances from getting really dire.
2428 */
2429 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2430 return (1);
2431
2432#if defined(__i386)
2433 /*
2434 * If we're on an i386 platform, it's possible that we'll exhaust the
2435 * kernel heap space before we ever run out of available physical
2436 * memory. Most checks of the size of the heap_area compare against
2437 * tune.t_minarmem, which is the minimum available real memory that we
2438 * can have in the system. However, this is generally fixed at 25 pages
2439 * which is so low that it's useless. In this comparison, we seek to
2440	 * calculate the total heap size, and reclaim if more than 3/4 of the
2441 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2442 * free)
2443 */
2444 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2445 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2446 return (1);
2447#endif
2448#else /* !sun */
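	/*
	 * On FreeBSD, reclaim once kernel memory (kmem) usage exceeds
	 * 3/4 of the kmem size.
	 */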
2449 if (kmem_used() > (kmem_size() * 3) / 4)
2450 return (1);
2451#endif /* sun */
2452
2453#else
2454 if (spa_get_random(100) == 0)
2455 return (1);
2456#endif
2457 return (0);
2458}
2459
2460extern kmem_cache_t *zio_buf_cache[];
2461extern kmem_cache_t *zio_data_buf_cache[];
2462
2463static void
2464arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2465{
2466 size_t i;
2467 kmem_cache_t *prev_cache = NULL;
2468 kmem_cache_t *prev_data_cache = NULL;
2469
2470#ifdef _KERNEL
2471 if (arc_meta_used >= arc_meta_limit) {
2472 /*
2473 * We are exceeding our meta-data cache limit.
2474 * Purge some DNLC entries to release holds on meta-data.
2475 */
2476 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2477 }
2478#if defined(__i386)
2479 /*
2480 * Reclaim unused memory from all kmem caches.
2481 */
2482 kmem_reap();
2483#endif
2484#endif
2485
2486 /*
2487 * An aggressive reclamation will shrink the cache size as well as
2488 * reap free buffers from the arc kmem caches.
2489 */
2490 if (strat == ARC_RECLAIM_AGGR)
2491 arc_shrink();
2492
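	/*
	 * Several adjacent zio buffer sizes can share the same kmem cache,
	 * so skip an entry when it matches the cache we just reaped.
	 */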
2493 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2494 if (zio_buf_cache[i] != prev_cache) {
2495 prev_cache = zio_buf_cache[i];
2496 kmem_cache_reap_now(zio_buf_cache[i]);
2497 }
2498 if (zio_data_buf_cache[i] != prev_data_cache) {
2499 prev_data_cache = zio_data_buf_cache[i];
2500 kmem_cache_reap_now(zio_data_buf_cache[i]);
2501 }
2502 }
2503 kmem_cache_reap_now(buf_cache);
2504 kmem_cache_reap_now(hdr_cache);
2505}
2506
2507static void
2508arc_reclaim_thread(void *dummy __unused)
2509{
2510 clock_t growtime = 0;
2511 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2512 callb_cpr_t cpr;
2513
2514 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2515
2516 mutex_enter(&arc_reclaim_thr_lock);
2517 while (arc_thread_exit == 0) {
2518 if (arc_reclaim_needed()) {
2519
2520 if (arc_no_grow) {
2521 if (last_reclaim == ARC_RECLAIM_CONS) {
2522 last_reclaim = ARC_RECLAIM_AGGR;
2523 } else {
2524 last_reclaim = ARC_RECLAIM_CONS;
2525 }
2526 } else {
2527 arc_no_grow = TRUE;
2528 last_reclaim = ARC_RECLAIM_AGGR;
2529 membar_producer();
2530 }
2531
2532 /* reset the growth delay for every reclaim */
2533 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2534
2535 if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2536 /*
2537				 * If needfree is TRUE, our vm_lowmem hook
2538 * was called and in that case we must free some
2539 * memory, so switch to aggressive mode.
2540 */
2541 arc_no_grow = TRUE;
2542 last_reclaim = ARC_RECLAIM_AGGR;
2543 }
2544 arc_kmem_reap_now(last_reclaim);
2545 arc_warm = B_TRUE;
2546
2547 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2548 arc_no_grow = FALSE;
2549 }
2550
2551 arc_adjust();
2552
2553 if (arc_eviction_list != NULL)
2554 arc_do_user_evicts();
2555
2556#ifdef _KERNEL
2557 if (needfree) {
2558 needfree = 0;
2559 wakeup(&needfree);
2560 }
2561#endif
2562
2563 /* block until needed, or one second, whichever is shorter */
2564 CALLB_CPR_SAFE_BEGIN(&cpr);
2565 (void) cv_timedwait(&arc_reclaim_thr_cv,
2566 &arc_reclaim_thr_lock, hz);
2567 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2568 }
2569
2570 arc_thread_exit = 0;
2571 cv_broadcast(&arc_reclaim_thr_cv);
2572 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2573 thread_exit();
2574}
2575
2576/*
2577 * Adapt arc info given the number of bytes we are trying to add and
2578 * the state that we are coming from. This function is only called
2579 * when we are adding new content to the cache.
2580 */
2581static void
2582arc_adapt(int bytes, arc_state_t *state)
2583{
2584 int mult;
2585 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2586
2587 if (state == arc_l2c_only)
2588 return;
2589
2590 ASSERT(bytes > 0);
2591 /*
2592 * Adapt the target size of the MRU list:
2593 * - if we just hit in the MRU ghost list, then increase
2594 * the target size of the MRU list.
2595 * - if we just hit in the MFU ghost list, then increase
2596 * the target size of the MFU list by decreasing the
2597 * target size of the MRU list.
2598 */
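	/*
	 * Worked example (a sketch; `mult' is capped at 10 below to avoid
	 * wild swings): if the MFU ghost list is four times the size of the
	 * MRU ghost list, a hit on an 8K block in the MRU ghost list grows
	 * arc_p by 8K * 4 = 32K; the symmetric MFU ghost hit shrinks arc_p
	 * by the same amount, bounded below by arc_p_min.
	 */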
2599 if (state == arc_mru_ghost) {
2600 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2601 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2602 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2603
2604 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2605 } else if (state == arc_mfu_ghost) {
2606 uint64_t delta;
2607
2608 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2609 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2610 mult = MIN(mult, 10);
2611
2612 delta = MIN(bytes * mult, arc_p);
2613 arc_p = MAX(arc_p_min, arc_p - delta);
2614 }
2615 ASSERT((int64_t)arc_p >= 0);
2616
2617 if (arc_reclaim_needed()) {
2618 cv_signal(&arc_reclaim_thr_cv);
2619 return;
2620 }
2621
2622 if (arc_no_grow)
2623 return;
2624
2625 if (arc_c >= arc_c_max)
2626 return;
2627
2628 /*
2629 * If we're within (2 * maxblocksize) bytes of the target
2630 * cache size, increment the target cache size
2631 */
2632 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2633 atomic_add_64(&arc_c, (int64_t)bytes);
2634 if (arc_c > arc_c_max)
2635 arc_c = arc_c_max;
2636 else if (state == arc_anon)
2637 atomic_add_64(&arc_p, (int64_t)bytes);
2638 if (arc_p > arc_c)
2639 arc_p = arc_c;
2640 }
2641 ASSERT((int64_t)arc_p >= 0);
2642}
2643
2644/*
2645 * Check if the cache has reached its limits and eviction is required
2646 * prior to insert.
2647 */
2648static int
2649arc_evict_needed(arc_buf_contents_t type)
2650{
2651 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2652 return (1);
2653
2654#ifdef sun
2655#ifdef _KERNEL
2656 /*
2657 * If zio data pages are being allocated out of a separate heap segment,
2658 * then enforce that the size of available vmem for this area remains
2659 * above about 1/32nd free.
2660 */
2661 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2662 vmem_size(zio_arena, VMEM_FREE) <
2663 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2664 return (1);
2665#endif
2666#endif /* sun */
2667
2668 if (arc_reclaim_needed())
2669 return (1);
2670
2671 return (arc_size > arc_c);
2672}
2673
2674/*
2675 * The buffer, supplied as the first argument, needs a data block.
2676 * So, if we are at cache max, determine which cache should be victimized.
2677 * We have the following cases:
2678 *
2679 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2680 * In this situation if we're out of space, but the resident size of the MFU is
2681 * under the limit, victimize the MFU cache to satisfy this insertion request.
2682 *
2683 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2684 * Here, we've used up all of the available space for the MRU, so we need to
2685 * evict from our own cache instead. Evict from the set of resident MRU
2686 * entries.
2687 *
2688 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2689 * c minus p represents the MFU space in the cache, since p is the size of the
2690 * cache that is dedicated to the MRU. In this situation there's still space on
2691 * the MFU side, so the MRU side needs to be victimized.
2692 *
2693 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2694 * MFU's resident set is consuming more space than it has been allotted. In
2695 * this situation, we must victimize our own cache, the MFU, for this insertion.
2696 */
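/*
 * The four cases above reduce to the following sketch (the eviction-list
 * size checks made in the code below are elided here):
 *
 *	insert for MRU:  victim = (arc_p > anon + mru)    ? arc_mfu : arc_mru
 *	insert for MFU:  victim = ((arc_c - arc_p) > mfu) ? arc_mru : arc_mfu
 */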
2697static void
2698arc_get_data_buf(arc_buf_t *buf)
2699{
2700 arc_state_t *state = buf->b_hdr->b_state;
2701 uint64_t size = buf->b_hdr->b_size;
2702 arc_buf_contents_t type = buf->b_hdr->b_type;
2703
2704 arc_adapt(size, state);
2705
2706 /*
2707	 * We have not yet reached cache maximum size;
2708	 * just allocate a new buffer.
2709 */
2710 if (!arc_evict_needed(type)) {
2711 if (type == ARC_BUFC_METADATA) {
2712 buf->b_data = zio_buf_alloc(size);
2713 arc_space_consume(size, ARC_SPACE_DATA);
2714 } else {
2715 ASSERT(type == ARC_BUFC_DATA);
2716 buf->b_data = zio_data_buf_alloc(size);
2717 ARCSTAT_INCR(arcstat_data_size, size);
2718 atomic_add_64(&arc_size, size);
2719 }
2720 goto out;
2721 }
2722
2723 /*
2724 * If we are prefetching from the mfu ghost list, this buffer
2725 * will end up on the mru list; so steal space from there.
2726 */
2727 if (state == arc_mfu_ghost)
2728 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2729 else if (state == arc_mru_ghost)
2730 state = arc_mru;
2731
2732 if (state == arc_mru || state == arc_anon) {
2733 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2734 state = (arc_mfu->arcs_lsize[type] >= size &&
2735 arc_p > mru_used) ? arc_mfu : arc_mru;
2736 } else {
2737 /* MFU cases */
2738 uint64_t mfu_space = arc_c - arc_p;
2739 state = (arc_mru->arcs_lsize[type] >= size &&
2740 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2741 }
2742 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2743 if (type == ARC_BUFC_METADATA) {
2744 buf->b_data = zio_buf_alloc(size);
2745 arc_space_consume(size, ARC_SPACE_DATA);
2746 } else {
2747 ASSERT(type == ARC_BUFC_DATA);
2748 buf->b_data = zio_data_buf_alloc(size);
2749 ARCSTAT_INCR(arcstat_data_size, size);
2750 atomic_add_64(&arc_size, size);
2751 }
2752 ARCSTAT_BUMP(arcstat_recycle_miss);
2753 }
2754 ASSERT(buf->b_data != NULL);
2755out:
2756 /*
2757 * Update the state size. Note that ghost states have a
2758 * "ghost size" and so don't need to be updated.
2759 */
2760 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2761 arc_buf_hdr_t *hdr = buf->b_hdr;
2762
2763 atomic_add_64(&hdr->b_state->arcs_size, size);
2764 if (list_link_active(&hdr->b_arc_node)) {
2765 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2766 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2767 }
2768 /*
2769 * If we are growing the cache, and we are adding anonymous
2770 * data, and we have outgrown arc_p, update arc_p
2771 */
2772 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2773 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2774 arc_p = MIN(arc_c, arc_p + size);
2775 }
2776 ARCSTAT_BUMP(arcstat_allocated);
2777}
2778
2779/*
2780 * This routine is called whenever a buffer is accessed.
2781 * NOTE: the hash lock is dropped in this function.
2782 */
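/*
 * Summary of the transitions performed below (derived from the code, not
 * a separate spec): anon -> mru; mru -> mfu once a non-prefetch access
 * arrives more than ARC_MINTIME after the previous one; mru_ghost -> mfu
 * (or mru for prefetches); mfu stays mfu; mfu_ghost -> mfu (or mru for
 * prefetches); l2c_only -> mfu.
 */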
2783static void
2784arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2785{
2786 clock_t now;
2787
2788 ASSERT(MUTEX_HELD(hash_lock));
2789
2790 if (buf->b_state == arc_anon) {
2791 /*
2792 * This buffer is not in the cache, and does not
2793 * appear in our "ghost" list. Add the new buffer
2794 * to the MRU state.
2795 */
2796
2797 ASSERT(buf->b_arc_access == 0);
2798 buf->b_arc_access = ddi_get_lbolt();
2799 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2800 arc_change_state(arc_mru, buf, hash_lock);
2801
2802 } else if (buf->b_state == arc_mru) {
2803 now = ddi_get_lbolt();
2804
2805 /*
2806 * If this buffer is here because of a prefetch, then either:
2807 * - clear the flag if this is a "referencing" read
2808 * (any subsequent access will bump this into the MFU state).
2809 * or
2810 * - move the buffer to the head of the list if this is
2811 * another prefetch (to make it less likely to be evicted).
2812 */
2813 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2814 if (refcount_count(&buf->b_refcnt) == 0) {
2815 ASSERT(list_link_active(&buf->b_arc_node));
2816 } else {
2817 buf->b_flags &= ~ARC_PREFETCH;
2818 ARCSTAT_BUMP(arcstat_mru_hits);
2819 }
2820 buf->b_arc_access = now;
2821 return;
2822 }
2823
2824 /*
2825 * This buffer has been "accessed" only once so far,
2826 * but it is still in the cache. Move it to the MFU
2827 * state.
2828 */
2829 if (now > buf->b_arc_access + ARC_MINTIME) {
2830 /*
2831			 * More than ARC_MINTIME has passed since we
2832 * instantiated this buffer. Move it to the
2833 * most frequently used state.
2834 */
2835 buf->b_arc_access = now;
2836 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2837 arc_change_state(arc_mfu, buf, hash_lock);
2838 }
2839 ARCSTAT_BUMP(arcstat_mru_hits);
2840 } else if (buf->b_state == arc_mru_ghost) {
2841 arc_state_t *new_state;
2842 /*
2843 * This buffer has been "accessed" recently, but
2844 * was evicted from the cache. Move it to the
2845 * MFU state.
2846 */
2847
2848 if (buf->b_flags & ARC_PREFETCH) {
2849 new_state = arc_mru;
2850 if (refcount_count(&buf->b_refcnt) > 0)
2851 buf->b_flags &= ~ARC_PREFETCH;
2852 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2853 } else {
2854 new_state = arc_mfu;
2855 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2856 }
2857
2858 buf->b_arc_access = ddi_get_lbolt();
2859 arc_change_state(new_state, buf, hash_lock);
2860
2861 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2862 } else if (buf->b_state == arc_mfu) {
2863 /*
2864 * This buffer has been accessed more than once and is
2865 * still in the cache. Keep it in the MFU state.
2866 *
2867 * NOTE: an add_reference() that occurred when we did
2868 * the arc_read() will have kicked this off the list.
2869 * If it was a prefetch, we will explicitly move it to
2870 * the head of the list now.
2871 */
2872 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2873 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2874 ASSERT(list_link_active(&buf->b_arc_node));
2875 }
2876 ARCSTAT_BUMP(arcstat_mfu_hits);
2877 buf->b_arc_access = ddi_get_lbolt();
2878 } else if (buf->b_state == arc_mfu_ghost) {
2879 arc_state_t *new_state = arc_mfu;
2880 /*
2881 * This buffer has been accessed more than once but has
2882 * been evicted from the cache. Move it back to the
2883 * MFU state.
2884 */
2885
2886 if (buf->b_flags & ARC_PREFETCH) {
2887 /*
2888 * This is a prefetch access...
2889 * move this block back to the MRU state.
2890 */
2891 ASSERT0(refcount_count(&buf->b_refcnt));
2892 new_state = arc_mru;
2893 }
2894
2895 buf->b_arc_access = ddi_get_lbolt();
2896 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2897 arc_change_state(new_state, buf, hash_lock);
2898
2899 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2900 } else if (buf->b_state == arc_l2c_only) {
2901 /*
2902 * This buffer is on the 2nd Level ARC.
2903 */
2904
2905 buf->b_arc_access = ddi_get_lbolt();
2906 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2907 arc_change_state(arc_mfu, buf, hash_lock);
2908 } else {
2909 ASSERT(!"invalid arc state");
2910 }
2911}
2912
2913/* a generic arc_done_func_t which you can use */
2914/* ARGSUSED */
2915void
2916arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2917{
2918 if (zio == NULL || zio->io_error == 0)
2919 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2920 VERIFY(arc_buf_remove_ref(buf, arg));
2921}
2922
2923/* a generic arc_done_func_t */
2924void
2925arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2926{
2927 arc_buf_t **bufp = arg;
2928 if (zio && zio->io_error) {
2929 VERIFY(arc_buf_remove_ref(buf, arg));
2930 *bufp = NULL;
2931 } else {
2932 *bufp = buf;
2933 ASSERT(buf->b_data);
2934 }
2935}
2936
2937static void
2938arc_read_done(zio_t *zio)
2939{
2940 arc_buf_hdr_t *hdr, *found;
2941 arc_buf_t *buf;
2942 arc_buf_t *abuf; /* buffer we're assigning to callback */
2943 kmutex_t *hash_lock;
2944 arc_callback_t *callback_list, *acb;
2945 int freeable = FALSE;
2946
2947 buf = zio->io_private;
2948 hdr = buf->b_hdr;
2949
2950 /*
2951 * The hdr was inserted into hash-table and removed from lists
2952 * prior to starting I/O. We should find this header, since
2953 * it's in the hash table, and it should be legit since it's
2954 * not possible to evict it during the I/O. The only possible
2955 * reason for it not to be found is if we were freed during the
2956 * read.
2957 */
2958 found = buf_hash_find(hdr->b_spa, &hdr->b_dva, hdr->b_birth,
2959 &hash_lock);
2960
2961 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) && hash_lock == NULL) ||
2962 (found == hdr && DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
2963 (found == hdr && HDR_L2_READING(hdr)));
2964
2965 hdr->b_flags &= ~ARC_L2_EVICTED;
2966 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
2967 hdr->b_flags &= ~ARC_L2CACHE;
2968
2969 /* byteswap if necessary */
2970 callback_list = hdr->b_acb;
2971 ASSERT(callback_list != NULL);
2972 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
2973 dmu_object_byteswap_t bswap =
2974 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
2975 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
2976 byteswap_uint64_array :
2977 dmu_ot_byteswap[bswap].ob_func;
2978 func(buf->b_data, hdr->b_size);
2979 }
2980
2981 arc_cksum_compute(buf, B_FALSE);
2982#ifdef illumos
2983 arc_buf_watch(buf);
2984#endif /* illumos */
2985
2986 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
2987 /*
2988 * Only call arc_access on anonymous buffers. This is because
2989 * if we've issued an I/O for an evicted buffer, we've already
2990 * called arc_access (to prevent any simultaneous readers from
2991 * getting confused).
2992 */
2993 arc_access(hdr, hash_lock);
2994 }
2995
2996 /* create copies of the data buffer for the callers */
2997 abuf = buf;
2998 for (acb = callback_list; acb; acb = acb->acb_next) {
2999 if (acb->acb_done) {
3000 if (abuf == NULL) {
3001 ARCSTAT_BUMP(arcstat_duplicate_reads);
3002 abuf = arc_buf_clone(buf);
3003 }
3004 acb->acb_buf = abuf;
3005 abuf = NULL;
3006 }
3007 }
3008 hdr->b_acb = NULL;
3009 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3010 ASSERT(!HDR_BUF_AVAILABLE(hdr));
3011 if (abuf == buf) {
3012 ASSERT(buf->b_efunc == NULL);
3013 ASSERT(hdr->b_datacnt == 1);
3014 hdr->b_flags |= ARC_BUF_AVAILABLE;
3015 }
3016
3017 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3018
3019 if (zio->io_error != 0) {
3020 hdr->b_flags |= ARC_IO_ERROR;
3021 if (hdr->b_state != arc_anon)
3022 arc_change_state(arc_anon, hdr, hash_lock);
3023 if (HDR_IN_HASH_TABLE(hdr))
3024 buf_hash_remove(hdr);
3025 freeable = refcount_is_zero(&hdr->b_refcnt);
3026 }
3027
3028 /*
3029 * Broadcast before we drop the hash_lock to avoid the possibility
3030 * that the hdr (and hence the cv) might be freed before we get to
3031 * the cv_broadcast().
3032 */
3033 cv_broadcast(&hdr->b_cv);
3034
3035 if (hash_lock) {
3036 mutex_exit(hash_lock);
3037 } else {
3038 /*
3039 * This block was freed while we waited for the read to
3040 * complete. It has been removed from the hash table and
3041 * moved to the anonymous state (so that it won't show up
3042 * in the cache).
3043 */
3044 ASSERT3P(hdr->b_state, ==, arc_anon);
3045 freeable = refcount_is_zero(&hdr->b_refcnt);
3046 }
3047
3048 /* execute each callback and free its structure */
3049 while ((acb = callback_list) != NULL) {
3050 if (acb->acb_done)
3051 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3052
3053 if (acb->acb_zio_dummy != NULL) {
3054 acb->acb_zio_dummy->io_error = zio->io_error;
3055 zio_nowait(acb->acb_zio_dummy);
3056 }
3057
3058 callback_list = acb->acb_next;
3059 kmem_free(acb, sizeof (arc_callback_t));
3060 }
3061
3062 if (freeable)
3063 arc_hdr_destroy(hdr);
3064}
3065
3066/*
3067 * "Read" the block at the specified DVA (in bp) via the
3068 * cache. If the block is found in the cache, invoke the provided
3069 * callback immediately and return. Note that the `zio' parameter
3070 * in the callback will be NULL in this case, since no IO was
3071 * required. If the block is not in the cache pass the read request
3072 * on to the spa with a substitute callback function, so that the
3073 * requested block will be added to the cache.
3074 *
3075 * If a read request arrives for a block that has a read in-progress,
3076 * either wait for the in-progress read to complete (and return the
3077 * results); or, if this is a read with a "done" func, add a record
3078 * to the read to invoke the "done" func when the read completes,
3079 * and return; or just return.
3080 *
3081 * arc_read_done() will invoke all the requested "done" functions
3082 * for readers of this block.
3083 */
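/*
 * Illustrative synchronous-read sketch (simplified from typical callers
 * such as the traversal code; error handling and bookmark setup elided):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *
 *	(void) arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (abuf != NULL) {
 *		... consume abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */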
3084int
3085arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3086 void *private, int priority, int zio_flags, uint32_t *arc_flags,
3087 const zbookmark_t *zb)
3088{
3089 arc_buf_hdr_t *hdr;
3090 arc_buf_t *buf = NULL;
3091 kmutex_t *hash_lock;
3092 zio_t *rzio;
3093 uint64_t guid = spa_load_guid(spa);
3094
3095top:
3096 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3097 &hash_lock);
3098 if (hdr && hdr->b_datacnt > 0) {
3099
3100 *arc_flags |= ARC_CACHED;
3101
3102 if (HDR_IO_IN_PROGRESS(hdr)) {
3103
3104 if (*arc_flags & ARC_WAIT) {
3105 cv_wait(&hdr->b_cv, hash_lock);
3106 mutex_exit(hash_lock);
3107 goto top;
3108 }
3109 ASSERT(*arc_flags & ARC_NOWAIT);
3110
3111 if (done) {
3112 arc_callback_t *acb = NULL;
3113
3114 acb = kmem_zalloc(sizeof (arc_callback_t),
3115 KM_SLEEP);
3116 acb->acb_done = done;
3117 acb->acb_private = private;
3118 if (pio != NULL)
3119 acb->acb_zio_dummy = zio_null(pio,
3120 spa, NULL, NULL, NULL, zio_flags);
3121
3122 ASSERT(acb->acb_done != NULL);
3123 acb->acb_next = hdr->b_acb;
3124 hdr->b_acb = acb;
3125 add_reference(hdr, hash_lock, private);
3126 mutex_exit(hash_lock);
3127 return (0);
3128 }
3129 mutex_exit(hash_lock);
3130 return (0);
3131 }
3132
3133 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3134
3135 if (done) {
3136 add_reference(hdr, hash_lock, private);
3137 /*
3138 * If this block is already in use, create a new
3139 * copy of the data so that we will be guaranteed
3140 * that arc_release() will always succeed.
3141 */
3142 buf = hdr->b_buf;
3143 ASSERT(buf);
3144 ASSERT(buf->b_data);
3145 if (HDR_BUF_AVAILABLE(hdr)) {
3146 ASSERT(buf->b_efunc == NULL);
3147 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3148 } else {
3149 buf = arc_buf_clone(buf);
3150 }
3151
3152 } else if (*arc_flags & ARC_PREFETCH &&
3153 refcount_count(&hdr->b_refcnt) == 0) {
3154 hdr->b_flags |= ARC_PREFETCH;
3155 }
3156 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3157 arc_access(hdr, hash_lock);
3158 if (*arc_flags & ARC_L2CACHE)
3159 hdr->b_flags |= ARC_L2CACHE;
3160 if (*arc_flags & ARC_L2COMPRESS)
3161 hdr->b_flags |= ARC_L2COMPRESS;
3162 mutex_exit(hash_lock);
3163 ARCSTAT_BUMP(arcstat_hits);
3164 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3165 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3166 data, metadata, hits);
3167
3168 if (done)
3169 done(NULL, buf, private);
3170 } else {
3171 uint64_t size = BP_GET_LSIZE(bp);
3172 arc_callback_t *acb;
3173 vdev_t *vd = NULL;
3174 uint64_t addr = 0;
3175 boolean_t devw = B_FALSE;
3176
3177 if (hdr == NULL) {
3178 /* this block is not in the cache */
3179 arc_buf_hdr_t *exists;
3180 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3181 buf = arc_buf_alloc(spa, size, private, type);
3182 hdr = buf->b_hdr;
3183 hdr->b_dva = *BP_IDENTITY(bp);
3184 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3185 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3186 exists = buf_hash_insert(hdr, &hash_lock);
3187 if (exists) {
3188 /* somebody beat us to the hash insert */
3189 mutex_exit(hash_lock);
3190 buf_discard_identity(hdr);
3191 (void) arc_buf_remove_ref(buf, private);
3192 goto top; /* restart the IO request */
3193 }
3194 /* if this is a prefetch, we don't have a reference */
3195 if (*arc_flags & ARC_PREFETCH) {
3196 (void) remove_reference(hdr, hash_lock,
3197 private);
3198 hdr->b_flags |= ARC_PREFETCH;
3199 }
3200 if (*arc_flags & ARC_L2CACHE)
3201 hdr->b_flags |= ARC_L2CACHE;
3202 if (*arc_flags & ARC_L2COMPRESS)
3203 hdr->b_flags |= ARC_L2COMPRESS;
3204 if (BP_GET_LEVEL(bp) > 0)
3205 hdr->b_flags |= ARC_INDIRECT;
3206 } else {
3207 /* this block is in the ghost cache */
3208 ASSERT(GHOST_STATE(hdr->b_state));
3209 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3210 ASSERT0(refcount_count(&hdr->b_refcnt));
3211 ASSERT(hdr->b_buf == NULL);
3212
3213 /* if this is a prefetch, we don't have a reference */
3214 if (*arc_flags & ARC_PREFETCH)
3215 hdr->b_flags |= ARC_PREFETCH;
3216 else
3217 add_reference(hdr, hash_lock, private);
3218 if (*arc_flags & ARC_L2CACHE)
3219 hdr->b_flags |= ARC_L2CACHE;
3220 if (*arc_flags & ARC_L2COMPRESS)
3221 hdr->b_flags |= ARC_L2COMPRESS;
3222 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3223 buf->b_hdr = hdr;
3224 buf->b_data = NULL;
3225 buf->b_efunc = NULL;
3226 buf->b_private = NULL;
3227 buf->b_next = NULL;
3228 hdr->b_buf = buf;
3229 ASSERT(hdr->b_datacnt == 0);
3230 hdr->b_datacnt = 1;
3231 arc_get_data_buf(buf);
3232 arc_access(hdr, hash_lock);
3233 }
3234
3235 ASSERT(!GHOST_STATE(hdr->b_state));
3236
3237 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3238 acb->acb_done = done;
3239 acb->acb_private = private;
3240
3241 ASSERT(hdr->b_acb == NULL);
3242 hdr->b_acb = acb;
3243 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3244
3245 if (HDR_L2CACHE(hdr) && hdr->b_l2hdr != NULL &&
3246 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3247 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3248 addr = hdr->b_l2hdr->b_daddr;
3249 /*
3250 * Lock out device removal.
3251 */
3252 if (vdev_is_dead(vd) ||
3253 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3254 vd = NULL;
3255 }
3256
3257 mutex_exit(hash_lock);
3258
3259 /*
3260 * At this point, we have a level 1 cache miss. Try again in
3261 * L2ARC if possible.
3262 */
3263 ASSERT3U(hdr->b_size, ==, size);
3264 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3265 uint64_t, size, zbookmark_t *, zb);
3266 ARCSTAT_BUMP(arcstat_misses);
3267 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3268 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3269 data, metadata, misses);
3270#ifdef _KERNEL
3271 curthread->td_ru.ru_inblock++;
3272#endif
3273
3274 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3275 /*
3276 * Read from the L2ARC if the following are true:
3277 * 1. The L2ARC vdev was previously cached.
3278 * 2. This buffer still has L2ARC metadata.
3279 * 3. This buffer isn't currently writing to the L2ARC.
3280 * 4. The L2ARC entry wasn't evicted, which may
3281 * also have invalidated the vdev.
3282			 * 5. This isn't a prefetch with l2arc_noprefetch set.
3283 */
3284 if (hdr->b_l2hdr != NULL &&
3285 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3286 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3287 l2arc_read_callback_t *cb;
3288
3289 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3290 ARCSTAT_BUMP(arcstat_l2_hits);
3291
3292 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3293 KM_SLEEP);
3294 cb->l2rcb_buf = buf;
3295 cb->l2rcb_spa = spa;
3296 cb->l2rcb_bp = *bp;
3297 cb->l2rcb_zb = *zb;
3298 cb->l2rcb_flags = zio_flags;
3299 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3300
3301 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3302 addr + size < vd->vdev_psize -
3303 VDEV_LABEL_END_SIZE);
3304
3305 /*
3306 * l2arc read. The SCL_L2ARC lock will be
3307 * released by l2arc_read_done().
3308 * Issue a null zio if the underlying buffer
3309 * was squashed to zero size by compression.
3310 */
3311 if (hdr->b_l2hdr->b_compress ==
3312 ZIO_COMPRESS_EMPTY) {
3313 rzio = zio_null(pio, spa, vd,
3314 l2arc_read_done, cb,
3315 zio_flags | ZIO_FLAG_DONT_CACHE |
3316 ZIO_FLAG_CANFAIL |
3317 ZIO_FLAG_DONT_PROPAGATE |
3318 ZIO_FLAG_DONT_RETRY);
3319 } else {
3320 rzio = zio_read_phys(pio, vd, addr,
3321 hdr->b_l2hdr->b_asize,
3322 buf->b_data, ZIO_CHECKSUM_OFF,
3323 l2arc_read_done, cb, priority,
3324 zio_flags | ZIO_FLAG_DONT_CACHE |
3325 ZIO_FLAG_CANFAIL |
3326 ZIO_FLAG_DONT_PROPAGATE |
3327 ZIO_FLAG_DONT_RETRY, B_FALSE);
3328 }
3329 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3330 zio_t *, rzio);
3331 ARCSTAT_INCR(arcstat_l2_read_bytes,
3332 hdr->b_l2hdr->b_asize);
3333
3334 if (*arc_flags & ARC_NOWAIT) {
3335 zio_nowait(rzio);
3336 return (0);
3337 }
3338
3339 ASSERT(*arc_flags & ARC_WAIT);
3340 if (zio_wait(rzio) == 0)
3341 return (0);
3342
3343 /* l2arc read error; goto zio_read() */
3344 } else {
3345 DTRACE_PROBE1(l2arc__miss,
3346 arc_buf_hdr_t *, hdr);
3347 ARCSTAT_BUMP(arcstat_l2_misses);
3348 if (HDR_L2_WRITING(hdr))
3349 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3350 spa_config_exit(spa, SCL_L2ARC, vd);
3351 }
3352 } else {
3353 if (vd != NULL)
3354 spa_config_exit(spa, SCL_L2ARC, vd);
3355 if (l2arc_ndev != 0) {
3356 DTRACE_PROBE1(l2arc__miss,
3357 arc_buf_hdr_t *, hdr);
3358 ARCSTAT_BUMP(arcstat_l2_misses);
3359 }
3360 }
3361
3362 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3363 arc_read_done, buf, priority, zio_flags, zb);
3364
3365 if (*arc_flags & ARC_WAIT)
3366 return (zio_wait(rzio));
3367
3368 ASSERT(*arc_flags & ARC_NOWAIT);
3369 zio_nowait(rzio);
3370 }
3371 return (0);
3372}
3373
3374void
3375arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3376{
3377 ASSERT(buf->b_hdr != NULL);
3378 ASSERT(buf->b_hdr->b_state != arc_anon);
3379 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3380 ASSERT(buf->b_efunc == NULL);
3381 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3382
3383 buf->b_efunc = func;
3384 buf->b_private = private;
3385}
3386
3387/*
3388 * Notify the arc that a block was freed, and thus will never be used again.
3389 */
3390void
3391arc_freed(spa_t *spa, const blkptr_t *bp)
3392{
3393 arc_buf_hdr_t *hdr;
3394 kmutex_t *hash_lock;
3395 uint64_t guid = spa_load_guid(spa);
3396
3397 hdr = buf_hash_find(guid, BP_IDENTITY(bp), BP_PHYSICAL_BIRTH(bp),
3398 &hash_lock);
3399 if (hdr == NULL)
3400 return;
3401 if (HDR_BUF_AVAILABLE(hdr)) {
3402 arc_buf_t *buf = hdr->b_buf;
3403 add_reference(hdr, hash_lock, FTAG);
3404 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3405 mutex_exit(hash_lock);
3406
3407 arc_release(buf, FTAG);
3408 (void) arc_buf_remove_ref(buf, FTAG);
3409 } else {
3410 mutex_exit(hash_lock);
3411 }
3412
3413}
3414
3415/*
3416 * This is used by the DMU to let the ARC know that a buffer is
3417 * being evicted, so the ARC should clean up. If this arc buf
3418 * is not yet in the evicted state, it will be put there.
3419 */
3420int
3421arc_buf_evict(arc_buf_t *buf)
3422{
3423 arc_buf_hdr_t *hdr;
3424 kmutex_t *hash_lock;
3425 arc_buf_t **bufp;
3426 list_t *list, *evicted_list;
3427 kmutex_t *lock, *evicted_lock;
3428
3429 mutex_enter(&buf->b_evict_lock);
3430 hdr = buf->b_hdr;
3431 if (hdr == NULL) {
3432 /*
3433 * We are in arc_do_user_evicts().
3434 */
3435 ASSERT(buf->b_data == NULL);
3436 mutex_exit(&buf->b_evict_lock);
3437 return (0);
3438 } else if (buf->b_data == NULL) {
3439 arc_buf_t copy = *buf; /* structure assignment */
3440 /*
3441 * We are on the eviction list; process this buffer now
3442 * but let arc_do_user_evicts() do the reaping.
3443 */
3444 buf->b_efunc = NULL;
3445 mutex_exit(&buf->b_evict_lock);
3446 VERIFY(copy.b_efunc(&copy) == 0);
3447 return (1);
3448 }
3449 hash_lock = HDR_LOCK(hdr);
3450 mutex_enter(hash_lock);
3451 hdr = buf->b_hdr;
3452 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3453
3454 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3455 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3456
3457 /*
3458 * Pull this buffer off of the hdr
3459 */
3460 bufp = &hdr->b_buf;
3461 while (*bufp != buf)
3462 bufp = &(*bufp)->b_next;
3463 *bufp = buf->b_next;
3464
3465 ASSERT(buf->b_data != NULL);
3466 arc_buf_destroy(buf, FALSE, FALSE);
3467
3468 if (hdr->b_datacnt == 0) {
3469 arc_state_t *old_state = hdr->b_state;
3470 arc_state_t *evicted_state;
3471
3472 ASSERT(hdr->b_buf == NULL);
3473 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3474
3475 evicted_state =
3476 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3477
3478 get_buf_info(hdr, old_state, &list, &lock);
3479 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock);
3480 mutex_enter(lock);
3481 mutex_enter(evicted_lock);
3482
3483 arc_change_state(evicted_state, hdr, hash_lock);
3484 ASSERT(HDR_IN_HASH_TABLE(hdr));
3485 hdr->b_flags |= ARC_IN_HASH_TABLE;
3486 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3487
3488 mutex_exit(evicted_lock);
3489 mutex_exit(lock);
3490 }
3491 mutex_exit(hash_lock);
3492 mutex_exit(&buf->b_evict_lock);
3493
3494 VERIFY(buf->b_efunc(buf) == 0);
3495 buf->b_efunc = NULL;
3496 buf->b_private = NULL;
3497 buf->b_hdr = NULL;
3498 buf->b_next = NULL;
3499 kmem_cache_free(buf_cache, buf);
3500 return (1);
3501}
3502
3503/*
3504 * Release this buffer from the cache, making it an anonymous buffer. This
3505 * must be done after a read and prior to modifying the buffer contents.
3506 * If the buffer has more than one reference, we must make
3507 * a new hdr for the buffer.
3508 */
3509void
3510arc_release(arc_buf_t *buf, void *tag)
3511{
3512 arc_buf_hdr_t *hdr;
3513 kmutex_t *hash_lock = NULL;
3514 l2arc_buf_hdr_t *l2hdr;
3515 uint64_t buf_size;
3516
3517 /*
3518 * It would be nice to assert that if it's DMU metadata (level >
3519 * 0 || it's the dnode file), then it must be syncing context.
3520 * But we don't know that information at this level.
3521 */
3522
3523 mutex_enter(&buf->b_evict_lock);
3524 hdr = buf->b_hdr;
3525
3526 /* this buffer is not on any list */
3527 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3528
3529 if (hdr->b_state == arc_anon) {
3530 /* this buffer is already released */
3531 ASSERT(buf->b_efunc == NULL);
3532 } else {
3533 hash_lock = HDR_LOCK(hdr);
3534 mutex_enter(hash_lock);
3535 hdr = buf->b_hdr;
3536 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3537 }
3538
3539 l2hdr = hdr->b_l2hdr;
3540 if (l2hdr) {
3541 mutex_enter(&l2arc_buflist_mtx);
3542 hdr->b_l2hdr = NULL;
3543 }
3544 buf_size = hdr->b_size;
3545
3546 /*
3547 * Do we have more than one buf?
3548 */
3549 if (hdr->b_datacnt > 1) {
3550 arc_buf_hdr_t *nhdr;
3551 arc_buf_t **bufp;
3552 uint64_t blksz = hdr->b_size;
3553 uint64_t spa = hdr->b_spa;
3554 arc_buf_contents_t type = hdr->b_type;
3555 uint32_t flags = hdr->b_flags;
3556
3557 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3558 /*
3559 * Pull the data off of this hdr and attach it to
3560 * a new anonymous hdr.
3561 */
3562 (void) remove_reference(hdr, hash_lock, tag);
3563 bufp = &hdr->b_buf;
3564 while (*bufp != buf)
3565 bufp = &(*bufp)->b_next;
3566 *bufp = buf->b_next;
3567 buf->b_next = NULL;
3568
3569 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3570 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3571 if (refcount_is_zero(&hdr->b_refcnt)) {
3572 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3573 ASSERT3U(*size, >=, hdr->b_size);
3574 atomic_add_64(size, -hdr->b_size);
3575 }
3576
3577 /*
3578		 * We're releasing a duplicate user data buffer; update
3579 * our statistics accordingly.
3580 */
3581 if (hdr->b_type == ARC_BUFC_DATA) {
3582 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3583 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3584 -hdr->b_size);
3585 }
3586 hdr->b_datacnt -= 1;
3587 arc_cksum_verify(buf);
3588#ifdef illumos
3589 arc_buf_unwatch(buf);
3590#endif /* illumos */
3591
3592 mutex_exit(hash_lock);
3593
3594 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3595 nhdr->b_size = blksz;
3596 nhdr->b_spa = spa;
3597 nhdr->b_type = type;
3598 nhdr->b_buf = buf;
3599 nhdr->b_state = arc_anon;
3600 nhdr->b_arc_access = 0;
3601 nhdr->b_flags = flags & ARC_L2_WRITING;
3602 nhdr->b_l2hdr = NULL;
3603 nhdr->b_datacnt = 1;
3604 nhdr->b_freeze_cksum = NULL;
3605 (void) refcount_add(&nhdr->b_refcnt, tag);
3606 buf->b_hdr = nhdr;
3607 mutex_exit(&buf->b_evict_lock);
3608 atomic_add_64(&arc_anon->arcs_size, blksz);
3609 } else {
3610 mutex_exit(&buf->b_evict_lock);
3611 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3612 ASSERT(!list_link_active(&hdr->b_arc_node));
3613 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3614 if (hdr->b_state != arc_anon)
3615 arc_change_state(arc_anon, hdr, hash_lock);
3616 hdr->b_arc_access = 0;
3617 if (hash_lock)
3618 mutex_exit(hash_lock);
3619
3620 buf_discard_identity(hdr);
3621 arc_buf_thaw(buf);
3622 }
3623 buf->b_efunc = NULL;
3624 buf->b_private = NULL;
3625
3626 if (l2hdr) {
3627 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3628 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3629 hdr->b_size, 0);
3630 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3631 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3632 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3633 mutex_exit(&l2arc_buflist_mtx);
3634 }
3635}
3636
3637int
3638arc_released(arc_buf_t *buf)
3639{
3640 int released;
3641
3642 mutex_enter(&buf->b_evict_lock);
3643 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3644 mutex_exit(&buf->b_evict_lock);
3645 return (released);
3646}
3647
3648int
3649arc_has_callback(arc_buf_t *buf)
3650{
3651 int callback;
3652
3653 mutex_enter(&buf->b_evict_lock);
3654 callback = (buf->b_efunc != NULL);
3655 mutex_exit(&buf->b_evict_lock);
3656 return (callback);
3657}
3658
3659#ifdef ZFS_DEBUG
3660int
3661arc_referenced(arc_buf_t *buf)
3662{
3663 int referenced;
3664
3665 mutex_enter(&buf->b_evict_lock);
3666 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3667 mutex_exit(&buf->b_evict_lock);
3668 return (referenced);
3669}
3670#endif
3671
3672static void
3673arc_write_ready(zio_t *zio)
3674{
3675 arc_write_callback_t *callback = zio->io_private;
3676 arc_buf_t *buf = callback->awcb_buf;
3677 arc_buf_hdr_t *hdr = buf->b_hdr;
3678
3679 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3680 callback->awcb_ready(zio, buf, callback->awcb_private);
3681
3682 /*
3683 * If the IO is already in progress, then this is a re-write
3684 * attempt, so we need to thaw and re-compute the cksum.
3685 * It is the responsibility of the callback to handle the
3686 * accounting for any re-write attempt.
3687 */
3688 if (HDR_IO_IN_PROGRESS(hdr)) {
3689 mutex_enter(&hdr->b_freeze_lock);
3690 if (hdr->b_freeze_cksum != NULL) {
3691 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3692 hdr->b_freeze_cksum = NULL;
3693 }
3694 mutex_exit(&hdr->b_freeze_lock);
3695 }
3696 arc_cksum_compute(buf, B_FALSE);
3697 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3698}
3699
3700static void
3701arc_write_done(zio_t *zio)
3702{
3703 arc_write_callback_t *callback = zio->io_private;
3704 arc_buf_t *buf = callback->awcb_buf;
3705 arc_buf_hdr_t *hdr = buf->b_hdr;
3706
3707 ASSERT(hdr->b_acb == NULL);
3708
3709 if (zio->io_error == 0) {
3710 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3711 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3712 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3713 } else {
3714 ASSERT(BUF_EMPTY(hdr));
3715 }
3716
3717 /*
3718 * If the block to be written was all-zero, we may have
3719 * compressed it away. In this case no write was performed
3720 * so there will be no dva/birth/checksum. The buffer must
3721 * therefore remain anonymous (and uncached).
3722 */
3723 if (!BUF_EMPTY(hdr)) {
3724 arc_buf_hdr_t *exists;
3725 kmutex_t *hash_lock;
3726
3727 ASSERT(zio->io_error == 0);
3728
3729 arc_cksum_verify(buf);
3730
3731 exists = buf_hash_insert(hdr, &hash_lock);
3732 if (exists) {
3733 /*
3734 * This can only happen if we overwrite for
3735 * sync-to-convergence, because we remove
3736 * buffers from the hash table when we arc_free().
3737 */
3738 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3739 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3740 panic("bad overwrite, hdr=%p exists=%p",
3741 (void *)hdr, (void *)exists);
3742 ASSERT(refcount_is_zero(&exists->b_refcnt));
3743 arc_change_state(arc_anon, exists, hash_lock);
3744 mutex_exit(hash_lock);
3745 arc_hdr_destroy(exists);
3746 exists = buf_hash_insert(hdr, &hash_lock);
3747 ASSERT3P(exists, ==, NULL);
3748 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3749 /* nopwrite */
3750 ASSERT(zio->io_prop.zp_nopwrite);
3751 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3752 panic("bad nopwrite, hdr=%p exists=%p",
3753 (void *)hdr, (void *)exists);
3754 } else {
3755 /* Dedup */
3756 ASSERT(hdr->b_datacnt == 1);
3757 ASSERT(hdr->b_state == arc_anon);
3758 ASSERT(BP_GET_DEDUP(zio->io_bp));
3759 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3760 }
3761 }
3762 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3763 /* if it's not anon, we are doing a scrub */
3764 if (!exists && hdr->b_state == arc_anon)
3765 arc_access(hdr, hash_lock);
3766 mutex_exit(hash_lock);
3767 } else {
3768 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3769 }
3770
3771 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3772 callback->awcb_done(zio, buf, callback->awcb_private);
3773
3774 kmem_free(callback, sizeof (arc_write_callback_t));
3775}
3776
3777zio_t *
3778arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3779 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3780 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *done,
3781 void *private, int priority, int zio_flags, const zbookmark_t *zb)
3782{
3783 arc_buf_hdr_t *hdr = buf->b_hdr;
3784 arc_write_callback_t *callback;
3785 zio_t *zio;
3786
3787 ASSERT(ready != NULL);
3788 ASSERT(done != NULL);
3789 ASSERT(!HDR_IO_ERROR(hdr));
3790 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3791 ASSERT(hdr->b_acb == NULL);
3792 if (l2arc)
3793 hdr->b_flags |= ARC_L2CACHE;
3794 if (l2arc_compress)
3795 hdr->b_flags |= ARC_L2COMPRESS;
3796 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3797 callback->awcb_ready = ready;
3798 callback->awcb_done = done;
3799 callback->awcb_private = private;
3800 callback->awcb_buf = buf;
3801
3802 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3803 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);
3804
3805 return (zio);
3806}
3807
3808static int
3809arc_memory_throttle(uint64_t reserve, uint64_t inflight_data, uint64_t txg)
3810{
3811#ifdef _KERNEL
3812 uint64_t available_memory =
3813 ptoa((uintmax_t)cnt.v_free_count + cnt.v_cache_count);
3814 static uint64_t page_load = 0;
3815 static uint64_t last_txg = 0;
3816
3817#ifdef sun
3818#if defined(__i386)
3819 available_memory =
3820 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3821#endif
3822#endif /* sun */
3823 if (available_memory >= zfs_write_limit_max)
3824 return (0);
3825
3826 if (txg > last_txg) {
3827 last_txg = txg;
3828 page_load = 0;
3829 }
3830 /*
3831	 * If we are in pageout, we know that memory is already tight
3832	 * and the ARC is already going to be evicting, so we just want to
3833 * continue to let page writes occur as quickly as possible.
3834 */
3835 if (curproc == pageproc) {
3836 if (page_load > available_memory / 4)
3837 return (SET_ERROR(ERESTART));
3838 /* Note: reserve is inflated, so we deflate */
3839 page_load += reserve / 8;
3840 return (0);
3841 } else if (page_load > 0 && arc_reclaim_needed()) {
3842 /* memory is low, delay before restarting */
3843 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3844 return (SET_ERROR(EAGAIN));
3845 }
3846 page_load = 0;
3847
3848 if (arc_size > arc_c_min) {
3849 uint64_t evictable_memory =
3850 arc_mru->arcs_lsize[ARC_BUFC_DATA] +
3851 arc_mru->arcs_lsize[ARC_BUFC_METADATA] +
3852 arc_mfu->arcs_lsize[ARC_BUFC_DATA] +
3853 arc_mfu->arcs_lsize[ARC_BUFC_METADATA];
3854 available_memory += MIN(evictable_memory, arc_size - arc_c_min);
3855 }
3856
3857 if (inflight_data > available_memory / 4) {
3858 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3859 return (SET_ERROR(ERESTART));
3860 }
3861#endif
3862 return (0);
3863}
3864
3865void
3866arc_tempreserve_clear(uint64_t reserve)
3867{
3868 atomic_add_64(&arc_tempreserve, -reserve);
3869 ASSERT((int64_t)arc_tempreserve >= 0);
3870}
3871
3872int
3873arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3874{
3875 int error;
3876 uint64_t anon_size;
3877
3878#ifdef ZFS_DEBUG
3879 /*
3880 * Once in a while, fail for no reason. Everything should cope.
3881 */
3882 if (spa_get_random(10000) == 0) {
3883 dprintf("forcing random failure\n");
3884 return (SET_ERROR(ERESTART));
3885 }
3886#endif
3887 if (reserve > arc_c/4 && !arc_no_grow)
3888 arc_c = MIN(arc_c_max, reserve * 4);
3889 if (reserve > arc_c)
3890 return (SET_ERROR(ENOMEM));
3891
3892 /*
3893 * Don't count loaned bufs as in flight dirty data to prevent long
3894 * network delays from blocking transactions that are ready to be
3895 * assigned to a txg.
3896 */
3897 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3898
3899 /*
3900 * Writes will, almost always, require additional memory allocations
3901 * in order to compress/encrypt/etc the data. We therefore need to
3902 * make sure that there is sufficient available memory for this.
3903 */
3904 if (error = arc_memory_throttle(reserve, anon_size, txg))
3905 return (error);
3906
3907 /*
3908 * Throttle writes when the amount of dirty data in the cache
3909 * gets too large. We try to keep the cache less than half full
3910 * of dirty blocks so that our sync times don't grow too large.
3911 * Note: if two requests come in concurrently, we might let them
3912 * both succeed, when one of them should fail. Not a huge deal.
3913 */
3914
3915 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3916 anon_size > arc_c / 4) {
3917 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3918 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3919 arc_tempreserve>>10,
3920 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3921 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3922 reserve>>10, arc_c>>10);
3923 return (SET_ERROR(ERESTART));
3924 }
3925 atomic_add_64(&arc_tempreserve, reserve);
3926 return (0);
3927}
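
/*
 * A worked example of the dirty-data throttle above, with made-up numbers.
 * With arc_c = 4GB, a request is rejected once reserve + arc_tempreserve +
 * anon_size exceeds arc_c / 2 = 2GB *and* anon_size alone exceeds
 * arc_c / 4 = 1GB.  So a 100MB reserve with 500MB already temp-reserved and
 * 1.5GB of anonymous dirty data returns ERESTART, while the same reserve
 * with 1.2GB temp-reserved but only 800MB of anonymous data is admitted,
 * because anon_size stays under 1GB even though the sum exceeds 2GB.
 */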
3928
3929static kmutex_t arc_lowmem_lock;
3930#ifdef _KERNEL
3931static eventhandler_tag arc_event_lowmem = NULL;
3932
3933static void
3934arc_lowmem(void *arg __unused, int howto __unused)
3935{
3936
3937 /* Serialize access via arc_lowmem_lock. */
3938 mutex_enter(&arc_lowmem_lock);
3939 mutex_enter(&arc_reclaim_thr_lock);
3940 needfree = 1;
3941 cv_signal(&arc_reclaim_thr_cv);
3942
3943 /*
3944 * It is unsafe to block here in arbitrary threads, because we can come
3945 * here from ARC itself and may hold ARC locks and thus risk a deadlock
3946	 * with the ARC reclaim thread.
3947 */
3948 if (curproc == pageproc) {
3949 while (needfree)
3950 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
3951 }
3952 mutex_exit(&arc_reclaim_thr_lock);
3953 mutex_exit(&arc_lowmem_lock);
3954}
3955#endif
3956
3957void
3958arc_init(void)
3959{
3960 int i, prefetch_tunable_set = 0;
3961
3962 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
3963 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
3964 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
3965
3966 /* Convert seconds to clock ticks */
3967 arc_min_prefetch_lifespan = 1 * hz;
3968
3969 /* Start out with 1/8 of all memory */
3970 arc_c = kmem_size() / 8;
3971
3972#ifdef sun
3973#ifdef _KERNEL
3974 /*
3975 * On architectures where the physical memory can be larger
3976 * than the addressable space (intel in 32-bit mode), we may
3977 * need to limit the cache to 1/8 of VM size.
3978 */
3979 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
3980#endif
3981#endif /* sun */
3982 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
3983 arc_c_min = MAX(arc_c / 4, 64<<18);
3984	/* set max to 5/8 of all memory, or all but 1GB, whichever is more */
3985 if (arc_c * 8 >= 1<<30)
3986 arc_c_max = (arc_c * 8) - (1<<30);
3987 else
3988 arc_c_max = arc_c_min;
3989 arc_c_max = MAX(arc_c * 5, arc_c_max);
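	/*
	 * A worked example of the sizing above, assuming a hypothetical
	 * machine where kmem_size() reports 16GB: arc_c starts at 2GB,
	 * arc_c_min = MAX(512MB, 16MB) = 512MB, and since arc_c * 8 = 16GB
	 * is at least 1GB, arc_c_max = 16GB - 1GB = 15GB, which the final
	 * MAX(arc_c * 5, arc_c_max) leaves at 15GB (arc_c * 5 is only 10GB).
	 */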
3990
3991#ifdef _KERNEL
3992 /*
3993 * Allow the tunables to override our calculations if they are
3994	 * reasonable (i.e. over 16MB)
3995 */
3996 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
3997 arc_c_max = zfs_arc_max;
3998 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
3999 arc_c_min = zfs_arc_min;
4000#endif
4001
4002 arc_c = arc_c_max;
4003 arc_p = (arc_c >> 1);
4004
4005 /* limit meta-data to 1/4 of the arc capacity */
4006 arc_meta_limit = arc_c_max / 4;
4007
4008 /* Allow the tunable to override if it is reasonable */
4009 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4010 arc_meta_limit = zfs_arc_meta_limit;
4011
4012 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4013 arc_c_min = arc_meta_limit / 2;
4014
4015 if (zfs_arc_grow_retry > 0)
4016 arc_grow_retry = zfs_arc_grow_retry;
4017
4018 if (zfs_arc_shrink_shift > 0)
4019 arc_shrink_shift = zfs_arc_shrink_shift;
4020
4021 if (zfs_arc_p_min_shift > 0)
4022 arc_p_min_shift = zfs_arc_p_min_shift;
4023
4024	/* if kmem_flags are set, let's try to use less memory */
4025 if (kmem_debugging())
4026 arc_c = arc_c / 2;
4027 if (arc_c < arc_c_min)
4028 arc_c = arc_c_min;
4029
4030 zfs_arc_min = arc_c_min;
4031 zfs_arc_max = arc_c_max;
4032
4033 arc_anon = &ARC_anon;
4034 arc_mru = &ARC_mru;
4035 arc_mru_ghost = &ARC_mru_ghost;
4036 arc_mfu = &ARC_mfu;
4037 arc_mfu_ghost = &ARC_mfu_ghost;
4038 arc_l2c_only = &ARC_l2c_only;
4039 arc_size = 0;
4040
4041 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4042 mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4043 NULL, MUTEX_DEFAULT, NULL);
4044 mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4045 NULL, MUTEX_DEFAULT, NULL);
4046 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4047 NULL, MUTEX_DEFAULT, NULL);
4048 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4049 NULL, MUTEX_DEFAULT, NULL);
4050 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4051 NULL, MUTEX_DEFAULT, NULL);
4052 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4053 NULL, MUTEX_DEFAULT, NULL);
4054
4055 list_create(&arc_mru->arcs_lists[i],
4056 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4057 list_create(&arc_mru_ghost->arcs_lists[i],
4058 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4059 list_create(&arc_mfu->arcs_lists[i],
4060 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4061		list_create(&arc_mfu_ghost->arcs_lists[i],
4062		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4065 list_create(&arc_l2c_only->arcs_lists[i],
4066 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4067 }
4068
4069 buf_init();
4070
4071 arc_thread_exit = 0;
4072 arc_eviction_list = NULL;
4073 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4074 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4075
4076 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4077 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4078
4079 if (arc_ksp != NULL) {
4080 arc_ksp->ks_data = &arc_stats;
4081 kstat_install(arc_ksp);
4082 }
4083
4084 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4085 TS_RUN, minclsyspri);
4086
4087#ifdef _KERNEL
4088 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4089 EVENTHANDLER_PRI_FIRST);
4090#endif
4091
4092 arc_dead = FALSE;
4093 arc_warm = B_FALSE;
4094
4095 if (zfs_write_limit_max == 0)
4096 zfs_write_limit_max = ptob(physmem) >> zfs_write_limit_shift;
4097 else
4098 zfs_write_limit_shift = 0;
4099 mutex_init(&zfs_write_limit_lock, NULL, MUTEX_DEFAULT, NULL);
4100
4101#ifdef _KERNEL
4102 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4103 prefetch_tunable_set = 1;
4104
4105#ifdef __i386__
4106 if (prefetch_tunable_set == 0) {
4107 printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4108 "-- to enable,\n");
4109 printf(" add \"vfs.zfs.prefetch_disable=0\" "
4110 "to /boot/loader.conf.\n");
4111 zfs_prefetch_disable = 1;
4112 }
4113#else
4114 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4115 prefetch_tunable_set == 0) {
4116 printf("ZFS NOTICE: Prefetch is disabled by default if less "
4117 "than 4GB of RAM is present;\n"
4118 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
4119 "to /boot/loader.conf.\n");
4120 zfs_prefetch_disable = 1;
4121 }
4122#endif
4123 /* Warn about ZFS memory and address space requirements. */
4124 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4125 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4126 "expect unstable behavior.\n");
4127 }
4128 if (kmem_size() < 512 * (1 << 20)) {
4129 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4130 "expect unstable behavior.\n");
4131 printf(" Consider tuning vm.kmem_size and "
4132 "vm.kmem_size_max\n");
4133 printf(" in /boot/loader.conf.\n");
4134 }
4135#endif
4136}
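
/*
 * The notices above point at /boot/loader.conf; purely as an illustration
 * (values are arbitrary and the tunable spellings should be checked against
 * the running system), the knobs involved look like:
 *
 *	vfs.zfs.arc_max="4294967296"	# 4GB cap, feeds zfs_arc_max
 *	vfs.zfs.arc_min="536870912"	# 512MB floor, feeds zfs_arc_min
 *	vfs.zfs.prefetch_disable=0	# re-enable prefetch on small machines
 *	vm.kmem_size="6442450944"	# per the kmem_size() warning above
 */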
4137
4138void
4139arc_fini(void)
4140{
4141 int i;
4142
4143 mutex_enter(&arc_reclaim_thr_lock);
4144 arc_thread_exit = 1;
4145 cv_signal(&arc_reclaim_thr_cv);
4146 while (arc_thread_exit != 0)
4147 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4148 mutex_exit(&arc_reclaim_thr_lock);
4149
4150 arc_flush(NULL);
4151
4152 arc_dead = TRUE;
4153
4154 if (arc_ksp != NULL) {
4155 kstat_delete(arc_ksp);
4156 arc_ksp = NULL;
4157 }
4158
4159 mutex_destroy(&arc_eviction_mtx);
4160 mutex_destroy(&arc_reclaim_thr_lock);
4161 cv_destroy(&arc_reclaim_thr_cv);
4162
4163 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4164 list_destroy(&arc_mru->arcs_lists[i]);
4165 list_destroy(&arc_mru_ghost->arcs_lists[i]);
4166 list_destroy(&arc_mfu->arcs_lists[i]);
4167 list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4168 list_destroy(&arc_l2c_only->arcs_lists[i]);
4169
4170 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4171 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4172 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4173 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4174 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4175 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4176 }
4177
4178 mutex_destroy(&zfs_write_limit_lock);
4179
4180 buf_fini();
4181
4182 ASSERT(arc_loaned_bytes == 0);
4183
4184 mutex_destroy(&arc_lowmem_lock);
4185#ifdef _KERNEL
4186 if (arc_event_lowmem != NULL)
4187 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4188#endif
4189}
4190
4191/*
4192 * Level 2 ARC
4193 *
4194 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4195 * It uses dedicated storage devices to hold cached data, which are populated
4196 * using large infrequent writes. The main role of this cache is to boost
4197 * the performance of random read workloads. The intended L2ARC devices
4198 * include short-stroked disks, solid state disks, and other media with
4199 * substantially faster read latency than disk.
4200 *
4201 * +-----------------------+
4202 * | ARC |
4203 * +-----------------------+
4204 * | ^ ^
4205 * | | |
4206 * l2arc_feed_thread() arc_read()
4207 * | | |
4208 * | l2arc read |
4209 * V | |
4210 * +---------------+ |
4211 * | L2ARC | |
4212 * +---------------+ |
4213 * | ^ |
4214 * l2arc_write() | |
4215 * | | |
4216 * V | |
4217 * +-------+ +-------+
4218 * | vdev | | vdev |
4219 * | cache | | cache |
4220 * +-------+ +-------+
4221 * +=========+ .-----.
4222 * : L2ARC : |-_____-|
4223 * : devices : | Disks |
4224 * +=========+ `-_____-'
4225 *
4226 * Read requests are satisfied from the following sources, in order:
4227 *
4228 * 1) ARC
4229 * 2) vdev cache of L2ARC devices
4230 * 3) L2ARC devices
4231 * 4) vdev cache of disks
4232 * 5) disks
4233 *
4234 * Some L2ARC device types exhibit extremely slow write performance.
4235 * To accommodate this there are some significant differences between
4236 * the L2ARC and traditional cache design:
4237 *
4238 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4239 * the ARC behave as usual, freeing buffers and placing headers on ghost
4240 * lists. The ARC does not send buffers to the L2ARC during eviction as
4241 * this would inflate write latencies under ARC memory pressure.
4242 *
4243 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4244 * It does this by periodically scanning buffers from the eviction-end of
4245 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4246 * not already there. It scans until a headroom of buffers is satisfied,
4247 * which itself is a buffer for ARC eviction. If a compressible buffer is
4248 * found during scanning and selected for writing to an L2ARC device, we
4249 * temporarily boost scanning headroom during the next scan cycle to make
4250 * sure we adapt to compression effects (which might significantly reduce
4251 * the data volume we write to L2ARC). The thread that does this is
4252 * l2arc_feed_thread(), illustrated below; example sizes are included to
4253 * provide a better sense of ratio than this diagram:
4254 *
4255 * head --> tail
4256 * +---------------------+----------+
4257 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4258 * +---------------------+----------+ | o L2ARC eligible
4259 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
4260 * +---------------------+----------+ |
4261 * 15.9 Gbytes ^ 32 Mbytes |
4262 * headroom |
4263 * l2arc_feed_thread()
4264 * |
4265 * l2arc write hand <--[oooo]--'
4266 * | 8 Mbyte
4267 * | write max
4268 * V
4269 * +==============================+
4270 * L2ARC dev |####|#|###|###| |####| ... |
4271 * +==============================+
4272 * 32 Gbytes
4273 *
4274 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4275 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4276 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
4277 * safe to say that this is an uncommon case, since buffers at the end of
4278 * the ARC lists have moved there due to inactivity.
4279 *
4280 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4281 * then the L2ARC simply misses copying some buffers. This serves as a
4282 * pressure valve to prevent heavy read workloads from both stalling the ARC
4283 * with waits and clogging the L2ARC with writes. This also helps prevent
4284 * the potential for the L2ARC to churn if it attempts to cache content too
4285 * quickly, such as during backups of the entire pool.
4286 *
4287 * 5. After system boot and before the ARC has filled main memory, there are
4288 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4289 * lists can remain mostly static. Instead of searching from the tail of these
4290 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4291 * for eligible buffers, greatly increasing its chance of finding them.
4292 *
4293 * The L2ARC device write speed is also boosted during this time so that
4294 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4295 * there are no L2ARC reads, and no fear of degrading read performance
4296 * through increased writes.
4297 *
4298 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4299 * the vdev queue can aggregate them into larger and fewer writes. Each
4300 * device is written to in a rotor fashion, sweeping writes through
4301 * available space then repeating.
4302 *
4303 * 7. The L2ARC does not store dirty content. It never needs to flush
4304 * write buffers back to disk based storage.
4305 *
4306 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4307 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4308 *
4309 * The performance of the L2ARC can be tweaked by a number of tunables, which
4310 * may be necessary for different workloads:
4311 *
4312 * l2arc_write_max max write bytes per interval
4313 * l2arc_write_boost extra write bytes during device warmup
4314 * l2arc_noprefetch skip caching prefetched buffers
4315 * l2arc_headroom number of max device writes to precache
4316 * l2arc_headroom_boost when we find compressed buffers during ARC
4317 * scanning, we multiply headroom by this
4318 * percentage factor for the next scan cycle,
4319 * since more compressed buffers are likely to
4320 * be present
4321 * l2arc_feed_secs seconds between L2ARC writing
4322 *
4323 * Tunables may be removed or added as future performance improvements are
4324 * integrated, and also may become zpool properties.
4325 *
4326 * There are three key functions that control how the L2ARC warms up:
4327 *
4328 * l2arc_write_eligible() check if a buffer is eligible to cache
4329 * l2arc_write_size() calculate how much to write
4330 * l2arc_write_interval() calculate sleep delay between writes
4331 *
4332 * These three functions determine what to write, how much, and how quickly
4333 * to send writes.
4334 */
4335
4336static boolean_t
4337l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4338{
4339 /*
4340 * A buffer is *not* eligible for the L2ARC if it:
4341 * 1. belongs to a different spa.
4342 * 2. is already cached on the L2ARC.
4343 * 3. has an I/O in progress (it may be an incomplete read).
4344 * 4. is flagged not eligible (zfs property).
4345 */
4346 if (ab->b_spa != spa_guid) {
4347 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4348 return (B_FALSE);
4349 }
4350 if (ab->b_l2hdr != NULL) {
4351 ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4352 return (B_FALSE);
4353 }
4354 if (HDR_IO_IN_PROGRESS(ab)) {
4355 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4356 return (B_FALSE);
4357 }
4358 if (!HDR_L2CACHE(ab)) {
4359 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4360 return (B_FALSE);
4361 }
4362
4363 return (B_TRUE);
4364}
4365
4366static uint64_t
4367l2arc_write_size(void)
4368{
4369 uint64_t size;
4370
4371 /*
4372 * Make sure our globals have meaningful values in case the user
4373 * altered them.
4374 */
4375 size = l2arc_write_max;
4376 if (size == 0) {
4377 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4378 "be greater than zero, resetting it to the default (%d)",
4379 L2ARC_WRITE_SIZE);
4380 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4381 }
4382
4383 if (arc_warm == B_FALSE)
4384 size += l2arc_write_boost;
4385
4386 return (size);
4387
4388}
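
/*
 * For a sense of scale, assuming the stock defaults where l2arc_write_max
 * and l2arc_write_boost are both 8MB (L2ARC_WRITE_SIZE): each feed cycle
 * writes at most 8MB once the ARC is warm, and up to 16MB while arc_warm
 * is still B_FALSE shortly after boot.
 */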
4389
4390static clock_t
4391l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4392{
4393 clock_t interval, next, now;
4394
4395 /*
4396 * If the ARC lists are busy, increase our write rate; if the
4397 * lists are stale, idle back. This is achieved by checking
4398 * how much we previously wrote - if it was more than half of
4399 * what we wanted, schedule the next write much sooner.
4400 */
4401 if (l2arc_feed_again && wrote > (wanted / 2))
4402 interval = (hz * l2arc_feed_min_ms) / 1000;
4403 else
4404 interval = hz * l2arc_feed_secs;
4405
4406 now = ddi_get_lbolt();
4407 next = MAX(now, MIN(now + interval, began + interval));
4408
4409 return (next);
4410}
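
/*
 * A worked example of the interval math above, assuming the defaults
 * l2arc_feed_secs = 1 and l2arc_feed_min_ms = 200.  If the previous pass
 * wanted 8MB and wrote 6MB (more than half), the next wake-up lands
 * hz / 5 ticks (200ms) after the pass began; if it wrote only 2MB, the
 * thread idles back to a full hz ticks (1s).  The MAX(now, ...) clamp
 * keeps the result from landing in the past when a slow write pass has
 * already overrun the interval.
 */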
4411
4412static void
4413l2arc_hdr_stat_add(void)
4414{
4415 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4416 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4417}
4418
4419static void
4420l2arc_hdr_stat_remove(void)
4421{
4422 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4423 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4424}
4425
4426/*
4427 * Cycle through L2ARC devices. This is how L2ARC load balances.
4428 * If a device is returned, this also returns holding the spa config lock.
4429 */
4430static l2arc_dev_t *
4431l2arc_dev_get_next(void)
4432{
4433 l2arc_dev_t *first, *next = NULL;
4434
4435 /*
4436 * Lock out the removal of spas (spa_namespace_lock), then removal
4437 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4438 * both locks will be dropped and a spa config lock held instead.
4439 */
4440 mutex_enter(&spa_namespace_lock);
4441 mutex_enter(&l2arc_dev_mtx);
4442
4443 /* if there are no vdevs, there is nothing to do */
4444 if (l2arc_ndev == 0)
4445 goto out;
4446
4447 first = NULL;
4448 next = l2arc_dev_last;
4449 do {
4450 /* loop around the list looking for a non-faulted vdev */
4451 if (next == NULL) {
4452 next = list_head(l2arc_dev_list);
4453 } else {
4454 next = list_next(l2arc_dev_list, next);
4455 if (next == NULL)
4456 next = list_head(l2arc_dev_list);
4457 }
4458
4459 /* if we have come back to the start, bail out */
4460 if (first == NULL)
4461 first = next;
4462 else if (next == first)
4463 break;
4464
4465 } while (vdev_is_dead(next->l2ad_vdev));
4466
4467 /* if we were unable to find any usable vdevs, return NULL */
4468 if (vdev_is_dead(next->l2ad_vdev))
4469 next = NULL;
4470
4471 l2arc_dev_last = next;
4472
4473out:
4474 mutex_exit(&l2arc_dev_mtx);
4475
4476 /*
4477 * Grab the config lock to prevent the 'next' device from being
4478 * removed while we are writing to it.
4479 */
4480 if (next != NULL)
4481 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4482 mutex_exit(&spa_namespace_lock);
4483
4484 return (next);
4485}
4486
4487/*
4488 * Free buffers that were tagged for destruction.
4489 */
4490static void
4491l2arc_do_free_on_write()
4492{
4493 list_t *buflist;
4494 l2arc_data_free_t *df, *df_prev;
4495
4496 mutex_enter(&l2arc_free_on_write_mtx);
4497 buflist = l2arc_free_on_write;
4498
4499 for (df = list_tail(buflist); df; df = df_prev) {
4500 df_prev = list_prev(buflist, df);
4501 ASSERT(df->l2df_data != NULL);
4502 ASSERT(df->l2df_func != NULL);
4503 df->l2df_func(df->l2df_data, df->l2df_size);
4504 list_remove(buflist, df);
4505 kmem_free(df, sizeof (l2arc_data_free_t));
4506 }
4507
4508 mutex_exit(&l2arc_free_on_write_mtx);
4509}
4510
4511/*
4512 * A write to a cache device has completed. Update all headers to allow
4513 * reads from these buffers to begin.
4514 */
4515static void
4516l2arc_write_done(zio_t *zio)
4517{
4518 l2arc_write_callback_t *cb;
4519 l2arc_dev_t *dev;
4520 list_t *buflist;
4521 arc_buf_hdr_t *head, *ab, *ab_prev;
4522 l2arc_buf_hdr_t *abl2;
4523 kmutex_t *hash_lock;
4524
4525 cb = zio->io_private;
4526 ASSERT(cb != NULL);
4527 dev = cb->l2wcb_dev;
4528 ASSERT(dev != NULL);
4529 head = cb->l2wcb_head;
4530 ASSERT(head != NULL);
4531 buflist = dev->l2ad_buflist;
4532 ASSERT(buflist != NULL);
4533 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4534 l2arc_write_callback_t *, cb);
4535
4536 if (zio->io_error != 0)
4537 ARCSTAT_BUMP(arcstat_l2_writes_error);
4538
4539 mutex_enter(&l2arc_buflist_mtx);
4540
4541 /*
4542 * All writes completed, or an error was hit.
4543 */
4544 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4545 ab_prev = list_prev(buflist, ab);
4546
4547 hash_lock = HDR_LOCK(ab);
4548 if (!mutex_tryenter(hash_lock)) {
4549 /*
4550 * This buffer misses out. It may be in a stage
4551 * of eviction. Its ARC_L2_WRITING flag will be
4552 * left set, denying reads to this buffer.
4553 */
4554 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4555 continue;
4556 }
4557
4558 abl2 = ab->b_l2hdr;
4559
4560 /*
4561 * Release the temporary compressed buffer as soon as possible.
4562 */
4563 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4564 l2arc_release_cdata_buf(ab);
4565
4566 if (zio->io_error != 0) {
4567 /*
4568 * Error - drop L2ARC entry.
4569 */
4570 list_remove(buflist, ab);
4571 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4572 ab->b_l2hdr = NULL;
4573 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4574 ab->b_size, 0);
4575 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4576 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4577 }
4578
4579 /*
4580 * Allow ARC to begin reads to this L2ARC entry.
4581 */
4582 ab->b_flags &= ~ARC_L2_WRITING;
4583
4584 mutex_exit(hash_lock);
4585 }
4586
4587 atomic_inc_64(&l2arc_writes_done);
4588 list_remove(buflist, head);
4589 kmem_cache_free(hdr_cache, head);
4590 mutex_exit(&l2arc_buflist_mtx);
4591
4592 l2arc_do_free_on_write();
4593
4594 kmem_free(cb, sizeof (l2arc_write_callback_t));
4595}
4596
4597/*
4598 * A read to a cache device completed. Validate buffer contents before
4599 * handing over to the regular ARC routines.
4600 */
4601static void
4602l2arc_read_done(zio_t *zio)
4603{
4604 l2arc_read_callback_t *cb;
4605 arc_buf_hdr_t *hdr;
4606 arc_buf_t *buf;
4607 kmutex_t *hash_lock;
4608 int equal;
4609
4610 ASSERT(zio->io_vd != NULL);
4611 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4612
4613 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4614
4615 cb = zio->io_private;
4616 ASSERT(cb != NULL);
4617 buf = cb->l2rcb_buf;
4618 ASSERT(buf != NULL);
4619
4620 hash_lock = HDR_LOCK(buf->b_hdr);
4621 mutex_enter(hash_lock);
4622 hdr = buf->b_hdr;
4623 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4624
4625 /*
4626 * If the buffer was compressed, decompress it first.
4627 */
4628 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4629 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4630 ASSERT(zio->io_data != NULL);
4631
4632 /*
4633 * Check this survived the L2ARC journey.
4634 */
4635 equal = arc_cksum_equal(buf);
4636 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4637 mutex_exit(hash_lock);
4638 zio->io_private = buf;
4639 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4640 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4641 arc_read_done(zio);
4642 } else {
4643 mutex_exit(hash_lock);
4644 /*
4645 * Buffer didn't survive caching. Increment stats and
4646 * reissue to the original storage device.
4647 */
4648 if (zio->io_error != 0) {
4649 ARCSTAT_BUMP(arcstat_l2_io_error);
4650 } else {
4651 zio->io_error = SET_ERROR(EIO);
4652 }
4653 if (!equal)
4654 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4655
4656 /*
4657 * If there's no waiter, issue an async i/o to the primary
4658 * storage now. If there *is* a waiter, the caller must
4659 * issue the i/o in a context where it's OK to block.
4660 */
4661 if (zio->io_waiter == NULL) {
4662 zio_t *pio = zio_unique_parent(zio);
4663
4664 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4665
4666 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4667 buf->b_data, zio->io_size, arc_read_done, buf,
4668 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4669 }
4670 }
4671
4672 kmem_free(cb, sizeof (l2arc_read_callback_t));
4673}
4674
4675/*
4676 * This is the list priority from which the L2ARC will search for pages to
4677 * cache. This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to cycle
4678 * through lists in the desired order. This order can have a significant
4679 * effect on cache performance.
4680 *
4681 * Currently the metadata lists are hit first, MFU then MRU, followed by
4682 * the data lists. This function returns a locked list, and also returns
4683 * the lock pointer.
4684 */
4685static list_t *
4686l2arc_list_locked(int list_num, kmutex_t **lock)
4687{
4688 list_t *list = NULL;
4689 int idx;
4690
4691 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4692
4693 if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4694 idx = list_num;
4695 list = &arc_mfu->arcs_lists[idx];
4696 *lock = ARCS_LOCK(arc_mfu, idx);
4697 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4698 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4699 list = &arc_mru->arcs_lists[idx];
4700 *lock = ARCS_LOCK(arc_mru, idx);
4701 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4702 ARC_BUFC_NUMDATALISTS)) {
4703 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4704 list = &arc_mfu->arcs_lists[idx];
4705 *lock = ARCS_LOCK(arc_mfu, idx);
4706 } else {
4707 idx = list_num - ARC_BUFC_NUMLISTS;
4708 list = &arc_mru->arcs_lists[idx];
4709 *lock = ARCS_LOCK(arc_mru, idx);
4710 }
4711
4712 ASSERT(!(MUTEX_HELD(*lock)));
4713 mutex_enter(*lock);
4714 return (list);
4715}
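
/*
 * To illustrate the mapping above with hypothetical sizes, suppose
 * ARC_BUFC_NUMMETADATALISTS is 4 and ARC_BUFC_NUMDATALISTS is 12 (so
 * ARC_BUFC_NUMLISTS is 16).  Then list_num 0-3 selects MFU metadata
 * lists 0-3, 4-7 selects MRU metadata lists 0-3, 8-19 selects MFU data
 * lists 4-15, and 20-31 selects MRU data lists 4-15: metadata first,
 * and MFU ahead of MRU, exactly as described.
 */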
4716
4717/*
4718 * Evict buffers from the device write hand to the distance specified in
4719 * bytes. This distance may span populated buffers, it may span nothing.
4720 * This is clearing a region on the L2ARC device ready for writing.
4721 * If the 'all' boolean is set, every buffer is evicted.
4722 */
4723static void
4724l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4725{
4726 list_t *buflist;
4727 l2arc_buf_hdr_t *abl2;
4728 arc_buf_hdr_t *ab, *ab_prev;
4729 kmutex_t *hash_lock;
4730 uint64_t taddr;
4731
4732 buflist = dev->l2ad_buflist;
4733
4734 if (buflist == NULL)
4735 return;
4736
4737 if (!all && dev->l2ad_first) {
4738 /*
4739 * This is the first sweep through the device. There is
4740 * nothing to evict.
4741 */
4742 return;
4743 }
4744
4745 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4746 /*
4747 * When nearing the end of the device, evict to the end
4748 * before the device write hand jumps to the start.
4749 */
4750 taddr = dev->l2ad_end;
4751 } else {
4752 taddr = dev->l2ad_hand + distance;
4753 }
4754 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4755 uint64_t, taddr, boolean_t, all);
4756
4757top:
4758 mutex_enter(&l2arc_buflist_mtx);
4759 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4760 ab_prev = list_prev(buflist, ab);
4761
4762 hash_lock = HDR_LOCK(ab);
4763 if (!mutex_tryenter(hash_lock)) {
4764 /*
4765 * Missed the hash lock. Retry.
4766 */
4767 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4768 mutex_exit(&l2arc_buflist_mtx);
4769 mutex_enter(hash_lock);
4770 mutex_exit(hash_lock);
4771 goto top;
4772 }
4773
4774 if (HDR_L2_WRITE_HEAD(ab)) {
4775 /*
4776 * We hit a write head node. Leave it for
4777 * l2arc_write_done().
4778 */
4779 list_remove(buflist, ab);
4780 mutex_exit(hash_lock);
4781 continue;
4782 }
4783
4784 if (!all && ab->b_l2hdr != NULL &&
4785 (ab->b_l2hdr->b_daddr > taddr ||
4786 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4787 /*
4788 * We've evicted to the target address,
4789 * or the end of the device.
4790 */
4791 mutex_exit(hash_lock);
4792 break;
4793 }
4794
4795 if (HDR_FREE_IN_PROGRESS(ab)) {
4796 /*
4797 * Already on the path to destruction.
4798 */
4799 mutex_exit(hash_lock);
4800 continue;
4801 }
4802
4803 if (ab->b_state == arc_l2c_only) {
4804 ASSERT(!HDR_L2_READING(ab));
4805 /*
4806 * This doesn't exist in the ARC. Destroy.
4807 * arc_hdr_destroy() will call list_remove()
4808 * and decrement arcstat_l2_size.
4809 */
4810 arc_change_state(arc_anon, ab, hash_lock);
4811 arc_hdr_destroy(ab);
4812 } else {
4813 /*
4814 * Invalidate issued or about to be issued
4815 * reads, since we may be about to write
4816 * over this location.
4817 */
4818 if (HDR_L2_READING(ab)) {
4819 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4820 ab->b_flags |= ARC_L2_EVICTED;
4821 }
4822
4823 /*
4824 * Tell ARC this no longer exists in L2ARC.
4825 */
4826 if (ab->b_l2hdr != NULL) {
4827 abl2 = ab->b_l2hdr;
4828 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4829 ab->b_l2hdr = NULL;
4830 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4831 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4832 }
4833 list_remove(buflist, ab);
4834
4835 /*
4836 * This may have been leftover after a
4837 * failed write.
4838 */
4839 ab->b_flags &= ~ARC_L2_WRITING;
4840 }
4841 mutex_exit(hash_lock);
4842 }
4843 mutex_exit(&l2arc_buflist_mtx);
4844
4845 vdev_space_update(dev->l2ad_vdev, -(taddr - dev->l2ad_evict), 0, 0);
4846 dev->l2ad_evict = taddr;
4847}
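
/*
 * A worked example of the target address above, with made-up numbers: on
 * a device spanning 32GB with an 8MB eviction distance, a write hand at
 * 10GB clears buffers up to taddr = 10GB + 8MB.  Once the hand is within
 * 2 * distance (16MB) of l2ad_end, everything out to the end of the
 * device is evicted instead, so the hand can wrap back to l2ad_start on
 * the next write pass.
 */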
4848
4849/*
4850 * Find and write ARC buffers to the L2ARC device.
4851 *
4852 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4853 * for reading until they have completed writing.
4854 * The headroom_boost is an in-out parameter used to maintain headroom boost
4855 * state between calls to this function.
4856 *
4857 * Returns the number of bytes actually written (which may be smaller than
4858 * the delta by which the device hand has changed due to alignment).
4859 */
4860static uint64_t
4861l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4862 boolean_t *headroom_boost)
4863{
4864 arc_buf_hdr_t *ab, *ab_prev, *head;
4865 list_t *list;
4866 uint64_t write_asize, write_psize, write_sz, headroom,
4867 buf_compress_minsz;
4868 void *buf_data;
4869 kmutex_t *list_lock;
4870 boolean_t full;
4871 l2arc_write_callback_t *cb;
4872 zio_t *pio, *wzio;
4873 uint64_t guid = spa_load_guid(spa);
4874 const boolean_t do_headroom_boost = *headroom_boost;
4875 int try;
4876
4877 ASSERT(dev->l2ad_vdev != NULL);
4878
4879 /* Lower the flag now, we might want to raise it again later. */
4880 *headroom_boost = B_FALSE;
4881
4882 pio = NULL;
4883 write_sz = write_asize = write_psize = 0;
4884 full = B_FALSE;
4885 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4886 head->b_flags |= ARC_L2_WRITE_HEAD;
4887
4888 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4889 /*
4890 * We will want to try to compress buffers that are at least 2x the
4891 * device sector size.
4892 */
4893 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4894
4895 /*
4896 * Copy buffers for L2ARC writing.
4897 */
4898 mutex_enter(&l2arc_buflist_mtx);
4899 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4900 uint64_t passed_sz = 0;
4901
4902 list = l2arc_list_locked(try, &list_lock);
4903 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4904
4905 /*
4906 * L2ARC fast warmup.
4907 *
4908 * Until the ARC is warm and starts to evict, read from the
4909 * head of the ARC lists rather than the tail.
4910 */
4911 if (arc_warm == B_FALSE)
4912 ab = list_head(list);
4913 else
4914 ab = list_tail(list);
4915 if (ab == NULL)
4916 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
4917
4918 headroom = target_sz * l2arc_headroom;
4919 if (do_headroom_boost)
4920 headroom = (headroom * l2arc_headroom_boost) / 100;
4921
4922 for (; ab; ab = ab_prev) {
4923 l2arc_buf_hdr_t *l2hdr;
4924 kmutex_t *hash_lock;
4925 uint64_t buf_sz;
4926
4927 if (arc_warm == B_FALSE)
4928 ab_prev = list_next(list, ab);
4929 else
4930 ab_prev = list_prev(list, ab);
4931 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
4932
4933 hash_lock = HDR_LOCK(ab);
4934 if (!mutex_tryenter(hash_lock)) {
4935 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
4936 /*
4937 * Skip this buffer rather than waiting.
4938 */
4939 continue;
4940 }
4941
4942 passed_sz += ab->b_size;
4943 if (passed_sz > headroom) {
4944 /*
4945 * Searched too far.
4946 */
4947 mutex_exit(hash_lock);
4948 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
4949 break;
4950 }
4951
4952 if (!l2arc_write_eligible(guid, ab)) {
4953 mutex_exit(hash_lock);
4954 continue;
4955 }
4956
4957 if ((write_sz + ab->b_size) > target_sz) {
4958 full = B_TRUE;
4959 mutex_exit(hash_lock);
4960 ARCSTAT_BUMP(arcstat_l2_write_full);
4961 break;
4962 }
4963
4964 if (pio == NULL) {
4965 /*
4966 * Insert a dummy header on the buflist so
4967 * l2arc_write_done() can find where the
4968 * write buffers begin without searching.
4969 */
4970 list_insert_head(dev->l2ad_buflist, head);
4971
4972 cb = kmem_alloc(
4973 sizeof (l2arc_write_callback_t), KM_SLEEP);
4974 cb->l2wcb_dev = dev;
4975 cb->l2wcb_head = head;
4976 pio = zio_root(spa, l2arc_write_done, cb,
4977 ZIO_FLAG_CANFAIL);
4978 ARCSTAT_BUMP(arcstat_l2_write_pios);
4979 }
4980
4981 /*
4982 * Create and add a new L2ARC header.
4983 */
4984 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4985 l2hdr->b_dev = dev;
4986 ab->b_flags |= ARC_L2_WRITING;
4987
4988 /*
4989 * Temporarily stash the data buffer in b_tmp_cdata.
4990 * The subsequent write step will pick it up from
4991			 * there. This is because we can't access ab->b_buf
4992 * without holding the hash_lock, which we in turn
4993 * can't access without holding the ARC list locks
4994 * (which we want to avoid during compression/writing).
4995 */
4996 l2hdr->b_compress = ZIO_COMPRESS_OFF;
4997 l2hdr->b_asize = ab->b_size;
4998 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4999
5000 buf_sz = ab->b_size;
5001 ab->b_l2hdr = l2hdr;
5002
5003 list_insert_head(dev->l2ad_buflist, ab);
5004
5005 /*
5006 * Compute and store the buffer cksum before
5007 * writing. On debug the cksum is verified first.
5008 */
5009 arc_cksum_verify(ab->b_buf);
5010 arc_cksum_compute(ab->b_buf, B_TRUE);
5011
5012 mutex_exit(hash_lock);
5013
5014 write_sz += buf_sz;
5015 }
5016
5017 mutex_exit(list_lock);
5018
5019 if (full == B_TRUE)
5020 break;
5021 }
5022
5023 /* No buffers selected for writing? */
5024 if (pio == NULL) {
5025 ASSERT0(write_sz);
5026 mutex_exit(&l2arc_buflist_mtx);
5027 kmem_cache_free(hdr_cache, head);
5028 return (0);
5029 }
5030
5031 /*
5032 * Now start writing the buffers. We're starting at the write head
5033 * and work backwards, retracing the course of the buffer selector
5034 * loop above.
5035 */
5036 for (ab = list_prev(dev->l2ad_buflist, head); ab;
5037 ab = list_prev(dev->l2ad_buflist, ab)) {
5038 l2arc_buf_hdr_t *l2hdr;
5039 uint64_t buf_sz;
5040
5041 /*
5042 * We shouldn't need to lock the buffer here, since we flagged
5043 * it as ARC_L2_WRITING in the previous step, but we must take
5044 * care to only access its L2 cache parameters. In particular,
5045 * ab->b_buf may be invalid by now due to ARC eviction.
5046 */
5047 l2hdr = ab->b_l2hdr;
5048 l2hdr->b_daddr = dev->l2ad_hand;
5049
5050 if ((ab->b_flags & ARC_L2COMPRESS) &&
5051 l2hdr->b_asize >= buf_compress_minsz) {
5052 if (l2arc_compress_buf(l2hdr)) {
5053 /*
5054 * If compression succeeded, enable headroom
5055 * boost on the next scan cycle.
5056 */
5057 *headroom_boost = B_TRUE;
5058 }
5059 }
5060
5061 /*
5062 * Pick up the buffer data we had previously stashed away
5063 * (and now potentially also compressed).
5064 */
5065 buf_data = l2hdr->b_tmp_cdata;
5066 buf_sz = l2hdr->b_asize;
5067
5068 /* Compression may have squashed the buffer to zero length. */
5069 if (buf_sz != 0) {
5070 uint64_t buf_p_sz;
5071
5072 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5073 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5074 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5075 ZIO_FLAG_CANFAIL, B_FALSE);
5076
5077 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5078 zio_t *, wzio);
5079 (void) zio_nowait(wzio);
5080
5081 write_asize += buf_sz;
5082 /*
5083 * Keep the clock hand suitably device-aligned.
5084 */
5085 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5086 write_psize += buf_p_sz;
5087 dev->l2ad_hand += buf_p_sz;
5088 }
5089 }
5090
5091 mutex_exit(&l2arc_buflist_mtx);
5092
5093 ASSERT3U(write_asize, <=, target_sz);
5094 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5095 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5096 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5097 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5098 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
5099
5100 /*
5101 * Bump device hand to the device start if it is approaching the end.
5102 * l2arc_evict() will already have evicted ahead for this case.
5103 */
5104 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5105 vdev_space_update(dev->l2ad_vdev,
5106 dev->l2ad_end - dev->l2ad_hand, 0, 0);
5107 dev->l2ad_hand = dev->l2ad_start;
5108 dev->l2ad_evict = dev->l2ad_start;
5109 dev->l2ad_first = B_FALSE;
5110 }
5111
5112 dev->l2ad_writing = B_TRUE;
5113 (void) zio_wait(pio);
5114 dev->l2ad_writing = B_FALSE;
5115
5116 return (write_asize);
5117}
5118
5119/*
5120 * Compresses an L2ARC buffer.
5121 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5122 * size in l2hdr->b_asize. This routine tries to compress the data and
5123 * depending on the compression result there are three possible outcomes:
5124 * *) The buffer was incompressible. The original l2hdr contents were left
5125 * untouched and are ready for writing to an L2 device.
5126 * *) The buffer was all-zeros, so there is no need to write it to an L2
5127 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5128 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5129 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5130 * data buffer which holds the compressed data to be written, and b_asize
5131 * tells us how much data there is. b_compress is set to the appropriate
5132 * compression algorithm. Once writing is done, invoke
5133 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5134 *
5135 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5136 * buffer was incompressible).
5137 */
5138static boolean_t
5139l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5140{
5141 void *cdata;
5142 size_t csize, len;
5143
5144 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5145 ASSERT(l2hdr->b_tmp_cdata != NULL);
5146
5147 len = l2hdr->b_asize;
5148 cdata = zio_data_buf_alloc(len);
5149 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5150 cdata, l2hdr->b_asize);
5150 cdata, l2hdr->b_asize, (size_t)SPA_MINBLOCKSIZE);
5151
5152 if (csize == 0) {
5153 /* zero block, indicate that there's nothing to write */
5154 zio_data_buf_free(cdata, len);
5155 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5156 l2hdr->b_asize = 0;
5157 l2hdr->b_tmp_cdata = NULL;
5158 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5159 return (B_TRUE);
5160 } else if (csize > 0 && csize < len) {
5161 /*
5162 * Compression succeeded, we'll keep the cdata around for
5163 * writing and release it afterwards.
5164 */
5165 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5166 l2hdr->b_asize = csize;
5167 l2hdr->b_tmp_cdata = cdata;
5168 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5169 return (B_TRUE);
5170 } else {
5171 /*
5172 * Compression failed, release the compressed buffer.
5173 * l2hdr will be left unmodified.
5174 */
5175 zio_data_buf_free(cdata, len);
5176 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5177 return (B_FALSE);
5178 }
5179}
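
/*
 * To make the three outcomes above concrete with made-up sizes: a 128KB
 * buffer that LZ4 squeezes down to 40KB ends up with b_compress =
 * ZIO_COMPRESS_LZ4, b_asize = 40KB and b_tmp_cdata pointing at the freshly
 * allocated compression buffer; an all-zero 128KB buffer ends up with
 * b_asize = 0 and ZIO_COMPRESS_EMPTY, so nothing is written at all; and a
 * buffer that does not shrink is written uncompressed, with the l2hdr left
 * untouched.
 */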
5180
5181/*
5182 * Decompresses a zio read back from an l2arc device. On success, the
5183 * underlying zio's io_data buffer is overwritten by the uncompressed
5184 * version. On decompression error (corrupt compressed stream), the
5185 * zio->io_error value is set to signal an I/O error.
5186 *
5187 * Please note that the compressed data stream is not checksummed, so
5188 * if the underlying device is experiencing data corruption, we may feed
5189 * corrupt data to the decompressor; the decompressor therefore needs
5190 * to be able to handle this situation (LZ4 does).
5191 */
5192static void
5193l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5194{
5195 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5196
5197 if (zio->io_error != 0) {
5198 /*
5199		 * An I/O error has occurred; just restore the original I/O
5200 * size in preparation for a main pool read.
5201 */
5202 zio->io_orig_size = zio->io_size = hdr->b_size;
5203 return;
5204 }
5205
5206 if (c == ZIO_COMPRESS_EMPTY) {
5207 /*
5208 * An empty buffer results in a null zio, which means we
5209 * need to fill its io_data after we're done restoring the
5210 * buffer's contents.
5211 */
5212 ASSERT(hdr->b_buf != NULL);
5213 bzero(hdr->b_buf->b_data, hdr->b_size);
5214 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5215 } else {
5216 ASSERT(zio->io_data != NULL);
5217 /*
5218 * We copy the compressed data from the start of the arc buffer
5219 * (the zio_read will have pulled in only what we need, the
5220 * rest is garbage which we will overwrite at decompression)
5221 * and then decompress back to the ARC data buffer. This way we
5222 * can minimize copying by simply decompressing back over the
5223 * original compressed data (rather than decompressing to an
5224 * aux buffer and then copying back the uncompressed buffer,
5225 * which is likely to be much larger).
5226 */
5227 uint64_t csize;
5228 void *cdata;
5229
5230 csize = zio->io_size;
5231 cdata = zio_data_buf_alloc(csize);
5232 bcopy(zio->io_data, cdata, csize);
5233 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5234 hdr->b_size) != 0)
5235 zio->io_error = EIO;
5236 zio_data_buf_free(cdata, csize);
5237 }
5238
5239 /* Restore the expected uncompressed IO size. */
5240 zio->io_orig_size = zio->io_size = hdr->b_size;
5241}
5242
5243/*
5244 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5245 * This buffer serves as a temporary holder of compressed data while
5246 * the buffer entry is being written to an l2arc device. Once that is
5247 * done, we can dispose of it.
5248 */
5249static void
5250l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5251{
5252 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5253
5254 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5255 /*
5256 * If the data was compressed, then we've allocated a
5257 * temporary buffer for it, so now we need to release it.
5258 */
5259 ASSERT(l2hdr->b_tmp_cdata != NULL);
5260 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5261 }
5262 l2hdr->b_tmp_cdata = NULL;
5263}
5264
5265/*
5266 * This thread feeds the L2ARC at regular intervals. This is the beating
5267 * heart of the L2ARC.
5268 */
5269static void
5270l2arc_feed_thread(void *dummy __unused)
5271{
5272 callb_cpr_t cpr;
5273 l2arc_dev_t *dev;
5274 spa_t *spa;
5275 uint64_t size, wrote;
5276 clock_t begin, next = ddi_get_lbolt();
5277 boolean_t headroom_boost = B_FALSE;
5278
5279 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5280
5281 mutex_enter(&l2arc_feed_thr_lock);
5282
5283 while (l2arc_thread_exit == 0) {
5284 CALLB_CPR_SAFE_BEGIN(&cpr);
5285 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5286 next - ddi_get_lbolt());
5287 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5288 next = ddi_get_lbolt() + hz;
5289
5290 /*
5291 * Quick check for L2ARC devices.
5292 */
5293 mutex_enter(&l2arc_dev_mtx);
5294 if (l2arc_ndev == 0) {
5295 mutex_exit(&l2arc_dev_mtx);
5296 continue;
5297 }
5298 mutex_exit(&l2arc_dev_mtx);
5299 begin = ddi_get_lbolt();
5300
5301 /*
5302 * This selects the next l2arc device to write to, and in
5303 * doing so the next spa to feed from: dev->l2ad_spa. This
5304 * will return NULL if there are now no l2arc devices or if
5305 * they are all faulted.
5306 *
5307 * If a device is returned, its spa's config lock is also
5308 * held to prevent device removal. l2arc_dev_get_next()
5309 * will grab and release l2arc_dev_mtx.
5310 */
5311 if ((dev = l2arc_dev_get_next()) == NULL)
5312 continue;
5313
5314 spa = dev->l2ad_spa;
5315 ASSERT(spa != NULL);
5316
5317 /*
5318 * If the pool is read-only then force the feed thread to
5319 * sleep a little longer.
5320 */
5321 if (!spa_writeable(spa)) {
5322 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5323 spa_config_exit(spa, SCL_L2ARC, dev);
5324 continue;
5325 }
5326
5327 /*
5328 * Avoid contributing to memory pressure.
5329 */
5330 if (arc_reclaim_needed()) {
5331 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5332 spa_config_exit(spa, SCL_L2ARC, dev);
5333 continue;
5334 }
5335
5336 ARCSTAT_BUMP(arcstat_l2_feeds);
5337
5338 size = l2arc_write_size();
5339
5340 /*
5341 * Evict L2ARC buffers that will be overwritten.
5342 */
5343 l2arc_evict(dev, size, B_FALSE);
5344
5345 /*
5346 * Write ARC buffers.
5347 */
5348 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5349
5350 /*
5351 * Calculate interval between writes.
5352 */
5353 next = l2arc_write_interval(begin, size, wrote);
5354 spa_config_exit(spa, SCL_L2ARC, dev);
5355 }
5356
5357 l2arc_thread_exit = 0;
5358 cv_broadcast(&l2arc_feed_thr_cv);
5359 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5360 thread_exit();
5361}
5362
5363boolean_t
5364l2arc_vdev_present(vdev_t *vd)
5365{
5366 l2arc_dev_t *dev;
5367
5368 mutex_enter(&l2arc_dev_mtx);
5369 for (dev = list_head(l2arc_dev_list); dev != NULL;
5370 dev = list_next(l2arc_dev_list, dev)) {
5371 if (dev->l2ad_vdev == vd)
5372 break;
5373 }
5374 mutex_exit(&l2arc_dev_mtx);
5375
5376 return (dev != NULL);
5377}
5378
5379/*
5380 * Add a vdev for use by the L2ARC. By this point the spa has already
5381 * validated the vdev and opened it.
5382 */
5383void
5384l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5385{
5386 l2arc_dev_t *adddev;
5387
5388 ASSERT(!l2arc_vdev_present(vd));
5389
5390 /*
5391 * Create a new l2arc device entry.
5392 */
5393 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5394 adddev->l2ad_spa = spa;
5395 adddev->l2ad_vdev = vd;
5396 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5397 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5398 adddev->l2ad_hand = adddev->l2ad_start;
5399 adddev->l2ad_evict = adddev->l2ad_start;
5400 adddev->l2ad_first = B_TRUE;
5401 adddev->l2ad_writing = B_FALSE;
5402
5403 /*
5404 * This is a list of all ARC buffers that are still valid on the
5405 * device.
5406 */
5407 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5408 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5409 offsetof(arc_buf_hdr_t, b_l2node));
5410
5411 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5412
5413 /*
5414 * Add device to global list
5415 */
5416 mutex_enter(&l2arc_dev_mtx);
5417 list_insert_head(l2arc_dev_list, adddev);
5418 atomic_inc_64(&l2arc_ndev);
5419 mutex_exit(&l2arc_dev_mtx);
5420}
5421
5422/*
5423 * Remove a vdev from the L2ARC.
5424 */
5425void
5426l2arc_remove_vdev(vdev_t *vd)
5427{
5428 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5429
5430 /*
5431 * Find the device by vdev
5432 */
5433 mutex_enter(&l2arc_dev_mtx);
5434 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5435 nextdev = list_next(l2arc_dev_list, dev);
5436 if (vd == dev->l2ad_vdev) {
5437 remdev = dev;
5438 break;
5439 }
5440 }
5441 ASSERT(remdev != NULL);
5442
5443 /*
5444 * Remove device from global list
5445 */
5446 list_remove(l2arc_dev_list, remdev);
5447 l2arc_dev_last = NULL; /* may have been invalidated */
5448 atomic_dec_64(&l2arc_ndev);
5449 mutex_exit(&l2arc_dev_mtx);
5450
5451 /*
5452 * Clear all buflists and ARC references. L2ARC device flush.
5453 */
5454 l2arc_evict(remdev, 0, B_TRUE);
5455 list_destroy(remdev->l2ad_buflist);
5456 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5457 kmem_free(remdev, sizeof (l2arc_dev_t));
5458}
5459
5460void
5461l2arc_init(void)
5462{
5463 l2arc_thread_exit = 0;
5464 l2arc_ndev = 0;
5465 l2arc_writes_sent = 0;
5466 l2arc_writes_done = 0;
5467
5468 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5469 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5470 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5471 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5472 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5473
5474 l2arc_dev_list = &L2ARC_dev_list;
5475 l2arc_free_on_write = &L2ARC_free_on_write;
5476 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5477 offsetof(l2arc_dev_t, l2ad_node));
5478 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5479 offsetof(l2arc_data_free_t, l2df_list_node));
5480}
5481
5482void
5483l2arc_fini(void)
5484{
5485 /*
5486	 * This is called from dmu_fini(), which is called from spa_fini().
5487 * Because of this, we can assume that all l2arc devices have
5488 * already been removed when the pools themselves were removed.
5489 */
5490
5491 l2arc_do_free_on_write();
5492
5493 mutex_destroy(&l2arc_feed_thr_lock);
5494 cv_destroy(&l2arc_feed_thr_cv);
5495 mutex_destroy(&l2arc_dev_mtx);
5496 mutex_destroy(&l2arc_buflist_mtx);
5497 mutex_destroy(&l2arc_free_on_write_mtx);
5498
5499 list_destroy(l2arc_dev_list);
5500 list_destroy(l2arc_free_on_write);
5501}
5502
5503void
5504l2arc_start(void)
5505{
5506 if (!(spa_mode_global & FWRITE))
5507 return;
5508
5509 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5510 TS_RUN, minclsyspri);
5511}
5512
5513void
5514l2arc_stop(void)
5515{
5516 if (!(spa_mode_global & FWRITE))
5517 return;
5518
5519 mutex_enter(&l2arc_feed_thr_lock);
5520 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5521 l2arc_thread_exit = 1;
5522 while (l2arc_thread_exit != 0)
5523 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5524 mutex_exit(&l2arc_feed_thr_lock);
5525}
5151
5152 if (csize == 0) {
5153 /* zero block, indicate that there's nothing to write */
5154 zio_data_buf_free(cdata, len);
5155 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5156 l2hdr->b_asize = 0;
5157 l2hdr->b_tmp_cdata = NULL;
5158 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5159 return (B_TRUE);
5160 } else if (csize > 0 && csize < len) {
5161 /*
5162 * Compression succeeded, we'll keep the cdata around for
5163 * writing and release it afterwards.
5164 */
5165 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5166 l2hdr->b_asize = csize;
5167 l2hdr->b_tmp_cdata = cdata;
5168 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5169 return (B_TRUE);
5170 } else {
5171 /*
5172 * Compression failed, release the compressed buffer.
5173 * l2hdr will be left unmodified.
5174 */
5175 zio_data_buf_free(cdata, len);
5176 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5177 return (B_FALSE);
5178 }
5179}
5180
5181/*
5182 * Decompresses a zio read back from an l2arc device. On success, the
5183 * underlying zio's io_data buffer is overwritten by the uncompressed
5184 * version. On decompression error (corrupt compressed stream), the
5185 * zio->io_error value is set to signal an I/O error.
5186 *
5187 * Please note that the compressed data stream is not checksummed, so
5188 * if the underlying device is experiencing data corruption, we may feed
5189 * corrupt data to the decompressor, so the decompressor needs to be
5190 * able to handle this situation (LZ4 does).
5191 */
5192static void
5193l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5194{
5195 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5196
5197 if (zio->io_error != 0) {
5198 /*
5199 * An io error has occured, just restore the original io
5200 * size in preparation for a main pool read.
5201 */
5202 zio->io_orig_size = zio->io_size = hdr->b_size;
5203 return;
5204 }
5205
5206 if (c == ZIO_COMPRESS_EMPTY) {
5207 /*
5208 * An empty buffer results in a null zio, which means we
5209 * need to fill its io_data after we're done restoring the
5210 * buffer's contents.
5211 */
5212 ASSERT(hdr->b_buf != NULL);
5213 bzero(hdr->b_buf->b_data, hdr->b_size);
5214 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5215 } else {
5216 ASSERT(zio->io_data != NULL);
5217 /*
5218 * We copy the compressed data from the start of the arc buffer
5219 * (the zio_read will have pulled in only what we need, the
5220 * rest is garbage which we will overwrite at decompression)
5221 * and then decompress back to the ARC data buffer. This way we
5222 * can minimize copying by simply decompressing back over the
5223 * original compressed data (rather than decompressing to an
5224 * aux buffer and then copying back the uncompressed buffer,
5225 * which is likely to be much larger).
5226 */
5227 uint64_t csize;
5228 void *cdata;
5229
5230 csize = zio->io_size;
5231 cdata = zio_data_buf_alloc(csize);
5232 bcopy(zio->io_data, cdata, csize);
5233 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5234 hdr->b_size) != 0)
5235 zio->io_error = EIO;
5236 zio_data_buf_free(cdata, csize);
5237 }
5238
5239 /* Restore the expected uncompressed IO size. */
5240 zio->io_orig_size = zio->io_size = hdr->b_size;
5241}
5242
5243/*
5244 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5245 * This buffer serves as a temporary holder of compressed data while
5246 * the buffer entry is being written to an l2arc device. Once that is
5247 * done, we can dispose of it.
5248 */
5249static void
5250l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5251{
5252 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5253
5254 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5255 /*
5256 * If the data was compressed, then we've allocated a
5257 * temporary buffer for it, so now we need to release it.
5258 */
5259 ASSERT(l2hdr->b_tmp_cdata != NULL);
5260 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5261 }
5262 l2hdr->b_tmp_cdata = NULL;
5263}
5264
5265/*
5266 * This thread feeds the L2ARC at regular intervals. This is the beating
5267 * heart of the L2ARC.
5268 */
5269static void
5270l2arc_feed_thread(void *dummy __unused)
5271{
5272 callb_cpr_t cpr;
5273 l2arc_dev_t *dev;
5274 spa_t *spa;
5275 uint64_t size, wrote;
5276 clock_t begin, next = ddi_get_lbolt();
5277 boolean_t headroom_boost = B_FALSE;
5278
5279 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5280
5281 mutex_enter(&l2arc_feed_thr_lock);
5282
5283 while (l2arc_thread_exit == 0) {
5284 CALLB_CPR_SAFE_BEGIN(&cpr);
5285 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5286 next - ddi_get_lbolt());
5287 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5288 next = ddi_get_lbolt() + hz;
5289
5290 /*
5291 * Quick check for L2ARC devices.
5292 */
5293 mutex_enter(&l2arc_dev_mtx);
5294 if (l2arc_ndev == 0) {
5295 mutex_exit(&l2arc_dev_mtx);
5296 continue;
5297 }
5298 mutex_exit(&l2arc_dev_mtx);
5299 begin = ddi_get_lbolt();
5300
5301 /*
5302 * This selects the next l2arc device to write to, and in
5303 * doing so the next spa to feed from: dev->l2ad_spa. This
5304 * will return NULL if there are now no l2arc devices or if
5305 * they are all faulted.
5306 *
5307 * If a device is returned, its spa's config lock is also
5308 * held to prevent device removal. l2arc_dev_get_next()
5309 * will grab and release l2arc_dev_mtx.
5310 */
5311 if ((dev = l2arc_dev_get_next()) == NULL)
5312 continue;
5313
5314 spa = dev->l2ad_spa;
5315 ASSERT(spa != NULL);
5316
5317 /*
5318 * If the pool is read-only then force the feed thread to
5319 * sleep a little longer.
5320 */
5321 if (!spa_writeable(spa)) {
5322 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5323 spa_config_exit(spa, SCL_L2ARC, dev);
5324 continue;
5325 }
5326
5327 /*
5328 * Avoid contributing to memory pressure.
5329 */
5330 if (arc_reclaim_needed()) {
5331 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5332 spa_config_exit(spa, SCL_L2ARC, dev);
5333 continue;
5334 }
5335
5336 ARCSTAT_BUMP(arcstat_l2_feeds);
5337
5338 size = l2arc_write_size();
5339
5340 /*
5341 * Evict L2ARC buffers that will be overwritten.
5342 */
5343 l2arc_evict(dev, size, B_FALSE);
5344
5345 /*
5346 * Write ARC buffers.
5347 */
5348 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5349
5350 /*
5351 * Calculate interval between writes.
5352 */
5353 next = l2arc_write_interval(begin, size, wrote);
5354 spa_config_exit(spa, SCL_L2ARC, dev);
5355 }
5356
5357 l2arc_thread_exit = 0;
5358 cv_broadcast(&l2arc_feed_thr_cv);
5359 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5360 thread_exit();
5361}
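/*
 * Illustrative sketch (editor's addition): the shape of the feed loop above,
 * modelled as a userland pthread daemon.  feed_one_pass() is a hypothetical
 * stand-in for the pick-device/evict/write sequence, and the pacing
 * constants below are placeholders; in the real thread the next deadline
 * comes from l2arc_write_interval().  The exit handshake is sketched
 * separately after l2arc_stop() below.
 */
#include <pthread.h>
#include <stdint.h>
#include <time.h>

static pthread_mutex_t feed_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t feed_cv = PTHREAD_COND_INITIALIZER;
static int feed_exit;

/* Stand-in for one pass of evict + write; returns bytes written. */
static uint64_t
feed_one_pass(void)
{
	return (0);
}

static void *
feed_thread(void *arg)
{
	struct timespec next;
	uint64_t wrote;

	(void) arg;
	clock_gettime(CLOCK_REALTIME, &next);

	pthread_mutex_lock(&feed_lock);
	while (feed_exit == 0) {
		/* Sleep until the previously computed deadline or a wakeup. */
		(void) pthread_cond_timedwait(&feed_cv, &feed_lock, &next);

		clock_gettime(CLOCK_REALTIME, &next);
		next.tv_sec += 1;	/* default: feed again in one second */

		wrote = feed_one_pass();

		/*
		 * Placeholder pacing: back off when the pass wrote nothing
		 * (no devices, read-only pool, or memory pressure).
		 */
		if (wrote == 0)
			next.tv_sec += 4;
	}
	pthread_mutex_unlock(&feed_lock);
	return (NULL);
}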
5362
5363boolean_t
5364l2arc_vdev_present(vdev_t *vd)
5365{
5366 l2arc_dev_t *dev;
5367
5368 mutex_enter(&l2arc_dev_mtx);
5369 for (dev = list_head(l2arc_dev_list); dev != NULL;
5370 dev = list_next(l2arc_dev_list, dev)) {
5371 if (dev->l2ad_vdev == vd)
5372 break;
5373 }
5374 mutex_exit(&l2arc_dev_mtx);
5375
5376 return (dev != NULL);
5377}
5378
5379/*
5380 * Add a vdev for use by the L2ARC. By this point the spa has already
5381 * validated the vdev and opened it.
5382 */
5383void
5384l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5385{
5386 l2arc_dev_t *adddev;
5387
5388 ASSERT(!l2arc_vdev_present(vd));
5389
5390 /*
5391 * Create a new l2arc device entry.
5392 */
5393 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5394 adddev->l2ad_spa = spa;
5395 adddev->l2ad_vdev = vd;
5396 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5397 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5398 adddev->l2ad_hand = adddev->l2ad_start;
5399 adddev->l2ad_evict = adddev->l2ad_start;
5400 adddev->l2ad_first = B_TRUE;
5401 adddev->l2ad_writing = B_FALSE;
5402
5403 /*
5404 * This is a list of all ARC buffers that are still valid on the
5405 * device.
5406 */
5407 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5408 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5409 offsetof(arc_buf_hdr_t, b_l2node));
5410
5411 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5412
5413 /*
5414 * Add device to global list
5415 */
5416 mutex_enter(&l2arc_dev_mtx);
5417 list_insert_head(l2arc_dev_list, adddev);
5418 atomic_inc_64(&l2arc_ndev);
5419 mutex_exit(&l2arc_dev_mtx);
5420}
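/*
 * Illustrative sketch (editor's addition): the device geometry set up above,
 * in isolation and with hypothetical names.  The 4 MB front reserve is only
 * an assumed stand-in for VDEV_LABEL_START_SIZE; the write hand (l2ad_hand)
 * and eviction marker (l2ad_evict) both start at the beginning of the usable
 * [start, end) region.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define	EXAMPLE_LABEL_RESERVE	(4ULL << 20)	/* assumed label reserve */

struct l2dev_geom {
	uint64_t start;		/* first usable byte (after the labels) */
	uint64_t end;		/* one past the last usable byte */
	uint64_t hand;		/* next write offset */
	uint64_t evict;		/* eviction frontier */
};

static void
l2dev_geom_init(struct l2dev_geom *g, uint64_t asize)
{
	g->start = EXAMPLE_LABEL_RESERVE;
	g->end = EXAMPLE_LABEL_RESERVE + asize;
	g->hand = g->start;
	g->evict = g->start;
	assert(g->hand >= g->start && g->hand < g->end);
}

int
main(void)
{
	struct l2dev_geom g;

	l2dev_geom_init(&g, 32ULL << 30);	/* a 32 GB cache device */
	printf("usable bytes: %llu\n", (unsigned long long)(g.end - g.hand));
	return (0);
}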
5421
5422/*
5423 * Remove a vdev from the L2ARC.
5424 */
5425void
5426l2arc_remove_vdev(vdev_t *vd)
5427{
5428 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5429
5430 /*
5431 * Find the device by vdev
5432 */
5433 mutex_enter(&l2arc_dev_mtx);
5434 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5435 nextdev = list_next(l2arc_dev_list, dev);
5436 if (vd == dev->l2ad_vdev) {
5437 remdev = dev;
5438 break;
5439 }
5440 }
5441 ASSERT(remdev != NULL);
5442
5443 /*
5444 * Remove device from global list
5445 */
5446 list_remove(l2arc_dev_list, remdev);
5447 l2arc_dev_last = NULL; /* may have been invalidated */
5448 atomic_dec_64(&l2arc_ndev);
5449 mutex_exit(&l2arc_dev_mtx);
5450
5451 /*
5452	 * Clear all buflists and ARC references; this flushes the L2ARC device.
5453 */
5454 l2arc_evict(remdev, 0, B_TRUE);
5455 list_destroy(remdev->l2ad_buflist);
5456 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5457 kmem_free(remdev, sizeof (l2arc_dev_t));
5458}
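/*
 * Illustrative sketch (editor's addition): removal from a round-robin device
 * list, as above, with hypothetical names.  Two details carry over: the
 * cached rotor cursor (cf. l2arc_dev_last) must be dropped whenever the list
 * changes, and the entry is unlinked under the list lock before any teardown
 * work is done on it outside the lock.
 */
#include <pthread.h>
#include <stddef.h>

struct cache_dev {
	struct cache_dev *next;
	void *vdev;			/* identity key, like l2ad_vdev */
};

static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;
static struct cache_dev *dev_head;	/* global device list */
static struct cache_dev *dev_last;	/* round-robin cursor; may go stale */
static unsigned long dev_count;

static struct cache_dev *
dev_list_remove(void *vdev)
{
	struct cache_dev **pp, *d;

	pthread_mutex_lock(&dev_lock);
	for (pp = &dev_head; (d = *pp) != NULL; pp = &d->next) {
		if (d->vdev == vdev) {
			*pp = d->next;		/* unlink */
			dev_last = NULL;	/* cursor may point at d */
			dev_count--;
			break;
		}
	}
	pthread_mutex_unlock(&dev_lock);
	return (d);	/* caller flushes and frees it outside the lock */
}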
5459
5460void
5461l2arc_init(void)
5462{
5463 l2arc_thread_exit = 0;
5464 l2arc_ndev = 0;
5465 l2arc_writes_sent = 0;
5466 l2arc_writes_done = 0;
5467
5468 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5469 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5470 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5471 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5472 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5473
5474 l2arc_dev_list = &L2ARC_dev_list;
5475 l2arc_free_on_write = &L2ARC_free_on_write;
5476 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5477 offsetof(l2arc_dev_t, l2ad_node));
5478 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5479 offsetof(l2arc_data_free_t, l2df_list_node));
5480}
5481
5482void
5483l2arc_fini(void)
5484{
5485 /*
5486	 * This is called from dmu_fini(), which is called from spa_fini().
5487 * Because of this, we can assume that all l2arc devices have
5488 * already been removed when the pools themselves were removed.
5489 */
5490
5491 l2arc_do_free_on_write();
5492
5493 mutex_destroy(&l2arc_feed_thr_lock);
5494 cv_destroy(&l2arc_feed_thr_cv);
5495 mutex_destroy(&l2arc_dev_mtx);
5496 mutex_destroy(&l2arc_buflist_mtx);
5497 mutex_destroy(&l2arc_free_on_write_mtx);
5498
5499 list_destroy(l2arc_dev_list);
5500 list_destroy(l2arc_free_on_write);
5501}
5502
5503void
5504l2arc_start(void)
5505{
5506 if (!(spa_mode_global & FWRITE))
5507 return;
5508
5509 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5510 TS_RUN, minclsyspri);
5511}
5512
5513void
5514l2arc_stop(void)
5515{
5516 if (!(spa_mode_global & FWRITE))
5517 return;
5518
5519 mutex_enter(&l2arc_feed_thr_lock);
5520 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5521 l2arc_thread_exit = 1;
5522 while (l2arc_thread_exit != 0)
5523 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5524 mutex_exit(&l2arc_feed_thr_lock);
5525}
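/*
 * Illustrative sketch (editor's addition): the shutdown handshake used by
 * l2arc_stop() and l2arc_feed_thread(), modelled with pthreads and
 * hypothetical names.  The stopper sets an exit flag and waits until the
 * worker acknowledges by clearing it; the worker clears the flag and
 * broadcasts just before exiting, so the stopper cannot return while the
 * worker is still running.  As in the original, this assumes the worker was
 * started and has not already exited (l2arc_start() and l2arc_stop() are
 * paired on the same spa_mode_global check).
 */
#include <pthread.h>

static pthread_mutex_t thr_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t thr_cv = PTHREAD_COND_INITIALIZER;
static int thr_exit;

static void *
worker(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&thr_lock);
	while (thr_exit == 0) {
		/* A real worker would do one feed pass per wakeup here. */
		pthread_cond_wait(&thr_cv, &thr_lock);
	}
	thr_exit = 0;			/* acknowledge the stop request */
	pthread_cond_broadcast(&thr_cv);
	pthread_mutex_unlock(&thr_lock);
	return (NULL);
}

static void
worker_stop(void)
{
	pthread_mutex_lock(&thr_lock);
	pthread_cond_signal(&thr_cv);	/* kick the worker out of its wait */
	thr_exit = 1;
	while (thr_exit != 0)
		pthread_cond_wait(&thr_cv, &thr_lock);
	pthread_mutex_unlock(&thr_lock);
}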