arc.c (revision 268085) → arc.c (revision 268123)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2013 by Delphix. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 */
27
28/*
29 * DVA-based Adjustable Replacement Cache
30 *
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
35 *
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory. This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also makes the performance characteristics easy to reason
40 * about. Our cache is not so simple. At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them. Blocks are only evictable
43 * when there are no external references active. This makes
44 * eviction far more problematic: we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
46 *
47 * There are times when it is not possible to evict the requested
48 * space. In these circumstances we are unable to adjust the cache
49 * size. To prevent the cache growing unbounded at these times we
50 * implement a "cache throttle" that slows the flow of new data
51 * into the cache until we can make space available.
52 *
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss. Our model has a variable sized cache. It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is
58 * tight.
59 *
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size. So
62 * when adjusting the cache size following a cache miss, it's simply
63 * a matter of choosing a single page to evict. In our model, we
64 * have variable sized cache blocks (ranging from 512 bytes to
65 * 128K bytes). We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
68 *
69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
71 */
72
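
/*
 * Editor-added illustrative sketch -- not part of arc.c.  Point 3 above in
 * miniature: with variable-sized blocks, a miss is satisfied by evicting a
 * set of evictable blocks whose combined size approximates the space the
 * new block needs.  The blk_t type and list handling are hypothetical, and
 * uint64_t comes from the headers included further down.
 */
typedef struct blk {
	struct blk	*next;	/* toward more recently used blocks */
	uint64_t	size;	/* anywhere from 512 bytes to 128K */
} blk_t;

static uint64_t
evict_for_miss_sketch(blk_t **lowest, uint64_t needed)
{
	uint64_t freed = 0;

	/* Evict from the "lowest" end of the list until enough is freed. */
	while (*lowest != NULL && freed < needed) {
		blk_t *b = *lowest;

		*lowest = b->next;
		freed += b->size;
		/* ... release b's data and header here ... */
	}
	return (freed);
}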
73/*
74 * The locking model:
75 *
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
83 *
84 * Buffers do not have their own mutexes, rather they rely on the
85 * hash table mutexes for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexes).
87 *
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
91 *
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
94 *
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock you
98 * must use mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
100 *
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_buf_evict()
108 * and arc_do_user_evicts().
109 *
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
112 *
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114 *
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
120 */
121
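/*
 * Editor-added illustrative sketch -- not part of arc.c.  The lock ordering
 * described above: the arc list lock is taken first, then the buffer's hash
 * table lock is attempted with mutex_tryenter(); on contention the buffer
 * is skipped rather than risking a deadlock.  The visit() callback is
 * hypothetical; kmutex_t and arc_buf_hdr_t come from the headers below.
 */
static void
walk_with_lock_order_sketch(kmutex_t *list_lock, kmutex_t *hash_lock,
    arc_buf_hdr_t *ab, void (*visit)(arc_buf_hdr_t *))
{
	mutex_enter(list_lock);			/* arc list lock first */
	if (mutex_tryenter(hash_lock)) {	/* then try the hash lock */
		visit(ab);
		mutex_exit(hash_lock);
	}
	/* else: skip this buffer; the caller counts it as a mutex_miss */
	mutex_exit(list_lock);
}
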
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#include <sys/dsl_pool.h>
131#ifdef _KERNEL
132#include <sys/dnlc.h>
133#endif
134#include <sys/callb.h>
135#include <sys/kstat.h>
136#include <sys/trim_map.h>
137#include <zfs_fletcher.h>
138#include <sys/sdt.h>
139
140#include <vm/vm_pageout.h>
141
142#ifdef illumos
143#ifndef _KERNEL
144/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
145boolean_t arc_watch = B_FALSE;
146int arc_procfd;
147#endif
148#endif /* illumos */
149
150static kmutex_t arc_reclaim_thr_lock;
151static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
152static uint8_t arc_thread_exit;
153
154#define ARC_REDUCE_DNLC_PERCENT 3
155uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
156
157typedef enum arc_reclaim_strategy {
158 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
159 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
160} arc_reclaim_strategy_t;
161
162/*
163 * The number of iterations through arc_evict_*() before we
164 * drop & reacquire the lock.
165 */
166int arc_evict_iterations = 100;
167
168/* number of seconds before growing cache again */
169static int arc_grow_retry = 60;
170
171/* shift of arc_c for calculating both min and max arc_p */
172static int arc_p_min_shift = 4;
173
174/* log2(fraction of arc to reclaim) */
175static int arc_shrink_shift = 5;
176
177/*
178 * minimum lifespan of a prefetch block in clock ticks
179 * (initialized in arc_init())
180 */
181static int arc_min_prefetch_lifespan;
182
183/*
184 * If this percent of memory is free, don't throttle.
185 */
186int arc_lotsfree_percent = 10;
187
188static int arc_dead;
189extern int zfs_prefetch_disable;
190
191/*
192 * The arc has filled available memory and has now warmed up.
193 */
194static boolean_t arc_warm;
195
196/*
197 * These tunables are for performance analysis.
198 */
199uint64_t zfs_arc_max;
200uint64_t zfs_arc_min;
201uint64_t zfs_arc_meta_limit = 0;
202int zfs_arc_grow_retry = 0;
203int zfs_arc_shrink_shift = 0;
204int zfs_arc_p_min_shift = 0;
205int zfs_disable_dup_eviction = 0;
206
207TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
208SYSCTL_DECL(_vfs_zfs);
209SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
210 "Maximum ARC size");
211SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
212 "Minimum ARC size");
213
214/*
215 * Note that buffers can be in one of 6 states:
216 * ARC_anon - anonymous (discussed below)
217 * ARC_mru - recently used, currently cached
218 * ARC_mru_ghost - recently used, no longer in cache
219 * ARC_mfu - frequently used, currently cached
220 * ARC_mfu_ghost - frequently used, no longer in cache
221 * ARC_l2c_only - exists in L2ARC but not other states
222 * When there are no active references to the buffer, they are
223 * linked onto a list in one of these arc states. These are
224 * the only buffers that can be evicted or deleted. Within each
225 * state there are multiple lists, one for meta-data and one for
226 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
227 * etc.) is tracked separately so that it can be managed more
228 * explicitly: favored over data, limited explicitly.
229 *
230 * Anonymous buffers are buffers that are not associated with
231 * a DVA. These are buffers that hold dirty block copies
232 * before they are written to stable storage. By definition,
233 * they are "ref'd" and are considered part of arc_mru
234 * that cannot be freed. Generally, they will acquire a DVA
235 * as they are written and migrate onto the arc_mru list.
236 *
237 * The ARC_l2c_only state is for buffers that are in the second
238 * level ARC but no longer in any of the ARC_m* lists. The second
239 * level ARC itself may also contain buffers that are in any of
240 * the ARC_m* states - meaning that a buffer can exist in two
241 * places. The reason for the ARC_l2c_only state is to keep the
242 * buffer header in the hash table, so that reads that hit the
243 * second level ARC benefit from these fast lookups.
244 */
245
246#define ARCS_LOCK_PAD CACHE_LINE_SIZE
247struct arcs_lock {
248 kmutex_t arcs_lock;
249#ifdef _KERNEL
250 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
251#endif
252};
253
254/*
255 * must be a power of two for mask use to work
256 *
257 */
258#define ARC_BUFC_NUMDATALISTS 16
259#define ARC_BUFC_NUMMETADATALISTS 16
260#define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
261
262typedef struct arc_state {
263 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
264 uint64_t arcs_size; /* total amount of data in this state */
265 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
266 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
267} arc_state_t;
268
269#define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock))
270
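/*
 * Editor-added illustrative note -- not part of arc.c.  Each state keeps
 * ARC_BUFC_NUMLISTS sublists, each behind its own cache-line-padded lock,
 * so list operations spread across locks instead of serializing on one.
 * Sublist i of, say, the MRU state is locked with:
 *
 *	mutex_enter(ARCS_LOCK(&ARC_mru, i));
 *	... operate on ARC_mru.arcs_lists[i] ...
 *	mutex_exit(ARCS_LOCK(&ARC_mru, i));
 */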
271/* The 6 states: */
272static arc_state_t ARC_anon;
273static arc_state_t ARC_mru;
274static arc_state_t ARC_mru_ghost;
275static arc_state_t ARC_mfu;
276static arc_state_t ARC_mfu_ghost;
277static arc_state_t ARC_l2c_only;
278
279typedef struct arc_stats {
280 kstat_named_t arcstat_hits;
281 kstat_named_t arcstat_misses;
282 kstat_named_t arcstat_demand_data_hits;
283 kstat_named_t arcstat_demand_data_misses;
284 kstat_named_t arcstat_demand_metadata_hits;
285 kstat_named_t arcstat_demand_metadata_misses;
286 kstat_named_t arcstat_prefetch_data_hits;
287 kstat_named_t arcstat_prefetch_data_misses;
288 kstat_named_t arcstat_prefetch_metadata_hits;
289 kstat_named_t arcstat_prefetch_metadata_misses;
290 kstat_named_t arcstat_mru_hits;
291 kstat_named_t arcstat_mru_ghost_hits;
292 kstat_named_t arcstat_mfu_hits;
293 kstat_named_t arcstat_mfu_ghost_hits;
294 kstat_named_t arcstat_allocated;
295 kstat_named_t arcstat_deleted;
296 kstat_named_t arcstat_stolen;
297 kstat_named_t arcstat_recycle_miss;
298 /*
299 * Number of buffers that could not be evicted because the hash lock
300 * was held by another thread. The lock may not necessarily be held
301 * by something using the same buffer, since hash locks are shared
302 * by multiple buffers.
303 */
304 kstat_named_t arcstat_mutex_miss;
305 /*
306 * Number of buffers skipped because they have I/O in progress, are
307 * indirect prefetch buffers that have not lived long enough, or are
308 * not from the spa we're trying to evict from.
309 */
310 kstat_named_t arcstat_evict_skip;
311 kstat_named_t arcstat_evict_l2_cached;
312 kstat_named_t arcstat_evict_l2_eligible;
313 kstat_named_t arcstat_evict_l2_ineligible;
314 kstat_named_t arcstat_hash_elements;
315 kstat_named_t arcstat_hash_elements_max;
316 kstat_named_t arcstat_hash_collisions;
317 kstat_named_t arcstat_hash_chains;
318 kstat_named_t arcstat_hash_chain_max;
319 kstat_named_t arcstat_p;
320 kstat_named_t arcstat_c;
321 kstat_named_t arcstat_c_min;
322 kstat_named_t arcstat_c_max;
323 kstat_named_t arcstat_size;
324 kstat_named_t arcstat_hdr_size;
325 kstat_named_t arcstat_data_size;
326 kstat_named_t arcstat_other_size;
327 kstat_named_t arcstat_l2_hits;
328 kstat_named_t arcstat_l2_misses;
329 kstat_named_t arcstat_l2_feeds;
330 kstat_named_t arcstat_l2_rw_clash;
331 kstat_named_t arcstat_l2_read_bytes;
332 kstat_named_t arcstat_l2_write_bytes;
333 kstat_named_t arcstat_l2_writes_sent;
334 kstat_named_t arcstat_l2_writes_done;
335 kstat_named_t arcstat_l2_writes_error;
336 kstat_named_t arcstat_l2_writes_hdr_miss;
337 kstat_named_t arcstat_l2_evict_lock_retry;
338 kstat_named_t arcstat_l2_evict_reading;
339 kstat_named_t arcstat_l2_free_on_write;
340 kstat_named_t arcstat_l2_abort_lowmem;
341 kstat_named_t arcstat_l2_cksum_bad;
342 kstat_named_t arcstat_l2_io_error;
343 kstat_named_t arcstat_l2_size;
344 kstat_named_t arcstat_l2_asize;
345 kstat_named_t arcstat_l2_hdr_size;
346 kstat_named_t arcstat_l2_compress_successes;
347 kstat_named_t arcstat_l2_compress_zeros;
348 kstat_named_t arcstat_l2_compress_failures;
349 kstat_named_t arcstat_l2_write_trylock_fail;
350 kstat_named_t arcstat_l2_write_passed_headroom;
351 kstat_named_t arcstat_l2_write_spa_mismatch;
352 kstat_named_t arcstat_l2_write_in_l2;
353 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
354 kstat_named_t arcstat_l2_write_not_cacheable;
355 kstat_named_t arcstat_l2_write_full;
356 kstat_named_t arcstat_l2_write_buffer_iter;
357 kstat_named_t arcstat_l2_write_pios;
358 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
359 kstat_named_t arcstat_l2_write_buffer_list_iter;
360 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
361 kstat_named_t arcstat_memory_throttle_count;
362 kstat_named_t arcstat_duplicate_buffers;
363 kstat_named_t arcstat_duplicate_buffers_size;
364 kstat_named_t arcstat_duplicate_reads;
365} arc_stats_t;
366
367static arc_stats_t arc_stats = {
368 { "hits", KSTAT_DATA_UINT64 },
369 { "misses", KSTAT_DATA_UINT64 },
370 { "demand_data_hits", KSTAT_DATA_UINT64 },
371 { "demand_data_misses", KSTAT_DATA_UINT64 },
372 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
373 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
374 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
375 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
376 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
377 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
378 { "mru_hits", KSTAT_DATA_UINT64 },
379 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
380 { "mfu_hits", KSTAT_DATA_UINT64 },
381 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
382 { "allocated", KSTAT_DATA_UINT64 },
383 { "deleted", KSTAT_DATA_UINT64 },
384 { "stolen", KSTAT_DATA_UINT64 },
385 { "recycle_miss", KSTAT_DATA_UINT64 },
386 { "mutex_miss", KSTAT_DATA_UINT64 },
387 { "evict_skip", KSTAT_DATA_UINT64 },
388 { "evict_l2_cached", KSTAT_DATA_UINT64 },
389 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
390 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
391 { "hash_elements", KSTAT_DATA_UINT64 },
392 { "hash_elements_max", KSTAT_DATA_UINT64 },
393 { "hash_collisions", KSTAT_DATA_UINT64 },
394 { "hash_chains", KSTAT_DATA_UINT64 },
395 { "hash_chain_max", KSTAT_DATA_UINT64 },
396 { "p", KSTAT_DATA_UINT64 },
397 { "c", KSTAT_DATA_UINT64 },
398 { "c_min", KSTAT_DATA_UINT64 },
399 { "c_max", KSTAT_DATA_UINT64 },
400 { "size", KSTAT_DATA_UINT64 },
401 { "hdr_size", KSTAT_DATA_UINT64 },
402 { "data_size", KSTAT_DATA_UINT64 },
403 { "other_size", KSTAT_DATA_UINT64 },
404 { "l2_hits", KSTAT_DATA_UINT64 },
405 { "l2_misses", KSTAT_DATA_UINT64 },
406 { "l2_feeds", KSTAT_DATA_UINT64 },
407 { "l2_rw_clash", KSTAT_DATA_UINT64 },
408 { "l2_read_bytes", KSTAT_DATA_UINT64 },
409 { "l2_write_bytes", KSTAT_DATA_UINT64 },
410 { "l2_writes_sent", KSTAT_DATA_UINT64 },
411 { "l2_writes_done", KSTAT_DATA_UINT64 },
412 { "l2_writes_error", KSTAT_DATA_UINT64 },
413 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
414 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
415 { "l2_evict_reading", KSTAT_DATA_UINT64 },
416 { "l2_free_on_write", KSTAT_DATA_UINT64 },
417 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
418 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
419 { "l2_io_error", KSTAT_DATA_UINT64 },
420 { "l2_size", KSTAT_DATA_UINT64 },
421 { "l2_asize", KSTAT_DATA_UINT64 },
422 { "l2_hdr_size", KSTAT_DATA_UINT64 },
423 { "l2_compress_successes", KSTAT_DATA_UINT64 },
424 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
425 { "l2_compress_failures", KSTAT_DATA_UINT64 },
426 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
427 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
428 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
429 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
430 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
431 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
432 { "l2_write_full", KSTAT_DATA_UINT64 },
433 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
434 { "l2_write_pios", KSTAT_DATA_UINT64 },
435 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
436 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
437 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
438 { "memory_throttle_count", KSTAT_DATA_UINT64 },
439 { "duplicate_buffers", KSTAT_DATA_UINT64 },
440 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
441 { "duplicate_reads", KSTAT_DATA_UINT64 }
442};
443
444#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
445
446#define ARCSTAT_INCR(stat, val) \
447 atomic_add_64(&arc_stats.stat.value.ui64, (val))
448
449#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
450#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
451
452#define ARCSTAT_MAX(stat, val) { \
453 uint64_t m; \
454 while ((val) > (m = arc_stats.stat.value.ui64) && \
455 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
456 continue; \
457}
458
459#define ARCSTAT_MAXSTAT(stat) \
460 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
461
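/*
 * Editor-added illustrative sketch -- not part of arc.c.  Typical use of
 * the macros above when a new element enters the hash table: bump the
 * running count, then let the lock-free compare-and-swap loop in
 * ARCSTAT_MAX record a new high-water mark.  The real call sites are in
 * the hash table code further down.
 */
static void
bump_hash_elements_sketch(void)
{
	ARCSTAT_BUMP(arcstat_hash_elements);
	ARCSTAT_MAXSTAT(arcstat_hash_elements);
}
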
462/*
463 * We define a macro to allow ARC hits/misses to be easily broken down by
464 * two separate conditions, giving a total of four different subtypes for
465 * each of hits and misses (so eight statistics total).
466 */
467#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
468 if (cond1) { \
469 if (cond2) { \
470 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
471 } else { \
472 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
473 } \
474 } else { \
475 if (cond2) { \
476 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
477 } else { \
478 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
479 } \
480 }
481
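/*
 * Editor-added illustrative sketch -- not part of arc.c.  How the macro
 * above is invoked: the two conditions pick one of the four counters, so a
 * non-prefetch (demand) metadata hit bumps arcstat_demand_metadata_hits,
 * a prefetch data hit bumps arcstat_prefetch_data_hits, and so on.
 */
static void
classify_hit_sketch(boolean_t is_prefetch, boolean_t is_metadata)
{
	ARCSTAT_CONDSTAT(!is_prefetch, demand, prefetch,
	    !is_metadata, data, metadata, hits);
}
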
482kstat_t *arc_ksp;
483static arc_state_t *arc_anon;
484static arc_state_t *arc_mru;
485static arc_state_t *arc_mru_ghost;
486static arc_state_t *arc_mfu;
487static arc_state_t *arc_mfu_ghost;
488static arc_state_t *arc_l2c_only;
489
490/*
491 * There are several ARC variables that are critical to export as kstats --
492 * but we don't want to have to grovel around in the kstat whenever we wish to
493 * manipulate them. For these variables, we therefore define them to be in
494 * terms of the statistic variable. This assures that we are not introducing
495 * the possibility of inconsistency by having shadow copies of the variables,
496 * while still allowing the code to be readable.
497 */
498#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
499#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
500#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
501#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
502#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
503
504#define L2ARC_IS_VALID_COMPRESS(_c_) \
505 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
506
507static int arc_no_grow; /* Don't try to grow cache size */
508static uint64_t arc_tempreserve;
509static uint64_t arc_loaned_bytes;
510static uint64_t arc_meta_used;
511static uint64_t arc_meta_limit;
512static uint64_t arc_meta_max = 0;
513SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
514 "ARC metadata used");
515SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
516 "ARC metadata limit");
517
518typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
519
520typedef struct arc_callback arc_callback_t;
521
522struct arc_callback {
523 void *acb_private;
524 arc_done_func_t *acb_done;
525 arc_buf_t *acb_buf;
526 zio_t *acb_zio_dummy;
527 arc_callback_t *acb_next;
528};
529
530typedef struct arc_write_callback arc_write_callback_t;
531
532struct arc_write_callback {
533 void *awcb_private;
534 arc_done_func_t *awcb_ready;
535 arc_done_func_t *awcb_physdone;
536 arc_done_func_t *awcb_done;
537 arc_buf_t *awcb_buf;
538};
539
540struct arc_buf_hdr {
541 /* protected by hash lock */
542 dva_t b_dva;
543 uint64_t b_birth;
544 uint64_t b_cksum0;
545
546 kmutex_t b_freeze_lock;
547 zio_cksum_t *b_freeze_cksum;
548 void *b_thawed;
549
550 arc_buf_hdr_t *b_hash_next;
551 arc_buf_t *b_buf;
552 uint32_t b_flags;
553 uint32_t b_datacnt;
554
555 arc_callback_t *b_acb;
556 kcondvar_t b_cv;
557
558 /* immutable */
559 arc_buf_contents_t b_type;
560 uint64_t b_size;
561 uint64_t b_spa;
562
563 /* protected by arc state mutex */
564 arc_state_t *b_state;
565 list_node_t b_arc_node;
566
567 /* updated atomically */
568 clock_t b_arc_access;
569
570 /* self protecting */
571 refcount_t b_refcnt;
572
573 l2arc_buf_hdr_t *b_l2hdr;
574 list_node_t b_l2node;
575};
576
577static arc_buf_t *arc_eviction_list;
578static kmutex_t arc_eviction_mtx;
579static arc_buf_hdr_t arc_eviction_hdr;
580static void arc_get_data_buf(arc_buf_t *buf);
581static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
582static int arc_evict_needed(arc_buf_contents_t type);
583static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
584#ifdef illumos
585static void arc_buf_watch(arc_buf_t *buf);
586#endif /* illumos */
587
588static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
589
590#define GHOST_STATE(state) \
591 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
592 (state) == arc_l2c_only)
593
594/*
595 * Private ARC flags. These flags are private ARC-only flags that will show up
596 * in b_flags in the arc_buf_hdr_t. Some flags are publicly declared, and can
597 * be passed in as arc_flags in things like arc_read. However, these flags
598 * should never be passed and should only be set by ARC code. When adding new
599 * public flags, make sure not to smash the private ones.
600 */
601
602#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
603#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
604#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
605#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
606#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
607#define ARC_INDIRECT (1 << 14) /* this is an indirect block */
608#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
609#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
610#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
611#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
612
613#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
614#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
615#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
616#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
617#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
618#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
619#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
620#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
621#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
622 (hdr)->b_l2hdr != NULL)
623#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
624#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
625#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
626
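/*
 * Editor-added illustrative sketch -- not part of arc.c.  The private flags
 * above are set and cleared on hdr->b_flags while the header's hash table
 * lock is held, e.g. when an I/O is started and later completed.
 */
static void
mark_io_sketch(arc_buf_hdr_t *hdr, boolean_t start)
{
	/* caller holds the header's hash table lock */
	if (start)
		hdr->b_flags |= ARC_IO_IN_PROGRESS;
	else
		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
}
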
627/*
628 * Other sizes
629 */
630
631#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
632#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
633
634/*
635 * Hash table routines
636 */
637
638#define HT_LOCK_PAD CACHE_LINE_SIZE
639
640struct ht_lock {
641 kmutex_t ht_lock;
642#ifdef _KERNEL
643 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
644#endif
645};
646
647#define BUF_LOCKS 256
648typedef struct buf_hash_table {
649 uint64_t ht_mask;
650 arc_buf_hdr_t **ht_table;
651 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
652} buf_hash_table_t;
653
654static buf_hash_table_t buf_hash_table;
655
656#define BUF_HASH_INDEX(spa, dva, birth) \
657 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
658#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
659#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
660#define HDR_LOCK(hdr) \
661 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
662
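/*
 * Editor-added illustrative sketch -- not part of arc.c.  The "method 1"
 * lookup from the locking model comment: given a bucket index (the real
 * code derives it with BUF_HASH_INDEX(), whose buf_hash() helper is defined
 * further down), take that bucket's lock, walk the chain, and on a hit
 * return the header with its hash lock still held.  This mirrors
 * buf_hash_find() below; it is not that function.
 */
static arc_buf_hdr_t *
hash_lookup_sketch(uint64_t idx, uint64_t spa, const dva_t *dva,
    uint64_t birth, kmutex_t **lockp)
{
	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
	arc_buf_hdr_t *hdr;

	mutex_enter(hash_lock);
	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
	    hdr = hdr->b_hash_next) {
		if (hdr->b_spa == spa && hdr->b_birth == birth &&
		    DVA_EQUAL(&hdr->b_dva, dva)) {
			*lockp = hash_lock;	/* returned held */
			return (hdr);
		}
	}
	mutex_exit(hash_lock);
	*lockp = NULL;
	return (NULL);
}
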
663uint64_t zfs_crc64_table[256];
664
665/*
666 * Level 2 ARC
667 */
668
669#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
670#define L2ARC_HEADROOM 2 /* num of writes */
671/*
672 * If we discover during ARC scan any buffers to be compressed, we boost
673 * our headroom for the next scanning cycle by this percentage multiple.
674 */
675#define L2ARC_HEADROOM_BOOST 200
676#define L2ARC_FEED_SECS 1 /* caching interval secs */
677#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
678
679#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
680#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
681
682/* L2ARC Performance Tunables */
683uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
684uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
685uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
686uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
687uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
688uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
689boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
690boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
691boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
692
693SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
694 &l2arc_write_max, 0, "max write size");
695SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
696 &l2arc_write_boost, 0, "extra write during warmup");
697SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
698 &l2arc_headroom, 0, "number of dev writes");
699SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
700 &l2arc_feed_secs, 0, "interval seconds");
701SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
702 &l2arc_feed_min_ms, 0, "min interval milliseconds");
703
704SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
705 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
706SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
707 &l2arc_feed_again, 0, "turbo warmup");
708SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
709 &l2arc_norw, 0, "no reads during writes");
710
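/*
 * Editor-added illustrative sketch -- not part of arc.c.  How the tunables
 * above combine: until the ARC is warm the write boost is added on top of
 * the normal per-pass maximum, and the device scan headroom is that target
 * times l2arc_headroom, scaled by l2arc_headroom_boost (a percentage) when
 * compressible buffers were seen.  The real computation lives in the L2ARC
 * feed thread code further down.
 */
static uint64_t
l2arc_write_target_sketch(boolean_t saw_compressed, uint64_t *headroomp)
{
	uint64_t target = l2arc_write_max;

	if (!arc_warm)
		target += l2arc_write_boost;	/* turbo warmup */

	*headroomp = target * l2arc_headroom;
	if (saw_compressed)
		*headroomp = (*headroomp * l2arc_headroom_boost) / 100;

	return (target);
}
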
711SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
712 &ARC_anon.arcs_size, 0, "size of anonymous state");
713SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
714 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
715SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
716 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
717
718SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
719 &ARC_mru.arcs_size, 0, "size of mru state");
720SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
721 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
722SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
723 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
724
725SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
726 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
727SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
728 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
729 "size of metadata in mru ghost state");
730SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
731 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
732 "size of data in mru ghost state");
733
734SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
735 &ARC_mfu.arcs_size, 0, "size of mfu state");
736SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
737 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
738SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
739 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
740
741SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
742 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
744 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
745 "size of metadata in mfu ghost state");
746SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
747 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
748 "size of data in mfu ghost state");
749
750SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
751 &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
752
753/*
754 * L2ARC Internals
755 */
756typedef struct l2arc_dev {
757 vdev_t *l2ad_vdev; /* vdev */
758 spa_t *l2ad_spa; /* spa */
759 uint64_t l2ad_hand; /* next write location */
760 uint64_t l2ad_start; /* first addr on device */
761 uint64_t l2ad_end; /* last addr on device */
762 uint64_t l2ad_evict; /* last addr eviction reached */
763 boolean_t l2ad_first; /* first sweep through */
764 boolean_t l2ad_writing; /* currently writing */
765 list_t *l2ad_buflist; /* buffer list */
766 list_node_t l2ad_node; /* device list node */
767} l2arc_dev_t;
768
769static list_t L2ARC_dev_list; /* device list */
770static list_t *l2arc_dev_list; /* device list pointer */
771static kmutex_t l2arc_dev_mtx; /* device list mutex */
772static l2arc_dev_t *l2arc_dev_last; /* last device used */
773static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
774static list_t L2ARC_free_on_write; /* free after write buf list */
775static list_t *l2arc_free_on_write; /* free after write list ptr */
776static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
777static uint64_t l2arc_ndev; /* number of devices */
778
779typedef struct l2arc_read_callback {
780 arc_buf_t *l2rcb_buf; /* read buffer */
781 spa_t *l2rcb_spa; /* spa */
782 blkptr_t l2rcb_bp; /* original blkptr */
24 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
25 * Copyright 2014 Nexenta Systems, Inc. All rights reserved.
26 */
27
28/*
29 * DVA-based Adjustable Replacement Cache
30 *
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
35 *
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory. This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also make the performance characteristics easy to reason
40 * about. Our cache is not so simple. At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them. Blocks are only evictable
43 * when there are no external references active. This makes
44 * eviction far more problematic: we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
46 *
47 * There are times when it is not possible to evict the requested
48 * space. In these circumstances we are unable to adjust the cache
49 * size. To prevent the cache growing unbounded at these times we
50 * implement a "cache throttle" that slows the flow of new data
51 * into the cache until we can make space available.
52 *
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss. Our model has a variable sized cache. It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is
58 * tight.
59 *
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size. So
62 * when adjusting the cache size following a cache miss, its simply
63 * a matter of choosing a single page to evict. In our model, we
64 * have variable sized cache blocks (rangeing from 512 bytes to
65 * 128K bytes). We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
68 *
69 * See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
71 */
72
73/*
74 * The locking model:
75 *
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists. The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2. We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
83 *
84 * Buffers do not have their own mutexs, rather they rely on the
85 * hash table mutexs for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexs).
87 *
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table. It returns
90 * NULL for the mutex if the buffer was not in the table.
91 *
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
94 *
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state. When attempting to
97 * obtain a hash table lock while holding an arc list lock you
98 * must use: mutex_tryenter() to avoid deadlock. Also note that
99 * the active state mutex must be held before the ghost state mutex.
100 *
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()). Note however that the data associated
104 * with the buffer may be evicted prior to the callback. The callback
105 * must be made with *no locks held* (to prevent deadlock). Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_buf_evict()
108 * and arc_do_user_evicts().
109 *
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
112 *
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114 *
115 * - L2ARC buflist creation
116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
120 */
121
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#include <sys/dsl_pool.h>
131#ifdef _KERNEL
132#include <sys/dnlc.h>
133#endif
134#include <sys/callb.h>
135#include <sys/kstat.h>
136#include <sys/trim_map.h>
137#include <zfs_fletcher.h>
138#include <sys/sdt.h>
139
140#include <vm/vm_pageout.h>
141
142#ifdef illumos
143#ifndef _KERNEL
144/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
145boolean_t arc_watch = B_FALSE;
146int arc_procfd;
147#endif
148#endif /* illumos */
149
150static kmutex_t arc_reclaim_thr_lock;
151static kcondvar_t arc_reclaim_thr_cv; /* used to signal reclaim thr */
152static uint8_t arc_thread_exit;
153
154#define ARC_REDUCE_DNLC_PERCENT 3
155uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
156
157typedef enum arc_reclaim_strategy {
158 ARC_RECLAIM_AGGR, /* Aggressive reclaim strategy */
159 ARC_RECLAIM_CONS /* Conservative reclaim strategy */
160} arc_reclaim_strategy_t;
161
162/*
163 * The number of iterations through arc_evict_*() before we
164 * drop & reacquire the lock.
165 */
166int arc_evict_iterations = 100;
167
168/* number of seconds before growing cache again */
169static int arc_grow_retry = 60;
170
171/* shift of arc_c for calculating both min and max arc_p */
172static int arc_p_min_shift = 4;
173
174/* log2(fraction of arc to reclaim) */
175static int arc_shrink_shift = 5;
176
177/*
178 * minimum lifespan of a prefetch block in clock ticks
179 * (initialized in arc_init())
180 */
181static int arc_min_prefetch_lifespan;
182
183/*
184 * If this percent of memory is free, don't throttle.
185 */
186int arc_lotsfree_percent = 10;
187
188static int arc_dead;
189extern int zfs_prefetch_disable;
190
191/*
192 * The arc has filled available memory and has now warmed up.
193 */
194static boolean_t arc_warm;
195
196/*
197 * These tunables are for performance analysis.
198 */
199uint64_t zfs_arc_max;
200uint64_t zfs_arc_min;
201uint64_t zfs_arc_meta_limit = 0;
202int zfs_arc_grow_retry = 0;
203int zfs_arc_shrink_shift = 0;
204int zfs_arc_p_min_shift = 0;
205int zfs_disable_dup_eviction = 0;
206
207TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
208SYSCTL_DECL(_vfs_zfs);
209SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
210 "Maximum ARC size");
211SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
212 "Minimum ARC size");
213
214/*
215 * Note that buffers can be in one of 6 states:
216 * ARC_anon - anonymous (discussed below)
217 * ARC_mru - recently used, currently cached
218 * ARC_mru_ghost - recentely used, no longer in cache
219 * ARC_mfu - frequently used, currently cached
220 * ARC_mfu_ghost - frequently used, no longer in cache
221 * ARC_l2c_only - exists in L2ARC but not other states
222 * When there are no active references to the buffer, they are
223 * are linked onto a list in one of these arc states. These are
224 * the only buffers that can be evicted or deleted. Within each
225 * state there are multiple lists, one for meta-data and one for
226 * non-meta-data. Meta-data (indirect blocks, blocks of dnodes,
227 * etc.) is tracked separately so that it can be managed more
228 * explicitly: favored over data, limited explicitly.
229 *
230 * Anonymous buffers are buffers that are not associated with
231 * a DVA. These are buffers that hold dirty block copies
232 * before they are written to stable storage. By definition,
233 * they are "ref'd" and are considered part of arc_mru
234 * that cannot be freed. Generally, they will aquire a DVA
235 * as they are written and migrate onto the arc_mru list.
236 *
237 * The ARC_l2c_only state is for buffers that are in the second
238 * level ARC but no longer in any of the ARC_m* lists. The second
239 * level ARC itself may also contain buffers that are in any of
240 * the ARC_m* states - meaning that a buffer can exist in two
241 * places. The reason for the ARC_l2c_only state is to keep the
242 * buffer header in the hash table, so that reads that hit the
243 * second level ARC benefit from these fast lookups.
244 */
245
246#define ARCS_LOCK_PAD CACHE_LINE_SIZE
247struct arcs_lock {
248 kmutex_t arcs_lock;
249#ifdef _KERNEL
250 unsigned char pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
251#endif
252};
253
254/*
255 * must be power of two for mask use to work
256 *
257 */
258#define ARC_BUFC_NUMDATALISTS 16
259#define ARC_BUFC_NUMMETADATALISTS 16
260#define ARC_BUFC_NUMLISTS (ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
261
262typedef struct arc_state {
263 uint64_t arcs_lsize[ARC_BUFC_NUMTYPES]; /* amount of evictable data */
264 uint64_t arcs_size; /* total amount of data in this state */
265 list_t arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
266 struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
267} arc_state_t;
268
269#define ARCS_LOCK(s, i) (&((s)->arcs_locks[(i)].arcs_lock))
270
271/* The 6 states: */
272static arc_state_t ARC_anon;
273static arc_state_t ARC_mru;
274static arc_state_t ARC_mru_ghost;
275static arc_state_t ARC_mfu;
276static arc_state_t ARC_mfu_ghost;
277static arc_state_t ARC_l2c_only;
278
279typedef struct arc_stats {
280 kstat_named_t arcstat_hits;
281 kstat_named_t arcstat_misses;
282 kstat_named_t arcstat_demand_data_hits;
283 kstat_named_t arcstat_demand_data_misses;
284 kstat_named_t arcstat_demand_metadata_hits;
285 kstat_named_t arcstat_demand_metadata_misses;
286 kstat_named_t arcstat_prefetch_data_hits;
287 kstat_named_t arcstat_prefetch_data_misses;
288 kstat_named_t arcstat_prefetch_metadata_hits;
289 kstat_named_t arcstat_prefetch_metadata_misses;
290 kstat_named_t arcstat_mru_hits;
291 kstat_named_t arcstat_mru_ghost_hits;
292 kstat_named_t arcstat_mfu_hits;
293 kstat_named_t arcstat_mfu_ghost_hits;
294 kstat_named_t arcstat_allocated;
295 kstat_named_t arcstat_deleted;
296 kstat_named_t arcstat_stolen;
297 kstat_named_t arcstat_recycle_miss;
298 /*
299 * Number of buffers that could not be evicted because the hash lock
300 * was held by another thread. The lock may not necessarily be held
301 * by something using the same buffer, since hash locks are shared
302 * by multiple buffers.
303 */
304 kstat_named_t arcstat_mutex_miss;
305 /*
306 * Number of buffers skipped because they have I/O in progress, are
307 * indrect prefetch buffers that have not lived long enough, or are
308 * not from the spa we're trying to evict from.
309 */
310 kstat_named_t arcstat_evict_skip;
311 kstat_named_t arcstat_evict_l2_cached;
312 kstat_named_t arcstat_evict_l2_eligible;
313 kstat_named_t arcstat_evict_l2_ineligible;
314 kstat_named_t arcstat_hash_elements;
315 kstat_named_t arcstat_hash_elements_max;
316 kstat_named_t arcstat_hash_collisions;
317 kstat_named_t arcstat_hash_chains;
318 kstat_named_t arcstat_hash_chain_max;
319 kstat_named_t arcstat_p;
320 kstat_named_t arcstat_c;
321 kstat_named_t arcstat_c_min;
322 kstat_named_t arcstat_c_max;
323 kstat_named_t arcstat_size;
324 kstat_named_t arcstat_hdr_size;
325 kstat_named_t arcstat_data_size;
326 kstat_named_t arcstat_other_size;
327 kstat_named_t arcstat_l2_hits;
328 kstat_named_t arcstat_l2_misses;
329 kstat_named_t arcstat_l2_feeds;
330 kstat_named_t arcstat_l2_rw_clash;
331 kstat_named_t arcstat_l2_read_bytes;
332 kstat_named_t arcstat_l2_write_bytes;
333 kstat_named_t arcstat_l2_writes_sent;
334 kstat_named_t arcstat_l2_writes_done;
335 kstat_named_t arcstat_l2_writes_error;
336 kstat_named_t arcstat_l2_writes_hdr_miss;
337 kstat_named_t arcstat_l2_evict_lock_retry;
338 kstat_named_t arcstat_l2_evict_reading;
339 kstat_named_t arcstat_l2_free_on_write;
340 kstat_named_t arcstat_l2_abort_lowmem;
341 kstat_named_t arcstat_l2_cksum_bad;
342 kstat_named_t arcstat_l2_io_error;
343 kstat_named_t arcstat_l2_size;
344 kstat_named_t arcstat_l2_asize;
345 kstat_named_t arcstat_l2_hdr_size;
346 kstat_named_t arcstat_l2_compress_successes;
347 kstat_named_t arcstat_l2_compress_zeros;
348 kstat_named_t arcstat_l2_compress_failures;
349 kstat_named_t arcstat_l2_write_trylock_fail;
350 kstat_named_t arcstat_l2_write_passed_headroom;
351 kstat_named_t arcstat_l2_write_spa_mismatch;
352 kstat_named_t arcstat_l2_write_in_l2;
353 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
354 kstat_named_t arcstat_l2_write_not_cacheable;
355 kstat_named_t arcstat_l2_write_full;
356 kstat_named_t arcstat_l2_write_buffer_iter;
357 kstat_named_t arcstat_l2_write_pios;
358 kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
359 kstat_named_t arcstat_l2_write_buffer_list_iter;
360 kstat_named_t arcstat_l2_write_buffer_list_null_iter;
361 kstat_named_t arcstat_memory_throttle_count;
362 kstat_named_t arcstat_duplicate_buffers;
363 kstat_named_t arcstat_duplicate_buffers_size;
364 kstat_named_t arcstat_duplicate_reads;
365} arc_stats_t;
366
367static arc_stats_t arc_stats = {
368 { "hits", KSTAT_DATA_UINT64 },
369 { "misses", KSTAT_DATA_UINT64 },
370 { "demand_data_hits", KSTAT_DATA_UINT64 },
371 { "demand_data_misses", KSTAT_DATA_UINT64 },
372 { "demand_metadata_hits", KSTAT_DATA_UINT64 },
373 { "demand_metadata_misses", KSTAT_DATA_UINT64 },
374 { "prefetch_data_hits", KSTAT_DATA_UINT64 },
375 { "prefetch_data_misses", KSTAT_DATA_UINT64 },
376 { "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
377 { "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
378 { "mru_hits", KSTAT_DATA_UINT64 },
379 { "mru_ghost_hits", KSTAT_DATA_UINT64 },
380 { "mfu_hits", KSTAT_DATA_UINT64 },
381 { "mfu_ghost_hits", KSTAT_DATA_UINT64 },
382 { "allocated", KSTAT_DATA_UINT64 },
383 { "deleted", KSTAT_DATA_UINT64 },
384 { "stolen", KSTAT_DATA_UINT64 },
385 { "recycle_miss", KSTAT_DATA_UINT64 },
386 { "mutex_miss", KSTAT_DATA_UINT64 },
387 { "evict_skip", KSTAT_DATA_UINT64 },
388 { "evict_l2_cached", KSTAT_DATA_UINT64 },
389 { "evict_l2_eligible", KSTAT_DATA_UINT64 },
390 { "evict_l2_ineligible", KSTAT_DATA_UINT64 },
391 { "hash_elements", KSTAT_DATA_UINT64 },
392 { "hash_elements_max", KSTAT_DATA_UINT64 },
393 { "hash_collisions", KSTAT_DATA_UINT64 },
394 { "hash_chains", KSTAT_DATA_UINT64 },
395 { "hash_chain_max", KSTAT_DATA_UINT64 },
396 { "p", KSTAT_DATA_UINT64 },
397 { "c", KSTAT_DATA_UINT64 },
398 { "c_min", KSTAT_DATA_UINT64 },
399 { "c_max", KSTAT_DATA_UINT64 },
400 { "size", KSTAT_DATA_UINT64 },
401 { "hdr_size", KSTAT_DATA_UINT64 },
402 { "data_size", KSTAT_DATA_UINT64 },
403 { "other_size", KSTAT_DATA_UINT64 },
404 { "l2_hits", KSTAT_DATA_UINT64 },
405 { "l2_misses", KSTAT_DATA_UINT64 },
406 { "l2_feeds", KSTAT_DATA_UINT64 },
407 { "l2_rw_clash", KSTAT_DATA_UINT64 },
408 { "l2_read_bytes", KSTAT_DATA_UINT64 },
409 { "l2_write_bytes", KSTAT_DATA_UINT64 },
410 { "l2_writes_sent", KSTAT_DATA_UINT64 },
411 { "l2_writes_done", KSTAT_DATA_UINT64 },
412 { "l2_writes_error", KSTAT_DATA_UINT64 },
413 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
414 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
415 { "l2_evict_reading", KSTAT_DATA_UINT64 },
416 { "l2_free_on_write", KSTAT_DATA_UINT64 },
417 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
418 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
419 { "l2_io_error", KSTAT_DATA_UINT64 },
420 { "l2_size", KSTAT_DATA_UINT64 },
421 { "l2_asize", KSTAT_DATA_UINT64 },
422 { "l2_hdr_size", KSTAT_DATA_UINT64 },
423 { "l2_compress_successes", KSTAT_DATA_UINT64 },
424 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
425 { "l2_compress_failures", KSTAT_DATA_UINT64 },
426 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
427 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
428 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
429 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
430 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
431 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
432 { "l2_write_full", KSTAT_DATA_UINT64 },
433 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },
434 { "l2_write_pios", KSTAT_DATA_UINT64 },
435 { "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
436 { "l2_write_buffer_list_iter", KSTAT_DATA_UINT64 },
437 { "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
438 { "memory_throttle_count", KSTAT_DATA_UINT64 },
439 { "duplicate_buffers", KSTAT_DATA_UINT64 },
440 { "duplicate_buffers_size", KSTAT_DATA_UINT64 },
441 { "duplicate_reads", KSTAT_DATA_UINT64 }
442};
443
444#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
445
446#define ARCSTAT_INCR(stat, val) \
447 atomic_add_64(&arc_stats.stat.value.ui64, (val))
448
449#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
450#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
451
452#define ARCSTAT_MAX(stat, val) { \
453 uint64_t m; \
454 while ((val) > (m = arc_stats.stat.value.ui64) && \
455 (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
456 continue; \
457}
458
459#define ARCSTAT_MAXSTAT(stat) \
460 ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
461
462/*
463 * We define a macro to allow ARC hits/misses to be easily broken down by
464 * two separate conditions, giving a total of four different subtypes for
465 * each of hits and misses (so eight statistics total).
466 */
467#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
468 if (cond1) { \
469 if (cond2) { \
470 ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
471 } else { \
472 ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
473 } \
474 } else { \
475 if (cond2) { \
476 ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
477 } else { \
478 ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
479 } \
480 }
481
482kstat_t *arc_ksp;
483static arc_state_t *arc_anon;
484static arc_state_t *arc_mru;
485static arc_state_t *arc_mru_ghost;
486static arc_state_t *arc_mfu;
487static arc_state_t *arc_mfu_ghost;
488static arc_state_t *arc_l2c_only;
489
490/*
491 * There are several ARC variables that are critical to export as kstats --
492 * but we don't want to have to grovel around in the kstat whenever we wish to
493 * manipulate them. For these variables, we therefore define them to be in
494 * terms of the statistic variable. This assures that we are not introducing
495 * the possibility of inconsistency by having shadow copies of the variables,
496 * while still allowing the code to be readable.
497 */
498#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
499#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
500#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
501#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
502#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
503
504#define L2ARC_IS_VALID_COMPRESS(_c_) \
505 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
506
507static int arc_no_grow; /* Don't try to grow cache size */
508static uint64_t arc_tempreserve;
509static uint64_t arc_loaned_bytes;
510static uint64_t arc_meta_used;
511static uint64_t arc_meta_limit;
512static uint64_t arc_meta_max = 0;
513SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
514 "ARC metadata used");
515SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
516 "ARC metadata limit");
517
518typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
519
520typedef struct arc_callback arc_callback_t;
521
522struct arc_callback {
523 void *acb_private;
524 arc_done_func_t *acb_done;
525 arc_buf_t *acb_buf;
526 zio_t *acb_zio_dummy;
527 arc_callback_t *acb_next;
528};
529
530typedef struct arc_write_callback arc_write_callback_t;
531
532struct arc_write_callback {
533 void *awcb_private;
534 arc_done_func_t *awcb_ready;
535 arc_done_func_t *awcb_physdone;
536 arc_done_func_t *awcb_done;
537 arc_buf_t *awcb_buf;
538};
539
540struct arc_buf_hdr {
541 /* protected by hash lock */
542 dva_t b_dva;
543 uint64_t b_birth;
544 uint64_t b_cksum0;
545
546 kmutex_t b_freeze_lock;
547 zio_cksum_t *b_freeze_cksum;
548 void *b_thawed;
549
550 arc_buf_hdr_t *b_hash_next;
551 arc_buf_t *b_buf;
552 uint32_t b_flags;
553 uint32_t b_datacnt;
554
555 arc_callback_t *b_acb;
556 kcondvar_t b_cv;
557
558 /* immutable */
559 arc_buf_contents_t b_type;
560 uint64_t b_size;
561 uint64_t b_spa;
562
563 /* protected by arc state mutex */
564 arc_state_t *b_state;
565 list_node_t b_arc_node;
566
567 /* updated atomically */
568 clock_t b_arc_access;
569
570 /* self protecting */
571 refcount_t b_refcnt;
572
573 l2arc_buf_hdr_t *b_l2hdr;
574 list_node_t b_l2node;
575};
576
577static arc_buf_t *arc_eviction_list;
578static kmutex_t arc_eviction_mtx;
579static arc_buf_hdr_t arc_eviction_hdr;
580static void arc_get_data_buf(arc_buf_t *buf);
581static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
582static int arc_evict_needed(arc_buf_contents_t type);
583static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
584#ifdef illumos
585static void arc_buf_watch(arc_buf_t *buf);
586#endif /* illumos */
587
588static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
589
590#define GHOST_STATE(state) \
591 ((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
592 (state) == arc_l2c_only)
593
594/*
595 * Private ARC flags. These flags are private ARC only flags that will show up
596 * in b_flags in the arc_hdr_buf_t. Some flags are publicly declared, and can
597 * be passed in as arc_flags in things like arc_read. However, these flags
598 * should never be passed and should only be set by ARC code. When adding new
599 * public flags, make sure not to smash the private ones.
600 */
601
602#define ARC_IN_HASH_TABLE (1 << 9) /* this buffer is hashed */
603#define ARC_IO_IN_PROGRESS (1 << 10) /* I/O in progress for buf */
604#define ARC_IO_ERROR (1 << 11) /* I/O failed for buf */
605#define ARC_FREED_IN_READ (1 << 12) /* buf freed while in read */
606#define ARC_BUF_AVAILABLE (1 << 13) /* block not in active use */
607#define ARC_INDIRECT (1 << 14) /* this is an indirect block */
608#define ARC_FREE_IN_PROGRESS (1 << 15) /* hdr about to be freed */
609#define ARC_L2_WRITING (1 << 16) /* L2ARC write in progress */
610#define ARC_L2_EVICTED (1 << 17) /* evicted during I/O */
611#define ARC_L2_WRITE_HEAD (1 << 18) /* head of write list */
612
613#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_IN_HASH_TABLE)
614#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS)
615#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_IO_ERROR)
616#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_PREFETCH)
617#define HDR_FREED_IN_READ(hdr) ((hdr)->b_flags & ARC_FREED_IN_READ)
618#define HDR_BUF_AVAILABLE(hdr) ((hdr)->b_flags & ARC_BUF_AVAILABLE)
619#define HDR_FREE_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
620#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_L2CACHE)
621#define HDR_L2_READING(hdr) ((hdr)->b_flags & ARC_IO_IN_PROGRESS && \
622 (hdr)->b_l2hdr != NULL)
623#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_L2_WRITING)
624#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_L2_EVICTED)
625#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_L2_WRITE_HEAD)
626
627/*
628 * Other sizes
629 */
630
631#define HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
632#define L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
633
634/*
635 * Hash table routines
636 */
637
638#define HT_LOCK_PAD CACHE_LINE_SIZE
639
640struct ht_lock {
641 kmutex_t ht_lock;
642#ifdef _KERNEL
643 unsigned char pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
644#endif
645};
646
647#define BUF_LOCKS 256
648typedef struct buf_hash_table {
649 uint64_t ht_mask;
650 arc_buf_hdr_t **ht_table;
651 struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
652} buf_hash_table_t;
653
654static buf_hash_table_t buf_hash_table;
655
656#define BUF_HASH_INDEX(spa, dva, birth) \
657 (buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
658#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
659#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
660#define HDR_LOCK(hdr) \
661 (BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
662
663uint64_t zfs_crc64_table[256];
664
665/*
666 * Level 2 ARC
667 */
668
669#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
670#define L2ARC_HEADROOM 2 /* num of writes */
671/*
672 * If during an ARC scan we discover buffers that will be compressed, we
673 * boost our headroom for the next scanning cycle by this percentage multiple.
674 */
675#define L2ARC_HEADROOM_BOOST 200
676#define L2ARC_FEED_SECS 1 /* caching interval secs */
677#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
678
679#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
680#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
681
682/* L2ARC Performance Tunables */
683uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
684uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
685uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
686uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
687uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
688uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
689boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
690boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
691boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
692
693SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
694 &l2arc_write_max, 0, "max write size");
695SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
696 &l2arc_write_boost, 0, "extra write during warmup");
697SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
698 &l2arc_headroom, 0, "number of dev writes");
699SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
700 &l2arc_feed_secs, 0, "interval seconds");
701SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
702 &l2arc_feed_min_ms, 0, "min interval milliseconds");
703
704SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
705 &l2arc_noprefetch, 0, "don't cache prefetch bufs");
706SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
707 &l2arc_feed_again, 0, "turbo warmup");
708SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
709 &l2arc_norw, 0, "no reads during writes");
710
711SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
712 &ARC_anon.arcs_size, 0, "size of anonymous state");
713SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
714 &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
715SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
716 &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
717
718SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
719 &ARC_mru.arcs_size, 0, "size of mru state");
720SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
721 &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
722SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
723 &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
724
725SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
726 &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
727SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
728 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
729 "size of metadata in mru ghost state");
730SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
731 &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
732 "size of data in mru ghost state");
733
734SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
735 &ARC_mfu.arcs_size, 0, "size of mfu state");
736SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
737 &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
738SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
739 &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
740
741SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
742 &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
744 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
745 "size of metadata in mfu ghost state");
746SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
747 &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
748 "size of data in mfu ghost state");
749
750SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
751 &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
752
753/*
754 * L2ARC Internals
755 */
756typedef struct l2arc_dev {
757 vdev_t *l2ad_vdev; /* vdev */
758 spa_t *l2ad_spa; /* spa */
759 uint64_t l2ad_hand; /* next write location */
760 uint64_t l2ad_start; /* first addr on device */
761 uint64_t l2ad_end; /* last addr on device */
762 uint64_t l2ad_evict; /* last addr eviction reached */
763 boolean_t l2ad_first; /* first sweep through */
764 boolean_t l2ad_writing; /* currently writing */
765 list_t *l2ad_buflist; /* buffer list */
766 list_node_t l2ad_node; /* device list node */
767} l2arc_dev_t;
768
769static list_t L2ARC_dev_list; /* device list */
770static list_t *l2arc_dev_list; /* device list pointer */
771static kmutex_t l2arc_dev_mtx; /* device list mutex */
772static l2arc_dev_t *l2arc_dev_last; /* last device used */
773static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
774static list_t L2ARC_free_on_write; /* free after write buf list */
775static list_t *l2arc_free_on_write; /* free after write list ptr */
776static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
777static uint64_t l2arc_ndev; /* number of devices */
778
779typedef struct l2arc_read_callback {
780 arc_buf_t *l2rcb_buf; /* read buffer */
781 spa_t *l2rcb_spa; /* spa */
782 blkptr_t l2rcb_bp; /* original blkptr */
783 zbookmark_t l2rcb_zb; /* original bookmark */
783 zbookmark_phys_t l2rcb_zb; /* original bookmark */
784 int l2rcb_flags; /* original flags */
785 enum zio_compress l2rcb_compress; /* applied compress */
786} l2arc_read_callback_t;
787
788typedef struct l2arc_write_callback {
789 l2arc_dev_t *l2wcb_dev; /* device info */
790 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
791} l2arc_write_callback_t;
792
793struct l2arc_buf_hdr {
794 /* protected by arc_buf_hdr mutex */
795 l2arc_dev_t *b_dev; /* L2ARC device */
796 uint64_t b_daddr; /* disk address, offset byte */
797 /* compression applied to buffer data */
798 enum zio_compress b_compress;
799 /* real alloc'd buffer size depending on b_compress applied */
800 int b_asize;
801 /* temporary buffer holder for in-flight compressed data */
802 void *b_tmp_cdata;
803};
804
805typedef struct l2arc_data_free {
806 /* protected by l2arc_free_on_write_mtx */
807 void *l2df_data;
808 size_t l2df_size;
809 void (*l2df_func)(void *, size_t);
810 list_node_t l2df_list_node;
811} l2arc_data_free_t;
812
813static kmutex_t l2arc_feed_thr_lock;
814static kcondvar_t l2arc_feed_thr_cv;
815static uint8_t l2arc_thread_exit;
816
817static void l2arc_read_done(zio_t *zio);
818static void l2arc_hdr_stat_add(void);
819static void l2arc_hdr_stat_remove(void);
820
821static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
822static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
823 enum zio_compress c);
824static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
825
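/*
 * Hash a buffer's identity (spa load guid, DVA and birth txg) for the
 * buf_hash_table: the DVA bytes are run through the ZFS CRC64 table and the
 * result is mixed with the spa guid and birth txg.
 */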
826static uint64_t
827buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
828{
829 uint8_t *vdva = (uint8_t *)dva;
830 uint64_t crc = -1ULL;
831 int i;
832
833 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
834
835 for (i = 0; i < sizeof (dva_t); i++)
836 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
837
838 crc ^= (spa>>8) ^ birth;
839
840 return (crc);
841}
842
843#define BUF_EMPTY(buf) \
844 ((buf)->b_dva.dva_word[0] == 0 && \
845 (buf)->b_dva.dva_word[1] == 0 && \
846 (buf)->b_cksum0 == 0)
847
848#define BUF_EQUAL(spa, dva, birth, buf) \
849 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
850 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
851 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
852
853static void
854buf_discard_identity(arc_buf_hdr_t *hdr)
855{
856 hdr->b_dva.dva_word[0] = 0;
857 hdr->b_dva.dva_word[1] = 0;
858 hdr->b_birth = 0;
859 hdr->b_cksum0 = 0;
860}
861
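/*
 * Look up the header matching the given spa/DVA/birth in the hash table.
 * On a hit the matching header is returned with its hash lock held and
 * recorded in *lockp; on a miss NULL is returned and *lockp is cleared.
 */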
862static arc_buf_hdr_t *
863buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
864{
865 const dva_t *dva = BP_IDENTITY(bp);
866 uint64_t birth = BP_PHYSICAL_BIRTH(bp);
867 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
868 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
869 arc_buf_hdr_t *buf;
870
871 mutex_enter(hash_lock);
872 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
873 buf = buf->b_hash_next) {
874 if (BUF_EQUAL(spa, dva, birth, buf)) {
875 *lockp = hash_lock;
876 return (buf);
877 }
878 }
879 mutex_exit(hash_lock);
880 *lockp = NULL;
881 return (NULL);
882}
883
884/*
885 * Insert an entry into the hash table. If an element with the same
886 * identity is already present, that existing element is returned and the
887 * new element is not inserted.
888 * Otherwise returns NULL.
889 */
890static arc_buf_hdr_t *
891buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
892{
893 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
894 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
895 arc_buf_hdr_t *fbuf;
896 uint32_t i;
897
898 ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
899 ASSERT(buf->b_birth != 0);
900 ASSERT(!HDR_IN_HASH_TABLE(buf));
901 *lockp = hash_lock;
902 mutex_enter(hash_lock);
903 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
904 fbuf = fbuf->b_hash_next, i++) {
905 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
906 return (fbuf);
907 }
908
909 buf->b_hash_next = buf_hash_table.ht_table[idx];
910 buf_hash_table.ht_table[idx] = buf;
911 buf->b_flags |= ARC_IN_HASH_TABLE;
912
913 /* collect some hash table performance data */
914 if (i > 0) {
915 ARCSTAT_BUMP(arcstat_hash_collisions);
916 if (i == 1)
917 ARCSTAT_BUMP(arcstat_hash_chains);
918
919 ARCSTAT_MAX(arcstat_hash_chain_max, i);
920 }
921
922 ARCSTAT_BUMP(arcstat_hash_elements);
923 ARCSTAT_MAXSTAT(arcstat_hash_elements);
924
925 return (NULL);
926}
927
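/*
 * Unlink a header from its hash chain. The caller must already hold the
 * hash lock covering this header's bucket.
 */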
928static void
929buf_hash_remove(arc_buf_hdr_t *buf)
930{
931 arc_buf_hdr_t *fbuf, **bufp;
932 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
933
934 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
935 ASSERT(HDR_IN_HASH_TABLE(buf));
936
937 bufp = &buf_hash_table.ht_table[idx];
938 while ((fbuf = *bufp) != buf) {
939 ASSERT(fbuf != NULL);
940 bufp = &fbuf->b_hash_next;
941 }
942 *bufp = buf->b_hash_next;
943 buf->b_hash_next = NULL;
944 buf->b_flags &= ~ARC_IN_HASH_TABLE;
945
946 /* collect some hash table performance data */
947 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
948
949 if (buf_hash_table.ht_table[idx] &&
950 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
951 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
952}
953
954/*
955 * Global data structures and functions for the buf kmem cache.
956 */
957static kmem_cache_t *hdr_cache;
958static kmem_cache_t *buf_cache;
959
960static void
961buf_fini(void)
962{
963 int i;
964
965 kmem_free(buf_hash_table.ht_table,
966 (buf_hash_table.ht_mask + 1) * sizeof (void *));
967 for (i = 0; i < BUF_LOCKS; i++)
968 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
969 kmem_cache_destroy(hdr_cache);
970 kmem_cache_destroy(buf_cache);
971}
972
973/*
974 * Constructor callback - called when the cache is empty
975 * and a new buf is requested.
976 */
977/* ARGSUSED */
978static int
979hdr_cons(void *vbuf, void *unused, int kmflag)
980{
981 arc_buf_hdr_t *buf = vbuf;
982
983 bzero(buf, sizeof (arc_buf_hdr_t));
984 refcount_create(&buf->b_refcnt);
985 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
986 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
987 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
988
989 return (0);
990}
991
992/* ARGSUSED */
993static int
994buf_cons(void *vbuf, void *unused, int kmflag)
995{
996 arc_buf_t *buf = vbuf;
997
998 bzero(buf, sizeof (arc_buf_t));
999 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1000 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Destructor callback - called when a cached buf is
1007 * no longer required.
1008 */
1009/* ARGSUSED */
1010static void
1011hdr_dest(void *vbuf, void *unused)
1012{
1013 arc_buf_hdr_t *buf = vbuf;
1014
1015 ASSERT(BUF_EMPTY(buf));
1016 refcount_destroy(&buf->b_refcnt);
1017 cv_destroy(&buf->b_cv);
1018 mutex_destroy(&buf->b_freeze_lock);
1019 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1020}
1021
1022/* ARGSUSED */
1023static void
1024buf_dest(void *vbuf, void *unused)
1025{
1026 arc_buf_t *buf = vbuf;
1027
1028 mutex_destroy(&buf->b_evict_lock);
1029 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1030}
1031
1032/*
1033 * Reclaim callback -- invoked when memory is low.
1034 */
1035/* ARGSUSED */
1036static void
1037hdr_recl(void *unused)
1038{
1039 dprintf("hdr_recl called\n");
1040 /*
1041 * umem calls the reclaim func when we destroy the buf cache,
1042 * which is after we do arc_fini().
1043 */
1044 if (!arc_dead)
1045 cv_signal(&arc_reclaim_thr_cv);
1046}
1047
1048static void
1049buf_init(void)
1050{
1051 uint64_t *ct;
1052 uint64_t hsize = 1ULL << 12;
1053 int i, j;
1054
1055 /*
1056 * The hash table is sized to index all of physical memory with
1057 * an average 64K block size. The table will take up
1058 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
1059 */
1060 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
1061 hsize <<= 1;
1062retry:
1063 buf_hash_table.ht_mask = hsize - 1;
1064 buf_hash_table.ht_table =
1065 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1066 if (buf_hash_table.ht_table == NULL) {
1067 ASSERT(hsize > (1ULL << 8));
1068 hsize >>= 1;
1069 goto retry;
1070 }
1071
1072 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1073 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1074 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1075 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1076
1077 for (i = 0; i < 256; i++)
1078 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1079 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1080
1081 for (i = 0; i < BUF_LOCKS; i++) {
1082 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1083 NULL, MUTEX_DEFAULT, NULL);
1084 }
1085}
1086
1087#define ARC_MINTIME (hz>>4) /* 62 ms */
1088
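/*
 * Debug-only (ZFS_DEBUG_MODIFY) check: recompute the buffer's fletcher-2
 * checksum and panic if it no longer matches the frozen checksum.
 */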
1089static void
1090arc_cksum_verify(arc_buf_t *buf)
1091{
1092 zio_cksum_t zc;
1093
1094 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1095 return;
1096
1097 mutex_enter(&buf->b_hdr->b_freeze_lock);
1098 if (buf->b_hdr->b_freeze_cksum == NULL ||
1099 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1100 mutex_exit(&buf->b_hdr->b_freeze_lock);
1101 return;
1102 }
1103 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1104 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1105 panic("buffer modified while frozen!");
1106 mutex_exit(&buf->b_hdr->b_freeze_lock);
1107}
1108
1109static int
1110arc_cksum_equal(arc_buf_t *buf)
1111{
1112 zio_cksum_t zc;
1113 int equal;
1114
1115 mutex_enter(&buf->b_hdr->b_freeze_lock);
1116 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1117 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1118 mutex_exit(&buf->b_hdr->b_freeze_lock);
1119
1120 return (equal);
1121}
1122
1123static void
1124arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1125{
1126 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1127 return;
1128
1129 mutex_enter(&buf->b_hdr->b_freeze_lock);
1130 if (buf->b_hdr->b_freeze_cksum != NULL) {
1131 mutex_exit(&buf->b_hdr->b_freeze_lock);
1132 return;
1133 }
1134 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1135 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1136 buf->b_hdr->b_freeze_cksum);
1137 mutex_exit(&buf->b_hdr->b_freeze_lock);
1138#ifdef illumos
1139 arc_buf_watch(buf);
1140#endif /* illumos */
1141}
1142
1143#ifdef illumos
1144#ifndef _KERNEL
1145typedef struct procctl {
1146 long cmd;
1147 prwatch_t prwatch;
1148} procctl_t;
1149#endif
1150
1151/* ARGSUSED */
1152static void
1153arc_buf_unwatch(arc_buf_t *buf)
1154{
1155#ifndef _KERNEL
1156 if (arc_watch) {
1157 int result;
1158 procctl_t ctl;
1159 ctl.cmd = PCWATCH;
1160 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1161 ctl.prwatch.pr_size = 0;
1162 ctl.prwatch.pr_wflags = 0;
1163 result = write(arc_procfd, &ctl, sizeof (ctl));
1164 ASSERT3U(result, ==, sizeof (ctl));
1165 }
1166#endif
1167}
1168
1169/* ARGSUSED */
1170static void
1171arc_buf_watch(arc_buf_t *buf)
1172{
1173#ifndef _KERNEL
1174 if (arc_watch) {
1175 int result;
1176 procctl_t ctl;
1177 ctl.cmd = PCWATCH;
1178 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1179 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1180 ctl.prwatch.pr_wflags = WA_WRITE;
1181 result = write(arc_procfd, &ctl, sizeof (ctl));
1182 ASSERT3U(result, ==, sizeof (ctl));
1183 }
1184#endif
1185}
1186#endif /* illumos */
1187
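/*
 * Discard the frozen checksum so the buffer may legitimately be modified
 * again. With ZFS_DEBUG_MODIFY set this also verifies the old checksum and
 * insists that the buffer is anonymous and not mid-I/O.
 */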
1188void
1189arc_buf_thaw(arc_buf_t *buf)
1190{
1191 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1192 if (buf->b_hdr->b_state != arc_anon)
1193 panic("modifying non-anon buffer!");
1194 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1195 panic("modifying buffer while i/o in progress!");
1196 arc_cksum_verify(buf);
1197 }
1198
1199 mutex_enter(&buf->b_hdr->b_freeze_lock);
1200 if (buf->b_hdr->b_freeze_cksum != NULL) {
1201 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1202 buf->b_hdr->b_freeze_cksum = NULL;
1203 }
1204
1205 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1206 if (buf->b_hdr->b_thawed)
1207 kmem_free(buf->b_hdr->b_thawed, 1);
1208 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1209 }
1210
1211 mutex_exit(&buf->b_hdr->b_freeze_lock);
1212
1213#ifdef illumos
1214 arc_buf_unwatch(buf);
1215#endif /* illumos */
1216}
1217
1218void
1219arc_buf_freeze(arc_buf_t *buf)
1220{
1221 kmutex_t *hash_lock;
1222
1223 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1224 return;
1225
1226 hash_lock = HDR_LOCK(buf->b_hdr);
1227 mutex_enter(hash_lock);
1228
1229 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1230 buf->b_hdr->b_state == arc_anon);
1231 arc_cksum_compute(buf, B_FALSE);
1232 mutex_exit(hash_lock);
1233
1234}
1235
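/*
 * Select which of the state's sublists (and which lock) a header belongs to
 * by hashing its identity; metadata headers map into the first
 * ARC_BUFC_NUMMETADATALISTS lists, data headers into the lists after them.
 */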
1236static void
1237get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1238{
1239 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1240
1241 if (ab->b_type == ARC_BUFC_METADATA)
1242 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1243 else {
1244 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1245 buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1246 }
1247
1248 *list = &state->arcs_lists[buf_hashid];
1249 *lock = ARCS_LOCK(state, buf_hashid);
1250}
1251
1252
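/*
 * Taking the first reference on a header in a non-anonymous state makes it
 * un-evictable: remove it from its state's list, subtract its bytes from
 * the evictable-size counter, and drop any lingering prefetch flag.
 */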
1253static void
1254add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1255{
1256 ASSERT(MUTEX_HELD(hash_lock));
1257
1258 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1259 (ab->b_state != arc_anon)) {
1260 uint64_t delta = ab->b_size * ab->b_datacnt;
1261 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1262 list_t *list;
1263 kmutex_t *lock;
1264
1265 get_buf_info(ab, ab->b_state, &list, &lock);
1266 ASSERT(!MUTEX_HELD(lock));
1267 mutex_enter(lock);
1268 ASSERT(list_link_active(&ab->b_arc_node));
1269 list_remove(list, ab);
1270 if (GHOST_STATE(ab->b_state)) {
1271 ASSERT0(ab->b_datacnt);
1272 ASSERT3P(ab->b_buf, ==, NULL);
1273 delta = ab->b_size;
1274 }
1275 ASSERT(delta > 0);
1276 ASSERT3U(*size, >=, delta);
1277 atomic_add_64(size, -delta);
1278 mutex_exit(lock);
1279 /* remove the prefetch flag if we get a reference */
1280 if (ab->b_flags & ARC_PREFETCH)
1281 ab->b_flags &= ~ARC_PREFETCH;
1282 }
1283}
1284
1285static int
1286remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1287{
1288 int cnt;
1289 arc_state_t *state = ab->b_state;
1290
1291 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1292 ASSERT(!GHOST_STATE(state));
1293
1294 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1295 (state != arc_anon)) {
1296 uint64_t *size = &state->arcs_lsize[ab->b_type];
1297 list_t *list;
1298 kmutex_t *lock;
1299
1300 get_buf_info(ab, state, &list, &lock);
1301 ASSERT(!MUTEX_HELD(lock));
1302 mutex_enter(lock);
1303 ASSERT(!list_link_active(&ab->b_arc_node));
1304 list_insert_head(list, ab);
1305 ASSERT(ab->b_datacnt > 0);
1306 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1307 mutex_exit(lock);
1308 }
1309 return (cnt);
1310}
1311
1312/*
1313 * Move the supplied buffer to the indicated state. The mutex
1314 * for the buffer must be held by the caller.
1315 */
1316static void
1317arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1318{
1319 arc_state_t *old_state = ab->b_state;
1320 int64_t refcnt = refcount_count(&ab->b_refcnt);
1321 uint64_t from_delta, to_delta;
1322 list_t *list;
1323 kmutex_t *lock;
1324
1325 ASSERT(MUTEX_HELD(hash_lock));
1326 ASSERT3P(new_state, !=, old_state);
1327 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1328 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1329 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1330
1331 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1332
1333 /*
1334 * If this buffer is evictable, transfer it from the
1335 * old state list to the new state list.
1336 */
1337 if (refcnt == 0) {
1338 if (old_state != arc_anon) {
1339 int use_mutex;
1340 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1341
1342 get_buf_info(ab, old_state, &list, &lock);
1343 use_mutex = !MUTEX_HELD(lock);
1344 if (use_mutex)
1345 mutex_enter(lock);
1346
1347 ASSERT(list_link_active(&ab->b_arc_node));
1348 list_remove(list, ab);
1349
1350 /*
1351 * If prefetching out of the ghost cache,
1352 * we will have a non-zero datacnt.
1353 */
1354 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1355 /* ghost elements have a ghost size */
1356 ASSERT(ab->b_buf == NULL);
1357 from_delta = ab->b_size;
1358 }
1359 ASSERT3U(*size, >=, from_delta);
1360 atomic_add_64(size, -from_delta);
1361
1362 if (use_mutex)
1363 mutex_exit(lock);
1364 }
1365 if (new_state != arc_anon) {
1366 int use_mutex;
1367 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1368
1369 get_buf_info(ab, new_state, &list, &lock);
1370 use_mutex = !MUTEX_HELD(lock);
1371 if (use_mutex)
1372 mutex_enter(lock);
1373
1374 list_insert_head(list, ab);
1375
1376 /* ghost elements have a ghost size */
1377 if (GHOST_STATE(new_state)) {
1378 ASSERT(ab->b_datacnt == 0);
1379 ASSERT(ab->b_buf == NULL);
1380 to_delta = ab->b_size;
1381 }
1382 atomic_add_64(size, to_delta);
1383
1384 if (use_mutex)
1385 mutex_exit(lock);
1386 }
1387 }
1388
1389 ASSERT(!BUF_EMPTY(ab));
1390 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1391 buf_hash_remove(ab);
1392
1393 /* adjust state sizes */
1394 if (to_delta)
1395 atomic_add_64(&new_state->arcs_size, to_delta);
1396 if (from_delta) {
1397 ASSERT3U(old_state->arcs_size, >=, from_delta);
1398 atomic_add_64(&old_state->arcs_size, -from_delta);
1399 }
1400 ab->b_state = new_state;
1401
1402 /* adjust l2arc hdr stats */
1403 if (new_state == arc_l2c_only)
1404 l2arc_hdr_stat_add();
1405 else if (old_state == arc_l2c_only)
1406 l2arc_hdr_stat_remove();
1407}
1408
1409void
1410arc_space_consume(uint64_t space, arc_space_type_t type)
1411{
1412 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1413
1414 switch (type) {
1415 case ARC_SPACE_DATA:
1416 ARCSTAT_INCR(arcstat_data_size, space);
1417 break;
1418 case ARC_SPACE_OTHER:
1419 ARCSTAT_INCR(arcstat_other_size, space);
1420 break;
1421 case ARC_SPACE_HDRS:
1422 ARCSTAT_INCR(arcstat_hdr_size, space);
1423 break;
1424 case ARC_SPACE_L2HDRS:
1425 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1426 break;
1427 }
1428
1429 atomic_add_64(&arc_meta_used, space);
1430 atomic_add_64(&arc_size, space);
1431}
1432
1433void
1434arc_space_return(uint64_t space, arc_space_type_t type)
1435{
1436 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1437
1438 switch (type) {
1439 case ARC_SPACE_DATA:
1440 ARCSTAT_INCR(arcstat_data_size, -space);
1441 break;
1442 case ARC_SPACE_OTHER:
1443 ARCSTAT_INCR(arcstat_other_size, -space);
1444 break;
1445 case ARC_SPACE_HDRS:
1446 ARCSTAT_INCR(arcstat_hdr_size, -space);
1447 break;
1448 case ARC_SPACE_L2HDRS:
1449 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1450 break;
1451 }
1452
1453 ASSERT(arc_meta_used >= space);
1454 if (arc_meta_max < arc_meta_used)
1455 arc_meta_max = arc_meta_used;
1456 atomic_add_64(&arc_meta_used, -space);
1457 ASSERT(arc_size >= space);
1458 atomic_add_64(&arc_size, -space);
1459}
1460
1461void *
1462arc_data_buf_alloc(uint64_t size)
1463{
1464 if (arc_evict_needed(ARC_BUFC_DATA))
1465 cv_signal(&arc_reclaim_thr_cv);
1466 atomic_add_64(&arc_size, size);
1467 return (zio_data_buf_alloc(size));
1468}
1469
1470void
1471arc_data_buf_free(void *buf, uint64_t size)
1472{
1473 zio_data_buf_free(buf, size);
1474 ASSERT(arc_size >= size);
1475 atomic_add_64(&arc_size, -size);
1476}
1477
1478arc_buf_t *
1479arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1480{
1481 arc_buf_hdr_t *hdr;
1482 arc_buf_t *buf;
1483
1484 ASSERT3U(size, >, 0);
1485 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1486 ASSERT(BUF_EMPTY(hdr));
1487 hdr->b_size = size;
1488 hdr->b_type = type;
1489 hdr->b_spa = spa_load_guid(spa);
1490 hdr->b_state = arc_anon;
1491 hdr->b_arc_access = 0;
1492 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1493 buf->b_hdr = hdr;
1494 buf->b_data = NULL;
1495 buf->b_efunc = NULL;
1496 buf->b_private = NULL;
1497 buf->b_next = NULL;
1498 hdr->b_buf = buf;
1499 arc_get_data_buf(buf);
1500 hdr->b_datacnt = 1;
1501 hdr->b_flags = 0;
1502 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1503 (void) refcount_add(&hdr->b_refcnt, tag);
1504
1505 return (buf);
1506}
1507
1508static char *arc_onloan_tag = "onloan";
1509
1510/*
1511 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1512 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1513 * buffers must be returned to the arc before they can be used by the DMU or
1514 * freed.
1515 */
1516arc_buf_t *
1517arc_loan_buf(spa_t *spa, int size)
1518{
1519 arc_buf_t *buf;
1520
1521 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1522
1523 atomic_add_64(&arc_loaned_bytes, size);
1524 return (buf);
1525}
1526
1527/*
1528 * Return a loaned arc buffer to the arc.
1529 */
1530void
1531arc_return_buf(arc_buf_t *buf, void *tag)
1532{
1533 arc_buf_hdr_t *hdr = buf->b_hdr;
1534
1535 ASSERT(buf->b_data != NULL);
1536 (void) refcount_add(&hdr->b_refcnt, tag);
1537 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1538
1539 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1540}
1541
1542/* Detach an arc_buf from a dbuf (tag) */
1543void
1544arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1545{
1546 arc_buf_hdr_t *hdr;
1547
1548 ASSERT(buf->b_data != NULL);
1549 hdr = buf->b_hdr;
1550 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1551 (void) refcount_remove(&hdr->b_refcnt, tag);
1552 buf->b_efunc = NULL;
1553 buf->b_private = NULL;
1554
1555 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1556}
1557
1558static arc_buf_t *
1559arc_buf_clone(arc_buf_t *from)
1560{
1561 arc_buf_t *buf;
1562 arc_buf_hdr_t *hdr = from->b_hdr;
1563 uint64_t size = hdr->b_size;
1564
1565 ASSERT(hdr->b_state != arc_anon);
1566
1567 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1568 buf->b_hdr = hdr;
1569 buf->b_data = NULL;
1570 buf->b_efunc = NULL;
1571 buf->b_private = NULL;
1572 buf->b_next = hdr->b_buf;
1573 hdr->b_buf = buf;
1574 arc_get_data_buf(buf);
1575 bcopy(from->b_data, buf->b_data, size);
1576
1577 /*
1578 * This buffer already exists in the arc so create a duplicate
1579 * copy for the caller. If the buffer is associated with user data
1580 * then track the size and number of duplicates. These stats will be
1581 * updated as duplicate buffers are created and destroyed.
1582 */
1583 if (hdr->b_type == ARC_BUFC_DATA) {
1584 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1585 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1586 }
1587 hdr->b_datacnt += 1;
1588 return (buf);
1589}
1590
1591void
1592arc_buf_add_ref(arc_buf_t *buf, void* tag)
1593{
1594 arc_buf_hdr_t *hdr;
1595 kmutex_t *hash_lock;
1596
1597 /*
1598 * Check to see if this buffer is evicted. Callers
1599 * must verify b_data != NULL to know if the add_ref
1600 * was successful.
1601 */
1602 mutex_enter(&buf->b_evict_lock);
1603 if (buf->b_data == NULL) {
1604 mutex_exit(&buf->b_evict_lock);
1605 return;
1606 }
1607 hash_lock = HDR_LOCK(buf->b_hdr);
1608 mutex_enter(hash_lock);
1609 hdr = buf->b_hdr;
1610 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1611 mutex_exit(&buf->b_evict_lock);
1612
1613 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1614 add_reference(hdr, hash_lock, tag);
1615 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1616 arc_access(hdr, hash_lock);
1617 mutex_exit(hash_lock);
1618 ARCSTAT_BUMP(arcstat_hits);
1619 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1620 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1621 data, metadata, hits);
1622}
1623
1624/*
1625 * Free the arc data buffer. If it is an l2arc write in progress,
1626 * the buffer is placed on l2arc_free_on_write to be freed later.
1627 */
1628static void
1629arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1630{
1631 arc_buf_hdr_t *hdr = buf->b_hdr;
1632
1633 if (HDR_L2_WRITING(hdr)) {
1634 l2arc_data_free_t *df;
1635 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1636 df->l2df_data = buf->b_data;
1637 df->l2df_size = hdr->b_size;
1638 df->l2df_func = free_func;
1639 mutex_enter(&l2arc_free_on_write_mtx);
1640 list_insert_head(l2arc_free_on_write, df);
1641 mutex_exit(&l2arc_free_on_write_mtx);
1642 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1643 } else {
1644 free_func(buf->b_data, hdr->b_size);
1645 }
1646}
1647
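/*
 * Free the data held by an arc_buf_t (unless it is being recycled) and
 * update the state's size accounting; if 'all' is set, also unlink the buf
 * from its header and return it to the buf cache.
 */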
1648static void
1649arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1650{
1651 arc_buf_t **bufp;
1652
1653 /* free up data associated with the buf */
1654 if (buf->b_data) {
1655 arc_state_t *state = buf->b_hdr->b_state;
1656 uint64_t size = buf->b_hdr->b_size;
1657 arc_buf_contents_t type = buf->b_hdr->b_type;
1658
1659 arc_cksum_verify(buf);
1660#ifdef illumos
1661 arc_buf_unwatch(buf);
1662#endif /* illumos */
1663
1664 if (!recycle) {
1665 if (type == ARC_BUFC_METADATA) {
1666 arc_buf_data_free(buf, zio_buf_free);
1667 arc_space_return(size, ARC_SPACE_DATA);
1668 } else {
1669 ASSERT(type == ARC_BUFC_DATA);
1670 arc_buf_data_free(buf, zio_data_buf_free);
1671 ARCSTAT_INCR(arcstat_data_size, -size);
1672 atomic_add_64(&arc_size, -size);
1673 }
1674 }
1675 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1676 uint64_t *cnt = &state->arcs_lsize[type];
1677
1678 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1679 ASSERT(state != arc_anon);
1680
1681 ASSERT3U(*cnt, >=, size);
1682 atomic_add_64(cnt, -size);
1683 }
1684 ASSERT3U(state->arcs_size, >=, size);
1685 atomic_add_64(&state->arcs_size, -size);
1686 buf->b_data = NULL;
1687
1688 /*
1689 * If we're destroying a duplicate buffer make sure
1690 * that the appropriate statistics are updated.
1691 */
1692 if (buf->b_hdr->b_datacnt > 1 &&
1693 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1694 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1695 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1696 }
1697 ASSERT(buf->b_hdr->b_datacnt > 0);
1698 buf->b_hdr->b_datacnt -= 1;
1699 }
1700
1701 /* only remove the buf if requested */
1702 if (!all)
1703 return;
1704
1705 /* remove the buf from the hdr list */
1706 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1707 continue;
1708 *bufp = buf->b_next;
1709 buf->b_next = NULL;
1710
1711 ASSERT(buf->b_efunc == NULL);
1712
1713 /* clean up the buf */
1714 buf->b_hdr = NULL;
1715 kmem_cache_free(buf_cache, buf);
1716}
1717
1718static void
1719arc_hdr_destroy(arc_buf_hdr_t *hdr)
1720{
1721 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1722 ASSERT3P(hdr->b_state, ==, arc_anon);
1723 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1724 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1725
1726 if (l2hdr != NULL) {
1727 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1728 /*
1729 * To prevent arc_free() and l2arc_evict() from
1730 * attempting to free the same buffer at the same time,
1731 * a FREE_IN_PROGRESS flag is given to arc_free() to
1732 * give it priority. l2arc_evict() can't destroy this
1733 * header while we are waiting on l2arc_buflist_mtx.
1734 *
1735 * The hdr may be removed from l2ad_buflist before we
1736 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1737 */
1738 if (!buflist_held) {
1739 mutex_enter(&l2arc_buflist_mtx);
1740 l2hdr = hdr->b_l2hdr;
1741 }
1742
1743 if (l2hdr != NULL) {
1744 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1745 hdr->b_size, 0);
1746 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1747 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1748 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1749 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
1750 -l2hdr->b_asize, 0, 0);
1751 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1752 if (hdr->b_state == arc_l2c_only)
1753 l2arc_hdr_stat_remove();
1754 hdr->b_l2hdr = NULL;
1755 }
1756
1757 if (!buflist_held)
1758 mutex_exit(&l2arc_buflist_mtx);
1759 }
1760
1761 if (!BUF_EMPTY(hdr)) {
1762 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1763 buf_discard_identity(hdr);
1764 }
1765 while (hdr->b_buf) {
1766 arc_buf_t *buf = hdr->b_buf;
1767
1768 if (buf->b_efunc) {
1769 mutex_enter(&arc_eviction_mtx);
1770 mutex_enter(&buf->b_evict_lock);
1771 ASSERT(buf->b_hdr != NULL);
1772 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1773 hdr->b_buf = buf->b_next;
1774 buf->b_hdr = &arc_eviction_hdr;
1775 buf->b_next = arc_eviction_list;
1776 arc_eviction_list = buf;
1777 mutex_exit(&buf->b_evict_lock);
1778 mutex_exit(&arc_eviction_mtx);
1779 } else {
1780 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1781 }
1782 }
1783 if (hdr->b_freeze_cksum != NULL) {
1784 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1785 hdr->b_freeze_cksum = NULL;
1786 }
1787 if (hdr->b_thawed) {
1788 kmem_free(hdr->b_thawed, 1);
1789 hdr->b_thawed = NULL;
1790 }
1791
1792 ASSERT(!list_link_active(&hdr->b_arc_node));
1793 ASSERT3P(hdr->b_hash_next, ==, NULL);
1794 ASSERT3P(hdr->b_acb, ==, NULL);
1795 kmem_cache_free(hdr_cache, hdr);
1796}
1797
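/*
 * Free a buffer owned by the caller. For hashed (non-anonymous) headers the
 * reference is dropped under the hash lock; for anonymous headers the header
 * is destroyed once any in-flight write completes and the last reference is
 * gone.
 */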
1798void
1799arc_buf_free(arc_buf_t *buf, void *tag)
1800{
1801 arc_buf_hdr_t *hdr = buf->b_hdr;
1802 int hashed = hdr->b_state != arc_anon;
1803
1804 ASSERT(buf->b_efunc == NULL);
1805 ASSERT(buf->b_data != NULL);
1806
1807 if (hashed) {
1808 kmutex_t *hash_lock = HDR_LOCK(hdr);
1809
1810 mutex_enter(hash_lock);
1811 hdr = buf->b_hdr;
1812 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1813
1814 (void) remove_reference(hdr, hash_lock, tag);
1815 if (hdr->b_datacnt > 1) {
1816 arc_buf_destroy(buf, FALSE, TRUE);
1817 } else {
1818 ASSERT(buf == hdr->b_buf);
1819 ASSERT(buf->b_efunc == NULL);
1820 hdr->b_flags |= ARC_BUF_AVAILABLE;
1821 }
1822 mutex_exit(hash_lock);
1823 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1824 int destroy_hdr;
1825 /*
1826 * We are in the middle of an async write. Don't destroy
1827 * this buffer unless the write completes before we finish
1828 * decrementing the reference count.
1829 */
1830 mutex_enter(&arc_eviction_mtx);
1831 (void) remove_reference(hdr, NULL, tag);
1832 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1833 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1834 mutex_exit(&arc_eviction_mtx);
1835 if (destroy_hdr)
1836 arc_hdr_destroy(hdr);
1837 } else {
1838 if (remove_reference(hdr, NULL, tag) > 0)
1839 arc_buf_destroy(buf, FALSE, TRUE);
1840 else
1841 arc_hdr_destroy(hdr);
1842 }
1843}
1844
1845boolean_t
1846arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1847{
1848 arc_buf_hdr_t *hdr = buf->b_hdr;
1849 kmutex_t *hash_lock = HDR_LOCK(hdr);
1850 boolean_t no_callback = (buf->b_efunc == NULL);
1851
1852 if (hdr->b_state == arc_anon) {
1853 ASSERT(hdr->b_datacnt == 1);
1854 arc_buf_free(buf, tag);
1855 return (no_callback);
1856 }
1857
1858 mutex_enter(hash_lock);
1859 hdr = buf->b_hdr;
1860 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1861 ASSERT(hdr->b_state != arc_anon);
1862 ASSERT(buf->b_data != NULL);
1863
1864 (void) remove_reference(hdr, hash_lock, tag);
1865 if (hdr->b_datacnt > 1) {
1866 if (no_callback)
1867 arc_buf_destroy(buf, FALSE, TRUE);
1868 } else if (no_callback) {
1869 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1870 ASSERT(buf->b_efunc == NULL);
1871 hdr->b_flags |= ARC_BUF_AVAILABLE;
1872 }
1873 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1874 refcount_is_zero(&hdr->b_refcnt));
1875 mutex_exit(hash_lock);
1876 return (no_callback);
1877}
1878
1879int
1880arc_buf_size(arc_buf_t *buf)
1881{
1882 return (buf->b_hdr->b_size);
1883}
1884
1885/*
1886 * Called from the DMU to determine if the current buffer should be
1887 * evicted. In order to ensure proper locking, the eviction must be initiated
1888 * from the DMU. Return true if the buffer is associated with user data and
1889 * duplicate buffers still exist.
1890 */
1891boolean_t
1892arc_buf_eviction_needed(arc_buf_t *buf)
1893{
1894 arc_buf_hdr_t *hdr;
1895 boolean_t evict_needed = B_FALSE;
1896
1897 if (zfs_disable_dup_eviction)
1898 return (B_FALSE);
1899
1900 mutex_enter(&buf->b_evict_lock);
1901 hdr = buf->b_hdr;
1902 if (hdr == NULL) {
1903 /*
1904 * We are in arc_do_user_evicts(); let that function
1905 * perform the eviction.
1906 */
1907 ASSERT(buf->b_data == NULL);
1908 mutex_exit(&buf->b_evict_lock);
1909 return (B_FALSE);
1910 } else if (buf->b_data == NULL) {
1911 /*
1912 * We have already been added to the arc eviction list;
1913 * recommend eviction.
1914 */
1915 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1916 mutex_exit(&buf->b_evict_lock);
1917 return (B_TRUE);
1918 }
1919
1920 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1921 evict_needed = B_TRUE;
1922
1923 mutex_exit(&buf->b_evict_lock);
1924 return (evict_needed);
1925}
1926
1927/*
1928 * Evict buffers from list until we've removed the specified number of
1929 * bytes. Move the removed buffers to the appropriate evict state.
1930 * If the recycle flag is set, then attempt to "recycle" a buffer:
1931 * - look for a buffer to evict that is `bytes' long.
1932 * - return the data block from this buffer rather than freeing it.
1933 * This flag is used by callers that are trying to make space for a
1934 * new buffer in a full arc cache.
1935 *
1936 * This function makes a "best effort". It skips over any buffers
1937 * it can't get a hash_lock on, and so may not catch all candidates.
1938 * It may also return without evicting as much space as requested.
1939 */
1940static void *
1941arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1942 arc_buf_contents_t type)
1943{
1944 arc_state_t *evicted_state;
1945 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1946 int64_t bytes_remaining;
1947 arc_buf_hdr_t *ab, *ab_prev = NULL;
1948 list_t *evicted_list, *list, *evicted_list_start, *list_start;
1949 kmutex_t *lock, *evicted_lock;
1950 kmutex_t *hash_lock;
1951 boolean_t have_lock;
1952 void *stolen = NULL;
1953 arc_buf_hdr_t marker = { 0 };
1954 int count = 0;
1955 static int evict_metadata_offset, evict_data_offset;
1956 int i, idx, offset, list_count, lists;
1957
1958 ASSERT(state == arc_mru || state == arc_mfu);
1959
1960 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1961
1962 if (type == ARC_BUFC_METADATA) {
1963 offset = 0;
1964 list_count = ARC_BUFC_NUMMETADATALISTS;
1965 list_start = &state->arcs_lists[0];
1966 evicted_list_start = &evicted_state->arcs_lists[0];
1967 idx = evict_metadata_offset;
1968 } else {
1969 offset = ARC_BUFC_NUMMETADATALISTS;
1970 list_start = &state->arcs_lists[offset];
1971 evicted_list_start = &evicted_state->arcs_lists[offset];
1972 list_count = ARC_BUFC_NUMDATALISTS;
1973 idx = evict_data_offset;
1974 }
1975 bytes_remaining = evicted_state->arcs_lsize[type];
1976 lists = 0;
1977
1978evict_start:
1979 list = &list_start[idx];
1980 evicted_list = &evicted_list_start[idx];
1981 lock = ARCS_LOCK(state, (offset + idx));
1982 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
1983
1984 mutex_enter(lock);
1985 mutex_enter(evicted_lock);
1986
1987 for (ab = list_tail(list); ab; ab = ab_prev) {
1988 ab_prev = list_prev(list, ab);
1989 bytes_remaining -= (ab->b_size * ab->b_datacnt);
1990 /* prefetch buffers have a minimum lifespan */
1991 if (HDR_IO_IN_PROGRESS(ab) ||
1992 (spa && ab->b_spa != spa) ||
1993 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1994 ddi_get_lbolt() - ab->b_arc_access <
1995 arc_min_prefetch_lifespan)) {
1996 skipped++;
1997 continue;
1998 }
1999 /* "lookahead" for better eviction candidate */
2000 if (recycle && ab->b_size != bytes &&
2001 ab_prev && ab_prev->b_size == bytes)
2002 continue;
2003
2004 /* ignore markers */
2005 if (ab->b_spa == 0)
2006 continue;
2007
2008 /*
2009 * It may take a long time to evict all the bufs requested.
2010 * To avoid blocking all arc activity, periodically drop
2011 * the arcs_mtx and give other threads a chance to run
2012 * before reacquiring the lock.
2013 *
2014 * If we are looking for a buffer to recycle, we are in
2015 * the hot code path, so don't sleep.
2016 */
2017 if (!recycle && count++ > arc_evict_iterations) {
2018 list_insert_after(list, ab, &marker);
2019 mutex_exit(evicted_lock);
2020 mutex_exit(lock);
2021 kpreempt(KPREEMPT_SYNC);
2022 mutex_enter(lock);
2023 mutex_enter(evicted_lock);
2024 ab_prev = list_prev(list, &marker);
2025 list_remove(list, &marker);
2026 count = 0;
2027 continue;
2028 }
2029
2030 hash_lock = HDR_LOCK(ab);
2031 have_lock = MUTEX_HELD(hash_lock);
2032 if (have_lock || mutex_tryenter(hash_lock)) {
2033 ASSERT0(refcount_count(&ab->b_refcnt));
2034 ASSERT(ab->b_datacnt > 0);
2035 while (ab->b_buf) {
2036 arc_buf_t *buf = ab->b_buf;
2037 if (!mutex_tryenter(&buf->b_evict_lock)) {
2038 missed += 1;
2039 break;
2040 }
2041 if (buf->b_data) {
2042 bytes_evicted += ab->b_size;
2043 if (recycle && ab->b_type == type &&
2044 ab->b_size == bytes &&
2045 !HDR_L2_WRITING(ab)) {
2046 stolen = buf->b_data;
2047 recycle = FALSE;
2048 }
2049 }
2050 if (buf->b_efunc) {
2051 mutex_enter(&arc_eviction_mtx);
2052 arc_buf_destroy(buf,
2053 buf->b_data == stolen, FALSE);
2054 ab->b_buf = buf->b_next;
2055 buf->b_hdr = &arc_eviction_hdr;
2056 buf->b_next = arc_eviction_list;
2057 arc_eviction_list = buf;
2058 mutex_exit(&arc_eviction_mtx);
2059 mutex_exit(&buf->b_evict_lock);
2060 } else {
2061 mutex_exit(&buf->b_evict_lock);
2062 arc_buf_destroy(buf,
2063 buf->b_data == stolen, TRUE);
2064 }
2065 }
2066
2067 if (ab->b_l2hdr) {
2068 ARCSTAT_INCR(arcstat_evict_l2_cached,
2069 ab->b_size);
2070 } else {
2071 if (l2arc_write_eligible(ab->b_spa, ab)) {
2072 ARCSTAT_INCR(arcstat_evict_l2_eligible,
2073 ab->b_size);
2074 } else {
2075 ARCSTAT_INCR(
2076 arcstat_evict_l2_ineligible,
2077 ab->b_size);
2078 }
2079 }
2080
2081 if (ab->b_datacnt == 0) {
2082 arc_change_state(evicted_state, ab, hash_lock);
2083 ASSERT(HDR_IN_HASH_TABLE(ab));
2084 ab->b_flags |= ARC_IN_HASH_TABLE;
2085 ab->b_flags &= ~ARC_BUF_AVAILABLE;
2086 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2087 }
2088 if (!have_lock)
2089 mutex_exit(hash_lock);
2090 if (bytes >= 0 && bytes_evicted >= bytes)
2091 break;
2092 if (bytes_remaining > 0) {
2093 mutex_exit(evicted_lock);
2094 mutex_exit(lock);
2095 idx = ((idx + 1) & (list_count - 1));
2096 lists++;
2097 goto evict_start;
2098 }
2099 } else {
2100 missed += 1;
2101 }
2102 }
2103
2104 mutex_exit(evicted_lock);
2105 mutex_exit(lock);
2106
2107 idx = ((idx + 1) & (list_count - 1));
2108 lists++;
2109
2110 if (bytes_evicted < bytes) {
2111 if (lists < list_count)
2112 goto evict_start;
2113 else
2114 dprintf("only evicted %lld bytes from %x",
2115 (longlong_t)bytes_evicted, state);
2116 }
2117 if (type == ARC_BUFC_METADATA)
2118 evict_metadata_offset = idx;
2119 else
2120 evict_data_offset = idx;
2121
2122 if (skipped)
2123 ARCSTAT_INCR(arcstat_evict_skip, skipped);
2124
2125 if (missed)
2126 ARCSTAT_INCR(arcstat_mutex_miss, missed);
2127
2128 /*
2129 * Note: we have just evicted some data into the ghost state,
2130 * potentially putting the ghost size over the desired size. Rather
2131 * than evicting from the ghost list in this hot code path, leave
2132 * this chore to the arc_reclaim_thread().
2133 */
2134
2135 if (stolen)
2136 ARCSTAT_BUMP(arcstat_stolen);
2137 return (stolen);
2138}
2139
2140/*
2141 * Remove buffers from list until we've removed the specified number of
2142 * bytes. Destroy the buffers that are removed.
2143 */
2144static void
2145arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2146{
2147 arc_buf_hdr_t *ab, *ab_prev;
2148 arc_buf_hdr_t marker = { 0 };
2149 list_t *list, *list_start;
2150 kmutex_t *hash_lock, *lock;
2151 uint64_t bytes_deleted = 0;
2152 uint64_t bufs_skipped = 0;
2153 int count = 0;
2154 static int evict_offset;
2155 int list_count, idx = evict_offset;
2156 int offset, lists = 0;
2157
2158 ASSERT(GHOST_STATE(state));
2159
2160 /*
2161 * data lists come after metadata lists
2162 */
2163 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2164 list_count = ARC_BUFC_NUMDATALISTS;
2165 offset = ARC_BUFC_NUMMETADATALISTS;
2166
2167evict_start:
2168 list = &list_start[idx];
2169 lock = ARCS_LOCK(state, idx + offset);
2170
2171 mutex_enter(lock);
2172 for (ab = list_tail(list); ab; ab = ab_prev) {
2173 ab_prev = list_prev(list, ab);
2174 if (ab->b_type > ARC_BUFC_NUMTYPES)
2175 panic("invalid ab=%p", (void *)ab);
2176 if (spa && ab->b_spa != spa)
2177 continue;
2178
2179 /* ignore markers */
2180 if (ab->b_spa == 0)
2181 continue;
2182
2183 hash_lock = HDR_LOCK(ab);
2184 /* caller may be trying to modify this buffer, skip it */
2185 if (MUTEX_HELD(hash_lock))
2186 continue;
2187
2188 /*
2189 * It may take a long time to evict all the bufs requested.
2190 * To avoid blocking all arc activity, periodically drop
2191 * the arcs_mtx and give other threads a chance to run
2192 * before reacquiring the lock.
2193 */
2194 if (count++ > arc_evict_iterations) {
2195 list_insert_after(list, ab, &marker);
2196 mutex_exit(lock);
2197 kpreempt(KPREEMPT_SYNC);
2198 mutex_enter(lock);
2199 ab_prev = list_prev(list, &marker);
2200 list_remove(list, &marker);
2201 count = 0;
2202 continue;
2203 }
2204 if (mutex_tryenter(hash_lock)) {
2205 ASSERT(!HDR_IO_IN_PROGRESS(ab));
2206 ASSERT(ab->b_buf == NULL);
2207 ARCSTAT_BUMP(arcstat_deleted);
2208 bytes_deleted += ab->b_size;
2209
2210 if (ab->b_l2hdr != NULL) {
2211 /*
2212 * This buffer is cached on the 2nd Level ARC;
2213 * don't destroy the header.
2214 */
2215 arc_change_state(arc_l2c_only, ab, hash_lock);
2216 mutex_exit(hash_lock);
2217 } else {
2218 arc_change_state(arc_anon, ab, hash_lock);
2219 mutex_exit(hash_lock);
2220 arc_hdr_destroy(ab);
2221 }
2222
2223 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2224 if (bytes >= 0 && bytes_deleted >= bytes)
2225 break;
2226 } else if (bytes < 0) {
2227 /*
2228 * Insert a list marker and then wait for the
2229 * hash lock to become available. Once it is
2230 * available, restart from where we left off.
2231 */
2232 list_insert_after(list, ab, &marker);
2233 mutex_exit(lock);
2234 mutex_enter(hash_lock);
2235 mutex_exit(hash_lock);
2236 mutex_enter(lock);
2237 ab_prev = list_prev(list, &marker);
2238 list_remove(list, &marker);
2239 } else {
2240 bufs_skipped += 1;
2241 }
2242
2243 }
2244 mutex_exit(lock);
2245 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2246 lists++;
2247
2248 if (lists < list_count)
2249 goto evict_start;
2250
2251 evict_offset = idx;
2252 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2253 (bytes < 0 || bytes_deleted < bytes)) {
2254 list_start = &state->arcs_lists[0];
2255 list_count = ARC_BUFC_NUMMETADATALISTS;
2256 offset = lists = 0;
2257 goto evict_start;
2258 }
2259
2260 if (bufs_skipped) {
2261 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2262 ASSERT(bytes >= 0);
2263 }
2264
2265 if (bytes_deleted < bytes)
2266 dprintf("only deleted %lld bytes from %p",
2267 (longlong_t)bytes_deleted, state);
2268}
2269
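/*
 * Evict toward the ARC's targets: trim MRU (data, then metadata) so that
 * anon + MRU stays near arc_p, trim MFU so the total size comes back down
 * to arc_c, and finally trim the ghost lists against arc_c.
 */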
2270static void
2271arc_adjust(void)
2272{
2273 int64_t adjustment, delta;
2274
2275 /*
2276 * Adjust MRU size
2277 */
2278
2279 adjustment = MIN((int64_t)(arc_size - arc_c),
2280 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2281 arc_p));
2282
2283 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2284 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2285 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2286 adjustment -= delta;
2287 }
2288
2289 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2290 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2291 (void) arc_evict(arc_mru, 0, delta, FALSE,
2292 ARC_BUFC_METADATA);
2293 }
2294
2295 /*
2296 * Adjust MFU size
2297 */
2298
2299 adjustment = arc_size - arc_c;
2300
2301 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2302 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2303 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2304 adjustment -= delta;
2305 }
2306
2307 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2308 int64_t delta = MIN(adjustment,
2309 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2310 (void) arc_evict(arc_mfu, 0, delta, FALSE,
2311 ARC_BUFC_METADATA);
2312 }
2313
2314 /*
2315 * Adjust ghost lists
2316 */
2317
2318 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2319
2320 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2321 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2322 arc_evict_ghost(arc_mru_ghost, 0, delta);
2323 }
2324
2325 adjustment =
2326 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2327
2328 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2329 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2330 arc_evict_ghost(arc_mfu_ghost, 0, delta);
2331 }
2332}
2333
2334static void
2335arc_do_user_evicts(void)
2336{
2337 static arc_buf_t *tmp_arc_eviction_list;
2338
2339 /*
2340 * Move the list over to avoid a lock order reversal (LOR)
2341 */
2342restart:
2343 mutex_enter(&arc_eviction_mtx);
2344 tmp_arc_eviction_list = arc_eviction_list;
2345 arc_eviction_list = NULL;
2346 mutex_exit(&arc_eviction_mtx);
2347
2348 while (tmp_arc_eviction_list != NULL) {
2349 arc_buf_t *buf = tmp_arc_eviction_list;
2350 tmp_arc_eviction_list = buf->b_next;
2351 mutex_enter(&buf->b_evict_lock);
2352 buf->b_hdr = NULL;
2353 mutex_exit(&buf->b_evict_lock);
2354
2355 if (buf->b_efunc != NULL)
2356 VERIFY(buf->b_efunc(buf) == 0);
2357
2358 buf->b_efunc = NULL;
2359 buf->b_private = NULL;
2360 kmem_cache_free(buf_cache, buf);
2361 }
2362
2363 if (arc_eviction_list != NULL)
2364 goto restart;
2365}
2366
2367/*
2368 * Flush all *evictable* data from the cache for the given spa.
2369 * NOTE: this will not touch "active" (i.e. referenced) data.
2370 */
2371void
2372arc_flush(spa_t *spa)
2373{
2374 uint64_t guid = 0;
2375
2376 if (spa)
2377 guid = spa_load_guid(spa);
2378
2379 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2380 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2381 if (spa)
2382 break;
2383 }
2384 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2385 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2386 if (spa)
2387 break;
2388 }
2389 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2390 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2391 if (spa)
2392 break;
2393 }
2394 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2395 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2396 if (spa)
2397 break;
2398 }
2399
2400 arc_evict_ghost(arc_mru_ghost, guid, -1);
2401 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2402
2403 mutex_enter(&arc_reclaim_thr_lock);
2404 arc_do_user_evicts();
2405 mutex_exit(&arc_reclaim_thr_lock);
2406 ASSERT(spa || arc_eviction_list == NULL);
2407}
2408
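/*
 * Shrink the target cache size arc_c (and arc_p with it) by
 * 1/2^arc_shrink_shift, but never below arc_c_min, then evict down to the
 * new target.
 */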
2409void
2410arc_shrink(void)
2411{
2412 if (arc_c > arc_c_min) {
2413 uint64_t to_free;
2414
2416 to_free = arc_c >> arc_shrink_shift;
2420 if (arc_c > arc_c_min + to_free)
2421 atomic_add_64(&arc_c, -to_free);
2422 else
2423 arc_c = arc_c_min;
2424
2425 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2426 if (arc_c > arc_size)
2427 arc_c = MAX(arc_size, arc_c_min);
2428 if (arc_p > arc_c)
2429 arc_p = (arc_c >> 1);
2430 ASSERT(arc_c >= arc_c_min);
2431 ASSERT((int64_t)arc_p >= 0);
2432 }
2433
2434 if (arc_size > arc_c)
2435 arc_adjust();
2436}
2437
2438static int needfree = 0;
2439
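/*
 * Decide whether the ARC should give memory back to the system: our
 * vm_lowmem hook fired (needfree), the pagedaemon needs pages, or kmem
 * usage has exceeded three quarters of the kmem arena.
 */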
2440static int
2441arc_reclaim_needed(void)
2442{
2443
2444#ifdef _KERNEL
2445
2446 if (needfree)
2447 return (1);
2448
2449 /*
2450 * Cooperate with pagedaemon when it's time for it to scan
2451 * and reclaim some pages.
2452 */
2453 if (vm_paging_needed())
2454 return (1);
2455
2456#ifdef sun
2457 /*
2458 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2459 */
2460 extra = desfree;
2461
2462 /*
2463 * check that we're out of range of the pageout scanner. It starts to
2464 * schedule paging if freemem is less than lotsfree and needfree.
2465 * lotsfree is the high-water mark for pageout, and needfree is the
2466 * number of needed free pages. We add extra pages here to make sure
2467 * the scanner doesn't start up while we're freeing memory.
2468 */
2469 if (freemem < lotsfree + needfree + extra)
2470 return (1);
2471
2472 /*
2473 * check to make sure that swapfs has enough space so that anon
2474 * reservations can still succeed. anon_resvmem() checks that the
2475 * availrmem is greater than swapfs_minfree, and the number of reserved
2476 * swap pages. We also add a bit of extra here just to prevent
2477 * circumstances from getting really dire.
2478 */
2479 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2480 return (1);
2481
2482#if defined(__i386)
2483 /*
2484 * If we're on an i386 platform, it's possible that we'll exhaust the
2485 * kernel heap space before we ever run out of available physical
2486 * memory. Most checks of the size of the heap_area compare against
2487 * tune.t_minarmem, which is the minimum available real memory that we
2488 * can have in the system. However, this is generally fixed at 25 pages
2489 * which is so low that it's useless. In this comparison, we seek to
2490 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2491 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2492 * free)
2493 */
2494 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2495 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2496 return (1);
2497#endif
2498#else /* !sun */
2499 if (kmem_used() > (kmem_size() * 3) / 4)
2500 return (1);
2501#endif /* sun */
2502
2503#else
2504 if (spa_get_random(100) == 0)
2505 return (1);
2506#endif
2507 return (0);
2508}
2509
2510extern kmem_cache_t *zio_buf_cache[];
2511extern kmem_cache_t *zio_data_buf_cache[];
2512
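/*
 * Return memory to the system: when over the metadata limit, prune the
 * DNLC; if the reclaim is aggressive, shrink the ARC itself; then reap the
 * zio buffer caches and the hdr/buf caches.
 */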
2513static void
2514arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2515{
2516 size_t i;
2517 kmem_cache_t *prev_cache = NULL;
2518 kmem_cache_t *prev_data_cache = NULL;
2519
2520#ifdef _KERNEL
2521 if (arc_meta_used >= arc_meta_limit) {
2522 /*
2523 * We are exceeding our meta-data cache limit.
2524 * Purge some DNLC entries to release holds on meta-data.
2525 */
2526 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2527 }
2528#if defined(__i386)
2529 /*
2530 * Reclaim unused memory from all kmem caches.
2531 */
2532 kmem_reap();
2533#endif
2534#endif
2535
2536 /*
2537 * An aggressive reclamation will shrink the cache size as well as
2538 * reap free buffers from the arc kmem caches.
2539 */
2540 if (strat == ARC_RECLAIM_AGGR)
2541 arc_shrink();
2542
2543 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2544 if (zio_buf_cache[i] != prev_cache) {
2545 prev_cache = zio_buf_cache[i];
2546 kmem_cache_reap_now(zio_buf_cache[i]);
2547 }
2548 if (zio_data_buf_cache[i] != prev_data_cache) {
2549 prev_data_cache = zio_data_buf_cache[i];
2550 kmem_cache_reap_now(zio_data_buf_cache[i]);
2551 }
2552 }
2553 kmem_cache_reap_now(buf_cache);
2554 kmem_cache_reap_now(hdr_cache);
2555}
2556
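/*
 * Body of the ARC reclaim thread: when memory is tight, reap the kmem
 * caches (aggressively if needed), re-run arc_adjust(), process the user
 * eviction list, and then sleep for up to a second before checking again.
 */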
2557static void
2558arc_reclaim_thread(void *dummy __unused)
2559{
2560 clock_t growtime = 0;
2561 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2562 callb_cpr_t cpr;
2563
2564 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2565
2566 mutex_enter(&arc_reclaim_thr_lock);
2567 while (arc_thread_exit == 0) {
2568 if (arc_reclaim_needed()) {
2569
2570 if (arc_no_grow) {
2571 if (last_reclaim == ARC_RECLAIM_CONS) {
2572 last_reclaim = ARC_RECLAIM_AGGR;
2573 } else {
2574 last_reclaim = ARC_RECLAIM_CONS;
2575 }
2576 } else {
2577 arc_no_grow = TRUE;
2578 last_reclaim = ARC_RECLAIM_AGGR;
2579 membar_producer();
2580 }
2581
2582 /* reset the growth delay for every reclaim */
2583 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2584
2585 if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2586 /*
2587 * If needfree is TRUE our vm_lowmem hook
2588 * was called and in that case we must free some
2589 * memory, so switch to aggressive mode.
2590 */
2591 arc_no_grow = TRUE;
2592 last_reclaim = ARC_RECLAIM_AGGR;
2593 }
2594 arc_kmem_reap_now(last_reclaim);
2595 arc_warm = B_TRUE;
2596
2597 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2598 arc_no_grow = FALSE;
2599 }
2600
2601 arc_adjust();
2602
2603 if (arc_eviction_list != NULL)
2604 arc_do_user_evicts();
2605
2606#ifdef _KERNEL
2607 if (needfree) {
2608 needfree = 0;
2609 wakeup(&needfree);
2610 }
2611#endif
2612
2613 /* block until needed, or one second, whichever is shorter */
2614 CALLB_CPR_SAFE_BEGIN(&cpr);
2615 (void) cv_timedwait(&arc_reclaim_thr_cv,
2616 &arc_reclaim_thr_lock, hz);
2617 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2618 }
2619
2620 arc_thread_exit = 0;
2621 cv_broadcast(&arc_reclaim_thr_cv);
2622 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2623 thread_exit();
2624}
2625
2626/*
2627 * Adapt arc info given the number of bytes we are trying to add and
2628 * the state that we are coming from. This function is only called
2629 * when we are adding new content to the cache.
2630 */
2631static void
2632arc_adapt(int bytes, arc_state_t *state)
2633{
2634 int mult;
2635 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2636
2637 if (state == arc_l2c_only)
2638 return;
2639
2640 ASSERT(bytes > 0);
2641 /*
2642 * Adapt the target size of the MRU list:
2643 * - if we just hit in the MRU ghost list, then increase
2644 * the target size of the MRU list.
2645 * - if we just hit in the MFU ghost list, then increase
2646 * the target size of the MFU list by decreasing the
2647 * target size of the MRU list.
2648 */
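	/*
	 * For example, a hit in the MRU ghost list while arc_mfu_ghost is
	 * four times the size of arc_mru_ghost grows arc_p by 4 * bytes
	 * (the multiplier is capped at 10 and arc_p at arc_c - arc_p_min);
	 * a hit in the MFU ghost list shrinks arc_p the same way, but never
	 * below arc_p_min.
	 */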
2649 if (state == arc_mru_ghost) {
2650 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2651 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2652 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2653
2654 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2655 } else if (state == arc_mfu_ghost) {
2656 uint64_t delta;
2657
2658 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2659 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2660 mult = MIN(mult, 10);
2661
2662 delta = MIN(bytes * mult, arc_p);
2663 arc_p = MAX(arc_p_min, arc_p - delta);
2664 }
2665 ASSERT((int64_t)arc_p >= 0);
2666
2667 if (arc_reclaim_needed()) {
2668 cv_signal(&arc_reclaim_thr_cv);
2669 return;
2670 }
2671
2672 if (arc_no_grow)
2673 return;
2674
2675 if (arc_c >= arc_c_max)
2676 return;
2677
2678 /*
2679 * If we're within (2 * maxblocksize) bytes of the target
2680 * cache size, increment the target cache size
2681 */
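	/* With the default 128K maximum block size this is a 256K window. */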
2682 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2683 atomic_add_64(&arc_c, (int64_t)bytes);
2684 if (arc_c > arc_c_max)
2685 arc_c = arc_c_max;
2686 else if (state == arc_anon)
2687 atomic_add_64(&arc_p, (int64_t)bytes);
2688 if (arc_p > arc_c)
2689 arc_p = arc_c;
2690 }
2691 ASSERT((int64_t)arc_p >= 0);
2692}
2693
2694/*
2695 * Check if the cache has reached its limits and eviction is required
2696 * prior to insert.
2697 */
2698static int
2699arc_evict_needed(arc_buf_contents_t type)
2700{
2701 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2702 return (1);
2703
2704#ifdef sun
2705#ifdef _KERNEL
2706 /*
2707 * If zio data pages are being allocated out of a separate heap segment,
2708	 * then require that at least about 1/32nd of that arena's allocated
2709	 * size remain free.
2710 */
2711 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2712 vmem_size(zio_arena, VMEM_FREE) <
2713 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2714 return (1);
2715#endif
2716#endif /* sun */
2717
2718 if (arc_reclaim_needed())
2719 return (1);
2720
2721 return (arc_size > arc_c);
2722}
2723
2724/*
2725 * The buffer, supplied as the first argument, needs a data block.
2726 * So, if we are at cache max, determine which cache should be victimized.
2727 * We have the following cases:
2728 *
2729 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2730 * In this situation if we're out of space, but the resident size of the MFU is
2731 * under the limit, victimize the MFU cache to satisfy this insertion request.
2732 *
2733 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2734 * Here, we've used up all of the available space for the MRU, so we need to
2735 * evict from our own cache instead. Evict from the set of resident MRU
2736 * entries.
2737 *
2738 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2739 * c minus p represents the MFU space in the cache, since p is the size of the
2740 * cache that is dedicated to the MRU. In this situation there's still space on
2741 * the MFU side, so the MRU side needs to be victimized.
2742 *
2743 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2744 * MFU's resident set is consuming more space than it has been allotted. In
2745 * this situation, we must victimize our own cache, the MFU, for this insertion.
2746 */
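/*
 * In the code below, cases 1 and 2 map to the (arc_mru || arc_anon) branch
 * and cases 3 and 4 to the MFU branch; in each branch the ternary picks the
 * state to victimize.
 */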
2747static void
2748arc_get_data_buf(arc_buf_t *buf)
2749{
2750 arc_state_t *state = buf->b_hdr->b_state;
2751 uint64_t size = buf->b_hdr->b_size;
2752 arc_buf_contents_t type = buf->b_hdr->b_type;
2753
2754 arc_adapt(size, state);
2755
2756 /*
2757 * We have not yet reached cache maximum size,
2758 * just allocate a new buffer.
2759 */
2760 if (!arc_evict_needed(type)) {
2761 if (type == ARC_BUFC_METADATA) {
2762 buf->b_data = zio_buf_alloc(size);
2763 arc_space_consume(size, ARC_SPACE_DATA);
2764 } else {
2765 ASSERT(type == ARC_BUFC_DATA);
2766 buf->b_data = zio_data_buf_alloc(size);
2767 ARCSTAT_INCR(arcstat_data_size, size);
2768 atomic_add_64(&arc_size, size);
2769 }
2770 goto out;
2771 }
2772
2773 /*
2774 * If we are prefetching from the mfu ghost list, this buffer
2775 * will end up on the mru list; so steal space from there.
2776 */
2777 if (state == arc_mfu_ghost)
2778 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2779 else if (state == arc_mru_ghost)
2780 state = arc_mru;
2781
2782 if (state == arc_mru || state == arc_anon) {
2783 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2784 state = (arc_mfu->arcs_lsize[type] >= size &&
2785 arc_p > mru_used) ? arc_mfu : arc_mru;
2786 } else {
2787 /* MFU cases */
2788 uint64_t mfu_space = arc_c - arc_p;
2789 state = (arc_mru->arcs_lsize[type] >= size &&
2790 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2791 }
2792 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2793 if (type == ARC_BUFC_METADATA) {
2794 buf->b_data = zio_buf_alloc(size);
2795 arc_space_consume(size, ARC_SPACE_DATA);
2796 } else {
2797 ASSERT(type == ARC_BUFC_DATA);
2798 buf->b_data = zio_data_buf_alloc(size);
2799 ARCSTAT_INCR(arcstat_data_size, size);
2800 atomic_add_64(&arc_size, size);
2801 }
2802 ARCSTAT_BUMP(arcstat_recycle_miss);
2803 }
2804 ASSERT(buf->b_data != NULL);
2805out:
2806 /*
2807 * Update the state size. Note that ghost states have a
2808 * "ghost size" and so don't need to be updated.
2809 */
2810 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2811 arc_buf_hdr_t *hdr = buf->b_hdr;
2812
2813 atomic_add_64(&hdr->b_state->arcs_size, size);
2814 if (list_link_active(&hdr->b_arc_node)) {
2815 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2816 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2817 }
2818 /*
2819 * If we are growing the cache, and we are adding anonymous
2820 * data, and we have outgrown arc_p, update arc_p
2821 */
2822 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2823 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2824 arc_p = MIN(arc_c, arc_p + size);
2825 }
2826 ARCSTAT_BUMP(arcstat_allocated);
2827}
2828
2829/*
2830 * This routine is called whenever a buffer is accessed.
2831 * NOTE: the hash lock must be held by the caller; it is not dropped here.
2832 */
2833static void
2834arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2835{
2836 clock_t now;
2837
2838 ASSERT(MUTEX_HELD(hash_lock));
2839
2840 if (buf->b_state == arc_anon) {
2841 /*
2842 * This buffer is not in the cache, and does not
2843 * appear in our "ghost" list. Add the new buffer
2844 * to the MRU state.
2845 */
2846
2847 ASSERT(buf->b_arc_access == 0);
2848 buf->b_arc_access = ddi_get_lbolt();
2849 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2850 arc_change_state(arc_mru, buf, hash_lock);
2851
2852 } else if (buf->b_state == arc_mru) {
2853 now = ddi_get_lbolt();
2854
2855 /*
2856 * If this buffer is here because of a prefetch, then either:
2857 * - clear the flag if this is a "referencing" read
2858 * (any subsequent access will bump this into the MFU state).
2859 * or
2860 * - move the buffer to the head of the list if this is
2861 * another prefetch (to make it less likely to be evicted).
2862 */
2863 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2864 if (refcount_count(&buf->b_refcnt) == 0) {
2865 ASSERT(list_link_active(&buf->b_arc_node));
2866 } else {
2867 buf->b_flags &= ~ARC_PREFETCH;
2868 ARCSTAT_BUMP(arcstat_mru_hits);
2869 }
2870 buf->b_arc_access = now;
2871 return;
2872 }
2873
2874 /*
2875 * This buffer has been "accessed" only once so far,
2876 * but it is still in the cache. Move it to the MFU
2877 * state.
2878 */
2879 if (now > buf->b_arc_access + ARC_MINTIME) {
2880 /*
2881			 * More than ARC_MINTIME (currently 62ms) has passed since we
2882 * instantiated this buffer. Move it to the
2883 * most frequently used state.
2884 */
2885 buf->b_arc_access = now;
2886 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2887 arc_change_state(arc_mfu, buf, hash_lock);
2888 }
2889 ARCSTAT_BUMP(arcstat_mru_hits);
2890 } else if (buf->b_state == arc_mru_ghost) {
2891 arc_state_t *new_state;
2892 /*
2893 * This buffer has been "accessed" recently, but
2894 * was evicted from the cache. Move it to the
2895 * MFU state.
2896 */
2897
2898 if (buf->b_flags & ARC_PREFETCH) {
2899 new_state = arc_mru;
2900 if (refcount_count(&buf->b_refcnt) > 0)
2901 buf->b_flags &= ~ARC_PREFETCH;
2902 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2903 } else {
2904 new_state = arc_mfu;
2905 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2906 }
2907
2908 buf->b_arc_access = ddi_get_lbolt();
2909 arc_change_state(new_state, buf, hash_lock);
2910
2911 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2912 } else if (buf->b_state == arc_mfu) {
2913 /*
2914 * This buffer has been accessed more than once and is
2915 * still in the cache. Keep it in the MFU state.
2916 *
2917 * NOTE: an add_reference() that occurred when we did
2918 * the arc_read() will have kicked this off the list.
2919 * If it was a prefetch, we will explicitly move it to
2920 * the head of the list now.
2921 */
2922 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2923 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2924 ASSERT(list_link_active(&buf->b_arc_node));
2925 }
2926 ARCSTAT_BUMP(arcstat_mfu_hits);
2927 buf->b_arc_access = ddi_get_lbolt();
2928 } else if (buf->b_state == arc_mfu_ghost) {
2929 arc_state_t *new_state = arc_mfu;
2930 /*
2931 * This buffer has been accessed more than once but has
2932 * been evicted from the cache. Move it back to the
2933 * MFU state.
2934 */
2935
2936 if (buf->b_flags & ARC_PREFETCH) {
2937 /*
2938 * This is a prefetch access...
2939 * move this block back to the MRU state.
2940 */
2941 ASSERT0(refcount_count(&buf->b_refcnt));
2942 new_state = arc_mru;
2943 }
2944
2945 buf->b_arc_access = ddi_get_lbolt();
2946 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2947 arc_change_state(new_state, buf, hash_lock);
2948
2949 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2950 } else if (buf->b_state == arc_l2c_only) {
2951 /*
2952 * This buffer is on the 2nd Level ARC.
2953 */
2954
2955 buf->b_arc_access = ddi_get_lbolt();
2956 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2957 arc_change_state(arc_mfu, buf, hash_lock);
2958 } else {
2959 ASSERT(!"invalid arc state");
2960 }
2961}
2962
2963/* a generic arc_done_func_t which you can use */
2964/* ARGSUSED */
2965void
2966arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2967{
2968 if (zio == NULL || zio->io_error == 0)
2969 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2970 VERIFY(arc_buf_remove_ref(buf, arg));
2971}
2972
2973/* a generic arc_done_func_t */
2974void
2975arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2976{
2977 arc_buf_t **bufp = arg;
2978 if (zio && zio->io_error) {
2979 VERIFY(arc_buf_remove_ref(buf, arg));
2980 *bufp = NULL;
2981 } else {
2982 *bufp = buf;
2983 ASSERT(buf->b_data);
2984 }
2985}
2986
2987static void
2988arc_read_done(zio_t *zio)
2989{
2990 arc_buf_hdr_t *hdr;
2991 arc_buf_t *buf;
2992 arc_buf_t *abuf; /* buffer we're assigning to callback */
2993 kmutex_t *hash_lock = NULL;
2994 arc_callback_t *callback_list, *acb;
2995 int freeable = FALSE;
2996
2997 buf = zio->io_private;
2998 hdr = buf->b_hdr;
2999
3000 /*
3001 * The hdr was inserted into hash-table and removed from lists
3002 * prior to starting I/O. We should find this header, since
3003 * it's in the hash table, and it should be legit since it's
3004 * not possible to evict it during the I/O. The only possible
3005 * reason for it not to be found is if we were freed during the
3006 * read.
3007 */
3008 if (HDR_IN_HASH_TABLE(hdr)) {
3009 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3010 ASSERT3U(hdr->b_dva.dva_word[0], ==,
3011 BP_IDENTITY(zio->io_bp)->dva_word[0]);
3012 ASSERT3U(hdr->b_dva.dva_word[1], ==,
3013 BP_IDENTITY(zio->io_bp)->dva_word[1]);
3014
3015 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3016 &hash_lock);
3017
3018 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3019 hash_lock == NULL) ||
3020 (found == hdr &&
3021 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3022 (found == hdr && HDR_L2_READING(hdr)));
3023 }
3024
3025 hdr->b_flags &= ~ARC_L2_EVICTED;
3026 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
3027 hdr->b_flags &= ~ARC_L2CACHE;
3028
3029 /* byteswap if necessary */
3030 callback_list = hdr->b_acb;
3031 ASSERT(callback_list != NULL);
3032 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3033 dmu_object_byteswap_t bswap =
3034 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3035 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3036 byteswap_uint64_array :
3037 dmu_ot_byteswap[bswap].ob_func;
3038 func(buf->b_data, hdr->b_size);
3039 }
3040
3041 arc_cksum_compute(buf, B_FALSE);
3042#ifdef illumos
3043 arc_buf_watch(buf);
3044#endif /* illumos */
3045
3046 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
3047 /*
3048 * Only call arc_access on anonymous buffers. This is because
3049 * if we've issued an I/O for an evicted buffer, we've already
3050 * called arc_access (to prevent any simultaneous readers from
3051 * getting confused).
3052 */
3053 arc_access(hdr, hash_lock);
3054 }
3055
3056 /* create copies of the data buffer for the callers */
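	/*
	 * The first caller with a done func gets `buf' itself; any further
	 * callers each receive a clone (counted in arcstat_duplicate_reads).
	 */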
3057 abuf = buf;
3058 for (acb = callback_list; acb; acb = acb->acb_next) {
3059 if (acb->acb_done) {
3060 if (abuf == NULL) {
3061 ARCSTAT_BUMP(arcstat_duplicate_reads);
3062 abuf = arc_buf_clone(buf);
3063 }
3064 acb->acb_buf = abuf;
3065 abuf = NULL;
3066 }
3067 }
3068 hdr->b_acb = NULL;
3069 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3070 ASSERT(!HDR_BUF_AVAILABLE(hdr));
3071 if (abuf == buf) {
3072 ASSERT(buf->b_efunc == NULL);
3073 ASSERT(hdr->b_datacnt == 1);
3074 hdr->b_flags |= ARC_BUF_AVAILABLE;
3075 }
3076
3077 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3078
3079 if (zio->io_error != 0) {
3080 hdr->b_flags |= ARC_IO_ERROR;
3081 if (hdr->b_state != arc_anon)
3082 arc_change_state(arc_anon, hdr, hash_lock);
3083 if (HDR_IN_HASH_TABLE(hdr))
3084 buf_hash_remove(hdr);
3085 freeable = refcount_is_zero(&hdr->b_refcnt);
3086 }
3087
3088 /*
3089 * Broadcast before we drop the hash_lock to avoid the possibility
3090 * that the hdr (and hence the cv) might be freed before we get to
3091 * the cv_broadcast().
3092 */
3093 cv_broadcast(&hdr->b_cv);
3094
3095 if (hash_lock) {
3096 mutex_exit(hash_lock);
3097 } else {
3098 /*
3099 * This block was freed while we waited for the read to
3100 * complete. It has been removed from the hash table and
3101 * moved to the anonymous state (so that it won't show up
3102 * in the cache).
3103 */
3104 ASSERT3P(hdr->b_state, ==, arc_anon);
3105 freeable = refcount_is_zero(&hdr->b_refcnt);
3106 }
3107
3108 /* execute each callback and free its structure */
3109 while ((acb = callback_list) != NULL) {
3110 if (acb->acb_done)
3111 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3112
3113 if (acb->acb_zio_dummy != NULL) {
3114 acb->acb_zio_dummy->io_error = zio->io_error;
3115 zio_nowait(acb->acb_zio_dummy);
3116 }
3117
3118 callback_list = acb->acb_next;
3119 kmem_free(acb, sizeof (arc_callback_t));
3120 }
3121
3122 if (freeable)
3123 arc_hdr_destroy(hdr);
3124}
3125
3126/*
3127 * "Read" the block block at the specified DVA (in bp) via the
3128 * cache. If the block is found in the cache, invoke the provided
3129 * callback immediately and return. Note that the `zio' parameter
3130 * in the callback will be NULL in this case, since no IO was
3131 * required. If the block is not in the cache pass the read request
3132 * on to the spa with a substitute callback function, so that the
3133 * requested block will be added to the cache.
3134 *
3135 * If a read request arrives for a block that has a read in-progress,
3136 * either wait for the in-progress read to complete (and return the
3137 * results); or, if this is a read with a "done" func, add a record
3138 * to the read to invoke the "done" func when the read completes,
3139 * and return; or just return.
3140 *
3141 * arc_read_done() will invoke all the requested "done" functions
3142 * for readers of this block.
3143 */
3144int
3145arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3146 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
784 int l2rcb_flags; /* original flags */
785 enum zio_compress l2rcb_compress; /* applied compress */
786} l2arc_read_callback_t;
787
788typedef struct l2arc_write_callback {
789 l2arc_dev_t *l2wcb_dev; /* device info */
790 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
791} l2arc_write_callback_t;
792
793struct l2arc_buf_hdr {
794 /* protected by arc_buf_hdr mutex */
795 l2arc_dev_t *b_dev; /* L2ARC device */
796 uint64_t b_daddr; /* disk address, offset byte */
797 /* compression applied to buffer data */
798 enum zio_compress b_compress;
799 /* real alloc'd buffer size depending on b_compress applied */
800 int b_asize;
801 /* temporary buffer holder for in-flight compressed data */
802 void *b_tmp_cdata;
803};
804
805typedef struct l2arc_data_free {
806 /* protected by l2arc_free_on_write_mtx */
807 void *l2df_data;
808 size_t l2df_size;
809 void (*l2df_func)(void *, size_t);
810 list_node_t l2df_list_node;
811} l2arc_data_free_t;
812
813static kmutex_t l2arc_feed_thr_lock;
814static kcondvar_t l2arc_feed_thr_cv;
815static uint8_t l2arc_thread_exit;
816
817static void l2arc_read_done(zio_t *zio);
818static void l2arc_hdr_stat_add(void);
819static void l2arc_hdr_stat_remove(void);
820
821static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
822static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
823 enum zio_compress c);
824static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
825
826static uint64_t
827buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
828{
829 uint8_t *vdva = (uint8_t *)dva;
830 uint64_t crc = -1ULL;
831 int i;
832
833 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
834
835 for (i = 0; i < sizeof (dva_t); i++)
836 crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
837
838 crc ^= (spa>>8) ^ birth;
839
840 return (crc);
841}
842
843#define BUF_EMPTY(buf) \
844 ((buf)->b_dva.dva_word[0] == 0 && \
845 (buf)->b_dva.dva_word[1] == 0 && \
846 (buf)->b_cksum0 == 0)
847
848#define BUF_EQUAL(spa, dva, birth, buf) \
849 ((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
850 ((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
851 ((buf)->b_birth == birth) && ((buf)->b_spa == spa)
852
853static void
854buf_discard_identity(arc_buf_hdr_t *hdr)
855{
856 hdr->b_dva.dva_word[0] = 0;
857 hdr->b_dva.dva_word[1] = 0;
858 hdr->b_birth = 0;
859 hdr->b_cksum0 = 0;
860}
861
862static arc_buf_hdr_t *
863buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
864{
865 const dva_t *dva = BP_IDENTITY(bp);
866 uint64_t birth = BP_PHYSICAL_BIRTH(bp);
867 uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
868 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
869 arc_buf_hdr_t *buf;
870
871 mutex_enter(hash_lock);
872 for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
873 buf = buf->b_hash_next) {
874 if (BUF_EQUAL(spa, dva, birth, buf)) {
875 *lockp = hash_lock;
876 return (buf);
877 }
878 }
879 mutex_exit(hash_lock);
880 *lockp = NULL;
881 return (NULL);
882}
883
884/*
885 * Insert an entry into the hash table.  If an element equal to the new
886 * entry already exists in the table, then that existing element is
887 * returned and the new element is not inserted.
888 * Otherwise returns NULL.
889 */
890static arc_buf_hdr_t *
891buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
892{
893 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
894 kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
895 arc_buf_hdr_t *fbuf;
896 uint32_t i;
897
898 ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
899 ASSERT(buf->b_birth != 0);
900 ASSERT(!HDR_IN_HASH_TABLE(buf));
901 *lockp = hash_lock;
902 mutex_enter(hash_lock);
903 for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
904 fbuf = fbuf->b_hash_next, i++) {
905 if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
906 return (fbuf);
907 }
908
909 buf->b_hash_next = buf_hash_table.ht_table[idx];
910 buf_hash_table.ht_table[idx] = buf;
911 buf->b_flags |= ARC_IN_HASH_TABLE;
912
913 /* collect some hash table performance data */
914 if (i > 0) {
915 ARCSTAT_BUMP(arcstat_hash_collisions);
916 if (i == 1)
917 ARCSTAT_BUMP(arcstat_hash_chains);
918
919 ARCSTAT_MAX(arcstat_hash_chain_max, i);
920 }
921
922 ARCSTAT_BUMP(arcstat_hash_elements);
923 ARCSTAT_MAXSTAT(arcstat_hash_elements);
924
925 return (NULL);
926}
927
928static void
929buf_hash_remove(arc_buf_hdr_t *buf)
930{
931 arc_buf_hdr_t *fbuf, **bufp;
932 uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
933
934 ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
935 ASSERT(HDR_IN_HASH_TABLE(buf));
936
937 bufp = &buf_hash_table.ht_table[idx];
938 while ((fbuf = *bufp) != buf) {
939 ASSERT(fbuf != NULL);
940 bufp = &fbuf->b_hash_next;
941 }
942 *bufp = buf->b_hash_next;
943 buf->b_hash_next = NULL;
944 buf->b_flags &= ~ARC_IN_HASH_TABLE;
945
946 /* collect some hash table performance data */
947 ARCSTAT_BUMPDOWN(arcstat_hash_elements);
948
949 if (buf_hash_table.ht_table[idx] &&
950 buf_hash_table.ht_table[idx]->b_hash_next == NULL)
951 ARCSTAT_BUMPDOWN(arcstat_hash_chains);
952}
953
954/*
955 * Global data structures and functions for the buf kmem cache.
956 */
957static kmem_cache_t *hdr_cache;
958static kmem_cache_t *buf_cache;
959
960static void
961buf_fini(void)
962{
963 int i;
964
965 kmem_free(buf_hash_table.ht_table,
966 (buf_hash_table.ht_mask + 1) * sizeof (void *));
967 for (i = 0; i < BUF_LOCKS; i++)
968 mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
969 kmem_cache_destroy(hdr_cache);
970 kmem_cache_destroy(buf_cache);
971}
972
973/*
974 * Constructor callback - called when the cache is empty
975 * and a new buf is requested.
976 */
977/* ARGSUSED */
978static int
979hdr_cons(void *vbuf, void *unused, int kmflag)
980{
981 arc_buf_hdr_t *buf = vbuf;
982
983 bzero(buf, sizeof (arc_buf_hdr_t));
984 refcount_create(&buf->b_refcnt);
985 cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
986 mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
987 arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
988
989 return (0);
990}
991
992/* ARGSUSED */
993static int
994buf_cons(void *vbuf, void *unused, int kmflag)
995{
996 arc_buf_t *buf = vbuf;
997
998 bzero(buf, sizeof (arc_buf_t));
999 mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1000 arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1001
1002 return (0);
1003}
1004
1005/*
1006 * Destructor callback - called when a cached buf is
1007 * no longer required.
1008 */
1009/* ARGSUSED */
1010static void
1011hdr_dest(void *vbuf, void *unused)
1012{
1013 arc_buf_hdr_t *buf = vbuf;
1014
1015 ASSERT(BUF_EMPTY(buf));
1016 refcount_destroy(&buf->b_refcnt);
1017 cv_destroy(&buf->b_cv);
1018 mutex_destroy(&buf->b_freeze_lock);
1019 arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1020}
1021
1022/* ARGSUSED */
1023static void
1024buf_dest(void *vbuf, void *unused)
1025{
1026 arc_buf_t *buf = vbuf;
1027
1028 mutex_destroy(&buf->b_evict_lock);
1029 arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1030}
1031
1032/*
1033 * Reclaim callback -- invoked when memory is low.
1034 */
1035/* ARGSUSED */
1036static void
1037hdr_recl(void *unused)
1038{
1039 dprintf("hdr_recl called\n");
1040 /*
1041 * umem calls the reclaim func when we destroy the buf cache,
1042 * which is after we do arc_fini().
1043 */
1044 if (!arc_dead)
1045 cv_signal(&arc_reclaim_thr_cv);
1046}
1047
1048static void
1049buf_init(void)
1050{
1051 uint64_t *ct;
1052 uint64_t hsize = 1ULL << 12;
1053 int i, j;
1054
1055 /*
1056 * The hash table is big enough to fill all of physical memory
1057 * with an average 64K block size. The table will take up
1058 * totalmem*sizeof(void*)/64K (eg. 128KB/GB with 8-byte pointers).
1059 */
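	/*
	 * Worked example: with 16 GB of physical memory, hsize doubles from
	 * 2^12 to 2^18 buckets (2^18 * 64K == 16 GB), so the table occupies
	 * 2^18 * 8 bytes == 2 MB with 64-bit pointers.
	 */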
1060 while (hsize * 65536 < (uint64_t)physmem * PAGESIZE)
1061 hsize <<= 1;
1062retry:
1063 buf_hash_table.ht_mask = hsize - 1;
1064 buf_hash_table.ht_table =
1065 kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1066 if (buf_hash_table.ht_table == NULL) {
1067 ASSERT(hsize > (1ULL << 8));
1068 hsize >>= 1;
1069 goto retry;
1070 }
1071
1072 hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1073 0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1074 buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1075 0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1076
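	/*
	 * Initialize the reflected CRC-64 table (polynomial ZFS_CRC64_POLY)
	 * that buf_hash() uses to hash block identities.
	 */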
1077 for (i = 0; i < 256; i++)
1078 for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1079 *ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1080
1081 for (i = 0; i < BUF_LOCKS; i++) {
1082 mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1083 NULL, MUTEX_DEFAULT, NULL);
1084 }
1085}
1086
1087#define ARC_MINTIME (hz>>4) /* 62 ms */
1088
1089static void
1090arc_cksum_verify(arc_buf_t *buf)
1091{
1092 zio_cksum_t zc;
1093
1094 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1095 return;
1096
1097 mutex_enter(&buf->b_hdr->b_freeze_lock);
1098 if (buf->b_hdr->b_freeze_cksum == NULL ||
1099 (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1100 mutex_exit(&buf->b_hdr->b_freeze_lock);
1101 return;
1102 }
1103 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1104 if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1105 panic("buffer modified while frozen!");
1106 mutex_exit(&buf->b_hdr->b_freeze_lock);
1107}
1108
1109static int
1110arc_cksum_equal(arc_buf_t *buf)
1111{
1112 zio_cksum_t zc;
1113 int equal;
1114
1115 mutex_enter(&buf->b_hdr->b_freeze_lock);
1116 fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1117 equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1118 mutex_exit(&buf->b_hdr->b_freeze_lock);
1119
1120 return (equal);
1121}
1122
1123static void
1124arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1125{
1126 if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1127 return;
1128
1129 mutex_enter(&buf->b_hdr->b_freeze_lock);
1130 if (buf->b_hdr->b_freeze_cksum != NULL) {
1131 mutex_exit(&buf->b_hdr->b_freeze_lock);
1132 return;
1133 }
1134 buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1135 fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1136 buf->b_hdr->b_freeze_cksum);
1137 mutex_exit(&buf->b_hdr->b_freeze_lock);
1138#ifdef illumos
1139 arc_buf_watch(buf);
1140#endif /* illumos */
1141}
1142
1143#ifdef illumos
1144#ifndef _KERNEL
1145typedef struct procctl {
1146 long cmd;
1147 prwatch_t prwatch;
1148} procctl_t;
1149#endif
1150
1151/* ARGSUSED */
1152static void
1153arc_buf_unwatch(arc_buf_t *buf)
1154{
1155#ifndef _KERNEL
1156 if (arc_watch) {
1157 int result;
1158 procctl_t ctl;
1159 ctl.cmd = PCWATCH;
1160 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1161 ctl.prwatch.pr_size = 0;
1162 ctl.prwatch.pr_wflags = 0;
1163 result = write(arc_procfd, &ctl, sizeof (ctl));
1164 ASSERT3U(result, ==, sizeof (ctl));
1165 }
1166#endif
1167}
1168
1169/* ARGSUSED */
1170static void
1171arc_buf_watch(arc_buf_t *buf)
1172{
1173#ifndef _KERNEL
1174 if (arc_watch) {
1175 int result;
1176 procctl_t ctl;
1177 ctl.cmd = PCWATCH;
1178 ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1179 ctl.prwatch.pr_size = buf->b_hdr->b_size;
1180 ctl.prwatch.pr_wflags = WA_WRITE;
1181 result = write(arc_procfd, &ctl, sizeof (ctl));
1182 ASSERT3U(result, ==, sizeof (ctl));
1183 }
1184#endif
1185}
1186#endif /* illumos */
1187
1188void
1189arc_buf_thaw(arc_buf_t *buf)
1190{
1191 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1192 if (buf->b_hdr->b_state != arc_anon)
1193 panic("modifying non-anon buffer!");
1194 if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1195 panic("modifying buffer while i/o in progress!");
1196 arc_cksum_verify(buf);
1197 }
1198
1199 mutex_enter(&buf->b_hdr->b_freeze_lock);
1200 if (buf->b_hdr->b_freeze_cksum != NULL) {
1201 kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1202 buf->b_hdr->b_freeze_cksum = NULL;
1203 }
1204
1205 if (zfs_flags & ZFS_DEBUG_MODIFY) {
1206 if (buf->b_hdr->b_thawed)
1207 kmem_free(buf->b_hdr->b_thawed, 1);
1208 buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1209 }
1210
1211 mutex_exit(&buf->b_hdr->b_freeze_lock);
1212
1213#ifdef illumos
1214 arc_buf_unwatch(buf);
1215#endif /* illumos */
1216}
1217
1218void
1219arc_buf_freeze(arc_buf_t *buf)
1220{
1221 kmutex_t *hash_lock;
1222
1223 if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1224 return;
1225
1226 hash_lock = HDR_LOCK(buf->b_hdr);
1227 mutex_enter(hash_lock);
1228
1229 ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1230 buf->b_hdr->b_state == arc_anon);
1231 arc_cksum_compute(buf, B_FALSE);
1232 mutex_exit(hash_lock);
1233
1234}
1235
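/*
 * Each ARC state is split across multiple lists, each with its own lock, to
 * spread lock contention: metadata buffers hash into the first
 * ARC_BUFC_NUMMETADATALISTS lists and data buffers into the lists after them.
 * Given a header and a state, return the sublist and lock it maps to.
 */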
1236static void
1237get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1238{
1239 uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1240
1241 if (ab->b_type == ARC_BUFC_METADATA)
1242 buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1243 else {
1244 buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1245 buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1246 }
1247
1248 *list = &state->arcs_lists[buf_hashid];
1249 *lock = ARCS_LOCK(state, buf_hashid);
1250}
1251
1252
1253static void
1254add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1255{
1256 ASSERT(MUTEX_HELD(hash_lock));
1257
1258 if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1259 (ab->b_state != arc_anon)) {
1260 uint64_t delta = ab->b_size * ab->b_datacnt;
1261 uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1262 list_t *list;
1263 kmutex_t *lock;
1264
1265 get_buf_info(ab, ab->b_state, &list, &lock);
1266 ASSERT(!MUTEX_HELD(lock));
1267 mutex_enter(lock);
1268 ASSERT(list_link_active(&ab->b_arc_node));
1269 list_remove(list, ab);
1270 if (GHOST_STATE(ab->b_state)) {
1271 ASSERT0(ab->b_datacnt);
1272 ASSERT3P(ab->b_buf, ==, NULL);
1273 delta = ab->b_size;
1274 }
1275 ASSERT(delta > 0);
1276 ASSERT3U(*size, >=, delta);
1277 atomic_add_64(size, -delta);
1278 mutex_exit(lock);
1279 /* remove the prefetch flag if we get a reference */
1280 if (ab->b_flags & ARC_PREFETCH)
1281 ab->b_flags &= ~ARC_PREFETCH;
1282 }
1283}
1284
1285static int
1286remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1287{
1288 int cnt;
1289 arc_state_t *state = ab->b_state;
1290
1291 ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1292 ASSERT(!GHOST_STATE(state));
1293
1294 if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1295 (state != arc_anon)) {
1296 uint64_t *size = &state->arcs_lsize[ab->b_type];
1297 list_t *list;
1298 kmutex_t *lock;
1299
1300 get_buf_info(ab, state, &list, &lock);
1301 ASSERT(!MUTEX_HELD(lock));
1302 mutex_enter(lock);
1303 ASSERT(!list_link_active(&ab->b_arc_node));
1304 list_insert_head(list, ab);
1305 ASSERT(ab->b_datacnt > 0);
1306 atomic_add_64(size, ab->b_size * ab->b_datacnt);
1307 mutex_exit(lock);
1308 }
1309 return (cnt);
1310}
1311
1312/*
1313 * Move the supplied buffer to the indicated state. The mutex
1314 * for the buffer must be held by the caller.
1315 */
1316static void
1317arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1318{
1319 arc_state_t *old_state = ab->b_state;
1320 int64_t refcnt = refcount_count(&ab->b_refcnt);
1321 uint64_t from_delta, to_delta;
1322 list_t *list;
1323 kmutex_t *lock;
1324
1325 ASSERT(MUTEX_HELD(hash_lock));
1326 ASSERT3P(new_state, !=, old_state);
1327 ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1328 ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1329 ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1330
1331 from_delta = to_delta = ab->b_datacnt * ab->b_size;
1332
1333 /*
1334 * If this buffer is evictable, transfer it from the
1335 * old state list to the new state list.
1336 */
1337 if (refcnt == 0) {
1338 if (old_state != arc_anon) {
1339 int use_mutex;
1340 uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1341
1342 get_buf_info(ab, old_state, &list, &lock);
1343 use_mutex = !MUTEX_HELD(lock);
1344 if (use_mutex)
1345 mutex_enter(lock);
1346
1347 ASSERT(list_link_active(&ab->b_arc_node));
1348 list_remove(list, ab);
1349
1350 /*
1351 * If prefetching out of the ghost cache,
1352 * we will have a non-zero datacnt.
1353 */
1354 if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1355 /* ghost elements have a ghost size */
1356 ASSERT(ab->b_buf == NULL);
1357 from_delta = ab->b_size;
1358 }
1359 ASSERT3U(*size, >=, from_delta);
1360 atomic_add_64(size, -from_delta);
1361
1362 if (use_mutex)
1363 mutex_exit(lock);
1364 }
1365 if (new_state != arc_anon) {
1366 int use_mutex;
1367 uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1368
1369 get_buf_info(ab, new_state, &list, &lock);
1370 use_mutex = !MUTEX_HELD(lock);
1371 if (use_mutex)
1372 mutex_enter(lock);
1373
1374 list_insert_head(list, ab);
1375
1376 /* ghost elements have a ghost size */
1377 if (GHOST_STATE(new_state)) {
1378 ASSERT(ab->b_datacnt == 0);
1379 ASSERT(ab->b_buf == NULL);
1380 to_delta = ab->b_size;
1381 }
1382 atomic_add_64(size, to_delta);
1383
1384 if (use_mutex)
1385 mutex_exit(lock);
1386 }
1387 }
1388
1389 ASSERT(!BUF_EMPTY(ab));
1390 if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1391 buf_hash_remove(ab);
1392
1393 /* adjust state sizes */
1394 if (to_delta)
1395 atomic_add_64(&new_state->arcs_size, to_delta);
1396 if (from_delta) {
1397 ASSERT3U(old_state->arcs_size, >=, from_delta);
1398 atomic_add_64(&old_state->arcs_size, -from_delta);
1399 }
1400 ab->b_state = new_state;
1401
1402 /* adjust l2arc hdr stats */
1403 if (new_state == arc_l2c_only)
1404 l2arc_hdr_stat_add();
1405 else if (old_state == arc_l2c_only)
1406 l2arc_hdr_stat_remove();
1407}
1408
1409void
1410arc_space_consume(uint64_t space, arc_space_type_t type)
1411{
1412 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1413
1414 switch (type) {
1415 case ARC_SPACE_DATA:
1416 ARCSTAT_INCR(arcstat_data_size, space);
1417 break;
1418 case ARC_SPACE_OTHER:
1419 ARCSTAT_INCR(arcstat_other_size, space);
1420 break;
1421 case ARC_SPACE_HDRS:
1422 ARCSTAT_INCR(arcstat_hdr_size, space);
1423 break;
1424 case ARC_SPACE_L2HDRS:
1425 ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1426 break;
1427 }
1428
1429 atomic_add_64(&arc_meta_used, space);
1430 atomic_add_64(&arc_size, space);
1431}
1432
1433void
1434arc_space_return(uint64_t space, arc_space_type_t type)
1435{
1436 ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1437
1438 switch (type) {
1439 case ARC_SPACE_DATA:
1440 ARCSTAT_INCR(arcstat_data_size, -space);
1441 break;
1442 case ARC_SPACE_OTHER:
1443 ARCSTAT_INCR(arcstat_other_size, -space);
1444 break;
1445 case ARC_SPACE_HDRS:
1446 ARCSTAT_INCR(arcstat_hdr_size, -space);
1447 break;
1448 case ARC_SPACE_L2HDRS:
1449 ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1450 break;
1451 }
1452
1453 ASSERT(arc_meta_used >= space);
1454 if (arc_meta_max < arc_meta_used)
1455 arc_meta_max = arc_meta_used;
1456 atomic_add_64(&arc_meta_used, -space);
1457 ASSERT(arc_size >= space);
1458 atomic_add_64(&arc_size, -space);
1459}
1460
1461void *
1462arc_data_buf_alloc(uint64_t size)
1463{
1464 if (arc_evict_needed(ARC_BUFC_DATA))
1465 cv_signal(&arc_reclaim_thr_cv);
1466 atomic_add_64(&arc_size, size);
1467 return (zio_data_buf_alloc(size));
1468}
1469
1470void
1471arc_data_buf_free(void *buf, uint64_t size)
1472{
1473 zio_data_buf_free(buf, size);
1474 ASSERT(arc_size >= size);
1475 atomic_add_64(&arc_size, -size);
1476}
1477
1478arc_buf_t *
1479arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1480{
1481 arc_buf_hdr_t *hdr;
1482 arc_buf_t *buf;
1483
1484 ASSERT3U(size, >, 0);
1485 hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1486 ASSERT(BUF_EMPTY(hdr));
1487 hdr->b_size = size;
1488 hdr->b_type = type;
1489 hdr->b_spa = spa_load_guid(spa);
1490 hdr->b_state = arc_anon;
1491 hdr->b_arc_access = 0;
1492 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1493 buf->b_hdr = hdr;
1494 buf->b_data = NULL;
1495 buf->b_efunc = NULL;
1496 buf->b_private = NULL;
1497 buf->b_next = NULL;
1498 hdr->b_buf = buf;
1499 arc_get_data_buf(buf);
1500 hdr->b_datacnt = 1;
1501 hdr->b_flags = 0;
1502 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1503 (void) refcount_add(&hdr->b_refcnt, tag);
1504
1505 return (buf);
1506}
1507
1508static char *arc_onloan_tag = "onloan";
1509
1510/*
1511 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1512 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1513 * buffers must be returned to the arc before they can be used by the DMU or
1514 * freed.
1515 */
1516arc_buf_t *
1517arc_loan_buf(spa_t *spa, int size)
1518{
1519 arc_buf_t *buf;
1520
1521 buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1522
1523 atomic_add_64(&arc_loaned_bytes, size);
1524 return (buf);
1525}
1526
1527/*
1528 * Return a loaned arc buffer to the arc.
1529 */
1530void
1531arc_return_buf(arc_buf_t *buf, void *tag)
1532{
1533 arc_buf_hdr_t *hdr = buf->b_hdr;
1534
1535 ASSERT(buf->b_data != NULL);
1536 (void) refcount_add(&hdr->b_refcnt, tag);
1537 (void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1538
1539 atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1540}
1541
1542/* Detach an arc_buf from a dbuf (tag) */
1543void
1544arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1545{
1546 arc_buf_hdr_t *hdr;
1547
1548 ASSERT(buf->b_data != NULL);
1549 hdr = buf->b_hdr;
1550 (void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1551 (void) refcount_remove(&hdr->b_refcnt, tag);
1552 buf->b_efunc = NULL;
1553 buf->b_private = NULL;
1554
1555 atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1556}
1557
1558static arc_buf_t *
1559arc_buf_clone(arc_buf_t *from)
1560{
1561 arc_buf_t *buf;
1562 arc_buf_hdr_t *hdr = from->b_hdr;
1563 uint64_t size = hdr->b_size;
1564
1565 ASSERT(hdr->b_state != arc_anon);
1566
1567 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1568 buf->b_hdr = hdr;
1569 buf->b_data = NULL;
1570 buf->b_efunc = NULL;
1571 buf->b_private = NULL;
1572 buf->b_next = hdr->b_buf;
1573 hdr->b_buf = buf;
1574 arc_get_data_buf(buf);
1575 bcopy(from->b_data, buf->b_data, size);
1576
1577 /*
1578 * This buffer already exists in the arc so create a duplicate
1579 * copy for the caller. If the buffer is associated with user data
1580 * then track the size and number of duplicates. These stats will be
1581 * updated as duplicate buffers are created and destroyed.
1582 */
1583 if (hdr->b_type == ARC_BUFC_DATA) {
1584 ARCSTAT_BUMP(arcstat_duplicate_buffers);
1585 ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1586 }
1587 hdr->b_datacnt += 1;
1588 return (buf);
1589}
1590
1591void
1592arc_buf_add_ref(arc_buf_t *buf, void* tag)
1593{
1594 arc_buf_hdr_t *hdr;
1595 kmutex_t *hash_lock;
1596
1597 /*
1598 * Check to see if this buffer is evicted. Callers
1599 * must verify b_data != NULL to know if the add_ref
1600 * was successful.
1601 */
1602 mutex_enter(&buf->b_evict_lock);
1603 if (buf->b_data == NULL) {
1604 mutex_exit(&buf->b_evict_lock);
1605 return;
1606 }
1607 hash_lock = HDR_LOCK(buf->b_hdr);
1608 mutex_enter(hash_lock);
1609 hdr = buf->b_hdr;
1610 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1611 mutex_exit(&buf->b_evict_lock);
1612
1613 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1614 add_reference(hdr, hash_lock, tag);
1615 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1616 arc_access(hdr, hash_lock);
1617 mutex_exit(hash_lock);
1618 ARCSTAT_BUMP(arcstat_hits);
1619 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1620 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1621 data, metadata, hits);
1622}
1623
1624/*
1625 * Free the arc data buffer. If it is an l2arc write in progress,
1626 * the buffer is placed on l2arc_free_on_write to be freed later.
1627 */
1628static void
1629arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1630{
1631 arc_buf_hdr_t *hdr = buf->b_hdr;
1632
1633 if (HDR_L2_WRITING(hdr)) {
1634 l2arc_data_free_t *df;
1635 df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1636 df->l2df_data = buf->b_data;
1637 df->l2df_size = hdr->b_size;
1638 df->l2df_func = free_func;
1639 mutex_enter(&l2arc_free_on_write_mtx);
1640 list_insert_head(l2arc_free_on_write, df);
1641 mutex_exit(&l2arc_free_on_write_mtx);
1642 ARCSTAT_BUMP(arcstat_l2_free_on_write);
1643 } else {
1644 free_func(buf->b_data, hdr->b_size);
1645 }
1646}
1647
1648static void
1649arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t all)
1650{
1651 arc_buf_t **bufp;
1652
1653 /* free up data associated with the buf */
1654 if (buf->b_data) {
1655 arc_state_t *state = buf->b_hdr->b_state;
1656 uint64_t size = buf->b_hdr->b_size;
1657 arc_buf_contents_t type = buf->b_hdr->b_type;
1658
1659 arc_cksum_verify(buf);
1660#ifdef illumos
1661 arc_buf_unwatch(buf);
1662#endif /* illumos */
1663
1664 if (!recycle) {
1665 if (type == ARC_BUFC_METADATA) {
1666 arc_buf_data_free(buf, zio_buf_free);
1667 arc_space_return(size, ARC_SPACE_DATA);
1668 } else {
1669 ASSERT(type == ARC_BUFC_DATA);
1670 arc_buf_data_free(buf, zio_data_buf_free);
1671 ARCSTAT_INCR(arcstat_data_size, -size);
1672 atomic_add_64(&arc_size, -size);
1673 }
1674 }
1675 if (list_link_active(&buf->b_hdr->b_arc_node)) {
1676 uint64_t *cnt = &state->arcs_lsize[type];
1677
1678 ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1679 ASSERT(state != arc_anon);
1680
1681 ASSERT3U(*cnt, >=, size);
1682 atomic_add_64(cnt, -size);
1683 }
1684 ASSERT3U(state->arcs_size, >=, size);
1685 atomic_add_64(&state->arcs_size, -size);
1686 buf->b_data = NULL;
1687
1688 /*
1689 * If we're destroying a duplicate buffer make sure
1690 * that the appropriate statistics are updated.
1691 */
1692 if (buf->b_hdr->b_datacnt > 1 &&
1693 buf->b_hdr->b_type == ARC_BUFC_DATA) {
1694 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1695 ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1696 }
1697 ASSERT(buf->b_hdr->b_datacnt > 0);
1698 buf->b_hdr->b_datacnt -= 1;
1699 }
1700
1701 /* only remove the buf if requested */
1702 if (!all)
1703 return;
1704
1705 /* remove the buf from the hdr list */
1706 for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1707 continue;
1708 *bufp = buf->b_next;
1709 buf->b_next = NULL;
1710
1711 ASSERT(buf->b_efunc == NULL);
1712
1713 /* clean up the buf */
1714 buf->b_hdr = NULL;
1715 kmem_cache_free(buf_cache, buf);
1716}
1717
1718static void
1719arc_hdr_destroy(arc_buf_hdr_t *hdr)
1720{
1721 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1722 ASSERT3P(hdr->b_state, ==, arc_anon);
1723 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1724 l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1725
1726 if (l2hdr != NULL) {
1727 boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1728 /*
1729 * To prevent arc_free() and l2arc_evict() from
1730 * attempting to free the same buffer at the same time,
1731 * a FREE_IN_PROGRESS flag is given to arc_free() to
1732 * give it priority. l2arc_evict() can't destroy this
1733 * header while we are waiting on l2arc_buflist_mtx.
1734 *
1735 * The hdr may be removed from l2ad_buflist before we
1736 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1737 */
1738 if (!buflist_held) {
1739 mutex_enter(&l2arc_buflist_mtx);
1740 l2hdr = hdr->b_l2hdr;
1741 }
1742
1743 if (l2hdr != NULL) {
1744 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1745 hdr->b_size, 0);
1746 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1747 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1748 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1749 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
1750 -l2hdr->b_asize, 0, 0);
1751 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1752 if (hdr->b_state == arc_l2c_only)
1753 l2arc_hdr_stat_remove();
1754 hdr->b_l2hdr = NULL;
1755 }
1756
1757 if (!buflist_held)
1758 mutex_exit(&l2arc_buflist_mtx);
1759 }
1760
1761 if (!BUF_EMPTY(hdr)) {
1762 ASSERT(!HDR_IN_HASH_TABLE(hdr));
1763 buf_discard_identity(hdr);
1764 }
1765 while (hdr->b_buf) {
1766 arc_buf_t *buf = hdr->b_buf;
1767
1768 if (buf->b_efunc) {
1769 mutex_enter(&arc_eviction_mtx);
1770 mutex_enter(&buf->b_evict_lock);
1771 ASSERT(buf->b_hdr != NULL);
1772 arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1773 hdr->b_buf = buf->b_next;
1774 buf->b_hdr = &arc_eviction_hdr;
1775 buf->b_next = arc_eviction_list;
1776 arc_eviction_list = buf;
1777 mutex_exit(&buf->b_evict_lock);
1778 mutex_exit(&arc_eviction_mtx);
1779 } else {
1780 arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1781 }
1782 }
1783 if (hdr->b_freeze_cksum != NULL) {
1784 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1785 hdr->b_freeze_cksum = NULL;
1786 }
1787 if (hdr->b_thawed) {
1788 kmem_free(hdr->b_thawed, 1);
1789 hdr->b_thawed = NULL;
1790 }
1791
1792 ASSERT(!list_link_active(&hdr->b_arc_node));
1793 ASSERT3P(hdr->b_hash_next, ==, NULL);
1794 ASSERT3P(hdr->b_acb, ==, NULL);
1795 kmem_cache_free(hdr_cache, hdr);
1796}
1797
1798void
1799arc_buf_free(arc_buf_t *buf, void *tag)
1800{
1801 arc_buf_hdr_t *hdr = buf->b_hdr;
1802 int hashed = hdr->b_state != arc_anon;
1803
1804 ASSERT(buf->b_efunc == NULL);
1805 ASSERT(buf->b_data != NULL);
1806
1807 if (hashed) {
1808 kmutex_t *hash_lock = HDR_LOCK(hdr);
1809
1810 mutex_enter(hash_lock);
1811 hdr = buf->b_hdr;
1812 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1813
1814 (void) remove_reference(hdr, hash_lock, tag);
1815 if (hdr->b_datacnt > 1) {
1816 arc_buf_destroy(buf, FALSE, TRUE);
1817 } else {
1818 ASSERT(buf == hdr->b_buf);
1819 ASSERT(buf->b_efunc == NULL);
1820 hdr->b_flags |= ARC_BUF_AVAILABLE;
1821 }
1822 mutex_exit(hash_lock);
1823 } else if (HDR_IO_IN_PROGRESS(hdr)) {
1824 int destroy_hdr;
1825 /*
1826 * We are in the middle of an async write. Don't destroy
1827 * this buffer unless the write completes before we finish
1828 * decrementing the reference count.
1829 */
1830 mutex_enter(&arc_eviction_mtx);
1831 (void) remove_reference(hdr, NULL, tag);
1832 ASSERT(refcount_is_zero(&hdr->b_refcnt));
1833 destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1834 mutex_exit(&arc_eviction_mtx);
1835 if (destroy_hdr)
1836 arc_hdr_destroy(hdr);
1837 } else {
1838 if (remove_reference(hdr, NULL, tag) > 0)
1839 arc_buf_destroy(buf, FALSE, TRUE);
1840 else
1841 arc_hdr_destroy(hdr);
1842 }
1843}
1844
1845boolean_t
1846arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1847{
1848 arc_buf_hdr_t *hdr = buf->b_hdr;
1849 kmutex_t *hash_lock = HDR_LOCK(hdr);
1850 boolean_t no_callback = (buf->b_efunc == NULL);
1851
1852 if (hdr->b_state == arc_anon) {
1853 ASSERT(hdr->b_datacnt == 1);
1854 arc_buf_free(buf, tag);
1855 return (no_callback);
1856 }
1857
1858 mutex_enter(hash_lock);
1859 hdr = buf->b_hdr;
1860 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1861 ASSERT(hdr->b_state != arc_anon);
1862 ASSERT(buf->b_data != NULL);
1863
1864 (void) remove_reference(hdr, hash_lock, tag);
1865 if (hdr->b_datacnt > 1) {
1866 if (no_callback)
1867 arc_buf_destroy(buf, FALSE, TRUE);
1868 } else if (no_callback) {
1869 ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1870 ASSERT(buf->b_efunc == NULL);
1871 hdr->b_flags |= ARC_BUF_AVAILABLE;
1872 }
1873 ASSERT(no_callback || hdr->b_datacnt > 1 ||
1874 refcount_is_zero(&hdr->b_refcnt));
1875 mutex_exit(hash_lock);
1876 return (no_callback);
1877}
1878
1879int
1880arc_buf_size(arc_buf_t *buf)
1881{
1882 return (buf->b_hdr->b_size);
1883}
1884
1885/*
1886 * Called from the DMU to determine if the current buffer should be
1887 * evicted. In order to ensure proper locking, the eviction must be initiated
1888 * from the DMU. Return true if the buffer is associated with user data and
1889 * duplicate buffers still exist.
1890 */
1891boolean_t
1892arc_buf_eviction_needed(arc_buf_t *buf)
1893{
1894 arc_buf_hdr_t *hdr;
1895 boolean_t evict_needed = B_FALSE;
1896
1897 if (zfs_disable_dup_eviction)
1898 return (B_FALSE);
1899
1900 mutex_enter(&buf->b_evict_lock);
1901 hdr = buf->b_hdr;
1902 if (hdr == NULL) {
1903 /*
1904 * We are in arc_do_user_evicts(); let that function
1905 * perform the eviction.
1906 */
1907 ASSERT(buf->b_data == NULL);
1908 mutex_exit(&buf->b_evict_lock);
1909 return (B_FALSE);
1910 } else if (buf->b_data == NULL) {
1911 /*
1912 * We have already been added to the arc eviction list;
1913 * recommend eviction.
1914 */
1915 ASSERT3P(hdr, ==, &arc_eviction_hdr);
1916 mutex_exit(&buf->b_evict_lock);
1917 return (B_TRUE);
1918 }
1919
1920 if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1921 evict_needed = B_TRUE;
1922
1923 mutex_exit(&buf->b_evict_lock);
1924 return (evict_needed);
1925}
1926
1927/*
1928 * Evict buffers from list until we've removed the specified number of
1929 * bytes. Move the removed buffers to the appropriate evict state.
1930 * If the recycle flag is set, then attempt to "recycle" a buffer:
1931 * - look for a buffer to evict that is `bytes' long.
1932 * - return the data block from this buffer rather than freeing it.
1933 * This flag is used by callers that are trying to make space for a
1934 * new buffer in a full arc cache.
1935 *
1936 * This function makes a "best effort". It skips over any buffers
1937 * it can't get a hash_lock on, and so may not catch all candidates.
1938 * It may also return without evicting as much space as requested.
1939 */
1940static void *
1941arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1942 arc_buf_contents_t type)
1943{
1944 arc_state_t *evicted_state;
1945 uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1946 int64_t bytes_remaining;
1947 arc_buf_hdr_t *ab, *ab_prev = NULL;
1948 list_t *evicted_list, *list, *evicted_list_start, *list_start;
1949 kmutex_t *lock, *evicted_lock;
1950 kmutex_t *hash_lock;
1951 boolean_t have_lock;
1952 void *stolen = NULL;
1953 arc_buf_hdr_t marker = { 0 };
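	/* b_spa == 0 identifies `marker' as a list marker; the scan skips it */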
1954 int count = 0;
1955 static int evict_metadata_offset, evict_data_offset;
1956 int i, idx, offset, list_count, lists;
1957
1958 ASSERT(state == arc_mru || state == arc_mfu);
1959
1960 evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1961
1962 if (type == ARC_BUFC_METADATA) {
1963 offset = 0;
1964 list_count = ARC_BUFC_NUMMETADATALISTS;
1965 list_start = &state->arcs_lists[0];
1966 evicted_list_start = &evicted_state->arcs_lists[0];
1967 idx = evict_metadata_offset;
1968 } else {
1969 offset = ARC_BUFC_NUMMETADATALISTS;
1970 list_start = &state->arcs_lists[offset];
1971 evicted_list_start = &evicted_state->arcs_lists[offset];
1972 list_count = ARC_BUFC_NUMDATALISTS;
1973 idx = evict_data_offset;
1974 }
1975 bytes_remaining = evicted_state->arcs_lsize[type];
1976 lists = 0;
1977
1978evict_start:
1979 list = &list_start[idx];
1980 evicted_list = &evicted_list_start[idx];
1981 lock = ARCS_LOCK(state, (offset + idx));
1982 evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
1983
1984 mutex_enter(lock);
1985 mutex_enter(evicted_lock);
1986
1987 for (ab = list_tail(list); ab; ab = ab_prev) {
1988 ab_prev = list_prev(list, ab);
1989 bytes_remaining -= (ab->b_size * ab->b_datacnt);
1990 /* prefetch buffers have a minimum lifespan */
1991 if (HDR_IO_IN_PROGRESS(ab) ||
1992 (spa && ab->b_spa != spa) ||
1993 (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
1994 ddi_get_lbolt() - ab->b_arc_access <
1995 arc_min_prefetch_lifespan)) {
1996 skipped++;
1997 continue;
1998 }
1999 /* "lookahead" for better eviction candidate */
2000 if (recycle && ab->b_size != bytes &&
2001 ab_prev && ab_prev->b_size == bytes)
2002 continue;
2003
2004 /* ignore markers */
2005 if (ab->b_spa == 0)
2006 continue;
2007
2008 /*
2009 * It may take a long time to evict all the bufs requested.
2010 * To avoid blocking all arc activity, periodically drop
2011 * the arcs_mtx and give other threads a chance to run
2012 * before reacquiring the lock.
2013 *
2014 * If we are looking for a buffer to recycle, we are in
2015 * the hot code path, so don't sleep.
2016 */
2017 if (!recycle && count++ > arc_evict_iterations) {
2018 list_insert_after(list, ab, &marker);
2019 mutex_exit(evicted_lock);
2020 mutex_exit(lock);
2021 kpreempt(KPREEMPT_SYNC);
2022 mutex_enter(lock);
2023 mutex_enter(evicted_lock);
2024 ab_prev = list_prev(list, &marker);
2025 list_remove(list, &marker);
2026 count = 0;
2027 continue;
2028 }
2029
2030 hash_lock = HDR_LOCK(ab);
2031 have_lock = MUTEX_HELD(hash_lock);
2032 if (have_lock || mutex_tryenter(hash_lock)) {
2033 ASSERT0(refcount_count(&ab->b_refcnt));
2034 ASSERT(ab->b_datacnt > 0);
2035 while (ab->b_buf) {
2036 arc_buf_t *buf = ab->b_buf;
2037 if (!mutex_tryenter(&buf->b_evict_lock)) {
2038 missed += 1;
2039 break;
2040 }
2041 if (buf->b_data) {
2042 bytes_evicted += ab->b_size;
2043 if (recycle && ab->b_type == type &&
2044 ab->b_size == bytes &&
2045 !HDR_L2_WRITING(ab)) {
2046 stolen = buf->b_data;
2047 recycle = FALSE;
2048 }
2049 }
2050 if (buf->b_efunc) {
2051 mutex_enter(&arc_eviction_mtx);
2052 arc_buf_destroy(buf,
2053 buf->b_data == stolen, FALSE);
2054 ab->b_buf = buf->b_next;
2055 buf->b_hdr = &arc_eviction_hdr;
2056 buf->b_next = arc_eviction_list;
2057 arc_eviction_list = buf;
2058 mutex_exit(&arc_eviction_mtx);
2059 mutex_exit(&buf->b_evict_lock);
2060 } else {
2061 mutex_exit(&buf->b_evict_lock);
2062 arc_buf_destroy(buf,
2063 buf->b_data == stolen, TRUE);
2064 }
2065 }
2066
2067 if (ab->b_l2hdr) {
2068 ARCSTAT_INCR(arcstat_evict_l2_cached,
2069 ab->b_size);
2070 } else {
2071 if (l2arc_write_eligible(ab->b_spa, ab)) {
2072 ARCSTAT_INCR(arcstat_evict_l2_eligible,
2073 ab->b_size);
2074 } else {
2075 ARCSTAT_INCR(
2076 arcstat_evict_l2_ineligible,
2077 ab->b_size);
2078 }
2079 }
2080
2081 if (ab->b_datacnt == 0) {
2082 arc_change_state(evicted_state, ab, hash_lock);
2083 ASSERT(HDR_IN_HASH_TABLE(ab));
2084 ab->b_flags |= ARC_IN_HASH_TABLE;
2085 ab->b_flags &= ~ARC_BUF_AVAILABLE;
2086 DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2087 }
2088 if (!have_lock)
2089 mutex_exit(hash_lock);
2090 if (bytes >= 0 && bytes_evicted >= bytes)
2091 break;
2092 if (bytes_remaining > 0) {
2093 mutex_exit(evicted_lock);
2094 mutex_exit(lock);
2095 idx = ((idx + 1) & (list_count - 1));
2096 lists++;
2097 goto evict_start;
2098 }
2099 } else {
2100 missed += 1;
2101 }
2102 }
2103
2104 mutex_exit(evicted_lock);
2105 mutex_exit(lock);
2106
2107 idx = ((idx + 1) & (list_count - 1));
2108 lists++;
2109
2110 if (bytes_evicted < bytes) {
2111 if (lists < list_count)
2112 goto evict_start;
2113 else
2114 dprintf("only evicted %lld bytes from %x",
2115 (longlong_t)bytes_evicted, state);
2116 }
2117 if (type == ARC_BUFC_METADATA)
2118 evict_metadata_offset = idx;
2119 else
2120 evict_data_offset = idx;
2121
2122 if (skipped)
2123 ARCSTAT_INCR(arcstat_evict_skip, skipped);
2124
2125 if (missed)
2126 ARCSTAT_INCR(arcstat_mutex_miss, missed);
2127
2128 /*
2129 * Note: we have just evicted some data into the ghost state,
2130 * potentially putting the ghost size over the desired size. Rather
2131	 * than evicting from the ghost list in this hot code path, leave
2132 * this chore to the arc_reclaim_thread().
2133 */
2134
2135 if (stolen)
2136 ARCSTAT_BUMP(arcstat_stolen);
2137 return (stolen);
2138}
2139
2140/*
2141 * Remove buffers from list until we've removed the specified number of
2142 * bytes. Destroy the buffers that are removed.
2143 */
2144static void
2145arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2146{
2147 arc_buf_hdr_t *ab, *ab_prev;
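	/*
	 * The zeroed marker header below is identified by b_spa == 0,
	 * which is how the scan skips over markers inserted by other
	 * threads.
	 */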
2148 arc_buf_hdr_t marker = { 0 };
2149 list_t *list, *list_start;
2150 kmutex_t *hash_lock, *lock;
2151 uint64_t bytes_deleted = 0;
2152 uint64_t bufs_skipped = 0;
2153 int count = 0;
2154 static int evict_offset;
2155 int list_count, idx = evict_offset;
2156 int offset, lists = 0;
2157
2158 ASSERT(GHOST_STATE(state));
2159
2160 /*
2161 * data lists come after metadata lists
2162 */
2163 list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2164 list_count = ARC_BUFC_NUMDATALISTS;
2165 offset = ARC_BUFC_NUMMETADATALISTS;
2166
2167evict_start:
2168 list = &list_start[idx];
2169 lock = ARCS_LOCK(state, idx + offset);
2170
2171 mutex_enter(lock);
2172 for (ab = list_tail(list); ab; ab = ab_prev) {
2173 ab_prev = list_prev(list, ab);
2174 if (ab->b_type > ARC_BUFC_NUMTYPES)
2175 panic("invalid ab=%p", (void *)ab);
2176 if (spa && ab->b_spa != spa)
2177 continue;
2178
2179 /* ignore markers */
2180 if (ab->b_spa == 0)
2181 continue;
2182
2183 hash_lock = HDR_LOCK(ab);
2184 /* caller may be trying to modify this buffer, skip it */
2185 if (MUTEX_HELD(hash_lock))
2186 continue;
2187
2188 /*
2189 * It may take a long time to evict all the bufs requested.
2190 * To avoid blocking all arc activity, periodically drop
2191 * the arcs_mtx and give other threads a chance to run
2192 * before reacquiring the lock.
2193 */
2194 if (count++ > arc_evict_iterations) {
2195 list_insert_after(list, ab, &marker);
2196 mutex_exit(lock);
2197 kpreempt(KPREEMPT_SYNC);
2198 mutex_enter(lock);
2199 ab_prev = list_prev(list, &marker);
2200 list_remove(list, &marker);
2201 count = 0;
2202 continue;
2203 }
2204 if (mutex_tryenter(hash_lock)) {
2205 ASSERT(!HDR_IO_IN_PROGRESS(ab));
2206 ASSERT(ab->b_buf == NULL);
2207 ARCSTAT_BUMP(arcstat_deleted);
2208 bytes_deleted += ab->b_size;
2209
2210 if (ab->b_l2hdr != NULL) {
2211 /*
2212 * This buffer is cached on the 2nd Level ARC;
2213 * don't destroy the header.
2214 */
2215 arc_change_state(arc_l2c_only, ab, hash_lock);
2216 mutex_exit(hash_lock);
2217 } else {
2218 arc_change_state(arc_anon, ab, hash_lock);
2219 mutex_exit(hash_lock);
2220 arc_hdr_destroy(ab);
2221 }
2222
2223 DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2224 if (bytes >= 0 && bytes_deleted >= bytes)
2225 break;
2226 } else if (bytes < 0) {
2227 /*
2228 * Insert a list marker and then wait for the
2229 * hash lock to become available. Once its
2230			 * hash lock to become available. Once it's
2231 */
2232 list_insert_after(list, ab, &marker);
2233 mutex_exit(lock);
2234 mutex_enter(hash_lock);
2235 mutex_exit(hash_lock);
2236 mutex_enter(lock);
2237 ab_prev = list_prev(list, &marker);
2238 list_remove(list, &marker);
2239 } else {
2240 bufs_skipped += 1;
2241 }
2242
2243 }
2244 mutex_exit(lock);
2245 idx = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2246 lists++;
2247
2248 if (lists < list_count)
2249 goto evict_start;
2250
2251 evict_offset = idx;
2252 if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2253 (bytes < 0 || bytes_deleted < bytes)) {
2254 list_start = &state->arcs_lists[0];
2255 list_count = ARC_BUFC_NUMMETADATALISTS;
2256 offset = lists = 0;
2257 goto evict_start;
2258 }
2259
2260 if (bufs_skipped) {
2261 ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2262 ASSERT(bytes >= 0);
2263 }
2264
2265 if (bytes_deleted < bytes)
2266 dprintf("only deleted %lld bytes from %p",
2267 (longlong_t)bytes_deleted, state);
2268}
2269
2270static void
2271arc_adjust(void)
2272{
2273 int64_t adjustment, delta;
2274
2275 /*
2276 * Adjust MRU size
2277 */
2278
2279 adjustment = MIN((int64_t)(arc_size - arc_c),
2280 (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2281 arc_p));
2282
2283 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2284 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2285 (void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2286 adjustment -= delta;
2287 }
2288
2289 if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2290 delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2291 (void) arc_evict(arc_mru, 0, delta, FALSE,
2292 ARC_BUFC_METADATA);
2293 }
2294
2295 /*
2296 * Adjust MFU size
2297 */
2298
2299 adjustment = arc_size - arc_c;
2300
2301 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2302 delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2303 (void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2304 adjustment -= delta;
2305 }
2306
2307 if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2308 int64_t delta = MIN(adjustment,
2309 arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2310 (void) arc_evict(arc_mfu, 0, delta, FALSE,
2311 ARC_BUFC_METADATA);
2312 }
2313
2314 /*
2315 * Adjust ghost lists
2316 */
2317
2318 adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2319
2320 if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2321 delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2322 arc_evict_ghost(arc_mru_ghost, 0, delta);
2323 }
2324
2325 adjustment =
2326 arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2327
2328 if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2329 delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2330 arc_evict_ghost(arc_mfu_ghost, 0, delta);
2331 }
2332}
2333
2334static void
2335arc_do_user_evicts(void)
2336{
2337 static arc_buf_t *tmp_arc_eviction_list;
2338
2339 /*
2340	 * Move the list over to avoid a lock order reversal (LOR)
2341 */
2342restart:
2343 mutex_enter(&arc_eviction_mtx);
2344 tmp_arc_eviction_list = arc_eviction_list;
2345 arc_eviction_list = NULL;
2346 mutex_exit(&arc_eviction_mtx);
2347
2348 while (tmp_arc_eviction_list != NULL) {
2349 arc_buf_t *buf = tmp_arc_eviction_list;
2350 tmp_arc_eviction_list = buf->b_next;
2351 mutex_enter(&buf->b_evict_lock);
2352 buf->b_hdr = NULL;
2353 mutex_exit(&buf->b_evict_lock);
2354
2355 if (buf->b_efunc != NULL)
2356 VERIFY(buf->b_efunc(buf) == 0);
2357
2358 buf->b_efunc = NULL;
2359 buf->b_private = NULL;
2360 kmem_cache_free(buf_cache, buf);
2361 }
2362
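	/*
	 * More buffers may have been queued on arc_eviction_list while
	 * arc_eviction_mtx was dropped above, so go around again until
	 * the list stays empty.
	 */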
2363 if (arc_eviction_list != NULL)
2364 goto restart;
2365}
2366
2367/*
2368 * Flush all *evictable* data from the cache for the given spa.
2369 * NOTE: this will not touch "active" (i.e. referenced) data.
2370 */
2371void
2372arc_flush(spa_t *spa)
2373{
2374 uint64_t guid = 0;
2375
2376 if (spa)
2377 guid = spa_load_guid(spa);
2378
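	/*
	 * Each arc_evict() pass below evicts with an unlimited byte count
	 * (-1).  When flushing a single pool we break after one pass, since
	 * buffers that belong to other pools keep the list sizes non-zero
	 * and the loops would otherwise never terminate.
	 */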
2379 while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2380 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2381 if (spa)
2382 break;
2383 }
2384 while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2385 (void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2386 if (spa)
2387 break;
2388 }
2389 while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2390 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2391 if (spa)
2392 break;
2393 }
2394 while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2395 (void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2396 if (spa)
2397 break;
2398 }
2399
2400 arc_evict_ghost(arc_mru_ghost, guid, -1);
2401 arc_evict_ghost(arc_mfu_ghost, guid, -1);
2402
2403 mutex_enter(&arc_reclaim_thr_lock);
2404 arc_do_user_evicts();
2405 mutex_exit(&arc_reclaim_thr_lock);
2406 ASSERT(spa || arc_eviction_list == NULL);
2407}
2408
2409void
2410arc_shrink(void)
2411{
2412 if (arc_c > arc_c_min) {
2413 uint64_t to_free;
2414
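		/*
		 * Back the target off by a fixed fraction of its current
		 * value; e.g. with a shrink shift of 5, each call releases
		 * 1/32 of arc_c, but arc_c is never dropped below arc_c_min.
		 */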
2415#ifdef _KERNEL
2416 to_free = arc_c >> arc_shrink_shift;
2417#else
2418 to_free = arc_c >> arc_shrink_shift;
2419#endif
2420 if (arc_c > arc_c_min + to_free)
2421 atomic_add_64(&arc_c, -to_free);
2422 else
2423 arc_c = arc_c_min;
2424
2425 atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2426 if (arc_c > arc_size)
2427 arc_c = MAX(arc_size, arc_c_min);
2428 if (arc_p > arc_c)
2429 arc_p = (arc_c >> 1);
2430 ASSERT(arc_c >= arc_c_min);
2431 ASSERT((int64_t)arc_p >= 0);
2432 }
2433
2434 if (arc_size > arc_c)
2435 arc_adjust();
2436}
2437
2438static int needfree = 0;
2439
2440static int
2441arc_reclaim_needed(void)
2442{
2443
2444#ifdef _KERNEL
2445
2446 if (needfree)
2447 return (1);
2448
2449 /*
2450 * Cooperate with pagedaemon when it's time for it to scan
2451 * and reclaim some pages.
2452 */
2453 if (vm_paging_needed())
2454 return (1);
2455
2456#ifdef sun
2457 /*
2458 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2459 */
2460 extra = desfree;
2461
2462 /*
2463 * check that we're out of range of the pageout scanner. It starts to
2464 * schedule paging if freemem is less than lotsfree and needfree.
2465 * lotsfree is the high-water mark for pageout, and needfree is the
2466 * number of needed free pages. We add extra pages here to make sure
2467 * the scanner doesn't start up while we're freeing memory.
2468 */
2469 if (freemem < lotsfree + needfree + extra)
2470 return (1);
2471
2472 /*
2473 * check to make sure that swapfs has enough space so that anon
2474 * reservations can still succeed. anon_resvmem() checks that the
2475 * availrmem is greater than swapfs_minfree, and the number of reserved
2476 * swap pages. We also add a bit of extra here just to prevent
2477 * circumstances from getting really dire.
2478 */
2479 if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2480 return (1);
2481
2482#if defined(__i386)
2483 /*
2484 * If we're on an i386 platform, it's possible that we'll exhaust the
2485 * kernel heap space before we ever run out of available physical
2486 * memory. Most checks of the size of the heap_area compare against
2487 * tune.t_minarmem, which is the minimum available real memory that we
2488 * can have in the system. However, this is generally fixed at 25 pages
2489 * which is so low that it's useless. In this comparison, we seek to
2490 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2491 * heap is allocated. (Or, in the calculation, if less than 1/4th is
2492 * free)
2493 */
2494 if (btop(vmem_size(heap_arena, VMEM_FREE)) <
2495 (btop(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2))
2496 return (1);
2497#endif
2498#else /* !sun */
2499 if (kmem_used() > (kmem_size() * 3) / 4)
2500 return (1);
2501#endif /* sun */
2502
2503#else
2504 if (spa_get_random(100) == 0)
2505 return (1);
2506#endif
2507 return (0);
2508}
2509
2510extern kmem_cache_t *zio_buf_cache[];
2511extern kmem_cache_t *zio_data_buf_cache[];
2512
2513static void
2514arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2515{
2516 size_t i;
2517 kmem_cache_t *prev_cache = NULL;
2518 kmem_cache_t *prev_data_cache = NULL;
2519
2520#ifdef _KERNEL
2521 if (arc_meta_used >= arc_meta_limit) {
2522 /*
2523 * We are exceeding our meta-data cache limit.
2524 * Purge some DNLC entries to release holds on meta-data.
2525 */
2526 dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2527 }
2528#if defined(__i386)
2529 /*
2530 * Reclaim unused memory from all kmem caches.
2531 */
2532 kmem_reap();
2533#endif
2534#endif
2535
2536 /*
2537 * An aggressive reclamation will shrink the cache size as well as
2538 * reap free buffers from the arc kmem caches.
2539 */
2540 if (strat == ARC_RECLAIM_AGGR)
2541 arc_shrink();
2542
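	/*
	 * Several adjacent zio_buf_cache[] / zio_data_buf_cache[] slots may
	 * point at the same underlying kmem cache; the prev_cache and
	 * prev_data_cache checks below avoid reaping the same cache
	 * repeatedly.
	 */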
2543 for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2544 if (zio_buf_cache[i] != prev_cache) {
2545 prev_cache = zio_buf_cache[i];
2546 kmem_cache_reap_now(zio_buf_cache[i]);
2547 }
2548 if (zio_data_buf_cache[i] != prev_data_cache) {
2549 prev_data_cache = zio_data_buf_cache[i];
2550 kmem_cache_reap_now(zio_data_buf_cache[i]);
2551 }
2552 }
2553 kmem_cache_reap_now(buf_cache);
2554 kmem_cache_reap_now(hdr_cache);
2555}
2556
2557static void
2558arc_reclaim_thread(void *dummy __unused)
2559{
2560 clock_t growtime = 0;
2561 arc_reclaim_strategy_t last_reclaim = ARC_RECLAIM_CONS;
2562 callb_cpr_t cpr;
2563
2564 CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2565
2566 mutex_enter(&arc_reclaim_thr_lock);
2567 while (arc_thread_exit == 0) {
2568 if (arc_reclaim_needed()) {
2569
2570 if (arc_no_grow) {
2571 if (last_reclaim == ARC_RECLAIM_CONS) {
2572 last_reclaim = ARC_RECLAIM_AGGR;
2573 } else {
2574 last_reclaim = ARC_RECLAIM_CONS;
2575 }
2576 } else {
2577 arc_no_grow = TRUE;
2578 last_reclaim = ARC_RECLAIM_AGGR;
2579 membar_producer();
2580 }
2581
2582 /* reset the growth delay for every reclaim */
2583 growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2584
2585 if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2586 /*
2587				 * If needfree is TRUE, our vm_lowmem hook
2588				 * was called; in that case we must free some
2589 * memory, so switch to aggressive mode.
2590 */
2591 arc_no_grow = TRUE;
2592 last_reclaim = ARC_RECLAIM_AGGR;
2593 }
2594 arc_kmem_reap_now(last_reclaim);
2595 arc_warm = B_TRUE;
2596
2597 } else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2598 arc_no_grow = FALSE;
2599 }
2600
2601 arc_adjust();
2602
2603 if (arc_eviction_list != NULL)
2604 arc_do_user_evicts();
2605
2606#ifdef _KERNEL
2607 if (needfree) {
2608 needfree = 0;
2609 wakeup(&needfree);
2610 }
2611#endif
2612
2613 /* block until needed, or one second, whichever is shorter */
2614 CALLB_CPR_SAFE_BEGIN(&cpr);
2615 (void) cv_timedwait(&arc_reclaim_thr_cv,
2616 &arc_reclaim_thr_lock, hz);
2617 CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2618 }
2619
2620 arc_thread_exit = 0;
2621 cv_broadcast(&arc_reclaim_thr_cv);
2622 CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_thr_lock */
2623 thread_exit();
2624}
2625
2626/*
2627 * Adapt arc info given the number of bytes we are trying to add and
2628 * the state that we are coming from. This function is only called
2629 * when we are adding new content to the cache.
2630 */
2631static void
2632arc_adapt(int bytes, arc_state_t *state)
2633{
2634 int mult;
2635 uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2636
2637 if (state == arc_l2c_only)
2638 return;
2639
2640 ASSERT(bytes > 0);
2641 /*
2642 * Adapt the target size of the MRU list:
2643 * - if we just hit in the MRU ghost list, then increase
2644 * the target size of the MRU list.
2645 * - if we just hit in the MFU ghost list, then increase
2646 * the target size of the MFU list by decreasing the
2647 * target size of the MRU list.
2648 */
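	/*
	 * For example, if the MFU ghost list is four times the size of the
	 * MRU ghost list, an MRU ghost hit grows arc_p by 4 * bytes (the
	 * ratio is capped at 10 and arc_p at arc_c - arc_p_min); when the
	 * MRU ghost list dominates by the same ratio, an MFU ghost hit
	 * shrinks arc_p by 4 * bytes, but never below arc_p_min.
	 */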
2649 if (state == arc_mru_ghost) {
2650 mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2651 1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2652 mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2653
2654 arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2655 } else if (state == arc_mfu_ghost) {
2656 uint64_t delta;
2657
2658 mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2659 1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2660 mult = MIN(mult, 10);
2661
2662 delta = MIN(bytes * mult, arc_p);
2663 arc_p = MAX(arc_p_min, arc_p - delta);
2664 }
2665 ASSERT((int64_t)arc_p >= 0);
2666
2667 if (arc_reclaim_needed()) {
2668 cv_signal(&arc_reclaim_thr_cv);
2669 return;
2670 }
2671
2672 if (arc_no_grow)
2673 return;
2674
2675 if (arc_c >= arc_c_max)
2676 return;
2677
2678 /*
2679 * If we're within (2 * maxblocksize) bytes of the target
2680 * cache size, increment the target cache size
2681 */
2682 if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2683 atomic_add_64(&arc_c, (int64_t)bytes);
2684 if (arc_c > arc_c_max)
2685 arc_c = arc_c_max;
2686 else if (state == arc_anon)
2687 atomic_add_64(&arc_p, (int64_t)bytes);
2688 if (arc_p > arc_c)
2689 arc_p = arc_c;
2690 }
2691 ASSERT((int64_t)arc_p >= 0);
2692}
2693
2694/*
2695 * Check if the cache has reached its limits and eviction is required
2696 * prior to insert.
2697 */
2698static int
2699arc_evict_needed(arc_buf_contents_t type)
2700{
2701 if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2702 return (1);
2703
2704#ifdef sun
2705#ifdef _KERNEL
2706 /*
2707 * If zio data pages are being allocated out of a separate heap segment,
2708	 * then enforce that the amount of free vmem in that arena remains
2709	 * above about 1/32nd of the allocated size.
2710 */
2711 if (type == ARC_BUFC_DATA && zio_arena != NULL &&
2712 vmem_size(zio_arena, VMEM_FREE) <
2713 (vmem_size(zio_arena, VMEM_ALLOC) >> 5))
2714 return (1);
2715#endif
2716#endif /* sun */
2717
2718 if (arc_reclaim_needed())
2719 return (1);
2720
2721 return (arc_size > arc_c);
2722}
2723
2724/*
2725 * The buffer, supplied as the first argument, needs a data block.
2726 * So, if we are at cache max, determine which cache should be victimized.
2727 * We have the following cases:
2728 *
2729 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2730 * In this situation if we're out of space, but the resident size of the MFU is
2731 * under the limit, victimize the MFU cache to satisfy this insertion request.
2732 *
2733 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2734 * Here, we've used up all of the available space for the MRU, so we need to
2735 * evict from our own cache instead. Evict from the set of resident MRU
2736 * entries.
2737 *
2738 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2739 * c minus p represents the MFU space in the cache, since p is the size of the
2740 * cache that is dedicated to the MRU. In this situation there's still space on
2741 * the MFU side, so the MRU side needs to be victimized.
2742 *
2743 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2744 * MFU's resident set is consuming more space than it has been allotted. In
2745 * this situation, we must victimize our own cache, the MFU, for this insertion.
2746 */
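/*
 * A concrete illustration (numbers are made up): with arc_p = 600MB, an MRU
 * insert while arc_anon + arc_mru hold 400MB falls under case 1, so the MFU
 * is victimized, provided it has at least the requested size of evictable
 * data of the right type; the same insert while arc_anon + arc_mru already
 * hold 700MB falls under case 2 and evicts from the MRU itself.
 */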
2747static void
2748arc_get_data_buf(arc_buf_t *buf)
2749{
2750 arc_state_t *state = buf->b_hdr->b_state;
2751 uint64_t size = buf->b_hdr->b_size;
2752 arc_buf_contents_t type = buf->b_hdr->b_type;
2753
2754 arc_adapt(size, state);
2755
2756 /*
2757 * We have not yet reached cache maximum size,
2758 * just allocate a new buffer.
2759 */
2760 if (!arc_evict_needed(type)) {
2761 if (type == ARC_BUFC_METADATA) {
2762 buf->b_data = zio_buf_alloc(size);
2763 arc_space_consume(size, ARC_SPACE_DATA);
2764 } else {
2765 ASSERT(type == ARC_BUFC_DATA);
2766 buf->b_data = zio_data_buf_alloc(size);
2767 ARCSTAT_INCR(arcstat_data_size, size);
2768 atomic_add_64(&arc_size, size);
2769 }
2770 goto out;
2771 }
2772
2773 /*
2774 * If we are prefetching from the mfu ghost list, this buffer
2775 * will end up on the mru list; so steal space from there.
2776 */
2777 if (state == arc_mfu_ghost)
2778 state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2779 else if (state == arc_mru_ghost)
2780 state = arc_mru;
2781
2782 if (state == arc_mru || state == arc_anon) {
2783 uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2784 state = (arc_mfu->arcs_lsize[type] >= size &&
2785 arc_p > mru_used) ? arc_mfu : arc_mru;
2786 } else {
2787 /* MFU cases */
2788 uint64_t mfu_space = arc_c - arc_p;
2789 state = (arc_mru->arcs_lsize[type] >= size &&
2790 mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2791 }
2792 if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2793 if (type == ARC_BUFC_METADATA) {
2794 buf->b_data = zio_buf_alloc(size);
2795 arc_space_consume(size, ARC_SPACE_DATA);
2796 } else {
2797 ASSERT(type == ARC_BUFC_DATA);
2798 buf->b_data = zio_data_buf_alloc(size);
2799 ARCSTAT_INCR(arcstat_data_size, size);
2800 atomic_add_64(&arc_size, size);
2801 }
2802 ARCSTAT_BUMP(arcstat_recycle_miss);
2803 }
2804 ASSERT(buf->b_data != NULL);
2805out:
2806 /*
2807 * Update the state size. Note that ghost states have a
2808 * "ghost size" and so don't need to be updated.
2809 */
2810 if (!GHOST_STATE(buf->b_hdr->b_state)) {
2811 arc_buf_hdr_t *hdr = buf->b_hdr;
2812
2813 atomic_add_64(&hdr->b_state->arcs_size, size);
2814 if (list_link_active(&hdr->b_arc_node)) {
2815 ASSERT(refcount_is_zero(&hdr->b_refcnt));
2816 atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2817 }
2818 /*
2819 * If we are growing the cache, and we are adding anonymous
2820 * data, and we have outgrown arc_p, update arc_p
2821 */
2822 if (arc_size < arc_c && hdr->b_state == arc_anon &&
2823 arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2824 arc_p = MIN(arc_c, arc_p + size);
2825 }
2826 ARCSTAT_BUMP(arcstat_allocated);
2827}
2828
2829/*
2830 * This routine is called whenever a buffer is accessed.
2831 * NOTE: the caller must hold the hash lock; it is not dropped here.
2832 */
2833static void
2834arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2835{
2836 clock_t now;
2837
2838 ASSERT(MUTEX_HELD(hash_lock));
2839
2840 if (buf->b_state == arc_anon) {
2841 /*
2842 * This buffer is not in the cache, and does not
2843 * appear in our "ghost" list. Add the new buffer
2844 * to the MRU state.
2845 */
2846
2847 ASSERT(buf->b_arc_access == 0);
2848 buf->b_arc_access = ddi_get_lbolt();
2849 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2850 arc_change_state(arc_mru, buf, hash_lock);
2851
2852 } else if (buf->b_state == arc_mru) {
2853 now = ddi_get_lbolt();
2854
2855 /*
2856 * If this buffer is here because of a prefetch, then either:
2857 * - clear the flag if this is a "referencing" read
2858 * (any subsequent access will bump this into the MFU state).
2859 * or
2860 * - move the buffer to the head of the list if this is
2861 * another prefetch (to make it less likely to be evicted).
2862 */
2863 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2864 if (refcount_count(&buf->b_refcnt) == 0) {
2865 ASSERT(list_link_active(&buf->b_arc_node));
2866 } else {
2867 buf->b_flags &= ~ARC_PREFETCH;
2868 ARCSTAT_BUMP(arcstat_mru_hits);
2869 }
2870 buf->b_arc_access = now;
2871 return;
2872 }
2873
2874 /*
2875 * This buffer has been "accessed" only once so far,
2876 * but it is still in the cache. Move it to the MFU
2877 * state.
2878 */
2879 if (now > buf->b_arc_access + ARC_MINTIME) {
2880 /*
2881 * More than 125ms have passed since we
2882 * instantiated this buffer. Move it to the
2883 * most frequently used state.
2884 */
2885 buf->b_arc_access = now;
2886 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2887 arc_change_state(arc_mfu, buf, hash_lock);
2888 }
2889 ARCSTAT_BUMP(arcstat_mru_hits);
2890 } else if (buf->b_state == arc_mru_ghost) {
2891 arc_state_t *new_state;
2892 /*
2893 * This buffer has been "accessed" recently, but
2894 * was evicted from the cache. Move it to the
2895 * MFU state.
2896 */
2897
2898 if (buf->b_flags & ARC_PREFETCH) {
2899 new_state = arc_mru;
2900 if (refcount_count(&buf->b_refcnt) > 0)
2901 buf->b_flags &= ~ARC_PREFETCH;
2902 DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2903 } else {
2904 new_state = arc_mfu;
2905 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2906 }
2907
2908 buf->b_arc_access = ddi_get_lbolt();
2909 arc_change_state(new_state, buf, hash_lock);
2910
2911 ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2912 } else if (buf->b_state == arc_mfu) {
2913 /*
2914 * This buffer has been accessed more than once and is
2915 * still in the cache. Keep it in the MFU state.
2916 *
2917 * NOTE: an add_reference() that occurred when we did
2918 * the arc_read() will have kicked this off the list.
2919 * If it was a prefetch, we will explicitly move it to
2920 * the head of the list now.
2921 */
2922 if ((buf->b_flags & ARC_PREFETCH) != 0) {
2923 ASSERT(refcount_count(&buf->b_refcnt) == 0);
2924 ASSERT(list_link_active(&buf->b_arc_node));
2925 }
2926 ARCSTAT_BUMP(arcstat_mfu_hits);
2927 buf->b_arc_access = ddi_get_lbolt();
2928 } else if (buf->b_state == arc_mfu_ghost) {
2929 arc_state_t *new_state = arc_mfu;
2930 /*
2931 * This buffer has been accessed more than once but has
2932 * been evicted from the cache. Move it back to the
2933 * MFU state.
2934 */
2935
2936 if (buf->b_flags & ARC_PREFETCH) {
2937 /*
2938 * This is a prefetch access...
2939 * move this block back to the MRU state.
2940 */
2941 ASSERT0(refcount_count(&buf->b_refcnt));
2942 new_state = arc_mru;
2943 }
2944
2945 buf->b_arc_access = ddi_get_lbolt();
2946 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2947 arc_change_state(new_state, buf, hash_lock);
2948
2949 ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
2950 } else if (buf->b_state == arc_l2c_only) {
2951 /*
2952 * This buffer is on the 2nd Level ARC.
2953 */
2954
2955 buf->b_arc_access = ddi_get_lbolt();
2956 DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2957 arc_change_state(arc_mfu, buf, hash_lock);
2958 } else {
2959 ASSERT(!"invalid arc state");
2960 }
2961}
2962
2963/* a generic arc_done_func_t which you can use */
2964/* ARGSUSED */
2965void
2966arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
2967{
2968 if (zio == NULL || zio->io_error == 0)
2969 bcopy(buf->b_data, arg, buf->b_hdr->b_size);
2970 VERIFY(arc_buf_remove_ref(buf, arg));
2971}
2972
2973/* a generic arc_done_func_t */
2974void
2975arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
2976{
2977 arc_buf_t **bufp = arg;
2978 if (zio && zio->io_error) {
2979 VERIFY(arc_buf_remove_ref(buf, arg));
2980 *bufp = NULL;
2981 } else {
2982 *bufp = buf;
2983 ASSERT(buf->b_data);
2984 }
2985}
2986
2987static void
2988arc_read_done(zio_t *zio)
2989{
2990 arc_buf_hdr_t *hdr;
2991 arc_buf_t *buf;
2992 arc_buf_t *abuf; /* buffer we're assigning to callback */
2993 kmutex_t *hash_lock = NULL;
2994 arc_callback_t *callback_list, *acb;
2995 int freeable = FALSE;
2996
2997 buf = zio->io_private;
2998 hdr = buf->b_hdr;
2999
3000 /*
3001	 * The hdr was inserted into the hash table and removed from lists
3002 * prior to starting I/O. We should find this header, since
3003 * it's in the hash table, and it should be legit since it's
3004 * not possible to evict it during the I/O. The only possible
3005 * reason for it not to be found is if we were freed during the
3006 * read.
3007 */
3008 if (HDR_IN_HASH_TABLE(hdr)) {
3009 ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3010 ASSERT3U(hdr->b_dva.dva_word[0], ==,
3011 BP_IDENTITY(zio->io_bp)->dva_word[0]);
3012 ASSERT3U(hdr->b_dva.dva_word[1], ==,
3013 BP_IDENTITY(zio->io_bp)->dva_word[1]);
3014
3015 arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3016 &hash_lock);
3017
3018 ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3019 hash_lock == NULL) ||
3020 (found == hdr &&
3021 DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3022 (found == hdr && HDR_L2_READING(hdr)));
3023 }
3024
3025 hdr->b_flags &= ~ARC_L2_EVICTED;
3026 if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
3027 hdr->b_flags &= ~ARC_L2CACHE;
3028
3029 /* byteswap if necessary */
3030 callback_list = hdr->b_acb;
3031 ASSERT(callback_list != NULL);
3032 if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3033 dmu_object_byteswap_t bswap =
3034 DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3035 arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3036 byteswap_uint64_array :
3037 dmu_ot_byteswap[bswap].ob_func;
3038 func(buf->b_data, hdr->b_size);
3039 }
3040
3041 arc_cksum_compute(buf, B_FALSE);
3042#ifdef illumos
3043 arc_buf_watch(buf);
3044#endif /* illumos */
3045
3046 if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
3047 /*
3048 * Only call arc_access on anonymous buffers. This is because
3049 * if we've issued an I/O for an evicted buffer, we've already
3050 * called arc_access (to prevent any simultaneous readers from
3051 * getting confused).
3052 */
3053 arc_access(hdr, hash_lock);
3054 }
3055
3056 /* create copies of the data buffer for the callers */
3057 abuf = buf;
3058 for (acb = callback_list; acb; acb = acb->acb_next) {
3059 if (acb->acb_done) {
3060 if (abuf == NULL) {
3061 ARCSTAT_BUMP(arcstat_duplicate_reads);
3062 abuf = arc_buf_clone(buf);
3063 }
3064 acb->acb_buf = abuf;
3065 abuf = NULL;
3066 }
3067 }
3068 hdr->b_acb = NULL;
3069 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3070 ASSERT(!HDR_BUF_AVAILABLE(hdr));
3071 if (abuf == buf) {
3072 ASSERT(buf->b_efunc == NULL);
3073 ASSERT(hdr->b_datacnt == 1);
3074 hdr->b_flags |= ARC_BUF_AVAILABLE;
3075 }
3076
3077 ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3078
3079 if (zio->io_error != 0) {
3080 hdr->b_flags |= ARC_IO_ERROR;
3081 if (hdr->b_state != arc_anon)
3082 arc_change_state(arc_anon, hdr, hash_lock);
3083 if (HDR_IN_HASH_TABLE(hdr))
3084 buf_hash_remove(hdr);
3085 freeable = refcount_is_zero(&hdr->b_refcnt);
3086 }
3087
3088 /*
3089 * Broadcast before we drop the hash_lock to avoid the possibility
3090 * that the hdr (and hence the cv) might be freed before we get to
3091 * the cv_broadcast().
3092 */
3093 cv_broadcast(&hdr->b_cv);
3094
3095 if (hash_lock) {
3096 mutex_exit(hash_lock);
3097 } else {
3098 /*
3099 * This block was freed while we waited for the read to
3100 * complete. It has been removed from the hash table and
3101 * moved to the anonymous state (so that it won't show up
3102 * in the cache).
3103 */
3104 ASSERT3P(hdr->b_state, ==, arc_anon);
3105 freeable = refcount_is_zero(&hdr->b_refcnt);
3106 }
3107
3108 /* execute each callback and free its structure */
3109 while ((acb = callback_list) != NULL) {
3110 if (acb->acb_done)
3111 acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3112
3113 if (acb->acb_zio_dummy != NULL) {
3114 acb->acb_zio_dummy->io_error = zio->io_error;
3115 zio_nowait(acb->acb_zio_dummy);
3116 }
3117
3118 callback_list = acb->acb_next;
3119 kmem_free(acb, sizeof (arc_callback_t));
3120 }
3121
3122 if (freeable)
3123 arc_hdr_destroy(hdr);
3124}
3125
3126/*
3127 * "Read" the block at the specified DVA (in bp) via the
3128 * cache. If the block is found in the cache, invoke the provided
3129 * callback immediately and return. Note that the `zio' parameter
3130 * in the callback will be NULL in this case, since no IO was
3131 * required. If the block is not in the cache pass the read request
3132 * on to the spa with a substitute callback function, so that the
3133 * requested block will be added to the cache.
3134 *
3135 * If a read request arrives for a block that has a read in-progress,
3136 * either wait for the in-progress read to complete (and return the
3137 * results); or, if this is a read with a "done" func, add a record
3138 * to the read to invoke the "done" func when the read completes,
3139 * and return; or just return.
3140 *
3141 * arc_read_done() will invoke all the requested "done" functions
3142 * for readers of this block.
3143 */
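/*
 * An illustrative sketch of a typical synchronous caller (spa, bp and zb
 * come from the caller's context, and the generic arc_getbuf_func callback
 * defined above is used to hand the buffer back):
 *
 *	arc_buf_t *abuf = NULL;
 *	uint32_t aflags = ARC_WAIT;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *	if (err == 0 && abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 */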
3144int
3145arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3146 void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
3147 const zbookmark_t *zb)
3147 const zbookmark_phys_t *zb)
3148{
3149 arc_buf_hdr_t *hdr = NULL;
3150 arc_buf_t *buf = NULL;
3151 kmutex_t *hash_lock = NULL;
3152 zio_t *rzio;
3153 uint64_t guid = spa_load_guid(spa);
3154
3155 ASSERT(!BP_IS_EMBEDDED(bp) ||
3156 BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
3157
3158top:
3159 if (!BP_IS_EMBEDDED(bp)) {
3160 /*
3161		 * Embedded BPs have no DVA and require no I/O to "read";
3162		 * they skip the hash lookup and are backed by an anonymous arc buf.
3163 */
3164 hdr = buf_hash_find(guid, bp, &hash_lock);
3165 }
3166
3167 if (hdr != NULL && hdr->b_datacnt > 0) {
3168
3169 *arc_flags |= ARC_CACHED;
3170
3171 if (HDR_IO_IN_PROGRESS(hdr)) {
3172
3173 if (*arc_flags & ARC_WAIT) {
3174 cv_wait(&hdr->b_cv, hash_lock);
3175 mutex_exit(hash_lock);
3176 goto top;
3177 }
3178 ASSERT(*arc_flags & ARC_NOWAIT);
3179
3180 if (done) {
3181 arc_callback_t *acb = NULL;
3182
3183 acb = kmem_zalloc(sizeof (arc_callback_t),
3184 KM_SLEEP);
3185 acb->acb_done = done;
3186 acb->acb_private = private;
3187 if (pio != NULL)
3188 acb->acb_zio_dummy = zio_null(pio,
3189 spa, NULL, NULL, NULL, zio_flags);
3190
3191 ASSERT(acb->acb_done != NULL);
3192 acb->acb_next = hdr->b_acb;
3193 hdr->b_acb = acb;
3194 add_reference(hdr, hash_lock, private);
3195 mutex_exit(hash_lock);
3196 return (0);
3197 }
3198 mutex_exit(hash_lock);
3199 return (0);
3200 }
3201
3202 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3203
3204 if (done) {
3205 add_reference(hdr, hash_lock, private);
3206 /*
3207 * If this block is already in use, create a new
3208 * copy of the data so that we will be guaranteed
3209 * that arc_release() will always succeed.
3210 */
3211 buf = hdr->b_buf;
3212 ASSERT(buf);
3213 ASSERT(buf->b_data);
3214 if (HDR_BUF_AVAILABLE(hdr)) {
3215 ASSERT(buf->b_efunc == NULL);
3216 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3217 } else {
3218 buf = arc_buf_clone(buf);
3219 }
3220
3221 } else if (*arc_flags & ARC_PREFETCH &&
3222 refcount_count(&hdr->b_refcnt) == 0) {
3223 hdr->b_flags |= ARC_PREFETCH;
3224 }
3225 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3226 arc_access(hdr, hash_lock);
3227 if (*arc_flags & ARC_L2CACHE)
3228 hdr->b_flags |= ARC_L2CACHE;
3229 if (*arc_flags & ARC_L2COMPRESS)
3230 hdr->b_flags |= ARC_L2COMPRESS;
3231 mutex_exit(hash_lock);
3232 ARCSTAT_BUMP(arcstat_hits);
3233 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3234 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3235 data, metadata, hits);
3236
3237 if (done)
3238 done(NULL, buf, private);
3239 } else {
3240 uint64_t size = BP_GET_LSIZE(bp);
3241 arc_callback_t *acb;
3242 vdev_t *vd = NULL;
3243 uint64_t addr = 0;
3244 boolean_t devw = B_FALSE;
3245 enum zio_compress b_compress = ZIO_COMPRESS_OFF;
3246 uint64_t b_asize = 0;
3247
3248 if (hdr == NULL) {
3249 /* this block is not in the cache */
3250 arc_buf_hdr_t *exists = NULL;
3251 arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3252 buf = arc_buf_alloc(spa, size, private, type);
3253 hdr = buf->b_hdr;
3254 if (!BP_IS_EMBEDDED(bp)) {
3255 hdr->b_dva = *BP_IDENTITY(bp);
3256 hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3257 hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3258 exists = buf_hash_insert(hdr, &hash_lock);
3259 }
3260 if (exists != NULL) {
3261 /* somebody beat us to the hash insert */
3262 mutex_exit(hash_lock);
3263 buf_discard_identity(hdr);
3264 (void) arc_buf_remove_ref(buf, private);
3265 goto top; /* restart the IO request */
3266 }
3267 /* if this is a prefetch, we don't have a reference */
3268 if (*arc_flags & ARC_PREFETCH) {
3269 (void) remove_reference(hdr, hash_lock,
3270 private);
3271 hdr->b_flags |= ARC_PREFETCH;
3272 }
3273 if (*arc_flags & ARC_L2CACHE)
3274 hdr->b_flags |= ARC_L2CACHE;
3275 if (*arc_flags & ARC_L2COMPRESS)
3276 hdr->b_flags |= ARC_L2COMPRESS;
3277 if (BP_GET_LEVEL(bp) > 0)
3278 hdr->b_flags |= ARC_INDIRECT;
3279 } else {
3280 /* this block is in the ghost cache */
3281 ASSERT(GHOST_STATE(hdr->b_state));
3282 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3283 ASSERT0(refcount_count(&hdr->b_refcnt));
3284 ASSERT(hdr->b_buf == NULL);
3285
3286 /* if this is a prefetch, we don't have a reference */
3287 if (*arc_flags & ARC_PREFETCH)
3288 hdr->b_flags |= ARC_PREFETCH;
3289 else
3290 add_reference(hdr, hash_lock, private);
3291 if (*arc_flags & ARC_L2CACHE)
3292 hdr->b_flags |= ARC_L2CACHE;
3293 if (*arc_flags & ARC_L2COMPRESS)
3294 hdr->b_flags |= ARC_L2COMPRESS;
3295 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3296 buf->b_hdr = hdr;
3297 buf->b_data = NULL;
3298 buf->b_efunc = NULL;
3299 buf->b_private = NULL;
3300 buf->b_next = NULL;
3301 hdr->b_buf = buf;
3302 ASSERT(hdr->b_datacnt == 0);
3303 hdr->b_datacnt = 1;
3304 arc_get_data_buf(buf);
3305 arc_access(hdr, hash_lock);
3306 }
3307
3308 ASSERT(!GHOST_STATE(hdr->b_state));
3309
3310 acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3311 acb->acb_done = done;
3312 acb->acb_private = private;
3313
3314 ASSERT(hdr->b_acb == NULL);
3315 hdr->b_acb = acb;
3316 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3317
3318 if (hdr->b_l2hdr != NULL &&
3319 (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3320 devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3321 addr = hdr->b_l2hdr->b_daddr;
3322 b_compress = hdr->b_l2hdr->b_compress;
3323 b_asize = hdr->b_l2hdr->b_asize;
3324 /*
3325 * Lock out device removal.
3326 */
3327 if (vdev_is_dead(vd) ||
3328 !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3329 vd = NULL;
3330 }
3331
3332 if (hash_lock != NULL)
3333 mutex_exit(hash_lock);
3334
3335 /*
3336 * At this point, we have a level 1 cache miss. Try again in
3337 * L2ARC if possible.
3338 */
3339 ASSERT3U(hdr->b_size, ==, size);
3340 DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3341 uint64_t, size, zbookmark_t *, zb);
3341 uint64_t, size, zbookmark_phys_t *, zb);
3342 ARCSTAT_BUMP(arcstat_misses);
3343 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3344 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3345 data, metadata, misses);
3346#ifdef _KERNEL
3347 curthread->td_ru.ru_inblock++;
3348#endif
3349
3350 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3351 /*
3352 * Read from the L2ARC if the following are true:
3353 * 1. The L2ARC vdev was previously cached.
3354 * 2. This buffer still has L2ARC metadata.
3355 * 3. This buffer isn't currently writing to the L2ARC.
3356 * 4. The L2ARC entry wasn't evicted, which may
3357 * also have invalidated the vdev.
3358			 * 5. This isn't a prefetch issued while l2arc_noprefetch is set.
3359 */
3360 if (hdr->b_l2hdr != NULL &&
3361 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3362 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3363 l2arc_read_callback_t *cb;
3364
3365 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3366 ARCSTAT_BUMP(arcstat_l2_hits);
3367
3368 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3369 KM_SLEEP);
3370 cb->l2rcb_buf = buf;
3371 cb->l2rcb_spa = spa;
3372 cb->l2rcb_bp = *bp;
3373 cb->l2rcb_zb = *zb;
3374 cb->l2rcb_flags = zio_flags;
3375 cb->l2rcb_compress = b_compress;
3376
3377 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3378 addr + size < vd->vdev_psize -
3379 VDEV_LABEL_END_SIZE);
3380
3381 /*
3382 * l2arc read. The SCL_L2ARC lock will be
3383 * released by l2arc_read_done().
3384 * Issue a null zio if the underlying buffer
3385 * was squashed to zero size by compression.
3386 */
3387 if (b_compress == ZIO_COMPRESS_EMPTY) {
3388 rzio = zio_null(pio, spa, vd,
3389 l2arc_read_done, cb,
3390 zio_flags | ZIO_FLAG_DONT_CACHE |
3391 ZIO_FLAG_CANFAIL |
3392 ZIO_FLAG_DONT_PROPAGATE |
3393 ZIO_FLAG_DONT_RETRY);
3394 } else {
3395 rzio = zio_read_phys(pio, vd, addr,
3396 b_asize, buf->b_data,
3397 ZIO_CHECKSUM_OFF,
3398 l2arc_read_done, cb, priority,
3399 zio_flags | ZIO_FLAG_DONT_CACHE |
3400 ZIO_FLAG_CANFAIL |
3401 ZIO_FLAG_DONT_PROPAGATE |
3402 ZIO_FLAG_DONT_RETRY, B_FALSE);
3403 }
3404 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3405 zio_t *, rzio);
3406 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
3407
3408 if (*arc_flags & ARC_NOWAIT) {
3409 zio_nowait(rzio);
3410 return (0);
3411 }
3412
3413 ASSERT(*arc_flags & ARC_WAIT);
3414 if (zio_wait(rzio) == 0)
3415 return (0);
3416
3417 /* l2arc read error; goto zio_read() */
3418 } else {
3419 DTRACE_PROBE1(l2arc__miss,
3420 arc_buf_hdr_t *, hdr);
3421 ARCSTAT_BUMP(arcstat_l2_misses);
3422 if (HDR_L2_WRITING(hdr))
3423 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3424 spa_config_exit(spa, SCL_L2ARC, vd);
3425 }
3426 } else {
3427 if (vd != NULL)
3428 spa_config_exit(spa, SCL_L2ARC, vd);
3429 if (l2arc_ndev != 0) {
3430 DTRACE_PROBE1(l2arc__miss,
3431 arc_buf_hdr_t *, hdr);
3432 ARCSTAT_BUMP(arcstat_l2_misses);
3433 }
3434 }
3435
3436 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3437 arc_read_done, buf, priority, zio_flags, zb);
3438
3439 if (*arc_flags & ARC_WAIT)
3440 return (zio_wait(rzio));
3441
3442 ASSERT(*arc_flags & ARC_NOWAIT);
3443 zio_nowait(rzio);
3444 }
3445 return (0);
3446}
3447
3448void
3449arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3450{
3451 ASSERT(buf->b_hdr != NULL);
3452 ASSERT(buf->b_hdr->b_state != arc_anon);
3453 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3454 ASSERT(buf->b_efunc == NULL);
3455 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3456
3457 buf->b_efunc = func;
3458 buf->b_private = private;
3459}
3460
3461/*
3462 * Notify the arc that a block was freed, and thus will never be used again.
3463 */
3464void
3465arc_freed(spa_t *spa, const blkptr_t *bp)
3466{
3467 arc_buf_hdr_t *hdr;
3468 kmutex_t *hash_lock;
3469 uint64_t guid = spa_load_guid(spa);
3470
3471 ASSERT(!BP_IS_EMBEDDED(bp));
3472
3473 hdr = buf_hash_find(guid, bp, &hash_lock);
3474 if (hdr == NULL)
3475 return;
3476 if (HDR_BUF_AVAILABLE(hdr)) {
3477 arc_buf_t *buf = hdr->b_buf;
3478 add_reference(hdr, hash_lock, FTAG);
3479 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3480 mutex_exit(hash_lock);
3481
3482 arc_release(buf, FTAG);
3483 (void) arc_buf_remove_ref(buf, FTAG);
3484 } else {
3485 mutex_exit(hash_lock);
3486 }
3487
3488}
3489
3490/*
3491 * This is used by the DMU to let the ARC know that a buffer is
3492 * being evicted, so the ARC should clean up. If this arc buf
3493 * is not yet in the evicted state, it will be put there.
3494 */
3495int
3496arc_buf_evict(arc_buf_t *buf)
3497{
3498 arc_buf_hdr_t *hdr;
3499 kmutex_t *hash_lock;
3500 arc_buf_t **bufp;
3501 list_t *list, *evicted_list;
3502 kmutex_t *lock, *evicted_lock;
3503
3504 mutex_enter(&buf->b_evict_lock);
3505 hdr = buf->b_hdr;
3506 if (hdr == NULL) {
3507 /*
3508 * We are in arc_do_user_evicts().
3509 */
3510 ASSERT(buf->b_data == NULL);
3511 mutex_exit(&buf->b_evict_lock);
3512 return (0);
3513 } else if (buf->b_data == NULL) {
3514 arc_buf_t copy = *buf; /* structure assignment */
3515 /*
3516 * We are on the eviction list; process this buffer now
3517 * but let arc_do_user_evicts() do the reaping.
3518 */
3519 buf->b_efunc = NULL;
3520 mutex_exit(&buf->b_evict_lock);
3521 VERIFY(copy.b_efunc(&copy) == 0);
3522 return (1);
3523 }
3524 hash_lock = HDR_LOCK(hdr);
3525 mutex_enter(hash_lock);
3526 hdr = buf->b_hdr;
3527 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3528
3529 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3530 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3531
3532 /*
3533 * Pull this buffer off of the hdr
3534 */
3535 bufp = &hdr->b_buf;
3536 while (*bufp != buf)
3537 bufp = &(*bufp)->b_next;
3538 *bufp = buf->b_next;
3539
3540 ASSERT(buf->b_data != NULL);
3541 arc_buf_destroy(buf, FALSE, FALSE);
3542
3543 if (hdr->b_datacnt == 0) {
3544 arc_state_t *old_state = hdr->b_state;
3545 arc_state_t *evicted_state;
3546
3547 ASSERT(hdr->b_buf == NULL);
3548 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3549
3550 evicted_state =
3551 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3552
3553 get_buf_info(hdr, old_state, &list, &lock);
3554 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock);
3555 mutex_enter(lock);
3556 mutex_enter(evicted_lock);
3557
3558 arc_change_state(evicted_state, hdr, hash_lock);
3559 ASSERT(HDR_IN_HASH_TABLE(hdr));
3560 hdr->b_flags |= ARC_IN_HASH_TABLE;
3561 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3562
3563 mutex_exit(evicted_lock);
3564 mutex_exit(lock);
3565 }
3566 mutex_exit(hash_lock);
3567 mutex_exit(&buf->b_evict_lock);
3568
3569 VERIFY(buf->b_efunc(buf) == 0);
3570 buf->b_efunc = NULL;
3571 buf->b_private = NULL;
3572 buf->b_hdr = NULL;
3573 buf->b_next = NULL;
3574 kmem_cache_free(buf_cache, buf);
3575 return (1);
3576}
3577
3578/*
3579 * Release this buffer from the cache, making it an anonymous buffer. This
3580 * must be done after a read and prior to modifying the buffer contents.
3581 * If the buffer has more than one reference, we must make
3582 * a new hdr for the buffer.
3583 */
3584void
3585arc_release(arc_buf_t *buf, void *tag)
3586{
3587 arc_buf_hdr_t *hdr;
3588 kmutex_t *hash_lock = NULL;
3589 l2arc_buf_hdr_t *l2hdr;
3590 uint64_t buf_size;
3591
3592 /*
3593 * It would be nice to assert that if it's DMU metadata (level >
3594 * 0 || it's the dnode file), then it must be syncing context.
3595 * But we don't know that information at this level.
3596 */
3597
3598 mutex_enter(&buf->b_evict_lock);
3599 hdr = buf->b_hdr;
3600
3601 /* this buffer is not on any list */
3602 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3603
3604 if (hdr->b_state == arc_anon) {
3605 /* this buffer is already released */
3606 ASSERT(buf->b_efunc == NULL);
3607 } else {
3608 hash_lock = HDR_LOCK(hdr);
3609 mutex_enter(hash_lock);
3610 hdr = buf->b_hdr;
3611 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3612 }
3613
3614 l2hdr = hdr->b_l2hdr;
3615 if (l2hdr) {
3616 mutex_enter(&l2arc_buflist_mtx);
3617 hdr->b_l2hdr = NULL;
3618 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3619 }
3620 buf_size = hdr->b_size;
3621
3622 /*
3623 * Do we have more than one buf?
3624 */
3625 if (hdr->b_datacnt > 1) {
3626 arc_buf_hdr_t *nhdr;
3627 arc_buf_t **bufp;
3628 uint64_t blksz = hdr->b_size;
3629 uint64_t spa = hdr->b_spa;
3630 arc_buf_contents_t type = hdr->b_type;
3631 uint32_t flags = hdr->b_flags;
3632
3633 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3634 /*
3635 * Pull the data off of this hdr and attach it to
3636 * a new anonymous hdr.
3637 */
3638 (void) remove_reference(hdr, hash_lock, tag);
3639 bufp = &hdr->b_buf;
3640 while (*bufp != buf)
3641 bufp = &(*bufp)->b_next;
3642 *bufp = buf->b_next;
3643 buf->b_next = NULL;
3644
3645 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3646 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3647 if (refcount_is_zero(&hdr->b_refcnt)) {
3648 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3649 ASSERT3U(*size, >=, hdr->b_size);
3650 atomic_add_64(size, -hdr->b_size);
3651 }
3652
3653 /*
3654 * We're releasing a duplicate user data buffer, update
3655 * our statistics accordingly.
3656 */
3657 if (hdr->b_type == ARC_BUFC_DATA) {
3658 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3659 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3660 -hdr->b_size);
3661 }
3662 hdr->b_datacnt -= 1;
3663 arc_cksum_verify(buf);
3664#ifdef illumos
3665 arc_buf_unwatch(buf);
3666#endif /* illumos */
3667
3668 mutex_exit(hash_lock);
3669
3670 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3671 nhdr->b_size = blksz;
3672 nhdr->b_spa = spa;
3673 nhdr->b_type = type;
3674 nhdr->b_buf = buf;
3675 nhdr->b_state = arc_anon;
3676 nhdr->b_arc_access = 0;
3677 nhdr->b_flags = flags & ARC_L2_WRITING;
3678 nhdr->b_l2hdr = NULL;
3679 nhdr->b_datacnt = 1;
3680 nhdr->b_freeze_cksum = NULL;
3681 (void) refcount_add(&nhdr->b_refcnt, tag);
3682 buf->b_hdr = nhdr;
3683 mutex_exit(&buf->b_evict_lock);
3684 atomic_add_64(&arc_anon->arcs_size, blksz);
3685 } else {
3686 mutex_exit(&buf->b_evict_lock);
3687 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3688 ASSERT(!list_link_active(&hdr->b_arc_node));
3689 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3690 if (hdr->b_state != arc_anon)
3691 arc_change_state(arc_anon, hdr, hash_lock);
3692 hdr->b_arc_access = 0;
3693 if (hash_lock)
3694 mutex_exit(hash_lock);
3695
3696 buf_discard_identity(hdr);
3697 arc_buf_thaw(buf);
3698 }
3699 buf->b_efunc = NULL;
3700 buf->b_private = NULL;
3701
3702 if (l2hdr) {
3703 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3704 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
3705 -l2hdr->b_asize, 0, 0);
3706 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3707 hdr->b_size, 0);
3708 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3709 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3710 mutex_exit(&l2arc_buflist_mtx);
3711 }
3712}
3713
3714int
3715arc_released(arc_buf_t *buf)
3716{
3717 int released;
3718
3719 mutex_enter(&buf->b_evict_lock);
3720 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3721 mutex_exit(&buf->b_evict_lock);
3722 return (released);
3723}
3724
3725int
3726arc_has_callback(arc_buf_t *buf)
3727{
3728 int callback;
3729
3730 mutex_enter(&buf->b_evict_lock);
3731 callback = (buf->b_efunc != NULL);
3732 mutex_exit(&buf->b_evict_lock);
3733 return (callback);
3734}
3735
3736#ifdef ZFS_DEBUG
3737int
3738arc_referenced(arc_buf_t *buf)
3739{
3740 int referenced;
3741
3742 mutex_enter(&buf->b_evict_lock);
3743 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3744 mutex_exit(&buf->b_evict_lock);
3745 return (referenced);
3746}
3747#endif
3748
3749static void
3750arc_write_ready(zio_t *zio)
3751{
3752 arc_write_callback_t *callback = zio->io_private;
3753 arc_buf_t *buf = callback->awcb_buf;
3754 arc_buf_hdr_t *hdr = buf->b_hdr;
3755
3756 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3342 ARCSTAT_BUMP(arcstat_misses);
3343 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3344 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3345 data, metadata, misses);
3346#ifdef _KERNEL
3347 curthread->td_ru.ru_inblock++;
3348#endif
3349
3350 if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3351 /*
3352 * Read from the L2ARC if the following are true:
3353 * 1. The L2ARC vdev was previously cached.
3354 * 2. This buffer still has L2ARC metadata.
3355	 * 3. This buffer isn't currently being written to the L2ARC.
3356 * 4. The L2ARC entry wasn't evicted, which may
3357 * also have invalidated the vdev.
3358	 * 5. This isn't a prefetch buffer with l2arc_noprefetch set.
3359 */
3360 if (hdr->b_l2hdr != NULL &&
3361 !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3362 !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3363 l2arc_read_callback_t *cb;
3364
3365 DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3366 ARCSTAT_BUMP(arcstat_l2_hits);
3367
3368 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3369 KM_SLEEP);
3370 cb->l2rcb_buf = buf;
3371 cb->l2rcb_spa = spa;
3372 cb->l2rcb_bp = *bp;
3373 cb->l2rcb_zb = *zb;
3374 cb->l2rcb_flags = zio_flags;
3375 cb->l2rcb_compress = b_compress;
3376
3377 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3378 addr + size < vd->vdev_psize -
3379 VDEV_LABEL_END_SIZE);
3380
3381 /*
3382 * l2arc read. The SCL_L2ARC lock will be
3383 * released by l2arc_read_done().
3384 * Issue a null zio if the underlying buffer
3385 * was squashed to zero size by compression.
3386 */
3387 if (b_compress == ZIO_COMPRESS_EMPTY) {
3388 rzio = zio_null(pio, spa, vd,
3389 l2arc_read_done, cb,
3390 zio_flags | ZIO_FLAG_DONT_CACHE |
3391 ZIO_FLAG_CANFAIL |
3392 ZIO_FLAG_DONT_PROPAGATE |
3393 ZIO_FLAG_DONT_RETRY);
3394 } else {
3395 rzio = zio_read_phys(pio, vd, addr,
3396 b_asize, buf->b_data,
3397 ZIO_CHECKSUM_OFF,
3398 l2arc_read_done, cb, priority,
3399 zio_flags | ZIO_FLAG_DONT_CACHE |
3400 ZIO_FLAG_CANFAIL |
3401 ZIO_FLAG_DONT_PROPAGATE |
3402 ZIO_FLAG_DONT_RETRY, B_FALSE);
3403 }
3404 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3405 zio_t *, rzio);
3406 ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
3407
3408 if (*arc_flags & ARC_NOWAIT) {
3409 zio_nowait(rzio);
3410 return (0);
3411 }
3412
3413 ASSERT(*arc_flags & ARC_WAIT);
3414 if (zio_wait(rzio) == 0)
3415 return (0);
3416
3417 /* l2arc read error; goto zio_read() */
3418 } else {
3419 DTRACE_PROBE1(l2arc__miss,
3420 arc_buf_hdr_t *, hdr);
3421 ARCSTAT_BUMP(arcstat_l2_misses);
3422 if (HDR_L2_WRITING(hdr))
3423 ARCSTAT_BUMP(arcstat_l2_rw_clash);
3424 spa_config_exit(spa, SCL_L2ARC, vd);
3425 }
3426 } else {
3427 if (vd != NULL)
3428 spa_config_exit(spa, SCL_L2ARC, vd);
3429 if (l2arc_ndev != 0) {
3430 DTRACE_PROBE1(l2arc__miss,
3431 arc_buf_hdr_t *, hdr);
3432 ARCSTAT_BUMP(arcstat_l2_misses);
3433 }
3434 }
3435
3436 rzio = zio_read(pio, spa, bp, buf->b_data, size,
3437 arc_read_done, buf, priority, zio_flags, zb);
3438
3439 if (*arc_flags & ARC_WAIT)
3440 return (zio_wait(rzio));
3441
3442 ASSERT(*arc_flags & ARC_NOWAIT);
3443 zio_nowait(rzio);
3444 }
3445 return (0);
3446}
3447
3448void
3449arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3450{
3451 ASSERT(buf->b_hdr != NULL);
3452 ASSERT(buf->b_hdr->b_state != arc_anon);
3453 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3454 ASSERT(buf->b_efunc == NULL);
3455 ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3456
3457 buf->b_efunc = func;
3458 buf->b_private = private;
3459}
3460
3461/*
3462 * Notify the arc that a block was freed, and thus will never be used again.
3463 */
3464void
3465arc_freed(spa_t *spa, const blkptr_t *bp)
3466{
3467 arc_buf_hdr_t *hdr;
3468 kmutex_t *hash_lock;
3469 uint64_t guid = spa_load_guid(spa);
3470
3471 ASSERT(!BP_IS_EMBEDDED(bp));
3472
3473 hdr = buf_hash_find(guid, bp, &hash_lock);
3474 if (hdr == NULL)
3475 return;
3476 if (HDR_BUF_AVAILABLE(hdr)) {
3477 arc_buf_t *buf = hdr->b_buf;
3478 add_reference(hdr, hash_lock, FTAG);
3479 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3480 mutex_exit(hash_lock);
3481
3482 arc_release(buf, FTAG);
3483 (void) arc_buf_remove_ref(buf, FTAG);
3484 } else {
3485 mutex_exit(hash_lock);
3486 }
3487
3488}
3489
3490/*
3491 * This is used by the DMU to let the ARC know that a buffer is
3492 * being evicted, so the ARC should clean up. If this arc buf
3493 * is not yet in the evicted state, it will be put there.
3494 */
3495int
3496arc_buf_evict(arc_buf_t *buf)
3497{
3498 arc_buf_hdr_t *hdr;
3499 kmutex_t *hash_lock;
3500 arc_buf_t **bufp;
3501 list_t *list, *evicted_list;
3502 kmutex_t *lock, *evicted_lock;
3503
3504 mutex_enter(&buf->b_evict_lock);
3505 hdr = buf->b_hdr;
3506 if (hdr == NULL) {
3507 /*
3508 * We are in arc_do_user_evicts().
3509 */
3510 ASSERT(buf->b_data == NULL);
3511 mutex_exit(&buf->b_evict_lock);
3512 return (0);
3513 } else if (buf->b_data == NULL) {
3514 arc_buf_t copy = *buf; /* structure assignment */
3515 /*
3516 * We are on the eviction list; process this buffer now
3517 * but let arc_do_user_evicts() do the reaping.
3518 */
3519 buf->b_efunc = NULL;
3520 mutex_exit(&buf->b_evict_lock);
3521 VERIFY(copy.b_efunc(&copy) == 0);
3522 return (1);
3523 }
3524 hash_lock = HDR_LOCK(hdr);
3525 mutex_enter(hash_lock);
3526 hdr = buf->b_hdr;
3527 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3528
3529 ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3530 ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3531
3532 /*
3533 * Pull this buffer off of the hdr
3534 */
3535 bufp = &hdr->b_buf;
3536 while (*bufp != buf)
3537 bufp = &(*bufp)->b_next;
3538 *bufp = buf->b_next;
3539
3540 ASSERT(buf->b_data != NULL);
3541 arc_buf_destroy(buf, FALSE, FALSE);
3542
3543 if (hdr->b_datacnt == 0) {
3544 arc_state_t *old_state = hdr->b_state;
3545 arc_state_t *evicted_state;
3546
3547 ASSERT(hdr->b_buf == NULL);
3548 ASSERT(refcount_is_zero(&hdr->b_refcnt));
3549
3550 evicted_state =
3551 (old_state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
3552
3553 get_buf_info(hdr, old_state, &list, &lock);
3554 get_buf_info(hdr, evicted_state, &evicted_list, &evicted_lock);
3555 mutex_enter(lock);
3556 mutex_enter(evicted_lock);
3557
3558 arc_change_state(evicted_state, hdr, hash_lock);
3559 ASSERT(HDR_IN_HASH_TABLE(hdr));
3560 hdr->b_flags |= ARC_IN_HASH_TABLE;
3561 hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3562
3563 mutex_exit(evicted_lock);
3564 mutex_exit(lock);
3565 }
3566 mutex_exit(hash_lock);
3567 mutex_exit(&buf->b_evict_lock);
3568
3569 VERIFY(buf->b_efunc(buf) == 0);
3570 buf->b_efunc = NULL;
3571 buf->b_private = NULL;
3572 buf->b_hdr = NULL;
3573 buf->b_next = NULL;
3574 kmem_cache_free(buf_cache, buf);
3575 return (1);
3576}
3577
3578/*
3579 * Release this buffer from the cache, making it an anonymous buffer. This
3580 * must be done after a read and prior to modifying the buffer contents.
3581 * If the buffer has more than one reference, we must make
3582 * a new hdr for the buffer.
3583 */
3584void
3585arc_release(arc_buf_t *buf, void *tag)
3586{
3587 arc_buf_hdr_t *hdr;
3588 kmutex_t *hash_lock = NULL;
3589 l2arc_buf_hdr_t *l2hdr;
3590 uint64_t buf_size;
3591
3592 /*
3593 * It would be nice to assert that if it's DMU metadata (level >
3594 * 0 || it's the dnode file), then it must be syncing context.
3595 * But we don't know that information at this level.
3596 */
3597
3598 mutex_enter(&buf->b_evict_lock);
3599 hdr = buf->b_hdr;
3600
3601 /* this buffer is not on any list */
3602 ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3603
3604 if (hdr->b_state == arc_anon) {
3605 /* this buffer is already released */
3606 ASSERT(buf->b_efunc == NULL);
3607 } else {
3608 hash_lock = HDR_LOCK(hdr);
3609 mutex_enter(hash_lock);
3610 hdr = buf->b_hdr;
3611 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3612 }
3613
3614 l2hdr = hdr->b_l2hdr;
3615 if (l2hdr) {
3616 mutex_enter(&l2arc_buflist_mtx);
3617 hdr->b_l2hdr = NULL;
3618 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3619 }
3620 buf_size = hdr->b_size;
3621
3622 /*
3623 * Do we have more than one buf?
3624 */
3625 if (hdr->b_datacnt > 1) {
3626 arc_buf_hdr_t *nhdr;
3627 arc_buf_t **bufp;
3628 uint64_t blksz = hdr->b_size;
3629 uint64_t spa = hdr->b_spa;
3630 arc_buf_contents_t type = hdr->b_type;
3631 uint32_t flags = hdr->b_flags;
3632
3633 ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3634 /*
3635 * Pull the data off of this hdr and attach it to
3636 * a new anonymous hdr.
3637 */
3638 (void) remove_reference(hdr, hash_lock, tag);
3639 bufp = &hdr->b_buf;
3640 while (*bufp != buf)
3641 bufp = &(*bufp)->b_next;
3642 *bufp = buf->b_next;
3643 buf->b_next = NULL;
3644
3645 ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3646 atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3647 if (refcount_is_zero(&hdr->b_refcnt)) {
3648 uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3649 ASSERT3U(*size, >=, hdr->b_size);
3650 atomic_add_64(size, -hdr->b_size);
3651 }
3652
3653 /*
3654	 * We're releasing a duplicate user data buffer, so update
3655	 * our statistics accordingly.
3656 */
3657 if (hdr->b_type == ARC_BUFC_DATA) {
3658 ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3659 ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3660 -hdr->b_size);
3661 }
3662 hdr->b_datacnt -= 1;
3663 arc_cksum_verify(buf);
3664#ifdef illumos
3665 arc_buf_unwatch(buf);
3666#endif /* illumos */
3667
3668 mutex_exit(hash_lock);
3669
3670 nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3671 nhdr->b_size = blksz;
3672 nhdr->b_spa = spa;
3673 nhdr->b_type = type;
3674 nhdr->b_buf = buf;
3675 nhdr->b_state = arc_anon;
3676 nhdr->b_arc_access = 0;
3677 nhdr->b_flags = flags & ARC_L2_WRITING;
3678 nhdr->b_l2hdr = NULL;
3679 nhdr->b_datacnt = 1;
3680 nhdr->b_freeze_cksum = NULL;
3681 (void) refcount_add(&nhdr->b_refcnt, tag);
3682 buf->b_hdr = nhdr;
3683 mutex_exit(&buf->b_evict_lock);
3684 atomic_add_64(&arc_anon->arcs_size, blksz);
3685 } else {
3686 mutex_exit(&buf->b_evict_lock);
3687 ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3688 ASSERT(!list_link_active(&hdr->b_arc_node));
3689 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3690 if (hdr->b_state != arc_anon)
3691 arc_change_state(arc_anon, hdr, hash_lock);
3692 hdr->b_arc_access = 0;
3693 if (hash_lock)
3694 mutex_exit(hash_lock);
3695
3696 buf_discard_identity(hdr);
3697 arc_buf_thaw(buf);
3698 }
3699 buf->b_efunc = NULL;
3700 buf->b_private = NULL;
3701
3702 if (l2hdr) {
3703 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3704 vdev_space_update(l2hdr->b_dev->l2ad_vdev,
3705 -l2hdr->b_asize, 0, 0);
3706 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3707 hdr->b_size, 0);
3708 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3709 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3710 mutex_exit(&l2arc_buflist_mtx);
3711 }
3712}
3713
3714int
3715arc_released(arc_buf_t *buf)
3716{
3717 int released;
3718
3719 mutex_enter(&buf->b_evict_lock);
3720 released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3721 mutex_exit(&buf->b_evict_lock);
3722 return (released);
3723}
3724
3725int
3726arc_has_callback(arc_buf_t *buf)
3727{
3728 int callback;
3729
3730 mutex_enter(&buf->b_evict_lock);
3731 callback = (buf->b_efunc != NULL);
3732 mutex_exit(&buf->b_evict_lock);
3733 return (callback);
3734}
3735
3736#ifdef ZFS_DEBUG
3737int
3738arc_referenced(arc_buf_t *buf)
3739{
3740 int referenced;
3741
3742 mutex_enter(&buf->b_evict_lock);
3743 referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3744 mutex_exit(&buf->b_evict_lock);
3745 return (referenced);
3746}
3747#endif
3748
3749static void
3750arc_write_ready(zio_t *zio)
3751{
3752 arc_write_callback_t *callback = zio->io_private;
3753 arc_buf_t *buf = callback->awcb_buf;
3754 arc_buf_hdr_t *hdr = buf->b_hdr;
3755
3756 ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3757 callback->awcb_ready(zio, buf, callback->awcb_private);
3758
3759 /*
3760 * If the IO is already in progress, then this is a re-write
3761 * attempt, so we need to thaw and re-compute the cksum.
3762 * It is the responsibility of the callback to handle the
3763 * accounting for any re-write attempt.
3764 */
3765 if (HDR_IO_IN_PROGRESS(hdr)) {
3766 mutex_enter(&hdr->b_freeze_lock);
3767 if (hdr->b_freeze_cksum != NULL) {
3768 kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3769 hdr->b_freeze_cksum = NULL;
3770 }
3771 mutex_exit(&hdr->b_freeze_lock);
3772 }
3773 arc_cksum_compute(buf, B_FALSE);
3774 hdr->b_flags |= ARC_IO_IN_PROGRESS;
3775}
3776
3777/*
3778 * The SPA calls this callback for each physical write that happens on behalf
3779 * of a logical write. See the comment in dbuf_write_physdone() for details.
3780 */
3781static void
3782arc_write_physdone(zio_t *zio)
3783{
3784 arc_write_callback_t *cb = zio->io_private;
3785 if (cb->awcb_physdone != NULL)
3786 cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3787}
3788
3789static void
3790arc_write_done(zio_t *zio)
3791{
3792 arc_write_callback_t *callback = zio->io_private;
3793 arc_buf_t *buf = callback->awcb_buf;
3794 arc_buf_hdr_t *hdr = buf->b_hdr;
3795
3796 ASSERT(hdr->b_acb == NULL);
3797
3798 if (zio->io_error == 0) {
3799 if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
3800 buf_discard_identity(hdr);
3801 } else {
3802 hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3803 hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3804 hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3805 }
3806 } else {
3807 ASSERT(BUF_EMPTY(hdr));
3808 }
3809
3810 /*
3811 * If the block to be written was all-zero or compressed enough to be
3812 * embedded in the BP, no write was performed so there will be no
3813 * dva/birth/checksum. The buffer must therefore remain anonymous
3814 * (and uncached).
3815 */
3816 if (!BUF_EMPTY(hdr)) {
3817 arc_buf_hdr_t *exists;
3818 kmutex_t *hash_lock;
3819
3820 ASSERT(zio->io_error == 0);
3821
3822 arc_cksum_verify(buf);
3823
3824 exists = buf_hash_insert(hdr, &hash_lock);
3825 if (exists) {
3826 /*
3827 * This can only happen if we overwrite for
3828 * sync-to-convergence, because we remove
3829 * buffers from the hash table when we arc_free().
3830 */
3831 if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3832 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3833 panic("bad overwrite, hdr=%p exists=%p",
3834 (void *)hdr, (void *)exists);
3835 ASSERT(refcount_is_zero(&exists->b_refcnt));
3836 arc_change_state(arc_anon, exists, hash_lock);
3837 mutex_exit(hash_lock);
3838 arc_hdr_destroy(exists);
3839 exists = buf_hash_insert(hdr, &hash_lock);
3840 ASSERT3P(exists, ==, NULL);
3841 } else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3842 /* nopwrite */
3843 ASSERT(zio->io_prop.zp_nopwrite);
3844 if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3845 panic("bad nopwrite, hdr=%p exists=%p",
3846 (void *)hdr, (void *)exists);
3847 } else {
3848 /* Dedup */
3849 ASSERT(hdr->b_datacnt == 1);
3850 ASSERT(hdr->b_state == arc_anon);
3851 ASSERT(BP_GET_DEDUP(zio->io_bp));
3852 ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3853 }
3854 }
3855 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3856 /* if it's not anon, we are doing a scrub */
3857 if (!exists && hdr->b_state == arc_anon)
3858 arc_access(hdr, hash_lock);
3859 mutex_exit(hash_lock);
3860 } else {
3861 hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3862 }
3863
3864 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3865 callback->awcb_done(zio, buf, callback->awcb_private);
3866
3867 kmem_free(callback, sizeof (arc_write_callback_t));
3868}
3869
3870zio_t *
3871arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3872 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3873 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3874 arc_done_func_t *done, void *private, zio_priority_t priority,
3875 int zio_flags, const zbookmark_t *zb)
3875 int zio_flags, const zbookmark_phys_t *zb)
3876{
3877 arc_buf_hdr_t *hdr = buf->b_hdr;
3878 arc_write_callback_t *callback;
3879 zio_t *zio;
3880
3881 ASSERT(ready != NULL);
3882 ASSERT(done != NULL);
3883 ASSERT(!HDR_IO_ERROR(hdr));
3884 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3885 ASSERT(hdr->b_acb == NULL);
3886 if (l2arc)
3887 hdr->b_flags |= ARC_L2CACHE;
3888 if (l2arc_compress)
3889 hdr->b_flags |= ARC_L2COMPRESS;
3890 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3891 callback->awcb_ready = ready;
3892 callback->awcb_physdone = physdone;
3893 callback->awcb_done = done;
3894 callback->awcb_private = private;
3895 callback->awcb_buf = buf;
3896
3897 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3898 arc_write_ready, arc_write_physdone, arc_write_done, callback,
3899 priority, zio_flags, zb);
3900
3901 return (zio);
3902}
3903
3904static int
3905arc_memory_throttle(uint64_t reserve, uint64_t txg)
3906{
3907#ifdef _KERNEL
3908 uint64_t available_memory =
3909 ptoa((uintmax_t)vm_cnt.v_free_count + vm_cnt.v_cache_count);
3910 static uint64_t page_load = 0;
3911 static uint64_t last_txg = 0;
3912
3913#ifdef sun
3914#if defined(__i386)
3915 available_memory =
3916 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3917#endif
3918#endif /* sun */
3919
3920 if (vm_cnt.v_free_count + vm_cnt.v_cache_count >
3921 (uint64_t)physmem * arc_lotsfree_percent / 100)
3922 return (0);
3923
3924 if (txg > last_txg) {
3925 last_txg = txg;
3926 page_load = 0;
3927 }
3928 /*
3929	 * If we are in pageout, we know that memory is already tight and
3930	 * the ARC is already going to be evicting, so we just want to
3931	 * continue to let page writes occur as quickly as possible.
3932 */
3933 if (curproc == pageproc) {
3934 if (page_load > available_memory / 4)
3935 return (SET_ERROR(ERESTART));
3936 /* Note: reserve is inflated, so we deflate */
3937 page_load += reserve / 8;
3938 return (0);
3939 } else if (page_load > 0 && arc_reclaim_needed()) {
3940 /* memory is low, delay before restarting */
3941 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3942 return (SET_ERROR(EAGAIN));
3943 }
3944 page_load = 0;
3945#endif
3946 return (0);
3947}
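
/*
 * To make the thresholds above concrete (illustrative numbers only):
 * assume roughly 1 GB of free + cache pages, so available_memory is
 * about 1 GB.  The pageout process keeps charging reserve / 8 per call
 * until its per-txg page_load exceeds 256 MB (available_memory / 4), at
 * which point it gets ERESTART and must retry; other threads get EAGAIN
 * (delay and restart) whenever page_load is non-zero and
 * arc_reclaim_needed() still reports memory pressure.
 */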
3948
3949void
3950arc_tempreserve_clear(uint64_t reserve)
3951{
3952 atomic_add_64(&arc_tempreserve, -reserve);
3953 ASSERT((int64_t)arc_tempreserve >= 0);
3954}
3955
3956int
3957arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3958{
3959 int error;
3960 uint64_t anon_size;
3961
3962 if (reserve > arc_c/4 && !arc_no_grow)
3963 arc_c = MIN(arc_c_max, reserve * 4);
3964 if (reserve > arc_c)
3965 return (SET_ERROR(ENOMEM));
3966
3967 /*
3968 * Don't count loaned bufs as in flight dirty data to prevent long
3969 * network delays from blocking transactions that are ready to be
3970 * assigned to a txg.
3971 */
3972 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3973
3974 /*
3975	 * Writes will almost always require additional memory allocations
3976	 * in order to compress/encrypt/etc. the data. We therefore need to
3977 * make sure that there is sufficient available memory for this.
3978 */
3979 error = arc_memory_throttle(reserve, txg);
3980 if (error != 0)
3981 return (error);
3982
3983 /*
3984 * Throttle writes when the amount of dirty data in the cache
3985 * gets too large. We try to keep the cache less than half full
3986 * of dirty blocks so that our sync times don't grow too large.
3987 * Note: if two requests come in concurrently, we might let them
3988 * both succeed, when one of them should fail. Not a huge deal.
3989 */
3990
3991 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3992 anon_size > arc_c / 4) {
3993 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3994 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3995 arc_tempreserve>>10,
3996 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3997 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3998 reserve>>10, arc_c>>10);
3999 return (SET_ERROR(ERESTART));
4000 }
4001 atomic_add_64(&arc_tempreserve, reserve);
4002 return (0);
4003}
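
/*
 * A worked example of the write throttle above, assuming arc_c == 4 GB:
 * a reservation fails with ERESTART only when both
 *
 *	reserve + arc_tempreserve + anon_size > 2 GB	(arc_c / 2)
 *	anon_size > 1 GB				(arc_c / 4)
 *
 * hold, so small amounts of dirty data are always admitted and the
 * cache is kept from filling much more than half way with dirty
 * (anonymous) blocks.
 */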
4004
4005static kmutex_t arc_lowmem_lock;
4006#ifdef _KERNEL
4007static eventhandler_tag arc_event_lowmem = NULL;
4008
4009static void
4010arc_lowmem(void *arg __unused, int howto __unused)
4011{
4012
4013 /* Serialize access via arc_lowmem_lock. */
4014 mutex_enter(&arc_lowmem_lock);
4015 mutex_enter(&arc_reclaim_thr_lock);
4016 needfree = 1;
4017 cv_signal(&arc_reclaim_thr_cv);
4018
4019 /*
4020 * It is unsafe to block here in arbitrary threads, because we can come
4021	 * here from ARC itself and may hold ARC locks, thus risking a deadlock
4022	 * with the ARC reclaim thread.
4023 */
4024 if (curproc == pageproc) {
4025 while (needfree)
4026 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
4027 }
4028 mutex_exit(&arc_reclaim_thr_lock);
4029 mutex_exit(&arc_lowmem_lock);
4030}
4031#endif
4032
4033void
4034arc_init(void)
4035{
4036 int i, prefetch_tunable_set = 0;
4037
4038 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4039 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
4040 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
4041
4042 /* Convert seconds to clock ticks */
4043 arc_min_prefetch_lifespan = 1 * hz;
4044
4045 /* Start out with 1/8 of all memory */
4046 arc_c = kmem_size() / 8;
4047
4048#ifdef sun
4049#ifdef _KERNEL
4050 /*
4051 * On architectures where the physical memory can be larger
4052 * than the addressable space (intel in 32-bit mode), we may
4053 * need to limit the cache to 1/8 of VM size.
4054 */
4055 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4056#endif
4057#endif /* sun */
4058 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
4059 arc_c_min = MAX(arc_c / 4, 64<<18);
4060	/* set max to 5/8 of all memory, or all but 1GB, whichever is more */
4061 if (arc_c * 8 >= 1<<30)
4062 arc_c_max = (arc_c * 8) - (1<<30);
4063 else
4064 arc_c_max = arc_c_min;
4065 arc_c_max = MAX(arc_c * 5, arc_c_max);
4066
4067#ifdef _KERNEL
4068 /*
4069 * Allow the tunables to override our calculations if they are
4070 * reasonable (ie. over 16MB)
4071	 * reasonable (i.e. over 16MB)
4072 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
4073 arc_c_max = zfs_arc_max;
4074 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
4075 arc_c_min = zfs_arc_min;
4076#endif
4077
4078 arc_c = arc_c_max;
4079 arc_p = (arc_c >> 1);
4080
4081 /* limit meta-data to 1/4 of the arc capacity */
4082 arc_meta_limit = arc_c_max / 4;
4083
4084 /* Allow the tunable to override if it is reasonable */
4085 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4086 arc_meta_limit = zfs_arc_meta_limit;
4087
4088 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4089 arc_c_min = arc_meta_limit / 2;
4090
4091 if (zfs_arc_grow_retry > 0)
4092 arc_grow_retry = zfs_arc_grow_retry;
4093
4094 if (zfs_arc_shrink_shift > 0)
4095 arc_shrink_shift = zfs_arc_shrink_shift;
4096
4097 if (zfs_arc_p_min_shift > 0)
4098 arc_p_min_shift = zfs_arc_p_min_shift;
4099
4100	/* if kmem_flags are set, let's try to use less memory */
4101 if (kmem_debugging())
4102 arc_c = arc_c / 2;
4103 if (arc_c < arc_c_min)
4104 arc_c = arc_c_min;
4105
4106 zfs_arc_min = arc_c_min;
4107 zfs_arc_max = arc_c_max;
4108
4109 arc_anon = &ARC_anon;
4110 arc_mru = &ARC_mru;
4111 arc_mru_ghost = &ARC_mru_ghost;
4112 arc_mfu = &ARC_mfu;
4113 arc_mfu_ghost = &ARC_mfu_ghost;
4114 arc_l2c_only = &ARC_l2c_only;
4115 arc_size = 0;
4116
4117 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4118 mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4119 NULL, MUTEX_DEFAULT, NULL);
4120 mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4121 NULL, MUTEX_DEFAULT, NULL);
4122 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4123 NULL, MUTEX_DEFAULT, NULL);
4124 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4125 NULL, MUTEX_DEFAULT, NULL);
4126 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4127 NULL, MUTEX_DEFAULT, NULL);
4128 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4129 NULL, MUTEX_DEFAULT, NULL);
4130
4131 list_create(&arc_mru->arcs_lists[i],
4132 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4133 list_create(&arc_mru_ghost->arcs_lists[i],
4134 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4135 list_create(&arc_mfu->arcs_lists[i],
4136 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4137 list_create(&arc_mfu_ghost->arcs_lists[i],
4138 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4139 list_create(&arc_mfu_ghost->arcs_lists[i],
4140 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4141 list_create(&arc_l2c_only->arcs_lists[i],
4142 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4143 }
4144
4145 buf_init();
4146
4147 arc_thread_exit = 0;
4148 arc_eviction_list = NULL;
4149 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4150 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4151
4152 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4153 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4154
4155 if (arc_ksp != NULL) {
4156 arc_ksp->ks_data = &arc_stats;
4157 kstat_install(arc_ksp);
4158 }
4159
4160 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4161 TS_RUN, minclsyspri);
4162
4163#ifdef _KERNEL
4164 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4165 EVENTHANDLER_PRI_FIRST);
4166#endif
4167
4168 arc_dead = FALSE;
4169 arc_warm = B_FALSE;
4170
4171 /*
4172 * Calculate maximum amount of dirty data per pool.
4173 *
4174 * If it has been set by /etc/system, take that.
4175 * Otherwise, use a percentage of physical memory defined by
4176 * zfs_dirty_data_max_percent (default 10%) with a cap at
4177 * zfs_dirty_data_max_max (default 4GB).
4178 */
4179 if (zfs_dirty_data_max == 0) {
4180 zfs_dirty_data_max = ptob(physmem) *
4181 zfs_dirty_data_max_percent / 100;
4182 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
4183 zfs_dirty_data_max_max);
4184 }
4185
4186#ifdef _KERNEL
4187 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4188 prefetch_tunable_set = 1;
4189
4190#ifdef __i386__
4191 if (prefetch_tunable_set == 0) {
4192 printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4193 "-- to enable,\n");
4194 printf(" add \"vfs.zfs.prefetch_disable=0\" "
4195 "to /boot/loader.conf.\n");
4196 zfs_prefetch_disable = 1;
4197 }
4198#else
4199 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4200 prefetch_tunable_set == 0) {
4201 printf("ZFS NOTICE: Prefetch is disabled by default if less "
4202 "than 4GB of RAM is present;\n"
4203 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
4204 "to /boot/loader.conf.\n");
4205 zfs_prefetch_disable = 1;
4206 }
4207#endif
4208 /* Warn about ZFS memory and address space requirements. */
4209 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4210 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4211 "expect unstable behavior.\n");
4212 }
4213 if (kmem_size() < 512 * (1 << 20)) {
4214 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4215 "expect unstable behavior.\n");
4216 printf(" Consider tuning vm.kmem_size and "
4217 "vm.kmem_size_max\n");
4218 printf(" in /boot/loader.conf.\n");
4219 }
4220#endif
4221}
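
/*
 * A worked example of the default sizing above, assuming kmem_size()
 * returns 16 GB and no loader tunables are set: arc_c starts at 2 GB
 * (1/8 of memory), arc_c_min becomes MAX(512 MB, 16 MB) = 512 MB, and
 * arc_c_max becomes MAX(10 GB, 16 GB - 1 GB) = 15 GB.  arc_c is then
 * raised to arc_c_max, arc_p starts at half of that (7.5 GB),
 * arc_meta_limit is 15 GB / 4 = 3.75 GB, and because arc_c_min is below
 * arc_meta_limit / 2 it is bumped up to about 1.9 GB.
 */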
4222
4223void
4224arc_fini(void)
4225{
4226 int i;
4227
4228 mutex_enter(&arc_reclaim_thr_lock);
4229 arc_thread_exit = 1;
4230 cv_signal(&arc_reclaim_thr_cv);
4231 while (arc_thread_exit != 0)
4232 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4233 mutex_exit(&arc_reclaim_thr_lock);
4234
4235 arc_flush(NULL);
4236
4237 arc_dead = TRUE;
4238
4239 if (arc_ksp != NULL) {
4240 kstat_delete(arc_ksp);
4241 arc_ksp = NULL;
4242 }
4243
4244 mutex_destroy(&arc_eviction_mtx);
4245 mutex_destroy(&arc_reclaim_thr_lock);
4246 cv_destroy(&arc_reclaim_thr_cv);
4247
4248 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4249 list_destroy(&arc_mru->arcs_lists[i]);
4250 list_destroy(&arc_mru_ghost->arcs_lists[i]);
4251 list_destroy(&arc_mfu->arcs_lists[i]);
4252 list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4253 list_destroy(&arc_l2c_only->arcs_lists[i]);
4254
4255 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4256 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4257 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4258 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4259 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4260 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4261 }
4262
4263 buf_fini();
4264
4265 ASSERT(arc_loaned_bytes == 0);
4266
4267 mutex_destroy(&arc_lowmem_lock);
4268#ifdef _KERNEL
4269 if (arc_event_lowmem != NULL)
4270 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4271#endif
4272}
4273
4274/*
4275 * Level 2 ARC
4276 *
4277 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4278 * It uses dedicated storage devices to hold cached data, which are populated
4279 * using large infrequent writes. The main role of this cache is to boost
4280 * the performance of random read workloads. The intended L2ARC devices
4281 * include short-stroked disks, solid state disks, and other media with
4282 * substantially faster read latency than disk.
4283 *
4284 * +-----------------------+
4285 * | ARC |
4286 * +-----------------------+
4287 * | ^ ^
4288 * | | |
4289 * l2arc_feed_thread() arc_read()
4290 * | | |
4291 * | l2arc read |
4292 * V | |
4293 * +---------------+ |
4294 * | L2ARC | |
4295 * +---------------+ |
4296 * | ^ |
4297 * l2arc_write() | |
4298 * | | |
4299 * V | |
4300 * +-------+ +-------+
4301 * | vdev | | vdev |
4302 * | cache | | cache |
4303 * +-------+ +-------+
4304 * +=========+ .-----.
4305 * : L2ARC : |-_____-|
4306 * : devices : | Disks |
4307 * +=========+ `-_____-'
4308 *
4309 * Read requests are satisfied from the following sources, in order:
4310 *
4311 * 1) ARC
4312 * 2) vdev cache of L2ARC devices
4313 * 3) L2ARC devices
4314 * 4) vdev cache of disks
4315 * 5) disks
4316 *
4317 * Some L2ARC device types exhibit extremely slow write performance.
4318	 * To accommodate this, there are some significant differences between
4319 * the L2ARC and traditional cache design:
4320 *
4321 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4322 * the ARC behave as usual, freeing buffers and placing headers on ghost
4323 * lists. The ARC does not send buffers to the L2ARC during eviction as
4324 * this would add inflated write latencies for all ARC memory pressure.
4325 *
4326 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4327 * It does this by periodically scanning buffers from the eviction-end of
4328 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4329 * not already there. It scans until a headroom of buffers is satisfied,
4330 * which itself is a buffer for ARC eviction. If a compressible buffer is
4331 * found during scanning and selected for writing to an L2ARC device, we
4332 * temporarily boost scanning headroom during the next scan cycle to make
4333 * sure we adapt to compression effects (which might significantly reduce
4334 * the data volume we write to L2ARC). The thread that does this is
4335 * l2arc_feed_thread(), illustrated below; example sizes are included to
4336 * provide a better sense of ratio than this diagram:
4337 *
4338 * head --> tail
4339 * +---------------------+----------+
4340 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4341 * +---------------------+----------+ | o L2ARC eligible
4342 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
4343 * +---------------------+----------+ |
4344 * 15.9 Gbytes ^ 32 Mbytes |
4345 * headroom |
4346 * l2arc_feed_thread()
4347 * |
4348 * l2arc write hand <--[oooo]--'
4349 * | 8 Mbyte
4350 * | write max
4351 * V
4352 * +==============================+
4353 * L2ARC dev |####|#|###|###| |####| ... |
4354 * +==============================+
4355 * 32 Gbytes
4356 *
4357 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4358 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4359 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
4360 * safe to say that this is an uncommon case, since buffers at the end of
4361 * the ARC lists have moved there due to inactivity.
4362 *
4363 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4364 * then the L2ARC simply misses copying some buffers. This serves as a
4365 * pressure valve to prevent heavy read workloads from both stalling the ARC
4366 * with waits and clogging the L2ARC with writes. This also helps prevent
4367 * the potential for the L2ARC to churn if it attempts to cache content too
4368 * quickly, such as during backups of the entire pool.
4369 *
4370 * 5. After system boot and before the ARC has filled main memory, there are
4371 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4372 * lists can remain mostly static. Instead of searching from tail of these
4373	 * lists can remain mostly static. Instead of searching from the tail of these
4374 * for eligible buffers, greatly increasing its chance of finding them.
4375 *
4376 * The L2ARC device write speed is also boosted during this time so that
4377 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4378 * there are no L2ARC reads, and no fear of degrading read performance
4379 * through increased writes.
4380 *
4381 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4382 * the vdev queue can aggregate them into larger and fewer writes. Each
4383 * device is written to in a rotor fashion, sweeping writes through
4384 * available space then repeating.
4385 *
4386 * 7. The L2ARC does not store dirty content. It never needs to flush
4387 * write buffers back to disk based storage.
4388 *
4389 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4390 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4391 *
4392 * The performance of the L2ARC can be tweaked by a number of tunables, which
4393 * may be necessary for different workloads:
4394 *
4395 * l2arc_write_max max write bytes per interval
4396 * l2arc_write_boost extra write bytes during device warmup
4397 * l2arc_noprefetch skip caching prefetched buffers
4398 * l2arc_headroom number of max device writes to precache
4399 * l2arc_headroom_boost when we find compressed buffers during ARC
4400 * scanning, we multiply headroom by this
4401 * percentage factor for the next scan cycle,
4402 * since more compressed buffers are likely to
4403 * be present
4404 * l2arc_feed_secs seconds between L2ARC writing
4405 *
4406 * Tunables may be removed or added as future performance improvements are
4407 * integrated, and also may become zpool properties.
4408 *
4409 * There are three key functions that control how the L2ARC warms up:
4410 *
4411 * l2arc_write_eligible() check if a buffer is eligible to cache
4412 * l2arc_write_size() calculate how much to write
4413 * l2arc_write_interval() calculate sleep delay between writes
4414 *
4415 * These three functions determine what to write, how much, and how quickly
4416 * to send writes.
4417 */
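
/*
 * Put together, a single pass of the feed cycle looks roughly like the
 * sketch below (simplified; the real loop is l2arc_feed_thread(),
 * defined later in this file, which also handles locking, shutdown and
 * the no-device case):
 *
 *	begin = ddi_get_lbolt();
 *	dev = l2arc_dev_get_next();		// pick a cache vdev
 *	size = l2arc_write_size();		// how much to try to write
 *	l2arc_evict(dev, size, B_FALSE);	// clear space ahead of the hand
 *	wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
 *	next = l2arc_write_interval(begin, size, wrote);
 *	// sleep until 'next' and repeat
 */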
4418
4419static boolean_t
4420l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4421{
4422 /*
4423 * A buffer is *not* eligible for the L2ARC if it:
4424 * 1. belongs to a different spa.
4425 * 2. is already cached on the L2ARC.
4426 * 3. has an I/O in progress (it may be an incomplete read).
4427 * 4. is flagged not eligible (zfs property).
4428 */
4429 if (ab->b_spa != spa_guid) {
4430 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4431 return (B_FALSE);
4432 }
4433 if (ab->b_l2hdr != NULL) {
4434 ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4435 return (B_FALSE);
4436 }
4437 if (HDR_IO_IN_PROGRESS(ab)) {
4438 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4439 return (B_FALSE);
4440 }
4441 if (!HDR_L2CACHE(ab)) {
4442 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4443 return (B_FALSE);
4444 }
4445
4446 return (B_TRUE);
4447}
4448
4449static uint64_t
4450l2arc_write_size(void)
4451{
4452 uint64_t size;
4453
4454 /*
4455 * Make sure our globals have meaningful values in case the user
4456 * altered them.
4457 */
4458 size = l2arc_write_max;
4459 if (size == 0) {
4460 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4461 "be greater than zero, resetting it to the default (%d)",
4462 L2ARC_WRITE_SIZE);
4463 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4464 }
4465
4466 if (arc_warm == B_FALSE)
4467 size += l2arc_write_boost;
4468
4469 return (size);
4470
4471}
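
/*
 * For example, with the default tunables (8 MB l2arc_write_max and
 * 8 MB l2arc_write_boost, matching the "8 Mbyte write max" in the
 * diagram above), this returns 8 MB per feed interval once the ARC is
 * warm, and 16 MB while arc_warm is still B_FALSE shortly after boot.
 */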
4472
4473static clock_t
4474l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4475{
4476 clock_t interval, next, now;
4477
4478 /*
4479 * If the ARC lists are busy, increase our write rate; if the
4480 * lists are stale, idle back. This is achieved by checking
4481 * how much we previously wrote - if it was more than half of
4482 * what we wanted, schedule the next write much sooner.
4483 */
4484 if (l2arc_feed_again && wrote > (wanted / 2))
4485 interval = (hz * l2arc_feed_min_ms) / 1000;
4486 else
4487 interval = hz * l2arc_feed_secs;
4488
4489 now = ddi_get_lbolt();
4490 next = MAX(now, MIN(now + interval, began + interval));
4491
4492 return (next);
4493}
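
/*
 * For example, assuming hz = 1000, l2arc_feed_secs = 1 and
 * l2arc_feed_min_ms = 200: if the previous pass wanted 8 MB and wrote
 * 6 MB (more than half), the next pass is scheduled 200 ticks after
 * 'began'; if it wrote only 2 MB, the full one-second (1000 tick)
 * interval is used instead.  Either way the result is clamped so the
 * next wakeup is never in the past.
 */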
4494
4495static void
4496l2arc_hdr_stat_add(void)
4497{
4498 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4499 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4500}
4501
4502static void
4503l2arc_hdr_stat_remove(void)
4504{
4505 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4506 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4507}
4508
4509/*
4510 * Cycle through L2ARC devices. This is how L2ARC load balances.
4511 * If a device is returned, this also returns holding the spa config lock.
4512 */
4513static l2arc_dev_t *
4514l2arc_dev_get_next(void)
4515{
4516 l2arc_dev_t *first, *next = NULL;
4517
4518 /*
4519 * Lock out the removal of spas (spa_namespace_lock), then removal
4520 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4521 * both locks will be dropped and a spa config lock held instead.
4522 */
4523 mutex_enter(&spa_namespace_lock);
4524 mutex_enter(&l2arc_dev_mtx);
4525
4526 /* if there are no vdevs, there is nothing to do */
4527 if (l2arc_ndev == 0)
4528 goto out;
4529
4530 first = NULL;
4531 next = l2arc_dev_last;
4532 do {
4533 /* loop around the list looking for a non-faulted vdev */
4534 if (next == NULL) {
4535 next = list_head(l2arc_dev_list);
4536 } else {
4537 next = list_next(l2arc_dev_list, next);
4538 if (next == NULL)
4539 next = list_head(l2arc_dev_list);
4540 }
4541
4542 /* if we have come back to the start, bail out */
4543 if (first == NULL)
4544 first = next;
4545 else if (next == first)
4546 break;
4547
4548 } while (vdev_is_dead(next->l2ad_vdev));
4549
4550 /* if we were unable to find any usable vdevs, return NULL */
4551 if (vdev_is_dead(next->l2ad_vdev))
4552 next = NULL;
4553
4554 l2arc_dev_last = next;
4555
4556out:
4557 mutex_exit(&l2arc_dev_mtx);
4558
4559 /*
4560 * Grab the config lock to prevent the 'next' device from being
4561 * removed while we are writing to it.
4562 */
4563 if (next != NULL)
4564 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4565 mutex_exit(&spa_namespace_lock);
4566
4567 return (next);
4568}
4569
4570/*
4571 * Free buffers that were tagged for destruction.
4572 */
4573static void
4574l2arc_do_free_on_write()
4575{
4576 list_t *buflist;
4577 l2arc_data_free_t *df, *df_prev;
4578
4579 mutex_enter(&l2arc_free_on_write_mtx);
4580 buflist = l2arc_free_on_write;
4581
4582 for (df = list_tail(buflist); df; df = df_prev) {
4583 df_prev = list_prev(buflist, df);
4584 ASSERT(df->l2df_data != NULL);
4585 ASSERT(df->l2df_func != NULL);
4586 df->l2df_func(df->l2df_data, df->l2df_size);
4587 list_remove(buflist, df);
4588 kmem_free(df, sizeof (l2arc_data_free_t));
4589 }
4590
4591 mutex_exit(&l2arc_free_on_write_mtx);
4592}
4593
4594/*
4595 * A write to a cache device has completed. Update all headers to allow
4596 * reads from these buffers to begin.
4597 */
4598static void
4599l2arc_write_done(zio_t *zio)
4600{
4601 l2arc_write_callback_t *cb;
4602 l2arc_dev_t *dev;
4603 list_t *buflist;
4604 arc_buf_hdr_t *head, *ab, *ab_prev;
4605 l2arc_buf_hdr_t *abl2;
4606 kmutex_t *hash_lock;
4607 int64_t bytes_dropped = 0;
4608
4609 cb = zio->io_private;
4610 ASSERT(cb != NULL);
4611 dev = cb->l2wcb_dev;
4612 ASSERT(dev != NULL);
4613 head = cb->l2wcb_head;
4614 ASSERT(head != NULL);
4615 buflist = dev->l2ad_buflist;
4616 ASSERT(buflist != NULL);
4617 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4618 l2arc_write_callback_t *, cb);
4619
4620 if (zio->io_error != 0)
4621 ARCSTAT_BUMP(arcstat_l2_writes_error);
4622
4623 mutex_enter(&l2arc_buflist_mtx);
4624
4625 /*
4626 * All writes completed, or an error was hit.
4627 */
4628 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4629 ab_prev = list_prev(buflist, ab);
4630 abl2 = ab->b_l2hdr;
4631
4632 /*
4633 * Release the temporary compressed buffer as soon as possible.
4634 */
4635 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4636 l2arc_release_cdata_buf(ab);
4637
4638 hash_lock = HDR_LOCK(ab);
4639 if (!mutex_tryenter(hash_lock)) {
4640 /*
4641 * This buffer misses out. It may be in a stage
4642 * of eviction. Its ARC_L2_WRITING flag will be
4643 * left set, denying reads to this buffer.
4644 */
4645 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4646 continue;
4647 }
4648
4649 if (zio->io_error != 0) {
4650 /*
4651 * Error - drop L2ARC entry.
4652 */
4653 list_remove(buflist, ab);
4654 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4655 bytes_dropped += abl2->b_asize;
4656 ab->b_l2hdr = NULL;
4657 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4658 ab->b_size, 0);
4659 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4660 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4661 }
4662
4663 /*
4664	 * Allow ARC to begin reads from this L2ARC entry.
4665 */
4666 ab->b_flags &= ~ARC_L2_WRITING;
4667
4668 mutex_exit(hash_lock);
4669 }
4670
4671 atomic_inc_64(&l2arc_writes_done);
4672 list_remove(buflist, head);
4673 kmem_cache_free(hdr_cache, head);
4674 mutex_exit(&l2arc_buflist_mtx);
4675
4676 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
4677
4678 l2arc_do_free_on_write();
4679
4680 kmem_free(cb, sizeof (l2arc_write_callback_t));
4681}
4682
4683/*
4684 * A read from a cache device has completed. Validate buffer contents before
4685 * handing over to the regular ARC routines.
4686 */
4687static void
4688l2arc_read_done(zio_t *zio)
4689{
4690 l2arc_read_callback_t *cb;
4691 arc_buf_hdr_t *hdr;
4692 arc_buf_t *buf;
4693 kmutex_t *hash_lock;
4694 int equal;
4695
4696 ASSERT(zio->io_vd != NULL);
4697 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4698
4699 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4700
4701 cb = zio->io_private;
4702 ASSERT(cb != NULL);
4703 buf = cb->l2rcb_buf;
4704 ASSERT(buf != NULL);
4705
4706 hash_lock = HDR_LOCK(buf->b_hdr);
4707 mutex_enter(hash_lock);
4708 hdr = buf->b_hdr;
4709 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4710
4711 /*
4712 * If the buffer was compressed, decompress it first.
4713 */
4714 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4715 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4716 ASSERT(zio->io_data != NULL);
4717
4718 /*
4719	 * Check whether this buffer survived the L2ARC journey.
4720 */
4721 equal = arc_cksum_equal(buf);
4722 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4723 mutex_exit(hash_lock);
4724 zio->io_private = buf;
4725 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4726 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4727 arc_read_done(zio);
4728 } else {
4729 mutex_exit(hash_lock);
4730 /*
4731 * Buffer didn't survive caching. Increment stats and
4732 * reissue to the original storage device.
4733 */
4734 if (zio->io_error != 0) {
4735 ARCSTAT_BUMP(arcstat_l2_io_error);
4736 } else {
4737 zio->io_error = SET_ERROR(EIO);
4738 }
4739 if (!equal)
4740 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4741
4742 /*
4743 * If there's no waiter, issue an async i/o to the primary
4744 * storage now. If there *is* a waiter, the caller must
4745 * issue the i/o in a context where it's OK to block.
4746 */
4747 if (zio->io_waiter == NULL) {
4748 zio_t *pio = zio_unique_parent(zio);
4749
4750 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4751
4752 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4753 buf->b_data, zio->io_size, arc_read_done, buf,
4754 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4755 }
4756 }
4757
4758 kmem_free(cb, sizeof (l2arc_read_callback_t));
4759}
4760
4761/*
4762 * This is the list priority from which the L2ARC will search for pages to
4763 * cache. This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to
4764 * cycle through lists in the desired order. This order can have a
4765 * significant effect on cache performance.
4766 *
4767 * Currently the metadata lists are hit first, MFU then MRU, followed by
4768 * the data lists. This function returns a locked list, and also returns
4769 * the lock pointer.
4770 */
4771static list_t *
4772l2arc_list_locked(int list_num, kmutex_t **lock)
4773{
4774 list_t *list = NULL;
4775 int idx;
4776
4777 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4778
4779 if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4780 idx = list_num;
4781 list = &arc_mfu->arcs_lists[idx];
4782 *lock = ARCS_LOCK(arc_mfu, idx);
4783 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4784 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4785 list = &arc_mru->arcs_lists[idx];
4786 *lock = ARCS_LOCK(arc_mru, idx);
4787 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4788 ARC_BUFC_NUMDATALISTS)) {
4789 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4790 list = &arc_mfu->arcs_lists[idx];
4791 *lock = ARCS_LOCK(arc_mfu, idx);
4792 } else {
4793 idx = list_num - ARC_BUFC_NUMLISTS;
4794 list = &arc_mru->arcs_lists[idx];
4795 *lock = ARCS_LOCK(arc_mru, idx);
4796 }
4797
4798 ASSERT(!(MUTEX_HELD(*lock)));
4799 mutex_enter(*lock);
4800 return (list);
4801}
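
/*
 * For example, assuming ARC_BUFC_NUMMETADATALISTS == 16, the mapping
 * above works out to:
 *
 *	list_num  0..15	-> arc_mfu metadata lists
 *	list_num 16..31	-> arc_mru metadata lists
 *	list_num 32..47	-> arc_mfu data lists
 *	list_num 48..63	-> arc_mru data lists
 *
 * which is exactly the MFU-metadata, MRU-metadata, MFU-data, MRU-data
 * priority described in the comment above.
 */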
4802
4803/*
4804 * Evict buffers from the device write hand to the distance specified in
4805 * bytes. This distance may span populated buffers, or it may span nothing.
4806 * This is clearing a region on the L2ARC device ready for writing.
4807 * If the 'all' boolean is set, every buffer is evicted.
4808 */
4809static void
4810l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4811{
4812 list_t *buflist;
4813 l2arc_buf_hdr_t *abl2;
4814 arc_buf_hdr_t *ab, *ab_prev;
4815 kmutex_t *hash_lock;
4816 uint64_t taddr;
4817 int64_t bytes_evicted = 0;
4818
4819 buflist = dev->l2ad_buflist;
4820
4821 if (buflist == NULL)
4822 return;
4823
4824 if (!all && dev->l2ad_first) {
4825 /*
4826 * This is the first sweep through the device. There is
4827 * nothing to evict.
4828 */
4829 return;
4830 }
4831
4832 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4833 /*
4834 * When nearing the end of the device, evict to the end
4835 * before the device write hand jumps to the start.
4836 */
4837 taddr = dev->l2ad_end;
4838 } else {
4839 taddr = dev->l2ad_hand + distance;
4840 }
4841 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4842 uint64_t, taddr, boolean_t, all);
4843
4844top:
4845 mutex_enter(&l2arc_buflist_mtx);
4846 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4847 ab_prev = list_prev(buflist, ab);
4848
4849 hash_lock = HDR_LOCK(ab);
4850 if (!mutex_tryenter(hash_lock)) {
4851 /*
4852 * Missed the hash lock. Retry.
4853 */
4854 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4855 mutex_exit(&l2arc_buflist_mtx);
4856 mutex_enter(hash_lock);
4857 mutex_exit(hash_lock);
4858 goto top;
4859 }
4860
4861 if (HDR_L2_WRITE_HEAD(ab)) {
4862 /*
4863 * We hit a write head node. Leave it for
4864 * l2arc_write_done().
4865 */
4866 list_remove(buflist, ab);
4867 mutex_exit(hash_lock);
4868 continue;
4869 }
4870
4871 if (!all && ab->b_l2hdr != NULL &&
4872 (ab->b_l2hdr->b_daddr > taddr ||
4873 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4874 /*
4875 * We've evicted to the target address,
4876 * or the end of the device.
4877 */
4878 mutex_exit(hash_lock);
4879 break;
4880 }
4881
4882 if (HDR_FREE_IN_PROGRESS(ab)) {
4883 /*
4884 * Already on the path to destruction.
4885 */
4886 mutex_exit(hash_lock);
4887 continue;
4888 }
4889
4890 if (ab->b_state == arc_l2c_only) {
4891 ASSERT(!HDR_L2_READING(ab));
4892 /*
4893 * This doesn't exist in the ARC. Destroy.
4894 * arc_hdr_destroy() will call list_remove()
4895 * and decrement arcstat_l2_size.
4896 */
4897 arc_change_state(arc_anon, ab, hash_lock);
4898 arc_hdr_destroy(ab);
4899 } else {
4900 /*
4901 * Invalidate issued or about to be issued
4902 * reads, since we may be about to write
4903 * over this location.
4904 */
4905 if (HDR_L2_READING(ab)) {
4906 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4907 ab->b_flags |= ARC_L2_EVICTED;
4908 }
4909
4910 /*
4911 * Tell ARC this no longer exists in L2ARC.
4912 */
4913 if (ab->b_l2hdr != NULL) {
4914 abl2 = ab->b_l2hdr;
4915 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4916 bytes_evicted += abl2->b_asize;
4917 ab->b_l2hdr = NULL;
4918 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4919 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4920 }
4921 list_remove(buflist, ab);
4922
4923 /*
4924	 * This may have been left over after a
4925 * failed write.
4926 */
4927 ab->b_flags &= ~ARC_L2_WRITING;
4928 }
4929 mutex_exit(hash_lock);
4930 }
4931 mutex_exit(&l2arc_buflist_mtx);
4932
4933 vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
4934 dev->l2ad_evict = taddr;
4935}
4936
4937/*
4938 * Find and write ARC buffers to the L2ARC device.
4939 *
4940 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4941 * for reading until they have completed writing.
4942 * The headroom_boost is an in-out parameter used to maintain headroom boost
4943 * state between calls to this function.
4944 *
4945 * Returns the number of bytes actually written (which may be smaller than
4946 * the delta by which the device hand has changed due to alignment).
4947 */
4948static uint64_t
4949l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4950 boolean_t *headroom_boost)
4951{
4952 arc_buf_hdr_t *ab, *ab_prev, *head;
4953 list_t *list;
4954 uint64_t write_asize, write_psize, write_sz, headroom,
4955 buf_compress_minsz;
4956 void *buf_data;
4957 kmutex_t *list_lock;
4958 boolean_t full;
4959 l2arc_write_callback_t *cb;
4960 zio_t *pio, *wzio;
4961 uint64_t guid = spa_load_guid(spa);
4962 const boolean_t do_headroom_boost = *headroom_boost;
4963 int try;
4964
4965 ASSERT(dev->l2ad_vdev != NULL);
4966
4967	/* Lower the flag now; we might want to raise it again later. */
4968 *headroom_boost = B_FALSE;
4969
4970 pio = NULL;
4971 write_sz = write_asize = write_psize = 0;
4972 full = B_FALSE;
4973 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4974 head->b_flags |= ARC_L2_WRITE_HEAD;
4975
4976 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4977 /*
4978 * We will want to try to compress buffers that are at least 2x the
4979 * device sector size.
4980 */
4981 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4982
4983 /*
4984 * Copy buffers for L2ARC writing.
4985 */
4986 mutex_enter(&l2arc_buflist_mtx);
4987 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4988 uint64_t passed_sz = 0;
4989
4990 list = l2arc_list_locked(try, &list_lock);
4991 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4992
4993 /*
4994 * L2ARC fast warmup.
4995 *
4996 * Until the ARC is warm and starts to evict, read from the
4997 * head of the ARC lists rather than the tail.
4998 */
4999 if (arc_warm == B_FALSE)
5000 ab = list_head(list);
5001 else
5002 ab = list_tail(list);
5003 if (ab == NULL)
5004 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
5005
5006 headroom = target_sz * l2arc_headroom;
5007 if (do_headroom_boost)
5008 headroom = (headroom * l2arc_headroom_boost) / 100;
5009
5010 for (; ab; ab = ab_prev) {
5011 l2arc_buf_hdr_t *l2hdr;
5012 kmutex_t *hash_lock;
5013 uint64_t buf_sz;
5014
5015 if (arc_warm == B_FALSE)
5016 ab_prev = list_next(list, ab);
5017 else
5018 ab_prev = list_prev(list, ab);
5019 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
5020
5021 hash_lock = HDR_LOCK(ab);
5022 if (!mutex_tryenter(hash_lock)) {
5023 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
5024 /*
5025 * Skip this buffer rather than waiting.
5026 */
5027 continue;
5028 }
5029
5030 passed_sz += ab->b_size;
5031 if (passed_sz > headroom) {
5032 /*
5033 * Searched too far.
5034 */
5035 mutex_exit(hash_lock);
5036 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
5037 break;
5038 }
5039
5040 if (!l2arc_write_eligible(guid, ab)) {
5041 mutex_exit(hash_lock);
5042 continue;
5043 }
5044
5045 if ((write_sz + ab->b_size) > target_sz) {
5046 full = B_TRUE;
5047 mutex_exit(hash_lock);
5048 ARCSTAT_BUMP(arcstat_l2_write_full);
5049 break;
5050 }
5051
5052 if (pio == NULL) {
5053 /*
5054 * Insert a dummy header on the buflist so
5055 * l2arc_write_done() can find where the
5056 * write buffers begin without searching.
5057 */
5058 list_insert_head(dev->l2ad_buflist, head);
5059
5060 cb = kmem_alloc(
5061 sizeof (l2arc_write_callback_t), KM_SLEEP);
5062 cb->l2wcb_dev = dev;
5063 cb->l2wcb_head = head;
5064 pio = zio_root(spa, l2arc_write_done, cb,
5065 ZIO_FLAG_CANFAIL);
5066 ARCSTAT_BUMP(arcstat_l2_write_pios);
5067 }
5068
5069 /*
5070 * Create and add a new L2ARC header.
5071 */
5072 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
5073 l2hdr->b_dev = dev;
5074 ab->b_flags |= ARC_L2_WRITING;
5075
5076 /*
5077 * Temporarily stash the data buffer in b_tmp_cdata.
5078 * The subsequent write step will pick it up from
5079	 * there. This is because we can't access ab->b_buf
5080 * without holding the hash_lock, which we in turn
5081 * can't access without holding the ARC list locks
5082 * (which we want to avoid during compression/writing).
5083 */
5084 l2hdr->b_compress = ZIO_COMPRESS_OFF;
5085 l2hdr->b_asize = ab->b_size;
5086 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
5087
5088 buf_sz = ab->b_size;
5089 ab->b_l2hdr = l2hdr;
5090
5091 list_insert_head(dev->l2ad_buflist, ab);
5092
5093 /*
5094 * Compute and store the buffer cksum before
5095	 * writing. On debug builds the cksum is verified first.
5096 */
5097 arc_cksum_verify(ab->b_buf);
5098 arc_cksum_compute(ab->b_buf, B_TRUE);
5099
5100 mutex_exit(hash_lock);
5101
5102 write_sz += buf_sz;
5103 }
5104
5105 mutex_exit(list_lock);
5106
5107 if (full == B_TRUE)
5108 break;
5109 }
5110
5111 /* No buffers selected for writing? */
5112 if (pio == NULL) {
5113 ASSERT0(write_sz);
5114 mutex_exit(&l2arc_buflist_mtx);
5115 kmem_cache_free(hdr_cache, head);
5116 return (0);
5117 }
5118
5119 /*
5120 * Now start writing the buffers. We're starting at the write head
5121	 * and working backwards, retracing the course of the buffer selector
5122 * loop above.
5123 */
5124 for (ab = list_prev(dev->l2ad_buflist, head); ab;
5125 ab = list_prev(dev->l2ad_buflist, ab)) {
5126 l2arc_buf_hdr_t *l2hdr;
5127 uint64_t buf_sz;
5128
5129 /*
5130 * We shouldn't need to lock the buffer here, since we flagged
5131 * it as ARC_L2_WRITING in the previous step, but we must take
5132 * care to only access its L2 cache parameters. In particular,
5133 * ab->b_buf may be invalid by now due to ARC eviction.
5134 */
5135 l2hdr = ab->b_l2hdr;
5136 l2hdr->b_daddr = dev->l2ad_hand;
5137
5138 if ((ab->b_flags & ARC_L2COMPRESS) &&
5139 l2hdr->b_asize >= buf_compress_minsz) {
5140 if (l2arc_compress_buf(l2hdr)) {
5141 /*
5142 * If compression succeeded, enable headroom
5143 * boost on the next scan cycle.
5144 */
5145 *headroom_boost = B_TRUE;
5146 }
5147 }
5148
5149 /*
5150 * Pick up the buffer data we had previously stashed away
5151 * (and now potentially also compressed).
5152 */
5153 buf_data = l2hdr->b_tmp_cdata;
5154 buf_sz = l2hdr->b_asize;
5155
5156 /* Compression may have squashed the buffer to zero length. */
5157 if (buf_sz != 0) {
5158 uint64_t buf_p_sz;
5159
5160 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5161 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5162 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5163 ZIO_FLAG_CANFAIL, B_FALSE);
5164
5165 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5166 zio_t *, wzio);
5167 (void) zio_nowait(wzio);
5168
5169 write_asize += buf_sz;
5170 /*
5171 * Keep the clock hand suitably device-aligned.
5172 */
5173 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5174 write_psize += buf_p_sz;
5175 dev->l2ad_hand += buf_p_sz;
5176 }
5177 }
5178
5179 mutex_exit(&l2arc_buflist_mtx);
5180
5181 ASSERT3U(write_asize, <=, target_sz);
5182 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5183 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5184 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5185 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5186 vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
5187
5188 /*
5189 * Bump device hand to the device start if it is approaching the end.
5190 * l2arc_evict() will already have evicted ahead for this case.
5191 */
5192 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5193 dev->l2ad_hand = dev->l2ad_start;
5194 dev->l2ad_evict = dev->l2ad_start;
5195 dev->l2ad_first = B_FALSE;
5196 }
5197
5198 dev->l2ad_writing = B_TRUE;
5199 (void) zio_wait(pio);
5200 dev->l2ad_writing = B_FALSE;
5201
5202 return (write_asize);
5203}
5204
5205/*
5206 * Compresses an L2ARC buffer.
5207 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5208 * size in l2hdr->b_asize. This routine tries to compress the data and
5209 * depending on the compression result there are three possible outcomes:
5210 * *) The buffer was incompressible. The original l2hdr contents were left
5211 * untouched and are ready for writing to an L2 device.
5212 * *) The buffer was all-zeros, so there is no need to write it to an L2
5213 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5214 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5215 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5216 * data buffer which holds the compressed data to be written, and b_asize
5217 * tells us how much data there is. b_compress is set to the appropriate
5218 * compression algorithm. Once writing is done, invoke
5219 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5220 *
5221 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5222 * buffer was incompressible).
5223 */
5224static boolean_t
5225l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5226{
5227 void *cdata;
5228 size_t csize, len, rounded;
5229
5230 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5231 ASSERT(l2hdr->b_tmp_cdata != NULL);
5232
5233 len = l2hdr->b_asize;
5234 cdata = zio_data_buf_alloc(len);
5235 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5236 cdata, l2hdr->b_asize, (size_t)(1ULL << l2hdr->b_dev->l2ad_vdev->vdev_ashift));
5237
5238 rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
5239 if (rounded > csize) {
5240 bzero((char *)cdata + csize, rounded - csize);
5241 csize = rounded;
5242 }
5243
5244 if (csize == 0) {
5245 /* zero block, indicate that there's nothing to write */
5246 zio_data_buf_free(cdata, len);
5247 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5248 l2hdr->b_asize = 0;
5249 l2hdr->b_tmp_cdata = NULL;
5250 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5251 return (B_TRUE);
5252 } else if (csize > 0 && csize < len) {
5253 /*
5254 * Compression succeeded, we'll keep the cdata around for
5255 * writing and release it afterwards.
5256 */
5257 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5258 l2hdr->b_asize = csize;
5259 l2hdr->b_tmp_cdata = cdata;
5260 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5261 return (B_TRUE);
5262 } else {
5263 /*
5264 * Compression failed, release the compressed buffer.
5265 * l2hdr will be left unmodified.
5266 */
5267 zio_data_buf_free(cdata, len);
5268 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5269 return (B_FALSE);
5270 }
5271}
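
/*
 * A worked example of the paths above, for illustration only: take an
 * 8 KB buffer (b_asize = 8192) that LZ4 shrinks to 5000 bytes.  The
 * P2ROUNDUP() pads the result to the next SPA_MINBLOCKSIZE (512)
 * multiple:
 *
 *	csize   = 5000
 *	rounded = P2ROUNDUP(5000, 512) = 5120
 *	bzero(cdata + 5000, 120);  csize = 5120
 *
 * Since 0 < 5120 < 8192 the "compression succeeded" branch runs:
 * b_compress becomes ZIO_COMPRESS_LZ4, b_asize becomes 5120 and
 * b_tmp_cdata now points at the scratch buffer (released later by
 * l2arc_release_cdata_buf()).  Had LZ4 produced >= 8192 bytes the
 * buffer would go out uncompressed, and an all-zero input (csize == 0)
 * would not be written at all.
 */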
5272
5273/*
5274 * Decompresses a zio read back from an l2arc device. On success, the
5275 * underlying zio's io_data buffer is overwritten by the uncompressed
5276 * version. On decompression error (corrupt compressed stream), the
5277 * zio->io_error value is set to signal an I/O error.
5278 *
5279 * Please note that the compressed data stream is not checksummed, so
5280 * if the underlying device is experiencing data corruption we may feed
5281 * corrupt data to the decompressor; the decompressor therefore needs to
5282 * be able to handle this situation (LZ4 does).
5283 */
5284static void
5285l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5286{
5287 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5288
5289 if (zio->io_error != 0) {
5290 /*
5291		 * An io error has occurred, just restore the original io
5292 * size in preparation for a main pool read.
5293 */
5294 zio->io_orig_size = zio->io_size = hdr->b_size;
5295 return;
5296 }
5297
5298 if (c == ZIO_COMPRESS_EMPTY) {
5299 /*
5300 * An empty buffer results in a null zio, which means we
5301 * need to fill its io_data after we're done restoring the
5302 * buffer's contents.
5303 */
5304 ASSERT(hdr->b_buf != NULL);
5305 bzero(hdr->b_buf->b_data, hdr->b_size);
5306 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5307 } else {
5308 ASSERT(zio->io_data != NULL);
5309 /*
5310 * We copy the compressed data from the start of the arc buffer
5311 * (the zio_read will have pulled in only what we need, the
5312 * rest is garbage which we will overwrite at decompression)
5313 * and then decompress back to the ARC data buffer. This way we
5314 * can minimize copying by simply decompressing back over the
5315 * original compressed data (rather than decompressing to an
5316 * aux buffer and then copying back the uncompressed buffer,
5317 * which is likely to be much larger).
5318 */
5319 uint64_t csize;
5320 void *cdata;
5321
5322 csize = zio->io_size;
5323 cdata = zio_data_buf_alloc(csize);
5324 bcopy(zio->io_data, cdata, csize);
5325 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5326 hdr->b_size) != 0)
5327 zio->io_error = EIO;
5328 zio_data_buf_free(cdata, csize);
5329 }
5330
5331 /* Restore the expected uncompressed IO size. */
5332 zio->io_orig_size = zio->io_size = hdr->b_size;
5333}
5334
5335/*
5336 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5337 * This buffer serves as a temporary holder of compressed data while
5338 * the buffer entry is being written to an l2arc device. Once that is
5339 * done, we can dispose of it.
5340 */
5341static void
5342l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5343{
5344 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5345
5346 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5347 /*
5348 * If the data was compressed, then we've allocated a
5349 * temporary buffer for it, so now we need to release it.
5350 */
5351 ASSERT(l2hdr->b_tmp_cdata != NULL);
5352 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5353 }
5354 l2hdr->b_tmp_cdata = NULL;
5355}
5356
5357/*
5358 * This thread feeds the L2ARC at regular intervals. This is the beating
5359 * heart of the L2ARC.
5360 */
5361static void
5362l2arc_feed_thread(void *dummy __unused)
5363{
5364 callb_cpr_t cpr;
5365 l2arc_dev_t *dev;
5366 spa_t *spa;
5367 uint64_t size, wrote;
5368 clock_t begin, next = ddi_get_lbolt();
5369 boolean_t headroom_boost = B_FALSE;
5370
5371 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5372
5373 mutex_enter(&l2arc_feed_thr_lock);
5374
5375 while (l2arc_thread_exit == 0) {
5376 CALLB_CPR_SAFE_BEGIN(&cpr);
5377 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5378 next - ddi_get_lbolt());
5379 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5380 next = ddi_get_lbolt() + hz;
5381
5382 /*
5383 * Quick check for L2ARC devices.
5384 */
5385 mutex_enter(&l2arc_dev_mtx);
5386 if (l2arc_ndev == 0) {
5387 mutex_exit(&l2arc_dev_mtx);
5388 continue;
5389 }
5390 mutex_exit(&l2arc_dev_mtx);
5391 begin = ddi_get_lbolt();
5392
5393 /*
5394 * This selects the next l2arc device to write to, and in
5395 * doing so the next spa to feed from: dev->l2ad_spa. This
5396 * will return NULL if there are now no l2arc devices or if
5397 * they are all faulted.
5398 *
5399 * If a device is returned, its spa's config lock is also
5400 * held to prevent device removal. l2arc_dev_get_next()
5401 * will grab and release l2arc_dev_mtx.
5402 */
5403 if ((dev = l2arc_dev_get_next()) == NULL)
5404 continue;
5405
5406 spa = dev->l2ad_spa;
5407 ASSERT(spa != NULL);
5408
5409 /*
5410 * If the pool is read-only then force the feed thread to
5411 * sleep a little longer.
5412 */
5413 if (!spa_writeable(spa)) {
5414 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5415 spa_config_exit(spa, SCL_L2ARC, dev);
5416 continue;
5417 }
5418
5419 /*
5420 * Avoid contributing to memory pressure.
5421 */
5422 if (arc_reclaim_needed()) {
5423 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5424 spa_config_exit(spa, SCL_L2ARC, dev);
5425 continue;
5426 }
5427
5428 ARCSTAT_BUMP(arcstat_l2_feeds);
5429
5430 size = l2arc_write_size();
5431
5432 /*
5433 * Evict L2ARC buffers that will be overwritten.
5434 */
5435 l2arc_evict(dev, size, B_FALSE);
5436
5437 /*
5438 * Write ARC buffers.
5439 */
5440 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5441
5442 /*
5443 * Calculate interval between writes.
5444 */
5445 next = l2arc_write_interval(begin, size, wrote);
5446 spa_config_exit(spa, SCL_L2ARC, dev);
5447 }
5448
5449 l2arc_thread_exit = 0;
5450 cv_broadcast(&l2arc_feed_thr_cv);
5451 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5452 thread_exit();
5453}
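
/*
 * Sketch of one feed cycle, for illustration (figures assume the stock
 * tunable defaults defined earlier in this file):
 *
 *	size  = l2arc_write_size();		8 MB, or 16 MB while
 *						arc_warm is still B_FALSE
 *	l2arc_evict(dev, size, B_FALSE);	clear 'size' bytes ahead
 *						of the device write hand
 *	wrote = l2arc_write_buffers(...);	copy eligible buffers from
 *						the ARC list tails, at
 *						most 'size' bytes
 *	next  = l2arc_write_interval(...);	~200 ms if we wrote more
 *						than half of 'size',
 *						otherwise a full second
 */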
5454
5455boolean_t
5456l2arc_vdev_present(vdev_t *vd)
5457{
5458 l2arc_dev_t *dev;
5459
5460 mutex_enter(&l2arc_dev_mtx);
5461 for (dev = list_head(l2arc_dev_list); dev != NULL;
5462 dev = list_next(l2arc_dev_list, dev)) {
5463 if (dev->l2ad_vdev == vd)
5464 break;
5465 }
5466 mutex_exit(&l2arc_dev_mtx);
5467
5468 return (dev != NULL);
5469}
5470
5471/*
5472 * Add a vdev for use by the L2ARC. By this point the spa has already
5473 * validated the vdev and opened it.
5474 */
5475void
5476l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5477{
5478 l2arc_dev_t *adddev;
5479
5480 ASSERT(!l2arc_vdev_present(vd));
5481
5482 vdev_ashift_optimize(vd);
5483
5484 /*
5485 * Create a new l2arc device entry.
5486 */
5487 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5488 adddev->l2ad_spa = spa;
5489 adddev->l2ad_vdev = vd;
5490 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5491 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5492 adddev->l2ad_hand = adddev->l2ad_start;
5493 adddev->l2ad_evict = adddev->l2ad_start;
5494 adddev->l2ad_first = B_TRUE;
5495 adddev->l2ad_writing = B_FALSE;
5496
5497 /*
5498 * This is a list of all ARC buffers that are still valid on the
5499 * device.
5500 */
5501 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5502 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5503 offsetof(arc_buf_hdr_t, b_l2node));
5504
5505 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5506
5507 /*
5508 * Add device to global list
5509 */
5510 mutex_enter(&l2arc_dev_mtx);
5511 list_insert_head(l2arc_dev_list, adddev);
5512 atomic_inc_64(&l2arc_ndev);
5513 mutex_exit(&l2arc_dev_mtx);
5514}
5515
5516/*
5517 * Remove a vdev from the L2ARC.
5518 */
5519void
5520l2arc_remove_vdev(vdev_t *vd)
5521{
5522 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5523
5524 /*
5525 * Find the device by vdev
5526 */
5527 mutex_enter(&l2arc_dev_mtx);
5528 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5529 nextdev = list_next(l2arc_dev_list, dev);
5530 if (vd == dev->l2ad_vdev) {
5531 remdev = dev;
5532 break;
5533 }
5534 }
5535 ASSERT(remdev != NULL);
5536
5537 /*
5538 * Remove device from global list
5539 */
5540 list_remove(l2arc_dev_list, remdev);
5541 l2arc_dev_last = NULL; /* may have been invalidated */
5542 atomic_dec_64(&l2arc_ndev);
5543 mutex_exit(&l2arc_dev_mtx);
5544
5545 /*
5546 * Clear all buflists and ARC references. L2ARC device flush.
5547 */
5548 l2arc_evict(remdev, 0, B_TRUE);
5549 list_destroy(remdev->l2ad_buflist);
5550 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5551 kmem_free(remdev, sizeof (l2arc_dev_t));
5552}
5553
5554void
5555l2arc_init(void)
5556{
5557 l2arc_thread_exit = 0;
5558 l2arc_ndev = 0;
5559 l2arc_writes_sent = 0;
5560 l2arc_writes_done = 0;
5561
5562 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5563 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5564 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5565 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5566 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5567
5568 l2arc_dev_list = &L2ARC_dev_list;
5569 l2arc_free_on_write = &L2ARC_free_on_write;
5570 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5571 offsetof(l2arc_dev_t, l2ad_node));
5572 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5573 offsetof(l2arc_data_free_t, l2df_list_node));
5574}
5575
5576void
5577l2arc_fini(void)
5578{
5579 /*
5580 * This is called from dmu_fini(), which is called from spa_fini();
5581 * Because of this, we can assume that all l2arc devices have
5582 * already been removed when the pools themselves were removed.
5583 */
5584
5585 l2arc_do_free_on_write();
5586
5587 mutex_destroy(&l2arc_feed_thr_lock);
5588 cv_destroy(&l2arc_feed_thr_cv);
5589 mutex_destroy(&l2arc_dev_mtx);
5590 mutex_destroy(&l2arc_buflist_mtx);
5591 mutex_destroy(&l2arc_free_on_write_mtx);
5592
5593 list_destroy(l2arc_dev_list);
5594 list_destroy(l2arc_free_on_write);
5595}
5596
5597void
5598l2arc_start(void)
5599{
5600 if (!(spa_mode_global & FWRITE))
5601 return;
5602
5603 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5604 TS_RUN, minclsyspri);
5605}
5606
5607void
5608l2arc_stop(void)
5609{
5610 if (!(spa_mode_global & FWRITE))
5611 return;
5612
5613 mutex_enter(&l2arc_feed_thr_lock);
5614 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5615 l2arc_thread_exit = 1;
5616 while (l2arc_thread_exit != 0)
5617 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5618 mutex_exit(&l2arc_feed_thr_lock);
5619}
3876{
3877 arc_buf_hdr_t *hdr = buf->b_hdr;
3878 arc_write_callback_t *callback;
3879 zio_t *zio;
3880
3881 ASSERT(ready != NULL);
3882 ASSERT(done != NULL);
3883 ASSERT(!HDR_IO_ERROR(hdr));
3884 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3885 ASSERT(hdr->b_acb == NULL);
3886 if (l2arc)
3887 hdr->b_flags |= ARC_L2CACHE;
3888 if (l2arc_compress)
3889 hdr->b_flags |= ARC_L2COMPRESS;
3890 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3891 callback->awcb_ready = ready;
3892 callback->awcb_physdone = physdone;
3893 callback->awcb_done = done;
3894 callback->awcb_private = private;
3895 callback->awcb_buf = buf;
3896
3897 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3898 arc_write_ready, arc_write_physdone, arc_write_done, callback,
3899 priority, zio_flags, zb);
3900
3901 return (zio);
3902}
3903
3904static int
3905arc_memory_throttle(uint64_t reserve, uint64_t txg)
3906{
3907#ifdef _KERNEL
3908 uint64_t available_memory =
3909 ptoa((uintmax_t)vm_cnt.v_free_count + vm_cnt.v_cache_count);
3910 static uint64_t page_load = 0;
3911 static uint64_t last_txg = 0;
3912
3913#ifdef sun
3914#if defined(__i386)
3915 available_memory =
3916 MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
3917#endif
3918#endif /* sun */
3919
3920 if (vm_cnt.v_free_count + vm_cnt.v_cache_count >
3921 (uint64_t)physmem * arc_lotsfree_percent / 100)
3922 return (0);
3923
3924 if (txg > last_txg) {
3925 last_txg = txg;
3926 page_load = 0;
3927 }
3928 /*
3929 * If we are in pageout, we know that memory is already tight,
3930 * the arc is already going to be evicting, so we just want to
3931 * continue to let page writes occur as quickly as possible.
3932 */
3933 if (curproc == pageproc) {
3934 if (page_load > available_memory / 4)
3935 return (SET_ERROR(ERESTART));
3936 /* Note: reserve is inflated, so we deflate */
3937 page_load += reserve / 8;
3938 return (0);
3939 } else if (page_load > 0 && arc_reclaim_needed()) {
3940 /* memory is low, delay before restarting */
3941 ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3942 return (SET_ERROR(EAGAIN));
3943 }
3944 page_load = 0;
3945#endif
3946 return (0);
3947}
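
/*
 * Rough illustration of the throttle above (figures are made up): with
 * 400 MB of free + cache pages, the pageout path can accumulate up to
 * available_memory / 4 = 100 MB of page_load in one txg before new
 * reservations return ERESTART; since each reservation only adds
 * reserve / 8 (the reserve is inflated), that corresponds to roughly
 * 800 MB worth of inflated reserves.  Ordinary threads back off with
 * EAGAIN as soon as any page_load has accumulated and
 * arc_reclaim_needed() reports memory pressure.
 */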
3948
3949void
3950arc_tempreserve_clear(uint64_t reserve)
3951{
3952 atomic_add_64(&arc_tempreserve, -reserve);
3953 ASSERT((int64_t)arc_tempreserve >= 0);
3954}
3955
3956int
3957arc_tempreserve_space(uint64_t reserve, uint64_t txg)
3958{
3959 int error;
3960 uint64_t anon_size;
3961
3962 if (reserve > arc_c/4 && !arc_no_grow)
3963 arc_c = MIN(arc_c_max, reserve * 4);
3964 if (reserve > arc_c)
3965 return (SET_ERROR(ENOMEM));
3966
3967 /*
3968 * Don't count loaned bufs as in flight dirty data to prevent long
3969 * network delays from blocking transactions that are ready to be
3970 * assigned to a txg.
3971 */
3972 anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
3973
3974 /*
3975 * Writes will, almost always, require additional memory allocations
3976 * in order to compress/encrypt/etc the data. We therefore need to
3977 * make sure that there is sufficient available memory for this.
3978 */
3979 error = arc_memory_throttle(reserve, txg);
3980 if (error != 0)
3981 return (error);
3982
3983 /*
3984 * Throttle writes when the amount of dirty data in the cache
3985 * gets too large. We try to keep the cache less than half full
3986 * of dirty blocks so that our sync times don't grow too large.
3987 * Note: if two requests come in concurrently, we might let them
3988 * both succeed, when one of them should fail. Not a huge deal.
3989 */
3990
3991 if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
3992 anon_size > arc_c / 4) {
3993 dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
3994 "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
3995 arc_tempreserve>>10,
3996 arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
3997 arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
3998 reserve>>10, arc_c>>10);
3999 return (SET_ERROR(ERESTART));
4000 }
4001 atomic_add_64(&arc_tempreserve, reserve);
4002 return (0);
4003}
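
/*
 * Worked example of the dirty-data throttle above, assuming arc_c is
 * 1 GB: a reservation fails with ERESTART only once anonymous (not yet
 * synced) data exceeds arc_c / 4 = 256 MB *and* reserve +
 * arc_tempreserve + anon_size exceeds arc_c / 2 = 512 MB.  Either
 * condition alone is not enough, so small bursts of dirty data are
 * still admitted while a sync is catching up.
 */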
4004
4005static kmutex_t arc_lowmem_lock;
4006#ifdef _KERNEL
4007static eventhandler_tag arc_event_lowmem = NULL;
4008
4009static void
4010arc_lowmem(void *arg __unused, int howto __unused)
4011{
4012
4013 /* Serialize access via arc_lowmem_lock. */
4014 mutex_enter(&arc_lowmem_lock);
4015 mutex_enter(&arc_reclaim_thr_lock);
4016 needfree = 1;
4017 cv_signal(&arc_reclaim_thr_cv);
4018
4019 /*
4020 * It is unsafe to block here in arbitrary threads, because we can come
4021 * here from ARC itself and may hold ARC locks and thus risk a deadlock
4022	 * with the ARC reclaim thread.
4023 */
4024 if (curproc == pageproc) {
4025 while (needfree)
4026 msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
4027 }
4028 mutex_exit(&arc_reclaim_thr_lock);
4029 mutex_exit(&arc_lowmem_lock);
4030}
4031#endif
4032
4033void
4034arc_init(void)
4035{
4036 int i, prefetch_tunable_set = 0;
4037
4038 mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4039 cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
4040 mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
4041
4042 /* Convert seconds to clock ticks */
4043 arc_min_prefetch_lifespan = 1 * hz;
4044
4045 /* Start out with 1/8 of all memory */
4046 arc_c = kmem_size() / 8;
4047
4048#ifdef sun
4049#ifdef _KERNEL
4050 /*
4051 * On architectures where the physical memory can be larger
4052 * than the addressable space (intel in 32-bit mode), we may
4053 * need to limit the cache to 1/8 of VM size.
4054 */
4055 arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4056#endif
4057#endif /* sun */
4058 /* set min cache to 1/32 of all memory, or 16MB, whichever is more */
4059 arc_c_min = MAX(arc_c / 4, 64<<18);
4060	/* set max to 5/8 of all memory, or all but 1GB, whichever is more */
4061 if (arc_c * 8 >= 1<<30)
4062 arc_c_max = (arc_c * 8) - (1<<30);
4063 else
4064 arc_c_max = arc_c_min;
4065 arc_c_max = MAX(arc_c * 5, arc_c_max);
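
	/*
	 * Example with the defaults above and kmem_size() == 16 GB
	 * (illustrative figures only): arc_c starts at 2 GB,
	 * arc_c_min = MAX(512 MB, 16 MB) = 512 MB, and
	 * arc_c_max = MAX(5 * 2 GB, 16 GB - 1 GB) = 15 GB, i.e. the
	 * larger of 5/8 of memory and all-but-1GB.  The tunables below
	 * may still override these values.
	 */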
4066
4067#ifdef _KERNEL
4068 /*
4069 * Allow the tunables to override our calculations if they are
4070	 * reasonable (i.e. over 16MB)
4071 */
4072 if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
4073 arc_c_max = zfs_arc_max;
4074 if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
4075 arc_c_min = zfs_arc_min;
4076#endif
4077
4078 arc_c = arc_c_max;
4079 arc_p = (arc_c >> 1);
4080
4081 /* limit meta-data to 1/4 of the arc capacity */
4082 arc_meta_limit = arc_c_max / 4;
4083
4084 /* Allow the tunable to override if it is reasonable */
4085 if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4086 arc_meta_limit = zfs_arc_meta_limit;
4087
4088 if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4089 arc_c_min = arc_meta_limit / 2;
4090
4091 if (zfs_arc_grow_retry > 0)
4092 arc_grow_retry = zfs_arc_grow_retry;
4093
4094 if (zfs_arc_shrink_shift > 0)
4095 arc_shrink_shift = zfs_arc_shrink_shift;
4096
4097 if (zfs_arc_p_min_shift > 0)
4098 arc_p_min_shift = zfs_arc_p_min_shift;
4099
4100	/* if kmem_flags are set, let's try to use less memory */
4101 if (kmem_debugging())
4102 arc_c = arc_c / 2;
4103 if (arc_c < arc_c_min)
4104 arc_c = arc_c_min;
4105
4106 zfs_arc_min = arc_c_min;
4107 zfs_arc_max = arc_c_max;
4108
4109 arc_anon = &ARC_anon;
4110 arc_mru = &ARC_mru;
4111 arc_mru_ghost = &ARC_mru_ghost;
4112 arc_mfu = &ARC_mfu;
4113 arc_mfu_ghost = &ARC_mfu_ghost;
4114 arc_l2c_only = &ARC_l2c_only;
4115 arc_size = 0;
4116
4117 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4118 mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4119 NULL, MUTEX_DEFAULT, NULL);
4120 mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4121 NULL, MUTEX_DEFAULT, NULL);
4122 mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4123 NULL, MUTEX_DEFAULT, NULL);
4124 mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4125 NULL, MUTEX_DEFAULT, NULL);
4126 mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4127 NULL, MUTEX_DEFAULT, NULL);
4128 mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4129 NULL, MUTEX_DEFAULT, NULL);
4130
4131 list_create(&arc_mru->arcs_lists[i],
4132 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4133 list_create(&arc_mru_ghost->arcs_lists[i],
4134 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4135 list_create(&arc_mfu->arcs_lists[i],
4136 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4137 list_create(&arc_mfu_ghost->arcs_lists[i],
4138 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4141 list_create(&arc_l2c_only->arcs_lists[i],
4142 sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4143 }
4144
4145 buf_init();
4146
4147 arc_thread_exit = 0;
4148 arc_eviction_list = NULL;
4149 mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4150 bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4151
4152 arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4153 sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4154
4155 if (arc_ksp != NULL) {
4156 arc_ksp->ks_data = &arc_stats;
4157 kstat_install(arc_ksp);
4158 }
4159
4160 (void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4161 TS_RUN, minclsyspri);
4162
4163#ifdef _KERNEL
4164 arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4165 EVENTHANDLER_PRI_FIRST);
4166#endif
4167
4168 arc_dead = FALSE;
4169 arc_warm = B_FALSE;
4170
4171 /*
4172 * Calculate maximum amount of dirty data per pool.
4173 *
4174 * If it has been set by /etc/system, take that.
4175 * Otherwise, use a percentage of physical memory defined by
4176 * zfs_dirty_data_max_percent (default 10%) with a cap at
4177 * zfs_dirty_data_max_max (default 4GB).
4178 */
4179 if (zfs_dirty_data_max == 0) {
4180 zfs_dirty_data_max = ptob(physmem) *
4181 zfs_dirty_data_max_percent / 100;
4182 zfs_dirty_data_max = MIN(zfs_dirty_data_max,
4183 zfs_dirty_data_max_max);
4184 }
4185
4186#ifdef _KERNEL
4187 if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4188 prefetch_tunable_set = 1;
4189
4190#ifdef __i386__
4191 if (prefetch_tunable_set == 0) {
4192 printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4193 "-- to enable,\n");
4194 printf(" add \"vfs.zfs.prefetch_disable=0\" "
4195 "to /boot/loader.conf.\n");
4196 zfs_prefetch_disable = 1;
4197 }
4198#else
4199 if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4200 prefetch_tunable_set == 0) {
4201 printf("ZFS NOTICE: Prefetch is disabled by default if less "
4202 "than 4GB of RAM is present;\n"
4203 " to enable, add \"vfs.zfs.prefetch_disable=0\" "
4204 "to /boot/loader.conf.\n");
4205 zfs_prefetch_disable = 1;
4206 }
4207#endif
4208 /* Warn about ZFS memory and address space requirements. */
4209 if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4210 printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4211 "expect unstable behavior.\n");
4212 }
4213 if (kmem_size() < 512 * (1 << 20)) {
4214 printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4215 "expect unstable behavior.\n");
4216 printf(" Consider tuning vm.kmem_size and "
4217 "vm.kmem_size_max\n");
4218 printf(" in /boot/loader.conf.\n");
4219 }
4220#endif
4221}
4222
4223void
4224arc_fini(void)
4225{
4226 int i;
4227
4228 mutex_enter(&arc_reclaim_thr_lock);
4229 arc_thread_exit = 1;
4230 cv_signal(&arc_reclaim_thr_cv);
4231 while (arc_thread_exit != 0)
4232 cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4233 mutex_exit(&arc_reclaim_thr_lock);
4234
4235 arc_flush(NULL);
4236
4237 arc_dead = TRUE;
4238
4239 if (arc_ksp != NULL) {
4240 kstat_delete(arc_ksp);
4241 arc_ksp = NULL;
4242 }
4243
4244 mutex_destroy(&arc_eviction_mtx);
4245 mutex_destroy(&arc_reclaim_thr_lock);
4246 cv_destroy(&arc_reclaim_thr_cv);
4247
4248 for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4249 list_destroy(&arc_mru->arcs_lists[i]);
4250 list_destroy(&arc_mru_ghost->arcs_lists[i]);
4251 list_destroy(&arc_mfu->arcs_lists[i]);
4252 list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4253 list_destroy(&arc_l2c_only->arcs_lists[i]);
4254
4255 mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4256 mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4257 mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4258 mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4259 mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4260 mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4261 }
4262
4263 buf_fini();
4264
4265 ASSERT(arc_loaned_bytes == 0);
4266
4267 mutex_destroy(&arc_lowmem_lock);
4268#ifdef _KERNEL
4269 if (arc_event_lowmem != NULL)
4270 EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4271#endif
4272}
4273
4274/*
4275 * Level 2 ARC
4276 *
4277 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4278 * It uses dedicated storage devices to hold cached data, which are populated
4279 * using large infrequent writes. The main role of this cache is to boost
4280 * the performance of random read workloads. The intended L2ARC devices
4281 * include short-stroked disks, solid state disks, and other media with
4282 * substantially faster read latency than disk.
4283 *
4284 * +-----------------------+
4285 * | ARC |
4286 * +-----------------------+
4287 * | ^ ^
4288 * | | |
4289 * l2arc_feed_thread() arc_read()
4290 * | | |
4291 * | l2arc read |
4292 * V | |
4293 * +---------------+ |
4294 * | L2ARC | |
4295 * +---------------+ |
4296 * | ^ |
4297 * l2arc_write() | |
4298 * | | |
4299 * V | |
4300 * +-------+ +-------+
4301 * | vdev | | vdev |
4302 * | cache | | cache |
4303 * +-------+ +-------+
4304 * +=========+ .-----.
4305 * : L2ARC : |-_____-|
4306 * : devices : | Disks |
4307 * +=========+ `-_____-'
4308 *
4309 * Read requests are satisfied from the following sources, in order:
4310 *
4311 * 1) ARC
4312 * 2) vdev cache of L2ARC devices
4313 * 3) L2ARC devices
4314 * 4) vdev cache of disks
4315 * 5) disks
4316 *
4317 * Some L2ARC device types exhibit extremely slow write performance.
4318 * To accommodate for this there are some significant differences between
4319 * the L2ARC and traditional cache design:
4320 *
4321 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4322 * the ARC behave as usual, freeing buffers and placing headers on ghost
4323 * lists. The ARC does not send buffers to the L2ARC during eviction as
4324 * this would add inflated write latencies for all ARC memory pressure.
4325 *
4326 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4327 * It does this by periodically scanning buffers from the eviction-end of
4328 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4329 * not already there. It scans until a headroom of buffers is satisfied,
4330 * which itself is a buffer for ARC eviction. If a compressible buffer is
4331 * found during scanning and selected for writing to an L2ARC device, we
4332 * temporarily boost scanning headroom during the next scan cycle to make
4333 * sure we adapt to compression effects (which might significantly reduce
4334 * the data volume we write to L2ARC). The thread that does this is
4335 * l2arc_feed_thread(), illustrated below; example sizes are included to
4336 * provide a better sense of ratio than this diagram:
4337 *
4338 * head --> tail
4339 * +---------------------+----------+
4340 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4341 * +---------------------+----------+ | o L2ARC eligible
4342 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
4343 * +---------------------+----------+ |
4344 * 15.9 Gbytes ^ 32 Mbytes |
4345 * headroom |
4346 * l2arc_feed_thread()
4347 * |
4348 * l2arc write hand <--[oooo]--'
4349 * | 8 Mbyte
4350 * | write max
4351 * V
4352 * +==============================+
4353 * L2ARC dev |####|#|###|###| |####| ... |
4354 * +==============================+
4355 * 32 Gbytes
4356 *
4357 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4358 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4359 * needed to, potentially wasting L2ARC device bandwidth and storage. It is
4360 * safe to say that this is an uncommon case, since buffers at the end of
4361 * the ARC lists have moved there due to inactivity.
4362 *
4363 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4364 * then the L2ARC simply misses copying some buffers. This serves as a
4365 * pressure valve to prevent heavy read workloads from both stalling the ARC
4366 * with waits and clogging the L2ARC with writes. This also helps prevent
4367 * the potential for the L2ARC to churn if it attempts to cache content too
4368 * quickly, such as during backups of the entire pool.
4369 *
4370 * 5. After system boot and before the ARC has filled main memory, there are
4371 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4372 * lists can remain mostly static.  Instead of searching from the tail of these
4373 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4374 * for eligible buffers, greatly increasing its chance of finding them.
4375 *
4376 * The L2ARC device write speed is also boosted during this time so that
4377 * the L2ARC warms up faster. Since there have been no ARC evictions yet,
4378 * there are no L2ARC reads, and no fear of degrading read performance
4379 * through increased writes.
4380 *
4381 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4382 * the vdev queue can aggregate them into larger and fewer writes. Each
4383 * device is written to in a rotor fashion, sweeping writes through
4384 * available space then repeating.
4385 *
4386 * 7. The L2ARC does not store dirty content. It never needs to flush
4387 * write buffers back to disk based storage.
4388 *
4389 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4390 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4391 *
4392 * The performance of the L2ARC can be tweaked by a number of tunables, which
4393 * may be necessary for different workloads:
4394 *
4395 * l2arc_write_max max write bytes per interval
4396 * l2arc_write_boost extra write bytes during device warmup
4397 * l2arc_noprefetch skip caching prefetched buffers
4398 * l2arc_headroom number of max device writes to precache
4399 * l2arc_headroom_boost when we find compressed buffers during ARC
4400 * scanning, we multiply headroom by this
4401 * percentage factor for the next scan cycle,
4402 * since more compressed buffers are likely to
4403 * be present
4404 * l2arc_feed_secs seconds between L2ARC writing
4405 *
4406 * Tunables may be removed or added as future performance improvements are
4407 * integrated, and also may become zpool properties.
4408 *
4409 * There are three key functions that control how the L2ARC warms up:
4410 *
4411 * l2arc_write_eligible() check if a buffer is eligible to cache
4412 * l2arc_write_size() calculate how much to write
4413 * l2arc_write_interval() calculate sleep delay between writes
4414 *
4415 * These three functions determine what to write, how much, and how quickly
4416 * to send writes.
4417 */
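
/*
 * To make the tunables above concrete (illustrative figures assuming
 * the stock defaults defined earlier in this file): with
 * l2arc_write_max = 8 MB and l2arc_write_boost = 8 MB, each feed cycle
 * writes at most 8 MB, or 16 MB while the ARC is still warming up.
 * With l2arc_headroom = 2 the scan covers up to 2 * 8 MB = 16 MB from
 * the tail of each list, and after a cycle that found compressible
 * buffers l2arc_headroom_boost = 200 doubles that to 32 MB for the
 * next scan.
 */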
4418
4419static boolean_t
4420l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4421{
4422 /*
4423 * A buffer is *not* eligible for the L2ARC if it:
4424 * 1. belongs to a different spa.
4425 * 2. is already cached on the L2ARC.
4426 * 3. has an I/O in progress (it may be an incomplete read).
4427 * 4. is flagged not eligible (zfs property).
4428 */
4429 if (ab->b_spa != spa_guid) {
4430 ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4431 return (B_FALSE);
4432 }
4433 if (ab->b_l2hdr != NULL) {
4434 ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4435 return (B_FALSE);
4436 }
4437 if (HDR_IO_IN_PROGRESS(ab)) {
4438 ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4439 return (B_FALSE);
4440 }
4441 if (!HDR_L2CACHE(ab)) {
4442 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4443 return (B_FALSE);
4444 }
4445
4446 return (B_TRUE);
4447}
4448
4449static uint64_t
4450l2arc_write_size(void)
4451{
4452 uint64_t size;
4453
4454 /*
4455 * Make sure our globals have meaningful values in case the user
4456 * altered them.
4457 */
4458 size = l2arc_write_max;
4459 if (size == 0) {
4460 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4461 "be greater than zero, resetting it to the default (%d)",
4462 L2ARC_WRITE_SIZE);
4463 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4464 }
4465
4466 if (arc_warm == B_FALSE)
4467 size += l2arc_write_boost;
4468
4469 return (size);
4470
4471}
4472
4473static clock_t
4474l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4475{
4476 clock_t interval, next, now;
4477
4478 /*
4479 * If the ARC lists are busy, increase our write rate; if the
4480 * lists are stale, idle back. This is achieved by checking
4481 * how much we previously wrote - if it was more than half of
4482 * what we wanted, schedule the next write much sooner.
4483 */
4484 if (l2arc_feed_again && wrote > (wanted / 2))
4485 interval = (hz * l2arc_feed_min_ms) / 1000;
4486 else
4487 interval = hz * l2arc_feed_secs;
4488
4489 now = ddi_get_lbolt();
4490 next = MAX(now, MIN(now + interval, began + interval));
4491
4492 return (next);
4493}
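
/*
 * For illustration, with the default l2arc_feed_secs = 1 and
 * l2arc_feed_min_ms = 200: if the previous cycle wanted 8 MB and wrote
 * 6 MB (more than half), the next feed is scheduled about 200 ms after
 * the previous one *began*; if it wrote only 2 MB, the thread idles for
 * a full second instead.  The MAX/MIN clamp keeps 'next' in the future
 * while anchoring the interval to the start, rather than the end, of
 * the previous cycle.
 */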
4494
4495static void
4496l2arc_hdr_stat_add(void)
4497{
4498 ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4499 ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4500}
4501
4502static void
4503l2arc_hdr_stat_remove(void)
4504{
4505 ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4506 ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4507}
4508
4509/*
4510 * Cycle through L2ARC devices. This is how L2ARC load balances.
4511 * If a device is returned, this also returns holding the spa config lock.
4512 */
4513static l2arc_dev_t *
4514l2arc_dev_get_next(void)
4515{
4516 l2arc_dev_t *first, *next = NULL;
4517
4518 /*
4519 * Lock out the removal of spas (spa_namespace_lock), then removal
4520 * of cache devices (l2arc_dev_mtx). Once a device has been selected,
4521 * both locks will be dropped and a spa config lock held instead.
4522 */
4523 mutex_enter(&spa_namespace_lock);
4524 mutex_enter(&l2arc_dev_mtx);
4525
4526 /* if there are no vdevs, there is nothing to do */
4527 if (l2arc_ndev == 0)
4528 goto out;
4529
4530 first = NULL;
4531 next = l2arc_dev_last;
4532 do {
4533 /* loop around the list looking for a non-faulted vdev */
4534 if (next == NULL) {
4535 next = list_head(l2arc_dev_list);
4536 } else {
4537 next = list_next(l2arc_dev_list, next);
4538 if (next == NULL)
4539 next = list_head(l2arc_dev_list);
4540 }
4541
4542 /* if we have come back to the start, bail out */
4543 if (first == NULL)
4544 first = next;
4545 else if (next == first)
4546 break;
4547
4548 } while (vdev_is_dead(next->l2ad_vdev));
4549
4550 /* if we were unable to find any usable vdevs, return NULL */
4551 if (vdev_is_dead(next->l2ad_vdev))
4552 next = NULL;
4553
4554 l2arc_dev_last = next;
4555
4556out:
4557 mutex_exit(&l2arc_dev_mtx);
4558
4559 /*
4560 * Grab the config lock to prevent the 'next' device from being
4561 * removed while we are writing to it.
4562 */
4563 if (next != NULL)
4564 spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4565 mutex_exit(&spa_namespace_lock);
4566
4567 return (next);
4568}
4569
4570/*
4571 * Free buffers that were tagged for destruction.
4572 */
4573static void
4574l2arc_do_free_on_write()
4575{
4576 list_t *buflist;
4577 l2arc_data_free_t *df, *df_prev;
4578
4579 mutex_enter(&l2arc_free_on_write_mtx);
4580 buflist = l2arc_free_on_write;
4581
4582 for (df = list_tail(buflist); df; df = df_prev) {
4583 df_prev = list_prev(buflist, df);
4584 ASSERT(df->l2df_data != NULL);
4585 ASSERT(df->l2df_func != NULL);
4586 df->l2df_func(df->l2df_data, df->l2df_size);
4587 list_remove(buflist, df);
4588 kmem_free(df, sizeof (l2arc_data_free_t));
4589 }
4590
4591 mutex_exit(&l2arc_free_on_write_mtx);
4592}
4593
4594/*
4595 * A write to a cache device has completed. Update all headers to allow
4596 * reads from these buffers to begin.
4597 */
4598static void
4599l2arc_write_done(zio_t *zio)
4600{
4601 l2arc_write_callback_t *cb;
4602 l2arc_dev_t *dev;
4603 list_t *buflist;
4604 arc_buf_hdr_t *head, *ab, *ab_prev;
4605 l2arc_buf_hdr_t *abl2;
4606 kmutex_t *hash_lock;
4607 int64_t bytes_dropped = 0;
4608
4609 cb = zio->io_private;
4610 ASSERT(cb != NULL);
4611 dev = cb->l2wcb_dev;
4612 ASSERT(dev != NULL);
4613 head = cb->l2wcb_head;
4614 ASSERT(head != NULL);
4615 buflist = dev->l2ad_buflist;
4616 ASSERT(buflist != NULL);
4617 DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4618 l2arc_write_callback_t *, cb);
4619
4620 if (zio->io_error != 0)
4621 ARCSTAT_BUMP(arcstat_l2_writes_error);
4622
4623 mutex_enter(&l2arc_buflist_mtx);
4624
4625 /*
4626 * All writes completed, or an error was hit.
4627 */
4628 for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4629 ab_prev = list_prev(buflist, ab);
4630 abl2 = ab->b_l2hdr;
4631
4632 /*
4633 * Release the temporary compressed buffer as soon as possible.
4634 */
4635 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4636 l2arc_release_cdata_buf(ab);
4637
4638 hash_lock = HDR_LOCK(ab);
4639 if (!mutex_tryenter(hash_lock)) {
4640 /*
4641 * This buffer misses out. It may be in a stage
4642 * of eviction. Its ARC_L2_WRITING flag will be
4643 * left set, denying reads to this buffer.
4644 */
4645 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4646 continue;
4647 }
4648
4649 if (zio->io_error != 0) {
4650 /*
4651 * Error - drop L2ARC entry.
4652 */
4653 list_remove(buflist, ab);
4654 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4655 bytes_dropped += abl2->b_asize;
4656 ab->b_l2hdr = NULL;
4657 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4658 ab->b_size, 0);
4659 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4660 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4661 }
4662
4663 /*
4664 * Allow ARC to begin reads to this L2ARC entry.
4665 */
4666 ab->b_flags &= ~ARC_L2_WRITING;
4667
4668 mutex_exit(hash_lock);
4669 }
4670
4671 atomic_inc_64(&l2arc_writes_done);
4672 list_remove(buflist, head);
4673 kmem_cache_free(hdr_cache, head);
4674 mutex_exit(&l2arc_buflist_mtx);
4675
4676 vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
4677
4678 l2arc_do_free_on_write();
4679
4680 kmem_free(cb, sizeof (l2arc_write_callback_t));
4681}
4682
4683/*
4684 * A read to a cache device completed. Validate buffer contents before
4685 * handing over to the regular ARC routines.
4686 */
4687static void
4688l2arc_read_done(zio_t *zio)
4689{
4690 l2arc_read_callback_t *cb;
4691 arc_buf_hdr_t *hdr;
4692 arc_buf_t *buf;
4693 kmutex_t *hash_lock;
4694 int equal;
4695
4696 ASSERT(zio->io_vd != NULL);
4697 ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4698
4699 spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4700
4701 cb = zio->io_private;
4702 ASSERT(cb != NULL);
4703 buf = cb->l2rcb_buf;
4704 ASSERT(buf != NULL);
4705
4706 hash_lock = HDR_LOCK(buf->b_hdr);
4707 mutex_enter(hash_lock);
4708 hdr = buf->b_hdr;
4709 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4710
4711 /*
4712 * If the buffer was compressed, decompress it first.
4713 */
4714 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4715 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4716 ASSERT(zio->io_data != NULL);
4717
4718 /*
4719 * Check this survived the L2ARC journey.
4720 */
4721 equal = arc_cksum_equal(buf);
4722 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4723 mutex_exit(hash_lock);
4724 zio->io_private = buf;
4725 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4726 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
4727 arc_read_done(zio);
4728 } else {
4729 mutex_exit(hash_lock);
4730 /*
4731 * Buffer didn't survive caching. Increment stats and
4732 * reissue to the original storage device.
4733 */
4734 if (zio->io_error != 0) {
4735 ARCSTAT_BUMP(arcstat_l2_io_error);
4736 } else {
4737 zio->io_error = SET_ERROR(EIO);
4738 }
4739 if (!equal)
4740 ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4741
4742 /*
4743 * If there's no waiter, issue an async i/o to the primary
4744 * storage now. If there *is* a waiter, the caller must
4745 * issue the i/o in a context where it's OK to block.
4746 */
4747 if (zio->io_waiter == NULL) {
4748 zio_t *pio = zio_unique_parent(zio);
4749
4750 ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4751
4752 zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4753 buf->b_data, zio->io_size, arc_read_done, buf,
4754 zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4755 }
4756 }
4757
4758 kmem_free(cb, sizeof (l2arc_read_callback_t));
4759}
4760
4761/*
4762 * This is the list priority from which the L2ARC will search for pages to
4763 * cache.  This is used within loops (0..2*ARC_BUFC_NUMLISTS-1) to cycle
4764 * through lists in the desired order.  This order can have a significant
4765 * effect on cache performance.
4766 *
4767 * Currently the metadata lists are hit first, MFU then MRU, followed by
4768 * the data lists. This function returns a locked list, and also returns
4769 * the lock pointer.
4770 */
4771static list_t *
4772l2arc_list_locked(int list_num, kmutex_t **lock)
4773{
4774 list_t *list = NULL;
4775 int idx;
4776
4777 ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4778
4779 if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4780 idx = list_num;
4781 list = &arc_mfu->arcs_lists[idx];
4782 *lock = ARCS_LOCK(arc_mfu, idx);
4783 } else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4784 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4785 list = &arc_mru->arcs_lists[idx];
4786 *lock = ARCS_LOCK(arc_mru, idx);
4787 } else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4788 ARC_BUFC_NUMDATALISTS)) {
4789 idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4790 list = &arc_mfu->arcs_lists[idx];
4791 *lock = ARCS_LOCK(arc_mfu, idx);
4792 } else {
4793 idx = list_num - ARC_BUFC_NUMLISTS;
4794 list = &arc_mru->arcs_lists[idx];
4795 *lock = ARCS_LOCK(arc_mru, idx);
4796 }
4797
4798 ASSERT(!(MUTEX_HELD(*lock)));
4799 mutex_enter(*lock);
4800 return (list);
4801}
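
/*
 * For illustration, suppose ARC_BUFC_NUMMETADATALISTS and
 * ARC_BUFC_NUMDATALISTS are both 16, so ARC_BUFC_NUMLISTS is 32 (the
 * real values are defined elsewhere in this file).  Then list_num
 * 0..15 returns the MFU metadata lists, 16..31 the MRU metadata lists,
 * 32..47 the MFU data lists (arcs_lists[16..31]) and 48..63 the MRU
 * data lists, which is exactly the priority order described above.
 */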
4802
4803/*
4804 * Evict buffers from the device write hand to the distance specified in
4805 * bytes.  This distance may span populated buffers, or it may span nothing.
4806 * This is clearing a region on the L2ARC device ready for writing.
4807 * If the 'all' boolean is set, every buffer is evicted.
4808 */
4809static void
4810l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4811{
4812 list_t *buflist;
4813 l2arc_buf_hdr_t *abl2;
4814 arc_buf_hdr_t *ab, *ab_prev;
4815 kmutex_t *hash_lock;
4816 uint64_t taddr;
4817 int64_t bytes_evicted = 0;
4818
4819 buflist = dev->l2ad_buflist;
4820
4821 if (buflist == NULL)
4822 return;
4823
4824 if (!all && dev->l2ad_first) {
4825 /*
4826 * This is the first sweep through the device. There is
4827 * nothing to evict.
4828 */
4829 return;
4830 }
4831
4832 if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4833 /*
4834 * When nearing the end of the device, evict to the end
4835 * before the device write hand jumps to the start.
4836 */
4837 taddr = dev->l2ad_end;
4838 } else {
4839 taddr = dev->l2ad_hand + distance;
4840 }
4841 DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4842 uint64_t, taddr, boolean_t, all);
4843
4844top:
4845 mutex_enter(&l2arc_buflist_mtx);
4846 for (ab = list_tail(buflist); ab; ab = ab_prev) {
4847 ab_prev = list_prev(buflist, ab);
4848
4849 hash_lock = HDR_LOCK(ab);
4850 if (!mutex_tryenter(hash_lock)) {
4851 /*
4852 * Missed the hash lock. Retry.
4853 */
4854 ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4855 mutex_exit(&l2arc_buflist_mtx);
4856 mutex_enter(hash_lock);
4857 mutex_exit(hash_lock);
4858 goto top;
4859 }
4860
4861 if (HDR_L2_WRITE_HEAD(ab)) {
4862 /*
4863 * We hit a write head node. Leave it for
4864 * l2arc_write_done().
4865 */
4866 list_remove(buflist, ab);
4867 mutex_exit(hash_lock);
4868 continue;
4869 }
4870
4871 if (!all && ab->b_l2hdr != NULL &&
4872 (ab->b_l2hdr->b_daddr > taddr ||
4873 ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4874 /*
4875 * We've evicted to the target address,
4876 * or the end of the device.
4877 */
4878 mutex_exit(hash_lock);
4879 break;
4880 }
4881
4882 if (HDR_FREE_IN_PROGRESS(ab)) {
4883 /*
4884 * Already on the path to destruction.
4885 */
4886 mutex_exit(hash_lock);
4887 continue;
4888 }
4889
4890 if (ab->b_state == arc_l2c_only) {
4891 ASSERT(!HDR_L2_READING(ab));
4892 /*
4893 * This doesn't exist in the ARC. Destroy.
4894 * arc_hdr_destroy() will call list_remove()
4895 * and decrement arcstat_l2_size.
4896 */
4897 arc_change_state(arc_anon, ab, hash_lock);
4898 arc_hdr_destroy(ab);
4899 } else {
4900 /*
4901 * Invalidate issued or about to be issued
4902 * reads, since we may be about to write
4903 * over this location.
4904 */
4905 if (HDR_L2_READING(ab)) {
4906 ARCSTAT_BUMP(arcstat_l2_evict_reading);
4907 ab->b_flags |= ARC_L2_EVICTED;
4908 }
4909
4910 /*
4911 * Tell ARC this no longer exists in L2ARC.
4912 */
4913 if (ab->b_l2hdr != NULL) {
4914 abl2 = ab->b_l2hdr;
4915 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4916 bytes_evicted += abl2->b_asize;
4917 ab->b_l2hdr = NULL;
4918 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4919 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4920 }
4921 list_remove(buflist, ab);
4922
4923 /*
4924 * This may have been leftover after a
4925 * failed write.
4926 */
4927 ab->b_flags &= ~ARC_L2_WRITING;
4928 }
4929 mutex_exit(hash_lock);
4930 }
4931 mutex_exit(&l2arc_buflist_mtx);
4932
4933 vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
4934 dev->l2ad_evict = taddr;
4935}
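
/*
 * Example, with made-up numbers: for an 8 MB write about to land while
 * the write hand is still 20 MB short of l2ad_end, taddr becomes
 * hand + 8 MB and only buffers whose b_daddr falls between the hand and
 * taddr are dropped.  Once the hand comes within 2 * distance = 16 MB
 * of the end, eviction instead runs all the way to l2ad_end, because
 * the hand is about to wrap back to l2ad_start.
 */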
4936
4937/*
4938 * Find and write ARC buffers to the L2ARC device.
4939 *
4940 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4941 * for reading until they have completed writing.
4942 * The headroom_boost is an in-out parameter used to maintain headroom boost
4943 * state between calls to this function.
4944 *
4945 * Returns the number of bytes actually written (which may be smaller than
4946 * the delta by which the device hand has changed due to alignment).
4947 */
4948static uint64_t
4949l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4950 boolean_t *headroom_boost)
4951{
4952 arc_buf_hdr_t *ab, *ab_prev, *head;
4953 list_t *list;
4954 uint64_t write_asize, write_psize, write_sz, headroom,
4955 buf_compress_minsz;
4956 void *buf_data;
4957 kmutex_t *list_lock;
4958 boolean_t full;
4959 l2arc_write_callback_t *cb;
4960 zio_t *pio, *wzio;
4961 uint64_t guid = spa_load_guid(spa);
4962 const boolean_t do_headroom_boost = *headroom_boost;
4963 int try;
4964
4965 ASSERT(dev->l2ad_vdev != NULL);
4966
4967	/* Lower the flag now; we might want to raise it again later. */
4968 *headroom_boost = B_FALSE;
4969
4970 pio = NULL;
4971 write_sz = write_asize = write_psize = 0;
4972 full = B_FALSE;
4973 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4974 head->b_flags |= ARC_L2_WRITE_HEAD;
4975
4976 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4977 /*
4978 * We will want to try to compress buffers that are at least 2x the
4979 * device sector size.
4980 */
4981 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4982
4983 /*
4984 * Copy buffers for L2ARC writing.
4985 */
4986 mutex_enter(&l2arc_buflist_mtx);
4987 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4988 uint64_t passed_sz = 0;
4989
4990 list = l2arc_list_locked(try, &list_lock);
4991 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4992
4993 /*
4994 * L2ARC fast warmup.
4995 *
4996 * Until the ARC is warm and starts to evict, read from the
4997 * head of the ARC lists rather than the tail.
4998 */
4999 if (arc_warm == B_FALSE)
5000 ab = list_head(list);
5001 else
5002 ab = list_tail(list);
5003 if (ab == NULL)
5004 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
5005
5006 headroom = target_sz * l2arc_headroom;
5007 if (do_headroom_boost)
5008 headroom = (headroom * l2arc_headroom_boost) / 100;
5009
5010 for (; ab; ab = ab_prev) {
5011 l2arc_buf_hdr_t *l2hdr;
5012 kmutex_t *hash_lock;
5013 uint64_t buf_sz;
5014
5015 if (arc_warm == B_FALSE)
5016 ab_prev = list_next(list, ab);
5017 else
5018 ab_prev = list_prev(list, ab);
5019 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
5020
5021 hash_lock = HDR_LOCK(ab);
5022 if (!mutex_tryenter(hash_lock)) {
5023 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
5024 /*
5025 * Skip this buffer rather than waiting.
5026 */
5027 continue;
5028 }
5029
5030 passed_sz += ab->b_size;
5031 if (passed_sz > headroom) {
5032 /*
5033 * Searched too far.
5034 */
5035 mutex_exit(hash_lock);
5036 ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
5037 break;
5038 }
5039
5040 if (!l2arc_write_eligible(guid, ab)) {
5041 mutex_exit(hash_lock);
5042 continue;
5043 }
5044
5045 if ((write_sz + ab->b_size) > target_sz) {
5046 full = B_TRUE;
5047 mutex_exit(hash_lock);
5048 ARCSTAT_BUMP(arcstat_l2_write_full);
5049 break;
5050 }
5051
5052 if (pio == NULL) {
5053 /*
5054 * Insert a dummy header on the buflist so
5055 * l2arc_write_done() can find where the
5056 * write buffers begin without searching.
5057 */
5058 list_insert_head(dev->l2ad_buflist, head);
5059
5060 cb = kmem_alloc(
5061 sizeof (l2arc_write_callback_t), KM_SLEEP);
5062 cb->l2wcb_dev = dev;
5063 cb->l2wcb_head = head;
5064 pio = zio_root(spa, l2arc_write_done, cb,
5065 ZIO_FLAG_CANFAIL);
5066 ARCSTAT_BUMP(arcstat_l2_write_pios);
5067 }
5068
5069 /*
5070 * Create and add a new L2ARC header.
5071 */
5072 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
5073 l2hdr->b_dev = dev;
5074 ab->b_flags |= ARC_L2_WRITING;
5075
5076 /*
5077 * Temporarily stash the data buffer in b_tmp_cdata.
5078 * The subsequent write step will pick it up from
5079			 * there. This is because we can't access ab->b_buf
5080 * without holding the hash_lock, which we in turn
5081 * can't access without holding the ARC list locks
5082 * (which we want to avoid during compression/writing).
5083 */
5084 l2hdr->b_compress = ZIO_COMPRESS_OFF;
5085 l2hdr->b_asize = ab->b_size;
5086 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
5087
5088 buf_sz = ab->b_size;
5089 ab->b_l2hdr = l2hdr;
5090
5091 list_insert_head(dev->l2ad_buflist, ab);
5092
5093 /*
5094 * Compute and store the buffer cksum before
5095 * writing. On debug the cksum is verified first.
5096 */
5097 arc_cksum_verify(ab->b_buf);
5098 arc_cksum_compute(ab->b_buf, B_TRUE);
5099
5100 mutex_exit(hash_lock);
5101
5102 write_sz += buf_sz;
5103 }
5104
5105 mutex_exit(list_lock);
5106
5107 if (full == B_TRUE)
5108 break;
5109 }
5110
5111 /* No buffers selected for writing? */
5112 if (pio == NULL) {
5113 ASSERT0(write_sz);
5114 mutex_exit(&l2arc_buflist_mtx);
5115 kmem_cache_free(hdr_cache, head);
5116 return (0);
5117 }
5118
5119 /*
5120 * Now start writing the buffers. We're starting at the write head
5121 * and work backwards, retracing the course of the buffer selector
5122 * loop above.
5123 */
5124 for (ab = list_prev(dev->l2ad_buflist, head); ab;
5125 ab = list_prev(dev->l2ad_buflist, ab)) {
5126 l2arc_buf_hdr_t *l2hdr;
5127 uint64_t buf_sz;
5128
5129 /*
5130 * We shouldn't need to lock the buffer here, since we flagged
5131 * it as ARC_L2_WRITING in the previous step, but we must take
5132 * care to only access its L2 cache parameters. In particular,
5133 * ab->b_buf may be invalid by now due to ARC eviction.
5134 */
5135 l2hdr = ab->b_l2hdr;
5136 l2hdr->b_daddr = dev->l2ad_hand;
5137
5138 if ((ab->b_flags & ARC_L2COMPRESS) &&
5139 l2hdr->b_asize >= buf_compress_minsz) {
5140 if (l2arc_compress_buf(l2hdr)) {
5141 /*
5142 * If compression succeeded, enable headroom
5143 * boost on the next scan cycle.
5144 */
5145 *headroom_boost = B_TRUE;
5146 }
5147 }
5148
5149 /*
5150 * Pick up the buffer data we had previously stashed away
5151 * (and now potentially also compressed).
5152 */
5153 buf_data = l2hdr->b_tmp_cdata;
5154 buf_sz = l2hdr->b_asize;
5155
5156 /* Compression may have squashed the buffer to zero length. */
5157 if (buf_sz != 0) {
5158 uint64_t buf_p_sz;
5159
5160 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5161 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5162 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5163 ZIO_FLAG_CANFAIL, B_FALSE);
5164
5165 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5166 zio_t *, wzio);
5167 (void) zio_nowait(wzio);
5168
5169 write_asize += buf_sz;
5170 /*
5171 * Keep the clock hand suitably device-aligned.
5172 */
5173 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5174 write_psize += buf_p_sz;
5175 dev->l2ad_hand += buf_p_sz;
5176 }
5177 }
5178
5179 mutex_exit(&l2arc_buflist_mtx);
5180
5181 ASSERT3U(write_asize, <=, target_sz);
5182 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5183 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5184 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5185 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5186 vdev_space_update(dev->l2ad_vdev, write_asize, 0, 0);
5187
5188 /*
5189 * Bump device hand to the device start if it is approaching the end.
5190 * l2arc_evict() will already have evicted ahead for this case.
5191 */
5192 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5193 dev->l2ad_hand = dev->l2ad_start;
5194 dev->l2ad_evict = dev->l2ad_start;
5195 dev->l2ad_first = B_FALSE;
5196 }
5197
5198 dev->l2ad_writing = B_TRUE;
5199 (void) zio_wait(pio);
5200 dev->l2ad_writing = B_FALSE;
5201
5202 return (write_asize);
5203}
5204
5205/*
5206 * Compresses an L2ARC buffer.
5207 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5208 * size in l2hdr->b_asize. This routine tries to compress the data and
5209 * depending on the compression result there are three possible outcomes:
5210 * *) The buffer was incompressible. The original l2hdr contents were left
5211 * untouched and are ready for writing to an L2 device.
5212 * *) The buffer was all-zeros, so there is no need to write it to an L2
5213 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5214 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5215 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5216 * data buffer which holds the compressed data to be written, and b_asize
5217 * tells us how much data there is. b_compress is set to the appropriate
5218 * compression algorithm. Once writing is done, invoke
5219 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5220 *
5221 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5222 * buffer was incompressible).
5223 */
5224static boolean_t
5225l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5226{
5227 void *cdata;
5228 size_t csize, len, rounded;
5229
5230 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5231 ASSERT(l2hdr->b_tmp_cdata != NULL);
5232
5233 len = l2hdr->b_asize;
5234 cdata = zio_data_buf_alloc(len);
5235 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5236 cdata, l2hdr->b_asize, (size_t)(1ULL << l2hdr->b_dev->l2ad_vdev->vdev_ashift));
5237
5238 rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
5239 if (rounded > csize) {
5240 bzero((char *)cdata + csize, rounded - csize);
5241 csize = rounded;
5242 }
5243
5244 if (csize == 0) {
5245 /* zero block, indicate that there's nothing to write */
5246 zio_data_buf_free(cdata, len);
5247 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5248 l2hdr->b_asize = 0;
5249 l2hdr->b_tmp_cdata = NULL;
5250 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5251 return (B_TRUE);
5252 } else if (csize > 0 && csize < len) {
5253 /*
5254 * Compression succeeded, we'll keep the cdata around for
5255 * writing and release it afterwards.
5256 */
5257 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5258 l2hdr->b_asize = csize;
5259 l2hdr->b_tmp_cdata = cdata;
5260 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5261 return (B_TRUE);
5262 } else {
5263 /*
5264 * Compression failed, release the compressed buffer.
5265 * l2hdr will be left unmodified.
5266 */
5267 zio_data_buf_free(cdata, len);
5268 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5269 return (B_FALSE);
5270 }
5271}
5272
5273/*
5274 * Decompresses a zio read back from an l2arc device. On success, the
5275 * underlying zio's io_data buffer is overwritten by the uncompressed
5276 * version. On decompression error (corrupt compressed stream), the
5277 * zio->io_error value is set to signal an I/O error.
5278 *
5279 * Please note that the compressed data stream is not checksummed, so
5280 * if the underlying device is experiencing data corruption, we may feed
5281 * corrupt data to the decompressor, so the decompressor needs to be
5282 * able to handle this situation (LZ4 does).
5283 */
5284static void
5285l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5286{
5287 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5288
5289 if (zio->io_error != 0) {
5290 /*
5291		 * An I/O error has occurred, just restore the original io
5292 * size in preparation for a main pool read.
5293 */
5294 zio->io_orig_size = zio->io_size = hdr->b_size;
5295 return;
5296 }
5297
5298 if (c == ZIO_COMPRESS_EMPTY) {
5299 /*
5300 * An empty buffer results in a null zio, which means we
5301 * need to fill its io_data after we're done restoring the
5302 * buffer's contents.
5303 */
5304 ASSERT(hdr->b_buf != NULL);
5305 bzero(hdr->b_buf->b_data, hdr->b_size);
5306 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5307 } else {
5308 ASSERT(zio->io_data != NULL);
5309 /*
5310 * We copy the compressed data from the start of the arc buffer
5311 * (the zio_read will have pulled in only what we need, the
5312 * rest is garbage which we will overwrite at decompression)
5313 * and then decompress back to the ARC data buffer. This way we
5314 * can minimize copying by simply decompressing back over the
5315 * original compressed data (rather than decompressing to an
5316 * aux buffer and then copying back the uncompressed buffer,
5317 * which is likely to be much larger).
5318 */
5319 uint64_t csize;
5320 void *cdata;
5321
5322 csize = zio->io_size;
5323 cdata = zio_data_buf_alloc(csize);
5324 bcopy(zio->io_data, cdata, csize);
5325 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5326 hdr->b_size) != 0)
5327 zio->io_error = EIO;
5328 zio_data_buf_free(cdata, csize);
5329 }
5330
5331 /* Restore the expected uncompressed IO size. */
5332 zio->io_orig_size = zio->io_size = hdr->b_size;
5333}
5334
5335/*
5336 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5337 * This buffer serves as a temporary holder of compressed data while
5338 * the buffer entry is being written to an l2arc device. Once that is
5339 * done, we can dispose of it.
5340 */
5341static void
5342l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5343{
5344 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5345
5346 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5347 /*
5348 * If the data was compressed, then we've allocated a
5349 * temporary buffer for it, so now we need to release it.
5350 */
5351 ASSERT(l2hdr->b_tmp_cdata != NULL);
5352 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5353 }
5354 l2hdr->b_tmp_cdata = NULL;
5355}
5356
5357/*
5358 * This thread feeds the L2ARC at regular intervals. This is the beating
5359 * heart of the L2ARC.
5360 */
5361static void
5362l2arc_feed_thread(void *dummy __unused)
5363{
5364 callb_cpr_t cpr;
5365 l2arc_dev_t *dev;
5366 spa_t *spa;
5367 uint64_t size, wrote;
5368 clock_t begin, next = ddi_get_lbolt();
5369 boolean_t headroom_boost = B_FALSE;
5370
5371 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5372
5373 mutex_enter(&l2arc_feed_thr_lock);
5374
5375 while (l2arc_thread_exit == 0) {
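		/*
		 * Sleep until the next scheduled feed time.  The CPR
		 * (checkpoint/resume) calls keep this wait suspend-safe.
		 */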
5376 CALLB_CPR_SAFE_BEGIN(&cpr);
5377 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5378 next - ddi_get_lbolt());
5379 CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5380 next = ddi_get_lbolt() + hz;
5381
5382 /*
5383 * Quick check for L2ARC devices.
5384 */
5385 mutex_enter(&l2arc_dev_mtx);
5386 if (l2arc_ndev == 0) {
5387 mutex_exit(&l2arc_dev_mtx);
5388 continue;
5389 }
5390 mutex_exit(&l2arc_dev_mtx);
5391 begin = ddi_get_lbolt();
5392
5393 /*
5394 * This selects the next l2arc device to write to, and in
5395 * doing so the next spa to feed from: dev->l2ad_spa. This
5396 * will return NULL if there are now no l2arc devices or if
5397 * they are all faulted.
5398 *
5399 * If a device is returned, its spa's config lock is also
5400 * held to prevent device removal. l2arc_dev_get_next()
5401 * will grab and release l2arc_dev_mtx.
5402 */
5403 if ((dev = l2arc_dev_get_next()) == NULL)
5404 continue;
5405
5406 spa = dev->l2ad_spa;
5407 ASSERT(spa != NULL);
5408
5409 /*
5410 * If the pool is read-only then force the feed thread to
5411 * sleep a little longer.
5412 */
5413 if (!spa_writeable(spa)) {
5414 next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5415 spa_config_exit(spa, SCL_L2ARC, dev);
5416 continue;
5417 }
5418
5419 /*
5420 * Avoid contributing to memory pressure.
5421 */
5422 if (arc_reclaim_needed()) {
5423 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5424 spa_config_exit(spa, SCL_L2ARC, dev);
5425 continue;
5426 }
5427
5428 ARCSTAT_BUMP(arcstat_l2_feeds);
5429
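		/*
		 * Determine how much to write during this interval.
		 */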
5430 size = l2arc_write_size();
5431
5432 /*
5433 * Evict L2ARC buffers that will be overwritten.
5434 */
5435 l2arc_evict(dev, size, B_FALSE);
5436
5437 /*
5438 * Write ARC buffers.
5439 */
5440 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5441
5442 /*
5443 * Calculate interval between writes.
5444 */
5445 next = l2arc_write_interval(begin, size, wrote);
5446 spa_config_exit(spa, SCL_L2ARC, dev);
5447 }
5448
5449 l2arc_thread_exit = 0;
5450 cv_broadcast(&l2arc_feed_thr_cv);
5451 CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
5452 thread_exit();
5453}
5454
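/*
 * Returns B_TRUE if the given vdev is currently registered as an L2ARC
 * device, B_FALSE otherwise.
 */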
5455boolean_t
5456l2arc_vdev_present(vdev_t *vd)
5457{
5458 l2arc_dev_t *dev;
5459
5460 mutex_enter(&l2arc_dev_mtx);
5461 for (dev = list_head(l2arc_dev_list); dev != NULL;
5462 dev = list_next(l2arc_dev_list, dev)) {
5463 if (dev->l2ad_vdev == vd)
5464 break;
5465 }
5466 mutex_exit(&l2arc_dev_mtx);
5467
5468 return (dev != NULL);
5469}
5470
5471/*
5472 * Add a vdev for use by the L2ARC. By this point the spa has already
5473 * validated the vdev and opened it.
5474 */
5475void
5476l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5477{
5478 l2arc_dev_t *adddev;
5479
5480 ASSERT(!l2arc_vdev_present(vd));
5481
5482 vdev_ashift_optimize(vd);
5483
5484 /*
5485 * Create a new l2arc device entry.
5486 */
5487 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5488 adddev->l2ad_spa = spa;
5489 adddev->l2ad_vdev = vd;
5490 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5491 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5492 adddev->l2ad_hand = adddev->l2ad_start;
5493 adddev->l2ad_evict = adddev->l2ad_start;
5494 adddev->l2ad_first = B_TRUE;
5495 adddev->l2ad_writing = B_FALSE;
5496
5497 /*
5498 * This is a list of all ARC buffers that are still valid on the
5499 * device.
5500 */
5501 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5502 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5503 offsetof(arc_buf_hdr_t, b_l2node));
5504
5505 vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5506
5507 /*
5508 * Add device to global list
5509 */
5510 mutex_enter(&l2arc_dev_mtx);
5511 list_insert_head(l2arc_dev_list, adddev);
5512 atomic_inc_64(&l2arc_ndev);
5513 mutex_exit(&l2arc_dev_mtx);
5514}
5515
5516/*
5517 * Remove a vdev from the L2ARC.
5518 */
5519void
5520l2arc_remove_vdev(vdev_t *vd)
5521{
5522 l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5523
5524 /*
5525 * Find the device by vdev
5526 */
5527 mutex_enter(&l2arc_dev_mtx);
5528 for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5529 nextdev = list_next(l2arc_dev_list, dev);
5530 if (vd == dev->l2ad_vdev) {
5531 remdev = dev;
5532 break;
5533 }
5534 }
5535 ASSERT(remdev != NULL);
5536
5537 /*
5538 * Remove device from global list
5539 */
5540 list_remove(l2arc_dev_list, remdev);
5541 l2arc_dev_last = NULL; /* may have been invalidated */
5542 atomic_dec_64(&l2arc_ndev);
5543 mutex_exit(&l2arc_dev_mtx);
5544
5545 /*
5546	 * Clear all buflists and ARC references, flushing the L2ARC device.
5547 */
5548 l2arc_evict(remdev, 0, B_TRUE);
5549 list_destroy(remdev->l2ad_buflist);
5550 kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5551 kmem_free(remdev, sizeof (l2arc_dev_t));
5552}
5553
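/*
 * Initialize global L2ARC state: locks, condition variables and the
 * lists of cache devices and free-on-write buffers.
 */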
5554void
5555l2arc_init(void)
5556{
5557 l2arc_thread_exit = 0;
5558 l2arc_ndev = 0;
5559 l2arc_writes_sent = 0;
5560 l2arc_writes_done = 0;
5561
5562 mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5563 cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5564 mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5565 mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5566 mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5567
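	/* Hook up the global device list and free-on-write list. */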
5568 l2arc_dev_list = &L2ARC_dev_list;
5569 l2arc_free_on_write = &L2ARC_free_on_write;
5570 list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5571 offsetof(l2arc_dev_t, l2ad_node));
5572 list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5573 offsetof(l2arc_data_free_t, l2df_list_node));
5574}
5575
5576void
5577l2arc_fini(void)
5578{
5579 /*
5580 * This is called from dmu_fini(), which is called from spa_fini();
5581 * Because of this, we can assume that all l2arc devices have
5582 * already been removed when the pools themselves were removed.
5583 */
5584
5585 l2arc_do_free_on_write();
5586
5587 mutex_destroy(&l2arc_feed_thr_lock);
5588 cv_destroy(&l2arc_feed_thr_cv);
5589 mutex_destroy(&l2arc_dev_mtx);
5590 mutex_destroy(&l2arc_buflist_mtx);
5591 mutex_destroy(&l2arc_free_on_write_mtx);
5592
5593 list_destroy(l2arc_dev_list);
5594 list_destroy(l2arc_free_on_write);
5595}
5596
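/*
 * Start the L2ARC feed thread.  This is a no-op when ZFS is not opened
 * for writing.
 */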
5597void
5598l2arc_start(void)
5599{
5600 if (!(spa_mode_global & FWRITE))
5601 return;
5602
5603 (void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5604 TS_RUN, minclsyspri);
5605}
5606
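/*
 * Signal the L2ARC feed thread to exit and wait until it has done so.
 */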
5607void
5608l2arc_stop(void)
5609{
5610 if (!(spa_mode_global & FWRITE))
5611 return;
5612
5613 mutex_enter(&l2arc_feed_thr_lock);
5614 cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
5615 l2arc_thread_exit = 1;
5616 while (l2arc_thread_exit != 0)
5617 cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5618 mutex_exit(&l2arc_feed_thr_lock);
5619}