arc.c revision 288564
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2012, Joyent, Inc. All rights reserved.
24 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
25 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
26 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
27 */
28
29/*
30 * DVA-based Adjustable Replacement Cache
31 *
32 * While much of the theory of operation used here is
33 * based on the self-tuning, low overhead replacement cache
34 * presented by Megiddo and Modha at FAST 2003, there are some
35 * significant differences:
36 *
37 * 1. The Megiddo and Modha model assumes any page is evictable.
38 * Pages in its cache cannot be "locked" into memory.  This makes
39 * the eviction algorithm simple: evict the last page in the list.
40 * This also makes the performance characteristics easy to reason
41 * about.  Our cache is not so simple.  At any given moment, some
42 * subset of the blocks in the cache are un-evictable because we
43 * have handed out a reference to them.  Blocks are only evictable
44 * when there are no external references active.  This makes
45 * eviction far more problematic:  we choose to evict the evictable
46 * blocks that are the "lowest" in the list.
47 *
48 * There are times when it is not possible to evict the requested
49 * space.  In these circumstances we are unable to adjust the cache
50 * size.  To prevent the cache from growing unbounded at these times, we
51 * implement a "cache throttle" that slows the flow of new data
52 * into the cache until we can make space available.
53 *
54 * 2. The Megiddo and Modha model assumes a fixed cache size.
55 * Pages are evicted when the cache is full and there is a cache
56 * miss.  Our model has a variable sized cache.  It grows with
57 * high use, but also tries to react to memory pressure from the
58 * operating system: decreasing its size when system memory is
59 * tight.
60 *
61 * 3. The Megiddo and Modha model assumes a fixed page size. All
62 * elements of the cache are therefore exactly the same size.  So
63 * when adjusting the cache size following a cache miss, it's simply
64 * a matter of choosing a single page to evict.  In our model, we
65 * have variable sized cache blocks (ranging from 512 bytes to
66 * 128K bytes).  We therefore choose a set of blocks to evict to make
67 * space for a cache miss that approximates as closely as possible
68 * the space used by the new block.
69 *
70 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
71 * by N. Megiddo & D. Modha, FAST 2003
72 */
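/*
 * Illustrative sketch (not taken from this file; the helper names are
 * placeholders): difference 3 above means a miss is serviced by evicting a
 * set of variable-sized blocks whose total roughly matches the incoming
 * block, e.g.:
 *
 *	uint64_t freed = 0;
 *	while (freed < new_block_size) {
 *		arc_buf_hdr_t *victim = pick_lowest_evictable_buffer();
 *		if (victim == NULL)
 *			break;		(nothing evictable; throttle instead)
 *		freed += victim->b_size;
 *		evict(victim);
 *	}
 *
 * The real work happens in the arc_evict_*() routines further down.
 */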
73
74/*
75 * The locking model:
76 *
77 * A new reference to a cache buffer can be obtained in two
78 * ways: 1) via a hash table lookup using the DVA as a key,
79 * or 2) via one of the ARC lists.  The arc_read() interface
80 * uses method 1, while the internal arc algorithms for
81 * adjusting the cache use method 2.  We therefore provide two
82 * types of locks: 1) the hash table lock array, and 2) the
83 * arc list locks.
84 *
85 * Buffers do not have their own mutexes; rather, they rely on the
86 * hash table mutexes for the bulk of their protection (i.e. most
87 * fields in the arc_buf_hdr_t are protected by these mutexes).
88 *
89 * buf_hash_find() returns the appropriate mutex (held) when it
90 * locates the requested buffer in the hash table.  It returns
91 * NULL for the mutex if the buffer was not in the table.
92 *
93 * buf_hash_remove() expects the appropriate hash mutex to be
94 * already held before it is invoked.
95 *
96 * Each arc state also has a mutex which is used to protect the
97 * buffer list associated with the state.  When attempting to
98 * obtain a hash table lock while holding an arc list lock you
99 * must use mutex_tryenter() to avoid deadlock.  Also note that
100 * the active state mutex must be held before the ghost state mutex.
101 *
102 * Arc buffers may have an associated eviction callback function.
103 * This function will be invoked prior to removing the buffer (e.g.
104 * in arc_do_user_evicts()).  Note however that the data associated
105 * with the buffer may be evicted prior to the callback.  The callback
106 * must be made with *no locks held* (to prevent deadlock).  Additionally,
107 * the users of callbacks must ensure that their private data is
108 * protected from simultaneous callbacks from arc_clear_callback()
109 * and arc_do_user_evicts().
110 *
111 * Note that the majority of the performance stats are manipulated
112 * with atomic operations.
113 *
114 * The L2ARC uses the l2ad_mtx on each vdev for the following:
115 *
116 *	- L2ARC buflist creation
117 *	- L2ARC buflist eviction
118 *	- L2ARC write completion, which walks L2ARC buflists
119 *	- ARC header destruction, as it removes from L2ARC buflists
120 *	- ARC header release, as it removes from L2ARC buflists
121 */
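/*
 * A minimal sketch (an assumption, using macros defined later in this file)
 * of the lock-order rule above: when already holding an ARC list lock, only
 * try-lock the hash lock and skip the buffer on failure rather than block:
 *
 *	kmutex_t *hash_lock = HDR_LOCK(hdr);
 *	if (!mutex_tryenter(hash_lock)) {
 *		ARCSTAT_BUMP(arcstat_mutex_miss);
 *		continue;	(skip this buffer, revisit it on a later pass)
 *	}
 *	(... operate on hdr ...)
 *	mutex_exit(hash_lock);
 *
 * Blocking here could deadlock against a thread that holds the hash lock and
 * is waiting for the list lock.
 */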
122
123#include <sys/spa.h>
124#include <sys/zio.h>
125#include <sys/zio_compress.h>
126#include <sys/zfs_context.h>
127#include <sys/arc.h>
128#include <sys/refcount.h>
129#include <sys/vdev.h>
130#include <sys/vdev_impl.h>
131#include <sys/dsl_pool.h>
132#ifdef _KERNEL
133#include <sys/dnlc.h>
134#endif
135#include <sys/callb.h>
136#include <sys/kstat.h>
137#include <sys/trim_map.h>
138#include <zfs_fletcher.h>
139#include <sys/sdt.h>
140
141#include <vm/vm_pageout.h>
142#include <machine/vmparam.h>
143
144#ifdef illumos
145#ifndef _KERNEL
146/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
147boolean_t arc_watch = B_FALSE;
148int arc_procfd;
149#endif
150#endif /* illumos */
151
152static kmutex_t		arc_reclaim_thr_lock;
153static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
154static uint8_t		arc_thread_exit;
155
156uint_t arc_reduce_dnlc_percent = 3;
157
158/*
159 * The number of iterations through arc_evict_*() before we
160 * drop & reacquire the lock.
161 */
162int arc_evict_iterations = 100;
163
164/* number of seconds before growing cache again */
165static int		arc_grow_retry = 60;
166
167/* shift of arc_c for calculating both min and max arc_p */
168static int		arc_p_min_shift = 4;
169
170/* log2(fraction of arc to reclaim) */
171static int		arc_shrink_shift = 7;
172
173/*
174 * log2(fraction of ARC which must be free to allow growing).
175 * I.e. If there is less than arc_c >> arc_no_grow_shift free memory,
176 * when reading a new block into the ARC, we will evict an equal-sized block
177 * from the ARC.
178 *
179 * This must be less than arc_shrink_shift, so that when we shrink the ARC,
180 * we will still not allow it to grow.
181 */
182int			arc_no_grow_shift = 5;
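/*
 * Worked example (illustrative numbers): with arc_c = 4GB, growth is only
 * allowed while at least 4GB >> 5 = 128MB remains free.  A shrink step
 * removes arc_c >> arc_shrink_shift = 4GB >> 7 = 32MB, which is less than
 * that 128MB threshold, so shrinking never frees enough memory to re-enable
 * growth on its own; hence the requirement above that this shift be smaller
 * than arc_shrink_shift.
 */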
183
184
185/*
186 * minimum lifespan of a prefetch block in clock ticks
187 * (initialized in arc_init())
188 */
189static int		arc_min_prefetch_lifespan;
190
191/*
192 * If this percent of memory is free, don't throttle.
193 */
194int arc_lotsfree_percent = 10;
195
196static int arc_dead;
197extern int zfs_prefetch_disable;
198
199/*
200 * The arc has filled available memory and has now warmed up.
201 */
202static boolean_t arc_warm;
203
204uint64_t zfs_arc_max;
205uint64_t zfs_arc_min;
206uint64_t zfs_arc_meta_limit = 0;
207uint64_t zfs_arc_meta_min = 0;
208int zfs_arc_grow_retry = 0;
209int zfs_arc_shrink_shift = 0;
210int zfs_arc_p_min_shift = 0;
211int zfs_disable_dup_eviction = 0;
212uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
213u_int zfs_arc_free_target = 0;
214
215static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
216static int sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS);
217
218#ifdef _KERNEL
219static void
220arc_free_target_init(void *unused __unused)
221{
222
223	zfs_arc_free_target = vm_pageout_wakeup_thresh;
224}
225SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
226    arc_free_target_init, NULL);
227
228TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
229TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
230TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
231TUNABLE_QUAD("vfs.zfs.arc_meta_min", &zfs_arc_meta_min);
232TUNABLE_QUAD("vfs.zfs.arc_average_blocksize", &zfs_arc_average_blocksize);
233TUNABLE_INT("vfs.zfs.arc_shrink_shift", &zfs_arc_shrink_shift);
234SYSCTL_DECL(_vfs_zfs);
235SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
236    "Maximum ARC size");
237SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
238    "Minimum ARC size");
239SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
240    &zfs_arc_average_blocksize, 0,
241    "ARC average blocksize");
242SYSCTL_INT(_vfs_zfs, OID_AUTO, arc_shrink_shift, CTLFLAG_RW,
243    &arc_shrink_shift, 0,
244    "log2(fraction of arc to reclaim)");
245
246/*
247 * We don't have a tunable for arc_free_target due to the dependency on
248 * pagedaemon initialisation.
249 */
250SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
251    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
252    sysctl_vfs_zfs_arc_free_target, "IU",
253    "Desired number of free pages below which ARC triggers reclaim");
254
255static int
256sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
257{
258	u_int val;
259	int err;
260
261	val = zfs_arc_free_target;
262	err = sysctl_handle_int(oidp, &val, 0, req);
263	if (err != 0 || req->newptr == NULL)
264		return (err);
265
266	if (val < minfree)
267		return (EINVAL);
268	if (val > cnt.v_page_count)
269		return (EINVAL);
270
271	zfs_arc_free_target = val;
272
273	return (0);
274}
275
276/*
277 * Must be declared here, before the definition of the corresponding kstat
278 * macro, which uses the same names and would otherwise confuse the compiler.
279 */
280SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_meta_limit,
281    CTLTYPE_U64 | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(uint64_t),
282    sysctl_vfs_zfs_arc_meta_limit, "QU",
283    "ARC metadata limit");
284#endif
285
286/*
287 * Note that buffers can be in one of 6 states:
288 *	ARC_anon	- anonymous (discussed below)
289 *	ARC_mru		- recently used, currently cached
290 *	ARC_mru_ghost	- recently used, no longer in cache
291 *	ARC_mfu		- frequently used, currently cached
292 *	ARC_mfu_ghost	- frequently used, no longer in cache
293 *	ARC_l2c_only	- exists in L2ARC but not other states
294 * When there are no active references to the buffer, they are
295 * linked onto a list in one of these arc states.  These are
296 * the only buffers that can be evicted or deleted.  Within each
297 * state there are multiple lists, one for meta-data and one for
298 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
299 * etc.) is tracked separately so that it can be managed more
300 * explicitly: favored over data, limited explicitly.
301 *
302 * Anonymous buffers are buffers that are not associated with
303 * a DVA.  These are buffers that hold dirty block copies
304 * before they are written to stable storage.  By definition,
305 * they are "ref'd" and are considered part of arc_mru
306 * that cannot be freed.  Generally, they will acquire a DVA
307 * as they are written and migrate onto the arc_mru list.
308 *
309 * The ARC_l2c_only state is for buffers that are in the second
310 * level ARC but no longer in any of the ARC_m* lists.  The second
311 * level ARC itself may also contain buffers that are in any of
312 * the ARC_m* states - meaning that a buffer can exist in two
313 * places.  The reason for the ARC_l2c_only state is to keep the
314 * buffer header in the hash table, so that reads that hit the
315 * second level ARC benefit from these fast lookups.
316 */
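/*
 * Illustrative lifecycle (a simplification, not an exhaustive state machine):
 * a dirty buffer starts in ARC_anon, gains a DVA when written and moves to
 * ARC_mru; a subsequent read promotes it to ARC_mfu; eviction of its data
 * drops the header onto ARC_mru_ghost or ARC_mfu_ghost, where a later hit
 * tells the ARC to grow the corresponding side of the cache; and a header
 * whose data survives only on a cache device sits in ARC_l2c_only until the
 * block is read back in or evicted from the L2ARC.
 */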
317
318#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
319struct arcs_lock {
320	kmutex_t	arcs_lock;
321#ifdef _KERNEL
322	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
323#endif
324};
325
326/*
327 * must be a power of two for the mask to work
328 *
329 */
330#define ARC_BUFC_NUMDATALISTS		16
331#define ARC_BUFC_NUMMETADATALISTS	16
332#define ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
333
334typedef struct arc_state {
335	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
336	uint64_t arcs_size;	/* total amount of data in this state */
337	list_t	arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
338	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
339} arc_state_t;
340
341#define ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))
342
343/* The 6 states: */
344static arc_state_t ARC_anon;
345static arc_state_t ARC_mru;
346static arc_state_t ARC_mru_ghost;
347static arc_state_t ARC_mfu;
348static arc_state_t ARC_mfu_ghost;
349static arc_state_t ARC_l2c_only;
350
351typedef struct arc_stats {
352	kstat_named_t arcstat_hits;
353	kstat_named_t arcstat_misses;
354	kstat_named_t arcstat_demand_data_hits;
355	kstat_named_t arcstat_demand_data_misses;
356	kstat_named_t arcstat_demand_metadata_hits;
357	kstat_named_t arcstat_demand_metadata_misses;
358	kstat_named_t arcstat_prefetch_data_hits;
359	kstat_named_t arcstat_prefetch_data_misses;
360	kstat_named_t arcstat_prefetch_metadata_hits;
361	kstat_named_t arcstat_prefetch_metadata_misses;
362	kstat_named_t arcstat_mru_hits;
363	kstat_named_t arcstat_mru_ghost_hits;
364	kstat_named_t arcstat_mfu_hits;
365	kstat_named_t arcstat_mfu_ghost_hits;
366	kstat_named_t arcstat_allocated;
367	kstat_named_t arcstat_deleted;
368	kstat_named_t arcstat_stolen;
369	kstat_named_t arcstat_recycle_miss;
370	/*
371	 * Number of buffers that could not be evicted because the hash lock
372	 * was held by another thread.  The lock may not necessarily be held
373	 * by something using the same buffer, since hash locks are shared
374	 * by multiple buffers.
375	 */
376	kstat_named_t arcstat_mutex_miss;
377	/*
378	 * Number of buffers skipped because they have I/O in progress, are
379	 * indirect prefetch buffers that have not lived long enough, or are
380	 * not from the spa we're trying to evict from.
381	 */
382	kstat_named_t arcstat_evict_skip;
383	kstat_named_t arcstat_evict_l2_cached;
384	kstat_named_t arcstat_evict_l2_eligible;
385	kstat_named_t arcstat_evict_l2_ineligible;
386	kstat_named_t arcstat_hash_elements;
387	kstat_named_t arcstat_hash_elements_max;
388	kstat_named_t arcstat_hash_collisions;
389	kstat_named_t arcstat_hash_chains;
390	kstat_named_t arcstat_hash_chain_max;
391	kstat_named_t arcstat_p;
392	kstat_named_t arcstat_c;
393	kstat_named_t arcstat_c_min;
394	kstat_named_t arcstat_c_max;
395	kstat_named_t arcstat_size;
396	/*
397	 * Number of bytes consumed by internal ARC structures necessary
398	 * for tracking purposes; these structures are not actually
399	 * backed by ARC buffers. This includes arc_buf_hdr_t structures
400	 * (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
401	 * caches), and arc_buf_t structures (allocated via arc_buf_t
402	 * cache).
403	 */
404	kstat_named_t arcstat_hdr_size;
405	/*
406	 * Number of bytes consumed by ARC buffers of type equal to
407	 * ARC_BUFC_DATA. This is generally consumed by buffers backing
408	 * on disk user data (e.g. plain file contents).
409	 */
410	kstat_named_t arcstat_data_size;
411	/*
412	 * Number of bytes consumed by ARC buffers of type equal to
413	 * ARC_BUFC_METADATA. This is generally consumed by buffers
414	 * backing on disk data that is used for internal ZFS
415	 * structures (e.g. ZAP, dnode, indirect blocks, etc).
416	 */
417	kstat_named_t arcstat_metadata_size;
418	/*
419	 * Number of bytes consumed by various buffers and structures
420	 * not actually backed with ARC buffers. This includes bonus
421	 * buffers (allocated directly via zio_buf_* functions),
422	 * dmu_buf_impl_t structures (allocated via dmu_buf_impl_t
423	 * cache), and dnode_t structures (allocated via dnode_t cache).
424	 */
425	kstat_named_t arcstat_other_size;
426	/*
427	 * Total number of bytes consumed by ARC buffers residing in the
428	 * arc_anon state. This includes *all* buffers in the arc_anon
429	 * state; e.g. data, metadata, evictable, and unevictable buffers
430	 * are all included in this value.
431	 */
432	kstat_named_t arcstat_anon_size;
433	/*
434	 * Number of bytes consumed by ARC buffers that meet the
435	 * following criteria: backing buffers of type ARC_BUFC_DATA,
436	 * residing in the arc_anon state, and are eligible for eviction
437	 * (e.g. have no outstanding holds on the buffer).
438	 */
439	kstat_named_t arcstat_anon_evictable_data;
440	/*
441	 * Number of bytes consumed by ARC buffers that meet the
442	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
443	 * residing in the arc_anon state, and are eligible for eviction
444	 * (e.g. have no outstanding holds on the buffer).
445	 */
446	kstat_named_t arcstat_anon_evictable_metadata;
447	/*
448	 * Total number of bytes consumed by ARC buffers residing in the
449	 * arc_mru state. This includes *all* buffers in the arc_mru
450	 * state; e.g. data, metadata, evictable, and unevictable buffers
451	 * are all included in this value.
452	 */
453	kstat_named_t arcstat_mru_size;
454	/*
455	 * Number of bytes consumed by ARC buffers that meet the
456	 * following criteria: backing buffers of type ARC_BUFC_DATA,
457	 * residing in the arc_mru state, and are eligible for eviction
458	 * (e.g. have no outstanding holds on the buffer).
459	 */
460	kstat_named_t arcstat_mru_evictable_data;
461	/*
462	 * Number of bytes consumed by ARC buffers that meet the
463	 * following criteria: backing buffers of type ARC_BUFC_METADATA,
464	 * residing in the arc_mru state, and are eligible for eviction
465	 * (e.g. have no outstanding holds on the buffer).
466	 */
467	kstat_named_t arcstat_mru_evictable_metadata;
468	/*
469	 * Total number of bytes that *would have been* consumed by ARC
470	 * buffers in the arc_mru_ghost state. The key thing to note
471	 * here, is the fact that this size doesn't actually indicate
472	 * RAM consumption. The ghost lists only consist of headers and
473	 * don't actually have ARC buffers linked off of these headers.
474	 * Thus, *if* the headers had associated ARC buffers, these
475	 * buffers *would have* consumed this number of bytes.
476	 */
477	kstat_named_t arcstat_mru_ghost_size;
478	/*
479	 * Number of bytes that *would have been* consumed by ARC
480	 * buffers that are eligible for eviction, of type
481	 * ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
482	 */
483	kstat_named_t arcstat_mru_ghost_evictable_data;
484	/*
485	 * Number of bytes that *would have been* consumed by ARC
486	 * buffers that are eligible for eviction, of type
487	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
488	 */
489	kstat_named_t arcstat_mru_ghost_evictable_metadata;
490	/*
491	 * Total number of bytes consumed by ARC buffers residing in the
492	 * arc_mfu state. This includes *all* buffers in the arc_mfu
493	 * state; e.g. data, metadata, evictable, and unevictable buffers
494	 * are all included in this value.
495	 */
496	kstat_named_t arcstat_mfu_size;
497	/*
498	 * Number of bytes consumed by ARC buffers that are eligible for
499	 * eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
500	 * state.
501	 */
502	kstat_named_t arcstat_mfu_evictable_data;
503	/*
504	 * Number of bytes consumed by ARC buffers that are eligible for
505	 * eviction, of type ARC_BUFC_METADATA, and reside in the
506	 * arc_mfu state.
507	 */
508	kstat_named_t arcstat_mfu_evictable_metadata;
509	/*
510	 * Total number of bytes that *would have been* consumed by ARC
511	 * buffers in the arc_mfu_ghost state. See the comment above
512	 * arcstat_mru_ghost_size for more details.
513	 */
514	kstat_named_t arcstat_mfu_ghost_size;
515	/*
516	 * Number of bytes that *would have been* consumed by ARC
517	 * buffers that are eligible for eviction, of type
518	 * ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
519	 */
520	kstat_named_t arcstat_mfu_ghost_evictable_data;
521	/*
522	 * Number of bytes that *would have been* consumed by ARC
523	 * buffers that are eligible for eviction, of type
524	 * ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
525	 */
526	kstat_named_t arcstat_mfu_ghost_evictable_metadata;
527	kstat_named_t arcstat_l2_hits;
528	kstat_named_t arcstat_l2_misses;
529	kstat_named_t arcstat_l2_feeds;
530	kstat_named_t arcstat_l2_rw_clash;
531	kstat_named_t arcstat_l2_read_bytes;
532	kstat_named_t arcstat_l2_write_bytes;
533	kstat_named_t arcstat_l2_writes_sent;
534	kstat_named_t arcstat_l2_writes_done;
535	kstat_named_t arcstat_l2_writes_error;
536	kstat_named_t arcstat_l2_writes_hdr_miss;
537	kstat_named_t arcstat_l2_evict_lock_retry;
538	kstat_named_t arcstat_l2_evict_reading;
539	kstat_named_t arcstat_l2_evict_l1cached;
540	kstat_named_t arcstat_l2_free_on_write;
541	kstat_named_t arcstat_l2_cdata_free_on_write;
542	kstat_named_t arcstat_l2_abort_lowmem;
543	kstat_named_t arcstat_l2_cksum_bad;
544	kstat_named_t arcstat_l2_io_error;
545	kstat_named_t arcstat_l2_size;
546	kstat_named_t arcstat_l2_asize;
547	kstat_named_t arcstat_l2_hdr_size;
548	kstat_named_t arcstat_l2_compress_successes;
549	kstat_named_t arcstat_l2_compress_zeros;
550	kstat_named_t arcstat_l2_compress_failures;
551	kstat_named_t arcstat_l2_write_trylock_fail;
552	kstat_named_t arcstat_l2_write_passed_headroom;
553	kstat_named_t arcstat_l2_write_spa_mismatch;
554	kstat_named_t arcstat_l2_write_in_l2;
555	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
556	kstat_named_t arcstat_l2_write_not_cacheable;
557	kstat_named_t arcstat_l2_write_full;
558	kstat_named_t arcstat_l2_write_buffer_iter;
559	kstat_named_t arcstat_l2_write_pios;
560	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
561	kstat_named_t arcstat_l2_write_buffer_list_iter;
562	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
563	kstat_named_t arcstat_memory_throttle_count;
564	kstat_named_t arcstat_duplicate_buffers;
565	kstat_named_t arcstat_duplicate_buffers_size;
566	kstat_named_t arcstat_duplicate_reads;
567	kstat_named_t arcstat_meta_used;
568	kstat_named_t arcstat_meta_limit;
569	kstat_named_t arcstat_meta_max;
570	kstat_named_t arcstat_meta_min;
571} arc_stats_t;
572
573static arc_stats_t arc_stats = {
574	{ "hits",			KSTAT_DATA_UINT64 },
575	{ "misses",			KSTAT_DATA_UINT64 },
576	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
577	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
578	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
579	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
580	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
581	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
582	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
583	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
584	{ "mru_hits",			KSTAT_DATA_UINT64 },
585	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
586	{ "mfu_hits",			KSTAT_DATA_UINT64 },
587	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
588	{ "allocated",			KSTAT_DATA_UINT64 },
589	{ "deleted",			KSTAT_DATA_UINT64 },
590	{ "stolen",			KSTAT_DATA_UINT64 },
591	{ "recycle_miss",		KSTAT_DATA_UINT64 },
592	{ "mutex_miss",			KSTAT_DATA_UINT64 },
593	{ "evict_skip",			KSTAT_DATA_UINT64 },
594	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
595	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
596	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
597	{ "hash_elements",		KSTAT_DATA_UINT64 },
598	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
599	{ "hash_collisions",		KSTAT_DATA_UINT64 },
600	{ "hash_chains",		KSTAT_DATA_UINT64 },
601	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
602	{ "p",				KSTAT_DATA_UINT64 },
603	{ "c",				KSTAT_DATA_UINT64 },
604	{ "c_min",			KSTAT_DATA_UINT64 },
605	{ "c_max",			KSTAT_DATA_UINT64 },
606	{ "size",			KSTAT_DATA_UINT64 },
607	{ "hdr_size",			KSTAT_DATA_UINT64 },
608	{ "data_size",			KSTAT_DATA_UINT64 },
609	{ "metadata_size",		KSTAT_DATA_UINT64 },
610	{ "other_size",			KSTAT_DATA_UINT64 },
611	{ "anon_size",			KSTAT_DATA_UINT64 },
612	{ "anon_evictable_data",	KSTAT_DATA_UINT64 },
613	{ "anon_evictable_metadata",	KSTAT_DATA_UINT64 },
614	{ "mru_size",			KSTAT_DATA_UINT64 },
615	{ "mru_evictable_data",		KSTAT_DATA_UINT64 },
616	{ "mru_evictable_metadata",	KSTAT_DATA_UINT64 },
617	{ "mru_ghost_size",		KSTAT_DATA_UINT64 },
618	{ "mru_ghost_evictable_data",	KSTAT_DATA_UINT64 },
619	{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
620	{ "mfu_size",			KSTAT_DATA_UINT64 },
621	{ "mfu_evictable_data",		KSTAT_DATA_UINT64 },
622	{ "mfu_evictable_metadata",	KSTAT_DATA_UINT64 },
623	{ "mfu_ghost_size",		KSTAT_DATA_UINT64 },
624	{ "mfu_ghost_evictable_data",	KSTAT_DATA_UINT64 },
625	{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
626	{ "l2_hits",			KSTAT_DATA_UINT64 },
627	{ "l2_misses",			KSTAT_DATA_UINT64 },
628	{ "l2_feeds",			KSTAT_DATA_UINT64 },
629	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
630	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
631	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
632	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
633	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
634	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
635	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
636	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
637	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
638	{ "l2_evict_l1cached",		KSTAT_DATA_UINT64 },
639	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
640	{ "l2_cdata_free_on_write",	KSTAT_DATA_UINT64 },
641	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
642	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
643	{ "l2_io_error",		KSTAT_DATA_UINT64 },
644	{ "l2_size",			KSTAT_DATA_UINT64 },
645	{ "l2_asize",			KSTAT_DATA_UINT64 },
646	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
647	{ "l2_compress_successes",	KSTAT_DATA_UINT64 },
648	{ "l2_compress_zeros",		KSTAT_DATA_UINT64 },
649	{ "l2_compress_failures",	KSTAT_DATA_UINT64 },
650	{ "l2_write_trylock_fail",	KSTAT_DATA_UINT64 },
651	{ "l2_write_passed_headroom",	KSTAT_DATA_UINT64 },
652	{ "l2_write_spa_mismatch",	KSTAT_DATA_UINT64 },
653	{ "l2_write_in_l2",		KSTAT_DATA_UINT64 },
654	{ "l2_write_io_in_progress",	KSTAT_DATA_UINT64 },
655	{ "l2_write_not_cacheable",	KSTAT_DATA_UINT64 },
656	{ "l2_write_full",		KSTAT_DATA_UINT64 },
657	{ "l2_write_buffer_iter",	KSTAT_DATA_UINT64 },
658	{ "l2_write_pios",		KSTAT_DATA_UINT64 },
659	{ "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
660	{ "l2_write_buffer_list_iter",	KSTAT_DATA_UINT64 },
661	{ "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
662	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
663	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
664	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
665	{ "duplicate_reads",		KSTAT_DATA_UINT64 },
666	{ "arc_meta_used",		KSTAT_DATA_UINT64 },
667	{ "arc_meta_limit",		KSTAT_DATA_UINT64 },
668	{ "arc_meta_max",		KSTAT_DATA_UINT64 },
669	{ "arc_meta_min",		KSTAT_DATA_UINT64 }
670};
671
672#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
673
674#define	ARCSTAT_INCR(stat, val) \
675	atomic_add_64(&arc_stats.stat.value.ui64, (val))
676
677#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
678#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
679
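/*
 * ARCSTAT_MAX below is a lock-free "raise to at least val" update: it
 * re-reads the current value and retries the compare-and-swap until either
 * the stored value is already >= val or the swap succeeds, so concurrent
 * updaters can never lower a recorded maximum.
 */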
680#define	ARCSTAT_MAX(stat, val) {					\
681	uint64_t m;							\
682	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
683	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
684		continue;						\
685}
686
687#define	ARCSTAT_MAXSTAT(stat) \
688	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
689
690/*
691 * We define a macro to allow ARC hits/misses to be easily broken down by
692 * two separate conditions, giving a total of four different subtypes for
693 * each of hits and misses (so eight statistics total).
694 */
695#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
696	if (cond1) {							\
697		if (cond2) {						\
698			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
699		} else {						\
700			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
701		}							\
702	} else {							\
703		if (cond2) {						\
704			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
705		} else {						\
706			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
707		}							\
708	}
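/*
 * Example use (a sketch of the intended call pattern; see the call sites
 * later in this file): splitting "hits" by demand vs. prefetch and data vs.
 * metadata in a single statement:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * bumps exactly one of arcstat_demand_data_hits, arcstat_demand_metadata_hits,
 * arcstat_prefetch_data_hits or arcstat_prefetch_metadata_hits.
 */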
709
710kstat_t			*arc_ksp;
711static arc_state_t	*arc_anon;
712static arc_state_t	*arc_mru;
713static arc_state_t	*arc_mru_ghost;
714static arc_state_t	*arc_mfu;
715static arc_state_t	*arc_mfu_ghost;
716static arc_state_t	*arc_l2c_only;
717
718/*
719 * There are several ARC variables that are critical to export as kstats --
720 * but we don't want to have to grovel around in the kstat whenever we wish to
721 * manipulate them.  For these variables, we therefore define them to be in
722 * terms of the statistic variable.  This assures that we are not introducing
723 * the possibility of inconsistency by having shadow copies of the variables,
724 * while still allowing the code to be readable.
725 */
726#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
727#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
728#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
729#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
730#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
731#define	arc_meta_limit	ARCSTAT(arcstat_meta_limit) /* max size for metadata */
732#define	arc_meta_min	ARCSTAT(arcstat_meta_min) /* min size for metadata */
733#define	arc_meta_used	ARCSTAT(arcstat_meta_used) /* size of metadata */
734#define	arc_meta_max	ARCSTAT(arcstat_meta_max) /* max size of metadata */
735
736#define	L2ARC_IS_VALID_COMPRESS(_c_) \
737	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
738
739static int		arc_no_grow;	/* Don't try to grow cache size */
740static uint64_t		arc_tempreserve;
741static uint64_t		arc_loaned_bytes;
742
743typedef struct arc_callback arc_callback_t;
744
745struct arc_callback {
746	void			*acb_private;
747	arc_done_func_t		*acb_done;
748	arc_buf_t		*acb_buf;
749	zio_t			*acb_zio_dummy;
750	arc_callback_t		*acb_next;
751};
752
753typedef struct arc_write_callback arc_write_callback_t;
754
755struct arc_write_callback {
756	void		*awcb_private;
757	arc_done_func_t	*awcb_ready;
758	arc_done_func_t	*awcb_physdone;
759	arc_done_func_t	*awcb_done;
760	arc_buf_t	*awcb_buf;
761};
762
763/*
764 * ARC buffers are separated into multiple structs as a memory saving measure:
765 *   - Common fields struct, always defined, and embedded within it:
766 *       - L2-only fields, always allocated but undefined when not in L2ARC
767 *       - L1-only fields, only allocated when in L1ARC
768 *
769 *           Buffer in L1                     Buffer only in L2
770 *    +------------------------+          +------------------------+
771 *    | arc_buf_hdr_t          |          | arc_buf_hdr_t          |
772 *    |                        |          |                        |
773 *    |                        |          |                        |
774 *    |                        |          |                        |
775 *    +------------------------+          +------------------------+
776 *    | l2arc_buf_hdr_t        |          | l2arc_buf_hdr_t        |
777 *    | (undefined if L1-only) |          |                        |
778 *    +------------------------+          +------------------------+
779 *    | l1arc_buf_hdr_t        |
780 *    |                        |
781 *    |                        |
782 *    |                        |
783 *    |                        |
784 *    +------------------------+
785 *
786 * Because it's possible for the L2ARC to become extremely large, we can wind
787 * up eating a lot of memory in L2ARC buffer headers, so the size of a header
788 * is minimized by only allocating the fields necessary for an L1-cached buffer
789 * when a header is actually in the L1 cache. The sub-headers (l1arc_buf_hdr and
790 * l2arc_buf_hdr) are embedded rather than allocated separately to save a
791 * couple of words in pointers. arc_hdr_realloc() is used to switch a header between
792 * these two allocation states.
793 */
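/*
 * A minimal usage sketch (an assumption): any code touching b_l1hdr must
 * first check that the L1 fields are actually allocated, e.g.:
 *
 *	if (HDR_HAS_L1HDR(hdr))
 *		cnt = hdr->b_l1hdr.b_datacnt;
 *	else
 *		cnt = 0;	(L2-only header, no L1 fields allocated)
 *
 * HDR_HAS_L1HDR() is defined below; arc_hdr_realloc() is what moves a header
 * between the full and the l2only kmem caches when that needs to change.
 */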
794typedef struct l1arc_buf_hdr {
795	kmutex_t		b_freeze_lock;
796#ifdef ZFS_DEBUG
797	/*
798	 * used for debugging with kmem_flags - by allocating and freeing
799	 * b_thawed when the buffer is thawed, we get a record of the stack
800	 * trace that thawed it.
801	 */
802	void			*b_thawed;
803#endif
804
805	arc_buf_t		*b_buf;
806	uint32_t		b_datacnt;
807	/* for waiting on writes to complete */
808	kcondvar_t		b_cv;
809
810	/* protected by arc state mutex */
811	arc_state_t		*b_state;
812	list_node_t		b_arc_node;
813
814	/* updated atomically */
815	clock_t			b_arc_access;
816
817	/* self protecting */
818	refcount_t		b_refcnt;
819
820	arc_callback_t		*b_acb;
821	/* temporary buffer holder for in-flight compressed data */
822	void			*b_tmp_cdata;
823} l1arc_buf_hdr_t;
824
825typedef struct l2arc_dev l2arc_dev_t;
826
827typedef struct l2arc_buf_hdr {
828	/* protected by arc_buf_hdr mutex */
829	l2arc_dev_t		*b_dev;		/* L2ARC device */
830	uint64_t		b_daddr;	/* disk address, offset byte */
831	/* real alloc'd buffer size depending on b_compress applied */
832	int32_t			b_asize;
833
834	list_node_t		b_l2node;
835} l2arc_buf_hdr_t;
836
837struct arc_buf_hdr {
838	/* protected by hash lock */
839	dva_t			b_dva;
840	uint64_t		b_birth;
841	/*
842	 * Even though this checksum is only set/verified when a buffer is in
843	 * the L1 cache, it needs to be in the set of common fields because it
844	 * must be preserved from the time before a buffer is written out to
845	 * L2ARC until after it is read back in.
846	 */
847	zio_cksum_t		*b_freeze_cksum;
848
849	arc_buf_hdr_t		*b_hash_next;
850	arc_flags_t		b_flags;
851
852	/* immutable */
853	int32_t			b_size;
854	uint64_t		b_spa;
855
856	/* L2ARC fields. Undefined when not in L2ARC. */
857	l2arc_buf_hdr_t		b_l2hdr;
858	/* L1ARC fields. Undefined when in l2arc_only state */
859	l1arc_buf_hdr_t		b_l1hdr;
860};
861
862#ifdef _KERNEL
863static int
864sysctl_vfs_zfs_arc_meta_limit(SYSCTL_HANDLER_ARGS)
865{
866	uint64_t val;
867	int err;
868
869	val = arc_meta_limit;
870	err = sysctl_handle_64(oidp, &val, 0, req);
871	if (err != 0 || req->newptr == NULL)
872		return (err);
873
874	if (val <= 0 || val > arc_c_max)
875		return (EINVAL);
876
877	arc_meta_limit = val;
878	return (0);
879}
880#endif
881
882static arc_buf_t *arc_eviction_list;
883static kmutex_t arc_eviction_mtx;
884static arc_buf_hdr_t arc_eviction_hdr;
885
886#define	GHOST_STATE(state)	\
887	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
888	(state) == arc_l2c_only)
889
890#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
891#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
892#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_FLAG_IO_ERROR)
893#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_FLAG_PREFETCH)
894#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FLAG_FREED_IN_READ)
895#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_FLAG_BUF_AVAILABLE)
896
897#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_FLAG_L2CACHE)
898#define	HDR_L2COMPRESS(hdr)	((hdr)->b_flags & ARC_FLAG_L2COMPRESS)
899#define	HDR_L2_READING(hdr)	\
900	    (((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) &&	\
901	    ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
902#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITING)
903#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
904#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
905
906#define	HDR_ISTYPE_METADATA(hdr)	\
907	    ((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
908#define	HDR_ISTYPE_DATA(hdr)	(!HDR_ISTYPE_METADATA(hdr))
909
910#define	HDR_HAS_L1HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
911#define	HDR_HAS_L2HDR(hdr)	((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
912
913/* For storing compression mode in b_flags */
914#define	HDR_COMPRESS_OFFSET	24
915#define	HDR_COMPRESS_NBITS	7
916
917#define	HDR_GET_COMPRESS(hdr)	((enum zio_compress)BF32_GET(hdr->b_flags, \
918	    HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS))
919#define	HDR_SET_COMPRESS(hdr, cmp) BF32_SET(hdr->b_flags, \
920	    HDR_COMPRESS_OFFSET, HDR_COMPRESS_NBITS, (cmp))
921
922/*
923 * Other sizes
924 */
925
926#define	HDR_FULL_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
927#define	HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
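/*
 * Because b_l1hdr is the last member of arc_buf_hdr_t,
 * offsetof(arc_buf_hdr_t, b_l1hdr) is exactly the size of a header with the
 * L1-only fields chopped off, so an L2-only header allocation covers the
 * common and L2 fields but not the (larger) l1arc_buf_hdr_t tail.
 */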
928
929/*
930 * Hash table routines
931 */
932
933#define	HT_LOCK_PAD	CACHE_LINE_SIZE
934
935struct ht_lock {
936	kmutex_t	ht_lock;
937#ifdef _KERNEL
938	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
939#endif
940};
941
942#define	BUF_LOCKS 256
943typedef struct buf_hash_table {
944	uint64_t ht_mask;
945	arc_buf_hdr_t **ht_table;
946	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
947} buf_hash_table_t;
948
949static buf_hash_table_t buf_hash_table;
950
951#define	BUF_HASH_INDEX(spa, dva, birth) \
952	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
953#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
954#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
955#define	HDR_LOCK(hdr) \
956	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
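/*
 * Usage sketch (an assumption): a header's identity selects both its hash
 * bucket and its lock, so a lookup typically looks like:
 *
 *	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
 *	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
 *	mutex_enter(hash_lock);
 *	(... walk buf_hash_table.ht_table[idx] ...)
 *	mutex_exit(hash_lock);
 *
 * Since BUF_LOCKS (256) is much smaller than the table itself, many buckets
 * share a lock, which is why arcstat_mutex_miss can be charged to contention
 * on an unrelated buffer.
 */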
957
958uint64_t zfs_crc64_table[256];
959
960/*
961 * Level 2 ARC
962 */
963
964#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
965#define	L2ARC_HEADROOM		2			/* num of writes */
966/*
967 * If we discover during ARC scan any buffers to be compressed, we boost
968 * our headroom for the next scanning cycle by this percentage multiple.
969 */
970#define	L2ARC_HEADROOM_BOOST	200
971#define	L2ARC_FEED_SECS		1		/* caching interval secs */
972#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */
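/*
 * Rough arithmetic (an assumption about how these tunables combine; see
 * l2arc_write_buffers() for the authoritative calculation): with
 * l2arc_write_max = 8MB and l2arc_headroom = 2, each feed cycle scans about
 * 16MB of list; if compressible buffers were found, the 200% headroom boost
 * roughly doubles that to 32MB on the next cycle.
 */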
973
974/*
975 * Used to distinguish headers that are being processed by
976 * l2arc_write_buffers(), but have yet to be assigned to an l2arc disk
977 * address. This can happen when the header is added to the l2arc's list
978 * of buffers to write in the first stage of l2arc_write_buffers(), but
979 * has not yet been written out which happens in the second stage of
980 * l2arc_write_buffers().
981 */
982#define	L2ARC_ADDR_UNSET	((uint64_t)(-1))
983
984#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
985#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)
986
987/* L2ARC Performance Tunables */
988uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
989uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
990uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
991uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
992uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
993uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
994boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
995boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
996boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
997
998SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
999    &l2arc_write_max, 0, "max write size");
1000SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
1001    &l2arc_write_boost, 0, "extra write during warmup");
1002SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
1003    &l2arc_headroom, 0, "number of dev writes");
1004SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
1005    &l2arc_feed_secs, 0, "interval seconds");
1006SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
1007    &l2arc_feed_min_ms, 0, "min interval milliseconds");
1008
1009SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
1010    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
1011SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
1012    &l2arc_feed_again, 0, "turbo warmup");
1013SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
1014    &l2arc_norw, 0, "no reads during writes");
1015
1016SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
1017    &ARC_anon.arcs_size, 0, "size of anonymous state");
1018SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
1019    &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in anonymous state");
1020SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
1021    &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in anonymous state");
1022
1023SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
1024    &ARC_mru.arcs_size, 0, "size of mru state");
1025SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
1026    &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
1027SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
1028    &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
1029
1030SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
1031    &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
1032SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
1033    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
1034    "size of metadata in mru ghost state");
1035SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
1036    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
1037    "size of data in mru ghost state");
1038
1039SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
1040    &ARC_mfu.arcs_size, 0, "size of mfu state");
1041SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
1042    &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
1043SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
1044    &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
1045
1046SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
1047    &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
1048SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
1049    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
1050    "size of metadata in mfu ghost state");
1051SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
1052    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
1053    "size of data in mfu ghost state");
1054
1055SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
1056    &ARC_l2c_only.arcs_size, 0, "size of l2c_only state");
1057
1058/*
1059 * L2ARC Internals
1060 */
1061struct l2arc_dev {
1062	vdev_t			*l2ad_vdev;	/* vdev */
1063	spa_t			*l2ad_spa;	/* spa */
1064	uint64_t		l2ad_hand;	/* next write location */
1065	uint64_t		l2ad_start;	/* first addr on device */
1066	uint64_t		l2ad_end;	/* last addr on device */
1067	boolean_t		l2ad_first;	/* first sweep through */
1068	boolean_t		l2ad_writing;	/* currently writing */
1069	kmutex_t		l2ad_mtx;	/* lock for buffer list */
1070	list_t			l2ad_buflist;	/* buffer list */
1071	list_node_t		l2ad_node;	/* device list node */
1072	refcount_t		l2ad_alloc;	/* allocated bytes */
1073};
1074
1075static list_t L2ARC_dev_list;			/* device list */
1076static list_t *l2arc_dev_list;			/* device list pointer */
1077static kmutex_t l2arc_dev_mtx;			/* device list mutex */
1078static l2arc_dev_t *l2arc_dev_last;		/* last device used */
1079static list_t L2ARC_free_on_write;		/* free after write buf list */
1080static list_t *l2arc_free_on_write;		/* free after write list ptr */
1081static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
1082static uint64_t l2arc_ndev;			/* number of devices */
1083
1084typedef struct l2arc_read_callback {
1085	arc_buf_t		*l2rcb_buf;		/* read buffer */
1086	spa_t			*l2rcb_spa;		/* spa */
1087	blkptr_t		l2rcb_bp;		/* original blkptr */
1088	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
1089	int			l2rcb_flags;		/* original flags */
1090	enum zio_compress	l2rcb_compress;		/* applied compress */
1091} l2arc_read_callback_t;
1092
1093typedef struct l2arc_write_callback {
1094	l2arc_dev_t	*l2wcb_dev;		/* device info */
1095	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
1096} l2arc_write_callback_t;
1097
1098typedef struct l2arc_data_free {
1099	/* protected by l2arc_free_on_write_mtx */
1100	void		*l2df_data;
1101	size_t		l2df_size;
1102	void		(*l2df_func)(void *, size_t);
1103	list_node_t	l2df_list_node;
1104} l2arc_data_free_t;
1105
1106static kmutex_t l2arc_feed_thr_lock;
1107static kcondvar_t l2arc_feed_thr_cv;
1108static uint8_t l2arc_thread_exit;
1109
1110static void arc_get_data_buf(arc_buf_t *);
1111static void arc_access(arc_buf_hdr_t *, kmutex_t *);
1112static int arc_evict_needed(arc_buf_contents_t);
1113static void arc_evict_ghost(arc_state_t *, uint64_t, int64_t);
1114static void arc_buf_watch(arc_buf_t *);
1115
1116static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
1117static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
1118
1119static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
1120static void l2arc_read_done(zio_t *);
1121
1122static boolean_t l2arc_compress_buf(arc_buf_hdr_t *);
1123static void l2arc_decompress_zio(zio_t *, arc_buf_hdr_t *, enum zio_compress);
1124static void l2arc_release_cdata_buf(arc_buf_hdr_t *);
1125
1126static uint64_t
1127buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
1128{
1129	uint8_t *vdva = (uint8_t *)dva;
1130	uint64_t crc = -1ULL;
1131	int i;
1132
1133	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
1134
1135	for (i = 0; i < sizeof (dva_t); i++)
1136		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
1137
1138	crc ^= (spa>>8) ^ birth;
1139
1140	return (crc);
1141}
1142
1143#define	BUF_EMPTY(buf)						\
1144	((buf)->b_dva.dva_word[0] == 0 &&			\
1145	(buf)->b_dva.dva_word[1] == 0)
1146
1147#define	BUF_EQUAL(spa, dva, birth, buf)				\
1148	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
1149	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
1150	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
1151
1152static void
1153buf_discard_identity(arc_buf_hdr_t *hdr)
1154{
1155	hdr->b_dva.dva_word[0] = 0;
1156	hdr->b_dva.dva_word[1] = 0;
1157	hdr->b_birth = 0;
1158}
1159
1160static arc_buf_hdr_t *
1161buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
1162{
1163	const dva_t *dva = BP_IDENTITY(bp);
1164	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
1165	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
1166	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1167	arc_buf_hdr_t *hdr;
1168
1169	mutex_enter(hash_lock);
1170	for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
1171	    hdr = hdr->b_hash_next) {
1172		if (BUF_EQUAL(spa, dva, birth, hdr)) {
1173			*lockp = hash_lock;
1174			return (hdr);
1175		}
1176	}
1177	mutex_exit(hash_lock);
1178	*lockp = NULL;
1179	return (NULL);
1180}
1181
1182/*
1183 * Insert an entry into the hash table.  If there is already an element
1184 * equal to elem in the hash table, then the already existing element
1185 * will be returned and the new element will not be inserted.
1186 * Otherwise returns NULL.
1187 * If lockp == NULL, the caller is assumed to already hold the hash lock.
1188 */
1189static arc_buf_hdr_t *
1190buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
1191{
1192	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1193	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
1194	arc_buf_hdr_t *fhdr;
1195	uint32_t i;
1196
1197	ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
1198	ASSERT(hdr->b_birth != 0);
1199	ASSERT(!HDR_IN_HASH_TABLE(hdr));
1200
1201	if (lockp != NULL) {
1202		*lockp = hash_lock;
1203		mutex_enter(hash_lock);
1204	} else {
1205		ASSERT(MUTEX_HELD(hash_lock));
1206	}
1207
1208	for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
1209	    fhdr = fhdr->b_hash_next, i++) {
1210		if (BUF_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
1211			return (fhdr);
1212	}
1213
1214	hdr->b_hash_next = buf_hash_table.ht_table[idx];
1215	buf_hash_table.ht_table[idx] = hdr;
1216	hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
1217
1218	/* collect some hash table performance data */
1219	if (i > 0) {
1220		ARCSTAT_BUMP(arcstat_hash_collisions);
1221		if (i == 1)
1222			ARCSTAT_BUMP(arcstat_hash_chains);
1223
1224		ARCSTAT_MAX(arcstat_hash_chain_max, i);
1225	}
1226
1227	ARCSTAT_BUMP(arcstat_hash_elements);
1228	ARCSTAT_MAXSTAT(arcstat_hash_elements);
1229
1230	return (NULL);
1231}
1232
1233static void
1234buf_hash_remove(arc_buf_hdr_t *hdr)
1235{
1236	arc_buf_hdr_t *fhdr, **hdrp;
1237	uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1238
1239	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
1240	ASSERT(HDR_IN_HASH_TABLE(hdr));
1241
1242	hdrp = &buf_hash_table.ht_table[idx];
1243	while ((fhdr = *hdrp) != hdr) {
1244		ASSERT(fhdr != NULL);
1245		hdrp = &fhdr->b_hash_next;
1246	}
1247	*hdrp = hdr->b_hash_next;
1248	hdr->b_hash_next = NULL;
1249	hdr->b_flags &= ~ARC_FLAG_IN_HASH_TABLE;
1250
1251	/* collect some hash table performance data */
1252	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
1253
1254	if (buf_hash_table.ht_table[idx] &&
1255	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
1256		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1257}
1258
1259/*
1260 * Global data structures and functions for the buf kmem cache.
1261 */
1262static kmem_cache_t *hdr_full_cache;
1263static kmem_cache_t *hdr_l2only_cache;
1264static kmem_cache_t *buf_cache;
1265
1266static void
1267buf_fini(void)
1268{
1269	int i;
1270
1271	kmem_free(buf_hash_table.ht_table,
1272	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
1273	for (i = 0; i < BUF_LOCKS; i++)
1274		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1275	kmem_cache_destroy(hdr_full_cache);
1276	kmem_cache_destroy(hdr_l2only_cache);
1277	kmem_cache_destroy(buf_cache);
1278}
1279
1280/*
1281 * Constructor callback - called when the cache is empty
1282 * and a new buf is requested.
1283 */
1284/* ARGSUSED */
1285static int
1286hdr_full_cons(void *vbuf, void *unused, int kmflag)
1287{
1288	arc_buf_hdr_t *hdr = vbuf;
1289
1290	bzero(hdr, HDR_FULL_SIZE);
1291	cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
1292	refcount_create(&hdr->b_l1hdr.b_refcnt);
1293	mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1294	arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1295
1296	return (0);
1297}
1298
1299/* ARGSUSED */
1300static int
1301hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
1302{
1303	arc_buf_hdr_t *hdr = vbuf;
1304
1305	bzero(hdr, HDR_L2ONLY_SIZE);
1306	arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1307
1308	return (0);
1309}
1310
1311/* ARGSUSED */
1312static int
1313buf_cons(void *vbuf, void *unused, int kmflag)
1314{
1315	arc_buf_t *buf = vbuf;
1316
1317	bzero(buf, sizeof (arc_buf_t));
1318	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1319	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1320
1321	return (0);
1322}
1323
1324/*
1325 * Destructor callback - called when a cached buf is
1326 * no longer required.
1327 */
1328/* ARGSUSED */
1329static void
1330hdr_full_dest(void *vbuf, void *unused)
1331{
1332	arc_buf_hdr_t *hdr = vbuf;
1333
1334	ASSERT(BUF_EMPTY(hdr));
1335	cv_destroy(&hdr->b_l1hdr.b_cv);
1336	refcount_destroy(&hdr->b_l1hdr.b_refcnt);
1337	mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
1338	arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
1339}
1340
1341/* ARGSUSED */
1342static void
1343hdr_l2only_dest(void *vbuf, void *unused)
1344{
1345	arc_buf_hdr_t *hdr = vbuf;
1346
1347	ASSERT(BUF_EMPTY(hdr));
1348	arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
1349}
1350
1351/* ARGSUSED */
1352static void
1353buf_dest(void *vbuf, void *unused)
1354{
1355	arc_buf_t *buf = vbuf;
1356
1357	mutex_destroy(&buf->b_evict_lock);
1358	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1359}
1360
1361/*
1362 * Reclaim callback -- invoked when memory is low.
1363 */
1364/* ARGSUSED */
1365static void
1366hdr_recl(void *unused)
1367{
1368	dprintf("hdr_recl called\n");
1369	/*
1370	 * umem calls the reclaim func when we destroy the buf cache,
1371	 * which is after we do arc_fini().
1372	 */
1373	if (!arc_dead)
1374		cv_signal(&arc_reclaim_thr_cv);
1375}
1376
1377static void
1378buf_init(void)
1379{
1380	uint64_t *ct;
1381	uint64_t hsize = 1ULL << 12;
1382	int i, j;
1383
1384	/*
1385	 * The hash table is big enough to fill all of physical memory
1386	 * with an average block size of zfs_arc_average_blocksize (default 8K).
1387	 * By default, the table will take up
1388	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1389	 */
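	/*
	 * Worked example (illustrative): with 16GB of physical memory and the
	 * default 8KB average block size, hsize settles at 2^21 entries
	 * (2^21 * 8KB = 16GB), so the table itself takes 2^21 * 8 bytes =
	 * 16MB, i.e. the "1MB per GB" figure quoted above.
	 */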
1390	while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE)
1391		hsize <<= 1;
1392retry:
1393	buf_hash_table.ht_mask = hsize - 1;
1394	buf_hash_table.ht_table =
1395	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1396	if (buf_hash_table.ht_table == NULL) {
1397		ASSERT(hsize > (1ULL << 8));
1398		hsize >>= 1;
1399		goto retry;
1400	}
1401
1402	hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
1403	    0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
1404	hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
1405	    HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
1406	    NULL, NULL, 0);
1407	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1408	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1409
1410	for (i = 0; i < 256; i++)
1411		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1412			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1413
1414	for (i = 0; i < BUF_LOCKS; i++) {
1415		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1416		    NULL, MUTEX_DEFAULT, NULL);
1417	}
1418}
1419
1420/*
1421 * Transition between the two allocation states for the arc_buf_hdr struct.
1422 * The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
1423 * (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
1424 * version is used when a cache buffer is only in the L2ARC in order to reduce
1425 * memory usage.
1426 */
1427static arc_buf_hdr_t *
1428arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
1429{
1430	ASSERT(HDR_HAS_L2HDR(hdr));
1431
1432	arc_buf_hdr_t *nhdr;
1433	l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
1434
1435	ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
1436	    (old == hdr_l2only_cache && new == hdr_full_cache));
1437
1438	nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
1439
1440	ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
1441	buf_hash_remove(hdr);
1442
1443	bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
1444
1445	if (new == hdr_full_cache) {
1446		nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
1447		/*
1448		 * arc_access and arc_change_state need to be aware that a
1449		 * header has just come out of L2ARC, so we set its state to
1450		 * l2c_only even though it's about to change.
1451		 */
1452		nhdr->b_l1hdr.b_state = arc_l2c_only;
1453	} else {
1454		ASSERT(hdr->b_l1hdr.b_buf == NULL);
1455		ASSERT0(hdr->b_l1hdr.b_datacnt);
1456		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
1457		/*
1458		 * We might be removing the L1hdr of a buffer which was just
1459		 * written out to L2ARC. If such a buffer is compressed then we
1460		 * need to free its b_tmp_cdata before destroying the header.
1461		 */
1462		if (hdr->b_l1hdr.b_tmp_cdata != NULL &&
1463		    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
1464			l2arc_release_cdata_buf(hdr);
1465		nhdr->b_flags &= ~ARC_FLAG_HAS_L1HDR;
1466	}
1467	/*
1468	 * The header has been reallocated so we need to re-insert it into any
1469	 * lists it was on.
1470	 */
1471	(void) buf_hash_insert(nhdr, NULL);
1472
1473	ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
1474
1475	mutex_enter(&dev->l2ad_mtx);
1476
1477	/*
1478	 * We must place the realloc'ed header back into the list at
1479	 * the same spot. Otherwise, if it's placed earlier in the list,
1480	 * l2arc_write_buffers() could find it during the function's
1481	 * write phase, and try to write it out to the l2arc.
1482	 */
1483	list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
1484	list_remove(&dev->l2ad_buflist, hdr);
1485
1486	mutex_exit(&dev->l2ad_mtx);
1487
1488	/*
1489	 * Since we're using the pointer address as the tag when
1490	 * incrementing and decrementing the l2ad_alloc refcount, we
1491	 * must remove the old pointer (that we're about to destroy) and
1492	 * add the new pointer to the refcount. Otherwise we'd remove
1493	 * the wrong pointer address when calling arc_hdr_destroy() later.
1494	 */
1495
1496	(void) refcount_remove_many(&dev->l2ad_alloc,
1497	    hdr->b_l2hdr.b_asize, hdr);
1498
1499	(void) refcount_add_many(&dev->l2ad_alloc,
1500	    nhdr->b_l2hdr.b_asize, nhdr);
1501
1502	buf_discard_identity(hdr);
1503	hdr->b_freeze_cksum = NULL;
1504	kmem_cache_free(old, hdr);
1505
1506	return (nhdr);
1507}
1508
1509
1510#define	ARC_MINTIME	(hz>>4) /* 62 ms */
1511
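/*
 * Verify that a buffer's contents still match its recorded freeze checksum
 * (only when ZFS_DEBUG_MODIFY is set); panic if the buffer was modified
 * while frozen.
 */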
1512static void
1513arc_cksum_verify(arc_buf_t *buf)
1514{
1515	zio_cksum_t zc;
1516
1517	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1518		return;
1519
1520	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1521	if (buf->b_hdr->b_freeze_cksum == NULL || HDR_IO_ERROR(buf->b_hdr)) {
1522		mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1523		return;
1524	}
1525	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1526	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1527		panic("buffer modified while frozen!");
1528	mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1529}
1530
1531static int
1532arc_cksum_equal(arc_buf_t *buf)
1533{
1534	zio_cksum_t zc;
1535	int equal;
1536
1537	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1538	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1539	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1540	mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1541
1542	return (equal);
1543}
1544
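/*
 * Compute and record the buffer's fletcher-2 freeze checksum if one is not
 * already present.  Skipped unless 'force' is set or ZFS_DEBUG_MODIFY is
 * enabled.
 */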
1545static void
1546arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1547{
1548	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1549		return;
1550
1551	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1552	if (buf->b_hdr->b_freeze_cksum != NULL) {
1553		mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1554		return;
1555	}
1556	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1557	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1558	    buf->b_hdr->b_freeze_cksum);
1559	mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1560#ifdef illumos
1561	arc_buf_watch(buf);
1562#endif /* illumos */
1563}
1564
1565#ifdef illumos
1566#ifndef _KERNEL
1567typedef struct procctl {
1568	long cmd;
1569	prwatch_t prwatch;
1570} procctl_t;
1571#endif
1572
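/*
 * In illumos userland with arc_watch enabled, use /proc PCWATCH watchpoints
 * to trap writes to frozen buffers: arc_buf_watch() arms a write watchpoint
 * over the buffer's data and arc_buf_unwatch() clears it.
 */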
1573/* ARGSUSED */
1574static void
1575arc_buf_unwatch(arc_buf_t *buf)
1576{
1577#ifndef _KERNEL
1578	if (arc_watch) {
1579		int result;
1580		procctl_t ctl;
1581		ctl.cmd = PCWATCH;
1582		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1583		ctl.prwatch.pr_size = 0;
1584		ctl.prwatch.pr_wflags = 0;
1585		result = write(arc_procfd, &ctl, sizeof (ctl));
1586		ASSERT3U(result, ==, sizeof (ctl));
1587	}
1588#endif
1589}
1590
1591/* ARGSUSED */
1592static void
1593arc_buf_watch(arc_buf_t *buf)
1594{
1595#ifndef _KERNEL
1596	if (arc_watch) {
1597		int result;
1598		procctl_t ctl;
1599		ctl.cmd = PCWATCH;
1600		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1601		ctl.prwatch.pr_size = buf->b_hdr->b_size;
1602		ctl.prwatch.pr_wflags = WA_WRITE;
1603		result = write(arc_procfd, &ctl, sizeof (ctl));
1604		ASSERT3U(result, ==, sizeof (ctl));
1605	}
1606#endif
1607}
1608#endif /* illumos */
1609
1610static arc_buf_contents_t
1611arc_buf_type(arc_buf_hdr_t *hdr)
1612{
1613	if (HDR_ISTYPE_METADATA(hdr)) {
1614		return (ARC_BUFC_METADATA);
1615	} else {
1616		return (ARC_BUFC_DATA);
1617	}
1618}
1619
1620static uint32_t
1621arc_bufc_to_flags(arc_buf_contents_t type)
1622{
1623	switch (type) {
1624	case ARC_BUFC_DATA:
1625		/* metadata field is 0 if buffer contains normal data */
1626		return (0);
1627	case ARC_BUFC_METADATA:
1628		return (ARC_FLAG_BUFC_METADATA);
1629	default:
1630		break;
1631	}
1632	panic("undefined ARC buffer type!");
1633	return ((uint32_t)-1);
1634}
1635
1636void
1637arc_buf_thaw(arc_buf_t *buf)
1638{
1639	if (zfs_flags & ZFS_DEBUG_MODIFY) {
1640		if (buf->b_hdr->b_l1hdr.b_state != arc_anon)
1641			panic("modifying non-anon buffer!");
1642		if (HDR_IO_IN_PROGRESS(buf->b_hdr))
1643			panic("modifying buffer while i/o in progress!");
1644		arc_cksum_verify(buf);
1645	}
1646
1647	mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1648	if (buf->b_hdr->b_freeze_cksum != NULL) {
1649		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1650		buf->b_hdr->b_freeze_cksum = NULL;
1651	}
1652
1653#ifdef ZFS_DEBUG
1654	if (zfs_flags & ZFS_DEBUG_MODIFY) {
1655		if (buf->b_hdr->b_l1hdr.b_thawed != NULL)
1656			kmem_free(buf->b_hdr->b_l1hdr.b_thawed, 1);
1657		buf->b_hdr->b_l1hdr.b_thawed = kmem_alloc(1, KM_SLEEP);
1658	}
1659#endif
1660
1661	mutex_exit(&buf->b_hdr->b_l1hdr.b_freeze_lock);
1662
1663#ifdef illumos
1664	arc_buf_unwatch(buf);
1665#endif /* illumos */
1666}
1667
1668void
1669arc_buf_freeze(arc_buf_t *buf)
1670{
1671	kmutex_t *hash_lock;
1672
1673	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1674		return;
1675
1676	hash_lock = HDR_LOCK(buf->b_hdr);
1677	mutex_enter(hash_lock);
1678
1679	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1680	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
1681	arc_cksum_compute(buf, B_FALSE);
1682	mutex_exit(hash_lock);
1683
1684}
1685
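/*
 * Map a header to the sub-list (and associated lock) it hashes to within the
 * given state: metadata headers hash into the first ARC_BUFC_NUMMETADATALISTS
 * lists, data headers into the remainder.
 */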
1686static void
1687get_buf_info(arc_buf_hdr_t *hdr, arc_state_t *state, list_t **list, kmutex_t **lock)
1688{
1689	uint64_t buf_hashid = buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
1690
1691	if (arc_buf_type(hdr) == ARC_BUFC_METADATA)
1692		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1693	else {
1694		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1695		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1696	}
1697
1698	*list = &state->arcs_lists[buf_hashid];
1699	*lock = ARCS_LOCK(state, buf_hashid);
1700}
1701
1702
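/*
 * Take a reference on a header on behalf of 'tag'.  If this is the first
 * reference and the header is on an evictable list, pull it off that list
 * and subtract its size from the state's evictable-size accounting.
 */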
1703static void
1704add_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
1705{
1706	ASSERT(HDR_HAS_L1HDR(hdr));
1707	ASSERT(MUTEX_HELD(hash_lock));
1708	arc_state_t *state = hdr->b_l1hdr.b_state;
1709
1710	if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
1711	    (state != arc_anon)) {
1712		/* We don't use the L2-only state list. */
1713		if (state != arc_l2c_only) {
1714			uint64_t delta = hdr->b_size * hdr->b_l1hdr.b_datacnt;
1715			uint64_t *size = &state->arcs_lsize[arc_buf_type(hdr)];
1716			list_t *list;
1717			kmutex_t *lock;
1718
1719			get_buf_info(hdr, state, &list, &lock);
1720			ASSERT(!MUTEX_HELD(lock));
1721			mutex_enter(lock);
1722			ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
1723			list_remove(list, hdr);
1724			if (GHOST_STATE(state)) {
1725				ASSERT0(hdr->b_l1hdr.b_datacnt);
1726				ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
1727				delta = hdr->b_size;
1728			}
1729			ASSERT(delta > 0);
1730			ASSERT3U(*size, >=, delta);
1731			atomic_add_64(size, -delta);
1732			mutex_exit(lock);
1733		}
1734		/* remove the prefetch flag if we get a reference */
1735		hdr->b_flags &= ~ARC_FLAG_PREFETCH;
1736	}
1737}
1738
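/*
 * Release a reference held by 'tag'.  If it was the last reference and the
 * header is not anonymous, put the header back on its state's evictable list
 * and credit its size to the evictable-size accounting.  Returns the
 * remaining reference count.
 */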
1739static int
1740remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
1741{
1742	int cnt;
1743	arc_state_t *state = hdr->b_l1hdr.b_state;
1744
1745	ASSERT(HDR_HAS_L1HDR(hdr));
1746	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1747	ASSERT(!GHOST_STATE(state));
1748
1749	/*
1750	 * arc_l2c_only counts as a ghost state so we don't need to explicitly
1751	 * check to prevent usage of the arc_l2c_only list.
1752	 */
1753	if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
1754	    (state != arc_anon)) {
1755		uint64_t *size = &state->arcs_lsize[arc_buf_type(hdr)];
1756		list_t *list;
1757		kmutex_t *lock;
1758
1759		get_buf_info(hdr, state, &list, &lock);
1760		ASSERT(!MUTEX_HELD(lock));
1761		mutex_enter(lock);
1762		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
1763		list_insert_head(list, hdr);
1764		ASSERT(hdr->b_l1hdr.b_datacnt > 0);
1765		atomic_add_64(size, hdr->b_size *
1766		    hdr->b_l1hdr.b_datacnt);
1767		mutex_exit(lock);
1768	}
1769	return (cnt);
1770}
1771
1772/*
1773 * Move the supplied buffer to the indicated state.  The mutex
1774 * for the buffer must be held by the caller.
1775 */
1776static void
1777arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
1778    kmutex_t *hash_lock)
1779{
1780	arc_state_t *old_state;
1781	int64_t refcnt;
1782	uint32_t datacnt;
1783	uint64_t from_delta, to_delta;
1784	arc_buf_contents_t buftype = arc_buf_type(hdr);
1785	list_t *list;
1786	kmutex_t *lock;
1787
1788	/*
1789	 * We almost always have an L1 hdr here, since we call arc_hdr_realloc()
1790	 * in arc_read() when bringing a buffer out of the L2ARC.  However, the
1791	 * L1 hdr doesn't always exist when we change state to arc_anon before
1792	 * destroying a header, in which case reallocating to add the L1 hdr is
1793	 * pointless.
1794	 */
1795	if (HDR_HAS_L1HDR(hdr)) {
1796		old_state = hdr->b_l1hdr.b_state;
1797		refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
1798		datacnt = hdr->b_l1hdr.b_datacnt;
1799	} else {
1800		old_state = arc_l2c_only;
1801		refcnt = 0;
1802		datacnt = 0;
1803	}
1804
1805	ASSERT(MUTEX_HELD(hash_lock));
1806	ASSERT3P(new_state, !=, old_state);
1807	ASSERT(refcnt == 0 || datacnt > 0);
1808	ASSERT(!GHOST_STATE(new_state) || datacnt == 0);
1809	ASSERT(old_state != arc_anon || datacnt <= 1);
1810
1811	from_delta = to_delta = datacnt * hdr->b_size;
1812
1813	/*
1814	 * If this buffer is evictable, transfer it from the
1815	 * old state list to the new state list.
1816	 */
1817	if (refcnt == 0) {
1818		if (old_state != arc_anon && old_state != arc_l2c_only) {
1819			int use_mutex;
1820			uint64_t *size = &old_state->arcs_lsize[buftype];
1821
1822			get_buf_info(hdr, old_state, &list, &lock);
1823			use_mutex = !MUTEX_HELD(lock);
1824			if (use_mutex)
1825				mutex_enter(lock);
1826
1827			ASSERT(HDR_HAS_L1HDR(hdr));
1828			ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
1829			list_remove(list, hdr);
1830
1831			/*
1832			 * If prefetching out of the ghost cache,
1833			 * we will have a non-zero datacnt.
1834			 */
1835			if (GHOST_STATE(old_state) && datacnt == 0) {
1836				/* ghost elements have a ghost size */
1837				ASSERT(hdr->b_l1hdr.b_buf == NULL);
1838				from_delta = hdr->b_size;
1839			}
1840			ASSERT3U(*size, >=, from_delta);
1841			atomic_add_64(size, -from_delta);
1842
1843			if (use_mutex)
1844				mutex_exit(lock);
1845		}
1846		if (new_state != arc_anon && new_state != arc_l2c_only) {
1847			int use_mutex;
1848			uint64_t *size = &new_state->arcs_lsize[buftype];
1849
1850			/*
1851			 * An L1 header always exists here, since if we're
1852			 * moving to some L1-cached state (i.e. not l2c_only or
1853			 * anonymous), we realloc the header to add an L1hdr
1854			 * beforehand.
1855			 */
1856			ASSERT(HDR_HAS_L1HDR(hdr));
1857			get_buf_info(hdr, new_state, &list, &lock);
1858			use_mutex = !MUTEX_HELD(lock);
1859			if (use_mutex)
1860				mutex_enter(lock);
1861
1862			list_insert_head(list, hdr);
1863
1864			/* ghost elements have a ghost size */
1865			if (GHOST_STATE(new_state)) {
1866				ASSERT(datacnt == 0);
1867				ASSERT(hdr->b_l1hdr.b_buf == NULL);
1868				to_delta = hdr->b_size;
1869			}
1870			atomic_add_64(size, to_delta);
1871
1872			if (use_mutex)
1873				mutex_exit(lock);
1874		}
1875	}
1876
1877	ASSERT(!BUF_EMPTY(hdr));
1878	if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
1879		buf_hash_remove(hdr);
1880
1881	/* adjust state sizes (ignore arc_l2c_only) */
1882	if (to_delta && new_state != arc_l2c_only)
1883		atomic_add_64(&new_state->arcs_size, to_delta);
1884	if (from_delta && old_state != arc_l2c_only) {
1885		ASSERT3U(old_state->arcs_size, >=, from_delta);
1886		atomic_add_64(&old_state->arcs_size, -from_delta);
1887	}
1888	if (HDR_HAS_L1HDR(hdr))
1889		hdr->b_l1hdr.b_state = new_state;
1890
1891	/*
1892	 * L2 headers should never be on the L2 state list since they don't
1893	 * have L1 headers allocated.
1894	 */
1895#ifdef illumos
1896	ASSERT(list_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
1897	    list_is_empty(&arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
1898#endif
1899}
1900
1901void
1902arc_space_consume(uint64_t space, arc_space_type_t type)
1903{
1904	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1905
1906	switch (type) {
1907	case ARC_SPACE_DATA:
1908		ARCSTAT_INCR(arcstat_data_size, space);
1909		break;
1910	case ARC_SPACE_META:
1911		ARCSTAT_INCR(arcstat_metadata_size, space);
1912		break;
1913	case ARC_SPACE_OTHER:
1914		ARCSTAT_INCR(arcstat_other_size, space);
1915		break;
1916	case ARC_SPACE_HDRS:
1917		ARCSTAT_INCR(arcstat_hdr_size, space);
1918		break;
1919	case ARC_SPACE_L2HDRS:
1920		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1921		break;
1922	}
1923
1924	if (type != ARC_SPACE_DATA)
1925		ARCSTAT_INCR(arcstat_meta_used, space);
1926
1927	atomic_add_64(&arc_size, space);
1928}
1929
1930void
1931arc_space_return(uint64_t space, arc_space_type_t type)
1932{
1933	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1934
1935	switch (type) {
1936	case ARC_SPACE_DATA:
1937		ARCSTAT_INCR(arcstat_data_size, -space);
1938		break;
1939	case ARC_SPACE_META:
1940		ARCSTAT_INCR(arcstat_metadata_size, -space);
1941		break;
1942	case ARC_SPACE_OTHER:
1943		ARCSTAT_INCR(arcstat_other_size, -space);
1944		break;
1945	case ARC_SPACE_HDRS:
1946		ARCSTAT_INCR(arcstat_hdr_size, -space);
1947		break;
1948	case ARC_SPACE_L2HDRS:
1949		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1950		break;
1951	}
1952
1953	if (type != ARC_SPACE_DATA) {
1954		ASSERT(arc_meta_used >= space);
1955		if (arc_meta_max < arc_meta_used)
1956			arc_meta_max = arc_meta_used;
1957		ARCSTAT_INCR(arcstat_meta_used, -space);
1958	}
1959
1960	ASSERT(arc_size >= space);
1961	atomic_add_64(&arc_size, -space);
1962}
1963
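/*
 * Allocate an anonymous ARC buffer of 'size' bytes with a full (L1) header,
 * take a reference on it for 'tag', and return it to the caller.
 */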
1964arc_buf_t *
1965arc_buf_alloc(spa_t *spa, int32_t size, void *tag, arc_buf_contents_t type)
1966{
1967	arc_buf_hdr_t *hdr;
1968	arc_buf_t *buf;
1969
1970	ASSERT3U(size, >, 0);
1971	hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
1972	ASSERT(BUF_EMPTY(hdr));
1973	ASSERT3P(hdr->b_freeze_cksum, ==, NULL);
1974	hdr->b_size = size;
1975	hdr->b_spa = spa_load_guid(spa);
1976
1977	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1978	buf->b_hdr = hdr;
1979	buf->b_data = NULL;
1980	buf->b_efunc = NULL;
1981	buf->b_private = NULL;
1982	buf->b_next = NULL;
1983
1984	hdr->b_flags = arc_bufc_to_flags(type);
1985	hdr->b_flags |= ARC_FLAG_HAS_L1HDR;
1986
1987	hdr->b_l1hdr.b_buf = buf;
1988	hdr->b_l1hdr.b_state = arc_anon;
1989	hdr->b_l1hdr.b_arc_access = 0;
1990	hdr->b_l1hdr.b_datacnt = 1;
1991
1992	arc_get_data_buf(buf);
1993	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
1994	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
1995
1996	return (buf);
1997}
1998
1999static char *arc_onloan_tag = "onloan";
2000
2001/*
2002 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
2003 * flight data by arc_tempreserve_space() until they are "returned". Loaned
2004 * buffers must be returned to the arc before they can be used by the DMU or
2005 * freed.
2006 */
2007arc_buf_t *
2008arc_loan_buf(spa_t *spa, int size)
2009{
2010	arc_buf_t *buf;
2011
2012	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
2013
2014	atomic_add_64(&arc_loaned_bytes, size);
2015	return (buf);
2016}
2017
2018/*
2019 * Return a loaned arc buffer to the arc.
2020 */
2021void
2022arc_return_buf(arc_buf_t *buf, void *tag)
2023{
2024	arc_buf_hdr_t *hdr = buf->b_hdr;
2025
2026	ASSERT(buf->b_data != NULL);
2027	ASSERT(HDR_HAS_L1HDR(hdr));
2028	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
2029	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
2030
2031	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
2032}
2033
2034/* Detach an arc_buf from a dbuf (tag) */
2035void
2036arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
2037{
2038	arc_buf_hdr_t *hdr = buf->b_hdr;
2039
2040	ASSERT(buf->b_data != NULL);
2041	ASSERT(HDR_HAS_L1HDR(hdr));
2042	(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
2043	(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
2044	buf->b_efunc = NULL;
2045	buf->b_private = NULL;
2046
2047	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
2048}
2049
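/*
 * Create an additional arc_buf_t that shares 'from's header but carries its
 * own copy of the data.
 */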
2050static arc_buf_t *
2051arc_buf_clone(arc_buf_t *from)
2052{
2053	arc_buf_t *buf;
2054	arc_buf_hdr_t *hdr = from->b_hdr;
2055	uint64_t size = hdr->b_size;
2056
2057	ASSERT(HDR_HAS_L1HDR(hdr));
2058	ASSERT(hdr->b_l1hdr.b_state != arc_anon);
2059
2060	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
2061	buf->b_hdr = hdr;
2062	buf->b_data = NULL;
2063	buf->b_efunc = NULL;
2064	buf->b_private = NULL;
2065	buf->b_next = hdr->b_l1hdr.b_buf;
2066	hdr->b_l1hdr.b_buf = buf;
2067	arc_get_data_buf(buf);
2068	bcopy(from->b_data, buf->b_data, size);
2069
2070	/*
2071	 * This buffer already exists in the arc so create a duplicate
2072	 * copy for the caller.  If the buffer is associated with user data
2073	 * then track the size and number of duplicates.  These stats will be
2074	 * updated as duplicate buffers are created and destroyed.
2075	 */
2076	if (HDR_ISTYPE_DATA(hdr)) {
2077		ARCSTAT_BUMP(arcstat_duplicate_buffers);
2078		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
2079	}
2080	hdr->b_l1hdr.b_datacnt += 1;
2081	return (buf);
2082}
2083
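/*
 * Add a reference to an existing MRU/MFU buffer on behalf of 'tag', record
 * the access and bump the hit statistics.
 */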
2084void
2085arc_buf_add_ref(arc_buf_t *buf, void* tag)
2086{
2087	arc_buf_hdr_t *hdr;
2088	kmutex_t *hash_lock;
2089
2090	/*
2091	 * Check to see if this buffer is evicted.  Callers
2092	 * must verify b_data != NULL to know if the add_ref
2093	 * was successful.
2094	 */
2095	mutex_enter(&buf->b_evict_lock);
2096	if (buf->b_data == NULL) {
2097		mutex_exit(&buf->b_evict_lock);
2098		return;
2099	}
2100	hash_lock = HDR_LOCK(buf->b_hdr);
2101	mutex_enter(hash_lock);
2102	hdr = buf->b_hdr;
2103	ASSERT(HDR_HAS_L1HDR(hdr));
2104	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2105	mutex_exit(&buf->b_evict_lock);
2106
2107	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
2108	    hdr->b_l1hdr.b_state == arc_mfu);
2109
2110	add_reference(hdr, hash_lock, tag);
2111	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
2112	arc_access(hdr, hash_lock);
2113	mutex_exit(hash_lock);
2114	ARCSTAT_BUMP(arcstat_hits);
2115	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
2116	    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
2117	    data, metadata, hits);
2118}
2119
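/*
 * Queue a data buffer on l2arc_free_on_write so it is freed later, once it
 * is no longer referenced by an in-flight L2ARC write.
 */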
2120static void
2121arc_buf_free_on_write(void *data, size_t size,
2122    void (*free_func)(void *, size_t))
2123{
2124	l2arc_data_free_t *df;
2125
2126	df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
2127	df->l2df_data = data;
2128	df->l2df_size = size;
2129	df->l2df_func = free_func;
2130	mutex_enter(&l2arc_free_on_write_mtx);
2131	list_insert_head(l2arc_free_on_write, df);
2132	mutex_exit(&l2arc_free_on_write_mtx);
2133}
2134
2135/*
2136 * Free the arc data buffer.  If it is an l2arc write in progress,
2137 * the buffer is placed on l2arc_free_on_write to be freed later.
2138 */
2139static void
2140arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
2141{
2142	arc_buf_hdr_t *hdr = buf->b_hdr;
2143
2144	if (HDR_L2_WRITING(hdr)) {
2145		arc_buf_free_on_write(buf->b_data, hdr->b_size, free_func);
2146		ARCSTAT_BUMP(arcstat_l2_free_on_write);
2147	} else {
2148		free_func(buf->b_data, hdr->b_size);
2149	}
2150}
2151
2152/*
2153 * Free the temporary compressed copy (b_tmp_cdata) that was allocated for
2154 * an in-flight L2ARC write of this header, if one exists.
2155 */
2156static void
2157arc_buf_l2_cdata_free(arc_buf_hdr_t *hdr)
2158{
2159	ASSERT(HDR_HAS_L2HDR(hdr));
2160	ASSERT(MUTEX_HELD(&hdr->b_l2hdr.b_dev->l2ad_mtx));
2161
2162	/*
2163	 * The b_tmp_cdata field is linked off of the b_l1hdr, so if
2164	 * that doesn't exist, the header is in the arc_l2c_only state,
2165	 * and there isn't anything to free (it's already been freed).
2166	 */
2167	if (!HDR_HAS_L1HDR(hdr))
2168		return;
2169
2170	if (hdr->b_l1hdr.b_tmp_cdata == NULL)
2171		return;
2172
2173	ASSERT(HDR_L2_WRITING(hdr));
2174	arc_buf_free_on_write(hdr->b_l1hdr.b_tmp_cdata, hdr->b_size,
2175	    zio_data_buf_free);
2176
2177	ARCSTAT_BUMP(arcstat_l2_cdata_free_on_write);
2178	hdr->b_l1hdr.b_tmp_cdata = NULL;
2179}
2180
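/*
 * Free up buf->b_data (unless it is being recycled) and, if 'remove' is set,
 * pull the arc_buf_t off of the arc_buf_hdr_t's list and free it.
 */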
2181static void
2182arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
2183{
2184	arc_buf_t **bufp;
2185
2186	/* free up data associated with the buf */
2187	if (buf->b_data != NULL) {
2188		arc_state_t *state = buf->b_hdr->b_l1hdr.b_state;
2189		uint64_t size = buf->b_hdr->b_size;
2190		arc_buf_contents_t type = arc_buf_type(buf->b_hdr);
2191
2192		arc_cksum_verify(buf);
2193#ifdef illumos
2194		arc_buf_unwatch(buf);
2195#endif /* illumos */
2196
2197		if (!recycle) {
2198			if (type == ARC_BUFC_METADATA) {
2199				arc_buf_data_free(buf, zio_buf_free);
2200				arc_space_return(size, ARC_SPACE_META);
2201			} else {
2202				ASSERT(type == ARC_BUFC_DATA);
2203				arc_buf_data_free(buf, zio_data_buf_free);
2204				arc_space_return(size, ARC_SPACE_DATA);
2205			}
2206		}
2207		if (list_link_active(&buf->b_hdr->b_l1hdr.b_arc_node)) {
2208			uint64_t *cnt = &state->arcs_lsize[type];
2209
2210			ASSERT(refcount_is_zero(
2211			    &buf->b_hdr->b_l1hdr.b_refcnt));
2212			ASSERT(state != arc_anon && state != arc_l2c_only);
2213
2214			ASSERT3U(*cnt, >=, size);
2215			atomic_add_64(cnt, -size);
2216		}
2217		ASSERT3U(state->arcs_size, >=, size);
2218		atomic_add_64(&state->arcs_size, -size);
2219		buf->b_data = NULL;
2220
2221		/*
2222		 * If we're destroying a duplicate buffer make sure
2223		 * that the appropriate statistics are updated.
2224		 */
2225		if (buf->b_hdr->b_l1hdr.b_datacnt > 1 &&
2226		    HDR_ISTYPE_DATA(buf->b_hdr)) {
2227			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
2228			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
2229		}
2230		ASSERT(buf->b_hdr->b_l1hdr.b_datacnt > 0);
2231		buf->b_hdr->b_l1hdr.b_datacnt -= 1;
2232	}
2233
2234	/* only remove the buf if requested */
2235	if (!remove)
2236		return;
2237
2238	/* remove the buf from the hdr list */
2239	for (bufp = &buf->b_hdr->b_l1hdr.b_buf; *bufp != buf;
2240	    bufp = &(*bufp)->b_next)
2241		continue;
2242	*bufp = buf->b_next;
2243	buf->b_next = NULL;
2244
2245	ASSERT(buf->b_efunc == NULL);
2246
2247	/* clean up the buf */
2248	buf->b_hdr = NULL;
2249	kmem_cache_free(buf_cache, buf);
2250}
2251
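/*
 * Tear down the L2ARC portion of a header: remove it from the device's
 * buffer list, free any temporary compressed copy, and (unless the header is
 * still in the first stage of l2arc_write_buffers()) undo the L2ARC space
 * accounting.
 */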
2252static void
2253arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
2254{
2255	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
2256	l2arc_dev_t *dev = l2hdr->b_dev;
2257
2258	ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
2259	ASSERT(HDR_HAS_L2HDR(hdr));
2260
2261	list_remove(&dev->l2ad_buflist, hdr);
2262
2263	/*
2264	 * We don't want to leak the b_tmp_cdata buffer that was
2265	 * allocated in l2arc_write_buffers()
2266	 */
2267	arc_buf_l2_cdata_free(hdr);
2268
2269	/*
2270	 * If the l2hdr's b_daddr is equal to L2ARC_ADDR_UNSET, then
2271	 * this header is being processed by l2arc_write_buffers() (i.e.
2272	 * it's in the first stage of l2arc_write_buffers()).
2273	 * Re-affirming that truth here, just to serve as a reminder. If
2274	 * b_daddr does not equal L2ARC_ADDR_UNSET, then the header may or
2275	 * may not have its HDR_L2_WRITING flag set. (the write may have
2276	 * completed, in which case HDR_L2_WRITING will be false and the
2277	 * b_daddr field will point to the address of the buffer on disk).
2278	 */
2279	IMPLY(l2hdr->b_daddr == L2ARC_ADDR_UNSET, HDR_L2_WRITING(hdr));
2280
2281	/*
2282	 * If b_daddr is equal to L2ARC_ADDR_UNSET, we're racing with
2283	 * l2arc_write_buffers(). Since we've just removed this header
2284	 * from the l2arc buffer list, this header will never reach the
2285	 * second stage of l2arc_write_buffers(), which increments the
2286	 * accounting stats for this header. Thus, we must be careful
2287	 * not to decrement them for this header either.
2288	 */
2289	if (l2hdr->b_daddr != L2ARC_ADDR_UNSET) {
2290		ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
2291		ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
2292
2293		vdev_space_update(dev->l2ad_vdev,
2294		    -l2hdr->b_asize, 0, 0);
2295
2296		(void) refcount_remove_many(&dev->l2ad_alloc,
2297		    l2hdr->b_asize, hdr);
2298	}
2299
2300	hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
2301}
2302
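/*
 * Destroy an anonymous, unreferenced header: release its L2ARC state (if
 * any), destroy or queue its remaining buffers, and return the header to the
 * appropriate kmem cache.
 */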
2303static void
2304arc_hdr_destroy(arc_buf_hdr_t *hdr)
2305{
2306	if (HDR_HAS_L1HDR(hdr)) {
2307		ASSERT(hdr->b_l1hdr.b_buf == NULL ||
2308		    hdr->b_l1hdr.b_datacnt > 0);
2309		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2310		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
2311	}
2312	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2313	ASSERT(!HDR_IN_HASH_TABLE(hdr));
2314
2315	if (HDR_HAS_L2HDR(hdr)) {
2316		l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
2317		boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
2318
2319		if (!buflist_held)
2320			mutex_enter(&dev->l2ad_mtx);
2321
2322		/*
2323		 * Even though we checked this conditional above, we
2324		 * need to check this again now that we have the
2325		 * l2ad_mtx. This is because we could be racing with
2326		 * another thread calling l2arc_evict() which might have
2327		 * destroyed this header's L2 portion as we were waiting
2328		 * to acquire the l2ad_mtx. If that happens, we don't
2329		 * want to re-destroy the header's L2 portion.
2330		 */
2331		if (HDR_HAS_L2HDR(hdr)) {
2332			trim_map_free(dev->l2ad_vdev, hdr->b_l2hdr.b_daddr,
2333			    hdr->b_l2hdr.b_asize, 0);
2334			arc_hdr_l2hdr_destroy(hdr);
2335		}
2336
2337		if (!buflist_held)
2338			mutex_exit(&dev->l2ad_mtx);
2339	}
2340
2341	if (!BUF_EMPTY(hdr))
2342		buf_discard_identity(hdr);
2343	if (hdr->b_freeze_cksum != NULL) {
2344		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
2345		hdr->b_freeze_cksum = NULL;
2346	}
2347
2348	if (HDR_HAS_L1HDR(hdr)) {
2349		while (hdr->b_l1hdr.b_buf) {
2350			arc_buf_t *buf = hdr->b_l1hdr.b_buf;
2351
2352			if (buf->b_efunc != NULL) {
2353				mutex_enter(&arc_eviction_mtx);
2354				mutex_enter(&buf->b_evict_lock);
2355				ASSERT(buf->b_hdr != NULL);
2356				arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
2357				    FALSE);
2358				hdr->b_l1hdr.b_buf = buf->b_next;
2359				buf->b_hdr = &arc_eviction_hdr;
2360				buf->b_next = arc_eviction_list;
2361				arc_eviction_list = buf;
2362				mutex_exit(&buf->b_evict_lock);
2363				mutex_exit(&arc_eviction_mtx);
2364			} else {
2365				arc_buf_destroy(hdr->b_l1hdr.b_buf, FALSE,
2366				    TRUE);
2367			}
2368		}
2369#ifdef ZFS_DEBUG
2370		if (hdr->b_l1hdr.b_thawed != NULL) {
2371			kmem_free(hdr->b_l1hdr.b_thawed, 1);
2372			hdr->b_l1hdr.b_thawed = NULL;
2373		}
2374#endif
2375	}
2376
2377	ASSERT3P(hdr->b_hash_next, ==, NULL);
2378	if (HDR_HAS_L1HDR(hdr)) {
2379		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
2380		ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
2381		kmem_cache_free(hdr_full_cache, hdr);
2382	} else {
2383		kmem_cache_free(hdr_l2only_cache, hdr);
2384	}
2385}
2386
2387void
2388arc_buf_free(arc_buf_t *buf, void *tag)
2389{
2390	arc_buf_hdr_t *hdr = buf->b_hdr;
2391	int hashed = hdr->b_l1hdr.b_state != arc_anon;
2392
2393	ASSERT(buf->b_efunc == NULL);
2394	ASSERT(buf->b_data != NULL);
2395
2396	if (hashed) {
2397		kmutex_t *hash_lock = HDR_LOCK(hdr);
2398
2399		mutex_enter(hash_lock);
2400		hdr = buf->b_hdr;
2401		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2402
2403		(void) remove_reference(hdr, hash_lock, tag);
2404		if (hdr->b_l1hdr.b_datacnt > 1) {
2405			arc_buf_destroy(buf, FALSE, TRUE);
2406		} else {
2407			ASSERT(buf == hdr->b_l1hdr.b_buf);
2408			ASSERT(buf->b_efunc == NULL);
2409			hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
2410		}
2411		mutex_exit(hash_lock);
2412	} else if (HDR_IO_IN_PROGRESS(hdr)) {
2413		int destroy_hdr;
2414		/*
2415		 * We are in the middle of an async write.  Don't destroy
2416		 * this buffer unless the write completes before we finish
2417		 * decrementing the reference count.
2418		 */
2419		mutex_enter(&arc_eviction_mtx);
2420		(void) remove_reference(hdr, NULL, tag);
2421		ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2422		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
2423		mutex_exit(&arc_eviction_mtx);
2424		if (destroy_hdr)
2425			arc_hdr_destroy(hdr);
2426	} else {
2427		if (remove_reference(hdr, NULL, tag) > 0)
2428			arc_buf_destroy(buf, FALSE, TRUE);
2429		else
2430			arc_hdr_destroy(hdr);
2431	}
2432}
2433
2434boolean_t
2435arc_buf_remove_ref(arc_buf_t *buf, void* tag)
2436{
2437	arc_buf_hdr_t *hdr = buf->b_hdr;
2438	kmutex_t *hash_lock = HDR_LOCK(hdr);
2439	boolean_t no_callback = (buf->b_efunc == NULL);
2440
2441	if (hdr->b_l1hdr.b_state == arc_anon) {
2442		ASSERT(hdr->b_l1hdr.b_datacnt == 1);
2443		arc_buf_free(buf, tag);
2444		return (no_callback);
2445	}
2446
2447	mutex_enter(hash_lock);
2448	hdr = buf->b_hdr;
2449	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
2450	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
2451	ASSERT(hdr->b_l1hdr.b_state != arc_anon);
2452	ASSERT(buf->b_data != NULL);
2453
2454	(void) remove_reference(hdr, hash_lock, tag);
2455	if (hdr->b_l1hdr.b_datacnt > 1) {
2456		if (no_callback)
2457			arc_buf_destroy(buf, FALSE, TRUE);
2458	} else if (no_callback) {
2459		ASSERT(hdr->b_l1hdr.b_buf == buf && buf->b_next == NULL);
2460		ASSERT(buf->b_efunc == NULL);
2461		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
2462	}
2463	ASSERT(no_callback || hdr->b_l1hdr.b_datacnt > 1 ||
2464	    refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
2465	mutex_exit(hash_lock);
2466	return (no_callback);
2467}
2468
2469int32_t
2470arc_buf_size(arc_buf_t *buf)
2471{
2472	return (buf->b_hdr->b_size);
2473}
2474
2475/*
2476 * Called from the DMU to determine if the current buffer should be
2477 * evicted. In order to ensure proper locking, the eviction must be initiated
2478 * from the DMU. Return true if the buffer is associated with user data and
2479 * duplicate buffers still exist.
2480 */
2481boolean_t
2482arc_buf_eviction_needed(arc_buf_t *buf)
2483{
2484	arc_buf_hdr_t *hdr;
2485	boolean_t evict_needed = B_FALSE;
2486
2487	if (zfs_disable_dup_eviction)
2488		return (B_FALSE);
2489
2490	mutex_enter(&buf->b_evict_lock);
2491	hdr = buf->b_hdr;
2492	if (hdr == NULL) {
2493		/*
2494		 * We are in arc_do_user_evicts(); let that function
2495		 * perform the eviction.
2496		 */
2497		ASSERT(buf->b_data == NULL);
2498		mutex_exit(&buf->b_evict_lock);
2499		return (B_FALSE);
2500	} else if (buf->b_data == NULL) {
2501		/*
2502		 * We have already been added to the arc eviction list;
2503		 * recommend eviction.
2504		 */
2505		ASSERT3P(hdr, ==, &arc_eviction_hdr);
2506		mutex_exit(&buf->b_evict_lock);
2507		return (B_TRUE);
2508	}
2509
2510	if (hdr->b_l1hdr.b_datacnt > 1 && HDR_ISTYPE_DATA(hdr))
2511		evict_needed = B_TRUE;
2512
2513	mutex_exit(&buf->b_evict_lock);
2514	return (evict_needed);
2515}
2516
2517/*
2518 * Evict buffers from list until we've removed the specified number of
2519 * bytes.  Move the removed buffers to the appropriate evict state.
2520 * If the recycle flag is set, then attempt to "recycle" a buffer:
2521 * - look for a buffer to evict that is `bytes' long.
2522 * - return the data block from this buffer rather than freeing it.
2523 * This flag is used by callers that are trying to make space for a
2524 * new buffer in a full arc cache.
2525 *
2526 * This function makes a "best effort".  It skips over any buffers
2527 * it can't get a hash_lock on, and so may not catch all candidates.
2528 * It may also return without evicting as much space as requested.
2529 */
2530static void *
2531arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
2532    arc_buf_contents_t type)
2533{
2534	arc_state_t *evicted_state;
2535	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
2536	int64_t bytes_remaining;
2537	arc_buf_hdr_t *hdr, *hdr_prev = NULL;
2538	list_t *evicted_list, *list, *evicted_list_start, *list_start;
2539	kmutex_t *lock, *evicted_lock;
2540	kmutex_t *hash_lock;
2541	boolean_t have_lock;
2542	void *stolen = NULL;
2543	arc_buf_hdr_t marker = { 0 };
2544	int count = 0;
2545	static int evict_metadata_offset, evict_data_offset;
2546	int i, idx, offset, list_count, lists;
2547
2548	ASSERT(state == arc_mru || state == arc_mfu);
2549
2550	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
2551
2552	/*
2553	 * Decide which "type" (data vs metadata) to recycle from.
2554	 *
2555	 * If we are over the metadata limit, recycle from metadata.
2556	 * If we are under the metadata minimum, recycle from data.
2557	 * Otherwise, recycle from whichever type has the oldest (least
2558	 * recently accessed) header.  This is not yet implemented.
2559	 */
2560	if (recycle) {
2561		arc_buf_contents_t realtype;
2562		if (state->arcs_lsize[ARC_BUFC_DATA] == 0) {
2563			realtype = ARC_BUFC_METADATA;
2564		} else if (state->arcs_lsize[ARC_BUFC_METADATA] == 0) {
2565			realtype = ARC_BUFC_DATA;
2566		} else if (arc_meta_used >= arc_meta_limit) {
2567			realtype = ARC_BUFC_METADATA;
2568		} else if (arc_meta_used <= arc_meta_min) {
2569			realtype = ARC_BUFC_DATA;
2570#ifdef illumos
2571		} else if (HDR_HAS_L1HDR(data_hdr) &&
2572		    HDR_HAS_L1HDR(metadata_hdr) &&
2573		    data_hdr->b_l1hdr.b_arc_access <
2574		    metadata_hdr->b_l1hdr.b_arc_access) {
2575			realtype = ARC_BUFC_DATA;
2576		} else {
2577			realtype = ARC_BUFC_METADATA;
2578#else
2579		} else {
2580			/* TODO */
2581			realtype = type;
2582#endif
2583		}
2584		if (realtype != type) {
2585			/*
2586			 * If we want to evict from a different list,
2587			 * we can not recycle, because DATA vs METADATA
2588			 * buffers are segregated into different kmem
2589			 * caches (and vmem arenas).
2590			 */
2591			type = realtype;
2592			recycle = B_FALSE;
2593		}
2594	}
2595
2596	if (type == ARC_BUFC_METADATA) {
2597		offset = 0;
2598		list_count = ARC_BUFC_NUMMETADATALISTS;
2599		list_start = &state->arcs_lists[0];
2600		evicted_list_start = &evicted_state->arcs_lists[0];
2601		idx = evict_metadata_offset;
2602	} else {
2603		offset = ARC_BUFC_NUMMETADATALISTS;
2604		list_start = &state->arcs_lists[offset];
2605		evicted_list_start = &evicted_state->arcs_lists[offset];
2606		list_count = ARC_BUFC_NUMDATALISTS;
2607		idx = evict_data_offset;
2608	}
2609	bytes_remaining = evicted_state->arcs_lsize[type];
2610	lists = 0;
2611
2612evict_start:
2613	list = &list_start[idx];
2614	evicted_list = &evicted_list_start[idx];
2615	lock = ARCS_LOCK(state, (offset + idx));
2616	evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
2617
2618	/*
2619	 * The ghost list lock must be acquired first in order to prevent
2620	 * a 3 party deadlock:
2621	 * a 3-party deadlock:
2622	 *  - arc_evict_ghost acquires arc_*_ghost->arcs_mtx, followed by
2623	 *    l2ad_mtx in arc_hdr_realloc
2624	 *  - l2arc_write_buffers acquires l2ad_mtx, followed by arc_*->arcs_mtx
2625	 *  - arc_evict acquires arc_*->arcs_mtx, followed by
2626	 *    arc_*_ghost->arcs_mtx and forms a deadlock cycle.
2627	 *
2628	 * This situation is avoided by acquiring the ghost list lock first.
2629	 */
2630	mutex_enter(evicted_lock);
2631	mutex_enter(lock);
2632
2633	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
2634		hdr_prev = list_prev(list, hdr);
2635		if (HDR_HAS_L1HDR(hdr)) {
2636			bytes_remaining -=
2637			    (hdr->b_size * hdr->b_l1hdr.b_datacnt);
2638		}
2639		/* prefetch buffers have a minimum lifespan */
2640		if (HDR_IO_IN_PROGRESS(hdr) ||
2641		    (spa && hdr->b_spa != spa) ||
2642		    ((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
2643		    ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
2644		    arc_min_prefetch_lifespan)) {
2645			skipped++;
2646			continue;
2647		}
2648		/* "lookahead" for better eviction candidate */
2649		if (recycle && hdr->b_size != bytes &&
2650		    hdr_prev && hdr_prev->b_size == bytes)
2651			continue;
2652
2653		/* ignore markers */
2654		if (hdr->b_spa == 0)
2655			continue;
2656
2657		/*
2658		 * It may take a long time to evict all the bufs requested.
2659		 * To avoid blocking all arc activity, periodically drop
2660		 * the arcs_mtx and give other threads a chance to run
2661		 * before reacquiring the lock.
2662		 *
2663		 * If we are looking for a buffer to recycle, we are in
2664		 * the hot code path, so don't sleep.
2665		 */
2666		if (!recycle && count++ > arc_evict_iterations) {
2667			list_insert_after(list, hdr, &marker);
2668			mutex_exit(lock);
2669			mutex_exit(evicted_lock);
2670			kpreempt(KPREEMPT_SYNC);
2671			mutex_enter(evicted_lock);
2672			mutex_enter(lock);
2673			hdr_prev = list_prev(list, &marker);
2674			list_remove(list, &marker);
2675			count = 0;
2676			continue;
2677		}
2678
2679		hash_lock = HDR_LOCK(hdr);
2680		have_lock = MUTEX_HELD(hash_lock);
2681		if (have_lock || mutex_tryenter(hash_lock)) {
2682			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
2683			ASSERT3U(hdr->b_l1hdr.b_datacnt, >, 0);
2684			while (hdr->b_l1hdr.b_buf) {
2685				arc_buf_t *buf = hdr->b_l1hdr.b_buf;
2686				if (!mutex_tryenter(&buf->b_evict_lock)) {
2687					missed += 1;
2688					break;
2689				}
2690				if (buf->b_data != NULL) {
2691					bytes_evicted += hdr->b_size;
2692					if (recycle &&
2693					    arc_buf_type(hdr) == type &&
2694					    hdr->b_size == bytes &&
2695					    !HDR_L2_WRITING(hdr)) {
2696						stolen = buf->b_data;
2697						recycle = FALSE;
2698					}
2699				}
2700				if (buf->b_efunc != NULL) {
2701					mutex_enter(&arc_eviction_mtx);
2702					arc_buf_destroy(buf,
2703					    buf->b_data == stolen, FALSE);
2704					hdr->b_l1hdr.b_buf = buf->b_next;
2705					buf->b_hdr = &arc_eviction_hdr;
2706					buf->b_next = arc_eviction_list;
2707					arc_eviction_list = buf;
2708					mutex_exit(&arc_eviction_mtx);
2709					mutex_exit(&buf->b_evict_lock);
2710				} else {
2711					mutex_exit(&buf->b_evict_lock);
2712					arc_buf_destroy(buf,
2713					    buf->b_data == stolen, TRUE);
2714				}
2715			}
2716
2717			if (HDR_HAS_L2HDR(hdr)) {
2718				ARCSTAT_INCR(arcstat_evict_l2_cached,
2719				    hdr->b_size);
2720			} else {
2721				if (l2arc_write_eligible(hdr->b_spa, hdr)) {
2722					ARCSTAT_INCR(arcstat_evict_l2_eligible,
2723					    hdr->b_size);
2724				} else {
2725					ARCSTAT_INCR(
2726					    arcstat_evict_l2_ineligible,
2727					    hdr->b_size);
2728				}
2729			}
2730
2731			if (hdr->b_l1hdr.b_datacnt == 0) {
2732				arc_change_state(evicted_state, hdr, hash_lock);
2733				ASSERT(HDR_IN_HASH_TABLE(hdr));
2734				hdr->b_flags |= ARC_FLAG_IN_HASH_TABLE;
2735				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
2736				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
2737			}
2738			if (!have_lock)
2739				mutex_exit(hash_lock);
2740			if (bytes >= 0 && bytes_evicted >= bytes)
2741				break;
2742			if (bytes_remaining > 0) {
2743				mutex_exit(evicted_lock);
2744				mutex_exit(lock);
2745				idx  = ((idx + 1) & (list_count - 1));
2746				lists++;
2747				goto evict_start;
2748			}
2749		} else {
2750			missed += 1;
2751		}
2752	}
2753
2754	mutex_exit(lock);
2755	mutex_exit(evicted_lock);
2756
2757	idx  = ((idx + 1) & (list_count - 1));
2758	lists++;
2759
2760	if (bytes_evicted < bytes) {
2761		if (lists < list_count)
2762			goto evict_start;
2763		else
2764			dprintf("only evicted %lld bytes from %p",
2765			    (longlong_t)bytes_evicted, state);
2766	}
2767	if (type == ARC_BUFC_METADATA)
2768		evict_metadata_offset = idx;
2769	else
2770		evict_data_offset = idx;
2771
2772	if (skipped)
2773		ARCSTAT_INCR(arcstat_evict_skip, skipped);
2774
2775	if (missed)
2776		ARCSTAT_INCR(arcstat_mutex_miss, missed);
2777
2778	/*
2779	 * Note: we have just evicted some data into the ghost state,
2780	 * potentially putting the ghost size over the desired size.  Rather
2781	 * than evicting from the ghost list in this hot code path, leave
2782	 * this chore to the arc_reclaim_thread().
2783	 */
2784
2785	if (stolen)
2786		ARCSTAT_BUMP(arcstat_stolen);
2787	return (stolen);
2788}
2789
2790/*
2791 * Remove buffers from list until we've removed the specified number of
2792 * bytes.  Destroy the buffers that are removed.
2793 */
2794static void
2795arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2796{
2797	arc_buf_hdr_t *hdr, *hdr_prev;
2798	arc_buf_hdr_t marker = { 0 };
2799	list_t *list, *list_start;
2800	kmutex_t *hash_lock, *lock;
2801	uint64_t bytes_deleted = 0;
2802	uint64_t bufs_skipped = 0;
2803	int count = 0;
2804	static int evict_offset;
2805	int list_count, idx = evict_offset;
2806	int offset, lists = 0;
2807
2808	ASSERT(GHOST_STATE(state));
2809
2810	/*
2811	 * data lists come after metadata lists
2812	 */
2813	list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2814	list_count = ARC_BUFC_NUMDATALISTS;
2815	offset = ARC_BUFC_NUMMETADATALISTS;
2816
2817evict_start:
2818	list = &list_start[idx];
2819	lock = ARCS_LOCK(state, idx + offset);
2820
2821	mutex_enter(lock);
2822	for (hdr = list_tail(list); hdr; hdr = hdr_prev) {
2823		hdr_prev = list_prev(list, hdr);
2824		if (arc_buf_type(hdr) >= ARC_BUFC_NUMTYPES)
2825			panic("invalid hdr=%p", (void *)hdr);
2826		if (spa && hdr->b_spa != spa)
2827			continue;
2828
2829		/* ignore markers */
2830		if (hdr->b_spa == 0)
2831			continue;
2832
2833		hash_lock = HDR_LOCK(hdr);
2834		/* caller may be trying to modify this buffer, skip it */
2835		if (MUTEX_HELD(hash_lock))
2836			continue;
2837
2838		/*
2839		 * It may take a long time to evict all the bufs requested.
2840		 * To avoid blocking all arc activity, periodically drop
2841		 * the arcs_mtx and give other threads a chance to run
2842		 * before reacquiring the lock.
2843		 */
2844		if (count++ > arc_evict_iterations) {
2845			list_insert_after(list, hdr, &marker);
2846			mutex_exit(lock);
2847			kpreempt(KPREEMPT_SYNC);
2848			mutex_enter(lock);
2849			hdr_prev = list_prev(list, &marker);
2850			list_remove(list, &marker);
2851			count = 0;
2852			continue;
2853		}
2854		if (mutex_tryenter(hash_lock)) {
2855			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
2856			ASSERT(!HDR_HAS_L1HDR(hdr) ||
2857			    hdr->b_l1hdr.b_buf == NULL);
2858			ARCSTAT_BUMP(arcstat_deleted);
2859			bytes_deleted += hdr->b_size;
2860
2861			if (HDR_HAS_L2HDR(hdr)) {
2862				/*
2863				 * This buffer is cached on the 2nd Level ARC;
2864				 * don't destroy the header.
2865				 */
2866				arc_change_state(arc_l2c_only, hdr, hash_lock);
2867				/*
2868				 * dropping from L1+L2 cached to L2-only,
2869				 * realloc to remove the L1 header.
2870				 */
2871				hdr = arc_hdr_realloc(hdr, hdr_full_cache,
2872				    hdr_l2only_cache);
2873				mutex_exit(hash_lock);
2874			} else {
2875				arc_change_state(arc_anon, hdr, hash_lock);
2876				mutex_exit(hash_lock);
2877				arc_hdr_destroy(hdr);
2878			}
2879
2880			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
2881			if (bytes >= 0 && bytes_deleted >= bytes)
2882				break;
2883		} else if (bytes < 0) {
2884			/*
2885			 * Insert a list marker and then wait for the
2886			 * hash lock to become available. Once its
2887			 * hash lock to become available. Once it's
2888			 */
2889			list_insert_after(list, hdr, &marker);
2890			mutex_exit(lock);
2891			mutex_enter(hash_lock);
2892			mutex_exit(hash_lock);
2893			mutex_enter(lock);
2894			hdr_prev = list_prev(list, &marker);
2895			list_remove(list, &marker);
2896		} else {
2897			bufs_skipped += 1;
2898		}
2899
2900	}
2901	mutex_exit(lock);
2902	idx  = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2903	lists++;
2904
2905	if (lists < list_count)
2906		goto evict_start;
2907
2908	evict_offset = idx;
2909	if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2910	    (bytes < 0 || bytes_deleted < bytes)) {
2911		list_start = &state->arcs_lists[0];
2912		list_count = ARC_BUFC_NUMMETADATALISTS;
2913		offset = lists = 0;
2914		goto evict_start;
2915	}
2916
2917	if (bufs_skipped) {
2918		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2919		ASSERT(bytes >= 0);
2920	}
2921
2922	if (bytes_deleted < bytes)
2923		dprintf("only deleted %lld bytes from %p",
2924		    (longlong_t)bytes_deleted, state);
2925}
2926
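/*
 * Evict from the MRU and MFU lists (and their ghost lists) as needed to
 * bring arc_size and the ghost list sizes back within their targets.
 */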
2927static void
2928arc_adjust(void)
2929{
2930	int64_t adjustment, delta;
2931
2932	/*
2933	 * Adjust MRU size
2934	 */
2935
2936	adjustment = MIN((int64_t)(arc_size - arc_c),
2937	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2938	    arc_p));
2939
2940	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2941		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2942		(void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2943		adjustment -= delta;
2944	}
2945
2946	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2947		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2948		(void) arc_evict(arc_mru, 0, delta, FALSE,
2949		    ARC_BUFC_METADATA);
2950	}
2951
2952	/*
2953	 * Adjust MFU size
2954	 */
2955
2956	adjustment = arc_size - arc_c;
2957
2958	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2959		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2960		(void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2961		adjustment -= delta;
2962	}
2963
2964	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2965		int64_t delta = MIN(adjustment,
2966		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2967		(void) arc_evict(arc_mfu, 0, delta, FALSE,
2968		    ARC_BUFC_METADATA);
2969	}
2970
2971	/*
2972	 * Adjust ghost lists
2973	 */
2974
2975	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2976
2977	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2978		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2979		arc_evict_ghost(arc_mru_ghost, 0, delta);
2980	}
2981
2982	adjustment =
2983	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2984
2985	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2986		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2987		arc_evict_ghost(arc_mfu_ghost, 0, delta);
2988	}
2989}
2990
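/*
 * Run the eviction callbacks (b_efunc) for buffers that were placed on
 * arc_eviction_list and free the arc_buf_t structures.
 */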
2991static void
2992arc_do_user_evicts(void)
2993{
2994	static arc_buf_t *tmp_arc_eviction_list;
2995
2996	/*
2997	 * Move list over to avoid LOR
2998	 */
2999restart:
3000	mutex_enter(&arc_eviction_mtx);
3001	tmp_arc_eviction_list = arc_eviction_list;
3002	arc_eviction_list = NULL;
3003	mutex_exit(&arc_eviction_mtx);
3004
3005	while (tmp_arc_eviction_list != NULL) {
3006		arc_buf_t *buf = tmp_arc_eviction_list;
3007		tmp_arc_eviction_list = buf->b_next;
3008		mutex_enter(&buf->b_evict_lock);
3009		buf->b_hdr = NULL;
3010		mutex_exit(&buf->b_evict_lock);
3011
3012		if (buf->b_efunc != NULL)
3013			VERIFY0(buf->b_efunc(buf->b_private));
3014
3015		buf->b_efunc = NULL;
3016		buf->b_private = NULL;
3017		kmem_cache_free(buf_cache, buf);
3018	}
3019
3020	if (arc_eviction_list != NULL)
3021		goto restart;
3022}
3023
3024/*
3025 * Flush all *evictable* data from the cache for the given spa.
3026 * NOTE: this will not touch "active" (i.e. referenced) data.
3027 */
3028void
3029arc_flush(spa_t *spa)
3030{
3031	uint64_t guid = 0;
3032
3033	if (spa != NULL)
3034		guid = spa_load_guid(spa);
3035
3036	while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
3037		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
3038		if (spa != NULL)
3039			break;
3040	}
3041	while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
3042		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
3043		if (spa != NULL)
3044			break;
3045	}
3046	while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
3047		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
3048		if (spa != NULL)
3049			break;
3050	}
3051	while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
3052		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
3053		if (spa != NULL)
3054			break;
3055	}
3056
3057	arc_evict_ghost(arc_mru_ghost, guid, -1);
3058	arc_evict_ghost(arc_mfu_ghost, guid, -1);
3059
3060	mutex_enter(&arc_reclaim_thr_lock);
3061	arc_do_user_evicts();
3062	mutex_exit(&arc_reclaim_thr_lock);
3063	ASSERT(spa || arc_eviction_list == NULL);
3064}
3065
3066void
3067arc_shrink(int64_t to_free)
3068{
3069
3070	if (arc_c > arc_c_min) {
3071		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
3072			arc_c_min, uint64_t, arc_p, uint64_t, to_free);
3073		if (arc_c > arc_c_min + to_free)
3074			atomic_add_64(&arc_c, -to_free);
3075		else
3076			arc_c = arc_c_min;
3077
3078		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
3079		if (arc_c > arc_size)
3080			arc_c = MAX(arc_size, arc_c_min);
3081		if (arc_p > arc_c)
3082			arc_p = (arc_c >> 1);
3083
3084		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
3085			arc_p);
3086
3087		ASSERT(arc_c >= arc_c_min);
3088		ASSERT((int64_t)arc_p >= 0);
3089	}
3090
3091	if (arc_size > arc_c) {
3092		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
3093			uint64_t, arc_c);
3094		arc_adjust();
3095	}
3096}
3097
3098static long needfree = 0;
3099
3100typedef enum free_memory_reason_t {
3101	FMR_UNKNOWN,
3102	FMR_NEEDFREE,
3103	FMR_LOTSFREE,
3104	FMR_SWAPFS_MINFREE,
3105	FMR_PAGES_PP_MAXIMUM,
3106	FMR_HEAP_ARENA,
3107	FMR_ZIO_ARENA,
3108	FMR_ZIO_FRAG,
3109} free_memory_reason_t;
3110
3111int64_t last_free_memory;
3112free_memory_reason_t last_free_reason;
3113
3114/*
3115 * Additional reserve of pages for pp_reserve.
3116 */
3117int64_t arc_pages_pp_reserve = 64;
3118
3119/*
3120 * Additional reserve of pages for swapfs.
3121 */
3122int64_t arc_swapfs_reserve = 64;
3123
3124/*
3125 * Return the amount of memory that can be consumed before reclaim will be
3126 * needed.  Positive if there is sufficient free memory, negative indicates
3127 * the amount of memory that needs to be freed up.
3128 */
3129static int64_t
3130arc_available_memory(void)
3131{
3132	int64_t lowest = INT64_MAX;
3133	int64_t n;
3134	free_memory_reason_t r = FMR_UNKNOWN;
3135
3136#ifdef _KERNEL
3137	if (needfree > 0) {
3138		n = PAGESIZE * (-needfree);
3139		if (n < lowest) {
3140			lowest = n;
3141			r = FMR_NEEDFREE;
3142		}
3143	}
3144
3145	/*
3146	 * Cooperate with pagedaemon when it's time for it to scan
3147	 * and reclaim some pages.
3148	 */
3149	n = PAGESIZE * (int64_t)(freemem - zfs_arc_free_target);
3150	if (n < lowest) {
3151		lowest = n;
3152		r = FMR_LOTSFREE;
3153	}
3154
3155#ifdef sun
3156	/*
3157	 * check that we're out of range of the pageout scanner.  It starts to
3158	 * schedule paging if freemem is less than lotsfree and needfree.
3159	 * lotsfree is the high-water mark for pageout, and needfree is the
3160	 * number of needed free pages.  We add extra pages here to make sure
3161	 * the scanner doesn't start up while we're freeing memory.
3162	 */
3163	n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
3164	if (n < lowest) {
3165		lowest = n;
3166		r = FMR_LOTSFREE;
3167	}
3168
3169	/*
3170	 * check to make sure that swapfs has enough space so that anon
3171	 * reservations can still succeed. anon_resvmem() checks that the
3172	 * availrmem is greater than swapfs_minfree, and the number of reserved
3173	 * swap pages.  We also add a bit of extra here just to prevent
3174	 * circumstances from getting really dire.
3175	 */
3176	n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
3177	    desfree - arc_swapfs_reserve);
3178	if (n < lowest) {
3179		lowest = n;
3180		r = FMR_SWAPFS_MINFREE;
3181	}
3182
3183
3184	/*
3185	 * Check that we have enough availrmem that memory locking (e.g., via
3186	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
3187	 * stores the number of pages that cannot be locked; when availrmem
3188	 * drops below pages_pp_maximum, page locking mechanisms such as
3189	 * page_pp_lock() will fail.)
3190	 */
3191	n = PAGESIZE * (availrmem - pages_pp_maximum -
3192	    arc_pages_pp_reserve);
3193	if (n < lowest) {
3194		lowest = n;
3195		r = FMR_PAGES_PP_MAXIMUM;
3196	}
3197
3198#endif	/* sun */
3199#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
3200	/*
3201	 * If we're on an i386 platform, it's possible that we'll exhaust the
3202	 * kernel heap space before we ever run out of available physical
3203	 * memory.  Most checks of the size of the heap_area compare against
3204	 * tune.t_minarmem, which is the minimum available real memory that we
3205	 * can have in the system.  However, this is generally fixed at 25 pages
3206	 * which is so low that it's useless.  In this comparison, we seek to
3207	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
3208	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
3209	 * free)
3210	 */
3211	n = vmem_size(heap_arena, VMEM_FREE) -
3212	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
3213	if (n < lowest) {
3214		lowest = n;
3215		r = FMR_HEAP_ARENA;
3216	}
3217#define	zio_arena	NULL
3218#else
3219#define	zio_arena	heap_arena
3220#endif
3221
3222	/*
3223	 * If zio data pages are being allocated out of a separate heap segment,
3224	 * then enforce that the size of available vmem for this arena remains
3225	 * above about 1/16th free.
3226	 *
3227	 * Note: The 1/16th arena free requirement was put in place
3228	 * to aggressively evict memory from the arc in order to avoid
3229	 * memory fragmentation issues.
3230	 */
3231	if (zio_arena != NULL) {
3232		n = vmem_size(zio_arena, VMEM_FREE) -
3233		    (vmem_size(zio_arena, VMEM_ALLOC) >> 4);
3234		if (n < lowest) {
3235			lowest = n;
3236			r = FMR_ZIO_ARENA;
3237		}
3238	}
3239
3240	/*
3241	 * Above limits know nothing about real level of KVA fragmentation.
3242	 * Start aggressive reclamation if too little sequential KVA left.
3243	 */
3244	if (lowest > 0) {
3245		n = (vmem_size(heap_arena, VMEM_MAXFREE) < zfs_max_recordsize) ?
3246		    -(vmem_size(heap_arena, VMEM_ALLOC) >> 4) : INT64_MAX;
3247		if (n < lowest) {
3248			lowest = n;
3249			r = FMR_ZIO_FRAG;
3250		}
3251	}
3252
3253#else	/* _KERNEL */
3254	/* Every 100 calls, free a small amount */
3255	if (spa_get_random(100) == 0)
3256		lowest = -1024;
3257#endif	/* _KERNEL */
3258
3259	last_free_memory = lowest;
3260	last_free_reason = r;
3261	DTRACE_PROBE2(arc__available_memory, int64_t, lowest, int, r);
3262	return (lowest);
3263}
3264
3265
3266/*
3267 * Determine if the system is under memory pressure and is asking
3268 * to reclaim memory. A return value of TRUE indicates that the system
3269 * is under memory pressure and that the arc should adjust accordingly.
3270 */
3271static boolean_t
3272arc_reclaim_needed(void)
3273{
3274	return (arc_available_memory() < 0);
3275}
3276
3277extern kmem_cache_t	*zio_buf_cache[];
3278extern kmem_cache_t	*zio_data_buf_cache[];
3279extern kmem_cache_t	*range_seg_cache;
3280
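/*
 * Ask the kmem caches used by the ARC (and, where present, the zio arena's
 * quantum caches) to return unused memory to the system.
 */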
3281static __noinline void
3282arc_kmem_reap_now(void)
3283{
3284	size_t			i;
3285	kmem_cache_t		*prev_cache = NULL;
3286	kmem_cache_t		*prev_data_cache = NULL;
3287
3288	DTRACE_PROBE(arc__kmem_reap_start);
3289#ifdef _KERNEL
3290	if (arc_meta_used >= arc_meta_limit) {
3291		/*
3292		 * We are exceeding our meta-data cache limit.
3293		 * Purge some DNLC entries to release holds on meta-data.
3294		 */
3295		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
3296	}
3297#if defined(__i386)
3298	/*
3299	 * Reclaim unused memory from all kmem caches.
3300	 */
3301	kmem_reap();
3302#endif
3303#endif
3304
3305	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
3306		if (zio_buf_cache[i] != prev_cache) {
3307			prev_cache = zio_buf_cache[i];
3308			kmem_cache_reap_now(zio_buf_cache[i]);
3309		}
3310		if (zio_data_buf_cache[i] != prev_data_cache) {
3311			prev_data_cache = zio_data_buf_cache[i];
3312			kmem_cache_reap_now(zio_data_buf_cache[i]);
3313		}
3314	}
3315	kmem_cache_reap_now(buf_cache);
3316	kmem_cache_reap_now(hdr_full_cache);
3317	kmem_cache_reap_now(hdr_l2only_cache);
3318	kmem_cache_reap_now(range_seg_cache);
3319
3320#ifdef sun
3321	if (zio_arena != NULL) {
3322		/*
3323		 * Ask the vmem arena to reclaim unused memory from its
3324		 * quantum caches.
3325		 */
3326		vmem_qcache_reap(zio_arena);
3327	}
3328#endif
3329	DTRACE_PROBE(arc__kmem_reap_end);
3330}
3331
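/*
 * ARC reclaim thread: when memory runs low, reap the kmem caches and shrink
 * the ARC; otherwise periodically run arc_adjust() and process any pending
 * user evictions, waking up at least once a second.
 */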
3332static void
3333arc_reclaim_thread(void *dummy __unused)
3334{
3335	clock_t			growtime = 0;
3336	callb_cpr_t		cpr;
3337
3338	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
3339
3340	mutex_enter(&arc_reclaim_thr_lock);
3341	while (arc_thread_exit == 0) {
3342		int64_t free_memory = arc_available_memory();
3343		if (free_memory < 0) {
3344
3345			arc_no_grow = B_TRUE;
3346			arc_warm = B_TRUE;
3347
3348			/*
3349			 * Wait at least zfs_grow_retry (default 60) seconds
3350			 * before considering growing.
3351			 */
3352			growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
3353
3354			arc_kmem_reap_now();
3355
3356			/*
3357			 * If we are still low on memory, shrink the ARC
3358			 * so that we have arc_shrink_min free space.
3359			 */
3360			free_memory = arc_available_memory();
3361
3362			int64_t to_free =
3363			    (arc_c >> arc_shrink_shift) - free_memory;
3364			if (to_free > 0) {
3365#ifdef _KERNEL
3366				to_free = MAX(to_free, ptob(needfree));
3367#endif
3368				arc_shrink(to_free);
3369			}
3370		} else if (free_memory < arc_c >> arc_no_grow_shift) {
3371			arc_no_grow = B_TRUE;
3372		} else if (ddi_get_lbolt() >= growtime) {
3373			arc_no_grow = B_FALSE;
3374		}
3375
3376		arc_adjust();
3377
3378		if (arc_eviction_list != NULL)
3379			arc_do_user_evicts();
3380
3381#ifdef _KERNEL
3382		if (needfree) {
3383			needfree = 0;
3384			wakeup(&needfree);
3385		}
3386#endif
3387
3388		/*
3389		 * This is necessary in order for the mdb ::arc dcmd to
3390		 * show up to date information. Since the ::arc command
3391		 * does not call the kstat's update function, without
3392		 * this call, the command may show stale stats for the
3393		 * anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
3394		 * with this change, the data might be up to 1 second
3395		 * out of date; but that should suffice. The arc_state_t
3396		 * structures can be queried directly if more accurate
3397		 * information is needed.
3398		 */
3399		if (arc_ksp != NULL)
3400			arc_ksp->ks_update(arc_ksp, KSTAT_READ);
3401
3402		/* block until needed, or one second, whichever is shorter */
3403		CALLB_CPR_SAFE_BEGIN(&cpr);
3404		(void) cv_timedwait(&arc_reclaim_thr_cv,
3405		    &arc_reclaim_thr_lock, hz);
3406		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
3407	}
3408
3409	arc_thread_exit = 0;
3410	cv_broadcast(&arc_reclaim_thr_cv);
3411	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
3412	thread_exit();
3413}
3414
3415/*
3416 * Adapt arc info given the number of bytes we are trying to add and
3417 * the state that we are comming from.  This function is only called
3418 * the state that we are coming from.  This function is only called
3419 */
3420static void
3421arc_adapt(int bytes, arc_state_t *state)
3422{
3423	int mult;
3424	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
3425
3426	if (state == arc_l2c_only)
3427		return;
3428
3429	ASSERT(bytes > 0);
3430	/*
3431	 * Adapt the target size of the MRU list:
3432	 *	- if we just hit in the MRU ghost list, then increase
3433	 *	  the target size of the MRU list.
3434	 *	- if we just hit in the MFU ghost list, then increase
3435	 *	  the target size of the MFU list by decreasing the
3436	 *	  target size of the MRU list.
3437	 */
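	/*
	 * Worked example (illustrative figures only, not derived from a
	 * real workload): with arc_c = 1 GB and arc_p_min_shift = 4,
	 * arc_p_min is 64 MB.  A 128 KB hit in the MRU ghost list while
	 * the MFU ghost list is three times the size of the MRU ghost
	 * list yields mult = 3, so arc_p grows by 384 KB (clamped to
	 * arc_c - arc_p_min).  A hit in the MFU ghost list shrinks arc_p
	 * symmetrically, never below arc_p_min.
	 */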
3438	if (state == arc_mru_ghost) {
3439		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
3440		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
3441		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
3442
3443		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
3444	} else if (state == arc_mfu_ghost) {
3445		uint64_t delta;
3446
3447		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
3448		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
3449		mult = MIN(mult, 10);
3450
3451		delta = MIN(bytes * mult, arc_p);
3452		arc_p = MAX(arc_p_min, arc_p - delta);
3453	}
3454	ASSERT((int64_t)arc_p >= 0);
3455
3456	if (arc_reclaim_needed()) {
3457		cv_signal(&arc_reclaim_thr_cv);
3458		return;
3459	}
3460
3461	if (arc_no_grow)
3462		return;
3463
3464	if (arc_c >= arc_c_max)
3465		return;
3466
3467	/*
3468	 * If we're within (2 * maxblocksize) bytes of the target
3469	 * cache size, increment the target cache size
3470	 */
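	/*
	 * For instance, assuming SPA_MAXBLOCKSHIFT is 17 (128K blocks),
	 * the threshold below is 256K: arc_c is only bumped while arc_size
	 * is within 256K of the current target.
	 */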
3471	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
3472		DTRACE_PROBE1(arc__inc_adapt, int, bytes);
3473		atomic_add_64(&arc_c, (int64_t)bytes);
3474		if (arc_c > arc_c_max)
3475			arc_c = arc_c_max;
3476		else if (state == arc_anon)
3477			atomic_add_64(&arc_p, (int64_t)bytes);
3478		if (arc_p > arc_c)
3479			arc_p = arc_c;
3480	}
3481	ASSERT((int64_t)arc_p >= 0);
3482}
3483
3484/*
3485 * Check if the cache has reached its limits and eviction is required
3486 * prior to insert.
3487 */
3488static int
3489arc_evict_needed(arc_buf_contents_t type)
3490{
3491	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
3492		return (1);
3493
3494	if (arc_reclaim_needed())
3495		return (1);
3496
3497	return (arc_size > arc_c);
3498}
3499
3500/*
3501 * The buffer, supplied as the first argument, needs a data block.
3502 * So, if we are at cache max, determine which cache should be victimized.
3503 * We have the following cases:
3504 *
3505 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
3506 * In this situation if we're out of space, but the resident size of the MFU is
3507 * under the limit, victimize the MFU cache to satisfy this insertion request.
3508 *
3509 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
3510 * Here, we've used up all of the available space for the MRU, so we need to
3511 * evict from our own cache instead.  Evict from the set of resident MRU
3512 * entries.
3513 *
3514 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
3515 * c minus p represents the MFU space in the cache, since p is the size of the
3516 * cache that is dedicated to the MRU.  In this situation there's still space on
3517 * the MFU side, so the MRU side needs to be victimized.
3518 *
3519 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
3520 * MFU's resident set is consuming more space than it has been allotted.  In
3521 * this situation, we must victimize our own cache, the MFU, for this insertion.
3522 */
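/*
 * A concrete (hypothetical) instance of the cases above: suppose arc_c is
 * 1 GB, arc_p is 600 MB, and a 128 KB buffer is being inserted on the MRU
 * side while arc_anon + arc_mru already hold 650 MB.  That is case 2
 * (p <= sizeof(arc_anon + arc_mru)), so the eviction below targets arc_mru
 * itself.  Had the resident MRU total been only 500 MB, case 1 would apply
 * and arc_mfu would be victimized instead, provided it holds at least
 * 128 KB of evictable data of the requested type.
 */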
3523static void
3524arc_get_data_buf(arc_buf_t *buf)
3525{
3526	arc_state_t		*state = buf->b_hdr->b_l1hdr.b_state;
3527	uint64_t		size = buf->b_hdr->b_size;
3528	arc_buf_contents_t	type = arc_buf_type(buf->b_hdr);
3529
3530	arc_adapt(size, state);
3531
3532	/*
3533	 * We have not yet reached cache maximum size,
3534	 * just allocate a new buffer.
3535	 */
3536	if (!arc_evict_needed(type)) {
3537		if (type == ARC_BUFC_METADATA) {
3538			buf->b_data = zio_buf_alloc(size);
3539			arc_space_consume(size, ARC_SPACE_META);
3540		} else {
3541			ASSERT(type == ARC_BUFC_DATA);
3542			buf->b_data = zio_data_buf_alloc(size);
3543			arc_space_consume(size, ARC_SPACE_DATA);
3544		}
3545		goto out;
3546	}
3547
3548	/*
3549	 * If we are prefetching from the mfu ghost list, this buffer
3550	 * will end up on the mru list; so steal space from there.
3551	 * will end up on the mru list, so steal space from there.
3552	if (state == arc_mfu_ghost)
3553		state = HDR_PREFETCH(buf->b_hdr) ? arc_mru : arc_mfu;
3554	else if (state == arc_mru_ghost)
3555		state = arc_mru;
3556
3557	if (state == arc_mru || state == arc_anon) {
3558		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
3559		state = (arc_mfu->arcs_lsize[type] >= size &&
3560		    arc_p > mru_used) ? arc_mfu : arc_mru;
3561	} else {
3562		/* MFU cases */
3563		uint64_t mfu_space = arc_c - arc_p;
3564		state =  (arc_mru->arcs_lsize[type] >= size &&
3565		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
3566	}
3567	if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
3568		if (type == ARC_BUFC_METADATA) {
3569			buf->b_data = zio_buf_alloc(size);
3570			arc_space_consume(size, ARC_SPACE_META);
3571		} else {
3572			ASSERT(type == ARC_BUFC_DATA);
3573			buf->b_data = zio_data_buf_alloc(size);
3574			arc_space_consume(size, ARC_SPACE_DATA);
3575		}
3576		ARCSTAT_BUMP(arcstat_recycle_miss);
3577	}
3578	ASSERT(buf->b_data != NULL);
3579out:
3580	/*
3581	 * Update the state size.  Note that ghost states have a
3582	 * "ghost size" and so don't need to be updated.
3583	 */
3584	if (!GHOST_STATE(buf->b_hdr->b_l1hdr.b_state)) {
3585		arc_buf_hdr_t *hdr = buf->b_hdr;
3586
3587		atomic_add_64(&hdr->b_l1hdr.b_state->arcs_size, size);
3588		if (list_link_active(&hdr->b_l1hdr.b_arc_node)) {
3589			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3590			atomic_add_64(&hdr->b_l1hdr.b_state->arcs_lsize[type],
3591			    size);
3592		}
3593		/*
3594		 * If we are growing the cache, and we are adding anonymous
3595		 * data, and we have outgrown arc_p, update arc_p
3596		 */
3597		if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
3598		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
3599			arc_p = MIN(arc_c, arc_p + size);
3600	}
3601	ARCSTAT_BUMP(arcstat_allocated);
3602}
3603
3604/*
3605 * This routine is called whenever a buffer is accessed.
3606 * NOTE: the hash lock is dropped in this function.
3607 */
3608static void
3609arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
3610{
3611	clock_t now;
3612
3613	ASSERT(MUTEX_HELD(hash_lock));
3614	ASSERT(HDR_HAS_L1HDR(hdr));
3615
3616	if (hdr->b_l1hdr.b_state == arc_anon) {
3617		/*
3618		 * This buffer is not in the cache, and does not
3619		 * appear in our "ghost" list.  Add the new buffer
3620		 * to the MRU state.
3621		 */
3622
3623		ASSERT0(hdr->b_l1hdr.b_arc_access);
3624		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3625		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
3626		arc_change_state(arc_mru, hdr, hash_lock);
3627
3628	} else if (hdr->b_l1hdr.b_state == arc_mru) {
3629		now = ddi_get_lbolt();
3630
3631		/*
3632		 * If this buffer is here because of a prefetch, then either:
3633		 * - clear the flag if this is a "referencing" read
3634		 *   (any subsequent access will bump this into the MFU state).
3635		 * or
3636		 * - move the buffer to the head of the list if this is
3637		 *   another prefetch (to make it less likely to be evicted).
3638		 */
3639		if (HDR_PREFETCH(hdr)) {
3640			if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
3641				ASSERT(list_link_active(
3642				    &hdr->b_l1hdr.b_arc_node));
3643			} else {
3644				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
3645				ARCSTAT_BUMP(arcstat_mru_hits);
3646			}
3647			hdr->b_l1hdr.b_arc_access = now;
3648			return;
3649		}
3650
3651		/*
3652		 * This buffer has been "accessed" only once so far,
3653		 * but it is still in the cache. Move it to the MFU
3654		 * state.
3655		 */
3656		if (now > hdr->b_l1hdr.b_arc_access + ARC_MINTIME) {
3657			/*
3658			 * More than 125ms have passed since we
3659			 * instantiated this buffer.  Move it to the
3660			 * most frequently used state.
3661			 */
3662			hdr->b_l1hdr.b_arc_access = now;
3663			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3664			arc_change_state(arc_mfu, hdr, hash_lock);
3665		}
3666		ARCSTAT_BUMP(arcstat_mru_hits);
3667	} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
3668		arc_state_t	*new_state;
3669		/*
3670		 * This buffer has been "accessed" recently, but
3671		 * was evicted from the cache.  Move it to the
3672		 * MFU state.
3673		 */
3674
3675		if (HDR_PREFETCH(hdr)) {
3676			new_state = arc_mru;
3677			if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0)
3678				hdr->b_flags &= ~ARC_FLAG_PREFETCH;
3679			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
3680		} else {
3681			new_state = arc_mfu;
3682			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3683		}
3684
3685		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3686		arc_change_state(new_state, hdr, hash_lock);
3687
3688		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
3689	} else if (hdr->b_l1hdr.b_state == arc_mfu) {
3690		/*
3691		 * This buffer has been accessed more than once and is
3692		 * still in the cache.  Keep it in the MFU state.
3693		 *
3694		 * NOTE: an add_reference() that occurred when we did
3695		 * the arc_read() will have kicked this off the list.
3696		 * If it was a prefetch, we will explicitly move it to
3697		 * the head of the list now.
3698		 */
3699		if ((HDR_PREFETCH(hdr)) != 0) {
3700			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
3701			ASSERT(list_link_active(&hdr->b_l1hdr.b_arc_node));
3702		}
3703		ARCSTAT_BUMP(arcstat_mfu_hits);
3704		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3705	} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
3706		arc_state_t	*new_state = arc_mfu;
3707		/*
3708		 * This buffer has been accessed more than once but has
3709		 * been evicted from the cache.  Move it back to the
3710		 * MFU state.
3711		 */
3712
3713		if (HDR_PREFETCH(hdr)) {
3714			/*
3715			 * This is a prefetch access...
3716			 * move this block back to the MRU state.
3717			 */
3718			ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
3719			new_state = arc_mru;
3720		}
3721
3722		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3723		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3724		arc_change_state(new_state, hdr, hash_lock);
3725
3726		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
3727	} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
3728		/*
3729		 * This buffer is on the 2nd Level ARC.
3730		 */
3731
3732		hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
3733		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
3734		arc_change_state(arc_mfu, hdr, hash_lock);
3735	} else {
3736		ASSERT(!"invalid arc state");
3737	}
3738}
3739
3740/* a generic arc_done_func_t which you can use */
3741/* ARGSUSED */
3742void
3743arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
3744{
3745	if (zio == NULL || zio->io_error == 0)
3746		bcopy(buf->b_data, arg, buf->b_hdr->b_size);
3747	VERIFY(arc_buf_remove_ref(buf, arg));
3748}
3749
3750/* a generic arc_done_func_t */
3751void
3752arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
3753{
3754	arc_buf_t **bufp = arg;
3755	if (zio && zio->io_error) {
3756		VERIFY(arc_buf_remove_ref(buf, arg));
3757		*bufp = NULL;
3758	} else {
3759		*bufp = buf;
3760		ASSERT(buf->b_data);
3761	}
3762}
3763
3764static void
3765arc_read_done(zio_t *zio)
3766{
3767	arc_buf_hdr_t	*hdr;
3768	arc_buf_t	*buf;
3769	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
3770	kmutex_t	*hash_lock = NULL;
3771	arc_callback_t	*callback_list, *acb;
3772	int		freeable = FALSE;
3773
3774	buf = zio->io_private;
3775	hdr = buf->b_hdr;
3776
3777	/*
3778	 * The hdr was inserted into hash-table and removed from lists
3779	 * prior to starting I/O.  We should find this header, since
3780	 * it's in the hash table, and it should be legit since it's
3781	 * not possible to evict it during the I/O.  The only possible
3782	 * reason for it not to be found is if we were freed during the
3783	 * read.
3784	 */
3785	if (HDR_IN_HASH_TABLE(hdr)) {
3786		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3787		ASSERT3U(hdr->b_dva.dva_word[0], ==,
3788		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
3789		ASSERT3U(hdr->b_dva.dva_word[1], ==,
3790		    BP_IDENTITY(zio->io_bp)->dva_word[1]);
3791
3792		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3793		    &hash_lock);
3794
3795		ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3796		    hash_lock == NULL) ||
3797		    (found == hdr &&
3798		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3799		    (found == hdr && HDR_L2_READING(hdr)));
3800	}
3801
3802	hdr->b_flags &= ~ARC_FLAG_L2_EVICTED;
3803	if (l2arc_noprefetch && HDR_PREFETCH(hdr))
3804		hdr->b_flags &= ~ARC_FLAG_L2CACHE;
3805
3806	/* byteswap if necessary */
3807	callback_list = hdr->b_l1hdr.b_acb;
3808	ASSERT(callback_list != NULL);
3809	if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3810		dmu_object_byteswap_t bswap =
3811		    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3812		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3813		    byteswap_uint64_array :
3814		    dmu_ot_byteswap[bswap].ob_func;
3815		func(buf->b_data, hdr->b_size);
3816	}
3817
3818	arc_cksum_compute(buf, B_FALSE);
3819#ifdef illumos
3820	arc_buf_watch(buf);
3821#endif /* illumos */
3822
3823	if (hash_lock && zio->io_error == 0 &&
3824	    hdr->b_l1hdr.b_state == arc_anon) {
3825		/*
3826		 * Only call arc_access on anonymous buffers.  This is because
3827		 * if we've issued an I/O for an evicted buffer, we've already
3828		 * called arc_access (to prevent any simultaneous readers from
3829		 * getting confused).
3830		 */
3831		arc_access(hdr, hash_lock);
3832	}
3833
3834	/* create copies of the data buffer for the callers */
3835	abuf = buf;
3836	for (acb = callback_list; acb; acb = acb->acb_next) {
3837		if (acb->acb_done) {
3838			if (abuf == NULL) {
3839				ARCSTAT_BUMP(arcstat_duplicate_reads);
3840				abuf = arc_buf_clone(buf);
3841			}
3842			acb->acb_buf = abuf;
3843			abuf = NULL;
3844		}
3845	}
3846	hdr->b_l1hdr.b_acb = NULL;
3847	hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
3848	ASSERT(!HDR_BUF_AVAILABLE(hdr));
3849	if (abuf == buf) {
3850		ASSERT(buf->b_efunc == NULL);
3851		ASSERT(hdr->b_l1hdr.b_datacnt == 1);
3852		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
3853	}
3854
3855	ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
3856	    callback_list != NULL);
3857
3858	if (zio->io_error != 0) {
3859		hdr->b_flags |= ARC_FLAG_IO_ERROR;
3860		if (hdr->b_l1hdr.b_state != arc_anon)
3861			arc_change_state(arc_anon, hdr, hash_lock);
3862		if (HDR_IN_HASH_TABLE(hdr))
3863			buf_hash_remove(hdr);
3864		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
3865	}
3866
3867	/*
3868	 * Broadcast before we drop the hash_lock to avoid the possibility
3869	 * that the hdr (and hence the cv) might be freed before we get to
3870	 * the cv_broadcast().
3871	 */
3872	cv_broadcast(&hdr->b_l1hdr.b_cv);
3873
3874	if (hash_lock != NULL) {
3875		mutex_exit(hash_lock);
3876	} else {
3877		/*
3878		 * This block was freed while we waited for the read to
3879		 * complete.  It has been removed from the hash table and
3880		 * moved to the anonymous state (so that it won't show up
3881		 * in the cache).
3882		 */
3883		ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
3884		freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
3885	}
3886
3887	/* execute each callback and free its structure */
3888	while ((acb = callback_list) != NULL) {
3889		if (acb->acb_done)
3890			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3891
3892		if (acb->acb_zio_dummy != NULL) {
3893			acb->acb_zio_dummy->io_error = zio->io_error;
3894			zio_nowait(acb->acb_zio_dummy);
3895		}
3896
3897		callback_list = acb->acb_next;
3898		kmem_free(acb, sizeof (arc_callback_t));
3899	}
3900
3901	if (freeable)
3902		arc_hdr_destroy(hdr);
3903}
3904
3905/*
3906 * "Read" the block at the specified DVA (in bp) via the
3907 * cache.  If the block is found in the cache, invoke the provided
3908 * callback immediately and return.  Note that the `zio' parameter
3909 * in the callback will be NULL in this case, since no IO was
3910 * required.  If the block is not in the cache pass the read request
3911 * on to the spa with a substitute callback function, so that the
3912 * requested block will be added to the cache.
3913 *
3914 * If a read request arrives for a block that has a read in-progress,
3915 * either wait for the in-progress read to complete (and return the
3916 * results); or, if this is a read with a "done" func, add a record
3917 * to the read to invoke the "done" func when the read completes,
3918 * and return; or just return.
3919 *
3920 * arc_read_done() will invoke all the requested "done" functions
3921 * for readers of this block.
3922 */
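/*
 * A minimal sketch of a typical blocking caller (hypothetical snippet, not
 * part of this file): read one block synchronously and receive the arc_buf_t
 * through arc_getbuf_func() above.
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	zbookmark_phys_t zb;
 *
 *	SET_BOOKMARK(&zb, objset, object, level, blkid);
 *	error = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb);
 *	if (error == 0 && abuf != NULL) {
 *		... use abuf->b_data ...
 *		(void) arc_buf_remove_ref(abuf, &abuf);
 *	}
 *
 * With ARC_FLAG_WAIT the call returns only after the "done" callback has
 * run; with ARC_FLAG_NOWAIT it returns immediately and the callback fires
 * from the zio pipeline instead.
 */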
3923int
3924arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3925    void *private, zio_priority_t priority, int zio_flags,
3926    arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
3927{
3928	arc_buf_hdr_t *hdr = NULL;
3929	arc_buf_t *buf = NULL;
3930	kmutex_t *hash_lock = NULL;
3931	zio_t *rzio;
3932	uint64_t guid = spa_load_guid(spa);
3933
3934	ASSERT(!BP_IS_EMBEDDED(bp) ||
3935	    BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
3936
3937top:
3938	if (!BP_IS_EMBEDDED(bp)) {
3939		/*
3940		 * Embedded BP's have no DVA and require no I/O to "read".
3941		 * Create an anonymous arc buf to back it.
3942		 */
3943		hdr = buf_hash_find(guid, bp, &hash_lock);
3944	}
3945
3946	if (hdr != NULL && HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_datacnt > 0) {
3947
3948		*arc_flags |= ARC_FLAG_CACHED;
3949
3950		if (HDR_IO_IN_PROGRESS(hdr)) {
3951
3952			if (*arc_flags & ARC_FLAG_WAIT) {
3953				cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
3954				mutex_exit(hash_lock);
3955				goto top;
3956			}
3957			ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
3958
3959			if (done) {
3960				arc_callback_t	*acb = NULL;
3961
3962				acb = kmem_zalloc(sizeof (arc_callback_t),
3963				    KM_SLEEP);
3964				acb->acb_done = done;
3965				acb->acb_private = private;
3966				if (pio != NULL)
3967					acb->acb_zio_dummy = zio_null(pio,
3968					    spa, NULL, NULL, NULL, zio_flags);
3969
3970				ASSERT(acb->acb_done != NULL);
3971				acb->acb_next = hdr->b_l1hdr.b_acb;
3972				hdr->b_l1hdr.b_acb = acb;
3973				add_reference(hdr, hash_lock, private);
3974				mutex_exit(hash_lock);
3975				return (0);
3976			}
3977			mutex_exit(hash_lock);
3978			return (0);
3979		}
3980
3981		ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
3982		    hdr->b_l1hdr.b_state == arc_mfu);
3983
3984		if (done) {
3985			add_reference(hdr, hash_lock, private);
3986			/*
3987			 * If this block is already in use, create a new
3988			 * copy of the data so that we will be guaranteed
3989			 * that arc_release() will always succeed.
3990			 */
3991			buf = hdr->b_l1hdr.b_buf;
3992			ASSERT(buf);
3993			ASSERT(buf->b_data);
3994			if (HDR_BUF_AVAILABLE(hdr)) {
3995				ASSERT(buf->b_efunc == NULL);
3996				hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
3997			} else {
3998				buf = arc_buf_clone(buf);
3999			}
4000
4001		} else if (*arc_flags & ARC_FLAG_PREFETCH &&
4002		    refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
4003			hdr->b_flags |= ARC_FLAG_PREFETCH;
4004		}
4005		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
4006		arc_access(hdr, hash_lock);
4007		if (*arc_flags & ARC_FLAG_L2CACHE)
4008			hdr->b_flags |= ARC_FLAG_L2CACHE;
4009		if (*arc_flags & ARC_FLAG_L2COMPRESS)
4010			hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4011		mutex_exit(hash_lock);
4012		ARCSTAT_BUMP(arcstat_hits);
4013		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4014		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4015		    data, metadata, hits);
4016
4017		if (done)
4018			done(NULL, buf, private);
4019	} else {
4020		uint64_t size = BP_GET_LSIZE(bp);
4021		arc_callback_t *acb;
4022		vdev_t *vd = NULL;
4023		uint64_t addr = 0;
4024		boolean_t devw = B_FALSE;
4025		enum zio_compress b_compress = ZIO_COMPRESS_OFF;
4026		int32_t b_asize = 0;
4027
4028		if (hdr == NULL) {
4029			/* this block is not in the cache */
4030			arc_buf_hdr_t *exists = NULL;
4031			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
4032			buf = arc_buf_alloc(spa, size, private, type);
4033			hdr = buf->b_hdr;
4034			if (!BP_IS_EMBEDDED(bp)) {
4035				hdr->b_dva = *BP_IDENTITY(bp);
4036				hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
4037				exists = buf_hash_insert(hdr, &hash_lock);
4038			}
4039			if (exists != NULL) {
4040				/* somebody beat us to the hash insert */
4041				mutex_exit(hash_lock);
4042				buf_discard_identity(hdr);
4043				(void) arc_buf_remove_ref(buf, private);
4044				goto top; /* restart the IO request */
4045			}
4046
4047			/* if this is a prefetch, we don't have a reference */
4048			if (*arc_flags & ARC_FLAG_PREFETCH) {
4049				(void) remove_reference(hdr, hash_lock,
4050				    private);
4051				hdr->b_flags |= ARC_FLAG_PREFETCH;
4052			}
4053			if (*arc_flags & ARC_FLAG_L2CACHE)
4054				hdr->b_flags |= ARC_FLAG_L2CACHE;
4055			if (*arc_flags & ARC_FLAG_L2COMPRESS)
4056				hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4057			if (BP_GET_LEVEL(bp) > 0)
4058				hdr->b_flags |= ARC_FLAG_INDIRECT;
4059		} else {
4060			/*
4061			 * This block is in the ghost cache. If it was L2-only
4062			 * (and thus didn't have an L1 hdr), we realloc the
4063			 * header to add an L1 hdr.
4064			 */
4065			if (!HDR_HAS_L1HDR(hdr)) {
4066				hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
4067				    hdr_full_cache);
4068			}
4069
4070			ASSERT(GHOST_STATE(hdr->b_l1hdr.b_state));
4071			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4072			ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4073			ASSERT(hdr->b_l1hdr.b_buf == NULL);
4074
4075			/* if this is a prefetch, we don't have a reference */
4076			if (*arc_flags & ARC_FLAG_PREFETCH)
4077				hdr->b_flags |= ARC_FLAG_PREFETCH;
4078			else
4079				add_reference(hdr, hash_lock, private);
4080			if (*arc_flags & ARC_FLAG_L2CACHE)
4081				hdr->b_flags |= ARC_FLAG_L2CACHE;
4082			if (*arc_flags & ARC_FLAG_L2COMPRESS)
4083				hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4084			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
4085			buf->b_hdr = hdr;
4086			buf->b_data = NULL;
4087			buf->b_efunc = NULL;
4088			buf->b_private = NULL;
4089			buf->b_next = NULL;
4090			hdr->b_l1hdr.b_buf = buf;
4091			ASSERT0(hdr->b_l1hdr.b_datacnt);
4092			hdr->b_l1hdr.b_datacnt = 1;
4093			arc_get_data_buf(buf);
4094			arc_access(hdr, hash_lock);
4095		}
4096
4097		ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
4098
4099		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
4100		acb->acb_done = done;
4101		acb->acb_private = private;
4102
4103		ASSERT(hdr->b_l1hdr.b_acb == NULL);
4104		hdr->b_l1hdr.b_acb = acb;
4105		hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
4106
4107		if (HDR_HAS_L2HDR(hdr) &&
4108		    (vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
4109			devw = hdr->b_l2hdr.b_dev->l2ad_writing;
4110			addr = hdr->b_l2hdr.b_daddr;
4111			b_compress = HDR_GET_COMPRESS(hdr);
4112			b_asize = hdr->b_l2hdr.b_asize;
4113			/*
4114			 * Lock out device removal.
4115			 */
4116			if (vdev_is_dead(vd) ||
4117			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
4118				vd = NULL;
4119		}
4120
4121		if (hash_lock != NULL)
4122			mutex_exit(hash_lock);
4123
4124		/*
4125		 * At this point, we have a level 1 cache miss.  Try again in
4126		 * L2ARC if possible.
4127		 */
4128		ASSERT3U(hdr->b_size, ==, size);
4129		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
4130		    uint64_t, size, zbookmark_phys_t *, zb);
4131		ARCSTAT_BUMP(arcstat_misses);
4132		ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
4133		    demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
4134		    data, metadata, misses);
4135#ifdef _KERNEL
4136		curthread->td_ru.ru_inblock++;
4137#endif
4138
4139		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
4140			/*
4141			 * Read from the L2ARC if the following are true:
4142			 * 1. The L2ARC vdev was previously cached.
4143			 * 2. This buffer still has L2ARC metadata.
4144			 * 3. This buffer isn't currently writing to the L2ARC.
4145			 * 4. The L2ARC entry wasn't evicted, which may
4146			 *    also have invalidated the vdev.
4147			 * 5. This isn't a prefetch while l2arc_noprefetch is set.
4148			 */
4149			if (HDR_HAS_L2HDR(hdr) &&
4150			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
4151			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
4152				l2arc_read_callback_t *cb;
4153
4154				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
4155				ARCSTAT_BUMP(arcstat_l2_hits);
4156
4157				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
4158				    KM_SLEEP);
4159				cb->l2rcb_buf = buf;
4160				cb->l2rcb_spa = spa;
4161				cb->l2rcb_bp = *bp;
4162				cb->l2rcb_zb = *zb;
4163				cb->l2rcb_flags = zio_flags;
4164				cb->l2rcb_compress = b_compress;
4165
4166				ASSERT(addr >= VDEV_LABEL_START_SIZE &&
4167				    addr + size < vd->vdev_psize -
4168				    VDEV_LABEL_END_SIZE);
4169
4170				/*
4171				 * l2arc read.  The SCL_L2ARC lock will be
4172				 * released by l2arc_read_done().
4173				 * Issue a null zio if the underlying buffer
4174				 * was squashed to zero size by compression.
4175				 */
4176				if (b_compress == ZIO_COMPRESS_EMPTY) {
4177					rzio = zio_null(pio, spa, vd,
4178					    l2arc_read_done, cb,
4179					    zio_flags | ZIO_FLAG_DONT_CACHE |
4180					    ZIO_FLAG_CANFAIL |
4181					    ZIO_FLAG_DONT_PROPAGATE |
4182					    ZIO_FLAG_DONT_RETRY);
4183				} else {
4184					rzio = zio_read_phys(pio, vd, addr,
4185					    b_asize, buf->b_data,
4186					    ZIO_CHECKSUM_OFF,
4187					    l2arc_read_done, cb, priority,
4188					    zio_flags | ZIO_FLAG_DONT_CACHE |
4189					    ZIO_FLAG_CANFAIL |
4190					    ZIO_FLAG_DONT_PROPAGATE |
4191					    ZIO_FLAG_DONT_RETRY, B_FALSE);
4192				}
4193				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
4194				    zio_t *, rzio);
4195				ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
4196
4197				if (*arc_flags & ARC_FLAG_NOWAIT) {
4198					zio_nowait(rzio);
4199					return (0);
4200				}
4201
4202				ASSERT(*arc_flags & ARC_FLAG_WAIT);
4203				if (zio_wait(rzio) == 0)
4204					return (0);
4205
4206				/* l2arc read error; goto zio_read() */
4207			} else {
4208				DTRACE_PROBE1(l2arc__miss,
4209				    arc_buf_hdr_t *, hdr);
4210				ARCSTAT_BUMP(arcstat_l2_misses);
4211				if (HDR_L2_WRITING(hdr))
4212					ARCSTAT_BUMP(arcstat_l2_rw_clash);
4213				spa_config_exit(spa, SCL_L2ARC, vd);
4214			}
4215		} else {
4216			if (vd != NULL)
4217				spa_config_exit(spa, SCL_L2ARC, vd);
4218			if (l2arc_ndev != 0) {
4219				DTRACE_PROBE1(l2arc__miss,
4220				    arc_buf_hdr_t *, hdr);
4221				ARCSTAT_BUMP(arcstat_l2_misses);
4222			}
4223		}
4224
4225		rzio = zio_read(pio, spa, bp, buf->b_data, size,
4226		    arc_read_done, buf, priority, zio_flags, zb);
4227
4228		if (*arc_flags & ARC_FLAG_WAIT)
4229			return (zio_wait(rzio));
4230
4231		ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
4232		zio_nowait(rzio);
4233	}
4234	return (0);
4235}
4236
4237void
4238arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
4239{
4240	ASSERT(buf->b_hdr != NULL);
4241	ASSERT(buf->b_hdr->b_l1hdr.b_state != arc_anon);
4242	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt) ||
4243	    func == NULL);
4244	ASSERT(buf->b_efunc == NULL);
4245	ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
4246
4247	buf->b_efunc = func;
4248	buf->b_private = private;
4249}
4250
4251/*
4252 * Notify the arc that a block was freed, and thus will never be used again.
4253 */
4254void
4255arc_freed(spa_t *spa, const blkptr_t *bp)
4256{
4257	arc_buf_hdr_t *hdr;
4258	kmutex_t *hash_lock;
4259	uint64_t guid = spa_load_guid(spa);
4260
4261	ASSERT(!BP_IS_EMBEDDED(bp));
4262
4263	hdr = buf_hash_find(guid, bp, &hash_lock);
4264	if (hdr == NULL)
4265		return;
4266	if (HDR_BUF_AVAILABLE(hdr)) {
4267		arc_buf_t *buf = hdr->b_l1hdr.b_buf;
4268		add_reference(hdr, hash_lock, FTAG);
4269		hdr->b_flags &= ~ARC_FLAG_BUF_AVAILABLE;
4270		mutex_exit(hash_lock);
4271
4272		arc_release(buf, FTAG);
4273		(void) arc_buf_remove_ref(buf, FTAG);
4274	} else {
4275		mutex_exit(hash_lock);
4276	}
4277
4278}
4279
4280/*
4281 * Clear the user eviction callback set by arc_set_callback(), first calling
4282 * it if it exists.  Because the presence of a callback keeps an arc_buf cached,
4283 * clearing the callback may result in the arc_buf being destroyed.  However,
4284 * it will not result in the *last* arc_buf being destroyed, hence the data
4285 * will remain cached in the ARC.  We copy the callback and its argument into
4286 * locals here so that we can invoke the callback without holding any locks.
4287 *
4288 * It's possible that the callback is already in the process of being cleared
4289 * by another thread.  In this case we can not clear the callback.
4290 *
4291 * Returns B_TRUE if the callback was successfully called and cleared.
4292 */
4293boolean_t
4294arc_clear_callback(arc_buf_t *buf)
4295{
4296	arc_buf_hdr_t *hdr;
4297	kmutex_t *hash_lock;
4298	arc_evict_func_t *efunc = buf->b_efunc;
4299	void *private = buf->b_private;
4300	list_t *list, *evicted_list;
4301	kmutex_t *lock, *evicted_lock;
4302
4303	mutex_enter(&buf->b_evict_lock);
4304	hdr = buf->b_hdr;
4305	if (hdr == NULL) {
4306		/*
4307		 * We are in arc_do_user_evicts().
4308		 */
4309		ASSERT(buf->b_data == NULL);
4310		mutex_exit(&buf->b_evict_lock);
4311		return (B_FALSE);
4312	} else if (buf->b_data == NULL) {
4313		/*
4314		 * We are on the eviction list; process this buffer now
4315		 * but let arc_do_user_evicts() do the reaping.
4316		 */
4317		buf->b_efunc = NULL;
4318		mutex_exit(&buf->b_evict_lock);
4319		VERIFY0(efunc(private));
4320		return (B_TRUE);
4321	}
4322	hash_lock = HDR_LOCK(hdr);
4323	mutex_enter(hash_lock);
4324	hdr = buf->b_hdr;
4325	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4326
4327	ASSERT3U(refcount_count(&hdr->b_l1hdr.b_refcnt), <,
4328	    hdr->b_l1hdr.b_datacnt);
4329	ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
4330	    hdr->b_l1hdr.b_state == arc_mfu);
4331
4332	buf->b_efunc = NULL;
4333	buf->b_private = NULL;
4334
4335	if (hdr->b_l1hdr.b_datacnt > 1) {
4336		mutex_exit(&buf->b_evict_lock);
4337		arc_buf_destroy(buf, FALSE, TRUE);
4338	} else {
4339		ASSERT(buf == hdr->b_l1hdr.b_buf);
4340		hdr->b_flags |= ARC_FLAG_BUF_AVAILABLE;
4341		mutex_exit(&buf->b_evict_lock);
4342	}
4343
4344	mutex_exit(hash_lock);
4345	VERIFY0(efunc(private));
4346	return (B_TRUE);
4347}
4348
4349/*
4350 * Release this buffer from the cache, making it an anonymous buffer.  This
4351 * must be done after a read and prior to modifying the buffer contents.
4352 * If the buffer has more than one reference, we must make
4353 * a new hdr for the buffer.
4354 */
4355void
4356arc_release(arc_buf_t *buf, void *tag)
4357{
4358	arc_buf_hdr_t *hdr = buf->b_hdr;
4359
4360	/*
4361	 * It would be nice to assert that if it's DMU metadata (level >
4362	 * 0 || it's the dnode file), then it must be syncing context.
4363	 * But we don't know that information at this level.
4364	 */
4365
4366	mutex_enter(&buf->b_evict_lock);
4367	/*
4368	 * We don't grab the hash lock prior to this check, because if
4369	 * the buffer's header is in the arc_anon state, it won't be
4370	 * linked into the hash table.
4371	 */
4372	if (hdr->b_l1hdr.b_state == arc_anon) {
4373		mutex_exit(&buf->b_evict_lock);
4374		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4375		ASSERT(!HDR_IN_HASH_TABLE(hdr));
4376		ASSERT(!HDR_HAS_L2HDR(hdr));
4377		ASSERT(BUF_EMPTY(hdr));
4378		ASSERT3U(hdr->b_l1hdr.b_datacnt, ==, 1);
4379		ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
4380		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
4381
4382		ASSERT3P(buf->b_efunc, ==, NULL);
4383		ASSERT3P(buf->b_private, ==, NULL);
4384
4385		hdr->b_l1hdr.b_arc_access = 0;
4386		arc_buf_thaw(buf);
4387
4388		return;
4389	}
4390
4391	kmutex_t *hash_lock = HDR_LOCK(hdr);
4392	mutex_enter(hash_lock);
4393
4394	/*
4395	 * This assignment is only valid as long as the hash_lock is
4396	 * held, we must be careful not to reference state or the
4397	 * b_state field after dropping the lock.
4398	 */
4399	arc_state_t *state = hdr->b_l1hdr.b_state;
4400	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4401	ASSERT3P(state, !=, arc_anon);
4402
4403	/* this buffer is not on any list */
4404	ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) > 0);
4405
4406	if (HDR_HAS_L2HDR(hdr)) {
4407		mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
4408
4409		/*
4410		 * We have to recheck this conditional again now that
4411		 * we're holding the l2ad_mtx to prevent a race with
4412		 * another thread which might be concurrently calling
4413		 * l2arc_evict(). In that case, l2arc_evict() might have
4414		 * destroyed the header's L2 portion as we were waiting
4415		 * to acquire the l2ad_mtx.
4416		 */
4417		if (HDR_HAS_L2HDR(hdr)) {
4418			trim_map_free(hdr->b_l2hdr.b_dev->l2ad_vdev,
4419			    hdr->b_l2hdr.b_daddr, hdr->b_l2hdr.b_asize, 0);
4420			arc_hdr_l2hdr_destroy(hdr);
4421		}
4422
4423		mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
4424	}
4425
4426	/*
4427	 * Do we have more than one buf?
4428	 */
4429	if (hdr->b_l1hdr.b_datacnt > 1) {
4430		arc_buf_hdr_t *nhdr;
4431		arc_buf_t **bufp;
4432		uint64_t blksz = hdr->b_size;
4433		uint64_t spa = hdr->b_spa;
4434		arc_buf_contents_t type = arc_buf_type(hdr);
4435		uint32_t flags = hdr->b_flags;
4436
4437		ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
4438		/*
4439		 * Pull the data off of this hdr and attach it to
4440		 * a new anonymous hdr.
4441		 */
4442		(void) remove_reference(hdr, hash_lock, tag);
4443		bufp = &hdr->b_l1hdr.b_buf;
4444		while (*bufp != buf)
4445			bufp = &(*bufp)->b_next;
4446		*bufp = buf->b_next;
4447		buf->b_next = NULL;
4448
4449		ASSERT3P(state, !=, arc_l2c_only);
4450		ASSERT3U(state->arcs_size, >=, hdr->b_size);
4451		atomic_add_64(&state->arcs_size, -hdr->b_size);
4452		if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
4453			ASSERT3P(state, !=, arc_l2c_only);
4454			uint64_t *size = &state->arcs_lsize[type];
4455			ASSERT3U(*size, >=, hdr->b_size);
4456			atomic_add_64(size, -hdr->b_size);
4457		}
4458
4459		/*
4460		 * We're releasing a duplicate user data buffer, update
4461		 * our statistics accordingly.
4462		 */
4463		if (HDR_ISTYPE_DATA(hdr)) {
4464			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
4465			ARCSTAT_INCR(arcstat_duplicate_buffers_size,
4466			    -hdr->b_size);
4467		}
4468		hdr->b_l1hdr.b_datacnt -= 1;
4469		arc_cksum_verify(buf);
4470#ifdef illumos
4471		arc_buf_unwatch(buf);
4472#endif /* illumos */
4473
4474		mutex_exit(hash_lock);
4475
4476		nhdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
4477		nhdr->b_size = blksz;
4478		nhdr->b_spa = spa;
4479
4480		nhdr->b_flags = flags & ARC_FLAG_L2_WRITING;
4481		nhdr->b_flags |= arc_bufc_to_flags(type);
4482		nhdr->b_flags |= ARC_FLAG_HAS_L1HDR;
4483
4484		nhdr->b_l1hdr.b_buf = buf;
4485		nhdr->b_l1hdr.b_datacnt = 1;
4486		nhdr->b_l1hdr.b_state = arc_anon;
4487		nhdr->b_l1hdr.b_arc_access = 0;
4488		nhdr->b_freeze_cksum = NULL;
4489
4490		(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
4491		buf->b_hdr = nhdr;
4492		mutex_exit(&buf->b_evict_lock);
4493		atomic_add_64(&arc_anon->arcs_size, blksz);
4494	} else {
4495		mutex_exit(&buf->b_evict_lock);
4496		ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
4497		/* protected by hash lock */
4498		ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
4499		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4500		arc_change_state(arc_anon, hdr, hash_lock);
4501		hdr->b_l1hdr.b_arc_access = 0;
4502		mutex_exit(hash_lock);
4503
4504		buf_discard_identity(hdr);
4505		arc_buf_thaw(buf);
4506	}
4507	buf->b_efunc = NULL;
4508	buf->b_private = NULL;
4509}
4510
4511int
4512arc_released(arc_buf_t *buf)
4513{
4514	int released;
4515
4516	mutex_enter(&buf->b_evict_lock);
4517	released = (buf->b_data != NULL &&
4518	    buf->b_hdr->b_l1hdr.b_state == arc_anon);
4519	mutex_exit(&buf->b_evict_lock);
4520	return (released);
4521}
4522
4523#ifdef ZFS_DEBUG
4524int
4525arc_referenced(arc_buf_t *buf)
4526{
4527	int referenced;
4528
4529	mutex_enter(&buf->b_evict_lock);
4530	referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
4531	mutex_exit(&buf->b_evict_lock);
4532	return (referenced);
4533}
4534#endif
4535
4536static void
4537arc_write_ready(zio_t *zio)
4538{
4539	arc_write_callback_t *callback = zio->io_private;
4540	arc_buf_t *buf = callback->awcb_buf;
4541	arc_buf_hdr_t *hdr = buf->b_hdr;
4542
4543	ASSERT(HDR_HAS_L1HDR(hdr));
4544	ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
4545	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
4546	callback->awcb_ready(zio, buf, callback->awcb_private);
4547
4548	/*
4549	 * If the IO is already in progress, then this is a re-write
4550	 * attempt, so we need to thaw and re-compute the cksum.
4551	 * It is the responsibility of the callback to handle the
4552	 * accounting for any re-write attempt.
4553	 */
4554	if (HDR_IO_IN_PROGRESS(hdr)) {
4555		mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
4556		if (hdr->b_freeze_cksum != NULL) {
4557			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
4558			hdr->b_freeze_cksum = NULL;
4559		}
4560		mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
4561	}
4562	arc_cksum_compute(buf, B_FALSE);
4563	hdr->b_flags |= ARC_FLAG_IO_IN_PROGRESS;
4564}
4565
4566/*
4567 * The SPA calls this callback for each physical write that happens on behalf
4568 * of a logical write.  See the comment in dbuf_write_physdone() for details.
4569 */
4570static void
4571arc_write_physdone(zio_t *zio)
4572{
4573	arc_write_callback_t *cb = zio->io_private;
4574	if (cb->awcb_physdone != NULL)
4575		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
4576}
4577
4578static void
4579arc_write_done(zio_t *zio)
4580{
4581	arc_write_callback_t *callback = zio->io_private;
4582	arc_buf_t *buf = callback->awcb_buf;
4583	arc_buf_hdr_t *hdr = buf->b_hdr;
4584
4585	ASSERT(hdr->b_l1hdr.b_acb == NULL);
4586
4587	if (zio->io_error == 0) {
4588		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
4589			buf_discard_identity(hdr);
4590		} else {
4591			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
4592			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
4593		}
4594	} else {
4595		ASSERT(BUF_EMPTY(hdr));
4596	}
4597
4598	/*
4599	 * If the block to be written was all-zero or compressed enough to be
4600	 * embedded in the BP, no write was performed so there will be no
4601	 * dva/birth/checksum.  The buffer must therefore remain anonymous
4602	 * (and uncached).
4603	 */
4604	if (!BUF_EMPTY(hdr)) {
4605		arc_buf_hdr_t *exists;
4606		kmutex_t *hash_lock;
4607
4608		ASSERT(zio->io_error == 0);
4609
4610		arc_cksum_verify(buf);
4611
4612		exists = buf_hash_insert(hdr, &hash_lock);
4613		if (exists != NULL) {
4614			/*
4615			 * This can only happen if we overwrite for
4616			 * sync-to-convergence, because we remove
4617			 * buffers from the hash table when we arc_free().
4618			 */
4619			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
4620				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
4621					panic("bad overwrite, hdr=%p exists=%p",
4622					    (void *)hdr, (void *)exists);
4623				ASSERT(refcount_is_zero(
4624				    &exists->b_l1hdr.b_refcnt));
4625				arc_change_state(arc_anon, exists, hash_lock);
4626				mutex_exit(hash_lock);
4627				arc_hdr_destroy(exists);
4628				exists = buf_hash_insert(hdr, &hash_lock);
4629				ASSERT3P(exists, ==, NULL);
4630			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
4631				/* nopwrite */
4632				ASSERT(zio->io_prop.zp_nopwrite);
4633				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
4634					panic("bad nopwrite, hdr=%p exists=%p",
4635					    (void *)hdr, (void *)exists);
4636			} else {
4637				/* Dedup */
4638				ASSERT(hdr->b_l1hdr.b_datacnt == 1);
4639				ASSERT(hdr->b_l1hdr.b_state == arc_anon);
4640				ASSERT(BP_GET_DEDUP(zio->io_bp));
4641				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
4642			}
4643		}
4644		hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
4645		/* if it's not anon, we are doing a scrub */
4646		if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
4647			arc_access(hdr, hash_lock);
4648		mutex_exit(hash_lock);
4649	} else {
4650		hdr->b_flags &= ~ARC_FLAG_IO_IN_PROGRESS;
4651	}
4652
4653	ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
4654	callback->awcb_done(zio, buf, callback->awcb_private);
4655
4656	kmem_free(callback, sizeof (arc_write_callback_t));
4657}
4658
4659zio_t *
4660arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
4661    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
4662    const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
4663    arc_done_func_t *done, void *private, zio_priority_t priority,
4664    int zio_flags, const zbookmark_phys_t *zb)
4665{
4666	arc_buf_hdr_t *hdr = buf->b_hdr;
4667	arc_write_callback_t *callback;
4668	zio_t *zio;
4669
4670	ASSERT(ready != NULL);
4671	ASSERT(done != NULL);
4672	ASSERT(!HDR_IO_ERROR(hdr));
4673	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
4674	ASSERT(hdr->b_l1hdr.b_acb == NULL);
4675	ASSERT(hdr->b_l1hdr.b_datacnt > 0);
4676	if (l2arc)
4677		hdr->b_flags |= ARC_FLAG_L2CACHE;
4678	if (l2arc_compress)
4679		hdr->b_flags |= ARC_FLAG_L2COMPRESS;
4680	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
4681	callback->awcb_ready = ready;
4682	callback->awcb_physdone = physdone;
4683	callback->awcb_done = done;
4684	callback->awcb_private = private;
4685	callback->awcb_buf = buf;
4686
4687	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
4688	    arc_write_ready, arc_write_physdone, arc_write_done, callback,
4689	    priority, zio_flags, zb);
4690
4691	return (zio);
4692}
4693
4694static int
4695arc_memory_throttle(uint64_t reserve, uint64_t txg)
4696{
4697#ifdef _KERNEL
4698	uint64_t available_memory = ptob(freemem);
4699	static uint64_t page_load = 0;
4700	static uint64_t last_txg = 0;
4701
4702#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
4703	available_memory =
4704	    MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
4705#endif
4706
4707	if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
4708		return (0);
4709
4710	if (txg > last_txg) {
4711		last_txg = txg;
4712		page_load = 0;
4713	}
4714	/*
4715	 * If we are in pageout, we know that memory is already tight,
4716	 * the arc is already going to be evicting, so we just want to
4717	 * continue to let page writes occur as quickly as possible.
4718	 */
4719	if (curproc == pageproc) {
4720		if (page_load > MAX(ptob(minfree), available_memory) / 4)
4721			return (SET_ERROR(ERESTART));
4722		/* Note: reserve is inflated, so we deflate */
4723		page_load += reserve / 8;
4724		return (0);
4725	} else if (page_load > 0 && arc_reclaim_needed()) {
4726		/* memory is low, delay before restarting */
4727		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
4728		return (SET_ERROR(EAGAIN));
4729	}
4730	page_load = 0;
4731#endif
4732	return (0);
4733}
4734
4735void
4736arc_tempreserve_clear(uint64_t reserve)
4737{
4738	atomic_add_64(&arc_tempreserve, -reserve);
4739	ASSERT((int64_t)arc_tempreserve >= 0);
4740}
4741
4742int
4743arc_tempreserve_space(uint64_t reserve, uint64_t txg)
4744{
4745	int error;
4746	uint64_t anon_size;
4747
4748	if (reserve > arc_c/4 && !arc_no_grow) {
4749		arc_c = MIN(arc_c_max, reserve * 4);
4750		DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
4751	}
4752	if (reserve > arc_c)
4753		return (SET_ERROR(ENOMEM));
4754
4755	/*
4756	 * Don't count loaned bufs as in flight dirty data to prevent long
4757	 * network delays from blocking transactions that are ready to be
4758	 * assigned to a txg.
4759	 */
4760	anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
4761
4762	/*
4763	 * Writes will, almost always, require additional memory allocations
4764	 * in order to compress/encrypt/etc the data.  We therefore need to
4765	 * make sure that there is sufficient available memory for this.
4766	 */
4767	error = arc_memory_throttle(reserve, txg);
4768	if (error != 0)
4769		return (error);
4770
4771	/*
4772	 * Throttle writes when the amount of dirty data in the cache
4773	 * gets too large.  We try to keep the cache less than half full
4774	 * of dirty blocks so that our sync times don't grow too large.
4775	 * Note: if two requests come in concurrently, we might let them
4776	 * both succeed, when one of them should fail.  Not a huge deal.
4777	 */
4778
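	/*
	 * Illustrative arithmetic (hypothetical sizes): with arc_c = 4 GB,
	 * the check below starts rejecting reservations once in-flight
	 * dirty data (reserve + arc_tempreserve + anon_size) exceeds 2 GB
	 * and anonymous buffers alone exceed 1 GB, returning ERESTART so
	 * the DMU retries the reservation later.
	 */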
4779	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
4780	    anon_size > arc_c / 4) {
4781		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
4782		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
4783		    arc_tempreserve>>10,
4784		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
4785		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
4786		    reserve>>10, arc_c>>10);
4787		return (SET_ERROR(ERESTART));
4788	}
4789	atomic_add_64(&arc_tempreserve, reserve);
4790	return (0);
4791}
4792
4793static void
4794arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
4795    kstat_named_t *evict_data, kstat_named_t *evict_metadata)
4796{
4797	size->value.ui64 = state->arcs_size;
4798	evict_data->value.ui64 = state->arcs_lsize[ARC_BUFC_DATA];
4799	evict_metadata->value.ui64 = state->arcs_lsize[ARC_BUFC_METADATA];
4800}
4801
4802static int
4803arc_kstat_update(kstat_t *ksp, int rw)
4804{
4805	arc_stats_t *as = ksp->ks_data;
4806
4807	if (rw == KSTAT_WRITE) {
4808		return (EACCES);
4809	} else {
4810		arc_kstat_update_state(arc_anon,
4811		    &as->arcstat_anon_size,
4812		    &as->arcstat_anon_evictable_data,
4813		    &as->arcstat_anon_evictable_metadata);
4814		arc_kstat_update_state(arc_mru,
4815		    &as->arcstat_mru_size,
4816		    &as->arcstat_mru_evictable_data,
4817		    &as->arcstat_mru_evictable_metadata);
4818		arc_kstat_update_state(arc_mru_ghost,
4819		    &as->arcstat_mru_ghost_size,
4820		    &as->arcstat_mru_ghost_evictable_data,
4821		    &as->arcstat_mru_ghost_evictable_metadata);
4822		arc_kstat_update_state(arc_mfu,
4823		    &as->arcstat_mfu_size,
4824		    &as->arcstat_mfu_evictable_data,
4825		    &as->arcstat_mfu_evictable_metadata);
4826		arc_kstat_update_state(arc_mfu_ghost,
4827		    &as->arcstat_mfu_ghost_size,
4828		    &as->arcstat_mfu_ghost_evictable_data,
4829		    &as->arcstat_mfu_ghost_evictable_metadata);
4830	}
4831
4832	return (0);
4833}
4834
4835#ifdef _KERNEL
4836static eventhandler_tag arc_event_lowmem = NULL;
4837
4838static void
4839arc_lowmem(void *arg __unused, int howto __unused)
4840{
4841
4842	mutex_enter(&arc_reclaim_thr_lock);
4843	/* XXX: Memory deficit should be passed as argument. */
4844	needfree = btoc(arc_c >> arc_shrink_shift);
4845	DTRACE_PROBE(arc__needfree);
4846	cv_signal(&arc_reclaim_thr_cv);
4847
4848	/*
4849	 * It is unsafe to block here in arbitrary threads, because we can come
4850	 * here from ARC itself and may hold ARC locks and thus risk a deadlock
4851	 * with ARC reclaim thread.
4852	 */
4853	if (curproc == pageproc)
4854		msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
4855	mutex_exit(&arc_reclaim_thr_lock);
4856}
4857#endif
4858
4859void
4860arc_init(void)
4861{
4862	int i, prefetch_tunable_set = 0;
4863
4864	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4865	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
4866
4867	/* Convert seconds to clock ticks */
4868	arc_min_prefetch_lifespan = 1 * hz;
4869
4870	/* Start out with 1/8 of all memory */
4871	arc_c = kmem_size() / 8;
4872
4873#ifdef sun
4874#ifdef _KERNEL
4875	/*
4876	 * On architectures where the physical memory can be larger
4877	 * than the addressable space (intel in 32-bit mode), we may
4878	 * need to limit the cache to 1/8 of VM size.
4879	 */
4880	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4881#endif
4882#endif	/* sun */
4883	/* set min cache to 1/32 of all memory, or 16MB, whichever is more */
4884	arc_c_min = MAX(arc_c / 4, 16 << 20);
4885	/* set max to 5/8 of all memory, or all but 1GB, whichever is more */
4886	if (arc_c * 8 >= 1 << 30)
4887		arc_c_max = (arc_c * 8) - (1 << 30);
4888	else
4889		arc_c_max = arc_c_min;
4890	arc_c_max = MAX(arc_c * 5, arc_c_max);
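	/*
	 * Example with assumed numbers: on a machine where kmem_size()
	 * reports 16 GB, arc_c starts at 2 GB, arc_c_min becomes 512 MB,
	 * and arc_c_max becomes MAX(10 GB, 16 GB - 1 GB) = 15 GB, before
	 * any of the tunable overrides below are applied.
	 */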
4891
4892#ifdef _KERNEL
4893	/*
4894	 * Allow the tunables to override our calculations if they are
4895	 * reasonable (i.e. over 16MB)
4896	 */
4897	if (zfs_arc_max > 16 << 20 && zfs_arc_max < kmem_size())
4898		arc_c_max = zfs_arc_max;
4899	if (zfs_arc_min > 16 << 20 && zfs_arc_min <= arc_c_max)
4900		arc_c_min = zfs_arc_min;
4901#endif
4902
4903	arc_c = arc_c_max;
4904	arc_p = (arc_c >> 1);
4905
4906	/* limit meta-data to 1/4 of the arc capacity */
4907	arc_meta_limit = arc_c_max / 4;
4908
4909	/* Allow the tunable to override if it is reasonable */
4910	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4911		arc_meta_limit = zfs_arc_meta_limit;
4912
4913	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4914		arc_c_min = arc_meta_limit / 2;
4915
4916	if (zfs_arc_meta_min > 0) {
4917		arc_meta_min = zfs_arc_meta_min;
4918	} else {
4919		arc_meta_min = arc_c_min / 2;
4920	}
4921
4922	if (zfs_arc_grow_retry > 0)
4923		arc_grow_retry = zfs_arc_grow_retry;
4924
4925	if (zfs_arc_shrink_shift > 0)
4926		arc_shrink_shift = zfs_arc_shrink_shift;
4927
4928	/*
4929	 * Ensure that arc_no_grow_shift is less than arc_shrink_shift.
4930	 */
4931	if (arc_no_grow_shift >= arc_shrink_shift)
4932		arc_no_grow_shift = arc_shrink_shift - 1;
4933
4934	if (zfs_arc_p_min_shift > 0)
4935		arc_p_min_shift = zfs_arc_p_min_shift;
4936
4937	/* if kmem_flags are set, lets try to use less memory */
4938	if (kmem_debugging())
4939		arc_c = arc_c / 2;
4940	if (arc_c < arc_c_min)
4941		arc_c = arc_c_min;
4942
4943	zfs_arc_min = arc_c_min;
4944	zfs_arc_max = arc_c_max;
4945
4946	arc_anon = &ARC_anon;
4947	arc_mru = &ARC_mru;
4948	arc_mru_ghost = &ARC_mru_ghost;
4949	arc_mfu = &ARC_mfu;
4950	arc_mfu_ghost = &ARC_mfu_ghost;
4951	arc_l2c_only = &ARC_l2c_only;
4952	arc_size = 0;
4953
4954	for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4955		mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4956		    NULL, MUTEX_DEFAULT, NULL);
4957		mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4958		    NULL, MUTEX_DEFAULT, NULL);
4959		mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4960		    NULL, MUTEX_DEFAULT, NULL);
4961		mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4962		    NULL, MUTEX_DEFAULT, NULL);
4963		mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4964		    NULL, MUTEX_DEFAULT, NULL);
4965		mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4966		    NULL, MUTEX_DEFAULT, NULL);
4967
4968		list_create(&arc_mru->arcs_lists[i],
4969		    sizeof (arc_buf_hdr_t),
4970		    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
4971		list_create(&arc_mru_ghost->arcs_lists[i],
4972		    sizeof (arc_buf_hdr_t),
4973		    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
4974		list_create(&arc_mfu->arcs_lists[i],
4975		    sizeof (arc_buf_hdr_t),
4976		    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
4977		list_create(&arc_mfu_ghost->arcs_lists[i],
4978		    sizeof (arc_buf_hdr_t),
4979		    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
4983		list_create(&arc_l2c_only->arcs_lists[i],
4984		    sizeof (arc_buf_hdr_t),
4985		    offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node));
4986	}
4987
4988	buf_init();
4989
4990	arc_thread_exit = 0;
4991	arc_eviction_list = NULL;
4992	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4993	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4994
4995	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4996	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4997
4998	if (arc_ksp != NULL) {
4999		arc_ksp->ks_data = &arc_stats;
5000		arc_ksp->ks_update = arc_kstat_update;
5001		kstat_install(arc_ksp);
5002	}
5003
5004	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
5005	    TS_RUN, minclsyspri);
5006
5007#ifdef _KERNEL
5008	arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
5009	    EVENTHANDLER_PRI_FIRST);
5010#endif
5011
5012	arc_dead = FALSE;
5013	arc_warm = B_FALSE;
5014
5015	/*
5016	 * Calculate maximum amount of dirty data per pool.
5017	 *
5018	 * If it has been set by /etc/system, take that.
5019	 * Otherwise, use a percentage of physical memory defined by
5020	 * zfs_dirty_data_max_percent (default 10%) with a cap at
5021	 * zfs_dirty_data_max_max (default 4GB).
5022	 */
5023	if (zfs_dirty_data_max == 0) {
5024		zfs_dirty_data_max = ptob(physmem) *
5025		    zfs_dirty_data_max_percent / 100;
5026		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
5027		    zfs_dirty_data_max_max);
5028	}
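	/*
	 * For example (illustrative only): with 64 GB of physical memory
	 * and the default zfs_dirty_data_max_percent of 10, the computed
	 * value would be 6.4 GB, which the default 4 GB
	 * zfs_dirty_data_max_max cap then reduces to 4 GB.
	 */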
5029
5030#ifdef _KERNEL
5031	if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
5032		prefetch_tunable_set = 1;
5033
5034#ifdef __i386__
5035	if (prefetch_tunable_set == 0) {
5036		printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
5037		    "-- to enable,\n");
5038		printf("            add \"vfs.zfs.prefetch_disable=0\" "
5039		    "to /boot/loader.conf.\n");
5040		zfs_prefetch_disable = 1;
5041	}
5042#else
5043	if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
5044	    prefetch_tunable_set == 0) {
5045		printf("ZFS NOTICE: Prefetch is disabled by default if less "
5046		    "than 4GB of RAM is present;\n"
5047		    "            to enable, add \"vfs.zfs.prefetch_disable=0\" "
5048		    "to /boot/loader.conf.\n");
5049		zfs_prefetch_disable = 1;
5050	}
5051#endif
5052	/* Warn about ZFS memory and address space requirements. */
5053	if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
5054		printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
5055		    "expect unstable behavior.\n");
5056	}
5057	if (kmem_size() < 512 * (1 << 20)) {
5058		printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
5059		    "expect unstable behavior.\n");
5060		printf("             Consider tuning vm.kmem_size and "
5061		    "vm.kmem_size_max\n");
5062		printf("             in /boot/loader.conf.\n");
5063	}
5064#endif
5065}
5066
5067void
5068arc_fini(void)
5069{
5070	int i;
5071
5072	mutex_enter(&arc_reclaim_thr_lock);
5073	arc_thread_exit = 1;
5074	cv_signal(&arc_reclaim_thr_cv);
5075	while (arc_thread_exit != 0)
5076		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
5077	mutex_exit(&arc_reclaim_thr_lock);
5078
5079	arc_flush(NULL);
5080
5081	arc_dead = TRUE;
5082
5083	if (arc_ksp != NULL) {
5084		kstat_delete(arc_ksp);
5085		arc_ksp = NULL;
5086	}
5087
5088	mutex_destroy(&arc_eviction_mtx);
5089	mutex_destroy(&arc_reclaim_thr_lock);
5090	cv_destroy(&arc_reclaim_thr_cv);
5091
5092	for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
5093		list_destroy(&arc_mru->arcs_lists[i]);
5094		list_destroy(&arc_mru_ghost->arcs_lists[i]);
5095		list_destroy(&arc_mfu->arcs_lists[i]);
5096		list_destroy(&arc_mfu_ghost->arcs_lists[i]);
5097		list_destroy(&arc_l2c_only->arcs_lists[i]);
5098
5099		mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
5100		mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
5101		mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
5102		mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
5103		mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
5104		mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
5105	}
5106
5107	buf_fini();
5108
5109	ASSERT0(arc_loaned_bytes);
5110
5111#ifdef _KERNEL
5112	if (arc_event_lowmem != NULL)
5113		EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
5114#endif
5115}
5116
5117/*
5118 * Level 2 ARC
5119 *
5120 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
5121 * It uses dedicated storage devices to hold cached data, which are populated
5122 * using large infrequent writes.  The main role of this cache is to boost
5123 * the performance of random read workloads.  The intended L2ARC devices
5124 * include short-stroked disks, solid state disks, and other media with
5125 * substantially faster read latency than disk.
5126 *
5127 *                 +-----------------------+
5128 *                 |         ARC           |
5129 *                 +-----------------------+
5130 *                    |         ^     ^
5131 *                    |         |     |
5132 *      l2arc_feed_thread()    arc_read()
5133 *                    |         |     |
5134 *                    |  l2arc read   |
5135 *                    V         |     |
5136 *               +---------------+    |
5137 *               |     L2ARC     |    |
5138 *               +---------------+    |
5139 *                   |    ^           |
5140 *          l2arc_write() |           |
5141 *                   |    |           |
5142 *                   V    |           |
5143 *                 +-------+      +-------+
5144 *                 | vdev  |      | vdev  |
5145 *                 | cache |      | cache |
5146 *                 +-------+      +-------+
5147 *                 +=========+     .-----.
5148 *                 :  L2ARC  :    |-_____-|
5149 *                 : devices :    | Disks |
5150 *                 +=========+    `-_____-'
5151 *
5152 * Read requests are satisfied from the following sources, in order:
5153 *
5154 *	1) ARC
5155 *	2) vdev cache of L2ARC devices
5156 *	3) L2ARC devices
5157 *	4) vdev cache of disks
5158 *	5) disks
5159 *
5160 * Some L2ARC device types exhibit extremely slow write performance.
5161 * To accommodate this, there are some significant differences between
5162 * the L2ARC and traditional cache design:
5163 *
5164 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
5165 * the ARC behave as usual, freeing buffers and placing headers on ghost
5166 * lists.  The ARC does not send buffers to the L2ARC during eviction as
5167 * this would add inflated write latencies for all ARC memory pressure.
5168 *
5169 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
5170 * It does this by periodically scanning buffers from the eviction-end of
5171 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
5172 * not already there. It scans until a headroom of buffers is satisfied,
5173 * which itself acts as a cushion against ARC eviction. If a compressible buffer is
5174 * found during scanning and selected for writing to an L2ARC device, we
5175 * temporarily boost scanning headroom during the next scan cycle to make
5176 * sure we adapt to compression effects (which might significantly reduce
5177 * the data volume we write to L2ARC). The thread that does this is
5178 * l2arc_feed_thread(), illustrated below; example sizes are included to
5179 * provide a better sense of ratio than this diagram:
5180 *
5181 *	       head -->                        tail
5182 *	        +---------------------+----------+
5183 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
5184 *	        +---------------------+----------+   |   o L2ARC eligible
5185 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
5186 *	        +---------------------+----------+   |
5187 *	             15.9 Gbytes      ^ 32 Mbytes    |
5188 *	                           headroom          |
5189 *	                                      l2arc_feed_thread()
5190 *	                                             |
5191 *	                 l2arc write hand <--[oooo]--'
5192 *	                         |           8 Mbyte
5193 *	                         |          write max
5194 *	                         V
5195 *		  +==============================+
5196 *	L2ARC dev |####|#|###|###|    |####| ... |
5197 *	          +==============================+
5198 *	                     32 Gbytes
5199 *
5200 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
5201 * evicted, then the L2ARC has cached a buffer much sooner than it probably
5202 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
5203 * safe to say that this is an uncommon case, since buffers at the end of
5204 * the ARC lists have moved there due to inactivity.
5205 *
5206 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
5207 * then the L2ARC simply misses copying some buffers.  This serves as a
5208 * pressure valve to prevent heavy read workloads from both stalling the ARC
5209 * with waits and clogging the L2ARC with writes.  This also helps prevent
5210 * the potential for the L2ARC to churn if it attempts to cache content too
5211 * quickly, such as during backups of the entire pool.
5212 *
5213 * 5. After system boot and before the ARC has filled main memory, there are
5214 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
5215 * lists can remain mostly static.  Instead of searching from the tail of these
5216 * lists as pictured, the l2arc_feed_thread() will search from the list heads
5217 * for eligible buffers, greatly increasing its chance of finding them.
5218 *
5219 * The L2ARC device write speed is also boosted during this time so that
5220 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
5221 * there are no L2ARC reads, and no fear of degrading read performance
5222 * through increased writes.
5223 *
5224 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
5225 * the vdev queue can aggregate them into larger and fewer writes.  Each
5226 * device is written to in a rotor fashion, sweeping writes through
5227 * available space then repeating.
5228 *
5229 * 7. The L2ARC does not store dirty content.  It never needs to flush
5230 * write buffers back to disk based storage.
5231 *
5232 * 8. If an ARC buffer is written (and dirtied) which also exists in the
5233 * L2ARC, the now stale L2ARC buffer is immediately dropped.
5234 *
5235 * The performance of the L2ARC can be tweaked by a number of tunables, which
5236 * may be necessary for different workloads:
5237 *
5238 *	l2arc_write_max		max write bytes per interval
5239 *	l2arc_write_boost	extra write bytes during device warmup
5240 *	l2arc_noprefetch	skip caching prefetched buffers
5241 *	l2arc_headroom		number of max device writes to precache
5242 *	l2arc_headroom_boost	when we find compressed buffers during ARC
5243 *				scanning, we multiply headroom by this
5244 *				percentage factor for the next scan cycle,
5245 *				since more compressed buffers are likely to
5246 *				be present
5247 *	l2arc_feed_secs		seconds between L2ARC writing
5248 *
5249 * Tunables may be removed or added as future performance improvements are
5250 * integrated, and also may become zpool properties.
5251 *
5252 * There are three key functions that control how the L2ARC warms up:
5253 *
5254 *	l2arc_write_eligible()	check if a buffer is eligible to cache
5255 *	l2arc_write_size()	calculate how much to write
5256 *	l2arc_write_interval()	calculate sleep delay between writes
5257 *
5258 * These three functions determine what to write, how much, and how quickly
5259 * to send writes.
5260 */
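
/*
 * Condensed, illustrative sketch (compiled out) of how the three functions
 * named above cooperate during one feed cycle.  The helper name
 * example_l2arc_feed_once is hypothetical; the real logic, including
 * locking, device rotation and memory-pressure checks, lives in
 * l2arc_feed_thread() below.
 */
#if 0
static void
example_l2arc_feed_once(spa_t *spa, l2arc_dev_t *dev,
    boolean_t *headroom_boost)
{
	clock_t begin = ddi_get_lbolt();
	uint64_t size, wrote;

	size = l2arc_write_size();		/* how many bytes to try to write */
	l2arc_evict(dev, size, B_FALSE);	/* clear that much space ahead of the hand */
	wrote = l2arc_write_buffers(spa, dev, size, headroom_boost);
	(void) l2arc_write_interval(begin, size, wrote); /* when to wake up next */
}
#endif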
5261
5262static boolean_t
5263l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
5264{
5265	/*
5266	 * A buffer is *not* eligible for the L2ARC if it:
5267	 * 1. belongs to a different spa.
5268	 * 2. is already cached on the L2ARC.
5269	 * 3. has an I/O in progress (it may be an incomplete read).
5270	 * 4. is flagged not eligible (zfs property).
5271	 */
5272	if (hdr->b_spa != spa_guid) {
5273		ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
5274		return (B_FALSE);
5275	}
5276	if (HDR_HAS_L2HDR(hdr)) {
5277		ARCSTAT_BUMP(arcstat_l2_write_in_l2);
5278		return (B_FALSE);
5279	}
5280	if (HDR_IO_IN_PROGRESS(hdr)) {
5281		ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
5282		return (B_FALSE);
5283	}
5284	if (!HDR_L2CACHE(hdr)) {
5285		ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
5286		return (B_FALSE);
5287	}
5288
5289	return (B_TRUE);
5290}
5291
5292static uint64_t
5293l2arc_write_size(void)
5294{
5295	uint64_t size;
5296
5297	/*
5298	 * Make sure our globals have meaningful values in case the user
5299	 * altered them.
5300	 */
5301	size = l2arc_write_max;
5302	if (size == 0) {
5303		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
5304		    "be greater than zero, resetting it to the default (%d)",
5305		    L2ARC_WRITE_SIZE);
5306		size = l2arc_write_max = L2ARC_WRITE_SIZE;
5307	}
5308
5309	if (arc_warm == B_FALSE)
5310		size += l2arc_write_boost;
5311
5312	return (size);
5313
5314}
5315
5316static clock_t
5317l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
5318{
5319	clock_t interval, next, now;
5320
5321	/*
5322	 * If the ARC lists are busy, increase our write rate; if the
5323	 * lists are stale, idle back.  This is achieved by checking
5324	 * how much we previously wrote - if it was more than half of
5325	 * what we wanted, schedule the next write much sooner.
5326	 */
5327	if (l2arc_feed_again && wrote > (wanted / 2))
5328		interval = (hz * l2arc_feed_min_ms) / 1000;
5329	else
5330		interval = hz * l2arc_feed_secs;
5331
5332	now = ddi_get_lbolt();
5333	next = MAX(now, MIN(now + interval, began + interval));
5334
5335	return (next);
5336}
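
/*
 * Worked example, assuming for illustration hz = 1000, l2arc_feed_secs = 1
 * and l2arc_feed_min_ms = 200: a pass that wrote more than half of what it
 * wanted is rescheduled roughly 200 ticks after 'began'; otherwise roughly
 * 1000 ticks.  The MAX(now, ...) clamp keeps the next wakeup from landing
 * in the past when the write itself outlasted the chosen interval.
 */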
5337
5338/*
5339 * Cycle through L2ARC devices.  This is how L2ARC load balances.
5340 * If a device is returned, this also returns holding the spa config lock.
5341 */
5342static l2arc_dev_t *
5343l2arc_dev_get_next(void)
5344{
5345	l2arc_dev_t *first, *next = NULL;
5346
5347	/*
5348	 * Lock out the removal of spas (spa_namespace_lock), then removal
5349	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
5350	 * both locks will be dropped and a spa config lock held instead.
5351	 */
5352	mutex_enter(&spa_namespace_lock);
5353	mutex_enter(&l2arc_dev_mtx);
5354
5355	/* if there are no vdevs, there is nothing to do */
5356	if (l2arc_ndev == 0)
5357		goto out;
5358
5359	first = NULL;
5360	next = l2arc_dev_last;
5361	do {
5362		/* loop around the list looking for a non-faulted vdev */
5363		if (next == NULL) {
5364			next = list_head(l2arc_dev_list);
5365		} else {
5366			next = list_next(l2arc_dev_list, next);
5367			if (next == NULL)
5368				next = list_head(l2arc_dev_list);
5369		}
5370
5371		/* if we have come back to the start, bail out */
5372		if (first == NULL)
5373			first = next;
5374		else if (next == first)
5375			break;
5376
5377	} while (vdev_is_dead(next->l2ad_vdev));
5378
5379	/* if we were unable to find any usable vdevs, return NULL */
5380	if (vdev_is_dead(next->l2ad_vdev))
5381		next = NULL;
5382
5383	l2arc_dev_last = next;
5384
5385out:
5386	mutex_exit(&l2arc_dev_mtx);
5387
5388	/*
5389	 * Grab the config lock to prevent the 'next' device from being
5390	 * removed while we are writing to it.
5391	 */
5392	if (next != NULL)
5393		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
5394	mutex_exit(&spa_namespace_lock);
5395
5396	return (next);
5397}
5398
5399/*
5400 * Free buffers that were tagged for destruction.
5401 */
5402static void
5403l2arc_do_free_on_write()
5404{
5405	list_t *buflist;
5406	l2arc_data_free_t *df, *df_prev;
5407
5408	mutex_enter(&l2arc_free_on_write_mtx);
5409	buflist = l2arc_free_on_write;
5410
5411	for (df = list_tail(buflist); df; df = df_prev) {
5412		df_prev = list_prev(buflist, df);
5413		ASSERT(df->l2df_data != NULL);
5414		ASSERT(df->l2df_func != NULL);
5415		df->l2df_func(df->l2df_data, df->l2df_size);
5416		list_remove(buflist, df);
5417		kmem_free(df, sizeof (l2arc_data_free_t));
5418	}
5419
5420	mutex_exit(&l2arc_free_on_write_mtx);
5421}
5422
5423/*
5424 * A write to a cache device has completed.  Update all headers to allow
5425 * reads from these buffers to begin.
5426 */
5427static void
5428l2arc_write_done(zio_t *zio)
5429{
5430	l2arc_write_callback_t *cb;
5431	l2arc_dev_t *dev;
5432	list_t *buflist;
5433	arc_buf_hdr_t *head, *hdr, *hdr_prev;
5434	kmutex_t *hash_lock;
5435	int64_t bytes_dropped = 0;
5436
5437	cb = zio->io_private;
5438	ASSERT(cb != NULL);
5439	dev = cb->l2wcb_dev;
5440	ASSERT(dev != NULL);
5441	head = cb->l2wcb_head;
5442	ASSERT(head != NULL);
5443	buflist = &dev->l2ad_buflist;
5444	ASSERT(buflist != NULL);
5445	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
5446	    l2arc_write_callback_t *, cb);
5447
5448	if (zio->io_error != 0)
5449		ARCSTAT_BUMP(arcstat_l2_writes_error);
5450
5451	mutex_enter(&dev->l2ad_mtx);
5452
5453	/*
5454	 * All writes completed, or an error was hit.
5455	 */
5456	for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
5457		hdr_prev = list_prev(buflist, hdr);
5458
5459		hash_lock = HDR_LOCK(hdr);
5460		if (!mutex_tryenter(hash_lock)) {
5461			/*
5462			 * This buffer misses out.  It may be in a stage
5463			 * of eviction.  Its ARC_FLAG_L2_WRITING flag will be
5464			 * left set, denying reads to this buffer.
5465			 */
5466			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
5467			continue;
5468		}
5469
5470		/*
5471		 * It's possible that this buffer got evicted from the L1 cache
5472		 * before we grabbed the vdev + hash locks, in which case
5473		 * arc_hdr_realloc freed b_tmp_cdata for us if it was allocated.
5474		 * Only free the buffer if we still have an L1 hdr.
5475		 */
5476		if (HDR_HAS_L1HDR(hdr) && hdr->b_l1hdr.b_tmp_cdata != NULL &&
5477		    HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF)
5478			l2arc_release_cdata_buf(hdr);
5479
5480		if (zio->io_error != 0) {
5481			/*
5482			 * Error - drop L2ARC entry.
5483			 */
5484			trim_map_free(hdr->b_l2hdr.b_dev->l2ad_vdev,
5485			    hdr->b_l2hdr.b_daddr, hdr->b_l2hdr.b_asize, 0);
5486			hdr->b_flags &= ~ARC_FLAG_HAS_L2HDR;
5487
5488			ARCSTAT_INCR(arcstat_l2_asize, -hdr->b_l2hdr.b_asize);
5489			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
5490
5491			bytes_dropped += hdr->b_l2hdr.b_asize;
5492			(void) refcount_remove_many(&dev->l2ad_alloc,
5493			    hdr->b_l2hdr.b_asize, hdr);
5494		}
5495
5496		/*
5497		 * Allow ARC to begin reads to this L2ARC entry.
5498		 */
5499		hdr->b_flags &= ~ARC_FLAG_L2_WRITING;
5500
5501		mutex_exit(hash_lock);
5502	}
5503
5504	atomic_inc_64(&l2arc_writes_done);
5505	list_remove(buflist, head);
5506	ASSERT(!HDR_HAS_L1HDR(head));
5507	kmem_cache_free(hdr_l2only_cache, head);
5508	mutex_exit(&dev->l2ad_mtx);
5509
5510	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
5511
5512	l2arc_do_free_on_write();
5513
5514	kmem_free(cb, sizeof (l2arc_write_callback_t));
5515}
5516
5517/*
5518 * A read to a cache device completed.  Validate buffer contents before
5519 * handing over to the regular ARC routines.
5520 */
5521static void
5522l2arc_read_done(zio_t *zio)
5523{
5524	l2arc_read_callback_t *cb;
5525	arc_buf_hdr_t *hdr;
5526	arc_buf_t *buf;
5527	kmutex_t *hash_lock;
5528	int equal;
5529
5530	ASSERT(zio->io_vd != NULL);
5531	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
5532
5533	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
5534
5535	cb = zio->io_private;
5536	ASSERT(cb != NULL);
5537	buf = cb->l2rcb_buf;
5538	ASSERT(buf != NULL);
5539
5540	hash_lock = HDR_LOCK(buf->b_hdr);
5541	mutex_enter(hash_lock);
5542	hdr = buf->b_hdr;
5543	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
5544
5545	/*
5546	 * If the buffer was compressed, decompress it first.
5547	 */
5548	if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
5549		l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
5550	ASSERT(zio->io_data != NULL);
5551
5552	/*
5553	 * Check this survived the L2ARC journey.
5554	 */
5555	equal = arc_cksum_equal(buf);
5556	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
5557		mutex_exit(hash_lock);
5558		zio->io_private = buf;
5559		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
5560		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
5561		arc_read_done(zio);
5562	} else {
5563		mutex_exit(hash_lock);
5564		/*
5565		 * Buffer didn't survive caching.  Increment stats and
5566		 * reissue to the original storage device.
5567		 */
5568		if (zio->io_error != 0) {
5569			ARCSTAT_BUMP(arcstat_l2_io_error);
5570		} else {
5571			zio->io_error = SET_ERROR(EIO);
5572		}
5573		if (!equal)
5574			ARCSTAT_BUMP(arcstat_l2_cksum_bad);
5575
5576		/*
5577		 * If there's no waiter, issue an async i/o to the primary
5578		 * storage now.  If there *is* a waiter, the caller must
5579		 * issue the i/o in a context where it's OK to block.
5580		 */
5581		if (zio->io_waiter == NULL) {
5582			zio_t *pio = zio_unique_parent(zio);
5583
5584			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
5585
5586			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
5587			    buf->b_data, zio->io_size, arc_read_done, buf,
5588			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
5589		}
5590	}
5591
5592	kmem_free(cb, sizeof (l2arc_read_callback_t));
5593}
5594
5595/*
5596 * This is the list priority from which the L2ARC will search for pages to
5597 * cache.  This is used within loops (0 .. 2 * ARC_BUFC_NUMLISTS - 1) to
5598 * cycle through lists in the desired order.  This order can have a
5599 * significant effect on cache performance.
5600 *
5601 * Currently the metadata lists are hit first, MFU then MRU, followed by
5602 * the data lists.  This function returns a locked list, and also returns
5603 * the lock pointer.
5604 */
5605static list_t *
5606l2arc_list_locked(int list_num, kmutex_t **lock)
5607{
5608	list_t *list = NULL;
5609	int idx;
5610
5611	ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
5612
5613	if (list_num < ARC_BUFC_NUMMETADATALISTS) {
5614		idx = list_num;
5615		list = &arc_mfu->arcs_lists[idx];
5616		*lock = ARCS_LOCK(arc_mfu, idx);
5617	} else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
5618		idx = list_num - ARC_BUFC_NUMMETADATALISTS;
5619		list = &arc_mru->arcs_lists[idx];
5620		*lock = ARCS_LOCK(arc_mru, idx);
5621	} else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
5622		ARC_BUFC_NUMDATALISTS)) {
5623		idx = list_num - ARC_BUFC_NUMMETADATALISTS;
5624		list = &arc_mfu->arcs_lists[idx];
5625		*lock = ARCS_LOCK(arc_mfu, idx);
5626	} else {
5627		idx = list_num - ARC_BUFC_NUMLISTS;
5628		list = &arc_mru->arcs_lists[idx];
5629		*lock = ARCS_LOCK(arc_mru, idx);
5630	}
5631
5632	ASSERT(!(MUTEX_HELD(*lock)));
5633	mutex_enter(*lock);
5634	return (list);
5635}
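
/*
 * Illustrative mapping for l2arc_list_locked(), written symbolically with
 * M = ARC_BUFC_NUMMETADATALISTS and D = ARC_BUFC_NUMDATALISTS (so that
 * ARC_BUFC_NUMLISTS = M + D, as the arithmetic above implies):
 *
 *	list_num in [0, M)             arc_mfu metadata, idx = list_num
 *	list_num in [M, 2M)            arc_mru metadata, idx = list_num - M
 *	list_num in [2M, 2M + D)       arc_mfu data,     idx = list_num - M
 *	list_num in [2M + D, 2(M + D)) arc_mru data,     idx = list_num - (M + D)
 */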
5636
5637/*
5638 * Evict buffers from the device write hand to the distance specified in
5639 * bytes.  This distance may span populated buffers, or it may span nothing.
5640 * This is clearing a region on the L2ARC device ready for writing.
5641 * If the 'all' boolean is set, every buffer is evicted.
5642 */
5643static void
5644l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
5645{
5646	list_t *buflist;
5647	arc_buf_hdr_t *hdr, *hdr_prev;
5648	kmutex_t *hash_lock;
5649	uint64_t taddr;
5650
5651	buflist = &dev->l2ad_buflist;
5652
5653	if (!all && dev->l2ad_first) {
5654		/*
5655		 * This is the first sweep through the device.  There is
5656		 * nothing to evict.
5657		 */
5658		return;
5659	}
5660
5661	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
5662		/*
5663		 * When nearing the end of the device, evict to the end
5664		 * before the device write hand jumps to the start.
5665		 */
5666		taddr = dev->l2ad_end;
5667	} else {
5668		taddr = dev->l2ad_hand + distance;
5669	}
5670	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
5671	    uint64_t, taddr, boolean_t, all);
5672
5673top:
5674	mutex_enter(&dev->l2ad_mtx);
5675	for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
5676		hdr_prev = list_prev(buflist, hdr);
5677
5678		hash_lock = HDR_LOCK(hdr);
5679		if (!mutex_tryenter(hash_lock)) {
5680			/*
5681			 * Missed the hash lock.  Retry.
5682			 */
5683			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
5684			mutex_exit(&dev->l2ad_mtx);
5685			mutex_enter(hash_lock);
5686			mutex_exit(hash_lock);
5687			goto top;
5688		}
5689
5690		if (HDR_L2_WRITE_HEAD(hdr)) {
5691			/*
5692			 * We hit a write head node.  Leave it for
5693			 * l2arc_write_done().
5694			 */
5695			list_remove(buflist, hdr);
5696			mutex_exit(hash_lock);
5697			continue;
5698		}
5699
5700		if (!all && HDR_HAS_L2HDR(hdr) &&
5701		    (hdr->b_l2hdr.b_daddr > taddr ||
5702		    hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
5703			/*
5704			 * We've evicted to the target address,
5705			 * or the end of the device.
5706			 */
5707			mutex_exit(hash_lock);
5708			break;
5709		}
5710
5711		ASSERT(HDR_HAS_L2HDR(hdr));
5712		if (!HDR_HAS_L1HDR(hdr)) {
5713			ASSERT(!HDR_L2_READING(hdr));
5714			/*
5715			 * This doesn't exist in the ARC.  Destroy.
5716			 * arc_hdr_destroy() will call list_remove()
5717			 * and decrement arcstat_l2_size.
5718			 */
5719			arc_change_state(arc_anon, hdr, hash_lock);
5720			arc_hdr_destroy(hdr);
5721		} else {
5722			ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
5723			ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
5724			/*
5725			 * Invalidate issued or about to be issued
5726			 * reads, since we may be about to write
5727			 * over this location.
5728			 */
5729			if (HDR_L2_READING(hdr)) {
5730				ARCSTAT_BUMP(arcstat_l2_evict_reading);
5731				hdr->b_flags |= ARC_FLAG_L2_EVICTED;
5732			}
5733
5734			arc_hdr_l2hdr_destroy(hdr);
5735		}
5736		mutex_exit(hash_lock);
5737	}
5738	mutex_exit(&dev->l2ad_mtx);
5739}
5740
5741/*
5742 * Find and write ARC buffers to the L2ARC device.
5743 *
5744 * An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
5745 * for reading until they have completed writing.
5746 * The headroom_boost is an in-out parameter used to maintain headroom boost
5747 * state between calls to this function.
5748 *
5749 * Returns the number of bytes actually written (which may be smaller than
5750 * the delta by which the device hand has changed due to alignment).
5751 */
5752static uint64_t
5753l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
5754    boolean_t *headroom_boost)
5755{
5756	arc_buf_hdr_t *hdr, *hdr_prev, *head;
5757	list_t *list;
5758	uint64_t write_asize, write_sz, headroom, buf_compress_minsz;
5759	void *buf_data;
5760	kmutex_t *list_lock;
5761	boolean_t full;
5762	l2arc_write_callback_t *cb;
5763	zio_t *pio, *wzio;
5764	uint64_t guid = spa_load_guid(spa);
5765	const boolean_t do_headroom_boost = *headroom_boost;
5766	int try;
5767
5768	ASSERT(dev->l2ad_vdev != NULL);
5769
5770	/* Lower the flag now, we might want to raise it again later. */
5771	*headroom_boost = B_FALSE;
5772
5773	pio = NULL;
5774	write_sz = write_asize = 0;
5775	full = B_FALSE;
5776	head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
5777	head->b_flags |= ARC_FLAG_L2_WRITE_HEAD;
5778	head->b_flags |= ARC_FLAG_HAS_L2HDR;
5779
5780	ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
5781	/*
5782	 * We will want to try to compress buffers that are at least 2x the
5783	 * device sector size.
5784	 */
5785	buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
5786
5787	/*
5788	 * Copy buffers for L2ARC writing.
5789	 */
5790	mutex_enter(&dev->l2ad_mtx);
5791	for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
5792		uint64_t passed_sz = 0;
5793
5794		list = l2arc_list_locked(try, &list_lock);
5795		ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
5796
5797		/*
5798		 * L2ARC fast warmup.
5799		 *
5800		 * Until the ARC is warm and starts to evict, read from the
5801		 * head of the ARC lists rather than the tail.
5802		 */
5803		if (arc_warm == B_FALSE)
5804			hdr = list_head(list);
5805		else
5806			hdr = list_tail(list);
5807		if (hdr == NULL)
5808			ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
5809
5810		headroom = target_sz * l2arc_headroom * 2 / ARC_BUFC_NUMLISTS;
5811		if (do_headroom_boost)
5812			headroom = (headroom * l2arc_headroom_boost) / 100;
5813
5814		for (; hdr; hdr = hdr_prev) {
5815			kmutex_t *hash_lock;
5816			uint64_t buf_sz;
5817			uint64_t buf_a_sz;
5818
5819			if (arc_warm == B_FALSE)
5820				hdr_prev = list_next(list, hdr);
5821			else
5822				hdr_prev = list_prev(list, hdr);
5823			ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, hdr->b_size);
5824
5825			hash_lock = HDR_LOCK(hdr);
5826			if (!mutex_tryenter(hash_lock)) {
5827				ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
5828				/*
5829				 * Skip this buffer rather than waiting.
5830				 */
5831				continue;
5832			}
5833
5834			passed_sz += hdr->b_size;
5835			if (passed_sz > headroom) {
5836				/*
5837				 * Searched too far.
5838				 */
5839				mutex_exit(hash_lock);
5840				ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
5841				break;
5842			}
5843
5844			if (!l2arc_write_eligible(guid, hdr)) {
5845				mutex_exit(hash_lock);
5846				continue;
5847			}
5848
5849			/*
5850			 * Assume that the buffer is not going to be compressed
5851			 * and could take more space on disk because of a larger
5852			 * disk block size.
5853			 */
5854			buf_sz = hdr->b_size;
5855			buf_a_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5856
5857			if ((write_asize + buf_a_sz) > target_sz) {
5858				full = B_TRUE;
5859				mutex_exit(hash_lock);
5860				ARCSTAT_BUMP(arcstat_l2_write_full);
5861				break;
5862			}
5863
5864			if (pio == NULL) {
5865				/*
5866				 * Insert a dummy header on the buflist so
5867				 * l2arc_write_done() can find where the
5868				 * write buffers begin without searching.
5869				 */
5870				list_insert_head(&dev->l2ad_buflist, head);
5871
5872				cb = kmem_alloc(
5873				    sizeof (l2arc_write_callback_t), KM_SLEEP);
5874				cb->l2wcb_dev = dev;
5875				cb->l2wcb_head = head;
5876				pio = zio_root(spa, l2arc_write_done, cb,
5877				    ZIO_FLAG_CANFAIL);
5878				ARCSTAT_BUMP(arcstat_l2_write_pios);
5879			}
5880
5881			/*
5882			 * Create and add a new L2ARC header.
5883			 */
5884			hdr->b_l2hdr.b_dev = dev;
5885			hdr->b_flags |= ARC_FLAG_L2_WRITING;
5886			/*
5887			 * Temporarily stash the data buffer in b_tmp_cdata.
5888			 * The subsequent write step will pick it up from
5889			 * there. This is because we can't access b_l1hdr.b_buf
5890			 * without holding the hash_lock, which we in turn
5891			 * can't access without holding the ARC list locks
5892			 * (which we want to avoid during compression/writing).
5893			 */
5894			HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_OFF);
5895			hdr->b_l2hdr.b_asize = hdr->b_size;
5896			hdr->b_l1hdr.b_tmp_cdata = hdr->b_l1hdr.b_buf->b_data;
5897
5898			/*
5899			 * Explicitly set the b_daddr field to a known
5900			 * value which means "invalid address". This
5901			 * enables us to differentiate which stage of
5902			 * l2arc_write_buffers() the particular header
5903			 * is in (e.g. this loop, or the one below).
5904			 * ARC_FLAG_L2_WRITING is not enough to make
5905			 * this distinction, and we need to know in
5906			 * order to do proper l2arc vdev accounting in
5907			 * arc_release() and arc_hdr_destroy().
5908			 *
5909			 * Note, we can't use a new flag to distinguish
5910			 * the two stages because we don't hold the
5911			 * header's hash_lock below, in the second stage
5912			 * of this function. Thus, we can't simply
5913			 * change the b_flags field to denote that the
5914			 * IO has been sent. We can change the b_daddr
5915			 * field of the L2 portion, though, since we'll
5916			 * be holding the l2ad_mtx; which is why we're
5917			 * using it to denote the header's state change.
5918			 */
5919			hdr->b_l2hdr.b_daddr = L2ARC_ADDR_UNSET;
5920			hdr->b_flags |= ARC_FLAG_HAS_L2HDR;
5921
5922			list_insert_head(&dev->l2ad_buflist, hdr);
5923
5924			/*
5925			 * Compute and store the buffer cksum before
5926			 * writing.  On debug the cksum is verified first.
5927			 */
5928			arc_cksum_verify(hdr->b_l1hdr.b_buf);
5929			arc_cksum_compute(hdr->b_l1hdr.b_buf, B_TRUE);
5930
5931			mutex_exit(hash_lock);
5932
5933			write_sz += buf_sz;
5934			write_asize += buf_a_sz;
5935		}
5936
5937		mutex_exit(list_lock);
5938
5939		if (full == B_TRUE)
5940			break;
5941	}
5942
5943	/* No buffers selected for writing? */
5944	if (pio == NULL) {
5945		ASSERT0(write_sz);
5946		mutex_exit(&dev->l2ad_mtx);
5947		ASSERT(!HDR_HAS_L1HDR(head));
5948		kmem_cache_free(hdr_l2only_cache, head);
5949		return (0);
5950	}
5951
5952	/*
5953	 * Note that elsewhere in this file arcstat_l2_asize
5954	 * and the used space on l2ad_vdev are updated using b_asize,
5955	 * which is not necessarily rounded up to the device block size.
5956	 * To keep accounting consistent, we do the same here as well:
5957	 * stats_size accumulates the sum of b_asize of the written buffers,
5958	 * while write_asize accumulates the sum of b_asize rounded up
5959	 * to the device block size.
5960	 * The latter sum is used only to validate the correctness of the code.
5961	 */
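	/*
	 * Example of the difference, assuming a 4K-sector device
	 * (vdev_ashift = 12): a buffer whose b_asize is 2048 bytes adds
	 * 2048 to stats_size (and hence to arcstat_l2_asize and the vdev
	 * space accounting), while the write hand and write_asize advance
	 * by the rounded-up 4096 bytes.
	 */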
5962	uint64_t stats_size = 0;
5963	write_asize = 0;
5964
5965	/*
5966	 * Now start writing the buffers. We're starting at the write head
5967	 * and work backwards, retracing the course of the buffer selector
5968	 * loop above.
5969	 */
5970	for (hdr = list_prev(&dev->l2ad_buflist, head); hdr;
5971	    hdr = list_prev(&dev->l2ad_buflist, hdr)) {
5972		uint64_t buf_sz;
5973
5974		/*
5975		 * We shouldn't need to lock the buffer here, since we flagged
5976		 * it as ARC_FLAG_L2_WRITING in the previous step, but we must
5977		 * take care to only access its L2 cache parameters. In
5978		 * particular, hdr->b_l1hdr.b_buf may be invalid by now due to
5979		 * ARC eviction.
5980		 */
5981		hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
5982
5983		if ((HDR_L2COMPRESS(hdr)) &&
5984		    hdr->b_l2hdr.b_asize >= buf_compress_minsz) {
5985			if (l2arc_compress_buf(hdr)) {
5986				/*
5987				 * If compression succeeded, enable headroom
5988				 * boost on the next scan cycle.
5989				 */
5990				*headroom_boost = B_TRUE;
5991			}
5992		}
5993
5994		/*
5995		 * Pick up the buffer data we had previously stashed away
5996		 * (and now potentially also compressed).
5997		 */
5998		buf_data = hdr->b_l1hdr.b_tmp_cdata;
5999		buf_sz = hdr->b_l2hdr.b_asize;
6000
6001		/*
6002		 * If the data has not been compressed, then clear b_tmp_cdata
6003		 * to make sure that it points only to a temporary compression
6004		 * buffer.
6005		 */
6006		if (!L2ARC_IS_VALID_COMPRESS(HDR_GET_COMPRESS(hdr)))
6007			hdr->b_l1hdr.b_tmp_cdata = NULL;
6008
6009		/*
6010		 * We need to do this regardless of whether buf_sz is zero
6011		 * or not; otherwise, when this l2hdr is evicted we'll
6012		 * remove a reference that was never added.
6013		 */
6014		(void) refcount_add_many(&dev->l2ad_alloc, buf_sz, hdr);
6015
6016		/* Compression may have squashed the buffer to zero length. */
6017		if (buf_sz != 0) {
6018			uint64_t buf_a_sz;
6019
6020			wzio = zio_write_phys(pio, dev->l2ad_vdev,
6021			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
6022			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
6023			    ZIO_FLAG_CANFAIL, B_FALSE);
6024
6025			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
6026			    zio_t *, wzio);
6027			(void) zio_nowait(wzio);
6028
6029			stats_size += buf_sz;
6030
6031			/*
6032			 * Keep the clock hand suitably device-aligned.
6033			 */
6034			buf_a_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
6035			write_asize += buf_a_sz;
6036			dev->l2ad_hand += buf_a_sz;
6037		}
6038	}
6039
6040	mutex_exit(&dev->l2ad_mtx);
6041
6042	ASSERT3U(write_asize, <=, target_sz);
6043	ARCSTAT_BUMP(arcstat_l2_writes_sent);
6044	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
6045	ARCSTAT_INCR(arcstat_l2_size, write_sz);
6046	ARCSTAT_INCR(arcstat_l2_asize, stats_size);
6047	vdev_space_update(dev->l2ad_vdev, stats_size, 0, 0);
6048
6049	/*
6050	 * Bump device hand to the device start if it is approaching the end.
6051	 * l2arc_evict() will already have evicted ahead for this case.
6052	 */
6053	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
6054		dev->l2ad_hand = dev->l2ad_start;
6055		dev->l2ad_first = B_FALSE;
6056	}
6057
6058	dev->l2ad_writing = B_TRUE;
6059	(void) zio_wait(pio);
6060	dev->l2ad_writing = B_FALSE;
6061
6062	return (write_asize);
6063}
6064
6065/*
6066 * Compresses an L2ARC buffer.
6067 * The data to be compressed must be prefilled in b_l1hdr.b_tmp_cdata and its
6068 * size in l2hdr->b_asize. This routine tries to compress the data and
6069 * depending on the compression result there are three possible outcomes:
6070 * *) The buffer was incompressible. The original l2hdr contents were left
6071 *    untouched and are ready for writing to an L2 device.
6072 * *) The buffer was all-zeros, so there is no need to write it to an L2
6073 *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
6074 *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
6075 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
6076 *    data buffer which holds the compressed data to be written, and b_asize
6077 *    tells us how much data there is. b_compress is set to the appropriate
6078 *    compression algorithm. Once writing is done, invoke
6079 *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
6080 *
6081 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
6082 * buffer was incompressible).
6083 */
6084static boolean_t
6085l2arc_compress_buf(arc_buf_hdr_t *hdr)
6086{
6087	void *cdata;
6088	size_t csize, len, rounded;
6089	ASSERT(HDR_HAS_L2HDR(hdr));
6090	l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
6091
6092	ASSERT(HDR_HAS_L1HDR(hdr));
6093	ASSERT(HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF);
6094	ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
6095
6096	len = l2hdr->b_asize;
6097	cdata = zio_data_buf_alloc(len);
6098	ASSERT3P(cdata, !=, NULL);
6099	csize = zio_compress_data(ZIO_COMPRESS_LZ4, hdr->b_l1hdr.b_tmp_cdata,
6100	    cdata, l2hdr->b_asize);
6101
6102	if (csize == 0) {
6103		/* zero block, indicate that there's nothing to write */
6104		zio_data_buf_free(cdata, len);
6105		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_EMPTY);
6106		l2hdr->b_asize = 0;
6107		hdr->b_l1hdr.b_tmp_cdata = NULL;
6108		ARCSTAT_BUMP(arcstat_l2_compress_zeros);
6109		return (B_TRUE);
6110	}
6111
6112	rounded = P2ROUNDUP(csize,
6113	    (size_t)1 << l2hdr->b_dev->l2ad_vdev->vdev_ashift);
6114	if (rounded < len) {
6115		/*
6116		 * Compression succeeded, we'll keep the cdata around for
6117		 * writing and release it afterwards.
6118		 */
6119		if (rounded > csize) {
6120			bzero((char *)cdata + csize, rounded - csize);
6121			csize = rounded;
6122		}
6123		HDR_SET_COMPRESS(hdr, ZIO_COMPRESS_LZ4);
6124		l2hdr->b_asize = csize;
6125		hdr->b_l1hdr.b_tmp_cdata = cdata;
6126		ARCSTAT_BUMP(arcstat_l2_compress_successes);
6127		return (B_TRUE);
6128	} else {
6129		/*
6130		 * Compression failed, release the compressed buffer.
6131		 * l2hdr will be left unmodified.
6132		 */
6133		zio_data_buf_free(cdata, len);
6134		ARCSTAT_BUMP(arcstat_l2_compress_failures);
6135		return (B_FALSE);
6136	}
6137}
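
/*
 * Worked example for l2arc_compress_buf(), assuming a 4K-sector device
 * (vdev_ashift = 12): a 16384-byte buffer that LZ4 compresses to 5000
 * bytes is rounded up to 8192 (the bytes from 5000 to 8192 are zero
 * filled), b_asize becomes 8192 and b_compress is set to
 * ZIO_COMPRESS_LZ4.  Had it compressed to only 15000 bytes, the rounded
 * size (16384) would not beat the original length and the buffer would
 * be left to be written uncompressed.
 */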
6138
6139/*
6140 * Decompresses a zio read back from an l2arc device. On success, the
6141 * underlying zio's io_data buffer is overwritten by the uncompressed
6142 * version. On decompression error (corrupt compressed stream), the
6143 * zio->io_error value is set to signal an I/O error.
6144 *
6145 * Please note that the compressed data stream is not checksummed, so
6146 * if the underlying device is experiencing data corruption, we may feed
6147 * corrupt data to the decompressor, so the decompressor needs to be
6148 * able to handle this situation (LZ4 does).
6149 */
6150static void
6151l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
6152{
6153	ASSERT(L2ARC_IS_VALID_COMPRESS(c));
6154
6155	if (zio->io_error != 0) {
6156		/*
6157		 * An io error has occurred; just restore the original io
6158		 * size in preparation for a main pool read.
6159		 */
6160		zio->io_orig_size = zio->io_size = hdr->b_size;
6161		return;
6162	}
6163
6164	if (c == ZIO_COMPRESS_EMPTY) {
6165		/*
6166		 * An empty buffer results in a null zio, which means we
6167		 * need to fill its io_data after we're done restoring the
6168		 * buffer's contents.
6169		 */
6170		ASSERT(hdr->b_l1hdr.b_buf != NULL);
6171		bzero(hdr->b_l1hdr.b_buf->b_data, hdr->b_size);
6172		zio->io_data = zio->io_orig_data = hdr->b_l1hdr.b_buf->b_data;
6173	} else {
6174		ASSERT(zio->io_data != NULL);
6175		/*
6176		 * We copy the compressed data from the start of the arc buffer
6177		 * (the zio_read will have pulled in only what we need, the
6178		 * rest is garbage which we will overwrite at decompression)
6179		 * and then decompress back to the ARC data buffer. This way we
6180		 * can minimize copying by simply decompressing back over the
6181		 * original compressed data (rather than decompressing to an
6182		 * aux buffer and then copying back the uncompressed buffer,
6183		 * which is likely to be much larger).
6184		 */
6185		uint64_t csize;
6186		void *cdata;
6187
6188		csize = zio->io_size;
6189		cdata = zio_data_buf_alloc(csize);
6190		bcopy(zio->io_data, cdata, csize);
6191		if (zio_decompress_data(c, cdata, zio->io_data, csize,
6192		    hdr->b_size) != 0)
6193			zio->io_error = EIO;
6194		zio_data_buf_free(cdata, csize);
6195	}
6196
6197	/* Restore the expected uncompressed IO size. */
6198	zio->io_orig_size = zio->io_size = hdr->b_size;
6199}
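
/*
 * Worked example for l2arc_decompress_zio(), assuming a 16K logical
 * buffer whose L2ARC copy compressed to 8K: the L2ARC read returns with
 * zio->io_size == 8192, those bytes are copied into a scratch buffer,
 * decompressed back over zio->io_data, and io_size/io_orig_size are
 * restored to the 16384-byte b_size.
 */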
6200
6201/*
6202 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
6203 * This buffer serves as a temporary holder of compressed data while
6204 * the buffer entry is being written to an l2arc device. Once that is
6205 * done, we can dispose of it.
6206 */
6207static void
6208l2arc_release_cdata_buf(arc_buf_hdr_t *hdr)
6209{
6210	ASSERT(HDR_HAS_L1HDR(hdr));
6211	if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_EMPTY) {
6212		/*
6213		 * If the data was compressed, then we've allocated a
6214		 * temporary buffer for it, so now we need to release it.
6215		 */
6216		ASSERT(hdr->b_l1hdr.b_tmp_cdata != NULL);
6217		zio_data_buf_free(hdr->b_l1hdr.b_tmp_cdata,
6218		    hdr->b_size);
6219		hdr->b_l1hdr.b_tmp_cdata = NULL;
6220	} else {
6221		ASSERT(hdr->b_l1hdr.b_tmp_cdata == NULL);
6222	}
6223}
6224
6225/*
6226 * This thread feeds the L2ARC at regular intervals.  This is the beating
6227 * heart of the L2ARC.
6228 */
6229static void
6230l2arc_feed_thread(void *dummy __unused)
6231{
6232	callb_cpr_t cpr;
6233	l2arc_dev_t *dev;
6234	spa_t *spa;
6235	uint64_t size, wrote;
6236	clock_t begin, next = ddi_get_lbolt();
6237	boolean_t headroom_boost = B_FALSE;
6238
6239	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
6240
6241	mutex_enter(&l2arc_feed_thr_lock);
6242
6243	while (l2arc_thread_exit == 0) {
6244		CALLB_CPR_SAFE_BEGIN(&cpr);
6245		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
6246		    next - ddi_get_lbolt());
6247		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
6248		next = ddi_get_lbolt() + hz;
6249
6250		/*
6251		 * Quick check for L2ARC devices.
6252		 */
6253		mutex_enter(&l2arc_dev_mtx);
6254		if (l2arc_ndev == 0) {
6255			mutex_exit(&l2arc_dev_mtx);
6256			continue;
6257		}
6258		mutex_exit(&l2arc_dev_mtx);
6259		begin = ddi_get_lbolt();
6260
6261		/*
6262		 * This selects the next l2arc device to write to, and in
6263		 * doing so the next spa to feed from: dev->l2ad_spa.   This
6264		 * will return NULL if there are now no l2arc devices or if
6265		 * they are all faulted.
6266		 *
6267		 * If a device is returned, its spa's config lock is also
6268		 * held to prevent device removal.  l2arc_dev_get_next()
6269		 * will grab and release l2arc_dev_mtx.
6270		 */
6271		if ((dev = l2arc_dev_get_next()) == NULL)
6272			continue;
6273
6274		spa = dev->l2ad_spa;
6275		ASSERT(spa != NULL);
6276
6277		/*
6278		 * If the pool is read-only then force the feed thread to
6279		 * sleep a little longer.
6280		 */
6281		if (!spa_writeable(spa)) {
6282			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
6283			spa_config_exit(spa, SCL_L2ARC, dev);
6284			continue;
6285		}
6286
6287		/*
6288		 * Avoid contributing to memory pressure.
6289		 */
6290		if (arc_reclaim_needed()) {
6291			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
6292			spa_config_exit(spa, SCL_L2ARC, dev);
6293			continue;
6294		}
6295
6296		ARCSTAT_BUMP(arcstat_l2_feeds);
6297
6298		size = l2arc_write_size();
6299
6300		/*
6301		 * Evict L2ARC buffers that will be overwritten.
6302		 */
6303		l2arc_evict(dev, size, B_FALSE);
6304
6305		/*
6306		 * Write ARC buffers.
6307		 */
6308		wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
6309
6310		/*
6311		 * Calculate interval between writes.
6312		 */
6313		next = l2arc_write_interval(begin, size, wrote);
6314		spa_config_exit(spa, SCL_L2ARC, dev);
6315	}
6316
6317	l2arc_thread_exit = 0;
6318	cv_broadcast(&l2arc_feed_thr_cv);
6319	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
6320	thread_exit();
6321}
6322
6323boolean_t
6324l2arc_vdev_present(vdev_t *vd)
6325{
6326	l2arc_dev_t *dev;
6327
6328	mutex_enter(&l2arc_dev_mtx);
6329	for (dev = list_head(l2arc_dev_list); dev != NULL;
6330	    dev = list_next(l2arc_dev_list, dev)) {
6331		if (dev->l2ad_vdev == vd)
6332			break;
6333	}
6334	mutex_exit(&l2arc_dev_mtx);
6335
6336	return (dev != NULL);
6337}
6338
6339/*
6340 * Add a vdev for use by the L2ARC.  By this point the spa has already
6341 * validated the vdev and opened it.
6342 */
6343void
6344l2arc_add_vdev(spa_t *spa, vdev_t *vd)
6345{
6346	l2arc_dev_t *adddev;
6347
6348	ASSERT(!l2arc_vdev_present(vd));
6349
6350	vdev_ashift_optimize(vd);
6351
6352	/*
6353	 * Create a new l2arc device entry.
6354	 */
6355	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
6356	adddev->l2ad_spa = spa;
6357	adddev->l2ad_vdev = vd;
6358	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
6359	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
6360	adddev->l2ad_hand = adddev->l2ad_start;
6361	adddev->l2ad_first = B_TRUE;
6362	adddev->l2ad_writing = B_FALSE;
6363
6364	mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
6365	/*
6366	 * This is a list of all ARC buffers that are still valid on the
6367	 * device.
6368	 */
6369	list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
6370	    offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
6371
6372	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
6373	refcount_create(&adddev->l2ad_alloc);
6374
6375	/*
6376	 * Add device to global list
6377	 */
6378	mutex_enter(&l2arc_dev_mtx);
6379	list_insert_head(l2arc_dev_list, adddev);
6380	atomic_inc_64(&l2arc_ndev);
6381	mutex_exit(&l2arc_dev_mtx);
6382}
6383
6384/*
6385 * Remove a vdev from the L2ARC.
6386 */
6387void
6388l2arc_remove_vdev(vdev_t *vd)
6389{
6390	l2arc_dev_t *dev, *nextdev, *remdev = NULL;
6391
6392	/*
6393	 * Find the device by vdev
6394	 */
6395	mutex_enter(&l2arc_dev_mtx);
6396	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
6397		nextdev = list_next(l2arc_dev_list, dev);
6398		if (vd == dev->l2ad_vdev) {
6399			remdev = dev;
6400			break;
6401		}
6402	}
6403	ASSERT(remdev != NULL);
6404
6405	/*
6406	 * Remove device from global list
6407	 */
6408	list_remove(l2arc_dev_list, remdev);
6409	l2arc_dev_last = NULL;		/* may have been invalidated */
6410	atomic_dec_64(&l2arc_ndev);
6411	mutex_exit(&l2arc_dev_mtx);
6412
6413	/*
6414	 * Clear all buflists and ARC references.  L2ARC device flush.
6415	 */
6416	l2arc_evict(remdev, 0, B_TRUE);
6417	list_destroy(&remdev->l2ad_buflist);
6418	mutex_destroy(&remdev->l2ad_mtx);
6419	refcount_destroy(&remdev->l2ad_alloc);
6420	kmem_free(remdev, sizeof (l2arc_dev_t));
6421}
6422
6423void
6424l2arc_init(void)
6425{
6426	l2arc_thread_exit = 0;
6427	l2arc_ndev = 0;
6428	l2arc_writes_sent = 0;
6429	l2arc_writes_done = 0;
6430
6431	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
6432	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
6433	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
6434	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
6435
6436	l2arc_dev_list = &L2ARC_dev_list;
6437	l2arc_free_on_write = &L2ARC_free_on_write;
6438	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
6439	    offsetof(l2arc_dev_t, l2ad_node));
6440	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
6441	    offsetof(l2arc_data_free_t, l2df_list_node));
6442}
6443
6444void
6445l2arc_fini(void)
6446{
6447	/*
6448	 * This is called from dmu_fini(), which is called from spa_fini().
6449	 * Because of this, we can assume that all l2arc devices have
6450	 * already been removed when the pools themselves were removed.
6451	 */
6452
6453	l2arc_do_free_on_write();
6454
6455	mutex_destroy(&l2arc_feed_thr_lock);
6456	cv_destroy(&l2arc_feed_thr_cv);
6457	mutex_destroy(&l2arc_dev_mtx);
6458	mutex_destroy(&l2arc_free_on_write_mtx);
6459
6460	list_destroy(l2arc_dev_list);
6461	list_destroy(l2arc_free_on_write);
6462}
6463
6464void
6465l2arc_start(void)
6466{
6467	if (!(spa_mode_global & FWRITE))
6468		return;
6469
6470	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
6471	    TS_RUN, minclsyspri);
6472}
6473
6474void
6475l2arc_stop(void)
6476{
6477	if (!(spa_mode_global & FWRITE))
6478		return;
6479
6480	mutex_enter(&l2arc_feed_thr_lock);
6481	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
6482	l2arc_thread_exit = 1;
6483	while (l2arc_thread_exit != 0)
6484		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
6485	mutex_exit(&l2arc_feed_thr_lock);
6486}
6487