arc.c revision 273191
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
24 * Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
25 * Copyright 2014 Nexenta Systems, Inc.  All rights reserved.
26 */
27
28/*
29 * DVA-based Adjustable Replacement Cache
30 *
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some
34 * significant differences:
35 *
36 * 1. The Megiddo and Modha model assumes any page is evictable.
37 * Pages in its cache cannot be "locked" into memory.  This makes
38 * the eviction algorithm simple: evict the last page in the list.
39 * This also make the performance characteristics easy to reason
40 * about.  Our cache is not so simple.  At any given moment, some
41 * subset of the blocks in the cache are un-evictable because we
42 * have handed out a reference to them.  Blocks are only evictable
43 * when there are no external references active.  This makes
44 * eviction far more problematic:  we choose to evict the evictable
45 * blocks that are the "lowest" in the list.
46 *
47 * There are times when it is not possible to evict the requested
48 * space.  In these circumstances we are unable to adjust the cache
49 * size.  To prevent the cache growing unbounded at these times we
50 * implement a "cache throttle" that slows the flow of new data
51 * into the cache until we can make space available.
52 *
53 * 2. The Megiddo and Modha model assumes a fixed cache size.
54 * Pages are evicted when the cache is full and there is a cache
55 * miss.  Our model has a variable sized cache.  It grows with
56 * high use, but also tries to react to memory pressure from the
57 * operating system: decreasing its size when system memory is
58 * tight.
59 *
60 * 3. The Megiddo and Modha model assumes a fixed page size. All
61 * elements of the cache are therefore exactly the same size.  So
62 * when adjusting the cache size following a cache miss, its simply
63 * a matter of choosing a single page to evict.  In our model, we
64 * have variable sized cache blocks (rangeing from 512 bytes to
65 * 128K bytes).  We therefore choose a set of blocks to evict to make
66 * space for a cache miss that approximates as closely as possible
67 * the space used by the new block.
68 *
69 * See also:  "ARC: A Self-Tuning, Low Overhead Replacement Cache"
70 * by N. Megiddo & D. Modha, FAST 2003
71 */
72
73/*
74 * The locking model:
75 *
76 * A new reference to a cache buffer can be obtained in two
77 * ways: 1) via a hash table lookup using the DVA as a key,
78 * or 2) via one of the ARC lists.  The arc_read() interface
79 * uses method 1, while the internal arc algorithms for
80 * adjusting the cache use method 2.  We therefore provide two
81 * types of locks: 1) the hash table lock array, and 2) the
82 * arc list locks.
83 *
84 * Buffers do not have their own mutexs, rather they rely on the
85 * hash table mutexs for the bulk of their protection (i.e. most
86 * fields in the arc_buf_hdr_t are protected by these mutexs).
87 *
88 * buf_hash_find() returns the appropriate mutex (held) when it
89 * locates the requested buffer in the hash table.  It returns
90 * NULL for the mutex if the buffer was not in the table.
91 *
92 * buf_hash_remove() expects the appropriate hash mutex to be
93 * already held before it is invoked.
94 *
95 * Each arc state also has a mutex which is used to protect the
96 * buffer list associated with the state.  When attempting to
97 * obtain a hash table lock while holding an arc list lock you
98 * must use: mutex_tryenter() to avoid deadlock.  Also note that
99 * the active state mutex must be held before the ghost state mutex.
100 *
101 * Arc buffers may have an associated eviction callback function.
102 * This function will be invoked prior to removing the buffer (e.g.
103 * in arc_do_user_evicts()).  Note however that the data associated
104 * with the buffer may be evicted prior to the callback.  The callback
105 * must be made with *no locks held* (to prevent deadlock).  Additionally,
106 * the users of callbacks must ensure that their private data is
107 * protected from simultaneous callbacks from arc_clear_callback()
108 * and arc_do_user_evicts().
109 *
110 * Note that the majority of the performance stats are manipulated
111 * with atomic operations.
112 *
113 * The L2ARC uses the l2arc_buflist_mtx global mutex for the following:
114 *
115 *	- L2ARC buflist creation
116 *	- L2ARC buflist eviction
117 *	- L2ARC write completion, which walks L2ARC buflists
118 *	- ARC header destruction, as it removes from L2ARC buflists
119 *	- ARC header release, as it removes from L2ARC buflists
120 */
121
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#include <sys/dsl_pool.h>
131#ifdef _KERNEL
132#include <sys/dnlc.h>
133#endif
134#include <sys/callb.h>
135#include <sys/kstat.h>
136#include <sys/trim_map.h>
137#include <zfs_fletcher.h>
138#include <sys/sdt.h>
139
140#include <vm/vm_pageout.h>
141#include <machine/vmparam.h>
142
143#ifdef illumos
144#ifndef _KERNEL
145/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
146boolean_t arc_watch = B_FALSE;
147int arc_procfd;
148#endif
149#endif /* illumos */
150
151static kmutex_t		arc_reclaim_thr_lock;
152static kcondvar_t	arc_reclaim_thr_cv;	/* used to signal reclaim thr */
153static uint8_t		arc_thread_exit;
154
155#define	ARC_REDUCE_DNLC_PERCENT	3
156uint_t arc_reduce_dnlc_percent = ARC_REDUCE_DNLC_PERCENT;
157
158typedef enum arc_reclaim_strategy {
159	ARC_RECLAIM_AGGR,		/* Aggressive reclaim strategy */
160	ARC_RECLAIM_CONS		/* Conservative reclaim strategy */
161} arc_reclaim_strategy_t;
162
163/*
164 * The number of iterations through arc_evict_*() before we
165 * drop & reacquire the lock.
166 */
167int arc_evict_iterations = 100;
168
169/* number of seconds before growing cache again */
170static int		arc_grow_retry = 60;
171
172/* shift of arc_c for calculating both min and max arc_p */
173static int		arc_p_min_shift = 4;
174
175/* log2(fraction of arc to reclaim) */
176static int		arc_shrink_shift = 5;
177
178/*
179 * minimum lifespan of a prefetch block in clock ticks
180 * (initialized in arc_init())
181 */
182static int		arc_min_prefetch_lifespan;
183
184/*
185 * If this percent of memory is free, don't throttle.
186 */
187int arc_lotsfree_percent = 10;
188
189static int arc_dead;
190extern int zfs_prefetch_disable;
191
192/*
193 * The arc has filled available memory and has now warmed up.
194 */
195static boolean_t arc_warm;
196
197uint64_t zfs_arc_max;
198uint64_t zfs_arc_min;
199uint64_t zfs_arc_meta_limit = 0;
200int zfs_arc_grow_retry = 0;
201int zfs_arc_shrink_shift = 0;
202int zfs_arc_p_min_shift = 0;
203int zfs_disable_dup_eviction = 0;
204uint64_t zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
205u_int zfs_arc_free_target = 0;
206
207static int sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS);
208
209#ifdef _KERNEL
210static void
211arc_free_target_init(void *unused __unused)
212{
213
214	zfs_arc_free_target = vm_pageout_wakeup_thresh;
215}
216SYSINIT(arc_free_target_init, SI_SUB_KTHREAD_PAGE, SI_ORDER_ANY,
217    arc_free_target_init, NULL);
218
219TUNABLE_QUAD("vfs.zfs.arc_max", &zfs_arc_max);
220TUNABLE_QUAD("vfs.zfs.arc_min", &zfs_arc_min);
221TUNABLE_QUAD("vfs.zfs.arc_meta_limit", &zfs_arc_meta_limit);
222TUNABLE_QUAD("vfs.zfs.arc_average_blocksize", &zfs_arc_average_blocksize);
223SYSCTL_DECL(_vfs_zfs);
224SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_max, CTLFLAG_RDTUN, &zfs_arc_max, 0,
225    "Maximum ARC size");
226SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_min, CTLFLAG_RDTUN, &zfs_arc_min, 0,
227    "Minimum ARC size");
228SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_average_blocksize, CTLFLAG_RDTUN,
229    &zfs_arc_average_blocksize, 0,
230    "ARC average blocksize");
231/*
232 * We don't have a tunable for arc_free_target due to the dependency on
233 * pagedaemon initialisation.
234 */
235SYSCTL_PROC(_vfs_zfs, OID_AUTO, arc_free_target,
236    CTLTYPE_UINT | CTLFLAG_MPSAFE | CTLFLAG_RW, 0, sizeof(u_int),
237    sysctl_vfs_zfs_arc_free_target, "IU",
238    "Desired number of free pages below which ARC triggers reclaim");
239
240static int
241sysctl_vfs_zfs_arc_free_target(SYSCTL_HANDLER_ARGS)
242{
243	u_int val;
244	int err;
245
246	val = zfs_arc_free_target;
247	err = sysctl_handle_int(oidp, &val, 0, req);
248	if (err != 0 || req->newptr == NULL)
249		return (err);
250
251	if (val < minfree)
252		return (EINVAL);
253	if (val > cnt.v_page_count)
254		return (EINVAL);
255
256	zfs_arc_free_target = val;
257
258	return (0);
259}
260#endif
261
262/*
263 * Note that buffers can be in one of 6 states:
264 *	ARC_anon	- anonymous (discussed below)
265 *	ARC_mru		- recently used, currently cached
266 *	ARC_mru_ghost	- recentely used, no longer in cache
267 *	ARC_mfu		- frequently used, currently cached
268 *	ARC_mfu_ghost	- frequently used, no longer in cache
269 *	ARC_l2c_only	- exists in L2ARC but not other states
270 * When there are no active references to the buffer, they are
271 * are linked onto a list in one of these arc states.  These are
272 * the only buffers that can be evicted or deleted.  Within each
273 * state there are multiple lists, one for meta-data and one for
274 * non-meta-data.  Meta-data (indirect blocks, blocks of dnodes,
275 * etc.) is tracked separately so that it can be managed more
276 * explicitly: favored over data, limited explicitly.
277 *
278 * Anonymous buffers are buffers that are not associated with
279 * a DVA.  These are buffers that hold dirty block copies
280 * before they are written to stable storage.  By definition,
281 * they are "ref'd" and are considered part of arc_mru
282 * that cannot be freed.  Generally, they will aquire a DVA
283 * as they are written and migrate onto the arc_mru list.
284 *
285 * The ARC_l2c_only state is for buffers that are in the second
286 * level ARC but no longer in any of the ARC_m* lists.  The second
287 * level ARC itself may also contain buffers that are in any of
288 * the ARC_m* states - meaning that a buffer can exist in two
289 * places.  The reason for the ARC_l2c_only state is to keep the
290 * buffer header in the hash table, so that reads that hit the
291 * second level ARC benefit from these fast lookups.
292 */
293
294#define	ARCS_LOCK_PAD		CACHE_LINE_SIZE
295struct arcs_lock {
296	kmutex_t	arcs_lock;
297#ifdef _KERNEL
298	unsigned char	pad[(ARCS_LOCK_PAD - sizeof (kmutex_t))];
299#endif
300};
301
302/*
303 * must be power of two for mask use to work
304 *
305 */
306#define ARC_BUFC_NUMDATALISTS		16
307#define ARC_BUFC_NUMMETADATALISTS	16
308#define ARC_BUFC_NUMLISTS	(ARC_BUFC_NUMMETADATALISTS + ARC_BUFC_NUMDATALISTS)
309
310typedef struct arc_state {
311	uint64_t arcs_lsize[ARC_BUFC_NUMTYPES];	/* amount of evictable data */
312	uint64_t arcs_size;	/* total amount of data in this state */
313	list_t	arcs_lists[ARC_BUFC_NUMLISTS]; /* list of evictable buffers */
314	struct arcs_lock arcs_locks[ARC_BUFC_NUMLISTS] __aligned(CACHE_LINE_SIZE);
315} arc_state_t;
316
317#define ARCS_LOCK(s, i)	(&((s)->arcs_locks[(i)].arcs_lock))
318
319/* The 6 states: */
320static arc_state_t ARC_anon;
321static arc_state_t ARC_mru;
322static arc_state_t ARC_mru_ghost;
323static arc_state_t ARC_mfu;
324static arc_state_t ARC_mfu_ghost;
325static arc_state_t ARC_l2c_only;
326
327typedef struct arc_stats {
328	kstat_named_t arcstat_hits;
329	kstat_named_t arcstat_misses;
330	kstat_named_t arcstat_demand_data_hits;
331	kstat_named_t arcstat_demand_data_misses;
332	kstat_named_t arcstat_demand_metadata_hits;
333	kstat_named_t arcstat_demand_metadata_misses;
334	kstat_named_t arcstat_prefetch_data_hits;
335	kstat_named_t arcstat_prefetch_data_misses;
336	kstat_named_t arcstat_prefetch_metadata_hits;
337	kstat_named_t arcstat_prefetch_metadata_misses;
338	kstat_named_t arcstat_mru_hits;
339	kstat_named_t arcstat_mru_ghost_hits;
340	kstat_named_t arcstat_mfu_hits;
341	kstat_named_t arcstat_mfu_ghost_hits;
342	kstat_named_t arcstat_allocated;
343	kstat_named_t arcstat_deleted;
344	kstat_named_t arcstat_stolen;
345	kstat_named_t arcstat_recycle_miss;
346	/*
347	 * Number of buffers that could not be evicted because the hash lock
348	 * was held by another thread.  The lock may not necessarily be held
349	 * by something using the same buffer, since hash locks are shared
350	 * by multiple buffers.
351	 */
352	kstat_named_t arcstat_mutex_miss;
353	/*
354	 * Number of buffers skipped because they have I/O in progress, are
355	 * indrect prefetch buffers that have not lived long enough, or are
356	 * not from the spa we're trying to evict from.
357	 */
358	kstat_named_t arcstat_evict_skip;
359	kstat_named_t arcstat_evict_l2_cached;
360	kstat_named_t arcstat_evict_l2_eligible;
361	kstat_named_t arcstat_evict_l2_ineligible;
362	kstat_named_t arcstat_hash_elements;
363	kstat_named_t arcstat_hash_elements_max;
364	kstat_named_t arcstat_hash_collisions;
365	kstat_named_t arcstat_hash_chains;
366	kstat_named_t arcstat_hash_chain_max;
367	kstat_named_t arcstat_p;
368	kstat_named_t arcstat_c;
369	kstat_named_t arcstat_c_min;
370	kstat_named_t arcstat_c_max;
371	kstat_named_t arcstat_size;
372	kstat_named_t arcstat_hdr_size;
373	kstat_named_t arcstat_data_size;
374	kstat_named_t arcstat_other_size;
375	kstat_named_t arcstat_l2_hits;
376	kstat_named_t arcstat_l2_misses;
377	kstat_named_t arcstat_l2_feeds;
378	kstat_named_t arcstat_l2_rw_clash;
379	kstat_named_t arcstat_l2_read_bytes;
380	kstat_named_t arcstat_l2_write_bytes;
381	kstat_named_t arcstat_l2_writes_sent;
382	kstat_named_t arcstat_l2_writes_done;
383	kstat_named_t arcstat_l2_writes_error;
384	kstat_named_t arcstat_l2_writes_hdr_miss;
385	kstat_named_t arcstat_l2_evict_lock_retry;
386	kstat_named_t arcstat_l2_evict_reading;
387	kstat_named_t arcstat_l2_free_on_write;
388	kstat_named_t arcstat_l2_abort_lowmem;
389	kstat_named_t arcstat_l2_cksum_bad;
390	kstat_named_t arcstat_l2_io_error;
391	kstat_named_t arcstat_l2_size;
392	kstat_named_t arcstat_l2_asize;
393	kstat_named_t arcstat_l2_hdr_size;
394	kstat_named_t arcstat_l2_compress_successes;
395	kstat_named_t arcstat_l2_compress_zeros;
396	kstat_named_t arcstat_l2_compress_failures;
397	kstat_named_t arcstat_l2_write_trylock_fail;
398	kstat_named_t arcstat_l2_write_passed_headroom;
399	kstat_named_t arcstat_l2_write_spa_mismatch;
400	kstat_named_t arcstat_l2_write_in_l2;
401	kstat_named_t arcstat_l2_write_hdr_io_in_progress;
402	kstat_named_t arcstat_l2_write_not_cacheable;
403	kstat_named_t arcstat_l2_write_full;
404	kstat_named_t arcstat_l2_write_buffer_iter;
405	kstat_named_t arcstat_l2_write_pios;
406	kstat_named_t arcstat_l2_write_buffer_bytes_scanned;
407	kstat_named_t arcstat_l2_write_buffer_list_iter;
408	kstat_named_t arcstat_l2_write_buffer_list_null_iter;
409	kstat_named_t arcstat_memory_throttle_count;
410	kstat_named_t arcstat_duplicate_buffers;
411	kstat_named_t arcstat_duplicate_buffers_size;
412	kstat_named_t arcstat_duplicate_reads;
413} arc_stats_t;
414
415static arc_stats_t arc_stats = {
416	{ "hits",			KSTAT_DATA_UINT64 },
417	{ "misses",			KSTAT_DATA_UINT64 },
418	{ "demand_data_hits",		KSTAT_DATA_UINT64 },
419	{ "demand_data_misses",		KSTAT_DATA_UINT64 },
420	{ "demand_metadata_hits",	KSTAT_DATA_UINT64 },
421	{ "demand_metadata_misses",	KSTAT_DATA_UINT64 },
422	{ "prefetch_data_hits",		KSTAT_DATA_UINT64 },
423	{ "prefetch_data_misses",	KSTAT_DATA_UINT64 },
424	{ "prefetch_metadata_hits",	KSTAT_DATA_UINT64 },
425	{ "prefetch_metadata_misses",	KSTAT_DATA_UINT64 },
426	{ "mru_hits",			KSTAT_DATA_UINT64 },
427	{ "mru_ghost_hits",		KSTAT_DATA_UINT64 },
428	{ "mfu_hits",			KSTAT_DATA_UINT64 },
429	{ "mfu_ghost_hits",		KSTAT_DATA_UINT64 },
430	{ "allocated",			KSTAT_DATA_UINT64 },
431	{ "deleted",			KSTAT_DATA_UINT64 },
432	{ "stolen",			KSTAT_DATA_UINT64 },
433	{ "recycle_miss",		KSTAT_DATA_UINT64 },
434	{ "mutex_miss",			KSTAT_DATA_UINT64 },
435	{ "evict_skip",			KSTAT_DATA_UINT64 },
436	{ "evict_l2_cached",		KSTAT_DATA_UINT64 },
437	{ "evict_l2_eligible",		KSTAT_DATA_UINT64 },
438	{ "evict_l2_ineligible",	KSTAT_DATA_UINT64 },
439	{ "hash_elements",		KSTAT_DATA_UINT64 },
440	{ "hash_elements_max",		KSTAT_DATA_UINT64 },
441	{ "hash_collisions",		KSTAT_DATA_UINT64 },
442	{ "hash_chains",		KSTAT_DATA_UINT64 },
443	{ "hash_chain_max",		KSTAT_DATA_UINT64 },
444	{ "p",				KSTAT_DATA_UINT64 },
445	{ "c",				KSTAT_DATA_UINT64 },
446	{ "c_min",			KSTAT_DATA_UINT64 },
447	{ "c_max",			KSTAT_DATA_UINT64 },
448	{ "size",			KSTAT_DATA_UINT64 },
449	{ "hdr_size",			KSTAT_DATA_UINT64 },
450	{ "data_size",			KSTAT_DATA_UINT64 },
451	{ "other_size",			KSTAT_DATA_UINT64 },
452	{ "l2_hits",			KSTAT_DATA_UINT64 },
453	{ "l2_misses",			KSTAT_DATA_UINT64 },
454	{ "l2_feeds",			KSTAT_DATA_UINT64 },
455	{ "l2_rw_clash",		KSTAT_DATA_UINT64 },
456	{ "l2_read_bytes",		KSTAT_DATA_UINT64 },
457	{ "l2_write_bytes",		KSTAT_DATA_UINT64 },
458	{ "l2_writes_sent",		KSTAT_DATA_UINT64 },
459	{ "l2_writes_done",		KSTAT_DATA_UINT64 },
460	{ "l2_writes_error",		KSTAT_DATA_UINT64 },
461	{ "l2_writes_hdr_miss",		KSTAT_DATA_UINT64 },
462	{ "l2_evict_lock_retry",	KSTAT_DATA_UINT64 },
463	{ "l2_evict_reading",		KSTAT_DATA_UINT64 },
464	{ "l2_free_on_write",		KSTAT_DATA_UINT64 },
465	{ "l2_abort_lowmem",		KSTAT_DATA_UINT64 },
466	{ "l2_cksum_bad",		KSTAT_DATA_UINT64 },
467	{ "l2_io_error",		KSTAT_DATA_UINT64 },
468	{ "l2_size",			KSTAT_DATA_UINT64 },
469	{ "l2_asize",			KSTAT_DATA_UINT64 },
470	{ "l2_hdr_size",		KSTAT_DATA_UINT64 },
471	{ "l2_compress_successes",	KSTAT_DATA_UINT64 },
472	{ "l2_compress_zeros",		KSTAT_DATA_UINT64 },
473	{ "l2_compress_failures",	KSTAT_DATA_UINT64 },
474	{ "l2_write_trylock_fail",	KSTAT_DATA_UINT64 },
475	{ "l2_write_passed_headroom",	KSTAT_DATA_UINT64 },
476	{ "l2_write_spa_mismatch",	KSTAT_DATA_UINT64 },
477	{ "l2_write_in_l2",		KSTAT_DATA_UINT64 },
478	{ "l2_write_io_in_progress",	KSTAT_DATA_UINT64 },
479	{ "l2_write_not_cacheable",	KSTAT_DATA_UINT64 },
480	{ "l2_write_full",		KSTAT_DATA_UINT64 },
481	{ "l2_write_buffer_iter",	KSTAT_DATA_UINT64 },
482	{ "l2_write_pios",		KSTAT_DATA_UINT64 },
483	{ "l2_write_buffer_bytes_scanned", KSTAT_DATA_UINT64 },
484	{ "l2_write_buffer_list_iter",	KSTAT_DATA_UINT64 },
485	{ "l2_write_buffer_list_null_iter", KSTAT_DATA_UINT64 },
486	{ "memory_throttle_count",	KSTAT_DATA_UINT64 },
487	{ "duplicate_buffers",		KSTAT_DATA_UINT64 },
488	{ "duplicate_buffers_size",	KSTAT_DATA_UINT64 },
489	{ "duplicate_reads",		KSTAT_DATA_UINT64 }
490};
491
492#define	ARCSTAT(stat)	(arc_stats.stat.value.ui64)
493
494#define	ARCSTAT_INCR(stat, val) \
495	atomic_add_64(&arc_stats.stat.value.ui64, (val))
496
497#define	ARCSTAT_BUMP(stat)	ARCSTAT_INCR(stat, 1)
498#define	ARCSTAT_BUMPDOWN(stat)	ARCSTAT_INCR(stat, -1)
499
500#define	ARCSTAT_MAX(stat, val) {					\
501	uint64_t m;							\
502	while ((val) > (m = arc_stats.stat.value.ui64) &&		\
503	    (m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val))))	\
504		continue;						\
505}
506
507#define	ARCSTAT_MAXSTAT(stat) \
508	ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
509
510/*
511 * We define a macro to allow ARC hits/misses to be easily broken down by
512 * two separate conditions, giving a total of four different subtypes for
513 * each of hits and misses (so eight statistics total).
514 */
515#define	ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
516	if (cond1) {							\
517		if (cond2) {						\
518			ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
519		} else {						\
520			ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
521		}							\
522	} else {							\
523		if (cond2) {						\
524			ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
525		} else {						\
526			ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
527		}							\
528	}
529
530kstat_t			*arc_ksp;
531static arc_state_t	*arc_anon;
532static arc_state_t	*arc_mru;
533static arc_state_t	*arc_mru_ghost;
534static arc_state_t	*arc_mfu;
535static arc_state_t	*arc_mfu_ghost;
536static arc_state_t	*arc_l2c_only;
537
538/*
539 * There are several ARC variables that are critical to export as kstats --
540 * but we don't want to have to grovel around in the kstat whenever we wish to
541 * manipulate them.  For these variables, we therefore define them to be in
542 * terms of the statistic variable.  This assures that we are not introducing
543 * the possibility of inconsistency by having shadow copies of the variables,
544 * while still allowing the code to be readable.
545 */
546#define	arc_size	ARCSTAT(arcstat_size)	/* actual total arc size */
547#define	arc_p		ARCSTAT(arcstat_p)	/* target size of MRU */
548#define	arc_c		ARCSTAT(arcstat_c)	/* target size of cache */
549#define	arc_c_min	ARCSTAT(arcstat_c_min)	/* min target cache size */
550#define	arc_c_max	ARCSTAT(arcstat_c_max)	/* max target cache size */
551
552#define	L2ARC_IS_VALID_COMPRESS(_c_) \
553	((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
554
555static int		arc_no_grow;	/* Don't try to grow cache size */
556static uint64_t		arc_tempreserve;
557static uint64_t		arc_loaned_bytes;
558static uint64_t		arc_meta_used;
559static uint64_t		arc_meta_limit;
560static uint64_t		arc_meta_max = 0;
561SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
562    "ARC metadata used");
563SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_limit, CTLFLAG_RW, &arc_meta_limit, 0,
564    "ARC metadata limit");
565
566typedef struct l2arc_buf_hdr l2arc_buf_hdr_t;
567
568typedef struct arc_callback arc_callback_t;
569
570struct arc_callback {
571	void			*acb_private;
572	arc_done_func_t		*acb_done;
573	arc_buf_t		*acb_buf;
574	zio_t			*acb_zio_dummy;
575	arc_callback_t		*acb_next;
576};
577
578typedef struct arc_write_callback arc_write_callback_t;
579
580struct arc_write_callback {
581	void		*awcb_private;
582	arc_done_func_t	*awcb_ready;
583	arc_done_func_t	*awcb_physdone;
584	arc_done_func_t	*awcb_done;
585	arc_buf_t	*awcb_buf;
586};
587
588struct arc_buf_hdr {
589	/* protected by hash lock */
590	dva_t			b_dva;
591	uint64_t		b_birth;
592	uint64_t		b_cksum0;
593
594	kmutex_t		b_freeze_lock;
595	zio_cksum_t		*b_freeze_cksum;
596	void			*b_thawed;
597
598	arc_buf_hdr_t		*b_hash_next;
599	arc_buf_t		*b_buf;
600	uint32_t		b_flags;
601	uint32_t		b_datacnt;
602
603	arc_callback_t		*b_acb;
604	kcondvar_t		b_cv;
605
606	/* immutable */
607	arc_buf_contents_t	b_type;
608	uint64_t		b_size;
609	uint64_t		b_spa;
610
611	/* protected by arc state mutex */
612	arc_state_t		*b_state;
613	list_node_t		b_arc_node;
614
615	/* updated atomically */
616	clock_t			b_arc_access;
617
618	/* self protecting */
619	refcount_t		b_refcnt;
620
621	l2arc_buf_hdr_t		*b_l2hdr;
622	list_node_t		b_l2node;
623};
624
625static arc_buf_t *arc_eviction_list;
626static kmutex_t arc_eviction_mtx;
627static arc_buf_hdr_t arc_eviction_hdr;
628static void arc_get_data_buf(arc_buf_t *buf);
629static void arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock);
630static int arc_evict_needed(arc_buf_contents_t type);
631static void arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes);
632#ifdef illumos
633static void arc_buf_watch(arc_buf_t *buf);
634#endif /* illumos */
635
636static boolean_t l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab);
637
638#define	GHOST_STATE(state)	\
639	((state) == arc_mru_ghost || (state) == arc_mfu_ghost ||	\
640	(state) == arc_l2c_only)
641
642/*
643 * Private ARC flags.  These flags are private ARC only flags that will show up
644 * in b_flags in the arc_hdr_buf_t.  Some flags are publicly declared, and can
645 * be passed in as arc_flags in things like arc_read.  However, these flags
646 * should never be passed and should only be set by ARC code.  When adding new
647 * public flags, make sure not to smash the private ones.
648 */
649
650#define	ARC_IN_HASH_TABLE	(1 << 9)	/* this buffer is hashed */
651#define	ARC_IO_IN_PROGRESS	(1 << 10)	/* I/O in progress for buf */
652#define	ARC_IO_ERROR		(1 << 11)	/* I/O failed for buf */
653#define	ARC_FREED_IN_READ	(1 << 12)	/* buf freed while in read */
654#define	ARC_BUF_AVAILABLE	(1 << 13)	/* block not in active use */
655#define	ARC_INDIRECT		(1 << 14)	/* this is an indirect block */
656#define	ARC_FREE_IN_PROGRESS	(1 << 15)	/* hdr about to be freed */
657#define	ARC_L2_WRITING		(1 << 16)	/* L2ARC write in progress */
658#define	ARC_L2_EVICTED		(1 << 17)	/* evicted during I/O */
659#define	ARC_L2_WRITE_HEAD	(1 << 18)	/* head of write list */
660
661#define	HDR_IN_HASH_TABLE(hdr)	((hdr)->b_flags & ARC_IN_HASH_TABLE)
662#define	HDR_IO_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS)
663#define	HDR_IO_ERROR(hdr)	((hdr)->b_flags & ARC_IO_ERROR)
664#define	HDR_PREFETCH(hdr)	((hdr)->b_flags & ARC_PREFETCH)
665#define	HDR_FREED_IN_READ(hdr)	((hdr)->b_flags & ARC_FREED_IN_READ)
666#define	HDR_BUF_AVAILABLE(hdr)	((hdr)->b_flags & ARC_BUF_AVAILABLE)
667#define	HDR_FREE_IN_PROGRESS(hdr)	((hdr)->b_flags & ARC_FREE_IN_PROGRESS)
668#define	HDR_L2CACHE(hdr)	((hdr)->b_flags & ARC_L2CACHE)
669#define	HDR_L2_READING(hdr)	((hdr)->b_flags & ARC_IO_IN_PROGRESS &&	\
670				    (hdr)->b_l2hdr != NULL)
671#define	HDR_L2_WRITING(hdr)	((hdr)->b_flags & ARC_L2_WRITING)
672#define	HDR_L2_EVICTED(hdr)	((hdr)->b_flags & ARC_L2_EVICTED)
673#define	HDR_L2_WRITE_HEAD(hdr)	((hdr)->b_flags & ARC_L2_WRITE_HEAD)
674
675/*
676 * Other sizes
677 */
678
679#define	HDR_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
680#define	L2HDR_SIZE ((int64_t)sizeof (l2arc_buf_hdr_t))
681
682/*
683 * Hash table routines
684 */
685
686#define	HT_LOCK_PAD	CACHE_LINE_SIZE
687
688struct ht_lock {
689	kmutex_t	ht_lock;
690#ifdef _KERNEL
691	unsigned char	pad[(HT_LOCK_PAD - sizeof (kmutex_t))];
692#endif
693};
694
695#define	BUF_LOCKS 256
696typedef struct buf_hash_table {
697	uint64_t ht_mask;
698	arc_buf_hdr_t **ht_table;
699	struct ht_lock ht_locks[BUF_LOCKS] __aligned(CACHE_LINE_SIZE);
700} buf_hash_table_t;
701
702static buf_hash_table_t buf_hash_table;
703
704#define	BUF_HASH_INDEX(spa, dva, birth) \
705	(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
706#define	BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
707#define	BUF_HASH_LOCK(idx)	(&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
708#define	HDR_LOCK(hdr) \
709	(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
710
711uint64_t zfs_crc64_table[256];
712
713/*
714 * Level 2 ARC
715 */
716
717#define	L2ARC_WRITE_SIZE	(8 * 1024 * 1024)	/* initial write max */
718#define	L2ARC_HEADROOM		2			/* num of writes */
719/*
720 * If we discover during ARC scan any buffers to be compressed, we boost
721 * our headroom for the next scanning cycle by this percentage multiple.
722 */
723#define	L2ARC_HEADROOM_BOOST	200
724#define	L2ARC_FEED_SECS		1		/* caching interval secs */
725#define	L2ARC_FEED_MIN_MS	200		/* min caching interval ms */
726
727#define	l2arc_writes_sent	ARCSTAT(arcstat_l2_writes_sent)
728#define	l2arc_writes_done	ARCSTAT(arcstat_l2_writes_done)
729
730/* L2ARC Performance Tunables */
731uint64_t l2arc_write_max = L2ARC_WRITE_SIZE;	/* default max write size */
732uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE;	/* extra write during warmup */
733uint64_t l2arc_headroom = L2ARC_HEADROOM;	/* number of dev writes */
734uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
735uint64_t l2arc_feed_secs = L2ARC_FEED_SECS;	/* interval seconds */
736uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS;	/* min interval milliseconds */
737boolean_t l2arc_noprefetch = B_TRUE;		/* don't cache prefetch bufs */
738boolean_t l2arc_feed_again = B_TRUE;		/* turbo warmup */
739boolean_t l2arc_norw = B_TRUE;			/* no reads during writes */
740
741SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
742    &l2arc_write_max, 0, "max write size");
743SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_boost, CTLFLAG_RW,
744    &l2arc_write_boost, 0, "extra write during warmup");
745SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_headroom, CTLFLAG_RW,
746    &l2arc_headroom, 0, "number of dev writes");
747SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_secs, CTLFLAG_RW,
748    &l2arc_feed_secs, 0, "interval seconds");
749SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_feed_min_ms, CTLFLAG_RW,
750    &l2arc_feed_min_ms, 0, "min interval milliseconds");
751
752SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_noprefetch, CTLFLAG_RW,
753    &l2arc_noprefetch, 0, "don't cache prefetch bufs");
754SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_feed_again, CTLFLAG_RW,
755    &l2arc_feed_again, 0, "turbo warmup");
756SYSCTL_INT(_vfs_zfs, OID_AUTO, l2arc_norw, CTLFLAG_RW,
757    &l2arc_norw, 0, "no reads during writes");
758
759SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_size, CTLFLAG_RD,
760    &ARC_anon.arcs_size, 0, "size of anonymous state");
761SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_metadata_lsize, CTLFLAG_RD,
762    &ARC_anon.arcs_lsize[ARC_BUFC_METADATA], 0, "size of anonymous state");
763SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, anon_data_lsize, CTLFLAG_RD,
764    &ARC_anon.arcs_lsize[ARC_BUFC_DATA], 0, "size of anonymous state");
765
766SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_size, CTLFLAG_RD,
767    &ARC_mru.arcs_size, 0, "size of mru state");
768SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_metadata_lsize, CTLFLAG_RD,
769    &ARC_mru.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mru state");
770SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_data_lsize, CTLFLAG_RD,
771    &ARC_mru.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mru state");
772
773SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_size, CTLFLAG_RD,
774    &ARC_mru_ghost.arcs_size, 0, "size of mru ghost state");
775SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_metadata_lsize, CTLFLAG_RD,
776    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
777    "size of metadata in mru ghost state");
778SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mru_ghost_data_lsize, CTLFLAG_RD,
779    &ARC_mru_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
780    "size of data in mru ghost state");
781
782SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_size, CTLFLAG_RD,
783    &ARC_mfu.arcs_size, 0, "size of mfu state");
784SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_metadata_lsize, CTLFLAG_RD,
785    &ARC_mfu.arcs_lsize[ARC_BUFC_METADATA], 0, "size of metadata in mfu state");
786SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_data_lsize, CTLFLAG_RD,
787    &ARC_mfu.arcs_lsize[ARC_BUFC_DATA], 0, "size of data in mfu state");
788
789SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_size, CTLFLAG_RD,
790    &ARC_mfu_ghost.arcs_size, 0, "size of mfu ghost state");
791SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_metadata_lsize, CTLFLAG_RD,
792    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_METADATA], 0,
793    "size of metadata in mfu ghost state");
794SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, mfu_ghost_data_lsize, CTLFLAG_RD,
795    &ARC_mfu_ghost.arcs_lsize[ARC_BUFC_DATA], 0,
796    "size of data in mfu ghost state");
797
798SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2c_only_size, CTLFLAG_RD,
799    &ARC_l2c_only.arcs_size, 0, "size of mru state");
800
801/*
802 * L2ARC Internals
803 */
804typedef struct l2arc_dev {
805	vdev_t			*l2ad_vdev;	/* vdev */
806	spa_t			*l2ad_spa;	/* spa */
807	uint64_t		l2ad_hand;	/* next write location */
808	uint64_t		l2ad_start;	/* first addr on device */
809	uint64_t		l2ad_end;	/* last addr on device */
810	uint64_t		l2ad_evict;	/* last addr eviction reached */
811	boolean_t		l2ad_first;	/* first sweep through */
812	boolean_t		l2ad_writing;	/* currently writing */
813	list_t			*l2ad_buflist;	/* buffer list */
814	list_node_t		l2ad_node;	/* device list node */
815} l2arc_dev_t;
816
817static list_t L2ARC_dev_list;			/* device list */
818static list_t *l2arc_dev_list;			/* device list pointer */
819static kmutex_t l2arc_dev_mtx;			/* device list mutex */
820static l2arc_dev_t *l2arc_dev_last;		/* last device used */
821static kmutex_t l2arc_buflist_mtx;		/* mutex for all buflists */
822static list_t L2ARC_free_on_write;		/* free after write buf list */
823static list_t *l2arc_free_on_write;		/* free after write list ptr */
824static kmutex_t l2arc_free_on_write_mtx;	/* mutex for list */
825static uint64_t l2arc_ndev;			/* number of devices */
826
827typedef struct l2arc_read_callback {
828	arc_buf_t		*l2rcb_buf;		/* read buffer */
829	spa_t			*l2rcb_spa;		/* spa */
830	blkptr_t		l2rcb_bp;		/* original blkptr */
831	zbookmark_phys_t	l2rcb_zb;		/* original bookmark */
832	int			l2rcb_flags;		/* original flags */
833	enum zio_compress	l2rcb_compress;		/* applied compress */
834} l2arc_read_callback_t;
835
836typedef struct l2arc_write_callback {
837	l2arc_dev_t	*l2wcb_dev;		/* device info */
838	arc_buf_hdr_t	*l2wcb_head;		/* head of write buflist */
839} l2arc_write_callback_t;
840
841struct l2arc_buf_hdr {
842	/* protected by arc_buf_hdr  mutex */
843	l2arc_dev_t		*b_dev;		/* L2ARC device */
844	uint64_t		b_daddr;	/* disk address, offset byte */
845	/* compression applied to buffer data */
846	enum zio_compress	b_compress;
847	/* real alloc'd buffer size depending on b_compress applied */
848	int			b_asize;
849	/* temporary buffer holder for in-flight compressed data */
850	void			*b_tmp_cdata;
851};
852
853typedef struct l2arc_data_free {
854	/* protected by l2arc_free_on_write_mtx */
855	void		*l2df_data;
856	size_t		l2df_size;
857	void		(*l2df_func)(void *, size_t);
858	list_node_t	l2df_list_node;
859} l2arc_data_free_t;
860
861static kmutex_t l2arc_feed_thr_lock;
862static kcondvar_t l2arc_feed_thr_cv;
863static uint8_t l2arc_thread_exit;
864
865static void l2arc_read_done(zio_t *zio);
866static void l2arc_hdr_stat_add(void);
867static void l2arc_hdr_stat_remove(void);
868
869static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
870static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
871    enum zio_compress c);
872static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
873
874static uint64_t
875buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
876{
877	uint8_t *vdva = (uint8_t *)dva;
878	uint64_t crc = -1ULL;
879	int i;
880
881	ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
882
883	for (i = 0; i < sizeof (dva_t); i++)
884		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
885
886	crc ^= (spa>>8) ^ birth;
887
888	return (crc);
889}
890
891#define	BUF_EMPTY(buf)						\
892	((buf)->b_dva.dva_word[0] == 0 &&			\
893	(buf)->b_dva.dva_word[1] == 0 &&			\
894	(buf)->b_cksum0 == 0)
895
896#define	BUF_EQUAL(spa, dva, birth, buf)				\
897	((buf)->b_dva.dva_word[0] == (dva)->dva_word[0]) &&	\
898	((buf)->b_dva.dva_word[1] == (dva)->dva_word[1]) &&	\
899	((buf)->b_birth == birth) && ((buf)->b_spa == spa)
900
901static void
902buf_discard_identity(arc_buf_hdr_t *hdr)
903{
904	hdr->b_dva.dva_word[0] = 0;
905	hdr->b_dva.dva_word[1] = 0;
906	hdr->b_birth = 0;
907	hdr->b_cksum0 = 0;
908}
909
910static arc_buf_hdr_t *
911buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
912{
913	const dva_t *dva = BP_IDENTITY(bp);
914	uint64_t birth = BP_PHYSICAL_BIRTH(bp);
915	uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
916	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
917	arc_buf_hdr_t *buf;
918
919	mutex_enter(hash_lock);
920	for (buf = buf_hash_table.ht_table[idx]; buf != NULL;
921	    buf = buf->b_hash_next) {
922		if (BUF_EQUAL(spa, dva, birth, buf)) {
923			*lockp = hash_lock;
924			return (buf);
925		}
926	}
927	mutex_exit(hash_lock);
928	*lockp = NULL;
929	return (NULL);
930}
931
932/*
933 * Insert an entry into the hash table.  If there is already an element
934 * equal to elem in the hash table, then the already existing element
935 * will be returned and the new element will not be inserted.
936 * Otherwise returns NULL.
937 */
938static arc_buf_hdr_t *
939buf_hash_insert(arc_buf_hdr_t *buf, kmutex_t **lockp)
940{
941	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
942	kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
943	arc_buf_hdr_t *fbuf;
944	uint32_t i;
945
946	ASSERT(!DVA_IS_EMPTY(&buf->b_dva));
947	ASSERT(buf->b_birth != 0);
948	ASSERT(!HDR_IN_HASH_TABLE(buf));
949	*lockp = hash_lock;
950	mutex_enter(hash_lock);
951	for (fbuf = buf_hash_table.ht_table[idx], i = 0; fbuf != NULL;
952	    fbuf = fbuf->b_hash_next, i++) {
953		if (BUF_EQUAL(buf->b_spa, &buf->b_dva, buf->b_birth, fbuf))
954			return (fbuf);
955	}
956
957	buf->b_hash_next = buf_hash_table.ht_table[idx];
958	buf_hash_table.ht_table[idx] = buf;
959	buf->b_flags |= ARC_IN_HASH_TABLE;
960
961	/* collect some hash table performance data */
962	if (i > 0) {
963		ARCSTAT_BUMP(arcstat_hash_collisions);
964		if (i == 1)
965			ARCSTAT_BUMP(arcstat_hash_chains);
966
967		ARCSTAT_MAX(arcstat_hash_chain_max, i);
968	}
969
970	ARCSTAT_BUMP(arcstat_hash_elements);
971	ARCSTAT_MAXSTAT(arcstat_hash_elements);
972
973	return (NULL);
974}
975
976static void
977buf_hash_remove(arc_buf_hdr_t *buf)
978{
979	arc_buf_hdr_t *fbuf, **bufp;
980	uint64_t idx = BUF_HASH_INDEX(buf->b_spa, &buf->b_dva, buf->b_birth);
981
982	ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
983	ASSERT(HDR_IN_HASH_TABLE(buf));
984
985	bufp = &buf_hash_table.ht_table[idx];
986	while ((fbuf = *bufp) != buf) {
987		ASSERT(fbuf != NULL);
988		bufp = &fbuf->b_hash_next;
989	}
990	*bufp = buf->b_hash_next;
991	buf->b_hash_next = NULL;
992	buf->b_flags &= ~ARC_IN_HASH_TABLE;
993
994	/* collect some hash table performance data */
995	ARCSTAT_BUMPDOWN(arcstat_hash_elements);
996
997	if (buf_hash_table.ht_table[idx] &&
998	    buf_hash_table.ht_table[idx]->b_hash_next == NULL)
999		ARCSTAT_BUMPDOWN(arcstat_hash_chains);
1000}
1001
1002/*
1003 * Global data structures and functions for the buf kmem cache.
1004 */
1005static kmem_cache_t *hdr_cache;
1006static kmem_cache_t *buf_cache;
1007
1008static void
1009buf_fini(void)
1010{
1011	int i;
1012
1013	kmem_free(buf_hash_table.ht_table,
1014	    (buf_hash_table.ht_mask + 1) * sizeof (void *));
1015	for (i = 0; i < BUF_LOCKS; i++)
1016		mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
1017	kmem_cache_destroy(hdr_cache);
1018	kmem_cache_destroy(buf_cache);
1019}
1020
1021/*
1022 * Constructor callback - called when the cache is empty
1023 * and a new buf is requested.
1024 */
1025/* ARGSUSED */
1026static int
1027hdr_cons(void *vbuf, void *unused, int kmflag)
1028{
1029	arc_buf_hdr_t *buf = vbuf;
1030
1031	bzero(buf, sizeof (arc_buf_hdr_t));
1032	refcount_create(&buf->b_refcnt);
1033	cv_init(&buf->b_cv, NULL, CV_DEFAULT, NULL);
1034	mutex_init(&buf->b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
1035	arc_space_consume(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1036
1037	return (0);
1038}
1039
1040/* ARGSUSED */
1041static int
1042buf_cons(void *vbuf, void *unused, int kmflag)
1043{
1044	arc_buf_t *buf = vbuf;
1045
1046	bzero(buf, sizeof (arc_buf_t));
1047	mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
1048	arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1049
1050	return (0);
1051}
1052
1053/*
1054 * Destructor callback - called when a cached buf is
1055 * no longer required.
1056 */
1057/* ARGSUSED */
1058static void
1059hdr_dest(void *vbuf, void *unused)
1060{
1061	arc_buf_hdr_t *buf = vbuf;
1062
1063	ASSERT(BUF_EMPTY(buf));
1064	refcount_destroy(&buf->b_refcnt);
1065	cv_destroy(&buf->b_cv);
1066	mutex_destroy(&buf->b_freeze_lock);
1067	arc_space_return(sizeof (arc_buf_hdr_t), ARC_SPACE_HDRS);
1068}
1069
1070/* ARGSUSED */
1071static void
1072buf_dest(void *vbuf, void *unused)
1073{
1074	arc_buf_t *buf = vbuf;
1075
1076	mutex_destroy(&buf->b_evict_lock);
1077	arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
1078}
1079
1080/*
1081 * Reclaim callback -- invoked when memory is low.
1082 */
1083/* ARGSUSED */
1084static void
1085hdr_recl(void *unused)
1086{
1087	dprintf("hdr_recl called\n");
1088	/*
1089	 * umem calls the reclaim func when we destroy the buf cache,
1090	 * which is after we do arc_fini().
1091	 */
1092	if (!arc_dead)
1093		cv_signal(&arc_reclaim_thr_cv);
1094}
1095
1096static void
1097buf_init(void)
1098{
1099	uint64_t *ct;
1100	uint64_t hsize = 1ULL << 12;
1101	int i, j;
1102
1103	/*
1104	 * The hash table is big enough to fill all of physical memory
1105	 * with an average block size of zfs_arc_average_blocksize (default 8K).
1106	 * By default, the table will take up
1107	 * totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
1108	 */
1109	while (hsize * zfs_arc_average_blocksize < (uint64_t)physmem * PAGESIZE)
1110		hsize <<= 1;
1111retry:
1112	buf_hash_table.ht_mask = hsize - 1;
1113	buf_hash_table.ht_table =
1114	    kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
1115	if (buf_hash_table.ht_table == NULL) {
1116		ASSERT(hsize > (1ULL << 8));
1117		hsize >>= 1;
1118		goto retry;
1119	}
1120
1121	hdr_cache = kmem_cache_create("arc_buf_hdr_t", sizeof (arc_buf_hdr_t),
1122	    0, hdr_cons, hdr_dest, hdr_recl, NULL, NULL, 0);
1123	buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
1124	    0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
1125
1126	for (i = 0; i < 256; i++)
1127		for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
1128			*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
1129
1130	for (i = 0; i < BUF_LOCKS; i++) {
1131		mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
1132		    NULL, MUTEX_DEFAULT, NULL);
1133	}
1134}
1135
1136#define	ARC_MINTIME	(hz>>4) /* 62 ms */
1137
1138static void
1139arc_cksum_verify(arc_buf_t *buf)
1140{
1141	zio_cksum_t zc;
1142
1143	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1144		return;
1145
1146	mutex_enter(&buf->b_hdr->b_freeze_lock);
1147	if (buf->b_hdr->b_freeze_cksum == NULL ||
1148	    (buf->b_hdr->b_flags & ARC_IO_ERROR)) {
1149		mutex_exit(&buf->b_hdr->b_freeze_lock);
1150		return;
1151	}
1152	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1153	if (!ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc))
1154		panic("buffer modified while frozen!");
1155	mutex_exit(&buf->b_hdr->b_freeze_lock);
1156}
1157
1158static int
1159arc_cksum_equal(arc_buf_t *buf)
1160{
1161	zio_cksum_t zc;
1162	int equal;
1163
1164	mutex_enter(&buf->b_hdr->b_freeze_lock);
1165	fletcher_2_native(buf->b_data, buf->b_hdr->b_size, &zc);
1166	equal = ZIO_CHECKSUM_EQUAL(*buf->b_hdr->b_freeze_cksum, zc);
1167	mutex_exit(&buf->b_hdr->b_freeze_lock);
1168
1169	return (equal);
1170}
1171
1172static void
1173arc_cksum_compute(arc_buf_t *buf, boolean_t force)
1174{
1175	if (!force && !(zfs_flags & ZFS_DEBUG_MODIFY))
1176		return;
1177
1178	mutex_enter(&buf->b_hdr->b_freeze_lock);
1179	if (buf->b_hdr->b_freeze_cksum != NULL) {
1180		mutex_exit(&buf->b_hdr->b_freeze_lock);
1181		return;
1182	}
1183	buf->b_hdr->b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t), KM_SLEEP);
1184	fletcher_2_native(buf->b_data, buf->b_hdr->b_size,
1185	    buf->b_hdr->b_freeze_cksum);
1186	mutex_exit(&buf->b_hdr->b_freeze_lock);
1187#ifdef illumos
1188	arc_buf_watch(buf);
1189#endif /* illumos */
1190}
1191
1192#ifdef illumos
1193#ifndef _KERNEL
1194typedef struct procctl {
1195	long cmd;
1196	prwatch_t prwatch;
1197} procctl_t;
1198#endif
1199
1200/* ARGSUSED */
1201static void
1202arc_buf_unwatch(arc_buf_t *buf)
1203{
1204#ifndef _KERNEL
1205	if (arc_watch) {
1206		int result;
1207		procctl_t ctl;
1208		ctl.cmd = PCWATCH;
1209		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1210		ctl.prwatch.pr_size = 0;
1211		ctl.prwatch.pr_wflags = 0;
1212		result = write(arc_procfd, &ctl, sizeof (ctl));
1213		ASSERT3U(result, ==, sizeof (ctl));
1214	}
1215#endif
1216}
1217
1218/* ARGSUSED */
1219static void
1220arc_buf_watch(arc_buf_t *buf)
1221{
1222#ifndef _KERNEL
1223	if (arc_watch) {
1224		int result;
1225		procctl_t ctl;
1226		ctl.cmd = PCWATCH;
1227		ctl.prwatch.pr_vaddr = (uintptr_t)buf->b_data;
1228		ctl.prwatch.pr_size = buf->b_hdr->b_size;
1229		ctl.prwatch.pr_wflags = WA_WRITE;
1230		result = write(arc_procfd, &ctl, sizeof (ctl));
1231		ASSERT3U(result, ==, sizeof (ctl));
1232	}
1233#endif
1234}
1235#endif /* illumos */
1236
1237void
1238arc_buf_thaw(arc_buf_t *buf)
1239{
1240	if (zfs_flags & ZFS_DEBUG_MODIFY) {
1241		if (buf->b_hdr->b_state != arc_anon)
1242			panic("modifying non-anon buffer!");
1243		if (buf->b_hdr->b_flags & ARC_IO_IN_PROGRESS)
1244			panic("modifying buffer while i/o in progress!");
1245		arc_cksum_verify(buf);
1246	}
1247
1248	mutex_enter(&buf->b_hdr->b_freeze_lock);
1249	if (buf->b_hdr->b_freeze_cksum != NULL) {
1250		kmem_free(buf->b_hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1251		buf->b_hdr->b_freeze_cksum = NULL;
1252	}
1253
1254	if (zfs_flags & ZFS_DEBUG_MODIFY) {
1255		if (buf->b_hdr->b_thawed)
1256			kmem_free(buf->b_hdr->b_thawed, 1);
1257		buf->b_hdr->b_thawed = kmem_alloc(1, KM_SLEEP);
1258	}
1259
1260	mutex_exit(&buf->b_hdr->b_freeze_lock);
1261
1262#ifdef illumos
1263	arc_buf_unwatch(buf);
1264#endif /* illumos */
1265}
1266
1267void
1268arc_buf_freeze(arc_buf_t *buf)
1269{
1270	kmutex_t *hash_lock;
1271
1272	if (!(zfs_flags & ZFS_DEBUG_MODIFY))
1273		return;
1274
1275	hash_lock = HDR_LOCK(buf->b_hdr);
1276	mutex_enter(hash_lock);
1277
1278	ASSERT(buf->b_hdr->b_freeze_cksum != NULL ||
1279	    buf->b_hdr->b_state == arc_anon);
1280	arc_cksum_compute(buf, B_FALSE);
1281	mutex_exit(hash_lock);
1282
1283}
1284
1285static void
1286get_buf_info(arc_buf_hdr_t *ab, arc_state_t *state, list_t **list, kmutex_t **lock)
1287{
1288	uint64_t buf_hashid = buf_hash(ab->b_spa, &ab->b_dva, ab->b_birth);
1289
1290	if (ab->b_type == ARC_BUFC_METADATA)
1291		buf_hashid &= (ARC_BUFC_NUMMETADATALISTS - 1);
1292	else {
1293		buf_hashid &= (ARC_BUFC_NUMDATALISTS - 1);
1294		buf_hashid += ARC_BUFC_NUMMETADATALISTS;
1295	}
1296
1297	*list = &state->arcs_lists[buf_hashid];
1298	*lock = ARCS_LOCK(state, buf_hashid);
1299}
1300
1301
1302static void
1303add_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1304{
1305	ASSERT(MUTEX_HELD(hash_lock));
1306
1307	if ((refcount_add(&ab->b_refcnt, tag) == 1) &&
1308	    (ab->b_state != arc_anon)) {
1309		uint64_t delta = ab->b_size * ab->b_datacnt;
1310		uint64_t *size = &ab->b_state->arcs_lsize[ab->b_type];
1311		list_t *list;
1312		kmutex_t *lock;
1313
1314		get_buf_info(ab, ab->b_state, &list, &lock);
1315		ASSERT(!MUTEX_HELD(lock));
1316		mutex_enter(lock);
1317		ASSERT(list_link_active(&ab->b_arc_node));
1318		list_remove(list, ab);
1319		if (GHOST_STATE(ab->b_state)) {
1320			ASSERT0(ab->b_datacnt);
1321			ASSERT3P(ab->b_buf, ==, NULL);
1322			delta = ab->b_size;
1323		}
1324		ASSERT(delta > 0);
1325		ASSERT3U(*size, >=, delta);
1326		atomic_add_64(size, -delta);
1327		mutex_exit(lock);
1328		/* remove the prefetch flag if we get a reference */
1329		if (ab->b_flags & ARC_PREFETCH)
1330			ab->b_flags &= ~ARC_PREFETCH;
1331	}
1332}
1333
1334static int
1335remove_reference(arc_buf_hdr_t *ab, kmutex_t *hash_lock, void *tag)
1336{
1337	int cnt;
1338	arc_state_t *state = ab->b_state;
1339
1340	ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
1341	ASSERT(!GHOST_STATE(state));
1342
1343	if (((cnt = refcount_remove(&ab->b_refcnt, tag)) == 0) &&
1344	    (state != arc_anon)) {
1345		uint64_t *size = &state->arcs_lsize[ab->b_type];
1346		list_t *list;
1347		kmutex_t *lock;
1348
1349		get_buf_info(ab, state, &list, &lock);
1350		ASSERT(!MUTEX_HELD(lock));
1351		mutex_enter(lock);
1352		ASSERT(!list_link_active(&ab->b_arc_node));
1353		list_insert_head(list, ab);
1354		ASSERT(ab->b_datacnt > 0);
1355		atomic_add_64(size, ab->b_size * ab->b_datacnt);
1356		mutex_exit(lock);
1357	}
1358	return (cnt);
1359}
1360
1361/*
1362 * Move the supplied buffer to the indicated state.  The mutex
1363 * for the buffer must be held by the caller.
1364 */
1365static void
1366arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *ab, kmutex_t *hash_lock)
1367{
1368	arc_state_t *old_state = ab->b_state;
1369	int64_t refcnt = refcount_count(&ab->b_refcnt);
1370	uint64_t from_delta, to_delta;
1371	list_t *list;
1372	kmutex_t *lock;
1373
1374	ASSERT(MUTEX_HELD(hash_lock));
1375	ASSERT3P(new_state, !=, old_state);
1376	ASSERT(refcnt == 0 || ab->b_datacnt > 0);
1377	ASSERT(ab->b_datacnt == 0 || !GHOST_STATE(new_state));
1378	ASSERT(ab->b_datacnt <= 1 || old_state != arc_anon);
1379
1380	from_delta = to_delta = ab->b_datacnt * ab->b_size;
1381
1382	/*
1383	 * If this buffer is evictable, transfer it from the
1384	 * old state list to the new state list.
1385	 */
1386	if (refcnt == 0) {
1387		if (old_state != arc_anon) {
1388			int use_mutex;
1389			uint64_t *size = &old_state->arcs_lsize[ab->b_type];
1390
1391			get_buf_info(ab, old_state, &list, &lock);
1392			use_mutex = !MUTEX_HELD(lock);
1393			if (use_mutex)
1394				mutex_enter(lock);
1395
1396			ASSERT(list_link_active(&ab->b_arc_node));
1397			list_remove(list, ab);
1398
1399			/*
1400			 * If prefetching out of the ghost cache,
1401			 * we will have a non-zero datacnt.
1402			 */
1403			if (GHOST_STATE(old_state) && ab->b_datacnt == 0) {
1404				/* ghost elements have a ghost size */
1405				ASSERT(ab->b_buf == NULL);
1406				from_delta = ab->b_size;
1407			}
1408			ASSERT3U(*size, >=, from_delta);
1409			atomic_add_64(size, -from_delta);
1410
1411			if (use_mutex)
1412				mutex_exit(lock);
1413		}
1414		if (new_state != arc_anon) {
1415			int use_mutex;
1416			uint64_t *size = &new_state->arcs_lsize[ab->b_type];
1417
1418			get_buf_info(ab, new_state, &list, &lock);
1419			use_mutex = !MUTEX_HELD(lock);
1420			if (use_mutex)
1421				mutex_enter(lock);
1422
1423			list_insert_head(list, ab);
1424
1425			/* ghost elements have a ghost size */
1426			if (GHOST_STATE(new_state)) {
1427				ASSERT(ab->b_datacnt == 0);
1428				ASSERT(ab->b_buf == NULL);
1429				to_delta = ab->b_size;
1430			}
1431			atomic_add_64(size, to_delta);
1432
1433			if (use_mutex)
1434				mutex_exit(lock);
1435		}
1436	}
1437
1438	ASSERT(!BUF_EMPTY(ab));
1439	if (new_state == arc_anon && HDR_IN_HASH_TABLE(ab))
1440		buf_hash_remove(ab);
1441
1442	/* adjust state sizes */
1443	if (to_delta)
1444		atomic_add_64(&new_state->arcs_size, to_delta);
1445	if (from_delta) {
1446		ASSERT3U(old_state->arcs_size, >=, from_delta);
1447		atomic_add_64(&old_state->arcs_size, -from_delta);
1448	}
1449	ab->b_state = new_state;
1450
1451	/* adjust l2arc hdr stats */
1452	if (new_state == arc_l2c_only)
1453		l2arc_hdr_stat_add();
1454	else if (old_state == arc_l2c_only)
1455		l2arc_hdr_stat_remove();
1456}
1457
1458void
1459arc_space_consume(uint64_t space, arc_space_type_t type)
1460{
1461	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1462
1463	switch (type) {
1464	case ARC_SPACE_DATA:
1465		ARCSTAT_INCR(arcstat_data_size, space);
1466		break;
1467	case ARC_SPACE_OTHER:
1468		ARCSTAT_INCR(arcstat_other_size, space);
1469		break;
1470	case ARC_SPACE_HDRS:
1471		ARCSTAT_INCR(arcstat_hdr_size, space);
1472		break;
1473	case ARC_SPACE_L2HDRS:
1474		ARCSTAT_INCR(arcstat_l2_hdr_size, space);
1475		break;
1476	}
1477
1478	atomic_add_64(&arc_meta_used, space);
1479	atomic_add_64(&arc_size, space);
1480}
1481
1482void
1483arc_space_return(uint64_t space, arc_space_type_t type)
1484{
1485	ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
1486
1487	switch (type) {
1488	case ARC_SPACE_DATA:
1489		ARCSTAT_INCR(arcstat_data_size, -space);
1490		break;
1491	case ARC_SPACE_OTHER:
1492		ARCSTAT_INCR(arcstat_other_size, -space);
1493		break;
1494	case ARC_SPACE_HDRS:
1495		ARCSTAT_INCR(arcstat_hdr_size, -space);
1496		break;
1497	case ARC_SPACE_L2HDRS:
1498		ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
1499		break;
1500	}
1501
1502	ASSERT(arc_meta_used >= space);
1503	if (arc_meta_max < arc_meta_used)
1504		arc_meta_max = arc_meta_used;
1505	atomic_add_64(&arc_meta_used, -space);
1506	ASSERT(arc_size >= space);
1507	atomic_add_64(&arc_size, -space);
1508}
1509
1510arc_buf_t *
1511arc_buf_alloc(spa_t *spa, int size, void *tag, arc_buf_contents_t type)
1512{
1513	arc_buf_hdr_t *hdr;
1514	arc_buf_t *buf;
1515
1516	ASSERT3U(size, >, 0);
1517	hdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
1518	ASSERT(BUF_EMPTY(hdr));
1519	hdr->b_size = size;
1520	hdr->b_type = type;
1521	hdr->b_spa = spa_load_guid(spa);
1522	hdr->b_state = arc_anon;
1523	hdr->b_arc_access = 0;
1524	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1525	buf->b_hdr = hdr;
1526	buf->b_data = NULL;
1527	buf->b_efunc = NULL;
1528	buf->b_private = NULL;
1529	buf->b_next = NULL;
1530	hdr->b_buf = buf;
1531	arc_get_data_buf(buf);
1532	hdr->b_datacnt = 1;
1533	hdr->b_flags = 0;
1534	ASSERT(refcount_is_zero(&hdr->b_refcnt));
1535	(void) refcount_add(&hdr->b_refcnt, tag);
1536
1537	return (buf);
1538}
1539
1540static char *arc_onloan_tag = "onloan";
1541
1542/*
1543 * Loan out an anonymous arc buffer. Loaned buffers are not counted as in
1544 * flight data by arc_tempreserve_space() until they are "returned". Loaned
1545 * buffers must be returned to the arc before they can be used by the DMU or
1546 * freed.
1547 */
1548arc_buf_t *
1549arc_loan_buf(spa_t *spa, int size)
1550{
1551	arc_buf_t *buf;
1552
1553	buf = arc_buf_alloc(spa, size, arc_onloan_tag, ARC_BUFC_DATA);
1554
1555	atomic_add_64(&arc_loaned_bytes, size);
1556	return (buf);
1557}
1558
1559/*
1560 * Return a loaned arc buffer to the arc.
1561 */
1562void
1563arc_return_buf(arc_buf_t *buf, void *tag)
1564{
1565	arc_buf_hdr_t *hdr = buf->b_hdr;
1566
1567	ASSERT(buf->b_data != NULL);
1568	(void) refcount_add(&hdr->b_refcnt, tag);
1569	(void) refcount_remove(&hdr->b_refcnt, arc_onloan_tag);
1570
1571	atomic_add_64(&arc_loaned_bytes, -hdr->b_size);
1572}
1573
1574/* Detach an arc_buf from a dbuf (tag) */
1575void
1576arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
1577{
1578	arc_buf_hdr_t *hdr;
1579
1580	ASSERT(buf->b_data != NULL);
1581	hdr = buf->b_hdr;
1582	(void) refcount_add(&hdr->b_refcnt, arc_onloan_tag);
1583	(void) refcount_remove(&hdr->b_refcnt, tag);
1584	buf->b_efunc = NULL;
1585	buf->b_private = NULL;
1586
1587	atomic_add_64(&arc_loaned_bytes, hdr->b_size);
1588}
1589
1590static arc_buf_t *
1591arc_buf_clone(arc_buf_t *from)
1592{
1593	arc_buf_t *buf;
1594	arc_buf_hdr_t *hdr = from->b_hdr;
1595	uint64_t size = hdr->b_size;
1596
1597	ASSERT(hdr->b_state != arc_anon);
1598
1599	buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
1600	buf->b_hdr = hdr;
1601	buf->b_data = NULL;
1602	buf->b_efunc = NULL;
1603	buf->b_private = NULL;
1604	buf->b_next = hdr->b_buf;
1605	hdr->b_buf = buf;
1606	arc_get_data_buf(buf);
1607	bcopy(from->b_data, buf->b_data, size);
1608
1609	/*
1610	 * This buffer already exists in the arc so create a duplicate
1611	 * copy for the caller.  If the buffer is associated with user data
1612	 * then track the size and number of duplicates.  These stats will be
1613	 * updated as duplicate buffers are created and destroyed.
1614	 */
1615	if (hdr->b_type == ARC_BUFC_DATA) {
1616		ARCSTAT_BUMP(arcstat_duplicate_buffers);
1617		ARCSTAT_INCR(arcstat_duplicate_buffers_size, size);
1618	}
1619	hdr->b_datacnt += 1;
1620	return (buf);
1621}
1622
1623void
1624arc_buf_add_ref(arc_buf_t *buf, void* tag)
1625{
1626	arc_buf_hdr_t *hdr;
1627	kmutex_t *hash_lock;
1628
1629	/*
1630	 * Check to see if this buffer is evicted.  Callers
1631	 * must verify b_data != NULL to know if the add_ref
1632	 * was successful.
1633	 */
1634	mutex_enter(&buf->b_evict_lock);
1635	if (buf->b_data == NULL) {
1636		mutex_exit(&buf->b_evict_lock);
1637		return;
1638	}
1639	hash_lock = HDR_LOCK(buf->b_hdr);
1640	mutex_enter(hash_lock);
1641	hdr = buf->b_hdr;
1642	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1643	mutex_exit(&buf->b_evict_lock);
1644
1645	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
1646	add_reference(hdr, hash_lock, tag);
1647	DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
1648	arc_access(hdr, hash_lock);
1649	mutex_exit(hash_lock);
1650	ARCSTAT_BUMP(arcstat_hits);
1651	ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
1652	    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
1653	    data, metadata, hits);
1654}
1655
1656/*
1657 * Free the arc data buffer.  If it is an l2arc write in progress,
1658 * the buffer is placed on l2arc_free_on_write to be freed later.
1659 */
1660static void
1661arc_buf_data_free(arc_buf_t *buf, void (*free_func)(void *, size_t))
1662{
1663	arc_buf_hdr_t *hdr = buf->b_hdr;
1664
1665	if (HDR_L2_WRITING(hdr)) {
1666		l2arc_data_free_t *df;
1667		df = kmem_alloc(sizeof (l2arc_data_free_t), KM_SLEEP);
1668		df->l2df_data = buf->b_data;
1669		df->l2df_size = hdr->b_size;
1670		df->l2df_func = free_func;
1671		mutex_enter(&l2arc_free_on_write_mtx);
1672		list_insert_head(l2arc_free_on_write, df);
1673		mutex_exit(&l2arc_free_on_write_mtx);
1674		ARCSTAT_BUMP(arcstat_l2_free_on_write);
1675	} else {
1676		free_func(buf->b_data, hdr->b_size);
1677	}
1678}
1679
1680/*
1681 * Free up buf->b_data and if 'remove' is set, then pull the
1682 * arc_buf_t off of the the arc_buf_hdr_t's list and free it.
1683 */
1684static void
1685arc_buf_destroy(arc_buf_t *buf, boolean_t recycle, boolean_t remove)
1686{
1687	arc_buf_t **bufp;
1688
1689	/* free up data associated with the buf */
1690	if (buf->b_data) {
1691		arc_state_t *state = buf->b_hdr->b_state;
1692		uint64_t size = buf->b_hdr->b_size;
1693		arc_buf_contents_t type = buf->b_hdr->b_type;
1694
1695		arc_cksum_verify(buf);
1696#ifdef illumos
1697		arc_buf_unwatch(buf);
1698#endif /* illumos */
1699
1700		if (!recycle) {
1701			if (type == ARC_BUFC_METADATA) {
1702				arc_buf_data_free(buf, zio_buf_free);
1703				arc_space_return(size, ARC_SPACE_DATA);
1704			} else {
1705				ASSERT(type == ARC_BUFC_DATA);
1706				arc_buf_data_free(buf, zio_data_buf_free);
1707				ARCSTAT_INCR(arcstat_data_size, -size);
1708				atomic_add_64(&arc_size, -size);
1709			}
1710		}
1711		if (list_link_active(&buf->b_hdr->b_arc_node)) {
1712			uint64_t *cnt = &state->arcs_lsize[type];
1713
1714			ASSERT(refcount_is_zero(&buf->b_hdr->b_refcnt));
1715			ASSERT(state != arc_anon);
1716
1717			ASSERT3U(*cnt, >=, size);
1718			atomic_add_64(cnt, -size);
1719		}
1720		ASSERT3U(state->arcs_size, >=, size);
1721		atomic_add_64(&state->arcs_size, -size);
1722		buf->b_data = NULL;
1723
1724		/*
1725		 * If we're destroying a duplicate buffer make sure
1726		 * that the appropriate statistics are updated.
1727		 */
1728		if (buf->b_hdr->b_datacnt > 1 &&
1729		    buf->b_hdr->b_type == ARC_BUFC_DATA) {
1730			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
1731			ARCSTAT_INCR(arcstat_duplicate_buffers_size, -size);
1732		}
1733		ASSERT(buf->b_hdr->b_datacnt > 0);
1734		buf->b_hdr->b_datacnt -= 1;
1735	}
1736
1737	/* only remove the buf if requested */
1738	if (!remove)
1739		return;
1740
1741	/* remove the buf from the hdr list */
1742	for (bufp = &buf->b_hdr->b_buf; *bufp != buf; bufp = &(*bufp)->b_next)
1743		continue;
1744	*bufp = buf->b_next;
1745	buf->b_next = NULL;
1746
1747	ASSERT(buf->b_efunc == NULL);
1748
1749	/* clean up the buf */
1750	buf->b_hdr = NULL;
1751	kmem_cache_free(buf_cache, buf);
1752}
1753
1754static void
1755arc_hdr_destroy(arc_buf_hdr_t *hdr)
1756{
1757	ASSERT(refcount_is_zero(&hdr->b_refcnt));
1758	ASSERT3P(hdr->b_state, ==, arc_anon);
1759	ASSERT(!HDR_IO_IN_PROGRESS(hdr));
1760	l2arc_buf_hdr_t *l2hdr = hdr->b_l2hdr;
1761
1762	if (l2hdr != NULL) {
1763		boolean_t buflist_held = MUTEX_HELD(&l2arc_buflist_mtx);
1764		/*
1765		 * To prevent arc_free() and l2arc_evict() from
1766		 * attempting to free the same buffer at the same time,
1767		 * a FREE_IN_PROGRESS flag is given to arc_free() to
1768		 * give it priority.  l2arc_evict() can't destroy this
1769		 * header while we are waiting on l2arc_buflist_mtx.
1770		 *
1771		 * The hdr may be removed from l2ad_buflist before we
1772		 * grab l2arc_buflist_mtx, so b_l2hdr is rechecked.
1773		 */
1774		if (!buflist_held) {
1775			mutex_enter(&l2arc_buflist_mtx);
1776			l2hdr = hdr->b_l2hdr;
1777		}
1778
1779		if (l2hdr != NULL) {
1780			trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1781			    hdr->b_size, 0);
1782			list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1783			ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1784			ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1785			vdev_space_update(l2hdr->b_dev->l2ad_vdev,
1786			    -l2hdr->b_asize, 0, 0);
1787			kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1788			if (hdr->b_state == arc_l2c_only)
1789				l2arc_hdr_stat_remove();
1790			hdr->b_l2hdr = NULL;
1791		}
1792
1793		if (!buflist_held)
1794			mutex_exit(&l2arc_buflist_mtx);
1795	}
1796
1797	if (!BUF_EMPTY(hdr)) {
1798		ASSERT(!HDR_IN_HASH_TABLE(hdr));
1799		buf_discard_identity(hdr);
1800	}
1801	while (hdr->b_buf) {
1802		arc_buf_t *buf = hdr->b_buf;
1803
1804		if (buf->b_efunc) {
1805			mutex_enter(&arc_eviction_mtx);
1806			mutex_enter(&buf->b_evict_lock);
1807			ASSERT(buf->b_hdr != NULL);
1808			arc_buf_destroy(hdr->b_buf, FALSE, FALSE);
1809			hdr->b_buf = buf->b_next;
1810			buf->b_hdr = &arc_eviction_hdr;
1811			buf->b_next = arc_eviction_list;
1812			arc_eviction_list = buf;
1813			mutex_exit(&buf->b_evict_lock);
1814			mutex_exit(&arc_eviction_mtx);
1815		} else {
1816			arc_buf_destroy(hdr->b_buf, FALSE, TRUE);
1817		}
1818	}
1819	if (hdr->b_freeze_cksum != NULL) {
1820		kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
1821		hdr->b_freeze_cksum = NULL;
1822	}
1823	if (hdr->b_thawed) {
1824		kmem_free(hdr->b_thawed, 1);
1825		hdr->b_thawed = NULL;
1826	}
1827
1828	ASSERT(!list_link_active(&hdr->b_arc_node));
1829	ASSERT3P(hdr->b_hash_next, ==, NULL);
1830	ASSERT3P(hdr->b_acb, ==, NULL);
1831	kmem_cache_free(hdr_cache, hdr);
1832}
1833
1834void
1835arc_buf_free(arc_buf_t *buf, void *tag)
1836{
1837	arc_buf_hdr_t *hdr = buf->b_hdr;
1838	int hashed = hdr->b_state != arc_anon;
1839
1840	ASSERT(buf->b_efunc == NULL);
1841	ASSERT(buf->b_data != NULL);
1842
1843	if (hashed) {
1844		kmutex_t *hash_lock = HDR_LOCK(hdr);
1845
1846		mutex_enter(hash_lock);
1847		hdr = buf->b_hdr;
1848		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1849
1850		(void) remove_reference(hdr, hash_lock, tag);
1851		if (hdr->b_datacnt > 1) {
1852			arc_buf_destroy(buf, FALSE, TRUE);
1853		} else {
1854			ASSERT(buf == hdr->b_buf);
1855			ASSERT(buf->b_efunc == NULL);
1856			hdr->b_flags |= ARC_BUF_AVAILABLE;
1857		}
1858		mutex_exit(hash_lock);
1859	} else if (HDR_IO_IN_PROGRESS(hdr)) {
1860		int destroy_hdr;
1861		/*
1862		 * We are in the middle of an async write.  Don't destroy
1863		 * this buffer unless the write completes before we finish
1864		 * decrementing the reference count.
1865		 */
1866		mutex_enter(&arc_eviction_mtx);
1867		(void) remove_reference(hdr, NULL, tag);
1868		ASSERT(refcount_is_zero(&hdr->b_refcnt));
1869		destroy_hdr = !HDR_IO_IN_PROGRESS(hdr);
1870		mutex_exit(&arc_eviction_mtx);
1871		if (destroy_hdr)
1872			arc_hdr_destroy(hdr);
1873	} else {
1874		if (remove_reference(hdr, NULL, tag) > 0)
1875			arc_buf_destroy(buf, FALSE, TRUE);
1876		else
1877			arc_hdr_destroy(hdr);
1878	}
1879}
1880
1881boolean_t
1882arc_buf_remove_ref(arc_buf_t *buf, void* tag)
1883{
1884	arc_buf_hdr_t *hdr = buf->b_hdr;
1885	kmutex_t *hash_lock = HDR_LOCK(hdr);
1886	boolean_t no_callback = (buf->b_efunc == NULL);
1887
1888	if (hdr->b_state == arc_anon) {
1889		ASSERT(hdr->b_datacnt == 1);
1890		arc_buf_free(buf, tag);
1891		return (no_callback);
1892	}
1893
1894	mutex_enter(hash_lock);
1895	hdr = buf->b_hdr;
1896	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
1897	ASSERT(hdr->b_state != arc_anon);
1898	ASSERT(buf->b_data != NULL);
1899
1900	(void) remove_reference(hdr, hash_lock, tag);
1901	if (hdr->b_datacnt > 1) {
1902		if (no_callback)
1903			arc_buf_destroy(buf, FALSE, TRUE);
1904	} else if (no_callback) {
1905		ASSERT(hdr->b_buf == buf && buf->b_next == NULL);
1906		ASSERT(buf->b_efunc == NULL);
1907		hdr->b_flags |= ARC_BUF_AVAILABLE;
1908	}
1909	ASSERT(no_callback || hdr->b_datacnt > 1 ||
1910	    refcount_is_zero(&hdr->b_refcnt));
1911	mutex_exit(hash_lock);
1912	return (no_callback);
1913}
1914
1915int
1916arc_buf_size(arc_buf_t *buf)
1917{
1918	return (buf->b_hdr->b_size);
1919}
1920
1921/*
1922 * Called from the DMU to determine if the current buffer should be
1923 * evicted. In order to ensure proper locking, the eviction must be initiated
1924 * from the DMU. Return true if the buffer is associated with user data and
1925 * duplicate buffers still exist.
1926 */
1927boolean_t
1928arc_buf_eviction_needed(arc_buf_t *buf)
1929{
1930	arc_buf_hdr_t *hdr;
1931	boolean_t evict_needed = B_FALSE;
1932
1933	if (zfs_disable_dup_eviction)
1934		return (B_FALSE);
1935
1936	mutex_enter(&buf->b_evict_lock);
1937	hdr = buf->b_hdr;
1938	if (hdr == NULL) {
1939		/*
1940		 * We are in arc_do_user_evicts(); let that function
1941		 * perform the eviction.
1942		 */
1943		ASSERT(buf->b_data == NULL);
1944		mutex_exit(&buf->b_evict_lock);
1945		return (B_FALSE);
1946	} else if (buf->b_data == NULL) {
1947		/*
1948		 * We have already been added to the arc eviction list;
1949		 * recommend eviction.
1950		 */
1951		ASSERT3P(hdr, ==, &arc_eviction_hdr);
1952		mutex_exit(&buf->b_evict_lock);
1953		return (B_TRUE);
1954	}
1955
1956	if (hdr->b_datacnt > 1 && hdr->b_type == ARC_BUFC_DATA)
1957		evict_needed = B_TRUE;
1958
1959	mutex_exit(&buf->b_evict_lock);
1960	return (evict_needed);
1961}
1962
1963/*
1964 * Evict buffers from list until we've removed the specified number of
1965 * bytes.  Move the removed buffers to the appropriate evict state.
1966 * If the recycle flag is set, then attempt to "recycle" a buffer:
1967 * - look for a buffer to evict that is `bytes' long.
1968 * - return the data block from this buffer rather than freeing it.
1969 * This flag is used by callers that are trying to make space for a
1970 * new buffer in a full arc cache.
1971 *
1972 * This function makes a "best effort".  It skips over any buffers
1973 * it can't get a hash_lock on, and so may not catch all candidates.
1974 * It may also return without evicting as much space as requested.
1975 */
1976static void *
1977arc_evict(arc_state_t *state, uint64_t spa, int64_t bytes, boolean_t recycle,
1978    arc_buf_contents_t type)
1979{
1980	arc_state_t *evicted_state;
1981	uint64_t bytes_evicted = 0, skipped = 0, missed = 0;
1982	int64_t bytes_remaining;
1983	arc_buf_hdr_t *ab, *ab_prev = NULL;
1984	list_t *evicted_list, *list, *evicted_list_start, *list_start;
1985	kmutex_t *lock, *evicted_lock;
1986	kmutex_t *hash_lock;
1987	boolean_t have_lock;
1988	void *stolen = NULL;
1989	arc_buf_hdr_t marker = { 0 };
1990	int count = 0;
1991	static int evict_metadata_offset, evict_data_offset;
1992	int i, idx, offset, list_count, lists;
1993
1994	ASSERT(state == arc_mru || state == arc_mfu);
1995
1996	evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
1997
1998	if (type == ARC_BUFC_METADATA) {
1999		offset = 0;
2000		list_count = ARC_BUFC_NUMMETADATALISTS;
2001		list_start = &state->arcs_lists[0];
2002		evicted_list_start = &evicted_state->arcs_lists[0];
2003		idx = evict_metadata_offset;
2004	} else {
2005		offset = ARC_BUFC_NUMMETADATALISTS;
2006		list_start = &state->arcs_lists[offset];
2007		evicted_list_start = &evicted_state->arcs_lists[offset];
2008		list_count = ARC_BUFC_NUMDATALISTS;
2009		idx = evict_data_offset;
2010	}
2011	bytes_remaining = evicted_state->arcs_lsize[type];
2012	lists = 0;
2013
2014evict_start:
2015	list = &list_start[idx];
2016	evicted_list = &evicted_list_start[idx];
2017	lock = ARCS_LOCK(state, (offset + idx));
2018	evicted_lock = ARCS_LOCK(evicted_state, (offset + idx));
2019
2020	mutex_enter(lock);
2021	mutex_enter(evicted_lock);
2022
2023	for (ab = list_tail(list); ab; ab = ab_prev) {
2024		ab_prev = list_prev(list, ab);
2025		bytes_remaining -= (ab->b_size * ab->b_datacnt);
2026		/* prefetch buffers have a minimum lifespan */
2027		if (HDR_IO_IN_PROGRESS(ab) ||
2028		    (spa && ab->b_spa != spa) ||
2029		    (ab->b_flags & (ARC_PREFETCH|ARC_INDIRECT) &&
2030		    ddi_get_lbolt() - ab->b_arc_access <
2031		    arc_min_prefetch_lifespan)) {
2032			skipped++;
2033			continue;
2034		}
2035		/* "lookahead" for better eviction candidate */
2036		if (recycle && ab->b_size != bytes &&
2037		    ab_prev && ab_prev->b_size == bytes)
2038			continue;
2039
2040		/* ignore markers */
2041		if (ab->b_spa == 0)
2042			continue;
2043
2044		/*
2045		 * It may take a long time to evict all the bufs requested.
2046		 * To avoid blocking all arc activity, periodically drop
2047		 * the arcs_mtx and give other threads a chance to run
2048		 * before reacquiring the lock.
2049		 *
2050		 * If we are looking for a buffer to recycle, we are in
2051		 * the hot code path, so don't sleep.
2052		 */
2053		if (!recycle && count++ > arc_evict_iterations) {
2054			list_insert_after(list, ab, &marker);
2055			mutex_exit(evicted_lock);
2056			mutex_exit(lock);
2057			kpreempt(KPREEMPT_SYNC);
2058			mutex_enter(lock);
2059			mutex_enter(evicted_lock);
2060			ab_prev = list_prev(list, &marker);
2061			list_remove(list, &marker);
2062			count = 0;
2063			continue;
2064		}
2065
2066		hash_lock = HDR_LOCK(ab);
2067		have_lock = MUTEX_HELD(hash_lock);
2068		if (have_lock || mutex_tryenter(hash_lock)) {
2069			ASSERT0(refcount_count(&ab->b_refcnt));
2070			ASSERT(ab->b_datacnt > 0);
2071			while (ab->b_buf) {
2072				arc_buf_t *buf = ab->b_buf;
2073				if (!mutex_tryenter(&buf->b_evict_lock)) {
2074					missed += 1;
2075					break;
2076				}
2077				if (buf->b_data) {
2078					bytes_evicted += ab->b_size;
2079					if (recycle && ab->b_type == type &&
2080					    ab->b_size == bytes &&
2081					    !HDR_L2_WRITING(ab)) {
2082						stolen = buf->b_data;
2083						recycle = FALSE;
2084					}
2085				}
2086				if (buf->b_efunc) {
2087					mutex_enter(&arc_eviction_mtx);
2088					arc_buf_destroy(buf,
2089					    buf->b_data == stolen, FALSE);
2090					ab->b_buf = buf->b_next;
2091					buf->b_hdr = &arc_eviction_hdr;
2092					buf->b_next = arc_eviction_list;
2093					arc_eviction_list = buf;
2094					mutex_exit(&arc_eviction_mtx);
2095					mutex_exit(&buf->b_evict_lock);
2096				} else {
2097					mutex_exit(&buf->b_evict_lock);
2098					arc_buf_destroy(buf,
2099					    buf->b_data == stolen, TRUE);
2100				}
2101			}
2102
2103			if (ab->b_l2hdr) {
2104				ARCSTAT_INCR(arcstat_evict_l2_cached,
2105				    ab->b_size);
2106			} else {
2107				if (l2arc_write_eligible(ab->b_spa, ab)) {
2108					ARCSTAT_INCR(arcstat_evict_l2_eligible,
2109					    ab->b_size);
2110				} else {
2111					ARCSTAT_INCR(
2112					    arcstat_evict_l2_ineligible,
2113					    ab->b_size);
2114				}
2115			}
2116
2117			if (ab->b_datacnt == 0) {
2118				arc_change_state(evicted_state, ab, hash_lock);
2119				ASSERT(HDR_IN_HASH_TABLE(ab));
2120				ab->b_flags |= ARC_IN_HASH_TABLE;
2121				ab->b_flags &= ~ARC_BUF_AVAILABLE;
2122				DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, ab);
2123			}
2124			if (!have_lock)
2125				mutex_exit(hash_lock);
2126			if (bytes >= 0 && bytes_evicted >= bytes)
2127				break;
2128			if (bytes_remaining > 0) {
2129				mutex_exit(evicted_lock);
2130				mutex_exit(lock);
2131				idx  = ((idx + 1) & (list_count - 1));
2132				lists++;
2133				goto evict_start;
2134			}
2135		} else {
2136			missed += 1;
2137		}
2138	}
2139
2140	mutex_exit(evicted_lock);
2141	mutex_exit(lock);
2142
2143	idx  = ((idx + 1) & (list_count - 1));
2144	lists++;
2145
2146	if (bytes_evicted < bytes) {
2147		if (lists < list_count)
2148			goto evict_start;
2149		else
2150			dprintf("only evicted %lld bytes from %x",
2151			    (longlong_t)bytes_evicted, state);
2152	}
2153	if (type == ARC_BUFC_METADATA)
2154		evict_metadata_offset = idx;
2155	else
2156		evict_data_offset = idx;
2157
2158	if (skipped)
2159		ARCSTAT_INCR(arcstat_evict_skip, skipped);
2160
2161	if (missed)
2162		ARCSTAT_INCR(arcstat_mutex_miss, missed);
2163
2164	/*
2165	 * Note: we have just evicted some data into the ghost state,
2166	 * potentially putting the ghost size over the desired size.  Rather
2167	 * that evicting from the ghost list in this hot code path, leave
2168	 * this chore to the arc_reclaim_thread().
2169	 */
2170
2171	if (stolen)
2172		ARCSTAT_BUMP(arcstat_stolen);
2173	return (stolen);
2174}
2175
2176/*
2177 * Remove buffers from list until we've removed the specified number of
2178 * bytes.  Destroy the buffers that are removed.
2179 */
2180static void
2181arc_evict_ghost(arc_state_t *state, uint64_t spa, int64_t bytes)
2182{
2183	arc_buf_hdr_t *ab, *ab_prev;
2184	arc_buf_hdr_t marker = { 0 };
2185	list_t *list, *list_start;
2186	kmutex_t *hash_lock, *lock;
2187	uint64_t bytes_deleted = 0;
2188	uint64_t bufs_skipped = 0;
2189	int count = 0;
2190	static int evict_offset;
2191	int list_count, idx = evict_offset;
2192	int offset, lists = 0;
2193
2194	ASSERT(GHOST_STATE(state));
2195
2196	/*
2197	 * data lists come after metadata lists
2198	 */
2199	list_start = &state->arcs_lists[ARC_BUFC_NUMMETADATALISTS];
2200	list_count = ARC_BUFC_NUMDATALISTS;
2201	offset = ARC_BUFC_NUMMETADATALISTS;
2202
2203evict_start:
2204	list = &list_start[idx];
2205	lock = ARCS_LOCK(state, idx + offset);
2206
2207	mutex_enter(lock);
2208	for (ab = list_tail(list); ab; ab = ab_prev) {
2209		ab_prev = list_prev(list, ab);
2210		if (ab->b_type > ARC_BUFC_NUMTYPES)
2211			panic("invalid ab=%p", (void *)ab);
2212		if (spa && ab->b_spa != spa)
2213			continue;
2214
2215		/* ignore markers */
2216		if (ab->b_spa == 0)
2217			continue;
2218
2219		hash_lock = HDR_LOCK(ab);
2220		/* caller may be trying to modify this buffer, skip it */
2221		if (MUTEX_HELD(hash_lock))
2222			continue;
2223
2224		/*
2225		 * It may take a long time to evict all the bufs requested.
2226		 * To avoid blocking all arc activity, periodically drop
2227		 * the arcs_mtx and give other threads a chance to run
2228		 * before reacquiring the lock.
2229		 */
2230		if (count++ > arc_evict_iterations) {
2231			list_insert_after(list, ab, &marker);
2232			mutex_exit(lock);
2233			kpreempt(KPREEMPT_SYNC);
2234			mutex_enter(lock);
2235			ab_prev = list_prev(list, &marker);
2236			list_remove(list, &marker);
2237			count = 0;
2238			continue;
2239		}
2240		if (mutex_tryenter(hash_lock)) {
2241			ASSERT(!HDR_IO_IN_PROGRESS(ab));
2242			ASSERT(ab->b_buf == NULL);
2243			ARCSTAT_BUMP(arcstat_deleted);
2244			bytes_deleted += ab->b_size;
2245
2246			if (ab->b_l2hdr != NULL) {
2247				/*
2248				 * This buffer is cached on the 2nd Level ARC;
2249				 * don't destroy the header.
2250				 */
2251				arc_change_state(arc_l2c_only, ab, hash_lock);
2252				mutex_exit(hash_lock);
2253			} else {
2254				arc_change_state(arc_anon, ab, hash_lock);
2255				mutex_exit(hash_lock);
2256				arc_hdr_destroy(ab);
2257			}
2258
2259			DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, ab);
2260			if (bytes >= 0 && bytes_deleted >= bytes)
2261				break;
2262		} else if (bytes < 0) {
2263			/*
2264			 * Insert a list marker and then wait for the
2265			 * hash lock to become available. Once its
2266			 * available, restart from where we left off.
2267			 */
2268			list_insert_after(list, ab, &marker);
2269			mutex_exit(lock);
2270			mutex_enter(hash_lock);
2271			mutex_exit(hash_lock);
2272			mutex_enter(lock);
2273			ab_prev = list_prev(list, &marker);
2274			list_remove(list, &marker);
2275		} else {
2276			bufs_skipped += 1;
2277		}
2278
2279	}
2280	mutex_exit(lock);
2281	idx  = ((idx + 1) & (ARC_BUFC_NUMDATALISTS - 1));
2282	lists++;
2283
2284	if (lists < list_count)
2285		goto evict_start;
2286
2287	evict_offset = idx;
2288	if ((uintptr_t)list > (uintptr_t)&state->arcs_lists[ARC_BUFC_NUMMETADATALISTS] &&
2289	    (bytes < 0 || bytes_deleted < bytes)) {
2290		list_start = &state->arcs_lists[0];
2291		list_count = ARC_BUFC_NUMMETADATALISTS;
2292		offset = lists = 0;
2293		goto evict_start;
2294	}
2295
2296	if (bufs_skipped) {
2297		ARCSTAT_INCR(arcstat_mutex_miss, bufs_skipped);
2298		ASSERT(bytes >= 0);
2299	}
2300
2301	if (bytes_deleted < bytes)
2302		dprintf("only deleted %lld bytes from %p",
2303		    (longlong_t)bytes_deleted, state);
2304}
2305
2306static void
2307arc_adjust(void)
2308{
2309	int64_t adjustment, delta;
2310
2311	/*
2312	 * Adjust MRU size
2313	 */
2314
2315	adjustment = MIN((int64_t)(arc_size - arc_c),
2316	    (int64_t)(arc_anon->arcs_size + arc_mru->arcs_size + arc_meta_used -
2317	    arc_p));
2318
2319	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_DATA] > 0) {
2320		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_DATA], adjustment);
2321		(void) arc_evict(arc_mru, 0, delta, FALSE, ARC_BUFC_DATA);
2322		adjustment -= delta;
2323	}
2324
2325	if (adjustment > 0 && arc_mru->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2326		delta = MIN(arc_mru->arcs_lsize[ARC_BUFC_METADATA], adjustment);
2327		(void) arc_evict(arc_mru, 0, delta, FALSE,
2328		    ARC_BUFC_METADATA);
2329	}
2330
2331	/*
2332	 * Adjust MFU size
2333	 */
2334
2335	adjustment = arc_size - arc_c;
2336
2337	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_DATA] > 0) {
2338		delta = MIN(adjustment, arc_mfu->arcs_lsize[ARC_BUFC_DATA]);
2339		(void) arc_evict(arc_mfu, 0, delta, FALSE, ARC_BUFC_DATA);
2340		adjustment -= delta;
2341	}
2342
2343	if (adjustment > 0 && arc_mfu->arcs_lsize[ARC_BUFC_METADATA] > 0) {
2344		int64_t delta = MIN(adjustment,
2345		    arc_mfu->arcs_lsize[ARC_BUFC_METADATA]);
2346		(void) arc_evict(arc_mfu, 0, delta, FALSE,
2347		    ARC_BUFC_METADATA);
2348	}
2349
2350	/*
2351	 * Adjust ghost lists
2352	 */
2353
2354	adjustment = arc_mru->arcs_size + arc_mru_ghost->arcs_size - arc_c;
2355
2356	if (adjustment > 0 && arc_mru_ghost->arcs_size > 0) {
2357		delta = MIN(arc_mru_ghost->arcs_size, adjustment);
2358		arc_evict_ghost(arc_mru_ghost, 0, delta);
2359	}
2360
2361	adjustment =
2362	    arc_mru_ghost->arcs_size + arc_mfu_ghost->arcs_size - arc_c;
2363
2364	if (adjustment > 0 && arc_mfu_ghost->arcs_size > 0) {
2365		delta = MIN(arc_mfu_ghost->arcs_size, adjustment);
2366		arc_evict_ghost(arc_mfu_ghost, 0, delta);
2367	}
2368}
2369
2370static void
2371arc_do_user_evicts(void)
2372{
2373	static arc_buf_t *tmp_arc_eviction_list;
2374
2375	/*
2376	 * Move list over to avoid LOR
2377	 */
2378restart:
2379	mutex_enter(&arc_eviction_mtx);
2380	tmp_arc_eviction_list = arc_eviction_list;
2381	arc_eviction_list = NULL;
2382	mutex_exit(&arc_eviction_mtx);
2383
2384	while (tmp_arc_eviction_list != NULL) {
2385		arc_buf_t *buf = tmp_arc_eviction_list;
2386		tmp_arc_eviction_list = buf->b_next;
2387		mutex_enter(&buf->b_evict_lock);
2388		buf->b_hdr = NULL;
2389		mutex_exit(&buf->b_evict_lock);
2390
2391		if (buf->b_efunc != NULL)
2392			VERIFY0(buf->b_efunc(buf->b_private));
2393
2394		buf->b_efunc = NULL;
2395		buf->b_private = NULL;
2396		kmem_cache_free(buf_cache, buf);
2397	}
2398
2399	if (arc_eviction_list != NULL)
2400		goto restart;
2401}
2402
2403/*
2404 * Flush all *evictable* data from the cache for the given spa.
2405 * NOTE: this will not touch "active" (i.e. referenced) data.
2406 */
2407void
2408arc_flush(spa_t *spa)
2409{
2410	uint64_t guid = 0;
2411
2412	if (spa)
2413		guid = spa_load_guid(spa);
2414
2415	while (arc_mru->arcs_lsize[ARC_BUFC_DATA]) {
2416		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_DATA);
2417		if (spa)
2418			break;
2419	}
2420	while (arc_mru->arcs_lsize[ARC_BUFC_METADATA]) {
2421		(void) arc_evict(arc_mru, guid, -1, FALSE, ARC_BUFC_METADATA);
2422		if (spa)
2423			break;
2424	}
2425	while (arc_mfu->arcs_lsize[ARC_BUFC_DATA]) {
2426		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_DATA);
2427		if (spa)
2428			break;
2429	}
2430	while (arc_mfu->arcs_lsize[ARC_BUFC_METADATA]) {
2431		(void) arc_evict(arc_mfu, guid, -1, FALSE, ARC_BUFC_METADATA);
2432		if (spa)
2433			break;
2434	}
2435
2436	arc_evict_ghost(arc_mru_ghost, guid, -1);
2437	arc_evict_ghost(arc_mfu_ghost, guid, -1);
2438
2439	mutex_enter(&arc_reclaim_thr_lock);
2440	arc_do_user_evicts();
2441	mutex_exit(&arc_reclaim_thr_lock);
2442	ASSERT(spa || arc_eviction_list == NULL);
2443}
2444
2445void
2446arc_shrink(void)
2447{
2448
2449	if (arc_c > arc_c_min) {
2450		uint64_t to_free;
2451
2452		DTRACE_PROBE4(arc__shrink, uint64_t, arc_c, uint64_t,
2453			arc_c_min, uint64_t, arc_p, uint64_t, to_free);
2454#ifdef _KERNEL
2455		to_free = arc_c >> arc_shrink_shift;
2456#else
2457		to_free = arc_c >> arc_shrink_shift;
2458#endif
2459		if (arc_c > arc_c_min + to_free)
2460			atomic_add_64(&arc_c, -to_free);
2461		else
2462			arc_c = arc_c_min;
2463
2464		atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
2465		if (arc_c > arc_size)
2466			arc_c = MAX(arc_size, arc_c_min);
2467		if (arc_p > arc_c)
2468			arc_p = (arc_c >> 1);
2469
2470		DTRACE_PROBE2(arc__shrunk, uint64_t, arc_c, uint64_t,
2471			arc_p);
2472
2473		ASSERT(arc_c >= arc_c_min);
2474		ASSERT((int64_t)arc_p >= 0);
2475	}
2476
2477	if (arc_size > arc_c) {
2478		DTRACE_PROBE2(arc__shrink_adjust, uint64_t, arc_size,
2479			uint64_t, arc_c);
2480		arc_adjust();
2481	}
2482}
2483
2484static int needfree = 0;
2485
2486static int
2487arc_reclaim_needed(void)
2488{
2489
2490#ifdef _KERNEL
2491
2492	if (needfree) {
2493		DTRACE_PROBE(arc__reclaim_needfree);
2494		return (1);
2495	}
2496
2497	/*
2498	 * Cooperate with pagedaemon when it's time for it to scan
2499	 * and reclaim some pages.
2500	 */
2501	if (freemem < zfs_arc_free_target) {
2502		DTRACE_PROBE2(arc__reclaim_freemem, uint64_t,
2503		    freemem, uint64_t, zfs_arc_free_target);
2504		return (1);
2505	}
2506
2507#ifdef sun
2508	/*
2509	 * take 'desfree' extra pages, so we reclaim sooner, rather than later
2510	 */
2511	extra = desfree;
2512
2513	/*
2514	 * check that we're out of range of the pageout scanner.  It starts to
2515	 * schedule paging if freemem is less than lotsfree and needfree.
2516	 * lotsfree is the high-water mark for pageout, and needfree is the
2517	 * number of needed free pages.  We add extra pages here to make sure
2518	 * the scanner doesn't start up while we're freeing memory.
2519	 */
2520	if (freemem < lotsfree + needfree + extra)
2521		return (1);
2522
2523	/*
2524	 * check to make sure that swapfs has enough space so that anon
2525	 * reservations can still succeed. anon_resvmem() checks that the
2526	 * availrmem is greater than swapfs_minfree, and the number of reserved
2527	 * swap pages.  We also add a bit of extra here just to prevent
2528	 * circumstances from getting really dire.
2529	 */
2530	if (availrmem < swapfs_minfree + swapfs_reserve + extra)
2531		return (1);
2532
2533	/*
2534	 * Check that we have enough availrmem that memory locking (e.g., via
2535	 * mlock(3C) or memcntl(2)) can still succeed.  (pages_pp_maximum
2536	 * stores the number of pages that cannot be locked; when availrmem
2537	 * drops below pages_pp_maximum, page locking mechanisms such as
2538	 * page_pp_lock() will fail.)
2539	 */
2540	if (availrmem <= pages_pp_maximum)
2541		return (1);
2542
2543#endif	/* sun */
2544#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
2545	/*
2546	 * If we're on an i386 platform, it's possible that we'll exhaust the
2547	 * kernel heap space before we ever run out of available physical
2548	 * memory.  Most checks of the size of the heap_area compare against
2549	 * tune.t_minarmem, which is the minimum available real memory that we
2550	 * can have in the system.  However, this is generally fixed at 25 pages
2551	 * which is so low that it's useless.  In this comparison, we seek to
2552	 * calculate the total heap-size, and reclaim if more than 3/4ths of the
2553	 * heap is allocated.  (Or, in the calculation, if less than 1/4th is
2554	 * free)
2555	 */
2556	if (vmem_size(heap_arena, VMEM_FREE) <
2557	    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2)) {
2558		DTRACE_PROBE2(arc__reclaim_used, uint64_t,
2559		    vmem_size(heap_arena, VMEM_FREE), uint64_t,
2560		    (vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC)) >> 2);
2561		return (1);
2562	}
2563#endif
2564#ifdef sun
2565	/*
2566	 * If zio data pages are being allocated out of a separate heap segment,
2567	 * then enforce that the size of available vmem for this arena remains
2568	 * above about 1/16th free.
2569	 *
2570	 * Note: The 1/16th arena free requirement was put in place
2571	 * to aggressively evict memory from the arc in order to avoid
2572	 * memory fragmentation issues.
2573	 */
2574	if (zio_arena != NULL &&
2575	    vmem_size(zio_arena, VMEM_FREE) <
2576	    (vmem_size(zio_arena, VMEM_ALLOC) >> 4))
2577		return (1);
2578#endif	/* sun */
2579#else	/* _KERNEL */
2580	if (spa_get_random(100) == 0)
2581		return (1);
2582#endif	/* _KERNEL */
2583	DTRACE_PROBE(arc__reclaim_no);
2584
2585	return (0);
2586}
2587
2588extern kmem_cache_t	*zio_buf_cache[];
2589extern kmem_cache_t	*zio_data_buf_cache[];
2590
2591static void __noinline
2592arc_kmem_reap_now(arc_reclaim_strategy_t strat)
2593{
2594	size_t			i;
2595	kmem_cache_t		*prev_cache = NULL;
2596	kmem_cache_t		*prev_data_cache = NULL;
2597
2598	DTRACE_PROBE(arc__kmem_reap_start);
2599#ifdef _KERNEL
2600	if (arc_meta_used >= arc_meta_limit) {
2601		/*
2602		 * We are exceeding our meta-data cache limit.
2603		 * Purge some DNLC entries to release holds on meta-data.
2604		 */
2605		dnlc_reduce_cache((void *)(uintptr_t)arc_reduce_dnlc_percent);
2606	}
2607#if defined(__i386)
2608	/*
2609	 * Reclaim unused memory from all kmem caches.
2610	 */
2611	kmem_reap();
2612#endif
2613#endif
2614
2615	/*
2616	 * An aggressive reclamation will shrink the cache size as well as
2617	 * reap free buffers from the arc kmem caches.
2618	 */
2619	if (strat == ARC_RECLAIM_AGGR)
2620		arc_shrink();
2621
2622	for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
2623		if (zio_buf_cache[i] != prev_cache) {
2624			prev_cache = zio_buf_cache[i];
2625			kmem_cache_reap_now(zio_buf_cache[i]);
2626		}
2627		if (zio_data_buf_cache[i] != prev_data_cache) {
2628			prev_data_cache = zio_data_buf_cache[i];
2629			kmem_cache_reap_now(zio_data_buf_cache[i]);
2630		}
2631	}
2632	kmem_cache_reap_now(buf_cache);
2633	kmem_cache_reap_now(hdr_cache);
2634
2635#ifdef sun
2636	/*
2637	 * Ask the vmem arena to reclaim unused memory from its
2638	 * quantum caches.
2639	 */
2640	if (zio_arena != NULL && strat == ARC_RECLAIM_AGGR)
2641		vmem_qcache_reap(zio_arena);
2642#endif
2643	DTRACE_PROBE(arc__kmem_reap_end);
2644}
2645
2646static void
2647arc_reclaim_thread(void *dummy __unused)
2648{
2649	clock_t			growtime = 0;
2650	arc_reclaim_strategy_t	last_reclaim = ARC_RECLAIM_CONS;
2651	callb_cpr_t		cpr;
2652
2653	CALLB_CPR_INIT(&cpr, &arc_reclaim_thr_lock, callb_generic_cpr, FTAG);
2654
2655	mutex_enter(&arc_reclaim_thr_lock);
2656	while (arc_thread_exit == 0) {
2657		if (arc_reclaim_needed()) {
2658
2659			if (arc_no_grow) {
2660				if (last_reclaim == ARC_RECLAIM_CONS) {
2661					DTRACE_PROBE(arc__reclaim_aggr_no_grow);
2662					last_reclaim = ARC_RECLAIM_AGGR;
2663				} else {
2664					last_reclaim = ARC_RECLAIM_CONS;
2665				}
2666			} else {
2667				arc_no_grow = TRUE;
2668				last_reclaim = ARC_RECLAIM_AGGR;
2669				DTRACE_PROBE(arc__reclaim_aggr);
2670				membar_producer();
2671			}
2672
2673			/* reset the growth delay for every reclaim */
2674			growtime = ddi_get_lbolt() + (arc_grow_retry * hz);
2675
2676			if (needfree && last_reclaim == ARC_RECLAIM_CONS) {
2677				/*
2678				 * If needfree is TRUE our vm_lowmem hook
2679				 * was called and in that case we must free some
2680				 * memory, so switch to aggressive mode.
2681				 */
2682				arc_no_grow = TRUE;
2683				last_reclaim = ARC_RECLAIM_AGGR;
2684			}
2685			arc_kmem_reap_now(last_reclaim);
2686			arc_warm = B_TRUE;
2687
2688		} else if (arc_no_grow && ddi_get_lbolt() >= growtime) {
2689			arc_no_grow = FALSE;
2690		}
2691
2692		arc_adjust();
2693
2694		if (arc_eviction_list != NULL)
2695			arc_do_user_evicts();
2696
2697#ifdef _KERNEL
2698		if (needfree) {
2699			needfree = 0;
2700			wakeup(&needfree);
2701		}
2702#endif
2703
2704		/* block until needed, or one second, whichever is shorter */
2705		CALLB_CPR_SAFE_BEGIN(&cpr);
2706		(void) cv_timedwait(&arc_reclaim_thr_cv,
2707		    &arc_reclaim_thr_lock, hz);
2708		CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_thr_lock);
2709	}
2710
2711	arc_thread_exit = 0;
2712	cv_broadcast(&arc_reclaim_thr_cv);
2713	CALLB_CPR_EXIT(&cpr);		/* drops arc_reclaim_thr_lock */
2714	thread_exit();
2715}
2716
2717/*
2718 * Adapt arc info given the number of bytes we are trying to add and
2719 * the state that we are comming from.  This function is only called
2720 * when we are adding new content to the cache.
2721 */
2722static void
2723arc_adapt(int bytes, arc_state_t *state)
2724{
2725	int mult;
2726	uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
2727
2728	if (state == arc_l2c_only)
2729		return;
2730
2731	ASSERT(bytes > 0);
2732	/*
2733	 * Adapt the target size of the MRU list:
2734	 *	- if we just hit in the MRU ghost list, then increase
2735	 *	  the target size of the MRU list.
2736	 *	- if we just hit in the MFU ghost list, then increase
2737	 *	  the target size of the MFU list by decreasing the
2738	 *	  target size of the MRU list.
2739	 */
2740	if (state == arc_mru_ghost) {
2741		mult = ((arc_mru_ghost->arcs_size >= arc_mfu_ghost->arcs_size) ?
2742		    1 : (arc_mfu_ghost->arcs_size/arc_mru_ghost->arcs_size));
2743		mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
2744
2745		arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
2746	} else if (state == arc_mfu_ghost) {
2747		uint64_t delta;
2748
2749		mult = ((arc_mfu_ghost->arcs_size >= arc_mru_ghost->arcs_size) ?
2750		    1 : (arc_mru_ghost->arcs_size/arc_mfu_ghost->arcs_size));
2751		mult = MIN(mult, 10);
2752
2753		delta = MIN(bytes * mult, arc_p);
2754		arc_p = MAX(arc_p_min, arc_p - delta);
2755	}
2756	ASSERT((int64_t)arc_p >= 0);
2757
2758	if (arc_reclaim_needed()) {
2759		cv_signal(&arc_reclaim_thr_cv);
2760		return;
2761	}
2762
2763	if (arc_no_grow)
2764		return;
2765
2766	if (arc_c >= arc_c_max)
2767		return;
2768
2769	/*
2770	 * If we're within (2 * maxblocksize) bytes of the target
2771	 * cache size, increment the target cache size
2772	 */
2773	if (arc_size > arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
2774		DTRACE_PROBE1(arc__inc_adapt, int, bytes);
2775		atomic_add_64(&arc_c, (int64_t)bytes);
2776		if (arc_c > arc_c_max)
2777			arc_c = arc_c_max;
2778		else if (state == arc_anon)
2779			atomic_add_64(&arc_p, (int64_t)bytes);
2780		if (arc_p > arc_c)
2781			arc_p = arc_c;
2782	}
2783	ASSERT((int64_t)arc_p >= 0);
2784}
2785
2786/*
2787 * Check if the cache has reached its limits and eviction is required
2788 * prior to insert.
2789 */
2790static int
2791arc_evict_needed(arc_buf_contents_t type)
2792{
2793	if (type == ARC_BUFC_METADATA && arc_meta_used >= arc_meta_limit)
2794		return (1);
2795
2796	if (arc_reclaim_needed())
2797		return (1);
2798
2799	return (arc_size > arc_c);
2800}
2801
2802/*
2803 * The buffer, supplied as the first argument, needs a data block.
2804 * So, if we are at cache max, determine which cache should be victimized.
2805 * We have the following cases:
2806 *
2807 * 1. Insert for MRU, p > sizeof(arc_anon + arc_mru) ->
2808 * In this situation if we're out of space, but the resident size of the MFU is
2809 * under the limit, victimize the MFU cache to satisfy this insertion request.
2810 *
2811 * 2. Insert for MRU, p <= sizeof(arc_anon + arc_mru) ->
2812 * Here, we've used up all of the available space for the MRU, so we need to
2813 * evict from our own cache instead.  Evict from the set of resident MRU
2814 * entries.
2815 *
2816 * 3. Insert for MFU (c - p) > sizeof(arc_mfu) ->
2817 * c minus p represents the MFU space in the cache, since p is the size of the
2818 * cache that is dedicated to the MRU.  In this situation there's still space on
2819 * the MFU side, so the MRU side needs to be victimized.
2820 *
2821 * 4. Insert for MFU (c - p) < sizeof(arc_mfu) ->
2822 * MFU's resident set is consuming more space than it has been allotted.  In
2823 * this situation, we must victimize our own cache, the MFU, for this insertion.
2824 */
2825static void
2826arc_get_data_buf(arc_buf_t *buf)
2827{
2828	arc_state_t		*state = buf->b_hdr->b_state;
2829	uint64_t		size = buf->b_hdr->b_size;
2830	arc_buf_contents_t	type = buf->b_hdr->b_type;
2831
2832	arc_adapt(size, state);
2833
2834	/*
2835	 * We have not yet reached cache maximum size,
2836	 * just allocate a new buffer.
2837	 */
2838	if (!arc_evict_needed(type)) {
2839		if (type == ARC_BUFC_METADATA) {
2840			buf->b_data = zio_buf_alloc(size);
2841			arc_space_consume(size, ARC_SPACE_DATA);
2842		} else {
2843			ASSERT(type == ARC_BUFC_DATA);
2844			buf->b_data = zio_data_buf_alloc(size);
2845			ARCSTAT_INCR(arcstat_data_size, size);
2846			atomic_add_64(&arc_size, size);
2847		}
2848		goto out;
2849	}
2850
2851	/*
2852	 * If we are prefetching from the mfu ghost list, this buffer
2853	 * will end up on the mru list; so steal space from there.
2854	 */
2855	if (state == arc_mfu_ghost)
2856		state = buf->b_hdr->b_flags & ARC_PREFETCH ? arc_mru : arc_mfu;
2857	else if (state == arc_mru_ghost)
2858		state = arc_mru;
2859
2860	if (state == arc_mru || state == arc_anon) {
2861		uint64_t mru_used = arc_anon->arcs_size + arc_mru->arcs_size;
2862		state = (arc_mfu->arcs_lsize[type] >= size &&
2863		    arc_p > mru_used) ? arc_mfu : arc_mru;
2864	} else {
2865		/* MFU cases */
2866		uint64_t mfu_space = arc_c - arc_p;
2867		state =  (arc_mru->arcs_lsize[type] >= size &&
2868		    mfu_space > arc_mfu->arcs_size) ? arc_mru : arc_mfu;
2869	}
2870	if ((buf->b_data = arc_evict(state, 0, size, TRUE, type)) == NULL) {
2871		if (type == ARC_BUFC_METADATA) {
2872			buf->b_data = zio_buf_alloc(size);
2873			arc_space_consume(size, ARC_SPACE_DATA);
2874		} else {
2875			ASSERT(type == ARC_BUFC_DATA);
2876			buf->b_data = zio_data_buf_alloc(size);
2877			ARCSTAT_INCR(arcstat_data_size, size);
2878			atomic_add_64(&arc_size, size);
2879		}
2880		ARCSTAT_BUMP(arcstat_recycle_miss);
2881	}
2882	ASSERT(buf->b_data != NULL);
2883out:
2884	/*
2885	 * Update the state size.  Note that ghost states have a
2886	 * "ghost size" and so don't need to be updated.
2887	 */
2888	if (!GHOST_STATE(buf->b_hdr->b_state)) {
2889		arc_buf_hdr_t *hdr = buf->b_hdr;
2890
2891		atomic_add_64(&hdr->b_state->arcs_size, size);
2892		if (list_link_active(&hdr->b_arc_node)) {
2893			ASSERT(refcount_is_zero(&hdr->b_refcnt));
2894			atomic_add_64(&hdr->b_state->arcs_lsize[type], size);
2895		}
2896		/*
2897		 * If we are growing the cache, and we are adding anonymous
2898		 * data, and we have outgrown arc_p, update arc_p
2899		 */
2900		if (arc_size < arc_c && hdr->b_state == arc_anon &&
2901		    arc_anon->arcs_size + arc_mru->arcs_size > arc_p)
2902			arc_p = MIN(arc_c, arc_p + size);
2903	}
2904	ARCSTAT_BUMP(arcstat_allocated);
2905}
2906
2907/*
2908 * This routine is called whenever a buffer is accessed.
2909 * NOTE: the hash lock is dropped in this function.
2910 */
2911static void
2912arc_access(arc_buf_hdr_t *buf, kmutex_t *hash_lock)
2913{
2914	clock_t now;
2915
2916	ASSERT(MUTEX_HELD(hash_lock));
2917
2918	if (buf->b_state == arc_anon) {
2919		/*
2920		 * This buffer is not in the cache, and does not
2921		 * appear in our "ghost" list.  Add the new buffer
2922		 * to the MRU state.
2923		 */
2924
2925		ASSERT(buf->b_arc_access == 0);
2926		buf->b_arc_access = ddi_get_lbolt();
2927		DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2928		arc_change_state(arc_mru, buf, hash_lock);
2929
2930	} else if (buf->b_state == arc_mru) {
2931		now = ddi_get_lbolt();
2932
2933		/*
2934		 * If this buffer is here because of a prefetch, then either:
2935		 * - clear the flag if this is a "referencing" read
2936		 *   (any subsequent access will bump this into the MFU state).
2937		 * or
2938		 * - move the buffer to the head of the list if this is
2939		 *   another prefetch (to make it less likely to be evicted).
2940		 */
2941		if ((buf->b_flags & ARC_PREFETCH) != 0) {
2942			if (refcount_count(&buf->b_refcnt) == 0) {
2943				ASSERT(list_link_active(&buf->b_arc_node));
2944			} else {
2945				buf->b_flags &= ~ARC_PREFETCH;
2946				ARCSTAT_BUMP(arcstat_mru_hits);
2947			}
2948			buf->b_arc_access = now;
2949			return;
2950		}
2951
2952		/*
2953		 * This buffer has been "accessed" only once so far,
2954		 * but it is still in the cache. Move it to the MFU
2955		 * state.
2956		 */
2957		if (now > buf->b_arc_access + ARC_MINTIME) {
2958			/*
2959			 * More than 125ms have passed since we
2960			 * instantiated this buffer.  Move it to the
2961			 * most frequently used state.
2962			 */
2963			buf->b_arc_access = now;
2964			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2965			arc_change_state(arc_mfu, buf, hash_lock);
2966		}
2967		ARCSTAT_BUMP(arcstat_mru_hits);
2968	} else if (buf->b_state == arc_mru_ghost) {
2969		arc_state_t	*new_state;
2970		/*
2971		 * This buffer has been "accessed" recently, but
2972		 * was evicted from the cache.  Move it to the
2973		 * MFU state.
2974		 */
2975
2976		if (buf->b_flags & ARC_PREFETCH) {
2977			new_state = arc_mru;
2978			if (refcount_count(&buf->b_refcnt) > 0)
2979				buf->b_flags &= ~ARC_PREFETCH;
2980			DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, buf);
2981		} else {
2982			new_state = arc_mfu;
2983			DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
2984		}
2985
2986		buf->b_arc_access = ddi_get_lbolt();
2987		arc_change_state(new_state, buf, hash_lock);
2988
2989		ARCSTAT_BUMP(arcstat_mru_ghost_hits);
2990	} else if (buf->b_state == arc_mfu) {
2991		/*
2992		 * This buffer has been accessed more than once and is
2993		 * still in the cache.  Keep it in the MFU state.
2994		 *
2995		 * NOTE: an add_reference() that occurred when we did
2996		 * the arc_read() will have kicked this off the list.
2997		 * If it was a prefetch, we will explicitly move it to
2998		 * the head of the list now.
2999		 */
3000		if ((buf->b_flags & ARC_PREFETCH) != 0) {
3001			ASSERT(refcount_count(&buf->b_refcnt) == 0);
3002			ASSERT(list_link_active(&buf->b_arc_node));
3003		}
3004		ARCSTAT_BUMP(arcstat_mfu_hits);
3005		buf->b_arc_access = ddi_get_lbolt();
3006	} else if (buf->b_state == arc_mfu_ghost) {
3007		arc_state_t	*new_state = arc_mfu;
3008		/*
3009		 * This buffer has been accessed more than once but has
3010		 * been evicted from the cache.  Move it back to the
3011		 * MFU state.
3012		 */
3013
3014		if (buf->b_flags & ARC_PREFETCH) {
3015			/*
3016			 * This is a prefetch access...
3017			 * move this block back to the MRU state.
3018			 */
3019			ASSERT0(refcount_count(&buf->b_refcnt));
3020			new_state = arc_mru;
3021		}
3022
3023		buf->b_arc_access = ddi_get_lbolt();
3024		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3025		arc_change_state(new_state, buf, hash_lock);
3026
3027		ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
3028	} else if (buf->b_state == arc_l2c_only) {
3029		/*
3030		 * This buffer is on the 2nd Level ARC.
3031		 */
3032
3033		buf->b_arc_access = ddi_get_lbolt();
3034		DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, buf);
3035		arc_change_state(arc_mfu, buf, hash_lock);
3036	} else {
3037		ASSERT(!"invalid arc state");
3038	}
3039}
3040
3041/* a generic arc_done_func_t which you can use */
3042/* ARGSUSED */
3043void
3044arc_bcopy_func(zio_t *zio, arc_buf_t *buf, void *arg)
3045{
3046	if (zio == NULL || zio->io_error == 0)
3047		bcopy(buf->b_data, arg, buf->b_hdr->b_size);
3048	VERIFY(arc_buf_remove_ref(buf, arg));
3049}
3050
3051/* a generic arc_done_func_t */
3052void
3053arc_getbuf_func(zio_t *zio, arc_buf_t *buf, void *arg)
3054{
3055	arc_buf_t **bufp = arg;
3056	if (zio && zio->io_error) {
3057		VERIFY(arc_buf_remove_ref(buf, arg));
3058		*bufp = NULL;
3059	} else {
3060		*bufp = buf;
3061		ASSERT(buf->b_data);
3062	}
3063}
3064
3065static void
3066arc_read_done(zio_t *zio)
3067{
3068	arc_buf_hdr_t	*hdr;
3069	arc_buf_t	*buf;
3070	arc_buf_t	*abuf;	/* buffer we're assigning to callback */
3071	kmutex_t	*hash_lock = NULL;
3072	arc_callback_t	*callback_list, *acb;
3073	int		freeable = FALSE;
3074
3075	buf = zio->io_private;
3076	hdr = buf->b_hdr;
3077
3078	/*
3079	 * The hdr was inserted into hash-table and removed from lists
3080	 * prior to starting I/O.  We should find this header, since
3081	 * it's in the hash table, and it should be legit since it's
3082	 * not possible to evict it during the I/O.  The only possible
3083	 * reason for it not to be found is if we were freed during the
3084	 * read.
3085	 */
3086	if (HDR_IN_HASH_TABLE(hdr)) {
3087		ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
3088		ASSERT3U(hdr->b_dva.dva_word[0], ==,
3089		    BP_IDENTITY(zio->io_bp)->dva_word[0]);
3090		ASSERT3U(hdr->b_dva.dva_word[1], ==,
3091		    BP_IDENTITY(zio->io_bp)->dva_word[1]);
3092
3093		arc_buf_hdr_t *found = buf_hash_find(hdr->b_spa, zio->io_bp,
3094		    &hash_lock);
3095
3096		ASSERT((found == NULL && HDR_FREED_IN_READ(hdr) &&
3097		    hash_lock == NULL) ||
3098		    (found == hdr &&
3099		    DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
3100		    (found == hdr && HDR_L2_READING(hdr)));
3101	}
3102
3103	hdr->b_flags &= ~ARC_L2_EVICTED;
3104	if (l2arc_noprefetch && (hdr->b_flags & ARC_PREFETCH))
3105		hdr->b_flags &= ~ARC_L2CACHE;
3106
3107	/* byteswap if necessary */
3108	callback_list = hdr->b_acb;
3109	ASSERT(callback_list != NULL);
3110	if (BP_SHOULD_BYTESWAP(zio->io_bp) && zio->io_error == 0) {
3111		dmu_object_byteswap_t bswap =
3112		    DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
3113		arc_byteswap_func_t *func = BP_GET_LEVEL(zio->io_bp) > 0 ?
3114		    byteswap_uint64_array :
3115		    dmu_ot_byteswap[bswap].ob_func;
3116		func(buf->b_data, hdr->b_size);
3117	}
3118
3119	arc_cksum_compute(buf, B_FALSE);
3120#ifdef illumos
3121	arc_buf_watch(buf);
3122#endif /* illumos */
3123
3124	if (hash_lock && zio->io_error == 0 && hdr->b_state == arc_anon) {
3125		/*
3126		 * Only call arc_access on anonymous buffers.  This is because
3127		 * if we've issued an I/O for an evicted buffer, we've already
3128		 * called arc_access (to prevent any simultaneous readers from
3129		 * getting confused).
3130		 */
3131		arc_access(hdr, hash_lock);
3132	}
3133
3134	/* create copies of the data buffer for the callers */
3135	abuf = buf;
3136	for (acb = callback_list; acb; acb = acb->acb_next) {
3137		if (acb->acb_done) {
3138			if (abuf == NULL) {
3139				ARCSTAT_BUMP(arcstat_duplicate_reads);
3140				abuf = arc_buf_clone(buf);
3141			}
3142			acb->acb_buf = abuf;
3143			abuf = NULL;
3144		}
3145	}
3146	hdr->b_acb = NULL;
3147	hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3148	ASSERT(!HDR_BUF_AVAILABLE(hdr));
3149	if (abuf == buf) {
3150		ASSERT(buf->b_efunc == NULL);
3151		ASSERT(hdr->b_datacnt == 1);
3152		hdr->b_flags |= ARC_BUF_AVAILABLE;
3153	}
3154
3155	ASSERT(refcount_is_zero(&hdr->b_refcnt) || callback_list != NULL);
3156
3157	if (zio->io_error != 0) {
3158		hdr->b_flags |= ARC_IO_ERROR;
3159		if (hdr->b_state != arc_anon)
3160			arc_change_state(arc_anon, hdr, hash_lock);
3161		if (HDR_IN_HASH_TABLE(hdr))
3162			buf_hash_remove(hdr);
3163		freeable = refcount_is_zero(&hdr->b_refcnt);
3164	}
3165
3166	/*
3167	 * Broadcast before we drop the hash_lock to avoid the possibility
3168	 * that the hdr (and hence the cv) might be freed before we get to
3169	 * the cv_broadcast().
3170	 */
3171	cv_broadcast(&hdr->b_cv);
3172
3173	if (hash_lock) {
3174		mutex_exit(hash_lock);
3175	} else {
3176		/*
3177		 * This block was freed while we waited for the read to
3178		 * complete.  It has been removed from the hash table and
3179		 * moved to the anonymous state (so that it won't show up
3180		 * in the cache).
3181		 */
3182		ASSERT3P(hdr->b_state, ==, arc_anon);
3183		freeable = refcount_is_zero(&hdr->b_refcnt);
3184	}
3185
3186	/* execute each callback and free its structure */
3187	while ((acb = callback_list) != NULL) {
3188		if (acb->acb_done)
3189			acb->acb_done(zio, acb->acb_buf, acb->acb_private);
3190
3191		if (acb->acb_zio_dummy != NULL) {
3192			acb->acb_zio_dummy->io_error = zio->io_error;
3193			zio_nowait(acb->acb_zio_dummy);
3194		}
3195
3196		callback_list = acb->acb_next;
3197		kmem_free(acb, sizeof (arc_callback_t));
3198	}
3199
3200	if (freeable)
3201		arc_hdr_destroy(hdr);
3202}
3203
3204/*
3205 * "Read" the block block at the specified DVA (in bp) via the
3206 * cache.  If the block is found in the cache, invoke the provided
3207 * callback immediately and return.  Note that the `zio' parameter
3208 * in the callback will be NULL in this case, since no IO was
3209 * required.  If the block is not in the cache pass the read request
3210 * on to the spa with a substitute callback function, so that the
3211 * requested block will be added to the cache.
3212 *
3213 * If a read request arrives for a block that has a read in-progress,
3214 * either wait for the in-progress read to complete (and return the
3215 * results); or, if this is a read with a "done" func, add a record
3216 * to the read to invoke the "done" func when the read completes,
3217 * and return; or just return.
3218 *
3219 * arc_read_done() will invoke all the requested "done" functions
3220 * for readers of this block.
3221 */
3222int
3223arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp, arc_done_func_t *done,
3224    void *private, zio_priority_t priority, int zio_flags, uint32_t *arc_flags,
3225    const zbookmark_phys_t *zb)
3226{
3227	arc_buf_hdr_t *hdr = NULL;
3228	arc_buf_t *buf = NULL;
3229	kmutex_t *hash_lock = NULL;
3230	zio_t *rzio;
3231	uint64_t guid = spa_load_guid(spa);
3232
3233	ASSERT(!BP_IS_EMBEDDED(bp) ||
3234	    BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
3235
3236top:
3237	if (!BP_IS_EMBEDDED(bp)) {
3238		/*
3239		 * Embedded BP's have no DVA and require no I/O to "read".
3240		 * Create an anonymous arc buf to back it.
3241		 */
3242		hdr = buf_hash_find(guid, bp, &hash_lock);
3243	}
3244
3245	if (hdr != NULL && hdr->b_datacnt > 0) {
3246
3247		*arc_flags |= ARC_CACHED;
3248
3249		if (HDR_IO_IN_PROGRESS(hdr)) {
3250
3251			if (*arc_flags & ARC_WAIT) {
3252				cv_wait(&hdr->b_cv, hash_lock);
3253				mutex_exit(hash_lock);
3254				goto top;
3255			}
3256			ASSERT(*arc_flags & ARC_NOWAIT);
3257
3258			if (done) {
3259				arc_callback_t	*acb = NULL;
3260
3261				acb = kmem_zalloc(sizeof (arc_callback_t),
3262				    KM_SLEEP);
3263				acb->acb_done = done;
3264				acb->acb_private = private;
3265				if (pio != NULL)
3266					acb->acb_zio_dummy = zio_null(pio,
3267					    spa, NULL, NULL, NULL, zio_flags);
3268
3269				ASSERT(acb->acb_done != NULL);
3270				acb->acb_next = hdr->b_acb;
3271				hdr->b_acb = acb;
3272				add_reference(hdr, hash_lock, private);
3273				mutex_exit(hash_lock);
3274				return (0);
3275			}
3276			mutex_exit(hash_lock);
3277			return (0);
3278		}
3279
3280		ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3281
3282		if (done) {
3283			add_reference(hdr, hash_lock, private);
3284			/*
3285			 * If this block is already in use, create a new
3286			 * copy of the data so that we will be guaranteed
3287			 * that arc_release() will always succeed.
3288			 */
3289			buf = hdr->b_buf;
3290			ASSERT(buf);
3291			ASSERT(buf->b_data);
3292			if (HDR_BUF_AVAILABLE(hdr)) {
3293				ASSERT(buf->b_efunc == NULL);
3294				hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3295			} else {
3296				buf = arc_buf_clone(buf);
3297			}
3298
3299		} else if (*arc_flags & ARC_PREFETCH &&
3300		    refcount_count(&hdr->b_refcnt) == 0) {
3301			hdr->b_flags |= ARC_PREFETCH;
3302		}
3303		DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3304		arc_access(hdr, hash_lock);
3305		if (*arc_flags & ARC_L2CACHE)
3306			hdr->b_flags |= ARC_L2CACHE;
3307		if (*arc_flags & ARC_L2COMPRESS)
3308			hdr->b_flags |= ARC_L2COMPRESS;
3309		mutex_exit(hash_lock);
3310		ARCSTAT_BUMP(arcstat_hits);
3311		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3312		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3313		    data, metadata, hits);
3314
3315		if (done)
3316			done(NULL, buf, private);
3317	} else {
3318		uint64_t size = BP_GET_LSIZE(bp);
3319		arc_callback_t *acb;
3320		vdev_t *vd = NULL;
3321		uint64_t addr = 0;
3322		boolean_t devw = B_FALSE;
3323		enum zio_compress b_compress = ZIO_COMPRESS_OFF;
3324		uint64_t b_asize = 0;
3325
3326		if (hdr == NULL) {
3327			/* this block is not in the cache */
3328			arc_buf_hdr_t *exists = NULL;
3329			arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
3330			buf = arc_buf_alloc(spa, size, private, type);
3331			hdr = buf->b_hdr;
3332			if (!BP_IS_EMBEDDED(bp)) {
3333				hdr->b_dva = *BP_IDENTITY(bp);
3334				hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
3335				hdr->b_cksum0 = bp->blk_cksum.zc_word[0];
3336				exists = buf_hash_insert(hdr, &hash_lock);
3337			}
3338			if (exists != NULL) {
3339				/* somebody beat us to the hash insert */
3340				mutex_exit(hash_lock);
3341				buf_discard_identity(hdr);
3342				(void) arc_buf_remove_ref(buf, private);
3343				goto top; /* restart the IO request */
3344			}
3345			/* if this is a prefetch, we don't have a reference */
3346			if (*arc_flags & ARC_PREFETCH) {
3347				(void) remove_reference(hdr, hash_lock,
3348				    private);
3349				hdr->b_flags |= ARC_PREFETCH;
3350			}
3351			if (*arc_flags & ARC_L2CACHE)
3352				hdr->b_flags |= ARC_L2CACHE;
3353			if (*arc_flags & ARC_L2COMPRESS)
3354				hdr->b_flags |= ARC_L2COMPRESS;
3355			if (BP_GET_LEVEL(bp) > 0)
3356				hdr->b_flags |= ARC_INDIRECT;
3357		} else {
3358			/* this block is in the ghost cache */
3359			ASSERT(GHOST_STATE(hdr->b_state));
3360			ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3361			ASSERT0(refcount_count(&hdr->b_refcnt));
3362			ASSERT(hdr->b_buf == NULL);
3363
3364			/* if this is a prefetch, we don't have a reference */
3365			if (*arc_flags & ARC_PREFETCH)
3366				hdr->b_flags |= ARC_PREFETCH;
3367			else
3368				add_reference(hdr, hash_lock, private);
3369			if (*arc_flags & ARC_L2CACHE)
3370				hdr->b_flags |= ARC_L2CACHE;
3371			if (*arc_flags & ARC_L2COMPRESS)
3372				hdr->b_flags |= ARC_L2COMPRESS;
3373			buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3374			buf->b_hdr = hdr;
3375			buf->b_data = NULL;
3376			buf->b_efunc = NULL;
3377			buf->b_private = NULL;
3378			buf->b_next = NULL;
3379			hdr->b_buf = buf;
3380			ASSERT(hdr->b_datacnt == 0);
3381			hdr->b_datacnt = 1;
3382			arc_get_data_buf(buf);
3383			arc_access(hdr, hash_lock);
3384		}
3385
3386		ASSERT(!GHOST_STATE(hdr->b_state));
3387
3388		acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
3389		acb->acb_done = done;
3390		acb->acb_private = private;
3391
3392		ASSERT(hdr->b_acb == NULL);
3393		hdr->b_acb = acb;
3394		hdr->b_flags |= ARC_IO_IN_PROGRESS;
3395
3396		if (hdr->b_l2hdr != NULL &&
3397		    (vd = hdr->b_l2hdr->b_dev->l2ad_vdev) != NULL) {
3398			devw = hdr->b_l2hdr->b_dev->l2ad_writing;
3399			addr = hdr->b_l2hdr->b_daddr;
3400			b_compress = hdr->b_l2hdr->b_compress;
3401			b_asize = hdr->b_l2hdr->b_asize;
3402			/*
3403			 * Lock out device removal.
3404			 */
3405			if (vdev_is_dead(vd) ||
3406			    !spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
3407				vd = NULL;
3408		}
3409
3410		if (hash_lock != NULL)
3411			mutex_exit(hash_lock);
3412
3413		/*
3414		 * At this point, we have a level 1 cache miss.  Try again in
3415		 * L2ARC if possible.
3416		 */
3417		ASSERT3U(hdr->b_size, ==, size);
3418		DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
3419		    uint64_t, size, zbookmark_phys_t *, zb);
3420		ARCSTAT_BUMP(arcstat_misses);
3421		ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3422		    demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3423		    data, metadata, misses);
3424#ifdef _KERNEL
3425		curthread->td_ru.ru_inblock++;
3426#endif
3427
3428		if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
3429			/*
3430			 * Read from the L2ARC if the following are true:
3431			 * 1. The L2ARC vdev was previously cached.
3432			 * 2. This buffer still has L2ARC metadata.
3433			 * 3. This buffer isn't currently writing to the L2ARC.
3434			 * 4. The L2ARC entry wasn't evicted, which may
3435			 *    also have invalidated the vdev.
3436			 * 5. This isn't prefetch and l2arc_noprefetch is set.
3437			 */
3438			if (hdr->b_l2hdr != NULL &&
3439			    !HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
3440			    !(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
3441				l2arc_read_callback_t *cb;
3442
3443				DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
3444				ARCSTAT_BUMP(arcstat_l2_hits);
3445
3446				cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3447				    KM_SLEEP);
3448				cb->l2rcb_buf = buf;
3449				cb->l2rcb_spa = spa;
3450				cb->l2rcb_bp = *bp;
3451				cb->l2rcb_zb = *zb;
3452				cb->l2rcb_flags = zio_flags;
3453				cb->l2rcb_compress = b_compress;
3454
3455				ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3456				    addr + size < vd->vdev_psize -
3457				    VDEV_LABEL_END_SIZE);
3458
3459				/*
3460				 * l2arc read.  The SCL_L2ARC lock will be
3461				 * released by l2arc_read_done().
3462				 * Issue a null zio if the underlying buffer
3463				 * was squashed to zero size by compression.
3464				 */
3465				if (b_compress == ZIO_COMPRESS_EMPTY) {
3466					rzio = zio_null(pio, spa, vd,
3467					    l2arc_read_done, cb,
3468					    zio_flags | ZIO_FLAG_DONT_CACHE |
3469					    ZIO_FLAG_CANFAIL |
3470					    ZIO_FLAG_DONT_PROPAGATE |
3471					    ZIO_FLAG_DONT_RETRY);
3472				} else {
3473					rzio = zio_read_phys(pio, vd, addr,
3474					    b_asize, buf->b_data,
3475					    ZIO_CHECKSUM_OFF,
3476					    l2arc_read_done, cb, priority,
3477					    zio_flags | ZIO_FLAG_DONT_CACHE |
3478					    ZIO_FLAG_CANFAIL |
3479					    ZIO_FLAG_DONT_PROPAGATE |
3480					    ZIO_FLAG_DONT_RETRY, B_FALSE);
3481				}
3482				DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3483				    zio_t *, rzio);
3484				ARCSTAT_INCR(arcstat_l2_read_bytes, b_asize);
3485
3486				if (*arc_flags & ARC_NOWAIT) {
3487					zio_nowait(rzio);
3488					return (0);
3489				}
3490
3491				ASSERT(*arc_flags & ARC_WAIT);
3492				if (zio_wait(rzio) == 0)
3493					return (0);
3494
3495				/* l2arc read error; goto zio_read() */
3496			} else {
3497				DTRACE_PROBE1(l2arc__miss,
3498				    arc_buf_hdr_t *, hdr);
3499				ARCSTAT_BUMP(arcstat_l2_misses);
3500				if (HDR_L2_WRITING(hdr))
3501					ARCSTAT_BUMP(arcstat_l2_rw_clash);
3502				spa_config_exit(spa, SCL_L2ARC, vd);
3503			}
3504		} else {
3505			if (vd != NULL)
3506				spa_config_exit(spa, SCL_L2ARC, vd);
3507			if (l2arc_ndev != 0) {
3508				DTRACE_PROBE1(l2arc__miss,
3509				    arc_buf_hdr_t *, hdr);
3510				ARCSTAT_BUMP(arcstat_l2_misses);
3511			}
3512		}
3513
3514		rzio = zio_read(pio, spa, bp, buf->b_data, size,
3515		    arc_read_done, buf, priority, zio_flags, zb);
3516
3517		if (*arc_flags & ARC_WAIT)
3518			return (zio_wait(rzio));
3519
3520		ASSERT(*arc_flags & ARC_NOWAIT);
3521		zio_nowait(rzio);
3522	}
3523	return (0);
3524}
3525
3526void
3527arc_set_callback(arc_buf_t *buf, arc_evict_func_t *func, void *private)
3528{
3529	ASSERT(buf->b_hdr != NULL);
3530	ASSERT(buf->b_hdr->b_state != arc_anon);
3531	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt) || func == NULL);
3532	ASSERT(buf->b_efunc == NULL);
3533	ASSERT(!HDR_BUF_AVAILABLE(buf->b_hdr));
3534
3535	buf->b_efunc = func;
3536	buf->b_private = private;
3537}
3538
3539/*
3540 * Notify the arc that a block was freed, and thus will never be used again.
3541 */
3542void
3543arc_freed(spa_t *spa, const blkptr_t *bp)
3544{
3545	arc_buf_hdr_t *hdr;
3546	kmutex_t *hash_lock;
3547	uint64_t guid = spa_load_guid(spa);
3548
3549	ASSERT(!BP_IS_EMBEDDED(bp));
3550
3551	hdr = buf_hash_find(guid, bp, &hash_lock);
3552	if (hdr == NULL)
3553		return;
3554	if (HDR_BUF_AVAILABLE(hdr)) {
3555		arc_buf_t *buf = hdr->b_buf;
3556		add_reference(hdr, hash_lock, FTAG);
3557		hdr->b_flags &= ~ARC_BUF_AVAILABLE;
3558		mutex_exit(hash_lock);
3559
3560		arc_release(buf, FTAG);
3561		(void) arc_buf_remove_ref(buf, FTAG);
3562	} else {
3563		mutex_exit(hash_lock);
3564	}
3565
3566}
3567
3568/*
3569 * Clear the user eviction callback set by arc_set_callback(), first calling
3570 * it if it exists.  Because the presence of a callback keeps an arc_buf cached
3571 * clearing the callback may result in the arc_buf being destroyed.  However,
3572 * it will not result in the *last* arc_buf being destroyed, hence the data
3573 * will remain cached in the ARC. We make a copy of the arc buffer here so
3574 * that we can process the callback without holding any locks.
3575 *
3576 * It's possible that the callback is already in the process of being cleared
3577 * by another thread.  In this case we can not clear the callback.
3578 *
3579 * Returns B_TRUE if the callback was successfully called and cleared.
3580 */
3581boolean_t
3582arc_clear_callback(arc_buf_t *buf)
3583{
3584	arc_buf_hdr_t *hdr;
3585	kmutex_t *hash_lock;
3586	arc_evict_func_t *efunc = buf->b_efunc;
3587	void *private = buf->b_private;
3588	list_t *list, *evicted_list;
3589	kmutex_t *lock, *evicted_lock;
3590
3591	mutex_enter(&buf->b_evict_lock);
3592	hdr = buf->b_hdr;
3593	if (hdr == NULL) {
3594		/*
3595		 * We are in arc_do_user_evicts().
3596		 */
3597		ASSERT(buf->b_data == NULL);
3598		mutex_exit(&buf->b_evict_lock);
3599		return (B_FALSE);
3600	} else if (buf->b_data == NULL) {
3601		/*
3602		 * We are on the eviction list; process this buffer now
3603		 * but let arc_do_user_evicts() do the reaping.
3604		 */
3605		buf->b_efunc = NULL;
3606		mutex_exit(&buf->b_evict_lock);
3607		VERIFY0(efunc(private));
3608		return (B_TRUE);
3609	}
3610	hash_lock = HDR_LOCK(hdr);
3611	mutex_enter(hash_lock);
3612	hdr = buf->b_hdr;
3613	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3614
3615	ASSERT3U(refcount_count(&hdr->b_refcnt), <, hdr->b_datacnt);
3616	ASSERT(hdr->b_state == arc_mru || hdr->b_state == arc_mfu);
3617
3618	buf->b_efunc = NULL;
3619	buf->b_private = NULL;
3620
3621	if (hdr->b_datacnt > 1) {
3622		mutex_exit(&buf->b_evict_lock);
3623		arc_buf_destroy(buf, FALSE, TRUE);
3624	} else {
3625		ASSERT(buf == hdr->b_buf);
3626		hdr->b_flags |= ARC_BUF_AVAILABLE;
3627		mutex_exit(&buf->b_evict_lock);
3628	}
3629
3630	mutex_exit(hash_lock);
3631	VERIFY0(efunc(private));
3632	return (B_TRUE);
3633}
3634
3635/*
3636 * Release this buffer from the cache, making it an anonymous buffer.  This
3637 * must be done after a read and prior to modifying the buffer contents.
3638 * If the buffer has more than one reference, we must make
3639 * a new hdr for the buffer.
3640 */
3641void
3642arc_release(arc_buf_t *buf, void *tag)
3643{
3644	arc_buf_hdr_t *hdr;
3645	kmutex_t *hash_lock = NULL;
3646	l2arc_buf_hdr_t *l2hdr;
3647	uint64_t buf_size;
3648
3649	/*
3650	 * It would be nice to assert that if it's DMU metadata (level >
3651	 * 0 || it's the dnode file), then it must be syncing context.
3652	 * But we don't know that information at this level.
3653	 */
3654
3655	mutex_enter(&buf->b_evict_lock);
3656	hdr = buf->b_hdr;
3657
3658	/* this buffer is not on any list */
3659	ASSERT(refcount_count(&hdr->b_refcnt) > 0);
3660
3661	if (hdr->b_state == arc_anon) {
3662		/* this buffer is already released */
3663		ASSERT(buf->b_efunc == NULL);
3664	} else {
3665		hash_lock = HDR_LOCK(hdr);
3666		mutex_enter(hash_lock);
3667		hdr = buf->b_hdr;
3668		ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
3669	}
3670
3671	l2hdr = hdr->b_l2hdr;
3672	if (l2hdr) {
3673		mutex_enter(&l2arc_buflist_mtx);
3674		hdr->b_l2hdr = NULL;
3675		list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3676	}
3677	buf_size = hdr->b_size;
3678
3679	/*
3680	 * Do we have more than one buf?
3681	 */
3682	if (hdr->b_datacnt > 1) {
3683		arc_buf_hdr_t *nhdr;
3684		arc_buf_t **bufp;
3685		uint64_t blksz = hdr->b_size;
3686		uint64_t spa = hdr->b_spa;
3687		arc_buf_contents_t type = hdr->b_type;
3688		uint32_t flags = hdr->b_flags;
3689
3690		ASSERT(hdr->b_buf != buf || buf->b_next != NULL);
3691		/*
3692		 * Pull the data off of this hdr and attach it to
3693		 * a new anonymous hdr.
3694		 */
3695		(void) remove_reference(hdr, hash_lock, tag);
3696		bufp = &hdr->b_buf;
3697		while (*bufp != buf)
3698			bufp = &(*bufp)->b_next;
3699		*bufp = buf->b_next;
3700		buf->b_next = NULL;
3701
3702		ASSERT3U(hdr->b_state->arcs_size, >=, hdr->b_size);
3703		atomic_add_64(&hdr->b_state->arcs_size, -hdr->b_size);
3704		if (refcount_is_zero(&hdr->b_refcnt)) {
3705			uint64_t *size = &hdr->b_state->arcs_lsize[hdr->b_type];
3706			ASSERT3U(*size, >=, hdr->b_size);
3707			atomic_add_64(size, -hdr->b_size);
3708		}
3709
3710		/*
3711		 * We're releasing a duplicate user data buffer, update
3712		 * our statistics accordingly.
3713		 */
3714		if (hdr->b_type == ARC_BUFC_DATA) {
3715			ARCSTAT_BUMPDOWN(arcstat_duplicate_buffers);
3716			ARCSTAT_INCR(arcstat_duplicate_buffers_size,
3717			    -hdr->b_size);
3718		}
3719		hdr->b_datacnt -= 1;
3720		arc_cksum_verify(buf);
3721#ifdef illumos
3722		arc_buf_unwatch(buf);
3723#endif /* illumos */
3724
3725		mutex_exit(hash_lock);
3726
3727		nhdr = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
3728		nhdr->b_size = blksz;
3729		nhdr->b_spa = spa;
3730		nhdr->b_type = type;
3731		nhdr->b_buf = buf;
3732		nhdr->b_state = arc_anon;
3733		nhdr->b_arc_access = 0;
3734		nhdr->b_flags = flags & ARC_L2_WRITING;
3735		nhdr->b_l2hdr = NULL;
3736		nhdr->b_datacnt = 1;
3737		nhdr->b_freeze_cksum = NULL;
3738		(void) refcount_add(&nhdr->b_refcnt, tag);
3739		buf->b_hdr = nhdr;
3740		mutex_exit(&buf->b_evict_lock);
3741		atomic_add_64(&arc_anon->arcs_size, blksz);
3742	} else {
3743		mutex_exit(&buf->b_evict_lock);
3744		ASSERT(refcount_count(&hdr->b_refcnt) == 1);
3745		ASSERT(!list_link_active(&hdr->b_arc_node));
3746		ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3747		if (hdr->b_state != arc_anon)
3748			arc_change_state(arc_anon, hdr, hash_lock);
3749		hdr->b_arc_access = 0;
3750		if (hash_lock)
3751			mutex_exit(hash_lock);
3752
3753		buf_discard_identity(hdr);
3754		arc_buf_thaw(buf);
3755	}
3756	buf->b_efunc = NULL;
3757	buf->b_private = NULL;
3758
3759	if (l2hdr) {
3760		ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3761		vdev_space_update(l2hdr->b_dev->l2ad_vdev,
3762		    -l2hdr->b_asize, 0, 0);
3763		trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3764		    hdr->b_size, 0);
3765		kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3766		ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3767		mutex_exit(&l2arc_buflist_mtx);
3768	}
3769}
3770
3771int
3772arc_released(arc_buf_t *buf)
3773{
3774	int released;
3775
3776	mutex_enter(&buf->b_evict_lock);
3777	released = (buf->b_data != NULL && buf->b_hdr->b_state == arc_anon);
3778	mutex_exit(&buf->b_evict_lock);
3779	return (released);
3780}
3781
3782#ifdef ZFS_DEBUG
3783int
3784arc_referenced(arc_buf_t *buf)
3785{
3786	int referenced;
3787
3788	mutex_enter(&buf->b_evict_lock);
3789	referenced = (refcount_count(&buf->b_hdr->b_refcnt));
3790	mutex_exit(&buf->b_evict_lock);
3791	return (referenced);
3792}
3793#endif
3794
3795static void
3796arc_write_ready(zio_t *zio)
3797{
3798	arc_write_callback_t *callback = zio->io_private;
3799	arc_buf_t *buf = callback->awcb_buf;
3800	arc_buf_hdr_t *hdr = buf->b_hdr;
3801
3802	ASSERT(!refcount_is_zero(&buf->b_hdr->b_refcnt));
3803	callback->awcb_ready(zio, buf, callback->awcb_private);
3804
3805	/*
3806	 * If the IO is already in progress, then this is a re-write
3807	 * attempt, so we need to thaw and re-compute the cksum.
3808	 * It is the responsibility of the callback to handle the
3809	 * accounting for any re-write attempt.
3810	 */
3811	if (HDR_IO_IN_PROGRESS(hdr)) {
3812		mutex_enter(&hdr->b_freeze_lock);
3813		if (hdr->b_freeze_cksum != NULL) {
3814			kmem_free(hdr->b_freeze_cksum, sizeof (zio_cksum_t));
3815			hdr->b_freeze_cksum = NULL;
3816		}
3817		mutex_exit(&hdr->b_freeze_lock);
3818	}
3819	arc_cksum_compute(buf, B_FALSE);
3820	hdr->b_flags |= ARC_IO_IN_PROGRESS;
3821}
3822
3823/*
3824 * The SPA calls this callback for each physical write that happens on behalf
3825 * of a logical write.  See the comment in dbuf_write_physdone() for details.
3826 */
3827static void
3828arc_write_physdone(zio_t *zio)
3829{
3830	arc_write_callback_t *cb = zio->io_private;
3831	if (cb->awcb_physdone != NULL)
3832		cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
3833}
3834
3835static void
3836arc_write_done(zio_t *zio)
3837{
3838	arc_write_callback_t *callback = zio->io_private;
3839	arc_buf_t *buf = callback->awcb_buf;
3840	arc_buf_hdr_t *hdr = buf->b_hdr;
3841
3842	ASSERT(hdr->b_acb == NULL);
3843
3844	if (zio->io_error == 0) {
3845		if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
3846			buf_discard_identity(hdr);
3847		} else {
3848			hdr->b_dva = *BP_IDENTITY(zio->io_bp);
3849			hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
3850			hdr->b_cksum0 = zio->io_bp->blk_cksum.zc_word[0];
3851		}
3852	} else {
3853		ASSERT(BUF_EMPTY(hdr));
3854	}
3855
3856	/*
3857	 * If the block to be written was all-zero or compressed enough to be
3858	 * embedded in the BP, no write was performed so there will be no
3859	 * dva/birth/checksum.  The buffer must therefore remain anonymous
3860	 * (and uncached).
3861	 */
3862	if (!BUF_EMPTY(hdr)) {
3863		arc_buf_hdr_t *exists;
3864		kmutex_t *hash_lock;
3865
3866		ASSERT(zio->io_error == 0);
3867
3868		arc_cksum_verify(buf);
3869
3870		exists = buf_hash_insert(hdr, &hash_lock);
3871		if (exists) {
3872			/*
3873			 * This can only happen if we overwrite for
3874			 * sync-to-convergence, because we remove
3875			 * buffers from the hash table when we arc_free().
3876			 */
3877			if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
3878				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3879					panic("bad overwrite, hdr=%p exists=%p",
3880					    (void *)hdr, (void *)exists);
3881				ASSERT(refcount_is_zero(&exists->b_refcnt));
3882				arc_change_state(arc_anon, exists, hash_lock);
3883				mutex_exit(hash_lock);
3884				arc_hdr_destroy(exists);
3885				exists = buf_hash_insert(hdr, &hash_lock);
3886				ASSERT3P(exists, ==, NULL);
3887			} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
3888				/* nopwrite */
3889				ASSERT(zio->io_prop.zp_nopwrite);
3890				if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
3891					panic("bad nopwrite, hdr=%p exists=%p",
3892					    (void *)hdr, (void *)exists);
3893			} else {
3894				/* Dedup */
3895				ASSERT(hdr->b_datacnt == 1);
3896				ASSERT(hdr->b_state == arc_anon);
3897				ASSERT(BP_GET_DEDUP(zio->io_bp));
3898				ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
3899			}
3900		}
3901		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3902		/* if it's not anon, we are doing a scrub */
3903		if (!exists && hdr->b_state == arc_anon)
3904			arc_access(hdr, hash_lock);
3905		mutex_exit(hash_lock);
3906	} else {
3907		hdr->b_flags &= ~ARC_IO_IN_PROGRESS;
3908	}
3909
3910	ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3911	callback->awcb_done(zio, buf, callback->awcb_private);
3912
3913	kmem_free(callback, sizeof (arc_write_callback_t));
3914}
3915
3916zio_t *
3917arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3918    blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3919    const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *physdone,
3920    arc_done_func_t *done, void *private, zio_priority_t priority,
3921    int zio_flags, const zbookmark_phys_t *zb)
3922{
3923	arc_buf_hdr_t *hdr = buf->b_hdr;
3924	arc_write_callback_t *callback;
3925	zio_t *zio;
3926
3927	ASSERT(ready != NULL);
3928	ASSERT(done != NULL);
3929	ASSERT(!HDR_IO_ERROR(hdr));
3930	ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3931	ASSERT(hdr->b_acb == NULL);
3932	if (l2arc)
3933		hdr->b_flags |= ARC_L2CACHE;
3934	if (l2arc_compress)
3935		hdr->b_flags |= ARC_L2COMPRESS;
3936	callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3937	callback->awcb_ready = ready;
3938	callback->awcb_physdone = physdone;
3939	callback->awcb_done = done;
3940	callback->awcb_private = private;
3941	callback->awcb_buf = buf;
3942
3943	zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3944	    arc_write_ready, arc_write_physdone, arc_write_done, callback,
3945	    priority, zio_flags, zb);
3946
3947	return (zio);
3948}
3949
3950static int
3951arc_memory_throttle(uint64_t reserve, uint64_t txg)
3952{
3953#ifdef _KERNEL
3954	uint64_t available_memory = ptob(freemem);
3955	static uint64_t page_load = 0;
3956	static uint64_t last_txg = 0;
3957
3958#if defined(__i386) || !defined(UMA_MD_SMALL_ALLOC)
3959	available_memory =
3960	    MIN(available_memory, ptob(vmem_size(heap_arena, VMEM_FREE)));
3961#endif
3962
3963	if (freemem > (uint64_t)physmem * arc_lotsfree_percent / 100)
3964		return (0);
3965
3966	if (txg > last_txg) {
3967		last_txg = txg;
3968		page_load = 0;
3969	}
3970	/*
3971	 * If we are in pageout, we know that memory is already tight,
3972	 * the arc is already going to be evicting, so we just want to
3973	 * continue to let page writes occur as quickly as possible.
3974	 */
3975	if (curproc == pageproc) {
3976		if (page_load > MAX(ptob(minfree), available_memory) / 4)
3977			return (SET_ERROR(ERESTART));
3978		/* Note: reserve is inflated, so we deflate */
3979		page_load += reserve / 8;
3980		return (0);
3981	} else if (page_load > 0 && arc_reclaim_needed()) {
3982		/* memory is low, delay before restarting */
3983		ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
3984		return (SET_ERROR(EAGAIN));
3985	}
3986	page_load = 0;
3987#endif
3988	return (0);
3989}
3990
3991void
3992arc_tempreserve_clear(uint64_t reserve)
3993{
3994	atomic_add_64(&arc_tempreserve, -reserve);
3995	ASSERT((int64_t)arc_tempreserve >= 0);
3996}
3997
3998int
3999arc_tempreserve_space(uint64_t reserve, uint64_t txg)
4000{
4001	int error;
4002	uint64_t anon_size;
4003
4004	if (reserve > arc_c/4 && !arc_no_grow) {
4005		arc_c = MIN(arc_c_max, reserve * 4);
4006		DTRACE_PROBE1(arc__set_reserve, uint64_t, arc_c);
4007	}
4008	if (reserve > arc_c)
4009		return (SET_ERROR(ENOMEM));
4010
4011	/*
4012	 * Don't count loaned bufs as in flight dirty data to prevent long
4013	 * network delays from blocking transactions that are ready to be
4014	 * assigned to a txg.
4015	 */
4016	anon_size = MAX((int64_t)(arc_anon->arcs_size - arc_loaned_bytes), 0);
4017
4018	/*
4019	 * Writes will, almost always, require additional memory allocations
4020	 * in order to compress/encrypt/etc the data.  We therefore need to
4021	 * make sure that there is sufficient available memory for this.
4022	 */
4023	error = arc_memory_throttle(reserve, txg);
4024	if (error != 0)
4025		return (error);
4026
4027	/*
4028	 * Throttle writes when the amount of dirty data in the cache
4029	 * gets too large.  We try to keep the cache less than half full
4030	 * of dirty blocks so that our sync times don't grow too large.
4031	 * Note: if two requests come in concurrently, we might let them
4032	 * both succeed, when one of them should fail.  Not a huge deal.
4033	 */
4034
4035	if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
4036	    anon_size > arc_c / 4) {
4037		dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
4038		    "anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
4039		    arc_tempreserve>>10,
4040		    arc_anon->arcs_lsize[ARC_BUFC_METADATA]>>10,
4041		    arc_anon->arcs_lsize[ARC_BUFC_DATA]>>10,
4042		    reserve>>10, arc_c>>10);
4043		return (SET_ERROR(ERESTART));
4044	}
4045	atomic_add_64(&arc_tempreserve, reserve);
4046	return (0);
4047}
4048
4049static kmutex_t arc_lowmem_lock;
4050#ifdef _KERNEL
4051static eventhandler_tag arc_event_lowmem = NULL;
4052
4053static void
4054arc_lowmem(void *arg __unused, int howto __unused)
4055{
4056
4057	/* Serialize access via arc_lowmem_lock. */
4058	mutex_enter(&arc_lowmem_lock);
4059	mutex_enter(&arc_reclaim_thr_lock);
4060	needfree = 1;
4061	DTRACE_PROBE(arc__needfree);
4062	cv_signal(&arc_reclaim_thr_cv);
4063
4064	/*
4065	 * It is unsafe to block here in arbitrary threads, because we can come
4066	 * here from ARC itself and may hold ARC locks and thus risk a deadlock
4067	 * with ARC reclaim thread.
4068	 */
4069	if (curproc == pageproc) {
4070		while (needfree)
4071			msleep(&needfree, &arc_reclaim_thr_lock, 0, "zfs:lowmem", 0);
4072	}
4073	mutex_exit(&arc_reclaim_thr_lock);
4074	mutex_exit(&arc_lowmem_lock);
4075}
4076#endif
4077
4078void
4079arc_init(void)
4080{
4081	int i, prefetch_tunable_set = 0;
4082
4083	mutex_init(&arc_reclaim_thr_lock, NULL, MUTEX_DEFAULT, NULL);
4084	cv_init(&arc_reclaim_thr_cv, NULL, CV_DEFAULT, NULL);
4085	mutex_init(&arc_lowmem_lock, NULL, MUTEX_DEFAULT, NULL);
4086
4087	/* Convert seconds to clock ticks */
4088	arc_min_prefetch_lifespan = 1 * hz;
4089
4090	/* Start out with 1/8 of all memory */
4091	arc_c = kmem_size() / 8;
4092
4093#ifdef sun
4094#ifdef _KERNEL
4095	/*
4096	 * On architectures where the physical memory can be larger
4097	 * than the addressable space (intel in 32-bit mode), we may
4098	 * need to limit the cache to 1/8 of VM size.
4099	 */
4100	arc_c = MIN(arc_c, vmem_size(heap_arena, VMEM_ALLOC | VMEM_FREE) / 8);
4101#endif
4102#endif	/* sun */
4103	/* set min cache to 1/32 of all memory, or 16MB, whichever is more */
4104	arc_c_min = MAX(arc_c / 4, 64<<18);
4105	/* set max to 1/2 of all memory, or all but 1GB, whichever is more */
4106	if (arc_c * 8 >= 1<<30)
4107		arc_c_max = (arc_c * 8) - (1<<30);
4108	else
4109		arc_c_max = arc_c_min;
4110	arc_c_max = MAX(arc_c * 5, arc_c_max);
4111
4112#ifdef _KERNEL
4113	/*
4114	 * Allow the tunables to override our calculations if they are
4115	 * reasonable (ie. over 16MB)
4116	 */
4117	if (zfs_arc_max > 64<<18 && zfs_arc_max < kmem_size())
4118		arc_c_max = zfs_arc_max;
4119	if (zfs_arc_min > 64<<18 && zfs_arc_min <= arc_c_max)
4120		arc_c_min = zfs_arc_min;
4121#endif
4122
4123	arc_c = arc_c_max;
4124	arc_p = (arc_c >> 1);
4125
4126	/* limit meta-data to 1/4 of the arc capacity */
4127	arc_meta_limit = arc_c_max / 4;
4128
4129	/* Allow the tunable to override if it is reasonable */
4130	if (zfs_arc_meta_limit > 0 && zfs_arc_meta_limit <= arc_c_max)
4131		arc_meta_limit = zfs_arc_meta_limit;
4132
4133	if (arc_c_min < arc_meta_limit / 2 && zfs_arc_min == 0)
4134		arc_c_min = arc_meta_limit / 2;
4135
4136	if (zfs_arc_grow_retry > 0)
4137		arc_grow_retry = zfs_arc_grow_retry;
4138
4139	if (zfs_arc_shrink_shift > 0)
4140		arc_shrink_shift = zfs_arc_shrink_shift;
4141
4142	if (zfs_arc_p_min_shift > 0)
4143		arc_p_min_shift = zfs_arc_p_min_shift;
4144
4145	/* if kmem_flags are set, lets try to use less memory */
4146	if (kmem_debugging())
4147		arc_c = arc_c / 2;
4148	if (arc_c < arc_c_min)
4149		arc_c = arc_c_min;
4150
4151	zfs_arc_min = arc_c_min;
4152	zfs_arc_max = arc_c_max;
4153
4154	arc_anon = &ARC_anon;
4155	arc_mru = &ARC_mru;
4156	arc_mru_ghost = &ARC_mru_ghost;
4157	arc_mfu = &ARC_mfu;
4158	arc_mfu_ghost = &ARC_mfu_ghost;
4159	arc_l2c_only = &ARC_l2c_only;
4160	arc_size = 0;
4161
4162	for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4163		mutex_init(&arc_anon->arcs_locks[i].arcs_lock,
4164		    NULL, MUTEX_DEFAULT, NULL);
4165		mutex_init(&arc_mru->arcs_locks[i].arcs_lock,
4166		    NULL, MUTEX_DEFAULT, NULL);
4167		mutex_init(&arc_mru_ghost->arcs_locks[i].arcs_lock,
4168		    NULL, MUTEX_DEFAULT, NULL);
4169		mutex_init(&arc_mfu->arcs_locks[i].arcs_lock,
4170		    NULL, MUTEX_DEFAULT, NULL);
4171		mutex_init(&arc_mfu_ghost->arcs_locks[i].arcs_lock,
4172		    NULL, MUTEX_DEFAULT, NULL);
4173		mutex_init(&arc_l2c_only->arcs_locks[i].arcs_lock,
4174		    NULL, MUTEX_DEFAULT, NULL);
4175
4176		list_create(&arc_mru->arcs_lists[i],
4177		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4178		list_create(&arc_mru_ghost->arcs_lists[i],
4179		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4180		list_create(&arc_mfu->arcs_lists[i],
4181		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4182		list_create(&arc_mfu_ghost->arcs_lists[i],
4183		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4184		list_create(&arc_mfu_ghost->arcs_lists[i],
4185		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4186		list_create(&arc_l2c_only->arcs_lists[i],
4187		    sizeof (arc_buf_hdr_t), offsetof(arc_buf_hdr_t, b_arc_node));
4188	}
4189
4190	buf_init();
4191
4192	arc_thread_exit = 0;
4193	arc_eviction_list = NULL;
4194	mutex_init(&arc_eviction_mtx, NULL, MUTEX_DEFAULT, NULL);
4195	bzero(&arc_eviction_hdr, sizeof (arc_buf_hdr_t));
4196
4197	arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
4198	    sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
4199
4200	if (arc_ksp != NULL) {
4201		arc_ksp->ks_data = &arc_stats;
4202		kstat_install(arc_ksp);
4203	}
4204
4205	(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
4206	    TS_RUN, minclsyspri);
4207
4208#ifdef _KERNEL
4209	arc_event_lowmem = EVENTHANDLER_REGISTER(vm_lowmem, arc_lowmem, NULL,
4210	    EVENTHANDLER_PRI_FIRST);
4211#endif
4212
4213	arc_dead = FALSE;
4214	arc_warm = B_FALSE;
4215
4216	/*
4217	 * Calculate maximum amount of dirty data per pool.
4218	 *
4219	 * If it has been set by /etc/system, take that.
4220	 * Otherwise, use a percentage of physical memory defined by
4221	 * zfs_dirty_data_max_percent (default 10%) with a cap at
4222	 * zfs_dirty_data_max_max (default 4GB).
4223	 */
4224	if (zfs_dirty_data_max == 0) {
4225		zfs_dirty_data_max = ptob(physmem) *
4226		    zfs_dirty_data_max_percent / 100;
4227		zfs_dirty_data_max = MIN(zfs_dirty_data_max,
4228		    zfs_dirty_data_max_max);
4229	}
4230
4231#ifdef _KERNEL
4232	if (TUNABLE_INT_FETCH("vfs.zfs.prefetch_disable", &zfs_prefetch_disable))
4233		prefetch_tunable_set = 1;
4234
4235#ifdef __i386__
4236	if (prefetch_tunable_set == 0) {
4237		printf("ZFS NOTICE: Prefetch is disabled by default on i386 "
4238		    "-- to enable,\n");
4239		printf("            add \"vfs.zfs.prefetch_disable=0\" "
4240		    "to /boot/loader.conf.\n");
4241		zfs_prefetch_disable = 1;
4242	}
4243#else
4244	if ((((uint64_t)physmem * PAGESIZE) < (1ULL << 32)) &&
4245	    prefetch_tunable_set == 0) {
4246		printf("ZFS NOTICE: Prefetch is disabled by default if less "
4247		    "than 4GB of RAM is present;\n"
4248		    "            to enable, add \"vfs.zfs.prefetch_disable=0\" "
4249		    "to /boot/loader.conf.\n");
4250		zfs_prefetch_disable = 1;
4251	}
4252#endif
4253	/* Warn about ZFS memory and address space requirements. */
4254	if (((uint64_t)physmem * PAGESIZE) < (256 + 128 + 64) * (1 << 20)) {
4255		printf("ZFS WARNING: Recommended minimum RAM size is 512MB; "
4256		    "expect unstable behavior.\n");
4257	}
4258	if (kmem_size() < 512 * (1 << 20)) {
4259		printf("ZFS WARNING: Recommended minimum kmem_size is 512MB; "
4260		    "expect unstable behavior.\n");
4261		printf("             Consider tuning vm.kmem_size and "
4262		    "vm.kmem_size_max\n");
4263		printf("             in /boot/loader.conf.\n");
4264	}
4265#endif
4266}
4267
4268void
4269arc_fini(void)
4270{
4271	int i;
4272
4273	mutex_enter(&arc_reclaim_thr_lock);
4274	arc_thread_exit = 1;
4275	cv_signal(&arc_reclaim_thr_cv);
4276	while (arc_thread_exit != 0)
4277		cv_wait(&arc_reclaim_thr_cv, &arc_reclaim_thr_lock);
4278	mutex_exit(&arc_reclaim_thr_lock);
4279
4280	arc_flush(NULL);
4281
4282	arc_dead = TRUE;
4283
4284	if (arc_ksp != NULL) {
4285		kstat_delete(arc_ksp);
4286		arc_ksp = NULL;
4287	}
4288
4289	mutex_destroy(&arc_eviction_mtx);
4290	mutex_destroy(&arc_reclaim_thr_lock);
4291	cv_destroy(&arc_reclaim_thr_cv);
4292
4293	for (i = 0; i < ARC_BUFC_NUMLISTS; i++) {
4294		list_destroy(&arc_mru->arcs_lists[i]);
4295		list_destroy(&arc_mru_ghost->arcs_lists[i]);
4296		list_destroy(&arc_mfu->arcs_lists[i]);
4297		list_destroy(&arc_mfu_ghost->arcs_lists[i]);
4298		list_destroy(&arc_l2c_only->arcs_lists[i]);
4299
4300		mutex_destroy(&arc_anon->arcs_locks[i].arcs_lock);
4301		mutex_destroy(&arc_mru->arcs_locks[i].arcs_lock);
4302		mutex_destroy(&arc_mru_ghost->arcs_locks[i].arcs_lock);
4303		mutex_destroy(&arc_mfu->arcs_locks[i].arcs_lock);
4304		mutex_destroy(&arc_mfu_ghost->arcs_locks[i].arcs_lock);
4305		mutex_destroy(&arc_l2c_only->arcs_locks[i].arcs_lock);
4306	}
4307
4308	buf_fini();
4309
4310	ASSERT(arc_loaned_bytes == 0);
4311
4312	mutex_destroy(&arc_lowmem_lock);
4313#ifdef _KERNEL
4314	if (arc_event_lowmem != NULL)
4315		EVENTHANDLER_DEREGISTER(vm_lowmem, arc_event_lowmem);
4316#endif
4317}
4318
4319/*
4320 * Level 2 ARC
4321 *
4322 * The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
4323 * It uses dedicated storage devices to hold cached data, which are populated
4324 * using large infrequent writes.  The main role of this cache is to boost
4325 * the performance of random read workloads.  The intended L2ARC devices
4326 * include short-stroked disks, solid state disks, and other media with
4327 * substantially faster read latency than disk.
4328 *
4329 *                 +-----------------------+
4330 *                 |         ARC           |
4331 *                 +-----------------------+
4332 *                    |         ^     ^
4333 *                    |         |     |
4334 *      l2arc_feed_thread()    arc_read()
4335 *                    |         |     |
4336 *                    |  l2arc read   |
4337 *                    V         |     |
4338 *               +---------------+    |
4339 *               |     L2ARC     |    |
4340 *               +---------------+    |
4341 *                   |    ^           |
4342 *          l2arc_write() |           |
4343 *                   |    |           |
4344 *                   V    |           |
4345 *                 +-------+      +-------+
4346 *                 | vdev  |      | vdev  |
4347 *                 | cache |      | cache |
4348 *                 +-------+      +-------+
4349 *                 +=========+     .-----.
4350 *                 :  L2ARC  :    |-_____-|
4351 *                 : devices :    | Disks |
4352 *                 +=========+    `-_____-'
4353 *
4354 * Read requests are satisfied from the following sources, in order:
4355 *
4356 *	1) ARC
4357 *	2) vdev cache of L2ARC devices
4358 *	3) L2ARC devices
4359 *	4) vdev cache of disks
4360 *	5) disks
4361 *
4362 * Some L2ARC device types exhibit extremely slow write performance.
4363 * To accommodate for this there are some significant differences between
4364 * the L2ARC and traditional cache design:
4365 *
4366 * 1. There is no eviction path from the ARC to the L2ARC.  Evictions from
4367 * the ARC behave as usual, freeing buffers and placing headers on ghost
4368 * lists.  The ARC does not send buffers to the L2ARC during eviction as
4369 * this would add inflated write latencies for all ARC memory pressure.
4370 *
4371 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4372 * It does this by periodically scanning buffers from the eviction-end of
4373 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4374 * not already there. It scans until a headroom of buffers is satisfied,
4375 * which itself is a buffer for ARC eviction. If a compressible buffer is
4376 * found during scanning and selected for writing to an L2ARC device, we
4377 * temporarily boost scanning headroom during the next scan cycle to make
4378 * sure we adapt to compression effects (which might significantly reduce
4379 * the data volume we write to L2ARC). The thread that does this is
4380 * l2arc_feed_thread(), illustrated below; example sizes are included to
4381 * provide a better sense of ratio than this diagram:
4382 *
4383 *	       head -->                        tail
4384 *	        +---------------------+----------+
4385 *	ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->.   # already on L2ARC
4386 *	        +---------------------+----------+   |   o L2ARC eligible
4387 *	ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->|   : ARC buffer
4388 *	        +---------------------+----------+   |
4389 *	             15.9 Gbytes      ^ 32 Mbytes    |
4390 *	                           headroom          |
4391 *	                                      l2arc_feed_thread()
4392 *	                                             |
4393 *	                 l2arc write hand <--[oooo]--'
4394 *	                         |           8 Mbyte
4395 *	                         |          write max
4396 *	                         V
4397 *		  +==============================+
4398 *	L2ARC dev |####|#|###|###|    |####| ... |
4399 *	          +==============================+
4400 *	                     32 Gbytes
4401 *
4402 * 3. If an ARC buffer is copied to the L2ARC but then hit instead of
4403 * evicted, then the L2ARC has cached a buffer much sooner than it probably
4404 * needed to, potentially wasting L2ARC device bandwidth and storage.  It is
4405 * safe to say that this is an uncommon case, since buffers at the end of
4406 * the ARC lists have moved there due to inactivity.
4407 *
4408 * 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
4409 * then the L2ARC simply misses copying some buffers.  This serves as a
4410 * pressure valve to prevent heavy read workloads from both stalling the ARC
4411 * with waits and clogging the L2ARC with writes.  This also helps prevent
4412 * the potential for the L2ARC to churn if it attempts to cache content too
4413 * quickly, such as during backups of the entire pool.
4414 *
4415 * 5. After system boot and before the ARC has filled main memory, there are
4416 * no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
4417 * lists can remain mostly static.  Instead of searching from tail of these
4418 * lists as pictured, the l2arc_feed_thread() will search from the list heads
4419 * for eligible buffers, greatly increasing its chance of finding them.
4420 *
4421 * The L2ARC device write speed is also boosted during this time so that
4422 * the L2ARC warms up faster.  Since there have been no ARC evictions yet,
4423 * there are no L2ARC reads, and no fear of degrading read performance
4424 * through increased writes.
4425 *
4426 * 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
4427 * the vdev queue can aggregate them into larger and fewer writes.  Each
4428 * device is written to in a rotor fashion, sweeping writes through
4429 * available space then repeating.
4430 *
4431 * 7. The L2ARC does not store dirty content.  It never needs to flush
4432 * write buffers back to disk based storage.
4433 *
4434 * 8. If an ARC buffer is written (and dirtied) which also exists in the
4435 * L2ARC, the now stale L2ARC buffer is immediately dropped.
4436 *
4437 * The performance of the L2ARC can be tweaked by a number of tunables, which
4438 * may be necessary for different workloads:
4439 *
4440 *	l2arc_write_max		max write bytes per interval
4441 *	l2arc_write_boost	extra write bytes during device warmup
4442 *	l2arc_noprefetch	skip caching prefetched buffers
4443 *	l2arc_headroom		number of max device writes to precache
4444 *	l2arc_headroom_boost	when we find compressed buffers during ARC
4445 *				scanning, we multiply headroom by this
4446 *				percentage factor for the next scan cycle,
4447 *				since more compressed buffers are likely to
4448 *				be present
4449 *	l2arc_feed_secs		seconds between L2ARC writing
4450 *
4451 * Tunables may be removed or added as future performance improvements are
4452 * integrated, and also may become zpool properties.
4453 *
4454 * There are three key functions that control how the L2ARC warms up:
4455 *
4456 *	l2arc_write_eligible()	check if a buffer is eligible to cache
4457 *	l2arc_write_size()	calculate how much to write
4458 *	l2arc_write_interval()	calculate sleep delay between writes
4459 *
4460 * These three functions determine what to write, how much, and how quickly
4461 * to send writes.
4462 */
4463
4464static boolean_t
4465l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *ab)
4466{
4467	/*
4468	 * A buffer is *not* eligible for the L2ARC if it:
4469	 * 1. belongs to a different spa.
4470	 * 2. is already cached on the L2ARC.
4471	 * 3. has an I/O in progress (it may be an incomplete read).
4472	 * 4. is flagged not eligible (zfs property).
4473	 */
4474	if (ab->b_spa != spa_guid) {
4475		ARCSTAT_BUMP(arcstat_l2_write_spa_mismatch);
4476		return (B_FALSE);
4477	}
4478	if (ab->b_l2hdr != NULL) {
4479		ARCSTAT_BUMP(arcstat_l2_write_in_l2);
4480		return (B_FALSE);
4481	}
4482	if (HDR_IO_IN_PROGRESS(ab)) {
4483		ARCSTAT_BUMP(arcstat_l2_write_hdr_io_in_progress);
4484		return (B_FALSE);
4485	}
4486	if (!HDR_L2CACHE(ab)) {
4487		ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4488		return (B_FALSE);
4489	}
4490
4491	return (B_TRUE);
4492}
4493
4494static uint64_t
4495l2arc_write_size(void)
4496{
4497	uint64_t size;
4498
4499	/*
4500	 * Make sure our globals have meaningful values in case the user
4501	 * altered them.
4502	 */
4503	size = l2arc_write_max;
4504	if (size == 0) {
4505		cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4506		    "be greater than zero, resetting it to the default (%d)",
4507		    L2ARC_WRITE_SIZE);
4508		size = l2arc_write_max = L2ARC_WRITE_SIZE;
4509	}
4510
4511	if (arc_warm == B_FALSE)
4512		size += l2arc_write_boost;
4513
4514	return (size);
4515
4516}
4517
4518static clock_t
4519l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4520{
4521	clock_t interval, next, now;
4522
4523	/*
4524	 * If the ARC lists are busy, increase our write rate; if the
4525	 * lists are stale, idle back.  This is achieved by checking
4526	 * how much we previously wrote - if it was more than half of
4527	 * what we wanted, schedule the next write much sooner.
4528	 */
4529	if (l2arc_feed_again && wrote > (wanted / 2))
4530		interval = (hz * l2arc_feed_min_ms) / 1000;
4531	else
4532		interval = hz * l2arc_feed_secs;
4533
4534	now = ddi_get_lbolt();
4535	next = MAX(now, MIN(now + interval, began + interval));
4536
4537	return (next);
4538}
4539
4540static void
4541l2arc_hdr_stat_add(void)
4542{
4543	ARCSTAT_INCR(arcstat_l2_hdr_size, HDR_SIZE + L2HDR_SIZE);
4544	ARCSTAT_INCR(arcstat_hdr_size, -HDR_SIZE);
4545}
4546
4547static void
4548l2arc_hdr_stat_remove(void)
4549{
4550	ARCSTAT_INCR(arcstat_l2_hdr_size, -(HDR_SIZE + L2HDR_SIZE));
4551	ARCSTAT_INCR(arcstat_hdr_size, HDR_SIZE);
4552}
4553
4554/*
4555 * Cycle through L2ARC devices.  This is how L2ARC load balances.
4556 * If a device is returned, this also returns holding the spa config lock.
4557 */
4558static l2arc_dev_t *
4559l2arc_dev_get_next(void)
4560{
4561	l2arc_dev_t *first, *next = NULL;
4562
4563	/*
4564	 * Lock out the removal of spas (spa_namespace_lock), then removal
4565	 * of cache devices (l2arc_dev_mtx).  Once a device has been selected,
4566	 * both locks will be dropped and a spa config lock held instead.
4567	 */
4568	mutex_enter(&spa_namespace_lock);
4569	mutex_enter(&l2arc_dev_mtx);
4570
4571	/* if there are no vdevs, there is nothing to do */
4572	if (l2arc_ndev == 0)
4573		goto out;
4574
4575	first = NULL;
4576	next = l2arc_dev_last;
4577	do {
4578		/* loop around the list looking for a non-faulted vdev */
4579		if (next == NULL) {
4580			next = list_head(l2arc_dev_list);
4581		} else {
4582			next = list_next(l2arc_dev_list, next);
4583			if (next == NULL)
4584				next = list_head(l2arc_dev_list);
4585		}
4586
4587		/* if we have come back to the start, bail out */
4588		if (first == NULL)
4589			first = next;
4590		else if (next == first)
4591			break;
4592
4593	} while (vdev_is_dead(next->l2ad_vdev));
4594
4595	/* if we were unable to find any usable vdevs, return NULL */
4596	if (vdev_is_dead(next->l2ad_vdev))
4597		next = NULL;
4598
4599	l2arc_dev_last = next;
4600
4601out:
4602	mutex_exit(&l2arc_dev_mtx);
4603
4604	/*
4605	 * Grab the config lock to prevent the 'next' device from being
4606	 * removed while we are writing to it.
4607	 */
4608	if (next != NULL)
4609		spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
4610	mutex_exit(&spa_namespace_lock);
4611
4612	return (next);
4613}
4614
4615/*
4616 * Free buffers that were tagged for destruction.
4617 */
4618static void
4619l2arc_do_free_on_write()
4620{
4621	list_t *buflist;
4622	l2arc_data_free_t *df, *df_prev;
4623
4624	mutex_enter(&l2arc_free_on_write_mtx);
4625	buflist = l2arc_free_on_write;
4626
4627	for (df = list_tail(buflist); df; df = df_prev) {
4628		df_prev = list_prev(buflist, df);
4629		ASSERT(df->l2df_data != NULL);
4630		ASSERT(df->l2df_func != NULL);
4631		df->l2df_func(df->l2df_data, df->l2df_size);
4632		list_remove(buflist, df);
4633		kmem_free(df, sizeof (l2arc_data_free_t));
4634	}
4635
4636	mutex_exit(&l2arc_free_on_write_mtx);
4637}
4638
4639/*
4640 * A write to a cache device has completed.  Update all headers to allow
4641 * reads from these buffers to begin.
4642 */
4643static void
4644l2arc_write_done(zio_t *zio)
4645{
4646	l2arc_write_callback_t *cb;
4647	l2arc_dev_t *dev;
4648	list_t *buflist;
4649	arc_buf_hdr_t *head, *ab, *ab_prev;
4650	l2arc_buf_hdr_t *abl2;
4651	kmutex_t *hash_lock;
4652	int64_t bytes_dropped = 0;
4653
4654	cb = zio->io_private;
4655	ASSERT(cb != NULL);
4656	dev = cb->l2wcb_dev;
4657	ASSERT(dev != NULL);
4658	head = cb->l2wcb_head;
4659	ASSERT(head != NULL);
4660	buflist = dev->l2ad_buflist;
4661	ASSERT(buflist != NULL);
4662	DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
4663	    l2arc_write_callback_t *, cb);
4664
4665	if (zio->io_error != 0)
4666		ARCSTAT_BUMP(arcstat_l2_writes_error);
4667
4668	mutex_enter(&l2arc_buflist_mtx);
4669
4670	/*
4671	 * All writes completed, or an error was hit.
4672	 */
4673	for (ab = list_prev(buflist, head); ab; ab = ab_prev) {
4674		ab_prev = list_prev(buflist, ab);
4675		abl2 = ab->b_l2hdr;
4676
4677		/*
4678		 * Release the temporary compressed buffer as soon as possible.
4679		 */
4680		if (abl2->b_compress != ZIO_COMPRESS_OFF)
4681			l2arc_release_cdata_buf(ab);
4682
4683		hash_lock = HDR_LOCK(ab);
4684		if (!mutex_tryenter(hash_lock)) {
4685			/*
4686			 * This buffer misses out.  It may be in a stage
4687			 * of eviction.  Its ARC_L2_WRITING flag will be
4688			 * left set, denying reads to this buffer.
4689			 */
4690			ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4691			continue;
4692		}
4693
4694		if (zio->io_error != 0) {
4695			/*
4696			 * Error - drop L2ARC entry.
4697			 */
4698			list_remove(buflist, ab);
4699			ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4700			bytes_dropped += abl2->b_asize;
4701			ab->b_l2hdr = NULL;
4702			trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4703			    ab->b_size, 0);
4704			kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4705			ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4706		}
4707
4708		/*
4709		 * Allow ARC to begin reads to this L2ARC entry.
4710		 */
4711		ab->b_flags &= ~ARC_L2_WRITING;
4712
4713		mutex_exit(hash_lock);
4714	}
4715
4716	atomic_inc_64(&l2arc_writes_done);
4717	list_remove(buflist, head);
4718	kmem_cache_free(hdr_cache, head);
4719	mutex_exit(&l2arc_buflist_mtx);
4720
4721	vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
4722
4723	l2arc_do_free_on_write();
4724
4725	kmem_free(cb, sizeof (l2arc_write_callback_t));
4726}
4727
4728/*
4729 * A read to a cache device completed.  Validate buffer contents before
4730 * handing over to the regular ARC routines.
4731 */
4732static void
4733l2arc_read_done(zio_t *zio)
4734{
4735	l2arc_read_callback_t *cb;
4736	arc_buf_hdr_t *hdr;
4737	arc_buf_t *buf;
4738	kmutex_t *hash_lock;
4739	int equal;
4740
4741	ASSERT(zio->io_vd != NULL);
4742	ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
4743
4744	spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
4745
4746	cb = zio->io_private;
4747	ASSERT(cb != NULL);
4748	buf = cb->l2rcb_buf;
4749	ASSERT(buf != NULL);
4750
4751	hash_lock = HDR_LOCK(buf->b_hdr);
4752	mutex_enter(hash_lock);
4753	hdr = buf->b_hdr;
4754	ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4755
4756	/*
4757	 * If the buffer was compressed, decompress it first.
4758	 */
4759	if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4760		l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4761	ASSERT(zio->io_data != NULL);
4762
4763	/*
4764	 * Check this survived the L2ARC journey.
4765	 */
4766	equal = arc_cksum_equal(buf);
4767	if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4768		mutex_exit(hash_lock);
4769		zio->io_private = buf;
4770		zio->io_bp_copy = cb->l2rcb_bp;	/* XXX fix in L2ARC 2.0	*/
4771		zio->io_bp = &zio->io_bp_copy;	/* XXX fix in L2ARC 2.0	*/
4772		arc_read_done(zio);
4773	} else {
4774		mutex_exit(hash_lock);
4775		/*
4776		 * Buffer didn't survive caching.  Increment stats and
4777		 * reissue to the original storage device.
4778		 */
4779		if (zio->io_error != 0) {
4780			ARCSTAT_BUMP(arcstat_l2_io_error);
4781		} else {
4782			zio->io_error = SET_ERROR(EIO);
4783		}
4784		if (!equal)
4785			ARCSTAT_BUMP(arcstat_l2_cksum_bad);
4786
4787		/*
4788		 * If there's no waiter, issue an async i/o to the primary
4789		 * storage now.  If there *is* a waiter, the caller must
4790		 * issue the i/o in a context where it's OK to block.
4791		 */
4792		if (zio->io_waiter == NULL) {
4793			zio_t *pio = zio_unique_parent(zio);
4794
4795			ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
4796
4797			zio_nowait(zio_read(pio, cb->l2rcb_spa, &cb->l2rcb_bp,
4798			    buf->b_data, zio->io_size, arc_read_done, buf,
4799			    zio->io_priority, cb->l2rcb_flags, &cb->l2rcb_zb));
4800		}
4801	}
4802
4803	kmem_free(cb, sizeof (l2arc_read_callback_t));
4804}
4805
4806/*
4807 * This is the list priority from which the L2ARC will search for pages to
4808 * cache.  This is used within loops (0..3) to cycle through lists in the
4809 * desired order.  This order can have a significant effect on cache
4810 * performance.
4811 *
4812 * Currently the metadata lists are hit first, MFU then MRU, followed by
4813 * the data lists.  This function returns a locked list, and also returns
4814 * the lock pointer.
4815 */
4816static list_t *
4817l2arc_list_locked(int list_num, kmutex_t **lock)
4818{
4819	list_t *list = NULL;
4820	int idx;
4821
4822	ASSERT(list_num >= 0 && list_num < 2 * ARC_BUFC_NUMLISTS);
4823
4824	if (list_num < ARC_BUFC_NUMMETADATALISTS) {
4825		idx = list_num;
4826		list = &arc_mfu->arcs_lists[idx];
4827		*lock = ARCS_LOCK(arc_mfu, idx);
4828	} else if (list_num < ARC_BUFC_NUMMETADATALISTS * 2) {
4829		idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4830		list = &arc_mru->arcs_lists[idx];
4831		*lock = ARCS_LOCK(arc_mru, idx);
4832	} else if (list_num < (ARC_BUFC_NUMMETADATALISTS * 2 +
4833		ARC_BUFC_NUMDATALISTS)) {
4834		idx = list_num - ARC_BUFC_NUMMETADATALISTS;
4835		list = &arc_mfu->arcs_lists[idx];
4836		*lock = ARCS_LOCK(arc_mfu, idx);
4837	} else {
4838		idx = list_num - ARC_BUFC_NUMLISTS;
4839		list = &arc_mru->arcs_lists[idx];
4840		*lock = ARCS_LOCK(arc_mru, idx);
4841	}
4842
4843	ASSERT(!(MUTEX_HELD(*lock)));
4844	mutex_enter(*lock);
4845	return (list);
4846}
4847
4848/*
4849 * Evict buffers from the device write hand to the distance specified in
4850 * bytes.  This distance may span populated buffers, it may span nothing.
4851 * This is clearing a region on the L2ARC device ready for writing.
4852 * If the 'all' boolean is set, every buffer is evicted.
4853 */
4854static void
4855l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
4856{
4857	list_t *buflist;
4858	l2arc_buf_hdr_t *abl2;
4859	arc_buf_hdr_t *ab, *ab_prev;
4860	kmutex_t *hash_lock;
4861	uint64_t taddr;
4862	int64_t bytes_evicted = 0;
4863
4864	buflist = dev->l2ad_buflist;
4865
4866	if (buflist == NULL)
4867		return;
4868
4869	if (!all && dev->l2ad_first) {
4870		/*
4871		 * This is the first sweep through the device.  There is
4872		 * nothing to evict.
4873		 */
4874		return;
4875	}
4876
4877	if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
4878		/*
4879		 * When nearing the end of the device, evict to the end
4880		 * before the device write hand jumps to the start.
4881		 */
4882		taddr = dev->l2ad_end;
4883	} else {
4884		taddr = dev->l2ad_hand + distance;
4885	}
4886	DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
4887	    uint64_t, taddr, boolean_t, all);
4888
4889top:
4890	mutex_enter(&l2arc_buflist_mtx);
4891	for (ab = list_tail(buflist); ab; ab = ab_prev) {
4892		ab_prev = list_prev(buflist, ab);
4893
4894		hash_lock = HDR_LOCK(ab);
4895		if (!mutex_tryenter(hash_lock)) {
4896			/*
4897			 * Missed the hash lock.  Retry.
4898			 */
4899			ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
4900			mutex_exit(&l2arc_buflist_mtx);
4901			mutex_enter(hash_lock);
4902			mutex_exit(hash_lock);
4903			goto top;
4904		}
4905
4906		if (HDR_L2_WRITE_HEAD(ab)) {
4907			/*
4908			 * We hit a write head node.  Leave it for
4909			 * l2arc_write_done().
4910			 */
4911			list_remove(buflist, ab);
4912			mutex_exit(hash_lock);
4913			continue;
4914		}
4915
4916		if (!all && ab->b_l2hdr != NULL &&
4917		    (ab->b_l2hdr->b_daddr > taddr ||
4918		    ab->b_l2hdr->b_daddr < dev->l2ad_hand)) {
4919			/*
4920			 * We've evicted to the target address,
4921			 * or the end of the device.
4922			 */
4923			mutex_exit(hash_lock);
4924			break;
4925		}
4926
4927		if (HDR_FREE_IN_PROGRESS(ab)) {
4928			/*
4929			 * Already on the path to destruction.
4930			 */
4931			mutex_exit(hash_lock);
4932			continue;
4933		}
4934
4935		if (ab->b_state == arc_l2c_only) {
4936			ASSERT(!HDR_L2_READING(ab));
4937			/*
4938			 * This doesn't exist in the ARC.  Destroy.
4939			 * arc_hdr_destroy() will call list_remove()
4940			 * and decrement arcstat_l2_size.
4941			 */
4942			arc_change_state(arc_anon, ab, hash_lock);
4943			arc_hdr_destroy(ab);
4944		} else {
4945			/*
4946			 * Invalidate issued or about to be issued
4947			 * reads, since we may be about to write
4948			 * over this location.
4949			 */
4950			if (HDR_L2_READING(ab)) {
4951				ARCSTAT_BUMP(arcstat_l2_evict_reading);
4952				ab->b_flags |= ARC_L2_EVICTED;
4953			}
4954
4955			/*
4956			 * Tell ARC this no longer exists in L2ARC.
4957			 */
4958			if (ab->b_l2hdr != NULL) {
4959				abl2 = ab->b_l2hdr;
4960				ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4961				bytes_evicted += abl2->b_asize;
4962				ab->b_l2hdr = NULL;
4963				kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4964				ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4965			}
4966			list_remove(buflist, ab);
4967
4968			/*
4969			 * This may have been leftover after a
4970			 * failed write.
4971			 */
4972			ab->b_flags &= ~ARC_L2_WRITING;
4973		}
4974		mutex_exit(hash_lock);
4975	}
4976	mutex_exit(&l2arc_buflist_mtx);
4977
4978	vdev_space_update(dev->l2ad_vdev, -bytes_evicted, 0, 0);
4979	dev->l2ad_evict = taddr;
4980}
4981
4982/*
4983 * Find and write ARC buffers to the L2ARC device.
4984 *
4985 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4986 * for reading until they have completed writing.
4987 * The headroom_boost is an in-out parameter used to maintain headroom boost
4988 * state between calls to this function.
4989 *
4990 * Returns the number of bytes actually written (which may be smaller than
4991 * the delta by which the device hand has changed due to alignment).
4992 */
4993static uint64_t
4994l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4995    boolean_t *headroom_boost)
4996{
4997	arc_buf_hdr_t *ab, *ab_prev, *head;
4998	list_t *list;
4999	uint64_t write_asize, write_psize, write_sz, headroom,
5000	    buf_compress_minsz;
5001	void *buf_data;
5002	kmutex_t *list_lock;
5003	boolean_t full;
5004	l2arc_write_callback_t *cb;
5005	zio_t *pio, *wzio;
5006	uint64_t guid = spa_load_guid(spa);
5007	const boolean_t do_headroom_boost = *headroom_boost;
5008	int try;
5009
5010	ASSERT(dev->l2ad_vdev != NULL);
5011
5012	/* Lower the flag now, we might want to raise it again later. */
5013	*headroom_boost = B_FALSE;
5014
5015	pio = NULL;
5016	write_sz = write_asize = write_psize = 0;
5017	full = B_FALSE;
5018	head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
5019	head->b_flags |= ARC_L2_WRITE_HEAD;
5020
5021	ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
5022	/*
5023	 * We will want to try to compress buffers that are at least 2x the
5024	 * device sector size.
5025	 */
5026	buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
5027
5028	/*
5029	 * Copy buffers for L2ARC writing.
5030	 */
5031	mutex_enter(&l2arc_buflist_mtx);
5032	for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
5033		uint64_t passed_sz = 0;
5034
5035		list = l2arc_list_locked(try, &list_lock);
5036		ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
5037
5038		/*
5039		 * L2ARC fast warmup.
5040		 *
5041		 * Until the ARC is warm and starts to evict, read from the
5042		 * head of the ARC lists rather than the tail.
5043		 */
5044		if (arc_warm == B_FALSE)
5045			ab = list_head(list);
5046		else
5047			ab = list_tail(list);
5048		if (ab == NULL)
5049			ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
5050
5051		headroom = target_sz * l2arc_headroom;
5052		if (do_headroom_boost)
5053			headroom = (headroom * l2arc_headroom_boost) / 100;
5054
5055		for (; ab; ab = ab_prev) {
5056			l2arc_buf_hdr_t *l2hdr;
5057			kmutex_t *hash_lock;
5058			uint64_t buf_sz;
5059
5060			if (arc_warm == B_FALSE)
5061				ab_prev = list_next(list, ab);
5062			else
5063				ab_prev = list_prev(list, ab);
5064			ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
5065
5066			hash_lock = HDR_LOCK(ab);
5067			if (!mutex_tryenter(hash_lock)) {
5068				ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
5069				/*
5070				 * Skip this buffer rather than waiting.
5071				 */
5072				continue;
5073			}
5074
5075			passed_sz += ab->b_size;
5076			if (passed_sz > headroom) {
5077				/*
5078				 * Searched too far.
5079				 */
5080				mutex_exit(hash_lock);
5081				ARCSTAT_BUMP(arcstat_l2_write_passed_headroom);
5082				break;
5083			}
5084
5085			if (!l2arc_write_eligible(guid, ab)) {
5086				mutex_exit(hash_lock);
5087				continue;
5088			}
5089
5090			if ((write_sz + ab->b_size) > target_sz) {
5091				full = B_TRUE;
5092				mutex_exit(hash_lock);
5093				ARCSTAT_BUMP(arcstat_l2_write_full);
5094				break;
5095			}
5096
5097			if (pio == NULL) {
5098				/*
5099				 * Insert a dummy header on the buflist so
5100				 * l2arc_write_done() can find where the
5101				 * write buffers begin without searching.
5102				 */
5103				list_insert_head(dev->l2ad_buflist, head);
5104
5105				cb = kmem_alloc(
5106				    sizeof (l2arc_write_callback_t), KM_SLEEP);
5107				cb->l2wcb_dev = dev;
5108				cb->l2wcb_head = head;
5109				pio = zio_root(spa, l2arc_write_done, cb,
5110				    ZIO_FLAG_CANFAIL);
5111				ARCSTAT_BUMP(arcstat_l2_write_pios);
5112			}
5113
5114			/*
5115			 * Create and add a new L2ARC header.
5116			 */
5117			l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
5118			l2hdr->b_dev = dev;
5119			ab->b_flags |= ARC_L2_WRITING;
5120
5121			/*
5122			 * Temporarily stash the data buffer in b_tmp_cdata.
5123			 * The subsequent write step will pick it up from
5124			 * there. This is because can't access ab->b_buf
5125			 * without holding the hash_lock, which we in turn
5126			 * can't access without holding the ARC list locks
5127			 * (which we want to avoid during compression/writing).
5128			 */
5129			l2hdr->b_compress = ZIO_COMPRESS_OFF;
5130			l2hdr->b_asize = ab->b_size;
5131			l2hdr->b_tmp_cdata = ab->b_buf->b_data;
5132
5133			buf_sz = ab->b_size;
5134			ab->b_l2hdr = l2hdr;
5135
5136			list_insert_head(dev->l2ad_buflist, ab);
5137
5138			/*
5139			 * Compute and store the buffer cksum before
5140			 * writing.  On debug the cksum is verified first.
5141			 */
5142			arc_cksum_verify(ab->b_buf);
5143			arc_cksum_compute(ab->b_buf, B_TRUE);
5144
5145			mutex_exit(hash_lock);
5146
5147			write_sz += buf_sz;
5148		}
5149
5150		mutex_exit(list_lock);
5151
5152		if (full == B_TRUE)
5153			break;
5154	}
5155
5156	/* No buffers selected for writing? */
5157	if (pio == NULL) {
5158		ASSERT0(write_sz);
5159		mutex_exit(&l2arc_buflist_mtx);
5160		kmem_cache_free(hdr_cache, head);
5161		return (0);
5162	}
5163
5164	/*
5165	 * Now start writing the buffers. We're starting at the write head
5166	 * and work backwards, retracing the course of the buffer selector
5167	 * loop above.
5168	 */
5169	for (ab = list_prev(dev->l2ad_buflist, head); ab;
5170	    ab = list_prev(dev->l2ad_buflist, ab)) {
5171		l2arc_buf_hdr_t *l2hdr;
5172		uint64_t buf_sz;
5173
5174		/*
5175		 * We shouldn't need to lock the buffer here, since we flagged
5176		 * it as ARC_L2_WRITING in the previous step, but we must take
5177		 * care to only access its L2 cache parameters. In particular,
5178		 * ab->b_buf may be invalid by now due to ARC eviction.
5179		 */
5180		l2hdr = ab->b_l2hdr;
5181		l2hdr->b_daddr = dev->l2ad_hand;
5182
5183		if ((ab->b_flags & ARC_L2COMPRESS) &&
5184		    l2hdr->b_asize >= buf_compress_minsz) {
5185			if (l2arc_compress_buf(l2hdr)) {
5186				/*
5187				 * If compression succeeded, enable headroom
5188				 * boost on the next scan cycle.
5189				 */
5190				*headroom_boost = B_TRUE;
5191			}
5192		}
5193
5194		/*
5195		 * Pick up the buffer data we had previously stashed away
5196		 * (and now potentially also compressed).
5197		 */
5198		buf_data = l2hdr->b_tmp_cdata;
5199		buf_sz = l2hdr->b_asize;
5200
5201		/* Compression may have squashed the buffer to zero length. */
5202		if (buf_sz != 0) {
5203			uint64_t buf_p_sz;
5204
5205			wzio = zio_write_phys(pio, dev->l2ad_vdev,
5206			    dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5207			    NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5208			    ZIO_FLAG_CANFAIL, B_FALSE);
5209
5210			DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5211			    zio_t *, wzio);
5212			(void) zio_nowait(wzio);
5213
5214			write_asize += buf_sz;
5215			/*
5216			 * Keep the clock hand suitably device-aligned.
5217			 */
5218			buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5219			write_psize += buf_p_sz;
5220			dev->l2ad_hand += buf_p_sz;
5221		}
5222	}
5223
5224	mutex_exit(&l2arc_buflist_mtx);
5225
5226	ASSERT3U(write_asize, <=, target_sz);
5227	ARCSTAT_BUMP(arcstat_l2_writes_sent);
5228	ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
5229	ARCSTAT_INCR(arcstat_l2_size, write_sz);
5230	ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5231	vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
5232
5233	/*
5234	 * Bump device hand to the device start if it is approaching the end.
5235	 * l2arc_evict() will already have evicted ahead for this case.
5236	 */
5237	if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5238		dev->l2ad_hand = dev->l2ad_start;
5239		dev->l2ad_evict = dev->l2ad_start;
5240		dev->l2ad_first = B_FALSE;
5241	}
5242
5243	dev->l2ad_writing = B_TRUE;
5244	(void) zio_wait(pio);
5245	dev->l2ad_writing = B_FALSE;
5246
5247	return (write_asize);
5248}
5249
5250/*
5251 * Compresses an L2ARC buffer.
5252 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5253 * size in l2hdr->b_asize. This routine tries to compress the data and
5254 * depending on the compression result there are three possible outcomes:
5255 * *) The buffer was incompressible. The original l2hdr contents were left
5256 *    untouched and are ready for writing to an L2 device.
5257 * *) The buffer was all-zeros, so there is no need to write it to an L2
5258 *    device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5259 *    set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5260 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5261 *    data buffer which holds the compressed data to be written, and b_asize
5262 *    tells us how much data there is. b_compress is set to the appropriate
5263 *    compression algorithm. Once writing is done, invoke
5264 *    l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5265 *
5266 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5267 * buffer was incompressible).
5268 */
5269static boolean_t
5270l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5271{
5272	void *cdata;
5273	size_t csize, len, rounded;
5274
5275	ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5276	ASSERT(l2hdr->b_tmp_cdata != NULL);
5277
5278	len = l2hdr->b_asize;
5279	cdata = zio_data_buf_alloc(len);
5280	csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5281	    cdata, l2hdr->b_asize);
5282
5283	rounded = P2ROUNDUP(csize, (size_t)SPA_MINBLOCKSIZE);
5284	if (rounded > csize) {
5285		bzero((char *)cdata + csize, rounded - csize);
5286		csize = rounded;
5287	}
5288
5289	if (csize == 0) {
5290		/* zero block, indicate that there's nothing to write */
5291		zio_data_buf_free(cdata, len);
5292		l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5293		l2hdr->b_asize = 0;
5294		l2hdr->b_tmp_cdata = NULL;
5295		ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5296		return (B_TRUE);
5297	} else if (csize > 0 && csize < len) {
5298		/*
5299		 * Compression succeeded, we'll keep the cdata around for
5300		 * writing and release it afterwards.
5301		 */
5302		l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5303		l2hdr->b_asize = csize;
5304		l2hdr->b_tmp_cdata = cdata;
5305		ARCSTAT_BUMP(arcstat_l2_compress_successes);
5306		return (B_TRUE);
5307	} else {
5308		/*
5309		 * Compression failed, release the compressed buffer.
5310		 * l2hdr will be left unmodified.
5311		 */
5312		zio_data_buf_free(cdata, len);
5313		ARCSTAT_BUMP(arcstat_l2_compress_failures);
5314		return (B_FALSE);
5315	}
5316}
5317
5318/*
5319 * Decompresses a zio read back from an l2arc device. On success, the
5320 * underlying zio's io_data buffer is overwritten by the uncompressed
5321 * version. On decompression error (corrupt compressed stream), the
5322 * zio->io_error value is set to signal an I/O error.
5323 *
5324 * Please note that the compressed data stream is not checksummed, so
5325 * if the underlying device is experiencing data corruption, we may feed
5326 * corrupt data to the decompressor, so the decompressor needs to be
5327 * able to handle this situation (LZ4 does).
5328 */
5329static void
5330l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5331{
5332	ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5333
5334	if (zio->io_error != 0) {
5335		/*
5336		 * An io error has occured, just restore the original io
5337		 * size in preparation for a main pool read.
5338		 */
5339		zio->io_orig_size = zio->io_size = hdr->b_size;
5340		return;
5341	}
5342
5343	if (c == ZIO_COMPRESS_EMPTY) {
5344		/*
5345		 * An empty buffer results in a null zio, which means we
5346		 * need to fill its io_data after we're done restoring the
5347		 * buffer's contents.
5348		 */
5349		ASSERT(hdr->b_buf != NULL);
5350		bzero(hdr->b_buf->b_data, hdr->b_size);
5351		zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5352	} else {
5353		ASSERT(zio->io_data != NULL);
5354		/*
5355		 * We copy the compressed data from the start of the arc buffer
5356		 * (the zio_read will have pulled in only what we need, the
5357		 * rest is garbage which we will overwrite at decompression)
5358		 * and then decompress back to the ARC data buffer. This way we
5359		 * can minimize copying by simply decompressing back over the
5360		 * original compressed data (rather than decompressing to an
5361		 * aux buffer and then copying back the uncompressed buffer,
5362		 * which is likely to be much larger).
5363		 */
5364		uint64_t csize;
5365		void *cdata;
5366
5367		csize = zio->io_size;
5368		cdata = zio_data_buf_alloc(csize);
5369		bcopy(zio->io_data, cdata, csize);
5370		if (zio_decompress_data(c, cdata, zio->io_data, csize,
5371		    hdr->b_size) != 0)
5372			zio->io_error = EIO;
5373		zio_data_buf_free(cdata, csize);
5374	}
5375
5376	/* Restore the expected uncompressed IO size. */
5377	zio->io_orig_size = zio->io_size = hdr->b_size;
5378}
5379
5380/*
5381 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5382 * This buffer serves as a temporary holder of compressed data while
5383 * the buffer entry is being written to an l2arc device. Once that is
5384 * done, we can dispose of it.
5385 */
5386static void
5387l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5388{
5389	l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5390
5391	if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5392		/*
5393		 * If the data was compressed, then we've allocated a
5394		 * temporary buffer for it, so now we need to release it.
5395		 */
5396		ASSERT(l2hdr->b_tmp_cdata != NULL);
5397		zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5398	}
5399	l2hdr->b_tmp_cdata = NULL;
5400}
5401
5402/*
5403 * This thread feeds the L2ARC at regular intervals.  This is the beating
5404 * heart of the L2ARC.
5405 */
5406static void
5407l2arc_feed_thread(void *dummy __unused)
5408{
5409	callb_cpr_t cpr;
5410	l2arc_dev_t *dev;
5411	spa_t *spa;
5412	uint64_t size, wrote;
5413	clock_t begin, next = ddi_get_lbolt();
5414	boolean_t headroom_boost = B_FALSE;
5415
5416	CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5417
5418	mutex_enter(&l2arc_feed_thr_lock);
5419
5420	while (l2arc_thread_exit == 0) {
5421		CALLB_CPR_SAFE_BEGIN(&cpr);
5422		(void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,
5423		    next - ddi_get_lbolt());
5424		CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
5425		next = ddi_get_lbolt() + hz;
5426
5427		/*
5428		 * Quick check for L2ARC devices.
5429		 */
5430		mutex_enter(&l2arc_dev_mtx);
5431		if (l2arc_ndev == 0) {
5432			mutex_exit(&l2arc_dev_mtx);
5433			continue;
5434		}
5435		mutex_exit(&l2arc_dev_mtx);
5436		begin = ddi_get_lbolt();
5437
5438		/*
5439		 * This selects the next l2arc device to write to, and in
5440		 * doing so the next spa to feed from: dev->l2ad_spa.   This
5441		 * will return NULL if there are now no l2arc devices or if
5442		 * they are all faulted.
5443		 *
5444		 * If a device is returned, its spa's config lock is also
5445		 * held to prevent device removal.  l2arc_dev_get_next()
5446		 * will grab and release l2arc_dev_mtx.
5447		 */
5448		if ((dev = l2arc_dev_get_next()) == NULL)
5449			continue;
5450
5451		spa = dev->l2ad_spa;
5452		ASSERT(spa != NULL);
5453
5454		/*
5455		 * If the pool is read-only then force the feed thread to
5456		 * sleep a little longer.
5457		 */
5458		if (!spa_writeable(spa)) {
5459			next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
5460			spa_config_exit(spa, SCL_L2ARC, dev);
5461			continue;
5462		}
5463
5464		/*
5465		 * Avoid contributing to memory pressure.
5466		 */
5467		if (arc_reclaim_needed()) {
5468			ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5469			spa_config_exit(spa, SCL_L2ARC, dev);
5470			continue;
5471		}
5472
5473		ARCSTAT_BUMP(arcstat_l2_feeds);
5474
5475		size = l2arc_write_size();
5476
5477		/*
5478		 * Evict L2ARC buffers that will be overwritten.
5479		 */
5480		l2arc_evict(dev, size, B_FALSE);
5481
5482		/*
5483		 * Write ARC buffers.
5484		 */
5485		wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
5486
5487		/*
5488		 * Calculate interval between writes.
5489		 */
5490		next = l2arc_write_interval(begin, size, wrote);
5491		spa_config_exit(spa, SCL_L2ARC, dev);
5492	}
5493
5494	l2arc_thread_exit = 0;
5495	cv_broadcast(&l2arc_feed_thr_cv);
5496	CALLB_CPR_EXIT(&cpr);		/* drops l2arc_feed_thr_lock */
5497	thread_exit();
5498}
5499
5500boolean_t
5501l2arc_vdev_present(vdev_t *vd)
5502{
5503	l2arc_dev_t *dev;
5504
5505	mutex_enter(&l2arc_dev_mtx);
5506	for (dev = list_head(l2arc_dev_list); dev != NULL;
5507	    dev = list_next(l2arc_dev_list, dev)) {
5508		if (dev->l2ad_vdev == vd)
5509			break;
5510	}
5511	mutex_exit(&l2arc_dev_mtx);
5512
5513	return (dev != NULL);
5514}
5515
5516/*
5517 * Add a vdev for use by the L2ARC.  By this point the spa has already
5518 * validated the vdev and opened it.
5519 */
5520void
5521l2arc_add_vdev(spa_t *spa, vdev_t *vd)
5522{
5523	l2arc_dev_t *adddev;
5524
5525	ASSERT(!l2arc_vdev_present(vd));
5526
5527	vdev_ashift_optimize(vd);
5528
5529	/*
5530	 * Create a new l2arc device entry.
5531	 */
5532	adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5533	adddev->l2ad_spa = spa;
5534	adddev->l2ad_vdev = vd;
5535	adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5536	adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5537	adddev->l2ad_hand = adddev->l2ad_start;
5538	adddev->l2ad_evict = adddev->l2ad_start;
5539	adddev->l2ad_first = B_TRUE;
5540	adddev->l2ad_writing = B_FALSE;
5541
5542	/*
5543	 * This is a list of all ARC buffers that are still valid on the
5544	 * device.
5545	 */
5546	adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5547	list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5548	    offsetof(arc_buf_hdr_t, b_l2node));
5549
5550	vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
5551
5552	/*
5553	 * Add device to global list
5554	 */
5555	mutex_enter(&l2arc_dev_mtx);
5556	list_insert_head(l2arc_dev_list, adddev);
5557	atomic_inc_64(&l2arc_ndev);
5558	mutex_exit(&l2arc_dev_mtx);
5559}
5560
5561/*
5562 * Remove a vdev from the L2ARC.
5563 */
5564void
5565l2arc_remove_vdev(vdev_t *vd)
5566{
5567	l2arc_dev_t *dev, *nextdev, *remdev = NULL;
5568
5569	/*
5570	 * Find the device by vdev
5571	 */
5572	mutex_enter(&l2arc_dev_mtx);
5573	for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
5574		nextdev = list_next(l2arc_dev_list, dev);
5575		if (vd == dev->l2ad_vdev) {
5576			remdev = dev;
5577			break;
5578		}
5579	}
5580	ASSERT(remdev != NULL);
5581
5582	/*
5583	 * Remove device from global list
5584	 */
5585	list_remove(l2arc_dev_list, remdev);
5586	l2arc_dev_last = NULL;		/* may have been invalidated */
5587	atomic_dec_64(&l2arc_ndev);
5588	mutex_exit(&l2arc_dev_mtx);
5589
5590	/*
5591	 * Clear all buflists and ARC references.  L2ARC device flush.
5592	 */
5593	l2arc_evict(remdev, 0, B_TRUE);
5594	list_destroy(remdev->l2ad_buflist);
5595	kmem_free(remdev->l2ad_buflist, sizeof (list_t));
5596	kmem_free(remdev, sizeof (l2arc_dev_t));
5597}
5598
5599void
5600l2arc_init(void)
5601{
5602	l2arc_thread_exit = 0;
5603	l2arc_ndev = 0;
5604	l2arc_writes_sent = 0;
5605	l2arc_writes_done = 0;
5606
5607	mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
5608	cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
5609	mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
5610	mutex_init(&l2arc_buflist_mtx, NULL, MUTEX_DEFAULT, NULL);
5611	mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
5612
5613	l2arc_dev_list = &L2ARC_dev_list;
5614	l2arc_free_on_write = &L2ARC_free_on_write;
5615	list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
5616	    offsetof(l2arc_dev_t, l2ad_node));
5617	list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
5618	    offsetof(l2arc_data_free_t, l2df_list_node));
5619}
5620
5621void
5622l2arc_fini(void)
5623{
5624	/*
5625	 * This is called from dmu_fini(), which is called from spa_fini();
5626	 * Because of this, we can assume that all l2arc devices have
5627	 * already been removed when the pools themselves were removed.
5628	 */
5629
5630	l2arc_do_free_on_write();
5631
5632	mutex_destroy(&l2arc_feed_thr_lock);
5633	cv_destroy(&l2arc_feed_thr_cv);
5634	mutex_destroy(&l2arc_dev_mtx);
5635	mutex_destroy(&l2arc_buflist_mtx);
5636	mutex_destroy(&l2arc_free_on_write_mtx);
5637
5638	list_destroy(l2arc_dev_list);
5639	list_destroy(l2arc_free_on_write);
5640}
5641
5642void
5643l2arc_start(void)
5644{
5645	if (!(spa_mode_global & FWRITE))
5646		return;
5647
5648	(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
5649	    TS_RUN, minclsyspri);
5650}
5651
5652void
5653l2arc_stop(void)
5654{
5655	if (!(spa_mode_global & FWRITE))
5656		return;
5657
5658	mutex_enter(&l2arc_feed_thr_lock);
5659	cv_signal(&l2arc_feed_thr_cv);	/* kick thread out of startup */
5660	l2arc_thread_exit = 1;
5661	while (l2arc_thread_exit != 0)
5662		cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
5663	mutex_exit(&l2arc_feed_thr_lock);
5664}
5665