arc.c (249195) arc.c (251478)
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 8 unchanged lines hidden ---

17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2013 by Delphix. All rights reserved.
1/*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE

--- 8 unchanged lines hidden ---

17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21/*
22 * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
23 * Copyright 2011 Nexenta Systems, Inc. All rights reserved.
24 * Copyright (c) 2013 by Delphix. All rights reserved.
25 * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
25 */
26
27/*
28 * DVA-based Adjustable Replacement Cache
29 *
30 * While much of the theory of operation used here is
31 * based on the self-tuning, low overhead replacement cache
32 * presented by Megiddo and Modha at FAST 2003, there are some

--- 82 unchanged lines hidden ---

115 * - L2ARC buflist eviction
116 * - L2ARC write completion, which walks L2ARC buflists
117 * - ARC header destruction, as it removes from L2ARC buflists
118 * - ARC header release, as it removes from L2ARC buflists
119 */
120
121#include <sys/spa.h>
122#include <sys/zio.h>
26 */
27
28/*
29 * DVA-based Adjustable Replacement Cache
30 *
31 * While much of the theory of operation used here is
32 * based on the self-tuning, low overhead replacement cache
33 * presented by Megiddo and Modha at FAST 2003, there are some

--- 82 unchanged lines hidden ---

116 * - L2ARC buflist eviction
117 * - L2ARC write completion, which walks L2ARC buflists
118 * - ARC header destruction, as it removes from L2ARC buflists
119 * - ARC header release, as it removes from L2ARC buflists
120 */
121
122#include <sys/spa.h>
123#include <sys/zio.h>
124#include <sys/zio_compress.h>
123#include <sys/zfs_context.h>
124#include <sys/arc.h>
125#include <sys/refcount.h>
126#include <sys/vdev.h>
127#include <sys/vdev_impl.h>
128#ifdef _KERNEL
129#include <sys/dnlc.h>
130#endif

--- 186 unchanged lines hidden ---

317 kstat_named_t arcstat_l2_writes_hdr_miss;
318 kstat_named_t arcstat_l2_evict_lock_retry;
319 kstat_named_t arcstat_l2_evict_reading;
320 kstat_named_t arcstat_l2_free_on_write;
321 kstat_named_t arcstat_l2_abort_lowmem;
322 kstat_named_t arcstat_l2_cksum_bad;
323 kstat_named_t arcstat_l2_io_error;
324 kstat_named_t arcstat_l2_size;
125#include <sys/zfs_context.h>
126#include <sys/arc.h>
127#include <sys/refcount.h>
128#include <sys/vdev.h>
129#include <sys/vdev_impl.h>
130#ifdef _KERNEL
131#include <sys/dnlc.h>
132#endif

--- 186 unchanged lines hidden ---

319 kstat_named_t arcstat_l2_writes_hdr_miss;
320 kstat_named_t arcstat_l2_evict_lock_retry;
321 kstat_named_t arcstat_l2_evict_reading;
322 kstat_named_t arcstat_l2_free_on_write;
323 kstat_named_t arcstat_l2_abort_lowmem;
324 kstat_named_t arcstat_l2_cksum_bad;
325 kstat_named_t arcstat_l2_io_error;
326 kstat_named_t arcstat_l2_size;
327 kstat_named_t arcstat_l2_asize;
325 kstat_named_t arcstat_l2_hdr_size;
328 kstat_named_t arcstat_l2_hdr_size;
329 kstat_named_t arcstat_l2_compress_successes;
330 kstat_named_t arcstat_l2_compress_zeros;
331 kstat_named_t arcstat_l2_compress_failures;
326 kstat_named_t arcstat_l2_write_trylock_fail;
327 kstat_named_t arcstat_l2_write_passed_headroom;
328 kstat_named_t arcstat_l2_write_spa_mismatch;
329 kstat_named_t arcstat_l2_write_in_l2;
330 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
331 kstat_named_t arcstat_l2_write_not_cacheable;
332 kstat_named_t arcstat_l2_write_full;
333 kstat_named_t arcstat_l2_write_buffer_iter;

--- 56 unchanged lines hidden ---

390 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
391 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
392 { "l2_evict_reading", KSTAT_DATA_UINT64 },
393 { "l2_free_on_write", KSTAT_DATA_UINT64 },
394 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
395 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
396 { "l2_io_error", KSTAT_DATA_UINT64 },
397 { "l2_size", KSTAT_DATA_UINT64 },
332 kstat_named_t arcstat_l2_write_trylock_fail;
333 kstat_named_t arcstat_l2_write_passed_headroom;
334 kstat_named_t arcstat_l2_write_spa_mismatch;
335 kstat_named_t arcstat_l2_write_in_l2;
336 kstat_named_t arcstat_l2_write_hdr_io_in_progress;
337 kstat_named_t arcstat_l2_write_not_cacheable;
338 kstat_named_t arcstat_l2_write_full;
339 kstat_named_t arcstat_l2_write_buffer_iter;

--- 56 unchanged lines hidden ---

396 { "l2_writes_hdr_miss", KSTAT_DATA_UINT64 },
397 { "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
398 { "l2_evict_reading", KSTAT_DATA_UINT64 },
399 { "l2_free_on_write", KSTAT_DATA_UINT64 },
400 { "l2_abort_lowmem", KSTAT_DATA_UINT64 },
401 { "l2_cksum_bad", KSTAT_DATA_UINT64 },
402 { "l2_io_error", KSTAT_DATA_UINT64 },
403 { "l2_size", KSTAT_DATA_UINT64 },
404 { "l2_asize", KSTAT_DATA_UINT64 },
398 { "l2_hdr_size", KSTAT_DATA_UINT64 },
405 { "l2_hdr_size", KSTAT_DATA_UINT64 },
406 { "l2_compress_successes", KSTAT_DATA_UINT64 },
407 { "l2_compress_zeros", KSTAT_DATA_UINT64 },
408 { "l2_compress_failures", KSTAT_DATA_UINT64 },
399 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
400 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
401 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
402 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
403 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
404 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
405 { "l2_write_full", KSTAT_DATA_UINT64 },
406 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },

--- 62 unchanged lines hidden ---

469 * while still allowing the code to be readable.
470 */
471#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
472#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
473#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
474#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
475#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
476
409 { "l2_write_trylock_fail", KSTAT_DATA_UINT64 },
410 { "l2_write_passed_headroom", KSTAT_DATA_UINT64 },
411 { "l2_write_spa_mismatch", KSTAT_DATA_UINT64 },
412 { "l2_write_in_l2", KSTAT_DATA_UINT64 },
413 { "l2_write_io_in_progress", KSTAT_DATA_UINT64 },
414 { "l2_write_not_cacheable", KSTAT_DATA_UINT64 },
415 { "l2_write_full", KSTAT_DATA_UINT64 },
416 { "l2_write_buffer_iter", KSTAT_DATA_UINT64 },

--- 62 unchanged lines hidden ---

479 * while still allowing the code to be readable.
480 */
481#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
482#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
483#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
484#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
485#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
486
487#define L2ARC_IS_VALID_COMPRESS(_c_) \
488 ((_c_) == ZIO_COMPRESS_LZ4 || (_c_) == ZIO_COMPRESS_EMPTY)
489
477static int arc_no_grow; /* Don't try to grow cache size */
478static uint64_t arc_tempreserve;
479static uint64_t arc_loaned_bytes;
480static uint64_t arc_meta_used;
481static uint64_t arc_meta_limit;
482static uint64_t arc_meta_max = 0;
483SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
484 "ARC metadata used");

--- 146 unchanged lines hidden ---

631
632uint64_t zfs_crc64_table[256];
633
634/*
635 * Level 2 ARC
636 */
637
638#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
490static int arc_no_grow; /* Don't try to grow cache size */
491static uint64_t arc_tempreserve;
492static uint64_t arc_loaned_bytes;
493static uint64_t arc_meta_used;
494static uint64_t arc_meta_limit;
495static uint64_t arc_meta_max = 0;
496SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, arc_meta_used, CTLFLAG_RD, &arc_meta_used, 0,
497 "ARC metadata used");

--- 146 unchanged lines hidden ---

644
645uint64_t zfs_crc64_table[256];
646
647/*
648 * Level 2 ARC
649 */
650
651#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
639#define L2ARC_HEADROOM 2 /* num of writes */
652#define L2ARC_HEADROOM 2 /* num of writes */
653/*
654 * If we discover during ARC scan any buffers to be compressed, we boost
655 * our headroom for the next scanning cycle by this percentage multiple.
656 */
657#define L2ARC_HEADROOM_BOOST 200
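To make the boost concrete, here is the arithmetic with this file's defaults (l2arc_write_max = L2ARC_WRITE_SIZE = 8 MB, l2arc_headroom = 2), as applied per scanned ARC list in l2arc_write_buffers() further below:

	headroom = target_sz * l2arc_headroom = 8 MB * 2 = 16 MB
	boosted  = (headroom * L2ARC_HEADROOM_BOOST) / 100 = 32 MB   (next cycle only)

(During warmup, l2arc_write_boost doubles target_sz, scaling both figures accordingly.)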
640#define L2ARC_FEED_SECS 1 /* caching interval secs */
641#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
642
643#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
644#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
645
646/*
647 * L2ARC Performance Tunables
648 */
649uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
650uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
651uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
658#define L2ARC_FEED_SECS 1 /* caching interval secs */
659#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
660
661#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
662#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
663
664/*
665 * L2ARC Performance Tunables
666 */
667uint64_t l2arc_write_max = L2ARC_WRITE_SIZE; /* default max write size */
668uint64_t l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra write during warmup */
669uint64_t l2arc_headroom = L2ARC_HEADROOM; /* number of dev writes */
670uint64_t l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
652uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
653uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
654boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
655boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
656boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
657
658SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
659 &l2arc_write_max, 0, "max write size");

--- 57 unchanged lines hidden ---

717
718/*
719 * L2ARC Internals
720 */
721typedef struct l2arc_dev {
722 vdev_t *l2ad_vdev; /* vdev */
723 spa_t *l2ad_spa; /* spa */
724 uint64_t l2ad_hand; /* next write location */
671uint64_t l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
672uint64_t l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval milliseconds */
673boolean_t l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
674boolean_t l2arc_feed_again = B_TRUE; /* turbo warmup */
675boolean_t l2arc_norw = B_TRUE; /* no reads during writes */
676
677SYSCTL_UQUAD(_vfs_zfs, OID_AUTO, l2arc_write_max, CTLFLAG_RW,
678 &l2arc_write_max, 0, "max write size");

--- 57 unchanged lines hidden ---

736
737/*
738 * L2ARC Internals
739 */
740typedef struct l2arc_dev {
741 vdev_t *l2ad_vdev; /* vdev */
742 spa_t *l2ad_spa; /* spa */
743 uint64_t l2ad_hand; /* next write location */
725 uint64_t l2ad_write; /* desired write size, bytes */
726 uint64_t l2ad_boost; /* warmup write boost, bytes */
727 uint64_t l2ad_start; /* first addr on device */
728 uint64_t l2ad_end; /* last addr on device */
729 uint64_t l2ad_evict; /* last addr eviction reached */
730 boolean_t l2ad_first; /* first sweep through */
731 boolean_t l2ad_writing; /* currently writing */
732 list_t *l2ad_buflist; /* buffer list */
733 list_node_t l2ad_node; /* device list node */
734} l2arc_dev_t;

--- 4 unchanged lines hidden ---

739static l2arc_dev_t *l2arc_dev_last; /* last device used */
740static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
741static list_t L2ARC_free_on_write; /* free after write buf list */
742static list_t *l2arc_free_on_write; /* free after write list ptr */
743static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
744static uint64_t l2arc_ndev; /* number of devices */
745
746typedef struct l2arc_read_callback {
744 uint64_t l2ad_start; /* first addr on device */
745 uint64_t l2ad_end; /* last addr on device */
746 uint64_t l2ad_evict; /* last addr eviction reached */
747 boolean_t l2ad_first; /* first sweep through */
748 boolean_t l2ad_writing; /* currently writing */
749 list_t *l2ad_buflist; /* buffer list */
750 list_node_t l2ad_node; /* device list node */
751} l2arc_dev_t;

--- 4 unchanged lines hidden ---

756static l2arc_dev_t *l2arc_dev_last; /* last device used */
757static kmutex_t l2arc_buflist_mtx; /* mutex for all buflists */
758static list_t L2ARC_free_on_write; /* free after write buf list */
759static list_t *l2arc_free_on_write; /* free after write list ptr */
760static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
761static uint64_t l2arc_ndev; /* number of devices */
762
763typedef struct l2arc_read_callback {
747 arc_buf_t *l2rcb_buf; /* read buffer */
748 spa_t *l2rcb_spa; /* spa */
749 blkptr_t l2rcb_bp; /* original blkptr */
750 zbookmark_t l2rcb_zb; /* original bookmark */
751 int l2rcb_flags; /* original flags */
764 arc_buf_t *l2rcb_buf; /* read buffer */
765 spa_t *l2rcb_spa; /* spa */
766 blkptr_t l2rcb_bp; /* original blkptr */
767 zbookmark_t l2rcb_zb; /* original bookmark */
768 int l2rcb_flags; /* original flags */
769 enum zio_compress l2rcb_compress; /* applied compress */
752} l2arc_read_callback_t;
753
754typedef struct l2arc_write_callback {
755 l2arc_dev_t *l2wcb_dev; /* device info */
756 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
757} l2arc_write_callback_t;
758
759struct l2arc_buf_hdr {
760 /* protected by arc_buf_hdr mutex */
770} l2arc_read_callback_t;
771
772typedef struct l2arc_write_callback {
773 l2arc_dev_t *l2wcb_dev; /* device info */
774 arc_buf_hdr_t *l2wcb_head; /* head of write buflist */
775} l2arc_write_callback_t;
776
777struct l2arc_buf_hdr {
778 /* protected by arc_buf_hdr mutex */
761 l2arc_dev_t *b_dev; /* L2ARC device */
762 uint64_t b_daddr; /* disk address, offset byte */
779 l2arc_dev_t *b_dev; /* L2ARC device */
780 uint64_t b_daddr; /* disk address, offset byte */
781 /* compression applied to buffer data */
782 enum zio_compress b_compress;
783 /* real alloc'd buffer size depending on b_compress applied */
784 int b_asize;
785 /* temporary buffer holder for in-flight compressed data */
786 void *b_tmp_cdata;
763};
764
765typedef struct l2arc_data_free {
766 /* protected by l2arc_free_on_write_mtx */
767 void *l2df_data;
768 size_t l2df_size;
769 void (*l2df_func)(void *, size_t);
770 list_node_t l2df_list_node;
771} l2arc_data_free_t;
772
773static kmutex_t l2arc_feed_thr_lock;
774static kcondvar_t l2arc_feed_thr_cv;
775static uint8_t l2arc_thread_exit;
776
777static void l2arc_read_done(zio_t *zio);
778static void l2arc_hdr_stat_add(void);
779static void l2arc_hdr_stat_remove(void);
780
787};
788
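The three new l2arc_buf_hdr fields drive the compressed write path. A sketch of the b_tmp_cdata lifecycle, pieced together from the functions added further below:

	/*
	 * l2arc_write_buffers():  b_compress = ZIO_COMPRESS_OFF,
	 *                         b_asize = ab->b_size, and b_tmp_cdata
	 *                         aliases ab->b_buf->b_data (no copy yet).
	 * l2arc_compress_buf():   on an LZ4 win, b_tmp_cdata becomes a
	 *                         freshly allocated compressed copy and
	 *                         b_asize its size; an all-zero buffer
	 *                         yields b_tmp_cdata = NULL, b_asize = 0,
	 *                         b_compress = ZIO_COMPRESS_EMPTY.
	 * zio_write_phys():       writes b_asize bytes from b_tmp_cdata.
	 * l2arc_write_done():     l2arc_release_cdata_buf() frees the LZ4
	 *                         copy and NULLs b_tmp_cdata.
	 */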
789typedef struct l2arc_data_free {
790 /* protected by l2arc_free_on_write_mtx */
791 void *l2df_data;
792 size_t l2df_size;
793 void (*l2df_func)(void *, size_t);
794 list_node_t l2df_list_node;
795} l2arc_data_free_t;
796
797static kmutex_t l2arc_feed_thr_lock;
798static kcondvar_t l2arc_feed_thr_cv;
799static uint8_t l2arc_thread_exit;
800
801static void l2arc_read_done(zio_t *zio);
802static void l2arc_hdr_stat_add(void);
803static void l2arc_hdr_stat_remove(void);
804
805static boolean_t l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr);
806static void l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr,
807 enum zio_compress c);
808static void l2arc_release_cdata_buf(arc_buf_hdr_t *ab);
809
781static uint64_t
782buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
783{
784 uint8_t *vdva = (uint8_t *)dva;
785 uint64_t crc = -1ULL;
786 int i;
787
788 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

--- 902 unchanged lines hidden ---

1691 l2hdr = hdr->b_l2hdr;
1692 }
1693
1694 if (l2hdr != NULL) {
1695 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1696 hdr->b_size, 0);
1697 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1698 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
810static uint64_t
811buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
812{
813 uint8_t *vdva = (uint8_t *)dva;
814 uint64_t crc = -1ULL;
815 int i;
816
817 ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);

--- 902 unchanged lines hidden ---

1720 l2hdr = hdr->b_l2hdr;
1721 }
1722
1723 if (l2hdr != NULL) {
1724 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
1725 hdr->b_size, 0);
1726 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
1727 ARCSTAT_INCR(arcstat_l2_size, -hdr->b_size);
1728 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
1699 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1700 if (hdr->b_state == arc_l2c_only)
1701 l2arc_hdr_stat_remove();
1702 hdr->b_l2hdr = NULL;
1703 }
1704
1705 if (!buflist_held)
1706 mutex_exit(&l2arc_buflist_mtx);

--- 1406 unchanged lines hidden ---

3113 } else if (*arc_flags & ARC_PREFETCH &&
3114 refcount_count(&hdr->b_refcnt) == 0) {
3115 hdr->b_flags |= ARC_PREFETCH;
3116 }
3117 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3118 arc_access(hdr, hash_lock);
3119 if (*arc_flags & ARC_L2CACHE)
3120 hdr->b_flags |= ARC_L2CACHE;
1729 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
1730 if (hdr->b_state == arc_l2c_only)
1731 l2arc_hdr_stat_remove();
1732 hdr->b_l2hdr = NULL;
1733 }
1734
1735 if (!buflist_held)
1736 mutex_exit(&l2arc_buflist_mtx);

--- 1406 unchanged lines hidden ---

3143 } else if (*arc_flags & ARC_PREFETCH &&
3144 refcount_count(&hdr->b_refcnt) == 0) {
3145 hdr->b_flags |= ARC_PREFETCH;
3146 }
3147 DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
3148 arc_access(hdr, hash_lock);
3149 if (*arc_flags & ARC_L2CACHE)
3150 hdr->b_flags |= ARC_L2CACHE;
3151 if (*arc_flags & ARC_L2COMPRESS)
3152 hdr->b_flags |= ARC_L2COMPRESS;
3121 mutex_exit(hash_lock);
3122 ARCSTAT_BUMP(arcstat_hits);
3123 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3124 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3125 data, metadata, hits);
3126
3127 if (done)
3128 done(NULL, buf, private);

--- 24 unchanged lines hidden ---

3153 /* if this is a prefetch, we don't have a reference */
3154 if (*arc_flags & ARC_PREFETCH) {
3155 (void) remove_reference(hdr, hash_lock,
3156 private);
3157 hdr->b_flags |= ARC_PREFETCH;
3158 }
3159 if (*arc_flags & ARC_L2CACHE)
3160 hdr->b_flags |= ARC_L2CACHE;
3153 mutex_exit(hash_lock);
3154 ARCSTAT_BUMP(arcstat_hits);
3155 ARCSTAT_CONDSTAT(!(hdr->b_flags & ARC_PREFETCH),
3156 demand, prefetch, hdr->b_type != ARC_BUFC_METADATA,
3157 data, metadata, hits);
3158
3159 if (done)
3160 done(NULL, buf, private);

--- 24 unchanged lines hidden ---

3185 /* if this is a prefetch, we don't have a reference */
3186 if (*arc_flags & ARC_PREFETCH) {
3187 (void) remove_reference(hdr, hash_lock,
3188 private);
3189 hdr->b_flags |= ARC_PREFETCH;
3190 }
3191 if (*arc_flags & ARC_L2CACHE)
3192 hdr->b_flags |= ARC_L2CACHE;
3193 if (*arc_flags & ARC_L2COMPRESS)
3194 hdr->b_flags |= ARC_L2COMPRESS;
3161 if (BP_GET_LEVEL(bp) > 0)
3162 hdr->b_flags |= ARC_INDIRECT;
3163 } else {
3164 /* this block is in the ghost cache */
3165 ASSERT(GHOST_STATE(hdr->b_state));
3166 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3167 ASSERT0(refcount_count(&hdr->b_refcnt));
3168 ASSERT(hdr->b_buf == NULL);
3169
3170 /* if this is a prefetch, we don't have a reference */
3171 if (*arc_flags & ARC_PREFETCH)
3172 hdr->b_flags |= ARC_PREFETCH;
3173 else
3174 add_reference(hdr, hash_lock, private);
3175 if (*arc_flags & ARC_L2CACHE)
3176 hdr->b_flags |= ARC_L2CACHE;
3195 if (BP_GET_LEVEL(bp) > 0)
3196 hdr->b_flags |= ARC_INDIRECT;
3197 } else {
3198 /* this block is in the ghost cache */
3199 ASSERT(GHOST_STATE(hdr->b_state));
3200 ASSERT(!HDR_IO_IN_PROGRESS(hdr));
3201 ASSERT0(refcount_count(&hdr->b_refcnt));
3202 ASSERT(hdr->b_buf == NULL);
3203
3204 /* if this is a prefetch, we don't have a reference */
3205 if (*arc_flags & ARC_PREFETCH)
3206 hdr->b_flags |= ARC_PREFETCH;
3207 else
3208 add_reference(hdr, hash_lock, private);
3209 if (*arc_flags & ARC_L2CACHE)
3210 hdr->b_flags |= ARC_L2CACHE;
3211 if (*arc_flags & ARC_L2COMPRESS)
3212 hdr->b_flags |= ARC_L2COMPRESS;
3177 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3178 buf->b_hdr = hdr;
3179 buf->b_data = NULL;
3180 buf->b_efunc = NULL;
3181 buf->b_private = NULL;
3182 buf->b_next = NULL;
3183 hdr->b_buf = buf;
3184 ASSERT(hdr->b_datacnt == 0);

--- 57 unchanged lines hidden ---

3242
3243 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3244 KM_SLEEP);
3245 cb->l2rcb_buf = buf;
3246 cb->l2rcb_spa = spa;
3247 cb->l2rcb_bp = *bp;
3248 cb->l2rcb_zb = *zb;
3249 cb->l2rcb_flags = zio_flags;
3213 buf = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
3214 buf->b_hdr = hdr;
3215 buf->b_data = NULL;
3216 buf->b_efunc = NULL;
3217 buf->b_private = NULL;
3218 buf->b_next = NULL;
3219 hdr->b_buf = buf;
3220 ASSERT(hdr->b_datacnt == 0);

--- 57 unchanged lines hidden ---

3278
3279 cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
3280 KM_SLEEP);
3281 cb->l2rcb_buf = buf;
3282 cb->l2rcb_spa = spa;
3283 cb->l2rcb_bp = *bp;
3284 cb->l2rcb_zb = *zb;
3285 cb->l2rcb_flags = zio_flags;
3286 cb->l2rcb_compress = hdr->b_l2hdr->b_compress;
3250
3251 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3252 addr + size < vd->vdev_psize -
3253 VDEV_LABEL_END_SIZE);
3254
3255 /*
3256 * l2arc read. The SCL_L2ARC lock will be
3257 * released by l2arc_read_done().
3287
3288 ASSERT(addr >= VDEV_LABEL_START_SIZE &&
3289 addr + size < vd->vdev_psize -
3290 VDEV_LABEL_END_SIZE);
3291
3292 /*
3293 * l2arc read. The SCL_L2ARC lock will be
3294 * released by l2arc_read_done().
3295 * Issue a null zio if the underlying buffer
3296 * was squashed to zero size by compression.
3258 */
3297 */
3259 rzio = zio_read_phys(pio, vd, addr, size,
3260 buf->b_data, ZIO_CHECKSUM_OFF,
3261 l2arc_read_done, cb, priority, zio_flags |
3262 ZIO_FLAG_DONT_CACHE | ZIO_FLAG_CANFAIL |
3263 ZIO_FLAG_DONT_PROPAGATE |
3264 ZIO_FLAG_DONT_RETRY, B_FALSE);
3298 if (hdr->b_l2hdr->b_compress ==
3299 ZIO_COMPRESS_EMPTY) {
3300 rzio = zio_null(pio, spa, vd,
3301 l2arc_read_done, cb,
3302 zio_flags | ZIO_FLAG_DONT_CACHE |
3303 ZIO_FLAG_CANFAIL |
3304 ZIO_FLAG_DONT_PROPAGATE |
3305 ZIO_FLAG_DONT_RETRY);
3306 } else {
3307 rzio = zio_read_phys(pio, vd, addr,
3308 hdr->b_l2hdr->b_asize,
3309 buf->b_data, ZIO_CHECKSUM_OFF,
3310 l2arc_read_done, cb, priority,
3311 zio_flags | ZIO_FLAG_DONT_CACHE |
3312 ZIO_FLAG_CANFAIL |
3313 ZIO_FLAG_DONT_PROPAGATE |
3314 ZIO_FLAG_DONT_RETRY, B_FALSE);
3315 }
3265 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3266 zio_t *, rzio);
3316 DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
3317 zio_t *, rzio);
3267 ARCSTAT_INCR(arcstat_l2_read_bytes, size);
3318 ARCSTAT_INCR(arcstat_l2_read_bytes,
3319 hdr->b_l2hdr->b_asize);
3268
3269 if (*arc_flags & ARC_NOWAIT) {
3270 zio_nowait(rzio);
3271 return (0);
3272 }
3273
3274 ASSERT(*arc_flags & ARC_WAIT);
3275 if (zio_wait(rzio) == 0)

--- 250 unchanged lines hidden ---

3526
3527 buf_discard_identity(hdr);
3528 arc_buf_thaw(buf);
3529 }
3530 buf->b_efunc = NULL;
3531 buf->b_private = NULL;
3532
3533 if (l2hdr) {
3320
3321 if (*arc_flags & ARC_NOWAIT) {
3322 zio_nowait(rzio);
3323 return (0);
3324 }
3325
3326 ASSERT(*arc_flags & ARC_WAIT);
3327 if (zio_wait(rzio) == 0)

--- 250 unchanged lines hidden ---

3578
3579 buf_discard_identity(hdr);
3580 arc_buf_thaw(buf);
3581 }
3582 buf->b_efunc = NULL;
3583 buf->b_private = NULL;
3584
3585 if (l2hdr) {
3586 ARCSTAT_INCR(arcstat_l2_asize, -l2hdr->b_asize);
3534 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3535 hdr->b_size, 0);
3536 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3537 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3538 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3539 mutex_exit(&l2arc_buflist_mtx);
3540 }
3541}

--- 135 unchanged lines hidden ---

3677 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3678 callback->awcb_done(zio, buf, callback->awcb_private);
3679
3680 kmem_free(callback, sizeof (arc_write_callback_t));
3681}
3682
3683zio_t *
3684arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3587 trim_map_free(l2hdr->b_dev->l2ad_vdev, l2hdr->b_daddr,
3588 hdr->b_size, 0);
3589 list_remove(l2hdr->b_dev->l2ad_buflist, hdr);
3590 kmem_free(l2hdr, sizeof (l2arc_buf_hdr_t));
3591 ARCSTAT_INCR(arcstat_l2_size, -buf_size);
3592 mutex_exit(&l2arc_buflist_mtx);
3593 }
3594}

--- 135 unchanged lines hidden ---

3730 ASSERT(!refcount_is_zero(&hdr->b_refcnt));
3731 callback->awcb_done(zio, buf, callback->awcb_private);
3732
3733 kmem_free(callback, sizeof (arc_write_callback_t));
3734}
3735
3736zio_t *
3737arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
3685 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, const zio_prop_t *zp,
3686 arc_done_func_t *ready, arc_done_func_t *done, void *private,
3687 int priority, int zio_flags, const zbookmark_t *zb)
3738 blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc, boolean_t l2arc_compress,
3739 const zio_prop_t *zp, arc_done_func_t *ready, arc_done_func_t *done,
3740 void *private, int priority, int zio_flags, const zbookmark_t *zb)
3688{
3689 arc_buf_hdr_t *hdr = buf->b_hdr;
3690 arc_write_callback_t *callback;
3691 zio_t *zio;
3692
3693 ASSERT(ready != NULL);
3694 ASSERT(done != NULL);
3695 ASSERT(!HDR_IO_ERROR(hdr));
3696 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3697 ASSERT(hdr->b_acb == NULL);
3698 if (l2arc)
3699 hdr->b_flags |= ARC_L2CACHE;
3741{
3742 arc_buf_hdr_t *hdr = buf->b_hdr;
3743 arc_write_callback_t *callback;
3744 zio_t *zio;
3745
3746 ASSERT(ready != NULL);
3747 ASSERT(done != NULL);
3748 ASSERT(!HDR_IO_ERROR(hdr));
3749 ASSERT((hdr->b_flags & ARC_IO_IN_PROGRESS) == 0);
3750 ASSERT(hdr->b_acb == NULL);
3751 if (l2arc)
3752 hdr->b_flags |= ARC_L2CACHE;
3753 if (l2arc_compress)
3754 hdr->b_flags |= ARC_L2COMPRESS;
3700 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3701 callback->awcb_ready = ready;
3702 callback->awcb_done = done;
3703 callback->awcb_private = private;
3704 callback->awcb_buf = buf;
3705
3706 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3707 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);

--- 434 unchanged lines hidden ---

4142 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4143 * the ARC behave as usual, freeing buffers and placing headers on ghost
4144 * lists. The ARC does not send buffers to the L2ARC during eviction as
4145 * this would add inflated write latencies for all ARC memory pressure.
4146 *
4147 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4148 * It does this by periodically scanning buffers from the eviction-end of
4149 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
3755 callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
3756 callback->awcb_ready = ready;
3757 callback->awcb_done = done;
3758 callback->awcb_private = private;
3759 callback->awcb_buf = buf;
3760
3761 zio = zio_write(pio, spa, txg, bp, buf->b_data, hdr->b_size, zp,
3762 arc_write_ready, arc_write_done, callback, priority, zio_flags, zb);

--- 434 unchanged lines hidden ---

4197 * 1. There is no eviction path from the ARC to the L2ARC. Evictions from
4198 * the ARC behave as usual, freeing buffers and placing headers on ghost
4199 * lists. The ARC does not send buffers to the L2ARC during eviction as
4200 * this would add inflated write latencies for all ARC memory pressure.
4201 *
4202 * 2. The L2ARC attempts to cache data from the ARC before it is evicted.
4203 * It does this by periodically scanning buffers from the eviction-end of
4204 * the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
4150 * not already there. It scans until a headroom of buffers is satisfied,
4151 * which itself is a buffer for ARC eviction. The thread that does this is
4205 * not already there. It scans until a headroom of buffers is satisfied,
4206 * which itself is a buffer for ARC eviction. If a compressible buffer is
4207 * found during scanning and selected for writing to an L2ARC device, we
4208 * temporarily boost scanning headroom during the next scan cycle to make
4209 * sure we adapt to compression effects (which might significantly reduce
4210 * the data volume we write to L2ARC). The thread that does this is
4152 * l2arc_feed_thread(), illustrated below; example sizes are included to
4153 * provide a better sense of ratio than this diagram:
4154 *
4155 * head --> tail
4156 * +---------------------+----------+
4157 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4158 * +---------------------+----------+ | o L2ARC eligible
4159 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer

--- 48 unchanged lines hidden ---

4208 *
4209 * The performance of the L2ARC can be tweaked by a number of tunables, which
4210 * may be necessary for different workloads:
4211 *
4212 * l2arc_write_max max write bytes per interval
4213 * l2arc_write_boost extra write bytes during device warmup
4214 * l2arc_noprefetch skip caching prefetched buffers
4215 * l2arc_headroom number of max device writes to precache
4211 * l2arc_feed_thread(), illustrated below; example sizes are included to
4212 * provide a better sense of ratio than this diagram:
4213 *
4214 * head --> tail
4215 * +---------------------+----------+
4216 * ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
4217 * +---------------------+----------+ | o L2ARC eligible
4218 * ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer

--- 48 unchanged lines hidden ---

4267 *
4268 * The performance of the L2ARC can be tweaked by a number of tunables, which
4269 * may be necessary for different workloads:
4270 *
4271 * l2arc_write_max max write bytes per interval
4272 * l2arc_write_boost extra write bytes during device warmup
4273 * l2arc_noprefetch skip caching prefetched buffers
4274 * l2arc_headroom number of max device writes to precache
4275 * l2arc_headroom_boost when we find compressed buffers during ARC
4276 * scanning, we multiply headroom by this
4277 * percentage factor for the next scan cycle,
4278 * since more compressed buffers are likely to
4279 * be present
4216 * l2arc_feed_secs seconds between L2ARC writing
4217 *
4218 * Tunables may be removed or added as future performance improvements are
4219 * integrated, and also may become zpool properties.
4220 *
4221 * There are three key functions that control how the L2ARC warms up:
4222 *
4223 * l2arc_write_eligible() check if a buffer is eligible to cache

--- 30 unchanged lines hidden ---

4254 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4255 return (B_FALSE);
4256 }
4257
4258 return (B_TRUE);
4259}
4260
4261static uint64_t
4280 * l2arc_feed_secs seconds between L2ARC writing
4281 *
4282 * Tunables may be removed or added as future performance improvements are
4283 * integrated, and also may become zpool properties.
4284 *
4285 * There are three key functions that control how the L2ARC warms up:
4286 *
4287 * l2arc_write_eligible() check if a buffer is eligible to cache

--- 30 unchanged lines hidden ---

4318 ARCSTAT_BUMP(arcstat_l2_write_not_cacheable);
4319 return (B_FALSE);
4320 }
4321
4322 return (B_TRUE);
4323}
4324
4325static uint64_t
4262l2arc_write_size(l2arc_dev_t *dev)
4326l2arc_write_size(void)
4263{
4264 uint64_t size;
4265
4327{
4328 uint64_t size;
4329
4266 size = dev->l2ad_write;
4330 /*
4331 * Make sure our globals have meaningful values in case the user
4332 * altered them.
4333 */
4334 size = l2arc_write_max;
4335 if (size == 0) {
4336 cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
4337 "be greater than zero, resetting it to the default (%d)",
4338 L2ARC_WRITE_SIZE);
4339 size = l2arc_write_max = L2ARC_WRITE_SIZE;
4340 }
4267
4268 if (arc_warm == B_FALSE)
4341
4342 if (arc_warm == B_FALSE)
4269 size += dev->l2ad_boost;
4343 size += l2arc_write_boost;
4270
4271 return (size);
4272
4273}
4274
4275static clock_t
4276l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4277{

--- 157 unchanged lines hidden ---

4435 * This buffer misses out. It may be in a stage
4436 * of eviction. Its ARC_L2_WRITING flag will be
4437 * left set, denying reads to this buffer.
4438 */
4439 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4440 continue;
4441 }
4442
4344
4345 return (size);
4346
4347}
4348
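With the defaults declared earlier (l2arc_write_max and l2arc_write_boost both start at L2ARC_WRITE_SIZE), the sanitized write size works out to:

	size = l2arc_write_max = 8 MB             (steady state)
	size = 8 MB + l2arc_write_boost = 16 MB   (while arc_warm == B_FALSE)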
4349static clock_t
4350l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
4351{

--- 157 unchanged lines hidden ---

4509 * This buffer misses out. It may be in a stage
4510 * of eviction. Its ARC_L2_WRITING flag will be
4511 * left set, denying reads to this buffer.
4512 */
4513 ARCSTAT_BUMP(arcstat_l2_writes_hdr_miss);
4514 continue;
4515 }
4516
4517 abl2 = ab->b_l2hdr;
4518
4519 /*
4520 * Release the temporary compressed buffer as soon as possible.
4521 */
4522 if (abl2->b_compress != ZIO_COMPRESS_OFF)
4523 l2arc_release_cdata_buf(ab);
4524
4443 if (zio->io_error != 0) {
4444 /*
4445 * Error - drop L2ARC entry.
4446 */
4447 list_remove(buflist, ab);
4525 if (zio->io_error != 0) {
4526 /*
4527 * Error - drop L2ARC entry.
4528 */
4529 list_remove(buflist, ab);
4448 abl2 = ab->b_l2hdr;
4530 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4449 ab->b_l2hdr = NULL;
4450 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4451 ab->b_size, 0);
4452 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4453 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4454 }
4455
4456 /*

--- 38 unchanged lines hidden ---

4495 ASSERT(buf != NULL);
4496
4497 hash_lock = HDR_LOCK(buf->b_hdr);
4498 mutex_enter(hash_lock);
4499 hdr = buf->b_hdr;
4500 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4501
4502 /*
4531 ab->b_l2hdr = NULL;
4532 trim_map_free(abl2->b_dev->l2ad_vdev, abl2->b_daddr,
4533 ab->b_size, 0);
4534 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4535 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4536 }
4537
4538 /*

--- 38 unchanged lines hidden ---

4577 ASSERT(buf != NULL);
4578
4579 hash_lock = HDR_LOCK(buf->b_hdr);
4580 mutex_enter(hash_lock);
4581 hdr = buf->b_hdr;
4582 ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
4583
4584 /*
4585 * If the buffer was compressed, decompress it first.
4586 */
4587 if (cb->l2rcb_compress != ZIO_COMPRESS_OFF)
4588 l2arc_decompress_zio(zio, hdr, cb->l2rcb_compress);
4589 ASSERT(zio->io_data != NULL);
4590
4591 /*
4503 * Check this survived the L2ARC journey.
4504 */
4505 equal = arc_cksum_equal(buf);
4506 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4507 mutex_exit(hash_lock);
4508 zio->io_private = buf;
4509 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4510 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */

--- 179 unchanged lines hidden ---

4690 ab->b_flags |= ARC_L2_EVICTED;
4691 }
4692
4693 /*
4694 * Tell ARC this no longer exists in L2ARC.
4695 */
4696 if (ab->b_l2hdr != NULL) {
4697 abl2 = ab->b_l2hdr;
4592 * Check this survived the L2ARC journey.
4593 */
4594 equal = arc_cksum_equal(buf);
4595 if (equal && zio->io_error == 0 && !HDR_L2_EVICTED(hdr)) {
4596 mutex_exit(hash_lock);
4597 zio->io_private = buf;
4598 zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
4599 zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */

--- 179 unchanged lines hidden ---

4779 ab->b_flags |= ARC_L2_EVICTED;
4780 }
4781
4782 /*
4783 * Tell ARC this no longer exists in L2ARC.
4784 */
4785 if (ab->b_l2hdr != NULL) {
4786 abl2 = ab->b_l2hdr;
4787 ARCSTAT_INCR(arcstat_l2_asize, -abl2->b_asize);
4698 ab->b_l2hdr = NULL;
4699 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4700 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4701 }
4702 list_remove(buflist, ab);
4703
4704 /*
4705 * This may have been leftover after a

--- 9 unchanged lines hidden ---

4715 dev->l2ad_evict = taddr;
4716}
4717
4718/*
4719 * Find and write ARC buffers to the L2ARC device.
4720 *
4721 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4722 * for reading until they have completed writing.
4788 ab->b_l2hdr = NULL;
4789 kmem_free(abl2, sizeof (l2arc_buf_hdr_t));
4790 ARCSTAT_INCR(arcstat_l2_size, -ab->b_size);
4791 }
4792 list_remove(buflist, ab);
4793
4794 /*
4795 * This may have been leftover after a

--- 9 unchanged lines hidden ---

4805 dev->l2ad_evict = taddr;
4806}
4807
4808/*
4809 * Find and write ARC buffers to the L2ARC device.
4810 *
4811 * An ARC_L2_WRITING flag is set so that the L2ARC buffers are not valid
4812 * for reading until they have completed writing.
4813 * The headroom_boost is an in-out parameter used to maintain headroom boost
4814 * state between calls to this function.
4815 *
4816 * Returns the number of bytes actually written (which may be smaller than
4817 * the delta by which the device hand has changed due to alignment).
4723 */
4724static uint64_t
4818 */
4819static uint64_t
4725l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
4820l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz,
4821 boolean_t *headroom_boost)
4726{
4727 arc_buf_hdr_t *ab, *ab_prev, *head;
4822{
4823 arc_buf_hdr_t *ab, *ab_prev, *head;
4728 l2arc_buf_hdr_t *hdrl2;
4729 list_t *list;
4824 list_t *list;
4730 uint64_t passed_sz, write_sz, buf_sz, headroom;
4825 uint64_t write_asize, write_psize, write_sz, headroom,
4826 buf_compress_minsz;
4731 void *buf_data;
4827 void *buf_data;
4732 kmutex_t *hash_lock, *list_lock;
4733 boolean_t have_lock, full;
4828 kmutex_t *list_lock;
4829 boolean_t full;
4734 l2arc_write_callback_t *cb;
4735 zio_t *pio, *wzio;
4736 uint64_t guid = spa_load_guid(spa);
4830 l2arc_write_callback_t *cb;
4831 zio_t *pio, *wzio;
4832 uint64_t guid = spa_load_guid(spa);
4833 const boolean_t do_headroom_boost = *headroom_boost;
4737 int try;
4738
4739 ASSERT(dev->l2ad_vdev != NULL);
4740
4834 int try;
4835
4836 ASSERT(dev->l2ad_vdev != NULL);
4837
4838 /* Lower the flag now, we might want to raise it again later. */
4839 *headroom_boost = B_FALSE;
4840
4741 pio = NULL;
4841 pio = NULL;
4742 write_sz = 0;
4842 write_sz = write_asize = write_psize = 0;
4743 full = B_FALSE;
4744 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4745 head->b_flags |= ARC_L2_WRITE_HEAD;
4746
4747 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4748 /*
4843 full = B_FALSE;
4844 head = kmem_cache_alloc(hdr_cache, KM_PUSHPAGE);
4845 head->b_flags |= ARC_L2_WRITE_HEAD;
4846
4847 ARCSTAT_BUMP(arcstat_l2_write_buffer_iter);
4848 /*
4849 * We will want to try to compress buffers that are at least 2x the
4850 * device sector size.
4851 */
4852 buf_compress_minsz = 2 << dev->l2ad_vdev->vdev_ashift;
4853
4854 /*
4749 * Copy buffers for L2ARC writing.
4750 */
4751 mutex_enter(&l2arc_buflist_mtx);
4752 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4855 * Copy buffers for L2ARC writing.
4856 */
4857 mutex_enter(&l2arc_buflist_mtx);
4858 for (try = 0; try < 2 * ARC_BUFC_NUMLISTS; try++) {
4859 uint64_t passed_sz = 0;
4860
4753 list = l2arc_list_locked(try, &list_lock);
4861 list = l2arc_list_locked(try, &list_lock);
4754 passed_sz = 0;
4755 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4756
4757 /*
4758 * L2ARC fast warmup.
4759 *
4760 * Until the ARC is warm and starts to evict, read from the
4761 * head of the ARC lists rather than the tail.
4762 */
4862 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_iter);
4863
4864 /*
4865 * L2ARC fast warmup.
4866 *
4867 * Until the ARC is warm and starts to evict, read from the
4868 * head of the ARC lists rather than the tail.
4869 */
4763 headroom = target_sz * l2arc_headroom;
4764 if (arc_warm == B_FALSE)
4765 ab = list_head(list);
4766 else
4767 ab = list_tail(list);
4768 if (ab == NULL)
4769 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
4770
4870 if (arc_warm == B_FALSE)
4871 ab = list_head(list);
4872 else
4873 ab = list_tail(list);
4874 if (ab == NULL)
4875 ARCSTAT_BUMP(arcstat_l2_write_buffer_list_null_iter);
4876
4877 headroom = target_sz * l2arc_headroom;
4878 if (do_headroom_boost)
4879 headroom = (headroom * l2arc_headroom_boost) / 100;
4880
4771 for (; ab; ab = ab_prev) {
4881 for (; ab; ab = ab_prev) {
4882 l2arc_buf_hdr_t *l2hdr;
4883 kmutex_t *hash_lock;
4884 uint64_t buf_sz;
4885
4772 if (arc_warm == B_FALSE)
4773 ab_prev = list_next(list, ab);
4774 else
4775 ab_prev = list_prev(list, ab);
4776 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
4777
4778 hash_lock = HDR_LOCK(ab);
4886 if (arc_warm == B_FALSE)
4887 ab_prev = list_next(list, ab);
4888 else
4889 ab_prev = list_prev(list, ab);
4890 ARCSTAT_INCR(arcstat_l2_write_buffer_bytes_scanned, ab->b_size);
4891
4892 hash_lock = HDR_LOCK(ab);
4779 have_lock = MUTEX_HELD(hash_lock);
4780 if (!have_lock && !mutex_tryenter(hash_lock)) {
4893 if (!mutex_tryenter(hash_lock)) {
4781 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
4782 /*
4783 * Skip this buffer rather than waiting.
4784 */
4785 continue;
4786 }
4787
4788 passed_sz += ab->b_size;

--- 33 unchanged lines hidden ---

4822 pio = zio_root(spa, l2arc_write_done, cb,
4823 ZIO_FLAG_CANFAIL);
4824 ARCSTAT_BUMP(arcstat_l2_write_pios);
4825 }
4826
4827 /*
4828 * Create and add a new L2ARC header.
4829 */
4894 ARCSTAT_BUMP(arcstat_l2_write_trylock_fail);
4895 /*
4896 * Skip this buffer rather than waiting.
4897 */
4898 continue;
4899 }
4900
4901 passed_sz += ab->b_size;

--- 33 unchanged lines hidden ---

4935 pio = zio_root(spa, l2arc_write_done, cb,
4936 ZIO_FLAG_CANFAIL);
4937 ARCSTAT_BUMP(arcstat_l2_write_pios);
4938 }
4939
4940 /*
4941 * Create and add a new L2ARC header.
4942 */
4830 hdrl2 = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4831 hdrl2->b_dev = dev;
4832 hdrl2->b_daddr = dev->l2ad_hand;
4833
4943 l2hdr = kmem_zalloc(sizeof (l2arc_buf_hdr_t), KM_SLEEP);
4944 l2hdr->b_dev = dev;
4834 ab->b_flags |= ARC_L2_WRITING;
4945 ab->b_flags |= ARC_L2_WRITING;
4835 ab->b_l2hdr = hdrl2;
4836 list_insert_head(dev->l2ad_buflist, ab);
4837 buf_data = ab->b_buf->b_data;
4946
4947 /*
4948 * Temporarily stash the data buffer in b_tmp_cdata.
4949 * The subsequent write step will pick it up from
 4950 * there. This is because we can't access ab->b_buf
4951 * without holding the hash_lock, which we in turn
4952 * can't access without holding the ARC list locks
4953 * (which we want to avoid during compression/writing).
4954 */
4955 l2hdr->b_compress = ZIO_COMPRESS_OFF;
4956 l2hdr->b_asize = ab->b_size;
4957 l2hdr->b_tmp_cdata = ab->b_buf->b_data;
4958
4838 buf_sz = ab->b_size;
4959 buf_sz = ab->b_size;
4960 ab->b_l2hdr = l2hdr;
4839
4961
4962 list_insert_head(dev->l2ad_buflist, ab);
4963
4840 /*
4841 * Compute and store the buffer cksum before
4842 * writing. On debug the cksum is verified first.
4843 */
4844 arc_cksum_verify(ab->b_buf);
4845 arc_cksum_compute(ab->b_buf, B_TRUE);
4846
4847 mutex_exit(hash_lock);
4848
4964 /*
4965 * Compute and store the buffer cksum before
4966 * writing. On debug the cksum is verified first.
4967 */
4968 arc_cksum_verify(ab->b_buf);
4969 arc_cksum_compute(ab->b_buf, B_TRUE);
4970
4971 mutex_exit(hash_lock);
4972
4973 write_sz += buf_sz;
4974 }
4975
4976 mutex_exit(list_lock);
4977
4978 if (full == B_TRUE)
4979 break;
4980 }
4981
4982 /* No buffers selected for writing? */
4983 if (pio == NULL) {
4984 ASSERT0(write_sz);
4985 mutex_exit(&l2arc_buflist_mtx);
4986 kmem_cache_free(hdr_cache, head);
4987 return (0);
4988 }
4989
4990 /*
4991 * Now start writing the buffers. We're starting at the write head
4992 * and work backwards, retracing the course of the buffer selector
4993 * loop above.
4994 */
4995 for (ab = list_prev(dev->l2ad_buflist, head); ab;
4996 ab = list_prev(dev->l2ad_buflist, ab)) {
4997 l2arc_buf_hdr_t *l2hdr;
4998 uint64_t buf_sz;
4999
5000 /*
5001 * We shouldn't need to lock the buffer here, since we flagged
5002 * it as ARC_L2_WRITING in the previous step, but we must take
5003 * care to only access its L2 cache parameters. In particular,
5004 * ab->b_buf may be invalid by now due to ARC eviction.
5005 */
5006 l2hdr = ab->b_l2hdr;
5007 l2hdr->b_daddr = dev->l2ad_hand;
5008
5009 if ((ab->b_flags & ARC_L2COMPRESS) &&
5010 l2hdr->b_asize >= buf_compress_minsz) {
5011 if (l2arc_compress_buf(l2hdr)) {
5012 /*
5013 * If compression succeeded, enable headroom
5014 * boost on the next scan cycle.
5015 */
5016 *headroom_boost = B_TRUE;
5017 }
5018 }
5019
5020 /*
5021 * Pick up the buffer data we had previously stashed away
5022 * (and now potentially also compressed).
5023 */
5024 buf_data = l2hdr->b_tmp_cdata;
5025 buf_sz = l2hdr->b_asize;
5026
5027 /* Compression may have squashed the buffer to zero length. */
5028 if (buf_sz != 0) {
5029 uint64_t buf_p_sz;
5030
4849 wzio = zio_write_phys(pio, dev->l2ad_vdev,
4850 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
4851 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
4852 ZIO_FLAG_CANFAIL, B_FALSE);
4853
4854 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
4855 zio_t *, wzio);
4856 (void) zio_nowait(wzio);
4857
5031 wzio = zio_write_phys(pio, dev->l2ad_vdev,
5032 dev->l2ad_hand, buf_sz, buf_data, ZIO_CHECKSUM_OFF,
5033 NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
5034 ZIO_FLAG_CANFAIL, B_FALSE);
5035
5036 DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
5037 zio_t *, wzio);
5038 (void) zio_nowait(wzio);
5039
5040 write_asize += buf_sz;
4858 /*
4859 * Keep the clock hand suitably device-aligned.
4860 */
5041 /*
5042 * Keep the clock hand suitably device-aligned.
5043 */
4861 buf_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
4862
4863 write_sz += buf_sz;
4864 dev->l2ad_hand += buf_sz;
5044 buf_p_sz = vdev_psize_to_asize(dev->l2ad_vdev, buf_sz);
5045 write_psize += buf_p_sz;
5046 dev->l2ad_hand += buf_p_sz;
4865 }
5047 }
4866
4867 mutex_exit(list_lock);
4868
4869 if (full == B_TRUE)
4870 break;
4871 }
5048 }
5049
4872 mutex_exit(&l2arc_buflist_mtx);
4873
5050 mutex_exit(&l2arc_buflist_mtx);
5051
4874 if (pio == NULL) {
4875 ASSERT0(write_sz);
4876 kmem_cache_free(hdr_cache, head);
4877 return (0);
4878 }
4879
4880 ASSERT3U(write_sz, <=, target_sz);
5052 ASSERT3U(write_asize, <=, target_sz);
4881 ARCSTAT_BUMP(arcstat_l2_writes_sent);
5053 ARCSTAT_BUMP(arcstat_l2_writes_sent);
4882 ARCSTAT_INCR(arcstat_l2_write_bytes, write_sz);
5054 ARCSTAT_INCR(arcstat_l2_write_bytes, write_asize);
4883 ARCSTAT_INCR(arcstat_l2_size, write_sz);
5055 ARCSTAT_INCR(arcstat_l2_size, write_sz);
4884 vdev_space_update(dev->l2ad_vdev, write_sz, 0, 0);
5056 ARCSTAT_INCR(arcstat_l2_asize, write_asize);
5057 vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
4885
4886 /*
4887 * Bump device hand to the device start if it is approaching the end.
4888 * l2arc_evict() will already have evicted ahead for this case.
4889 */
4890 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
4891 vdev_space_update(dev->l2ad_vdev,
4892 dev->l2ad_end - dev->l2ad_hand, 0, 0);
4893 dev->l2ad_hand = dev->l2ad_start;
4894 dev->l2ad_evict = dev->l2ad_start;
4895 dev->l2ad_first = B_FALSE;
4896 }
4897
4898 dev->l2ad_writing = B_TRUE;
4899 (void) zio_wait(pio);
4900 dev->l2ad_writing = B_FALSE;
4901
5058
5059 /*
5060 * Bump device hand to the device start if it is approaching the end.
5061 * l2arc_evict() will already have evicted ahead for this case.
5062 */
5063 if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
5064 vdev_space_update(dev->l2ad_vdev,
5065 dev->l2ad_end - dev->l2ad_hand, 0, 0);
5066 dev->l2ad_hand = dev->l2ad_start;
5067 dev->l2ad_evict = dev->l2ad_start;
5068 dev->l2ad_first = B_FALSE;
5069 }
5070
5071 dev->l2ad_writing = B_TRUE;
5072 (void) zio_wait(pio);
5073 dev->l2ad_writing = B_FALSE;
5074
4902 return (write_sz);
5075 return (write_asize);
4903}
4904
4905/*
5076}
5077
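The old single write_sz counter is now split three ways. A worked example, assuming a 128 KB buffer that LZ4 compresses to 45 KB on a vdev with 4 KB sectors (sizes illustrative):

	write_sz    += 128 KB  /* uncompressed; feeds arcstat_l2_size */
	write_asize +=  45 KB  /* issued to the device; feeds l2_write_bytes
	                          and l2_asize, and is the return value */
	write_psize +=  48 KB  /* vdev_psize_to_asize(45 KB) rounded up to the
	                          sector size; advances l2ad_hand and the vdev
	                          space accounting */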
5078/*
5079 * Compresses an L2ARC buffer.
5080 * The data to be compressed must be prefilled in l2hdr->b_tmp_cdata and its
5081 * size in l2hdr->b_asize. This routine tries to compress the data and
5082 * depending on the compression result there are three possible outcomes:
5083 * *) The buffer was incompressible. The original l2hdr contents were left
5084 * untouched and are ready for writing to an L2 device.
5085 * *) The buffer was all-zeros, so there is no need to write it to an L2
5086 * device. To indicate this situation b_tmp_cdata is NULL'ed, b_asize is
5087 * set to zero and b_compress is set to ZIO_COMPRESS_EMPTY.
5088 * *) Compression succeeded and b_tmp_cdata was replaced with a temporary
5089 * data buffer which holds the compressed data to be written, and b_asize
5090 * tells us how much data there is. b_compress is set to the appropriate
5091 * compression algorithm. Once writing is done, invoke
5092 * l2arc_release_cdata_buf on this l2hdr to free this temporary buffer.
5093 *
5094 * Returns B_TRUE if compression succeeded, or B_FALSE if it didn't (the
5095 * buffer was incompressible).
5096 */
5097static boolean_t
5098l2arc_compress_buf(l2arc_buf_hdr_t *l2hdr)
5099{
5100 void *cdata;
5101 size_t csize, len;
5102
5103 ASSERT(l2hdr->b_compress == ZIO_COMPRESS_OFF);
5104 ASSERT(l2hdr->b_tmp_cdata != NULL);
5105
5106 len = l2hdr->b_asize;
5107 cdata = zio_data_buf_alloc(len);
5108 csize = zio_compress_data(ZIO_COMPRESS_LZ4, l2hdr->b_tmp_cdata,
5109 cdata, l2hdr->b_asize);
5110
5111 if (csize == 0) {
5112 /* zero block, indicate that there's nothing to write */
5113 zio_data_buf_free(cdata, len);
5114 l2hdr->b_compress = ZIO_COMPRESS_EMPTY;
5115 l2hdr->b_asize = 0;
5116 l2hdr->b_tmp_cdata = NULL;
5117 ARCSTAT_BUMP(arcstat_l2_compress_zeros);
5118 return (B_TRUE);
5119 } else if (csize > 0 && csize < len) {
5120 /*
5121 * Compression succeeded, we'll keep the cdata around for
5122 * writing and release it afterwards.
5123 */
5124 l2hdr->b_compress = ZIO_COMPRESS_LZ4;
5125 l2hdr->b_asize = csize;
5126 l2hdr->b_tmp_cdata = cdata;
5127 ARCSTAT_BUMP(arcstat_l2_compress_successes);
5128 return (B_TRUE);
5129 } else {
5130 /*
5131 * Compression failed, release the compressed buffer.
5132 * l2hdr will be left unmodified.
5133 */
5134 zio_data_buf_free(cdata, len);
5135 ARCSTAT_BUMP(arcstat_l2_compress_failures);
5136 return (B_FALSE);
5137 }
5138}
5139
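A caller's-eye sketch of the three outcomes documented below; the staging mirrors what l2arc_write_buffers() sets up, and the 8192-byte size is hypothetical:

	/* staged: b_compress = ZIO_COMPRESS_OFF, b_asize = 8192, and */
	/* b_tmp_cdata pointing at the uncompressed ARC buffer data.  */
	if (!l2arc_compress_buf(l2hdr)) {
		/* Incompressible: l2hdr untouched, 8192 bytes written as-is. */
	} else if (l2hdr->b_compress == ZIO_COMPRESS_EMPTY) {
		/* All zeros: b_asize == 0, b_tmp_cdata == NULL,
		   nothing hits the device. */
	} else {
		/* LZ4: b_tmp_cdata is a temporary b_asize-byte (< 8192) copy;
		   l2arc_release_cdata_buf() frees it after the write. */
	}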
5140/*
5141 * Decompresses a zio read back from an l2arc device. On success, the
5142 * underlying zio's io_data buffer is overwritten by the uncompressed
5143 * version. On decompression error (corrupt compressed stream), the
5144 * zio->io_error value is set to signal an I/O error.
5145 *
5146 * Please note that the compressed data stream is not checksummed, so
5147 * if the underlying device is experiencing data corruption, we may feed
 5148 * corrupt data to the decompressor; the decompressor therefore needs
 5149 * to be able to handle this situation gracefully (LZ4 does).
5150 */
5151static void
5152l2arc_decompress_zio(zio_t *zio, arc_buf_hdr_t *hdr, enum zio_compress c)
5153{
5154 ASSERT(L2ARC_IS_VALID_COMPRESS(c));
5155
5156 if (zio->io_error != 0) {
5157 /*
 5158 * An io error has occurred; just restore the original io
5159 * size in preparation for a main pool read.
5160 */
5161 zio->io_orig_size = zio->io_size = hdr->b_size;
5162 return;
5163 }
5164
5165 if (c == ZIO_COMPRESS_EMPTY) {
5166 /*
5167 * An empty buffer results in a null zio, which means we
5168 * need to fill its io_data after we're done restoring the
5169 * buffer's contents.
5170 */
5171 ASSERT(hdr->b_buf != NULL);
5172 bzero(hdr->b_buf->b_data, hdr->b_size);
5173 zio->io_data = zio->io_orig_data = hdr->b_buf->b_data;
5174 } else {
5175 ASSERT(zio->io_data != NULL);
5176 /*
5177 * We copy the compressed data from the start of the arc buffer
5178 * (the zio_read will have pulled in only what we need, the
5179 * rest is garbage which we will overwrite at decompression)
5180 * and then decompress back to the ARC data buffer. This way we
5181 * can minimize copying by simply decompressing back over the
5182 * original compressed data (rather than decompressing to an
5183 * aux buffer and then copying back the uncompressed buffer,
5184 * which is likely to be much larger).
5185 */
5186 uint64_t csize;
5187 void *cdata;
5188
5189 csize = zio->io_size;
5190 cdata = zio_data_buf_alloc(csize);
5191 bcopy(zio->io_data, cdata, csize);
5192 if (zio_decompress_data(c, cdata, zio->io_data, csize,
5193 hdr->b_size) != 0)
5194 zio->io_error = EIO;
5195 zio_data_buf_free(cdata, csize);
5196 }
5197
5198 /* Restore the expected uncompressed IO size. */
5199 zio->io_orig_size = zio->io_size = hdr->b_size;
5200}
5201
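The copy-then-overwrite scheme above, in miniature (sizes hypothetical): a 128 KB logical buffer stored as a 45 KB LZ4 stream comes back from zio_read_phys() occupying the first 45 KB of the 128 KB ARC buffer. Those 45 KB are copied out to a 45 KB scratch allocation and decompressed back over the full 128 KB buffer, so the aux allocation is only the compressed size rather than a second full-size buffer.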
5202/*
5203 * Releases the temporary b_tmp_cdata buffer in an l2arc header structure.
5204 * This buffer serves as a temporary holder of compressed data while
5205 * the buffer entry is being written to an l2arc device. Once that is
5206 * done, we can dispose of it.
5207 */
5208static void
5209l2arc_release_cdata_buf(arc_buf_hdr_t *ab)
5210{
5211 l2arc_buf_hdr_t *l2hdr = ab->b_l2hdr;
5212
5213 if (l2hdr->b_compress == ZIO_COMPRESS_LZ4) {
5214 /*
5215 * If the data was compressed, then we've allocated a
5216 * temporary buffer for it, so now we need to release it.
5217 */
5218 ASSERT(l2hdr->b_tmp_cdata != NULL);
5219 zio_data_buf_free(l2hdr->b_tmp_cdata, ab->b_size);
5220 }
5221 l2hdr->b_tmp_cdata = NULL;
5222}
5223
5224/*
4906 * This thread feeds the L2ARC at regular intervals. This is the beating
4907 * heart of the L2ARC.
4908 */
4909static void
4910l2arc_feed_thread(void *dummy __unused)
4911{
4912 callb_cpr_t cpr;
4913 l2arc_dev_t *dev;
4914 spa_t *spa;
4915 uint64_t size, wrote;
4916 clock_t begin, next = ddi_get_lbolt();
5225 * This thread feeds the L2ARC at regular intervals. This is the beating
5226 * heart of the L2ARC.
5227 */
5228static void
5229l2arc_feed_thread(void *dummy __unused)
5230{
5231 callb_cpr_t cpr;
5232 l2arc_dev_t *dev;
5233 spa_t *spa;
5234 uint64_t size, wrote;
5235 clock_t begin, next = ddi_get_lbolt();
5236 boolean_t headroom_boost = B_FALSE;
4917
4918 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
4919
4920 mutex_enter(&l2arc_feed_thr_lock);
4921
4922 while (l2arc_thread_exit == 0) {
4923 CALLB_CPR_SAFE_BEGIN(&cpr);
4924 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,

--- 44 unchanged lines hidden (view full) ---

4969 if (arc_reclaim_needed()) {
4970 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
4971 spa_config_exit(spa, SCL_L2ARC, dev);
4972 continue;
4973 }
4974
4975 ARCSTAT_BUMP(arcstat_l2_feeds);
4976
5237
5238 CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
5239
5240 mutex_enter(&l2arc_feed_thr_lock);
5241
5242 while (l2arc_thread_exit == 0) {
5243 CALLB_CPR_SAFE_BEGIN(&cpr);
5244 (void) cv_timedwait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock,

--- 44 unchanged lines hidden (view full) ---

5289 if (arc_reclaim_needed()) {
5290 ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
5291 spa_config_exit(spa, SCL_L2ARC, dev);
5292 continue;
5293 }
5294
5295 ARCSTAT_BUMP(arcstat_l2_feeds);
5296
4977 size = l2arc_write_size(dev);
5297 size = l2arc_write_size();
4978
4979 /*
4980 * Evict L2ARC buffers that will be overwritten.
4981 */
4982 l2arc_evict(dev, size, B_FALSE);
4983
4984 /*
4985 * Write ARC buffers.
4986 */
5298
5299 /*
5300 * Evict L2ARC buffers that will be overwritten.
5301 */
5302 l2arc_evict(dev, size, B_FALSE);
5303
5304 /*
5305 * Write ARC buffers.
5306 */
4987 wrote = l2arc_write_buffers(spa, dev, size);
5307 wrote = l2arc_write_buffers(spa, dev, size, &headroom_boost);
4988
4989 /*
4990 * Calculate interval between writes.
4991 */
4992 next = l2arc_write_interval(begin, size, wrote);
4993 spa_config_exit(spa, SCL_L2ARC, dev);
4994 }
4995

--- 31 unchanged lines hidden ---

5027 ASSERT(!l2arc_vdev_present(vd));
5028
5029 /*
5030 * Create a new l2arc device entry.
5031 */
5032 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5033 adddev->l2ad_spa = spa;
5034 adddev->l2ad_vdev = vd;
5308
5309 /*
5310 * Calculate interval between writes.
5311 */
5312 next = l2arc_write_interval(begin, size, wrote);
5313 spa_config_exit(spa, SCL_L2ARC, dev);
5314 }
5315

--- 31 unchanged lines hidden ---

5347 ASSERT(!l2arc_vdev_present(vd));
5348
5349 /*
5350 * Create a new l2arc device entry.
5351 */
5352 adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
5353 adddev->l2ad_spa = spa;
5354 adddev->l2ad_vdev = vd;
5035 adddev->l2ad_write = l2arc_write_max;
5036 adddev->l2ad_boost = l2arc_write_boost;
5037 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5038 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5039 adddev->l2ad_hand = adddev->l2ad_start;
5040 adddev->l2ad_evict = adddev->l2ad_start;
5041 adddev->l2ad_first = B_TRUE;
5042 adddev->l2ad_writing = B_FALSE;
5355 adddev->l2ad_start = VDEV_LABEL_START_SIZE;
5356 adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
5357 adddev->l2ad_hand = adddev->l2ad_start;
5358 adddev->l2ad_evict = adddev->l2ad_start;
5359 adddev->l2ad_first = B_TRUE;
5360 adddev->l2ad_writing = B_FALSE;
5043 ASSERT3U(adddev->l2ad_write, >, 0);
5044
5045 /*
5046 * This is a list of all ARC buffers that are still valid on the
5047 * device.
5048 */
5049 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5050 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5051 offsetof(arc_buf_hdr_t, b_l2node));

--- 116 unchanged lines hidden ---
5361
5362 /*
5363 * This is a list of all ARC buffers that are still valid on the
5364 * device.
5365 */
5366 adddev->l2ad_buflist = kmem_zalloc(sizeof (list_t), KM_SLEEP);
5367 list_create(adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
5368 offsetof(arc_buf_hdr_t, b_l2node));

--- 116 unchanged lines hidden ---