trim_map.c revision 277818
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/trim_map.h>
#include <sys/time.h>

/*
 * Calculate the zio end, rounding the size up to the vdev's ashift as
 * zio_vdev_io_start() would do.
 *
 * This makes free range consolidation much more effective
 * than it would otherwise be, and ensures that entire
 * blocks are invalidated by writes.
 */
#define	TRIM_ZIO_END(vd, offset, size)	(offset +		\
 	P2ROUNDUP(size, 1ULL << vd->vdev_top->vdev_ashift))
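
/*
 * Illustrative example (not part of the original source): with
 * vdev_ashift = 12 (4 KiB blocks), a 0x1234-byte free at offset 0x10000
 * yields TRIM_ZIO_END(vd, 0x10000, 0x1234) =
 * 0x10000 + P2ROUNDUP(0x1234, 0x1000) = 0x10000 + 0x2000 = 0x12000,
 * extending the range to cover whole 4 KiB blocks.
 */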

/* Maximum segment size for ATA TRIM. */
#define TRIM_MAP_SIZE_FACTOR	(512 << 16)

#define TRIM_MAP_SEGS(size)	(1 + (size) / TRIM_MAP_SIZE_FACTOR)
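
/*
 * Illustrative example (not part of the original source):
 * TRIM_MAP_SIZE_FACTOR is 32 MiB, so a 64 MiB free range is accounted
 * as 1 + (64 MiB / 32 MiB) = 3 pending segments, a conservative upper
 * bound on the number of ranges the device may be asked to TRIM.
 */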

#define TRIM_MAP_ADD(tm, ts)	do {				\
	list_insert_tail(&(tm)->tm_head, (ts));			\
	(tm)->tm_pending += TRIM_MAP_SEGS((ts)->ts_end - (ts)->ts_start); \
} while (0)

#define TRIM_MAP_REM(tm, ts)	do {				\
	list_remove(&(tm)->tm_head, (ts));			\
	(tm)->tm_pending -= TRIM_MAP_SEGS((ts)->ts_end - (ts)->ts_start); \
} while (0)

typedef struct trim_map {
	list_t		tm_head;		/* List of segments sorted by txg. */
	avl_tree_t	tm_queued_frees;	/* AVL tree of segments waiting for TRIM. */
	avl_tree_t	tm_inflight_frees;	/* AVL tree of in-flight TRIMs. */
	avl_tree_t	tm_inflight_writes;	/* AVL tree of in-flight writes. */
	list_t		tm_pending_writes;	/* Writes blocked on in-flight frees. */
	kmutex_t	tm_lock;
	uint64_t	tm_pending;		/* Count of pending TRIM segments. */
} trim_map_t;
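
/*
 * Note: each queued segment is linked both into tm_head (ordered by
 * creation txg/time, for aging) and into tm_queued_frees (ordered by
 * offset, so overlapping frees and writes can find it efficiently).
 */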

typedef struct trim_seg {
	avl_node_t	ts_node;	/* AVL node. */
	list_node_t	ts_next;	/* List element. */
	uint64_t	ts_start;	/* Starting offset of this segment. */
	uint64_t	ts_end;		/* Ending offset (non-inclusive). */
	uint64_t	ts_txg;		/* Segment creation txg. */
	hrtime_t	ts_time;	/* Segment creation time. */
} trim_seg_t;

extern boolean_t zfs_trim_enabled;

static u_int trim_txg_delay = 32;	/* Keep deleted data for up to 32 TXGs */
static u_int trim_timeout = 30;		/* Keep deleted data for up to 30s */
static u_int trim_max_interval = 1;	/* 1s delay between TRIM queue passes */
static u_int trim_vdev_max_pending = 10000; /* Keep up to 10K segments */

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM");

TUNABLE_INT("vfs.zfs.trim.txg_delay", &trim_txg_delay);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay,
    0, "Delay TRIMs by up to this many TXGs");

TUNABLE_INT("vfs.zfs.trim.timeout", &trim_timeout);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0,
    "Delay TRIMs by up to this many seconds");

TUNABLE_INT("vfs.zfs.trim.max_interval", &trim_max_interval);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN,
    &trim_max_interval, 0,
    "Maximum interval between TRIM queue processing (seconds)");

SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_INT("vfs.zfs.vdev.trim_max_pending", &trim_vdev_max_pending);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN,
    &trim_vdev_max_pending, 0,
    "Maximum pending TRIM segments for a vdev");

static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd);

static int
trim_map_seg_compare(const void *x1, const void *x2)
{
	const trim_seg_t *s1 = x1;
	const trim_seg_t *s2 = x2;

	if (s1->ts_start < s2->ts_start) {
		if (s1->ts_end > s2->ts_start)
			return (0);
		return (-1);
	}
	if (s1->ts_start > s2->ts_start) {
		if (s1->ts_start < s2->ts_end)
			return (0);
		return (1);
	}
	return (0);
}

static int
trim_map_zio_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset) {
		if (z1->io_offset + z1->io_size > z2->io_offset)
			return (0);
		return (-1);
	}
	if (z1->io_offset > z2->io_offset) {
		if (z1->io_offset < z2->io_offset + z2->io_size)
			return (0);
		return (1);
	}
	return (0);
}
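
/*
 * Note: both comparators treat overlapping ranges as equal, so
 * avl_find() on these trees locates an entry that overlaps the search
 * range, if any, rather than only an exact match.
 */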

void
trim_map_create(vdev_t *vd)
{
	trim_map_t *tm;

	ASSERT(zfs_trim_enabled && !vd->vdev_notrim &&
		vd->vdev_ops->vdev_op_leaf);

	tm = kmem_zalloc(sizeof (*tm), KM_SLEEP);
	mutex_init(&tm->tm_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tm->tm_head, sizeof (trim_seg_t),
	    offsetof(trim_seg_t, ts_next));
	list_create(&tm->tm_pending_writes, sizeof (zio_t),
	    offsetof(zio_t, io_trim_link));
	avl_create(&tm->tm_queued_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_writes, trim_map_zio_compare,
	    sizeof (zio_t), offsetof(zio_t, io_trim_node));
	vd->vdev_trimmap = tm;
}

void
trim_map_destroy(vdev_t *vd)
{
	trim_map_t *tm;
	trim_seg_t *ts;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = vd->vdev_trimmap;
	if (tm == NULL)
		return;

	/*
	 * We may have been called before trim_map_vdev_commit_done()
	 * had a chance to run, so do it now to prune the remaining
	 * inflight frees.
	 */
	trim_map_vdev_commit_done(vd->vdev_spa, vd);

	mutex_enter(&tm->tm_lock);
	while ((ts = list_head(&tm->tm_head)) != NULL) {
		avl_remove(&tm->tm_queued_frees, ts);
		TRIM_MAP_REM(tm, ts);
		kmem_free(ts, sizeof (*ts));
	}
	mutex_exit(&tm->tm_lock);

	avl_destroy(&tm->tm_queued_frees);
	avl_destroy(&tm->tm_inflight_frees);
	avl_destroy(&tm->tm_inflight_writes);
	list_destroy(&tm->tm_pending_writes);
	list_destroy(&tm->tm_head);
	mutex_destroy(&tm->tm_lock);
	kmem_free(tm, sizeof (*tm));
	vd->vdev_trimmap = NULL;
}

static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;
	hrtime_t time;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	time = gethrtime();
	tsearch.ts_start = start;
	tsearch.ts_end = end;

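	/*
	 * If the new range overlaps a segment that is already queued,
	 * only the pieces extending beyond it need to be added; the
	 * overlapping part is queued already.
	 */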
	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);

	merge_before = (ts_before != NULL && ts_before->ts_end == start);
	merge_after = (ts_after != NULL && ts_after->ts_start == end);

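	/*
	 * Merge with a neighboring segment when it abuts the new range
	 * exactly; otherwise insert a fresh segment.  A merged segment
	 * is re-added to the tail of tm_head with the new txg and time,
	 * which restarts its aging.
	 */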
	if (merge_before && merge_after) {
		avl_remove(&tm->tm_queued_frees, ts_before);
		TRIM_MAP_REM(tm, ts_before);
		TRIM_MAP_REM(tm, ts_after);
		ts_after->ts_start = ts_before->ts_start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		TRIM_MAP_ADD(tm, ts_after);
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		TRIM_MAP_REM(tm, ts_before);
		ts_before->ts_end = end;
		ts_before->ts_txg = txg;
		ts_before->ts_time = time;
		TRIM_MAP_ADD(tm, ts_before);
	} else if (merge_after) {
		TRIM_MAP_REM(tm, ts_after);
		ts_after->ts_start = start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		TRIM_MAP_ADD(tm, ts_after);
	} else {
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		ts->ts_time = time;
		avl_insert(&tm->tm_queued_frees, ts, where);
		TRIM_MAP_ADD(tm, ts);
	}
}

static void
trim_map_segment_remove(trim_map_t *tm, trim_seg_t *ts, uint64_t start,
    uint64_t end)
{
	trim_seg_t *nts;
	boolean_t left_over, right_over;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	left_over = (ts->ts_start < start);
	right_over = (ts->ts_end > end);

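	/*
	 * Cut [start, end) out of the queued segment: keep the left
	 * and/or right remainder, splitting the segment in two when the
	 * removed range lies strictly inside it.
	 */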
	TRIM_MAP_REM(tm, ts);
	if (left_over && right_over) {
		nts = kmem_alloc(sizeof (*nts), KM_SLEEP);
		nts->ts_start = end;
		nts->ts_end = ts->ts_end;
		nts->ts_txg = ts->ts_txg;
		nts->ts_time = ts->ts_time;
		ts->ts_end = start;
		avl_insert_here(&tm->tm_queued_frees, nts, ts, AVL_AFTER);
		TRIM_MAP_ADD(tm, ts);
		TRIM_MAP_ADD(tm, nts);
	} else if (left_over) {
		ts->ts_end = start;
		TRIM_MAP_ADD(tm, ts);
	} else if (right_over) {
		ts->ts_start = end;
		TRIM_MAP_ADD(tm, ts);
	} else {
		avl_remove(&tm->tm_queued_frees, ts);
		kmem_free(ts, sizeof (*ts));
	}
}

static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	zio_t zsearch, *zs;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	zsearch.io_offset = start;
	zsearch.io_size = end - start;

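	/*
	 * If the range collides with an in-flight write, skip the
	 * colliding part and recurse on the pieces on either side of
	 * that write.
	 */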
	zs = avl_find(&tm->tm_inflight_writes, &zsearch, NULL);
	if (zs == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}
	if (start < zs->io_offset)
		trim_map_free_locked(tm, start, zs->io_offset, txg);
	if (zs->io_offset + zs->io_size < end)
		trim_map_free_locked(tm, zs->io_offset + zs->io_size, end, txg);
}

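/*
 * Record a freed range for a later TRIM, with the end rounded up to the
 * vdev's ashift so whole blocks are covered.
 */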
void
trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	trim_map_t *tm = vd->vdev_trimmap;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	trim_map_free_locked(tm, offset, TRIM_ZIO_END(vd, offset, size), txg);
	mutex_exit(&tm->tm_lock);
}

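/*
 * Called before a write is issued to a leaf vdev.  Returns B_FALSE if
 * the write overlaps an in-flight TRIM, in which case the zio is parked
 * on tm_pending_writes and reissued by trim_map_vdev_commit_done().
 */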
boolean_t
trim_map_write_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t tsearch, *ts;
	boolean_t left_over, right_over;
	uint64_t start, end;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return (B_TRUE);

	start = zio->io_offset;
	end = TRIM_ZIO_END(zio->io_vd, start, zio->io_size);
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	mutex_enter(&tm->tm_lock);

	/*
	 * Check for colliding in-flight frees.
	 */
	ts = avl_find(&tm->tm_inflight_frees, &tsearch, NULL);
	if (ts != NULL) {
		list_insert_tail(&tm->tm_pending_writes, zio);
		mutex_exit(&tm->tm_lock);
		return (B_FALSE);
	}

	ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
	if (ts != NULL) {
		/*
		 * Loop until all overlapping segments are removed.
		 */
		do {
			trim_map_segment_remove(tm, ts, start, end);
			ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
		} while (ts != NULL);
	}
	avl_add(&tm->tm_inflight_writes, zio);

	mutex_exit(&tm->tm_lock);

	return (B_TRUE);
}

void
trim_map_write_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;

	/*
	 * Don't check for vdev_notrim, since the write could have
	 * started before vdev_notrim was set.
	 */
	if (!zfs_trim_enabled || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	/*
	 * Don't fail if the write isn't in the tree, since the write
	 * could have started after vdev_notrim was set.
	 */
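	/*
	 * A linked node has a parent or is the root (its child pointers
	 * alone may be NULL), while an unlinked, zeroed node matches
	 * none of these tests, so membership can be checked without an
	 * extra AVL lookup.
	 */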
	if (zio->io_trim_node.avl_child[0] ||
	    zio->io_trim_node.avl_child[1] ||
	    AVL_XPARENT(&zio->io_trim_node) ||
	    tm->tm_inflight_writes.avl_root == &zio->io_trim_node)
		avl_remove(&tm->tm_inflight_writes, zio);
	mutex_exit(&tm->tm_lock);
}

/*
 * Return the oldest segment (the one with the lowest txg / time) or NULL if:
 * 1. The list is empty
 * 2. The first element's txg is greater than txgsafe
 * 3. The first element's txg is greater than the txg argument, its time
 *    is greater than the time argument, and force is not set
 */
static trim_seg_t *
trim_map_first(trim_map_t *tm, uint64_t txg, uint64_t txgsafe, hrtime_t time,
    boolean_t force)
{
	trim_seg_t *ts;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(txgsafe >= txg);

	ts = list_head(&tm->tm_head);
	if (ts != NULL && ts->ts_txg <= txgsafe &&
	    (ts->ts_txg <= txg || ts->ts_time <= time || force))
		return (ts);
	return (NULL);
}

static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, offset, txgtarget, txgsafe;
	int64_t hard, soft;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
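	/*
	 * "hard" is the number of segments we are forced to issue this
	 * pass to bring tm_pending back under trim_vdev_max_pending (a
	 * quarter of the excess); "soft" adds enough on top of that to
	 * drain the backlog over roughly trim_timeout passes.
	 */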
	hard = 0;
	if (tm->tm_pending > trim_vdev_max_pending)
		hard = (tm->tm_pending - trim_vdev_max_pending) / 4;
	soft = P2ROUNDUP(hard + tm->tm_pending / trim_timeout + 1, 64);
	/* Issue eligible frees until the soft quota is exhausted. */
	while (soft > 0 &&
	    (ts = trim_map_first(tm, txgtarget, txgsafe, timelimit, hard > 0))
	    != NULL) {
		TRIM_MAP_REM(tm, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		offset = ts->ts_start;
		/*
		 * We drop the lock while we call zio_nowait as the IO
		 * scheduler can result in a different IO being run e.g.
		 * a write which would result in a recursive lock.
		 */
		mutex_exit(&tm->tm_lock);

		zio_nowait(zio_trim(zio, spa, vd, offset, size));

		soft -= TRIM_MAP_SEGS(size);
		hard -= TRIM_MAP_SEGS(size);
		mutex_enter(&tm->tm_lock);
	}
	mutex_exit(&tm->tm_lock);
}

static void
trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	list_t pending_writes;
	zio_t *zio;
	uint64_t start, size;
	void *cookie;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	if (!avl_is_empty(&tm->tm_inflight_frees)) {
		cookie = NULL;
		while ((ts = avl_destroy_nodes(&tm->tm_inflight_frees,
		    &cookie)) != NULL) {
			kmem_free(ts, sizeof (*ts));
		}
	}
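	/*
	 * Move the blocked writes to a local list so they can be
	 * reissued without holding tm_lock.
	 */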
	list_create(&pending_writes, sizeof (zio_t), offsetof(zio_t,
	    io_trim_link));
	list_move_tail(&pending_writes, &tm->tm_pending_writes);
	mutex_exit(&tm->tm_lock);

	while ((zio = list_remove_head(&pending_writes)) != NULL) {
		zio_vdev_io_reissue(zio);
		zio_execute(zio);
	}
	list_destroy(&pending_writes);
}

static void
trim_map_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit(spa, zio, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit(spa, zio, vd->vdev_child[c]);
	}
}

static void
trim_map_commit_done(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit_done(spa, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit_done(spa, vd->vdev_child[c]);
	}
}

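/*
 * Per-pool worker thread: wake at least every trim_max_interval
 * seconds, issue eligible TRIMs for the whole vdev tree under a single
 * root zio, wait for them, then reissue any writes they had blocked.
 * The thread exits once trim_thread_destroy() clears spa_trim_thread.
 */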
static void
trim_thread(void *arg)
{
	spa_t *spa = arg;
	zio_t *zio;

#ifdef _KERNEL
	(void) snprintf(curthread->td_name, sizeof(curthread->td_name),
	    "trim %s", spa_name(spa));
#endif

	for (;;) {
		mutex_enter(&spa->spa_trim_lock);
		if (spa->spa_trim_thread == NULL) {
			spa->spa_trim_thread = curthread;
			cv_signal(&spa->spa_trim_cv);
			mutex_exit(&spa->spa_trim_lock);
			thread_exit();
		}

		(void) cv_timedwait(&spa->spa_trim_cv, &spa->spa_trim_lock,
		    hz * trim_max_interval);
		mutex_exit(&spa->spa_trim_lock);

		zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		trim_map_commit(spa, zio, spa->spa_root_vdev);
		(void) zio_wait(zio);
		trim_map_commit_done(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_STATE, FTAG);
	}
}

void
trim_thread_create(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;

	mutex_init(&spa->spa_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa->spa_trim_cv, NULL, CV_DEFAULT, NULL);
	mutex_enter(&spa->spa_trim_lock);
	spa->spa_trim_thread = thread_create(NULL, 0, trim_thread, spa, 0, &p0,
	    TS_RUN, minclsyspri);
	mutex_exit(&spa->spa_trim_lock);
}

void
trim_thread_destroy(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	/* Setting spa_trim_thread to NULL tells the thread to stop. */
	spa->spa_trim_thread = NULL;
	cv_signal(&spa->spa_trim_cv);
	/* The thread will set it back to != NULL on exit. */
	while (spa->spa_trim_thread == NULL)
		cv_wait(&spa->spa_trim_cv, &spa->spa_trim_lock);
	spa->spa_trim_thread = NULL;
	mutex_exit(&spa->spa_trim_lock);

	cv_destroy(&spa->spa_trim_cv);
	mutex_destroy(&spa->spa_trim_lock);
}

void
trim_thread_wakeup(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	cv_signal(&spa->spa_trim_cv);
	mutex_exit(&spa->spa_trim_lock);
}