/* trim_map.c, revision 277701 */
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/trim_map.h>
#include <sys/time.h>

/*
 * Calculate the zio end, rounding up based on the ashift, as would be done
 * by zio_vdev_io_start().
 *
 * This makes free range consolidation much more effective than it would
 * otherwise be, as well as ensuring that entire blocks are invalidated by
 * writes.
 */
#define	TRIM_ZIO_END(vd, offset, size)	((offset) +		\
	P2ROUNDUP(size, 1ULL << vd->vdev_top->vdev_ashift))

#define	TRIM_MAP_SINC(tm, size)					\
	atomic_add_64(&(tm)->tm_bytes, (size))

#define	TRIM_MAP_SDEC(tm, size)					\
	atomic_add_64(&(tm)->tm_bytes, -(size))

#define	TRIM_MAP_QINC(tm)					\
	atomic_inc_64(&(tm)->tm_pending)

#define	TRIM_MAP_QDEC(tm)					\
	atomic_dec_64(&(tm)->tm_pending)

typedef struct trim_map {
	list_t		tm_head;		/* List of segments sorted by txg. */
	avl_tree_t	tm_queued_frees;	/* AVL tree of segments waiting for TRIM. */
	avl_tree_t	tm_inflight_frees;	/* AVL tree of in-flight TRIMs. */
	avl_tree_t	tm_inflight_writes;	/* AVL tree of in-flight writes. */
	list_t		tm_pending_writes;	/* Writes blocked on in-flight frees. */
	kmutex_t	tm_lock;
	uint64_t	tm_pending;		/* Count of pending TRIMs. */
	uint64_t	tm_bytes;		/* Total size in bytes of queued TRIMs. */
} trim_map_t;

typedef struct trim_seg {
	avl_node_t	ts_node;	/* AVL node. */
	list_node_t	ts_next;	/* List element. */
	uint64_t	ts_start;	/* Starting offset of this segment. */
	uint64_t	ts_end;		/* Ending offset (non-inclusive). */
	uint64_t	ts_txg;		/* Segment creation txg. */
	hrtime_t	ts_time;	/* Segment creation time. */
} trim_seg_t;

extern boolean_t zfs_trim_enabled;

static u_int trim_txg_delay = 32;
static u_int trim_timeout = 30;
static u_int trim_max_interval = 1;
/* Limit outstanding TRIMs to 2G (max size for a single TRIM request) */
static uint64_t trim_vdev_max_bytes = 2147483648;
/* Limit outstanding TRIMs to 64 (max ranges for a single TRIM request) */
static u_int trim_vdev_max_pending = 64;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM");

/* CTLFLAG_RWTUN makes each of these a loader tunable as well as a sysctl. */
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay,
    0, "Delay TRIMs by up to this many TXGs");

SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0,
    "Delay TRIMs by up to this many seconds");

SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN,
    &trim_max_interval, 0,
    "Maximum interval between TRIM queue processing (seconds)");

SYSCTL_DECL(_vfs_zfs_vdev);
SYSCTL_QUAD(_vfs_zfs_vdev, OID_AUTO, trim_max_bytes, CTLFLAG_RWTUN,
    &trim_vdev_max_bytes, 0,
    "Maximum pending TRIM bytes for a vdev");

SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN,
    &trim_vdev_max_pending, 0,
    "Maximum pending TRIM segments for a vdev");

static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd);

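/*
 * Comparator for trim segments.  Overlapping segments compare equal, so an
 * avl_find() with this comparator locates any segment that overlaps the
 * search range, not just exact matches.
 */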
static int
trim_map_seg_compare(const void *x1, const void *x2)
{
	const trim_seg_t *s1 = x1;
	const trim_seg_t *s2 = x2;

	if (s1->ts_start < s2->ts_start) {
		if (s1->ts_end > s2->ts_start)
			return (0);
		return (-1);
	}
	if (s1->ts_start > s2->ts_start) {
		if (s1->ts_start < s2->ts_end)
			return (0);
		return (1);
	}
	return (0);
}

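/*
 * Comparator for in-flight write zios, keyed by offset and size.  As above,
 * overlapping I/O ranges compare equal so lookups detect collisions.
 */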
static int
trim_map_zio_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset) {
		if (z1->io_offset + z1->io_size > z2->io_offset)
			return (0);
		return (-1);
	}
	if (z1->io_offset > z2->io_offset) {
		if (z1->io_offset < z2->io_offset + z2->io_size)
			return (0);
		return (1);
	}
	return (0);
}

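/*
 * Allocate and initialize the trim map for a leaf vdev that supports TRIM.
 */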
void
trim_map_create(vdev_t *vd)
{
	trim_map_t *tm;

	ASSERT(zfs_trim_enabled && !vd->vdev_notrim &&
	    vd->vdev_ops->vdev_op_leaf);

	tm = kmem_zalloc(sizeof (*tm), KM_SLEEP);
	mutex_init(&tm->tm_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tm->tm_head, sizeof (trim_seg_t),
	    offsetof(trim_seg_t, ts_next));
	list_create(&tm->tm_pending_writes, sizeof (zio_t),
	    offsetof(zio_t, io_trim_link));
	avl_create(&tm->tm_queued_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_writes, trim_map_zio_compare,
	    sizeof (zio_t), offsetof(zio_t, io_trim_node));
	vd->vdev_trimmap = tm;
}

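/*
 * Tear down a leaf vdev's trim map, dropping any still-queued segments.
 */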
void
trim_map_destroy(vdev_t *vd)
{
	trim_map_t *tm;
	trim_seg_t *ts;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = vd->vdev_trimmap;
	if (tm == NULL)
		return;

	/*
	 * We may have been called before trim_map_vdev_commit_done()
	 * had a chance to run, so do it now to prune the remaining
	 * inflight frees.
	 */
	trim_map_vdev_commit_done(vd->vdev_spa, vd);

	mutex_enter(&tm->tm_lock);
	while ((ts = list_head(&tm->tm_head)) != NULL) {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
	mutex_exit(&tm->tm_lock);

	avl_destroy(&tm->tm_queued_frees);
	avl_destroy(&tm->tm_inflight_frees);
	avl_destroy(&tm->tm_inflight_writes);
	list_destroy(&tm->tm_pending_writes);
	list_destroy(&tm->tm_head);
	mutex_destroy(&tm->tm_lock);
	kmem_free(tm, sizeof (*tm));
	vd->vdev_trimmap = NULL;
}

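/*
 * Add the range [start, end) to the queued-frees tree, merging it with the
 * adjacent segment on either side when the ranges touch.  A merged segment
 * inherits the new txg/time and moves to the tail of the txg-sorted list,
 * so consolidation also delays its eventual TRIM.
 */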
static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;
	hrtime_t time;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	time = gethrtime();
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);

	merge_before = (ts_before != NULL && ts_before->ts_end == start);
	merge_after = (ts_after != NULL && ts_after->ts_start == end);

	if (merge_before && merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - ts_before->ts_end);
		TRIM_MAP_QDEC(tm);
		avl_remove(&tm->tm_queued_frees, ts_before);
		list_remove(&tm->tm_head, ts_before);
		ts_after->ts_start = ts_before->ts_start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		list_remove(&tm->tm_head, ts_after);
		list_insert_tail(&tm->tm_head, ts_after);
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		TRIM_MAP_SINC(tm, end - ts_before->ts_end);
		ts_before->ts_end = end;
		ts_before->ts_txg = txg;
		ts_before->ts_time = time;
		list_remove(&tm->tm_head, ts_before);
		list_insert_tail(&tm->tm_head, ts_before);
	} else if (merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - start);
		ts_after->ts_start = start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		list_remove(&tm->tm_head, ts_after);
		list_insert_tail(&tm->tm_head, ts_after);
	} else {
		TRIM_MAP_SINC(tm, end - start);
		TRIM_MAP_QINC(tm);
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		ts->ts_time = time;
		avl_insert(&tm->tm_queued_frees, ts, where);
		list_insert_tail(&tm->tm_head, ts);
	}
}

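/*
 * Remove the range [start, end) from segment ts, trimming ts down, deleting
 * it outright, or splitting it in two when the removed range falls strictly
 * inside it.
 */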
static void
trim_map_segment_remove(trim_map_t *tm, trim_seg_t *ts, uint64_t start,
    uint64_t end)
{
	trim_seg_t *nts;
	boolean_t left_over, right_over;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	left_over = (ts->ts_start < start);
	right_over = (ts->ts_end > end);

	TRIM_MAP_SDEC(tm, end - start);
	if (left_over && right_over) {
		nts = kmem_alloc(sizeof (*nts), KM_SLEEP);
		nts->ts_start = end;
		nts->ts_end = ts->ts_end;
		nts->ts_txg = ts->ts_txg;
		nts->ts_time = ts->ts_time;
		ts->ts_end = start;
		avl_insert_here(&tm->tm_queued_frees, nts, ts, AVL_AFTER);
		list_insert_after(&tm->tm_head, ts, nts);
		TRIM_MAP_QINC(tm);
	} else if (left_over) {
		ts->ts_end = start;
	} else if (right_over) {
		ts->ts_start = end;
	} else {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
}

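/*
 * Queue the range [start, end) for TRIM, carving out any portions that
 * collide with in-flight writes; only the non-overlapping pieces are added.
 */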
static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	zio_t zsearch, *zs;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	zsearch.io_offset = start;
	zsearch.io_size = end - start;

	zs = avl_find(&tm->tm_inflight_writes, &zsearch, NULL);
	if (zs == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}
	if (start < zs->io_offset)
		trim_map_free_locked(tm, start, zs->io_offset, txg);
	if (zs->io_offset + zs->io_size < end)
		trim_map_free_locked(tm, zs->io_offset + zs->io_size, end, txg);
}

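/*
 * Called when a block is freed: record it so it can be TRIMmed later.
 */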
void
trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	trim_map_t *tm = vd->vdev_trimmap;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	trim_map_free_locked(tm, offset, TRIM_ZIO_END(vd, offset, size), txg);
	mutex_exit(&tm->tm_lock);
}

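/*
 * Called before a write is issued.  Returns B_TRUE if the write may proceed
 * (any queued frees it overlaps are dropped and the zio is tracked as
 * in-flight), or B_FALSE if it collides with an in-flight free, in which
 * case the zio is parked on tm_pending_writes for later reissue.
 */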
boolean_t
trim_map_write_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t tsearch, *ts;
	uint64_t start, end;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return (B_TRUE);

	start = zio->io_offset;
	end = TRIM_ZIO_END(zio->io_vd, start, zio->io_size);
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	mutex_enter(&tm->tm_lock);

	/*
	 * Check for colliding in-flight frees.
	 */
	ts = avl_find(&tm->tm_inflight_frees, &tsearch, NULL);
	if (ts != NULL) {
		list_insert_tail(&tm->tm_pending_writes, zio);
		mutex_exit(&tm->tm_lock);
		return (B_FALSE);
	}

	ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
	if (ts != NULL) {
		/*
		 * Loop until all overlapping segments are removed.
		 */
		do {
			trim_map_segment_remove(tm, ts, start, end);
			ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
		} while (ts != NULL);
	}
	avl_add(&tm->tm_inflight_writes, zio);

	mutex_exit(&tm->tm_lock);

	return (B_TRUE);
}

void
trim_map_write_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;

	/*
	 * Don't check for vdev_notrim, since the write could have
	 * started before vdev_notrim was set.
	 */
	if (!zfs_trim_enabled || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	/*
	 * Don't fail if the write isn't in the tree, since the write
	 * could have started after vdev_notrim was set.
	 */
	if (zio->io_trim_node.avl_child[0] ||
	    zio->io_trim_node.avl_child[1] ||
	    AVL_XPARENT(&zio->io_trim_node) ||
	    tm->tm_inflight_writes.avl_root == &zio->io_trim_node)
		avl_remove(&tm->tm_inflight_writes, zio);
	mutex_exit(&tm->tm_lock);
}

/*
 * Return the oldest segment (the one with the lowest txg / time) or NULL if:
 * 1. The list is empty
 * 2. The first element's txg is greater than txgsafe
 * 3. The first element's txg is greater than the txg argument, its time is
 *    later than the time argument, and neither the queued-bytes nor the
 *    pending-segment limit has been exceeded
 */
static trim_seg_t *
trim_map_first(trim_map_t *tm, uint64_t txg, uint64_t txgsafe, hrtime_t time)
{
	trim_seg_t *ts;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(txgsafe >= txg);

	ts = list_head(&tm->tm_head);
	if (ts != NULL && ts->ts_txg <= txgsafe &&
	    (ts->ts_txg <= txg || ts->ts_time <= time ||
	    tm->tm_bytes > trim_vdev_max_bytes ||
	    tm->tm_pending > trim_vdev_max_pending))
		return (ts);
	return (NULL);
}

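/*
 * Issue TRIM zios for all segments that are old enough (by txg or elapsed
 * time) or that must be flushed because a queue limit has been exceeded.
 */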
static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, offset, txgtarget, txgsafe;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
	/* Loop until we have sent all outstanding frees. */
	while ((ts = trim_map_first(tm, txgtarget, txgsafe, timelimit))
	    != NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		offset = ts->ts_start;
		TRIM_MAP_SDEC(tm, size);
		TRIM_MAP_QDEC(tm);
		/*
		 * We drop the lock while we call zio_nowait(), as the I/O
		 * scheduler can run a different I/O (e.g. a write) inline,
		 * which would recursively acquire this lock.
		 */
		mutex_exit(&tm->tm_lock);

		zio_nowait(zio_trim(zio, spa, vd, offset, size));

		mutex_enter(&tm->tm_lock);
	}
	mutex_exit(&tm->tm_lock);
}

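/*
 * Called once the TRIM zios of the previous pass have completed: discard
 * the in-flight free segments and reissue any writes they had blocked.
 */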
static void
trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	list_t pending_writes;
	zio_t *zio;
	void *cookie;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	if (!avl_is_empty(&tm->tm_inflight_frees)) {
		cookie = NULL;
		while ((ts = avl_destroy_nodes(&tm->tm_inflight_frees,
		    &cookie)) != NULL) {
			kmem_free(ts, sizeof (*ts));
		}
	}
	list_create(&pending_writes, sizeof (zio_t), offsetof(zio_t,
	    io_trim_link));
	list_move_tail(&pending_writes, &tm->tm_pending_writes);
	mutex_exit(&tm->tm_lock);

	while ((zio = list_remove_head(&pending_writes)) != NULL) {
		zio_vdev_io_reissue(zio);
		zio_execute(zio);
	}
	list_destroy(&pending_writes);
}

static void
trim_map_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit(spa, zio, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit(spa, zio, vd->vdev_child[c]);
	}
}

static void
trim_map_commit_done(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit_done(spa, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit_done(spa, vd->vdev_child[c]);
	}
}

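/*
 * Per-pool thread that wakes up at least every trim_max_interval seconds
 * (or when signalled) and commits the queued TRIMs for every leaf vdev.
 */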
static void
trim_thread(void *arg)
{
	spa_t *spa = arg;
	zio_t *zio;

#ifdef _KERNEL
	(void) snprintf(curthread->td_name, sizeof(curthread->td_name),
	    "trim %s", spa_name(spa));
#endif

	for (;;) {
		mutex_enter(&spa->spa_trim_lock);
		if (spa->spa_trim_thread == NULL) {
			spa->spa_trim_thread = curthread;
			cv_signal(&spa->spa_trim_cv);
			mutex_exit(&spa->spa_trim_lock);
			thread_exit();
		}

		(void) cv_timedwait(&spa->spa_trim_cv, &spa->spa_trim_lock,
		    hz * trim_max_interval);
		mutex_exit(&spa->spa_trim_lock);

		zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		trim_map_commit(spa, zio, spa->spa_root_vdev);
		(void) zio_wait(zio);
		trim_map_commit_done(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_STATE, FTAG);
	}
}

void
trim_thread_create(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;

	mutex_init(&spa->spa_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa->spa_trim_cv, NULL, CV_DEFAULT, NULL);
	mutex_enter(&spa->spa_trim_lock);
	spa->spa_trim_thread = thread_create(NULL, 0, trim_thread, spa, 0, &p0,
	    TS_RUN, minclsyspri);
	mutex_exit(&spa->spa_trim_lock);
}

void
trim_thread_destroy(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	/* Setting spa_trim_thread to NULL tells the thread to stop. */
	spa->spa_trim_thread = NULL;
	cv_signal(&spa->spa_trim_cv);
	/* The thread will set it back to != NULL on exit. */
	while (spa->spa_trim_thread == NULL)
		cv_wait(&spa->spa_trim_cv, &spa->spa_trim_lock);
	spa->spa_trim_thread = NULL;
	mutex_exit(&spa->spa_trim_lock);

	cv_destroy(&spa->spa_trim_cv);
	mutex_destroy(&spa->spa_trim_lock);
}

void
trim_thread_wakeup(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	cv_signal(&spa->spa_trim_cv);
	mutex_exit(&spa->spa_trim_lock);
}