trim_map.c revision 265152
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
 * All rights reserved.
 */

#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/trim_map.h>
#include <sys/time.h>

/*
 * Calculate the zio end, rounding the size up to the vdev's ashift as
 * zio_vdev_io_start would do.
 *
 * This makes free range consolidation much more effective than it
 * would otherwise be, and ensures that entire blocks are invalidated
 * by writes.
 */
#define	TRIM_ZIO_END(vd, offset, size)	((offset) +		\
	P2ROUNDUP((size), 1ULL << (vd)->vdev_top->vdev_ashift))
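
/*
 * Worked example (illustrative, not from the original source): with
 * ashift 12 (4 KiB physical sectors), TRIM_ZIO_END(vd, 8192, 1000)
 * yields 8192 + P2ROUNDUP(1000, 4096) = 8192 + 4096 = 12288, so the
 * tracked range always ends on a physical block boundary.
 */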

#define	TRIM_MAP_SINC(tm, size)					\
	atomic_add_64(&(tm)->tm_bytes, (size))

#define	TRIM_MAP_SDEC(tm, size)					\
	atomic_add_64(&(tm)->tm_bytes, -(size))

#define	TRIM_MAP_QINC(tm)					\
	atomic_inc_64(&(tm)->tm_pending)

#define	TRIM_MAP_QDEC(tm)					\
	atomic_dec_64(&(tm)->tm_pending)

typedef struct trim_map {
	list_t		tm_head;		/* List of segments sorted by txg. */
	avl_tree_t	tm_queued_frees;	/* AVL tree of segments waiting for TRIM. */
	avl_tree_t	tm_inflight_frees;	/* AVL tree of in-flight TRIMs. */
	avl_tree_t	tm_inflight_writes;	/* AVL tree of in-flight writes. */
	list_t		tm_pending_writes;	/* Writes blocked on in-flight frees. */
	kmutex_t	tm_lock;
	uint64_t	tm_pending;		/* Count of pending TRIMs. */
	uint64_t	tm_bytes;		/* Total size in bytes of queued TRIMs. */
} trim_map_t;

typedef struct trim_seg {
	avl_node_t	ts_node;	/* AVL node. */
	list_node_t	ts_next;	/* List element. */
	uint64_t	ts_start;	/* Starting offset of this segment. */
	uint64_t	ts_end;		/* Ending offset (non-inclusive). */
	uint64_t	ts_txg;		/* Segment creation txg. */
	hrtime_t	ts_time;	/* Segment creation time. */
} trim_seg_t;

extern boolean_t zfs_trim_enabled;

static u_int trim_txg_delay = 32;
static u_int trim_timeout = 30;
static u_int trim_max_interval = 1;
/* Limit outstanding TRIMs to 2G (max size for a single TRIM request) */
static uint64_t trim_vdev_max_bytes = 2147483648;
/* Limit outstanding TRIMs to 64 (max ranges for a single TRIM request) */
static u_int trim_vdev_max_pending = 64;

SYSCTL_DECL(_vfs_zfs);
SYSCTL_NODE(_vfs_zfs, OID_AUTO, trim, CTLFLAG_RD, 0, "ZFS TRIM");

TUNABLE_INT("vfs.zfs.trim.txg_delay", &trim_txg_delay);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, txg_delay, CTLFLAG_RWTUN, &trim_txg_delay,
    0, "Delay TRIMs by up to this many TXGs");

TUNABLE_INT("vfs.zfs.trim.timeout", &trim_timeout);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, timeout, CTLFLAG_RWTUN, &trim_timeout, 0,
    "Delay TRIMs by up to this many seconds");

TUNABLE_INT("vfs.zfs.trim.max_interval", &trim_max_interval);
SYSCTL_UINT(_vfs_zfs_trim, OID_AUTO, max_interval, CTLFLAG_RWTUN,
    &trim_max_interval, 0,
    "Maximum interval between TRIM queue processing (seconds)");

SYSCTL_DECL(_vfs_zfs_vdev);
TUNABLE_QUAD("vfs.zfs.vdev.trim_max_bytes", &trim_vdev_max_bytes);
SYSCTL_QUAD(_vfs_zfs_vdev, OID_AUTO, trim_max_bytes, CTLFLAG_RWTUN,
    &trim_vdev_max_bytes, 0,
    "Maximum pending TRIM bytes for a vdev");

TUNABLE_INT("vfs.zfs.vdev.trim_max_pending", &trim_vdev_max_pending);
SYSCTL_UINT(_vfs_zfs_vdev, OID_AUTO, trim_max_pending, CTLFLAG_RWTUN,
    &trim_vdev_max_pending, 0,
    "Maximum pending TRIM segments for a vdev");
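
/*
 * Usage note (illustrative): since these knobs are CTLFLAG_RWTUN, they
 * can be set both at runtime and as boot-time tunables, e.g.:
 *
 *	sysctl vfs.zfs.trim.txg_delay=64
 *	echo 'vfs.zfs.vdev.trim_max_bytes="1073741824"' >> /boot/loader.conf
 */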

static void trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd);

static int
trim_map_seg_compare(const void *x1, const void *x2)
{
	const trim_seg_t *s1 = x1;
	const trim_seg_t *s2 = x2;

	if (s1->ts_start < s2->ts_start) {
		if (s1->ts_end > s2->ts_start)
			return (0);
		return (-1);
	}
	if (s1->ts_start > s2->ts_start) {
		if (s1->ts_start < s2->ts_end)
			return (0);
		return (1);
	}
	return (0);
}
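
/*
 * Note: overlapping segments compare equal, so avl_find() with a range
 * as the search key returns a segment that the key overlaps, if any.
 * For example, a search for [100, 200) matches an existing [150, 300).
 * trim_map_zio_compare() below applies the same convention to
 * in-flight writes.
 */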

static int
trim_map_zio_compare(const void *x1, const void *x2)
{
	const zio_t *z1 = x1;
	const zio_t *z2 = x2;

	if (z1->io_offset < z2->io_offset) {
		if (z1->io_offset + z1->io_size > z2->io_offset)
			return (0);
		return (-1);
	}
	if (z1->io_offset > z2->io_offset) {
		if (z1->io_offset < z2->io_offset + z2->io_size)
			return (0);
		return (1);
	}
	return (0);
}

void
trim_map_create(vdev_t *vd)
{
	trim_map_t *tm;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = kmem_zalloc(sizeof (*tm), KM_SLEEP);
	mutex_init(&tm->tm_lock, NULL, MUTEX_DEFAULT, NULL);
	list_create(&tm->tm_head, sizeof (trim_seg_t),
	    offsetof(trim_seg_t, ts_next));
	list_create(&tm->tm_pending_writes, sizeof (zio_t),
	    offsetof(zio_t, io_trim_link));
	avl_create(&tm->tm_queued_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_frees, trim_map_seg_compare,
	    sizeof (trim_seg_t), offsetof(trim_seg_t, ts_node));
	avl_create(&tm->tm_inflight_writes, trim_map_zio_compare,
	    sizeof (zio_t), offsetof(zio_t, io_trim_node));
	vd->vdev_trimmap = tm;
}
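
/*
 * Only leaf vdevs carry a trim map (hence the vdev_op_leaf assertion
 * above); interior vdevs are handled by recursing over their children
 * in trim_map_commit()/trim_map_commit_done() below.
 */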

void
trim_map_destroy(vdev_t *vd)
{
	trim_map_t *tm;
	trim_seg_t *ts;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (!zfs_trim_enabled)
		return;

	tm = vd->vdev_trimmap;
	if (tm == NULL)
		return;

	/*
	 * We may have been called before trim_map_vdev_commit_done()
	 * had a chance to run, so do it now to prune the remaining
	 * inflight frees.
	 */
	trim_map_vdev_commit_done(vd->vdev_spa, vd);

	mutex_enter(&tm->tm_lock);
	while ((ts = list_head(&tm->tm_head)) != NULL) {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		/* Update the counters before the segment is freed. */
		TRIM_MAP_SDEC(tm, ts->ts_end - ts->ts_start);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
	mutex_exit(&tm->tm_lock);

	avl_destroy(&tm->tm_queued_frees);
	avl_destroy(&tm->tm_inflight_frees);
	avl_destroy(&tm->tm_inflight_writes);
	list_destroy(&tm->tm_pending_writes);
	list_destroy(&tm->tm_head);
	mutex_destroy(&tm->tm_lock);
	kmem_free(tm, sizeof (*tm));
	vd->vdev_trimmap = NULL;
}

static void
trim_map_segment_add(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	avl_index_t where;
	trim_seg_t tsearch, *ts_before, *ts_after, *ts;
	boolean_t merge_before, merge_after;
	hrtime_t time;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(start < end);

	time = gethrtime();
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	ts = avl_find(&tm->tm_queued_frees, &tsearch, &where);
	if (ts != NULL) {
		if (start < ts->ts_start)
			trim_map_segment_add(tm, start, ts->ts_start, txg);
		if (end > ts->ts_end)
			trim_map_segment_add(tm, ts->ts_end, end, txg);
		return;
	}

	ts_before = avl_nearest(&tm->tm_queued_frees, where, AVL_BEFORE);
	ts_after = avl_nearest(&tm->tm_queued_frees, where, AVL_AFTER);

	merge_before = (ts_before != NULL && ts_before->ts_end == start);
	merge_after = (ts_after != NULL && ts_after->ts_start == end);

	if (merge_before && merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - ts_before->ts_end);
		TRIM_MAP_QDEC(tm);
		avl_remove(&tm->tm_queued_frees, ts_before);
		list_remove(&tm->tm_head, ts_before);
		ts_after->ts_start = ts_before->ts_start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
		kmem_free(ts_before, sizeof (*ts_before));
	} else if (merge_before) {
		TRIM_MAP_SINC(tm, end - ts_before->ts_end);
		ts_before->ts_end = end;
		ts_before->ts_txg = txg;
		ts_before->ts_time = time;
	} else if (merge_after) {
		TRIM_MAP_SINC(tm, ts_after->ts_start - start);
		ts_after->ts_start = start;
		ts_after->ts_txg = txg;
		ts_after->ts_time = time;
	} else {
		TRIM_MAP_SINC(tm, end - start);
		TRIM_MAP_QINC(tm);
		ts = kmem_alloc(sizeof (*ts), KM_SLEEP);
		ts->ts_start = start;
		ts->ts_end = end;
		ts->ts_txg = txg;
		ts->ts_time = time;
		avl_insert(&tm->tm_queued_frees, ts, where);
		list_insert_tail(&tm->tm_head, ts);
	}
}
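
/*
 * Merge example (illustrative): with queued segments [0, 4096) and
 * [8192, 12288), adding [4096, 8192) takes the merge_before &&
 * merge_after path and collapses all three into a single [0, 12288)
 * segment, charging only the 4096-byte gap to tm_bytes and decrementing
 * tm_pending by one.
 */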

static void
trim_map_segment_remove(trim_map_t *tm, trim_seg_t *ts, uint64_t start,
    uint64_t end)
{
	trim_seg_t *nts;
	boolean_t left_over, right_over;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	left_over = (ts->ts_start < start);
	right_over = (ts->ts_end > end);

	TRIM_MAP_SDEC(tm, end - start);
	if (left_over && right_over) {
		nts = kmem_alloc(sizeof (*nts), KM_SLEEP);
		nts->ts_start = end;
		nts->ts_end = ts->ts_end;
		nts->ts_txg = ts->ts_txg;
		nts->ts_time = ts->ts_time;
		ts->ts_end = start;
		avl_insert_here(&tm->tm_queued_frees, nts, ts, AVL_AFTER);
		list_insert_after(&tm->tm_head, ts, nts);
		TRIM_MAP_QINC(tm);
	} else if (left_over) {
		ts->ts_end = start;
	} else if (right_over) {
		ts->ts_start = end;
	} else {
		avl_remove(&tm->tm_queued_frees, ts);
		list_remove(&tm->tm_head, ts);
		TRIM_MAP_QDEC(tm);
		kmem_free(ts, sizeof (*ts));
	}
}
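
/*
 * Split example (illustrative): removing [2048, 4096) from a queued
 * segment [0, 8192) shrinks it to [0, 2048) and inserts a new
 * [4096, 8192) segment after it, so only the overwritten range loses
 * its pending TRIM.
 */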

static void
trim_map_free_locked(trim_map_t *tm, uint64_t start, uint64_t end, uint64_t txg)
{
	zio_t zsearch, *zs;

	ASSERT(MUTEX_HELD(&tm->tm_lock));

	zsearch.io_offset = start;
	zsearch.io_size = end - start;

	zs = avl_find(&tm->tm_inflight_writes, &zsearch, NULL);
	if (zs == NULL) {
		trim_map_segment_add(tm, start, end, txg);
		return;
	}
	if (start < zs->io_offset)
		trim_map_free_locked(tm, start, zs->io_offset, txg);
	if (zs->io_offset + zs->io_size < end)
		trim_map_free_locked(tm, zs->io_offset + zs->io_size, end, txg);
}
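
/*
 * trim_map_free_locked() punches the freed range around any
 * overlapping in-flight write: the portions before and after the write
 * are retried recursively, while the overlap itself is dropped, since
 * a range being concurrently rewritten must not be trimmed.
 */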

void
trim_map_free(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
{
	trim_map_t *tm = vd->vdev_trimmap;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	trim_map_free_locked(tm, offset, TRIM_ZIO_END(vd, offset, size), txg);
	mutex_exit(&tm->tm_lock);
}

boolean_t
trim_map_write_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t tsearch, *ts;
	uint64_t start, end;

	if (!zfs_trim_enabled || vd->vdev_notrim || tm == NULL)
		return (B_TRUE);

	start = zio->io_offset;
	end = TRIM_ZIO_END(vd, start, zio->io_size);
	tsearch.ts_start = start;
	tsearch.ts_end = end;

	mutex_enter(&tm->tm_lock);

	/*
	 * Check for colliding in-flight frees.
	 */
	ts = avl_find(&tm->tm_inflight_frees, &tsearch, NULL);
	if (ts != NULL) {
		list_insert_tail(&tm->tm_pending_writes, zio);
		mutex_exit(&tm->tm_lock);
		return (B_FALSE);
	}

	ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
	if (ts != NULL) {
		/*
		 * Loop until all overlapping segments are removed.
		 */
		do {
			trim_map_segment_remove(tm, ts, start, end);
			ts = avl_find(&tm->tm_queued_frees, &tsearch, NULL);
		} while (ts != NULL);
	}
	avl_add(&tm->tm_inflight_writes, zio);

	mutex_exit(&tm->tm_lock);

	return (B_TRUE);
}
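
/*
 * Return value contract: B_TRUE means the write may proceed; B_FALSE
 * means it collided with an in-flight TRIM and was queued on
 * tm_pending_writes.  The caller in the zio pipeline is expected to
 * stop issuing the zio and let trim_map_vdev_commit_done() reissue it
 * once the colliding TRIM completes.
 */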

void
trim_map_write_done(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	trim_map_t *tm = vd->vdev_trimmap;

	/*
	 * Don't check for vdev_notrim, since the write could have
	 * started before vdev_notrim was set.
	 */
	if (!zfs_trim_enabled || tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	/*
	 * Don't fail if the write isn't in the tree, since the write
	 * could have started after vdev_notrim was set.
	 */
	if (zio->io_trim_node.avl_child[0] ||
	    zio->io_trim_node.avl_child[1] ||
	    AVL_XPARENT(&zio->io_trim_node) ||
	    tm->tm_inflight_writes.avl_root == &zio->io_trim_node)
		avl_remove(&tm->tm_inflight_writes, zio);
	mutex_exit(&tm->tm_lock);
}
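
/*
 * The linkage test above checks whether io_trim_node is actually in
 * the tree (it has a child, a parent, or is the root) before calling
 * avl_remove(), which is not safe on a node that was never added.
 */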

/*
 * Return the first (oldest) segment on tm_head if the list is
 * non-empty, its txg is not greater than txgsafe, and at least one of
 * the following holds:
 * 1. Its txg is not greater than the txg argument,
 * 2. Its creation time is not later than the time argument, or
 * 3. tm_bytes exceeds trim_vdev_max_bytes or tm_pending exceeds
 *    trim_vdev_max_pending.
 * Otherwise return NULL.
 */
static trim_seg_t *
trim_map_first(trim_map_t *tm, uint64_t txg, uint64_t txgsafe, hrtime_t time)
{
	trim_seg_t *ts;

	ASSERT(MUTEX_HELD(&tm->tm_lock));
	VERIFY(txgsafe >= txg);

	ts = list_head(&tm->tm_head);
	if (ts != NULL && ts->ts_txg <= txgsafe &&
	    (ts->ts_txg <= txg || ts->ts_time <= time ||
	    tm->tm_bytes > trim_vdev_max_bytes ||
	    tm->tm_pending > trim_vdev_max_pending))
		return (ts);
	return (NULL);
}
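
/*
 * Worked example (illustrative): with trim_txg_delay = 32 and
 * trim_timeout = 30, a segment created at txg 100 is normally held
 * back until the last synced txg reaches 132, but it is issued earlier
 * if it is more than 30 seconds old or the per-vdev byte/segment
 * limits are exceeded.
 */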

static void
trim_map_vdev_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	uint64_t size, offset, txgtarget, txgsafe;
	hrtime_t timelimit;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	timelimit = gethrtime() - trim_timeout * NANOSEC;
	if (vd->vdev_isl2cache) {
		txgsafe = UINT64_MAX;
		txgtarget = UINT64_MAX;
	} else {
		txgsafe = MIN(spa_last_synced_txg(spa), spa_freeze_txg(spa));
		if (txgsafe > trim_txg_delay)
			txgtarget = txgsafe - trim_txg_delay;
		else
			txgtarget = 0;
	}

	mutex_enter(&tm->tm_lock);
	/* Loop until we have sent all outstanding frees. */
	while ((ts = trim_map_first(tm, txgtarget, txgsafe, timelimit))
	    != NULL) {
		list_remove(&tm->tm_head, ts);
		avl_remove(&tm->tm_queued_frees, ts);
		avl_add(&tm->tm_inflight_frees, ts);
		size = ts->ts_end - ts->ts_start;
		offset = ts->ts_start;
		TRIM_MAP_SDEC(tm, size);
		TRIM_MAP_QDEC(tm);
		/*
		 * Drop the lock while calling zio_nowait(): the I/O
		 * scheduler may run another I/O (e.g. a write)
		 * synchronously, which could otherwise try to take
		 * tm_lock recursively.
		 */
		mutex_exit(&tm->tm_lock);

		zio_nowait(zio_trim(zio, spa, vd, offset, size));

		mutex_enter(&tm->tm_lock);
	}
	mutex_exit(&tm->tm_lock);
}

static void
trim_map_vdev_commit_done(spa_t *spa, vdev_t *vd)
{
	trim_map_t *tm = vd->vdev_trimmap;
	trim_seg_t *ts;
	list_t pending_writes;
	zio_t *zio;
	void *cookie;

	ASSERT(vd->vdev_ops->vdev_op_leaf);

	if (tm == NULL)
		return;

	mutex_enter(&tm->tm_lock);
	if (!avl_is_empty(&tm->tm_inflight_frees)) {
		cookie = NULL;
		while ((ts = avl_destroy_nodes(&tm->tm_inflight_frees,
		    &cookie)) != NULL) {
			kmem_free(ts, sizeof (*ts));
		}
	}
	list_create(&pending_writes, sizeof (zio_t), offsetof(zio_t,
	    io_trim_link));
	list_move_tail(&pending_writes, &tm->tm_pending_writes);
	mutex_exit(&tm->tm_lock);

	while ((zio = list_remove_head(&pending_writes)) != NULL) {
		zio_vdev_io_reissue(zio);
		zio_execute(zio);
	}
	list_destroy(&pending_writes);
}
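
/*
 * The pending writes are drained onto a local list before being
 * reissued so that zio_execute() runs without tm_lock held; each
 * reissued write is expected to pass through trim_map_write_start()
 * again and re-add itself to the in-flight tree.
 */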

static void
trim_map_commit(spa_t *spa, zio_t *zio, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit(spa, zio, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit(spa, zio, vd->vdev_child[c]);
	}
}

static void
trim_map_commit_done(spa_t *spa, vdev_t *vd)
{
	int c;

	if (vd == NULL)
		return;

	if (vd->vdev_ops->vdev_op_leaf) {
		trim_map_vdev_commit_done(spa, vd);
	} else {
		for (c = 0; c < vd->vdev_children; c++)
			trim_map_commit_done(spa, vd->vdev_child[c]);
	}
}

static void
trim_thread(void *arg)
{
	spa_t *spa = arg;
	zio_t *zio;

#ifdef _KERNEL
	(void) snprintf(curthread->td_name, sizeof(curthread->td_name),
	    "trim %s", spa_name(spa));
#endif

	for (;;) {
		mutex_enter(&spa->spa_trim_lock);
		if (spa->spa_trim_thread == NULL) {
			spa->spa_trim_thread = curthread;
			cv_signal(&spa->spa_trim_cv);
			mutex_exit(&spa->spa_trim_lock);
			thread_exit();
		}

		(void) cv_timedwait(&spa->spa_trim_cv, &spa->spa_trim_lock,
		    hz * trim_max_interval);
		mutex_exit(&spa->spa_trim_lock);

		zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);

		spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
		trim_map_commit(spa, zio, spa->spa_root_vdev);
		(void) zio_wait(zio);
		trim_map_commit_done(spa, spa->spa_root_vdev);
		spa_config_exit(spa, SCL_STATE, FTAG);
	}
}
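
/*
 * The trim thread wakes up at least once every trim_max_interval
 * seconds (via the cv_timedwait() timeout) and whenever
 * trim_thread_wakeup() signals it, then performs one commit pass over
 * the whole vdev tree.
 */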

void
trim_thread_create(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;

	mutex_init(&spa->spa_trim_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&spa->spa_trim_cv, NULL, CV_DEFAULT, NULL);
	mutex_enter(&spa->spa_trim_lock);
	spa->spa_trim_thread = thread_create(NULL, 0, trim_thread, spa, 0, &p0,
	    TS_RUN, minclsyspri);
	mutex_exit(&spa->spa_trim_lock);
}

void
trim_thread_destroy(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	/* Setting spa_trim_thread to NULL tells the thread to stop. */
	spa->spa_trim_thread = NULL;
	cv_signal(&spa->spa_trim_cv);
	/* The thread will set it back to != NULL on exit. */
	while (spa->spa_trim_thread == NULL)
		cv_wait(&spa->spa_trim_cv, &spa->spa_trim_lock);
	spa->spa_trim_thread = NULL;
	mutex_exit(&spa->spa_trim_lock);

	cv_destroy(&spa->spa_trim_cv);
	mutex_destroy(&spa->spa_trim_lock);
}

void
trim_thread_wakeup(spa_t *spa)
{

	if (!zfs_trim_enabled)
		return;
	if (spa->spa_trim_thread == NULL)
		return;

	mutex_enter(&spa->spa_trim_lock);
	cv_signal(&spa->spa_trim_cv);
	mutex_exit(&spa->spa_trim_lock);
}