xfs_trans_ail.c revision 0020a190
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2000-2002,2005 Silicon Graphics, Inc.
 * Copyright (c) 2008 Dave Chinner
 * All Rights Reserved.
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_shared.h"
#include "xfs_format.h"
#include "xfs_log_format.h"
#include "xfs_trans_resv.h"
#include "xfs_mount.h"
#include "xfs_trans.h"
#include "xfs_trans_priv.h"
#include "xfs_trace.h"
#include "xfs_errortag.h"
#include "xfs_error.h"
#include "xfs_log.h"
#include "xfs_log_priv.h"

#ifdef DEBUG
/*
 * Check that the list is sorted as it should be.
 *
 * Called with the ail lock held, but we don't want to assert fail with it
 * held otherwise we'll lock everything up and won't be able to debug the
 * cause. Hence we sample and check the state under the AIL lock and return if
 * everything is fine, otherwise we drop the lock and run the ASSERT checks.
 * Asserts may not be fatal, so pick the lock back up and continue onwards.
 */
STATIC void
xfs_ail_check(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
	__must_hold(&ailp->ail_lock)
{
	struct xfs_log_item	*prev_lip;
	struct xfs_log_item	*next_lip;
	xfs_lsn_t		prev_lsn = NULLCOMMITLSN;
	xfs_lsn_t		next_lsn = NULLCOMMITLSN;
	xfs_lsn_t		lsn;
	bool			in_ail;

	if (list_empty(&ailp->ail_head))
		return;

	/*
	 * Sample then check the next and previous entries are valid.
	 */
	in_ail = test_bit(XFS_LI_IN_AIL, &lip->li_flags);
	prev_lip = list_entry(lip->li_ail.prev, struct xfs_log_item, li_ail);
	if (&prev_lip->li_ail != &ailp->ail_head)
		prev_lsn = prev_lip->li_lsn;
	next_lip = list_entry(lip->li_ail.next, struct xfs_log_item, li_ail);
	if (&next_lip->li_ail != &ailp->ail_head)
		next_lsn = next_lip->li_lsn;
	lsn = lip->li_lsn;

	if (in_ail &&
	    (prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0) &&
	    (next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0))
		return;

	spin_unlock(&ailp->ail_lock);
	ASSERT(in_ail);
	ASSERT(prev_lsn == NULLCOMMITLSN || XFS_LSN_CMP(prev_lsn, lsn) <= 0);
	ASSERT(next_lsn == NULLCOMMITLSN || XFS_LSN_CMP(next_lsn, lsn) >= 0);
	spin_lock(&ailp->ail_lock);
}
#else /* !DEBUG */
#define	xfs_ail_check(a,l)
#endif /* DEBUG */

/*
 * Return a pointer to the last item in the AIL.  If the AIL is empty, then
 * return NULL.
 */
static struct xfs_log_item *
xfs_ail_max(
	struct xfs_ail		*ailp)
{
	if (list_empty(&ailp->ail_head))
		return NULL;

	return list_entry(ailp->ail_head.prev, struct xfs_log_item, li_ail);
}

/*
 * Return a pointer to the item which follows the given item in the AIL.  If
 * the given item is the last item in the list, then return NULL.
 */
static struct xfs_log_item *
xfs_ail_next(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	if (lip->li_ail.next == &ailp->ail_head)
		return NULL;

	return list_first_entry(&lip->li_ail, struct xfs_log_item, li_ail);
}

/*
 * This is called by the log manager code to determine the LSN of the tail of
 * the log.  This is exactly the LSN of the first item in the AIL.  If the AIL
 * is empty, then this function returns 0.
 *
 * We need the AIL lock in order to get a coherent read of the lsn of the last
 * item in the AIL.
 */
static xfs_lsn_t
__xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip = xfs_ail_min(ailp);

	if (lip)
		return lip->li_lsn;
	return 0;
}
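
/*
 * For example, if the AIL holds items at LSNs 5, 7 and 9, the tail of the
 * log is at LSN 5.  Once the item at LSN 5 is written back and removed from
 * the AIL, the tail moves forward to LSN 7 and the log space before it can
 * be reclaimed.
 */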

xfs_lsn_t
xfs_ail_min_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn;

	spin_lock(&ailp->ail_lock);
	lsn = __xfs_ail_min_lsn(ailp);
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * Return the maximum lsn held in the AIL, or zero if the AIL is empty.
 */
static xfs_lsn_t
xfs_ail_max_lsn(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		lsn = 0;
	struct xfs_log_item	*lip;

	spin_lock(&ailp->ail_lock);
	lip = xfs_ail_max(ailp);
	if (lip)
		lsn = lip->li_lsn;
	spin_unlock(&ailp->ail_lock);

	return lsn;
}

/*
 * The cursor keeps track of where our current traversal is up to by tracking
 * the next item in the list for us. However, for this to be safe, removing an
 * object from the AIL needs to invalidate any cursor that points to it. Hence
 * the traversal cursor needs to be linked to the struct xfs_ail so that
 * deletion can search all the active cursors for invalidation.
 */
STATIC void
xfs_trans_ail_cursor_init(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_add_tail(&cur->list, &ailp->ail_cursors);
}

/*
 * Get the next item in the traversal and advance the cursor.  If the cursor
 * was invalidated (indicated by the low bit being set in the item pointer),
 * restart the traversal from the start of the AIL.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_next(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur)
{
	struct xfs_log_item	*lip = cur->item;

	if ((uintptr_t)lip & 1)
		lip = xfs_ail_min(ailp);
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

/*
 * When the traversal is complete, we need to remove the cursor from the list
 * of traversing cursors.
 */
void
xfs_trans_ail_cursor_done(
	struct xfs_ail_cursor	*cur)
{
	cur->item = NULL;
	list_del_init(&cur->list);
}
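
/*
 * A minimal traversal sketch (hypothetical caller), assuming the ail_lock
 * is held across the walk as the real callers do:
 *
 *	struct xfs_ail_cursor	cur;
 *	struct xfs_log_item	*lip;
 *
 *	spin_lock(&ailp->ail_lock);
 *	for (lip = xfs_trans_ail_cursor_first(ailp, &cur, 0);
 *	     lip != NULL;
 *	     lip = xfs_trans_ail_cursor_next(ailp, &cur)) {
 *		... process lip; the ail_lock may be dropped and retaken
 *		    here, because cursor invalidation keeps the walk safe ...
 *	}
 *	xfs_trans_ail_cursor_done(&cur);
 *	spin_unlock(&ailp->ail_lock);
 */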

/*
 * Invalidate any cursor that is pointing to this item. This is called when an
 * item is removed from the AIL. Any cursor pointing to this object is now
 * invalid and the traversal needs to be terminated so it doesn't reference a
 * freed object. We set the low bit of the cursor item pointer so we can
 * distinguish between an invalidation and the end of the list when getting the
 * next item from the cursor.
 */
STATIC void
xfs_trans_ail_cursor_clear(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_ail_cursor	*cur;

	list_for_each_entry(cur, &ailp->ail_cursors, list) {
		if (cur->item == lip)
			cur->item = (struct xfs_log_item *)
					((uintptr_t)cur->item | 1);
	}
}
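
/*
 * Sketch of the tagged-pointer encoding used above, for illustration only:
 *
 *	cur->item = (struct xfs_log_item *)((uintptr_t)lip | 1);  invalidate
 *	if ((uintptr_t)cur->item & 1)                             detect
 *		restart from xfs_ail_min(ailp);
 *
 * This is safe because log items are at least word aligned, so bit 0 of a
 * valid item pointer is always zero, and a NULL cursor (end of list) also
 * has bit 0 clear.
 */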

/*
 * Find the first item in the AIL with the given @lsn by searching in ascending
 * LSN order and initialise the cursor to point to the next item for an
 * ascending traversal.  Pass a @lsn of zero to initialise the cursor to the
 * first item in the AIL. Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_first(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	xfs_trans_ail_cursor_init(ailp, cur);

	if (lsn == 0) {
		lip = xfs_ail_min(ailp);
		goto out;
	}

	list_for_each_entry(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) >= 0)
			goto out;
	}
	return NULL;

out:
	if (lip)
		cur->item = xfs_ail_next(ailp, lip);
	return lip;
}

static struct xfs_log_item *
__xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	list_for_each_entry_reverse(lip, &ailp->ail_head, li_ail) {
		if (XFS_LSN_CMP(lip->li_lsn, lsn) <= 0)
			return lip;
	}
	return NULL;
}

/*
 * Find the last item in the AIL with the given @lsn by searching in descending
 * LSN order and initialise the cursor to point to that item.  If there is no
 * item with the value of @lsn, then it sets the cursor to the last item with an
 * LSN lower than @lsn.  Returns NULL if the list is empty.
 */
struct xfs_log_item *
xfs_trans_ail_cursor_last(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	xfs_lsn_t		lsn)
{
	xfs_trans_ail_cursor_init(ailp, cur);
	cur->item = __xfs_trans_ail_cursor_last(ailp, lsn);
	return cur->item;
}

/*
 * Splice the log item list into the AIL at the given LSN. We splice to the
 * tail of the given LSN to maintain insert order for push traversals. The
 * cursor is optional, allowing repeated updates to the same LSN to avoid
 * repeated traversals.  This should not be called with an empty list.
 */
static void
xfs_ail_splice(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct list_head	*list,
	xfs_lsn_t		lsn)
{
	struct xfs_log_item	*lip;

	ASSERT(!list_empty(list));

	/*
	 * Use the cursor to determine the insertion point if one is
	 * provided.  If not, or if the one we got is not valid,
	 * find the place in the AIL where the items belong.
	 */
	lip = cur ? cur->item : NULL;
	if (!lip || (uintptr_t)lip & 1)
		lip = __xfs_trans_ail_cursor_last(ailp, lsn);

	/*
	 * If a cursor is provided, we know we're processing the AIL
	 * in lsn order, and future items to be spliced in will
	 * follow the last one being inserted now.  Update the
	 * cursor to point to that last item, now while we have a
	 * reliable pointer to it.
	 */
	if (cur)
		cur->item = list_entry(list->prev, struct xfs_log_item, li_ail);

	/*
	 * Finally perform the splice.  Unless the AIL was empty,
	 * lip points to the item in the AIL _after_ which the new
	 * items should go.  If lip is null the AIL was empty, so
	 * the new items go at the head of the AIL.
	 */
	if (lip)
		list_splice(list, &lip->li_ail);
	else
		list_splice(list, &ailp->ail_head);
}
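
/*
 * Usage sketch (hypothetical caller): with a cursor positioned by
 * xfs_trans_ail_cursor_last(), repeated splices at the same LSN avoid
 * walking the list again, because the cursor remembers the tail of the
 * previous insert:
 *
 *	struct xfs_ail_cursor	cur;
 *	LIST_HEAD(tmp);
 *
 *	xfs_trans_ail_cursor_last(ailp, &cur, lsn);
 *	list_add(&lip->li_ail, &tmp);
 *	xfs_ail_splice(ailp, &cur, &tmp, lsn);
 *	...
 *	xfs_trans_ail_cursor_done(&cur);
 *
 * This is the pattern xfs_trans_ail_update_bulk() below relies on when its
 * caller supplies a cursor.
 */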

/*
 * Delete the given item from the AIL and invalidate any cursors that point
 * to it.
 */
static void
xfs_ail_delete(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	xfs_ail_check(ailp, lip);
	list_del(&lip->li_ail);
	xfs_trans_ail_cursor_clear(ailp, lip);
}

/*
 * Requeue a failed buffer for writeback.
 *
 * We clear the log item failed state here as well, but we have to be careful
 * about reference counts because the only active reference counts on the buffer
 * may be the failed log items. Hence if we clear the log item failed state
 * before queuing the buffer for IO we can release all active references to
 * the buffer and free it, leading to use after free problems in
 * xfs_buf_delwri_queue. It makes no difference to the buffer or log items which
 * order we process them in - the buffer is locked, and we own the buffer list
 * so nothing on them is going to change while we are performing this action.
 *
 * Hence we can safely queue the buffer for IO before we clear the failed log
 * item state, therefore always having an active reference to the buffer and
 * avoiding the transient zero-reference state that leads to use-after-free.
 */
static inline int
xfsaild_resubmit_item(
	struct xfs_log_item	*lip,
	struct list_head	*buffer_list)
{
	struct xfs_buf		*bp = lip->li_buf;

	if (!xfs_buf_trylock(bp))
		return XFS_ITEM_LOCKED;

	if (!xfs_buf_delwri_queue(bp, buffer_list)) {
		xfs_buf_unlock(bp);
		return XFS_ITEM_FLUSHING;
	}

	/* protected by ail_lock */
	list_for_each_entry(lip, &bp->b_li_list, li_bio_list) {
		if (bp->b_flags & _XBF_INODES)
			clear_bit(XFS_LI_FAILED, &lip->li_flags);
		else
			xfs_clear_li_failed(lip);
	}

	xfs_buf_unlock(bp);
	return XFS_ITEM_SUCCESS;
}
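
/*
 * Illustration of the ordering hazard described above (hypothetical; do
 * NOT do this): clearing the failed state first may drop the last buffer
 * reference, freeing the buffer before it is queued:
 *
 *	xfs_clear_li_failed(lip);		may drop the final bp ref
 *	xfs_buf_delwri_queue(bp, buffer_list);	bp may already be freed
 *
 * Queueing first guarantees the delwri queue holds a buffer reference
 * across the failed-state clearing.
 */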

static inline uint
xfsaild_push_item(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	/*
	 * If log item pinning is enabled, skip the push and track the item as
	 * pinned. This can help induce head-behind-tail conditions.
	 */
	if (XFS_TEST_ERROR(false, ailp->ail_mount, XFS_ERRTAG_LOG_ITEM_PIN))
		return XFS_ITEM_PINNED;

	/*
	 * Consider the item pinned if a push callback is not defined so the
	 * caller will force the log. This should only happen for intent items
	 * as they are unpinned once the associated done item is committed to
	 * the on-disk log.
	 */
	if (!lip->li_ops->iop_push)
		return XFS_ITEM_PINNED;
	if (test_bit(XFS_LI_FAILED, &lip->li_flags))
		return xfsaild_resubmit_item(lip, &ailp->ail_buf_list);
	return lip->li_ops->iop_push(lip, &ailp->ail_buf_list);
}

static long
xfsaild_push(
	struct xfs_ail		*ailp)
{
	xfs_mount_t		*mp = ailp->ail_mount;
	struct xfs_ail_cursor	cur;
	struct xfs_log_item	*lip;
	xfs_lsn_t		lsn;
	xfs_lsn_t		target;
	long			tout;
	int			stuck = 0;
	int			flushing = 0;
	int			count = 0;

	/*
	 * If we encountered pinned items or did not finish writing out all
	 * buffers the last time we ran, force a background CIL push to get the
	 * items unpinned in the near future. We do not wait on the CIL push as
	 * that could stall us for seconds if there is enough background IO
	 * load. Stalling for that long when the tail of the log is pinned and
	 * needs flushing will hard stop the transaction subsystem when log
	 * space runs out.
	 */
	if (ailp->ail_log_flush && ailp->ail_last_pushed_lsn == 0 &&
	    (!list_empty_careful(&ailp->ail_buf_list) ||
	     xfs_ail_min_lsn(ailp))) {
		ailp->ail_log_flush = 0;

		XFS_STATS_INC(mp, xs_push_ail_flush);
		xlog_cil_flush(mp->m_log);
	}

	spin_lock(&ailp->ail_lock);

	/* barrier matches the ail_target update in xfs_ail_push() */
	smp_rmb();
	target = ailp->ail_target;
	ailp->ail_target_prev = target;

	/* we're done if the AIL is empty or our push has reached the end */
	lip = xfs_trans_ail_cursor_first(ailp, &cur, ailp->ail_last_pushed_lsn);
	if (!lip)
		goto out_done;

	XFS_STATS_INC(mp, xs_push_ail);

	lsn = lip->li_lsn;
	while ((XFS_LSN_CMP(lip->li_lsn, target) <= 0)) {
		int	lock_result;

		/*
		 * Note that iop_push may unlock and reacquire the AIL lock.  We
		 * rely on the AIL cursor implementation to be able to deal with
		 * the dropped lock.
		 */
		lock_result = xfsaild_push_item(ailp, lip);
		switch (lock_result) {
		case XFS_ITEM_SUCCESS:
			XFS_STATS_INC(mp, xs_push_ail_success);
			trace_xfs_ail_push(lip);

			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_FLUSHING:
			/*
			 * The item or its backing buffer is already being
			 * flushed.  The typical reason for that is that an
			 * inode buffer is locked because we already pushed the
			 * updates to it as part of inode clustering.
			 *
			 * We do not want to stop flushing just because lots
			 * of items are already being flushed, but we need to
			 * re-try the flushing relatively soon if most of the
			 * AIL is being flushed.
			 */
			XFS_STATS_INC(mp, xs_push_ail_flushing);
			trace_xfs_ail_flushing(lip);

			flushing++;
			ailp->ail_last_pushed_lsn = lsn;
			break;

		case XFS_ITEM_PINNED:
			XFS_STATS_INC(mp, xs_push_ail_pinned);
			trace_xfs_ail_pinned(lip);

			stuck++;
			ailp->ail_log_flush++;
			break;
		case XFS_ITEM_LOCKED:
			XFS_STATS_INC(mp, xs_push_ail_locked);
			trace_xfs_ail_locked(lip);

			stuck++;
			break;
		default:
			ASSERT(0);
			break;
		}

		count++;

		/*
		 * Are there too many items we can't do anything with?
		 *
		 * If we are skipping too many items because we can't flush
		 * them or they are already being flushed, we back off and
		 * give them time to complete whatever operation is being
		 * done. i.e. remove pressure from the AIL while we can't make
		 * progress so traversals don't slow down further inserts and
		 * removals to/from the AIL.
		 *
		 * The value of 100 is an arbitrary magic number based on
		 * observation.
		 */
		if (stuck > 100)
			break;

		lip = xfs_trans_ail_cursor_next(ailp, &cur);
		if (lip == NULL)
			break;
		lsn = lip->li_lsn;
	}

out_done:
	xfs_trans_ail_cursor_done(&cur);
	spin_unlock(&ailp->ail_lock);

	if (xfs_buf_delwri_submit_nowait(&ailp->ail_buf_list))
		ailp->ail_log_flush++;

	if (!count || XFS_LSN_CMP(lsn, target) >= 0) {
		/*
		 * We reached the target or the AIL is empty, so wait a bit
		 * longer for I/O to complete and remove pushed items from the
		 * AIL before we start the next scan from the start of the AIL.
		 */
		tout = 50;
		ailp->ail_last_pushed_lsn = 0;
	} else if (((stuck + flushing) * 100) / count > 90) {
		/*
		 * Either there is a lot of contention on the AIL or we are
		 * stuck due to operations in progress. "Stuck" in this case
		 * is defined as >90% of the items we tried to push were stuck.
		 *
		 * Backoff a bit more to allow some I/O to complete before
		 * restarting from the start of the AIL. This prevents us from
		 * spinning on the same items, and if they are pinned allows
		 * the restart to issue a log force to unpin the stuck items.
		 */
		tout = 20;
		ailp->ail_last_pushed_lsn = 0;
	} else {
		/*
		 * Assume we have more work to do in a short while.
		 */
		tout = 10;
	}

	return tout;
}

static int
xfsaild(
	void		*data)
{
	struct xfs_ail	*ailp = data;
	long		tout = 0;	/* milliseconds */
	unsigned int	noreclaim_flag;

	noreclaim_flag = memalloc_noreclaim_save();
	set_freezable();

	while (1) {
		if (tout && tout <= 20)
			set_current_state(TASK_KILLABLE);
		else
			set_current_state(TASK_INTERRUPTIBLE);

		/*
		 * Check kthread_should_stop() after we set the task state to
		 * guarantee that we either see the stop bit and exit or the
		 * task state is reset to runnable such that it's not scheduled
		 * out indefinitely and detects the stop bit at next iteration.
		 * A memory barrier is included in above task state set to
		 * serialize against kthread_stop().
		 */
		if (kthread_should_stop()) {
			__set_current_state(TASK_RUNNING);

			/*
			 * The caller forces out the AIL before stopping the
			 * thread in the common case, which means the delwri
			 * queue is drained. In the shutdown case, the queue may
			 * still hold relogged buffers that haven't been
			 * submitted because they were pinned since added to the
			 * queue.
			 *
			 * Log I/O error processing stales the underlying buffer
			 * and clears the delwri state, expecting the buf to be
			 * removed on the next submission attempt. That won't
			 * happen if we're shutting down, so this is the last
			 * opportunity to release such buffers from the queue.
			 */
			ASSERT(list_empty(&ailp->ail_buf_list) ||
			       XFS_FORCED_SHUTDOWN(ailp->ail_mount));
			xfs_buf_delwri_cancel(&ailp->ail_buf_list);
			break;
		}

		spin_lock(&ailp->ail_lock);

		/*
		 * Idle if the AIL is empty and we are not racing with a target
		 * update. We check the AIL after we set the task to a sleep
		 * state to guarantee that we either catch an ail_target update
		 * or that a wake_up resets the state to TASK_RUNNING.
		 * Otherwise, we run the risk of sleeping indefinitely.
		 *
		 * The barrier matches the ail_target update in xfs_ail_push().
		 */
		smp_rmb();
		if (!xfs_ail_min(ailp) &&
		    ailp->ail_target == ailp->ail_target_prev &&
		    list_empty(&ailp->ail_buf_list)) {
			spin_unlock(&ailp->ail_lock);
			freezable_schedule();
			tout = 0;
			continue;
		}
		spin_unlock(&ailp->ail_lock);

		if (tout)
			freezable_schedule_timeout(msecs_to_jiffies(tout));

		__set_current_state(TASK_RUNNING);

		try_to_freeze();

		tout = xfsaild_push(ailp);
	}

	memalloc_noreclaim_restore(noreclaim_flag);
	return 0;
}

/*
 * This routine is called to move the tail of the AIL forward.  It does this by
 * trying to flush items in the AIL whose lsns are below the given
 * threshold_lsn.
 *
 * The push is run asynchronously by the xfsaild kthread, which means the
 * caller needs to handle waiting on the async flush for space to become
 * available.  We only update the push target and wake the thread when the new
 * threshold is beyond the current target, so a push already in progress is
 * never interrupted.
 *
 * We do this unlocked - we only need to know whether there is anything in the
 * AIL at the time we are called. We don't need to access the contents of
 * any of the objects, so the lock is not needed.
 */
void
xfs_ail_push(
	struct xfs_ail		*ailp,
	xfs_lsn_t		threshold_lsn)
{
	struct xfs_log_item	*lip;

	lip = xfs_ail_min(ailp);
	if (!lip || XFS_FORCED_SHUTDOWN(ailp->ail_mount) ||
	    XFS_LSN_CMP(threshold_lsn, ailp->ail_target) <= 0)
		return;

	/*
	 * Ensure that the new target is noticed by the push code before the
	 * xfsaild is woken; the barriers pair with the smp_rmb() calls in
	 * xfsaild() and xfsaild_push().
	 */
	smp_wmb();
	xfs_trans_ail_copy_lsn(ailp, &ailp->ail_target, &threshold_lsn);
	smp_wmb();

	wake_up_process(ailp->ail_task);
}
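
/*
 * Barrier pairing sketch (illustration only):
 *
 *	xfs_ail_push()				xfsaild()/xfsaild_push()
 *	--------------				------------------------
 *	update ail_target
 *	smp_wmb()				smp_rmb()
 *	wake_up_process(ail_task)		read ail_target/ail_target_prev
 *
 * A woken xfsaild is thereby guaranteed to observe the updated target
 * rather than idling on a stale one.
 */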

/*
 * Push out all items in the AIL immediately
 */
void
xfs_ail_push_all(
	struct xfs_ail		*ailp)
{
	xfs_lsn_t		threshold_lsn = xfs_ail_max_lsn(ailp);

	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}

/*
 * Push out all items in the AIL immediately and wait until the AIL is empty.
 */
void
xfs_ail_push_all_sync(
	struct xfs_ail		*ailp)
{
	struct xfs_log_item	*lip;
	DEFINE_WAIT(wait);

	spin_lock(&ailp->ail_lock);
	while ((lip = xfs_ail_max(ailp)) != NULL) {
		prepare_to_wait(&ailp->ail_empty, &wait, TASK_UNINTERRUPTIBLE);
		ailp->ail_target = lip->li_lsn;
		wake_up_process(ailp->ail_task);
		spin_unlock(&ailp->ail_lock);
		schedule();
		spin_lock(&ailp->ail_lock);
	}
	spin_unlock(&ailp->ail_lock);

	finish_wait(&ailp->ail_empty, &wait);
}

void
xfs_ail_update_finish(
	struct xfs_ail		*ailp,
	xfs_lsn_t		old_lsn) __releases(ailp->ail_lock)
{
	struct xfs_mount	*mp = ailp->ail_mount;

	/* if the tail lsn hasn't changed, don't do updates or wakeups. */
	if (!old_lsn || old_lsn == __xfs_ail_min_lsn(ailp)) {
		spin_unlock(&ailp->ail_lock);
		return;
	}

	if (!XFS_FORCED_SHUTDOWN(mp))
		xlog_assign_tail_lsn_locked(mp);

	if (list_empty(&ailp->ail_head))
		wake_up_all(&ailp->ail_empty);
	spin_unlock(&ailp->ail_lock);
	xfs_log_space_wake(mp);
}

/*
 * xfs_trans_ail_update_bulk - bulk AIL insertion operation.
 *
 * @xfs_trans_ail_update_bulk takes an array of log items that all need to be
 * positioned at the same LSN in the AIL. If an item is not in the AIL, it will
 * be added.  Otherwise, it will be repositioned by removing it and re-adding
 * it to the AIL. If we move the first item in the AIL, update the log tail to
 * match the new minimum LSN in the AIL.
 *
 * This function executes the update operations on all the items in the array
 * under a single hold of the AIL lock. Items may have been relogged before
 * the caller took the lock, so we check each log item's LSN to confirm it
 * still needs to be moved forward in the AIL.
 *
 * To optimise the insert operation, we delete all the items from the AIL in
 * the first pass, moving them into a temporary list, then splice the temporary
 * list into the correct position in the AIL. This avoids needing to do an
 * insert operation on every item.
 *
 * This function must be called with the AIL lock held.  The lock is dropped
 * by xfs_ail_update_finish() before returning.
 */
void
xfs_trans_ail_update_bulk(
	struct xfs_ail		*ailp,
	struct xfs_ail_cursor	*cur,
	struct xfs_log_item	**log_items,
	int			nr_items,
	xfs_lsn_t		lsn) __releases(ailp->ail_lock)
{
	struct xfs_log_item	*mlip;
	xfs_lsn_t		tail_lsn = 0;
	int			i;
	LIST_HEAD(tmp);

	ASSERT(nr_items > 0);		/* Not required, but true. */
	mlip = xfs_ail_min(ailp);

	for (i = 0; i < nr_items; i++) {
		struct xfs_log_item *lip = log_items[i];
		if (test_and_set_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
			/* check if we really need to move the item */
			if (XFS_LSN_CMP(lsn, lip->li_lsn) <= 0)
				continue;

			trace_xfs_ail_move(lip, lip->li_lsn, lsn);
			if (mlip == lip && !tail_lsn)
				tail_lsn = lip->li_lsn;

			xfs_ail_delete(ailp, lip);
		} else {
			trace_xfs_ail_insert(lip, 0, lsn);
		}
		lip->li_lsn = lsn;
		list_add(&lip->li_ail, &tmp);
	}

	if (!list_empty(&tmp))
		xfs_ail_splice(ailp, cur, &tmp, lsn);

	xfs_ail_update_finish(ailp, tail_lsn);
}

/* Insert a log item into the AIL. */
void
xfs_trans_ail_insert(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	spin_lock(&ailp->ail_lock);
	xfs_trans_ail_update_bulk(ailp, NULL, &lip, 1, lsn);
}

/*
 * Delete one log item from the AIL.
 *
 * If this item was at the tail of the AIL, return the LSN of the log item so
 * that we can use it to check if the LSN of the tail of the log has moved
 * when finishing up the AIL delete process in xfs_ail_update_finish().
 */
xfs_lsn_t
xfs_ail_delete_one(
	struct xfs_ail		*ailp,
	struct xfs_log_item	*lip)
{
	struct xfs_log_item	*mlip = xfs_ail_min(ailp);
	xfs_lsn_t		lsn = lip->li_lsn;

	trace_xfs_ail_delete(lip, mlip->li_lsn, lip->li_lsn);
	xfs_ail_delete(ailp, lip);
	clear_bit(XFS_LI_IN_AIL, &lip->li_flags);
	lip->li_lsn = 0;

	if (mlip == lip)
		return lsn;
	return 0;
}

void
xfs_trans_ail_delete(
	struct xfs_log_item	*lip,
	int			shutdown_type)
{
	struct xfs_ail		*ailp = lip->li_ailp;
	struct xfs_mount	*mp = ailp->ail_mount;
	xfs_lsn_t		tail_lsn;

	spin_lock(&ailp->ail_lock);
	if (!test_bit(XFS_LI_IN_AIL, &lip->li_flags)) {
		spin_unlock(&ailp->ail_lock);
		if (shutdown_type && !XFS_FORCED_SHUTDOWN(mp)) {
			xfs_alert_tag(mp, XFS_PTAG_AILDELETE,
	"%s: attempting to delete a log item that is not in the AIL",
					__func__);
			xfs_force_shutdown(mp, shutdown_type);
		}
		return;
	}

	/* xfs_ail_update_finish() drops the AIL lock */
	xfs_clear_li_failed(lip);
	tail_lsn = xfs_ail_delete_one(ailp, lip);
	xfs_ail_update_finish(ailp, tail_lsn);
}

int
xfs_trans_ail_init(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp;

	ailp = kmem_zalloc(sizeof(struct xfs_ail), KM_MAYFAIL);
	if (!ailp)
		return -ENOMEM;

	ailp->ail_mount = mp;
	INIT_LIST_HEAD(&ailp->ail_head);
	INIT_LIST_HEAD(&ailp->ail_cursors);
	spin_lock_init(&ailp->ail_lock);
	INIT_LIST_HEAD(&ailp->ail_buf_list);
	init_waitqueue_head(&ailp->ail_empty);

	ailp->ail_task = kthread_run(xfsaild, ailp, "xfsaild/%s",
			ailp->ail_mount->m_super->s_id);
	if (IS_ERR(ailp->ail_task))
		goto out_free_ailp;

	mp->m_ail = ailp;
	return 0;

out_free_ailp:
	kmem_free(ailp);
	return -ENOMEM;
}
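
/*
 * Lifecycle sketch (hypothetical mount-path caller, error unwinding
 * elided): xfs_trans_ail_init() and xfs_trans_ail_destroy() pair up around
 * the life of the mount:
 *
 *	error = xfs_trans_ail_init(mp);
 *	if (error)
 *		return error;
 *	...
 *	xfs_trans_ail_destroy(mp);
 */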

void
xfs_trans_ail_destroy(
	xfs_mount_t	*mp)
{
	struct xfs_ail	*ailp = mp->m_ail;

	kthread_stop(ailp->ail_task);
	kmem_free(ailp);
}