/*
 * Copyright (c) 2000-2005 Silicon Graphics, Inc.
 * All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it would be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write the Free Software Foundation,
 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include "xfs.h"
#include "xfs_fs.h"
#include "xfs_types.h"
#include "xfs_bit.h"
#include "xfs_log.h"
#include "xfs_inum.h"
#include "xfs_trans.h"
#include "xfs_sb.h"
#include "xfs_ag.h"
#include "xfs_mount.h"
#include "xfs_bmap_btree.h"
#include "xfs_inode.h"
#include "xfs_dinode.h"
#include "xfs_error.h"
#include "xfs_filestream.h"
#include "xfs_vnodeops.h"
#include "xfs_inode_item.h"
#include "xfs_quota.h"
#include "xfs_trace.h"
#include "xfs_fsops.h"

#include <linux/kthread.h>
#include <linux/freezer.h>


STATIC xfs_inode_t *
xfs_inode_ag_lookup(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	uint32_t		*first_index,
	int			tag)
{
	int			nr_found;
	struct xfs_inode	*ip;

	/*
	 * Use a gang lookup to find the next inode in the tree, as the
	 * tree is sparse and a gang lookup walks the tree until it has
	 * found the requested number of objects.
	 */
	if (tag == XFS_ICI_NO_TAG) {
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1);
	} else {
		nr_found = radix_tree_gang_lookup_tag(&pag->pag_ici_root,
				(void **)&ip, *first_index, 1, tag);
	}
	if (!nr_found)
		return NULL;

	/*
	 * Update the index for the next lookup. Catch overflows
	 * into the next AG range which can occur if we have inodes
	 * in the last block of the AG and we are currently
	 * pointing to the last inode.
	 */
	*first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
	if (*first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
		return NULL;
	return ip;
}
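
/*
 * A minimal worked example of the overflow check above (assuming, purely
 * for illustration, an AG whose agino values run 0..N): if ip->i_ino is
 * the last inode in the AG, then ip->i_ino + 1 falls into the next AG and
 * XFS_INO_TO_AGINO() masks it back down to a small value. The new index
 * therefore compares lower than the old one and the walk terminates,
 * instead of wrapping around and rescanning this AG from the start.
 */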

STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive,
	int			*nr_to_scan)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;

restart:
	skipped = 0;
	first_index = 0;
	do {
		int		error = 0;
		xfs_inode_t	*ip;

		if (exclusive)
			write_lock(&pag->pag_ici_lock);
		else
			read_lock(&pag->pag_ici_lock);
		ip = xfs_inode_ag_lookup(mp, pag, &first_index, tag);
		if (!ip) {
			if (exclusive)
				write_unlock(&pag->pag_ici_lock);
			else
				read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* execute releases pag->pag_ici_lock */
		error = execute(ip, pag, flags);
		if (error == EAGAIN) {
			skipped++;
			continue;
		}
		if (error)
			last_error = error;

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

	} while ((*nr_to_scan)--);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}

/*
 * Select the next per-ag structure to iterate during the walk. The reclaim
 * walk is optimised only to walk AGs with reclaimable inodes in them.
 */
static struct xfs_perag *
xfs_inode_ag_iter_next_pag(
	struct xfs_mount	*mp,
	xfs_agnumber_t		*first,
	int			tag)
{
	struct xfs_perag	*pag = NULL;

	if (tag == XFS_ICI_RECLAIM_TAG) {
		int found;
		int ref;

		spin_lock(&mp->m_perag_lock);
		found = radix_tree_gang_lookup_tag(&mp->m_perag_tree,
				(void **)&pag, *first, 1, tag);
		if (found <= 0) {
			spin_unlock(&mp->m_perag_lock);
			return NULL;
		}
		*first = pag->pag_agno + 1;
		/*
		 * Open coded pag reference increment: m_perag_lock is
		 * already held here, so take the reference directly
		 * rather than going through xfs_perag_get(), which
		 * performs its own lookup of the perag tree.
		 */
		ref = atomic_inc_return(&pag->pag_ref);
		spin_unlock(&mp->m_perag_lock);
		trace_xfs_perag_get_reclaim(mp, pag->pag_agno, ref, _RET_IP_);
	} else {
		pag = xfs_perag_get(mp, *first);
		(*first)++;
	}
	return pag;
}

int
xfs_inode_ag_iterator(
	struct xfs_mount	*mp,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags,
	int			tag,
	int			exclusive,
	int			*nr_to_scan)
{
	struct xfs_perag	*pag;
	int			error = 0;
	int			last_error = 0;
	xfs_agnumber_t		ag;
	int			nr;

	nr = nr_to_scan ? *nr_to_scan : INT_MAX;
	ag = 0;
	while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag, tag))) {
		error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
						exclusive, &nr);
		xfs_perag_put(pag);
		if (error) {
			last_error = error;
			if (error == EFSCORRUPTED)
				break;
		}
		if (nr <= 0)
			break;
	}
	if (nr_to_scan)
		*nr_to_scan = nr;
	return XFS_ERROR(last_error);
}
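
/*
 * A minimal sketch of an execute callback for a non-exclusive walk (the
 * function and its body are hypothetical, for illustration only). The
 * callback is entered with pag->pag_ici_lock held and must drop it;
 * xfs_sync_inode_valid() below does that on its behalf, and also takes an
 * inode reference that the callback must release:
 *
 *	STATIC int
 *	xfs_example_execute(
 *		struct xfs_inode	*ip,
 *		struct xfs_perag	*pag,
 *		int			flags)
 *	{
 *		int error = xfs_sync_inode_valid(ip, pag);
 *		if (error)
 *			return error;
 *		(operate on the validated, referenced inode here)
 *		IRELE(ip);
 *		return 0;
 *	}
 *
 * Returning EAGAIN makes xfs_inode_ag_walk() retry the whole AG after a
 * short delay; EFSCORRUPTED aborts the iteration.
 */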

/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);
	int			error = EFSCORRUPTED;

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_unlock;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	error = ENOENT;
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		goto out_unlock;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		goto out_unlock;
	}

	/* inode is valid */
	error = 0;
out_unlock:
	read_unlock(&pag->pag_ici_lock);
	return error;
}

STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space *mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XBF_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}

STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, flags);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}

/*
 * Write out pagecache data for the whole filesystem.
 */
STATIC int
xfs_sync_data(
	struct xfs_mount	*mp,
	int			flags)
{
	int			error;

	ASSERT((flags & ~(SYNC_TRYLOCK|SYNC_WAIT)) == 0);

	error = xfs_inode_ag_iterator(mp, xfs_sync_inode_data, flags,
				      XFS_ICI_NO_TAG, 0, NULL);
	if (error)
		return XFS_ERROR(error);

	xfs_log_force(mp, (flags & SYNC_WAIT) ? XFS_LOG_SYNC : 0);
	return 0;
}

/*
 * Write out inode metadata (attributes) for the whole filesystem.
 */
STATIC int
xfs_sync_attr(
	struct xfs_mount	*mp,
	int			flags)
{
	ASSERT((flags & ~SYNC_WAIT) == 0);

	return xfs_inode_ag_iterator(mp, xfs_sync_inode_attr, flags,
				     XFS_ICI_NO_TAG, 0, NULL);
}

STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	/*
	 * If the buffer is pinned then push on the log so we won't get stuck
	 * waiting in the write for someone, maybe ourselves, to flush the log.
	 *
	 * Even though we just pushed the log above, we did not have the
	 * superblock buffer locked at that point so it can become pinned in
	 * between there and here.
	 */
	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	return xfs_bwrite(mp, bp);
}

/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. This first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp, SYNC_WAIT);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}

STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int	count = 0, pincount;

	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	/*
	 * This loop must run at least twice.  The first instance of the loop
	 * will flush most metadata, but that will generate more metadata
	 * (typically directory updates), which then must be flushed and
	 * logged before we can write the unmount record. We also do sync
	 * reclaim of inodes to catch any that the above delwri flush skipped.
	 */
	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pincount) {
			delay(50);
			count++;
		}
	} while (count < 2);
}

/*
 * Second stage of a quiesce. The data is already synced, now we have to take
 * care of the metadata. New transactions are already blocked, so we need to
 * wait for any remaining transactions to drain out before proceeding.
 */
void
xfs_quiesce_attr(
	struct xfs_mount	*mp)
{
	int	error = 0;

	/* wait for all modifications to complete */
	while (atomic_read(&mp->m_active_trans) > 0)
		delay(100);

	/* flush inodes and push all remaining buffers out to disk */
	xfs_quiesce_fs(mp);

	/*
	 * Just warn here till VFS can correctly support
	 * read-only remount without racing.
	 */
	WARN_ON(atomic_read(&mp->m_active_trans) != 0);

	/* Push the superblock and write an unmount record */
	error = xfs_log_sbcount(mp, 1);
	if (error)
		xfs_fs_cmn_err(CE_WARN, mp,
				"xfs_attr_quiesce: failed to log sb changes. "
				"Frozen image may not be consistent.");
	xfs_log_unmount_write(mp);
	xfs_unmountfs_writesb(mp);
}

/*
 * Enqueue a work item to be picked up by the vfs xfssyncd thread.
 * Doing this has two advantages:
 * - It saves on stack space, which is tight in certain situations
 * - It can be used (with care) as a mechanism to avoid deadlocks.
 * Flushing while allocating in a full filesystem requires both.
 */
STATIC void
xfs_syncd_queue_work(
	struct xfs_mount *mp,
	void		*data,
	void		(*syncer)(struct xfs_mount *, void *),
	struct completion *completion)
{
	struct xfs_sync_work *work;

	work = kmem_alloc(sizeof(struct xfs_sync_work), KM_SLEEP);
	INIT_LIST_HEAD(&work->w_list);
	work->w_syncer = syncer;
	work->w_data = data;
	work->w_mount = mp;
	work->w_completion = completion;
	spin_lock(&mp->m_sync_lock);
	list_add_tail(&work->w_list, &mp->m_sync_list);
	spin_unlock(&mp->m_sync_lock);
	wake_up_process(mp->m_sync_task);
}

/*
 * Flush delayed allocate data, attempting to free up reserved space
 * from existing allocations.  At this point a new allocation attempt
 * has failed with ENOSPC and we are in the process of scratching our
 * heads, looking about for more room...
 */
STATIC void
xfs_flush_inodes_work(
	struct xfs_mount *mp,
	void		*arg)
{
	struct inode	*inode = arg;
	xfs_sync_data(mp, SYNC_TRYLOCK);
	xfs_sync_data(mp, SYNC_TRYLOCK | SYNC_WAIT);
	iput(inode);
}

void
xfs_flush_inodes(
	xfs_inode_t	*ip)
{
	struct inode	*inode = VFS_I(ip);
	DECLARE_COMPLETION_ONSTACK(completion);

	igrab(inode);
	xfs_syncd_queue_work(ip->i_mount, inode, xfs_flush_inodes_work,
				&completion);
	wait_for_completion(&completion);
	xfs_log_force(ip->i_mount, XFS_LOG_SYNC);
}

/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct xfs_mount *mp,
	void		*unused)
{
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		xfs_log_force(mp, 0);
		xfs_reclaim_inodes(mp, 0);
		/* dgc: errors ignored here */
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp, 0);
	}
	mp->m_sync_seq++;
	wake_up(&mp->m_wait_single_sync_task);
}

STATIC int
xfssyncd(
	void			*arg)
{
	struct xfs_mount	*mp = arg;
	long			timeleft;
	xfs_sync_work_t		*work, *n;
	LIST_HEAD(tmp);

	set_freezable();
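	/*
	 * xfs_syncd_centisecs is in hundredths of a second; multiplying by
	 * msecs_to_jiffies(10) converts it to jiffies (with the default of
	 * 3000 centisecs this gives the usual 30 second sync period).
	 */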
	timeleft = xfs_syncd_centisecs * msecs_to_jiffies(10);
	for (;;) {
		if (list_empty(&mp->m_sync_list))
			timeleft = schedule_timeout_interruptible(timeleft);
		/* swsusp */
		try_to_freeze();
		if (kthread_should_stop() && list_empty(&mp->m_sync_list))
			break;

		spin_lock(&mp->m_sync_lock);
		/*
		 * We can get woken by laptop mode, to do a sync -
		 * that's the (only!) case where the list would be
		 * empty with time remaining.
		 */
		if (!timeleft || list_empty(&mp->m_sync_list)) {
			if (!timeleft)
				timeleft = xfs_syncd_centisecs *
							msecs_to_jiffies(10);
			INIT_LIST_HEAD(&mp->m_sync_work.w_list);
			list_add_tail(&mp->m_sync_work.w_list,
					&mp->m_sync_list);
		}
		list_splice_init(&mp->m_sync_list, &tmp);
		spin_unlock(&mp->m_sync_lock);

		list_for_each_entry_safe(work, n, &tmp, w_list) {
			(*work->w_syncer)(mp, work->w_data);
			list_del(&work->w_list);
			if (work == &mp->m_sync_work)
				continue;
			if (work->w_completion)
				complete(work->w_completion);
			kmem_free(work);
		}
	}

	return 0;
}

int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	mp->m_sync_work.w_syncer = xfs_sync_worker;
	mp->m_sync_work.w_mount = mp;
	mp->m_sync_work.w_completion = NULL;
	mp->m_sync_task = kthread_run(xfssyncd, mp, "xfssyncd/%s", mp->m_fsname);
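	/*
	 * kthread_run() returns an ERR_PTR() on failure; PTR_ERR() yields a
	 * negative errno, so negate it to match the positive error codes
	 * used throughout XFS.
	 */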
	if (IS_ERR(mp->m_sync_task))
		return -PTR_ERR(mp->m_sync_task);
	return 0;
}

void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	kthread_stop(mp->m_sync_task);
}

void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
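
/*
 * The net effect is a two-level tag: pag_ici_root tracks which inodes in
 * an AG are reclaimable, while m_perag_tree tracks which AGs contain any
 * reclaimable inodes at all, which is what lets
 * xfs_inode_ag_iter_next_pag() skip AGs with nothing to reclaim.
 */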

/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
	xfs_inode_t	*ip)
{
	struct xfs_mount *mp = ip->i_mount;
	struct xfs_perag *pag;

	pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
	write_lock(&pag->pag_ici_lock);
	spin_lock(&ip->i_flags_lock);
	__xfs_inode_set_reclaim_tag(pag, ip);
	__xfs_iflags_set(ip, XFS_IRECLAIMABLE);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);
	xfs_perag_put(pag);
}

STATIC void
__xfs_inode_clear_reclaim(
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	pag->pag_ici_reclaimable--;
	if (!pag->pag_ici_reclaimable) {
		/* clear the reclaim tag from the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);
		trace_xfs_perag_clear_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
}

void
__xfs_inode_clear_reclaim_tag(
	xfs_mount_t	*mp,
	xfs_perag_t	*pag,
	xfs_inode_t	*ip)
{
	radix_tree_tag_clear(&pag->pag_ici_root,
			XFS_INO_TO_AGINO(mp, ip->i_ino), XFS_ICI_RECLAIM_TAG);
	__xfs_inode_clear_reclaim(pag, ip);
}

/*
 * Inodes in different states need to be treated differently, and the return
 * value of xfs_iflush is not sufficient to get this right. The following table
 * lists the inode states and the reclaim actions necessary for non-blocking
 * reclaim:
 *
 *	inode state	     iflush ret		required action
 *      ---------------      ----------         ---------------
 *	bad			-		reclaim
 *	shutdown		EIO		unpin and reclaim
 *	clean, unpinned		0		reclaim
 *	stale, unpinned		0		reclaim
 *	clean, pinned(*)	0		requeue
 *	stale, pinned		EAGAIN		requeue
 *	dirty, delwri ok	0		requeue
 *	dirty, delwri blocked	EAGAIN		requeue
 *	dirty, sync flush	0		reclaim
 *
 * (*) dgc: I don't think the clean, pinned state is possible but it gets
 * handled anyway given the order of checks implemented.
 *
 * As can be seen from the table, the return value of xfs_iflush() is not
 * sufficient to correctly decide the reclaim action here. The checks in
 * xfs_iflush() might look like duplicates, but they are not.
 *
 * Also, because we get the flush lock first, we know that any inode that has
 * been flushed delwri has had the flush completed by the time we check that
 * the inode is clean. The clean inode check needs to be done before flushing
 * the inode delwri otherwise we would loop forever requeuing clean inodes as
 * we cannot tell apart a successful delwri flush and a clean inode from the
 * return value of xfs_iflush().
 *
 * Note that because the inode is flushed delayed write by background
 * writeback, the flush lock may already be held here and waiting on it can
 * result in very long latencies. Hence for sync reclaims, where we wait on the
 * flush lock, the caller should push out delayed write inodes first before
 * trying to reclaim them to minimise the amount of time spent waiting. For
 * background reclaim, we just requeue the inode for the next pass.
 *
 * Hence the order of actions after gaining the locks should be:
 *	bad		=> reclaim
 *	shutdown	=> unpin and reclaim
 *	pinned, delwri	=> requeue
 *	pinned, sync	=> unpin
 *	stale		=> reclaim
 *	clean		=> reclaim
 *	dirty, delwri	=> flush and requeue
 *	dirty, sync	=> flush, wait and reclaim
 */
STATIC int
xfs_reclaim_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			sync_mode)
{
	int	error = 0;

	/*
	 * The radix tree lock here protects a thread in xfs_iget from racing
	 * with us starting reclaim on the inode.  Once we have the
	 * XFS_IRECLAIM flag set it will not touch us.
	 */
	spin_lock(&ip->i_flags_lock);
	ASSERT_ALWAYS(__xfs_iflags_test(ip, XFS_IRECLAIMABLE));
	if (__xfs_iflags_test(ip, XFS_IRECLAIM)) {
		/* ignore as it is already under reclaim */
		spin_unlock(&ip->i_flags_lock);
		write_unlock(&pag->pag_ici_lock);
		return 0;
	}
	__xfs_iflags_set(ip, XFS_IRECLAIM);
	spin_unlock(&ip->i_flags_lock);
	write_unlock(&pag->pag_ici_lock);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if (!xfs_iflock_nowait(ip)) {
		if (!(sync_mode & SYNC_WAIT))
			goto out;
		xfs_iflock(ip);
	}

	if (is_bad_inode(VFS_I(ip)))
		goto reclaim;
	if (XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_iunpin_wait(ip);
		goto reclaim;
	}
	if (xfs_ipincount(ip)) {
		if (!(sync_mode & SYNC_WAIT)) {
			xfs_ifunlock(ip);
			goto out;
		}
		xfs_iunpin_wait(ip);
	}
	if (xfs_iflags_test(ip, XFS_ISTALE))
		goto reclaim;
	if (xfs_inode_clean(ip))
		goto reclaim;

	/* Now we have an inode that needs flushing */
	error = xfs_iflush(ip, sync_mode);
	if (sync_mode & SYNC_WAIT) {
		xfs_iflock(ip);
		goto reclaim;
	}

	/*
	 * When we have to flush an inode but don't have SYNC_WAIT set, we
	 * flush the inode out using a delwri buffer and wait for the next
	 * call into reclaim to find it in a clean state instead of waiting for
	 * it now. We also don't return errors here - if the error is transient
	 * then the next reclaim pass will flush the inode, and if the error
	 * is permanent then the next sync reclaim will reclaim the inode and
	 * pass on the error.
	 */
	if (error && error != EAGAIN && !XFS_FORCED_SHUTDOWN(ip->i_mount)) {
		xfs_fs_cmn_err(CE_WARN, ip->i_mount,
			"inode 0x%llx background reclaim flush failed with %d",
			(long long)ip->i_ino, error);
	}
out:
	xfs_iflags_clear(ip, XFS_IRECLAIM);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	/*
	 * We could return EAGAIN here to make reclaim rescan the inode tree in
	 * a short while. However, this just burns CPU time scanning the tree
	 * waiting for IO to complete and xfssyncd never goes back to the idle
	 * state. Instead, return 0 to let the next scheduled background reclaim
	 * attempt to reclaim the inode again.
	 */
	return 0;

reclaim:
	xfs_ifunlock(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	XFS_STATS_INC(xs_ig_reclaims);
	/*
	 * Remove the inode from the per-AG radix tree.
	 *
	 * Because radix_tree_delete won't complain even if the item was never
	 * added to the tree, assert that it has been there before, to catch
	 * problems with the inode lifetime early on.
	 */
	write_lock(&pag->pag_ici_lock);
	if (!radix_tree_delete(&pag->pag_ici_root,
				XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino)))
		ASSERT(0);
	__xfs_inode_clear_reclaim(pag, ip);
	write_unlock(&pag->pag_ici_lock);

	/*
	 * Here we do an (almost) spurious inode lock in order to coordinate
	 * with inode cache radix tree lookups.  This is because the lookup
	 * can reference the inodes in the cache without taking references.
	 *
	 * We make that OK here by ensuring that we wait until the inode is
	 * unlocked after the lookup before we go ahead and free it.  We get
	 * both the ilock and the iolock because the code may need to drop the
	 * ilock one but will still hold the iolock.
	 */
	xfs_ilock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
	xfs_qm_dqdetach(ip);
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);

	xfs_inode_free(ip);
	return error;
}

int
xfs_reclaim_inodes(
	xfs_mount_t	*mp,
	int		mode)
{
	return xfs_inode_ag_iterator(mp, xfs_reclaim_inode, mode,
					XFS_ICI_RECLAIM_TAG, 1, NULL);
}

/*
 * Shrinker infrastructure.
 */
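/*
 * With the shrinker API of this kernel generation, ->shrink() is called
 * with nr_to_scan == 0 to ask how many objects could be reclaimed, and
 * with a positive count to actually reclaim that many; returning -1 tells
 * the VM to back off (here, when the allocation context cannot recurse
 * into the filesystem, or when a scan did not use up its quota).
 */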
static int
xfs_reclaim_inode_shrink(
	struct shrinker	*shrink,
	int		nr_to_scan,
	gfp_t		gfp_mask)
{
	struct xfs_mount *mp;
	struct xfs_perag *pag;
	xfs_agnumber_t	ag;
	int		reclaimable;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_inode_ag_iterator(mp, xfs_reclaim_inode, 0,
					XFS_ICI_RECLAIM_TAG, 1, &nr_to_scan);
		/* if we don't exhaust the scan, don't bother coming back */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_inode_ag_iter_next_pag(mp, &ag,
					XFS_ICI_RECLAIM_TAG))) {
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}

void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}

void
xfs_inode_shrinker_unregister(
	struct xfs_mount	*mp)
{
	unregister_shrinker(&mp->m_inode_shrink);
}