1/*
2 * Copyright (c) 2000-2006 Silicon Graphics, Inc.
3 * All Rights Reserved.
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it would be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write the Free Software Foundation,
16 * Inc.,  51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
17 */
18#include "xfs.h"
19#include "xfs_fs.h"
20#include "xfs_types.h"
21#include "xfs_bit.h"
22#include "xfs_log.h"
23#include "xfs_inum.h"
24#include "xfs_imap.h"
25#include "xfs_trans.h"
26#include "xfs_trans_priv.h"
27#include "xfs_sb.h"
28#include "xfs_ag.h"
29#include "xfs_dir2.h"
30#include "xfs_dmapi.h"
31#include "xfs_mount.h"
32#include "xfs_bmap_btree.h"
33#include "xfs_alloc_btree.h"
34#include "xfs_ialloc_btree.h"
35#include "xfs_dir2_sf.h"
36#include "xfs_attr_sf.h"
37#include "xfs_dinode.h"
38#include "xfs_inode.h"
39#include "xfs_buf_item.h"
40#include "xfs_inode_item.h"
41#include "xfs_btree.h"
42#include "xfs_alloc.h"
43#include "xfs_ialloc.h"
44#include "xfs_bmap.h"
45#include "xfs_rw.h"
46#include "xfs_error.h"
47#include "xfs_utils.h"
48#include "xfs_dir2_trace.h"
49#include "xfs_quota.h"
50#include "xfs_acl.h"
51
52
53kmem_zone_t *xfs_ifork_zone;
54kmem_zone_t *xfs_inode_zone;
55kmem_zone_t *xfs_chashlist_zone;
56
57/*
58 * Used in xfs_itruncate().  This is the maximum number of extents
59 * freed from a file in a single transaction.
60 */
61#define	XFS_ITRUNC_MAX_EXTENTS	2
62
63STATIC int xfs_iflush_int(xfs_inode_t *, xfs_buf_t *);
64STATIC int xfs_iformat_local(xfs_inode_t *, xfs_dinode_t *, int, int);
65STATIC int xfs_iformat_extents(xfs_inode_t *, xfs_dinode_t *, int);
66STATIC int xfs_iformat_btree(xfs_inode_t *, xfs_dinode_t *, int);
67
68
69#ifdef DEBUG
70/*
71 * Make sure that the extents in the given memory buffer
72 * are valid.
73 */
74STATIC void
75xfs_validate_extents(
76	xfs_ifork_t		*ifp,
77	int			nrecs,
78	int			disk,
79	xfs_exntfmt_t		fmt)
80{
81	xfs_bmbt_rec_t		*ep;
82	xfs_bmbt_irec_t		irec;
83	xfs_bmbt_rec_t		rec;
84	int			i;
85
86	for (i = 0; i < nrecs; i++) {
87		ep = xfs_iext_get_ext(ifp, i);
88		rec.l0 = get_unaligned((__uint64_t*)&ep->l0);
89		rec.l1 = get_unaligned((__uint64_t*)&ep->l1);
90		if (disk)
91			xfs_bmbt_disk_get_all(&rec, &irec);
92		else
93			xfs_bmbt_get_all(&rec, &irec);
94		if (fmt == XFS_EXTFMT_NOSTATE)
95			ASSERT(irec.br_state == XFS_EXT_NORM);
96	}
97}
98#else /* DEBUG */
99#define xfs_validate_extents(ifp, nrecs, disk, fmt)
100#endif /* DEBUG */
101
102/*
103 * Check that none of the inode's in the buffer have a next
104 * unlinked field of 0.
105 */
106#if defined(DEBUG)
107void
108xfs_inobp_check(
109	xfs_mount_t	*mp,
110	xfs_buf_t	*bp)
111{
112	int		i;
113	int		j;
114	xfs_dinode_t	*dip;
115
116	j = mp->m_inode_cluster_size >> mp->m_sb.sb_inodelog;
117
118	for (i = 0; i < j; i++) {
119		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
120					i * mp->m_sb.sb_inodesize);
121		if (!dip->di_next_unlinked)  {
122			xfs_fs_cmn_err(CE_ALERT, mp,
123				"Detected a bogus zero next_unlinked field in incore inode buffer 0x%p.  About to pop an ASSERT.",
124				bp);
125			ASSERT(dip->di_next_unlinked);
126		}
127	}
128}
129#endif
130
131/*
132 * This routine is called to map an inode number within a file
133 * system to the buffer containing the on-disk version of the
134 * inode.  It returns a pointer to the buffer containing the
135 * on-disk inode in the bpp parameter, and in the dip parameter
136 * it returns a pointer to the on-disk inode within that buffer.
137 *
138 * If a non-zero error is returned, then the contents of bpp and
139 * dipp are undefined.
140 *
141 * Use xfs_imap() to determine the size and location of the
142 * buffer to read from disk.
143 */
144STATIC int
145xfs_inotobp(
146	xfs_mount_t	*mp,
147	xfs_trans_t	*tp,
148	xfs_ino_t	ino,
149	xfs_dinode_t	**dipp,
150	xfs_buf_t	**bpp,
151	int		*offset)
152{
153	int		di_ok;
154	xfs_imap_t	imap;
155	xfs_buf_t	*bp;
156	int		error;
157	xfs_dinode_t	*dip;
158
159	/*
160	 * Call the space management code to find the location of the
161	 * inode on disk.
162	 */
163	imap.im_blkno = 0;
164	error = xfs_imap(mp, tp, ino, &imap, XFS_IMAP_LOOKUP);
165	if (error != 0) {
166		cmn_err(CE_WARN,
167	"xfs_inotobp: xfs_imap()  returned an "
168	"error %d on %s.  Returning error.", error, mp->m_fsname);
169		return error;
170	}
171
172	/*
173	 * If the inode number maps to a block outside the bounds of the
174	 * file system then return NULL rather than calling read_buf
175	 * and panicing when we get an error from the driver.
176	 */
177	if ((imap.im_blkno + imap.im_len) >
178	    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
179		cmn_err(CE_WARN,
180	"xfs_inotobp: inode number (%llu + %d) maps to a block outside the bounds "
181	"of the file system %s.  Returning EINVAL.",
182			(unsigned long long)imap.im_blkno,
183			imap.im_len, mp->m_fsname);
184		return XFS_ERROR(EINVAL);
185	}
186
187	/*
188	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
189	 * default to just a read_buf() call.
190	 */
191	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
192				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
193
194	if (error) {
195		cmn_err(CE_WARN,
196	"xfs_inotobp: xfs_trans_read_buf()  returned an "
197	"error %d on %s.  Returning error.", error, mp->m_fsname);
198		return error;
199	}
200	dip = (xfs_dinode_t *)xfs_buf_offset(bp, 0);
201	di_ok =
202		INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
203		XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
204	if (unlikely(XFS_TEST_ERROR(!di_ok, mp, XFS_ERRTAG_ITOBP_INOTOBP,
205			XFS_RANDOM_ITOBP_INOTOBP))) {
206		XFS_CORRUPTION_ERROR("xfs_inotobp", XFS_ERRLEVEL_LOW, mp, dip);
207		xfs_trans_brelse(tp, bp);
208		cmn_err(CE_WARN,
209	"xfs_inotobp: XFS_TEST_ERROR()  returned an "
210	"error on %s.  Returning EFSCORRUPTED.",  mp->m_fsname);
211		return XFS_ERROR(EFSCORRUPTED);
212	}
213
214	xfs_inobp_check(mp, bp);
215
216	/*
217	 * Set *dipp to point to the on-disk inode in the buffer.
218	 */
219	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
220	*bpp = bp;
221	*offset = imap.im_boffset;
222	return 0;
223}
224
225
226/*
227 * This routine is called to map an inode to the buffer containing
228 * the on-disk version of the inode.  It returns a pointer to the
229 * buffer containing the on-disk inode in the bpp parameter, and in
230 * the dip parameter it returns a pointer to the on-disk inode within
231 * that buffer.
232 *
233 * If a non-zero error is returned, then the contents of bpp and
234 * dipp are undefined.
235 *
236 * If the inode is new and has not yet been initialized, use xfs_imap()
237 * to determine the size and location of the buffer to read from disk.
238 * If the inode has already been mapped to its buffer and read in once,
239 * then use the mapping information stored in the inode rather than
240 * calling xfs_imap().  This allows us to avoid the overhead of looking
241 * at the inode btree for small block file systems (see xfs_dilocate()).
242 * We can tell whether the inode has been mapped in before by comparing
243 * its disk block address to 0.  Only uninitialized inodes will have
244 * 0 for the disk block address.
245 */
246int
247xfs_itobp(
248	xfs_mount_t	*mp,
249	xfs_trans_t	*tp,
250	xfs_inode_t	*ip,
251	xfs_dinode_t	**dipp,
252	xfs_buf_t	**bpp,
253	xfs_daddr_t	bno,
254	uint		imap_flags)
255{
256	xfs_imap_t	imap;
257	xfs_buf_t	*bp;
258	int		error;
259	int		i;
260	int		ni;
261
262	if (ip->i_blkno == (xfs_daddr_t)0) {
263		/*
264		 * Call the space management code to find the location of the
265		 * inode on disk.
266		 */
267		imap.im_blkno = bno;
268		if ((error = xfs_imap(mp, tp, ip->i_ino, &imap,
269					XFS_IMAP_LOOKUP | imap_flags)))
270			return error;
271
272		/*
273		 * If the inode number maps to a block outside the bounds
274		 * of the file system then return NULL rather than calling
275		 * read_buf and panicing when we get an error from the
276		 * driver.
277		 */
278		if ((imap.im_blkno + imap.im_len) >
279		    XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks)) {
280#ifdef DEBUG
281			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
282					"(imap.im_blkno (0x%llx) "
283					"+ imap.im_len (0x%llx)) > "
284					" XFS_FSB_TO_BB(mp, "
285					"mp->m_sb.sb_dblocks) (0x%llx)",
286					(unsigned long long) imap.im_blkno,
287					(unsigned long long) imap.im_len,
288					XFS_FSB_TO_BB(mp, mp->m_sb.sb_dblocks));
289#endif /* DEBUG */
290			return XFS_ERROR(EINVAL);
291		}
292
293		/*
294		 * Fill in the fields in the inode that will be used to
295		 * map the inode to its buffer from now on.
296		 */
297		ip->i_blkno = imap.im_blkno;
298		ip->i_len = imap.im_len;
299		ip->i_boffset = imap.im_boffset;
300	} else {
301		/*
302		 * We've already mapped the inode once, so just use the
303		 * mapping that we saved the first time.
304		 */
305		imap.im_blkno = ip->i_blkno;
306		imap.im_len = ip->i_len;
307		imap.im_boffset = ip->i_boffset;
308	}
309	ASSERT(bno == 0 || bno == imap.im_blkno);
310
311	/*
312	 * Read in the buffer.  If tp is NULL, xfs_trans_read_buf() will
313	 * default to just a read_buf() call.
314	 */
315	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, imap.im_blkno,
316				   (int)imap.im_len, XFS_BUF_LOCK, &bp);
317	if (error) {
318#ifdef DEBUG
319		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_itobp: "
320				"xfs_trans_read_buf() returned error %d, "
321				"imap.im_blkno 0x%llx, imap.im_len 0x%llx",
322				error, (unsigned long long) imap.im_blkno,
323				(unsigned long long) imap.im_len);
324#endif /* DEBUG */
325		return error;
326	}
327
328	/*
329	 * Validate the magic number and version of every inode in the buffer
330	 * (if DEBUG kernel) or the first inode in the buffer, otherwise.
331	 * No validation is done here in userspace (xfs_repair).
332	 */
333#if !defined(__KERNEL__)
334	ni = 0;
335#elif defined(DEBUG)
336	ni = BBTOB(imap.im_len) >> mp->m_sb.sb_inodelog;
337#else	/* usual case */
338	ni = 1;
339#endif
340
341	for (i = 0; i < ni; i++) {
342		int		di_ok;
343		xfs_dinode_t	*dip;
344
345		dip = (xfs_dinode_t *)xfs_buf_offset(bp,
346					(i << mp->m_sb.sb_inodelog));
347		di_ok = INT_GET(dip->di_core.di_magic, ARCH_CONVERT) == XFS_DINODE_MAGIC &&
348			    XFS_DINODE_GOOD_VERSION(INT_GET(dip->di_core.di_version, ARCH_CONVERT));
349		if (unlikely(XFS_TEST_ERROR(!di_ok, mp,
350						XFS_ERRTAG_ITOBP_INOTOBP,
351						XFS_RANDOM_ITOBP_INOTOBP))) {
352			if (imap_flags & XFS_IMAP_BULKSTAT) {
353				xfs_trans_brelse(tp, bp);
354				return XFS_ERROR(EINVAL);
355			}
356#ifdef DEBUG
357			cmn_err(CE_ALERT,
358					"Device %s - bad inode magic/vsn "
359					"daddr %lld #%d (magic=%x)",
360				XFS_BUFTARG_NAME(mp->m_ddev_targp),
361				(unsigned long long)imap.im_blkno, i,
362				INT_GET(dip->di_core.di_magic, ARCH_CONVERT));
363#endif
364			XFS_CORRUPTION_ERROR("xfs_itobp", XFS_ERRLEVEL_HIGH,
365					     mp, dip);
366			xfs_trans_brelse(tp, bp);
367			return XFS_ERROR(EFSCORRUPTED);
368		}
369	}
370
371	xfs_inobp_check(mp, bp);
372
373	/*
374	 * Mark the buffer as an inode buffer now that it looks good
375	 */
376	XFS_BUF_SET_VTYPE(bp, B_FS_INO);
377
378	/*
379	 * Set *dipp to point to the on-disk inode in the buffer.
380	 */
381	*dipp = (xfs_dinode_t *)xfs_buf_offset(bp, imap.im_boffset);
382	*bpp = bp;
383	return 0;
384}
385
386/*
387 * Move inode type and inode format specific information from the
388 * on-disk inode to the in-core inode.  For fifos, devs, and sockets
389 * this means set if_rdev to the proper value.  For files, directories,
390 * and symlinks this means to bring in the in-line data or extent
391 * pointers.  For a file in B-tree format, only the root is immediately
392 * brought in-core.  The rest will be in-lined in if_extents when it
393 * is first referenced (see xfs_iread_extents()).
394 */
395STATIC int
396xfs_iformat(
397	xfs_inode_t		*ip,
398	xfs_dinode_t		*dip)
399{
400	xfs_attr_shortform_t	*atp;
401	int			size;
402	int			error;
403	xfs_fsize_t             di_size;
404	ip->i_df.if_ext_max =
405		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
406	error = 0;
407
408	if (unlikely(
409	    INT_GET(dip->di_core.di_nextents, ARCH_CONVERT) +
410		INT_GET(dip->di_core.di_anextents, ARCH_CONVERT) >
411	    INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT))) {
412		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
413			"corrupt dinode %Lu, extent total = %d, nblocks = %Lu.",
414			(unsigned long long)ip->i_ino,
415			(int)(INT_GET(dip->di_core.di_nextents, ARCH_CONVERT)
416			    + INT_GET(dip->di_core.di_anextents, ARCH_CONVERT)),
417			(unsigned long long)
418			INT_GET(dip->di_core.di_nblocks, ARCH_CONVERT));
419		XFS_CORRUPTION_ERROR("xfs_iformat(1)", XFS_ERRLEVEL_LOW,
420				     ip->i_mount, dip);
421		return XFS_ERROR(EFSCORRUPTED);
422	}
423
424	if (unlikely(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT) > ip->i_mount->m_sb.sb_inodesize)) {
425		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
426			"corrupt dinode %Lu, forkoff = 0x%x.",
427			(unsigned long long)ip->i_ino,
428			(int)(INT_GET(dip->di_core.di_forkoff, ARCH_CONVERT)));
429		XFS_CORRUPTION_ERROR("xfs_iformat(2)", XFS_ERRLEVEL_LOW,
430				     ip->i_mount, dip);
431		return XFS_ERROR(EFSCORRUPTED);
432	}
433
434	switch (ip->i_d.di_mode & S_IFMT) {
435	case S_IFIFO:
436	case S_IFCHR:
437	case S_IFBLK:
438	case S_IFSOCK:
439		if (unlikely(INT_GET(dip->di_core.di_format, ARCH_CONVERT) != XFS_DINODE_FMT_DEV)) {
440			XFS_CORRUPTION_ERROR("xfs_iformat(3)", XFS_ERRLEVEL_LOW,
441					      ip->i_mount, dip);
442			return XFS_ERROR(EFSCORRUPTED);
443		}
444		ip->i_d.di_size = 0;
445		ip->i_size = 0;
446		ip->i_df.if_u2.if_rdev = INT_GET(dip->di_u.di_dev, ARCH_CONVERT);
447		break;
448
449	case S_IFREG:
450	case S_IFLNK:
451	case S_IFDIR:
452		switch (INT_GET(dip->di_core.di_format, ARCH_CONVERT)) {
453		case XFS_DINODE_FMT_LOCAL:
454			/*
455			 * no local regular files yet
456			 */
457			if (unlikely((INT_GET(dip->di_core.di_mode, ARCH_CONVERT) & S_IFMT) == S_IFREG)) {
458				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
459					"corrupt inode %Lu "
460					"(local format for regular file).",
461					(unsigned long long) ip->i_ino);
462				XFS_CORRUPTION_ERROR("xfs_iformat(4)",
463						     XFS_ERRLEVEL_LOW,
464						     ip->i_mount, dip);
465				return XFS_ERROR(EFSCORRUPTED);
466			}
467
468			di_size = INT_GET(dip->di_core.di_size, ARCH_CONVERT);
469			if (unlikely(di_size > XFS_DFORK_DSIZE(dip, ip->i_mount))) {
470				xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
471					"corrupt inode %Lu "
472					"(bad size %Ld for local inode).",
473					(unsigned long long) ip->i_ino,
474					(long long) di_size);
475				XFS_CORRUPTION_ERROR("xfs_iformat(5)",
476						     XFS_ERRLEVEL_LOW,
477						     ip->i_mount, dip);
478				return XFS_ERROR(EFSCORRUPTED);
479			}
480
481			size = (int)di_size;
482			error = xfs_iformat_local(ip, dip, XFS_DATA_FORK, size);
483			break;
484		case XFS_DINODE_FMT_EXTENTS:
485			error = xfs_iformat_extents(ip, dip, XFS_DATA_FORK);
486			break;
487		case XFS_DINODE_FMT_BTREE:
488			error = xfs_iformat_btree(ip, dip, XFS_DATA_FORK);
489			break;
490		default:
491			XFS_ERROR_REPORT("xfs_iformat(6)", XFS_ERRLEVEL_LOW,
492					 ip->i_mount);
493			return XFS_ERROR(EFSCORRUPTED);
494		}
495		break;
496
497	default:
498		XFS_ERROR_REPORT("xfs_iformat(7)", XFS_ERRLEVEL_LOW, ip->i_mount);
499		return XFS_ERROR(EFSCORRUPTED);
500	}
501	if (error) {
502		return error;
503	}
504	if (!XFS_DFORK_Q(dip))
505		return 0;
506	ASSERT(ip->i_afp == NULL);
507	ip->i_afp = kmem_zone_zalloc(xfs_ifork_zone, KM_SLEEP);
508	ip->i_afp->if_ext_max =
509		XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
510	switch (INT_GET(dip->di_core.di_aformat, ARCH_CONVERT)) {
511	case XFS_DINODE_FMT_LOCAL:
512		atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
513		size = be16_to_cpu(atp->hdr.totsize);
514		error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
515		break;
516	case XFS_DINODE_FMT_EXTENTS:
517		error = xfs_iformat_extents(ip, dip, XFS_ATTR_FORK);
518		break;
519	case XFS_DINODE_FMT_BTREE:
520		error = xfs_iformat_btree(ip, dip, XFS_ATTR_FORK);
521		break;
522	default:
523		error = XFS_ERROR(EFSCORRUPTED);
524		break;
525	}
526	if (error) {
527		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
528		ip->i_afp = NULL;
529		xfs_idestroy_fork(ip, XFS_DATA_FORK);
530	}
531	return error;
532}
533
534/*
535 * The file is in-lined in the on-disk inode.
536 * If it fits into if_inline_data, then copy
537 * it there, otherwise allocate a buffer for it
538 * and copy the data there.  Either way, set
539 * if_data to point at the data.
540 * If we allocate a buffer for the data, make
541 * sure that its size is a multiple of 4 and
542 * record the real size in i_real_bytes.
543 */
544STATIC int
545xfs_iformat_local(
546	xfs_inode_t	*ip,
547	xfs_dinode_t	*dip,
548	int		whichfork,
549	int		size)
550{
551	xfs_ifork_t	*ifp;
552	int		real_size;
553
554	/*
555	 * If the size is unreasonable, then something
556	 * is wrong and we just bail out rather than crash in
557	 * kmem_alloc() or memcpy() below.
558	 */
559	if (unlikely(size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
560		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
561			"corrupt inode %Lu "
562			"(bad size %d for local fork, size = %d).",
563			(unsigned long long) ip->i_ino, size,
564			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork));
565		XFS_CORRUPTION_ERROR("xfs_iformat_local", XFS_ERRLEVEL_LOW,
566				     ip->i_mount, dip);
567		return XFS_ERROR(EFSCORRUPTED);
568	}
569	ifp = XFS_IFORK_PTR(ip, whichfork);
570	real_size = 0;
571	if (size == 0)
572		ifp->if_u1.if_data = NULL;
573	else if (size <= sizeof(ifp->if_u2.if_inline_data))
574		ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
575	else {
576		real_size = roundup(size, 4);
577		ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
578	}
579	ifp->if_bytes = size;
580	ifp->if_real_bytes = real_size;
581	if (size)
582		memcpy(ifp->if_u1.if_data, XFS_DFORK_PTR(dip, whichfork), size);
583	ifp->if_flags &= ~XFS_IFEXTENTS;
584	ifp->if_flags |= XFS_IFINLINE;
585	return 0;
586}
587
588/*
589 * The file consists of a set of extents all
590 * of which fit into the on-disk inode.
591 * If there are few enough extents to fit into
592 * the if_inline_ext, then copy them there.
593 * Otherwise allocate a buffer for them and copy
594 * them into it.  Either way, set if_extents
595 * to point at the extents.
596 */
597STATIC int
598xfs_iformat_extents(
599	xfs_inode_t	*ip,
600	xfs_dinode_t	*dip,
601	int		whichfork)
602{
603	xfs_bmbt_rec_t	*ep, *dp;
604	xfs_ifork_t	*ifp;
605	int		nex;
606	int		size;
607	int		i;
608
609	ifp = XFS_IFORK_PTR(ip, whichfork);
610	nex = XFS_DFORK_NEXTENTS(dip, whichfork);
611	size = nex * (uint)sizeof(xfs_bmbt_rec_t);
612
613	/*
614	 * If the number of extents is unreasonable, then something
615	 * is wrong and we just bail out rather than crash in
616	 * kmem_alloc() or memcpy() below.
617	 */
618	if (unlikely(size < 0 || size > XFS_DFORK_SIZE(dip, ip->i_mount, whichfork))) {
619		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
620			"corrupt inode %Lu ((a)extents = %d).",
621			(unsigned long long) ip->i_ino, nex);
622		XFS_CORRUPTION_ERROR("xfs_iformat_extents(1)", XFS_ERRLEVEL_LOW,
623				     ip->i_mount, dip);
624		return XFS_ERROR(EFSCORRUPTED);
625	}
626
627	ifp->if_real_bytes = 0;
628	if (nex == 0)
629		ifp->if_u1.if_extents = NULL;
630	else if (nex <= XFS_INLINE_EXTS)
631		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
632	else
633		xfs_iext_add(ifp, 0, nex);
634
635	ifp->if_bytes = size;
636	if (size) {
637		dp = (xfs_bmbt_rec_t *) XFS_DFORK_PTR(dip, whichfork);
638		xfs_validate_extents(ifp, nex, 1, XFS_EXTFMT_INODE(ip));
639		for (i = 0; i < nex; i++, dp++) {
640			ep = xfs_iext_get_ext(ifp, i);
641			ep->l0 = INT_GET(get_unaligned((__uint64_t*)&dp->l0),
642								ARCH_CONVERT);
643			ep->l1 = INT_GET(get_unaligned((__uint64_t*)&dp->l1),
644								ARCH_CONVERT);
645		}
646		xfs_bmap_trace_exlist("xfs_iformat_extents", ip, nex,
647			whichfork);
648		if (whichfork != XFS_DATA_FORK ||
649			XFS_EXTFMT_INODE(ip) == XFS_EXTFMT_NOSTATE)
650				if (unlikely(xfs_check_nostate_extents(
651				    ifp, 0, nex))) {
652					XFS_ERROR_REPORT("xfs_iformat_extents(2)",
653							 XFS_ERRLEVEL_LOW,
654							 ip->i_mount);
655					return XFS_ERROR(EFSCORRUPTED);
656				}
657	}
658	ifp->if_flags |= XFS_IFEXTENTS;
659	return 0;
660}
661
662/*
663 * The file has too many extents to fit into
664 * the inode, so they are in B-tree format.
665 * Allocate a buffer for the root of the B-tree
666 * and copy the root into it.  The i_extents
667 * field will remain NULL until all of the
668 * extents are read in (when they are needed).
669 */
670STATIC int
671xfs_iformat_btree(
672	xfs_inode_t		*ip,
673	xfs_dinode_t		*dip,
674	int			whichfork)
675{
676	xfs_bmdr_block_t	*dfp;
677	xfs_ifork_t		*ifp;
678	/* REFERENCED */
679	int			nrecs;
680	int			size;
681
682	ifp = XFS_IFORK_PTR(ip, whichfork);
683	dfp = (xfs_bmdr_block_t *)XFS_DFORK_PTR(dip, whichfork);
684	size = XFS_BMAP_BROOT_SPACE(dfp);
685	nrecs = XFS_BMAP_BROOT_NUMRECS(dfp);
686
687	/*
688	 * blow out if -- fork has less extents than can fit in
689	 * fork (fork shouldn't be a btree format), root btree
690	 * block has more records than can fit into the fork,
691	 * or the number of extents is greater than the number of
692	 * blocks.
693	 */
694	if (unlikely(XFS_IFORK_NEXTENTS(ip, whichfork) <= ifp->if_ext_max
695	    || XFS_BMDR_SPACE_CALC(nrecs) >
696			XFS_DFORK_SIZE(dip, ip->i_mount, whichfork)
697	    || XFS_IFORK_NEXTENTS(ip, whichfork) > ip->i_d.di_nblocks)) {
698		xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
699			"corrupt inode %Lu (btree).",
700			(unsigned long long) ip->i_ino);
701		XFS_ERROR_REPORT("xfs_iformat_btree", XFS_ERRLEVEL_LOW,
702				 ip->i_mount);
703		return XFS_ERROR(EFSCORRUPTED);
704	}
705
706	ifp->if_broot_bytes = size;
707	ifp->if_broot = kmem_alloc(size, KM_SLEEP);
708	ASSERT(ifp->if_broot != NULL);
709	/*
710	 * Copy and convert from the on-disk structure
711	 * to the in-memory structure.
712	 */
713	xfs_bmdr_to_bmbt(dfp, XFS_DFORK_SIZE(dip, ip->i_mount, whichfork),
714		ifp->if_broot, size);
715	ifp->if_flags &= ~XFS_IFEXTENTS;
716	ifp->if_flags |= XFS_IFBROOT;
717
718	return 0;
719}
720
721/*
722 * xfs_xlate_dinode_core - translate an xfs_inode_core_t between ondisk
723 * and native format
724 *
725 * buf  = on-disk representation
726 * dip  = native representation
727 * dir  = direction - +ve -> disk to native
728 *                    -ve -> native to disk
729 */
730void
731xfs_xlate_dinode_core(
732	xfs_caddr_t		buf,
733	xfs_dinode_core_t	*dip,
734	int			dir)
735{
736	xfs_dinode_core_t	*buf_core = (xfs_dinode_core_t *)buf;
737	xfs_dinode_core_t	*mem_core = (xfs_dinode_core_t *)dip;
738	xfs_arch_t		arch = ARCH_CONVERT;
739
740	ASSERT(dir);
741
742	INT_XLATE(buf_core->di_magic, mem_core->di_magic, dir, arch);
743	INT_XLATE(buf_core->di_mode, mem_core->di_mode, dir, arch);
744	INT_XLATE(buf_core->di_version,	mem_core->di_version, dir, arch);
745	INT_XLATE(buf_core->di_format, mem_core->di_format, dir, arch);
746	INT_XLATE(buf_core->di_onlink, mem_core->di_onlink, dir, arch);
747	INT_XLATE(buf_core->di_uid, mem_core->di_uid, dir, arch);
748	INT_XLATE(buf_core->di_gid, mem_core->di_gid, dir, arch);
749	INT_XLATE(buf_core->di_nlink, mem_core->di_nlink, dir, arch);
750	INT_XLATE(buf_core->di_projid, mem_core->di_projid, dir, arch);
751
752	if (dir > 0) {
753		memcpy(mem_core->di_pad, buf_core->di_pad,
754			sizeof(buf_core->di_pad));
755	} else {
756		memcpy(buf_core->di_pad, mem_core->di_pad,
757			sizeof(buf_core->di_pad));
758	}
759
760	INT_XLATE(buf_core->di_flushiter, mem_core->di_flushiter, dir, arch);
761
762	INT_XLATE(buf_core->di_atime.t_sec, mem_core->di_atime.t_sec,
763			dir, arch);
764	INT_XLATE(buf_core->di_atime.t_nsec, mem_core->di_atime.t_nsec,
765			dir, arch);
766	INT_XLATE(buf_core->di_mtime.t_sec, mem_core->di_mtime.t_sec,
767			dir, arch);
768	INT_XLATE(buf_core->di_mtime.t_nsec, mem_core->di_mtime.t_nsec,
769			dir, arch);
770	INT_XLATE(buf_core->di_ctime.t_sec, mem_core->di_ctime.t_sec,
771			dir, arch);
772	INT_XLATE(buf_core->di_ctime.t_nsec, mem_core->di_ctime.t_nsec,
773			dir, arch);
774	INT_XLATE(buf_core->di_size, mem_core->di_size, dir, arch);
775	INT_XLATE(buf_core->di_nblocks, mem_core->di_nblocks, dir, arch);
776	INT_XLATE(buf_core->di_extsize, mem_core->di_extsize, dir, arch);
777	INT_XLATE(buf_core->di_nextents, mem_core->di_nextents, dir, arch);
778	INT_XLATE(buf_core->di_anextents, mem_core->di_anextents, dir, arch);
779	INT_XLATE(buf_core->di_forkoff, mem_core->di_forkoff, dir, arch);
780	INT_XLATE(buf_core->di_aformat, mem_core->di_aformat, dir, arch);
781	INT_XLATE(buf_core->di_dmevmask, mem_core->di_dmevmask, dir, arch);
782	INT_XLATE(buf_core->di_dmstate, mem_core->di_dmstate, dir, arch);
783	INT_XLATE(buf_core->di_flags, mem_core->di_flags, dir, arch);
784	INT_XLATE(buf_core->di_gen, mem_core->di_gen, dir, arch);
785}
786
787STATIC uint
788_xfs_dic2xflags(
789	__uint16_t		di_flags)
790{
791	uint			flags = 0;
792
793	if (di_flags & XFS_DIFLAG_ANY) {
794		if (di_flags & XFS_DIFLAG_REALTIME)
795			flags |= XFS_XFLAG_REALTIME;
796		if (di_flags & XFS_DIFLAG_PREALLOC)
797			flags |= XFS_XFLAG_PREALLOC;
798		if (di_flags & XFS_DIFLAG_IMMUTABLE)
799			flags |= XFS_XFLAG_IMMUTABLE;
800		if (di_flags & XFS_DIFLAG_APPEND)
801			flags |= XFS_XFLAG_APPEND;
802		if (di_flags & XFS_DIFLAG_SYNC)
803			flags |= XFS_XFLAG_SYNC;
804		if (di_flags & XFS_DIFLAG_NOATIME)
805			flags |= XFS_XFLAG_NOATIME;
806		if (di_flags & XFS_DIFLAG_NODUMP)
807			flags |= XFS_XFLAG_NODUMP;
808		if (di_flags & XFS_DIFLAG_RTINHERIT)
809			flags |= XFS_XFLAG_RTINHERIT;
810		if (di_flags & XFS_DIFLAG_PROJINHERIT)
811			flags |= XFS_XFLAG_PROJINHERIT;
812		if (di_flags & XFS_DIFLAG_NOSYMLINKS)
813			flags |= XFS_XFLAG_NOSYMLINKS;
814		if (di_flags & XFS_DIFLAG_EXTSIZE)
815			flags |= XFS_XFLAG_EXTSIZE;
816		if (di_flags & XFS_DIFLAG_EXTSZINHERIT)
817			flags |= XFS_XFLAG_EXTSZINHERIT;
818		if (di_flags & XFS_DIFLAG_NODEFRAG)
819			flags |= XFS_XFLAG_NODEFRAG;
820	}
821
822	return flags;
823}
824
825uint
826xfs_ip2xflags(
827	xfs_inode_t		*ip)
828{
829	xfs_dinode_core_t	*dic = &ip->i_d;
830
831	return _xfs_dic2xflags(dic->di_flags) |
832				(XFS_CFORK_Q(dic) ? XFS_XFLAG_HASATTR : 0);
833}
834
835uint
836xfs_dic2xflags(
837	xfs_dinode_core_t	*dic)
838{
839	return _xfs_dic2xflags(INT_GET(dic->di_flags, ARCH_CONVERT)) |
840				(XFS_CFORK_Q_DISK(dic) ? XFS_XFLAG_HASATTR : 0);
841}
842
843/*
844 * Given a mount structure and an inode number, return a pointer
845 * to a newly allocated in-core inode corresponding to the given
846 * inode number.
847 *
848 * Initialize the inode's attributes and extent pointers if it
849 * already has them (it will not if the inode has no links).
850 */
851int
852xfs_iread(
853	xfs_mount_t	*mp,
854	xfs_trans_t	*tp,
855	xfs_ino_t	ino,
856	xfs_inode_t	**ipp,
857	xfs_daddr_t	bno,
858	uint		imap_flags)
859{
860	xfs_buf_t	*bp;
861	xfs_dinode_t	*dip;
862	xfs_inode_t	*ip;
863	int		error;
864
865	ASSERT(xfs_inode_zone != NULL);
866
867	ip = kmem_zone_zalloc(xfs_inode_zone, KM_SLEEP);
868	ip->i_ino = ino;
869	ip->i_mount = mp;
870	spin_lock_init(&ip->i_flags_lock);
871
872	/*
873	 * Get pointer's to the on-disk inode and the buffer containing it.
874	 * If the inode number refers to a block outside the file system
875	 * then xfs_itobp() will return NULL.  In this case we should
876	 * return NULL as well.  Set i_blkno to 0 so that xfs_itobp() will
877	 * know that this is a new incore inode.
878	 */
879	error = xfs_itobp(mp, tp, ip, &dip, &bp, bno, imap_flags);
880	if (error) {
881		kmem_zone_free(xfs_inode_zone, ip);
882		return error;
883	}
884
885	/*
886	 * Initialize inode's trace buffers.
887	 * Do this before xfs_iformat in case it adds entries.
888	 */
889#ifdef XFS_BMAP_TRACE
890	ip->i_xtrace = ktrace_alloc(XFS_BMAP_KTRACE_SIZE, KM_SLEEP);
891#endif
892#ifdef XFS_BMBT_TRACE
893	ip->i_btrace = ktrace_alloc(XFS_BMBT_KTRACE_SIZE, KM_SLEEP);
894#endif
895#ifdef XFS_RW_TRACE
896	ip->i_rwtrace = ktrace_alloc(XFS_RW_KTRACE_SIZE, KM_SLEEP);
897#endif
898#ifdef XFS_ILOCK_TRACE
899	ip->i_lock_trace = ktrace_alloc(XFS_ILOCK_KTRACE_SIZE, KM_SLEEP);
900#endif
901#ifdef XFS_DIR2_TRACE
902	ip->i_dir_trace = ktrace_alloc(XFS_DIR2_KTRACE_SIZE, KM_SLEEP);
903#endif
904
905	/*
906	 * If we got something that isn't an inode it means someone
907	 * (nfs or dmi) has a stale handle.
908	 */
909	if (INT_GET(dip->di_core.di_magic, ARCH_CONVERT) != XFS_DINODE_MAGIC) {
910		kmem_zone_free(xfs_inode_zone, ip);
911		xfs_trans_brelse(tp, bp);
912#ifdef DEBUG
913		xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
914				"dip->di_core.di_magic (0x%x) != "
915				"XFS_DINODE_MAGIC (0x%x)",
916				INT_GET(dip->di_core.di_magic, ARCH_CONVERT),
917				XFS_DINODE_MAGIC);
918#endif /* DEBUG */
919		return XFS_ERROR(EINVAL);
920	}
921
922	/*
923	 * If the on-disk inode is already linked to a directory
924	 * entry, copy all of the inode into the in-core inode.
925	 * xfs_iformat() handles copying in the inode format
926	 * specific information.
927	 * Otherwise, just get the truly permanent information.
928	 */
929	if (dip->di_core.di_mode) {
930		xfs_xlate_dinode_core((xfs_caddr_t)&dip->di_core,
931		     &(ip->i_d), 1);
932		error = xfs_iformat(ip, dip);
933		if (error)  {
934			kmem_zone_free(xfs_inode_zone, ip);
935			xfs_trans_brelse(tp, bp);
936#ifdef DEBUG
937			xfs_fs_cmn_err(CE_ALERT, mp, "xfs_iread: "
938					"xfs_iformat() returned error %d",
939					error);
940#endif /* DEBUG */
941			return error;
942		}
943	} else {
944		ip->i_d.di_magic = INT_GET(dip->di_core.di_magic, ARCH_CONVERT);
945		ip->i_d.di_version = INT_GET(dip->di_core.di_version, ARCH_CONVERT);
946		ip->i_d.di_gen = INT_GET(dip->di_core.di_gen, ARCH_CONVERT);
947		ip->i_d.di_flushiter = INT_GET(dip->di_core.di_flushiter, ARCH_CONVERT);
948		/*
949		 * Make sure to pull in the mode here as well in
950		 * case the inode is released without being used.
951		 * This ensures that xfs_inactive() will see that
952		 * the inode is already free and not try to mess
953		 * with the uninitialized part of it.
954		 */
955		ip->i_d.di_mode = 0;
956		/*
957		 * Initialize the per-fork minima and maxima for a new
958		 * inode here.  xfs_iformat will do it for old inodes.
959		 */
960		ip->i_df.if_ext_max =
961			XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
962	}
963
964	INIT_LIST_HEAD(&ip->i_reclaim);
965
966	/*
967	 * The inode format changed when we moved the link count and
968	 * made it 32 bits long.  If this is an old format inode,
969	 * convert it in memory to look like a new one.  If it gets
970	 * flushed to disk we will convert back before flushing or
971	 * logging it.  We zero out the new projid field and the old link
972	 * count field.  We'll handle clearing the pad field (the remains
973	 * of the old uuid field) when we actually convert the inode to
974	 * the new format. We don't change the version number so that we
975	 * can distinguish this from a real new format inode.
976	 */
977	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
978		ip->i_d.di_nlink = ip->i_d.di_onlink;
979		ip->i_d.di_onlink = 0;
980		ip->i_d.di_projid = 0;
981	}
982
983	ip->i_delayed_blks = 0;
984	ip->i_size = ip->i_d.di_size;
985
986	/*
987	 * Mark the buffer containing the inode as something to keep
988	 * around for a while.  This helps to keep recently accessed
989	 * meta-data in-core longer.
990	 */
991	 XFS_BUF_SET_REF(bp, XFS_INO_REF);
992
993	/*
994	 * Use xfs_trans_brelse() to release the buffer containing the
995	 * on-disk inode, because it was acquired with xfs_trans_read_buf()
996	 * in xfs_itobp() above.  If tp is NULL, this is just a normal
997	 * brelse().  If we're within a transaction, then xfs_trans_brelse()
998	 * will only release the buffer if it is not dirty within the
999	 * transaction.  It will be OK to release the buffer in this case,
1000	 * because inodes on disk are never destroyed and we will be
1001	 * locking the new in-core inode before putting it in the hash
1002	 * table where other processes can find it.  Thus we don't have
1003	 * to worry about the inode being changed just because we released
1004	 * the buffer.
1005	 */
1006	xfs_trans_brelse(tp, bp);
1007	*ipp = ip;
1008	return 0;
1009}
1010
1011/*
1012 * Read in extents from a btree-format inode.
1013 * Allocate and fill in if_extents.  Real work is done in xfs_bmap.c.
1014 */
1015int
1016xfs_iread_extents(
1017	xfs_trans_t	*tp,
1018	xfs_inode_t	*ip,
1019	int		whichfork)
1020{
1021	int		error;
1022	xfs_ifork_t	*ifp;
1023	xfs_extnum_t	nextents;
1024	size_t		size;
1025
1026	if (unlikely(XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)) {
1027		XFS_ERROR_REPORT("xfs_iread_extents", XFS_ERRLEVEL_LOW,
1028				 ip->i_mount);
1029		return XFS_ERROR(EFSCORRUPTED);
1030	}
1031	nextents = XFS_IFORK_NEXTENTS(ip, whichfork);
1032	size = nextents * sizeof(xfs_bmbt_rec_t);
1033	ifp = XFS_IFORK_PTR(ip, whichfork);
1034
1035	/*
1036	 * We know that the size is valid (it's checked in iformat_btree)
1037	 */
1038	ifp->if_lastex = NULLEXTNUM;
1039	ifp->if_bytes = ifp->if_real_bytes = 0;
1040	ifp->if_flags |= XFS_IFEXTENTS;
1041	xfs_iext_add(ifp, 0, nextents);
1042	error = xfs_bmap_read_extents(tp, ip, whichfork);
1043	if (error) {
1044		xfs_iext_destroy(ifp);
1045		ifp->if_flags &= ~XFS_IFEXTENTS;
1046		return error;
1047	}
1048	xfs_validate_extents(ifp, nextents, 0, XFS_EXTFMT_INODE(ip));
1049	return 0;
1050}
1051
1052/*
1053 * Allocate an inode on disk and return a copy of its in-core version.
1054 * The in-core inode is locked exclusively.  Set mode, nlink, and rdev
1055 * appropriately within the inode.  The uid and gid for the inode are
1056 * set according to the contents of the given cred structure.
1057 *
1058 * Use xfs_dialloc() to allocate the on-disk inode. If xfs_dialloc()
1059 * has a free inode available, call xfs_iget()
1060 * to obtain the in-core version of the allocated inode.  Finally,
1061 * fill in the inode and log its initial contents.  In this case,
1062 * ialloc_context would be set to NULL and call_again set to false.
1063 *
1064 * If xfs_dialloc() does not have an available inode,
1065 * it will replenish its supply by doing an allocation. Since we can
1066 * only do one allocation within a transaction without deadlocks, we
1067 * must commit the current transaction before returning the inode itself.
1068 * In this case, therefore, we will set call_again to true and return.
1069 * The caller should then commit the current transaction, start a new
1070 * transaction, and call xfs_ialloc() again to actually get the inode.
1071 *
1072 * To ensure that some other process does not grab the inode that
1073 * was allocated during the first call to xfs_ialloc(), this routine
1074 * also returns the [locked] bp pointing to the head of the freelist
1075 * as ialloc_context.  The caller should hold this buffer across
1076 * the commit and pass it back into this routine on the second call.
1077 */
1078int
1079xfs_ialloc(
1080	xfs_trans_t	*tp,
1081	xfs_inode_t	*pip,
1082	mode_t		mode,
1083	xfs_nlink_t	nlink,
1084	xfs_dev_t	rdev,
1085	cred_t		*cr,
1086	xfs_prid_t	prid,
1087	int		okalloc,
1088	xfs_buf_t	**ialloc_context,
1089	boolean_t	*call_again,
1090	xfs_inode_t	**ipp)
1091{
1092	xfs_ino_t	ino;
1093	xfs_inode_t	*ip;
1094	bhv_vnode_t	*vp;
1095	uint		flags;
1096	int		error;
1097
1098	/*
1099	 * Call the space management code to pick
1100	 * the on-disk inode to be allocated.
1101	 */
1102	error = xfs_dialloc(tp, pip->i_ino, mode, okalloc,
1103			    ialloc_context, call_again, &ino);
1104	if (error != 0) {
1105		return error;
1106	}
1107	if (*call_again || ino == NULLFSINO) {
1108		*ipp = NULL;
1109		return 0;
1110	}
1111	ASSERT(*ialloc_context == NULL);
1112
1113	/*
1114	 * Get the in-core inode with the lock held exclusively.
1115	 * This is because we're setting fields here we need
1116	 * to prevent others from looking at until we're done.
1117	 */
1118	error = xfs_trans_iget(tp->t_mountp, tp, ino,
1119				XFS_IGET_CREATE, XFS_ILOCK_EXCL, &ip);
1120	if (error != 0) {
1121		return error;
1122	}
1123	ASSERT(ip != NULL);
1124
1125	vp = XFS_ITOV(ip);
1126	ip->i_d.di_mode = (__uint16_t)mode;
1127	ip->i_d.di_onlink = 0;
1128	ip->i_d.di_nlink = nlink;
1129	ASSERT(ip->i_d.di_nlink == nlink);
1130	ip->i_d.di_uid = current_fsuid(cr);
1131	ip->i_d.di_gid = current_fsgid(cr);
1132	ip->i_d.di_projid = prid;
1133	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
1134
1135	/*
1136	 * If the superblock version is up to where we support new format
1137	 * inodes and this is currently an old format inode, then change
1138	 * the inode version number now.  This way we only do the conversion
1139	 * here rather than here and in the flush/logging code.
1140	 */
1141	if (XFS_SB_VERSION_HASNLINK(&tp->t_mountp->m_sb) &&
1142	    ip->i_d.di_version == XFS_DINODE_VERSION_1) {
1143		ip->i_d.di_version = XFS_DINODE_VERSION_2;
1144		/*
1145		 * We've already zeroed the old link count, the projid field,
1146		 * and the pad field.
1147		 */
1148	}
1149
1150	/*
1151	 * Project ids won't be stored on disk if we are using a version 1 inode.
1152	 */
1153	if ( (prid != 0) && (ip->i_d.di_version == XFS_DINODE_VERSION_1))
1154		xfs_bump_ino_vers2(tp, ip);
1155
1156	if (XFS_INHERIT_GID(pip, vp->v_vfsp)) {
1157		ip->i_d.di_gid = pip->i_d.di_gid;
1158		if ((pip->i_d.di_mode & S_ISGID) && (mode & S_IFMT) == S_IFDIR) {
1159			ip->i_d.di_mode |= S_ISGID;
1160		}
1161	}
1162
1163	/*
1164	 * If the group ID of the new file does not match the effective group
1165	 * ID or one of the supplementary group IDs, the S_ISGID bit is cleared
1166	 * (and only if the irix_sgid_inherit compatibility variable is set).
1167	 */
1168	if ((irix_sgid_inherit) &&
1169	    (ip->i_d.di_mode & S_ISGID) &&
1170	    (!in_group_p((gid_t)ip->i_d.di_gid))) {
1171		ip->i_d.di_mode &= ~S_ISGID;
1172	}
1173
1174	ip->i_d.di_size = 0;
1175	ip->i_size = 0;
1176	ip->i_d.di_nextents = 0;
1177	ASSERT(ip->i_d.di_nblocks == 0);
1178	xfs_ichgtime(ip, XFS_ICHGTIME_CHG|XFS_ICHGTIME_ACC|XFS_ICHGTIME_MOD);
1179	/*
1180	 * di_gen will have been taken care of in xfs_iread.
1181	 */
1182	ip->i_d.di_extsize = 0;
1183	ip->i_d.di_dmevmask = 0;
1184	ip->i_d.di_dmstate = 0;
1185	ip->i_d.di_flags = 0;
1186	flags = XFS_ILOG_CORE;
1187	switch (mode & S_IFMT) {
1188	case S_IFIFO:
1189	case S_IFCHR:
1190	case S_IFBLK:
1191	case S_IFSOCK:
1192		ip->i_d.di_format = XFS_DINODE_FMT_DEV;
1193		ip->i_df.if_u2.if_rdev = rdev;
1194		ip->i_df.if_flags = 0;
1195		flags |= XFS_ILOG_DEV;
1196		break;
1197	case S_IFREG:
1198	case S_IFDIR:
1199		if (unlikely(pip->i_d.di_flags & XFS_DIFLAG_ANY)) {
1200			uint	di_flags = 0;
1201
1202			if ((mode & S_IFMT) == S_IFDIR) {
1203				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT)
1204					di_flags |= XFS_DIFLAG_RTINHERIT;
1205				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1206					di_flags |= XFS_DIFLAG_EXTSZINHERIT;
1207					ip->i_d.di_extsize = pip->i_d.di_extsize;
1208				}
1209			} else if ((mode & S_IFMT) == S_IFREG) {
1210				if (pip->i_d.di_flags & XFS_DIFLAG_RTINHERIT) {
1211					di_flags |= XFS_DIFLAG_REALTIME;
1212					ip->i_iocore.io_flags |= XFS_IOCORE_RT;
1213				}
1214				if (pip->i_d.di_flags & XFS_DIFLAG_EXTSZINHERIT) {
1215					di_flags |= XFS_DIFLAG_EXTSIZE;
1216					ip->i_d.di_extsize = pip->i_d.di_extsize;
1217				}
1218			}
1219			if ((pip->i_d.di_flags & XFS_DIFLAG_NOATIME) &&
1220			    xfs_inherit_noatime)
1221				di_flags |= XFS_DIFLAG_NOATIME;
1222			if ((pip->i_d.di_flags & XFS_DIFLAG_NODUMP) &&
1223			    xfs_inherit_nodump)
1224				di_flags |= XFS_DIFLAG_NODUMP;
1225			if ((pip->i_d.di_flags & XFS_DIFLAG_SYNC) &&
1226			    xfs_inherit_sync)
1227				di_flags |= XFS_DIFLAG_SYNC;
1228			if ((pip->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) &&
1229			    xfs_inherit_nosymlinks)
1230				di_flags |= XFS_DIFLAG_NOSYMLINKS;
1231			if (pip->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
1232				di_flags |= XFS_DIFLAG_PROJINHERIT;
1233			if ((pip->i_d.di_flags & XFS_DIFLAG_NODEFRAG) &&
1234			    xfs_inherit_nodefrag)
1235				di_flags |= XFS_DIFLAG_NODEFRAG;
1236			ip->i_d.di_flags |= di_flags;
1237		}
1238		/* FALLTHROUGH */
1239	case S_IFLNK:
1240		ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
1241		ip->i_df.if_flags = XFS_IFEXTENTS;
1242		ip->i_df.if_bytes = ip->i_df.if_real_bytes = 0;
1243		ip->i_df.if_u1.if_extents = NULL;
1244		break;
1245	default:
1246		ASSERT(0);
1247	}
1248	/*
1249	 * Attribute fork settings for new inode.
1250	 */
1251	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
1252	ip->i_d.di_anextents = 0;
1253
1254	/*
1255	 * Log the new values stuffed into the inode.
1256	 */
1257	xfs_trans_log_inode(tp, ip, flags);
1258
1259	/* now that we have an i_mode we can setup inode ops and unlock */
1260	bhv_vfs_init_vnode(XFS_MTOVFS(tp->t_mountp), vp, XFS_ITOBHV(ip), 1);
1261
1262	*ipp = ip;
1263	return 0;
1264}
1265
1266/*
1267 * Check to make sure that there are no blocks allocated to the
1268 * file beyond the size of the file.  We don't check this for
1269 * files with fixed size extents or real time extents, but we
1270 * at least do it for regular files.
1271 */
1272#ifdef DEBUG
1273void
1274xfs_isize_check(
1275	xfs_mount_t	*mp,
1276	xfs_inode_t	*ip,
1277	xfs_fsize_t	isize)
1278{
1279	xfs_fileoff_t	map_first;
1280	int		nimaps;
1281	xfs_bmbt_irec_t	imaps[2];
1282
1283	if ((ip->i_d.di_mode & S_IFMT) != S_IFREG)
1284		return;
1285
1286	if (ip->i_d.di_flags & (XFS_DIFLAG_REALTIME | XFS_DIFLAG_EXTSIZE))
1287		return;
1288
1289	nimaps = 2;
1290	map_first = XFS_B_TO_FSB(mp, (xfs_ufsize_t)isize);
1291	/*
1292	 * The filesystem could be shutting down, so bmapi may return
1293	 * an error.
1294	 */
1295	if (xfs_bmapi(NULL, ip, map_first,
1296			 (XFS_B_TO_FSB(mp,
1297				       (xfs_ufsize_t)XFS_MAXIOFFSET(mp)) -
1298			  map_first),
1299			 XFS_BMAPI_ENTIRE, NULL, 0, imaps, &nimaps,
1300			 NULL, NULL))
1301	    return;
1302	ASSERT(nimaps == 1);
1303	ASSERT(imaps[0].br_startblock == HOLESTARTBLOCK);
1304}
1305#endif	/* DEBUG */
1306
1307/*
1308 * Calculate the last possible buffered byte in a file.  This must
1309 * include data that was buffered beyond the EOF by the write code.
1310 * This also needs to deal with overflowing the xfs_fsize_t type
1311 * which can happen for sizes near the limit.
1312 *
1313 * We also need to take into account any blocks beyond the EOF.  It
1314 * may be the case that they were buffered by a write which failed.
1315 * In that case the pages will still be in memory, but the inode size
1316 * will never have been updated.
1317 */
1318xfs_fsize_t
1319xfs_file_last_byte(
1320	xfs_inode_t	*ip)
1321{
1322	xfs_mount_t	*mp;
1323	xfs_fsize_t	last_byte;
1324	xfs_fileoff_t	last_block;
1325	xfs_fileoff_t	size_last_block;
1326	int		error;
1327
1328	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE | MR_ACCESS));
1329
1330	mp = ip->i_mount;
1331	/*
1332	 * Only check for blocks beyond the EOF if the extents have
1333	 * been read in.  This eliminates the need for the inode lock,
1334	 * and it also saves us from looking when it really isn't
1335	 * necessary.
1336	 */
1337	if (ip->i_df.if_flags & XFS_IFEXTENTS) {
1338		error = xfs_bmap_last_offset(NULL, ip, &last_block,
1339			XFS_DATA_FORK);
1340		if (error) {
1341			last_block = 0;
1342		}
1343	} else {
1344		last_block = 0;
1345	}
1346	size_last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)ip->i_size);
1347	last_block = XFS_FILEOFF_MAX(last_block, size_last_block);
1348
1349	last_byte = XFS_FSB_TO_B(mp, last_block);
1350	if (last_byte < 0) {
1351		return XFS_MAXIOFFSET(mp);
1352	}
1353	last_byte += (1 << mp->m_writeio_log);
1354	if (last_byte < 0) {
1355		return XFS_MAXIOFFSET(mp);
1356	}
1357	return last_byte;
1358}
1359
1360#if defined(XFS_RW_TRACE)
1361STATIC void
1362xfs_itrunc_trace(
1363	int		tag,
1364	xfs_inode_t	*ip,
1365	int		flag,
1366	xfs_fsize_t	new_size,
1367	xfs_off_t	toss_start,
1368	xfs_off_t	toss_finish)
1369{
1370	if (ip->i_rwtrace == NULL) {
1371		return;
1372	}
1373
1374	ktrace_enter(ip->i_rwtrace,
1375		     (void*)((long)tag),
1376		     (void*)ip,
1377		     (void*)(unsigned long)((ip->i_d.di_size >> 32) & 0xffffffff),
1378		     (void*)(unsigned long)(ip->i_d.di_size & 0xffffffff),
1379		     (void*)((long)flag),
1380		     (void*)(unsigned long)((new_size >> 32) & 0xffffffff),
1381		     (void*)(unsigned long)(new_size & 0xffffffff),
1382		     (void*)(unsigned long)((toss_start >> 32) & 0xffffffff),
1383		     (void*)(unsigned long)(toss_start & 0xffffffff),
1384		     (void*)(unsigned long)((toss_finish >> 32) & 0xffffffff),
1385		     (void*)(unsigned long)(toss_finish & 0xffffffff),
1386		     (void*)(unsigned long)current_cpu(),
1387		     (void*)(unsigned long)current_pid(),
1388		     (void*)NULL,
1389		     (void*)NULL,
1390		     (void*)NULL);
1391}
1392#else
1393#define	xfs_itrunc_trace(tag, ip, flag, new_size, toss_start, toss_finish)
1394#endif
1395
1396/*
1397 * Start the truncation of the file to new_size.  The new size
1398 * must be smaller than the current size.  This routine will
1399 * clear the buffer and page caches of file data in the removed
1400 * range, and xfs_itruncate_finish() will remove the underlying
1401 * disk blocks.
1402 *
1403 * The inode must have its I/O lock locked EXCLUSIVELY, and it
1404 * must NOT have the inode lock held at all.  This is because we're
1405 * calling into the buffer/page cache code and we can't hold the
1406 * inode lock when we do so.
1407 *
1408 * We need to wait for any direct I/Os in flight to complete before we
1409 * proceed with the truncate. This is needed to prevent the extents
1410 * being read or written by the direct I/Os from being removed while the
1411 * I/O is in flight as there is no other method of synchronising
1412 * direct I/O with the truncate operation.  Also, because we hold
1413 * the IOLOCK in exclusive mode, we prevent new direct I/Os from being
1414 * started until the truncate completes and drops the lock. Essentially,
1415 * the vn_iowait() call forms an I/O barrier that provides strict ordering
1416 * between direct I/Os and the truncate operation.
1417 *
1418 * The flags parameter can have either the value XFS_ITRUNC_DEFINITE
1419 * or XFS_ITRUNC_MAYBE.  The XFS_ITRUNC_MAYBE value should be used
1420 * in the case that the caller is locking things out of order and
1421 * may not be able to call xfs_itruncate_finish() with the inode lock
1422 * held without dropping the I/O lock.  If the caller must drop the
1423 * I/O lock before calling xfs_itruncate_finish(), then xfs_itruncate_start()
1424 * must be called again with all the same restrictions as the initial
1425 * call.
1426 */
1427int
1428xfs_itruncate_start(
1429	xfs_inode_t	*ip,
1430	uint		flags,
1431	xfs_fsize_t	new_size)
1432{
1433	xfs_fsize_t	last_byte;
1434	xfs_off_t	toss_start;
1435	xfs_mount_t	*mp;
1436	bhv_vnode_t	*vp;
1437	int		error = 0;
1438
1439	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1440	ASSERT((new_size == 0) || (new_size <= ip->i_size));
1441	ASSERT((flags == XFS_ITRUNC_DEFINITE) ||
1442	       (flags == XFS_ITRUNC_MAYBE));
1443
1444	mp = ip->i_mount;
1445	vp = XFS_ITOV(ip);
1446
1447	vn_iowait(vp);  /* wait for the completion of any pending DIOs */
1448
1449	/*
1450	 * Call toss_pages or flushinval_pages to get rid of pages
1451	 * overlapping the region being removed.  We have to use
1452	 * the less efficient flushinval_pages in the case that the
1453	 * caller may not be able to finish the truncate without
1454	 * dropping the inode's I/O lock.  Make sure
1455	 * to catch any pages brought in by buffers overlapping
1456	 * the EOF by searching out beyond the isize by our
1457	 * block size. We round new_size up to a block boundary
1458	 * so that we don't toss things on the same block as
1459	 * new_size but before it.
1460	 *
1461	 * Before calling toss_page or flushinval_pages, make sure to
1462	 * call remapf() over the same region if the file is mapped.
1463	 * This frees up mapped file references to the pages in the
1464	 * given range and for the flushinval_pages case it ensures
1465	 * that we get the latest mapped changes flushed out.
1466	 */
1467	toss_start = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1468	toss_start = XFS_FSB_TO_B(mp, toss_start);
1469	if (toss_start < 0) {
1470		/*
1471		 * The place to start tossing is beyond our maximum
1472		 * file size, so there is no way that the data extended
1473		 * out there.
1474		 */
1475		return 0;
1476	}
1477	last_byte = xfs_file_last_byte(ip);
1478	xfs_itrunc_trace(XFS_ITRUNC_START, ip, flags, new_size, toss_start,
1479			 last_byte);
1480	if (last_byte > toss_start) {
1481		if (flags & XFS_ITRUNC_DEFINITE) {
1482			bhv_vop_toss_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1483		} else {
1484			error = bhv_vop_flushinval_pages(vp, toss_start, -1, FI_REMAPF_LOCKED);
1485		}
1486	}
1487
1488#ifdef DEBUG
1489	if (new_size == 0) {
1490		ASSERT(VN_CACHED(vp) == 0);
1491	}
1492#endif
1493	return error;
1494}
1495
1496/*
1497 * Shrink the file to the given new_size.  The new
1498 * size must be smaller than the current size.
1499 * This will free up the underlying blocks
1500 * in the removed range after a call to xfs_itruncate_start()
1501 * or xfs_atruncate_start().
1502 *
1503 * The transaction passed to this routine must have made
1504 * a permanent log reservation of at least XFS_ITRUNCATE_LOG_RES.
1505 * This routine may commit the given transaction and
1506 * start new ones, so make sure everything involved in
1507 * the transaction is tidy before calling here.
1508 * Some transaction will be returned to the caller to be
1509 * committed.  The incoming transaction must already include
1510 * the inode, and both inode locks must be held exclusively.
1511 * The inode must also be "held" within the transaction.  On
1512 * return the inode will be "held" within the returned transaction.
1513 * This routine does NOT require any disk space to be reserved
1514 * for it within the transaction.
1515 *
1516 * The fork parameter must be either xfs_attr_fork or xfs_data_fork,
1517 * and it indicates the fork which is to be truncated.  For the
1518 * attribute fork we only support truncation to size 0.
1519 *
1520 * We use the sync parameter to indicate whether or not the first
1521 * transaction we perform might have to be synchronous.  For the attr fork,
1522 * it needs to be so if the unlink of the inode is not yet known to be
1523 * permanent in the log.  This keeps us from freeing and reusing the
1524 * blocks of the attribute fork before the unlink of the inode becomes
1525 * permanent.
1526 *
1527 * For the data fork, we normally have to run synchronously if we're
1528 * being called out of the inactive path or we're being called
1529 * out of the create path where we're truncating an existing file.
1530 * Either way, the truncate needs to be sync so blocks don't reappear
1531 * in the file with altered data in case of a crash.  wsync filesystems
1532 * can run the first case async because anything that shrinks the inode
1533 * has to run sync so by the time we're called here from inactive, the
1534 * inode size is permanently set to 0.
1535 *
1536 * Calls from the truncate path always need to be sync unless we're
1537 * in a wsync filesystem and the file has already been unlinked.
1538 *
1539 * The caller is responsible for correctly setting the sync parameter.
1540 * It gets too hard for us to guess here which path we're being called
1541 * out of just based on inode state.
1542 */
1543int
1544xfs_itruncate_finish(
1545	xfs_trans_t	**tp,
1546	xfs_inode_t	*ip,
1547	xfs_fsize_t	new_size,
1548	int		fork,
1549	int		sync)
1550{
1551	xfs_fsblock_t	first_block;
1552	xfs_fileoff_t	first_unmap_block;
1553	xfs_fileoff_t	last_block;
1554	xfs_filblks_t	unmap_len=0;
1555	xfs_mount_t	*mp;
1556	xfs_trans_t	*ntp;
1557	int		done;
1558	int		committed;
1559	xfs_bmap_free_t	free_list;
1560	int		error;
1561
1562	ASSERT(ismrlocked(&ip->i_iolock, MR_UPDATE) != 0);
1563	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE) != 0);
1564	ASSERT((new_size == 0) || (new_size <= ip->i_size));
1565	ASSERT(*tp != NULL);
1566	ASSERT((*tp)->t_flags & XFS_TRANS_PERM_LOG_RES);
1567	ASSERT(ip->i_transp == *tp);
1568	ASSERT(ip->i_itemp != NULL);
1569	ASSERT(ip->i_itemp->ili_flags & XFS_ILI_HOLD);
1570
1571
1572	ntp = *tp;
1573	mp = (ntp)->t_mountp;
1574	ASSERT(! XFS_NOT_DQATTACHED(mp, ip));
1575
1576	/*
1577	 * We only support truncating the entire attribute fork.
1578	 */
1579	if (fork == XFS_ATTR_FORK) {
1580		new_size = 0LL;
1581	}
1582	first_unmap_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)new_size);
1583	xfs_itrunc_trace(XFS_ITRUNC_FINISH1, ip, 0, new_size, 0, 0);
1584	/*
1585	 * The first thing we do is set the size to new_size permanently
1586	 * on disk.  This way we don't have to worry about anyone ever
1587	 * being able to look at the data being freed even in the face
1588	 * of a crash.  What we're getting around here is the case where
1589	 * we free a block, it is allocated to another file, it is written
1590	 * to, and then we crash.  If the new data gets written to the
1591	 * file but the log buffers containing the free and reallocation
1592	 * don't, then we'd end up with garbage in the blocks being freed.
1593	 * As long as we make the new_size permanent before actually
1594	 * freeing any blocks it doesn't matter if they get writtten to.
1595	 *
1596	 * The callers must signal into us whether or not the size
1597	 * setting here must be synchronous.  There are a few cases
1598	 * where it doesn't have to be synchronous.  Those cases
1599	 * occur if the file is unlinked and we know the unlink is
1600	 * permanent or if the blocks being truncated are guaranteed
1601	 * to be beyond the inode eof (regardless of the link count)
1602	 * and the eof value is permanent.  Both of these cases occur
1603	 * only on wsync-mounted filesystems.  In those cases, we're
1604	 * guaranteed that no user will ever see the data in the blocks
1605	 * that are being truncated so the truncate can run async.
1606	 * In the free beyond eof case, the file may wind up with
1607	 * more blocks allocated to it than it needs if we crash
1608	 * and that won't get fixed until the next time the file
1609	 * is re-opened and closed but that's ok as that shouldn't
1610	 * be too many blocks.
1611	 *
1612	 * However, we can't just make all wsync xactions run async
1613	 * because there's one call out of the create path that needs
1614	 * to run sync where it's truncating an existing file to size
1615	 * 0 whose size is > 0.
1616	 *
1617	 * It's probably possible to come up with a test in this
1618	 * routine that would correctly distinguish all the above
1619	 * cases from the values of the function parameters and the
1620	 * inode state but for sanity's sake, I've decided to let the
1621	 * layers above just tell us.  It's simpler to correctly figure
1622	 * out in the layer above exactly under what conditions we
1623	 * can run async and I think it's easier for others read and
1624	 * follow the logic in case something has to be changed.
1625	 * cscope is your friend -- rcc.
1626	 *
1627	 * The attribute fork is much simpler.
1628	 *
1629	 * For the attribute fork we allow the caller to tell us whether
1630	 * the unlink of the inode that led to this call is yet permanent
1631	 * in the on disk log.  If it is not and we will be freeing extents
1632	 * in this inode then we make the first transaction synchronous
1633	 * to make sure that the unlink is permanent by the time we free
1634	 * the blocks.
1635	 */
1636	if (fork == XFS_DATA_FORK) {
1637		if (ip->i_d.di_nextents > 0) {
1638			/*
1639			 * If we are not changing the file size then do
1640			 * not update the on-disk file size - we may be
1641			 * called from xfs_inactive_free_eofblocks().  If we
1642			 * update the on-disk file size and then the system
1643			 * crashes before the contents of the file are
1644			 * flushed to disk then the files may be full of
1645			 * holes (ie NULL files bug).
1646			 */
1647			if (ip->i_size != new_size) {
1648				ip->i_d.di_size = new_size;
1649				ip->i_size = new_size;
1650				xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1651			}
1652		}
1653	} else if (sync) {
1654		ASSERT(!(mp->m_flags & XFS_MOUNT_WSYNC));
1655		if (ip->i_d.di_anextents > 0)
1656			xfs_trans_set_sync(ntp);
1657	}
1658	ASSERT(fork == XFS_DATA_FORK ||
1659		(fork == XFS_ATTR_FORK &&
1660			((sync && !(mp->m_flags & XFS_MOUNT_WSYNC)) ||
1661			 (sync == 0 && (mp->m_flags & XFS_MOUNT_WSYNC)))));
1662
1663	/*
1664	 * Since it is possible for space to become allocated beyond
1665	 * the end of the file (in a crash where the space is allocated
1666	 * but the inode size is not yet updated), simply remove any
1667	 * blocks which show up between the new EOF and the maximum
1668	 * possible file size.  If the first block to be removed is
1669	 * beyond the maximum file size (ie it is the same as last_block),
1670	 * then there is nothing to do.
1671	 */
1672	last_block = XFS_B_TO_FSB(mp, (xfs_ufsize_t)XFS_MAXIOFFSET(mp));
1673	ASSERT(first_unmap_block <= last_block);
1674	done = 0;
1675	if (last_block == first_unmap_block) {
1676		done = 1;
1677	} else {
1678		unmap_len = last_block - first_unmap_block + 1;
1679	}
1680	while (!done) {
1681		/*
1682		 * Free up up to XFS_ITRUNC_MAX_EXTENTS.  xfs_bunmapi()
1683		 * will tell us whether it freed the entire range or
1684		 * not.  If this is a synchronous mount (wsync),
1685		 * then we can tell bunmapi to keep all the
1686		 * transactions asynchronous since the unlink
1687		 * transaction that made this inode inactive has
1688		 * already hit the disk.  There's no danger of
1689		 * the freed blocks being reused, there being a
1690		 * crash, and the reused blocks suddenly reappearing
1691		 * in this file with garbage in them once recovery
1692		 * runs.
1693		 */
1694		XFS_BMAP_INIT(&free_list, &first_block);
1695		error = XFS_BUNMAPI(mp, ntp, &ip->i_iocore,
1696				    first_unmap_block, unmap_len,
1697				    XFS_BMAPI_AFLAG(fork) |
1698				      (sync ? 0 : XFS_BMAPI_ASYNC),
1699				    XFS_ITRUNC_MAX_EXTENTS,
1700				    &first_block, &free_list,
1701				    NULL, &done);
1702		if (error) {
1703			/*
1704			 * If the bunmapi call encounters an error,
1705			 * return to the caller where the transaction
1706			 * can be properly aborted.  We just need to
1707			 * make sure we're not holding any resources
1708			 * that we were not when we came in.
1709			 */
1710			xfs_bmap_cancel(&free_list);
1711			return error;
1712		}
1713
1714		/*
1715		 * Duplicate the transaction that has the permanent
1716		 * reservation and commit the old transaction.
1717		 */
1718		error = xfs_bmap_finish(tp, &free_list, &committed);
1719		ntp = *tp;
1720		if (error) {
1721			/*
1722			 * If the bmap finish call encounters an error,
1723			 * return to the caller where the transaction
1724			 * can be properly aborted.  We just need to
1725			 * make sure we're not holding any resources
1726			 * that we were not when we came in.
1727			 *
1728			 * Aborting from this point might lose some
1729			 * blocks in the file system, but oh well.
1730			 */
1731			xfs_bmap_cancel(&free_list);
1732			if (committed) {
1733				/*
1734				 * If the passed in transaction committed
1735				 * in xfs_bmap_finish(), then we want to
1736				 * add the inode to this one before returning.
1737				 * This keeps things simple for the higher
1738				 * level code, because it always knows that
1739				 * the inode is locked and held in the
1740				 * transaction that returns to it whether
1741				 * errors occur or not.  We don't mark the
1742				 * inode dirty so that this transaction can
1743				 * be easily aborted if possible.
1744				 */
1745				xfs_trans_ijoin(ntp, ip,
1746					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1747				xfs_trans_ihold(ntp, ip);
1748			}
1749			return error;
1750		}
1751
1752		if (committed) {
1753			/*
1754			 * The first xact was committed,
1755			 * so add the inode to the new one.
1756			 * Mark it dirty so it will be logged
1757			 * and moved forward in the log as
1758			 * part of every commit.
1759			 */
1760			xfs_trans_ijoin(ntp, ip,
1761					XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1762			xfs_trans_ihold(ntp, ip);
1763			xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1764		}
1765		ntp = xfs_trans_dup(ntp);
1766		(void) xfs_trans_commit(*tp, 0);
1767		*tp = ntp;
1768		error = xfs_trans_reserve(ntp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
1769					  XFS_TRANS_PERM_LOG_RES,
1770					  XFS_ITRUNCATE_LOG_COUNT);
1771		/*
1772		 * Add the inode being truncated to the next chained
1773		 * transaction.
1774		 */
1775		xfs_trans_ijoin(ntp, ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
1776		xfs_trans_ihold(ntp, ip);
1777		if (error)
1778			return (error);
1779	}
1780	/*
1781	 * Only update the size in the case of the data fork, but
1782	 * always re-log the inode so that our permanent transaction
1783	 * can keep on rolling it forward in the log.
1784	 */
1785	if (fork == XFS_DATA_FORK) {
1786		xfs_isize_check(mp, ip, new_size);
1787		/*
1788		 * If we are not changing the file size then do
1789		 * not update the on-disk file size - we may be
1790		 * called from xfs_inactive_free_eofblocks().  If we
1791		 * update the on-disk file size and then the system
1792		 * crashes before the contents of the file are
1793		 * flushed to disk then the files may be full of
1794		 * holes (ie NULL files bug).
1795		 */
1796		if (ip->i_size != new_size) {
1797			ip->i_d.di_size = new_size;
1798			ip->i_size = new_size;
1799		}
1800	}
1801	xfs_trans_log_inode(ntp, ip, XFS_ILOG_CORE);
1802	ASSERT((new_size != 0) ||
1803	       (fork == XFS_ATTR_FORK) ||
1804	       (ip->i_delayed_blks == 0));
1805	ASSERT((new_size != 0) ||
1806	       (fork == XFS_ATTR_FORK) ||
1807	       (ip->i_d.di_nextents == 0));
1808	xfs_itrunc_trace(XFS_ITRUNC_FINISH2, ip, 0, new_size, 0, 0);
1809	return 0;
1810}
1811
1812
1813/*
1814 * xfs_igrow_start
1815 *
1816 * Do the first part of growing a file: zero any data in the last
1817 * block that is beyond the old EOF.  We need to do this before
1818 * the inode is joined to the transaction to modify the i_size.
1819 * That way we can drop the inode lock and call into the buffer
1820 * cache to get the buffer mapping the EOF.
1821 */
1822int
1823xfs_igrow_start(
1824	xfs_inode_t	*ip,
1825	xfs_fsize_t	new_size,
1826	cred_t		*credp)
1827{
1828	int		error;
1829
1830	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1831	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1832	ASSERT(new_size > ip->i_size);
1833
1834	/*
1835	 * Zero any pages that may have been created by
1836	 * xfs_write_file() beyond the end of the file
1837	 * and any blocks between the old and new file sizes.
1838	 */
1839	error = xfs_zero_eof(XFS_ITOV(ip), &ip->i_iocore, new_size,
1840			     ip->i_size);
1841	return error;
1842}
1843
1844/*
1845 * xfs_igrow_finish
1846 *
1847 * This routine is called to extend the size of a file.
1848 * The inode must have both the iolock and the ilock locked
1849 * for update and it must be a part of the current transaction.
1850 * The xfs_igrow_start() function must have been called previously.
1851 * If the change_flag is not zero, the inode change timestamp will
1852 * be updated.
1853 */
1854void
1855xfs_igrow_finish(
1856	xfs_trans_t	*tp,
1857	xfs_inode_t	*ip,
1858	xfs_fsize_t	new_size,
1859	int		change_flag)
1860{
1861	ASSERT(ismrlocked(&(ip->i_lock), MR_UPDATE) != 0);
1862	ASSERT(ismrlocked(&(ip->i_iolock), MR_UPDATE) != 0);
1863	ASSERT(ip->i_transp == tp);
1864	ASSERT(new_size > ip->i_size);
1865
1866	/*
1867	 * Update the file size.  Update the inode change timestamp
1868	 * if change_flag set.
1869	 */
1870	ip->i_d.di_size = new_size;
1871	ip->i_size = new_size;
1872	if (change_flag)
1873		xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
1874	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
1875
1876}
1877
1878
1879/*
1880 * This is called when the inode's link count goes to 0.
1881 * We place the on-disk inode on a list in the AGI.  It
1882 * will be pulled from this list when the inode is freed.
1883 */
1884int
1885xfs_iunlink(
1886	xfs_trans_t	*tp,
1887	xfs_inode_t	*ip)
1888{
1889	xfs_mount_t	*mp;
1890	xfs_agi_t	*agi;
1891	xfs_dinode_t	*dip;
1892	xfs_buf_t	*agibp;
1893	xfs_buf_t	*ibp;
1894	xfs_agnumber_t	agno;
1895	xfs_daddr_t	agdaddr;
1896	xfs_agino_t	agino;
1897	short		bucket_index;
1898	int		offset;
1899	int		error;
1900	int		agi_ok;
1901
1902	ASSERT(ip->i_d.di_nlink == 0);
1903	ASSERT(ip->i_d.di_mode != 0);
1904	ASSERT(ip->i_transp == tp);
1905
1906	mp = tp->t_mountp;
1907
1908	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
1909	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
1910
1911	/*
1912	 * Get the agi buffer first.  It ensures lock ordering
1913	 * on the list.
1914	 */
1915	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
1916				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
1917	if (error) {
1918		return error;
1919	}
1920	/*
1921	 * Validate the magic number of the agi block.
1922	 */
1923	agi = XFS_BUF_TO_AGI(agibp);
1924	agi_ok =
1925		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
1926		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
1927	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK,
1928			XFS_RANDOM_IUNLINK))) {
1929		XFS_CORRUPTION_ERROR("xfs_iunlink", XFS_ERRLEVEL_LOW, mp, agi);
1930		xfs_trans_brelse(tp, agibp);
1931		return XFS_ERROR(EFSCORRUPTED);
1932	}
1933	/*
1934	 * Get the index into the agi hash table for the
1935	 * list this inode will go on.
1936	 */
1937	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
1938	ASSERT(agino != 0);
1939	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
1940	ASSERT(agi->agi_unlinked[bucket_index]);
1941	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != agino);
1942
1943	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO) {
1944		/*
1945		 * There is already another inode in the bucket we need
1946		 * to add ourselves to.  Add us at the front of the list.
1947		 * Here we put the head pointer into our next pointer,
1948		 * and then we fall through to point the head at us.
1949		 */
1950		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
1951		if (error) {
1952			return error;
1953		}
1954		ASSERT(INT_GET(dip->di_next_unlinked, ARCH_CONVERT) == NULLAGINO);
1955		ASSERT(dip->di_next_unlinked);
1956		/* both on-disk, don't endian flip twice */
1957		dip->di_next_unlinked = agi->agi_unlinked[bucket_index];
1958		offset = ip->i_boffset +
1959			offsetof(xfs_dinode_t, di_next_unlinked);
1960		xfs_trans_inode_buf(tp, ibp);
1961		xfs_trans_log_buf(tp, ibp, offset,
1962				  (offset + sizeof(xfs_agino_t) - 1));
1963		xfs_inobp_check(mp, ibp);
1964	}
1965
1966	/*
1967	 * Point the bucket head pointer at the inode being inserted.
1968	 */
1969	ASSERT(agino != 0);
1970	agi->agi_unlinked[bucket_index] = cpu_to_be32(agino);
1971	offset = offsetof(xfs_agi_t, agi_unlinked) +
1972		(sizeof(xfs_agino_t) * bucket_index);
1973	xfs_trans_log_buf(tp, agibp, offset,
1974			  (offset + sizeof(xfs_agino_t) - 1));
1975	return 0;
1976}
1977
1978/*
1979 * Pull the on-disk inode from the AGI unlinked list.
1980 */
1981STATIC int
1982xfs_iunlink_remove(
1983	xfs_trans_t	*tp,
1984	xfs_inode_t	*ip)
1985{
1986	xfs_ino_t	next_ino;
1987	xfs_mount_t	*mp;
1988	xfs_agi_t	*agi;
1989	xfs_dinode_t	*dip;
1990	xfs_buf_t	*agibp;
1991	xfs_buf_t	*ibp;
1992	xfs_agnumber_t	agno;
1993	xfs_daddr_t	agdaddr;
1994	xfs_agino_t	agino;
1995	xfs_agino_t	next_agino;
1996	xfs_buf_t	*last_ibp;
1997	xfs_dinode_t	*last_dip = NULL;
1998	short		bucket_index;
1999	int		offset, last_offset = 0;
2000	int		error;
2001	int		agi_ok;
2002
2003	/*
2004	 * First pull the on-disk inode from the AGI unlinked list.
2005	 */
2006	mp = tp->t_mountp;
2007
2008	agno = XFS_INO_TO_AGNO(mp, ip->i_ino);
2009	agdaddr = XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp));
2010
2011	/*
2012	 * Get the agi buffer first.  It ensures lock ordering
2013	 * on the list.
2014	 */
2015	error = xfs_trans_read_buf(mp, tp, mp->m_ddev_targp, agdaddr,
2016				   XFS_FSS_TO_BB(mp, 1), 0, &agibp);
2017	if (error) {
2018		cmn_err(CE_WARN,
2019			"xfs_iunlink_remove: xfs_trans_read_buf()  returned an error %d on %s.  Returning error.",
2020			error, mp->m_fsname);
2021		return error;
2022	}
2023	/*
2024	 * Validate the magic number of the agi block.
2025	 */
2026	agi = XFS_BUF_TO_AGI(agibp);
2027	agi_ok =
2028		be32_to_cpu(agi->agi_magicnum) == XFS_AGI_MAGIC &&
2029		XFS_AGI_GOOD_VERSION(be32_to_cpu(agi->agi_versionnum));
2030	if (unlikely(XFS_TEST_ERROR(!agi_ok, mp, XFS_ERRTAG_IUNLINK_REMOVE,
2031			XFS_RANDOM_IUNLINK_REMOVE))) {
2032		XFS_CORRUPTION_ERROR("xfs_iunlink_remove", XFS_ERRLEVEL_LOW,
2033				     mp, agi);
2034		xfs_trans_brelse(tp, agibp);
2035		cmn_err(CE_WARN,
2036			"xfs_iunlink_remove: XFS_TEST_ERROR()  returned an error on %s.  Returning EFSCORRUPTED.",
2037			 mp->m_fsname);
2038		return XFS_ERROR(EFSCORRUPTED);
2039	}
2040	/*
2041	 * Get the index into the agi hash table for the
2042	 * list this inode will go on.
2043	 */
2044	agino = XFS_INO_TO_AGINO(mp, ip->i_ino);
2045	ASSERT(agino != 0);
2046	bucket_index = agino % XFS_AGI_UNLINKED_BUCKETS;
2047	ASSERT(be32_to_cpu(agi->agi_unlinked[bucket_index]) != NULLAGINO);
2048	ASSERT(agi->agi_unlinked[bucket_index]);
2049
2050	if (be32_to_cpu(agi->agi_unlinked[bucket_index]) == agino) {
2051		/*
2052		 * We're at the head of the list.  Get the inode's
2053		 * on-disk buffer to see if there is anyone after us
2054		 * on the list.  Only modify our next pointer if it
2055		 * is not already NULLAGINO.  This saves us the overhead
2056		 * of dealing with the buffer when there is no need to
2057		 * change it.
2058		 */
2059		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2060		if (error) {
2061			cmn_err(CE_WARN,
2062				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
2063				error, mp->m_fsname);
2064			return error;
2065		}
2066		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2067		ASSERT(next_agino != 0);
2068		if (next_agino != NULLAGINO) {
2069			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2070			offset = ip->i_boffset +
2071				offsetof(xfs_dinode_t, di_next_unlinked);
2072			xfs_trans_inode_buf(tp, ibp);
2073			xfs_trans_log_buf(tp, ibp, offset,
2074					  (offset + sizeof(xfs_agino_t) - 1));
2075			xfs_inobp_check(mp, ibp);
2076		} else {
2077			xfs_trans_brelse(tp, ibp);
2078		}
2079		/*
2080		 * Point the bucket head pointer at the next inode.
2081		 */
2082		ASSERT(next_agino != 0);
2083		ASSERT(next_agino != agino);
2084		agi->agi_unlinked[bucket_index] = cpu_to_be32(next_agino);
2085		offset = offsetof(xfs_agi_t, agi_unlinked) +
2086			(sizeof(xfs_agino_t) * bucket_index);
2087		xfs_trans_log_buf(tp, agibp, offset,
2088				  (offset + sizeof(xfs_agino_t) - 1));
2089	} else {
2090		/*
2091		 * We need to search the list for the inode being freed.
2092		 */
2093		next_agino = be32_to_cpu(agi->agi_unlinked[bucket_index]);
2094		last_ibp = NULL;
2095		while (next_agino != agino) {
2096			/*
2097			 * If the last inode wasn't the one pointing to
2098			 * us, then release its buffer since we're not
2099			 * going to do anything with it.
2100			 */
2101			if (last_ibp != NULL) {
2102				xfs_trans_brelse(tp, last_ibp);
2103			}
2104			next_ino = XFS_AGINO_TO_INO(mp, agno, next_agino);
2105			error = xfs_inotobp(mp, tp, next_ino, &last_dip,
2106					    &last_ibp, &last_offset);
2107			if (error) {
2108				cmn_err(CE_WARN,
2109			"xfs_iunlink_remove: xfs_inotobp()  returned an error %d on %s.  Returning error.",
2110					error, mp->m_fsname);
2111				return error;
2112			}
2113			next_agino = INT_GET(last_dip->di_next_unlinked, ARCH_CONVERT);
2114			ASSERT(next_agino != NULLAGINO);
2115			ASSERT(next_agino != 0);
2116		}
2117		/*
2118		 * Now last_ibp points to the buffer previous to us on
2119		 * the unlinked list.  Pull us from the list.
2120		 */
2121		error = xfs_itobp(mp, tp, ip, &dip, &ibp, 0, 0);
2122		if (error) {
2123			cmn_err(CE_WARN,
2124				"xfs_iunlink_remove: xfs_itobp()  returned an error %d on %s.  Returning error.",
2125				error, mp->m_fsname);
2126			return error;
2127		}
2128		next_agino = INT_GET(dip->di_next_unlinked, ARCH_CONVERT);
2129		ASSERT(next_agino != 0);
2130		ASSERT(next_agino != agino);
2131		if (next_agino != NULLAGINO) {
2132			INT_SET(dip->di_next_unlinked, ARCH_CONVERT, NULLAGINO);
2133			offset = ip->i_boffset +
2134				offsetof(xfs_dinode_t, di_next_unlinked);
2135			xfs_trans_inode_buf(tp, ibp);
2136			xfs_trans_log_buf(tp, ibp, offset,
2137					  (offset + sizeof(xfs_agino_t) - 1));
2138			xfs_inobp_check(mp, ibp);
2139		} else {
2140			xfs_trans_brelse(tp, ibp);
2141		}
2142		/*
2143		 * Point the previous inode on the list to the next inode.
2144		 */
2145		INT_SET(last_dip->di_next_unlinked, ARCH_CONVERT, next_agino);
2146		ASSERT(next_agino != 0);
2147		offset = last_offset + offsetof(xfs_dinode_t, di_next_unlinked);
2148		xfs_trans_inode_buf(tp, last_ibp);
2149		xfs_trans_log_buf(tp, last_ibp, offset,
2150				  (offset + sizeof(xfs_agino_t) - 1));
2151		xfs_inobp_check(mp, last_ibp);
2152	}
2153	return 0;
2154}
2155
2156STATIC_INLINE int xfs_inode_clean(xfs_inode_t *ip)
2157{
2158	return (((ip->i_itemp == NULL) ||
2159		!(ip->i_itemp->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
2160		(ip->i_update_core == 0));
2161}
2162
2163STATIC void
2164xfs_ifree_cluster(
2165	xfs_inode_t	*free_ip,
2166	xfs_trans_t	*tp,
2167	xfs_ino_t	inum)
2168{
2169	xfs_mount_t		*mp = free_ip->i_mount;
2170	int			blks_per_cluster;
2171	int			nbufs;
2172	int			ninodes;
2173	int			i, j, found, pre_flushed;
2174	xfs_daddr_t		blkno;
2175	xfs_buf_t		*bp;
2176	xfs_ihash_t		*ih;
2177	xfs_inode_t		*ip, **ip_found;
2178	xfs_inode_log_item_t	*iip;
2179	xfs_log_item_t		*lip;
2180	SPLDECL(s);
2181
2182	if (mp->m_sb.sb_blocksize >= XFS_INODE_CLUSTER_SIZE(mp)) {
2183		blks_per_cluster = 1;
2184		ninodes = mp->m_sb.sb_inopblock;
2185		nbufs = XFS_IALLOC_BLOCKS(mp);
2186	} else {
2187		blks_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) /
2188					mp->m_sb.sb_blocksize;
2189		ninodes = blks_per_cluster * mp->m_sb.sb_inopblock;
2190		nbufs = XFS_IALLOC_BLOCKS(mp) / blks_per_cluster;
2191	}
2192
2193	ip_found = kmem_alloc(ninodes * sizeof(xfs_inode_t *), KM_NOFS);
2194
2195	for (j = 0; j < nbufs; j++, inum += ninodes) {
2196		blkno = XFS_AGB_TO_DADDR(mp, XFS_INO_TO_AGNO(mp, inum),
2197					 XFS_INO_TO_AGBNO(mp, inum));
2198
2199
2200		/*
2201		 * Look for each inode in memory and attempt to lock it,
2202		 * we can be racing with flush and tail pushing here.
2203		 * any inode we get the locks on, add to an array of
2204		 * inode items to process later.
2205		 *
2206		 * The get the buffer lock, we could beat a flush
2207		 * or tail pushing thread to the lock here, in which
2208		 * case they will go looking for the inode buffer
2209		 * and fail, we need some other form of interlock
2210		 * here.
2211		 */
2212		found = 0;
2213		for (i = 0; i < ninodes; i++) {
2214			ih = XFS_IHASH(mp, inum + i);
2215			read_lock(&ih->ih_lock);
2216			for (ip = ih->ih_next; ip != NULL; ip = ip->i_next) {
2217				if (ip->i_ino == inum + i)
2218					break;
2219			}
2220
2221			/* Inode not in memory or we found it already,
2222			 * nothing to do
2223			 */
2224			if (!ip || xfs_iflags_test(ip, XFS_ISTALE)) {
2225				read_unlock(&ih->ih_lock);
2226				continue;
2227			}
2228
2229			if (xfs_inode_clean(ip)) {
2230				read_unlock(&ih->ih_lock);
2231				continue;
2232			}
2233
2234			/* If we can get the locks then add it to the
2235			 * list, otherwise by the time we get the bp lock
2236			 * below it will already be attached to the
2237			 * inode buffer.
2238			 */
2239
2240			/* This inode will already be locked - by us, lets
2241			 * keep it that way.
2242			 */
2243
2244			if (ip == free_ip) {
2245				if (xfs_iflock_nowait(ip)) {
2246					xfs_iflags_set(ip, XFS_ISTALE);
2247					if (xfs_inode_clean(ip)) {
2248						xfs_ifunlock(ip);
2249					} else {
2250						ip_found[found++] = ip;
2251					}
2252				}
2253				read_unlock(&ih->ih_lock);
2254				continue;
2255			}
2256
2257			if (xfs_ilock_nowait(ip, XFS_ILOCK_EXCL)) {
2258				if (xfs_iflock_nowait(ip)) {
2259					xfs_iflags_set(ip, XFS_ISTALE);
2260
2261					if (xfs_inode_clean(ip)) {
2262						xfs_ifunlock(ip);
2263						xfs_iunlock(ip, XFS_ILOCK_EXCL);
2264					} else {
2265						ip_found[found++] = ip;
2266					}
2267				} else {
2268					xfs_iunlock(ip, XFS_ILOCK_EXCL);
2269				}
2270			}
2271
2272			read_unlock(&ih->ih_lock);
2273		}
2274
2275		bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, blkno,
2276					mp->m_bsize * blks_per_cluster,
2277					XFS_BUF_LOCK);
2278
2279		pre_flushed = 0;
2280		lip = XFS_BUF_FSPRIVATE(bp, xfs_log_item_t *);
2281		while (lip) {
2282			if (lip->li_type == XFS_LI_INODE) {
2283				iip = (xfs_inode_log_item_t *)lip;
2284				ASSERT(iip->ili_logged == 1);
2285				lip->li_cb = (void(*)(xfs_buf_t*,xfs_log_item_t*)) xfs_istale_done;
2286				AIL_LOCK(mp,s);
2287				iip->ili_flush_lsn = iip->ili_item.li_lsn;
2288				AIL_UNLOCK(mp, s);
2289				xfs_iflags_set(iip->ili_inode, XFS_ISTALE);
2290				pre_flushed++;
2291			}
2292			lip = lip->li_bio_list;
2293		}
2294
2295		for (i = 0; i < found; i++) {
2296			ip = ip_found[i];
2297			iip = ip->i_itemp;
2298
2299			if (!iip) {
2300				ip->i_update_core = 0;
2301				xfs_ifunlock(ip);
2302				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2303				continue;
2304			}
2305
2306			iip->ili_last_fields = iip->ili_format.ilf_fields;
2307			iip->ili_format.ilf_fields = 0;
2308			iip->ili_logged = 1;
2309			AIL_LOCK(mp,s);
2310			iip->ili_flush_lsn = iip->ili_item.li_lsn;
2311			AIL_UNLOCK(mp, s);
2312
2313			xfs_buf_attach_iodone(bp,
2314				(void(*)(xfs_buf_t*,xfs_log_item_t*))
2315				xfs_istale_done, (xfs_log_item_t *)iip);
2316			if (ip != free_ip) {
2317				xfs_iunlock(ip, XFS_ILOCK_EXCL);
2318			}
2319		}
2320
2321		if (found || pre_flushed)
2322			xfs_trans_stale_inode_buf(tp, bp);
2323		xfs_trans_binval(tp, bp);
2324	}
2325
2326	kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
2327}
2328
2329/*
2330 * This is called to return an inode to the inode free list.
2331 * The inode should already be truncated to 0 length and have
2332 * no pages associated with it.  This routine also assumes that
2333 * the inode is already a part of the transaction.
2334 *
2335 * The on-disk copy of the inode will have been added to the list
2336 * of unlinked inodes in the AGI. We need to remove the inode from
2337 * that list atomically with respect to freeing it here.
2338 */
2339int
2340xfs_ifree(
2341	xfs_trans_t	*tp,
2342	xfs_inode_t	*ip,
2343	xfs_bmap_free_t	*flist)
2344{
2345	int			error;
2346	int			delete;
2347	xfs_ino_t		first_ino;
2348
2349	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2350	ASSERT(ip->i_transp == tp);
2351	ASSERT(ip->i_d.di_nlink == 0);
2352	ASSERT(ip->i_d.di_nextents == 0);
2353	ASSERT(ip->i_d.di_anextents == 0);
2354	ASSERT((ip->i_d.di_size == 0 && ip->i_size == 0) ||
2355	       ((ip->i_d.di_mode & S_IFMT) != S_IFREG));
2356	ASSERT(ip->i_d.di_nblocks == 0);
2357
2358	/*
2359	 * Pull the on-disk inode from the AGI unlinked list.
2360	 */
2361	error = xfs_iunlink_remove(tp, ip);
2362	if (error != 0) {
2363		return error;
2364	}
2365
2366	error = xfs_difree(tp, ip->i_ino, flist, &delete, &first_ino);
2367	if (error != 0) {
2368		return error;
2369	}
2370	ip->i_d.di_mode = 0;		/* mark incore inode as free */
2371	ip->i_d.di_flags = 0;
2372	ip->i_d.di_dmevmask = 0;
2373	ip->i_d.di_forkoff = 0;		/* mark the attr fork not in use */
2374	ip->i_df.if_ext_max =
2375		XFS_IFORK_DSIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
2376	ip->i_d.di_format = XFS_DINODE_FMT_EXTENTS;
2377	ip->i_d.di_aformat = XFS_DINODE_FMT_EXTENTS;
2378	/*
2379	 * Bump the generation count so no one will be confused
2380	 * by reincarnations of this inode.
2381	 */
2382	ip->i_d.di_gen++;
2383	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
2384
2385	if (delete) {
2386		xfs_ifree_cluster(ip, tp, first_ino);
2387	}
2388
2389	return 0;
2390}
2391
2392/*
2393 * Reallocate the space for if_broot based on the number of records
2394 * being added or deleted as indicated in rec_diff.  Move the records
2395 * and pointers in if_broot to fit the new size.  When shrinking this
2396 * will eliminate holes between the records and pointers created by
2397 * the caller.  When growing this will create holes to be filled in
2398 * by the caller.
2399 *
2400 * The caller must not request to add more records than would fit in
2401 * the on-disk inode root.  If the if_broot is currently NULL, then
2402 * if we adding records one will be allocated.  The caller must also
2403 * not request that the number of records go below zero, although
2404 * it can go to zero.
2405 *
2406 * ip -- the inode whose if_broot area is changing
2407 * ext_diff -- the change in the number of records, positive or negative,
2408 *	 requested for the if_broot array.
2409 */
2410void
2411xfs_iroot_realloc(
2412	xfs_inode_t		*ip,
2413	int			rec_diff,
2414	int			whichfork)
2415{
2416	int			cur_max;
2417	xfs_ifork_t		*ifp;
2418	xfs_bmbt_block_t	*new_broot;
2419	int			new_max;
2420	size_t			new_size;
2421	char			*np;
2422	char			*op;
2423
2424	/*
2425	 * Handle the degenerate case quietly.
2426	 */
2427	if (rec_diff == 0) {
2428		return;
2429	}
2430
2431	ifp = XFS_IFORK_PTR(ip, whichfork);
2432	if (rec_diff > 0) {
2433		/*
2434		 * If there wasn't any memory allocated before, just
2435		 * allocate it now and get out.
2436		 */
2437		if (ifp->if_broot_bytes == 0) {
2438			new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(rec_diff);
2439			ifp->if_broot = (xfs_bmbt_block_t*)kmem_alloc(new_size,
2440								     KM_SLEEP);
2441			ifp->if_broot_bytes = (int)new_size;
2442			return;
2443		}
2444
2445		/*
2446		 * If there is already an existing if_broot, then we need
2447		 * to realloc() it and shift the pointers to their new
2448		 * location.  The records don't change location because
2449		 * they are kept butted up against the btree block header.
2450		 */
2451		cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2452		new_max = cur_max + rec_diff;
2453		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2454		ifp->if_broot = (xfs_bmbt_block_t *)
2455		  kmem_realloc(ifp->if_broot,
2456				new_size,
2457				(size_t)XFS_BMAP_BROOT_SPACE_CALC(cur_max), /* old size */
2458				KM_SLEEP);
2459		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2460						      ifp->if_broot_bytes);
2461		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2462						      (int)new_size);
2463		ifp->if_broot_bytes = (int)new_size;
2464		ASSERT(ifp->if_broot_bytes <=
2465			XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2466		memmove(np, op, cur_max * (uint)sizeof(xfs_dfsbno_t));
2467		return;
2468	}
2469
2470	/*
2471	 * rec_diff is less than 0.  In this case, we are shrinking the
2472	 * if_broot buffer.  It must already exist.  If we go to zero
2473	 * records, just get rid of the root and clear the status bit.
2474	 */
2475	ASSERT((ifp->if_broot != NULL) && (ifp->if_broot_bytes > 0));
2476	cur_max = XFS_BMAP_BROOT_MAXRECS(ifp->if_broot_bytes);
2477	new_max = cur_max + rec_diff;
2478	ASSERT(new_max >= 0);
2479	if (new_max > 0)
2480		new_size = (size_t)XFS_BMAP_BROOT_SPACE_CALC(new_max);
2481	else
2482		new_size = 0;
2483	if (new_size > 0) {
2484		new_broot = (xfs_bmbt_block_t *)kmem_alloc(new_size, KM_SLEEP);
2485		/*
2486		 * First copy over the btree block header.
2487		 */
2488		memcpy(new_broot, ifp->if_broot, sizeof(xfs_bmbt_block_t));
2489	} else {
2490		new_broot = NULL;
2491		ifp->if_flags &= ~XFS_IFBROOT;
2492	}
2493
2494	/*
2495	 * Only copy the records and pointers if there are any.
2496	 */
2497	if (new_max > 0) {
2498		/*
2499		 * First copy the records.
2500		 */
2501		op = (char *)XFS_BMAP_BROOT_REC_ADDR(ifp->if_broot, 1,
2502						     ifp->if_broot_bytes);
2503		np = (char *)XFS_BMAP_BROOT_REC_ADDR(new_broot, 1,
2504						     (int)new_size);
2505		memcpy(np, op, new_max * (uint)sizeof(xfs_bmbt_rec_t));
2506
2507		/*
2508		 * Then copy the pointers.
2509		 */
2510		op = (char *)XFS_BMAP_BROOT_PTR_ADDR(ifp->if_broot, 1,
2511						     ifp->if_broot_bytes);
2512		np = (char *)XFS_BMAP_BROOT_PTR_ADDR(new_broot, 1,
2513						     (int)new_size);
2514		memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
2515	}
2516	kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2517	ifp->if_broot = new_broot;
2518	ifp->if_broot_bytes = (int)new_size;
2519	ASSERT(ifp->if_broot_bytes <=
2520		XFS_IFORK_SIZE(ip, whichfork) + XFS_BROOT_SIZE_ADJ);
2521	return;
2522}
2523
2524
2525/*
2526 * This is called when the amount of space needed for if_data
2527 * is increased or decreased.  The change in size is indicated by
2528 * the number of bytes that need to be added or deleted in the
2529 * byte_diff parameter.
2530 *
2531 * If the amount of space needed has decreased below the size of the
2532 * inline buffer, then switch to using the inline buffer.  Otherwise,
2533 * use kmem_realloc() or kmem_alloc() to adjust the size of the buffer
2534 * to what is needed.
2535 *
2536 * ip -- the inode whose if_data area is changing
2537 * byte_diff -- the change in the number of bytes, positive or negative,
2538 *	 requested for the if_data array.
2539 */
2540void
2541xfs_idata_realloc(
2542	xfs_inode_t	*ip,
2543	int		byte_diff,
2544	int		whichfork)
2545{
2546	xfs_ifork_t	*ifp;
2547	int		new_size;
2548	int		real_size;
2549
2550	if (byte_diff == 0) {
2551		return;
2552	}
2553
2554	ifp = XFS_IFORK_PTR(ip, whichfork);
2555	new_size = (int)ifp->if_bytes + byte_diff;
2556	ASSERT(new_size >= 0);
2557
2558	if (new_size == 0) {
2559		if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2560			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2561		}
2562		ifp->if_u1.if_data = NULL;
2563		real_size = 0;
2564	} else if (new_size <= sizeof(ifp->if_u2.if_inline_data)) {
2565		/*
2566		 * If the valid extents/data can fit in if_inline_ext/data,
2567		 * copy them from the malloc'd vector and free it.
2568		 */
2569		if (ifp->if_u1.if_data == NULL) {
2570			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2571		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2572			ASSERT(ifp->if_real_bytes != 0);
2573			memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
2574			      new_size);
2575			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2576			ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
2577		}
2578		real_size = 0;
2579	} else {
2580		/*
2581		 * Stuck with malloc/realloc.
2582		 * For inline data, the underlying buffer must be
2583		 * a multiple of 4 bytes in size so that it can be
2584		 * logged and stay on word boundaries.  We enforce
2585		 * that here.
2586		 */
2587		real_size = roundup(new_size, 4);
2588		if (ifp->if_u1.if_data == NULL) {
2589			ASSERT(ifp->if_real_bytes == 0);
2590			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2591		} else if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
2592			/*
2593			 * Only do the realloc if the underlying size
2594			 * is really changing.
2595			 */
2596			if (ifp->if_real_bytes != real_size) {
2597				ifp->if_u1.if_data =
2598					kmem_realloc(ifp->if_u1.if_data,
2599							real_size,
2600							ifp->if_real_bytes,
2601							KM_SLEEP);
2602			}
2603		} else {
2604			ASSERT(ifp->if_real_bytes == 0);
2605			ifp->if_u1.if_data = kmem_alloc(real_size, KM_SLEEP);
2606			memcpy(ifp->if_u1.if_data, ifp->if_u2.if_inline_data,
2607				ifp->if_bytes);
2608		}
2609	}
2610	ifp->if_real_bytes = real_size;
2611	ifp->if_bytes = new_size;
2612	ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2613}
2614
2615
2616
2617
2618/*
2619 * Map inode to disk block and offset.
2620 *
2621 * mp -- the mount point structure for the current file system
2622 * tp -- the current transaction
2623 * ino -- the inode number of the inode to be located
2624 * imap -- this structure is filled in with the information necessary
2625 *	 to retrieve the given inode from disk
2626 * flags -- flags to pass to xfs_dilocate indicating whether or not
2627 *	 lookups in the inode btree were OK or not
2628 */
2629int
2630xfs_imap(
2631	xfs_mount_t	*mp,
2632	xfs_trans_t	*tp,
2633	xfs_ino_t	ino,
2634	xfs_imap_t	*imap,
2635	uint		flags)
2636{
2637	xfs_fsblock_t	fsbno;
2638	int		len;
2639	int		off;
2640	int		error;
2641
2642	fsbno = imap->im_blkno ?
2643		XFS_DADDR_TO_FSB(mp, imap->im_blkno) : NULLFSBLOCK;
2644	error = xfs_dilocate(mp, tp, ino, &fsbno, &len, &off, flags);
2645	if (error != 0) {
2646		return error;
2647	}
2648	imap->im_blkno = XFS_FSB_TO_DADDR(mp, fsbno);
2649	imap->im_len = XFS_FSB_TO_BB(mp, len);
2650	imap->im_agblkno = XFS_FSB_TO_AGBNO(mp, fsbno);
2651	imap->im_ioffset = (ushort)off;
2652	imap->im_boffset = (ushort)(off << mp->m_sb.sb_inodelog);
2653	return 0;
2654}
2655
2656void
2657xfs_idestroy_fork(
2658	xfs_inode_t	*ip,
2659	int		whichfork)
2660{
2661	xfs_ifork_t	*ifp;
2662
2663	ifp = XFS_IFORK_PTR(ip, whichfork);
2664	if (ifp->if_broot != NULL) {
2665		kmem_free(ifp->if_broot, ifp->if_broot_bytes);
2666		ifp->if_broot = NULL;
2667	}
2668
2669	/*
2670	 * If the format is local, then we can't have an extents
2671	 * array so just look for an inline data array.  If we're
2672	 * not local then we may or may not have an extents list,
2673	 * so check and free it up if we do.
2674	 */
2675	if (XFS_IFORK_FORMAT(ip, whichfork) == XFS_DINODE_FMT_LOCAL) {
2676		if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
2677		    (ifp->if_u1.if_data != NULL)) {
2678			ASSERT(ifp->if_real_bytes != 0);
2679			kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
2680			ifp->if_u1.if_data = NULL;
2681			ifp->if_real_bytes = 0;
2682		}
2683	} else if ((ifp->if_flags & XFS_IFEXTENTS) &&
2684		   ((ifp->if_flags & XFS_IFEXTIREC) ||
2685		    ((ifp->if_u1.if_extents != NULL) &&
2686		     (ifp->if_u1.if_extents != ifp->if_u2.if_inline_ext)))) {
2687		ASSERT(ifp->if_real_bytes != 0);
2688		xfs_iext_destroy(ifp);
2689	}
2690	ASSERT(ifp->if_u1.if_extents == NULL ||
2691	       ifp->if_u1.if_extents == ifp->if_u2.if_inline_ext);
2692	ASSERT(ifp->if_real_bytes == 0);
2693	if (whichfork == XFS_ATTR_FORK) {
2694		kmem_zone_free(xfs_ifork_zone, ip->i_afp);
2695		ip->i_afp = NULL;
2696	}
2697}
2698
2699/*
2700 * This is called free all the memory associated with an inode.
2701 * It must free the inode itself and any buffers allocated for
2702 * if_extents/if_data and if_broot.  It must also free the lock
2703 * associated with the inode.
2704 */
2705void
2706xfs_idestroy(
2707	xfs_inode_t	*ip)
2708{
2709
2710	switch (ip->i_d.di_mode & S_IFMT) {
2711	case S_IFREG:
2712	case S_IFDIR:
2713	case S_IFLNK:
2714		xfs_idestroy_fork(ip, XFS_DATA_FORK);
2715		break;
2716	}
2717	if (ip->i_afp)
2718		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
2719	mrfree(&ip->i_lock);
2720	mrfree(&ip->i_iolock);
2721	freesema(&ip->i_flock);
2722#ifdef XFS_BMAP_TRACE
2723	ktrace_free(ip->i_xtrace);
2724#endif
2725#ifdef XFS_BMBT_TRACE
2726	ktrace_free(ip->i_btrace);
2727#endif
2728#ifdef XFS_RW_TRACE
2729	ktrace_free(ip->i_rwtrace);
2730#endif
2731#ifdef XFS_ILOCK_TRACE
2732	ktrace_free(ip->i_lock_trace);
2733#endif
2734#ifdef XFS_DIR2_TRACE
2735	ktrace_free(ip->i_dir_trace);
2736#endif
2737	if (ip->i_itemp) {
2738		/*
2739		 * Only if we are shutting down the fs will we see an
2740		 * inode still in the AIL. If it is there, we should remove
2741		 * it to prevent a use-after-free from occurring.
2742		 */
2743		xfs_mount_t	*mp = ip->i_mount;
2744		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
2745		int		s;
2746
2747		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
2748				       XFS_FORCED_SHUTDOWN(ip->i_mount));
2749		if (lip->li_flags & XFS_LI_IN_AIL) {
2750			AIL_LOCK(mp, s);
2751			if (lip->li_flags & XFS_LI_IN_AIL)
2752				xfs_trans_delete_ail(mp, lip, s);
2753			else
2754				AIL_UNLOCK(mp, s);
2755		}
2756		xfs_inode_item_destroy(ip);
2757	}
2758	kmem_zone_free(xfs_inode_zone, ip);
2759}
2760
2761
2762/*
2763 * Increment the pin count of the given buffer.
2764 * This value is protected by ipinlock spinlock in the mount structure.
2765 */
2766void
2767xfs_ipin(
2768	xfs_inode_t	*ip)
2769{
2770	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
2771
2772	atomic_inc(&ip->i_pincount);
2773}
2774
2775/*
2776 * Decrement the pin count of the given inode, and wake up
2777 * anyone in xfs_iwait_unpin() if the count goes to 0.  The
2778 * inode must have been previously pinned with a call to xfs_ipin().
2779 */
2780void
2781xfs_iunpin(
2782	xfs_inode_t	*ip)
2783{
2784	ASSERT(atomic_read(&ip->i_pincount) > 0);
2785
2786	if (atomic_dec_and_lock(&ip->i_pincount, &ip->i_flags_lock)) {
2787
2788		/*
2789		 * If the inode is currently being reclaimed, the link between
2790		 * the bhv_vnode and the xfs_inode will be broken after the
2791		 * XFS_IRECLAIM* flag is set. Hence, if these flags are not
2792		 * set, then we can move forward and mark the linux inode dirty
2793		 * knowing that it is still valid as it won't freed until after
2794		 * the bhv_vnode<->xfs_inode link is broken in xfs_reclaim. The
2795		 * i_flags_lock is used to synchronise the setting of the
2796		 * XFS_IRECLAIM* flags and the breaking of the link, and so we
2797		 * can execute atomically w.r.t to reclaim by holding this lock
2798		 * here.
2799		 *
2800		 * However, we still need to issue the unpin wakeup call as the
2801		 * inode reclaim may be blocked waiting for the inode to become
2802		 * unpinned.
2803		 */
2804
2805		if (!__xfs_iflags_test(ip, XFS_IRECLAIM|XFS_IRECLAIMABLE)) {
2806			bhv_vnode_t	*vp = XFS_ITOV_NULL(ip);
2807			struct inode *inode = NULL;
2808
2809			BUG_ON(vp == NULL);
2810			inode = vn_to_inode(vp);
2811			BUG_ON(inode->i_state & I_CLEAR);
2812
2813			/* make sync come back and flush this inode */
2814			if (!(inode->i_state & (I_NEW|I_FREEING)))
2815				mark_inode_dirty_sync(inode);
2816		}
2817		spin_unlock(&ip->i_flags_lock);
2818		wake_up(&ip->i_ipin_wait);
2819	}
2820}
2821
2822/*
2823 * This is called to wait for the given inode to be unpinned.
2824 * It will sleep until this happens.  The caller must have the
2825 * inode locked in at least shared mode so that the buffer cannot
2826 * be subsequently pinned once someone is waiting for it to be
2827 * unpinned.
2828 */
2829STATIC void
2830xfs_iunpin_wait(
2831	xfs_inode_t	*ip)
2832{
2833	xfs_inode_log_item_t	*iip;
2834	xfs_lsn_t	lsn;
2835
2836	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE | MR_ACCESS));
2837
2838	if (atomic_read(&ip->i_pincount) == 0) {
2839		return;
2840	}
2841
2842	iip = ip->i_itemp;
2843	if (iip && iip->ili_last_lsn) {
2844		lsn = iip->ili_last_lsn;
2845	} else {
2846		lsn = (xfs_lsn_t)0;
2847	}
2848
2849	/*
2850	 * Give the log a push so we don't wait here too long.
2851	 */
2852	xfs_log_force(ip->i_mount, lsn, XFS_LOG_FORCE);
2853
2854	wait_event(ip->i_ipin_wait, (atomic_read(&ip->i_pincount) == 0));
2855}
2856
2857
2858/*
2859 * xfs_iextents_copy()
2860 *
2861 * This is called to copy the REAL extents (as opposed to the delayed
2862 * allocation extents) from the inode into the given buffer.  It
2863 * returns the number of bytes copied into the buffer.
2864 *
2865 * If there are no delayed allocation extents, then we can just
2866 * memcpy() the extents into the buffer.  Otherwise, we need to
2867 * examine each extent in turn and skip those which are delayed.
2868 */
2869int
2870xfs_iextents_copy(
2871	xfs_inode_t		*ip,
2872	xfs_bmbt_rec_t		*buffer,
2873	int			whichfork)
2874{
2875	int			copied;
2876	xfs_bmbt_rec_t		*dest_ep;
2877	xfs_bmbt_rec_t		*ep;
2878#ifdef XFS_BMAP_TRACE
2879	static char		fname[] = "xfs_iextents_copy";
2880#endif
2881	int			i;
2882	xfs_ifork_t		*ifp;
2883	int			nrecs;
2884	xfs_fsblock_t		start_block;
2885
2886	ifp = XFS_IFORK_PTR(ip, whichfork);
2887	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
2888	ASSERT(ifp->if_bytes > 0);
2889
2890	nrecs = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
2891	xfs_bmap_trace_exlist(fname, ip, nrecs, whichfork);
2892	ASSERT(nrecs > 0);
2893
2894	/*
2895	 * There are some delayed allocation extents in the
2896	 * inode, so copy the extents one at a time and skip
2897	 * the delayed ones.  There must be at least one
2898	 * non-delayed extent.
2899	 */
2900	dest_ep = buffer;
2901	copied = 0;
2902	for (i = 0; i < nrecs; i++) {
2903		ep = xfs_iext_get_ext(ifp, i);
2904		start_block = xfs_bmbt_get_startblock(ep);
2905		if (ISNULLSTARTBLOCK(start_block)) {
2906			/*
2907			 * It's a delayed allocation extent, so skip it.
2908			 */
2909			continue;
2910		}
2911
2912		/* Translate to on disk format */
2913		put_unaligned(INT_GET(ep->l0, ARCH_CONVERT),
2914			      (__uint64_t*)&dest_ep->l0);
2915		put_unaligned(INT_GET(ep->l1, ARCH_CONVERT),
2916			      (__uint64_t*)&dest_ep->l1);
2917		dest_ep++;
2918		copied++;
2919	}
2920	ASSERT(copied != 0);
2921	xfs_validate_extents(ifp, copied, 1, XFS_EXTFMT_INODE(ip));
2922
2923	return (copied * (uint)sizeof(xfs_bmbt_rec_t));
2924}
2925
2926/*
2927 * Each of the following cases stores data into the same region
2928 * of the on-disk inode, so only one of them can be valid at
2929 * any given time. While it is possible to have conflicting formats
2930 * and log flags, e.g. having XFS_ILOG_?DATA set when the fork is
2931 * in EXTENTS format, this can only happen when the fork has
2932 * changed formats after being modified but before being flushed.
2933 * In these cases, the format always takes precedence, because the
2934 * format indicates the current state of the fork.
2935 */
2936/*ARGSUSED*/
2937STATIC int
2938xfs_iflush_fork(
2939	xfs_inode_t		*ip,
2940	xfs_dinode_t		*dip,
2941	xfs_inode_log_item_t	*iip,
2942	int			whichfork,
2943	xfs_buf_t		*bp)
2944{
2945	char			*cp;
2946	xfs_ifork_t		*ifp;
2947	xfs_mount_t		*mp;
2948#ifdef XFS_TRANS_DEBUG
2949	int			first;
2950#endif
2951	static const short	brootflag[2] =
2952		{ XFS_ILOG_DBROOT, XFS_ILOG_ABROOT };
2953	static const short	dataflag[2] =
2954		{ XFS_ILOG_DDATA, XFS_ILOG_ADATA };
2955	static const short	extflag[2] =
2956		{ XFS_ILOG_DEXT, XFS_ILOG_AEXT };
2957
2958	if (iip == NULL)
2959		return 0;
2960	ifp = XFS_IFORK_PTR(ip, whichfork);
2961	/*
2962	 * This can happen if we gave up in iformat in an error path,
2963	 * for the attribute fork.
2964	 */
2965	if (ifp == NULL) {
2966		ASSERT(whichfork == XFS_ATTR_FORK);
2967		return 0;
2968	}
2969	cp = XFS_DFORK_PTR(dip, whichfork);
2970	mp = ip->i_mount;
2971	switch (XFS_IFORK_FORMAT(ip, whichfork)) {
2972	case XFS_DINODE_FMT_LOCAL:
2973		if ((iip->ili_format.ilf_fields & dataflag[whichfork]) &&
2974		    (ifp->if_bytes > 0)) {
2975			ASSERT(ifp->if_u1.if_data != NULL);
2976			ASSERT(ifp->if_bytes <= XFS_IFORK_SIZE(ip, whichfork));
2977			memcpy(cp, ifp->if_u1.if_data, ifp->if_bytes);
2978		}
2979		break;
2980
2981	case XFS_DINODE_FMT_EXTENTS:
2982		ASSERT((ifp->if_flags & XFS_IFEXTENTS) ||
2983		       !(iip->ili_format.ilf_fields & extflag[whichfork]));
2984		ASSERT((xfs_iext_get_ext(ifp, 0) != NULL) ||
2985			(ifp->if_bytes == 0));
2986		ASSERT((xfs_iext_get_ext(ifp, 0) == NULL) ||
2987			(ifp->if_bytes > 0));
2988		if ((iip->ili_format.ilf_fields & extflag[whichfork]) &&
2989		    (ifp->if_bytes > 0)) {
2990			ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) > 0);
2991			(void)xfs_iextents_copy(ip, (xfs_bmbt_rec_t *)cp,
2992				whichfork);
2993		}
2994		break;
2995
2996	case XFS_DINODE_FMT_BTREE:
2997		if ((iip->ili_format.ilf_fields & brootflag[whichfork]) &&
2998		    (ifp->if_broot_bytes > 0)) {
2999			ASSERT(ifp->if_broot != NULL);
3000			ASSERT(ifp->if_broot_bytes <=
3001			       (XFS_IFORK_SIZE(ip, whichfork) +
3002				XFS_BROOT_SIZE_ADJ));
3003			xfs_bmbt_to_bmdr(ifp->if_broot, ifp->if_broot_bytes,
3004				(xfs_bmdr_block_t *)cp,
3005				XFS_DFORK_SIZE(dip, mp, whichfork));
3006		}
3007		break;
3008
3009	case XFS_DINODE_FMT_DEV:
3010		if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
3011			ASSERT(whichfork == XFS_DATA_FORK);
3012			INT_SET(dip->di_u.di_dev, ARCH_CONVERT, ip->i_df.if_u2.if_rdev);
3013		}
3014		break;
3015
3016	case XFS_DINODE_FMT_UUID:
3017		if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
3018			ASSERT(whichfork == XFS_DATA_FORK);
3019			memcpy(&dip->di_u.di_muuid, &ip->i_df.if_u2.if_uuid,
3020				sizeof(uuid_t));
3021		}
3022		break;
3023
3024	default:
3025		ASSERT(0);
3026		break;
3027	}
3028
3029	return 0;
3030}
3031
3032/*
3033 * xfs_iflush() will write a modified inode's changes out to the
3034 * inode's on disk home.  The caller must have the inode lock held
3035 * in at least shared mode and the inode flush semaphore must be
3036 * held as well.  The inode lock will still be held upon return from
3037 * the call and the caller is free to unlock it.
3038 * The inode flush lock will be unlocked when the inode reaches the disk.
3039 * The flags indicate how the inode's buffer should be written out.
3040 */
3041int
3042xfs_iflush(
3043	xfs_inode_t		*ip,
3044	uint			flags)
3045{
3046	xfs_inode_log_item_t	*iip;
3047	xfs_buf_t		*bp;
3048	xfs_dinode_t		*dip;
3049	xfs_mount_t		*mp;
3050	int			error;
3051	/* REFERENCED */
3052	xfs_chash_t		*ch;
3053	xfs_inode_t		*iq;
3054	int			clcount;	/* count of inodes clustered */
3055	int			bufwasdelwri;
3056	enum { INT_DELWRI = (1 << 0), INT_ASYNC = (1 << 1) };
3057	SPLDECL(s);
3058
3059	XFS_STATS_INC(xs_iflush_count);
3060
3061	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3062	ASSERT(issemalocked(&(ip->i_flock)));
3063	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3064	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3065
3066	iip = ip->i_itemp;
3067	mp = ip->i_mount;
3068
3069	/*
3070	 * If the inode isn't dirty, then just release the inode
3071	 * flush lock and do nothing.
3072	 */
3073	if ((ip->i_update_core == 0) &&
3074	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3075		ASSERT((iip != NULL) ?
3076			 !(iip->ili_item.li_flags & XFS_LI_IN_AIL) : 1);
3077		xfs_ifunlock(ip);
3078		return 0;
3079	}
3080
3081	/*
3082	 * We can't flush the inode until it is unpinned, so
3083	 * wait for it.  We know noone new can pin it, because
3084	 * we are holding the inode lock shared and you need
3085	 * to hold it exclusively to pin the inode.
3086	 */
3087	xfs_iunpin_wait(ip);
3088
3089	/*
3090	 * This may have been unpinned because the filesystem is shutting
3091	 * down forcibly. If that's the case we must not write this inode
3092	 * to disk, because the log record didn't make it to disk!
3093	 */
3094	if (XFS_FORCED_SHUTDOWN(mp)) {
3095		ip->i_update_core = 0;
3096		if (iip)
3097			iip->ili_format.ilf_fields = 0;
3098		xfs_ifunlock(ip);
3099		return XFS_ERROR(EIO);
3100	}
3101
3102	/*
3103	 * Get the buffer containing the on-disk inode.
3104	 */
3105	error = xfs_itobp(mp, NULL, ip, &dip, &bp, 0, 0);
3106	if (error) {
3107		xfs_ifunlock(ip);
3108		return error;
3109	}
3110
3111	/*
3112	 * Decide how buffer will be flushed out.  This is done before
3113	 * the call to xfs_iflush_int because this field is zeroed by it.
3114	 */
3115	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3116		/*
3117		 * Flush out the inode buffer according to the directions
3118		 * of the caller.  In the cases where the caller has given
3119		 * us a choice choose the non-delwri case.  This is because
3120		 * the inode is in the AIL and we need to get it out soon.
3121		 */
3122		switch (flags) {
3123		case XFS_IFLUSH_SYNC:
3124		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3125			flags = 0;
3126			break;
3127		case XFS_IFLUSH_ASYNC:
3128		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3129			flags = INT_ASYNC;
3130			break;
3131		case XFS_IFLUSH_DELWRI:
3132			flags = INT_DELWRI;
3133			break;
3134		default:
3135			ASSERT(0);
3136			flags = 0;
3137			break;
3138		}
3139	} else {
3140		switch (flags) {
3141		case XFS_IFLUSH_DELWRI_ELSE_SYNC:
3142		case XFS_IFLUSH_DELWRI_ELSE_ASYNC:
3143		case XFS_IFLUSH_DELWRI:
3144			flags = INT_DELWRI;
3145			break;
3146		case XFS_IFLUSH_ASYNC:
3147			flags = INT_ASYNC;
3148			break;
3149		case XFS_IFLUSH_SYNC:
3150			flags = 0;
3151			break;
3152		default:
3153			ASSERT(0);
3154			flags = 0;
3155			break;
3156		}
3157	}
3158
3159	/*
3160	 * First flush out the inode that xfs_iflush was called with.
3161	 */
3162	error = xfs_iflush_int(ip, bp);
3163	if (error) {
3164		goto corrupt_out;
3165	}
3166
3167	/*
3168	 * inode clustering:
3169	 * see if other inodes can be gathered into this write
3170	 */
3171
3172	ip->i_chash->chl_buf = bp;
3173
3174	ch = XFS_CHASH(mp, ip->i_blkno);
3175	s = mutex_spinlock(&ch->ch_lock);
3176
3177	clcount = 0;
3178	for (iq = ip->i_cnext; iq != ip; iq = iq->i_cnext) {
3179		/*
3180		 * Do an un-protected check to see if the inode is dirty and
3181		 * is a candidate for flushing.  These checks will be repeated
3182		 * later after the appropriate locks are acquired.
3183		 */
3184		iip = iq->i_itemp;
3185		if ((iq->i_update_core == 0) &&
3186		    ((iip == NULL) ||
3187		     !(iip->ili_format.ilf_fields & XFS_ILOG_ALL)) &&
3188		      xfs_ipincount(iq) == 0) {
3189			continue;
3190		}
3191
3192		/*
3193		 * Try to get locks.  If any are unavailable,
3194		 * then this inode cannot be flushed and is skipped.
3195		 */
3196
3197		/* get inode locks (just i_lock) */
3198		if (xfs_ilock_nowait(iq, XFS_ILOCK_SHARED)) {
3199			/* get inode flush lock */
3200			if (xfs_iflock_nowait(iq)) {
3201				/* check if pinned */
3202				if (xfs_ipincount(iq) == 0) {
3203					/* arriving here means that
3204					 * this inode can be flushed.
3205					 * first re-check that it's
3206					 * dirty
3207					 */
3208					iip = iq->i_itemp;
3209					if ((iq->i_update_core != 0)||
3210					    ((iip != NULL) &&
3211					     (iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3212						clcount++;
3213						error = xfs_iflush_int(iq, bp);
3214						if (error) {
3215							xfs_iunlock(iq,
3216								    XFS_ILOCK_SHARED);
3217							goto cluster_corrupt_out;
3218						}
3219					} else {
3220						xfs_ifunlock(iq);
3221					}
3222				} else {
3223					xfs_ifunlock(iq);
3224				}
3225			}
3226			xfs_iunlock(iq, XFS_ILOCK_SHARED);
3227		}
3228	}
3229	mutex_spinunlock(&ch->ch_lock, s);
3230
3231	if (clcount) {
3232		XFS_STATS_INC(xs_icluster_flushcnt);
3233		XFS_STATS_ADD(xs_icluster_flushinode, clcount);
3234	}
3235
3236	/*
3237	 * If the buffer is pinned then push on the log so we won't
3238	 * get stuck waiting in the write for too long.
3239	 */
3240	if (XFS_BUF_ISPINNED(bp)){
3241		xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE);
3242	}
3243
3244	if (flags & INT_DELWRI) {
3245		xfs_bdwrite(mp, bp);
3246	} else if (flags & INT_ASYNC) {
3247		xfs_bawrite(mp, bp);
3248	} else {
3249		error = xfs_bwrite(mp, bp);
3250	}
3251	return error;
3252
3253corrupt_out:
3254	xfs_buf_relse(bp);
3255	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3256	xfs_iflush_abort(ip);
3257	/*
3258	 * Unlocks the flush lock
3259	 */
3260	return XFS_ERROR(EFSCORRUPTED);
3261
3262cluster_corrupt_out:
3263	/* Corruption detected in the clustering loop.  Invalidate the
3264	 * inode buffer and shut down the filesystem.
3265	 */
3266	mutex_spinunlock(&ch->ch_lock, s);
3267
3268	/*
3269	 * Clean up the buffer.  If it was B_DELWRI, just release it --
3270	 * brelse can handle it with no problems.  If not, shut down the
3271	 * filesystem before releasing the buffer.
3272	 */
3273	if ((bufwasdelwri= XFS_BUF_ISDELAYWRITE(bp))) {
3274		xfs_buf_relse(bp);
3275	}
3276
3277	xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
3278
3279	if(!bufwasdelwri)  {
3280		/*
3281		 * Just like incore_relse: if we have b_iodone functions,
3282		 * mark the buffer as an error and call them.  Otherwise
3283		 * mark it as stale and brelse.
3284		 */
3285		if (XFS_BUF_IODONE_FUNC(bp)) {
3286			XFS_BUF_CLR_BDSTRAT_FUNC(bp);
3287			XFS_BUF_UNDONE(bp);
3288			XFS_BUF_STALE(bp);
3289			XFS_BUF_SHUT(bp);
3290			XFS_BUF_ERROR(bp,EIO);
3291			xfs_biodone(bp);
3292		} else {
3293			XFS_BUF_STALE(bp);
3294			xfs_buf_relse(bp);
3295		}
3296	}
3297
3298	xfs_iflush_abort(iq);
3299	/*
3300	 * Unlocks the flush lock
3301	 */
3302	return XFS_ERROR(EFSCORRUPTED);
3303}
3304
3305
3306STATIC int
3307xfs_iflush_int(
3308	xfs_inode_t		*ip,
3309	xfs_buf_t		*bp)
3310{
3311	xfs_inode_log_item_t	*iip;
3312	xfs_dinode_t		*dip;
3313	xfs_mount_t		*mp;
3314#ifdef XFS_TRANS_DEBUG
3315	int			first;
3316#endif
3317	SPLDECL(s);
3318
3319	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE|MR_ACCESS));
3320	ASSERT(issemalocked(&(ip->i_flock)));
3321	ASSERT(ip->i_d.di_format != XFS_DINODE_FMT_BTREE ||
3322	       ip->i_d.di_nextents > ip->i_df.if_ext_max);
3323
3324	iip = ip->i_itemp;
3325	mp = ip->i_mount;
3326
3327
3328	/*
3329	 * If the inode isn't dirty, then just release the inode
3330	 * flush lock and do nothing.
3331	 */
3332	if ((ip->i_update_core == 0) &&
3333	    ((iip == NULL) || !(iip->ili_format.ilf_fields & XFS_ILOG_ALL))) {
3334		xfs_ifunlock(ip);
3335		return 0;
3336	}
3337
3338	/* set *dip = inode's place in the buffer */
3339	dip = (xfs_dinode_t *)xfs_buf_offset(bp, ip->i_boffset);
3340
3341	/*
3342	 * Clear i_update_core before copying out the data.
3343	 * This is for coordination with our timestamp updates
3344	 * that don't hold the inode lock. They will always
3345	 * update the timestamps BEFORE setting i_update_core,
3346	 * so if we clear i_update_core after they set it we
3347	 * are guaranteed to see their updates to the timestamps.
3348	 * I believe that this depends on strongly ordered memory
3349	 * semantics, but we have that.  We use the SYNCHRONIZE
3350	 * macro to make sure that the compiler does not reorder
3351	 * the i_update_core access below the data copy below.
3352	 */
3353	ip->i_update_core = 0;
3354	SYNCHRONIZE();
3355
3356	/*
3357	 * Make sure to get the latest atime from the Linux inode.
3358	 */
3359	xfs_synchronize_atime(ip);
3360
3361	if (XFS_TEST_ERROR(INT_GET(dip->di_core.di_magic,ARCH_CONVERT) != XFS_DINODE_MAGIC,
3362			       mp, XFS_ERRTAG_IFLUSH_1, XFS_RANDOM_IFLUSH_1)) {
3363		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3364		    "xfs_iflush: Bad inode %Lu magic number 0x%x, ptr 0x%p",
3365			ip->i_ino, (int) INT_GET(dip->di_core.di_magic, ARCH_CONVERT), dip);
3366		goto corrupt_out;
3367	}
3368	if (XFS_TEST_ERROR(ip->i_d.di_magic != XFS_DINODE_MAGIC,
3369				mp, XFS_ERRTAG_IFLUSH_2, XFS_RANDOM_IFLUSH_2)) {
3370		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3371			"xfs_iflush: Bad inode %Lu, ptr 0x%p, magic number 0x%x",
3372			ip->i_ino, ip, ip->i_d.di_magic);
3373		goto corrupt_out;
3374	}
3375	if ((ip->i_d.di_mode & S_IFMT) == S_IFREG) {
3376		if (XFS_TEST_ERROR(
3377		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3378		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE),
3379		    mp, XFS_ERRTAG_IFLUSH_3, XFS_RANDOM_IFLUSH_3)) {
3380			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3381				"xfs_iflush: Bad regular inode %Lu, ptr 0x%p",
3382				ip->i_ino, ip);
3383			goto corrupt_out;
3384		}
3385	} else if ((ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
3386		if (XFS_TEST_ERROR(
3387		    (ip->i_d.di_format != XFS_DINODE_FMT_EXTENTS) &&
3388		    (ip->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
3389		    (ip->i_d.di_format != XFS_DINODE_FMT_LOCAL),
3390		    mp, XFS_ERRTAG_IFLUSH_4, XFS_RANDOM_IFLUSH_4)) {
3391			xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3392				"xfs_iflush: Bad directory inode %Lu, ptr 0x%p",
3393				ip->i_ino, ip);
3394			goto corrupt_out;
3395		}
3396	}
3397	if (XFS_TEST_ERROR(ip->i_d.di_nextents + ip->i_d.di_anextents >
3398				ip->i_d.di_nblocks, mp, XFS_ERRTAG_IFLUSH_5,
3399				XFS_RANDOM_IFLUSH_5)) {
3400		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3401			"xfs_iflush: detected corrupt incore inode %Lu, total extents = %d, nblocks = %Ld, ptr 0x%p",
3402			ip->i_ino,
3403			ip->i_d.di_nextents + ip->i_d.di_anextents,
3404			ip->i_d.di_nblocks,
3405			ip);
3406		goto corrupt_out;
3407	}
3408	if (XFS_TEST_ERROR(ip->i_d.di_forkoff > mp->m_sb.sb_inodesize,
3409				mp, XFS_ERRTAG_IFLUSH_6, XFS_RANDOM_IFLUSH_6)) {
3410		xfs_cmn_err(XFS_PTAG_IFLUSH, CE_ALERT, mp,
3411			"xfs_iflush: bad inode %Lu, forkoff 0x%x, ptr 0x%p",
3412			ip->i_ino, ip->i_d.di_forkoff, ip);
3413		goto corrupt_out;
3414	}
3415	/*
3416	 * bump the flush iteration count, used to detect flushes which
3417	 * postdate a log record during recovery.
3418	 */
3419
3420	ip->i_d.di_flushiter++;
3421
3422	/*
3423	 * Copy the dirty parts of the inode into the on-disk
3424	 * inode.  We always copy out the core of the inode,
3425	 * because if the inode is dirty at all the core must
3426	 * be.
3427	 */
3428	xfs_xlate_dinode_core((xfs_caddr_t)&(dip->di_core), &(ip->i_d), -1);
3429
3430	/* Wrap, we never let the log put out DI_MAX_FLUSH */
3431	if (ip->i_d.di_flushiter == DI_MAX_FLUSH)
3432		ip->i_d.di_flushiter = 0;
3433
3434	/*
3435	 * If this is really an old format inode and the superblock version
3436	 * has not been updated to support only new format inodes, then
3437	 * convert back to the old inode format.  If the superblock version
3438	 * has been updated, then make the conversion permanent.
3439	 */
3440	ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
3441	       XFS_SB_VERSION_HASNLINK(&mp->m_sb));
3442	if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
3443		if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
3444			/*
3445			 * Convert it back.
3446			 */
3447			ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
3448			INT_SET(dip->di_core.di_onlink, ARCH_CONVERT, ip->i_d.di_nlink);
3449		} else {
3450			/*
3451			 * The superblock version has already been bumped,
3452			 * so just make the conversion to the new inode
3453			 * format permanent.
3454			 */
3455			ip->i_d.di_version = XFS_DINODE_VERSION_2;
3456			INT_SET(dip->di_core.di_version, ARCH_CONVERT, XFS_DINODE_VERSION_2);
3457			ip->i_d.di_onlink = 0;
3458			dip->di_core.di_onlink = 0;
3459			memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
3460			memset(&(dip->di_core.di_pad[0]), 0,
3461			      sizeof(dip->di_core.di_pad));
3462			ASSERT(ip->i_d.di_projid == 0);
3463		}
3464	}
3465
3466	if (xfs_iflush_fork(ip, dip, iip, XFS_DATA_FORK, bp) == EFSCORRUPTED) {
3467		goto corrupt_out;
3468	}
3469
3470	if (XFS_IFORK_Q(ip)) {
3471		/*
3472		 * The only error from xfs_iflush_fork is on the data fork.
3473		 */
3474		(void) xfs_iflush_fork(ip, dip, iip, XFS_ATTR_FORK, bp);
3475	}
3476	xfs_inobp_check(mp, bp);
3477
3478	/*
3479	 * We've recorded everything logged in the inode, so we'd
3480	 * like to clear the ilf_fields bits so we don't log and
3481	 * flush things unnecessarily.  However, we can't stop
3482	 * logging all this information until the data we've copied
3483	 * into the disk buffer is written to disk.  If we did we might
3484	 * overwrite the copy of the inode in the log with all the
3485	 * data after re-logging only part of it, and in the face of
3486	 * a crash we wouldn't have all the data we need to recover.
3487	 *
3488	 * What we do is move the bits to the ili_last_fields field.
3489	 * When logging the inode, these bits are moved back to the
3490	 * ilf_fields field.  In the xfs_iflush_done() routine we
3491	 * clear ili_last_fields, since we know that the information
3492	 * those bits represent is permanently on disk.  As long as
3493	 * the flush completes before the inode is logged again, then
3494	 * both ilf_fields and ili_last_fields will be cleared.
3495	 *
3496	 * We can play with the ilf_fields bits here, because the inode
3497	 * lock must be held exclusively in order to set bits there
3498	 * and the flush lock protects the ili_last_fields bits.
3499	 * Set ili_logged so the flush done
3500	 * routine can tell whether or not to look in the AIL.
3501	 * Also, store the current LSN of the inode so that we can tell
3502	 * whether the item has moved in the AIL from xfs_iflush_done().
3503	 * In order to read the lsn we need the AIL lock, because
3504	 * it is a 64 bit value that cannot be read atomically.
3505	 */
3506	if (iip != NULL && iip->ili_format.ilf_fields != 0) {
3507		iip->ili_last_fields = iip->ili_format.ilf_fields;
3508		iip->ili_format.ilf_fields = 0;
3509		iip->ili_logged = 1;
3510
3511		ASSERT(sizeof(xfs_lsn_t) == 8);	/* don't lock if it shrinks */
3512		AIL_LOCK(mp,s);
3513		iip->ili_flush_lsn = iip->ili_item.li_lsn;
3514		AIL_UNLOCK(mp, s);
3515
3516		/*
3517		 * Attach the function xfs_iflush_done to the inode's
3518		 * buffer.  This will remove the inode from the AIL
3519		 * and unlock the inode's flush lock when the inode is
3520		 * completely written to disk.
3521		 */
3522		xfs_buf_attach_iodone(bp, (void(*)(xfs_buf_t*,xfs_log_item_t*))
3523				      xfs_iflush_done, (xfs_log_item_t *)iip);
3524
3525		ASSERT(XFS_BUF_FSPRIVATE(bp, void *) != NULL);
3526		ASSERT(XFS_BUF_IODONE_FUNC(bp) != NULL);
3527	} else {
3528		/*
3529		 * We're flushing an inode which is not in the AIL and has
3530		 * not been logged but has i_update_core set.  For this
3531		 * case we can use a B_DELWRI flush and immediately drop
3532		 * the inode flush lock because we can avoid the whole
3533		 * AIL state thing.  It's OK to drop the flush lock now,
3534		 * because we've already locked the buffer and to do anything
3535		 * you really need both.
3536		 */
3537		if (iip != NULL) {
3538			ASSERT(iip->ili_logged == 0);
3539			ASSERT(iip->ili_last_fields == 0);
3540			ASSERT((iip->ili_item.li_flags & XFS_LI_IN_AIL) == 0);
3541		}
3542		xfs_ifunlock(ip);
3543	}
3544
3545	return 0;
3546
3547corrupt_out:
3548	return XFS_ERROR(EFSCORRUPTED);
3549}
3550
3551
3552/*
3553 * Flush all inactive inodes in mp.
3554 */
3555void
3556xfs_iflush_all(
3557	xfs_mount_t	*mp)
3558{
3559	xfs_inode_t	*ip;
3560	bhv_vnode_t	*vp;
3561
3562 again:
3563	XFS_MOUNT_ILOCK(mp);
3564	ip = mp->m_inodes;
3565	if (ip == NULL)
3566		goto out;
3567
3568	do {
3569		/* Make sure we skip markers inserted by sync */
3570		if (ip->i_mount == NULL) {
3571			ip = ip->i_mnext;
3572			continue;
3573		}
3574
3575		vp = XFS_ITOV_NULL(ip);
3576		if (!vp) {
3577			XFS_MOUNT_IUNLOCK(mp);
3578			xfs_finish_reclaim(ip, 0, XFS_IFLUSH_ASYNC);
3579			goto again;
3580		}
3581
3582		ASSERT(vn_count(vp) == 0);
3583
3584		ip = ip->i_mnext;
3585	} while (ip != mp->m_inodes);
3586 out:
3587	XFS_MOUNT_IUNLOCK(mp);
3588}
3589
3590/*
3591 * xfs_iaccess: check accessibility of inode for mode.
3592 */
3593int
3594xfs_iaccess(
3595	xfs_inode_t	*ip,
3596	mode_t		mode,
3597	cred_t		*cr)
3598{
3599	int		error;
3600	mode_t		orgmode = mode;
3601	struct inode	*inode = vn_to_inode(XFS_ITOV(ip));
3602
3603	if (mode & S_IWUSR) {
3604		umode_t		imode = inode->i_mode;
3605
3606		if (IS_RDONLY(inode) &&
3607		    (S_ISREG(imode) || S_ISDIR(imode) || S_ISLNK(imode)))
3608			return XFS_ERROR(EROFS);
3609
3610		if (IS_IMMUTABLE(inode))
3611			return XFS_ERROR(EACCES);
3612	}
3613
3614	/*
3615	 * If there's an Access Control List it's used instead of
3616	 * the mode bits.
3617	 */
3618	if ((error = _ACL_XFS_IACCESS(ip, mode, cr)) != -1)
3619		return error ? XFS_ERROR(error) : 0;
3620
3621	if (current_fsuid(cr) != ip->i_d.di_uid) {
3622		mode >>= 3;
3623		if (!in_group_p((gid_t)ip->i_d.di_gid))
3624			mode >>= 3;
3625	}
3626
3627	/*
3628	 * If the DACs are ok we don't need any capability check.
3629	 */
3630	if ((ip->i_d.di_mode & mode) == mode)
3631		return 0;
3632	/*
3633	 * Read/write DACs are always overridable.
3634	 * Executable DACs are overridable if at least one exec bit is set.
3635	 */
3636	if (!(orgmode & S_IXUSR) ||
3637	    (inode->i_mode & S_IXUGO) || S_ISDIR(inode->i_mode))
3638		if (capable_cred(cr, CAP_DAC_OVERRIDE))
3639			return 0;
3640
3641	if ((orgmode == S_IRUSR) ||
3642	    (S_ISDIR(inode->i_mode) && (!(orgmode & S_IWUSR)))) {
3643		if (capable_cred(cr, CAP_DAC_READ_SEARCH))
3644			return 0;
3645#ifdef	NOISE
3646		cmn_err(CE_NOTE, "Ick: mode=%o, orgmode=%o", mode, orgmode);
3647#endif	/* NOISE */
3648		return XFS_ERROR(EACCES);
3649	}
3650	return XFS_ERROR(EACCES);
3651}
3652
3653/*
3654 * xfs_iroundup: round up argument to next power of two
3655 */
3656uint
3657xfs_iroundup(
3658	uint	v)
3659{
3660	int i;
3661	uint m;
3662
3663	if ((v & (v - 1)) == 0)
3664		return v;
3665	ASSERT((v & 0x80000000) == 0);
3666	if ((v & (v + 1)) == 0)
3667		return v + 1;
3668	for (i = 0, m = 1; i < 31; i++, m <<= 1) {
3669		if (v & m)
3670			continue;
3671		v |= m;
3672		if ((v & (v + 1)) == 0)
3673			return v + 1;
3674	}
3675	ASSERT(0);
3676	return( 0 );
3677}
3678
3679#ifdef XFS_ILOCK_TRACE
3680ktrace_t	*xfs_ilock_trace_buf;
3681
3682void
3683xfs_ilock_trace(xfs_inode_t *ip, int lock, unsigned int lockflags, inst_t *ra)
3684{
3685	ktrace_enter(ip->i_lock_trace,
3686		     (void *)ip,
3687		     (void *)(unsigned long)lock, /* 1 = LOCK, 3=UNLOCK, etc */
3688		     (void *)(unsigned long)lockflags, /* XFS_ILOCK_EXCL etc */
3689		     (void *)ra,		/* caller of ilock */
3690		     (void *)(unsigned long)current_cpu(),
3691		     (void *)(unsigned long)current_pid(),
3692		     NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL);
3693}
3694#endif
3695
3696/*
3697 * Return a pointer to the extent record at file index idx.
3698 */
3699xfs_bmbt_rec_t *
3700xfs_iext_get_ext(
3701	xfs_ifork_t	*ifp,		/* inode fork pointer */
3702	xfs_extnum_t	idx)		/* index of target extent */
3703{
3704	ASSERT(idx >= 0);
3705	if ((ifp->if_flags & XFS_IFEXTIREC) && (idx == 0)) {
3706		return ifp->if_u1.if_ext_irec->er_extbuf;
3707	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3708		xfs_ext_irec_t	*erp;		/* irec pointer */
3709		int		erp_idx = 0;	/* irec index */
3710		xfs_extnum_t	page_idx = idx;	/* ext index in target list */
3711
3712		erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 0);
3713		return &erp->er_extbuf[page_idx];
3714	} else if (ifp->if_bytes) {
3715		return &ifp->if_u1.if_extents[idx];
3716	} else {
3717		return NULL;
3718	}
3719}
3720
3721/*
3722 * Insert new item(s) into the extent records for incore inode
3723 * fork 'ifp'.  'count' new items are inserted at index 'idx'.
3724 */
3725void
3726xfs_iext_insert(
3727	xfs_ifork_t	*ifp,		/* inode fork pointer */
3728	xfs_extnum_t	idx,		/* starting index of new items */
3729	xfs_extnum_t	count,		/* number of inserted items */
3730	xfs_bmbt_irec_t	*new)		/* items to insert */
3731{
3732	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
3733	xfs_extnum_t	i;		/* extent record index */
3734
3735	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
3736	xfs_iext_add(ifp, idx, count);
3737	for (i = idx; i < idx + count; i++, new++) {
3738		ep = xfs_iext_get_ext(ifp, i);
3739		xfs_bmbt_set_all(ep, new);
3740	}
3741}
3742
3743/*
3744 * This is called when the amount of space required for incore file
3745 * extents needs to be increased. The ext_diff parameter stores the
3746 * number of new extents being added and the idx parameter contains
3747 * the extent index where the new extents will be added. If the new
3748 * extents are being appended, then we just need to (re)allocate and
3749 * initialize the space. Otherwise, if the new extents are being
3750 * inserted into the middle of the existing entries, a bit more work
3751 * is required to make room for the new extents to be inserted. The
3752 * caller is responsible for filling in the new extent entries upon
3753 * return.
3754 */
3755void
3756xfs_iext_add(
3757	xfs_ifork_t	*ifp,		/* inode fork pointer */
3758	xfs_extnum_t	idx,		/* index to begin adding exts */
3759	int		ext_diff)	/* number of extents to add */
3760{
3761	int		byte_diff;	/* new bytes being added */
3762	int		new_size;	/* size of extents after adding */
3763	xfs_extnum_t	nextents;	/* number of extents in file */
3764
3765	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3766	ASSERT((idx >= 0) && (idx <= nextents));
3767	byte_diff = ext_diff * sizeof(xfs_bmbt_rec_t);
3768	new_size = ifp->if_bytes + byte_diff;
3769	/*
3770	 * If the new number of extents (nextents + ext_diff)
3771	 * fits inside the inode, then continue to use the inline
3772	 * extent buffer.
3773	 */
3774	if (nextents + ext_diff <= XFS_INLINE_EXTS) {
3775		if (idx < nextents) {
3776			memmove(&ifp->if_u2.if_inline_ext[idx + ext_diff],
3777				&ifp->if_u2.if_inline_ext[idx],
3778				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3779			memset(&ifp->if_u2.if_inline_ext[idx], 0, byte_diff);
3780		}
3781		ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
3782		ifp->if_real_bytes = 0;
3783		ifp->if_lastex = nextents + ext_diff;
3784	}
3785	/*
3786	 * Otherwise use a linear (direct) extent list.
3787	 * If the extents are currently inside the inode,
3788	 * xfs_iext_realloc_direct will switch us from
3789	 * inline to direct extent allocation mode.
3790	 */
3791	else if (nextents + ext_diff <= XFS_LINEAR_EXTS) {
3792		xfs_iext_realloc_direct(ifp, new_size);
3793		if (idx < nextents) {
3794			memmove(&ifp->if_u1.if_extents[idx + ext_diff],
3795				&ifp->if_u1.if_extents[idx],
3796				(nextents - idx) * sizeof(xfs_bmbt_rec_t));
3797			memset(&ifp->if_u1.if_extents[idx], 0, byte_diff);
3798		}
3799	}
3800	/* Indirection array */
3801	else {
3802		xfs_ext_irec_t	*erp;
3803		int		erp_idx = 0;
3804		int		page_idx = idx;
3805
3806		ASSERT(nextents + ext_diff > XFS_LINEAR_EXTS);
3807		if (ifp->if_flags & XFS_IFEXTIREC) {
3808			erp = xfs_iext_idx_to_irec(ifp, &page_idx, &erp_idx, 1);
3809		} else {
3810			xfs_iext_irec_init(ifp);
3811			ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3812			erp = ifp->if_u1.if_ext_irec;
3813		}
3814		/* Extents fit in target extent page */
3815		if (erp && erp->er_extcount + ext_diff <= XFS_LINEAR_EXTS) {
3816			if (page_idx < erp->er_extcount) {
3817				memmove(&erp->er_extbuf[page_idx + ext_diff],
3818					&erp->er_extbuf[page_idx],
3819					(erp->er_extcount - page_idx) *
3820					sizeof(xfs_bmbt_rec_t));
3821				memset(&erp->er_extbuf[page_idx], 0, byte_diff);
3822			}
3823			erp->er_extcount += ext_diff;
3824			xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3825		}
3826		/* Insert a new extent page */
3827		else if (erp) {
3828			xfs_iext_add_indirect_multi(ifp,
3829				erp_idx, page_idx, ext_diff);
3830		}
3831		/*
3832		 * If extent(s) are being appended to the last page in
3833		 * the indirection array and the new extent(s) don't fit
3834		 * in the page, then erp is NULL and erp_idx is set to
3835		 * the next index needed in the indirection array.
3836		 */
3837		else {
3838			int	count = ext_diff;
3839
3840			while (count) {
3841				erp = xfs_iext_irec_new(ifp, erp_idx);
3842				erp->er_extcount = count;
3843				count -= MIN(count, (int)XFS_LINEAR_EXTS);
3844				if (count) {
3845					erp_idx++;
3846				}
3847			}
3848		}
3849	}
3850	ifp->if_bytes = new_size;
3851}
3852
3853/*
3854 * This is called when incore extents are being added to the indirection
3855 * array and the new extents do not fit in the target extent list. The
3856 * erp_idx parameter contains the irec index for the target extent list
3857 * in the indirection array, and the idx parameter contains the extent
3858 * index within the list. The number of extents being added is stored
3859 * in the count parameter.
3860 *
3861 *    |-------|   |-------|
3862 *    |       |   |       |    idx - number of extents before idx
3863 *    |  idx  |   | count |
3864 *    |       |   |       |    count - number of extents being inserted at idx
3865 *    |-------|   |-------|
3866 *    | count |   | nex2  |    nex2 - number of extents after idx + count
3867 *    |-------|   |-------|
3868 */
3869void
3870xfs_iext_add_indirect_multi(
3871	xfs_ifork_t	*ifp,			/* inode fork pointer */
3872	int		erp_idx,		/* target extent irec index */
3873	xfs_extnum_t	idx,			/* index within target list */
3874	int		count)			/* new extents being added */
3875{
3876	int		byte_diff;		/* new bytes being added */
3877	xfs_ext_irec_t	*erp;			/* pointer to irec entry */
3878	xfs_extnum_t	ext_diff;		/* number of extents to add */
3879	xfs_extnum_t	ext_cnt;		/* new extents still needed */
3880	xfs_extnum_t	nex2;			/* extents after idx + count */
3881	xfs_bmbt_rec_t	*nex2_ep = NULL;	/* temp list for nex2 extents */
3882	int		nlists;			/* number of irec's (lists) */
3883
3884	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
3885	erp = &ifp->if_u1.if_ext_irec[erp_idx];
3886	nex2 = erp->er_extcount - idx;
3887	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
3888
3889	/*
3890	 * Save second part of target extent list
3891	 * (all extents past */
3892	if (nex2) {
3893		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3894		nex2_ep = (xfs_bmbt_rec_t *) kmem_alloc(byte_diff, KM_SLEEP);
3895		memmove(nex2_ep, &erp->er_extbuf[idx], byte_diff);
3896		erp->er_extcount -= nex2;
3897		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -nex2);
3898		memset(&erp->er_extbuf[idx], 0, byte_diff);
3899	}
3900
3901	/*
3902	 * Add the new extents to the end of the target
3903	 * list, then allocate new irec record(s) and
3904	 * extent buffer(s) as needed to store the rest
3905	 * of the new extents.
3906	 */
3907	ext_cnt = count;
3908	ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS - erp->er_extcount);
3909	if (ext_diff) {
3910		erp->er_extcount += ext_diff;
3911		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3912		ext_cnt -= ext_diff;
3913	}
3914	while (ext_cnt) {
3915		erp_idx++;
3916		erp = xfs_iext_irec_new(ifp, erp_idx);
3917		ext_diff = MIN(ext_cnt, (int)XFS_LINEAR_EXTS);
3918		erp->er_extcount = ext_diff;
3919		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, ext_diff);
3920		ext_cnt -= ext_diff;
3921	}
3922
3923	/* Add nex2 extents back to indirection array */
3924	if (nex2) {
3925		xfs_extnum_t	ext_avail;
3926		int		i;
3927
3928		byte_diff = nex2 * sizeof(xfs_bmbt_rec_t);
3929		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
3930		i = 0;
3931		/*
3932		 * If nex2 extents fit in the current page, append
3933		 * nex2_ep after the new extents.
3934		 */
3935		if (nex2 <= ext_avail) {
3936			i = erp->er_extcount;
3937		}
3938		/*
3939		 * Otherwise, check if space is available in the
3940		 * next page.
3941		 */
3942		else if ((erp_idx < nlists - 1) &&
3943			 (nex2 <= (ext_avail = XFS_LINEAR_EXTS -
3944			  ifp->if_u1.if_ext_irec[erp_idx+1].er_extcount))) {
3945			erp_idx++;
3946			erp++;
3947			/* Create a hole for nex2 extents */
3948			memmove(&erp->er_extbuf[nex2], erp->er_extbuf,
3949				erp->er_extcount * sizeof(xfs_bmbt_rec_t));
3950		}
3951		/*
3952		 * Final choice, create a new extent page for
3953		 * nex2 extents.
3954		 */
3955		else {
3956			erp_idx++;
3957			erp = xfs_iext_irec_new(ifp, erp_idx);
3958		}
3959		memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
3960		kmem_free(nex2_ep, byte_diff);
3961		erp->er_extcount += nex2;
3962		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
3963	}
3964}
3965
3966/*
3967 * This is called when the amount of space required for incore file
3968 * extents needs to be decreased. The ext_diff parameter stores the
3969 * number of extents to be removed and the idx parameter contains
3970 * the extent index where the extents will be removed from.
3971 *
3972 * If the amount of space needed has decreased below the linear
3973 * limit, XFS_IEXT_BUFSZ, then switch to using the contiguous
3974 * extent array.  Otherwise, use kmem_realloc() to adjust the
3975 * size to what is needed.
3976 */
3977void
3978xfs_iext_remove(
3979	xfs_ifork_t	*ifp,		/* inode fork pointer */
3980	xfs_extnum_t	idx,		/* index to begin removing exts */
3981	int		ext_diff)	/* number of extents to remove */
3982{
3983	xfs_extnum_t	nextents;	/* number of extents in file */
3984	int		new_size;	/* size of extents after removal */
3985
3986	ASSERT(ext_diff > 0);
3987	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
3988	new_size = (nextents - ext_diff) * sizeof(xfs_bmbt_rec_t);
3989
3990	if (new_size == 0) {
3991		xfs_iext_destroy(ifp);
3992	} else if (ifp->if_flags & XFS_IFEXTIREC) {
3993		xfs_iext_remove_indirect(ifp, idx, ext_diff);
3994	} else if (ifp->if_real_bytes) {
3995		xfs_iext_remove_direct(ifp, idx, ext_diff);
3996	} else {
3997		xfs_iext_remove_inline(ifp, idx, ext_diff);
3998	}
3999	ifp->if_bytes = new_size;
4000}
4001
4002/*
4003 * This removes ext_diff extents from the inline buffer, beginning
4004 * at extent index idx.
4005 */
4006void
4007xfs_iext_remove_inline(
4008	xfs_ifork_t	*ifp,		/* inode fork pointer */
4009	xfs_extnum_t	idx,		/* index to begin removing exts */
4010	int		ext_diff)	/* number of extents to remove */
4011{
4012	int		nextents;	/* number of extents in file */
4013
4014	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4015	ASSERT(idx < XFS_INLINE_EXTS);
4016	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4017	ASSERT(((nextents - ext_diff) > 0) &&
4018		(nextents - ext_diff) < XFS_INLINE_EXTS);
4019
4020	if (idx + ext_diff < nextents) {
4021		memmove(&ifp->if_u2.if_inline_ext[idx],
4022			&ifp->if_u2.if_inline_ext[idx + ext_diff],
4023			(nextents - (idx + ext_diff)) *
4024			 sizeof(xfs_bmbt_rec_t));
4025		memset(&ifp->if_u2.if_inline_ext[nextents - ext_diff],
4026			0, ext_diff * sizeof(xfs_bmbt_rec_t));
4027	} else {
4028		memset(&ifp->if_u2.if_inline_ext[idx], 0,
4029			ext_diff * sizeof(xfs_bmbt_rec_t));
4030	}
4031}
4032
4033/*
4034 * This removes ext_diff extents from a linear (direct) extent list,
4035 * beginning at extent index idx. If the extents are being removed
4036 * from the end of the list (ie. truncate) then we just need to re-
4037 * allocate the list to remove the extra space. Otherwise, if the
4038 * extents are being removed from the middle of the existing extent
4039 * entries, then we first need to move the extent records beginning
4040 * at idx + ext_diff up in the list to overwrite the records being
4041 * removed, then remove the extra space via kmem_realloc.
4042 */
4043void
4044xfs_iext_remove_direct(
4045	xfs_ifork_t	*ifp,		/* inode fork pointer */
4046	xfs_extnum_t	idx,		/* index to begin removing exts */
4047	int		ext_diff)	/* number of extents to remove */
4048{
4049	xfs_extnum_t	nextents;	/* number of extents in file */
4050	int		new_size;	/* size of extents after removal */
4051
4052	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4053	new_size = ifp->if_bytes -
4054		(ext_diff * sizeof(xfs_bmbt_rec_t));
4055	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4056
4057	if (new_size == 0) {
4058		xfs_iext_destroy(ifp);
4059		return;
4060	}
4061	/* Move extents up in the list (if needed) */
4062	if (idx + ext_diff < nextents) {
4063		memmove(&ifp->if_u1.if_extents[idx],
4064			&ifp->if_u1.if_extents[idx + ext_diff],
4065			(nextents - (idx + ext_diff)) *
4066			 sizeof(xfs_bmbt_rec_t));
4067	}
4068	memset(&ifp->if_u1.if_extents[nextents - ext_diff],
4069		0, ext_diff * sizeof(xfs_bmbt_rec_t));
4070	/*
4071	 * Reallocate the direct extent list. If the extents
4072	 * will fit inside the inode then xfs_iext_realloc_direct
4073	 * will switch from direct to inline extent allocation
4074	 * mode for us.
4075	 */
4076	xfs_iext_realloc_direct(ifp, new_size);
4077	ifp->if_bytes = new_size;
4078}
4079
4080/*
4081 * This is called when incore extents are being removed from the
4082 * indirection array and the extents being removed span multiple extent
4083 * buffers. The idx parameter contains the file extent index where we
4084 * want to begin removing extents, and the count parameter contains
4085 * how many extents need to be removed.
4086 *
4087 *    |-------|   |-------|
4088 *    | nex1  |   |       |    nex1 - number of extents before idx
4089 *    |-------|   | count |
4090 *    |       |   |       |    count - number of extents being removed at idx
4091 *    | count |   |-------|
4092 *    |       |   | nex2  |    nex2 - number of extents after idx + count
4093 *    |-------|   |-------|
4094 */
4095void
4096xfs_iext_remove_indirect(
4097	xfs_ifork_t	*ifp,		/* inode fork pointer */
4098	xfs_extnum_t	idx,		/* index to begin removing extents */
4099	int		count)		/* number of extents to remove */
4100{
4101	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4102	int		erp_idx = 0;	/* indirection array index */
4103	xfs_extnum_t	ext_cnt;	/* extents left to remove */
4104	xfs_extnum_t	ext_diff;	/* extents to remove in current list */
4105	xfs_extnum_t	nex1;		/* number of extents before idx */
4106	xfs_extnum_t	nex2;		/* extents after idx + count */
4107	int		nlists;		/* entries in indirection array */
4108	int		page_idx = idx;	/* index in target extent list */
4109
4110	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4111	erp = xfs_iext_idx_to_irec(ifp,  &page_idx, &erp_idx, 0);
4112	ASSERT(erp != NULL);
4113	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4114	nex1 = page_idx;
4115	ext_cnt = count;
4116	while (ext_cnt) {
4117		nex2 = MAX((erp->er_extcount - (nex1 + ext_cnt)), 0);
4118		ext_diff = MIN(ext_cnt, (erp->er_extcount - nex1));
4119		/*
4120		 * Check for deletion of entire list;
4121		 * xfs_iext_irec_remove() updates extent offsets.
4122		 */
4123		if (ext_diff == erp->er_extcount) {
4124			xfs_iext_irec_remove(ifp, erp_idx);
4125			ext_cnt -= ext_diff;
4126			nex1 = 0;
4127			if (ext_cnt) {
4128				ASSERT(erp_idx < ifp->if_real_bytes /
4129					XFS_IEXT_BUFSZ);
4130				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4131				nex1 = 0;
4132				continue;
4133			} else {
4134				break;
4135			}
4136		}
4137		/* Move extents up (if needed) */
4138		if (nex2) {
4139			memmove(&erp->er_extbuf[nex1],
4140				&erp->er_extbuf[nex1 + ext_diff],
4141				nex2 * sizeof(xfs_bmbt_rec_t));
4142		}
4143		/* Zero out rest of page */
4144		memset(&erp->er_extbuf[nex1 + nex2], 0, (XFS_IEXT_BUFSZ -
4145			((nex1 + nex2) * sizeof(xfs_bmbt_rec_t))));
4146		/* Update remaining counters */
4147		erp->er_extcount -= ext_diff;
4148		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, -ext_diff);
4149		ext_cnt -= ext_diff;
4150		nex1 = 0;
4151		erp_idx++;
4152		erp++;
4153	}
4154	ifp->if_bytes -= count * sizeof(xfs_bmbt_rec_t);
4155	xfs_iext_irec_compact(ifp);
4156}
4157
4158/*
4159 * Create, destroy, or resize a linear (direct) block of extents.
4160 */
4161void
4162xfs_iext_realloc_direct(
4163	xfs_ifork_t	*ifp,		/* inode fork pointer */
4164	int		new_size)	/* new size of extents */
4165{
4166	int		rnew_size;	/* real new size of extents */
4167
4168	rnew_size = new_size;
4169
4170	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC) ||
4171		((new_size >= 0) && (new_size <= XFS_IEXT_BUFSZ) &&
4172		 (new_size != ifp->if_real_bytes)));
4173
4174	/* Free extent records */
4175	if (new_size == 0) {
4176		xfs_iext_destroy(ifp);
4177	}
4178	/* Resize direct extent list and zero any new bytes */
4179	else if (ifp->if_real_bytes) {
4180		/* Check if extents will fit inside the inode */
4181		if (new_size <= XFS_INLINE_EXTS * sizeof(xfs_bmbt_rec_t)) {
4182			xfs_iext_direct_to_inline(ifp, new_size /
4183				(uint)sizeof(xfs_bmbt_rec_t));
4184			ifp->if_bytes = new_size;
4185			return;
4186		}
4187		if ((new_size & (new_size - 1)) != 0) {
4188			rnew_size = xfs_iroundup(new_size);
4189		}
4190		if (rnew_size != ifp->if_real_bytes) {
4191			ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4192				kmem_realloc(ifp->if_u1.if_extents,
4193						rnew_size,
4194						ifp->if_real_bytes,
4195						KM_SLEEP);
4196		}
4197		if (rnew_size > ifp->if_real_bytes) {
4198			memset(&ifp->if_u1.if_extents[ifp->if_bytes /
4199				(uint)sizeof(xfs_bmbt_rec_t)], 0,
4200				rnew_size - ifp->if_real_bytes);
4201		}
4202	}
4203	/*
4204	 * Switch from the inline extent buffer to a direct
4205	 * extent list. Be sure to include the inline extent
4206	 * bytes in new_size.
4207	 */
4208	else {
4209		new_size += ifp->if_bytes;
4210		if ((new_size & (new_size - 1)) != 0) {
4211			rnew_size = xfs_iroundup(new_size);
4212		}
4213		xfs_iext_inline_to_direct(ifp, rnew_size);
4214	}
4215	ifp->if_real_bytes = rnew_size;
4216	ifp->if_bytes = new_size;
4217}
4218
4219/*
4220 * Switch from linear (direct) extent records to inline buffer.
4221 */
4222void
4223xfs_iext_direct_to_inline(
4224	xfs_ifork_t	*ifp,		/* inode fork pointer */
4225	xfs_extnum_t	nextents)	/* number of extents in file */
4226{
4227	ASSERT(ifp->if_flags & XFS_IFEXTENTS);
4228	ASSERT(nextents <= XFS_INLINE_EXTS);
4229	/*
4230	 * The inline buffer was zeroed when we switched
4231	 * from inline to direct extent allocation mode,
4232	 * so we don't need to clear it here.
4233	 */
4234	memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
4235		nextents * sizeof(xfs_bmbt_rec_t));
4236	kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4237	ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
4238	ifp->if_real_bytes = 0;
4239}
4240
4241/*
4242 * Switch from inline buffer to linear (direct) extent records.
4243 * new_size should already be rounded up to the next power of 2
4244 * by the caller (when appropriate), so use new_size as it is.
4245 * However, since new_size may be rounded up, we can't update
4246 * if_bytes here. It is the caller's responsibility to update
4247 * if_bytes upon return.
4248 */
4249void
4250xfs_iext_inline_to_direct(
4251	xfs_ifork_t	*ifp,		/* inode fork pointer */
4252	int		new_size)	/* number of extents in file */
4253{
4254	ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4255		kmem_alloc(new_size, KM_SLEEP);
4256	memset(ifp->if_u1.if_extents, 0, new_size);
4257	if (ifp->if_bytes) {
4258		memcpy(ifp->if_u1.if_extents, ifp->if_u2.if_inline_ext,
4259			ifp->if_bytes);
4260		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4261			sizeof(xfs_bmbt_rec_t));
4262	}
4263	ifp->if_real_bytes = new_size;
4264}
4265
4266/*
4267 * Resize an extent indirection array to new_size bytes.
4268 */
4269void
4270xfs_iext_realloc_indirect(
4271	xfs_ifork_t	*ifp,		/* inode fork pointer */
4272	int		new_size)	/* new indirection array size */
4273{
4274	int		nlists;		/* number of irec's (ex lists) */
4275	int		size;		/* current indirection array size */
4276
4277	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4278	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4279	size = nlists * sizeof(xfs_ext_irec_t);
4280	ASSERT(ifp->if_real_bytes);
4281	ASSERT((new_size >= 0) && (new_size != size));
4282	if (new_size == 0) {
4283		xfs_iext_destroy(ifp);
4284	} else {
4285		ifp->if_u1.if_ext_irec = (xfs_ext_irec_t *)
4286			kmem_realloc(ifp->if_u1.if_ext_irec,
4287				new_size, size, KM_SLEEP);
4288	}
4289}
4290
4291/*
4292 * Switch from indirection array to linear (direct) extent allocations.
4293 */
4294void
4295xfs_iext_indirect_to_direct(
4296	 xfs_ifork_t	*ifp)		/* inode fork pointer */
4297{
4298	xfs_bmbt_rec_t	*ep;		/* extent record pointer */
4299	xfs_extnum_t	nextents;	/* number of extents in file */
4300	int		size;		/* size of file extents */
4301
4302	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4303	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4304	ASSERT(nextents <= XFS_LINEAR_EXTS);
4305	size = nextents * sizeof(xfs_bmbt_rec_t);
4306
4307	xfs_iext_irec_compact_full(ifp);
4308	ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
4309
4310	ep = ifp->if_u1.if_ext_irec->er_extbuf;
4311	kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
4312	ifp->if_flags &= ~XFS_IFEXTIREC;
4313	ifp->if_u1.if_extents = ep;
4314	ifp->if_bytes = size;
4315	if (nextents < XFS_LINEAR_EXTS) {
4316		xfs_iext_realloc_direct(ifp, size);
4317	}
4318}
4319
4320/*
4321 * Free incore file extents.
4322 */
4323void
4324xfs_iext_destroy(
4325	xfs_ifork_t	*ifp)		/* inode fork pointer */
4326{
4327	if (ifp->if_flags & XFS_IFEXTIREC) {
4328		int	erp_idx;
4329		int	nlists;
4330
4331		nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4332		for (erp_idx = nlists - 1; erp_idx >= 0 ; erp_idx--) {
4333			xfs_iext_irec_remove(ifp, erp_idx);
4334		}
4335		ifp->if_flags &= ~XFS_IFEXTIREC;
4336	} else if (ifp->if_real_bytes) {
4337		kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
4338	} else if (ifp->if_bytes) {
4339		memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
4340			sizeof(xfs_bmbt_rec_t));
4341	}
4342	ifp->if_u1.if_extents = NULL;
4343	ifp->if_real_bytes = 0;
4344	ifp->if_bytes = 0;
4345}
4346
4347/*
4348 * Return a pointer to the extent record for file system block bno.
4349 */
4350xfs_bmbt_rec_t *			/* pointer to found extent record */
4351xfs_iext_bno_to_ext(
4352	xfs_ifork_t	*ifp,		/* inode fork pointer */
4353	xfs_fileoff_t	bno,		/* block number to search for */
4354	xfs_extnum_t	*idxp)		/* index of target extent */
4355{
4356	xfs_bmbt_rec_t	*base;		/* pointer to first extent */
4357	xfs_filblks_t	blockcount = 0;	/* number of blocks in extent */
4358	xfs_bmbt_rec_t	*ep = NULL;	/* pointer to target extent */
4359	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4360	int		high;		/* upper boundary in search */
4361	xfs_extnum_t	idx = 0;	/* index of target extent */
4362	int		low;		/* lower boundary in search */
4363	xfs_extnum_t	nextents;	/* number of file extents */
4364	xfs_fileoff_t	startoff = 0;	/* start offset of extent */
4365
4366	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4367	if (nextents == 0) {
4368		*idxp = 0;
4369		return NULL;
4370	}
4371	low = 0;
4372	if (ifp->if_flags & XFS_IFEXTIREC) {
4373		/* Find target extent list */
4374		int	erp_idx = 0;
4375		erp = xfs_iext_bno_to_irec(ifp, bno, &erp_idx);
4376		base = erp->er_extbuf;
4377		high = erp->er_extcount - 1;
4378	} else {
4379		base = ifp->if_u1.if_extents;
4380		high = nextents - 1;
4381	}
4382	/* Binary search extent records */
4383	while (low <= high) {
4384		idx = (low + high) >> 1;
4385		ep = base + idx;
4386		startoff = xfs_bmbt_get_startoff(ep);
4387		blockcount = xfs_bmbt_get_blockcount(ep);
4388		if (bno < startoff) {
4389			high = idx - 1;
4390		} else if (bno >= startoff + blockcount) {
4391			low = idx + 1;
4392		} else {
4393			/* Convert back to file-based extent index */
4394			if (ifp->if_flags & XFS_IFEXTIREC) {
4395				idx += erp->er_extoff;
4396			}
4397			*idxp = idx;
4398			return ep;
4399		}
4400	}
4401	/* Convert back to file-based extent index */
4402	if (ifp->if_flags & XFS_IFEXTIREC) {
4403		idx += erp->er_extoff;
4404	}
4405	if (bno >= startoff + blockcount) {
4406		if (++idx == nextents) {
4407			ep = NULL;
4408		} else {
4409			ep = xfs_iext_get_ext(ifp, idx);
4410		}
4411	}
4412	*idxp = idx;
4413	return ep;
4414}
4415
4416/*
4417 * Return a pointer to the indirection array entry containing the
4418 * extent record for filesystem block bno. Store the index of the
4419 * target irec in *erp_idxp.
4420 */
4421xfs_ext_irec_t *			/* pointer to found extent record */
4422xfs_iext_bno_to_irec(
4423	xfs_ifork_t	*ifp,		/* inode fork pointer */
4424	xfs_fileoff_t	bno,		/* block number to search for */
4425	int		*erp_idxp)	/* irec index of target ext list */
4426{
4427	xfs_ext_irec_t	*erp = NULL;	/* indirection array pointer */
4428	xfs_ext_irec_t	*erp_next;	/* next indirection array entry */
4429	int		erp_idx;	/* indirection array index */
4430	int		nlists;		/* number of extent irec's (lists) */
4431	int		high;		/* binary search upper limit */
4432	int		low;		/* binary search lower limit */
4433
4434	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4435	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4436	erp_idx = 0;
4437	low = 0;
4438	high = nlists - 1;
4439	while (low <= high) {
4440		erp_idx = (low + high) >> 1;
4441		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4442		erp_next = erp_idx < nlists - 1 ? erp + 1 : NULL;
4443		if (bno < xfs_bmbt_get_startoff(erp->er_extbuf)) {
4444			high = erp_idx - 1;
4445		} else if (erp_next && bno >=
4446			   xfs_bmbt_get_startoff(erp_next->er_extbuf)) {
4447			low = erp_idx + 1;
4448		} else {
4449			break;
4450		}
4451	}
4452	*erp_idxp = erp_idx;
4453	return erp;
4454}
4455
4456/*
4457 * Return a pointer to the indirection array entry containing the
4458 * extent record at file extent index *idxp. Store the index of the
4459 * target irec in *erp_idxp and store the page index of the target
4460 * extent record in *idxp.
4461 */
4462xfs_ext_irec_t *
4463xfs_iext_idx_to_irec(
4464	xfs_ifork_t	*ifp,		/* inode fork pointer */
4465	xfs_extnum_t	*idxp,		/* extent index (file -> page) */
4466	int		*erp_idxp,	/* pointer to target irec */
4467	int		realloc)	/* new bytes were just added */
4468{
4469	xfs_ext_irec_t	*prev;		/* pointer to previous irec */
4470	xfs_ext_irec_t	*erp = NULL;	/* pointer to current irec */
4471	int		erp_idx;	/* indirection array index */
4472	int		nlists;		/* number of irec's (ex lists) */
4473	int		high;		/* binary search upper limit */
4474	int		low;		/* binary search lower limit */
4475	xfs_extnum_t	page_idx = *idxp; /* extent index in target list */
4476
4477	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4478	ASSERT(page_idx >= 0 && page_idx <=
4479		ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t));
4480	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4481	erp_idx = 0;
4482	low = 0;
4483	high = nlists - 1;
4484
4485	/* Binary search extent irec's */
4486	while (low <= high) {
4487		erp_idx = (low + high) >> 1;
4488		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4489		prev = erp_idx > 0 ? erp - 1 : NULL;
4490		if (page_idx < erp->er_extoff || (page_idx == erp->er_extoff &&
4491		     realloc && prev && prev->er_extcount < XFS_LINEAR_EXTS)) {
4492			high = erp_idx - 1;
4493		} else if (page_idx > erp->er_extoff + erp->er_extcount ||
4494			   (page_idx == erp->er_extoff + erp->er_extcount &&
4495			    !realloc)) {
4496			low = erp_idx + 1;
4497		} else if (page_idx == erp->er_extoff + erp->er_extcount &&
4498			   erp->er_extcount == XFS_LINEAR_EXTS) {
4499			ASSERT(realloc);
4500			page_idx = 0;
4501			erp_idx++;
4502			erp = erp_idx < nlists ? erp + 1 : NULL;
4503			break;
4504		} else {
4505			page_idx -= erp->er_extoff;
4506			break;
4507		}
4508	}
4509	*idxp = page_idx;
4510	*erp_idxp = erp_idx;
4511	return(erp);
4512}
4513
4514/*
4515 * Allocate and initialize an indirection array once the space needed
4516 * for incore extents increases above XFS_IEXT_BUFSZ.
4517 */
4518void
4519xfs_iext_irec_init(
4520	xfs_ifork_t	*ifp)		/* inode fork pointer */
4521{
4522	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4523	xfs_extnum_t	nextents;	/* number of extents in file */
4524
4525	ASSERT(!(ifp->if_flags & XFS_IFEXTIREC));
4526	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4527	ASSERT(nextents <= XFS_LINEAR_EXTS);
4528
4529	erp = (xfs_ext_irec_t *)
4530		kmem_alloc(sizeof(xfs_ext_irec_t), KM_SLEEP);
4531
4532	if (nextents == 0) {
4533		ifp->if_u1.if_extents = (xfs_bmbt_rec_t *)
4534			kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4535	} else if (!ifp->if_real_bytes) {
4536		xfs_iext_inline_to_direct(ifp, XFS_IEXT_BUFSZ);
4537	} else if (ifp->if_real_bytes < XFS_IEXT_BUFSZ) {
4538		xfs_iext_realloc_direct(ifp, XFS_IEXT_BUFSZ);
4539	}
4540	erp->er_extbuf = ifp->if_u1.if_extents;
4541	erp->er_extcount = nextents;
4542	erp->er_extoff = 0;
4543
4544	ifp->if_flags |= XFS_IFEXTIREC;
4545	ifp->if_real_bytes = XFS_IEXT_BUFSZ;
4546	ifp->if_bytes = nextents * sizeof(xfs_bmbt_rec_t);
4547	ifp->if_u1.if_ext_irec = erp;
4548
4549	return;
4550}
4551
4552/*
4553 * Allocate and initialize a new entry in the indirection array.
4554 */
4555xfs_ext_irec_t *
4556xfs_iext_irec_new(
4557	xfs_ifork_t	*ifp,		/* inode fork pointer */
4558	int		erp_idx)	/* index for new irec */
4559{
4560	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4561	int		i;		/* loop counter */
4562	int		nlists;		/* number of irec's (ex lists) */
4563
4564	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4565	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4566
4567	/* Resize indirection array */
4568	xfs_iext_realloc_indirect(ifp, ++nlists *
4569				  sizeof(xfs_ext_irec_t));
4570	/*
4571	 * Move records down in the array so the
4572	 * new page can use erp_idx.
4573	 */
4574	erp = ifp->if_u1.if_ext_irec;
4575	for (i = nlists - 1; i > erp_idx; i--) {
4576		memmove(&erp[i], &erp[i-1], sizeof(xfs_ext_irec_t));
4577	}
4578	ASSERT(i == erp_idx);
4579
4580	/* Initialize new extent record */
4581	erp = ifp->if_u1.if_ext_irec;
4582	erp[erp_idx].er_extbuf = (xfs_bmbt_rec_t *)
4583		kmem_alloc(XFS_IEXT_BUFSZ, KM_SLEEP);
4584	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4585	memset(erp[erp_idx].er_extbuf, 0, XFS_IEXT_BUFSZ);
4586	erp[erp_idx].er_extcount = 0;
4587	erp[erp_idx].er_extoff = erp_idx > 0 ?
4588		erp[erp_idx-1].er_extoff + erp[erp_idx-1].er_extcount : 0;
4589	return (&erp[erp_idx]);
4590}
4591
4592/*
4593 * Remove a record from the indirection array.
4594 */
4595void
4596xfs_iext_irec_remove(
4597	xfs_ifork_t	*ifp,		/* inode fork pointer */
4598	int		erp_idx)	/* irec index to remove */
4599{
4600	xfs_ext_irec_t	*erp;		/* indirection array pointer */
4601	int		i;		/* loop counter */
4602	int		nlists;		/* number of irec's (ex lists) */
4603
4604	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4605	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4606	erp = &ifp->if_u1.if_ext_irec[erp_idx];
4607	if (erp->er_extbuf) {
4608		xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
4609			-erp->er_extcount);
4610		kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
4611	}
4612	/* Compact extent records */
4613	erp = ifp->if_u1.if_ext_irec;
4614	for (i = erp_idx; i < nlists - 1; i++) {
4615		memmove(&erp[i], &erp[i+1], sizeof(xfs_ext_irec_t));
4616	}
4617	/*
4618	 * Manually free the last extent record from the indirection
4619	 * array.  A call to xfs_iext_realloc_indirect() with a size
4620	 * of zero would result in a call to xfs_iext_destroy() which
4621	 * would in turn call this function again, creating a nasty
4622	 * infinite loop.
4623	 */
4624	if (--nlists) {
4625		xfs_iext_realloc_indirect(ifp,
4626			nlists * sizeof(xfs_ext_irec_t));
4627	} else {
4628		kmem_free(ifp->if_u1.if_ext_irec,
4629			sizeof(xfs_ext_irec_t));
4630	}
4631	ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
4632}
4633
4634/*
4635 * This is called to clean up large amounts of unused memory allocated
4636 * by the indirection array.  Before compacting anything though, verify
4637 * that the indirection array is still needed and switch back to the
4638 * linear extent list (or even the inline buffer) if possible.  The
4639 * compaction policy is as follows:
4640 *
4641 *    Full Compaction: Extents fit into a single page (or inline buffer)
4642 *    Full Compaction: Extents occupy less than 10% of allocated space
4643 * Partial Compaction: Extents occupy > 10% and < 50% of allocated space
4644 *      No Compaction: Extents occupy at least 50% of allocated space
4645 */
4646void
4647xfs_iext_irec_compact(
4648	xfs_ifork_t	*ifp)		/* inode fork pointer */
4649{
4650	xfs_extnum_t	nextents;	/* number of extents in file */
4651	int		nlists;		/* number of irec's (ex lists) */
4652
4653	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4654	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4655	nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
4656
4657	if (nextents == 0) {
4658		xfs_iext_destroy(ifp);
4659	} else if (nextents <= XFS_INLINE_EXTS) {
4660		xfs_iext_indirect_to_direct(ifp);
4661		xfs_iext_direct_to_inline(ifp, nextents);
4662	} else if (nextents <= XFS_LINEAR_EXTS) {
4663		xfs_iext_indirect_to_direct(ifp);
4664	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 3) {
4665		xfs_iext_irec_compact_full(ifp);
4666	} else if (nextents < (nlists * XFS_LINEAR_EXTS) >> 1) {
4667		xfs_iext_irec_compact_pages(ifp);
4668	}
4669}
4670
4671/*
4672 * Combine extents from neighboring extent pages.
4673 */
4674void
4675xfs_iext_irec_compact_pages(
4676	xfs_ifork_t	*ifp)		/* inode fork pointer */
4677{
4678	xfs_ext_irec_t	*erp, *erp_next;/* pointers to irec entries */
4679	int		erp_idx = 0;	/* indirection array index */
4680	int		nlists;		/* number of irec's (ex lists) */
4681
4682	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4683	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4684	while (erp_idx < nlists - 1) {
4685		erp = &ifp->if_u1.if_ext_irec[erp_idx];
4686		erp_next = erp + 1;
4687		if (erp_next->er_extcount <=
4688		    (XFS_LINEAR_EXTS - erp->er_extcount)) {
4689			memmove(&erp->er_extbuf[erp->er_extcount],
4690				erp_next->er_extbuf, erp_next->er_extcount *
4691				sizeof(xfs_bmbt_rec_t));
4692			erp->er_extcount += erp_next->er_extcount;
4693			/*
4694			 * Free page before removing extent record
4695			 * so er_extoffs don't get modified in
4696			 * xfs_iext_irec_remove.
4697			 */
4698			kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
4699			erp_next->er_extbuf = NULL;
4700			xfs_iext_irec_remove(ifp, erp_idx + 1);
4701			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4702		} else {
4703			erp_idx++;
4704		}
4705	}
4706}
4707
4708/*
4709 * Fully compact the extent records managed by the indirection array.
4710 */
4711void
4712xfs_iext_irec_compact_full(
4713	xfs_ifork_t	*ifp)			/* inode fork pointer */
4714{
4715	xfs_bmbt_rec_t	*ep, *ep_next;		/* extent record pointers */
4716	xfs_ext_irec_t	*erp, *erp_next;	/* extent irec pointers */
4717	int		erp_idx = 0;		/* extent irec index */
4718	int		ext_avail;		/* empty entries in ex list */
4719	int		ext_diff;		/* number of exts to add */
4720	int		nlists;			/* number of irec's (ex lists) */
4721
4722	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4723	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4724	erp = ifp->if_u1.if_ext_irec;
4725	ep = &erp->er_extbuf[erp->er_extcount];
4726	erp_next = erp + 1;
4727	ep_next = erp_next->er_extbuf;
4728	while (erp_idx < nlists - 1) {
4729		ext_avail = XFS_LINEAR_EXTS - erp->er_extcount;
4730		ext_diff = MIN(ext_avail, erp_next->er_extcount);
4731		memcpy(ep, ep_next, ext_diff * sizeof(xfs_bmbt_rec_t));
4732		erp->er_extcount += ext_diff;
4733		erp_next->er_extcount -= ext_diff;
4734		/* Remove next page */
4735		if (erp_next->er_extcount == 0) {
4736			/*
4737			 * Free page before removing extent record
4738			 * so er_extoffs don't get modified in
4739			 * xfs_iext_irec_remove.
4740			 */
4741			kmem_free(erp_next->er_extbuf,
4742				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4743			erp_next->er_extbuf = NULL;
4744			xfs_iext_irec_remove(ifp, erp_idx + 1);
4745			erp = &ifp->if_u1.if_ext_irec[erp_idx];
4746			nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4747		/* Update next page */
4748		} else {
4749			/* Move rest of page up to become next new page */
4750			memmove(erp_next->er_extbuf, ep_next,
4751				erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
4752			ep_next = erp_next->er_extbuf;
4753			memset(&ep_next[erp_next->er_extcount], 0,
4754				(XFS_LINEAR_EXTS - erp_next->er_extcount) *
4755				sizeof(xfs_bmbt_rec_t));
4756		}
4757		if (erp->er_extcount == XFS_LINEAR_EXTS) {
4758			erp_idx++;
4759			if (erp_idx < nlists)
4760				erp = &ifp->if_u1.if_ext_irec[erp_idx];
4761			else
4762				break;
4763		}
4764		ep = &erp->er_extbuf[erp->er_extcount];
4765		erp_next = erp + 1;
4766		ep_next = erp_next->er_extbuf;
4767	}
4768}
4769
4770/*
4771 * This is called to update the er_extoff field in the indirection
4772 * array when extents have been added or removed from one of the
4773 * extent lists. erp_idx contains the irec index to begin updating
4774 * at and ext_diff contains the number of extents that were added
4775 * or removed.
4776 */
4777void
4778xfs_iext_irec_update_extoffs(
4779	xfs_ifork_t	*ifp,		/* inode fork pointer */
4780	int		erp_idx,	/* irec index to update */
4781	int		ext_diff)	/* number of new extents */
4782{
4783	int		i;		/* loop counter */
4784	int		nlists;		/* number of irec's (ex lists */
4785
4786	ASSERT(ifp->if_flags & XFS_IFEXTIREC);
4787	nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
4788	for (i = erp_idx; i < nlists; i++) {
4789		ifp->if_u1.if_ext_irec[i].er_extoff += ext_diff;
4790	}
4791}
4792