1/*-
2 * Copyright 1998, 2000 Marshall Kirk McKusick.
3 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
4 * All rights reserved.
5 *
6 * The soft updates code is derived from the appendix of a University
7 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
8 * "Soft Updates: A Solution to the Metadata Update Problem in File
9 * Systems", CSE-TR-254-95, August 1995).
10 *
11 * Further information about soft updates can be obtained from:
12 *
13 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
14 *	1614 Oxford Street		mckusick@mckusick.com
15 *	Berkeley, CA 94709-1608		+1-510-843-9542
16 *	USA
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions
20 * are met:
21 *
22 * 1. Redistributions of source code must retain the above copyright
23 *    notice, this list of conditions and the following disclaimer.
24 * 2. Redistributions in binary form must reproduce the above copyright
25 *    notice, this list of conditions and the following disclaimer in the
26 *    documentation and/or other materials provided with the distribution.
27 *
28 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
29 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
30 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
31 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
32 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
33 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
34 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
35 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
36 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
37 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
38 *
39 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
40 */
41
42#include <sys/cdefs.h>
43__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_softdep.c 256812 2013-10-20 22:21:01Z mckusick $");
44
45#include "opt_ffs.h"
46#include "opt_quota.h"
47#include "opt_ddb.h"
48
49/*
50 * For now we want the safety net that the DEBUG flag provides.
51 */
52#ifndef DEBUG
53#define DEBUG
54#endif
55
56#include <sys/param.h>
57#include <sys/kernel.h>
58#include <sys/systm.h>
59#include <sys/bio.h>
60#include <sys/buf.h>
61#include <sys/kdb.h>
62#include <sys/kthread.h>
63#include <sys/ktr.h>
64#include <sys/limits.h>
65#include <sys/lock.h>
66#include <sys/malloc.h>
67#include <sys/mount.h>
68#include <sys/mutex.h>
69#include <sys/namei.h>
70#include <sys/priv.h>
71#include <sys/proc.h>
72#include <sys/rwlock.h>
73#include <sys/stat.h>
74#include <sys/sysctl.h>
75#include <sys/syslog.h>
76#include <sys/vnode.h>
77#include <sys/conf.h>
78
79#include <ufs/ufs/dir.h>
80#include <ufs/ufs/extattr.h>
81#include <ufs/ufs/quota.h>
82#include <ufs/ufs/inode.h>
83#include <ufs/ufs/ufsmount.h>
84#include <ufs/ffs/fs.h>
85#include <ufs/ffs/softdep.h>
86#include <ufs/ffs/ffs_extern.h>
87#include <ufs/ufs/ufs_extern.h>
88
89#include <vm/vm.h>
90#include <vm/vm_extern.h>
91#include <vm/vm_object.h>
92
93#include <geom/geom.h>
94
95#include <ddb/ddb.h>
96
97#define	KTR_SUJ	0	/* Define to KTR_SPARE. */
98
99#ifndef SOFTUPDATES
100
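/*
 * When the kernel is built without "options SOFTUPDATES", the routines
 * below stand in for the real implementation: entry points that should
 * never be reached on a non-softdep mount panic, while the rest return
 * a benign value (or, for softdep_check_suspend(), do the minimal work
 * required) so that ordinary FFS mounts continue to function.
 */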
101int
102softdep_flushfiles(oldmnt, flags, td)
103	struct mount *oldmnt;
104	int flags;
105	struct thread *td;
106{
107
108	panic("softdep_flushfiles called");
109}
110
111int
112softdep_mount(devvp, mp, fs, cred)
113	struct vnode *devvp;
114	struct mount *mp;
115	struct fs *fs;
116	struct ucred *cred;
117{
118
119	return (0);
120}
121
122void
123softdep_initialize()
124{
125
126	return;
127}
128
129void
130softdep_uninitialize()
131{
132
133	return;
134}
135
136void
137softdep_unmount(mp)
138	struct mount *mp;
139{
140
141	panic("softdep_unmount called");
142}
143
144void
145softdep_setup_sbupdate(ump, fs, bp)
146	struct ufsmount *ump;
147	struct fs *fs;
148	struct buf *bp;
149{
150
151	panic("softdep_setup_sbupdate called");
152}
153
154void
155softdep_setup_inomapdep(bp, ip, newinum, mode)
156	struct buf *bp;
157	struct inode *ip;
158	ino_t newinum;
159	int mode;
160{
161
162	panic("softdep_setup_inomapdep called");
163}
164
165void
166softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
167	struct buf *bp;
168	struct mount *mp;
169	ufs2_daddr_t newblkno;
170	int frags;
171	int oldfrags;
172{
173
174	panic("softdep_setup_blkmapdep called");
175}
176
177void
178softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
179	struct inode *ip;
180	ufs_lbn_t lbn;
181	ufs2_daddr_t newblkno;
182	ufs2_daddr_t oldblkno;
183	long newsize;
184	long oldsize;
185	struct buf *bp;
186{
187
188	panic("softdep_setup_allocdirect called");
189}
190
191void
192softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
193	struct inode *ip;
194	ufs_lbn_t lbn;
195	ufs2_daddr_t newblkno;
196	ufs2_daddr_t oldblkno;
197	long newsize;
198	long oldsize;
199	struct buf *bp;
200{
201
202	panic("softdep_setup_allocext called");
203}
204
205void
206softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
207	struct inode *ip;
208	ufs_lbn_t lbn;
209	struct buf *bp;
210	int ptrno;
211	ufs2_daddr_t newblkno;
212	ufs2_daddr_t oldblkno;
213	struct buf *nbp;
214{
215
216	panic("softdep_setup_allocindir_page called");
217}
218
219void
220softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
221	struct buf *nbp;
222	struct inode *ip;
223	struct buf *bp;
224	int ptrno;
225	ufs2_daddr_t newblkno;
226{
227
228	panic("softdep_setup_allocindir_meta called");
229}
230
231void
232softdep_journal_freeblocks(ip, cred, length, flags)
233	struct inode *ip;
234	struct ucred *cred;
235	off_t length;
236	int flags;
237{
238
239	panic("softdep_journal_freeblocks called");
240}
241
242void
243softdep_journal_fsync(ip)
244	struct inode *ip;
245{
246
247	panic("softdep_journal_fsync called");
248}
249
250void
251softdep_setup_freeblocks(ip, length, flags)
252	struct inode *ip;
253	off_t length;
254	int flags;
255{
256
257	panic("softdep_setup_freeblocks called");
258}
259
260void
261softdep_freefile(pvp, ino, mode)
262		struct vnode *pvp;
263		ino_t ino;
264		int mode;
265{
266
267	panic("softdep_freefile called");
268}
269
270int
271softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
272	struct buf *bp;
273	struct inode *dp;
274	off_t diroffset;
275	ino_t newinum;
276	struct buf *newdirbp;
277	int isnewblk;
278{
279
280	panic("softdep_setup_directory_add called");
281}
282
283void
284softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
285	struct buf *bp;
286	struct inode *dp;
287	caddr_t base;
288	caddr_t oldloc;
289	caddr_t newloc;
290	int entrysize;
291{
292
293	panic("softdep_change_directoryentry_offset called");
294}
295
296void
297softdep_setup_remove(bp, dp, ip, isrmdir)
298	struct buf *bp;
299	struct inode *dp;
300	struct inode *ip;
301	int isrmdir;
302{
303
304	panic("softdep_setup_remove called");
305}
306
307void
308softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
309	struct buf *bp;
310	struct inode *dp;
311	struct inode *ip;
312	ino_t newinum;
313	int isrmdir;
314{
315
316	panic("softdep_setup_directory_change called");
317}
318
319void
320softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
321	struct mount *mp;
322	struct buf *bp;
323	ufs2_daddr_t blkno;
324	int frags;
325	struct workhead *wkhd;
326{
327
328	panic("%s called", __FUNCTION__);
329}
330
331void
332softdep_setup_inofree(mp, bp, ino, wkhd)
333	struct mount *mp;
334	struct buf *bp;
335	ino_t ino;
336	struct workhead *wkhd;
337{
338
339	panic("%s called", __FUNCTION__);
340}
341
342void
343softdep_setup_unlink(dp, ip)
344	struct inode *dp;
345	struct inode *ip;
346{
347
348	panic("%s called", __FUNCTION__);
349}
350
351void
352softdep_setup_link(dp, ip)
353	struct inode *dp;
354	struct inode *ip;
355{
356
357	panic("%s called", __FUNCTION__);
358}
359
360void
361softdep_revert_link(dp, ip)
362	struct inode *dp;
363	struct inode *ip;
364{
365
366	panic("%s called", __FUNCTION__);
367}
368
369void
370softdep_setup_rmdir(dp, ip)
371	struct inode *dp;
372	struct inode *ip;
373{
374
375	panic("%s called", __FUNCTION__);
376}
377
378void
379softdep_revert_rmdir(dp, ip)
380	struct inode *dp;
381	struct inode *ip;
382{
383
384	panic("%s called", __FUNCTION__);
385}
386
387void
388softdep_setup_create(dp, ip)
389	struct inode *dp;
390	struct inode *ip;
391{
392
393	panic("%s called", __FUNCTION__);
394}
395
396void
397softdep_revert_create(dp, ip)
398	struct inode *dp;
399	struct inode *ip;
400{
401
402	panic("%s called", __FUNCTION__);
403}
404
405void
406softdep_setup_mkdir(dp, ip)
407	struct inode *dp;
408	struct inode *ip;
409{
410
411	panic("%s called", __FUNCTION__);
412}
413
414void
415softdep_revert_mkdir(dp, ip)
416	struct inode *dp;
417	struct inode *ip;
418{
419
420	panic("%s called", __FUNCTION__);
421}
422
423void
424softdep_setup_dotdot_link(dp, ip)
425	struct inode *dp;
426	struct inode *ip;
427{
428
429	panic("%s called", __FUNCTION__);
430}
431
432int
433softdep_prealloc(vp, waitok)
434	struct vnode *vp;
435	int waitok;
436{
437
438	panic("%s called", __FUNCTION__);
439}
440
441int
442softdep_journal_lookup(mp, vpp)
443	struct mount *mp;
444	struct vnode **vpp;
445{
446
447	return (ENOENT);
448}
449
450void
451softdep_change_linkcnt(ip)
452	struct inode *ip;
453{
454
455	panic("softdep_change_linkcnt called");
456}
457
458void
459softdep_load_inodeblock(ip)
460	struct inode *ip;
461{
462
463	panic("softdep_load_inodeblock called");
464}
465
466void
467softdep_update_inodeblock(ip, bp, waitfor)
468	struct inode *ip;
469	struct buf *bp;
470	int waitfor;
471{
472
473	panic("softdep_update_inodeblock called");
474}
475
476int
477softdep_fsync(vp)
478	struct vnode *vp;	/* the "in_core" copy of the inode */
479{
480
481	return (0);
482}
483
484void
485softdep_fsync_mountdev(vp)
486	struct vnode *vp;
487{
488
489	return;
490}
491
492int
493softdep_flushworklist(oldmnt, countp, td)
494	struct mount *oldmnt;
495	int *countp;
496	struct thread *td;
497{
498
499	*countp = 0;
500	return (0);
501}
502
503int
504softdep_sync_metadata(struct vnode *vp)
505{
506
507	panic("softdep_sync_metadata called");
508}
509
510int
511softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
512{
513
514	panic("softdep_sync_buf called");
515}
516
517int
518softdep_slowdown(vp)
519	struct vnode *vp;
520{
521
522	panic("softdep_slowdown called");
523}
524
525int
526softdep_request_cleanup(fs, vp, cred, resource)
527	struct fs *fs;
528	struct vnode *vp;
529	struct ucred *cred;
530	int resource;
531{
532
533	return (0);
534}
535
536int
537softdep_check_suspend(struct mount *mp,
538		      struct vnode *devvp,
539		      int softdep_deps,
540		      int softdep_accdeps,
541		      int secondary_writes,
542		      int secondary_accwrites)
543{
544	struct bufobj *bo;
545	int error;
546
547	(void) softdep_deps;
548	(void) softdep_accdeps;
549
550	bo = &devvp->v_bufobj;
551	ASSERT_BO_WLOCKED(bo);
552
553	MNT_ILOCK(mp);
554	while (mp->mnt_secondary_writes != 0) {
555		BO_UNLOCK(bo);
556		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
557		    (PUSER - 1) | PDROP, "secwr", 0);
558		BO_LOCK(bo);
559		MNT_ILOCK(mp);
560	}
561
562	/*
563	 * Reasons for needing more work before suspend:
564	 * - Dirty buffers on devvp.
565	 * - Secondary writes occurred after start of vnode sync loop
566	 */
567	error = 0;
568	if (bo->bo_numoutput > 0 ||
569	    bo->bo_dirty.bv_cnt > 0 ||
570	    secondary_writes != 0 ||
571	    mp->mnt_secondary_writes != 0 ||
572	    secondary_accwrites != mp->mnt_secondary_accwrites)
573		error = EAGAIN;
574	BO_UNLOCK(bo);
575	return (error);
576}
577
578void
579softdep_get_depcounts(struct mount *mp,
580		      int *softdepactivep,
581		      int *softdepactiveaccp)
582{
583	(void) mp;
584	*softdepactivep = 0;
585	*softdepactiveaccp = 0;
586}
587
588void
589softdep_buf_append(bp, wkhd)
590	struct buf *bp;
591	struct workhead *wkhd;
592{
593
594	panic("softdep_buf_append called");
595}
596
597void
598softdep_inode_append(ip, cred, wkhd)
599	struct inode *ip;
600	struct ucred *cred;
601	struct workhead *wkhd;
602{
603
604	panic("softdep_inode_append called");
605}
606
607void
608softdep_freework(wkhd)
609	struct workhead *wkhd;
610{
611
612	panic("softdep_freework called");
613}
614
615#else
616
617FEATURE(softupdates, "FFS soft-updates support");
618
619/*
620 * These definitions need to be adapted to the system to which
621 * this file is being ported.
622 */
623
624#define M_SOFTDEP_FLAGS	(M_WAITOK)
625
626#define	D_PAGEDEP	0
627#define	D_INODEDEP	1
628#define	D_BMSAFEMAP	2
629#define	D_NEWBLK	3
630#define	D_ALLOCDIRECT	4
631#define	D_INDIRDEP	5
632#define	D_ALLOCINDIR	6
633#define	D_FREEFRAG	7
634#define	D_FREEBLKS	8
635#define	D_FREEFILE	9
636#define	D_DIRADD	10
637#define	D_MKDIR		11
638#define	D_DIRREM	12
639#define	D_NEWDIRBLK	13
640#define	D_FREEWORK	14
641#define	D_FREEDEP	15
642#define	D_JADDREF	16
643#define	D_JREMREF	17
644#define	D_JMVREF	18
645#define	D_JNEWBLK	19
646#define	D_JFREEBLK	20
647#define	D_JFREEFRAG	21
648#define	D_JSEG		22
649#define	D_JSEGDEP	23
650#define	D_SBDEP		24
651#define	D_JTRUNC	25
652#define	D_JFSYNC	26
653#define	D_SENTINEL	27
654#define	D_LAST		D_SENTINEL
655
656unsigned long dep_current[D_LAST + 1];
657unsigned long dep_highuse[D_LAST + 1];
658unsigned long dep_total[D_LAST + 1];
659unsigned long dep_write[D_LAST + 1];
660
661static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
662    "soft updates stats");
663static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
664    "total dependencies allocated");
665static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
666    "high use dependencies allocated");
667static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
668    "current dependencies allocated");
669static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
670    "current dependencies written");
671
672#define	SOFTDEP_TYPE(type, str, long)					\
673    static MALLOC_DEFINE(M_ ## type, #str, long);			\
674    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
675	&dep_total[D_ ## type], 0, "");					\
676    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD, 	\
677	&dep_current[D_ ## type], 0, "");				\
678    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD, 	\
679	&dep_highuse[D_ ## type], 0, "");				\
680    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD, 	\
681	&dep_write[D_ ## type], 0, "");
682
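/*
 * Each SOFTDEP_TYPE() use below defines the corresponding malloc type
 * (e.g. M_PAGEDEP for D_PAGEDEP) and exports its counters as the
 * debug.softdep.{total,current,highuse,write}.<name> sysctls.
 */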
683SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
684SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
685SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
686    "Block or frag allocated from cyl group map");
687SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
688SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
689SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
690SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
691SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
692SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
693SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
694SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
695SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
696SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
697SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
698SOFTDEP_TYPE(FREEWORK, freework, "free an inode block");
699SOFTDEP_TYPE(FREEDEP, freedep, "track a block free");
700SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
701SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
702SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
703SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
704SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
705SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
706SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
707SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
708SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
709SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
710SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");
711
712static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");
713
714static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
715static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");
716
717/*
718 * translate from workitem type to memory type
719 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
720 */
721static struct malloc_type *memtype[] = {
722	M_PAGEDEP,
723	M_INODEDEP,
724	M_BMSAFEMAP,
725	M_NEWBLK,
726	M_ALLOCDIRECT,
727	M_INDIRDEP,
728	M_ALLOCINDIR,
729	M_FREEFRAG,
730	M_FREEBLKS,
731	M_FREEFILE,
732	M_DIRADD,
733	M_MKDIR,
734	M_DIRREM,
735	M_NEWDIRBLK,
736	M_FREEWORK,
737	M_FREEDEP,
738	M_JADDREF,
739	M_JREMREF,
740	M_JMVREF,
741	M_JNEWBLK,
742	M_JFREEBLK,
743	M_JFREEFRAG,
744	M_JSEG,
745	M_JSEGDEP,
746	M_SBDEP,
747	M_JTRUNC,
748	M_JFSYNC,
749	M_SENTINEL
750};
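/*
 * The table above must stay in sync with the D_* defines; a mismatch
 * could be caught at compile time with a check along these lines (a
 * sketch, assuming CTASSERT() and nitems() are available here):
 *
 *	CTASSERT(nitems(memtype) == D_LAST + 1);
 */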
751
752static LIST_HEAD(mkdirlist, mkdir) mkdirlisthd;
753
754#define DtoM(type) (memtype[type])
755
756/*
757 * Names of malloc types.
758 */
759#define TYPENAME(type)  \
760	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
761/*
762 * End system adaptation definitions.
763 */
764
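/*
 * Byte offsets of the ".." and "." inode number fields within the
 * template (struct dirtemplate) used for the first block of a new
 * directory.
 */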
765#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
766#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)
767
768/*
769 * Forward declarations.
770 */
771struct inodedep_hashhead;
772struct newblk_hashhead;
773struct pagedep_hashhead;
774struct bmsafemap_hashhead;
775
776/*
777 * Private journaling structures.
778 */
779struct jblocks {
780	struct jseglst	jb_segs;	/* TAILQ of current segments. */
781	struct jseg	*jb_writeseg;	/* Next write to complete. */
782	struct jseg	*jb_oldestseg;	/* Oldest segment with valid entries. */
783	struct jextent	*jb_extent;	/* Extent array. */
784	uint64_t	jb_nextseq;	/* Next sequence number. */
785	uint64_t	jb_oldestwrseq;	/* Oldest written sequence number. */
786	uint8_t		jb_needseg;	/* Need a forced segment. */
787	uint8_t		jb_suspended;	/* Did journal suspend writes? */
788	int		jb_avail;	/* Available extents. */
789	int		jb_used;	/* Last used extent. */
790	int		jb_head;	/* Allocator head. */
791	int		jb_off;		/* Allocator extent offset. */
792	int		jb_blocks;	/* Total disk blocks covered. */
793	int		jb_free;	/* Total disk blocks free. */
794	int		jb_min;		/* Minimum free space. */
795	int		jb_low;		/* Low on space. */
796	int		jb_age;		/* Insertion time of oldest rec. */
797};
798
799struct jextent {
800	ufs2_daddr_t	je_daddr;	/* Disk block address. */
801	int		je_blocks;	/* Disk block count. */
802};
803
804/*
805 * Internal function prototypes.
806 */
807static	void softdep_error(char *, int);
808static	int softdep_process_worklist(struct mount *, int);
809static	int softdep_waitidle(struct mount *);
810static	void drain_output(struct vnode *);
811static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
812static	void clear_remove(void);
813static	void clear_inodedeps(void);
814static	void unlinked_inodedep(struct mount *, struct inodedep *);
815static	void clear_unlinked_inodedep(struct inodedep *);
816static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
817static	int flush_pagedep_deps(struct vnode *, struct mount *,
818	    struct diraddhd *);
819static	int free_pagedep(struct pagedep *);
820static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
821static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
822static	int flush_deplist(struct allocdirectlst *, int, int *);
823static	int sync_cgs(struct mount *, int);
824static	int handle_written_filepage(struct pagedep *, struct buf *);
825static	int handle_written_sbdep(struct sbdep *, struct buf *);
826static	void initiate_write_sbdep(struct sbdep *);
827static  void diradd_inode_written(struct diradd *, struct inodedep *);
828static	int handle_written_indirdep(struct indirdep *, struct buf *,
829	    struct buf**);
830static	int handle_written_inodeblock(struct inodedep *, struct buf *);
831static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
832	    uint8_t *);
833static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
834static	void handle_written_jaddref(struct jaddref *);
835static	void handle_written_jremref(struct jremref *);
836static	void handle_written_jseg(struct jseg *, struct buf *);
837static	void handle_written_jnewblk(struct jnewblk *);
838static	void handle_written_jblkdep(struct jblkdep *);
839static	void handle_written_jfreefrag(struct jfreefrag *);
840static	void complete_jseg(struct jseg *);
841static	void complete_jsegs(struct jseg *);
842static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
843static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
844static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
845static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
846static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
847static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
848static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
849static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
850static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
851static	inline void inoref_write(struct inoref *, struct jseg *,
852	    struct jrefrec *);
853static	void handle_allocdirect_partdone(struct allocdirect *,
854	    struct workhead *);
855static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
856	    struct workhead *);
857static	void indirdep_complete(struct indirdep *);
858static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
859static	void indirblk_insert(struct freework *);
860static	void indirblk_remove(struct freework *);
861static	void handle_allocindir_partdone(struct allocindir *);
862static	void initiate_write_filepage(struct pagedep *, struct buf *);
863static	void initiate_write_indirdep(struct indirdep*, struct buf *);
864static	void handle_written_mkdir(struct mkdir *, int);
865static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
866	    uint8_t *);
867static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
868static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
869static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
870static	void handle_workitem_freefile(struct freefile *);
871static	int handle_workitem_remove(struct dirrem *, int);
872static	struct dirrem *newdirrem(struct buf *, struct inode *,
873	    struct inode *, int, struct dirrem **);
874static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
875	    struct buf *);
876static	void cancel_indirdep(struct indirdep *, struct buf *,
877	    struct freeblks *);
878static	void free_indirdep(struct indirdep *);
879static	void free_diradd(struct diradd *, struct workhead *);
880static	void merge_diradd(struct inodedep *, struct diradd *);
881static	void complete_diradd(struct diradd *);
882static	struct diradd *diradd_lookup(struct pagedep *, int);
883static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
884	    struct jremref *);
885static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
886	    struct jremref *);
887static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
888	    struct jremref *, struct jremref *);
889static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
890	    struct jremref *);
891static	void cancel_allocindir(struct allocindir *, struct buf *bp,
892	    struct freeblks *, int);
893static	int setup_trunc_indir(struct freeblks *, struct inode *,
894	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
895static	void complete_trunc_indir(struct freework *);
896static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
897	    int);
898static	void complete_mkdir(struct mkdir *);
899static	void free_newdirblk(struct newdirblk *);
900static	void free_jremref(struct jremref *);
901static	void free_jaddref(struct jaddref *);
902static	void free_jsegdep(struct jsegdep *);
903static	void free_jsegs(struct jblocks *);
904static	void rele_jseg(struct jseg *);
905static	void free_jseg(struct jseg *, struct jblocks *);
906static	void free_jnewblk(struct jnewblk *);
907static	void free_jblkdep(struct jblkdep *);
908static	void free_jfreefrag(struct jfreefrag *);
909static	void free_freedep(struct freedep *);
910static	void journal_jremref(struct dirrem *, struct jremref *,
911	    struct inodedep *);
912static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
913static	int cancel_jaddref(struct jaddref *, struct inodedep *,
914	    struct workhead *);
915static	void cancel_jfreefrag(struct jfreefrag *);
916static	inline void setup_freedirect(struct freeblks *, struct inode *,
917	    int, int);
918static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
919static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
920	    ufs_lbn_t, int);
921static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
922static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
923static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
924static	ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
925static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
926static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
927	    int, int);
928static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
929static 	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
930static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
931static	void newblk_freefrag(struct newblk*);
932static	void free_newblk(struct newblk *);
933static	void cancel_allocdirect(struct allocdirectlst *,
934	    struct allocdirect *, struct freeblks *);
935static	int check_inode_unwritten(struct inodedep *);
936static	int free_inodedep(struct inodedep *);
937static	void freework_freeblock(struct freework *);
938static	void freework_enqueue(struct freework *);
939static	int handle_workitem_freeblocks(struct freeblks *, int);
940static	int handle_complete_freeblocks(struct freeblks *, int);
941static	void handle_workitem_indirblk(struct freework *);
942static	void handle_written_freework(struct freework *);
943static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
944static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
945	    struct workhead *);
946static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
947	    struct inodedep *, struct allocindir *, ufs_lbn_t);
948static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
949	    ufs2_daddr_t, ufs_lbn_t);
950static	void handle_workitem_freefrag(struct freefrag *);
951static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
952	    ufs_lbn_t);
953static	void allocdirect_merge(struct allocdirectlst *,
954	    struct allocdirect *, struct allocdirect *);
955static	struct freefrag *allocindir_merge(struct allocindir *,
956	    struct allocindir *);
957static	int bmsafemap_find(struct bmsafemap_hashhead *, struct mount *, int,
958	    struct bmsafemap **);
959static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
960	    int cg, struct bmsafemap *);
961static	int newblk_find(struct newblk_hashhead *, struct mount *, ufs2_daddr_t,
962	    int, struct newblk **);
963static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
964static	int inodedep_find(struct inodedep_hashhead *, struct fs *, ino_t,
965	    struct inodedep **);
966static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
967static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
968	    int, struct pagedep **);
969static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
970	    struct mount *mp, int, struct pagedep **);
971static	void pause_timer(void *);
972static	int request_cleanup(struct mount *, int);
973static	int process_worklist_item(struct mount *, int, int);
974static	void process_removes(struct vnode *);
975static	void process_truncates(struct vnode *);
976static	void jwork_move(struct workhead *, struct workhead *);
977static	void jwork_insert(struct workhead *, struct jsegdep *);
978static	void add_to_worklist(struct worklist *, int);
979static	void wake_worklist(struct worklist *);
980static	void wait_worklist(struct worklist *, char *);
981static	void remove_from_worklist(struct worklist *);
982static	void softdep_flush(void);
983static	void softdep_flushjournal(struct mount *);
984static	int softdep_speedup(void);
985static	void worklist_speedup(void);
986static	int journal_mount(struct mount *, struct fs *, struct ucred *);
987static	void journal_unmount(struct mount *);
988static	int journal_space(struct ufsmount *, int);
989static	void journal_suspend(struct ufsmount *);
990static	int journal_unsuspend(struct ufsmount *ump);
991static	void softdep_prelink(struct vnode *, struct vnode *);
992static	void add_to_journal(struct worklist *);
993static	void remove_from_journal(struct worklist *);
994static	void softdep_process_journal(struct mount *, struct worklist *, int);
995static	struct jremref *newjremref(struct dirrem *, struct inode *,
996	    struct inode *ip, off_t, nlink_t);
997static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
998	    uint16_t);
999static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
1000	    uint16_t);
1001static	inline struct jsegdep *inoref_jseg(struct inoref *);
1002static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
1003static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
1004	    ufs2_daddr_t, int);
1005static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
1006static	void move_newblock_dep(struct jaddref *, struct inodedep *);
1007static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
1008static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
1009	    ufs2_daddr_t, long, ufs_lbn_t);
1010static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
1011	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
1012static	int jwait(struct worklist *, int);
1013static	struct inodedep *inodedep_lookup_ip(struct inode *);
1014static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
1015static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
1016static	void handle_jwork(struct workhead *);
1017static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
1018	    struct mkdir **);
1019static	struct jblocks *jblocks_create(void);
1020static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
1021static	void jblocks_free(struct jblocks *, struct mount *, int);
1022static	void jblocks_destroy(struct jblocks *);
1023static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);
1024
1025/*
1026 * Exported softdep operations.
1027 */
1028static	void softdep_disk_io_initiation(struct buf *);
1029static	void softdep_disk_write_complete(struct buf *);
1030static	void softdep_deallocate_dependencies(struct buf *);
1031static	int softdep_count_dependencies(struct buf *bp, int);
1032
1033static struct rwlock lk;
1034RW_SYSINIT(softdep_lock, &lk, "Softdep Lock");
1035
1036#define TRY_ACQUIRE_LOCK(lk)		rw_try_wlock(lk)
1037#define ACQUIRE_LOCK(lk)		rw_wlock(lk)
1038#define FREE_LOCK(lk)			rw_wunlock(lk)
1039
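/*
 * BUF_AREC()/BUF_NOREC() allow and disallow recursive acquisition of a
 * buffer's b_lock.
 */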
1040#define	BUF_AREC(bp)			lockallowrecurse(&(bp)->b_lock)
1041#define	BUF_NOREC(bp)			lockdisablerecurse(&(bp)->b_lock)
1042
1043/*
1044 * Worklist queue management.
1045 * These routines require that the lock be held.
1046 */
1047#ifndef /* NOT */ DEBUG
1048#define WORKLIST_INSERT(head, item) do {	\
1049	(item)->wk_state |= ONWORKLIST;		\
1050	LIST_INSERT_HEAD(head, item, wk_list);	\
1051} while (0)
1052#define WORKLIST_REMOVE(item) do {		\
1053	(item)->wk_state &= ~ONWORKLIST;	\
1054	LIST_REMOVE(item, wk_list);		\
1055} while (0)
1056#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
1057#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE
1058
1059#else /* DEBUG */
1060static	void worklist_insert(struct workhead *, struct worklist *, int);
1061static	void worklist_remove(struct worklist *, int);
1062
1063#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
1064#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
1065#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
1066#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)
1067
1068static void
1069worklist_insert(head, item, locked)
1070	struct workhead *head;
1071	struct worklist *item;
1072	int locked;
1073{
1074
1075	if (locked)
1076		rw_assert(&lk, RA_WLOCKED);
1077	if (item->wk_state & ONWORKLIST)
1078		panic("worklist_insert: %p %s(0x%X) already on list",
1079		    item, TYPENAME(item->wk_type), item->wk_state);
1080	item->wk_state |= ONWORKLIST;
1081	LIST_INSERT_HEAD(head, item, wk_list);
1082}
1083
1084static void
1085worklist_remove(item, locked)
1086	struct worklist *item;
1087	int locked;
1088{
1089
1090	if (locked)
1091		rw_assert(&lk, RA_WLOCKED);
1092	if ((item->wk_state & ONWORKLIST) == 0)
1093		panic("worklist_remove: %p %s(0x%X) not on list",
1094		    item, TYPENAME(item->wk_type), item->wk_state);
1095	item->wk_state &= ~ONWORKLIST;
1096	LIST_REMOVE(item, wk_list);
1097}
1098#endif /* DEBUG */
1099
1100/*
1101 * Merge two jsegdeps keeping only the oldest one as newer references
1102 * can't be discarded until after the older ones are.
1103 */
1104static inline struct jsegdep *
1105jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
1106{
1107	struct jsegdep *swp;
1108
1109	if (two == NULL)
1110		return (one);
1111
1112	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
1113		swp = one;
1114		one = two;
1115		two = swp;
1116	}
1117	WORKLIST_REMOVE(&two->jd_list);
1118	free_jsegdep(two);
1119
1120	return (one);
1121}
1122
1123/*
1124 * If two freedeps are compatible, free one to reduce list size.
1125 */
1126static inline struct freedep *
1127freedep_merge(struct freedep *one, struct freedep *two)
1128{
1129	if (two == NULL)
1130		return (one);
1131
1132	if (one->fd_freework == two->fd_freework) {
1133		WORKLIST_REMOVE(&two->fd_list);
1134		free_freedep(two);
1135	}
1136	return (one);
1137}
1138
1139/*
1140 * Move journal work from one list to another.  Duplicate freedeps and
1141 * jsegdeps are coalesced to keep the lists as small as possible.
1142 */
1143static void
1144jwork_move(dst, src)
1145	struct workhead *dst;
1146	struct workhead *src;
1147{
1148	struct freedep *freedep;
1149	struct jsegdep *jsegdep;
1150	struct worklist *wkn;
1151	struct worklist *wk;
1152
1153	KASSERT(dst != src,
1154	    ("jwork_move: dst == src"));
1155	freedep = NULL;
1156	jsegdep = NULL;
1157	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
1158		if (wk->wk_type == D_JSEGDEP)
1159			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1160		if (wk->wk_type == D_FREEDEP)
1161			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1162	}
1163
1164	rw_assert(&lk, RA_WLOCKED);
1165	while ((wk = LIST_FIRST(src)) != NULL) {
1166		WORKLIST_REMOVE(wk);
1167		WORKLIST_INSERT(dst, wk);
1168		if (wk->wk_type == D_JSEGDEP) {
1169			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
1170			continue;
1171		}
1172		if (wk->wk_type == D_FREEDEP)
1173			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
1174	}
1175}
1176
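/*
 * Insert a single jsegdep onto a work list, keeping only the jsegdep
 * with the oldest journal sequence number if the list already has one.
 */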
1177static void
1178jwork_insert(dst, jsegdep)
1179	struct workhead *dst;
1180	struct jsegdep *jsegdep;
1181{
1182	struct jsegdep *jsegdepn;
1183	struct worklist *wk;
1184
1185	LIST_FOREACH(wk, dst, wk_list)
1186		if (wk->wk_type == D_JSEGDEP)
1187			break;
1188	if (wk == NULL) {
1189		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1190		return;
1191	}
1192	jsegdepn = WK_JSEGDEP(wk);
1193	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
1194		WORKLIST_REMOVE(wk);
1195		free_jsegdep(jsegdepn);
1196		WORKLIST_INSERT(dst, &jsegdep->jd_list);
1197	} else
1198		free_jsegdep(jsegdep);
1199}
1200
1201/*
1202 * Routines for tracking and managing workitems.
1203 */
1204static	void workitem_free(struct worklist *, int);
1205static	void workitem_alloc(struct worklist *, int, struct mount *);
1206static	void workitem_reassign(struct worklist *, int);
1207
1208#define	WORKITEM_FREE(item, type) \
1209	workitem_free((struct worklist *)(item), (type))
1210#define	WORKITEM_REASSIGN(item, type) \
1211	workitem_reassign((struct worklist *)(item), (type))
1212
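/*
 * workitem_free() releases a work item: it wakes any thread waiting on
 * the item, drops the per-mount and global dependency counts, and frees
 * the memory.  workitem_alloc() and workitem_reassign() maintain the
 * same counters when items are created or change type.
 */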
1213static void
1214workitem_free(item, type)
1215	struct worklist *item;
1216	int type;
1217{
1218	struct ufsmount *ump;
1219	rw_assert(&lk, RA_WLOCKED);
1220
1221#ifdef DEBUG
1222	if (item->wk_state & ONWORKLIST)
1223		panic("workitem_free: %s(0x%X) still on list",
1224		    TYPENAME(item->wk_type), item->wk_state);
1225	if (item->wk_type != type && type != D_NEWBLK)
1226		panic("workitem_free: type mismatch %s != %s",
1227		    TYPENAME(item->wk_type), TYPENAME(type));
1228#endif
1229	if (item->wk_state & IOWAITING)
1230		wakeup(item);
1231	ump = VFSTOUFS(item->wk_mp);
1232	KASSERT(ump->softdep_deps > 0,
1233	    ("workitem_free: %s: softdep_deps going negative",
1234	    ump->um_fs->fs_fsmnt));
1235	if (--ump->softdep_deps == 0 && ump->softdep_req)
1236		wakeup(&ump->softdep_deps);
1237	KASSERT(dep_current[item->wk_type] > 0,
1238	    ("workitem_free: %s: dep_current[%s] going negative",
1239	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1240	dep_current[item->wk_type]--;
1241	free(item, DtoM(type));
1242}
1243
1244static void
1245workitem_alloc(item, type, mp)
1246	struct worklist *item;
1247	int type;
1248	struct mount *mp;
1249{
1250	struct ufsmount *ump;
1251
1252	item->wk_type = type;
1253	item->wk_mp = mp;
1254	item->wk_state = 0;
1255
1256	ump = VFSTOUFS(mp);
1257	ACQUIRE_LOCK(&lk);
1258	dep_current[type]++;
1259	if (dep_current[type] > dep_highuse[type])
1260		dep_highuse[type] = dep_current[type];
1261	dep_total[type]++;
1262	ump->softdep_deps++;
1263	ump->softdep_accdeps++;
1264	FREE_LOCK(&lk);
1265}
1266
1267static void
1268workitem_reassign(item, newtype)
1269	struct worklist *item;
1270	int newtype;
1271{
1272
1273	KASSERT(dep_current[item->wk_type] > 0,
1274	    ("workitem_reassign: %s: dep_current[%s] going negative",
1275	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
1276	dep_current[item->wk_type]--;
1277	dep_current[newtype]++;
1278	if (dep_current[newtype] > dep_highuse[newtype])
1279		dep_highuse[newtype] = dep_current[newtype];
1280	dep_total[newtype]++;
1281	item->wk_type = newtype;
1282}
1283
1284/*
1285 * Workitem queue management
1286 */
1287static int max_softdeps;	/* maximum number of structs before slowdown */
1288static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
1289static int tickdelay = 2;	/* number of ticks to pause during slowdown */
1290static int proc_waiting;	/* tracks whether we have a timeout posted */
1291static int *stat_countp;	/* statistic to count in proc_waiting timeout */
1292static struct callout softdep_callout;
1293static int req_pending;
1294static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
1295static int req_clear_remove;	/* syncer process flush some freeblks */
1296static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */
1297
1298/*
1299 * runtime statistics
1300 */
1301static int stat_worklist_push;	/* number of worklist cleanups */
1302static int stat_blk_limit_push;	/* number of times block limit neared */
1303static int stat_ino_limit_push;	/* number of times inode limit neared */
1304static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
1305static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
1306static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
1307static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
1308static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
1309static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
1310static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
1311static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
1312static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
1313static int stat_journal_min;	/* Times hit journal min threshold */
1314static int stat_journal_low;	/* Times hit journal low threshold */
1315static int stat_journal_wait;	/* Times blocked in jwait(). */
1316static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
1317static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
1318static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
1319static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
1320static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
1321static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
1322static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
1323static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
1324static int stat_cleanup_failures; /* Number of cleanup requests that failed */
1325
1326SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
1327    &max_softdeps, 0, "");
1328SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
1329    &tickdelay, 0, "");
1330SYSCTL_INT(_debug_softdep, OID_AUTO, maxindirdeps, CTLFLAG_RW,
1331    &maxindirdeps, 0, "");
1332SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
1333    &stat_worklist_push, 0,"");
1334SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
1335    &stat_blk_limit_push, 0,"");
1336SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
1337    &stat_ino_limit_push, 0,"");
1338SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
1339    &stat_blk_limit_hit, 0, "");
1340SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
1341    &stat_ino_limit_hit, 0, "");
1342SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
1343    &stat_sync_limit_hit, 0, "");
1344SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
1345    &stat_indir_blk_ptrs, 0, "");
1346SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
1347    &stat_inode_bitmap, 0, "");
1348SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
1349    &stat_direct_blk_ptrs, 0, "");
1350SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
1351    &stat_dir_entry, 0, "");
1352SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
1353    &stat_jaddref, 0, "");
1354SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
1355    &stat_jnewblk, 0, "");
1356SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
1357    &stat_journal_low, 0, "");
1358SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
1359    &stat_journal_min, 0, "");
1360SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
1361    &stat_journal_wait, 0, "");
1362SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
1363    &stat_jwait_filepage, 0, "");
1364SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
1365    &stat_jwait_freeblks, 0, "");
1366SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
1367    &stat_jwait_inode, 0, "");
1368SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
1369    &stat_jwait_newblk, 0, "");
1370SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
1371    &stat_cleanup_blkrequests, 0, "");
1372SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
1373    &stat_cleanup_inorequests, 0, "");
1374SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
1375    &stat_cleanup_high_delay, 0, "");
1376SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
1377    &stat_cleanup_retries, 0, "");
1378SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
1379    &stat_cleanup_failures, 0, "");
1380SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
1381    &softdep_flushcache, 0, "");
1382
1383SYSCTL_DECL(_vfs_ffs);
1384
1385LIST_HEAD(bmsafemap_hashhead, bmsafemap) *bmsafemap_hashtbl;
1386static u_long	bmsafemap_hash;	/* size of hash table - 1 */
1387
1388static int compute_summary_at_mount = 0;	/* Whether to recompute the summary at mount time */
1389SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
1390	   &compute_summary_at_mount, 0, "Recompute summary at mount");
1391
1392static struct proc *softdepproc;
1393static struct kproc_desc softdep_kp = {
1394	"softdepflush",
1395	softdep_flush,
1396	&softdepproc
1397};
1398SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
1399    &softdep_kp);
1400
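/*
 * The "softdepflush" kernel process.  Each iteration services any
 * pending requests to clear inodedeps or removal dependencies, then
 * walks the mount list processing the worklist of every softdep mount,
 * sleeping when no further progress can be made.
 */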
1401static void
1402softdep_flush(void)
1403{
1404	struct mount *nmp;
1405	struct mount *mp;
1406	struct ufsmount *ump;
1407	struct thread *td;
1408	int remaining;
1409	int progress;
1410
1411	td = curthread;
1412	td->td_pflags |= TDP_NORUNNINGBUF;
1413
1414	for (;;) {
1415		kproc_suspend_check(softdepproc);
1416		ACQUIRE_LOCK(&lk);
1417		/*
1418		 * If requested, try removing inode or removal dependencies.
1419		 */
1420		if (req_clear_inodedeps) {
1421			clear_inodedeps();
1422			req_clear_inodedeps -= 1;
1423			wakeup_one(&proc_waiting);
1424		}
1425		if (req_clear_remove) {
1426			clear_remove();
1427			req_clear_remove -= 1;
1428			wakeup_one(&proc_waiting);
1429		}
1430		FREE_LOCK(&lk);
1431		remaining = progress = 0;
1432		mtx_lock(&mountlist_mtx);
1433		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp)  {
1434			nmp = TAILQ_NEXT(mp, mnt_list);
1435			if (MOUNTEDSOFTDEP(mp) == 0)
1436				continue;
1437			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
1438				continue;
1439			progress += softdep_process_worklist(mp, 0);
1440			ump = VFSTOUFS(mp);
1441			remaining += ump->softdep_on_worklist;
1442			mtx_lock(&mountlist_mtx);
1443			nmp = TAILQ_NEXT(mp, mnt_list);
1444			vfs_unbusy(mp);
1445		}
1446		mtx_unlock(&mountlist_mtx);
1447		if (remaining && progress)
1448			continue;
1449		ACQUIRE_LOCK(&lk);
1450		if (!req_pending)
1451			msleep(&req_pending, &lk, PVM, "sdflush", hz);
1452		req_pending = 0;
1453		FREE_LOCK(&lk);
1454	}
1455}
1456
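/*
 * Request an immediate run of the softdep flush process.
 */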
1457static void
1458worklist_speedup(void)
1459{
1460	rw_assert(&lk, RA_WLOCKED);
1461	if (req_pending == 0) {
1462		req_pending = 1;
1463		wakeup(&req_pending);
1464	}
1465}
1466
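/*
 * Speed up the flush process, the buffer daemon, and the syncer.
 */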
1467static int
1468softdep_speedup(void)
1469{
1470
1471	worklist_speedup();
1472	bd_speedup();
1473	return speedup_syncer();
1474}
1475
1476/*
1477 * Add an item to the work queue.
1478 * This routine requires that the lock be held.
1479 * This is the only routine that adds items to the list.
1480 * Items are normally appended at the tail and processed in order from
1481 * first to last; the WK_HEAD flag below requests insertion at the head.
1482 */
1483
1484#define	WK_HEAD		0x0001	/* Add to HEAD. */
1485#define	WK_NODELAY	0x0002	/* Process immediately. */
1486
1487static void
1488add_to_worklist(wk, flags)
1489	struct worklist *wk;
1490	int flags;
1491{
1492	struct ufsmount *ump;
1493
1494	rw_assert(&lk, RA_WLOCKED);
1495	ump = VFSTOUFS(wk->wk_mp);
1496	if (wk->wk_state & ONWORKLIST)
1497		panic("add_to_worklist: %s(0x%X) already on list",
1498		    TYPENAME(wk->wk_type), wk->wk_state);
1499	wk->wk_state |= ONWORKLIST;
1500	if (ump->softdep_on_worklist == 0) {
1501		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1502		ump->softdep_worklist_tail = wk;
1503	} else if (flags & WK_HEAD) {
1504		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
1505	} else {
1506		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
1507		ump->softdep_worklist_tail = wk;
1508	}
1509	ump->softdep_on_worklist += 1;
1510	if (flags & WK_NODELAY)
1511		worklist_speedup();
1512}
1513
1514/*
1515 * Remove the item to be processed. If we are removing the last
1516 * item on the list, we need to recalculate the tail pointer.
1517 */
1518static void
1519remove_from_worklist(wk)
1520	struct worklist *wk;
1521{
1522	struct ufsmount *ump;
1523
1524	ump = VFSTOUFS(wk->wk_mp);
1525	WORKLIST_REMOVE(wk);
1526	if (ump->softdep_worklist_tail == wk)
1527		ump->softdep_worklist_tail =
1528		    (struct worklist *)wk->wk_list.le_prev;
1529	ump->softdep_on_worklist -= 1;
1530}
1531
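/*
 * Wake any thread sleeping in wait_worklist() on this item.
 */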
1532static void
1533wake_worklist(wk)
1534	struct worklist *wk;
1535{
1536	if (wk->wk_state & IOWAITING) {
1537		wk->wk_state &= ~IOWAITING;
1538		wakeup(wk);
1539	}
1540}
1541
1542static void
1543wait_worklist(wk, wmesg)
1544	struct worklist *wk;
1545	char *wmesg;
1546{
1547
1548	wk->wk_state |= IOWAITING;
1549	msleep(wk, &lk, PVM, wmesg, 0);
1550}
1551
1552/*
1553 * Process that runs once per second to handle items in the background queue.
1554 *
1555 * Note that items are handled in the order in which they appear in the
1556 * queue.  The code below depends on this property to ensure
1557 * that blocks of a file are freed before the inode itself is freed. This
1558 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
1559 * until all the old ones have been purged from the dependency lists.
1560 */
1561static int
1562softdep_process_worklist(mp, full)
1563	struct mount *mp;
1564	int full;
1565{
1566	int cnt, matchcnt;
1567	struct ufsmount *ump;
1568	long starttime;
1569
1570	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
1571	/*
1572	 * Record the process identifier of our caller so that we can give
1573	 * this process preferential treatment in request_cleanup below.
1574	 */
1575	matchcnt = 0;
1576	ump = VFSTOUFS(mp);
1577	ACQUIRE_LOCK(&lk);
1578	starttime = time_second;
1579	softdep_process_journal(mp, NULL, full?MNT_WAIT:0);
1580	while (ump->softdep_on_worklist > 0) {
1581		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
1582			break;
1583		else
1584			matchcnt += cnt;
1585		/*
1586		 * If requested, try removing inode or removal dependencies.
1587		 */
1588		if (req_clear_inodedeps) {
1589			clear_inodedeps();
1590			req_clear_inodedeps -= 1;
1591			wakeup_one(&proc_waiting);
1592		}
1593		if (req_clear_remove) {
1594			clear_remove();
1595			req_clear_remove -= 1;
1596			wakeup_one(&proc_waiting);
1597		}
1598		/*
1599		 * We do not generally want to stop for buffer space, but if
1600		 * we are really being a buffer hog, we will stop and wait.
1601		 */
1602		if (should_yield()) {
1603			FREE_LOCK(&lk);
1604			kern_yield(PRI_USER);
1605			bwillwrite();
1606			ACQUIRE_LOCK(&lk);
1607		}
1608		/*
1609		 * Never allow processing to run for more than one
1610		 * second. Otherwise the other mountpoints may get
1611		 * excessively backlogged.
1612		 */
1613		if (!full && starttime != time_second)
1614			break;
1615	}
1616	if (full == 0)
1617		journal_unsuspend(ump);
1618	FREE_LOCK(&lk);
1619	return (matchcnt);
1620}
1621
1622/*
1623 * Process all removes associated with a vnode if we are running out of
1624 * journal space.  Any other process that attempts to flush these will
1625 * be unable to do so because we have the vnode locked.
1626 */
1627static void
1628process_removes(vp)
1629	struct vnode *vp;
1630{
1631	struct inodedep *inodedep;
1632	struct dirrem *dirrem;
1633	struct mount *mp;
1634	ino_t inum;
1635
1636	rw_assert(&lk, RA_WLOCKED);
1637
1638	mp = vp->v_mount;
1639	inum = VTOI(vp)->i_number;
1640	for (;;) {
1641top:
1642		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1643			return;
1644		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
1645			/*
1646			 * If another thread is trying to lock this vnode
1647			 * it will fail but we must wait for it to do so
1648			 * before we can proceed.
1649			 */
1650			if (dirrem->dm_state & INPROGRESS) {
1651				wait_worklist(&dirrem->dm_list, "pwrwait");
1652				goto top;
1653			}
1654			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
1655			    (COMPLETE | ONWORKLIST))
1656				break;
1657		}
1658		if (dirrem == NULL)
1659			return;
1660		remove_from_worklist(&dirrem->dm_list);
1661		FREE_LOCK(&lk);
1662		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1663			panic("process_removes: suspended filesystem");
1664		handle_workitem_remove(dirrem, 0);
1665		vn_finished_secondary_write(mp);
1666		ACQUIRE_LOCK(&lk);
1667	}
1668}
1669
1670/*
1671 * Process all truncations associated with a vnode if we are running out
1672 * of journal space.  This is called when the vnode lock is already held
1673 * and no other process can clear the truncation.
1675 */
1676static void
1677process_truncates(vp)
1678	struct vnode *vp;
1679{
1680	struct inodedep *inodedep;
1681	struct freeblks *freeblks;
1682	struct mount *mp;
1683	ino_t inum;
1684	int cgwait;
1685
1686	rw_assert(&lk, RA_WLOCKED);
1687
1688	mp = vp->v_mount;
1689	inum = VTOI(vp)->i_number;
1690	for (;;) {
1691		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
1692			return;
1693		cgwait = 0;
1694		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
1695			/* Journal entries not yet written.  */
1696			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
1697				jwait(&LIST_FIRST(
1698				    &freeblks->fb_jblkdephd)->jb_list,
1699				    MNT_WAIT);
1700				break;
1701			}
1702			/* Another thread is executing this item. */
1703			if (freeblks->fb_state & INPROGRESS) {
1704				wait_worklist(&freeblks->fb_list, "ptrwait");
1705				break;
1706			}
1707			/* Freeblks is waiting on an inode write. */
1708			if ((freeblks->fb_state & COMPLETE) == 0) {
1709				FREE_LOCK(&lk);
1710				ffs_update(vp, 1);
1711				ACQUIRE_LOCK(&lk);
1712				break;
1713			}
1714			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
1715			    (ALLCOMPLETE | ONWORKLIST)) {
1716				remove_from_worklist(&freeblks->fb_list);
1717				freeblks->fb_state |= INPROGRESS;
1718				FREE_LOCK(&lk);
1719				if (vn_start_secondary_write(NULL, &mp,
1720				    V_NOWAIT))
1721					panic("process_truncates: "
1722					    "suspended filesystem");
1723				handle_workitem_freeblocks(freeblks, 0);
1724				vn_finished_secondary_write(mp);
1725				ACQUIRE_LOCK(&lk);
1726				break;
1727			}
1728			if (freeblks->fb_cgwait)
1729				cgwait++;
1730		}
1731		if (cgwait) {
1732			FREE_LOCK(&lk);
1733			sync_cgs(mp, MNT_WAIT);
1734			ffs_sync_snap(mp, MNT_WAIT);
1735			ACQUIRE_LOCK(&lk);
1736			continue;
1737		}
1738		if (freeblks == NULL)
1739			break;
1740	}
1741	return;
1742}
1743
1744/*
1745 * Process up to target items on the worklist.
1746 */
1747static int
1748process_worklist_item(mp, target, flags)
1749	struct mount *mp;
1750	int target;
1751	int flags;
1752{
1753	struct worklist sentinel;
1754	struct worklist *wk;
1755	struct ufsmount *ump;
1756	int matchcnt;
1757	int error;
1758
1759	rw_assert(&lk, RA_WLOCKED);
1760	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
1761	/*
1762	 * If we are being called because of a process doing a
1763	 * copy-on-write, then it is not safe to write as we may
1764	 * recurse into the copy-on-write routine.
1765	 */
1766	if (curthread->td_pflags & TDP_COWINPROGRESS)
1767		return (-1);
1768	PHOLD(curproc);	/* Don't let the stack go away. */
1769	ump = VFSTOUFS(mp);
1770	matchcnt = 0;
1771	sentinel.wk_mp = NULL;
1772	sentinel.wk_type = D_SENTINEL;
1773	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
1774	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
1775	    wk = LIST_NEXT(&sentinel, wk_list)) {
1776		if (wk->wk_type == D_SENTINEL) {
1777			LIST_REMOVE(&sentinel, wk_list);
1778			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
1779			continue;
1780		}
1781		if (wk->wk_state & INPROGRESS)
1782			panic("process_worklist_item: %p already in progress.",
1783			    wk);
1784		wk->wk_state |= INPROGRESS;
1785		remove_from_worklist(wk);
1786		FREE_LOCK(&lk);
1787		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
1788			panic("process_worklist_item: suspended filesystem");
1789		switch (wk->wk_type) {
1790		case D_DIRREM:
1791			/* removal of a directory entry */
1792			error = handle_workitem_remove(WK_DIRREM(wk), flags);
1793			break;
1794
1795		case D_FREEBLKS:
1796			/* releasing blocks and/or fragments from a file */
1797			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
1798			    flags);
1799			break;
1800
1801		case D_FREEFRAG:
1802			/* releasing a fragment when replaced as a file grows */
1803			handle_workitem_freefrag(WK_FREEFRAG(wk));
1804			error = 0;
1805			break;
1806
1807		case D_FREEFILE:
1808			/* releasing an inode when its link count drops to 0 */
1809			handle_workitem_freefile(WK_FREEFILE(wk));
1810			error = 0;
1811			break;
1812
1813		default:
1814			panic("%s_process_worklist: Unknown type %s",
1815			    "softdep", TYPENAME(wk->wk_type));
1816			/* NOTREACHED */
1817		}
1818		vn_finished_secondary_write(mp);
1819		ACQUIRE_LOCK(&lk);
1820		if (error == 0) {
1821			if (++matchcnt == target)
1822				break;
1823			continue;
1824		}
1825		/*
1826		 * We have to retry the worklist item later.  Wake up any
1827		 * waiters who may be able to complete it immediately and
1828		 * add the item back to the head so we don't try to execute
1829		 * it again.
1830		 */
1831		wk->wk_state &= ~INPROGRESS;
1832		wake_worklist(wk);
1833		add_to_worklist(wk, WK_HEAD);
1834	}
1835	LIST_REMOVE(&sentinel, wk_list);
1836	/* Sentinel could have become the tail via remove_from_worklist. */
1837	if (ump->softdep_worklist_tail == &sentinel)
1838		ump->softdep_worklist_tail =
1839		    (struct worklist *)sentinel.wk_list.le_prev;
1840	PRELE(curproc);
1841	return (matchcnt);
1842}
1843
1844/*
1845 * Move dependencies from one buffer to another.
1846 */
1847int
1848softdep_move_dependencies(oldbp, newbp)
1849	struct buf *oldbp;
1850	struct buf *newbp;
1851{
1852	struct worklist *wk, *wktail;
1853	int dirty;
1854
1855	if ((wk = LIST_FIRST(&oldbp->b_dep)) == NULL)
1856		return (0);
1857	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
1858	    ("softdep_move_dependencies called on non-softdep filesystem"));
1859	dirty = 0;
1860	wktail = NULL;
1861	ACQUIRE_LOCK(&lk);
1862	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
1863		LIST_REMOVE(wk, wk_list);
1864		if (wk->wk_type == D_BMSAFEMAP &&
1865		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
1866			dirty = 1;
1867		if (wktail == NULL)
1868			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
1869		else
1870			LIST_INSERT_AFTER(wktail, wk, wk_list);
1871		wktail = wk;
1872	}
1873	FREE_LOCK(&lk);
1874
1875	return (dirty);
1876}
1877
1878/*
1879 * Purge the work list of all items associated with a particular mount point.
1880 */
1881int
1882softdep_flushworklist(oldmnt, countp, td)
1883	struct mount *oldmnt;
1884	int *countp;
1885	struct thread *td;
1886{
1887	struct vnode *devvp;
1888	int count, error = 0;
1889	struct ufsmount *ump;
1890
1891	/*
1892	 * Alternately flush the block device associated with the mount
1893	 * point and process any dependencies that the flushing
1894	 * creates. We continue until no more worklist dependencies
1895	 * are found.
1896	 */
1897	*countp = 0;
1898	ump = VFSTOUFS(oldmnt);
1899	devvp = ump->um_devvp;
1900	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
1901		*countp += count;
1902		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1903		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1904		VOP_UNLOCK(devvp, 0);
1905		if (error)
1906			break;
1907	}
1908	return (error);
1909}
1910
1911static int
1912softdep_waitidle(struct mount *mp)
1913{
1914	struct ufsmount *ump;
1915	int error;
1916	int i;
1917
1918	ump = VFSTOUFS(mp);
1919	ACQUIRE_LOCK(&lk);
1920	for (i = 0; i < 10 && ump->softdep_deps; i++) {
1921		ump->softdep_req = 1;
1922		if (ump->softdep_on_worklist)
1923			panic("softdep_waitidle: work added after flush.");
1924		msleep(&ump->softdep_deps, &lk, PVM, "softdeps", 1);
1925	}
1926	ump->softdep_req = 0;
1927	FREE_LOCK(&lk);
1928	error = 0;
1929	if (i == 10) {
1930		error = EBUSY;
1931		printf("softdep_waitidle: Failed to flush worklist for %p\n",
1932		    mp);
1933	}
1934
1935	return (error);
1936}
1937
1938/*
1939 * Flush all vnodes and worklist items associated with a specified mount point.
1940 */
1941int
1942softdep_flushfiles(oldmnt, flags, td)
1943	struct mount *oldmnt;
1944	int flags;
1945	struct thread *td;
1946{
1947#ifdef QUOTA
1948	struct ufsmount *ump;
1949	int i;
1950#endif
1951	int error, early, depcount, loopcnt, retry_flush_count, retry;
1952	int morework;
1953
1954	KASSERT(MOUNTEDSOFTDEP(oldmnt) != 0,
1955	    ("softdep_flushfiles called on non-softdep filesystem"));
1956	loopcnt = 10;
1957	retry_flush_count = 3;
1958retry_flush:
1959	error = 0;
1960
1961	/*
1962	 * Alternately flush the vnodes associated with the mount
1963	 * point and process any dependencies that the flushing
1964	 * creates. In theory, this loop should iterate at most twice,
1965	 * but we give it a few extra passes just to be sure.
1966	 */
1967	for (; loopcnt > 0; loopcnt--) {
1968		/*
1969		 * Do another flush in case any vnodes were brought in
1970		 * as part of the cleanup operations.
1971		 */
1972		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
1973		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
1974		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
1975			break;
1976		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
1977		    depcount == 0)
1978			break;
1979	}
1980	/*
1981	 * If we are unmounting then it is an error to fail. If we
1982	 * are simply trying to downgrade to read-only, then filesystem
1983	 * activity can keep us busy forever, so we just fail with EBUSY.
1984	 */
1985	if (loopcnt == 0) {
1986		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
1987			panic("softdep_flushfiles: looping");
1988		error = EBUSY;
1989	}
1990	if (!error)
1991		error = softdep_waitidle(oldmnt);
1992	if (!error) {
1993		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
1994			retry = 0;
1995			MNT_ILOCK(oldmnt);
1996			KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
1997			    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
1998			morework = oldmnt->mnt_nvnodelistsize > 0;
1999#ifdef QUOTA
2000			ump = VFSTOUFS(oldmnt);
2001			UFS_LOCK(ump);
2002			for (i = 0; i < MAXQUOTAS; i++) {
2003				if (ump->um_quotas[i] != NULLVP)
2004					morework = 1;
2005			}
2006			UFS_UNLOCK(ump);
2007#endif
2008			if (morework) {
2009				if (--retry_flush_count > 0) {
2010					retry = 1;
2011					loopcnt = 3;
2012				} else
2013					error = EBUSY;
2014			}
2015			MNT_IUNLOCK(oldmnt);
2016			if (retry)
2017				goto retry_flush;
2018		}
2019	}
2020	return (error);
2021}
2022
2023/*
2024 * Structure hashing.
2025 *
2026 * There are three types of structures that can be looked up:
2027 *	1) pagedep structures identified by mount point, inode number,
2028 *	   and logical block.
2029 *	2) inodedep structures identified by mount point and inode number.
2030 *	3) newblk structures identified by mount point and
2031 *	   physical block number.
2032 *
2033 * The "pagedep" and "inodedep" dependency structures are hashed
2034 * separately from the file blocks and inodes to which they correspond.
2035 * This separation helps when the in-memory copy of an inode or
2036 * file block must be replaced. It also obviates the need to access
2037 * an inode or file page when simply updating (or de-allocating)
2038 * dependency structures. Lookup of newblk structures is needed to
2039 * find newly allocated blocks when trying to associate them with
2040 * their allocdirect or allocindir structure.
2041 *
2042 * The lookup routines optionally create and hash a new instance when
2043 * an existing entry is not found.
2044 */
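/*
 * Illustrative usage of the lookup routines below (a sketch, not a new
 * interface): with DEPALLOC set a return of 0 means a fresh, hashed entry
 * was allocated and returned, while 1 means an existing entry was found.
 *
 *	if (inodedep_lookup(mp, inum, DEPALLOC, &inodedep) == 0)
 *		... initialize any additional caller state ...
 */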
2045#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2046#define NODELAY		0x0002	/* cannot do background work */
2047
2048/*
2049 * Structures and routines associated with pagedep caching.
2050 */
2051LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
2052u_long	pagedep_hash;		/* size of hash table - 1 */
2053#define	PAGEDEP_HASH(mp, inum, lbn) \
2054	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
2055	    pagedep_hash])
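/*
 * The mount pointer is shifted down before being mixed with the inode and
 * logical block numbers, and the sum is masked with the table size minus
 * one (the tables are sized to a power of two by hashinit()).
 */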
2056
2057static int
2058pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp)
2059	struct pagedep_hashhead *pagedephd;
2060	ino_t ino;
2061	ufs_lbn_t lbn;
2062	struct mount *mp;
2063	int flags;
2064	struct pagedep **pagedeppp;
2065{
2066	struct pagedep *pagedep;
2067
2068	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2069		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn &&
2070		    mp == pagedep->pd_list.wk_mp) {
2071			*pagedeppp = pagedep;
2072			return (1);
2073		}
2074	}
2075	*pagedeppp = NULL;
2076	return (0);
2077}
2078/*
2079 * Look up a pagedep. Return 1 if found, 0 otherwise.
2080 * If not found, allocate if DEPALLOC flag is passed.
2081 * Found or allocated entry is returned in pagedeppp.
2082 * This routine must be called with the soft updates lock held.
2083 */
2084static int
2085pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
2086	struct mount *mp;
2087	struct buf *bp;
2088	ino_t ino;
2089	ufs_lbn_t lbn;
2090	int flags;
2091	struct pagedep **pagedeppp;
2092{
2093	struct pagedep *pagedep;
2094	struct pagedep_hashhead *pagedephd;
2095	struct worklist *wk;
2096	int ret;
2097	int i;
2098
2099	rw_assert(&lk, RA_WLOCKED);
2100	if (bp) {
2101		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2102			if (wk->wk_type == D_PAGEDEP) {
2103				*pagedeppp = WK_PAGEDEP(wk);
2104				return (1);
2105			}
2106		}
2107	}
2108	pagedephd = PAGEDEP_HASH(mp, ino, lbn);
2109	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
2110	if (ret) {
2111		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2112			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2113		return (1);
2114	}
2115	if ((flags & DEPALLOC) == 0)
2116		return (0);
2117	FREE_LOCK(&lk);
2118	pagedep = malloc(sizeof(struct pagedep),
2119	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2120	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2121	ACQUIRE_LOCK(&lk);
2122	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
2123	if (*pagedeppp) {
2124		/*
2125		 * This should never happen since we only create pagedeps
2126		 * with the vnode lock held.  Could be an assert.
2127		 */
2128		WORKITEM_FREE(pagedep, D_PAGEDEP);
2129		return (ret);
2130	}
2131	pagedep->pd_ino = ino;
2132	pagedep->pd_lbn = lbn;
2133	LIST_INIT(&pagedep->pd_dirremhd);
2134	LIST_INIT(&pagedep->pd_pendinghd);
2135	for (i = 0; i < DAHASHSZ; i++)
2136		LIST_INIT(&pagedep->pd_diraddhd[i]);
2137	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2138	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2139	*pagedeppp = pagedep;
2140	return (0);
2141}
2142
2143/*
2144 * Structures and routines associated with inodedep caching.
2145 */
2146LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
2147static u_long	inodedep_hash;	/* size of hash table - 1 */
2148#define	INODEDEP_HASH(fs, inum) \
2149      (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
2150
2151static int
2152inodedep_find(inodedephd, fs, inum, inodedeppp)
2153	struct inodedep_hashhead *inodedephd;
2154	struct fs *fs;
2155	ino_t inum;
2156	struct inodedep **inodedeppp;
2157{
2158	struct inodedep *inodedep;
2159
2160	LIST_FOREACH(inodedep, inodedephd, id_hash)
2161		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
2162			break;
2163	if (inodedep) {
2164		*inodedeppp = inodedep;
2165		return (1);
2166	}
2167	*inodedeppp = NULL;
2168
2169	return (0);
2170}
2171/*
2172 * Look up an inodedep. Return 1 if found, 0 if not found.
2173 * If not found, allocate if DEPALLOC flag is passed.
2174 * Found or allocated entry is returned in inodedeppp.
2175 * This routine must be called with the soft updates lock held.
2176 */
2177static int
2178inodedep_lookup(mp, inum, flags, inodedeppp)
2179	struct mount *mp;
2180	ino_t inum;
2181	int flags;
2182	struct inodedep **inodedeppp;
2183{
2184	struct inodedep *inodedep;
2185	struct inodedep_hashhead *inodedephd;
2186	struct fs *fs;
2187
2188	rw_assert(&lk, RA_WLOCKED);
2189	fs = VFSTOUFS(mp)->um_fs;
2190	inodedephd = INODEDEP_HASH(fs, inum);
2191
2192	if (inodedep_find(inodedephd, fs, inum, inodedeppp))
2193		return (1);
2194	if ((flags & DEPALLOC) == 0)
2195		return (0);
2196	/*
2197	 * If we are over our limit, try to improve the situation.
2198	 */
2199	if (dep_current[D_INODEDEP] > max_softdeps && (flags & NODELAY) == 0)
2200		request_cleanup(mp, FLUSH_INODES);
2201	FREE_LOCK(&lk);
2202	inodedep = malloc(sizeof(struct inodedep),
2203		M_INODEDEP, M_SOFTDEP_FLAGS);
2204	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2205	ACQUIRE_LOCK(&lk);
2206	if (inodedep_find(inodedephd, fs, inum, inodedeppp)) {
2207		WORKITEM_FREE(inodedep, D_INODEDEP);
2208		return (1);
2209	}
2210	inodedep->id_fs = fs;
2211	inodedep->id_ino = inum;
2212	inodedep->id_state = ALLCOMPLETE;
2213	inodedep->id_nlinkdelta = 0;
2214	inodedep->id_savedino1 = NULL;
2215	inodedep->id_savedsize = -1;
2216	inodedep->id_savedextsize = -1;
2217	inodedep->id_savednlink = -1;
2218	inodedep->id_bmsafemap = NULL;
2219	inodedep->id_mkdiradd = NULL;
2220	LIST_INIT(&inodedep->id_dirremhd);
2221	LIST_INIT(&inodedep->id_pendinghd);
2222	LIST_INIT(&inodedep->id_inowait);
2223	LIST_INIT(&inodedep->id_bufwait);
2224	TAILQ_INIT(&inodedep->id_inoreflst);
2225	TAILQ_INIT(&inodedep->id_inoupdt);
2226	TAILQ_INIT(&inodedep->id_newinoupdt);
2227	TAILQ_INIT(&inodedep->id_extupdt);
2228	TAILQ_INIT(&inodedep->id_newextupdt);
2229	TAILQ_INIT(&inodedep->id_freeblklst);
2230	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2231	*inodedeppp = inodedep;
2232	return (0);
2233}
2234
2235/*
2236 * Structures and routines associated with newblk caching.
2237 */
2238LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
2239u_long	newblk_hash;		/* size of hash table - 1 */
2240#define	NEWBLK_HASH(fs, inum) \
2241	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
2242
2243static int
2244newblk_find(newblkhd, mp, newblkno, flags, newblkpp)
2245	struct newblk_hashhead *newblkhd;
2246	struct mount *mp;
2247	ufs2_daddr_t newblkno;
2248	int flags;
2249	struct newblk **newblkpp;
2250{
2251	struct newblk *newblk;
2252
2253	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2254		if (newblkno != newblk->nb_newblkno)
2255			continue;
2256		if (mp != newblk->nb_list.wk_mp)
2257			continue;
2258		/*
2259		 * If we're creating a new dependency, don't match those that
2260		 * have already been converted to allocdirects.  This case
2261		 * arises when a fragment is being extended.
2262		 */
2263		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2264			continue;
2265		break;
2266	}
2267	if (newblk) {
2268		*newblkpp = newblk;
2269		return (1);
2270	}
2271	*newblkpp = NULL;
2272	return (0);
2273}
2274
2275/*
2276 * Look up a newblk. Return 1 if found, 0 if not found.
2277 * If not found, allocate if DEPALLOC flag is passed.
2278 * Found or allocated entry is returned in newblkpp.
2279 */
2280static int
2281newblk_lookup(mp, newblkno, flags, newblkpp)
2282	struct mount *mp;
2283	ufs2_daddr_t newblkno;
2284	int flags;
2285	struct newblk **newblkpp;
2286{
2287	struct newblk *newblk;
2288	struct newblk_hashhead *newblkhd;
2289
2290	newblkhd = NEWBLK_HASH(VFSTOUFS(mp)->um_fs, newblkno);
2291	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp))
2292		return (1);
2293	if ((flags & DEPALLOC) == 0)
2294		return (0);
2295	FREE_LOCK(&lk);
2296	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2297	    M_SOFTDEP_FLAGS | M_ZERO);
2298	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2299	ACQUIRE_LOCK(&lk);
2300	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp)) {
2301		WORKITEM_FREE(newblk, D_NEWBLK);
2302		return (1);
2303	}
2304	newblk->nb_freefrag = NULL;
2305	LIST_INIT(&newblk->nb_indirdeps);
2306	LIST_INIT(&newblk->nb_newdirblk);
2307	LIST_INIT(&newblk->nb_jwork);
2308	newblk->nb_state = ATTACHED;
2309	newblk->nb_newblkno = newblkno;
2310	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2311	*newblkpp = newblk;
2312	return (0);
2313}
2314
2315/*
2316 * Structures and routines associated with freed indirect block caching.
2317 */
2318struct freeworklst *indir_hashtbl;
2319u_long	indir_hash;		/* size of hash table - 1 */
2320#define	INDIR_HASH(mp, blkno) \
2321	(&indir_hashtbl[((((register_t)(mp)) >> 13) + (blkno)) & indir_hash])
2322
2323/*
2324 * Lookup an indirect block in the indir hash table.  The freework is
2325 * removed and potentially freed.  The caller must do a blocking journal
2326 * write before writing to the blkno.
2327 */
2328static int
2329indirblk_lookup(mp, blkno)
2330	struct mount *mp;
2331	ufs2_daddr_t blkno;
2332{
2333	struct freework *freework;
2334	struct freeworklst *wkhd;
2335
2336	wkhd = INDIR_HASH(mp, blkno);
2337	TAILQ_FOREACH(freework, wkhd, fw_next) {
2338		if (freework->fw_blkno != blkno)
2339			continue;
2340		if (freework->fw_list.wk_mp != mp)
2341			continue;
2342		indirblk_remove(freework);
2343		return (1);
2344	}
2345	return (0);
2346}
2347
2348/*
2349 * Insert an indirect block represented by freework into the indirblk
2350 * hash table so that it may prevent the block from being re-used prior
2351 * to the journal being written.
2352 */
2353static void
2354indirblk_insert(freework)
2355	struct freework *freework;
2356{
2357	struct jblocks *jblocks;
2358	struct jseg *jseg;
2359
2360	jblocks = VFSTOUFS(freework->fw_list.wk_mp)->softdep_jblocks;
2361	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2362	if (jseg == NULL)
2363		return;
2364
2365	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2366	TAILQ_INSERT_HEAD(INDIR_HASH(freework->fw_list.wk_mp,
2367	    freework->fw_blkno), freework, fw_next);
2368	freework->fw_state &= ~DEPCOMPLETE;
2369}
2370
2371static void
2372indirblk_remove(freework)
2373	struct freework *freework;
2374{
2375
2376	LIST_REMOVE(freework, fw_segs);
2377	TAILQ_REMOVE(INDIR_HASH(freework->fw_list.wk_mp,
2378	    freework->fw_blkno), freework, fw_next);
2379	freework->fw_state |= DEPCOMPLETE;
2380	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2381		WORKITEM_FREE(freework, D_FREEWORK);
2382}
2383
2384/*
2385 * Executed during system initialization before
2386 * mounting any filesystems.
2387 */
2388void
2389softdep_initialize()
2390{
2391	int i;
2392
2393	LIST_INIT(&mkdirlisthd);
2394	max_softdeps = desiredvnodes * 4;
2395	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &pagedep_hash);
2396	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
2397	newblk_hashtbl = hashinit(max_softdeps / 2,  M_NEWBLK, &newblk_hash);
2398	bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &bmsafemap_hash);
2399	i = 1 << (ffs(desiredvnodes / 10) - 1);
2400	indir_hashtbl = malloc(i * sizeof(indir_hashtbl[0]), M_FREEWORK,
2401	    M_WAITOK);
2402	indir_hash = i - 1;
2403	for (i = 0; i <= indir_hash; i++)
2404		TAILQ_INIT(&indir_hashtbl[i]);
2405
2406	/* initialize bioops hack */
2407	bioops.io_start = softdep_disk_io_initiation;
2408	bioops.io_complete = softdep_disk_write_complete;
2409	bioops.io_deallocate = softdep_deallocate_dependencies;
2410	bioops.io_countdeps = softdep_count_dependencies;
2411
2412	/* Initialize the callout with an mtx. */
2413	callout_init_mtx(&softdep_callout, &lk, 0);
2414}
2415
2416/*
2417 * Executed after all filesystems have been unmounted during
2418 * filesystem module unload.
2419 */
2420void
2421softdep_uninitialize()
2422{
2423
2424	callout_drain(&softdep_callout);
2425	hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash);
2426	hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash);
2427	hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash);
2428	hashdestroy(bmsafemap_hashtbl, M_BMSAFEMAP, bmsafemap_hash);
2429	free(indir_hashtbl, M_FREEWORK);
2430}
2431
2432/*
2433 * Called at mount time to notify the dependency code that a
2434 * filesystem wishes to use it.
2435 */
2436int
2437softdep_mount(devvp, mp, fs, cred)
2438	struct vnode *devvp;
2439	struct mount *mp;
2440	struct fs *fs;
2441	struct ucred *cred;
2442{
2443	struct csum_total cstotal;
2444	struct ufsmount *ump;
2445	struct cg *cgp;
2446	struct buf *bp;
2447	int error, cyl;
2448
2449	MNT_ILOCK(mp);
2450	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2451	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2452		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2453			MNTK_SOFTDEP | MNTK_NOASYNC;
2454	}
2455	MNT_IUNLOCK(mp);
2456	ump = VFSTOUFS(mp);
2457	LIST_INIT(&ump->softdep_workitem_pending);
2458	LIST_INIT(&ump->softdep_journal_pending);
2459	TAILQ_INIT(&ump->softdep_unlinked);
2460	LIST_INIT(&ump->softdep_dirtycg);
2461	ump->softdep_worklist_tail = NULL;
2462	ump->softdep_on_worklist = 0;
2463	ump->softdep_deps = 0;
2464	if ((fs->fs_flags & FS_SUJ) &&
2465	    (error = journal_mount(mp, fs, cred)) != 0) {
2466		printf("Failed to start journal: %d\n", error);
2467		return (error);
2468	}
2469	/*
2470	 * When doing soft updates, the counters in the
2471	 * superblock may have gotten out of sync. Recomputation
2472	 * can take a long time and can be deferred for background
2473	 * fsck.  However, the old behavior of scanning the cylinder
2474	 * groups and recalculating them at mount time is available
2475	 * by setting vfs.ffs.compute_summary_at_mount to one.
2476	 */
2477	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2478		return (0);
2479	bzero(&cstotal, sizeof cstotal);
2480	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2481		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2482		    fs->fs_cgsize, cred, &bp)) != 0) {
2483			brelse(bp);
2484			return (error);
2485		}
2486		cgp = (struct cg *)bp->b_data;
2487		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2488		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2489		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2490		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2491		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2492		brelse(bp);
2493	}
2494#ifdef DEBUG
2495	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2496		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2497#endif
2498	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2499	return (0);
2500}
2501
2502void
2503softdep_unmount(mp)
2504	struct mount *mp;
2505{
2506
2507	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
2508	    ("softdep_unmount called on non-softdep filesystem"));
2509	MNT_ILOCK(mp);
2510	mp->mnt_flag &= ~MNT_SOFTDEP;
2511	if (MOUNTEDSUJ(mp) == 0) {
2512		MNT_IUNLOCK(mp);
2513		return;
2514	}
2515	mp->mnt_flag &= ~MNT_SUJ;
2516	MNT_IUNLOCK(mp);
2517	journal_unmount(mp);
2518}
2519
2520static struct jblocks *
2521jblocks_create(void)
2522{
2523	struct jblocks *jblocks;
2524
2525	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2526	TAILQ_INIT(&jblocks->jb_segs);
2527	jblocks->jb_avail = 10;
2528	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2529	    M_JBLOCKS, M_WAITOK | M_ZERO);
2530
2531	return (jblocks);
2532}
2533
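/*
 * Allocate up to "bytes" of journal space from the current extent, moving
 * on to the next extent (and wrapping) when the current one is exhausted.
 * Only contiguous space is handed out, so the number of bytes actually
 * granted is returned in *actual and may be less than was requested.
 */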
2534static ufs2_daddr_t
2535jblocks_alloc(jblocks, bytes, actual)
2536	struct jblocks *jblocks;
2537	int bytes;
2538	int *actual;
2539{
2540	ufs2_daddr_t daddr;
2541	struct jextent *jext;
2542	int freecnt;
2543	int blocks;
2544
2545	blocks = bytes / DEV_BSIZE;
2546	jext = &jblocks->jb_extent[jblocks->jb_head];
2547	freecnt = jext->je_blocks - jblocks->jb_off;
2548	if (freecnt == 0) {
2549		jblocks->jb_off = 0;
2550		if (++jblocks->jb_head > jblocks->jb_used)
2551			jblocks->jb_head = 0;
2552		jext = &jblocks->jb_extent[jblocks->jb_head];
2553		freecnt = jext->je_blocks;
2554	}
2555	if (freecnt > blocks)
2556		freecnt = blocks;
2557	*actual = freecnt * DEV_BSIZE;
2558	daddr = jext->je_daddr + jblocks->jb_off;
2559	jblocks->jb_off += freecnt;
2560	jblocks->jb_free -= freecnt;
2561
2562	return (daddr);
2563}
2564
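/*
 * Return journal space to the free count and wake any waiters.  If the
 * journal had been suspended for lack of space, kick the softdep worklist
 * so that progress can resume.
 */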
2565static void
2566jblocks_free(jblocks, mp, bytes)
2567	struct jblocks *jblocks;
2568	struct mount *mp;
2569	int bytes;
2570{
2571
2572	jblocks->jb_free += bytes / DEV_BSIZE;
2573	if (jblocks->jb_suspended)
2574		worklist_speedup();
2575	wakeup(jblocks);
2576}
2577
2578static void
2579jblocks_destroy(jblocks)
2580	struct jblocks *jblocks;
2581{
2582
2583	if (jblocks->jb_extent)
2584		free(jblocks->jb_extent, M_JBLOCKS);
2585	free(jblocks, M_JBLOCKS);
2586}
2587
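/*
 * Record that "blocks" device blocks starting at "daddr" back the journal.
 * A range contiguous with the last extent is coalesced into it; otherwise
 * a new extent is appended, doubling the extent array when it fills.
 */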
2588static void
2589jblocks_add(jblocks, daddr, blocks)
2590	struct jblocks *jblocks;
2591	ufs2_daddr_t daddr;
2592	int blocks;
2593{
2594	struct jextent *jext;
2595
2596	jblocks->jb_blocks += blocks;
2597	jblocks->jb_free += blocks;
2598	jext = &jblocks->jb_extent[jblocks->jb_used];
2599	/* Adding the first block. */
2600	if (jext->je_daddr == 0) {
2601		jext->je_daddr = daddr;
2602		jext->je_blocks = blocks;
2603		return;
2604	}
2605	/* Extending the last extent. */
2606	if (jext->je_daddr + jext->je_blocks == daddr) {
2607		jext->je_blocks += blocks;
2608		return;
2609	}
2610	/* Adding a new extent. */
2611	if (++jblocks->jb_used == jblocks->jb_avail) {
2612		jblocks->jb_avail *= 2;
2613		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2614		    M_JBLOCKS, M_WAITOK | M_ZERO);
2615		memcpy(jext, jblocks->jb_extent,
2616		    sizeof(struct jextent) * jblocks->jb_used);
2617		free(jblocks->jb_extent, M_JBLOCKS);
2618		jblocks->jb_extent = jext;
2619	}
2620	jext = &jblocks->jb_extent[jblocks->jb_used];
2621	jext->je_daddr = daddr;
2622	jext->je_blocks = blocks;
2623	return;
2624}
2625
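/*
 * Resolve the softdep journal file (SUJ_FILE in the filesystem root) and
 * return its locked vnode in *vpp.
 */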
2626int
2627softdep_journal_lookup(mp, vpp)
2628	struct mount *mp;
2629	struct vnode **vpp;
2630{
2631	struct componentname cnp;
2632	struct vnode *dvp;
2633	ino_t sujournal;
2634	int error;
2635
2636	error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2637	if (error)
2638		return (error);
2639	bzero(&cnp, sizeof(cnp));
2640	cnp.cn_nameiop = LOOKUP;
2641	cnp.cn_flags = ISLASTCN;
2642	cnp.cn_thread = curthread;
2643	cnp.cn_cred = curthread->td_ucred;
2644	cnp.cn_pnbuf = SUJ_FILE;
2645	cnp.cn_nameptr = SUJ_FILE;
2646	cnp.cn_namelen = strlen(SUJ_FILE);
2647	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2648	vput(dvp);
2649	if (error != 0)
2650		return (error);
2651	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2652	return (error);
2653}
2654
2655/*
2656 * Open and verify the journal file.
2657 */
2658static int
2659journal_mount(mp, fs, cred)
2660	struct mount *mp;
2661	struct fs *fs;
2662	struct ucred *cred;
2663{
2664	struct jblocks *jblocks;
2665	struct vnode *vp;
2666	struct inode *ip;
2667	ufs2_daddr_t blkno;
2668	int bcount;
2669	int error;
2670	int i;
2671
2672	error = softdep_journal_lookup(mp, &vp);
2673	if (error != 0) {
2674		printf("Failed to find journal.  Use tunefs to create one\n");
2675		return (error);
2676	}
2677	ip = VTOI(vp);
2678	if (ip->i_size < SUJ_MIN) {
2679		error = ENOSPC;
2680		goto out;
2681	}
2682	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2683	jblocks = jblocks_create();
2684	for (i = 0; i < bcount; i++) {
2685		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2686		if (error)
2687			break;
2688		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2689	}
2690	if (error) {
2691		jblocks_destroy(jblocks);
2692		goto out;
2693	}
2694	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2695	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2696	VFSTOUFS(mp)->softdep_jblocks = jblocks;
2697out:
2698	if (error == 0) {
2699		MNT_ILOCK(mp);
2700		mp->mnt_flag |= MNT_SUJ;
2701		mp->mnt_flag &= ~MNT_SOFTDEP;
2702		MNT_IUNLOCK(mp);
2703		/*
2704		 * Only validate the journal contents if the
2705		 * filesystem is clean, otherwise we write the logs
2706		 * but they'll never be used.  If the filesystem was
2707		 * still dirty when we mounted it the journal is
2708		 * invalid and a new journal can only be valid if it
2709		 * starts from a clean mount.
2710		 */
2711		if (fs->fs_clean) {
2712			DIP_SET(ip, i_modrev, fs->fs_mtime);
2713			ip->i_flags |= IN_MODIFIED;
2714			ffs_update(vp, 1);
2715		}
2716	}
2717	vput(vp);
2718	return (error);
2719}
2720
2721static void
2722journal_unmount(mp)
2723	struct mount *mp;
2724{
2725	struct ufsmount *ump;
2726
2727	ump = VFSTOUFS(mp);
2728	if (ump->softdep_jblocks)
2729		jblocks_destroy(ump->softdep_jblocks);
2730	ump->softdep_jblocks = NULL;
2731}
2732
2733/*
2734 * Called when a journal record is ready to be written.  Space is allocated
2735 * and the journal entry is created when the journal is flushed to stable
2736 * store.
2737 */
2738static void
2739add_to_journal(wk)
2740	struct worklist *wk;
2741{
2742	struct ufsmount *ump;
2743
2744	rw_assert(&lk, RA_WLOCKED);
2745	ump = VFSTOUFS(wk->wk_mp);
2746	if (wk->wk_state & ONWORKLIST)
2747		panic("add_to_journal: %s(0x%X) already on list",
2748		    TYPENAME(wk->wk_type), wk->wk_state);
2749	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2750	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2751		ump->softdep_jblocks->jb_age = ticks;
2752		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2753	} else
2754		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2755	ump->softdep_journal_tail = wk;
2756	ump->softdep_on_journal += 1;
2757}
2758
2759/*
2760 * Remove an arbitrary item from the journal worklist, maintaining the tail
2761 * pointer.  This happens when a new operation obviates the need to
2762 * journal an old operation.
2763 */
2764static void
2765remove_from_journal(wk)
2766	struct worklist *wk;
2767{
2768	struct ufsmount *ump;
2769
2770	rw_assert(&lk, RA_WLOCKED);
2771	ump = VFSTOUFS(wk->wk_mp);
2772#ifdef SUJ_DEBUG
2773	{
2774		struct worklist *wkn;
2775
2776		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2777			if (wkn == wk)
2778				break;
2779		if (wkn == NULL)
2780			panic("remove_from_journal: %p is not in journal", wk);
2781	}
2782#endif
2783	/*
2784	 * We emulate a TAILQ to save space in most structures which do not
2785	 * require TAILQ semantics.  When the entry being removed is the current
2786	 * tail, the previous entry becomes the new tail; casting le_prev works
2787	 * only because the worklist linkage is the first member of the structure.
2788	 */
2789	if (ump->softdep_journal_tail == wk)
2790		ump->softdep_journal_tail =
2791		    (struct worklist *)wk->wk_list.le_prev;
2792
2793	WORKLIST_REMOVE(wk);
2794	ump->softdep_on_journal -= 1;
2795}
2796
2797/*
2798 * Check for journal space as well as dependency limits so the prelink
2799 * code can throttle both journaled and non-journaled filesystems.
2800 * Threshold is 0 for low and 1 for min.
2801 */
2802static int
2803journal_space(ump, thresh)
2804	struct ufsmount *ump;
2805	int thresh;
2806{
2807	struct jblocks *jblocks;
2808	int avail;
2809
2810	jblocks = ump->softdep_jblocks;
2811	if (jblocks == NULL)
2812		return (1);
2813	/*
2814	 * We use a tighter restriction here to prevent request_cleanup(),
2815	 * running in other threads, from contending for locks we currently hold.
2816	 */
2817	if (dep_current[D_INODEDEP] > (max_softdeps / 10) * 9)
2818		return (0);
2819	if (thresh)
2820		thresh = jblocks->jb_min;
2821	else
2822		thresh = jblocks->jb_low;
2823	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2824	avail = jblocks->jb_free - avail;
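	/*
	 * Illustrative arithmetic, assuming 32-byte journal records and
	 * 512-byte device blocks: 1,024 pending records account for 64
	 * blocks that are subtracted from jb_free before the comparison.
	 */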
2825
2826	return (avail > thresh);
2827}
2828
2829static void
2830journal_suspend(ump)
2831	struct ufsmount *ump;
2832{
2833	struct jblocks *jblocks;
2834	struct mount *mp;
2835
2836	mp = UFSTOVFS(ump);
2837	jblocks = ump->softdep_jblocks;
2838	MNT_ILOCK(mp);
2839	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2840		stat_journal_min++;
2841		mp->mnt_kern_flag |= MNTK_SUSPEND;
2842		mp->mnt_susp_owner = FIRST_THREAD_IN_PROC(softdepproc);
2843	}
2844	jblocks->jb_suspended = 1;
2845	MNT_IUNLOCK(mp);
2846}
2847
2848static int
2849journal_unsuspend(struct ufsmount *ump)
2850{
2851	struct jblocks *jblocks;
2852	struct mount *mp;
2853
2854	mp = UFSTOVFS(ump);
2855	jblocks = ump->softdep_jblocks;
2856
2857	if (jblocks != NULL && jblocks->jb_suspended &&
2858	    journal_space(ump, jblocks->jb_min)) {
2859		jblocks->jb_suspended = 0;
2860		FREE_LOCK(&lk);
2861		mp->mnt_susp_owner = curthread;
2862		vfs_write_resume(mp, 0);
2863		ACQUIRE_LOCK(&lk);
2864		return (1);
2865	}
2866	return (0);
2867}
2868
2869/*
2870 * Called before any allocation function to be certain that there is
2871 * sufficient space in the journal prior to creating any new records.
2872 * Since in the case of block allocation we may have multiple locked
2873 * buffers at the time of the actual allocation we can not block
2874 * when the journal records are created.  Doing so would create a deadlock
2875 * if any of these buffers needed to be flushed to reclaim space.  Instead
2876 * we require a sufficiently large amount of available space such that
2877 * each thread in the system could have passed this allocation check and
2878 * still have sufficient free space.  With 20% of a minimum journal size
2879 * of 1MB we have 6553 records available.
2880 */
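 * (20% of 1MB is 209,715 bytes; at the 32-byte journal record size that
 * comes to roughly 6,553 records.)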
2881int
2882softdep_prealloc(vp, waitok)
2883	struct vnode *vp;
2884	int waitok;
2885{
2886	struct ufsmount *ump;
2887
2888	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
2889	    ("softdep_prealloc called on non-softdep filesystem"));
2890	/*
2891	 * Nothing to do if we are not running journaled soft updates.
2892	 * If we currently hold the snapshot lock, we must avoid handling
2893	 * other resources that could cause deadlock.
2894	 */
2895	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
2896		return (0);
2897	ump = VFSTOUFS(vp->v_mount);
2898	ACQUIRE_LOCK(&lk);
2899	if (journal_space(ump, 0)) {
2900		FREE_LOCK(&lk);
2901		return (0);
2902	}
2903	stat_journal_low++;
2904	FREE_LOCK(&lk);
2905	if (waitok == MNT_NOWAIT)
2906		return (ENOSPC);
2907	/*
2908	 * Attempt to sync this vnode once to flush any journal
2909	 * work attached to it.
2910	 */
2911	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
2912		ffs_syncvnode(vp, waitok, 0);
2913	ACQUIRE_LOCK(&lk);
2914	process_removes(vp);
2915	process_truncates(vp);
2916	if (journal_space(ump, 0) == 0) {
2917		softdep_speedup();
2918		if (journal_space(ump, 1) == 0)
2919			journal_suspend(ump);
2920	}
2921	FREE_LOCK(&lk);
2922
2923	return (0);
2924}
2925
2926/*
2927 * Before adjusting a link count on a vnode verify that we have sufficient
2928 * journal space.  If not, process operations that depend on the currently
2929 * locked pair of vnodes to try to free up space, as the syncer, buf daemon,
2930 * and softdep flush threads cannot acquire these locks to reclaim space.
2931 */
2932static void
2933softdep_prelink(dvp, vp)
2934	struct vnode *dvp;
2935	struct vnode *vp;
2936{
2937	struct ufsmount *ump;
2938
2939	ump = VFSTOUFS(dvp->v_mount);
2940	rw_assert(&lk, RA_WLOCKED);
2941	/*
2942	 * Nothing to do if we have sufficient journal space.
2943	 * If we currently hold the snapshot lock, we must avoid
2944	 * handling other resources that could cause deadlock.
2945	 */
2946	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
2947		return;
2948	stat_journal_low++;
2949	FREE_LOCK(&lk);
2950	if (vp)
2951		ffs_syncvnode(vp, MNT_NOWAIT, 0);
2952	ffs_syncvnode(dvp, MNT_WAIT, 0);
2953	ACQUIRE_LOCK(&lk);
2954	/* Process vp before dvp as it may create .. removes. */
2955	if (vp) {
2956		process_removes(vp);
2957		process_truncates(vp);
2958	}
2959	process_removes(dvp);
2960	process_truncates(dvp);
2961	softdep_speedup();
2962	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
2963	if (journal_space(ump, 0) == 0) {
2964		softdep_speedup();
2965		if (journal_space(ump, 1) == 0)
2966			journal_suspend(ump);
2967	}
2968}
2969
2970static void
2971jseg_write(ump, jseg, data)
2972	struct ufsmount *ump;
2973	struct jseg *jseg;
2974	uint8_t *data;
2975{
2976	struct jsegrec *rec;
2977
2978	rec = (struct jsegrec *)data;
2979	rec->jsr_seq = jseg->js_seq;
2980	rec->jsr_oldest = jseg->js_oldseq;
2981	rec->jsr_cnt = jseg->js_cnt;
2982	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
2983	rec->jsr_crc = 0;
2984	rec->jsr_time = ump->um_fs->fs_mtime;
2985}
2986
2987static inline void
2988inoref_write(inoref, jseg, rec)
2989	struct inoref *inoref;
2990	struct jseg *jseg;
2991	struct jrefrec *rec;
2992{
2993
2994	inoref->if_jsegdep->jd_seg = jseg;
2995	rec->jr_ino = inoref->if_ino;
2996	rec->jr_parent = inoref->if_parent;
2997	rec->jr_nlink = inoref->if_nlink;
2998	rec->jr_mode = inoref->if_mode;
2999	rec->jr_diroff = inoref->if_diroff;
3000}
3001
3002static void
3003jaddref_write(jaddref, jseg, data)
3004	struct jaddref *jaddref;
3005	struct jseg *jseg;
3006	uint8_t *data;
3007{
3008	struct jrefrec *rec;
3009
3010	rec = (struct jrefrec *)data;
3011	rec->jr_op = JOP_ADDREF;
3012	inoref_write(&jaddref->ja_ref, jseg, rec);
3013}
3014
3015static void
3016jremref_write(jremref, jseg, data)
3017	struct jremref *jremref;
3018	struct jseg *jseg;
3019	uint8_t *data;
3020{
3021	struct jrefrec *rec;
3022
3023	rec = (struct jrefrec *)data;
3024	rec->jr_op = JOP_REMREF;
3025	inoref_write(&jremref->jr_ref, jseg, rec);
3026}
3027
3028static void
3029jmvref_write(jmvref, jseg, data)
3030	struct jmvref *jmvref;
3031	struct jseg *jseg;
3032	uint8_t *data;
3033{
3034	struct jmvrec *rec;
3035
3036	rec = (struct jmvrec *)data;
3037	rec->jm_op = JOP_MVREF;
3038	rec->jm_ino = jmvref->jm_ino;
3039	rec->jm_parent = jmvref->jm_parent;
3040	rec->jm_oldoff = jmvref->jm_oldoff;
3041	rec->jm_newoff = jmvref->jm_newoff;
3042}
3043
3044static void
3045jnewblk_write(jnewblk, jseg, data)
3046	struct jnewblk *jnewblk;
3047	struct jseg *jseg;
3048	uint8_t *data;
3049{
3050	struct jblkrec *rec;
3051
3052	jnewblk->jn_jsegdep->jd_seg = jseg;
3053	rec = (struct jblkrec *)data;
3054	rec->jb_op = JOP_NEWBLK;
3055	rec->jb_ino = jnewblk->jn_ino;
3056	rec->jb_blkno = jnewblk->jn_blkno;
3057	rec->jb_lbn = jnewblk->jn_lbn;
3058	rec->jb_frags = jnewblk->jn_frags;
3059	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3060}
3061
3062static void
3063jfreeblk_write(jfreeblk, jseg, data)
3064	struct jfreeblk *jfreeblk;
3065	struct jseg *jseg;
3066	uint8_t *data;
3067{
3068	struct jblkrec *rec;
3069
3070	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3071	rec = (struct jblkrec *)data;
3072	rec->jb_op = JOP_FREEBLK;
3073	rec->jb_ino = jfreeblk->jf_ino;
3074	rec->jb_blkno = jfreeblk->jf_blkno;
3075	rec->jb_lbn = jfreeblk->jf_lbn;
3076	rec->jb_frags = jfreeblk->jf_frags;
3077	rec->jb_oldfrags = 0;
3078}
3079
3080static void
3081jfreefrag_write(jfreefrag, jseg, data)
3082	struct jfreefrag *jfreefrag;
3083	struct jseg *jseg;
3084	uint8_t *data;
3085{
3086	struct jblkrec *rec;
3087
3088	jfreefrag->fr_jsegdep->jd_seg = jseg;
3089	rec = (struct jblkrec *)data;
3090	rec->jb_op = JOP_FREEBLK;
3091	rec->jb_ino = jfreefrag->fr_ino;
3092	rec->jb_blkno = jfreefrag->fr_blkno;
3093	rec->jb_lbn = jfreefrag->fr_lbn;
3094	rec->jb_frags = jfreefrag->fr_frags;
3095	rec->jb_oldfrags = 0;
3096}
3097
3098static void
3099jtrunc_write(jtrunc, jseg, data)
3100	struct jtrunc *jtrunc;
3101	struct jseg *jseg;
3102	uint8_t *data;
3103{
3104	struct jtrncrec *rec;
3105
3106	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3107	rec = (struct jtrncrec *)data;
3108	rec->jt_op = JOP_TRUNC;
3109	rec->jt_ino = jtrunc->jt_ino;
3110	rec->jt_size = jtrunc->jt_size;
3111	rec->jt_extsize = jtrunc->jt_extsize;
3112}
3113
3114static void
3115jfsync_write(jfsync, jseg, data)
3116	struct jfsync *jfsync;
3117	struct jseg *jseg;
3118	uint8_t *data;
3119{
3120	struct jtrncrec *rec;
3121
3122	rec = (struct jtrncrec *)data;
3123	rec->jt_op = JOP_SYNC;
3124	rec->jt_ino = jfsync->jfs_ino;
3125	rec->jt_size = jfsync->jfs_size;
3126	rec->jt_extsize = jfsync->jfs_extsize;
3127}
3128
3129static void
3130softdep_flushjournal(mp)
3131	struct mount *mp;
3132{
3133	struct jblocks *jblocks;
3134	struct ufsmount *ump;
3135
3136	if (MOUNTEDSUJ(mp) == 0)
3137		return;
3138	ump = VFSTOUFS(mp);
3139	jblocks = ump->softdep_jblocks;
3140	ACQUIRE_LOCK(&lk);
3141	while (ump->softdep_on_journal) {
3142		jblocks->jb_needseg = 1;
3143		softdep_process_journal(mp, NULL, MNT_WAIT);
3144	}
3145	FREE_LOCK(&lk);
3146}
3147
3148static void softdep_synchronize_completed(struct bio *);
3149static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3150
3151static void
3152softdep_synchronize_completed(bp)
3153	struct bio *bp;
3154{
3155	struct jseg *oldest;
3156	struct jseg *jseg;
3157
3158	/*
3159	 * caller1 marks the last segment written before we issued the
3160	 * synchronize cache.
3161	 */
3162	jseg = bp->bio_caller1;
3163	oldest = NULL;
3164	ACQUIRE_LOCK(&lk);
3165	/*
3166	 * Mark all the journal entries waiting on the synchronize cache
3167	 * as completed so they may continue on.
3168	 */
3169	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3170		jseg->js_state |= COMPLETE;
3171		oldest = jseg;
3172		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3173	}
3174	/*
3175	 * Restart deferred journal entry processing from the oldest
3176	 * completed jseg.
3177	 */
3178	if (oldest)
3179		complete_jsegs(oldest);
3180
3181	FREE_LOCK(&lk);
3182	g_destroy_bio(bp);
3183}
3184
3185/*
3186 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3187 * barriers.  The journal must be written prior to any blocks that depend
3188 * on it and the journal can not be released until the blocks have be
3189 * on it and the journal cannot be released until the blocks have been
3190 */
3191static void
3192softdep_synchronize(bp, ump, caller1)
3193	struct bio *bp;
3194	struct ufsmount *ump;
3195	void *caller1;
3196{
3197
3198	bp->bio_cmd = BIO_FLUSH;
3199	bp->bio_flags |= BIO_ORDERED;
3200	bp->bio_data = NULL;
3201	bp->bio_offset = ump->um_cp->provider->mediasize;
3202	bp->bio_length = 0;
3203	bp->bio_done = softdep_synchronize_completed;
3204	bp->bio_caller1 = caller1;
3205	g_io_request(bp,
3206	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3207}
3208
3209/*
3210 * Flush some journal records to disk.
3211 */
3212static void
3213softdep_process_journal(mp, needwk, flags)
3214	struct mount *mp;
3215	struct worklist *needwk;
3216	int flags;
3217{
3218	struct jblocks *jblocks;
3219	struct ufsmount *ump;
3220	struct worklist *wk;
3221	struct jseg *jseg;
3222	struct buf *bp;
3223	struct bio *bio;
3224	uint8_t *data;
3225	struct fs *fs;
3226	int shouldflush;
3227	int segwritten;
3228	int jrecmin;	/* Minimum records per block. */
3229	int jrecmax;	/* Maximum records per block. */
3230	int size;
3231	int cnt;
3232	int off;
3233	int devbsize;
3234
3235	if (MOUNTEDSUJ(mp) == 0)
3236		return;
3237	shouldflush = softdep_flushcache;
3238	bio = NULL;
3239	jseg = NULL;
3240	ump = VFSTOUFS(mp);
3241	fs = ump->um_fs;
3242	jblocks = ump->softdep_jblocks;
3243	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3244	/*
3245	 * We write anywhere between a disk block and fs block.  The upper
3246	 * bound is picked to prevent buffer cache fragmentation and limit
3247	 * processing time per I/O.
3248	 */
3249	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3250	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
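	/*
	 * A worked example, assuming the common sizes of a 512-byte device
	 * block, 32-byte journal records and a 32K filesystem block: jrecmin
	 * is 512 / 32 - 1 = 15 and jrecmax is (32768 / 512) * 15 = 960.
	 */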
3251	segwritten = 0;
3252	for (;;) {
3253		cnt = ump->softdep_on_journal;
3254		/*
3255		 * Criteria for writing a segment:
3256		 * 1) We have a full block.
3257		 * 2) We're called from jwait() and haven't found the
3258		 *    journal item yet.
3259		 * 3) Always write if needseg is set.
3260		 * 4) If we are called from process_worklist and have
3261		 *    not yet written anything we write a partial block
3262		 *    to enforce a 1 second maximum latency on journal
3263		 *    entries.
3264		 */
3265		if (cnt < (jrecmax - 1) && needwk == NULL &&
3266		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3267			break;
3268		cnt++;
3269		/*
3270		 * Verify some free journal space.  softdep_prealloc() should
3271		 * guarantee that we don't run out so this is indicative of
3272		 * a problem with the flow control.  Try to recover
3273		 * gracefully in any event.
3274		 */
3275		while (jblocks->jb_free == 0) {
3276			if (flags != MNT_WAIT)
3277				break;
3278			printf("softdep: Out of journal space!\n");
3279			softdep_speedup();
3280			msleep(jblocks, &lk, PRIBIO, "jblocks", hz);
3281		}
3282		FREE_LOCK(&lk);
3283		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3284		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3285		LIST_INIT(&jseg->js_entries);
3286		LIST_INIT(&jseg->js_indirs);
3287		jseg->js_state = ATTACHED;
3288		if (shouldflush == 0)
3289			jseg->js_state |= COMPLETE;
3290		else if (bio == NULL)
3291			bio = g_alloc_bio();
3292		jseg->js_jblocks = jblocks;
3293		bp = geteblk(fs->fs_bsize, 0);
3294		ACQUIRE_LOCK(&lk);
3295		/*
3296		 * If there was a race while we were allocating the block
3297		 * and jseg, the entry we care about was likely written.
3298		 * We bail out in both the WAIT and NOWAIT case and assume
3299		 * the caller will loop if the entry it cares about is
3300		 * not written.
3301		 */
3302		cnt = ump->softdep_on_journal;
3303		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3304			bp->b_flags |= B_INVAL | B_NOCACHE;
3305			WORKITEM_FREE(jseg, D_JSEG);
3306			FREE_LOCK(&lk);
3307			brelse(bp);
3308			ACQUIRE_LOCK(&lk);
3309			break;
3310		}
3311		/*
3312		 * Calculate the disk block size required for the available
3313		 * records rounded to the min size.
3314		 */
3315		if (cnt == 0)
3316			size = devbsize;
3317		else if (cnt < jrecmax)
3318			size = howmany(cnt, jrecmin) * devbsize;
3319		else
3320			size = fs->fs_bsize;
3321		/*
3322		 * Allocate a disk block for this journal data and account
3323		 * for truncation of the requested size if enough contiguous
3324		 * space was not available.
3325		 */
3326		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3327		bp->b_lblkno = bp->b_blkno;
3328		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3329		bp->b_bcount = size;
3330		bp->b_flags &= ~B_INVAL;
3331		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3332		/*
3333		 * Initialize our jseg with cnt records.  Assign the next
3334		 * sequence number to it and link it in-order.
3335		 */
3336		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3337		jseg->js_buf = bp;
3338		jseg->js_cnt = cnt;
3339		jseg->js_refs = cnt + 1;	/* Self ref. */
3340		jseg->js_size = size;
3341		jseg->js_seq = jblocks->jb_nextseq++;
3342		if (jblocks->jb_oldestseg == NULL)
3343			jblocks->jb_oldestseg = jseg;
3344		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3345		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3346		if (jblocks->jb_writeseg == NULL)
3347			jblocks->jb_writeseg = jseg;
3348		/*
3349		 * Start filling in records from the pending list.
3350		 */
3351		data = bp->b_data;
3352		off = 0;
3353		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3354		    != NULL) {
3355			if (cnt == 0)
3356				break;
3357			/* Place a segment header on every device block. */
3358			if ((off % devbsize) == 0) {
3359				jseg_write(ump, jseg, data);
3360				off += JREC_SIZE;
3361				data = bp->b_data + off;
3362			}
3363			if (wk == needwk)
3364				needwk = NULL;
3365			remove_from_journal(wk);
3366			wk->wk_state |= INPROGRESS;
3367			WORKLIST_INSERT(&jseg->js_entries, wk);
3368			switch (wk->wk_type) {
3369			case D_JADDREF:
3370				jaddref_write(WK_JADDREF(wk), jseg, data);
3371				break;
3372			case D_JREMREF:
3373				jremref_write(WK_JREMREF(wk), jseg, data);
3374				break;
3375			case D_JMVREF:
3376				jmvref_write(WK_JMVREF(wk), jseg, data);
3377				break;
3378			case D_JNEWBLK:
3379				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3380				break;
3381			case D_JFREEBLK:
3382				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3383				break;
3384			case D_JFREEFRAG:
3385				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3386				break;
3387			case D_JTRUNC:
3388				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3389				break;
3390			case D_JFSYNC:
3391				jfsync_write(WK_JFSYNC(wk), jseg, data);
3392				break;
3393			default:
3394				panic("process_journal: Unknown type %s",
3395				    TYPENAME(wk->wk_type));
3396				/* NOTREACHED */
3397			}
3398			off += JREC_SIZE;
3399			data = bp->b_data + off;
3400			cnt--;
3401		}
3402		/*
3403		 * Write this one buffer and continue.
3404		 */
3405		segwritten = 1;
3406		jblocks->jb_needseg = 0;
3407		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3408		FREE_LOCK(&lk);
3409		pbgetvp(ump->um_devvp, bp);
3410		/*
3411		 * We only do the blocking wait once we find the journal
3412		 * entry we're looking for.
3413		 */
3414		if (needwk == NULL && flags == MNT_WAIT)
3415			bwrite(bp);
3416		else
3417			bawrite(bp);
3418		ACQUIRE_LOCK(&lk);
3419	}
3420	/*
3421	 * If we wrote a segment issue a synchronize cache so the journal
3422	 * is reflected on disk before the data is written.  Since reclaiming
3423	 * journal space also requires writing a journal record this
3424	 * process also enforces a barrier before reclamation.
3425	 */
3426	if (segwritten && shouldflush) {
3427		softdep_synchronize(bio, ump,
3428		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3429	} else if (bio)
3430		g_destroy_bio(bio);
3431	/*
3432	 * If we've suspended the filesystem because we ran out of journal
3433	 * space, either try to sync it here to make some progress or
3434	 * unsuspend it if enough space has already been freed.
3435	 */
3436	if (flags == 0 && jblocks->jb_suspended) {
3437		if (journal_unsuspend(ump))
3438			return;
3439		FREE_LOCK(&lk);
3440		VFS_SYNC(mp, MNT_NOWAIT);
3441		ffs_sbupdate(ump, MNT_WAIT, 0);
3442		ACQUIRE_LOCK(&lk);
3443	}
3444}
3445
3446/*
3447 * Complete a jseg, allowing all dependencies awaiting journal writes
3448 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3449 * structures so that the journal segment can be freed to reclaim space.
3450 */
3451static void
3452complete_jseg(jseg)
3453	struct jseg *jseg;
3454{
3455	struct worklist *wk;
3456	struct jmvref *jmvref;
3457	int waiting;
3458#ifdef INVARIANTS
3459	int i = 0;
3460#endif
3461
3462	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3463		WORKLIST_REMOVE(wk);
3464		waiting = wk->wk_state & IOWAITING;
3465		wk->wk_state &= ~(INPROGRESS | IOWAITING);
3466		wk->wk_state |= COMPLETE;
3467		KASSERT(i++ < jseg->js_cnt,
3468		    ("handle_written_jseg: overflow %d >= %d",
3469		    i - 1, jseg->js_cnt));
3470		switch (wk->wk_type) {
3471		case D_JADDREF:
3472			handle_written_jaddref(WK_JADDREF(wk));
3473			break;
3474		case D_JREMREF:
3475			handle_written_jremref(WK_JREMREF(wk));
3476			break;
3477		case D_JMVREF:
3478			rele_jseg(jseg);	/* No jsegdep. */
3479			jmvref = WK_JMVREF(wk);
3480			LIST_REMOVE(jmvref, jm_deps);
3481			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3482				free_pagedep(jmvref->jm_pagedep);
3483			WORKITEM_FREE(jmvref, D_JMVREF);
3484			break;
3485		case D_JNEWBLK:
3486			handle_written_jnewblk(WK_JNEWBLK(wk));
3487			break;
3488		case D_JFREEBLK:
3489			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3490			break;
3491		case D_JTRUNC:
3492			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3493			break;
3494		case D_JFSYNC:
3495			rele_jseg(jseg);	/* No jsegdep. */
3496			WORKITEM_FREE(wk, D_JFSYNC);
3497			break;
3498		case D_JFREEFRAG:
3499			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3500			break;
3501		default:
3502			panic("handle_written_jseg: Unknown type %s",
3503			    TYPENAME(wk->wk_type));
3504			/* NOTREACHED */
3505		}
3506		if (waiting)
3507			wakeup(wk);
3508	}
3509	/* Release the self reference so the structure may be freed. */
3510	rele_jseg(jseg);
3511}
3512
3513/*
3514 * Determine which jsegs are ready for completion processing.  Waits for
3515 * synchronize cache to complete as well as forcing in-order completion
3516 * of journal entries.
3517 */
3518static void
3519complete_jsegs(jseg)
3520	struct jseg *jseg;
3521{
3522	struct jblocks *jblocks;
3523	struct jseg *jsegn;
3524
3525	jblocks = jseg->js_jblocks;
3526	/*
3527	 * Don't allow out-of-order completions.  If this isn't the first
3528	 * unwritten block, it must wait for the earlier ones to be written.
3529	 */
3530	if (jseg != jblocks->jb_writeseg)
3531		return;
3532	/* Iterate through available jsegs processing their entries. */
3533	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3534		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3535		jsegn = TAILQ_NEXT(jseg, js_next);
3536		complete_jseg(jseg);
3537		jseg = jsegn;
3538	}
3539	jblocks->jb_writeseg = jseg;
3540	/*
3541	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3542	 */
3543	free_jsegs(jblocks);
3544}
3545
3546/*
3547 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3548 * the final completions.
3549 */
3550static void
3551handle_written_jseg(jseg, bp)
3552	struct jseg *jseg;
3553	struct buf *bp;
3554{
3555
3556	if (jseg->js_refs == 0)
3557		panic("handle_written_jseg: No self-reference on %p", jseg);
3558	jseg->js_state |= DEPCOMPLETE;
3559	/*
3560	 * We'll never need this buffer again, set flags so it will be
3561	 * discarded.
3562	 */
3563	bp->b_flags |= B_INVAL | B_NOCACHE;
3564	pbrelvp(bp);
3565	complete_jsegs(jseg);
3566}
3567
3568static inline struct jsegdep *
3569inoref_jseg(inoref)
3570	struct inoref *inoref;
3571{
3572	struct jsegdep *jsegdep;
3573
3574	jsegdep = inoref->if_jsegdep;
3575	inoref->if_jsegdep = NULL;
3576
3577	return (jsegdep);
3578}
3579
3580/*
3581 * Called once a jremref has made it to stable store.  The jremref is marked
3582 * complete and we attempt to free it.  Any pagedep writes sleeping while
3583 * waiting for the jremref to complete will be awoken by free_jremref.
3584 */
3585static void
3586handle_written_jremref(jremref)
3587	struct jremref *jremref;
3588{
3589	struct inodedep *inodedep;
3590	struct jsegdep *jsegdep;
3591	struct dirrem *dirrem;
3592
3593	/* Grab the jsegdep. */
3594	jsegdep = inoref_jseg(&jremref->jr_ref);
3595	/*
3596	 * Remove us from the inoref list.
3597	 */
3598	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3599	    0, &inodedep) == 0)
3600		panic("handle_written_jremref: Lost inodedep");
3601	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3602	/*
3603	 * Complete the dirrem.
3604	 */
3605	dirrem = jremref->jr_dirrem;
3606	jremref->jr_dirrem = NULL;
3607	LIST_REMOVE(jremref, jr_deps);
3608	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3609	jwork_insert(&dirrem->dm_jwork, jsegdep);
3610	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3611	    (dirrem->dm_state & COMPLETE) != 0)
3612		add_to_worklist(&dirrem->dm_list, 0);
3613	free_jremref(jremref);
3614}
3615
3616/*
3617 * Called once a jaddref has made it to stable store.  The dependency is
3618 * marked complete and any dependent structures are added to the inode
3619 * bufwait list to be completed as soon as it is written.  If a bitmap write
3620 * depends on this entry we move the inode into the inodedephd of the
3621 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3622 */
3623static void
3624handle_written_jaddref(jaddref)
3625	struct jaddref *jaddref;
3626{
3627	struct jsegdep *jsegdep;
3628	struct inodedep *inodedep;
3629	struct diradd *diradd;
3630	struct mkdir *mkdir;
3631
3632	/* Grab the jsegdep. */
3633	jsegdep = inoref_jseg(&jaddref->ja_ref);
3634	mkdir = NULL;
3635	diradd = NULL;
3636	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3637	    0, &inodedep) == 0)
3638		panic("handle_written_jaddref: Lost inodedep.");
3639	if (jaddref->ja_diradd == NULL)
3640		panic("handle_written_jaddref: No dependency");
3641	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3642		diradd = jaddref->ja_diradd;
3643		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3644	} else if (jaddref->ja_state & MKDIR_PARENT) {
3645		mkdir = jaddref->ja_mkdir;
3646		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3647	} else if (jaddref->ja_state & MKDIR_BODY)
3648		mkdir = jaddref->ja_mkdir;
3649	else
3650		panic("handle_written_jaddref: Unknown dependency %p",
3651		    jaddref->ja_diradd);
3652	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
3653	/*
3654	 * Remove us from the inode list.
3655	 */
3656	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3657	/*
3658	 * The mkdir may be waiting on the jaddref to clear before freeing.
3659	 */
3660	if (mkdir) {
3661		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3662		    ("handle_written_jaddref: Incorrect type for mkdir %s",
3663		    TYPENAME(mkdir->md_list.wk_type)));
3664		mkdir->md_jaddref = NULL;
3665		diradd = mkdir->md_diradd;
3666		mkdir->md_state |= DEPCOMPLETE;
3667		complete_mkdir(mkdir);
3668	}
3669	jwork_insert(&diradd->da_jwork, jsegdep);
3670	if (jaddref->ja_state & NEWBLOCK) {
3671		inodedep->id_state |= ONDEPLIST;
3672		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3673		    inodedep, id_deps);
3674	}
3675	free_jaddref(jaddref);
3676}
3677
3678/*
3679 * Called once a jnewblk journal record is written.  The allocdirect or
3680 * allocindir is placed in the bmsafemap to await notification of a written
3681 * bitmap.  If the operation was canceled we add the jsegdep to the
3682 * appropriate dependency to free the journal space once the canceling
3683 * operation completes.
3684 */
3685static void
3686handle_written_jnewblk(jnewblk)
3687	struct jnewblk *jnewblk;
3688{
3689	struct bmsafemap *bmsafemap;
3690	struct freefrag *freefrag;
3691	struct freework *freework;
3692	struct jsegdep *jsegdep;
3693	struct newblk *newblk;
3694
3695	/* Grab the jsegdep. */
3696	jsegdep = jnewblk->jn_jsegdep;
3697	jnewblk->jn_jsegdep = NULL;
3698	if (jnewblk->jn_dep == NULL)
3699		panic("handle_written_jnewblk: No dependency for the segdep.");
3700	switch (jnewblk->jn_dep->wk_type) {
3701	case D_NEWBLK:
3702	case D_ALLOCDIRECT:
3703	case D_ALLOCINDIR:
3704		/*
3705		 * Add the written block to the bmsafemap so it can
3706		 * be notified when the bitmap is on disk.
3707		 */
3708		newblk = WK_NEWBLK(jnewblk->jn_dep);
3709		newblk->nb_jnewblk = NULL;
3710		if ((newblk->nb_state & GOINGAWAY) == 0) {
3711			bmsafemap = newblk->nb_bmsafemap;
3712			newblk->nb_state |= ONDEPLIST;
3713			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3714			    nb_deps);
3715		}
3716		jwork_insert(&newblk->nb_jwork, jsegdep);
3717		break;
3718	case D_FREEFRAG:
3719		/*
3720		 * A newblock being removed by a freefrag when replaced by
3721		 * frag extension.
3722		 */
3723		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3724		freefrag->ff_jdep = NULL;
3725		jwork_insert(&freefrag->ff_jwork, jsegdep);
3726		break;
3727	case D_FREEWORK:
3728		/*
3729		 * A direct block was removed by truncate.
3730		 */
3731		freework = WK_FREEWORK(jnewblk->jn_dep);
3732		freework->fw_jnewblk = NULL;
3733		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3734		break;
3735	default:
3736		panic("handle_written_jnewblk: Unknown type %d.",
3737		    jnewblk->jn_dep->wk_type);
3738	}
3739	jnewblk->jn_dep = NULL;
3740	free_jnewblk(jnewblk);
3741}
3742
3743/*
3744 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3745 * an in-flight allocation that has not yet been committed.  Divorce us
3746 * from the freefrag and mark it DEPCOMPLETE so that it may be added
3747 * to the worklist.
3748 */
3749static void
3750cancel_jfreefrag(jfreefrag)
3751	struct jfreefrag *jfreefrag;
3752{
3753	struct freefrag *freefrag;
3754
3755	if (jfreefrag->fr_jsegdep) {
3756		free_jsegdep(jfreefrag->fr_jsegdep);
3757		jfreefrag->fr_jsegdep = NULL;
3758	}
3759	freefrag = jfreefrag->fr_freefrag;
3760	jfreefrag->fr_freefrag = NULL;
3761	free_jfreefrag(jfreefrag);
3762	freefrag->ff_state |= DEPCOMPLETE;
3763	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3764}
3765
3766/*
3767 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3768 */
3769static void
3770free_jfreefrag(jfreefrag)
3771	struct jfreefrag *jfreefrag;
3772{
3773
3774	if (jfreefrag->fr_state & INPROGRESS)
3775		WORKLIST_REMOVE(&jfreefrag->fr_list);
3776	else if (jfreefrag->fr_state & ONWORKLIST)
3777		remove_from_journal(&jfreefrag->fr_list);
3778	if (jfreefrag->fr_freefrag != NULL)
3779		panic("free_jfreefrag:  Still attached to a freefrag.");
3780	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3781}
3782
3783/*
3784 * Called when the journal write for a jfreefrag completes.  The parent
3785 * freefrag is added to the worklist if this completes its dependencies.
3786 */
3787static void
3788handle_written_jfreefrag(jfreefrag)
3789	struct jfreefrag *jfreefrag;
3790{
3791	struct jsegdep *jsegdep;
3792	struct freefrag *freefrag;
3793
3794	/* Grab the jsegdep. */
3795	jsegdep = jfreefrag->fr_jsegdep;
3796	jfreefrag->fr_jsegdep = NULL;
3797	freefrag = jfreefrag->fr_freefrag;
3798	if (freefrag == NULL)
3799		panic("handle_written_jfreefrag: No freefrag.");
3800	freefrag->ff_state |= DEPCOMPLETE;
3801	freefrag->ff_jdep = NULL;
3802	jwork_insert(&freefrag->ff_jwork, jsegdep);
3803	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3804		add_to_worklist(&freefrag->ff_list, 0);
3805	jfreefrag->fr_freefrag = NULL;
3806	free_jfreefrag(jfreefrag);
3807}
3808
3809/*
3810 * Called when the journal write for a jblkdep (jfreeblk or jtrunc)
3811 * completes.  The jblkdep is removed from the freeblks list of pending
3812 * journal writes and the jsegdep is moved to the freeblks jwork to be
3813 * completed when all blocks have been reclaimed.
3814 */
3815static void
3816handle_written_jblkdep(jblkdep)
3817	struct jblkdep *jblkdep;
3818{
3819	struct freeblks *freeblks;
3820	struct jsegdep *jsegdep;
3821
3822	/* Grab the jsegdep. */
3823	jsegdep = jblkdep->jb_jsegdep;
3824	jblkdep->jb_jsegdep = NULL;
3825	freeblks = jblkdep->jb_freeblks;
3826	LIST_REMOVE(jblkdep, jb_deps);
3827	jwork_insert(&freeblks->fb_jwork, jsegdep);
3828	/*
3829	 * If the freeblks is all journaled, we can add it to the worklist.
3830	 */
3831	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3832	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3833		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3834
3835	free_jblkdep(jblkdep);
3836}
3837
3838static struct jsegdep *
3839newjsegdep(struct worklist *wk)
3840{
3841	struct jsegdep *jsegdep;
3842
3843	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3844	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3845	jsegdep->jd_seg = NULL;
3846
3847	return (jsegdep);
3848}
3849
3850static struct jmvref *
3851newjmvref(dp, ino, oldoff, newoff)
3852	struct inode *dp;
3853	ino_t ino;
3854	off_t oldoff;
3855	off_t newoff;
3856{
3857	struct jmvref *jmvref;
3858
3859	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3860	workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3861	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3862	jmvref->jm_parent = dp->i_number;
3863	jmvref->jm_ino = ino;
3864	jmvref->jm_oldoff = oldoff;
3865	jmvref->jm_newoff = newoff;
3866
3867	return (jmvref);
3868}
3869
3870/*
3871 * Allocate a new jremref that tracks the removal of ip from dp with the
3872 * directory entry offset of diroff.  Mark the entry as ATTACHED and
3873 * DEPCOMPLETE as we have all the information required for the journal write
3874 * and the directory has already been removed from the buffer.  The caller
3875 * is responsible for linking the jremref into the pagedep and adding it
3876 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
3877 * a DOTDOT addition so handle_workitem_remove() can properly assign
3878 * the jsegdep when we're done.
3879 */
3880static struct jremref *
3881newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
3882    off_t diroff, nlink_t nlink)
3883{
3884	struct jremref *jremref;
3885
3886	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
3887	workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
3888	jremref->jr_state = ATTACHED;
3889	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
3890	   nlink, ip->i_mode);
3891	jremref->jr_dirrem = dirrem;
3892
3893	return (jremref);
3894}
3895
3896static inline void
3897newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
3898    nlink_t nlink, uint16_t mode)
3899{
3900
3901	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
3902	inoref->if_diroff = diroff;
3903	inoref->if_ino = ino;
3904	inoref->if_parent = parent;
3905	inoref->if_nlink = nlink;
3906	inoref->if_mode = mode;
3907}
3908
3909/*
3910 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
3911 * directory offset may not be known until later.  The caller is responsible
3912 * for adding the entry to the journal when this information is available.
3913 * nlink should be the link count prior to the addition and mode is only
3914 * required to have the correct FMT.
3915 */
3916static struct jaddref *
3917newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
3918    uint16_t mode)
3919{
3920	struct jaddref *jaddref;
3921
3922	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
3923	workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
3924	jaddref->ja_state = ATTACHED;
3925	jaddref->ja_mkdir = NULL;
3926	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
3927
3928	return (jaddref);
3929}
3930
3931/*
3932 * Create a new free dependency for a freework.  The caller is responsible
3933 * for adjusting the reference count when it has the lock held.  The freedep
3934 * will track an outstanding bitmap write that will ultimately clear the
3935 * freework to continue.
3936 */
3937static struct freedep *
3938newfreedep(struct freework *freework)
3939{
3940	struct freedep *freedep;
3941
3942	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
3943	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
3944	freedep->fd_freework = freework;
3945
3946	return (freedep);
3947}
3948
3949/*
3950 * Free a freedep structure once the buffer it is linked to is written.  If
3951 * this is the last reference to the freework, schedule it for completion.
3952 */
3953static void
3954free_freedep(freedep)
3955	struct freedep *freedep;
3956{
3957	struct freework *freework;
3958
3959	freework = freedep->fd_freework;
3960	freework->fw_freeblks->fb_cgwait--;
3961	if (--freework->fw_ref == 0)
3962		freework_enqueue(freework);
3963	WORKITEM_FREE(freedep, D_FREEDEP);
3964}
3965
3966/*
3967 * Allocate a new freework structure that may be a level in an indirect
3968 * when parent is not NULL or a top level block when it is.  The top level
3969 * freework structures are allocated without lk held and before the freeblks
3970 * is visible outside of softdep_setup_freeblocks().
3971 */
3972static struct freework *
3973newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
3974	struct ufsmount *ump;
3975	struct freeblks *freeblks;
3976	struct freework *parent;
3977	ufs_lbn_t lbn;
3978	ufs2_daddr_t nb;
3979	int frags;
3980	int off;
3981	int journal;
3982{
3983	struct freework *freework;
3984
3985	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
3986	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
3987	freework->fw_state = ATTACHED;
3988	freework->fw_jnewblk = NULL;
3989	freework->fw_freeblks = freeblks;
3990	freework->fw_parent = parent;
3991	freework->fw_lbn = lbn;
3992	freework->fw_blkno = nb;
3993	freework->fw_frags = frags;
3994	freework->fw_indir = NULL;
3995	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
3996		? 0 : NINDIR(ump->um_fs) + 1;
3997	freework->fw_start = freework->fw_off = off;
3998	if (journal)
3999		newjfreeblk(freeblks, lbn, nb, frags);
4000	if (parent == NULL) {
4001		ACQUIRE_LOCK(&lk);
4002		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
4003		freeblks->fb_ref++;
4004		FREE_LOCK(&lk);
4005	}
4006
4007	return (freework);
4008}
4009
4010/*
4011 * Eliminate a jfreeblk for a block that does not need journaling.
4012 */
4013static void
4014cancel_jfreeblk(freeblks, blkno)
4015	struct freeblks *freeblks;
4016	ufs2_daddr_t blkno;
4017{
4018	struct jfreeblk *jfreeblk;
4019	struct jblkdep *jblkdep;
4020
4021	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4022		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4023			continue;
4024		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4025		if (jfreeblk->jf_blkno == blkno)
4026			break;
4027	}
4028	if (jblkdep == NULL)
4029		return;
4030	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4031	free_jsegdep(jblkdep->jb_jsegdep);
4032	LIST_REMOVE(jblkdep, jb_deps);
4033	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4034}
4035
4036/*
4037 * Allocate a new jfreeblk to journal a top level block pointer when truncating
4038 * a file.  The caller must add this to the worklist when lk is held.
4039 */
4040static struct jfreeblk *
4041newjfreeblk(freeblks, lbn, blkno, frags)
4042	struct freeblks *freeblks;
4043	ufs_lbn_t lbn;
4044	ufs2_daddr_t blkno;
4045	int frags;
4046{
4047	struct jfreeblk *jfreeblk;
4048
4049	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4050	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4051	    freeblks->fb_list.wk_mp);
4052	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4053	jfreeblk->jf_dep.jb_freeblks = freeblks;
4054	jfreeblk->jf_ino = freeblks->fb_inum;
4055	jfreeblk->jf_lbn = lbn;
4056	jfreeblk->jf_blkno = blkno;
4057	jfreeblk->jf_frags = frags;
4058	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4059
4060	return (jfreeblk);
4061}
4062
4063/*
4064 * Allocate a new jtrunc to track a partial truncation.
4065 */
4066static struct jtrunc *
4067newjtrunc(freeblks, size, extsize)
4068	struct freeblks *freeblks;
4069	off_t size;
4070	int extsize;
4071{
4072	struct jtrunc *jtrunc;
4073
4074	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4075	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4076	    freeblks->fb_list.wk_mp);
4077	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4078	jtrunc->jt_dep.jb_freeblks = freeblks;
4079	jtrunc->jt_ino = freeblks->fb_inum;
4080	jtrunc->jt_size = size;
4081	jtrunc->jt_extsize = extsize;
4082	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4083
4084	return (jtrunc);
4085}
4086
4087/*
4088 * If we're canceling a new bitmap we have to search for another ref
4089 * to move into the bmsafemap dep.  This might be better expressed
4090 * with another structure.
4091 */
4092static void
4093move_newblock_dep(jaddref, inodedep)
4094	struct jaddref *jaddref;
4095	struct inodedep *inodedep;
4096{
4097	struct inoref *inoref;
4098	struct jaddref *jaddrefn;
4099
4100	jaddrefn = NULL;
4101	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4102	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4103		if ((jaddref->ja_state & NEWBLOCK) &&
4104		    inoref->if_list.wk_type == D_JADDREF) {
4105			jaddrefn = (struct jaddref *)inoref;
4106			break;
4107		}
4108	}
4109	if (jaddrefn == NULL)
4110		return;
4111	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4112	jaddrefn->ja_state |= jaddref->ja_state &
4113	    (ATTACHED | UNDONE | NEWBLOCK);
4114	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4115	jaddref->ja_state |= ATTACHED;
4116	LIST_REMOVE(jaddref, ja_bmdeps);
4117	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4118	    ja_bmdeps);
4119}
4120
4121/*
4122 * Cancel a jaddref either before it has been written or while it is being
4123 * written.  This happens when a link is removed before the add reaches
4124 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4125 * and inode to prevent the link count or bitmap from reaching the disk
4126 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4127 * required.
4128 *
4129 * Returns 1 if the canceled addref requires journaling of the remove and
4130 * 0 otherwise.
4131 */
4132static int
4133cancel_jaddref(jaddref, inodedep, wkhd)
4134	struct jaddref *jaddref;
4135	struct inodedep *inodedep;
4136	struct workhead *wkhd;
4137{
4138	struct inoref *inoref;
4139	struct jsegdep *jsegdep;
4140	int needsj;
4141
4142	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4143	    ("cancel_jaddref: Canceling complete jaddref"));
4144	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4145		needsj = 1;
4146	else
4147		needsj = 0;
4148	if (inodedep == NULL)
4149		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4150		    0, &inodedep) == 0)
4151			panic("cancel_jaddref: Lost inodedep");
4152	/*
4153	 * We must adjust the nlink of any reference operation that follows
4154	 * us so that it is consistent with the in-memory reference.  This
4155	 * ensures that inode nlink rollbacks always have the correct link.
4156	 */
4157	if (needsj == 0) {
4158		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4159		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4160			if (inoref->if_state & GOINGAWAY)
4161				break;
4162			inoref->if_nlink--;
4163		}
4164	}
4165	jsegdep = inoref_jseg(&jaddref->ja_ref);
4166	if (jaddref->ja_state & NEWBLOCK)
4167		move_newblock_dep(jaddref, inodedep);
4168	wake_worklist(&jaddref->ja_list);
4169	jaddref->ja_mkdir = NULL;
4170	if (jaddref->ja_state & INPROGRESS) {
4171		jaddref->ja_state &= ~INPROGRESS;
4172		WORKLIST_REMOVE(&jaddref->ja_list);
4173		jwork_insert(wkhd, jsegdep);
4174	} else {
4175		free_jsegdep(jsegdep);
4176		if (jaddref->ja_state & DEPCOMPLETE)
4177			remove_from_journal(&jaddref->ja_list);
4178	}
4179	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4180	/*
4181	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4182	 * can arrange for them to be freed with the bitmap.  Otherwise we
4183	 * no longer need this addref attached to the inoreflst and it
4184	 * will incorrectly adjust nlink if we leave it.
4185	 */
4186	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4187		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4188		    if_deps);
4189		jaddref->ja_state |= COMPLETE;
4190		free_jaddref(jaddref);
4191		return (needsj);
4192	}
4193	/*
4194	 * Leave the head of the list for jsegdeps for fast merging.
4195	 */
4196	if (LIST_FIRST(wkhd) != NULL) {
4197		jaddref->ja_state |= ONWORKLIST;
4198		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4199	} else
4200		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4201
4202	return (needsj);
4203}
4204
4205/*
4206 * Attempt to free a jaddref structure when some work completes.  This
4207 * should only succeed once the entry is written and all dependencies have
4208 * been notified.
4209 */
4210static void
4211free_jaddref(jaddref)
4212	struct jaddref *jaddref;
4213{
4214
4215	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4216		return;
4217	if (jaddref->ja_ref.if_jsegdep)
4218		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4219		    jaddref, jaddref->ja_state);
4220	if (jaddref->ja_state & NEWBLOCK)
4221		LIST_REMOVE(jaddref, ja_bmdeps);
4222	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4223		panic("free_jaddref: Bad state %p(0x%X)",
4224		    jaddref, jaddref->ja_state);
4225	if (jaddref->ja_mkdir != NULL)
4226		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4227	WORKITEM_FREE(jaddref, D_JADDREF);
4228}
4229
4230/*
4231 * Free a jremref structure once it has been written or discarded.
4232 */
4233static void
4234free_jremref(jremref)
4235	struct jremref *jremref;
4236{
4237
4238	if (jremref->jr_ref.if_jsegdep)
4239		free_jsegdep(jremref->jr_ref.if_jsegdep);
4240	if (jremref->jr_state & INPROGRESS)
4241		panic("free_jremref: IO still pending");
4242	WORKITEM_FREE(jremref, D_JREMREF);
4243}
4244
4245/*
4246 * Free a jnewblk structure.
4247 */
4248static void
4249free_jnewblk(jnewblk)
4250	struct jnewblk *jnewblk;
4251{
4252
4253	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4254		return;
4255	LIST_REMOVE(jnewblk, jn_deps);
4256	if (jnewblk->jn_dep != NULL)
4257		panic("free_jnewblk: Dependency still attached.");
4258	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4259}
4260
4261/*
4262 * Cancel a jnewblk which has been made redundant by frag extension.
4263 */
4264static void
4265cancel_jnewblk(jnewblk, wkhd)
4266	struct jnewblk *jnewblk;
4267	struct workhead *wkhd;
4268{
4269	struct jsegdep *jsegdep;
4270
4271	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4272	jsegdep = jnewblk->jn_jsegdep;
4273	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4274		panic("cancel_jnewblk: Invalid state");
4275	jnewblk->jn_jsegdep  = NULL;
4276	jnewblk->jn_dep = NULL;
4277	jnewblk->jn_state |= GOINGAWAY;
4278	if (jnewblk->jn_state & INPROGRESS) {
4279		jnewblk->jn_state &= ~INPROGRESS;
4280		WORKLIST_REMOVE(&jnewblk->jn_list);
4281		jwork_insert(wkhd, jsegdep);
4282	} else {
4283		free_jsegdep(jsegdep);
4284		remove_from_journal(&jnewblk->jn_list);
4285	}
4286	wake_worklist(&jnewblk->jn_list);
4287	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4288}
4289
4290static void
4291free_jblkdep(jblkdep)
4292	struct jblkdep *jblkdep;
4293{
4294
4295	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4296		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4297	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4298		WORKITEM_FREE(jblkdep, D_JTRUNC);
4299	else
4300		panic("free_jblkdep: Unexpected type %s",
4301		    TYPENAME(jblkdep->jb_list.wk_type));
4302}
4303
4304/*
4305 * Free a single jseg once it is no longer referenced in memory or on
4306 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4307 * to disappear.
4308 */
4309static void
4310free_jseg(jseg, jblocks)
4311	struct jseg *jseg;
4312	struct jblocks *jblocks;
4313{
4314	struct freework *freework;
4315
4316	/*
4317	 * Free freework structures that were lingering to indicate freed
4318	 * indirect blocks that forced journal write ordering on reallocate.
4319	 */
4320	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4321		indirblk_remove(freework);
4322	if (jblocks->jb_oldestseg == jseg)
4323		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4324	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4325	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4326	KASSERT(LIST_EMPTY(&jseg->js_entries),
4327	    ("free_jseg: Freed jseg has valid entries."));
4328	WORKITEM_FREE(jseg, D_JSEG);
4329}
4330
4331/*
4332 * Free all jsegs that meet the criteria for being reclaimed and update
4333 * oldestseg.
4334 */
4335static void
4336free_jsegs(jblocks)
4337	struct jblocks *jblocks;
4338{
4339	struct jseg *jseg;
4340
4341	/*
4342	 * Free only those jsegs which have none allocated before them to
4343	 * preserve the journal space ordering.
4344	 */
4345	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4346		/*
4347		 * Only reclaim space when nothing depends on this journal
4348		 * set and another set has written that it is no longer
4349		 * valid.
4350		 */
4351		if (jseg->js_refs != 0) {
4352			jblocks->jb_oldestseg = jseg;
4353			return;
4354		}
4355		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4356			break;
4357		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4358			break;
4359		/*
4360		 * We can free jsegs that didn't write entries when
4361		 * oldestwrseq == js_seq.
4362		 */
4363		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4364		    jseg->js_cnt != 0)
4365			break;
4366		free_jseg(jseg, jblocks);
4367	}
4368	/*
4369	 * If we exited the loop above we still must discover the
4370	 * oldest valid segment.
4371	 */
4372	if (jseg)
4373		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4374		     jseg = TAILQ_NEXT(jseg, js_next))
4375			if (jseg->js_refs != 0)
4376				break;
4377	jblocks->jb_oldestseg = jseg;
4378	/*
4379	 * The journal has no valid records but some jsegs may still be
4380	 * waiting on oldestwrseq to advance.  We force a small record
4381	 * out to permit these lingering records to be reclaimed.
4382	 */
4383	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4384		jblocks->jb_needseg = 1;
4385}
4386
4387/*
4388 * Release one reference to a jseg and free it if the count reaches 0.  This
4389 * should eventually reclaim journal space as well.
4390 */
4391static void
4392rele_jseg(jseg)
4393	struct jseg *jseg;
4394{
4395
4396	KASSERT(jseg->js_refs > 0,
4397	    ("free_jseg: Invalid refcnt %d", jseg->js_refs));
4398	if (--jseg->js_refs != 0)
4399		return;
4400	free_jsegs(jseg->js_jblocks);
4401}
4402
4403/*
4404 * Release a jsegdep and decrement the jseg count.
4405 */
4406static void
4407free_jsegdep(jsegdep)
4408	struct jsegdep *jsegdep;
4409{
4410
4411	if (jsegdep->jd_seg)
4412		rele_jseg(jsegdep->jd_seg);
4413	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4414}
4415
4416/*
4417 * Wait for a journal item to make it to disk.  Initiate journal processing
4418 * if required.
4419 */
4420static int
4421jwait(wk, waitfor)
4422	struct worklist *wk;
4423	int waitfor;
4424{
4425
4426	/*
4427	 * Blocking journal waits cause slow synchronous behavior.  Record
4428	 * stats on the frequency of these blocking operations.
4429	 */
4430	if (waitfor == MNT_WAIT) {
4431		stat_journal_wait++;
4432		switch (wk->wk_type) {
4433		case D_JREMREF:
4434		case D_JMVREF:
4435			stat_jwait_filepage++;
4436			break;
4437		case D_JTRUNC:
4438		case D_JFREEBLK:
4439			stat_jwait_freeblks++;
4440			break;
4441		case D_JNEWBLK:
4442			stat_jwait_newblk++;
4443			break;
4444		case D_JADDREF:
4445			stat_jwait_inode++;
4446			break;
4447		default:
4448			break;
4449		}
4450	}
4451	/*
4452	 * If IO has not started we process the journal.  We can't mark the
4453	 * worklist item as IOWAITING because we drop the lock while
4454	 * processing the journal and the worklist entry may be freed after
4455	 * this point.  The caller may call back in and re-issue the request.
4456	 */
4457	if ((wk->wk_state & INPROGRESS) == 0) {
4458		softdep_process_journal(wk->wk_mp, wk, waitfor);
4459		if (waitfor != MNT_WAIT)
4460			return (EBUSY);
4461		return (0);
4462	}
4463	if (waitfor != MNT_WAIT)
4464		return (EBUSY);
4465	wait_worklist(wk, "jwait");
4466	return (0);
4467}
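
/*
 * Illustrative sketch (editorial addition, not part of this revision): a
 * hypothetical caller of jwait() above.  The helper name is invented for
 * illustration; real callers pass the worklist embedded in a journal
 * dependency, typically while holding the softdep lock, and honor the
 * EBUSY return when a non-blocking sync is requested.
 */
#if 0
static int
example_wait_for_jaddref(struct jaddref *jaddref, int waitfor)
{

	/* MNT_NOWAIT-style callers get EBUSY back and must retry later. */
	if (jwait(&jaddref->ja_list, waitfor) != 0)
		return (EBUSY);
	/* A zero return means the blocking (MNT_WAIT) wait completed. */
	return (0);
}
#endif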
4468
4469/*
4470 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4471 * appropriate.  This is a convenience function to reduce duplicate code
4472 * for the setup and revert functions below.
4473 */
4474static struct inodedep *
4475inodedep_lookup_ip(ip)
4476	struct inode *ip;
4477{
4478	struct inodedep *inodedep;
4479	int dflags;
4480
4481	KASSERT(ip->i_nlink >= ip->i_effnlink,
4482	    ("inodedep_lookup_ip: bad delta"));
4483	dflags = DEPALLOC;
4484	if (IS_SNAPSHOT(ip))
4485		dflags |= NODELAY;
4486	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags,
4487	    &inodedep);
4488	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4489	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4490
4491	return (inodedep);
4492}
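
/*
 * Worked example (editorial addition): for an inode with three names,
 * removing one link drops i_effnlink to 2 immediately, while i_nlink
 * stays at 3 until the directory update is safely on disk, so the
 * lookup above records id_nlinkdelta = 3 - 2 = 1 as the amount to roll
 * back if the inode is written before the directory change commits.
 */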
4493
4494/*
4495 * Called prior to creating a new inode and linking it to a directory.  The
4496 * jaddref structure must already be allocated by softdep_setup_inomapdep
4497 * and it is discovered here so we can initialize the mode and update
4498 * nlinkdelta.
4499 */
4500void
4501softdep_setup_create(dp, ip)
4502	struct inode *dp;
4503	struct inode *ip;
4504{
4505	struct inodedep *inodedep;
4506	struct jaddref *jaddref;
4507	struct vnode *dvp;
4508
4509	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4510	    ("softdep_setup_create called on non-softdep filesystem"));
4511	KASSERT(ip->i_nlink == 1,
4512	    ("softdep_setup_create: Invalid link count."));
4513	dvp = ITOV(dp);
4514	ACQUIRE_LOCK(&lk);
4515	inodedep = inodedep_lookup_ip(ip);
4516	if (DOINGSUJ(dvp)) {
4517		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4518		    inoreflst);
4519		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4520		    ("softdep_setup_create: No addref structure present."));
4521	}
4522	softdep_prelink(dvp, NULL);
4523	FREE_LOCK(&lk);
4524}
4525
4526/*
4527 * Create a jaddref structure to track the addition of a DOTDOT link when
4528 * we are reparenting an inode as part of a rename.  This jaddref will be
4529 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4530 * non-journaling softdep.
4531 */
4532void
4533softdep_setup_dotdot_link(dp, ip)
4534	struct inode *dp;
4535	struct inode *ip;
4536{
4537	struct inodedep *inodedep;
4538	struct jaddref *jaddref;
4539	struct vnode *dvp;
4540	struct vnode *vp;
4541
4542	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4543	    ("softdep_setup_dotdot_link called on non-softdep filesystem"));
4544	dvp = ITOV(dp);
4545	vp = ITOV(ip);
4546	jaddref = NULL;
4547	/*
4548	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4549	 * is used as a normal link would be.
4550	 */
4551	if (DOINGSUJ(dvp))
4552		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4553		    dp->i_effnlink - 1, dp->i_mode);
4554	ACQUIRE_LOCK(&lk);
4555	inodedep = inodedep_lookup_ip(dp);
4556	if (jaddref)
4557		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4558		    if_deps);
4559	softdep_prelink(dvp, ITOV(ip));
4560	FREE_LOCK(&lk);
4561}
4562
4563/*
4564 * Create a jaddref structure to track a new link to an inode.  The directory
4565 * offset is not known until softdep_setup_directory_add or
4566 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4567 * softdep.
4568 */
4569void
4570softdep_setup_link(dp, ip)
4571	struct inode *dp;
4572	struct inode *ip;
4573{
4574	struct inodedep *inodedep;
4575	struct jaddref *jaddref;
4576	struct vnode *dvp;
4577
4578	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4579	    ("softdep_setup_link called on non-softdep filesystem"));
4580	dvp = ITOV(dp);
4581	jaddref = NULL;
4582	if (DOINGSUJ(dvp))
4583		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4584		    ip->i_mode);
4585	ACQUIRE_LOCK(&lk);
4586	inodedep = inodedep_lookup_ip(ip);
4587	if (jaddref)
4588		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4589		    if_deps);
4590	softdep_prelink(dvp, ITOV(ip));
4591	FREE_LOCK(&lk);
4592}
4593
4594/*
4595 * Called to create the jaddref structures to track . and .. references as
4596 * well as lookup and further initialize the incomplete jaddref created
4597 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4598 * nlinkdelta for non-journaling softdep.
4599 */
4600void
4601softdep_setup_mkdir(dp, ip)
4602	struct inode *dp;
4603	struct inode *ip;
4604{
4605	struct inodedep *inodedep;
4606	struct jaddref *dotdotaddref;
4607	struct jaddref *dotaddref;
4608	struct jaddref *jaddref;
4609	struct vnode *dvp;
4610
4611	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4612	    ("softdep_setup_mkdir called on non-softdep filesystem"));
4613	dvp = ITOV(dp);
4614	dotaddref = dotdotaddref = NULL;
4615	if (DOINGSUJ(dvp)) {
4616		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4617		    ip->i_mode);
4618		dotaddref->ja_state |= MKDIR_BODY;
4619		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4620		    dp->i_effnlink - 1, dp->i_mode);
4621		dotdotaddref->ja_state |= MKDIR_PARENT;
4622	}
4623	ACQUIRE_LOCK(&lk);
4624	inodedep = inodedep_lookup_ip(ip);
4625	if (DOINGSUJ(dvp)) {
4626		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4627		    inoreflst);
4628		KASSERT(jaddref != NULL,
4629		    ("softdep_setup_mkdir: No addref structure present."));
4630		KASSERT(jaddref->ja_parent == dp->i_number,
4631		    ("softdep_setup_mkdir: bad parent %ju",
4632		    (uintmax_t)jaddref->ja_parent));
4633		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4634		    if_deps);
4635	}
4636	inodedep = inodedep_lookup_ip(dp);
4637	if (DOINGSUJ(dvp))
4638		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4639		    &dotdotaddref->ja_ref, if_deps);
4640	softdep_prelink(ITOV(dp), NULL);
4641	FREE_LOCK(&lk);
4642}
4643
4644/*
4645 * Called to track nlinkdelta of the inode and parent directories prior to
4646 * unlinking a directory.
4647 */
4648void
4649softdep_setup_rmdir(dp, ip)
4650	struct inode *dp;
4651	struct inode *ip;
4652{
4653	struct vnode *dvp;
4654
4655	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4656	    ("softdep_setup_rmdir called on non-softdep filesystem"));
4657	dvp = ITOV(dp);
4658	ACQUIRE_LOCK(&lk);
4659	(void) inodedep_lookup_ip(ip);
4660	(void) inodedep_lookup_ip(dp);
4661	softdep_prelink(dvp, ITOV(ip));
4662	FREE_LOCK(&lk);
4663}
4664
4665/*
4666 * Called to track nlinkdelta of the inode and parent directories prior to
4667 * unlink.
4668 */
4669void
4670softdep_setup_unlink(dp, ip)
4671	struct inode *dp;
4672	struct inode *ip;
4673{
4674	struct vnode *dvp;
4675
4676	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4677	    ("softdep_setup_unlink called on non-softdep filesystem"));
4678	dvp = ITOV(dp);
4679	ACQUIRE_LOCK(&lk);
4680	(void) inodedep_lookup_ip(ip);
4681	(void) inodedep_lookup_ip(dp);
4682	softdep_prelink(dvp, ITOV(ip));
4683	FREE_LOCK(&lk);
4684}
4685
4686/*
4687 * Called to release the journal structures created by a failed non-directory
4688 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4689 */
4690void
4691softdep_revert_create(dp, ip)
4692	struct inode *dp;
4693	struct inode *ip;
4694{
4695	struct inodedep *inodedep;
4696	struct jaddref *jaddref;
4697	struct vnode *dvp;
4698
4699	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4700	    ("softdep_revert_create called on non-softdep filesystem"));
4701	dvp = ITOV(dp);
4702	ACQUIRE_LOCK(&lk);
4703	inodedep = inodedep_lookup_ip(ip);
4704	if (DOINGSUJ(dvp)) {
4705		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4706		    inoreflst);
4707		KASSERT(jaddref->ja_parent == dp->i_number,
4708		    ("softdep_revert_create: addref parent mismatch"));
4709		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4710	}
4711	FREE_LOCK(&lk);
4712}
4713
4714/*
4715 * Called to release the journal structures created by a failed link
4716 * addition.  Adjusts nlinkdelta for non-journaling softdep.
4717 */
4718void
4719softdep_revert_link(dp, ip)
4720	struct inode *dp;
4721	struct inode *ip;
4722{
4723	struct inodedep *inodedep;
4724	struct jaddref *jaddref;
4725	struct vnode *dvp;
4726
4727	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4728	    ("softdep_revert_link called on non-softdep filesystem"));
4729	dvp = ITOV(dp);
4730	ACQUIRE_LOCK(&lk);
4731	inodedep = inodedep_lookup_ip(ip);
4732	if (DOINGSUJ(dvp)) {
4733		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4734		    inoreflst);
4735		KASSERT(jaddref->ja_parent == dp->i_number,
4736		    ("softdep_revert_link: addref parent mismatch"));
4737		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4738	}
4739	FREE_LOCK(&lk);
4740}
4741
4742/*
4743 * Called to release the journal structures created by a failed mkdir
4744 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
4745 */
4746void
4747softdep_revert_mkdir(dp, ip)
4748	struct inode *dp;
4749	struct inode *ip;
4750{
4751	struct inodedep *inodedep;
4752	struct jaddref *jaddref;
4753	struct jaddref *dotaddref;
4754	struct vnode *dvp;
4755
4756	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4757	    ("softdep_revert_mkdir called on non-softdep filesystem"));
4758	dvp = ITOV(dp);
4759
4760	ACQUIRE_LOCK(&lk);
4761	inodedep = inodedep_lookup_ip(dp);
4762	if (DOINGSUJ(dvp)) {
4763		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4764		    inoreflst);
4765		KASSERT(jaddref->ja_parent == ip->i_number,
4766		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4767		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4768	}
4769	inodedep = inodedep_lookup_ip(ip);
4770	if (DOINGSUJ(dvp)) {
4771		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4772		    inoreflst);
4773		KASSERT(jaddref->ja_parent == dp->i_number,
4774		    ("softdep_revert_mkdir: addref parent mismatch"));
4775		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4776		    inoreflst, if_deps);
4777		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4778		KASSERT(dotaddref->ja_parent == ip->i_number,
4779		    ("softdep_revert_mkdir: dot addref parent mismatch"));
4780		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4781	}
4782	FREE_LOCK(&lk);
4783}
4784
4785/*
4786 * Called to correct nlinkdelta after a failed rmdir.
4787 */
4788void
4789softdep_revert_rmdir(dp, ip)
4790	struct inode *dp;
4791	struct inode *ip;
4792{
4793
4794	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(dp->i_ump)) != 0,
4795	    ("softdep_revert_rmdir called on non-softdep filesystem"));
4796	ACQUIRE_LOCK(&lk);
4797	(void) inodedep_lookup_ip(ip);
4798	(void) inodedep_lookup_ip(dp);
4799	FREE_LOCK(&lk);
4800}
4801
4802/*
4803 * Protecting the freemaps (or bitmaps).
4804 *
4805 * To eliminate the need to execute fsck before mounting a filesystem
4806 * after a power failure, one must (conservatively) guarantee that the
4807 * on-disk copy of the bitmaps never indicates that a live inode or block is
4808 * free.  So, when a block or inode is allocated, the bitmap should be
4809 * updated (on disk) before any new pointers.  When a block or inode is
4810 * freed, the bitmap should not be updated until all pointers have been
4811 * reset.  The latter dependency is handled by the delayed de-allocation
4812 * approach described below for block and inode de-allocation.  The former
4813 * dependency is handled by calling the following procedure when a block or
4814 * inode is allocated. When an inode is allocated an "inodedep" is created
4815 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4816 * Each "inodedep" is also inserted into the hash indexing structure so
4817 * that any additional link additions can be made dependent on the inode
4818 * allocation.
4819 *
4820 * The ufs filesystem maintains a number of free block counts (e.g., per
4821 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4822 * in addition to the bitmaps.  These counts are used to improve efficiency
4823 * during allocation and therefore must be consistent with the bitmaps.
4824 * There is no convenient way to guarantee post-crash consistency of these
4825 * counts with simple update ordering, for two main reasons: (1) The counts
4826 * and bitmaps for a single cylinder group block are not in the same disk
4827 * sector.  If a disk write is interrupted (e.g., by power failure), one may
4828 * be written and the other not.  (2) Some of the counts are located in the
4829 * superblock rather than the cylinder group block. So, we focus our soft
4830 * updates implementation on protecting the bitmaps. When mounting a
4831 * filesystem, we recompute the auxiliary counts from the bitmaps.
4832 */
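
/*
 * Illustrative sketch (editorial addition): the two ordering rules stated
 * above, spelled out as plain C.  disk_write() is a hypothetical stand-in
 * for a synchronous buffer write; only the order of the writes matters.
 */
#if 0
static void
disk_write(const char *what)
{
	/* Stand-in for bwrite() of the named on-disk structure. */
	(void)what;
}

static void
allocation_ordering_example(void)
{
	/* Allocation: the bitmap reaches the disk before any pointer. */
	disk_write("cylinder group bitmap with block/inode marked in use");
	disk_write("inode or indirect block containing the new pointer");
}

static void
deallocation_ordering_example(void)
{
	/* Freeing: every on-disk pointer is cleared before the bitmap. */
	disk_write("inode or indirect block with the old pointer cleared");
	disk_write("cylinder group bitmap with block/inode marked free");
}
#endif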
4833
4834/*
4835 * Called just after updating the cylinder group block to allocate an inode.
4836 */
4837void
4838softdep_setup_inomapdep(bp, ip, newinum, mode)
4839	struct buf *bp;		/* buffer for cylgroup block with inode map */
4840	struct inode *ip;	/* inode related to allocation */
4841	ino_t newinum;		/* new inode number being allocated */
4842	int mode;
4843{
4844	struct inodedep *inodedep;
4845	struct bmsafemap *bmsafemap;
4846	struct jaddref *jaddref;
4847	struct mount *mp;
4848	struct fs *fs;
4849
4850	mp = UFSTOVFS(ip->i_ump);
4851	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
4852	    ("softdep_setup_inomapdep called on non-softdep filesystem"));
4853	fs = ip->i_ump->um_fs;
4854	jaddref = NULL;
4855
4856	/*
4857	 * Allocate the journal reference add structure so that the bitmap
4858	 * can be dependent on it.
4859	 */
4860	if (MOUNTEDSUJ(mp)) {
4861		jaddref = newjaddref(ip, newinum, 0, 0, mode);
4862		jaddref->ja_state |= NEWBLOCK;
4863	}
4864
4865	/*
4866	 * Create a dependency for the newly allocated inode.
4867	 * Panic if it already exists as something is seriously wrong.
4868	 * Otherwise add it to the dependency list for the buffer holding
4869	 * the cylinder group map from which it was allocated.
4870	 *
4871	 * We have to preallocate a bmsafemap entry in case it is needed
4872	 * in bmsafemap_lookup since once we allocate the inodedep, we
4873	 * have to finish initializing it before we can FREE_LOCK().
4874	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
4875	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
4876	 * creating the inodedep as it can be freed during the time
4877	 * that we FREE_LOCK() while allocating the inodedep. We must
4878	 * call workitem_alloc() before entering the locked section as
4879	 * it also acquires the lock and we must avoid doing so
4880	 * recursively.
4881	 */
4882	bmsafemap = malloc(sizeof(struct bmsafemap),
4883	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
4884	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
4885	ACQUIRE_LOCK(&lk);
4886	if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)))
4887		panic("softdep_setup_inomapdep: dependency %p for new"
4888		    "inode already exists", inodedep);
4889	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
4890	if (jaddref) {
4891		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
4892		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4893		    if_deps);
4894	} else {
4895		inodedep->id_state |= ONDEPLIST;
4896		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
4897	}
4898	inodedep->id_bmsafemap = bmsafemap;
4899	inodedep->id_state &= ~DEPCOMPLETE;
4900	FREE_LOCK(&lk);
4901}
4902
4903/*
4904 * Called just after updating the cylinder group block to
4905 * allocate block or fragment.
4906 */
4907void
4908softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
4909	struct buf *bp;		/* buffer for cylgroup block with block map */
4910	struct mount *mp;	/* filesystem doing allocation */
4911	ufs2_daddr_t newblkno;	/* number of newly allocated block */
4912	int frags;		/* Number of fragments. */
4913	int oldfrags;		/* Previous number of fragments for extend. */
4914{
4915	struct newblk *newblk;
4916	struct bmsafemap *bmsafemap;
4917	struct jnewblk *jnewblk;
4918	struct fs *fs;
4919
4920	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
4921	    ("softdep_setup_blkmapdep called on non-softdep filesystem"));
4922	fs = VFSTOUFS(mp)->um_fs;
4923	jnewblk = NULL;
4924	/*
4925	 * Create a dependency for the newly allocated block.
4926	 * Add it to the dependency list for the buffer holding
4927	 * the cylinder group map from which it was allocated.
4928	 */
4929	if (MOUNTEDSUJ(mp)) {
4930		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
4931		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
4932		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
4933		jnewblk->jn_state = ATTACHED;
4934		jnewblk->jn_blkno = newblkno;
4935		jnewblk->jn_frags = frags;
4936		jnewblk->jn_oldfrags = oldfrags;
4937#ifdef SUJ_DEBUG
4938		{
4939			struct cg *cgp;
4940			uint8_t *blksfree;
4941			long bno;
4942			int i;
4943
4944			cgp = (struct cg *)bp->b_data;
4945			blksfree = cg_blksfree(cgp);
4946			bno = dtogd(fs, jnewblk->jn_blkno);
4947			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
4948			    i++) {
4949				if (isset(blksfree, bno + i))
4950					panic("softdep_setup_blkmapdep: "
4951					    "free fragment %d from %d-%d "
4952					    "state 0x%X dep %p", i,
4953					    jnewblk->jn_oldfrags,
4954					    jnewblk->jn_frags,
4955					    jnewblk->jn_state,
4956					    jnewblk->jn_dep);
4957			}
4958		}
4959#endif
4960	}
4961
4962	CTR3(KTR_SUJ,
4963	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
4964	    newblkno, frags, oldfrags);
4965	ACQUIRE_LOCK(&lk);
4966	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
4967		panic("softdep_setup_blkmapdep: found block");
4968	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp,
4969	    dtog(fs, newblkno), NULL);
4970	if (jnewblk) {
4971		jnewblk->jn_dep = (struct worklist *)newblk;
4972		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
4973	} else {
4974		newblk->nb_state |= ONDEPLIST;
4975		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
4976	}
4977	newblk->nb_bmsafemap = bmsafemap;
4978	newblk->nb_jnewblk = jnewblk;
4979	FREE_LOCK(&lk);
4980}
4981
4982#define	BMSAFEMAP_HASH(fs, cg) \
4983      (&bmsafemap_hashtbl[((((register_t)(fs)) >> 13) + (cg)) & bmsafemap_hash])
4984
4985static int
4986bmsafemap_find(bmsafemaphd, mp, cg, bmsafemapp)
4987	struct bmsafemap_hashhead *bmsafemaphd;
4988	struct mount *mp;
4989	int cg;
4990	struct bmsafemap **bmsafemapp;
4991{
4992	struct bmsafemap *bmsafemap;
4993
4994	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
4995		if (bmsafemap->sm_list.wk_mp == mp && bmsafemap->sm_cg == cg)
4996			break;
4997	if (bmsafemap) {
4998		*bmsafemapp = bmsafemap;
4999		return (1);
5000	}
5001	*bmsafemapp = NULL;
5002
5003	return (0);
5004}
5005
5006/*
5007 * Find the bmsafemap associated with a cylinder group buffer.
5008 * If none exists, create one. The buffer must be locked when
5009 * this routine is called and this routine must be called with
5010 * the softdep lock held. To avoid giving up the lock while
5011 * allocating a new bmsafemap, a preallocated bmsafemap may be
5012 * provided. If it is provided but not needed, it is freed.
5013 */
5014static struct bmsafemap *
5015bmsafemap_lookup(mp, bp, cg, newbmsafemap)
5016	struct mount *mp;
5017	struct buf *bp;
5018	int cg;
5019	struct bmsafemap *newbmsafemap;
5020{
5021	struct bmsafemap_hashhead *bmsafemaphd;
5022	struct bmsafemap *bmsafemap, *collision;
5023	struct worklist *wk;
5024	struct fs *fs;
5025
5026	rw_assert(&lk, RA_WLOCKED);
5027	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5028	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5029		if (wk->wk_type == D_BMSAFEMAP) {
5030			if (newbmsafemap)
5031				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5032			return (WK_BMSAFEMAP(wk));
5033		}
5034	}
5035	fs = VFSTOUFS(mp)->um_fs;
5036	bmsafemaphd = BMSAFEMAP_HASH(fs, cg);
5037	if (bmsafemap_find(bmsafemaphd, mp, cg, &bmsafemap) == 1) {
5038		if (newbmsafemap)
5039			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5040		return (bmsafemap);
5041	}
5042	if (newbmsafemap) {
5043		bmsafemap = newbmsafemap;
5044	} else {
5045		FREE_LOCK(&lk);
5046		bmsafemap = malloc(sizeof(struct bmsafemap),
5047			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5048		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5049		ACQUIRE_LOCK(&lk);
5050	}
5051	bmsafemap->sm_buf = bp;
5052	LIST_INIT(&bmsafemap->sm_inodedephd);
5053	LIST_INIT(&bmsafemap->sm_inodedepwr);
5054	LIST_INIT(&bmsafemap->sm_newblkhd);
5055	LIST_INIT(&bmsafemap->sm_newblkwr);
5056	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5057	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5058	LIST_INIT(&bmsafemap->sm_freehd);
5059	LIST_INIT(&bmsafemap->sm_freewr);
5060	if (bmsafemap_find(bmsafemaphd, mp, cg, &collision) == 1) {
5061		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5062		return (collision);
5063	}
5064	bmsafemap->sm_cg = cg;
5065	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5066	LIST_INSERT_HEAD(&VFSTOUFS(mp)->softdep_dirtycg, bmsafemap, sm_next);
5067	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5068	return (bmsafemap);
5069}
5070
5071/*
5072 * Direct block allocation dependencies.
5073 *
5074 * When a new block is allocated, the corresponding disk locations must be
5075 * initialized (with zeros or new data) before the on-disk inode points to
5076 * them.  Also, the freemap from which the block was allocated must be
5077 * updated (on disk) before the inode's pointer. These two dependencies are
5078 * independent of each other and are needed for all file blocks and indirect
5079 * blocks that are pointed to directly by the inode.  Just before the
5080 * "in-core" version of the inode is updated with a newly allocated block
5081 * number, a procedure (below) is called to setup allocation dependency
5082 * structures.  These structures are removed when the corresponding
5083 * dependencies are satisfied or when the block allocation becomes obsolete
5084 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5085 * fragment that gets upgraded).  All of these cases are handled in
5086 * procedures described later.
5087 *
5088 * When a file extension causes a fragment to be upgraded, either to a larger
5089 * fragment or to a full block, the on-disk location may change (if the
5090 * previous fragment could not simply be extended). In this case, the old
5091 * fragment must be de-allocated, but not until after the inode's pointer has
5092 * been updated. In most cases, this is handled by later procedures, which
5093 * will construct a "freefrag" structure to be added to the workitem queue
5094 * when the inode update is complete (or obsolete).  The main exception to
5095 * this is when an allocation occurs while a pending allocation dependency
5096 * (for the same block pointer) remains.  This case is handled in the main
5097 * allocation dependency setup procedure by immediately freeing the
5098 * unreferenced fragments.
5099 */
5100void
5101softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5102	struct inode *ip;	/* inode to which block is being added */
5103	ufs_lbn_t off;		/* block pointer within inode */
5104	ufs2_daddr_t newblkno;	/* disk block number being added */
5105	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
5106	long newsize;		/* size of new block */
5107	long oldsize;		/* size of old block */
5108	struct buf *bp;		/* bp for allocated block */
5109{
5110	struct allocdirect *adp, *oldadp;
5111	struct allocdirectlst *adphead;
5112	struct freefrag *freefrag;
5113	struct inodedep *inodedep;
5114	struct pagedep *pagedep;
5115	struct jnewblk *jnewblk;
5116	struct newblk *newblk;
5117	struct mount *mp;
5118	ufs_lbn_t lbn;
5119
5120	lbn = bp->b_lblkno;
5121	mp = UFSTOVFS(ip->i_ump);
5122	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5123	    ("softdep_setup_allocdirect called on non-softdep filesystem"));
5124	if (oldblkno && oldblkno != newblkno)
5125		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5126	else
5127		freefrag = NULL;
5128
5129	CTR6(KTR_SUJ,
5130	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
5131	    "off %jd newsize %ld oldsize %d",
5132	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5133	ACQUIRE_LOCK(&lk);
5134	if (off >= NDADDR) {
5135		if (lbn > 0)
5136			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5137			    lbn, off);
5138		/* allocating an indirect block */
5139		if (oldblkno != 0)
5140			panic("softdep_setup_allocdirect: non-zero indir");
5141	} else {
5142		if (off != lbn)
5143			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5144			    lbn, off);
5145		/*
5146		 * Allocating a direct block.
5147		 *
5148		 * If we are allocating a directory block, then we must
5149		 * allocate an associated pagedep to track additions and
5150		 * deletions.
5151		 */
5152		if ((ip->i_mode & IFMT) == IFDIR)
5153			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5154			    &pagedep);
5155	}
5156	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5157		panic("softdep_setup_allocdirect: lost block");
5158	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5159	    ("softdep_setup_allocdirect: newblk already initialized"));
5160	/*
5161	 * Convert the newblk to an allocdirect.
5162	 */
5163	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5164	adp = (struct allocdirect *)newblk;
5165	newblk->nb_freefrag = freefrag;
5166	adp->ad_offset = off;
5167	adp->ad_oldblkno = oldblkno;
5168	adp->ad_newsize = newsize;
5169	adp->ad_oldsize = oldsize;
5170
5171	/*
5172	 * Finish initializing the journal.
5173	 */
5174	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5175		jnewblk->jn_ino = ip->i_number;
5176		jnewblk->jn_lbn = lbn;
5177		add_to_journal(&jnewblk->jn_list);
5178	}
5179	if (freefrag && freefrag->ff_jdep != NULL &&
5180	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5181		add_to_journal(freefrag->ff_jdep);
5182	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5183	adp->ad_inodedep = inodedep;
5184
5185	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5186	/*
5187	 * The list of allocdirects must be kept in sorted and ascending
5188	 * order so that the rollback routines can quickly determine the
5189	 * first uncommitted block (the size of the file stored on disk
5190	 * ends at the end of the lowest committed fragment, or if there
5191	 * are no fragments, at the end of the highest committed block).
5192	 * Since files generally grow, the typical case is that the new
5193	 * block is to be added at the end of the list. We speed this
5194	 * special case by checking against the last allocdirect in the
5195	 * list before laboriously traversing the list looking for the
5196	 * insertion point.
5197	 */
5198	adphead = &inodedep->id_newinoupdt;
5199	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5200	if (oldadp == NULL || oldadp->ad_offset <= off) {
5201		/* insert at end of list */
5202		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5203		if (oldadp != NULL && oldadp->ad_offset == off)
5204			allocdirect_merge(adphead, adp, oldadp);
5205		FREE_LOCK(&lk);
5206		return;
5207	}
5208	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5209		if (oldadp->ad_offset >= off)
5210			break;
5211	}
5212	if (oldadp == NULL)
5213		panic("softdep_setup_allocdirect: lost entry");
5214	/* insert in middle of list */
5215	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5216	if (oldadp->ad_offset == off)
5217		allocdirect_merge(adphead, adp, oldadp);
5218
5219	FREE_LOCK(&lk);
5220}
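
/*
 * Worked example (editorial addition, hypothetical sizes): a file whose
 * tail occupies a 2KB fragment grows past the fragment and is given a
 * full 16KB block at a new disk address.  softdep_setup_allocdirect()
 * above is then called with oldblkno naming the 2KB fragment, so a
 * freefrag work item is created; the old fragment is only returned to
 * the free list once the inode naming the new block has been written
 * (or the allocation itself becomes obsolete), as described in the
 * comment block preceding the function.
 */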
5221
5222/*
5223 * Merge a newer and older journal record to be stored either in a
5224 * newblock or freefrag.  This handles aggregating journal records for
5225 * fragment allocation into a second record as well as replacing a
5226 * journal free with an aborted journal allocation.  A segment for the
5227 * oldest record will be placed on wkhd if it has been written.  If not
5228 * the segment for the newer record will suffice.
5229 */
5230static struct worklist *
5231jnewblk_merge(new, old, wkhd)
5232	struct worklist *new;
5233	struct worklist *old;
5234	struct workhead *wkhd;
5235{
5236	struct jnewblk *njnewblk;
5237	struct jnewblk *jnewblk;
5238
5239	/* Handle NULLs to simplify callers. */
5240	if (new == NULL)
5241		return (old);
5242	if (old == NULL)
5243		return (new);
5244	/* Replace a jfreefrag with a jnewblk. */
5245	if (new->wk_type == D_JFREEFRAG) {
5246		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5247			panic("jnewblk_merge: blkno mismatch: %p, %p",
5248			    old, new);
5249		cancel_jfreefrag(WK_JFREEFRAG(new));
5250		return (old);
5251	}
5252	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5253		panic("jnewblk_merge: Bad type: old %d new %d\n",
5254		    old->wk_type, new->wk_type);
5255	/*
5256	 * Handle merging of two jnewblk records that describe
5257	 * different sets of fragments in the same block.
5258	 */
5259	jnewblk = WK_JNEWBLK(old);
5260	njnewblk = WK_JNEWBLK(new);
5261	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5262		panic("jnewblk_merge: Merging disparate blocks.");
5263	/*
5264	 * The record may be rolled back in the cg.
5265	 */
5266	if (jnewblk->jn_state & UNDONE) {
5267		jnewblk->jn_state &= ~UNDONE;
5268		njnewblk->jn_state |= UNDONE;
5269		njnewblk->jn_state &= ~ATTACHED;
5270	}
5271	/*
5272	 * We modify the newer addref and free the older so that if neither
5273	 * has been written the most up-to-date copy will be on disk.  If
5274	 * both have been written but rolled back we only temporarily need
5275	 * one of them to fix the bits when the cg write completes.
5276	 */
5277	jnewblk->jn_state |= ATTACHED | COMPLETE;
5278	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5279	cancel_jnewblk(jnewblk, wkhd);
5280	WORKLIST_REMOVE(&jnewblk->jn_list);
5281	free_jnewblk(jnewblk);
5282	return (new);
5283}
5284
5285/*
5286 * Replace an old allocdirect dependency with a newer one.
5287 * This routine must be called with splbio interrupts blocked.
5288 */
5289static void
5290allocdirect_merge(adphead, newadp, oldadp)
5291	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
5292	struct allocdirect *newadp;	/* allocdirect being added */
5293	struct allocdirect *oldadp;	/* existing allocdirect being checked */
5294{
5295	struct worklist *wk;
5296	struct freefrag *freefrag;
5297
5298	freefrag = NULL;
5299	rw_assert(&lk, RA_WLOCKED);
5300	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5301	    newadp->ad_oldsize != oldadp->ad_newsize ||
5302	    newadp->ad_offset >= NDADDR)
5303		panic("%s %jd != new %jd || old size %ld != new %ld",
5304		    "allocdirect_merge: old blkno",
5305		    (intmax_t)newadp->ad_oldblkno,
5306		    (intmax_t)oldadp->ad_newblkno,
5307		    newadp->ad_oldsize, oldadp->ad_newsize);
5308	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5309	newadp->ad_oldsize = oldadp->ad_oldsize;
5310	/*
5311	 * If the old dependency had a fragment to free or had never
5312	 * previously had a block allocated, then the new dependency
5313	 * can immediately post its freefrag and adopt the old freefrag.
5314	 * This action is done by swapping the freefrag dependencies.
5315	 * The new dependency gains the old one's freefrag, and the
5316	 * old one gets the new one and then immediately puts it on
5317	 * the worklist when it is freed by free_newblk. It is
5318	 * not possible to do this swap when the old dependency had a
5319	 * non-zero size but no previous fragment to free. This condition
5320	 * arises when the new block is an extension of the old block.
5321	 * Here, the first part of the fragment allocated to the new
5322	 * dependency is part of the block currently claimed on disk by
5323	 * the old dependency, so cannot legitimately be freed until the
5324	 * conditions for the new dependency are fulfilled.
5325	 */
5326	freefrag = newadp->ad_freefrag;
5327	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5328		newadp->ad_freefrag = oldadp->ad_freefrag;
5329		oldadp->ad_freefrag = freefrag;
5330	}
5331	/*
5332	 * If we are tracking a new directory-block allocation,
5333	 * move it from the old allocdirect to the new allocdirect.
5334	 */
5335	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5336		WORKLIST_REMOVE(wk);
5337		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5338			panic("allocdirect_merge: extra newdirblk");
5339		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5340	}
5341	TAILQ_REMOVE(adphead, oldadp, ad_next);
5342	/*
5343	 * We need to move any journal dependencies over to the freefrag
5344	 * that releases this block if it exists.  Otherwise we are
5345	 * extending an existing block and we'll wait until that is
5346	 * complete to release the journal space and extend the
5347	 * new journal to cover this old space as well.
5348	 */
5349	if (freefrag == NULL) {
5350		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5351			panic("allocdirect_merge: %jd != %jd",
5352			    oldadp->ad_newblkno, newadp->ad_newblkno);
5353		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5354		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5355		    &oldadp->ad_block.nb_jnewblk->jn_list,
5356		    &newadp->ad_block.nb_jwork);
5357		oldadp->ad_block.nb_jnewblk = NULL;
5358		cancel_newblk(&oldadp->ad_block, NULL,
5359		    &newadp->ad_block.nb_jwork);
5360	} else {
5361		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5362		    &freefrag->ff_list, &freefrag->ff_jwork);
5363		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5364		    &freefrag->ff_jwork);
5365	}
5366	free_newblk(&oldadp->ad_block);
5367}
5368
5369/*
5370 * Allocate a jfreefrag structure to journal a single block free.
5371 */
5372static struct jfreefrag *
5373newjfreefrag(freefrag, ip, blkno, size, lbn)
5374	struct freefrag *freefrag;
5375	struct inode *ip;
5376	ufs2_daddr_t blkno;
5377	long size;
5378	ufs_lbn_t lbn;
5379{
5380	struct jfreefrag *jfreefrag;
5381	struct fs *fs;
5382
5383	fs = ip->i_fs;
5384	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5385	    M_SOFTDEP_FLAGS);
5386	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5387	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5388	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5389	jfreefrag->fr_ino = ip->i_number;
5390	jfreefrag->fr_lbn = lbn;
5391	jfreefrag->fr_blkno = blkno;
5392	jfreefrag->fr_frags = numfrags(fs, size);
5393	jfreefrag->fr_freefrag = freefrag;
5394
5395	return (jfreefrag);
5396}
5397
5398/*
5399 * Allocate a new freefrag structure.
5400 */
5401static struct freefrag *
5402newfreefrag(ip, blkno, size, lbn)
5403	struct inode *ip;
5404	ufs2_daddr_t blkno;
5405	long size;
5406	ufs_lbn_t lbn;
5407{
5408	struct freefrag *freefrag;
5409	struct fs *fs;
5410
5411	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5412	    ip->i_number, blkno, size, lbn);
5413	fs = ip->i_fs;
5414	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5415		panic("newfreefrag: frag size");
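	/*
	 * Illustrative note (assuming fs_frag == 8): a request to free
	 * 3 fragments starting at fragment offset 6 within a block would
	 * span a block boundary (6 + 3 > 8), which the check above rejects.
	 */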
5416	freefrag = malloc(sizeof(struct freefrag),
5417	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5418	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5419	freefrag->ff_state = ATTACHED;
5420	LIST_INIT(&freefrag->ff_jwork);
5421	freefrag->ff_inum = ip->i_number;
5422	freefrag->ff_vtype = ITOV(ip)->v_type;
5423	freefrag->ff_blkno = blkno;
5424	freefrag->ff_fragsize = size;
5425
5426	if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5427		freefrag->ff_jdep = (struct worklist *)
5428		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5429	} else {
5430		freefrag->ff_state |= DEPCOMPLETE;
5431		freefrag->ff_jdep = NULL;
5432	}
5433
5434	return (freefrag);
5435}
5436
5437/*
5438 * This workitem de-allocates fragments that were replaced during
5439 * file block allocation.
5440 */
5441static void
5442handle_workitem_freefrag(freefrag)
5443	struct freefrag *freefrag;
5444{
5445	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5446	struct workhead wkhd;
5447
5448	CTR3(KTR_SUJ,
5449	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5450	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5451	/*
5452	 * It would be illegal to add new completion items to the
5453	 * freefrag after it was scheduled to be done, so it must be
5454	 * safe to modify the list head here.
5455	 */
5456	LIST_INIT(&wkhd);
5457	ACQUIRE_LOCK(&lk);
5458	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5459	/*
5460	 * If the journal has not been written we must cancel it here.
5461	 */
5462	if (freefrag->ff_jdep) {
5463		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
5464			panic("handle_workitem_freefrag: Unexpected type %d\n",
5465			    freefrag->ff_jdep->wk_type);
5466		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5467	}
5468	FREE_LOCK(&lk);
5469	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5470	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5471	ACQUIRE_LOCK(&lk);
5472	WORKITEM_FREE(freefrag, D_FREEFRAG);
5473	FREE_LOCK(&lk);
5474}
5475
5476/*
5477 * Set up a dependency structure for an external attributes data block.
5478 * This routine follows much of the structure of softdep_setup_allocdirect.
5479 * See the description of softdep_setup_allocdirect above for details.
5480 */
5481void
5482softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5483	struct inode *ip;
5484	ufs_lbn_t off;
5485	ufs2_daddr_t newblkno;
5486	ufs2_daddr_t oldblkno;
5487	long newsize;
5488	long oldsize;
5489	struct buf *bp;
5490{
5491	struct allocdirect *adp, *oldadp;
5492	struct allocdirectlst *adphead;
5493	struct freefrag *freefrag;
5494	struct inodedep *inodedep;
5495	struct jnewblk *jnewblk;
5496	struct newblk *newblk;
5497	struct mount *mp;
5498	ufs_lbn_t lbn;
5499
5500	mp = UFSTOVFS(ip->i_ump);
5501	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5502	    ("softdep_setup_allocext called on non-softdep filesystem"));
5503	KASSERT(off < NXADDR, ("softdep_setup_allocext: lbn %lld >= NXADDR",
5504		    (long long)off));
5505
5506	lbn = bp->b_lblkno;
5507	if (oldblkno && oldblkno != newblkno)
5508		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5509	else
5510		freefrag = NULL;
5511
5512	ACQUIRE_LOCK(&lk);
5513	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5514		panic("softdep_setup_allocext: lost block");
5515	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5516	    ("softdep_setup_allocext: newblk already initialized"));
5517	/*
5518	 * Convert the newblk to an allocdirect.
5519	 */
5520	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5521	adp = (struct allocdirect *)newblk;
5522	newblk->nb_freefrag = freefrag;
5523	adp->ad_offset = off;
5524	adp->ad_oldblkno = oldblkno;
5525	adp->ad_newsize = newsize;
5526	adp->ad_oldsize = oldsize;
5527	adp->ad_state |=  EXTDATA;
5528
5529	/*
5530	 * Finish initializing the journal.
5531	 */
5532	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5533		jnewblk->jn_ino = ip->i_number;
5534		jnewblk->jn_lbn = lbn;
5535		add_to_journal(&jnewblk->jn_list);
5536	}
5537	if (freefrag && freefrag->ff_jdep != NULL &&
5538	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5539		add_to_journal(freefrag->ff_jdep);
5540	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5541	adp->ad_inodedep = inodedep;
5542
5543	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5544	/*
5545	 * The list of allocdirects must be kept sorted in ascending
5546	 * order so that the rollback routines can quickly determine the
5547	 * first uncommitted block (the size of the file stored on disk
5548	 * ends at the end of the lowest committed fragment, or if there
5549	 * are no fragments, at the end of the highest committed block).
5550	 * Since files generally grow, the typical case is that the new
5551	 * block is to be added at the end of the list. We speed this
5552	 * special case by checking against the last allocdirect in the
5553	 * list before laboriously traversing the list looking for the
5554	 * insertion point.
5555	 */
5556	adphead = &inodedep->id_newextupdt;
5557	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5558	if (oldadp == NULL || oldadp->ad_offset <= off) {
5559		/* insert at end of list */
5560		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5561		if (oldadp != NULL && oldadp->ad_offset == off)
5562			allocdirect_merge(adphead, adp, oldadp);
5563		FREE_LOCK(&lk);
5564		return;
5565	}
5566	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5567		if (oldadp->ad_offset >= off)
5568			break;
5569	}
5570	if (oldadp == NULL)
5571		panic("softdep_setup_allocext: lost entry");
5572	/* insert in middle of list */
5573	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5574	if (oldadp->ad_offset == off)
5575		allocdirect_merge(adphead, adp, oldadp);
5576	FREE_LOCK(&lk);
5577}
5578
5579/*
5580 * Indirect block allocation dependencies.
5581 *
5582 * The same dependencies that exist for a direct block also exist when
5583 * a new block is allocated and pointed to by an entry in a block of
5584 * indirect pointers. The undo/redo states described above are also
5585 * used here. Because an indirect block contains many pointers that
5586 * may have dependencies, a second copy of the entire in-memory indirect
5587 * block is kept. The buffer cache copy is always completely up-to-date.
5588 * The second copy, which is used only as a source for disk writes,
5589 * contains only the safe pointers (i.e., those that have no remaining
5590 * update dependencies). The second copy is freed when all pointers
5591 * are safe. The cache is not allowed to replace indirect blocks with
5592 * pending update dependencies. If a buffer containing an indirect
5593 * block with dependencies is written, these routines will mark it
5594 * dirty again. It can only be successfully written once all the
5595 * dependencies are removed. The ffs_fsync routine in conjunction with
5596 * softdep_sync_metadata work together to get all the dependencies
5597 * removed so that a file can be successfully written to disk. Three
5598 * procedures are used when setting up indirect block pointer
5599 * dependencies. The division is necessary because of the organization
5600 * of the "balloc" routine and because of the distinction between file
5601 * pages and file metadata blocks.
5602 */
5603
5604/*
5605 * Allocate a new allocindir structure.
5606 */
5607static struct allocindir *
5608newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5609	struct inode *ip;	/* inode for file being extended */
5610	int ptrno;		/* offset of pointer in indirect block */
5611	ufs2_daddr_t newblkno;	/* disk block number being added */
5612	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5613	ufs_lbn_t lbn;
5614{
5615	struct newblk *newblk;
5616	struct allocindir *aip;
5617	struct freefrag *freefrag;
5618	struct jnewblk *jnewblk;
5619
5620	if (oldblkno)
5621		freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5622	else
5623		freefrag = NULL;
5624	ACQUIRE_LOCK(&lk);
5625	if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
5626		panic("new_allocindir: lost block");
5627	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5628	    ("newallocindir: newblk already initialized"));
5629	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
5630	newblk->nb_freefrag = freefrag;
5631	aip = (struct allocindir *)newblk;
5632	aip->ai_offset = ptrno;
5633	aip->ai_oldblkno = oldblkno;
5634	aip->ai_lbn = lbn;
5635	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5636		jnewblk->jn_ino = ip->i_number;
5637		jnewblk->jn_lbn = lbn;
5638		add_to_journal(&jnewblk->jn_list);
5639	}
5640	if (freefrag && freefrag->ff_jdep != NULL &&
5641	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5642		add_to_journal(freefrag->ff_jdep);
5643	return (aip);
5644}
5645
5646/*
5647 * Called just before setting an indirect block pointer
5648 * to a newly allocated file page.
5649 */
5650void
5651softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5652	struct inode *ip;	/* inode for file being extended */
5653	ufs_lbn_t lbn;		/* allocated block number within file */
5654	struct buf *bp;		/* buffer with indirect blk referencing page */
5655	int ptrno;		/* offset of pointer in indirect block */
5656	ufs2_daddr_t newblkno;	/* disk block number being added */
5657	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5658	struct buf *nbp;	/* buffer holding allocated page */
5659{
5660	struct inodedep *inodedep;
5661	struct freefrag *freefrag;
5662	struct allocindir *aip;
5663	struct pagedep *pagedep;
5664	struct mount *mp;
5665	int dflags;
5666
5667	mp = UFSTOVFS(ip->i_ump);
5668	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
5669	    ("softdep_setup_allocindir_page called on non-softdep filesystem"));
5670	KASSERT(lbn == nbp->b_lblkno,
5671	    ("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
5672	    lbn, nbp->b_lblkno));
5673	CTR4(KTR_SUJ,
5674	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5675	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5676	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5677	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5678	dflags = DEPALLOC;
5679	if (IS_SNAPSHOT(ip))
5680		dflags |= NODELAY;
5681	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
5682	/*
5683	 * If we are allocating a directory page, then we must
5684	 * allocate an associated pagedep to track additions and
5685	 * deletions.
5686	 */
5687	if ((ip->i_mode & IFMT) == IFDIR)
5688		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5689	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5690	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5691	FREE_LOCK(&lk);
5692	if (freefrag)
5693		handle_workitem_freefrag(freefrag);
5694}
5695
5696/*
5697 * Called just before setting an indirect block pointer to a
5698 * newly allocated indirect block.
5699 */
5700void
5701softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5702	struct buf *nbp;	/* newly allocated indirect block */
5703	struct inode *ip;	/* inode for file being extended */
5704	struct buf *bp;		/* indirect block referencing allocated block */
5705	int ptrno;		/* offset of pointer in indirect block */
5706	ufs2_daddr_t newblkno;	/* disk block number being added */
5707{
5708	struct inodedep *inodedep;
5709	struct allocindir *aip;
5710	ufs_lbn_t lbn;
5711	int dflags;
5712
5713	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
5714	    ("softdep_setup_allocindir_meta called on non-softdep filesystem"));
5715	CTR3(KTR_SUJ,
5716	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5717	    ip->i_number, newblkno, ptrno);
5718	lbn = nbp->b_lblkno;
5719	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5720	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5721	dflags = DEPALLOC;
5722	if (IS_SNAPSHOT(ip))
5723		dflags |= NODELAY;
5724	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
5725	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5726	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5727		panic("softdep_setup_allocindir_meta: Block already existed");
5728	FREE_LOCK(&lk);
5729}
5730
5731static void
5732indirdep_complete(indirdep)
5733	struct indirdep *indirdep;
5734{
5735	struct allocindir *aip;
5736
5737	LIST_REMOVE(indirdep, ir_next);
5738	indirdep->ir_state |= DEPCOMPLETE;
5739
5740	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5741		LIST_REMOVE(aip, ai_next);
5742		free_newblk(&aip->ai_block);
5743	}
5744	/*
5745	 * If this indirdep is not attached to a buf it was simply waiting
5746	 * on completion to clear completehd.  free_indirdep() asserts
5747	 * that nothing is dangling.
5748	 */
5749	if ((indirdep->ir_state & ONWORKLIST) == 0)
5750		free_indirdep(indirdep);
5751}
5752
5753static struct indirdep *
5754indirdep_lookup(mp, ip, bp)
5755	struct mount *mp;
5756	struct inode *ip;
5757	struct buf *bp;
5758{
5759	struct indirdep *indirdep, *newindirdep;
5760	struct newblk *newblk;
5761	struct worklist *wk;
5762	struct fs *fs;
5763	ufs2_daddr_t blkno;
5764
5765	rw_assert(&lk, RA_WLOCKED);
5766	indirdep = NULL;
5767	newindirdep = NULL;
5768	fs = ip->i_fs;
5769	for (;;) {
5770		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5771			if (wk->wk_type != D_INDIRDEP)
5772				continue;
5773			indirdep = WK_INDIRDEP(wk);
5774			break;
5775		}
5776		/* Found on the buffer worklist, no new structure to free. */
5777		if (indirdep != NULL && newindirdep == NULL)
5778			return (indirdep);
5779		if (indirdep != NULL && newindirdep != NULL)
5780			panic("indirdep_lookup: simultaneous create");
5781		/* None found on the buffer and a new structure is ready. */
5782		if (indirdep == NULL && newindirdep != NULL)
5783			break;
5784		/* None found and no new structure available. */
5785		FREE_LOCK(&lk);
5786		newindirdep = malloc(sizeof(struct indirdep),
5787		    M_INDIRDEP, M_SOFTDEP_FLAGS);
5788		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5789		newindirdep->ir_state = ATTACHED;
5790		if (ip->i_ump->um_fstype == UFS1)
5791			newindirdep->ir_state |= UFS1FMT;
5792		TAILQ_INIT(&newindirdep->ir_trunc);
5793		newindirdep->ir_saveddata = NULL;
5794		LIST_INIT(&newindirdep->ir_deplisthd);
5795		LIST_INIT(&newindirdep->ir_donehd);
5796		LIST_INIT(&newindirdep->ir_writehd);
5797		LIST_INIT(&newindirdep->ir_completehd);
5798		if (bp->b_blkno == bp->b_lblkno) {
5799			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5800			    NULL, NULL);
5801			bp->b_blkno = blkno;
5802		}
5803		newindirdep->ir_freeblks = NULL;
5804		newindirdep->ir_savebp =
5805		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5806		newindirdep->ir_bp = bp;
5807		BUF_KERNPROC(newindirdep->ir_savebp);
5808		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5809		ACQUIRE_LOCK(&lk);
5810	}
5811	indirdep = newindirdep;
5812	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5813	/*
5814	 * If the block is not yet allocated we don't set DEPCOMPLETE so
5815	 * that we don't free dependencies until the pointers are valid.
5816	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5817	 * than using the hash.
5818	 */
5819	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5820		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5821	else
5822		indirdep->ir_state |= DEPCOMPLETE;
5823	return (indirdep);
5824}
5825
5826/*
5827 * Called to finish the allocation of the "aip" allocated
5828 * by one of the two routines above.
5829 */
5830static struct freefrag *
5831setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5832	struct buf *bp;		/* in-memory copy of the indirect block */
5833	struct inode *ip;	/* inode for file being extended */
5834	struct inodedep *inodedep; /* Inodedep for ip */
5835	struct allocindir *aip;	/* allocindir allocated by the above routines */
5836	ufs_lbn_t lbn;		/* Logical block number for this block. */
5837{
5838	struct fs *fs;
5839	struct indirdep *indirdep;
5840	struct allocindir *oldaip;
5841	struct freefrag *freefrag;
5842	struct mount *mp;
5843
5844	rw_assert(&lk, RA_WLOCKED);
5845	mp = UFSTOVFS(ip->i_ump);
5846	fs = ip->i_fs;
5847	if (bp->b_lblkno >= 0)
5848		panic("setup_allocindir_phase2: not indir blk");
5849	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
5850	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
5851	indirdep = indirdep_lookup(mp, ip, bp);
5852	KASSERT(indirdep->ir_savebp != NULL,
5853	    ("setup_allocindir_phase2 NULL ir_savebp"));
5854	aip->ai_indirdep = indirdep;
5855	/*
5856	 * Check for an unwritten dependency for this indirect offset.  If
5857	 * there is, merge the old dependency into the new one.  This happens
5858	 * as a result of reallocblk only.
5859	 */
5860	freefrag = NULL;
5861	if (aip->ai_oldblkno != 0) {
5862		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
5863			if (oldaip->ai_offset == aip->ai_offset) {
5864				freefrag = allocindir_merge(aip, oldaip);
5865				goto done;
5866			}
5867		}
5868		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
5869			if (oldaip->ai_offset == aip->ai_offset) {
5870				freefrag = allocindir_merge(aip, oldaip);
5871				goto done;
5872			}
5873		}
5874	}
5875done:
5876	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
5877	return (freefrag);
5878}
5879
5880/*
5881 * Merge two allocindirs which refer to the same block.  Move newblock
5882 * dependencies and setup the freefrags appropriately.
5883 */
5884static struct freefrag *
5885allocindir_merge(aip, oldaip)
5886	struct allocindir *aip;
5887	struct allocindir *oldaip;
5888{
5889	struct freefrag *freefrag;
5890	struct worklist *wk;
5891
5892	if (oldaip->ai_newblkno != aip->ai_oldblkno)
5893		panic("allocindir_merge: blkno");
5894	aip->ai_oldblkno = oldaip->ai_oldblkno;
5895	freefrag = aip->ai_freefrag;
5896	aip->ai_freefrag = oldaip->ai_freefrag;
5897	oldaip->ai_freefrag = NULL;
5898	KASSERT(freefrag != NULL, ("setup_allocindir_phase2: No freefrag"));
5899	/*
5900	 * If we are tracking a new directory-block allocation,
5901	 * move it from the old allocindir to the new allocindir.
5902	 */
5903	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
5904		WORKLIST_REMOVE(wk);
5905		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
5906			panic("allocindir_merge: extra newdirblk");
5907		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
5908	}
5909	/*
5910	 * We can skip journaling for this freefrag and just complete
5911	 * any pending journal work for the allocindir that is being
5912	 * removed after the freefrag completes.
5913	 */
5914	if (freefrag->ff_jdep)
5915		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
5916	LIST_REMOVE(oldaip, ai_next);
5917	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
5918	    &freefrag->ff_list, &freefrag->ff_jwork);
5919	free_newblk(&oldaip->ai_block);
5920
5921	return (freefrag);
5922}
5923
5924static inline void
5925setup_freedirect(freeblks, ip, i, needj)
5926	struct freeblks *freeblks;
5927	struct inode *ip;
5928	int i;
5929	int needj;
5930{
5931	ufs2_daddr_t blkno;
5932	int frags;
5933
5934	blkno = DIP(ip, i_db[i]);
5935	if (blkno == 0)
5936		return;
5937	DIP_SET(ip, i_db[i], 0);
5938	frags = sblksize(ip->i_fs, ip->i_size, i);
5939	frags = numfrags(ip->i_fs, frags);
5940	newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
5941}
5942
5943static inline void
5944setup_freeext(freeblks, ip, i, needj)
5945	struct freeblks *freeblks;
5946	struct inode *ip;
5947	int i;
5948	int needj;
5949{
5950	ufs2_daddr_t blkno;
5951	int frags;
5952
5953	blkno = ip->i_din2->di_extb[i];
5954	if (blkno == 0)
5955		return;
5956	ip->i_din2->di_extb[i] = 0;
5957	frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
5958	frags = numfrags(ip->i_fs, frags);
5959	newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
5960}
5961
5962static inline void
5963setup_freeindir(freeblks, ip, i, lbn, needj)
5964	struct freeblks *freeblks;
5965	struct inode *ip;
5966	int i;
5967	ufs_lbn_t lbn;
5968	int needj;
5969{
5970	ufs2_daddr_t blkno;
5971
5972	blkno = DIP(ip, i_ib[i]);
5973	if (blkno == 0)
5974		return;
5975	DIP_SET(ip, i_ib[i], 0);
5976	newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
5977	    0, needj);
5978}
5979
5980static inline struct freeblks *
5981newfreeblks(mp, ip)
5982	struct mount *mp;
5983	struct inode *ip;
5984{
5985	struct freeblks *freeblks;
5986
5987	freeblks = malloc(sizeof(struct freeblks),
5988		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
5989	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
5990	LIST_INIT(&freeblks->fb_jblkdephd);
5991	LIST_INIT(&freeblks->fb_jwork);
5992	freeblks->fb_ref = 0;
5993	freeblks->fb_cgwait = 0;
5994	freeblks->fb_state = ATTACHED;
5995	freeblks->fb_uid = ip->i_uid;
5996	freeblks->fb_inum = ip->i_number;
5997	freeblks->fb_vtype = ITOV(ip)->v_type;
5998	freeblks->fb_modrev = DIP(ip, i_modrev);
5999	freeblks->fb_devvp = ip->i_devvp;
6000	freeblks->fb_chkcnt = 0;
6001	freeblks->fb_len = 0;
6002
6003	return (freeblks);
6004}
6005
6006static void
6007trunc_indirdep(indirdep, freeblks, bp, off)
6008	struct indirdep *indirdep;
6009	struct freeblks *freeblks;
6010	struct buf *bp;
6011	int off;
6012{
6013	struct allocindir *aip, *aipn;
6014
6015	/*
6016	 * The first set of allocindirs won't be in savedbp.
6017	 */
6018	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6019		if (aip->ai_offset > off)
6020			cancel_allocindir(aip, bp, freeblks, 1);
6021	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6022		if (aip->ai_offset > off)
6023			cancel_allocindir(aip, bp, freeblks, 1);
6024	/*
6025	 * These will exist in savedbp.
6026	 */
6027	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6028		if (aip->ai_offset > off)
6029			cancel_allocindir(aip, NULL, freeblks, 0);
6030	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6031		if (aip->ai_offset > off)
6032			cancel_allocindir(aip, NULL, freeblks, 0);
6033}
6034
6035/*
6036 * Follow the chain of indirects down to lastlbn creating a freework
6037 * structure for each.  This will be used to start indir_trunc() at
6038 * the right offset and create the journal records for the partial
6039 * truncation.  A second step will handle the truncated dependencies.
6040 */
6041static int
6042setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6043	struct freeblks *freeblks;
6044	struct inode *ip;
6045	ufs_lbn_t lbn;
6046	ufs_lbn_t lastlbn;
6047	ufs2_daddr_t blkno;
6048{
6049	struct indirdep *indirdep;
6050	struct indirdep *indirn;
6051	struct freework *freework;
6052	struct newblk *newblk;
6053	struct mount *mp;
6054	struct buf *bp;
6055	uint8_t *start;
6056	uint8_t *end;
6057	ufs_lbn_t lbnadd;
6058	int level;
6059	int error;
6060	int off;
6061
6062
6063	freework = NULL;
6064	if (blkno == 0)
6065		return (0);
6066	mp = freeblks->fb_list.wk_mp;
6067	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6068	if ((bp->b_flags & B_CACHE) == 0) {
6069		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6070		bp->b_iocmd = BIO_READ;
6071		bp->b_flags &= ~B_INVAL;
6072		bp->b_ioflags &= ~BIO_ERROR;
6073		vfs_busy_pages(bp, 0);
6074		bp->b_iooffset = dbtob(bp->b_blkno);
6075		bstrategy(bp);
6076		curthread->td_ru.ru_inblock++;
6077		error = bufwait(bp);
6078		if (error) {
6079			brelse(bp);
6080			return (error);
6081		}
6082	}
6083	level = lbn_level(lbn);
6084	lbnadd = lbn_offset(ip->i_fs, level);
6085	/*
6086	 * Compute the offset of the last block we want to keep.  Store
6087	 * in the freework the first block we want to completely free.
6088	 */
6089	off = (lastlbn - -(lbn + level)) / lbnadd;
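	/*
	 * Worked example (illustrative): for a first-level indirect
	 * (level 0, lbn -NDADDR, lbnadd 1) with lastlbn == NDADDR + 8,
	 * off is 8, so pointer slots 0..8 are kept and slots 9 and up
	 * are freed by the freework created below.
	 */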
6090	if (off + 1 == NINDIR(ip->i_fs))
6091		goto nowork;
6092	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6093	    0);
6094	/*
6095	 * Link the freework into the indirdep.  This will prevent any new
6096	 * allocations from proceeding until we are finished with the
6097	 * truncate and the block is written.
6098	 */
6099	ACQUIRE_LOCK(&lk);
6100	indirdep = indirdep_lookup(mp, ip, bp);
6101	if (indirdep->ir_freeblks)
6102		panic("setup_trunc_indir: indirdep already truncated.");
6103	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6104	freework->fw_indir = indirdep;
6105	/*
6106	 * Cancel any allocindirs that will not make it to disk.
6107	 * We have to do this for all copies of the indirdep that
6108	 * live on this newblk.
6109	 */
6110	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6111		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6112		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6113			trunc_indirdep(indirn, freeblks, bp, off);
6114	} else
6115		trunc_indirdep(indirdep, freeblks, bp, off);
6116	FREE_LOCK(&lk);
6117	/*
6118	 * Creation is protected by the buf lock. The saveddata is only
6119	 * needed if a full truncation follows a partial truncation but it
6120	 * is difficult to allocate in that case so we fetch it anyway.
6121	 */
6122	if (indirdep->ir_saveddata == NULL)
6123		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6124		    M_SOFTDEP_FLAGS);
6125nowork:
6126	/* Fetch the blkno of the child and the zero start offset. */
6127	if (ip->i_ump->um_fstype == UFS1) {
6128		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6129		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6130	} else {
6131		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6132		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6133	}
6134	if (freework) {
6135		/* Zero the truncated pointers. */
6136		end = bp->b_data + bp->b_bcount;
6137		bzero(start, end - start);
6138		bdwrite(bp);
6139	} else
6140		bqrelse(bp);
6141	if (level == 0)
6142		return (0);
6143	lbn++; /* adjust level */
6144	lbn -= (off * lbnadd);
6145	return setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno);
6146}
6147
6148/*
6149 * Complete the partial truncation of an indirect block set up by
6150 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6151 * copy and writes them to disk before the freeblks is allowed to complete.
6152 */
6153static void
6154complete_trunc_indir(freework)
6155	struct freework *freework;
6156{
6157	struct freework *fwn;
6158	struct indirdep *indirdep;
6159	struct buf *bp;
6160	uintptr_t start;
6161	int count;
6162
6163	indirdep = freework->fw_indir;
6164	for (;;) {
6165		bp = indirdep->ir_bp;
6166		/* See if the block was discarded. */
6167		if (bp == NULL)
6168			break;
6169		/* Inline part of getdirtybuf().  We don't want bremfree. */
6170		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6171			break;
6172		if (BUF_LOCK(bp,
6173		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, &lk) == 0)
6174			BUF_UNLOCK(bp);
6175		ACQUIRE_LOCK(&lk);
6176	}
6177	rw_assert(&lk, RA_WLOCKED);
6178	freework->fw_state |= DEPCOMPLETE;
6179	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6180	/*
6181	 * Zero the pointers in the saved copy.
6182	 */
6183	if (indirdep->ir_state & UFS1FMT)
6184		start = sizeof(ufs1_daddr_t);
6185	else
6186		start = sizeof(ufs2_daddr_t);
6187	start *= freework->fw_start;
6188	count = indirdep->ir_savebp->b_bcount - start;
6189	start += (uintptr_t)indirdep->ir_savebp->b_data;
6190	bzero((char *)start, count);
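	/*
	 * For example (illustrative), with UFS2 8-byte pointers and
	 * fw_start == 9, the zeroing above begins at byte offset 72 of
	 * the saved copy, wiping pointer slots 9 and up.
	 */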
6191	/*
6192	 * We need to start the next truncation in the list if it has not
6193	 * been started yet.
6194	 */
6195	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6196	if (fwn != NULL) {
6197		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6198			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6199		if ((fwn->fw_state & ONWORKLIST) == 0)
6200			freework_enqueue(fwn);
6201	}
6202	/*
6203	 * If bp is NULL the block was fully truncated, so restore
6204	 * the saved block list; otherwise free it if it is no
6205	 * longer needed.
6206	 */
6207	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6208		if (bp == NULL)
6209			bcopy(indirdep->ir_saveddata,
6210			    indirdep->ir_savebp->b_data,
6211			    indirdep->ir_savebp->b_bcount);
6212		free(indirdep->ir_saveddata, M_INDIRDEP);
6213		indirdep->ir_saveddata = NULL;
6214	}
6215	/*
6216	 * When bp is NULL there is a full truncation pending.  We
6217	 * must wait for this full truncation to be journaled before
6218	 * we can release this freework because the disk pointers will
6219	 * never be written as zero.
6220	 */
6221	if (bp == NULL)  {
6222		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6223			handle_written_freework(freework);
6224		else
6225			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6226			   &freework->fw_list);
6227	} else {
6228		/* Complete when the real copy is written. */
6229		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6230		BUF_UNLOCK(bp);
6231	}
6232}
6233
6234/*
6235 * Calculate the number of blocks we are going to release, where datablocks
6236 * is the current total and length is the new file size.
6237 */
6238static ufs2_daddr_t
6239blkcount(fs, datablocks, length)
6240	struct fs *fs;
6241	ufs2_daddr_t datablocks;
6242	off_t length;
6243{
6244	off_t totblks, numblks;
6245
6246	totblks = 0;
6247	numblks = howmany(length, fs->fs_bsize);
6248	if (numblks <= NDADDR) {
6249		totblks = howmany(length, fs->fs_fsize);
6250		goto out;
6251	}
6252	totblks = blkstofrags(fs, numblks);
6253	numblks -= NDADDR;
6254	/*
6255	 * Count all single, then double, then triple indirects required.
6256	 * Subtracting one indirect's worth of blocks for each pass
6257	 * acknowledges one of each pointed to by the inode.
6258	 */
6259	for (;;) {
6260		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6261		numblks -= NINDIR(fs);
6262		if (numblks <= 0)
6263			break;
6264		numblks = howmany(numblks, NINDIR(fs));
6265	}
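	/*
	 * Illustrative trace (assuming NINDIR(fs) == 2048): with 5000
	 * blocks beyond NDADDR, the first pass counts howmany(5000, 2048)
	 * == 3 single indirects and reduces numblks to 2; the second pass
	 * counts 1 double indirect and terminates, for 4 indirect blocks
	 * in total.
	 */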
6266out:
6267	totblks = fsbtodb(fs, totblks);
6268	/*
6269	 * Handle sparse files.  We can't reclaim more blocks than the inode
6270	 * references.  We will correct it later in handle_complete_freeblks()
6271	 * when we know the real count.
6272	 */
6273	if (totblks > datablocks)
6274		return (0);
6275	return (datablocks - totblks);
6276}
6277
6278/*
6279 * Handle freeblocks for journaled softupdate filesystems.
6280 *
6281 * Contrary to normal softupdates, we must preserve the block pointers in
6282 * indirects until their subordinates are free.  This is to avoid journaling
6283 * every block that is freed which may consume more space than the journal
6284 * itself.  The recovery program will see the free block journals at the
6285 * base of the truncated area and traverse them to reclaim space.  The
6286 * pointers in the inode may be cleared immediately after the journal
6287 * records are written because each direct and indirect pointer in the
6288 * inode is recorded in a journal.  This permits full truncation to proceed
6289 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6290 *
6291 * The algorithm is as follows:
6292 * 1) Traverse the in-memory state and create journal entries to release
6293 *    the relevant blocks and full indirect trees.
6294 * 2) Traverse the indirect block chain adding partial truncation freework
6295 *    records to indirects in the path to lastlbn.  The freework will
6296 *    prevent new allocation dependencies from being satisfied in this
6297 *    indirect until the truncation completes.
6298 * 3) Read and lock the inode block, performing an update with the new size
6299 *    and pointers.  This prevents truncated data from becoming valid on
6300 *    disk through step 4.
6301 * 4) Reap unsatisfied dependencies that are beyond the truncated area,
6302 *    eliminating journal work for those records that do not require it.
6303 * 5) Schedule the journal records to be written followed by the inode block.
6304 * 6) Allocate any necessary frags for the end of file.
6305 * 7) Zero any partially truncated blocks.
6306 *
6307 * From this point truncation proceeds asynchronously using the freework and
6308 * indir_trunc machinery.  The file will not be extended again into a
6309 * partially truncated indirect block until all work is completed but
6310 * the normal dependency mechanism ensures that it is rolled back/forward
6311 * as appropriate.  Further truncation may occur without delay and is
6312 * serialized in indir_trunc().
6313 */
6314void
6315softdep_journal_freeblocks(ip, cred, length, flags)
6316	struct inode *ip;	/* The inode whose length is to be reduced */
6317	struct ucred *cred;
6318	off_t length;		/* The new length for the file */
6319	int flags;		/* IO_EXT and/or IO_NORMAL */
6320{
6321	struct freeblks *freeblks, *fbn;
6322	struct worklist *wk, *wkn;
6323	struct inodedep *inodedep;
6324	struct jblkdep *jblkdep;
6325	struct allocdirect *adp, *adpn;
6326	struct ufsmount *ump;
6327	struct fs *fs;
6328	struct buf *bp;
6329	struct vnode *vp;
6330	struct mount *mp;
6331	ufs2_daddr_t extblocks, datablocks;
6332	ufs_lbn_t tmpval, lbn, lastlbn;
6333	int frags, lastoff, iboff, allocblock, needj, dflags, error, i;
6334
6335	fs = ip->i_fs;
6336	ump = ip->i_ump;
6337	mp = UFSTOVFS(ump);
6338	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6339	    ("softdep_journal_freeblocks called on non-softdep filesystem"));
6340	vp = ITOV(ip);
6341	needj = 1;
6342	iboff = -1;
6343	allocblock = 0;
6344	extblocks = 0;
6345	datablocks = 0;
6346	frags = 0;
6347	freeblks = newfreeblks(mp, ip);
6348	ACQUIRE_LOCK(&lk);
6349	/*
6350	 * If we're truncating a removed file that will never be written
6351	 * we don't need to journal the block frees.  The canceled journals
6352	 * for the allocations will suffice.
6353	 */
6354	dflags = DEPALLOC;
6355	if (IS_SNAPSHOT(ip))
6356		dflags |= NODELAY;
6357	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6358	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6359	    length == 0)
6360		needj = 0;
6361	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6362	    ip->i_number, length, needj);
6363	FREE_LOCK(&lk);
6364	/*
6365	 * Calculate the lbn that we are truncating to.  This results in -1
6366	 * if we're truncating to 0 bytes.  So it is the last lbn we want
6367	 * to keep, not the first lbn we want to truncate.
6368	 */
6369	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6370	lastoff = blkoff(fs, length);
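	/*
	 * Worked example (illustrative, assuming fs_bsize == 16384): a new
	 * length of 16384 gives lastlbn 0 and lastoff 0 (block 0 is kept
	 * whole); a length of 20000 gives lastlbn 1 and lastoff 3616 (block
	 * 1 is kept partially); a length of 0 gives lastlbn -1 (nothing is
	 * kept).
	 */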
6371	/*
6372	 * Compute frags we are keeping in lastlbn.  0 means all.
6373	 */
6374	if (lastlbn >= 0 && lastlbn < NDADDR) {
6375		frags = fragroundup(fs, lastoff);
6376		/* adp offset of last valid allocdirect. */
6377		iboff = lastlbn;
6378	} else if (lastlbn > 0)
6379		iboff = NDADDR;
6380	if (fs->fs_magic == FS_UFS2_MAGIC)
6381		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6382	/*
6383	 * Handle normal data blocks and indirects.  This section saves
6384	 * values used after the inode update to complete frag and indirect
6385	 * truncation.
6386	 */
6387	if ((flags & IO_NORMAL) != 0) {
6388		/*
6389		 * Handle truncation of whole direct and indirect blocks.
6390		 */
6391		for (i = iboff + 1; i < NDADDR; i++)
6392			setup_freedirect(freeblks, ip, i, needj);
6393		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6394		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6395			/* Release a whole indirect tree. */
6396			if (lbn > lastlbn) {
6397				setup_freeindir(freeblks, ip, i, -lbn -i,
6398				    needj);
6399				continue;
6400			}
6401			iboff = i + NDADDR;
6402			/*
6403			 * Traverse partially truncated indirect tree.
6404			 */
6405			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6406				setup_trunc_indir(freeblks, ip, -lbn - i,
6407				    lastlbn, DIP(ip, i_ib[i]));
6408		}
6409		/*
6410		 * Handle partial truncation to a frag boundary.
6411		 */
6412		if (frags) {
6413			ufs2_daddr_t blkno;
6414			long oldfrags;
6415
6416			oldfrags = blksize(fs, ip, lastlbn);
6417			blkno = DIP(ip, i_db[lastlbn]);
6418			if (blkno && oldfrags != frags) {
6419				oldfrags -= frags;
6420				oldfrags = numfrags(ip->i_fs, oldfrags);
6421				blkno += numfrags(ip->i_fs, frags);
6422				newfreework(ip->i_ump, freeblks, NULL, lastlbn,
6423				    blkno, oldfrags, 0, needj);
6424			} else if (blkno == 0)
6425				allocblock = 1;
6426		}
6427		/*
6428		 * Add a journal record for partial truncate if we are
6429		 * handling indirect blocks.  Non-indirects need no extra
6430		 * journaling.
6431		 */
6432		if (length != 0 && lastlbn >= NDADDR) {
6433			ip->i_flag |= IN_TRUNCATED;
6434			newjtrunc(freeblks, length, 0);
6435		}
6436		ip->i_size = length;
6437		DIP_SET(ip, i_size, ip->i_size);
6438		datablocks = DIP(ip, i_blocks) - extblocks;
6439		if (length != 0)
6440			datablocks = blkcount(ip->i_fs, datablocks, length);
6441		freeblks->fb_len = length;
6442	}
6443	if ((flags & IO_EXT) != 0) {
6444		for (i = 0; i < NXADDR; i++)
6445			setup_freeext(freeblks, ip, i, needj);
6446		ip->i_din2->di_extsize = 0;
6447		datablocks += extblocks;
6448	}
6449#ifdef QUOTA
6450	/* Reference the quotas in case the block count is wrong in the end. */
6451	quotaref(vp, freeblks->fb_quota);
6452	(void) chkdq(ip, -datablocks, NOCRED, 0);
6453#endif
6454	freeblks->fb_chkcnt = -datablocks;
6455	UFS_LOCK(ip->i_ump);
6456	fs->fs_pendingblocks += datablocks;
6457	UFS_UNLOCK(ip->i_ump);
6458	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6459	/*
6460	 * Handle truncation of incomplete alloc direct dependencies.  We
6461	 * hold the inode block locked to prevent incomplete dependencies
6462	 * from reaching the disk while we are eliminating those that
6463	 * have been truncated.  This is a partially inlined ffs_update().
6464	 */
6465	ufs_itimes(vp);
6466	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6467	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6468	    (int)fs->fs_bsize, cred, &bp);
6469	if (error) {
6470		brelse(bp);
6471		softdep_error("softdep_journal_freeblocks", error);
6472		return;
6473	}
6474	if (bp->b_bufsize == fs->fs_bsize)
6475		bp->b_flags |= B_CLUSTEROK;
6476	softdep_update_inodeblock(ip, bp, 0);
6477	if (ip->i_ump->um_fstype == UFS1)
6478		*((struct ufs1_dinode *)bp->b_data +
6479		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6480	else
6481		*((struct ufs2_dinode *)bp->b_data +
6482		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6483	ACQUIRE_LOCK(&lk);
6484	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6485	if ((inodedep->id_state & IOSTARTED) != 0)
6486		panic("softdep_journal_freeblocks: inode busy");
6487	/*
6488	 * Add the freeblks structure to the list of operations that
6489	 * must await the zero'ed inode being written to disk. If we
6490	 * still have a bitmap dependency (needj), then the inode
6491	 * has never been written to disk, so we can process the
6492	 * freeblks below once we have deleted the dependencies.
6493	 */
6494	if (needj)
6495		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6496	else
6497		freeblks->fb_state |= COMPLETE;
6498	if ((flags & IO_NORMAL) != 0) {
6499		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6500			if (adp->ad_offset > iboff)
6501				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6502				    freeblks);
6503			/*
6504			 * Truncate the allocdirect.  We could eliminate
6505			 * or modify journal records as well.
6506			 */
6507			else if (adp->ad_offset == iboff && frags)
6508				adp->ad_newsize = frags;
6509		}
6510	}
6511	if ((flags & IO_EXT) != 0)
6512		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6513			cancel_allocdirect(&inodedep->id_extupdt, adp,
6514			    freeblks);
6515	/*
6516	 * Scan the bufwait list for newblock dependencies that will never
6517	 * make it to disk.
6518	 */
6519	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6520		if (wk->wk_type != D_ALLOCDIRECT)
6521			continue;
6522		adp = WK_ALLOCDIRECT(wk);
6523		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6524		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6525			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6526			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6527			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6528		}
6529	}
6530	/*
6531	 * Add journal work.
6532	 */
6533	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6534		add_to_journal(&jblkdep->jb_list);
6535	FREE_LOCK(&lk);
6536	bdwrite(bp);
6537	/*
6538	 * Truncate dependency structures beyond length.
6539	 */
6540	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6541	/*
6542	 * This is only set when we need to allocate a fragment because
6543	 * none existed at the end of a frag-sized file.  It handles only
6544	 * allocating a new, zero filled block.
6545	 */
6546	if (allocblock) {
6547		ip->i_size = length - lastoff;
6548		DIP_SET(ip, i_size, ip->i_size);
6549		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6550		if (error != 0) {
6551			softdep_error("softdep_journal_freeblocks", error);
6552			return;
6553		}
6554		ip->i_size = length;
6555		DIP_SET(ip, i_size, length);
6556		ip->i_flag |= IN_CHANGE | IN_UPDATE;
6557		allocbuf(bp, frags);
6558		ffs_update(vp, 0);
6559		bawrite(bp);
6560	} else if (lastoff != 0 && vp->v_type != VDIR) {
6561		int size;
6562
6563		/*
6564		 * Zero the end of a truncated frag or block.
6565		 */
6566		size = sblksize(fs, length, lastlbn);
6567		error = bread(vp, lastlbn, size, cred, &bp);
6568		if (error) {
6569			softdep_error("softdep_journal_freeblocks", error);
6570			return;
6571		}
6572		bzero((char *)bp->b_data + lastoff, size - lastoff);
6573		bawrite(bp);
6574
6575	}
6576	ACQUIRE_LOCK(&lk);
6577	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6578	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6579	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6580	/*
6581	 * We zero earlier truncations so they don't erroneously
6582	 * update i_blocks.
6583	 */
6584	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6585		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6586			fbn->fb_len = 0;
6587	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6588	    LIST_EMPTY(&freeblks->fb_jblkdephd))
6589		freeblks->fb_state |= INPROGRESS;
6590	else
6591		freeblks = NULL;
6592	FREE_LOCK(&lk);
6593	if (freeblks)
6594		handle_workitem_freeblocks(freeblks, 0);
6595	trunc_pages(ip, length, extblocks, flags);
6596
6597}
6598
6599/*
6600 * Flush a JOP_SYNC to the journal.
6601 */
6602void
6603softdep_journal_fsync(ip)
6604	struct inode *ip;
6605{
6606	struct jfsync *jfsync;
6607
6608	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
6609	    ("softdep_journal_fsync called on non-softdep filesystem"));
6610	if ((ip->i_flag & IN_TRUNCATED) == 0)
6611		return;
6612	ip->i_flag &= ~IN_TRUNCATED;
6613	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6614	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6615	jfsync->jfs_size = ip->i_size;
6616	jfsync->jfs_ino = ip->i_number;
6617	ACQUIRE_LOCK(&lk);
6618	add_to_journal(&jfsync->jfs_list);
6619	jwait(&jfsync->jfs_list, MNT_WAIT);
6620	FREE_LOCK(&lk);
6621}
6622
6623/*
6624 * Block de-allocation dependencies.
6625 *
6626 * When blocks are de-allocated, the on-disk pointers must be nullified before
6627 * the blocks are made available for use by other files.  (The true
6628 * requirement is that old pointers must be nullified before new on-disk
6629 * pointers are set.  We chose this slightly more stringent requirement to
6630 * reduce complexity.) Our implementation handles this dependency by updating
6631 * the inode (or indirect block) appropriately but delaying the actual block
6632 * de-allocation (i.e., freemap and free space count manipulation) until
6633 * after the updated versions reach stable storage.  After the disk is
6634 * updated, the blocks can be safely de-allocated whenever it is convenient.
6635 * This implementation handles only the common case of reducing a file's
6636 * length to zero. Other cases are handled by the conventional synchronous
6637 * write approach.
6638 *
6639 * The ffs implementation with which we worked double-checks
6640 * the state of the block pointers and file size as it reduces
6641 * a file's length.  Some of this code is replicated here in our
6642 * soft updates implementation.  The freeblks->fb_chkcnt field is
6643 * used to transfer a part of this information to the procedure
6644 * that eventually de-allocates the blocks.
6645 *
6646 * This routine should be called from the routine that shortens
6647 * a file's length, before the inode's size or block pointers
6648 * are modified. It will save the block pointer information for
6649 * later release and zero the inode so that the calling routine
6650 * can release it.
6651 */
6652void
6653softdep_setup_freeblocks(ip, length, flags)
6654	struct inode *ip;	/* The inode whose length is to be reduced */
6655	off_t length;		/* The new length for the file */
6656	int flags;		/* IO_EXT and/or IO_NORMAL */
6657{
6658	struct ufs1_dinode *dp1;
6659	struct ufs2_dinode *dp2;
6660	struct freeblks *freeblks;
6661	struct inodedep *inodedep;
6662	struct allocdirect *adp;
6663	struct ufsmount *ump;
6664	struct buf *bp;
6665	struct fs *fs;
6666	ufs2_daddr_t extblocks, datablocks;
6667	struct mount *mp;
6668	int i, delay, error, dflags;
6669	ufs_lbn_t tmpval;
6670	ufs_lbn_t lbn;
6671
6672	ump = ip->i_ump;
6673	mp = UFSTOVFS(ump);
6674	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
6675	    ("softdep_setup_freeblocks called on non-softdep filesystem"));
6676	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6677	    ip->i_number, length);
6678	KASSERT(length == 0, ("softdep_setup_freeblocks: non-zero length"));
6679	fs = ip->i_fs;
6680	freeblks = newfreeblks(mp, ip);
6681	extblocks = 0;
6682	datablocks = 0;
6683	if (fs->fs_magic == FS_UFS2_MAGIC)
6684		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6685	if ((flags & IO_NORMAL) != 0) {
6686		for (i = 0; i < NDADDR; i++)
6687			setup_freedirect(freeblks, ip, i, 0);
6688		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6689		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
6690			setup_freeindir(freeblks, ip, i, -lbn -i, 0);
6691		ip->i_size = 0;
6692		DIP_SET(ip, i_size, 0);
6693		datablocks = DIP(ip, i_blocks) - extblocks;
6694	}
6695	if ((flags & IO_EXT) != 0) {
6696		for (i = 0; i < NXADDR; i++)
6697			setup_freeext(freeblks, ip, i, 0);
6698		ip->i_din2->di_extsize = 0;
6699		datablocks += extblocks;
6700	}
6701#ifdef QUOTA
6702	/* Reference the quotas in case the block count is wrong in the end. */
6703	quotaref(ITOV(ip), freeblks->fb_quota);
6704	(void) chkdq(ip, -datablocks, NOCRED, 0);
6705#endif
6706	freeblks->fb_chkcnt = -datablocks;
6707	UFS_LOCK(ip->i_ump);
6708	fs->fs_pendingblocks += datablocks;
6709	UFS_UNLOCK(ip->i_ump);
6710	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6711	/*
6712	 * Push the zero'ed inode to its disk buffer so that we are free
6713	 * to delete its dependencies below. Once the dependencies are gone
6714	 * the buffer can be safely released.
6715	 */
6716	if ((error = bread(ip->i_devvp,
6717	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6718	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6719		brelse(bp);
6720		softdep_error("softdep_setup_freeblocks", error);
6721	}
6722	if (ip->i_ump->um_fstype == UFS1) {
6723		dp1 = ((struct ufs1_dinode *)bp->b_data +
6724		    ino_to_fsbo(fs, ip->i_number));
6725		ip->i_din1->di_freelink = dp1->di_freelink;
6726		*dp1 = *ip->i_din1;
6727	} else {
6728		dp2 = ((struct ufs2_dinode *)bp->b_data +
6729		    ino_to_fsbo(fs, ip->i_number));
6730		ip->i_din2->di_freelink = dp2->di_freelink;
6731		*dp2 = *ip->i_din2;
6732	}
6733	/*
6734	 * Find and eliminate any inode dependencies.
6735	 */
6736	ACQUIRE_LOCK(&lk);
6737	dflags = DEPALLOC;
6738	if (IS_SNAPSHOT(ip))
6739		dflags |= NODELAY;
6740	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6741	if ((inodedep->id_state & IOSTARTED) != 0)
6742		panic("softdep_setup_freeblocks: inode busy");
6743	/*
6744	 * Add the freeblks structure to the list of operations that
6745	 * must await the zero'ed inode being written to disk. If we
6746	 * still have a bitmap dependency (delay == 0), then the inode
6747	 * has never been written to disk, so we can process the
6748	 * freeblks below once we have deleted the dependencies.
6749	 */
6750	delay = (inodedep->id_state & DEPCOMPLETE);
6751	if (delay)
6752		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6753	else
6754		freeblks->fb_state |= COMPLETE;
6755	/*
6756	 * Because the file length has been truncated to zero, any
6757	 * pending block allocation dependency structures associated
6758	 * with this inode are obsolete and can simply be de-allocated.
6759	 * We must first merge the two dependency lists to get rid of
6760	 * any duplicate freefrag structures, then purge the merged list.
6761	 * If we still have a bitmap dependency, then the inode has never
6762	 * been written to disk, so we can free any fragments without delay.
6763	 */
6764	if (flags & IO_NORMAL) {
6765		merge_inode_lists(&inodedep->id_newinoupdt,
6766		    &inodedep->id_inoupdt);
6767		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
6768			cancel_allocdirect(&inodedep->id_inoupdt, adp,
6769			    freeblks);
6770	}
6771	if (flags & IO_EXT) {
6772		merge_inode_lists(&inodedep->id_newextupdt,
6773		    &inodedep->id_extupdt);
6774		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6775			cancel_allocdirect(&inodedep->id_extupdt, adp,
6776			    freeblks);
6777	}
6778	FREE_LOCK(&lk);
6779	bdwrite(bp);
6780	trunc_dependencies(ip, freeblks, -1, 0, flags);
6781	ACQUIRE_LOCK(&lk);
6782	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6783		(void) free_inodedep(inodedep);
6784	freeblks->fb_state |= DEPCOMPLETE;
6785	/*
6786	 * If the inode with zeroed block pointers is now on disk
6787	 * we can start freeing blocks.
6788	 */
6789	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6790		freeblks->fb_state |= INPROGRESS;
6791	else
6792		freeblks = NULL;
6793	FREE_LOCK(&lk);
6794	if (freeblks)
6795		handle_workitem_freeblocks(freeblks, 0);
6796	trunc_pages(ip, length, extblocks, flags);
6797}
6798
6799/*
6800 * Eliminate pages from the page cache that back parts of this inode and
6801 * adjust the vnode pager's idea of our size.  This prevents stale data
6802 * from hanging around in the page cache.
6803 */
6804static void
6805trunc_pages(ip, length, extblocks, flags)
6806	struct inode *ip;
6807	off_t length;
6808	ufs2_daddr_t extblocks;
6809	int flags;
6810{
6811	struct vnode *vp;
6812	struct fs *fs;
6813	ufs_lbn_t lbn;
6814	off_t end, extend;
6815
6816	vp = ITOV(ip);
6817	fs = ip->i_fs;
6818	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6819	if ((flags & IO_EXT) != 0)
6820		vn_pages_remove(vp, extend, 0);
6821	if ((flags & IO_NORMAL) == 0)
6822		return;
6823	BO_LOCK(&vp->v_bufobj);
6824	drain_output(vp);
6825	BO_UNLOCK(&vp->v_bufobj);
6826	/*
6827	 * The vnode pager eliminates file pages; we eliminate indirects
6828	 * below.
6829	 */
6830	vnode_pager_setsize(vp, length);
6831	/*
6832	 * Calculate the end based on the last indirect we want to keep.  If
6833	 * the block extends into indirects we can just use the negative of
6834	 * its lbn.  Doubles and triples exist at lower numbers so we must
6835	 * be careful not to remove those, if they exist.  Double and triple
6836	 * indirect lbns do not overlap with others so it is not important
6837	 * to verify how many levels are required.
6838	 */
6839	lbn = lblkno(fs, length);
6840	if (lbn >= NDADDR) {
6841		/* Calculate the virtual lbn of the triple indirect. */
6842		lbn = -lbn - (NIADDR - 1);
6843		end = OFF_TO_IDX(lblktosize(fs, lbn));
6844	} else
6845		end = extend;
6846	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
6847}
6848
6849/*
6850 * See if the buf bp is in the range eliminated by truncation.
6851 */
6852static int
6853trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
6854	struct buf *bp;
6855	int *blkoffp;
6856	ufs_lbn_t lastlbn;
6857	int lastoff;
6858	int flags;
6859{
6860	ufs_lbn_t lbn;
6861
6862	*blkoffp = 0;
6863	/* Only match ext/normal blocks as appropriate. */
6864	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
6865	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
6866		return (0);
6867	/* ALTDATA is always a full truncation. */
6868	if ((bp->b_xflags & BX_ALTDATA) != 0)
6869		return (1);
6870	/* -1 is full truncation. */
6871	if (lastlbn == -1)
6872		return (1);
6873	/*
6874	 * If this is a partial truncate we only want those
6875	 * blocks and indirect blocks that cover the range
6876	 * we're after.
6877	 */
6878	lbn = bp->b_lblkno;
6879	if (lbn < 0)
6880		lbn = -(lbn + lbn_level(lbn));
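	/*
	 * For example (illustrative), a first-level indirect at lblkno
	 * -NDADDR maps data blocks starting at NDADDR, so it is compared
	 * here as lbn NDADDR.
	 */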
6881	if (lbn < lastlbn)
6882		return (0);
6883	/* Here we only truncate lblkno if it's partial. */
6884	if (lbn == lastlbn) {
6885		if (lastoff == 0)
6886			return (0);
6887		*blkoffp = lastoff;
6888	}
6889	return (1);
6890}
6891
6892/*
6893 * Eliminate any dependencies that exist in memory beyond lblkno:off
6894 */
6895static void
6896trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
6897	struct inode *ip;
6898	struct freeblks *freeblks;
6899	ufs_lbn_t lastlbn;
6900	int lastoff;
6901	int flags;
6902{
6903	struct bufobj *bo;
6904	struct vnode *vp;
6905	struct buf *bp;
6906	struct fs *fs;
6907	int blkoff;
6908
6909	/*
6910	 * We must wait for any I/O in progress to finish so that
6911	 * all potential buffers on the dirty list will be visible.
6912	 * Once they are all there, walk the list and get rid of
6913	 * any dependencies.
6914	 */
6915	fs = ip->i_fs;
6916	vp = ITOV(ip);
6917	bo = &vp->v_bufobj;
6918	BO_LOCK(bo);
6919	drain_output(vp);
6920	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
6921		bp->b_vflags &= ~BV_SCANNED;
6922restart:
6923	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
6924		if (bp->b_vflags & BV_SCANNED)
6925			continue;
6926		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6927			bp->b_vflags |= BV_SCANNED;
6928			continue;
6929		}
6930		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
6931			goto restart;
6932		BO_UNLOCK(bo);
6933		if (deallocate_dependencies(bp, freeblks, blkoff))
6934			bqrelse(bp);
6935		else
6936			brelse(bp);
6937		BO_LOCK(bo);
6938		goto restart;
6939	}
6940	/*
6941	 * Now do the work of vtruncbuf while also matching indirect blocks.
6942	 */
6943	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
6944		bp->b_vflags &= ~BV_SCANNED;
6945cleanrestart:
6946	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
6947		if (bp->b_vflags & BV_SCANNED)
6948			continue;
6949		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6950			bp->b_vflags |= BV_SCANNED;
6951			continue;
6952		}
6953		if (BUF_LOCK(bp,
6954		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6955		    BO_LOCKPTR(bo)) == ENOLCK) {
6956			BO_LOCK(bo);
6957			goto cleanrestart;
6958		}
6959		bp->b_vflags |= BV_SCANNED;
6960		bremfree(bp);
6961		if (blkoff != 0) {
6962			allocbuf(bp, blkoff);
6963			bqrelse(bp);
6964		} else {
6965			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
6966			brelse(bp);
6967		}
6968		BO_LOCK(bo);
6969		goto cleanrestart;
6970	}
6971	drain_output(vp);
6972	BO_UNLOCK(bo);
6973}
6974
6975static int
6976cancel_pagedep(pagedep, freeblks, blkoff)
6977	struct pagedep *pagedep;
6978	struct freeblks *freeblks;
6979	int blkoff;
6980{
6981	struct jremref *jremref;
6982	struct jmvref *jmvref;
6983	struct dirrem *dirrem, *tmp;
6984	int i;
6985
6986	/*
6987	 * Copy any directory remove dependencies to the list
6988	 * to be processed after the freeblks proceeds.  If the
6989	 * directory entries never made it to disk they
6990	 * can be dumped directly onto the work list.
6991	 */
6992	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
6993		/* Skip this directory removal if it is intended to remain. */
6994		if (dirrem->dm_offset < blkoff)
6995			continue;
6996		/*
6997		 * If there are any dirrems we wait for the journal write
6998		 * to complete and then restart the buf scan as the lock
6999		 * has been dropped.
7000		 */
7001		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
7002			jwait(&jremref->jr_list, MNT_WAIT);
7003			return (ERESTART);
7004		}
7005		LIST_REMOVE(dirrem, dm_next);
7006		dirrem->dm_dirinum = pagedep->pd_ino;
7007		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
7008	}
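	/*
	 * Pending directory-move journal entries must also be stable
	 * before the truncation proceeds.  jwait() drops the lock, so the
	 * caller must restart its buffer scan.
	 */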
7009	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
7010		jwait(&jmvref->jm_list, MNT_WAIT);
7011		return (ERESTART);
7012	}
7013	/*
7014	 * When we're partially truncating a pagedep we just want to flush
7015	 * journal entries and return.  There can not be any adds in the
7016	 * truncated portion of the directory and newblk must remain if
7017	 * part of the block remains.
7018	 */
7019	if (blkoff != 0) {
7020		struct diradd *dap;
7021
7022		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7023			if (dap->da_offset > blkoff)
7024				panic("cancel_pagedep: diradd %p off %d > %d",
7025				    dap, dap->da_offset, blkoff);
7026		for (i = 0; i < DAHASHSZ; i++)
7027			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7028				if (dap->da_offset > blkoff)
7029					panic("cancel_pagedep: diradd %p off %d > %d",
7030					    dap, dap->da_offset, blkoff);
7031		return (0);
7032	}
7033	/*
7034	 * There should be no directory add dependencies present
7035	 * as the directory could not be truncated until all
7036	 * children were removed.
7037	 */
7038	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
7039	    ("deallocate_dependencies: pendinghd != NULL"));
7040	for (i = 0; i < DAHASHSZ; i++)
7041		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7042		    ("deallocate_dependencies: diraddhd != NULL"));
7043	if ((pagedep->pd_state & NEWBLOCK) != 0)
7044		free_newdirblk(pagedep->pd_newdirblk);
7045	if (free_pagedep(pagedep) == 0)
7046		panic("Failed to free pagedep %p", pagedep);
7047	return (0);
7048}
7049
7050/*
7051 * Reclaim any dependency structures from a buffer that is about to
7052 * be reallocated to a new vnode. The buffer must be locked, thus,
7053 * no I/O completion operations can occur while we are manipulating
7054 * its associated dependencies. The mutex is held so that other I/O's
7055 * associated with related dependencies do not occur.
7056 */
7057static int
7058deallocate_dependencies(bp, freeblks, off)
7059	struct buf *bp;
7060	struct freeblks *freeblks;
7061	int off;
7062{
7063	struct indirdep *indirdep;
7064	struct pagedep *pagedep;
7065	struct allocdirect *adp;
7066	struct worklist *wk, *wkn;
7067
7068	ACQUIRE_LOCK(&lk);
7069	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7070		switch (wk->wk_type) {
7071		case D_INDIRDEP:
7072			indirdep = WK_INDIRDEP(wk);
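			/*
			 * Indirdeps belong only to indirect (negative lbn)
			 * buffers whose saved copy is keyed by this buffer's
			 * physical block number.
			 */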
7073			if (bp->b_lblkno >= 0 ||
7074			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7075				panic("deallocate_dependencies: not indir");
7076			cancel_indirdep(indirdep, bp, freeblks);
7077			continue;
7078
7079		case D_PAGEDEP:
7080			pagedep = WK_PAGEDEP(wk);
7081			if (cancel_pagedep(pagedep, freeblks, off)) {
7082				FREE_LOCK(&lk);
7083				return (ERESTART);
7084			}
7085			continue;
7086
7087		case D_ALLOCINDIR:
7088			/*
7089			 * Simply remove the allocindir, we'll find it via
7090			 * the indirdep where we can clear pointers if
7091			 * needed.
7092			 */
7093			WORKLIST_REMOVE(wk);
7094			continue;
7095
7096		case D_FREEWORK:
7097			/*
7098			 * A truncation is waiting for the zero'd pointers
7099			 * to be written.  It can be freed when the freeblks
7100			 * is journaled.
7101			 */
7102			WORKLIST_REMOVE(wk);
7103			wk->wk_state |= ONDEPLIST;
7104			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7105			break;
7106
7107		case D_ALLOCDIRECT:
7108			adp = WK_ALLOCDIRECT(wk);
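			/*
			 * An allocdirect may legitimately remain on a
			 * partially truncated block; for a full truncation
			 * it should already have been cancelled, so fall
			 * through to the panic below.
			 */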
7109			if (off != 0)
7110				continue;
7111			/* FALLTHROUGH */
7112		default:
7113			panic("deallocate_dependencies: Unexpected type %s",
7114			    TYPENAME(wk->wk_type));
7115			/* NOTREACHED */
7116		}
7117	}
7118	FREE_LOCK(&lk);
7119	/*
7120	 * Don't throw away this buf, we were partially truncating and
7121	 * Don't throw away this buf; we were partially truncating and
7122	 * some deps may still remain.
7123	if (off) {
7124		allocbuf(bp, off);
7125		bp->b_vflags |= BV_SCANNED;
7126		return (EBUSY);
7127	}
7128	bp->b_flags |= B_INVAL | B_NOCACHE;
7129
7130	return (0);
7131}
7132
7133/*
7134 * An allocdirect is being canceled due to a truncate.  We must make sure
7135 * the journal entry is released in concert with the blkfree that releases
7136 * the storage.  Completed journal entries must not be released until the
7137 * space is no longer pointed to by the inode or in the bitmap.
7138 */
7139static void
7140cancel_allocdirect(adphead, adp, freeblks)
7141	struct allocdirectlst *adphead;
7142	struct allocdirect *adp;
7143	struct freeblks *freeblks;
7144{
7145	struct freework *freework;
7146	struct newblk *newblk;
7147	struct worklist *wk;
7148
7149	TAILQ_REMOVE(adphead, adp, ad_next);
7150	newblk = (struct newblk *)adp;
7151	freework = NULL;
7152	/*
7153	 * Find the correct freework structure.
7154	 */
7155	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7156		if (wk->wk_type != D_FREEWORK)
7157			continue;
7158		freework = WK_FREEWORK(wk);
7159		if (freework->fw_blkno == newblk->nb_newblkno)
7160			break;
7161	}
7162	if (freework == NULL)
7163		panic("cancel_allocdirect: Freework not found");
7164	/*
7165	 * If a newblk exists at all we still have the journal entry that
7166	 * initiated the allocation so we do not need to journal the free.
7167	 */
7168	cancel_jfreeblk(freeblks, freework->fw_blkno);
7169	/*
7170	 * If the journal hasn't been written the jnewblk must be passed
7171	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7172	 * this by linking the journal dependency into the freework to be
7173	 * freed when freework_freeblock() is called.  If the journal has
7174	 * been written we can simply reclaim the journal space when the
7175	 * freeblks work is complete.
7176	 */
7177	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7178	    &freeblks->fb_jwork);
7179	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7180}
7181
7182
7183/*
7184 * Cancel a new block allocation.  May be an indirect or direct block.  We
7185 * remove it from various lists and return any journal record that needs to
7186 * be resolved by the caller.
7187 *
7188 * A special consideration is made for indirects which were never pointed
7189 * at on disk and will never be found once this block is released.
7190 */
7191static struct jnewblk *
7192cancel_newblk(newblk, wk, wkhd)
7193	struct newblk *newblk;
7194	struct worklist *wk;
7195	struct workhead *wkhd;
7196{
7197	struct jnewblk *jnewblk;
7198
7199	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7200
7201	newblk->nb_state |= GOINGAWAY;
7202	/*
7203	 * Previously we traversed the completedhd on each indirdep
7204	 * attached to this newblk to cancel them and gather journal
7205	 * work.  Since we need only the oldest journal segment, and
7206	 * the lowest point on the tree will always have the oldest
7207	 * journal segment, we are free to release the segments
7208	 * of any subordinates and may leave the indirdep list to
7209	 * indirdep_complete() when this newblk is freed.
7210	 */
7211	if (newblk->nb_state & ONDEPLIST) {
7212		newblk->nb_state &= ~ONDEPLIST;
7213		LIST_REMOVE(newblk, nb_deps);
7214	}
7215	if (newblk->nb_state & ONWORKLIST)
7216		WORKLIST_REMOVE(&newblk->nb_list);
7217	/*
7218	 * If the journal entry hasn't been written we save a pointer to
7219	 * the dependency that frees it until it is written or the
7220	 * superseding operation completes.
7221	 */
7222	jnewblk = newblk->nb_jnewblk;
7223	if (jnewblk != NULL && wk != NULL) {
7224		newblk->nb_jnewblk = NULL;
7225		jnewblk->jn_dep = wk;
7226	}
7227	if (!LIST_EMPTY(&newblk->nb_jwork))
7228		jwork_move(wkhd, &newblk->nb_jwork);
7229	/*
7230	 * When truncating we must free the newdirblk early to remove
7231	 * the pagedep from the hash before returning.
7232	 */
7233	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7234		free_newdirblk(WK_NEWDIRBLK(wk));
7235	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7236		panic("cancel_newblk: extra newdirblk");
7237
7238	return (jnewblk);
7239}
7240
7241/*
7242 * Schedule the freefrag associated with a newblk to be released once
7243 * the pointers are written and the previous block is no longer needed.
7244 */
7245static void
7246newblk_freefrag(newblk)
7247	struct newblk *newblk;
7248{
7249	struct freefrag *freefrag;
7250
7251	if (newblk->nb_freefrag == NULL)
7252		return;
7253	freefrag = newblk->nb_freefrag;
7254	newblk->nb_freefrag = NULL;
7255	freefrag->ff_state |= COMPLETE;
7256	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7257		add_to_worklist(&freefrag->ff_list, 0);
7258}
7259
7260/*
7261 * Free a newblk. Generate a new freefrag work request if appropriate.
7262 * This must be called after the inode pointer and any direct block pointers
7263 * are valid or fully removed via truncate or frag extension.
7264 */
7265static void
7266free_newblk(newblk)
7267	struct newblk *newblk;
7268{
7269	struct indirdep *indirdep;
7270	struct worklist *wk;
7271
7272	KASSERT(newblk->nb_jnewblk == NULL,
7273	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7274	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7275	    ("free_newblk: unclaimed newblk"));
7276	rw_assert(&lk, RA_WLOCKED);
7277	newblk_freefrag(newblk);
7278	if (newblk->nb_state & ONDEPLIST)
7279		LIST_REMOVE(newblk, nb_deps);
7280	if (newblk->nb_state & ONWORKLIST)
7281		WORKLIST_REMOVE(&newblk->nb_list);
7282	LIST_REMOVE(newblk, nb_hash);
7283	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7284		free_newdirblk(WK_NEWDIRBLK(wk));
7285	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7286		panic("free_newblk: extra newdirblk");
7287	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7288		indirdep_complete(indirdep);
7289	handle_jwork(&newblk->nb_jwork);
7290	WORKITEM_FREE(newblk, D_NEWBLK);
7291}
7292
7293/*
7294 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7295 * This routine must be called with splbio interrupts blocked.
7296 */
7297static void
7298free_newdirblk(newdirblk)
7299	struct newdirblk *newdirblk;
7300{
7301	struct pagedep *pagedep;
7302	struct diradd *dap;
7303	struct worklist *wk;
7304
7305	rw_assert(&lk, RA_WLOCKED);
7306	WORKLIST_REMOVE(&newdirblk->db_list);
7307	/*
7308	 * If the pagedep is still linked onto the directory buffer
7309	 * dependency chain, then some of the entries on the
7310	 * pd_pendinghd list may not be committed to disk yet. In
7311	 * this case, we will simply clear the NEWBLOCK flag and
7312	 * let the pd_pendinghd list be processed when the pagedep
7313	 * is next written. If the pagedep is no longer on the buffer
7314	 * dependency chain, then all the entries on the pd_pending
7315	 * list are committed to disk and we can free them here.
7316	 */
7317	pagedep = newdirblk->db_pagedep;
7318	pagedep->pd_state &= ~NEWBLOCK;
7319	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7320		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7321			free_diradd(dap, NULL);
7322		/*
7323		 * If no dependencies remain, the pagedep will be freed.
7324		 */
7325		free_pagedep(pagedep);
7326	}
7327	/* Should only ever be one item in the list. */
7328	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7329		WORKLIST_REMOVE(wk);
7330		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7331	}
7332	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7333}
7334
7335/*
7336 * Prepare an inode to be freed. The actual free operation is not
7337 * done until the zero'ed inode has been written to disk.
7338 */
7339void
7340softdep_freefile(pvp, ino, mode)
7341	struct vnode *pvp;
7342	ino_t ino;
7343	int mode;
7344{
7345	struct inode *ip = VTOI(pvp);
7346	struct inodedep *inodedep;
7347	struct freefile *freefile;
7348	struct freeblks *freeblks;
7349	struct ufsmount *ump;
7350
7351	ump = ip->i_ump;
7352	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
7353	    ("softdep_freefile called on non-softdep filesystem"));
7354	/*
7355	 * This sets up the inode de-allocation dependency.
7356	 */
7357	freefile = malloc(sizeof(struct freefile),
7358		M_FREEFILE, M_SOFTDEP_FLAGS);
7359	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7360	freefile->fx_mode = mode;
7361	freefile->fx_oldinum = ino;
7362	freefile->fx_devvp = ip->i_devvp;
7363	LIST_INIT(&freefile->fx_jwork);
7364	UFS_LOCK(ump);
7365	ip->i_fs->fs_pendinginodes += 1;
7366	UFS_UNLOCK(ump);
7367
7368	/*
7369	 * If the inodedep does not exist, then the zero'ed inode has
7370	 * been written to disk. If the allocated inode has never been
7371	 * written to disk, then the on-disk inode is zero'ed. In either
7372	 * case we can free the file immediately.  If the journal was
7373	 * canceled before being written the inode will never make it to
7374	 * disk and we must send the canceled journal entries to
7375	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7376	 * Any blocks waiting on the inode write can be safely freed
7377	 * here as it will never be written.
7378	 */
7379	ACQUIRE_LOCK(&lk);
7380	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7381	if (inodedep) {
7382		/*
7383		 * Clear out freeblks that no longer need to reference
7384		 * this inode.
7385		 */
7386		while ((freeblks =
7387		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7388			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7389			    fb_next);
7390			freeblks->fb_state &= ~ONDEPLIST;
7391		}
7392		/*
7393		 * Remove this inode from the unlinked list.
7394		 */
7395		if (inodedep->id_state & UNLINKED) {
7396			/*
7397			 * Save the journal work to be freed with the bitmap
7398			 * before we clear UNLINKED.  Otherwise it can be lost
7399			 * if the inode block is written.
7400			 */
7401			handle_bufwait(inodedep, &freefile->fx_jwork);
7402			clear_unlinked_inodedep(inodedep);
7403			/* Re-acquire inodedep as we've dropped lk. */
7404			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7405		}
7406	}
7407	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7408		FREE_LOCK(&lk);
7409		handle_workitem_freefile(freefile);
7410		return;
7411	}
7412	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7413		inodedep->id_state |= GOINGAWAY;
7414	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7415	FREE_LOCK(&lk);
7416	if (ip->i_number == ino)
7417		ip->i_flag |= IN_MODIFIED;
7418}
7419
7420/*
7421 * Check to see if an inode has never been written to disk. If
7422 * so free the inodedep and return success, otherwise return failure.
7423 * This routine must be called with splbio interrupts blocked.
7424 *
7425 * If we still have a bitmap dependency, then the inode has never
7426 * been written to disk. Drop the dependency as it is no longer
7427 * necessary since the inode is being deallocated. We set the
7428 * ALLCOMPLETE flags since the bitmap now properly shows that the
7429 * inode is not allocated. Even if the inode is actively being
7430 * written, it has been rolled back to its zero'ed state, so we
7431 * are ensured that a zero inode is what is on the disk. For short
7432 * lived files, this change will usually result in removing all the
7433 * dependencies from the inode so that it can be freed immediately.
7434 */
7435static int
7436check_inode_unwritten(inodedep)
7437	struct inodedep *inodedep;
7438{
7439
7440	rw_assert(&lk, RA_WLOCKED);
7441
7442	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7443	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7444	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7445	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7446	    !LIST_EMPTY(&inodedep->id_inowait) ||
7447	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7448	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7449	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7450	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7451	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7452	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7453	    inodedep->id_mkdiradd != NULL ||
7454	    inodedep->id_nlinkdelta != 0)
7455		return (0);
7456	/*
7457	 * Another process might be in initiate_write_inodeblock_ufs[12]
7458	 * trying to allocate memory without holding "Softdep Lock".
7459	 */
7460	if ((inodedep->id_state & IOSTARTED) != 0 &&
7461	    inodedep->id_savedino1 == NULL)
7462		return (0);
7463
7464	if (inodedep->id_state & ONDEPLIST)
7465		LIST_REMOVE(inodedep, id_deps);
7466	inodedep->id_state &= ~ONDEPLIST;
7467	inodedep->id_state |= ALLCOMPLETE;
7468	inodedep->id_bmsafemap = NULL;
7469	if (inodedep->id_state & ONWORKLIST)
7470		WORKLIST_REMOVE(&inodedep->id_list);
7471	if (inodedep->id_savedino1 != NULL) {
7472		free(inodedep->id_savedino1, M_SAVEDINO);
7473		inodedep->id_savedino1 = NULL;
7474	}
7475	if (free_inodedep(inodedep) == 0)
7476		panic("check_inode_unwritten: busy inode");
7477	return (1);
7478}
7479
7480/*
7481 * Try to free an inodedep structure. Return 1 if it could be freed.
7482 */
7483static int
7484free_inodedep(inodedep)
7485	struct inodedep *inodedep;
7486{
7487
7488	rw_assert(&lk, RA_WLOCKED);
7489	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7490	    (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7491	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7492	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7493	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7494	    !LIST_EMPTY(&inodedep->id_inowait) ||
7495	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7496	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7497	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7498	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7499	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7500	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7501	    inodedep->id_mkdiradd != NULL ||
7502	    inodedep->id_nlinkdelta != 0 ||
7503	    inodedep->id_savedino1 != NULL)
7504		return (0);
7505	if (inodedep->id_state & ONDEPLIST)
7506		LIST_REMOVE(inodedep, id_deps);
7507	LIST_REMOVE(inodedep, id_hash);
7508	WORKITEM_FREE(inodedep, D_INODEDEP);
7509	return (1);
7510}
7511
7512/*
7513 * Free the block referenced by a freework structure.  The parent freeblks
7514 * structure is released and completed when the final cg bitmap reaches
7515 * the disk.  This routine may be freeing a jnewblk which never made it to
7516 * disk in which case we do not have to wait as the operation is undone
7517 * in memory immediately.
7518 */
7519static void
7520freework_freeblock(freework)
7521	struct freework *freework;
7522{
7523	struct freeblks *freeblks;
7524	struct jnewblk *jnewblk;
7525	struct ufsmount *ump;
7526	struct workhead wkhd;
7527	struct fs *fs;
7528	int bsize;
7529	int needj;
7530
7531	rw_assert(&lk, RA_WLOCKED);
7532	/*
7533	 * Handle partial truncate separately.
7534	 */
7535	if (freework->fw_indir) {
7536		complete_trunc_indir(freework);
7537		return;
7538	}
7539	freeblks = freework->fw_freeblks;
7540	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7541	fs = ump->um_fs;
7542	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7543	bsize = lfragtosize(fs, freework->fw_frags);
7544	LIST_INIT(&wkhd);
7545	/*
7546	 * DEPCOMPLETE is cleared in indirblk_insert() if the block lives
7547	 * on the indirblk hashtable and prevents premature freeing.
7548	 */
7549	freework->fw_state |= DEPCOMPLETE;
7550	/*
7551	 * SUJ needs to wait for the segment referencing freed indirect
7552	 * blocks to expire so that we know the checker will not confuse
7553	 * a re-allocated indirect block with its old contents.
7554	 */
7555	if (needj && freework->fw_lbn <= -NDADDR)
7556		indirblk_insert(freework);
7557	/*
7558	 * If we are canceling an existing jnewblk pass it to the free
7559	 * routine, otherwise pass the freeblk which will ultimately
7560	 * release the freeblks.  If we're not journaling, we can just
7561	 * free the freeblks immediately.
7562	 */
7563	jnewblk = freework->fw_jnewblk;
7564	if (jnewblk != NULL) {
7565		cancel_jnewblk(jnewblk, &wkhd);
7566		needj = 0;
7567	} else if (needj) {
7568		freework->fw_state |= DELAYEDFREE;
7569		freeblks->fb_cgwait++;
7570		WORKLIST_INSERT(&wkhd, &freework->fw_list);
7571	}
7572	FREE_LOCK(&lk);
7573	freeblks_free(ump, freeblks, btodb(bsize));
7574	CTR4(KTR_SUJ,
7575	    "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7576	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7577	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7578	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7579	ACQUIRE_LOCK(&lk);
7580	/*
7581	 * The jnewblk will be discarded and the bits in the map never
7582	 * made it to disk.  We can immediately free the freeblk.
7583	 */
7584	if (needj == 0)
7585		handle_written_freework(freework);
7586}
7587
7588/*
7589 * We enqueue freework items that need processing back on the freeblks and
7590 * add the freeblks to the worklist.  This makes it easier to find all work
7591 * required to flush a truncation in process_truncates().
7592 */
7593static void
7594freework_enqueue(freework)
7595	struct freework *freework;
7596{
7597	struct freeblks *freeblks;
7598
7599	freeblks = freework->fw_freeblks;
7600	if ((freework->fw_state & INPROGRESS) == 0)
7601		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7602	if ((freeblks->fb_state &
7603	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7604	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7605		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7606}
7607
7608/*
7609 * Start, continue, or finish the process of freeing an indirect block tree.
7610 * The free operation may be paused at any point with fw_off containing the
7611 * offset to restart from.  This enables us to implement some flow control
7612 * for large truncates which may fan out and generate a huge number of
7613 * dependencies.
7614 */
7615static void
7616handle_workitem_indirblk(freework)
7617	struct freework *freework;
7618{
7619	struct freeblks *freeblks;
7620	struct ufsmount *ump;
7621	struct fs *fs;
7622
7623	freeblks = freework->fw_freeblks;
7624	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7625	fs = ump->um_fs;
7626	if (freework->fw_state & DEPCOMPLETE) {
7627		handle_written_freework(freework);
7628		return;
7629	}
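	/*
	 * Once fw_off has advanced past the last pointer every child has
	 * been processed and the indirect block itself can be freed.
	 */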
7630	if (freework->fw_off == NINDIR(fs)) {
7631		freework_freeblock(freework);
7632		return;
7633	}
7634	freework->fw_state |= INPROGRESS;
7635	FREE_LOCK(&lk);
7636	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7637	    freework->fw_lbn);
7638	ACQUIRE_LOCK(&lk);
7639}
7640
7641/*
7642 * Called when a freework structure attached to a cg buf is written.  The
7643 * ref on either the parent or the freeblks structure is released and
7644 * the freeblks is added back to the worklist if there is more work to do.
7645 */
7646static void
7647handle_written_freework(freework)
7648	struct freework *freework;
7649{
7650	struct freeblks *freeblks;
7651	struct freework *parent;
7652
7653	freeblks = freework->fw_freeblks;
7654	parent = freework->fw_parent;
7655	if (freework->fw_state & DELAYEDFREE)
7656		freeblks->fb_cgwait--;
7657	freework->fw_state |= COMPLETE;
7658	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7659		WORKITEM_FREE(freework, D_FREEWORK);
7660	if (parent) {
7661		if (--parent->fw_ref == 0)
7662			freework_enqueue(parent);
7663		return;
7664	}
7665	if (--freeblks->fb_ref != 0)
7666		return;
7667	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7668	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7669		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7670}
7671
7672/*
7673 * This workitem routine performs the block de-allocation.
7674 * The workitem is added to the pending list after the updated
7675 * inode block has been written to disk.  As mentioned above,
7676 * checks regarding the number of blocks de-allocated (compared
7677 * to the number of blocks allocated for the file) are also
7678 * performed in this function.
7679 */
7680static int
7681handle_workitem_freeblocks(freeblks, flags)
7682	struct freeblks *freeblks;
7683	int flags;
7684{
7685	struct freework *freework;
7686	struct newblk *newblk;
7687	struct allocindir *aip;
7688	struct ufsmount *ump;
7689	struct worklist *wk;
7690
7691	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7692	    ("handle_workitem_freeblocks: Journal entries not written."));
7693	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7694	ACQUIRE_LOCK(&lk);
7695	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7696		WORKLIST_REMOVE(wk);
7697		switch (wk->wk_type) {
7698		case D_DIRREM:
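			/*
			 * Directory removals deferred by cancel_pagedep()
			 * can now be handed to the general worklist.
			 */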
7699			wk->wk_state |= COMPLETE;
7700			add_to_worklist(wk, 0);
7701			continue;
7702
7703		case D_ALLOCDIRECT:
7704			free_newblk(WK_NEWBLK(wk));
7705			continue;
7706
7707		case D_ALLOCINDIR:
7708			aip = WK_ALLOCINDIR(wk);
7709			freework = NULL;
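			/*
			 * A DELAYEDFREE allocindir had its pointer cleared
			 * during a partial truncate; allocate a freework so
			 * the block and its journal work can be released.
			 */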
7710			if (aip->ai_state & DELAYEDFREE) {
7711				FREE_LOCK(&lk);
7712				freework = newfreework(ump, freeblks, NULL,
7713				    aip->ai_lbn, aip->ai_newblkno,
7714				    ump->um_fs->fs_frag, 0, 0);
7715				ACQUIRE_LOCK(&lk);
7716			}
7717			newblk = WK_NEWBLK(wk);
7718			if (newblk->nb_jnewblk) {
7719				freework->fw_jnewblk = newblk->nb_jnewblk;
7720				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7721				newblk->nb_jnewblk = NULL;
7722			}
7723			free_newblk(newblk);
7724			continue;
7725
7726		case D_FREEWORK:
7727			freework = WK_FREEWORK(wk);
7728			if (freework->fw_lbn <= -NDADDR)
7729				handle_workitem_indirblk(freework);
7730			else
7731				freework_freeblock(freework);
7732			continue;
7733		default:
7734			panic("handle_workitem_freeblocks: Unknown type %s",
7735			    TYPENAME(wk->wk_type));
7736		}
7737	}
7738	if (freeblks->fb_ref != 0) {
7739		freeblks->fb_state &= ~INPROGRESS;
7740		wake_worklist(&freeblks->fb_list);
7741		freeblks = NULL;
7742	}
7743	FREE_LOCK(&lk);
7744	if (freeblks)
7745		return handle_complete_freeblocks(freeblks, flags);
7746	return (0);
7747}
7748
7749/*
7750 * Handle completion of block free via truncate.  This allows fs_pending
7751 * Handle completion of block free via truncate.  This allows fs_pendingblocks
7752 * to track the actual free block count more closely than if we only updated
7753 * on free was incorrect.
7754 */
7755static void
7756freeblks_free(ump, freeblks, blocks)
7757	struct ufsmount *ump;
7758	struct freeblks *freeblks;
7759	int blocks;
7760{
7761	struct fs *fs;
7762	ufs2_daddr_t remain;
7763
7764	UFS_LOCK(ump);
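	/*
	 * fb_chkcnt counts up from the negated number of blocks expected
	 * to be released; 'remain' is how many are still outstanding and
	 * bounds the fs_pendingblocks adjustment.
	 */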
7765	remain = -freeblks->fb_chkcnt;
7766	freeblks->fb_chkcnt += blocks;
7767	if (remain > 0) {
7768		if (remain < blocks)
7769			blocks = remain;
7770		fs = ump->um_fs;
7771		fs->fs_pendingblocks -= blocks;
7772	}
7773	UFS_UNLOCK(ump);
7774}
7775
7776/*
7777 * Once all of the freework workitems are complete we can retire the
7778 * freeblocks dependency and any journal work awaiting completion.  This
7779 * can not be called until all other dependencies are stable on disk.
7780 */
7781static int
7782handle_complete_freeblocks(freeblks, flags)
7783	struct freeblks *freeblks;
7784	int flags;
7785{
7786	struct inodedep *inodedep;
7787	struct inode *ip;
7788	struct vnode *vp;
7789	struct fs *fs;
7790	struct ufsmount *ump;
7791	ufs2_daddr_t spare;
7792
7793	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7794	fs = ump->um_fs;
7795	flags = LK_EXCLUSIVE | flags;
7796	spare = freeblks->fb_chkcnt;
7797
7798	/*
7799	 * If we did not release the expected number of blocks we may have
7800	 * to adjust the inode block count here.  Only do so if it wasn't
7801	 * a truncation to zero and the modrev still matches.
7802	 */
7803	if (spare && freeblks->fb_len != 0) {
7804		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7805		    flags, &vp, FFSV_FORCEINSMQ) != 0)
7806			return (EBUSY);
7807		ip = VTOI(vp);
7808		if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7809			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7810			ip->i_flag |= IN_CHANGE;
7811			/*
7812			 * We must wait so this happens before the
7813			 * journal is reclaimed.
7814			 */
7815			ffs_update(vp, 1);
7816		}
7817		vput(vp);
7818	}
7819	if (spare < 0) {
7820		UFS_LOCK(ump);
7821		fs->fs_pendingblocks += spare;
7822		UFS_UNLOCK(ump);
7823	}
7824#ifdef QUOTA
7825	/* Handle spare. */
7826	if (spare)
7827		quotaadj(freeblks->fb_quota, ump, -spare);
7828	quotarele(freeblks->fb_quota);
7829#endif
7830	ACQUIRE_LOCK(&lk);
7831	if (freeblks->fb_state & ONDEPLIST) {
7832		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7833		    0, &inodedep);
7834		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
7835		freeblks->fb_state &= ~ONDEPLIST;
7836		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
7837			free_inodedep(inodedep);
7838	}
7839	/*
7840	 * All of the freeblock deps must be complete prior to this call
7841	 * so it's now safe to complete earlier outstanding journal entries.
7842	 */
7843	handle_jwork(&freeblks->fb_jwork);
7844	WORKITEM_FREE(freeblks, D_FREEBLKS);
7845	FREE_LOCK(&lk);
7846	return (0);
7847}
7848
7849/*
7850 * Release blocks associated with the freeblks and stored in the indirect
7851 * block dbn. If level is greater than SINGLE, the block is an indirect block
7852 * and recursive calls to indir_trunc must be used to cleanse other indirect
7853 * blocks.
7854 *
7855 * This handles partial and complete truncation of blocks.  Partial is noted
7856 * with goingaway == 0.  In this case the freework is completed after the
7857 * zero'd indirects are written to disk.  For full truncation the freework
7858 * is completed after the block is freed.
7859 */
7860static void
7861indir_trunc(freework, dbn, lbn)
7862	struct freework *freework;
7863	ufs2_daddr_t dbn;
7864	ufs_lbn_t lbn;
7865{
7866	struct freework *nfreework;
7867	struct workhead wkhd;
7868	struct freeblks *freeblks;
7869	struct buf *bp;
7870	struct fs *fs;
7871	struct indirdep *indirdep;
7872	struct ufsmount *ump;
7873	ufs1_daddr_t *bap1 = NULL;
7874	ufs2_daddr_t nb, nnb, *bap2 = NULL;
7875	ufs_lbn_t lbnadd, nlbn;
7876	int i, nblocks, ufs1fmt;
7877	int freedblocks;
7878	int goingaway;
7879	int freedeps;
7880	int needj;
7881	int level;
7882	int cnt;
7883
7884	freeblks = freework->fw_freeblks;
7885	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7886	fs = ump->um_fs;
7887	/*
7888	 * Get buffer of block pointers to be freed.  There are three cases:
7889	 *
7890	 * 1) Partial truncate caches the indirdep pointer in the freework
7891	 *    which gives us a back pointer to the saved bp that holds the
7892	 *    pointers we want to clear.  When this completes the zero
7893	 *    pointers are written to the real copy.
7894	 * 2) The indirect is being completely truncated, cancel_indirdep()
7895	 *    eliminated the real copy and placed the indirdep on the saved
7896	 *    copy.  The indirdep and buf are discarded when this completes.
7897	 * 3) The indirect was not in memory, we read a copy off of the disk
7898	 *    using the devvp and drop and invalidate the buffer when we're
7899	 *    done.
7900	 */
7901	goingaway = 1;
7902	indirdep = NULL;
7903	if (freework->fw_indir != NULL) {
7904		goingaway = 0;
7905		indirdep = freework->fw_indir;
7906		bp = indirdep->ir_savebp;
7907		if (bp == NULL || bp->b_blkno != dbn)
7908			panic("indir_trunc: Bad saved buf %p blkno %jd",
7909			    bp, (intmax_t)dbn);
7910	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
7911		/*
7912		 * The lock prevents the buf dep list from changing and
7913	 	 * indirects on devvp should only ever have one dependency.
7914		 * indirects on devvp should only ever have one dependency.
7915		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
7916		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
7917			panic("indir_trunc: Bad indirdep %p from buf %p",
7918			    indirdep, bp);
7919	} else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
7920	    NOCRED, &bp) != 0) {
7921		brelse(bp);
7922		return;
7923	}
7924	ACQUIRE_LOCK(&lk);
7925	/* Protects against a race with complete_trunc_indir(). */
7926	freework->fw_state &= ~INPROGRESS;
7927	/*
7928	 * If we have an indirdep we need to enforce the truncation order
7929	 * and discard it when it is complete.
7930	 */
7931	if (indirdep) {
7932		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
7933		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
7934			/*
7935			 * Add the complete truncate to the list on the
7936			 * indirdep to enforce in-order processing.
7937			 */
7938			if (freework->fw_indir == NULL)
7939				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
7940				    freework, fw_next);
7941			FREE_LOCK(&lk);
7942			return;
7943		}
7944		/*
7945		 * If we're goingaway, free the indirdep.  Otherwise it will
7946		 * linger until the write completes.
7947		 */
7948		if (goingaway) {
7949			free_indirdep(indirdep);
7950			ump->um_numindirdeps -= 1;
7951		}
7952	}
7953	FREE_LOCK(&lk);
7954	/* Initialize pointers depending on block size. */
7955	if (ump->um_fstype == UFS1) {
7956		bap1 = (ufs1_daddr_t *)bp->b_data;
7957		nb = bap1[freework->fw_off];
7958		ufs1fmt = 1;
7959	} else {
7960		bap2 = (ufs2_daddr_t *)bp->b_data;
7961		nb = bap2[freework->fw_off];
7962		ufs1fmt = 0;
7963	}
7964	level = lbn_level(lbn);
7965	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
7966	lbnadd = lbn_offset(fs, level);
7967	nblocks = btodb(fs->fs_bsize);
7968	nfreework = freework;
7969	freedeps = 0;
7970	cnt = 0;
7971	/*
7972	 * Reclaim blocks.  Traverses into nested indirect levels and
7973	 * arranges for the current level to be freed when subordinates
7974	 * are free when journaling.
7975	 */
7976	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
7977		if (i != NINDIR(fs) - 1) {
7978			if (ufs1fmt)
7979				nnb = bap1[i+1];
7980			else
7981				nnb = bap2[i+1];
7982		} else
7983			nnb = 0;
7984		if (nb == 0)
7985			continue;
7986		cnt++;
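		/*
		 * Upper-level indirects are descended recursively; at the
		 * final level the data blocks are freed directly, with
		 * journal freedeps batched per cylinder group.
		 */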
7987		if (level != 0) {
7988			nlbn = (lbn + 1) - (i * lbnadd);
7989			if (needj != 0) {
7990				nfreework = newfreework(ump, freeblks, freework,
7991				    nlbn, nb, fs->fs_frag, 0, 0);
7992				freedeps++;
7993			}
7994			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
7995		} else {
7996			struct freedep *freedep;
7997
7998			/*
7999			 * Attempt to aggregate freedep dependencies for
8000			 * all blocks being released to the same CG.
8001			 */
8002			LIST_INIT(&wkhd);
8003			if (needj != 0 &&
8004			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
8005				freedep = newfreedep(freework);
8006				WORKLIST_INSERT_UNLOCKED(&wkhd,
8007				    &freedep->fd_list);
8008				freedeps++;
8009			}
8010			CTR3(KTR_SUJ,
8011			    "indir_trunc: ino %d blkno %jd size %ld",
8012			    freeblks->fb_inum, nb, fs->fs_bsize);
8013			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
8014			    fs->fs_bsize, freeblks->fb_inum,
8015			    freeblks->fb_vtype, &wkhd);
8016		}
8017	}
8018	if (goingaway) {
8019		bp->b_flags |= B_INVAL | B_NOCACHE;
8020		brelse(bp);
8021	}
8022	freedblocks = 0;
8023	if (level == 0)
8024		freedblocks = (nblocks * cnt);
8025	if (needj == 0)
8026		freedblocks += nblocks;
8027	freeblks_free(ump, freeblks, freedblocks);
8028	/*
8029	 * If we are journaling set up the ref counts and offset so this
8030	 * indirect can be completed when its children are free.
8031	 */
8032	if (needj) {
8033		ACQUIRE_LOCK(&lk);
8034		freework->fw_off = i;
8035		freework->fw_ref += freedeps;
8036		freework->fw_ref -= NINDIR(fs) + 1;
8037		if (level == 0)
8038			freeblks->fb_cgwait += freedeps;
8039		if (freework->fw_ref == 0)
8040			freework_freeblock(freework);
8041		FREE_LOCK(&lk);
8042		return;
8043	}
8044	/*
8045	 * If we're not journaling we can free the indirect now.
8046	 */
8047	dbn = dbtofsb(fs, dbn);
8048	CTR3(KTR_SUJ,
8049	    "indir_trunc 2: ino %d blkno %jd size %ld",
8050	    freeblks->fb_inum, dbn, fs->fs_bsize);
8051	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8052	    freeblks->fb_inum, freeblks->fb_vtype, NULL);
8053	/* Non SUJ softdep does single-threaded truncations. */
8054	if (freework->fw_blkno == dbn) {
8055		freework->fw_state |= ALLCOMPLETE;
8056		ACQUIRE_LOCK(&lk);
8057		handle_written_freework(freework);
8058		FREE_LOCK(&lk);
8059	}
8060	return;
8061}
8062
8063/*
8064 * Cancel an allocindir when it is removed via truncation.  When bp is not
8065 * NULL the indirect never appeared on disk and is scheduled to be freed
8066 * independently of the indir so we can more easily track journal work.
8067 */
8068static void
8069cancel_allocindir(aip, bp, freeblks, trunc)
8070	struct allocindir *aip;
8071	struct buf *bp;
8072	struct freeblks *freeblks;
8073	int trunc;
8074{
8075	struct indirdep *indirdep;
8076	struct freefrag *freefrag;
8077	struct newblk *newblk;
8078
8079	newblk = (struct newblk *)aip;
8080	LIST_REMOVE(aip, ai_next);
8081	/*
8082	 * We must eliminate the pointer in bp if it must be freed on its
8083	 * own due to partial truncate or pending journal work.
8084	 */
8085	if (bp && (trunc || newblk->nb_jnewblk)) {
8086		/*
8087		 * Clear the pointer and mark the aip to be freed
8088		 * directly if it never existed on disk.
8089		 */
8090		aip->ai_state |= DELAYEDFREE;
8091		indirdep = aip->ai_indirdep;
8092		if (indirdep->ir_state & UFS1FMT)
8093			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8094		else
8095			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8096	}
8097	/*
8098	 * When truncating the previous pointer will be freed via
8099	 * savedbp.  Eliminate the freefrag which would dup free.
8100	 * savedbp.  Eliminate the freefrag which would otherwise duplicate the free.
8101	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8102		newblk->nb_freefrag = NULL;
8103		if (freefrag->ff_jdep)
8104			cancel_jfreefrag(
8105			    WK_JFREEFRAG(freefrag->ff_jdep));
8106		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8107		WORKITEM_FREE(freefrag, D_FREEFRAG);
8108	}
8109	/*
8110	 * If the journal hasn't been written the jnewblk must be passed
8111	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8112	 * this by leaving the journal dependency on the newblk to be freed
8113	 * when a freework is created in handle_workitem_freeblocks().
8114	 */
8115	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8116	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8117}
8118
8119/*
8120 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8121 * in to a newdirblk so any subsequent additions are tracked properly.  The
8122 * into a newdirblk so any subsequent additions are tracked properly.  The
8123 * and updating id_mkdiradd.  This function returns with lk held.
8124 */
8125static struct mkdir *
8126setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8127	struct diradd *dap;
8128	ino_t newinum;
8129	ino_t dinum;
8130	struct buf *newdirbp;
8131	struct mkdir **mkdirp;
8132{
8133	struct newblk *newblk;
8134	struct pagedep *pagedep;
8135	struct inodedep *inodedep;
8136	struct newdirblk *newdirblk = NULL;
8137	struct mkdir *mkdir1, *mkdir2;
8138	struct worklist *wk;
8139	struct jaddref *jaddref;
8140	struct mount *mp;
8141
8142	mp = dap->da_list.wk_mp;
8143	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8144	    M_SOFTDEP_FLAGS);
8145	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8146	LIST_INIT(&newdirblk->db_mkdir);
8147	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8148	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8149	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8150	mkdir1->md_diradd = dap;
8151	mkdir1->md_jaddref = NULL;
8152	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8153	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8154	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8155	mkdir2->md_diradd = dap;
8156	mkdir2->md_jaddref = NULL;
8157	if (MOUNTEDSUJ(mp) == 0) {
8158		mkdir1->md_state |= DEPCOMPLETE;
8159		mkdir2->md_state |= DEPCOMPLETE;
8160	}
8161	/*
8162	 * Dependency on "." and ".." being written to disk.
8163	 */
8164	mkdir1->md_buf = newdirbp;
8165	ACQUIRE_LOCK(&lk);
8166	LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
8167	/*
8168	 * We must link the pagedep, allocdirect, and newdirblk for
8169	 * the initial file page so the pointer to the new directory
8170	 * is not written until the directory contents are live and
8171	 * any subsequent additions are not marked live until the
8172	 * block is reachable via the inode.
8173	 */
8174	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8175		panic("setup_newdir: lost pagedep");
8176	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8177		if (wk->wk_type == D_ALLOCDIRECT)
8178			break;
8179	if (wk == NULL)
8180		panic("setup_newdir: lost allocdirect");
8181	if (pagedep->pd_state & NEWBLOCK)
8182		panic("setup_newdir: NEWBLOCK already set");
8183	newblk = WK_NEWBLK(wk);
8184	pagedep->pd_state |= NEWBLOCK;
8185	pagedep->pd_newdirblk = newdirblk;
8186	newdirblk->db_pagedep = pagedep;
8187	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8188	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8189	/*
8190	 * Look up the inodedep for the parent directory so that we
8191	 * can link mkdir2 into the pending dotdot jaddref or
8192	 * the inode write if there is none.  If the inode is
8193	 * ALLCOMPLETE and no jaddref is present all dependencies have
8194	 * been satisfied and mkdir2 can be freed.
8195	 */
8196	inodedep_lookup(mp, dinum, 0, &inodedep);
8197	if (MOUNTEDSUJ(mp)) {
8198		if (inodedep == NULL)
8199			panic("setup_newdir: Lost parent.");
8200		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8201		    inoreflst);
8202		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8203		    (jaddref->ja_state & MKDIR_PARENT),
8204		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8205		LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8206		mkdir2->md_jaddref = jaddref;
8207		jaddref->ja_mkdir = mkdir2;
8208	} else if (inodedep == NULL ||
8209	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8210		dap->da_state &= ~MKDIR_PARENT;
8211		WORKITEM_FREE(mkdir2, D_MKDIR);
8212		mkdir2 = NULL;
8213	} else {
8214		LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8215		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8216	}
8217	*mkdirp = mkdir2;
8218
8219	return (mkdir1);
8220}
8221
8222/*
8223 * Directory entry addition dependencies.
8224 *
8225 * When adding a new directory entry, the inode (with its incremented link
8226 * count) must be written to disk before the directory entry's pointer to it.
8227 * Also, if the inode is newly allocated, the corresponding freemap must be
8228 * updated (on disk) before the directory entry's pointer. These requirements
8229 * are met via undo/redo on the directory entry's pointer, which consists
8230 * simply of the inode number.
8231 *
8232 * As directory entries are added and deleted, the free space within a
8233 * directory block can become fragmented.  The ufs filesystem will compact
8234 * a fragmented directory block to make space for a new entry. When this
8235 * occurs, the offsets of previously added entries change. Any "diradd"
8236 * dependency structures corresponding to these entries must be updated with
8237 * the new offsets.
8238 */
8239
8240/*
8241 * This routine is called after the in-memory inode's link
8242 * count has been incremented, but before the directory entry's
8243 * pointer to the inode has been set.
8244 */
8245int
8246softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8247	struct buf *bp;		/* buffer containing directory block */
8248	struct inode *dp;	/* inode for directory */
8249	off_t diroffset;	/* offset of new entry in directory */
8250	ino_t newinum;		/* inode referenced by new directory entry */
8251	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
8252	int isnewblk;		/* entry is in a newly allocated block */
8253{
8254	int offset;		/* offset of new entry within directory block */
8255	ufs_lbn_t lbn;		/* block in directory containing new entry */
8256	struct fs *fs;
8257	struct diradd *dap;
8258	struct newblk *newblk;
8259	struct pagedep *pagedep;
8260	struct inodedep *inodedep;
8261	struct newdirblk *newdirblk = NULL;
8262	struct mkdir *mkdir1, *mkdir2;
8263	struct jaddref *jaddref;
8264	struct ufsmount *ump;
8265	struct mount *mp;
8266	int isindir;
8267
8268	ump = dp->i_ump;
8269	mp = UFSTOVFS(ump);
8270	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8271	    ("softdep_setup_directory_add called on non-softdep filesystem"));
8272	/*
8273	 * Whiteouts have no dependencies.
8274	 */
8275	if (newinum == WINO) {
8276		if (newdirbp != NULL)
8277			bdwrite(newdirbp);
8278		return (0);
8279	}
8280	jaddref = NULL;
8281	mkdir1 = mkdir2 = NULL;
8282	fs = dp->i_fs;
8283	lbn = lblkno(fs, diroffset);
8284	offset = blkoff(fs, diroffset);
8285	dap = malloc(sizeof(struct diradd), M_DIRADD,
8286		M_SOFTDEP_FLAGS|M_ZERO);
8287	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8288	dap->da_offset = offset;
8289	dap->da_newinum = newinum;
8290	dap->da_state = ATTACHED;
8291	LIST_INIT(&dap->da_jwork);
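	/*
	 * Only track a newdirblk when the new entry begins a newly
	 * allocated block; blocks in the indirect range are allocated
	 * whole while direct blocks may grow a fragment at a time.
	 */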
8292	isindir = bp->b_lblkno >= NDADDR;
8293	if (isnewblk &&
8294	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8295		newdirblk = malloc(sizeof(struct newdirblk),
8296		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8297		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8298		LIST_INIT(&newdirblk->db_mkdir);
8299	}
8300	/*
8301	 * If we're creating a new directory setup the dependencies and set
8302	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8303	 * we can move on.
8304	 */
8305	if (newdirbp == NULL) {
8306		dap->da_state |= DEPCOMPLETE;
8307		ACQUIRE_LOCK(&lk);
8308	} else {
8309		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8310		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8311		    &mkdir2);
8312	}
8313	/*
8314	 * Link into parent directory pagedep to await its being written.
8315	 */
8316	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8317#ifdef DEBUG
8318	if (diradd_lookup(pagedep, offset) != NULL)
8319		panic("softdep_setup_directory_add: %p already at off %d\n",
8320		    diradd_lookup(pagedep, offset), offset);
8321#endif
8322	dap->da_pagedep = pagedep;
8323	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8324	    da_pdlist);
8325	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
8326	/*
8327	 * If we're journaling, link the diradd into the jaddref so it
8328	 * may be completed after the journal entry is written.  Otherwise,
8329	 * link the diradd into its inodedep.  If the inode is not yet
8330	 * written place it on the bufwait list, otherwise do the post-inode
8331	 * write processing to put it on the id_pendinghd list.
8332	 */
8333	if (MOUNTEDSUJ(mp)) {
8334		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8335		    inoreflst);
8336		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8337		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8338		jaddref->ja_diroff = diroffset;
8339		jaddref->ja_diradd = dap;
8340		add_to_journal(&jaddref->ja_list);
8341	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8342		diradd_inode_written(dap, inodedep);
8343	else
8344		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8345	/*
8346	 * Add the journal entries for . and .. links now that the primary
8347	 * link is written.
8348	 */
8349	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8350		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8351		    inoreflst, if_deps);
8352		KASSERT(jaddref != NULL &&
8353		    jaddref->ja_ino == jaddref->ja_parent &&
8354		    (jaddref->ja_state & MKDIR_BODY),
8355		    ("softdep_setup_directory_add: bad dot jaddref %p",
8356		    jaddref));
8357		mkdir1->md_jaddref = jaddref;
8358		jaddref->ja_mkdir = mkdir1;
8359		/*
8360		 * It is important that the dotdot journal entry
8361		 * is added prior to the dot entry since dot writes
8362		 * both the dot and dotdot links.  These both must
8363		 * be added after the primary link for the journal
8364		 * to remain consistent.
8365		 */
8366		add_to_journal(&mkdir2->md_jaddref->ja_list);
8367		add_to_journal(&jaddref->ja_list);
8368	}
8369	/*
8370	 * If we are adding a new directory remember this diradd so that if
8371	 * we rename it we can keep the dot and dotdot dependencies.  If
8372	 * we are adding a new name for an inode that has a mkdiradd we
8373	 * must be in rename and we have to move the dot and dotdot
8374	 * dependencies to this new name.  The old name is being orphaned
8375	 * soon.
8376	 */
8377	if (mkdir1 != NULL) {
8378		if (inodedep->id_mkdiradd != NULL)
8379			panic("softdep_setup_directory_add: Existing mkdir");
8380		inodedep->id_mkdiradd = dap;
8381	} else if (inodedep->id_mkdiradd)
8382		merge_diradd(inodedep, dap);
8383	if (newdirblk) {
8384		/*
8385		 * There is nothing to do if we are already tracking
8386		 * this block.
8387		 */
8388		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8389			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8390			FREE_LOCK(&lk);
8391			return (0);
8392		}
8393		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8394		    == 0)
8395			panic("softdep_setup_directory_add: lost entry");
8396		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8397		pagedep->pd_state |= NEWBLOCK;
8398		pagedep->pd_newdirblk = newdirblk;
8399		newdirblk->db_pagedep = pagedep;
8400		FREE_LOCK(&lk);
8401		/*
8402		 * If we extended into an indirect, signal direnter to sync.
8403		 */
8404		if (isindir)
8405			return (1);
8406		return (0);
8407	}
8408	FREE_LOCK(&lk);
8409	return (0);
8410}
8411
8412/*
8413 * This procedure is called to change the offset of a directory
8414 * entry when compacting a directory block which must be owned
8415 * exclusively by the caller. Note that the actual entry movement
8416 * must be done in this procedure to ensure that no I/O completions
8417 * occur while the move is in progress.
8418 */
8419void
8420softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8421	struct buf *bp;		/* Buffer holding directory block. */
8422	struct inode *dp;	/* inode for directory */
8423	caddr_t base;		/* address of dp->i_offset */
8424	caddr_t oldloc;		/* address of old directory location */
8425	caddr_t newloc;		/* address of new directory location */
8426	int entrysize;		/* size of directory entry */
8427{
8428	int offset, oldoffset, newoffset;
8429	struct pagedep *pagedep;
8430	struct jmvref *jmvref;
8431	struct diradd *dap;
8432	struct direct *de;
8433	struct mount *mp;
8434	ufs_lbn_t lbn;
8435	int flags;
8436
8437	mp = UFSTOVFS(dp->i_ump);
8438	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
8439	    ("softdep_change_directoryentry_offset called on "
8440	     "non-softdep filesystem"));
8441	de = (struct direct *)oldloc;
8442	jmvref = NULL;
8443	flags = 0;
8444	/*
8445	 * Moves are always journaled as it would be too complex to
8446	 * determine if any affected adds or removes are present in the
8447	 * journal.
8448	 */
8449	if (MOUNTEDSUJ(mp)) {
8450		flags = DEPALLOC;
8451		jmvref = newjmvref(dp, de->d_ino,
8452		    dp->i_offset + (oldloc - base),
8453		    dp->i_offset + (newloc - base));
8454	}
8455	lbn = lblkno(dp->i_fs, dp->i_offset);
8456	offset = blkoff(dp->i_fs, dp->i_offset);
8457	oldoffset = offset + (oldloc - base);
8458	newoffset = offset + (newloc - base);
8459	ACQUIRE_LOCK(&lk);
8460	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8461		goto done;
8462	dap = diradd_lookup(pagedep, oldoffset);
8463	if (dap) {
8464		dap->da_offset = newoffset;
8465		newoffset = DIRADDHASH(newoffset);
8466		oldoffset = DIRADDHASH(oldoffset);
8467		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8468		    newoffset != oldoffset) {
8469			LIST_REMOVE(dap, da_pdlist);
8470			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8471			    dap, da_pdlist);
8472		}
8473	}
8474done:
8475	if (jmvref) {
8476		jmvref->jm_pagedep = pagedep;
8477		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8478		add_to_journal(&jmvref->jm_list);
8479	}
8480	bcopy(oldloc, newloc, entrysize);
8481	FREE_LOCK(&lk);
8482}
8483
8484/*
8485 * Move the mkdir dependencies and journal work from one diradd to another
8486 * when renaming a directory.  The new name must depend on the mkdir deps
8487 * completing as the old name did.  Directories can only have one valid link
8488 * at a time so one must be canonical.
8489 */
8490static void
8491merge_diradd(inodedep, newdap)
8492	struct inodedep *inodedep;
8493	struct diradd *newdap;
8494{
8495	struct diradd *olddap;
8496	struct mkdir *mkdir, *nextmd;
8497	short state;
8498
8499	olddap = inodedep->id_mkdiradd;
8500	inodedep->id_mkdiradd = newdap;
8501	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8502		newdap->da_state &= ~DEPCOMPLETE;
8503		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8504			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8505			if (mkdir->md_diradd != olddap)
8506				continue;
8507			mkdir->md_diradd = newdap;
8508			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8509			newdap->da_state |= state;
8510			olddap->da_state &= ~state;
8511			if ((olddap->da_state &
8512			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8513				break;
8514		}
8515		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8516			panic("merge_diradd: unfound ref");
8517	}
8518	/*
8519	 * Any mkdir related journal items are not safe to be freed until
8520	 * the new name is stable.
8521	 */
8522	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8523	olddap->da_state |= DEPCOMPLETE;
8524	complete_diradd(olddap);
8525}
8526
8527/*
8528 * Move the diradd to the pending list when all diradd dependencies are
8529 * complete.
8530 */
8531static void
8532complete_diradd(dap)
8533	struct diradd *dap;
8534{
8535	struct pagedep *pagedep;
8536
8537	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8538		if (dap->da_state & DIRCHG)
8539			pagedep = dap->da_previous->dm_pagedep;
8540		else
8541			pagedep = dap->da_pagedep;
8542		LIST_REMOVE(dap, da_pdlist);
8543		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8544	}
8545}
8546
8547/*
8548 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8549 * add entries and conditionally journal the remove.
8550 */
8551static void
8552cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8553	struct diradd *dap;
8554	struct dirrem *dirrem;
8555	struct jremref *jremref;
8556	struct jremref *dotremref;
8557	struct jremref *dotdotremref;
8558{
8559	struct inodedep *inodedep;
8560	struct jaddref *jaddref;
8561	struct inoref *inoref;
8562	struct mkdir *mkdir;
8563
8564	/*
8565	 * If no remove references were allocated we're on a non-journaled
8566	 * filesystem and can skip the cancel step.
8567	 */
8568	if (jremref == NULL) {
8569		free_diradd(dap, NULL);
8570		return;
8571	}
8572	/*
8573	 * Cancel the primary name and free it if it does not require
8574	 * journaling.
8575	 */
8576	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8577	    0, &inodedep) != 0) {
8578		/* Abort the addref that references this diradd. */
8579		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8580			if (inoref->if_list.wk_type != D_JADDREF)
8581				continue;
8582			jaddref = (struct jaddref *)inoref;
8583			if (jaddref->ja_diradd != dap)
8584				continue;
8585			if (cancel_jaddref(jaddref, inodedep,
8586			    &dirrem->dm_jwork) == 0) {
8587				free_jremref(jremref);
8588				jremref = NULL;
8589			}
8590			break;
8591		}
8592	}
8593	/*
8594	 * Cancel subordinate names and free them if they do not require
8595	 * journaling.
8596	 */
8597	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8598		LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
8599			if (mkdir->md_diradd != dap)
8600				continue;
8601			if ((jaddref = mkdir->md_jaddref) == NULL)
8602				continue;
8603			mkdir->md_jaddref = NULL;
8604			if (mkdir->md_state & MKDIR_PARENT) {
8605				if (cancel_jaddref(jaddref, NULL,
8606				    &dirrem->dm_jwork) == 0) {
8607					free_jremref(dotdotremref);
8608					dotdotremref = NULL;
8609				}
8610			} else {
8611				if (cancel_jaddref(jaddref, inodedep,
8612				    &dirrem->dm_jwork) == 0) {
8613					free_jremref(dotremref);
8614					dotremref = NULL;
8615				}
8616			}
8617		}
8618	}
8619
8620	if (jremref)
8621		journal_jremref(dirrem, jremref, inodedep);
8622	if (dotremref)
8623		journal_jremref(dirrem, dotremref, inodedep);
8624	if (dotdotremref)
8625		journal_jremref(dirrem, dotdotremref, NULL);
8626	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8627	free_diradd(dap, &dirrem->dm_jwork);
8628}
8629
8630/*
8631 * Free a diradd dependency structure. This routine must be called
8632 * with splbio interrupts blocked.
8633 */
8634static void
8635free_diradd(dap, wkhd)
8636	struct diradd *dap;
8637	struct workhead *wkhd;
8638{
8639	struct dirrem *dirrem;
8640	struct pagedep *pagedep;
8641	struct inodedep *inodedep;
8642	struct mkdir *mkdir, *nextmd;
8643
8644	rw_assert(&lk, RA_WLOCKED);
8645	LIST_REMOVE(dap, da_pdlist);
8646	if (dap->da_state & ONWORKLIST)
8647		WORKLIST_REMOVE(&dap->da_list);
8648	if ((dap->da_state & DIRCHG) == 0) {
8649		pagedep = dap->da_pagedep;
8650	} else {
8651		dirrem = dap->da_previous;
8652		pagedep = dirrem->dm_pagedep;
8653		dirrem->dm_dirinum = pagedep->pd_ino;
8654		dirrem->dm_state |= COMPLETE;
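		/*
		 * The replaced name's dirrem may be processed now unless
		 * journal remove records are still outstanding, in which
		 * case it is queued when they complete.
		 */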
8655		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8656			add_to_worklist(&dirrem->dm_list, 0);
8657	}
8658	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8659	    0, &inodedep) != 0)
8660		if (inodedep->id_mkdiradd == dap)
8661			inodedep->id_mkdiradd = NULL;
8662	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8663		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8664			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8665			if (mkdir->md_diradd != dap)
8666				continue;
8667			dap->da_state &=
8668			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8669			LIST_REMOVE(mkdir, md_mkdirs);
8670			if (mkdir->md_state & ONWORKLIST)
8671				WORKLIST_REMOVE(&mkdir->md_list);
8672			if (mkdir->md_jaddref != NULL)
8673				panic("free_diradd: Unexpected jaddref");
8674			WORKITEM_FREE(mkdir, D_MKDIR);
8675			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8676				break;
8677		}
8678		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8679			panic("free_diradd: unfound ref");
8680	}
8681	if (inodedep)
8682		free_inodedep(inodedep);
8683	/*
8684	 * Free any journal segments waiting for the directory write.
8685	 */
8686	handle_jwork(&dap->da_jwork);
8687	WORKITEM_FREE(dap, D_DIRADD);
8688}
8689
8690/*
8691 * Directory entry removal dependencies.
8692 *
8693 * When removing a directory entry, the entry's inode pointer must be
8694 * zero'ed on disk before the corresponding inode's link count is decremented
8695 * (possibly freeing the inode for re-use). This dependency is handled by
8696 * updating the directory entry but delaying the inode count reduction until
8697 * after the directory block has been written to disk. After this point, the
8698 * inode count can be decremented whenever it is convenient.
8699 */
8700
8701/*
8702 * This routine should be called immediately after removing
8703 * a directory entry.  The inode's link count should not be
8704 * decremented by the calling procedure -- the soft updates
8705 * code will do this task when it is safe.
8706 */
8707void
8708softdep_setup_remove(bp, dp, ip, isrmdir)
8709	struct buf *bp;		/* buffer containing directory block */
8710	struct inode *dp;	/* inode for the directory being modified */
8711	struct inode *ip;	/* inode for directory entry being removed */
8712	int isrmdir;		/* indicates if doing RMDIR */
8713{
8714	struct dirrem *dirrem, *prevdirrem;
8715	struct inodedep *inodedep;
8716	int direct;
8717
8718	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
8719	    ("softdep_setup_remove called on non-softdep filesystem"));
8720	/*
8721	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
8722	 * newdirrem() to set up the full directory remove, which requires
8723	 * isrmdir > 1.
8724	 */
8725	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8726	/*
8727	 * Add the dirrem to the inodedep's pending remove list for quick
8728	 * discovery later.
8729	 */
8730	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8731	    &inodedep) == 0)
8732		panic("softdep_setup_remove: Lost inodedep.");
8733	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8734	dirrem->dm_state |= ONDEPLIST;
8735	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8736
8737	/*
8738	 * If the COMPLETE flag is clear, then there were no active
8739	 * entries and we want to roll back to a zeroed entry until
8740	 * the new inode is committed to disk. If the COMPLETE flag is
8741	 * set then we have deleted an entry that never made it to
8742	 * disk. If the entry we deleted resulted from a name change,
8743	 * then the old name still resides on disk. We cannot delete
8744	 * its inode (returned to us in prevdirrem) until the zeroed
8745	 * directory entry gets to disk. The new inode has never been
8746	 * referenced on the disk, so can be deleted immediately.
8747	 */
8748	if ((dirrem->dm_state & COMPLETE) == 0) {
8749		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8750		    dm_next);
8751		FREE_LOCK(&lk);
8752	} else {
8753		if (prevdirrem != NULL)
8754			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8755			    prevdirrem, dm_next);
8756		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
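		/*
		 * The removal can be handled directly only when no journal
		 * remove records remain to be written.
		 */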
8757		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8758		FREE_LOCK(&lk);
8759		if (direct)
8760			handle_workitem_remove(dirrem, 0);
8761	}
8762}
8763
8764/*
8765 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
8766 * pd_pendinghd list of a pagedep.
8767 */
8768static struct diradd *
8769diradd_lookup(pagedep, offset)
8770	struct pagedep *pagedep;
8771	int offset;
8772{
8773	struct diradd *dap;
8774
8775	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8776		if (dap->da_offset == offset)
8777			return (dap);
8778	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8779		if (dap->da_offset == offset)
8780			return (dap);
8781	return (NULL);
8782}
8783
8784/*
8785 * Search for a .. diradd dependency in a directory that is being removed.
8786 * If the directory was renamed to a new parent we have a diradd rather
8787 * than a mkdir for the .. entry.  We need to cancel it now before
8788 * it is found in truncate().
8789 */
8790static struct jremref *
8791cancel_diradd_dotdot(ip, dirrem, jremref)
8792	struct inode *ip;
8793	struct dirrem *dirrem;
8794	struct jremref *jremref;
8795{
8796	struct pagedep *pagedep;
8797	struct diradd *dap;
8798	struct worklist *wk;
8799
8800	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8801	    &pagedep) == 0)
8802		return (jremref);
8803	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8804	if (dap == NULL)
8805		return (jremref);
8806	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8807	/*
8808	 * Mark any journal work as belonging to the parent so it is freed
8809	 * with the .. reference.
8810	 */
8811	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8812		wk->wk_state |= MKDIR_PARENT;
8813	return (NULL);
8814}
8815
8816/*
8817 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
8818 * replace it with a dirrem/diradd pair as a result of re-parenting a
8819 * directory.  This ensures that we don't simultaneously have a mkdir and
8820 * a diradd for the same .. entry.
8821 */
8822static struct jremref *
8823cancel_mkdir_dotdot(ip, dirrem, jremref)
8824	struct inode *ip;
8825	struct dirrem *dirrem;
8826	struct jremref *jremref;
8827{
8828	struct inodedep *inodedep;
8829	struct jaddref *jaddref;
8830	struct mkdir *mkdir;
8831	struct diradd *dap;
8832
8833	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8834	    &inodedep) == 0)
8835		return (jremref);
8836	dap = inodedep->id_mkdiradd;
8837	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
8838		return (jremref);
8839	for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir;
8840	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
8841		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
8842			break;
8843	if (mkdir == NULL)
8844		panic("cancel_mkdir_dotdot: Unable to find mkdir\n");
8845	if ((jaddref = mkdir->md_jaddref) != NULL) {
8846		mkdir->md_jaddref = NULL;
8847		jaddref->ja_state &= ~MKDIR_PARENT;
8848		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
8849		    &inodedep) == 0)
8850			panic("cancel_mkdir_dotdot: Lost parent inodedep");
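		/*
		 * If the canceled ".." add was already journaled, the
		 * matching remove must be journaled as well; the jremref
		 * is then consumed here rather than returned to the caller.
		 */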
8851		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
8852			journal_jremref(dirrem, jremref, inodedep);
8853			jremref = NULL;
8854		}
8855	}
8856	if (mkdir->md_state & ONWORKLIST)
8857		WORKLIST_REMOVE(&mkdir->md_list);
8858	mkdir->md_state |= ALLCOMPLETE;
8859	complete_mkdir(mkdir);
8860	return (jremref);
8861}
8862
8863static void
8864journal_jremref(dirrem, jremref, inodedep)
8865	struct dirrem *dirrem;
8866	struct jremref *jremref;
8867	struct inodedep *inodedep;
8868{
8869
8870	if (inodedep == NULL)
8871		if (inodedep_lookup(jremref->jr_list.wk_mp,
8872		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
8873			panic("journal_jremref: Lost inodedep");
8874	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
8875	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
8876	add_to_journal(&jremref->jr_list);
8877}
8878
8879static void
8880dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
8881	struct dirrem *dirrem;
8882	struct jremref *jremref;
8883	struct jremref *dotremref;
8884	struct jremref *dotdotremref;
8885{
8886	struct inodedep *inodedep;
8887
8888
8889	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
8890	    &inodedep) == 0)
8891		panic("dirrem_journal: Lost inodedep");
8892	journal_jremref(dirrem, jremref, inodedep);
8893	if (dotremref)
8894		journal_jremref(dirrem, dotremref, inodedep);
8895	if (dotdotremref)
8896		journal_jremref(dirrem, dotdotremref, NULL);
8897}
8898
8899/*
8900 * Allocate a new dirrem if appropriate and return it along with
8901 * its associated pagedep. Called without a lock, returns with lock.
8902 */
8903static struct dirrem *
8904newdirrem(bp, dp, ip, isrmdir, prevdirremp)
8905	struct buf *bp;		/* buffer containing directory block */
8906	struct inode *dp;	/* inode for the directory being modified */
8907	struct inode *ip;	/* inode for directory entry being removed */
8908	int isrmdir;		/* indicates if doing RMDIR */
8909	struct dirrem **prevdirremp; /* previously referenced inode, if any */
8910{
8911	int offset;
8912	ufs_lbn_t lbn;
8913	struct diradd *dap;
8914	struct dirrem *dirrem;
8915	struct pagedep *pagedep;
8916	struct jremref *jremref;
8917	struct jremref *dotremref;
8918	struct jremref *dotdotremref;
8919	struct vnode *dvp;
8920
8921	/*
8922	 * Whiteouts have no deletion dependencies.
8923	 */
8924	if (ip == NULL)
8925		panic("newdirrem: whiteout");
8926	dvp = ITOV(dp);
8927	/*
8928	 * If we are over our limit, try to improve the situation.
8929	 * Limiting the number of dirrem structures will also limit
8930	 * the number of freefile and freeblks structures.
8931	 */
8932	ACQUIRE_LOCK(&lk);
8933	if (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2)
8934		(void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS);
8935	FREE_LOCK(&lk);
8936	dirrem = malloc(sizeof(struct dirrem),
8937		M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
8938	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
8939	LIST_INIT(&dirrem->dm_jremrefhd);
8940	LIST_INIT(&dirrem->dm_jwork);
8941	dirrem->dm_state = isrmdir ? RMDIR : 0;
8942	dirrem->dm_oldinum = ip->i_number;
8943	*prevdirremp = NULL;
8944	/*
8945	 * Allocate remove reference structures to track journal write
8946	 * dependencies.  We will always have one for the link and
8947	 * when doing directories we will always have one more for dot.
8948	 * When renaming a directory we skip the dotdot link change so
8949	 * this is not needed.
8950	 */
8951	jremref = dotremref = dotdotremref = NULL;
8952	if (DOINGSUJ(dvp)) {
8953		if (isrmdir) {
8954			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8955			    ip->i_effnlink + 2);
8956			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
8957			    ip->i_effnlink + 1);
8958			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
8959			    dp->i_effnlink + 1);
8960			dotdotremref->jr_state |= MKDIR_PARENT;
8961		} else
8962			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8963			    ip->i_effnlink + 1);
8964	}
8965	ACQUIRE_LOCK(&lk);
8966	lbn = lblkno(dp->i_fs, dp->i_offset);
8967	offset = blkoff(dp->i_fs, dp->i_offset);
8968	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
8969	    &pagedep);
8970	dirrem->dm_pagedep = pagedep;
8971	dirrem->dm_offset = offset;
8972	/*
8973	 * If we're renaming a .. link to a new directory, cancel any
8974	 * existing MKDIR_PARENT mkdir.  If it has already been canceled
8975	 * existing MKDIR_PARENT mkdir.  If it has already been canceled,
8976	 * the jremref is preserved for any potential diradd in this
8977	 * location.  This cannot coincide with an rmdir.
8978	if (dp->i_offset == DOTDOT_OFFSET) {
8979		if (isrmdir)
8980			panic("newdirrem: .. directory change during remove?");
8981		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
8982	}
8983	/*
8984	 * If we're removing a directory search for the .. dependency now and
8985	 * cancel it.  Any pending journal work will be added to the dirrem
8986	 * to be completed when the workitem remove completes.
8987	 */
8988	if (isrmdir)
8989		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
8990	/*
8991	 * Check for a diradd dependency for the same directory entry.
8992	 * If present, then both dependencies become obsolete and can
8993	 * be de-allocated.
8994	 */
8995	dap = diradd_lookup(pagedep, offset);
8996	if (dap == NULL) {
8997		/*
8998		 * Link the jremref structures into the dirrem so they are
8999		 * written prior to the pagedep.
9000		 */
9001		if (jremref)
9002			dirrem_journal(dirrem, jremref, dotremref,
9003			    dotdotremref);
9004		return (dirrem);
9005	}
9006	/*
9007	 * Must be ATTACHED at this point.
9008	 */
9009	if ((dap->da_state & ATTACHED) == 0)
9010		panic("newdirrem: not ATTACHED");
9011	if (dap->da_newinum != ip->i_number)
9012		panic("newdirrem: inum %ju should be %ju",
9013		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
9014	/*
9015	 * If we are deleting a changed name that never made it to disk,
9016	 * then return the dirrem describing the previous inode (which
9017	 * represents the inode currently referenced from this entry on disk).
9018	 */
9019	if ((dap->da_state & DIRCHG) != 0) {
9020		*prevdirremp = dap->da_previous;
9021		dap->da_state &= ~DIRCHG;
9022		dap->da_pagedep = pagedep;
9023	}
9024	/*
9025	 * We are deleting an entry that never made it to disk.
9026	 * Mark it COMPLETE so we can delete its inode immediately.
9027	 */
9028	dirrem->dm_state |= COMPLETE;
9029	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
9030#ifdef SUJ_DEBUG
9031	if (isrmdir == 0) {
9032		struct worklist *wk;
9033
9034		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9035			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9036				panic("bad wk %p (0x%X)\n", wk, wk->wk_state);
9037	}
9038#endif
9039
9040	return (dirrem);
9041}
9042
9043/*
9044 * Directory entry change dependencies.
9045 *
9046 * Changing an existing directory entry requires that an add operation
9047 * be completed first followed by a deletion. The semantics for the addition
9048 * are identical to the description of adding a new entry above except
9049 * that the rollback is to the old inode number rather than zero. Once
9050 * the addition dependency is completed, the removal is done as described
9051 * in the removal routine above.
9052 */
9053
9054/*
9055 * This routine should be called immediately after changing
9056 * a directory entry.  The inode's link count should not be
9057 * decremented by the calling procedure -- the soft updates
9058 * code will perform this task when it is safe.
9059 */
9060void
9061softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9062	struct buf *bp;		/* buffer containing directory block */
9063	struct inode *dp;	/* inode for the directory being modified */
9064	struct inode *ip;	/* inode for directory entry being removed */
9065	ino_t newinum;		/* new inode number for changed entry */
9066	int isrmdir;		/* indicates if doing RMDIR */
9067{
9068	int offset;
9069	struct diradd *dap = NULL;
9070	struct dirrem *dirrem, *prevdirrem;
9071	struct pagedep *pagedep;
9072	struct inodedep *inodedep;
9073	struct jaddref *jaddref;
9074	struct mount *mp;
9075
9076	offset = blkoff(dp->i_fs, dp->i_offset);
9077	mp = UFSTOVFS(dp->i_ump);
9078	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
9079	   ("softdep_setup_directory_change called on non-softdep filesystem"));
9080
9081	/*
9082	 * Whiteouts do not need diradd dependencies.
9083	 */
9084	if (newinum != WINO) {
9085		dap = malloc(sizeof(struct diradd),
9086		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9087		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9088		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9089		dap->da_offset = offset;
9090		dap->da_newinum = newinum;
9091		LIST_INIT(&dap->da_jwork);
9092	}
9093
9094	/*
9095	 * Allocate a new dirrem and ACQUIRE_LOCK.
9096	 */
9097	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9098	pagedep = dirrem->dm_pagedep;
9099	/*
9100	 * The possible values for isrmdir:
9101	 *	0 - non-directory file rename
9102	 *	1 - directory rename within same directory
9103	 *   inum - directory rename to new directory of given inode number
9104	 * When renaming to a new directory, we are both deleting and
9105	 * creating a new directory entry, so the link count on the new
9106	 * directory should not change. Thus we do not need the followup
9107	 * dirrem which is usually done in handle_workitem_remove. We set
9108	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9109	 * followup dirrem.
9110	 */
9111	if (isrmdir > 1)
9112		dirrem->dm_state |= DIRCHG;
9113
9114	/*
9115	 * Whiteouts have no additional dependencies,
9116	 * so just put the dirrem on the correct list.
9117	 */
9118	if (newinum == WINO) {
9119		if ((dirrem->dm_state & COMPLETE) == 0) {
9120			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9121			    dm_next);
9122		} else {
9123			dirrem->dm_dirinum = pagedep->pd_ino;
9124			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9125				add_to_worklist(&dirrem->dm_list, 0);
9126		}
9127		FREE_LOCK(&lk);
9128		return;
9129	}
9130	/*
9131	 * Add the dirrem to the inodedep's pending remove list for quick
9132	 * discovery later.  A valid nlinkdelta ensures that this lookup
9133	 * will not fail.
9134	 */
9135	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9136		panic("softdep_setup_directory_change: Lost inodedep.");
9137	dirrem->dm_state |= ONDEPLIST;
9138	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9139
9140	/*
9141	 * If the COMPLETE flag is clear, then there were no active
9142	 * entries and we want to roll back to the previous inode until
9143	 * the new inode is committed to disk. If the COMPLETE flag is
9144	 * set, then we have deleted an entry that never made it to disk.
9145	 * If the entry we deleted resulted from a name change, then the old
9146	 * inode reference still resides on disk. Any rollback that we do
9147	 * needs to be to that old inode (returned to us in prevdirrem). If
9148	 * the entry we deleted resulted from a create, then there is
9149	 * no entry on the disk, so we want to roll back to zero rather
9150	 * than the uncommitted inode. In either of the COMPLETE cases we
9151	 * want to immediately free the unwritten and unreferenced inode.
9152	 */
9153	if ((dirrem->dm_state & COMPLETE) == 0) {
9154		dap->da_previous = dirrem;
9155	} else {
9156		if (prevdirrem != NULL) {
9157			dap->da_previous = prevdirrem;
9158		} else {
9159			dap->da_state &= ~DIRCHG;
9160			dap->da_pagedep = pagedep;
9161		}
9162		dirrem->dm_dirinum = pagedep->pd_ino;
9163		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9164			add_to_worklist(&dirrem->dm_list, 0);
9165	}
9166	/*
9167	 * Lookup the jaddref for this journal entry.  We must finish
9168	 * initializing it and make the diradd write dependent on it.
9169	 * If we're not journaling, put it on the id_bufwait list if the
9170	 * inode is not yet written. If it is written, do the post-inode
9171	 * write processing to put it on the id_pendinghd list.
9172	 */
9173	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
9174	if (MOUNTEDSUJ(mp)) {
9175		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9176		    inoreflst);
9177		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9178		    ("softdep_setup_directory_change: bad jaddref %p",
9179		    jaddref));
9180		jaddref->ja_diroff = dp->i_offset;
9181		jaddref->ja_diradd = dap;
9182		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9183		    dap, da_pdlist);
9184		add_to_journal(&jaddref->ja_list);
9185	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9186		dap->da_state |= COMPLETE;
9187		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9188		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9189	} else {
9190		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9191		    dap, da_pdlist);
9192		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9193	}
9194	/*
9195	 * If we're making a new name for a directory that has not been
9196	 * committed, we need to move the dot and dotdot references to
9197	 * this new name.
9198	 */
9199	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9200		merge_diradd(inodedep, dap);
9201	FREE_LOCK(&lk);
9202}
9203
9204/*
9205 * Called whenever the link count on an inode is changed.
9206 * It creates an inode dependency so that the new reference(s)
9207 * to the inode cannot be committed to disk until the updated
9208 * inode has been written.
9209 */
9210void
9211softdep_change_linkcnt(ip)
9212	struct inode *ip;	/* the inode with the increased link count */
9213{
9214	struct inodedep *inodedep;
9215	int dflags;
9216
9217	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
9218	    ("softdep_change_linkcnt called on non-softdep filesystem"));
9219	ACQUIRE_LOCK(&lk);
9220	dflags = DEPALLOC;
9221	if (IS_SNAPSHOT(ip))
9222		dflags |= NODELAY;
9223	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
9224	if (ip->i_nlink < ip->i_effnlink)
9225		panic("softdep_change_linkcnt: bad delta");
9226	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9227	FREE_LOCK(&lk);
9228}
9229
9230/*
9231 * Attach a sbdep dependency to the superblock buf so that we can keep
9232 * track of the head of the linked list of referenced but unlinked inodes.
9233 */
9234void
9235softdep_setup_sbupdate(ump, fs, bp)
9236	struct ufsmount *ump;
9237	struct fs *fs;
9238	struct buf *bp;
9239{
9240	struct sbdep *sbdep;
9241	struct worklist *wk;
9242
9243	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
9244	    ("softdep_setup_sbupdate called on non-softdep filesystem"));
9245	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9246		if (wk->wk_type == D_SBDEP)
9247			break;
9248	if (wk != NULL)
9249		return;
9250	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9251	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9252	sbdep->sb_fs = fs;
9253	sbdep->sb_ump = ump;
9254	ACQUIRE_LOCK(&lk);
9255	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9256	FREE_LOCK(&lk);
9257}
9258
9259/*
9260 * Return the first unlinked inodedep which is ready to be the head of the
9261 * list.  The inodedep and all those after it must have valid next pointers.
9262 */
9263static struct inodedep *
9264first_unlinked_inodedep(ump)
9265	struct ufsmount *ump;
9266{
9267	struct inodedep *inodedep;
9268	struct inodedep *idp;
9269
9270	rw_assert(&lk, RA_WLOCKED);
9271	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9272	    inodedep; inodedep = idp) {
9273		if ((inodedep->id_state & UNLINKNEXT) == 0)
9274			return (NULL);
9275		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9276		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9277			break;
9278		if ((inodedep->id_state & UNLINKPREV) == 0)
9279			break;
9280	}
9281	return (inodedep);
9282}
9283
9284/*
9285 * Set the sujfree unlinked head pointer prior to writing a superblock.
9286 */
9287static void
9288initiate_write_sbdep(sbdep)
9289	struct sbdep *sbdep;
9290{
9291	struct inodedep *inodedep;
9292	struct fs *bpfs;
9293	struct fs *fs;
9294
9295	bpfs = sbdep->sb_fs;
9296	fs = sbdep->sb_ump->um_fs;
9297	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9298	if (inodedep) {
9299		fs->fs_sujfree = inodedep->id_ino;
9300		inodedep->id_state |= UNLINKPREV;
9301	} else
9302		fs->fs_sujfree = 0;
9303	bpfs->fs_sujfree = fs->fs_sujfree;
9304}
9305
9306/*
9307 * After a superblock is written determine whether it must be written again
9308 * due to a changing unlinked list head.
9309 */
9310static int
9311handle_written_sbdep(sbdep, bp)
9312	struct sbdep *sbdep;
9313	struct buf *bp;
9314{
9315	struct inodedep *inodedep;
9316	struct mount *mp;
9317	struct fs *fs;
9318
9319	rw_assert(&lk, RA_WLOCKED);
9320	fs = sbdep->sb_fs;
9321	mp = UFSTOVFS(sbdep->sb_ump);
9322	/*
9323	 * If the superblock doesn't match the in-memory list start over.
9324	 */
9325	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9326	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9327	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9328		bdirty(bp);
9329		return (1);
9330	}
9331	WORKITEM_FREE(sbdep, D_SBDEP);
9332	if (fs->fs_sujfree == 0)
9333		return (0);
9334	/*
9335	 * Now that we have a record of this inode in stable store, allow it
9336	 * to be written to free up pending work.  Inodes may see a lot of
9337	 * write activity after they are unlinked, which we must not hold up.
9338	 */
9339	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9340		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9341			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9342			    inodedep, inodedep->id_state);
9343		if (inodedep->id_state & UNLINKONLIST)
9344			break;
9345		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9346	}
9347
9348	return (0);
9349}
9350
9351/*
9352 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9353 */
9354static void
9355unlinked_inodedep(mp, inodedep)
9356	struct mount *mp;
9357	struct inodedep *inodedep;
9358{
9359	struct ufsmount *ump;
9360
9361	rw_assert(&lk, RA_WLOCKED);
9362	if (MOUNTEDSUJ(mp) == 0)
9363		return;
9364	ump = VFSTOUFS(mp);
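	/*
	 * Mark the filesystem modified so the superblock, which holds the
	 * head of the on-disk unlinked list, will be written again.
	 */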
9365	ump->um_fs->fs_fmod = 1;
9366	if (inodedep->id_state & UNLINKED)
9367		panic("unlinked_inodedep: %p already unlinked\n", inodedep);
9368	inodedep->id_state |= UNLINKED;
9369	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9370}
9371
9372/*
9373 * Remove an inodedep from the unlinked inodedep list.  This may require
9374 * disk writes if the inode has made it that far.
9375 */
9376static void
9377clear_unlinked_inodedep(inodedep)
9378	struct inodedep *inodedep;
9379{
9380	struct ufsmount *ump;
9381	struct inodedep *idp;
9382	struct inodedep *idn;
9383	struct fs *fs;
9384	struct buf *bp;
9385	ino_t ino;
9386	ino_t nino;
9387	ino_t pino;
9388	int error;
9389
9390	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9391	fs = ump->um_fs;
9392	ino = inodedep->id_ino;
9393	error = 0;
9394	for (;;) {
9395		rw_assert(&lk, RA_WLOCKED);
9396		KASSERT((inodedep->id_state & UNLINKED) != 0,
9397		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9398		    inodedep));
9399		/*
9400		 * If nothing has yet been written, simply remove us from
9401		 * the in-memory list and return.  This is the most common
9402		 * case where handle_workitem_remove() loses the final
9403		 * reference.
9404		 */
9405		if ((inodedep->id_state & UNLINKLINKS) == 0)
9406			break;
9407		/*
9408		 * If we have a NEXT pointer and no PREV pointer we can simply
9409		 * clear NEXT's PREV and remove ourselves from the list.  Be
9410		 * careful not to clear PREV if the superblock points at
9411		 * next as well.
9412		 */
9413		idn = TAILQ_NEXT(inodedep, id_unlinked);
9414		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9415			if (idn && fs->fs_sujfree != idn->id_ino)
9416				idn->id_state &= ~UNLINKPREV;
9417			break;
9418		}
9419		/*
9420		 * Here we have an inodedep which is actually linked into
9421		 * the list.  We must remove it by forcing a write to the
9422		 * link before us, whether it be the superblock or an inode.
9423		 * Unfortunately the list may change while we're waiting
9424		 * on the buf lock for either resource so we must loop until
9425		 * we lock the right one.  If both the superblock and an
9426		 * inode point to this inode we must clear the inode first
9427		 * followed by the superblock.
9428		 */
9429		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9430		pino = 0;
9431		if (idp && (idp->id_state & UNLINKNEXT))
9432			pino = idp->id_ino;
9433		FREE_LOCK(&lk);
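		/*
		 * A zero pino means our predecessor is the superblock;
		 * otherwise read the predecessor's inode block so its
		 * di_freelink can be rewritten.
		 */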
9434		if (pino == 0) {
9435			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9436			    (int)fs->fs_sbsize, 0, 0, 0);
9437		} else {
9438			error = bread(ump->um_devvp,
9439			    fsbtodb(fs, ino_to_fsba(fs, pino)),
9440			    (int)fs->fs_bsize, NOCRED, &bp);
9441			if (error)
9442				brelse(bp);
9443		}
9444		ACQUIRE_LOCK(&lk);
9445		if (error)
9446			break;
9447		/* If the list has changed restart the loop. */
9448		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9449		nino = 0;
9450		if (idp && (idp->id_state & UNLINKNEXT))
9451			nino = idp->id_ino;
9452		if (nino != pino ||
9453		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9454			FREE_LOCK(&lk);
9455			brelse(bp);
9456			ACQUIRE_LOCK(&lk);
9457			continue;
9458		}
9459		nino = 0;
9460		idn = TAILQ_NEXT(inodedep, id_unlinked);
9461		if (idn)
9462			nino = idn->id_ino;
9463		/*
9464		 * Remove us from the in-memory list.  After this we cannot
9465		 * access the inodedep.
9466		 */
9467		KASSERT((inodedep->id_state & UNLINKED) != 0,
9468		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9469		    inodedep));
9470		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9471		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9472		FREE_LOCK(&lk);
9473		/*
9474		 * The predecessor's next pointer is manually updated here
9475		 * so that the NEXT flag is never cleared for an element
9476		 * that is in the list.
9477		 */
9478		if (pino == 0) {
9479			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9480			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9481			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9482			    bp);
9483		} else if (fs->fs_magic == FS_UFS1_MAGIC)
9484			((struct ufs1_dinode *)bp->b_data +
9485			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9486		else
9487			((struct ufs2_dinode *)bp->b_data +
9488			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9489		/*
9490		 * If the bwrite fails we have no recourse to recover.  The
9491		 * filesystem is corrupted already.
9492		 */
9493		bwrite(bp);
9494		ACQUIRE_LOCK(&lk);
9495		/*
9496		 * If the superblock pointer still needs to be cleared force
9497		 * a write here.
9498		 */
9499		if (fs->fs_sujfree == ino) {
9500			FREE_LOCK(&lk);
9501			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9502			    (int)fs->fs_sbsize, 0, 0, 0);
9503			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9504			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9505			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9506			    bp);
9507			bwrite(bp);
9508			ACQUIRE_LOCK(&lk);
9509		}
9510
9511		if (fs->fs_sujfree != ino)
9512			return;
9513		panic("clear_unlinked_inodedep: Failed to clear free head");
9514	}
9515	if (inodedep->id_ino == fs->fs_sujfree)
9516		panic("clear_unlinked_inodedep: Freeing head of free list");
9517	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9518	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9519	return;
9520}
9521
9522/*
9523 * This workitem decrements the inode's link count.
9524 * If the link count reaches zero, the file is removed.
9525 */
9526static int
9527handle_workitem_remove(dirrem, flags)
9528	struct dirrem *dirrem;
9529	int flags;
9530{
9531	struct inodedep *inodedep;
9532	struct workhead dotdotwk;
9533	struct worklist *wk;
9534	struct ufsmount *ump;
9535	struct mount *mp;
9536	struct vnode *vp;
9537	struct inode *ip;
9538	ino_t oldinum;
9539
9540	if (dirrem->dm_state & ONWORKLIST)
9541		panic("handle_workitem_remove: dirrem %p still on worklist",
9542		    dirrem);
9543	oldinum = dirrem->dm_oldinum;
9544	mp = dirrem->dm_list.wk_mp;
9545	ump = VFSTOUFS(mp);
9546	flags |= LK_EXCLUSIVE;
9547	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9548		return (EBUSY);
9549	ip = VTOI(vp);
9550	ACQUIRE_LOCK(&lk);
9551	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9552		panic("handle_workitem_remove: lost inodedep");
9553	if (dirrem->dm_state & ONDEPLIST)
9554		LIST_REMOVE(dirrem, dm_inonext);
9555	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9556	    ("handle_workitem_remove:  Journal entries not written."));
9557
9558	/*
9559	 * Move all dependencies waiting on the remove to complete
9560	 * from the dirrem to the inode inowait list to be completed
9561	 * after the inode has been updated and written to disk.  Any
9562	 * marked MKDIR_PARENT are saved to be completed when the .. ref
9563	 * is removed.
9564	 */
9565	LIST_INIT(&dotdotwk);
9566	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9567		WORKLIST_REMOVE(wk);
9568		if (wk->wk_state & MKDIR_PARENT) {
9569			wk->wk_state &= ~MKDIR_PARENT;
9570			WORKLIST_INSERT(&dotdotwk, wk);
9571			continue;
9572		}
9573		WORKLIST_INSERT(&inodedep->id_inowait, wk);
9574	}
9575	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9576	/*
9577	 * Normal file deletion.
9578	 */
9579	if ((dirrem->dm_state & RMDIR) == 0) {
9580		ip->i_nlink--;
9581		DIP_SET(ip, i_nlink, ip->i_nlink);
9582		ip->i_flag |= IN_CHANGE;
9583		if (ip->i_nlink < ip->i_effnlink)
9584			panic("handle_workitem_remove: bad file delta");
9585		if (ip->i_nlink == 0)
9586			unlinked_inodedep(mp, inodedep);
9587		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9588		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9589		    ("handle_workitem_remove: worklist not empty. %s",
9590		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9591		WORKITEM_FREE(dirrem, D_DIRREM);
9592		FREE_LOCK(&lk);
9593		goto out;
9594	}
9595	/*
9596	 * Directory deletion. Decrement reference count for both the
9597	 * just deleted parent directory entry and the reference for ".".
9598	 * Arrange to have the reference count on the parent decremented
9599	 * to account for the loss of "..".
9600	 */
9601	ip->i_nlink -= 2;
9602	DIP_SET(ip, i_nlink, ip->i_nlink);
9603	ip->i_flag |= IN_CHANGE;
9604	if (ip->i_nlink < ip->i_effnlink)
9605		panic("handle_workitem_remove: bad dir delta");
9606	if (ip->i_nlink == 0)
9607		unlinked_inodedep(mp, inodedep);
9608	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9609	/*
9610	 * Rename a directory to a new parent. Since we are both deleting
9611	 * and creating a new directory entry, the link count on the new
9612	 * directory should not change. Thus we skip the followup dirrem.
9613	 */
9614	if (dirrem->dm_state & DIRCHG) {
9615		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9616		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
9617		WORKITEM_FREE(dirrem, D_DIRREM);
9618		FREE_LOCK(&lk);
9619		goto out;
9620	}
9621	dirrem->dm_state = ONDEPLIST;
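	/*
	 * Convert the dirrem into the followup remove that will decrement
	 * the parent's link count for the departing ".." reference.
	 */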
9622	dirrem->dm_oldinum = dirrem->dm_dirinum;
9623	/*
9624	 * Place the dirrem on the parent's diremhd list.
9625	 */
9626	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9627		panic("handle_workitem_remove: lost dir inodedep");
9628	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9629	/*
9630	 * If the allocated inode has never been written to disk, then
9631	 * the on-disk inode is zero'ed and we can remove the file
9632	 * immediately.  When journaling, if the inode has been marked
9633	 * unlinked and not DEPCOMPLETE, we know it can never be written.
9634	 */
9635	inodedep_lookup(mp, oldinum, 0, &inodedep);
9636	if (inodedep == NULL ||
9637	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9638	    check_inode_unwritten(inodedep)) {
9639		FREE_LOCK(&lk);
9640		vput(vp);
9641		return handle_workitem_remove(dirrem, flags);
9642	}
9643	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9644	FREE_LOCK(&lk);
9645	ip->i_flag |= IN_CHANGE;
9646out:
9647	ffs_update(vp, 0);
9648	vput(vp);
9649	return (0);
9650}
9651
9652/*
9653 * Inode de-allocation dependencies.
9654 *
9655 * When an inode's link count is reduced to zero, it can be de-allocated. We
9656 * found it convenient to postpone de-allocation until after the inode is
9657 * written to disk with its new link count (zero).  At this point, all of the
9658 * on-disk inode's block pointers are nullified and, with careful dependency
9659 * list ordering, all dependencies related to the inode will be satisfied and
9660 * the corresponding dependency structures de-allocated.  So, if/when the
9661 * inode is reused, there will be no mixing of old dependencies with new
9662 * ones.  This artificial dependency is set up by the block de-allocation
9663 * procedure above (softdep_setup_freeblocks) and completed by the
9664 * following procedure.
9665 */
9666static void
9667handle_workitem_freefile(freefile)
9668	struct freefile *freefile;
9669{
9670	struct workhead wkhd;
9671	struct fs *fs;
9672	struct inodedep *idp;
9673	struct ufsmount *ump;
9674	int error;
9675
9676	ump = VFSTOUFS(freefile->fx_list.wk_mp);
9677	fs = ump->um_fs;
9678#ifdef DEBUG
9679	ACQUIRE_LOCK(&lk);
9680	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9681	FREE_LOCK(&lk);
9682	if (error)
9683		panic("handle_workitem_freefile: inodedep %p survived", idp);
9684#endif
9685	UFS_LOCK(ump);
9686	fs->fs_pendinginodes -= 1;
9687	UFS_UNLOCK(ump);
9688	LIST_INIT(&wkhd);
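	/*
	 * Hand any pending journal work to ffs_freefile() so that it is
	 * released along with the cylinder group update.
	 */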
9689	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9690	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9691	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9692		softdep_error("handle_workitem_freefile", error);
9693	ACQUIRE_LOCK(&lk);
9694	WORKITEM_FREE(freefile, D_FREEFILE);
9695	FREE_LOCK(&lk);
9696}
9697
9698
9699/*
9700 * Helper function which unlinks marker element from work list and returns
9701 * the next element on the list.
9702 */
9703static __inline struct worklist *
9704markernext(struct worklist *marker)
9705{
9706	struct worklist *next;
9707
9708	next = LIST_NEXT(marker, wk_list);
9709	LIST_REMOVE(marker, wk_list);
9710	return next;
9711}
9712
9713/*
9714 * Disk writes.
9715 *
9716 * The dependency structures constructed above are most actively used when file
9717 * system blocks are written to disk.  No constraints are placed on when a
9718 * block can be written, but unsatisfied update dependencies are made safe by
9719 * modifying (or replacing) the source memory for the duration of the disk
9720 * write.  When the disk write completes, the memory block is again brought
9721 * up-to-date.
9722 *
9723 * In-core inode structure reclamation.
9724 *
9725 * Because there are a finite number of "in-core" inode structures, they are
9726 * reused regularly.  By transferring all inode-related dependencies to the
9727 * in-memory inode block and indexing them separately (via "inodedep"s), we
9728 * can allow "in-core" inode structures to be reused at any time and avoid
9729 * any increase in contention.
9730 *
9731 * Called just before entering the device driver to initiate a new disk I/O.
9732 * The buffer must be locked, thus, no I/O completion operations can occur
9733 * while we are manipulating its associated dependencies.
9734 */
9735static void
9736softdep_disk_io_initiation(bp)
9737	struct buf *bp;		/* structure describing disk write to occur */
9738{
9739	struct worklist *wk;
9740	struct worklist marker;
9741	struct inodedep *inodedep;
9742	struct freeblks *freeblks;
9743	struct jblkdep *jblkdep;
9744	struct newblk *newblk;
9745
9746	/*
9747	 * We only care about write operations. There should never
9748	 * be dependencies for reads.
9749	 */
9750	if (bp->b_iocmd != BIO_WRITE)
9751		panic("softdep_disk_io_initiation: not write");
9752
9753	if (bp->b_vflags & BV_BKGRDINPROG)
9754		panic("softdep_disk_io_initiation: Writing buffer with "
9755		    "background write in progress: %p", bp);
9756
9757	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
9758	PHOLD(curproc);			/* Don't swap out kernel stack */
9759
9760	ACQUIRE_LOCK(&lk);
9761	/*
9762	 * Do any necessary pre-I/O processing.
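	 * The marker records our position in the buf's dependency list,
	 * since the jwait() calls below may drop the lock and allow the
	 * list to change underneath us.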
9763	 */
9764	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9765	     wk = markernext(&marker)) {
9766		LIST_INSERT_AFTER(wk, &marker, wk_list);
9767		switch (wk->wk_type) {
9768
9769		case D_PAGEDEP:
9770			initiate_write_filepage(WK_PAGEDEP(wk), bp);
9771			continue;
9772
9773		case D_INODEDEP:
9774			inodedep = WK_INODEDEP(wk);
9775			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9776				initiate_write_inodeblock_ufs1(inodedep, bp);
9777			else
9778				initiate_write_inodeblock_ufs2(inodedep, bp);
9779			continue;
9780
9781		case D_INDIRDEP:
9782			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9783			continue;
9784
9785		case D_BMSAFEMAP:
9786			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9787			continue;
9788
9789		case D_JSEG:
9790			WK_JSEG(wk)->js_buf = NULL;
9791			continue;
9792
9793		case D_FREEBLKS:
9794			freeblks = WK_FREEBLKS(wk);
9795			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9796			/*
9797			 * We have to wait for the freeblks to be journaled
9798			 * before we can write an inodeblock with updated
9799			 * pointers.  Be careful to arrange the marker so
9800			 * we revisit the freeblks if it's not removed by
9801			 * the first jwait().
9802			 */
9803			if (jblkdep != NULL) {
9804				LIST_REMOVE(&marker, wk_list);
9805				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9806				jwait(&jblkdep->jb_list, MNT_WAIT);
9807			}
9808			continue;
9809		case D_ALLOCDIRECT:
9810		case D_ALLOCINDIR:
9811			/*
9812			 * We have to wait for the jnewblk to be journaled
9813			 * before we can write to a block if the contents
9814			 * may be confused with an earlier file's indirect
9815			 * at recovery time.  Handle the marker as described
9816			 * above.
9817			 */
9818			newblk = WK_NEWBLK(wk);
9819			if (newblk->nb_jnewblk != NULL &&
9820			    indirblk_lookup(newblk->nb_list.wk_mp,
9821			    newblk->nb_newblkno)) {
9822				LIST_REMOVE(&marker, wk_list);
9823				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9824				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
9825			}
9826			continue;
9827
9828		case D_SBDEP:
9829			initiate_write_sbdep(WK_SBDEP(wk));
9830			continue;
9831
9832		case D_MKDIR:
9833		case D_FREEWORK:
9834		case D_FREEDEP:
9835		case D_JSEGDEP:
9836			continue;
9837
9838		default:
9839			panic("handle_disk_io_initiation: Unexpected type %s",
9840			    TYPENAME(wk->wk_type));
9841			/* NOTREACHED */
9842		}
9843	}
9844	FREE_LOCK(&lk);
9845	PRELE(curproc);			/* Allow swapout of kernel stack */
9846}
9847
9848/*
9849 * Called from within the procedure above to deal with unsatisfied
9850 * allocation dependencies in a directory. The buffer must be locked,
9851 * thus, no I/O completion operations can occur while we are
9852 * manipulating its associated dependencies.
9853 */
9854static void
9855initiate_write_filepage(pagedep, bp)
9856	struct pagedep *pagedep;
9857	struct buf *bp;
9858{
9859	struct jremref *jremref;
9860	struct jmvref *jmvref;
9861	struct dirrem *dirrem;
9862	struct diradd *dap;
9863	struct direct *ep;
9864	int i;
9865
9866	if (pagedep->pd_state & IOSTARTED) {
9867		/*
9868		 * This can only happen if there is a driver that does not
9869		 * understand chaining. Here biodone will reissue the call
9870		 * to strategy for the incomplete buffers.
9871		 */
9872		printf("initiate_write_filepage: already started\n");
9873		return;
9874	}
9875	pagedep->pd_state |= IOSTARTED;
9876	/*
9877	 * Wait for all journal remove dependencies to hit the disk.
9878	 * We can not allow any potentially conflicting directory adds
9879	 * We cannot allow any potentially conflicting directory adds
9880	 * to be visible before removes, and rollback is too difficult.
9881	 * locked so the dependency can not go away.
9882	 */
9883	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
9884		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
9885			jwait(&jremref->jr_list, MNT_WAIT);
9886	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
9887		jwait(&jmvref->jm_list, MNT_WAIT);
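	/*
	 * Roll back any directory additions that are not yet safe: restore
	 * the old inode number for a changed entry or zero for a new one,
	 * and mark each diradd UNDONE until the write completes.
	 */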
9888	for (i = 0; i < DAHASHSZ; i++) {
9889		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
9890			ep = (struct direct *)
9891			    ((char *)bp->b_data + dap->da_offset);
9892			if (ep->d_ino != dap->da_newinum)
9893				panic("%s: dir inum %ju != new %ju",
9894				    "initiate_write_filepage",
9895				    (uintmax_t)ep->d_ino,
9896				    (uintmax_t)dap->da_newinum);
9897			if (dap->da_state & DIRCHG)
9898				ep->d_ino = dap->da_previous->dm_oldinum;
9899			else
9900				ep->d_ino = 0;
9901			dap->da_state &= ~ATTACHED;
9902			dap->da_state |= UNDONE;
9903		}
9904	}
9905}
9906
9907/*
9908 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
9909 * Note that any bug fixes made to this routine must be done in the
9910 * version found below.
9911 *
9912 * Called from within the procedure above to deal with unsatisfied
9913 * allocation dependencies in an inodeblock. The buffer must be
9914 * locked, thus, no I/O completion operations can occur while we
9915 * are manipulating its associated dependencies.
9916 */
9917static void
9918initiate_write_inodeblock_ufs1(inodedep, bp)
9919	struct inodedep *inodedep;
9920	struct buf *bp;			/* The inode block */
9921{
9922	struct allocdirect *adp, *lastadp;
9923	struct ufs1_dinode *dp;
9924	struct ufs1_dinode *sip;
9925	struct inoref *inoref;
9926	struct fs *fs;
9927	ufs_lbn_t i;
9928#ifdef INVARIANTS
9929	ufs_lbn_t prevlbn = 0;
9930#endif
9931	int deplist;
9932
9933	if (inodedep->id_state & IOSTARTED)
9934		panic("initiate_write_inodeblock_ufs1: already started");
9935	inodedep->id_state |= IOSTARTED;
9936	fs = inodedep->id_fs;
9937	dp = (struct ufs1_dinode *)bp->b_data +
9938	    ino_to_fsbo(fs, inodedep->id_ino);
9939
9940	/*
9941	 * If we're on the unlinked list but have not yet written our
9942	 * next pointer, initialize it here.
9943	 */
9944	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
9945		struct inodedep *inon;
9946
9947		inon = TAILQ_NEXT(inodedep, id_unlinked);
9948		dp->di_freelink = inon ? inon->id_ino : 0;
9949	}
9950	/*
9951	 * If the bitmap is not yet written, then the allocated
9952	 * inode cannot be written to disk.
9953	 */
9954	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
9955		if (inodedep->id_savedino1 != NULL)
9956			panic("initiate_write_inodeblock_ufs1: I/O underway");
9957		FREE_LOCK(&lk);
9958		sip = malloc(sizeof(struct ufs1_dinode),
9959		    M_SAVEDINO, M_SOFTDEP_FLAGS);
9960		ACQUIRE_LOCK(&lk);
9961		inodedep->id_savedino1 = sip;
9962		*inodedep->id_savedino1 = *dp;
9963		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
9964		dp->di_gen = inodedep->id_savedino1->di_gen;
9965		dp->di_freelink = inodedep->id_savedino1->di_freelink;
9966		return;
9967	}
9968	/*
9969	 * If no dependencies, then there is nothing to roll back.
9970	 */
9971	inodedep->id_savedsize = dp->di_size;
9972	inodedep->id_savedextsize = 0;
9973	inodedep->id_savednlink = dp->di_nlink;
9974	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
9975	    TAILQ_EMPTY(&inodedep->id_inoreflst))
9976		return;
9977	/*
9978	 * Revert the link count to that of the first unwritten journal entry.
9979	 */
9980	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
9981	if (inoref)
9982		dp->di_nlink = inoref->if_nlink;
9983	/*
9984	 * Set the dependencies to busy.
9985	 */
9986	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
9987	     adp = TAILQ_NEXT(adp, ad_next)) {
9988#ifdef INVARIANTS
9989		if (deplist != 0 && prevlbn >= adp->ad_offset)
9990			panic("softdep_write_inodeblock: lbn order");
9991		prevlbn = adp->ad_offset;
9992		if (adp->ad_offset < NDADDR &&
9993		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
9994			panic("%s: direct pointer #%jd mismatch %d != %jd",
9995			    "softdep_write_inodeblock",
9996			    (intmax_t)adp->ad_offset,
9997			    dp->di_db[adp->ad_offset],
9998			    (intmax_t)adp->ad_newblkno);
9999		if (adp->ad_offset >= NDADDR &&
10000		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10001			panic("%s: indirect pointer #%jd mismatch %d != %jd",
10002			    "softdep_write_inodeblock",
10003			    (intmax_t)adp->ad_offset - NDADDR,
10004			    dp->di_ib[adp->ad_offset - NDADDR],
10005			    (intmax_t)adp->ad_newblkno);
10006		deplist |= 1 << adp->ad_offset;
10007		if ((adp->ad_state & ATTACHED) == 0)
10008			panic("softdep_write_inodeblock: Unknown state 0x%x",
10009			    adp->ad_state);
10010#endif /* INVARIANTS */
10011		adp->ad_state &= ~ATTACHED;
10012		adp->ad_state |= UNDONE;
10013	}
10014	/*
10015	 * The on-disk inode cannot claim to be any larger than the last
10016	 * fragment that has been written. Otherwise, the on-disk inode
10017	 * might have fragments that were not the last block in the file
10018	 * which would corrupt the filesystem.
10019	 */
10020	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10021	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10022		if (adp->ad_offset >= NDADDR)
10023			break;
10024		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10025		/* keep going until hitting a rollback to a frag */
10026		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10027			continue;
10028		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10029		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10030#ifdef INVARIANTS
10031			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10032				panic("softdep_write_inodeblock: lost dep1");
10033#endif /* INVARIANTS */
10034			dp->di_db[i] = 0;
10035		}
10036		for (i = 0; i < NIADDR; i++) {
10037#ifdef INVARIANTS
10038			if (dp->di_ib[i] != 0 &&
10039			    (deplist & ((1 << NDADDR) << i)) == 0)
10040				panic("softdep_write_inodeblock: lost dep2");
10041#endif /* INVARIANTS */
10042			dp->di_ib[i] = 0;
10043		}
10044		return;
10045	}
10046	/*
10047	 * If we have zero'ed out the last allocated block of the file,
10048	 * roll back the size to the last currently allocated block.
10049	 * We know that this last allocated block is full-sized, as
10050	 * we already checked for fragments in the loop above.
10051	 */
10052	if (lastadp != NULL &&
10053	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10054		for (i = lastadp->ad_offset; i >= 0; i--)
10055			if (dp->di_db[i] != 0)
10056				break;
10057		dp->di_size = (i + 1) * fs->fs_bsize;
10058	}
10059	/*
10060	 * The only dependencies are for indirect blocks.
10061	 *
10062	 * The file size for indirect block additions is not guaranteed.
10063	 * Such a guarantee would be non-trivial to achieve. The conventional
10064	 * synchronous write implementation also does not make this guarantee.
10065	 * Fsck should catch and fix discrepancies. Arguably, the file size
10066	 * can be over-estimated without destroying integrity when the file
10067	 * moves into the indirect blocks (i.e., is large). If we want to
10068	 * postpone fsck, we are stuck with this argument.
10069	 */
10070	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10071		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10072}
10073
10074/*
10075 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10076 * Note that any bug fixes made to this routine must be done in the
10077 * version found above.
10078 *
10079 * Called from within the procedure above to deal with unsatisfied
10080 * allocation dependencies in an inodeblock. The buffer must be
10081 * locked, thus, no I/O completion operations can occur while we
10082 * are manipulating its associated dependencies.
10083 */
10084static void
10085initiate_write_inodeblock_ufs2(inodedep, bp)
10086	struct inodedep *inodedep;
10087	struct buf *bp;			/* The inode block */
10088{
10089	struct allocdirect *adp, *lastadp;
10090	struct ufs2_dinode *dp;
10091	struct ufs2_dinode *sip;
10092	struct inoref *inoref;
10093	struct fs *fs;
10094	ufs_lbn_t i;
10095#ifdef INVARIANTS
10096	ufs_lbn_t prevlbn = 0;
10097#endif
10098	int deplist;
10099
10100	if (inodedep->id_state & IOSTARTED)
10101		panic("initiate_write_inodeblock_ufs2: already started");
10102	inodedep->id_state |= IOSTARTED;
10103	fs = inodedep->id_fs;
10104	dp = (struct ufs2_dinode *)bp->b_data +
10105	    ino_to_fsbo(fs, inodedep->id_ino);
10106
10107	/*
10108	 * If we're on the unlinked list but have not yet written our
10109	 * next pointer, initialize it here.
10110	 */
10111	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10112		struct inodedep *inon;
10113
10114		inon = TAILQ_NEXT(inodedep, id_unlinked);
10115		dp->di_freelink = inon ? inon->id_ino : 0;
10116	}
10117	/*
10118	 * If the bitmap is not yet written, then the allocated
10119	 * inode cannot be written to disk.
10120	 */
10121	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10122		if (inodedep->id_savedino2 != NULL)
10123			panic("initiate_write_inodeblock_ufs2: I/O underway");
10124		FREE_LOCK(&lk);
10125		sip = malloc(sizeof(struct ufs2_dinode),
10126		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10127		ACQUIRE_LOCK(&lk);
10128		inodedep->id_savedino2 = sip;
10129		*inodedep->id_savedino2 = *dp;
10130		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10131		dp->di_gen = inodedep->id_savedino2->di_gen;
10132		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10133		return;
10134	}
10135	/*
10136	 * If no dependencies, then there is nothing to roll back.
10137	 */
10138	inodedep->id_savedsize = dp->di_size;
10139	inodedep->id_savedextsize = dp->di_extsize;
10140	inodedep->id_savednlink = dp->di_nlink;
10141	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10142	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10143	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10144		return;
10145	/*
10146	 * Revert the link count to that of the first unwritten journal entry.
10147	 */
10148	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10149	if (inoref)
10150		dp->di_nlink = inoref->if_nlink;
10151
10152	/*
10153	 * Set the ext data dependencies to busy.
10154	 */
10155	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10156	     adp = TAILQ_NEXT(adp, ad_next)) {
10157#ifdef INVARIANTS
10158		if (deplist != 0 && prevlbn >= adp->ad_offset)
10159			panic("softdep_write_inodeblock: lbn order");
10160		prevlbn = adp->ad_offset;
10161		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10162			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10163			    "softdep_write_inodeblock",
10164			    (intmax_t)adp->ad_offset,
10165			    (intmax_t)dp->di_extb[adp->ad_offset],
10166			    (intmax_t)adp->ad_newblkno);
10167		deplist |= 1 << adp->ad_offset;
10168		if ((adp->ad_state & ATTACHED) == 0)
10169			panic("softdep_write_inodeblock: Unknown state 0x%x",
10170			    adp->ad_state);
10171#endif /* INVARIANTS */
10172		adp->ad_state &= ~ATTACHED;
10173		adp->ad_state |= UNDONE;
10174	}
10175	/*
10176	 * The on-disk inode cannot claim to be any larger than the last
10177	 * fragment that has been written. Otherwise, the on-disk inode
10178	 * might have fragments that were not the last block in the ext
10179	 * data which would corrupt the filesystem.
10180	 */
10181	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10182	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10183		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10184		/* keep going until hitting a rollback to a frag */
10185		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10186			continue;
10187		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10188		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10189#ifdef INVARIANTS
10190			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10191				panic("softdep_write_inodeblock: lost dep1");
10192#endif /* INVARIANTS */
10193			dp->di_extb[i] = 0;
10194		}
10195		lastadp = NULL;
10196		break;
10197	}
10198	/*
10199	 * If we have zero'ed out the last allocated block of the ext
10200	 * data, roll back the size to the last currently allocated block.
10201	 * We know that this last allocated block is full-sized, as
10202	 * we already checked for fragments in the loop above.
10203	 */
10204	if (lastadp != NULL &&
10205	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10206		for (i = lastadp->ad_offset; i >= 0; i--)
10207			if (dp->di_extb[i] != 0)
10208				break;
10209		dp->di_extsize = (i + 1) * fs->fs_bsize;
10210	}
10211	/*
10212	 * Set the file data dependencies to busy.
10213	 */
10214	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10215	     adp = TAILQ_NEXT(adp, ad_next)) {
10216#ifdef INVARIANTS
10217		if (deplist != 0 && prevlbn >= adp->ad_offset)
10218			panic("softdep_write_inodeblock: lbn order");
10219		if ((adp->ad_state & ATTACHED) == 0)
10220			panic("inodedep %p and adp %p not attached", inodedep, adp);
10221		prevlbn = adp->ad_offset;
10222		if (adp->ad_offset < NDADDR &&
10223		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10224			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10225			    "softdep_write_inodeblock",
10226			    (intmax_t)adp->ad_offset,
10227			    (intmax_t)dp->di_db[adp->ad_offset],
10228			    (intmax_t)adp->ad_newblkno);
10229		if (adp->ad_offset >= NDADDR &&
10230		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10231			panic("%s indirect pointer #%jd mismatch %jd != %jd",
10232			    "softdep_write_inodeblock:",
10233			    (intmax_t)adp->ad_offset - NDADDR,
10234			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10235			    (intmax_t)adp->ad_newblkno);
10236		deplist |= 1 << adp->ad_offset;
10237		if ((adp->ad_state & ATTACHED) == 0)
10238			panic("softdep_write_inodeblock: Unknown state 0x%x",
10239			    adp->ad_state);
10240#endif /* INVARIANTS */
10241		adp->ad_state &= ~ATTACHED;
10242		adp->ad_state |= UNDONE;
10243	}
10244	/*
10245	 * The on-disk inode cannot claim to be any larger than the last
10246	 * fragment that has been written. Otherwise, the on-disk inode
10247	 * might have fragments that were not the last block in the file
10248	 * which would corrupt the filesystem.
10249	 */
10250	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10251	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10252		if (adp->ad_offset >= NDADDR)
10253			break;
10254		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10255		/* keep going until hitting a rollback to a frag */
10256		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10257			continue;
10258		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10259		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10260#ifdef INVARIANTS
10261			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10262				panic("softdep_write_inodeblock: lost dep2");
10263#endif /* INVARIANTS */
10264			dp->di_db[i] = 0;
10265		}
10266		for (i = 0; i < NIADDR; i++) {
10267#ifdef INVARIANTS
10268			if (dp->di_ib[i] != 0 &&
10269			    (deplist & ((1 << NDADDR) << i)) == 0)
10270				panic("softdep_write_inodeblock: lost dep3");
10271#endif /* INVARIANTS */
10272			dp->di_ib[i] = 0;
10273		}
10274		return;
10275	}
10276	/*
10277	 * If we have zero'ed out the last allocated block of the file,
10278	 * roll back the size to the last currently allocated block.
10279	 * We know that this last allocated block is full-sized as
10280	 * we already checked for fragments in the loop above.
10281	 */
10282	if (lastadp != NULL &&
10283	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10284		for (i = lastadp->ad_offset; i >= 0; i--)
10285			if (dp->di_db[i] != 0)
10286				break;
10287		dp->di_size = (i + 1) * fs->fs_bsize;
10288	}
10289	/*
10290	 * The only dependencies are for indirect blocks.
10291	 *
10292	 * The file size for indirect block additions is not guaranteed.
10293	 * Such a guarantee would be non-trivial to achieve. The conventional
10294	 * synchronous write implementation also does not make this guarantee.
10295	 * Fsck should catch and fix discrepancies. Arguably, the file size
10296	 * can be over-estimated without destroying integrity when the file
10297	 * moves into the indirect blocks (i.e., is large). If we want to
10298	 * postpone fsck, we are stuck with this argument.
10299	 */
10300	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10301		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10302}
10303
10304/*
10305 * Cancel an indirdep as a result of truncation.  Release all of the
10306 * children allocindirs and place their journal work on the appropriate
10307 * list.
10308 */
10309static void
10310cancel_indirdep(indirdep, bp, freeblks)
10311	struct indirdep *indirdep;
10312	struct buf *bp;
10313	struct freeblks *freeblks;
10314{
10315	struct allocindir *aip;
10316
10317	/*
10318	 * None of the indirect pointers will ever be visible,
10319	 * so they can simply be tossed. GOINGAWAY ensures
10320	 * that allocated pointers will be saved in the buffer
10321	 * cache until they are freed. Note that they will
10322	 * only be able to be found by their physical address
10323	 * since the inode mapping the logical address will
10324	 * be gone. The save buffer used for the safe copy
10325	 * was allocated in setup_allocindir_phase2 using
10326	 * the physical address so it could be used for this
10327	 * purpose. Hence we swap the safe copy with the real
10328	 * copy, allowing the safe copy to be freed and holding
10329	 * on to the real copy for later use in indir_trunc.
10330	 */
10331	if (indirdep->ir_state & GOINGAWAY)
10332		panic("cancel_indirdep: already gone");
10333	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10334		indirdep->ir_state |= DEPCOMPLETE;
10335		LIST_REMOVE(indirdep, ir_next);
10336	}
10337	indirdep->ir_state |= GOINGAWAY;
10338	VFSTOUFS(indirdep->ir_list.wk_mp)->um_numindirdeps += 1;
10339	/*
10340	 * Pass in bp for blocks that still have journal writes
10341	 * pending so we can cancel them on their own.
10342	 */
10343	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
10344		cancel_allocindir(aip, bp, freeblks, 0);
10345	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0)
10346		cancel_allocindir(aip, NULL, freeblks, 0);
10347	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0)
10348		cancel_allocindir(aip, NULL, freeblks, 0);
10349	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0)
10350		cancel_allocindir(aip, NULL, freeblks, 0);
10351	/*
10352	 * If there are pending partial truncations we need to keep the
10353	 * old block copy around until they complete.  This is because
10354	 * the current b_data is not a perfect superset of the available
10355	 * blocks.
10356	 */
10357	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10358		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10359	else
10360		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
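	/*
	 * Transfer the indirdep from the live buffer to the save buffer
	 * so that indir_trunc() can still find it by physical address
	 * after the logical mapping through the inode is gone.
	 */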
10361	WORKLIST_REMOVE(&indirdep->ir_list);
10362	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10363	indirdep->ir_bp = NULL;
10364	indirdep->ir_freeblks = freeblks;
10365}
10366
10367/*
10368 * Free an indirdep once it no longer has new pointers to track.
10369 */
10370static void
10371free_indirdep(indirdep)
10372	struct indirdep *indirdep;
10373{
10374
10375	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10376	    ("free_indirdep: Indir trunc list not empty."));
10377	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10378	    ("free_indirdep: Complete head not empty."));
10379	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10380	    ("free_indirdep: write head not empty."));
10381	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10382	    ("free_indirdep: done head not empty."));
10383	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10384	    ("free_indirdep: deplist head not empty."));
10385	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10386	    ("free_indirdep: %p still on newblk list.", indirdep));
10387	KASSERT(indirdep->ir_saveddata == NULL,
10388	    ("free_indirdep: %p still has saved data.", indirdep));
10389	if (indirdep->ir_state & ONWORKLIST)
10390		WORKLIST_REMOVE(&indirdep->ir_list);
10391	WORKITEM_FREE(indirdep, D_INDIRDEP);
10392}
10393
10394/*
10395 * Called before a write to an indirdep.  This routine is responsible for
10396 * rolling back pointers to a safe state which includes only those
10397 * allocindirs which have been completed.
10398 */
10399static void
10400initiate_write_indirdep(indirdep, bp)
10401	struct indirdep *indirdep;
10402	struct buf *bp;
10403{
10404
10405	indirdep->ir_state |= IOSTARTED;
10406	if (indirdep->ir_state & GOINGAWAY)
10407		panic("disk_io_initiation: indirdep gone");
10408	/*
10409	 * If there are no remaining dependencies, this will be writing
10410	 * the real pointers.
10411	 */
10412	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10413	    TAILQ_EMPTY(&indirdep->ir_trunc))
10414		return;
10415	/*
10416	 * Replace up-to-date version with safe version.
10417	 */
10418	if (indirdep->ir_saveddata == NULL) {
10419		FREE_LOCK(&lk);
10420		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10421		    M_SOFTDEP_FLAGS);
10422		ACQUIRE_LOCK(&lk);
10423	}
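	/*
	 * Swap copies for the write: b_data gets the safe version holding
	 * only completed pointers, while ir_saveddata preserves the
	 * up-to-date pointers to be restored in handle_written_indirdep().
	 */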
10424	indirdep->ir_state &= ~ATTACHED;
10425	indirdep->ir_state |= UNDONE;
10426	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10427	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10428	    bp->b_bcount);
10429}
10430
10431/*
10432 * Called when an inode has been cleared in a cg bitmap.  This finally
10433 * eliminates any canceled jaddrefs.
10434 */
10435void
10436softdep_setup_inofree(mp, bp, ino, wkhd)
10437	struct mount *mp;
10438	struct buf *bp;
10439	ino_t ino;
10440	struct workhead *wkhd;
10441{
10442	struct worklist *wk, *wkn;
10443	struct inodedep *inodedep;
10444	uint8_t *inosused;
10445	struct cg *cgp;
10446	struct fs *fs;
10447
10448	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
10449	    ("softdep_setup_inofree called on non-softdep filesystem"));
10450	ACQUIRE_LOCK(&lk);
10451	fs = VFSTOUFS(mp)->um_fs;
10452	cgp = (struct cg *)bp->b_data;
10453	inosused = cg_inosused(cgp);
10454	if (isset(inosused, ino % fs->fs_ipg))
10455		panic("softdep_setup_inofree: inode %ju not freed.",
10456		    (uintmax_t)ino);
10457	if (inodedep_lookup(mp, ino, 0, &inodedep))
10458		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10459		    (uintmax_t)ino, inodedep);
10460	if (wkhd) {
10461		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10462			if (wk->wk_type != D_JADDREF)
10463				continue;
10464			WORKLIST_REMOVE(wk);
10465			/*
10466			 * We can free immediately even if the jaddref
10467			 * isn't attached in a background write, as the
10468			 * bitmaps are now reconciled.
10469			 */
10470			wk->wk_state |= COMPLETE | ATTACHED;
10471			free_jaddref(WK_JADDREF(wk));
10472		}
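		/*
		 * Any remaining journal work is moved to the cg buffer so
		 * that it is processed when the bitmap write completes.
		 */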
10473		jwork_move(&bp->b_dep, wkhd);
10474	}
10475	FREE_LOCK(&lk);
10476}
10477
10478
10479/*
10480 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10481 * map.  Any dependencies waiting for the write to clear are added to the
10482 * buf's list and any jnewblks that are being canceled are discarded
10483 * immediately.
10484 */
10485void
10486softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10487	struct mount *mp;
10488	struct buf *bp;
10489	ufs2_daddr_t blkno;
10490	int frags;
10491	struct workhead *wkhd;
10492{
10493	struct bmsafemap *bmsafemap;
10494	struct jnewblk *jnewblk;
10495	struct ufsmount *ump;
10496	struct worklist *wk;
10497	struct fs *fs;
10498#ifdef SUJ_DEBUG
10499	uint8_t *blksfree;
10500	struct cg *cgp;
10501	ufs2_daddr_t jstart;
10502	ufs2_daddr_t jend;
10503	ufs2_daddr_t end;
10504	long bno;
10505	int i;
10506#endif
10507
10508	CTR3(KTR_SUJ,
10509	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10510	    blkno, frags, wkhd);
10511
10512	ump = VFSTOUFS(mp);
10513	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ump)) != 0,
10514	    ("softdep_setup_blkfree called on non-softdep filesystem"));
10515	ACQUIRE_LOCK(&lk);
10516	/* Lookup the bmsafemap so we track when it is dirty. */
10517	fs = ump->um_fs;
10518	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10519	/*
10520	 * Detach any jnewblks which have been canceled.  They must linger
10521	 * until the bitmap is cleared again by ffs_blkfree() to prevent
10522	 * an unjournaled allocation from hitting the disk.
10523	 */
10524	if (wkhd) {
10525		while ((wk = LIST_FIRST(wkhd)) != NULL) {
10526			CTR2(KTR_SUJ,
10527			    "softdep_setup_blkfree: blkno %jd wk type %d",
10528			    blkno, wk->wk_type);
10529			WORKLIST_REMOVE(wk);
10530			if (wk->wk_type != D_JNEWBLK) {
10531				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10532				continue;
10533			}
10534			jnewblk = WK_JNEWBLK(wk);
10535			KASSERT(jnewblk->jn_state & GOINGAWAY,
10536			    ("softdep_setup_blkfree: jnewblk not canceled."));
10537#ifdef SUJ_DEBUG
10538			/*
10539			 * Assert that this block is free in the bitmap
10540			 * before we discard the jnewblk.
10541			 */
10542			cgp = (struct cg *)bp->b_data;
10543			blksfree = cg_blksfree(cgp);
10544			bno = dtogd(fs, jnewblk->jn_blkno);
10545			for (i = jnewblk->jn_oldfrags;
10546			    i < jnewblk->jn_frags; i++) {
10547				if (isset(blksfree, bno + i))
10548					continue;
10549				panic("softdep_setup_blkfree: not free");
10550			}
10551#endif
10552			/*
10553			 * Even if it's not attached we can free immediately
10554			 * as the new bitmap is correct.
10555			 */
10556			wk->wk_state |= COMPLETE | ATTACHED;
10557			free_jnewblk(jnewblk);
10558		}
10559	}
10560
10561#ifdef SUJ_DEBUG
10562	/*
10563	 * Assert that we are not freeing a block which has an outstanding
10564	 * allocation dependency.
10565	 */
10566	fs = VFSTOUFS(mp)->um_fs;
10567	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10568	end = blkno + frags;
10569	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10570		/*
10571		 * Don't match against blocks that will be freed when the
10572		 * background write is done.
10573		 */
10574		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10575		    (COMPLETE | DEPCOMPLETE))
10576			continue;
10577		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10578		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10579		if ((blkno >= jstart && blkno < jend) ||
10580		    (end > jstart && end <= jend)) {
10581			printf("state 0x%X %jd - %d %d dep %p\n",
10582			    jnewblk->jn_state, jnewblk->jn_blkno,
10583			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
10584			    jnewblk->jn_dep);
10585			panic("softdep_setup_blkfree: "
10586			    "%jd-%jd(%d) overlaps with %jd-%jd",
10587			    blkno, end, frags, jstart, jend);
10588		}
10589	}
10590#endif
10591	FREE_LOCK(&lk);
10592}
10593
10594/*
10595 * Revert a block allocation when the journal record that describes it
10596 * is not yet written.
10597 */
10598int
10599jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10600	struct jnewblk *jnewblk;
10601	struct fs *fs;
10602	struct cg *cgp;
10603	uint8_t *blksfree;
10604{
10605	ufs1_daddr_t fragno;
10606	long cgbno, bbase;
10607	int frags, blk;
10608	int i;
10609
10610	frags = 0;
10611	cgbno = dtogd(fs, jnewblk->jn_blkno);
10612	/*
10613	 * We have to test which frags need to be rolled back.  We may
10614	 * be operating on a stale copy when doing background writes.
10615	 */
10616	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10617		if (isclr(blksfree, cgbno + i))
10618			frags++;
10619	if (frags == 0)
10620		return (0);
10621	/*
10622	 * This is mostly ffs_blkfree() sans some validation and
10623	 * superblock updates.
10624	 */
10625	if (frags == fs->fs_frag) {
10626		fragno = fragstoblks(fs, cgbno);
10627		ffs_setblock(fs, blksfree, fragno);
10628		ffs_clusteracct(fs, cgp, fragno, 1);
10629		cgp->cg_cs.cs_nbfree++;
10630	} else {
10631		cgbno += jnewblk->jn_oldfrags;
10632		bbase = cgbno - fragnum(fs, cgbno);
10633		/* Decrement the old frags.  */
10634		blk = blkmap(fs, blksfree, bbase);
10635		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10636		/* Deallocate the fragment */
10637		for (i = 0; i < frags; i++)
10638			setbit(blksfree, cgbno + i);
10639		cgp->cg_cs.cs_nffree += frags;
10640		/* Add back in counts associated with the new frags */
10641		blk = blkmap(fs, blksfree, bbase);
10642		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10643		/* If a complete block has been reassembled, account for it. */
10644		fragno = fragstoblks(fs, bbase);
10645		if (ffs_isblock(fs, blksfree, fragno)) {
10646			cgp->cg_cs.cs_nffree -= fs->fs_frag;
10647			ffs_clusteracct(fs, cgp, fragno, 1);
10648			cgp->cg_cs.cs_nbfree++;
10649		}
10650	}
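	/*
	 * Mark the allocation undone; it will be re-applied by
	 * jnewblk_rollforward() once the journal record has been written.
	 */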
10651	stat_jnewblk++;
10652	jnewblk->jn_state &= ~ATTACHED;
10653	jnewblk->jn_state |= UNDONE;
10654
10655	return (frags);
10656}
10657
10658static void
10659initiate_write_bmsafemap(bmsafemap, bp)
10660	struct bmsafemap *bmsafemap;
10661	struct buf *bp;			/* The cg block. */
10662{
10663	struct jaddref *jaddref;
10664	struct jnewblk *jnewblk;
10665	uint8_t *inosused;
10666	uint8_t *blksfree;
10667	struct cg *cgp;
10668	struct fs *fs;
10669	ino_t ino;
10670
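	/*
	 * The rollback may already have been applied by
	 * bmsafemap_backgroundwrite(); IOSTARTED prevents doing it twice.
	 */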
10671	if (bmsafemap->sm_state & IOSTARTED)
10672		return;
10673	bmsafemap->sm_state |= IOSTARTED;
10674	/*
10675	 * Clear any inode allocations which are pending journal writes.
10676	 */
10677	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10678		cgp = (struct cg *)bp->b_data;
10679		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10680		inosused = cg_inosused(cgp);
10681		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10682			ino = jaddref->ja_ino % fs->fs_ipg;
10683			if (isset(inosused, ino)) {
10684				if ((jaddref->ja_mode & IFMT) == IFDIR)
10685					cgp->cg_cs.cs_ndir--;
10686				cgp->cg_cs.cs_nifree++;
10687				clrbit(inosused, ino);
10688				jaddref->ja_state &= ~ATTACHED;
10689				jaddref->ja_state |= UNDONE;
10690				stat_jaddref++;
10691			} else
10692				panic("initiate_write_bmsafemap: inode %ju "
10693				    "marked free", (uintmax_t)jaddref->ja_ino);
10694		}
10695	}
10696	/*
10697	 * Clear any block allocations which are pending journal writes.
10698	 */
10699	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10700		cgp = (struct cg *)bp->b_data;
10701		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10702		blksfree = cg_blksfree(cgp);
10703		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10704			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10705				continue;
10706			panic("initiate_write_bmsafemap: block %jd "
10707			    "marked free", jnewblk->jn_blkno);
10708		}
10709	}
10710	/*
10711	 * Move allocation lists to the written lists so they can be
10712	 * cleared once the block write is complete.
10713	 */
10714	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10715	    inodedep, id_deps);
10716	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10717	    newblk, nb_deps);
10718	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10719	    wk_list);
10720}
10721
10722/*
10723 * This routine is called during the completion interrupt
10724 * service routine for a disk write (from the procedure called
10725 * by the device driver to inform the filesystem caches of
10726 * a request completion).  It should be called early in this
10727 * procedure, before the block is made available to other
10728 * processes or other routines are called.
10729 *
10730 */
10731static void
10732softdep_disk_write_complete(bp)
10733	struct buf *bp;		/* describes the completed disk write */
10734{
10735	struct worklist *wk;
10736	struct worklist *owk;
10737	struct workhead reattach;
10738	struct freeblks *freeblks;
10739	struct buf *sbp;
10740
10741	/*
10742	 * If an error occurred while doing the write, then the data
10743	 * has not hit the disk and the dependencies cannot be unrolled.
10744	 */
10745	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10746		return;
10747	LIST_INIT(&reattach);
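	/*
	 * Dependencies that cannot be fully processed yet are collected
	 * on the reattach list and put back on the buffer below so that
	 * a future write of this block will retry them.
	 */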
10748	/*
10749	 * This lock must not be released anywhere in this code segment.
10750	 */
10751	sbp = NULL;
10752	owk = NULL;
10753	ACQUIRE_LOCK(&lk);
10754	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10755		WORKLIST_REMOVE(wk);
10756		dep_write[wk->wk_type]++;
10757		if (wk == owk)
10758			panic("duplicate worklist: %p\n", wk);
10759		owk = wk;
10760		switch (wk->wk_type) {
10761
10762		case D_PAGEDEP:
10763			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10764				WORKLIST_INSERT(&reattach, wk);
10765			continue;
10766
10767		case D_INODEDEP:
10768			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10769				WORKLIST_INSERT(&reattach, wk);
10770			continue;
10771
10772		case D_BMSAFEMAP:
10773			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10774				WORKLIST_INSERT(&reattach, wk);
10775			continue;
10776
10777		case D_MKDIR:
10778			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10779			continue;
10780
10781		case D_ALLOCDIRECT:
10782			wk->wk_state |= COMPLETE;
10783			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10784			continue;
10785
10786		case D_ALLOCINDIR:
10787			wk->wk_state |= COMPLETE;
10788			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10789			continue;
10790
10791		case D_INDIRDEP:
10792			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10793				WORKLIST_INSERT(&reattach, wk);
10794			continue;
10795
10796		case D_FREEBLKS:
10797			wk->wk_state |= COMPLETE;
10798			freeblks = WK_FREEBLKS(wk);
10799			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
10800			    LIST_EMPTY(&freeblks->fb_jblkdephd))
10801				add_to_worklist(wk, WK_NODELAY);
10802			continue;
10803
10804		case D_FREEWORK:
10805			handle_written_freework(WK_FREEWORK(wk));
10806			break;
10807
10808		case D_JSEGDEP:
10809			free_jsegdep(WK_JSEGDEP(wk));
10810			continue;
10811
10812		case D_JSEG:
10813			handle_written_jseg(WK_JSEG(wk), bp);
10814			continue;
10815
10816		case D_SBDEP:
10817			if (handle_written_sbdep(WK_SBDEP(wk), bp))
10818				WORKLIST_INSERT(&reattach, wk);
10819			continue;
10820
10821		case D_FREEDEP:
10822			free_freedep(WK_FREEDEP(wk));
10823			continue;
10824
10825		default:
10826			panic("softdep_disk_write_complete: Unknown type %s",
10827			    TYPENAME(wk->wk_type));
10828			/* NOTREACHED */
10829		}
10830	}
10831	/*
10832	 * Reattach any requests that must be redone.
10833	 */
10834	while ((wk = LIST_FIRST(&reattach)) != NULL) {
10835		WORKLIST_REMOVE(wk);
10836		WORKLIST_INSERT(&bp->b_dep, wk);
10837	}
10838	FREE_LOCK(&lk);
10839	if (sbp)
10840		brelse(sbp);
10841}
10842
10843/*
10844 * Called from within softdep_disk_write_complete above. Note that
10845 * this routine is always called from interrupt level with further
10846 * splbio interrupts blocked.
10847 */
10848static void
10849handle_allocdirect_partdone(adp, wkhd)
10850	struct allocdirect *adp;	/* the completed allocdirect */
10851	struct workhead *wkhd;		/* Work to do when inode is written. */
10852{
10853	struct allocdirectlst *listhead;
10854	struct allocdirect *listadp;
10855	struct inodedep *inodedep;
10856	long bsize;
10857
10858	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10859		return;
10860	/*
10861	 * The on-disk inode cannot claim to be any larger than the last
10862	 * fragment that has been written. Otherwise, the on-disk inode
10863	 * might have fragments that were not the last block in the file
10864	 * which would corrupt the filesystem. Thus, we cannot free any
10865	 * allocdirects after one whose ad_oldblkno claims a fragment as
10866	 * these blocks must be rolled back to zero before writing the inode.
10867	 * We check the currently active set of allocdirects in id_inoupdt
10868	 * or id_extupdt as appropriate.
10869	 */
10870	inodedep = adp->ad_inodedep;
10871	bsize = inodedep->id_fs->fs_bsize;
10872	if (adp->ad_state & EXTDATA)
10873		listhead = &inodedep->id_extupdt;
10874	else
10875		listhead = &inodedep->id_inoupdt;
10876	TAILQ_FOREACH(listadp, listhead, ad_next) {
10877		/* found our block */
10878		if (listadp == adp)
10879			break;
10880		/* continue if the old block is not a fragment */
10881		if (listadp->ad_oldsize == 0 ||
10882		    listadp->ad_oldsize == bsize)
10883			continue;
10884		/* hit a fragment */
10885		return;
10886	}
10887	/*
10888	 * If we have reached the end of the current list without
10889	 * finding the just finished dependency, then it must be
10890	 * on the future dependency list. Future dependencies cannot
10891	 * be freed until they are moved to the current list.
10892	 */
10893	if (listadp == NULL) {
10894#ifdef DEBUG
10895		if (adp->ad_state & EXTDATA)
10896			listhead = &inodedep->id_newextupdt;
10897		else
10898			listhead = &inodedep->id_newinoupdt;
10899		TAILQ_FOREACH(listadp, listhead, ad_next)
10900			/* found our block */
10901			if (listadp == adp)
10902				break;
10903		if (listadp == NULL)
10904			panic("handle_allocdirect_partdone: lost dep");
10905#endif /* DEBUG */
10906		return;
10907	}
10908	/*
10909	 * If we have found the just finished dependency, then queue
10910	 * it along with anything that follows it that is complete.
10911	 * Since the pointer has not yet been written in the inode
10912	 * as the dependency prevents it, place the allocdirect on the
10913	 * bufwait list where it will be freed once the pointer is
10914	 * valid.
10915	 */
10916	if (wkhd == NULL)
10917		wkhd = &inodedep->id_bufwait;
10918	for (; adp; adp = listadp) {
10919		listadp = TAILQ_NEXT(adp, ad_next);
10920		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10921			return;
10922		TAILQ_REMOVE(listhead, adp, ad_next);
10923		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
10924	}
10925}
10926
10927/*
10928 * Called from within softdep_disk_write_complete above.  This routine
10929 * completes successfully written allocindirs.
10930 */
10931static void
10932handle_allocindir_partdone(aip)
10933	struct allocindir *aip;		/* the completed allocindir */
10934{
10935	struct indirdep *indirdep;
10936
10937	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
10938		return;
10939	indirdep = aip->ai_indirdep;
10940	LIST_REMOVE(aip, ai_next);
10941	/*
10942	 * Don't set a pointer while the buffer is undergoing IO or while
10943	 * we have active truncations.
10944	 */
10945	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
10946		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
10947		return;
10948	}
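	/*
	 * Record the new block pointer in the saved safe copy so that a
	 * rolled-back write of this indirect block still includes pointers
	 * whose dependencies are complete.
	 */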
10949	if (indirdep->ir_state & UFS1FMT)
10950		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10951		    aip->ai_newblkno;
10952	else
10953		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10954		    aip->ai_newblkno;
10955	/*
10956	 * Await the pointer write before freeing the allocindir.
10957	 */
10958	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
10959}
10960
10961/*
10962 * Release segments held on a jwork list.
10963 */
10964static void
10965handle_jwork(wkhd)
10966	struct workhead *wkhd;
10967{
10968	struct worklist *wk;
10969
10970	while ((wk = LIST_FIRST(wkhd)) != NULL) {
10971		WORKLIST_REMOVE(wk);
10972		switch (wk->wk_type) {
10973		case D_JSEGDEP:
10974			free_jsegdep(WK_JSEGDEP(wk));
10975			continue;
10976		case D_FREEDEP:
10977			free_freedep(WK_FREEDEP(wk));
10978			continue;
10979		case D_FREEFRAG:
10980			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
10981			WORKITEM_FREE(wk, D_FREEFRAG);
10982			continue;
10983		case D_FREEWORK:
10984			handle_written_freework(WK_FREEWORK(wk));
10985			continue;
10986		default:
10987			panic("handle_jwork: Unknown type %s\n",
10988			    TYPENAME(wk->wk_type));
10989		}
10990	}
10991}
10992
10993/*
10994 * Handle the bufwait list on an inode when it is safe to release items
10995 * held there.  This normally happens after an inode block is written but
10996 * may be delayed and handled later if there are pending journal items that
10997 * are not yet safe to be released.
10998 */
10999static struct freefile *
11000handle_bufwait(inodedep, refhd)
11001	struct inodedep *inodedep;
11002	struct workhead *refhd;
11003{
11004	struct jaddref *jaddref;
11005	struct freefile *freefile;
11006	struct worklist *wk;
11007
11008	freefile = NULL;
11009	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
11010		WORKLIST_REMOVE(wk);
11011		switch (wk->wk_type) {
11012		case D_FREEFILE:
11013			/*
11014			 * We defer adding freefile to the worklist
11015			 * until all other additions have been made to
11016			 * ensure that it will be done after all the
11017			 * old blocks have been freed.
11018			 */
11019			if (freefile != NULL)
11020				panic("handle_bufwait: freefile");
11021			freefile = WK_FREEFILE(wk);
11022			continue;
11023
11024		case D_MKDIR:
11025			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
11026			continue;
11027
11028		case D_DIRADD:
11029			diradd_inode_written(WK_DIRADD(wk), inodedep);
11030			continue;
11031
11032		case D_FREEFRAG:
11033			wk->wk_state |= COMPLETE;
11034			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
11035				add_to_worklist(wk, 0);
11036			continue;
11037
11038		case D_DIRREM:
11039			wk->wk_state |= COMPLETE;
11040			add_to_worklist(wk, 0);
11041			continue;
11042
11043		case D_ALLOCDIRECT:
11044		case D_ALLOCINDIR:
11045			free_newblk(WK_NEWBLK(wk));
11046			continue;
11047
11048		case D_JNEWBLK:
11049			wk->wk_state |= COMPLETE;
11050			free_jnewblk(WK_JNEWBLK(wk));
11051			continue;
11052
11053		/*
11054		 * Save freed journal segments and add references on
11055		 * the supplied list which will delay their release
11056		 * until the cg bitmap is cleared on disk.
11057		 */
11058		case D_JSEGDEP:
11059			if (refhd == NULL)
11060				free_jsegdep(WK_JSEGDEP(wk));
11061			else
11062				WORKLIST_INSERT(refhd, wk);
11063			continue;
11064
11065		case D_JADDREF:
11066			jaddref = WK_JADDREF(wk);
11067			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11068			    if_deps);
11069			/*
11070			 * Transfer any jaddrefs to the list to be freed with
11071			 * the bitmap if we're handling a removed file.
11072			 */
11073			if (refhd == NULL) {
11074				wk->wk_state |= COMPLETE;
11075				free_jaddref(jaddref);
11076			} else
11077				WORKLIST_INSERT(refhd, wk);
11078			continue;
11079
11080		default:
11081			panic("handle_bufwait: Unknown type %p(%s)",
11082			    wk, TYPENAME(wk->wk_type));
11083			/* NOTREACHED */
11084		}
11085	}
11086	return (freefile);
11087}
11088/*
11089 * Called from within softdep_disk_write_complete above to restore
11090 * in-memory inode block contents to their most up-to-date state. Note
11091 * that this routine is always called from interrupt level with further
11092 * splbio interrupts blocked.
11093 */
11094static int
11095handle_written_inodeblock(inodedep, bp)
11096	struct inodedep *inodedep;
11097	struct buf *bp;		/* buffer containing the inode block */
11098{
11099	struct freefile *freefile;
11100	struct allocdirect *adp, *nextadp;
11101	struct ufs1_dinode *dp1 = NULL;
11102	struct ufs2_dinode *dp2 = NULL;
11103	struct workhead wkhd;
11104	int hadchanges, fstype;
11105	ino_t freelink;
11106
11107	LIST_INIT(&wkhd);
11108	hadchanges = 0;
11109	freefile = NULL;
11110	if ((inodedep->id_state & IOSTARTED) == 0)
11111		panic("handle_written_inodeblock: not started");
11112	inodedep->id_state &= ~IOSTARTED;
11113	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11114		fstype = UFS1;
11115		dp1 = (struct ufs1_dinode *)bp->b_data +
11116		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11117		freelink = dp1->di_freelink;
11118	} else {
11119		fstype = UFS2;
11120		dp2 = (struct ufs2_dinode *)bp->b_data +
11121		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11122		freelink = dp2->di_freelink;
11123	}
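	/*
	 * freelink is the on-disk pointer to the next inode on the
	 * unlinked list; it is compared against the in-core list below
	 * to decide whether the unlinked-list state can be advanced.
	 */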
11124	/*
11125	 * Leave this inodeblock dirty until it's in the list.
11126	 */
11127	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11128		struct inodedep *inon;
11129
11130		inon = TAILQ_NEXT(inodedep, id_unlinked);
11131		if ((inon == NULL && freelink == 0) ||
11132		    (inon && inon->id_ino == freelink)) {
11133			if (inon)
11134				inon->id_state |= UNLINKPREV;
11135			inodedep->id_state |= UNLINKNEXT;
11136		}
11137		hadchanges = 1;
11138	}
11139	/*
11140	 * If we had to rollback the inode allocation because of
11141	 * bitmaps being incomplete, then simply restore it.
11142	 * Keep the block dirty so that it will not be reclaimed until
11143	 * all associated dependencies have been cleared and the
11144	 * corresponding updates written to disk.
11145	 */
11146	if (inodedep->id_savedino1 != NULL) {
11147		hadchanges = 1;
11148		if (fstype == UFS1)
11149			*dp1 = *inodedep->id_savedino1;
11150		else
11151			*dp2 = *inodedep->id_savedino2;
11152		free(inodedep->id_savedino1, M_SAVEDINO);
11153		inodedep->id_savedino1 = NULL;
11154		if ((bp->b_flags & B_DELWRI) == 0)
11155			stat_inode_bitmap++;
11156		bdirty(bp);
11157		/*
11158		 * If the inode is clear here and GOINGAWAY it will never
11159		 * be written.  Process the bufwait and clear any pending
11160		 * work which may include the freefile.
11161		 */
11162		if (inodedep->id_state & GOINGAWAY)
11163			goto bufwait;
11164		return (1);
11165	}
11166	inodedep->id_state |= COMPLETE;
11167	/*
11168	 * Roll forward anything that had to be rolled back before
11169	 * the inode could be updated.
11170	 */
11171	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11172		nextadp = TAILQ_NEXT(adp, ad_next);
11173		if (adp->ad_state & ATTACHED)
11174			panic("handle_written_inodeblock: new entry");
11175		if (fstype == UFS1) {
11176			if (adp->ad_offset < NDADDR) {
11177				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11178					panic("%s %s #%jd mismatch %d != %jd",
11179					    "handle_written_inodeblock:",
11180					    "direct pointer",
11181					    (intmax_t)adp->ad_offset,
11182					    dp1->di_db[adp->ad_offset],
11183					    (intmax_t)adp->ad_oldblkno);
11184				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11185			} else {
11186				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11187					panic("%s: %s #%jd allocated as %d",
11188					    "handle_written_inodeblock",
11189					    "indirect pointer",
11190					    (intmax_t)adp->ad_offset - NDADDR,
11191					    dp1->di_ib[adp->ad_offset - NDADDR]);
11192				dp1->di_ib[adp->ad_offset - NDADDR] =
11193				    adp->ad_newblkno;
11194			}
11195		} else {
11196			if (adp->ad_offset < NDADDR) {
11197				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11198					panic("%s: %s #%jd %s %jd != %jd",
11199					    "handle_written_inodeblock",
11200					    "direct pointer",
11201					    (intmax_t)adp->ad_offset, "mismatch",
11202					    (intmax_t)dp2->di_db[adp->ad_offset],
11203					    (intmax_t)adp->ad_oldblkno);
11204				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11205			} else {
11206				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11207					panic("%s: %s #%jd allocated as %jd",
11208					    "handle_written_inodeblock",
11209					    "indirect pointer",
11210					    (intmax_t)adp->ad_offset - NDADDR,
11211					    (intmax_t)
11212					    dp2->di_ib[adp->ad_offset - NDADDR]);
11213				dp2->di_ib[adp->ad_offset - NDADDR] =
11214				    adp->ad_newblkno;
11215			}
11216		}
11217		adp->ad_state &= ~UNDONE;
11218		adp->ad_state |= ATTACHED;
11219		hadchanges = 1;
11220	}
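	/*
	 * Roll forward the external attribute block pointers in the same
	 * way (ext data exists only in UFS2 inodes).
	 */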
11221	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11222		nextadp = TAILQ_NEXT(adp, ad_next);
11223		if (adp->ad_state & ATTACHED)
11224			panic("handle_written_inodeblock: new entry");
11225		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11226			panic("%s: direct pointers #%jd %s %jd != %jd",
11227			    "handle_written_inodeblock",
11228			    (intmax_t)adp->ad_offset, "mismatch",
11229			    (intmax_t)dp2->di_extb[adp->ad_offset],
11230			    (intmax_t)adp->ad_oldblkno);
11231		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11232		adp->ad_state &= ~UNDONE;
11233		adp->ad_state |= ATTACHED;
11234		hadchanges = 1;
11235	}
11236	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11237		stat_direct_blk_ptrs++;
11238	/*
11239	 * Reset the file size to its most up-to-date value.
11240	 */
11241	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11242		panic("handle_written_inodeblock: bad size");
11243	if (inodedep->id_savednlink > LINK_MAX)
11244		panic("handle_written_inodeblock: Invalid link count "
11245		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11246	if (fstype == UFS1) {
11247		if (dp1->di_nlink != inodedep->id_savednlink) {
11248			dp1->di_nlink = inodedep->id_savednlink;
11249			hadchanges = 1;
11250		}
11251		if (dp1->di_size != inodedep->id_savedsize) {
11252			dp1->di_size = inodedep->id_savedsize;
11253			hadchanges = 1;
11254		}
11255	} else {
11256		if (dp2->di_nlink != inodedep->id_savednlink) {
11257			dp2->di_nlink = inodedep->id_savednlink;
11258			hadchanges = 1;
11259		}
11260		if (dp2->di_size != inodedep->id_savedsize) {
11261			dp2->di_size = inodedep->id_savedsize;
11262			hadchanges = 1;
11263		}
11264		if (dp2->di_extsize != inodedep->id_savedextsize) {
11265			dp2->di_extsize = inodedep->id_savedextsize;
11266			hadchanges = 1;
11267		}
11268	}
11269	inodedep->id_savedsize = -1;
11270	inodedep->id_savedextsize = -1;
11271	inodedep->id_savednlink = -1;
11272	/*
11273	 * If there were any rollbacks in the inode block, then it must be
11274	 * marked dirty so that it will eventually get written back in
11275	 * its correct form.
11276	 */
11277	if (hadchanges)
11278		bdirty(bp);
11279bufwait:
11280	/*
11281	 * Process any allocdirects that completed during the update.
11282	 */
11283	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11284		handle_allocdirect_partdone(adp, &wkhd);
11285	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11286		handle_allocdirect_partdone(adp, &wkhd);
11287	/*
11288	 * Process deallocations that were held pending until the
11289	 * inode had been written to disk. Freeing of the inode
11290	 * is delayed until after all blocks have been freed to
11291	 * avoid creation of new <vfsid, inum, lbn> triples
11292	 * before the old ones have been deleted.  Completely
11293	 * unlinked inodes are not processed until the unlinked
11294	 * inode list is written or the last reference is removed.
11295	 */
11296	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11297		freefile = handle_bufwait(inodedep, NULL);
11298		if (freefile && !LIST_EMPTY(&wkhd)) {
11299			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11300			freefile = NULL;
11301		}
11302	}
11303	/*
11304	 * Move rolled forward dependency completions to the bufwait list
11305	 * now that those that were already written have been processed.
11306	 */
11307	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11308		panic("handle_written_inodeblock: bufwait but no changes");
11309	jwork_move(&inodedep->id_bufwait, &wkhd);
11310
11311	if (freefile != NULL) {
11312		/*
11313		 * If the inode is goingaway it was never written.  Fake up
11314		 * the state here so free_inodedep() can succeed.
11315		 */
11316		if (inodedep->id_state & GOINGAWAY)
11317			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11318		if (free_inodedep(inodedep) == 0)
11319			panic("handle_written_inodeblock: live inodedep %p",
11320			    inodedep);
11321		add_to_worklist(&freefile->fx_list, 0);
11322		return (0);
11323	}
11324
11325	/*
11326	 * If no outstanding dependencies, free it.
11327	 */
11328	if (free_inodedep(inodedep) ||
11329	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11330	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11331	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11332	     LIST_FIRST(&inodedep->id_bufwait) == 0))
11333		return (0);
11334	return (hadchanges);
11335}
11336
11337static int
11338handle_written_indirdep(indirdep, bp, bpp)
11339	struct indirdep *indirdep;
11340	struct buf *bp;
11341	struct buf **bpp;
11342{
11343	struct allocindir *aip;
11344	struct buf *sbp;
11345	int chgs;
11346
11347	if (indirdep->ir_state & GOINGAWAY)
11348		panic("handle_written_indirdep: indirdep gone");
11349	if ((indirdep->ir_state & IOSTARTED) == 0)
11350		panic("handle_written_indirdep: IO not started");
11351	chgs = 0;
11352	/*
11353	 * If there were rollbacks revert them here.
11354	 */
11355	if (indirdep->ir_saveddata) {
11356		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11357		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11358			free(indirdep->ir_saveddata, M_INDIRDEP);
11359			indirdep->ir_saveddata = NULL;
11360		}
11361		chgs = 1;
11362	}
11363	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11364	indirdep->ir_state |= ATTACHED;
11365	/*
11366	 * Move allocindirs with written pointers to the completehd if
11367	 * the indirdep's pointer is not yet written.  Otherwise
11368	 * free them here.
11369	 */
11370	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) {
11371		LIST_REMOVE(aip, ai_next);
11372		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11373			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11374			    ai_next);
11375			newblk_freefrag(&aip->ai_block);
11376			continue;
11377		}
11378		free_newblk(&aip->ai_block);
11379	}
11380	/*
11381	 * Move allocindirs that have finished dependency processing from
11382	 * the done list to the write list after updating the pointers.
11383	 */
11384	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11385		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
11386			handle_allocindir_partdone(aip);
11387			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11388				panic("disk_write_complete: not gone");
11389			chgs = 1;
11390		}
11391	}
11392	/*
11393	 * Preserve the indirdep if there were any changes or if it is not
11394	 * yet valid on disk.
11395	 */
11396	if (chgs) {
11397		stat_indir_blk_ptrs++;
11398		bdirty(bp);
11399		return (1);
11400	}
11401	/*
11402	 * If there were no changes we can discard the savedbp and detach
11403	 * ourselves from the buf.  We are only carrying completed pointers
11404	 * in this case.
11405	 */
11406	sbp = indirdep->ir_savebp;
11407	sbp->b_flags |= B_INVAL | B_NOCACHE;
11408	indirdep->ir_savebp = NULL;
11409	indirdep->ir_bp = NULL;
11410	if (*bpp != NULL)
11411		panic("handle_written_indirdep: bp already exists.");
11412	*bpp = sbp;
11413	/*
11414	 * The indirdep may not be freed until its parent points at it.
11415	 */
11416	if (indirdep->ir_state & DEPCOMPLETE)
11417		free_indirdep(indirdep);
11418
11419	return (0);
11420}
11421
11422/*
11423 * Process a diradd entry after its dependent inode has been written.
11424 * This routine must be called with splbio interrupts blocked.
11425 */
11426static void
11427diradd_inode_written(dap, inodedep)
11428	struct diradd *dap;
11429	struct inodedep *inodedep;
11430{
11431
11432	dap->da_state |= COMPLETE;
11433	complete_diradd(dap);
11434	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11435}
11436
11437/*
11438 * Returns true if the bmsafemap will have rollbacks when written.  Must
11439 * only be called with lk and the buf lock on the cg held.
11440 */
11441static int
11442bmsafemap_backgroundwrite(bmsafemap, bp)
11443	struct bmsafemap *bmsafemap;
11444	struct buf *bp;
11445{
11446	int dirty;
11447
11448	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11449	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11450	/*
11451	 * If we're initiating a background write we need to process the
11452	 * rollbacks as they exist now, not as they exist when IO starts.
11453	 * No other consumers will look at the contents of the shadowed
11454	 * buf so this is safe to do here.
11455	 */
11456	if (bp->b_xflags & BX_BKGRDMARKER)
11457		initiate_write_bmsafemap(bmsafemap, bp);
11458
11459	return (dirty);
11460}
11461
11462/*
11463 * Re-apply an allocation when a cg write is complete.
11464 */
11465static int
11466jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11467	struct jnewblk *jnewblk;
11468	struct fs *fs;
11469	struct cg *cgp;
11470	uint8_t *blksfree;
11471{
11472	ufs1_daddr_t fragno;
11473	ufs2_daddr_t blkno;
11474	long cgbno, bbase;
11475	int frags, blk;
11476	int i;
11477
11478	frags = 0;
11479	cgbno = dtogd(fs, jnewblk->jn_blkno);
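	/*
	 * All fragments recorded by the jnewblk must still be free in the
	 * bitmap; finding one already allocated means it was re-used while
	 * its journal record was still pending.
	 */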
11480	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11481		if (isclr(blksfree, cgbno + i))
11482			panic("jnewblk_rollforward: re-allocated fragment");
11483		frags++;
11484	}
11485	if (frags == fs->fs_frag) {
11486		blkno = fragstoblks(fs, cgbno);
11487		ffs_clrblock(fs, blksfree, (long)blkno);
11488		ffs_clusteracct(fs, cgp, blkno, -1);
11489		cgp->cg_cs.cs_nbfree--;
11490	} else {
11491		bbase = cgbno - fragnum(fs, cgbno);
11492		cgbno += jnewblk->jn_oldfrags;
11493		/* If a complete block had been reassembled, account for it. */
11494		fragno = fragstoblks(fs, bbase);
11495		if (ffs_isblock(fs, blksfree, fragno)) {
11496			cgp->cg_cs.cs_nffree += fs->fs_frag;
11497			ffs_clusteracct(fs, cgp, fragno, -1);
11498			cgp->cg_cs.cs_nbfree--;
11499		}
11500		/* Decrement the old frags.  */
11501		blk = blkmap(fs, blksfree, bbase);
11502		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11503		/* Allocate the fragment */
11504		for (i = 0; i < frags; i++)
11505			clrbit(blksfree, cgbno + i);
11506		cgp->cg_cs.cs_nffree -= frags;
11507		/* Add back in counts associated with the new frags */
11508		blk = blkmap(fs, blksfree, bbase);
11509		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11510	}
11511	return (frags);
11512}
11513
11514/*
11515 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
11516 * changes if it's not a background write.  Set all written dependencies
11517 * to DEPCOMPLETE and free the structure if possible.
11518 */
11519static int
11520handle_written_bmsafemap(bmsafemap, bp)
11521	struct bmsafemap *bmsafemap;
11522	struct buf *bp;
11523{
11524	struct newblk *newblk;
11525	struct inodedep *inodedep;
11526	struct jaddref *jaddref, *jatmp;
11527	struct jnewblk *jnewblk, *jntmp;
11528	struct ufsmount *ump;
11529	uint8_t *inosused;
11530	uint8_t *blksfree;
11531	struct cg *cgp;
11532	struct fs *fs;
11533	ino_t ino;
11534	int foreground;
11535	int chgs;
11536
11537	if ((bmsafemap->sm_state & IOSTARTED) == 0)
11538		panic("handle_written_bmsafemap: Not started\n");
11539	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11540	chgs = 0;
11541	bmsafemap->sm_state &= ~IOSTARTED;
11542	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
11543	/*
11544	 * Release journal work that was waiting on the write.
11545	 */
11546	handle_jwork(&bmsafemap->sm_freewr);
11547
11548	/*
11549	 * Restore unwritten inode allocation pending jaddref writes.
11550	 */
11551	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11552		cgp = (struct cg *)bp->b_data;
11553		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11554		inosused = cg_inosused(cgp);
11555		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11556		    ja_bmdeps, jatmp) {
11557			if ((jaddref->ja_state & UNDONE) == 0)
11558				continue;
11559			ino = jaddref->ja_ino % fs->fs_ipg;
11560			if (isset(inosused, ino))
11561				panic("handle_written_bmsafemap: "
11562				    "re-allocated inode");
11563			/* Do the roll-forward only if it's a real copy. */
11564			if (foreground) {
11565				if ((jaddref->ja_mode & IFMT) == IFDIR)
11566					cgp->cg_cs.cs_ndir++;
11567				cgp->cg_cs.cs_nifree--;
11568				setbit(inosused, ino);
11569				chgs = 1;
11570			}
11571			jaddref->ja_state &= ~UNDONE;
11572			jaddref->ja_state |= ATTACHED;
11573			free_jaddref(jaddref);
11574		}
11575	}
11576	/*
11577	 * Restore any block allocations which are pending journal writes.
11578	 */
11579	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11580		cgp = (struct cg *)bp->b_data;
11581		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11582		blksfree = cg_blksfree(cgp);
11583		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11584		    jntmp) {
11585			if ((jnewblk->jn_state & UNDONE) == 0)
11586				continue;
11587			/* Do the roll-forward only if it's a real copy. */
11588			if (foreground &&
11589			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11590				chgs = 1;
11591			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11592			jnewblk->jn_state |= ATTACHED;
11593			free_jnewblk(jnewblk);
11594		}
11595	}
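	/*
	 * The bitmap write has completed, so dependencies recorded against
	 * it become DEPCOMPLETE and may now make further progress.
	 */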
11596	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11597		newblk->nb_state |= DEPCOMPLETE;
11598		newblk->nb_state &= ~ONDEPLIST;
11599		newblk->nb_bmsafemap = NULL;
11600		LIST_REMOVE(newblk, nb_deps);
11601		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11602			handle_allocdirect_partdone(
11603			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11604		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11605			handle_allocindir_partdone(
11606			    WK_ALLOCINDIR(&newblk->nb_list));
11607		else if (newblk->nb_list.wk_type != D_NEWBLK)
11608			panic("handle_written_bmsafemap: Unexpected type: %s",
11609			    TYPENAME(newblk->nb_list.wk_type));
11610	}
11611	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11612		inodedep->id_state |= DEPCOMPLETE;
11613		inodedep->id_state &= ~ONDEPLIST;
11614		LIST_REMOVE(inodedep, id_deps);
11615		inodedep->id_bmsafemap = NULL;
11616	}
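	/*
	 * If no rollbacks were needed and no new dependencies arrived
	 * while the write was in progress, the bmsafemap can be freed.
	 * Otherwise it stays on the dirty cg list and the buffer is
	 * redirtied (for foreground writes) so that the rolled-back
	 * bitmap is written again.
	 */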
11617	LIST_REMOVE(bmsafemap, sm_next);
11618	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11619	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11620	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11621	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11622	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
11623		LIST_REMOVE(bmsafemap, sm_hash);
11624		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11625		return (0);
11626	}
11627	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11628	if (foreground)
11629		bdirty(bp);
11630	return (1);
11631}
11632
11633/*
11634 * Try to free a mkdir dependency.
11635 */
11636static void
11637complete_mkdir(mkdir)
11638	struct mkdir *mkdir;
11639{
11640	struct diradd *dap;
11641
11642	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11643		return;
11644	LIST_REMOVE(mkdir, md_mkdirs);
11645	dap = mkdir->md_diradd;
11646	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11647	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11648		dap->da_state |= DEPCOMPLETE;
11649		complete_diradd(dap);
11650	}
11651	WORKITEM_FREE(mkdir, D_MKDIR);
11652}
11653
11654/*
11655 * Handle the completion of a mkdir dependency.
11656 */
11657static void
11658handle_written_mkdir(mkdir, type)
11659	struct mkdir *mkdir;
11660	int type;
11661{
11662
11663	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11664		panic("handle_written_mkdir: bad type");
11665	mkdir->md_state |= COMPLETE;
11666	complete_mkdir(mkdir);
11667}
11668
11669static int
11670free_pagedep(pagedep)
11671	struct pagedep *pagedep;
11672{
11673	int i;
11674
11675	if (pagedep->pd_state & NEWBLOCK)
11676		return (0);
11677	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11678		return (0);
11679	for (i = 0; i < DAHASHSZ; i++)
11680		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11681			return (0);
11682	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11683		return (0);
11684	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11685		return (0);
11686	if (pagedep->pd_state & ONWORKLIST)
11687		WORKLIST_REMOVE(&pagedep->pd_list);
11688	LIST_REMOVE(pagedep, pd_hash);
11689	WORKITEM_FREE(pagedep, D_PAGEDEP);
11690
11691	return (1);
11692}
11693
11694/*
11695 * Called from within softdep_disk_write_complete above.
11696 * A write operation was just completed. Removed inodes can
11697 * now be freed and associated block pointers may be committed.
11698 * Note that this routine is always called from interrupt level
11699 * with further splbio interrupts blocked.
11700 */
11701static int
11702handle_written_filepage(pagedep, bp)
11703	struct pagedep *pagedep;
11704	struct buf *bp;		/* buffer containing the written page */
11705{
11706	struct dirrem *dirrem;
11707	struct diradd *dap, *nextdap;
11708	struct direct *ep;
11709	int i, chgs;
11710
11711	if ((pagedep->pd_state & IOSTARTED) == 0)
11712		panic("handle_written_filepage: not started");
11713	pagedep->pd_state &= ~IOSTARTED;
11714	/*
11715	 * Process any directory removals that have been committed.
11716	 */
11717	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11718		LIST_REMOVE(dirrem, dm_next);
11719		dirrem->dm_state |= COMPLETE;
11720		dirrem->dm_dirinum = pagedep->pd_ino;
11721		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11722		    ("handle_written_filepage: Journal entries not written."));
11723		add_to_worklist(&dirrem->dm_list, 0);
11724	}
11725	/*
11726	 * Free any directory additions that have been committed.
11727	 * If it is a newly allocated block, we have to wait until
11728	 * the on-disk directory inode claims the new block.
11729	 */
11730	if ((pagedep->pd_state & NEWBLOCK) == 0)
11731		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11732			free_diradd(dap, NULL);
11733	/*
11734	 * Uncommitted directory entries must be restored.
11735	 */
11736	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11737		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11738		     dap = nextdap) {
11739			nextdap = LIST_NEXT(dap, da_pdlist);
11740			if (dap->da_state & ATTACHED)
11741				panic("handle_written_filepage: attached");
11742			ep = (struct direct *)
11743			    ((char *)bp->b_data + dap->da_offset);
11744			ep->d_ino = dap->da_newinum;
11745			dap->da_state &= ~UNDONE;
11746			dap->da_state |= ATTACHED;
11747			chgs = 1;
11748			/*
11749			 * If the inode referenced by the directory has
11750			 * been written out, then the dependency can be
11751			 * moved to the pending list.
11752			 */
11753			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11754				LIST_REMOVE(dap, da_pdlist);
11755				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11756				    da_pdlist);
11757			}
11758		}
11759	}
11760	/*
11761	 * If there were any rollbacks in the directory, then it must be
11762	 * marked dirty so that it will eventually get written back in
11763	 * its correct form.
11764	 */
11765	if (chgs) {
11766		if ((bp->b_flags & B_DELWRI) == 0)
11767			stat_dir_entry++;
11768		bdirty(bp);
11769		return (1);
11770	}
11771	/*
11772	 * If we are not waiting for a new directory block to be
11773	 * claimed by its inode, then the pagedep will be freed.
11774	 * Otherwise it will remain to track any new entries on
11775	 * the page in case they are fsync'ed.
11776	 */
11777	free_pagedep(pagedep);
11778	return (0);
11779}
11780
11781/*
11782 * Writing back in-core inode structures.
11783 *
11784 * The filesystem only accesses an inode's contents when it occupies an
11785 * "in-core" inode structure.  These "in-core" structures are separate from
11786 * the page frames used to cache inode blocks.  Only the latter are
11787 * transferred to/from the disk.  So, when the updated contents of the
11788 * "in-core" inode structure are copied to the corresponding in-memory inode
11789 * block, the dependencies are also transferred.  The following procedure is
11790 * called when copying a dirty "in-core" inode to a cached inode block.
11791 */
11792
11793/*
11794 * Called when an inode is loaded from disk. If the effective link count
11795 * differed from the actual link count when it was last flushed, then we
11796 * need to ensure that the correct effective link count is put back.
11797 */
11798void
11799softdep_load_inodeblock(ip)
11800	struct inode *ip;	/* the "in_core" copy of the inode */
11801{
11802	struct inodedep *inodedep;
11803
11804	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
11805	    ("softdep_load_inodeblock called on non-softdep filesystem"));
11806	/*
11807	 * Check for alternate nlink count.
11808	 */
11809	ip->i_effnlink = ip->i_nlink;
11810	ACQUIRE_LOCK(&lk);
11811	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
11812	    &inodedep) == 0) {
11813		FREE_LOCK(&lk);
11814		return;
11815	}
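	/*
	 * id_nlinkdelta counts link removals not yet committed to disk;
	 * subtracting it restores the effective link count that the rest
	 * of the kernel should observe for this inode.
	 */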
11816	ip->i_effnlink -= inodedep->id_nlinkdelta;
11817	FREE_LOCK(&lk);
11818}
11819
11820/*
11821 * This routine is called just before the "in-core" inode
11822 * information is to be copied to the in-memory inode block.
11823 * Recall that an inode block contains several inodes. If
11824 * the force flag is set, then the dependencies will be
11825 * cleared so that the update can always be made. Note that
11826 * the buffer is locked when this routine is called, so we
11827 * will never be in the middle of writing the inode block
11828 * to disk.
11829 */
11830void
11831softdep_update_inodeblock(ip, bp, waitfor)
11832	struct inode *ip;	/* the "in_core" copy of the inode */
11833	struct buf *bp;		/* the buffer containing the inode block */
11834	int waitfor;		/* nonzero => update must be allowed */
11835{
11836	struct inodedep *inodedep;
11837	struct inoref *inoref;
11838	struct ufsmount *ump;
11839	struct worklist *wk;
11840	struct mount *mp;
11841	struct buf *ibp;
11842	struct fs *fs;
11843	int error;
11844
11845	ump = ip->i_ump;
11846	mp = UFSTOVFS(ump);
11847	KASSERT(MOUNTEDSOFTDEP(mp) != 0,
11848	    ("softdep_update_inodeblock called on non-softdep filesystem"));
11849	fs = ip->i_fs;
11850	/*
11851	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
11852	 * does not have access to the in-core ip so must write directly into
11853	 * the inode block buffer when setting freelink.
11854	 */
11855	if (fs->fs_magic == FS_UFS1_MAGIC)
11856		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
11857		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
11858	else
11859		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
11860		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
11861	/*
11862	 * If the effective link count is not equal to the actual link
11863	 * count, then we must track the difference in an inodedep while
11864	 * the inode is (potentially) tossed out of the cache. Otherwise,
11865	 * if there is no existing inodedep, then there are no dependencies
11866	 * to track.
11867	 */
11868	ACQUIRE_LOCK(&lk);
11869again:
11870	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
11871		FREE_LOCK(&lk);
11872		if (ip->i_effnlink != ip->i_nlink)
11873			panic("softdep_update_inodeblock: bad link count");
11874		return;
11875	}
11876	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
11877		panic("softdep_update_inodeblock: bad delta");
11878	/*
11879	 * If we're flushing all dependencies we must also move any waiting
11880	 * for journal writes onto the bufwait list prior to I/O.
11881	 */
11882	if (waitfor) {
11883		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
11884			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
11885			    == DEPCOMPLETE) {
11886				jwait(&inoref->if_list, MNT_WAIT);
11887				goto again;
11888			}
11889		}
11890	}
11891	/*
11892	 * Changes have been initiated. Anything depending on these
11893	 * changes cannot occur until this inode has been written.
11894	 */
11895	inodedep->id_state &= ~COMPLETE;
11896	if ((inodedep->id_state & ONWORKLIST) == 0)
11897		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
11898	/*
11899	 * Any new dependencies associated with the incore inode must
11900	 * now be moved to the list associated with the buffer holding
11901	 * the in-memory copy of the inode. Once merged process any
11902	 * the in-memory copy of the inode. Once merged, process any
11903	 */
11904	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
11905	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
11906		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
11907		    NULL);
11908	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
11909	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
11910		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
11911		    NULL);
11912	/*
11913	 * Now that the inode has been pushed into the buffer, the
11914	 * operations dependent on the inode being written to disk
11915	 * can be moved to the id_bufwait so that they will be
11916	 * processed when the buffer I/O completes.
11917	 */
11918	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
11919		WORKLIST_REMOVE(wk);
11920		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
11921	}
11922	/*
11923	 * Newly allocated inodes cannot be written until the bitmap
11924	 * that allocates them has been written (indicated by
11925	 * DEPCOMPLETE being set in id_state). If we are doing a
11926	 * forced sync (e.g., an fsync on a file), we force the bitmap
11927	 * to be written so that the update can be done.
11928	 */
11929	if (waitfor == 0) {
11930		FREE_LOCK(&lk);
11931		return;
11932	}
11933retry:
11934	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
11935		FREE_LOCK(&lk);
11936		return;
11937	}
11938	ibp = inodedep->id_bmsafemap->sm_buf;
11939	ibp = getdirtybuf(ibp, &lk, MNT_WAIT);
11940	if (ibp == NULL) {
11941		/*
11942		 * If ibp came back as NULL, the dependency could have been
11943		 * freed while we slept.  Look it up again, and check to see
11944		 * that it has completed.
11945		 */
11946		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
11947			goto retry;
11948		FREE_LOCK(&lk);
11949		return;
11950	}
11951	FREE_LOCK(&lk);
11952	if ((error = bwrite(ibp)) != 0)
11953		softdep_error("softdep_update_inodeblock: bwrite", error);
11954}
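/*
 * Illustrative sketch, not part of the original code: ffs_update() is
 * assumed to invoke the routine above after reading the inode block and
 * before copying the in-core inode into it, along the lines of:
 *
 *	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
 *	    (int)fs->fs_bsize, NOCRED, &bp);
 *	if (DOINGSOFTDEP(vp))
 *		softdep_update_inodeblock(ip, bp, waitfor);
 *	*((struct ufs2_dinode *)bp->b_data +
 *	    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
 *	return (waitfor ? bwrite(bp) : bdwrite(bp));
 *
 * The final write choice above is simplified; the point is that the
 * dependency bookkeeping happens while the inode block buffer is locked.
 */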
11955
11956/*
11957 * Merge a new inode dependency list (such as id_newinoupdt) into an
11958 * old inode dependency list (such as id_inoupdt). This routine must be
11959 * called with splbio interrupts blocked.
11960 */
11961static void
11962merge_inode_lists(newlisthead, oldlisthead)
11963	struct allocdirectlst *newlisthead;
11964	struct allocdirectlst *oldlisthead;
11965{
11966	struct allocdirect *listadp, *newadp;
11967
11968	newadp = TAILQ_FIRST(newlisthead);
11969	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
11970		if (listadp->ad_offset < newadp->ad_offset) {
11971			listadp = TAILQ_NEXT(listadp, ad_next);
11972			continue;
11973		}
11974		TAILQ_REMOVE(newlisthead, newadp, ad_next);
11975		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
11976		if (listadp->ad_offset == newadp->ad_offset) {
11977			allocdirect_merge(oldlisthead, newadp,
11978			    listadp);
11979			listadp = newadp;
11980		}
11981		newadp = TAILQ_FIRST(newlisthead);
11982	}
11983	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
11984		TAILQ_REMOVE(newlisthead, newadp, ad_next);
11985		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
11986	}
11987}
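/*
 * Worked example (illustrative only): both lists are kept sorted by
 * ad_offset.  Merging a new list with offsets {1, 3} into an old list
 * with offsets {0, 1, 4} advances past offset 0, inserts the new entry
 * for offset 1 before the old one and collapses the pair with
 * allocdirect_merge(), then inserts offset 3 before offset 4, leaving
 * {0, 1, 3, 4}.  Any new entries beyond the end of the old list are
 * appended by the final loop.
 */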
11988
11989/*
11990 * If we are doing an fsync, then we must ensure that any directory
11991 * entries for the inode have been written after the inode gets to disk.
11992 */
11993int
11994softdep_fsync(vp)
11995	struct vnode *vp;	/* vnode for the "in-core" copy of the inode */
11996{
11997	struct inodedep *inodedep;
11998	struct pagedep *pagedep;
11999	struct inoref *inoref;
12000	struct worklist *wk;
12001	struct diradd *dap;
12002	struct mount *mp;
12003	struct vnode *pvp;
12004	struct inode *ip;
12005	struct buf *bp;
12006	struct fs *fs;
12007	struct thread *td = curthread;
12008	int error, flushparent, pagedep_new_block;
12009	ino_t parentino;
12010	ufs_lbn_t lbn;
12011
12012	ip = VTOI(vp);
12013	fs = ip->i_fs;
12014	mp = vp->v_mount;
12015	ACQUIRE_LOCK(&lk);
12016restart:
12017	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
12018		FREE_LOCK(&lk);
12019		return (0);
12020	}
12021	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12022		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12023		    == DEPCOMPLETE) {
12024			jwait(&inoref->if_list, MNT_WAIT);
12025			goto restart;
12026		}
12027	}
12028	if (!LIST_EMPTY(&inodedep->id_inowait) ||
12029	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
12030	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
12031	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
12032	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
12033		panic("softdep_fsync: pending ops %p", inodedep);
12034	for (error = 0, flushparent = 0; ; ) {
12035		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
12036			break;
12037		if (wk->wk_type != D_DIRADD)
12038			panic("softdep_fsync: Unexpected type %s",
12039			    TYPENAME(wk->wk_type));
12040		dap = WK_DIRADD(wk);
12041		/*
12042		 * Flush our parent if this directory entry has a MKDIR_PARENT
12043		 * dependency or is contained in a newly allocated block.
12044		 */
12045		if (dap->da_state & DIRCHG)
12046			pagedep = dap->da_previous->dm_pagedep;
12047		else
12048			pagedep = dap->da_pagedep;
12049		parentino = pagedep->pd_ino;
12050		lbn = pagedep->pd_lbn;
12051		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12052			panic("softdep_fsync: dirty");
12053		if ((dap->da_state & MKDIR_PARENT) ||
12054		    (pagedep->pd_state & NEWBLOCK))
12055			flushparent = 1;
12056		else
12057			flushparent = 0;
12058		/*
12059		 * If we are being fsync'ed as part of vgone'ing this vnode,
12060		 * then we will not be able to release and recover the
12061		 * vnode below, so we just have to give up on writing its
12062		 * directory entry out. It will eventually be written, just
12063		 * not now, but then the user was not asking to have it
12064		 * written, so we are not breaking any promises.
12065		 */
12066		if (vp->v_iflag & VI_DOOMED)
12067			break;
12068		/*
12069		 * We prevent deadlock by always fetching inodes from the
12070		 * root, moving down the directory tree. Thus, when fetching
12071		 * our parent directory, we first try to get the lock. If
12072		 * that fails, we must unlock ourselves before requesting
12073		 * the lock on our parent. See the comment in ufs_lookup
12074		 * for details on possible races.
12075		 */
12076		FREE_LOCK(&lk);
12077		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
12078		    FFSV_FORCEINSMQ)) {
12079			error = vfs_busy(mp, MBF_NOWAIT);
12080			if (error != 0) {
12081				vfs_ref(mp);
12082				VOP_UNLOCK(vp, 0);
12083				error = vfs_busy(mp, 0);
12084				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12085				vfs_rel(mp);
12086				if (error != 0)
12087					return (ENOENT);
12088				if (vp->v_iflag & VI_DOOMED) {
12089					vfs_unbusy(mp);
12090					return (ENOENT);
12091				}
12092			}
12093			VOP_UNLOCK(vp, 0);
12094			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12095			    &pvp, FFSV_FORCEINSMQ);
12096			vfs_unbusy(mp);
12097			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12098			if (vp->v_iflag & VI_DOOMED) {
12099				if (error == 0)
12100					vput(pvp);
12101				error = ENOENT;
12102			}
12103			if (error != 0)
12104				return (error);
12105		}
12106		/*
12107		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12108		 * that are contained in direct blocks will be resolved by
12109		 * doing a ffs_update. Pagedeps contained in indirect blocks
12110		 * may require a complete sync'ing of the directory. So, we
12111		 * try the cheap and fast ffs_update first, and if that fails,
12112		 * then we do the slower ffs_syncvnode of the directory.
12113		 */
12114		if (flushparent) {
12115			int locked;
12116
12117			if ((error = ffs_update(pvp, 1)) != 0) {
12118				vput(pvp);
12119				return (error);
12120			}
12121			ACQUIRE_LOCK(&lk);
12122			locked = 1;
12123			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12124				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12125					if (wk->wk_type != D_DIRADD)
12126						panic("softdep_fsync: Unexpected type %s",
12127						      TYPENAME(wk->wk_type));
12128					dap = WK_DIRADD(wk);
12129					if (dap->da_state & DIRCHG)
12130						pagedep = dap->da_previous->dm_pagedep;
12131					else
12132						pagedep = dap->da_pagedep;
12133					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12134					FREE_LOCK(&lk);
12135					locked = 0;
12136					if (pagedep_new_block && (error =
12137					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12138						vput(pvp);
12139						return (error);
12140					}
12141				}
12142			}
12143			if (locked)
12144				FREE_LOCK(&lk);
12145		}
12146		/*
12147		 * Flush directory page containing the inode's name.
12148		 */
12149		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12150		    &bp);
12151		if (error == 0)
12152			error = bwrite(bp);
12153		else
12154			brelse(bp);
12155		vput(pvp);
12156		if (error != 0)
12157			return (error);
12158		ACQUIRE_LOCK(&lk);
12159		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12160			break;
12161	}
12162	FREE_LOCK(&lk);
12163	return (0);
12164}
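/*
 * Illustrative sketch, not part of the original code: ffs_fsync() is
 * assumed to call the routine above once the file's own blocks have been
 * flushed, so that the name of a newly created file also reaches disk:
 *
 *	error = ffs_syncvnode(vp, waitfor, 0);
 *	if (error)
 *		return (error);
 *	if (waitfor == MNT_WAIT && DOINGSOFTDEP(vp))
 *		error = softdep_fsync(vp);
 *	return (error);
 */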
12165
12166/*
12167 * Flush all the dirty bitmaps associated with the block device
12168 * before flushing the rest of the dirty blocks so as to reduce
12169 * the number of dependencies that will have to be rolled back.
12170 *
12171 * XXX Unused?
12172 */
12173void
12174softdep_fsync_mountdev(vp)
12175	struct vnode *vp;
12176{
12177	struct buf *bp, *nbp;
12178	struct worklist *wk;
12179	struct bufobj *bo;
12180
12181	if (!vn_isdisk(vp, NULL))
12182		panic("softdep_fsync_mountdev: vnode not a disk");
12183	bo = &vp->v_bufobj;
12184restart:
12185	BO_LOCK(bo);
12186	ACQUIRE_LOCK(&lk);
12187	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12188		/*
12189		 * If it is already scheduled, skip to the next buffer.
12190		 */
12191		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12192			continue;
12193
12194		if ((bp->b_flags & B_DELWRI) == 0)
12195			panic("softdep_fsync_mountdev: not dirty");
12196		/*
12197		 * We are only interested in bitmaps with outstanding
12198		 * dependencies.
12199		 */
12200		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12201		    wk->wk_type != D_BMSAFEMAP ||
12202		    (bp->b_vflags & BV_BKGRDINPROG)) {
12203			BUF_UNLOCK(bp);
12204			continue;
12205		}
12206		FREE_LOCK(&lk);
12207		BO_UNLOCK(bo);
12208		bremfree(bp);
12209		(void) bawrite(bp);
12210		goto restart;
12211	}
12212	FREE_LOCK(&lk);
12213	drain_output(vp);
12214	BO_UNLOCK(bo);
12215}
12216
12217/*
12218 * Sync all cylinder groups that were dirty at the time this function is
12219 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
12220 * is used to flush freedep activity that may be holding up writes to a
12221 * indirect block.
12222 */
12223static int
12224sync_cgs(mp, waitfor)
12225	struct mount *mp;
12226	int waitfor;
12227{
12228	struct bmsafemap *bmsafemap;
12229	struct bmsafemap *sentinel;
12230	struct ufsmount *ump;
12231	struct buf *bp;
12232	int error;
12233
12234	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12235	sentinel->sm_cg = -1;
12236	ump = VFSTOUFS(mp);
12237	error = 0;
12238	ACQUIRE_LOCK(&lk);
12239	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12240	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12241	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12242		/* Skip sentinels and cgs with no work to release. */
12243		if (bmsafemap->sm_cg == -1 ||
12244		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12245		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12246			LIST_REMOVE(sentinel, sm_next);
12247			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12248			continue;
12249		}
12250		/*
12251		 * If we do not get the lock and we are waiting, try again.
12252		 * Otherwise move on to the next buffer and try to sync it.
12253		 */
12254		bp = getdirtybuf(bmsafemap->sm_buf, &lk, waitfor);
12255		if (bp == NULL && waitfor == MNT_WAIT)
12256			continue;
12257		LIST_REMOVE(sentinel, sm_next);
12258		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12259		if (bp == NULL)
12260			continue;
12261		FREE_LOCK(&lk);
12262		if (waitfor == MNT_NOWAIT)
12263			bawrite(bp);
12264		else
12265			error = bwrite(bp);
12266		ACQUIRE_LOCK(&lk);
12267		if (error)
12268			break;
12269	}
12270	LIST_REMOVE(sentinel, sm_next);
12271	FREE_LOCK(&lk);
12272	free(sentinel, M_BMSAFEMAP);
12273	return (error);
12274}
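/*
 * A generic sketch of the sentinel technique used above (illustrative;
 * "item", "head", "lock", and "process" are placeholders, not names from
 * this file): because the lock is dropped around each write, the current
 * position cannot be held in a plain pointer, so a dummy element is
 * advanced through the list instead:
 *
 *	LIST_INSERT_HEAD(&head, &sentinel, next);
 *	while ((item = LIST_NEXT(&sentinel, next)) != NULL) {
 *		LIST_REMOVE(&sentinel, next);
 *		LIST_INSERT_AFTER(item, &sentinel, next);
 *		unlock();
 *		process(item);
 *		lock();
 *	}
 *	LIST_REMOVE(&sentinel, next);
 *
 * Entries inserted at the head after the walk begins land before the
 * sentinel and are therefore not revisited.
 */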
12275
12276/*
12277 * This routine is called when we are trying to synchronously flush a
12278 * file. This routine must eliminate any filesystem metadata dependencies
12279 * so that the syncing routine can succeed.
12280 */
12281int
12282softdep_sync_metadata(struct vnode *vp)
12283{
12284	struct inode *ip;
12285	int error;
12286
12287	ip = VTOI(vp);
12288	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
12289	    ("softdep_sync_metadata called on non-softdep filesystem"));
12290	/*
12291	 * Ensure that any direct block dependencies have been cleared,
12292	 * truncations are started, and inode references are journaled.
12293	 */
12294	ACQUIRE_LOCK(&lk);
12295	/*
12296	 * Write all journal records to prevent rollbacks on devvp.
12297	 */
12298	if (vp->v_type == VCHR)
12299		softdep_flushjournal(vp->v_mount);
12300	error = flush_inodedep_deps(vp, vp->v_mount, ip->i_number);
12301	/*
12302	 * Ensure that all truncates are written so we won't find deps on
12303	 * indirect blocks.
12304	 */
12305	process_truncates(vp);
12306	FREE_LOCK(&lk);
12307
12308	return (error);
12309}
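/*
 * Illustrative sketch, not part of the original code (the call site and
 * its exact condition are assumptions): a full fsync pass in
 * ffs_syncvnode() is expected to end with a call to the routine above
 * once the vnode's dirty buffers have been issued, e.g.:
 *
 *	if (DOINGSOFTDEP(vp) && waitfor == MNT_WAIT)
 *		error = softdep_sync_metadata(vp);
 */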
12310
12311/*
12312 * This routine is called when we are attempting to sync a buf with
12313 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
12314 * other IO it can but returns EBUSY if the buffer is not yet able to
12315 * be written.  Dependencies which will not cause rollbacks will always
12316 * return 0.
12317 */
12318int
12319softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12320{
12321	struct indirdep *indirdep;
12322	struct pagedep *pagedep;
12323	struct allocindir *aip;
12324	struct newblk *newblk;
12325	struct buf *nbp;
12326	struct worklist *wk;
12327	int i, error;
12328
12329	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12330	    ("softdep_sync_buf called on non-softdep filesystem"));
12331	/*
12332	 * For VCHR we just don't want to force flush any dependencies that
12333	 * will cause rollbacks.
12334	 */
12335	if (vp->v_type == VCHR) {
12336		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12337			return (EBUSY);
12338		return (0);
12339	}
12340	ACQUIRE_LOCK(&lk);
12341	/*
12342	 * As we hold the buffer locked, none of its dependencies
12343	 * will disappear.
12344	 */
12345	error = 0;
12346top:
12347	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12348		switch (wk->wk_type) {
12349
12350		case D_ALLOCDIRECT:
12351		case D_ALLOCINDIR:
12352			newblk = WK_NEWBLK(wk);
12353			if (newblk->nb_jnewblk != NULL) {
12354				if (waitfor == MNT_NOWAIT) {
12355					error = EBUSY;
12356					goto out_unlock;
12357				}
12358				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12359				goto top;
12360			}
12361			if (newblk->nb_state & DEPCOMPLETE ||
12362			    waitfor == MNT_NOWAIT)
12363				continue;
12364			nbp = newblk->nb_bmsafemap->sm_buf;
12365			nbp = getdirtybuf(nbp, &lk, waitfor);
12366			if (nbp == NULL)
12367				goto top;
12368			FREE_LOCK(&lk);
12369			if ((error = bwrite(nbp)) != 0)
12370				goto out;
12371			ACQUIRE_LOCK(&lk);
12372			continue;
12373
12374		case D_INDIRDEP:
12375			indirdep = WK_INDIRDEP(wk);
12376			if (waitfor == MNT_NOWAIT) {
12377				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12378				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12379					error = EBUSY;
12380					goto out_unlock;
12381				}
12382			}
12383			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12384				panic("softdep_sync_buf: truncation pending.");
12385		restart:
12386			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12387				newblk = (struct newblk *)aip;
12388				if (newblk->nb_jnewblk != NULL) {
12389					jwait(&newblk->nb_jnewblk->jn_list,
12390					    waitfor);
12391					goto restart;
12392				}
12393				if (newblk->nb_state & DEPCOMPLETE)
12394					continue;
12395				nbp = newblk->nb_bmsafemap->sm_buf;
12396				nbp = getdirtybuf(nbp, &lk, waitfor);
12397				if (nbp == NULL)
12398					goto restart;
12399				FREE_LOCK(&lk);
12400				if ((error = bwrite(nbp)) != 0)
12401					goto out;
12402				ACQUIRE_LOCK(&lk);
12403				goto restart;
12404			}
12405			continue;
12406
12407		case D_PAGEDEP:
12408			/*
12409			 * Only flush directory entries in synchronous passes.
12410			 */
12411			if (waitfor != MNT_WAIT) {
12412				error = EBUSY;
12413				goto out_unlock;
12414			}
12415			/*
12416			 * While syncing snapshots, we must allow recursive
12417			 * lookups.
12418			 */
12419			BUF_AREC(bp);
12420			/*
12421			 * We are trying to sync a directory that may
12422			 * have dependencies on both its own metadata
12423			 * and/or dependencies on the inodes of any
12424			 * recently allocated files. We walk its diradd
12425			 * lists pushing out the associated inode.
12426			 */
12427			pagedep = WK_PAGEDEP(wk);
12428			for (i = 0; i < DAHASHSZ; i++) {
12429				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == 0)
12430					continue;
12431				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12432				    &pagedep->pd_diraddhd[i]))) {
12433					BUF_NOREC(bp);
12434					goto out_unlock;
12435				}
12436			}
12437			BUF_NOREC(bp);
12438			continue;
12439
12440		case D_FREEWORK:
12441		case D_FREEDEP:
12442		case D_JSEGDEP:
12443		case D_JNEWBLK:
12444			continue;
12445
12446		default:
12447			panic("softdep_sync_buf: Unknown type %s",
12448			    TYPENAME(wk->wk_type));
12449			/* NOTREACHED */
12450		}
12451	}
12452out_unlock:
12453	FREE_LOCK(&lk);
12454out:
12455	return (error);
12456}
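/*
 * Illustrative sketch, not part of the original code ("wait" is an
 * assumed local flag): a caller such as ffs_syncvnode() walks the
 * vnode's dirty buffers and asks this routine to clear dependencies
 * before writing each one, roughly:
 *
 *	if (!LIST_EMPTY(&bp->b_dep) &&
 *	    (error = softdep_sync_buf(vp, bp,
 *	    wait ? MNT_WAIT : MNT_NOWAIT)) != 0) {
 *		BUF_UNLOCK(bp);
 *		continue;
 *	}
 *	(void) bawrite(bp);
 *
 * An EBUSY return in the MNT_NOWAIT case simply leaves the buffer for a
 * later, synchronous pass.
 */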
12457
12458/*
12459 * Flush the dependencies associated with an inodedep.
12460 * Called with splbio blocked.
12461 */
12462static int
12463flush_inodedep_deps(vp, mp, ino)
12464	struct vnode *vp;
12465	struct mount *mp;
12466	ino_t ino;
12467{
12468	struct inodedep *inodedep;
12469	struct inoref *inoref;
12470	int error, waitfor;
12471
12472	/*
12473	 * This work is done in two passes. The first pass grabs most
12474	 * of the buffers and begins asynchronously writing them. The
12475	 * only way to wait for these asynchronous writes is to sleep
12476	 * on the filesystem vnode which may stay busy for a long time
12477	 * if the filesystem is active. So, instead, we make a second
12478	 * pass over the dependencies blocking on each write. In the
12479	 * usual case we will be blocking against a write that we
12480	 * initiated, so when it is done the dependency will have been
12481	 * resolved. Thus the second pass is expected to end quickly.
12482	 * We give a brief window at the top of the loop to allow
12483	 * any pending I/O to complete.
12484	 */
12485	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12486		if (error)
12487			return (error);
12488		FREE_LOCK(&lk);
12489		ACQUIRE_LOCK(&lk);
12490restart:
12491		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12492			return (0);
12493		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12494			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12495			    == DEPCOMPLETE) {
12496				jwait(&inoref->if_list, MNT_WAIT);
12497				goto restart;
12498			}
12499		}
12500		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12501		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12502		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12503		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12504			continue;
12505		/*
12506		 * If this was pass 2, we are done; otherwise begin pass 2.
12507		 */
12508		if (waitfor == MNT_WAIT)
12509			break;
12510		waitfor = MNT_WAIT;
12511	}
12512	/*
12513	 * Try freeing inodedep in case all dependencies have been removed.
12514	 */
12515	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12516		(void) free_inodedep(inodedep);
12517	return (0);
12518}
12519
12520/*
12521 * Flush an inode dependency list.
12522 * Called with splbio blocked.
12523 */
12524static int
12525flush_deplist(listhead, waitfor, errorp)
12526	struct allocdirectlst *listhead;
12527	int waitfor;
12528	int *errorp;
12529{
12530	struct allocdirect *adp;
12531	struct newblk *newblk;
12532	struct buf *bp;
12533
12534	rw_assert(&lk, RA_WLOCKED);
12535	TAILQ_FOREACH(adp, listhead, ad_next) {
12536		newblk = (struct newblk *)adp;
12537		if (newblk->nb_jnewblk != NULL) {
12538			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12539			return (1);
12540		}
12541		if (newblk->nb_state & DEPCOMPLETE)
12542			continue;
12543		bp = newblk->nb_bmsafemap->sm_buf;
12544		bp = getdirtybuf(bp, &lk, waitfor);
12545		if (bp == NULL) {
12546			if (waitfor == MNT_NOWAIT)
12547				continue;
12548			return (1);
12549		}
12550		FREE_LOCK(&lk);
12551		if (waitfor == MNT_NOWAIT)
12552			bawrite(bp);
12553		else
12554			*errorp = bwrite(bp);
12555		ACQUIRE_LOCK(&lk);
12556		return (1);
12557	}
12558	return (0);
12559}
12560
12561/*
12562 * Flush dependencies associated with an allocdirect block.
12563 */
12564static int
12565flush_newblk_dep(vp, mp, lbn)
12566	struct vnode *vp;
12567	struct mount *mp;
12568	ufs_lbn_t lbn;
12569{
12570	struct newblk *newblk;
12571	struct bufobj *bo;
12572	struct inode *ip;
12573	struct buf *bp;
12574	ufs2_daddr_t blkno;
12575	int error;
12576
12577	error = 0;
12578	bo = &vp->v_bufobj;
12579	ip = VTOI(vp);
12580	blkno = DIP(ip, i_db[lbn]);
12581	if (blkno == 0)
12582		panic("flush_newblk_dep: Missing block");
12583	ACQUIRE_LOCK(&lk);
12584	/*
12585	 * Loop until all dependencies related to this block are satisfied.
12586	 * We must be careful to restart after each sleep in case a write
12587	 * completes some part of this process for us.
12588	 */
12589	for (;;) {
12590		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12591			FREE_LOCK(&lk);
12592			break;
12593		}
12594		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12595			panic("flush_newblk_dep: Bad newblk %p", newblk);
12596		/*
12597		 * Flush the journal.
12598		 */
12599		if (newblk->nb_jnewblk != NULL) {
12600			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12601			continue;
12602		}
12603		/*
12604		 * Write the bitmap dependency.
12605		 */
12606		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12607			bp = newblk->nb_bmsafemap->sm_buf;
12608			bp = getdirtybuf(bp, &lk, MNT_WAIT);
12609			if (bp == NULL)
12610				continue;
12611			FREE_LOCK(&lk);
12612			error = bwrite(bp);
12613			if (error)
12614				break;
12615			ACQUIRE_LOCK(&lk);
12616			continue;
12617		}
12618		/*
12619		 * Write the buffer.
12620		 */
12621		FREE_LOCK(&lk);
12622		BO_LOCK(bo);
12623		bp = gbincore(bo, lbn);
12624		if (bp != NULL) {
12625			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12626			    LK_INTERLOCK, BO_LOCKPTR(bo));
12627			if (error == ENOLCK) {
12628				ACQUIRE_LOCK(&lk);
12629				continue; /* Slept, retry */
12630			}
12631			if (error != 0)
12632				break;	/* Failed */
12633			if (bp->b_flags & B_DELWRI) {
12634				bremfree(bp);
12635				error = bwrite(bp);
12636				if (error)
12637					break;
12638			} else
12639				BUF_UNLOCK(bp);
12640		} else
12641			BO_UNLOCK(bo);
12642		/*
12643		 * We have to wait for the direct pointers to
12644		 * point at the newdirblk before the dependency
12645		 * will go away.
12646		 */
12647		error = ffs_update(vp, 1);
12648		if (error)
12649			break;
12650		ACQUIRE_LOCK(&lk);
12651	}
12652	return (error);
12653}
12654
12655/*
12656 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12657 * Called with splbio blocked.
12658 */
12659static int
12660flush_pagedep_deps(pvp, mp, diraddhdp)
12661	struct vnode *pvp;
12662	struct mount *mp;
12663	struct diraddhd *diraddhdp;
12664{
12665	struct inodedep *inodedep;
12666	struct inoref *inoref;
12667	struct ufsmount *ump;
12668	struct diradd *dap;
12669	struct vnode *vp;
12670	int error = 0;
12671	struct buf *bp;
12672	ino_t inum;
12673
12674	ump = VFSTOUFS(mp);
12675restart:
12676	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12677		/*
12678		 * Flush ourselves if this directory entry
12679		 * has a MKDIR_PARENT dependency.
12680		 */
12681		if (dap->da_state & MKDIR_PARENT) {
12682			FREE_LOCK(&lk);
12683			if ((error = ffs_update(pvp, 1)) != 0)
12684				break;
12685			ACQUIRE_LOCK(&lk);
12686			/*
12687			 * If that cleared dependencies, go on to next.
12688			 */
12689			if (dap != LIST_FIRST(diraddhdp))
12690				continue;
12691			if (dap->da_state & MKDIR_PARENT)
12692				panic("flush_pagedep_deps: MKDIR_PARENT");
12693		}
12694		/*
12695		 * A newly allocated directory must have its "." and
12696		 * ".." entries written out before its name can be
12697		 * committed in its parent.
12698		 */
12699		inum = dap->da_newinum;
12700		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12701			panic("flush_pagedep_deps: lost inode1");
12702		/*
12703		 * Wait for any pending journal adds to complete so we don't
12704		 * cause rollbacks while syncing.
12705		 */
12706		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12707			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12708			    == DEPCOMPLETE) {
12709				jwait(&inoref->if_list, MNT_WAIT);
12710				goto restart;
12711			}
12712		}
12713		if (dap->da_state & MKDIR_BODY) {
12714			FREE_LOCK(&lk);
12715			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12716			    FFSV_FORCEINSMQ)))
12717				break;
12718			error = flush_newblk_dep(vp, mp, 0);
12719			/*
12720			 * If we still have the dependency we might need to
12721			 * update the vnode to sync the new link count to
12722			 * disk.
12723			 */
12724			if (error == 0 && dap == LIST_FIRST(diraddhdp))
12725				error = ffs_update(vp, 1);
12726			vput(vp);
12727			if (error != 0)
12728				break;
12729			ACQUIRE_LOCK(&lk);
12730			/*
12731			 * If that cleared dependencies, go on to next.
12732			 */
12733			if (dap != LIST_FIRST(diraddhdp))
12734				continue;
12735			if (dap->da_state & MKDIR_BODY) {
12736				inodedep_lookup(UFSTOVFS(ump), inum, 0,
12737				    &inodedep);
12738				panic("flush_pagedep_deps: MKDIR_BODY "
12739				    "inodedep %p dap %p vp %p",
12740				    inodedep, dap, vp);
12741			}
12742		}
12743		/*
12744		 * Flush the inode on which the directory entry depends.
12745		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12746		 * the only remaining dependency is that the updated inode
12747		 * count must get pushed to disk. The inode has already
12748		 * been pushed into its inode buffer (via VOP_UPDATE) at
12749		 * the time of the reference count change. So we need only
12750		 * locate that buffer, ensure that there will be no rollback
12751		 * caused by a bitmap dependency, then write the inode buffer.
12752		 */
12753retry:
12754		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12755			panic("flush_pagedep_deps: lost inode");
12756		/*
12757		 * If the inode still has bitmap dependencies,
12758		 * push them to disk.
12759		 */
12760		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12761			bp = inodedep->id_bmsafemap->sm_buf;
12762			bp = getdirtybuf(bp, &lk, MNT_WAIT);
12763			if (bp == NULL)
12764				goto retry;
12765			FREE_LOCK(&lk);
12766			if ((error = bwrite(bp)) != 0)
12767				break;
12768			ACQUIRE_LOCK(&lk);
12769			if (dap != LIST_FIRST(diraddhdp))
12770				continue;
12771		}
12772		/*
12773		 * If the inode is still sitting in a buffer waiting
12774		 * to be written or waiting for the link count to be
12775		 * adjusted, update it here to flush it to disk.
12776		 */
12777		if (dap == LIST_FIRST(diraddhdp)) {
12778			FREE_LOCK(&lk);
12779			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12780			    FFSV_FORCEINSMQ)))
12781				break;
12782			error = ffs_update(vp, 1);
12783			vput(vp);
12784			if (error)
12785				break;
12786			ACQUIRE_LOCK(&lk);
12787		}
12788		/*
12789		 * If we have failed to get rid of all the dependencies
12790		 * then something is seriously wrong.
12791		 */
12792		if (dap == LIST_FIRST(diraddhdp)) {
12793			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
12794			panic("flush_pagedep_deps: failed to flush "
12795			    "inodedep %p ino %ju dap %p",
12796			    inodedep, (uintmax_t)inum, dap);
12797		}
12798	}
12799	if (error)
12800		ACQUIRE_LOCK(&lk);
12801	return (error);
12802}
12803
12804/*
12805 * A large burst of file addition or deletion activity can drive the
12806 * memory load excessively high. First attempt to slow things down
12807 * using the techniques below. If that fails, this routine requests
12808 * the offending operations to fall back to running synchronously
12809 * until the memory load returns to a reasonable level.
12810 */
12811int
12812softdep_slowdown(vp)
12813	struct vnode *vp;
12814{
12815	struct ufsmount *ump;
12816	int jlow;
12817	int max_softdeps_hard;
12818
12819	KASSERT(MOUNTEDSOFTDEP(vp->v_mount) != 0,
12820	    ("softdep_slowdown called on non-softdep filesystem"));
12821	ACQUIRE_LOCK(&lk);
12822	jlow = 0;
12823	/*
12824	 * Check for journal space if needed.
12825	 */
12826	if (DOINGSUJ(vp)) {
12827		ump = VFSTOUFS(vp->v_mount);
12828		if (journal_space(ump, 0) == 0)
12829			jlow = 1;
12830	}
12831	max_softdeps_hard = max_softdeps * 11 / 10;
12832	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
12833	    dep_current[D_INODEDEP] < max_softdeps_hard &&
12834	    VFSTOUFS(vp->v_mount)->um_numindirdeps < maxindirdeps &&
12835	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0) {
12836		FREE_LOCK(&lk);
12837		return (0);
12838	}
12839	if (VFSTOUFS(vp->v_mount)->um_numindirdeps >= maxindirdeps || jlow)
12840		softdep_speedup();
12841	stat_sync_limit_hit += 1;
12842	FREE_LOCK(&lk);
12843	if (DOINGSUJ(vp))
12844		return (0);
12845	return (1);
12846}
12847
12848/*
12849 * Called by the allocation routines when they are about to fail
12850 * in the hope that we can free up the requested resource (inodes
12851 * or disk space).
12852 *
12853 * First check to see if the work list has anything on it. If it has,
12854 * clean up entries until we successfully free the requested resource.
12855 * Because this process holds inodes locked, we cannot handle any remove
12856 * requests that might block on a locked inode as that could lead to
12857 * deadlock. If the worklist yields none of the requested resource,
12858 * start syncing out vnodes to free up the needed space.
12859 */
12860int
12861softdep_request_cleanup(fs, vp, cred, resource)
12862	struct fs *fs;
12863	struct vnode *vp;
12864	struct ucred *cred;
12865	int resource;
12866{
12867	struct ufsmount *ump;
12868	struct mount *mp;
12869	struct vnode *lvp, *mvp;
12870	long starttime;
12871	ufs2_daddr_t needed;
12872	int error;
12873
12874	/*
12875	 * If we are being called because of a process doing a
12876	 * copy-on-write, then it is not safe to process any
12877	 * worklist items as we will recurse into the copyonwrite
12878	 * routine.  This will result in an incoherent snapshot.
12879	 * If the vnode that we hold is a snapshot, we must avoid
12880	 * handling other resources that could cause deadlock.
12881	 */
12882	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
12883		return (0);
12884
12885	if (resource == FLUSH_BLOCKS_WAIT)
12886		stat_cleanup_blkrequests += 1;
12887	else
12888		stat_cleanup_inorequests += 1;
12889
12890	mp = vp->v_mount;
12891	ump = VFSTOUFS(mp);
12892	mtx_assert(UFS_MTX(ump), MA_OWNED);
12893	UFS_UNLOCK(ump);
12894	error = ffs_update(vp, 1);
12895	if (error != 0) {
12896		UFS_LOCK(ump);
12897		return (0);
12898	}
12899	/*
12900	 * If we are in need of resources, consider pausing for
12901	 * tickdelay to give ourselves some breathing room.
12902	 */
12903	ACQUIRE_LOCK(&lk);
12904	process_removes(vp);
12905	process_truncates(vp);
12906	request_cleanup(UFSTOVFS(ump), resource);
12907	FREE_LOCK(&lk);
12908	/*
12909	 * Now clean up at least as many resources as we will need.
12910	 *
12911	 * When requested to clean up inodes, the number that are needed
12912	 * is set by the number of simultaneous writers (mnt_writeopcount)
12913	 * plus a bit of slop (2) in case some more writers show up while
12914	 * we are cleaning.
12915	 *
12916	 * When requested to free up space, the amount of space that
12917	 * we need is enough blocks to allocate a full-sized segment
12918	 * (fs_contigsumsize). The number of such segments that will
12919	 * be needed is set by the number of simultaneous writers
12920	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
12921	 * writers show up while we are cleaning.
12922	 *
12923	 * Additionally, if we are unprivileged and allocating space,
12924	 * we need to ensure that we clean up enough blocks to get the
12925	 * needed number of blocks over the threshold of the minimum
12926	 * number of blocks required to be kept free by the filesystem
12927	 * (fs_minfree).
12928	 */
12929	if (resource == FLUSH_INODES_WAIT) {
12930		needed = vp->v_mount->mnt_writeopcount + 2;
12931	} else if (resource == FLUSH_BLOCKS_WAIT) {
12932		needed = (vp->v_mount->mnt_writeopcount + 2) *
12933		    fs->fs_contigsumsize;
12934		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
12935			needed += fragstoblks(fs,
12936			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
12937			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
12938	} else {
12939		UFS_LOCK(ump);
12940		printf("softdep_request_cleanup: Unknown resource type %d\n",
12941		    resource);
12942		return (0);
12943	}
12944	starttime = time_second;
12945retry:
12946	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
12947	    fs->fs_cstotal.cs_nbfree <= needed) ||
12948	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12949	    fs->fs_cstotal.cs_nifree <= needed)) {
12950		ACQUIRE_LOCK(&lk);
12951		if (ump->softdep_on_worklist > 0 &&
12952		    process_worklist_item(UFSTOVFS(ump),
12953		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
12954			stat_worklist_push += 1;
12955		FREE_LOCK(&lk);
12956	}
12957	/*
12958	 * If we still need resources and there are no more worklist
12959	 * entries to process to obtain them, we have to start flushing
12960	 * the dirty vnodes to force the release of additional requests
12961	 * to the worklist that we can then process to reap additional
12962	 * resources. We walk the vnodes associated with the mount point
12963	 * until we get the needed worklist requests that we can reap.
12964	 */
12965	if ((resource == FLUSH_BLOCKS_WAIT &&
12966	     fs->fs_cstotal.cs_nbfree <= needed) ||
12967	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12968	     fs->fs_cstotal.cs_nifree <= needed)) {
12969		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
12970			if (TAILQ_FIRST(&lvp->v_bufobj.bo_dirty.bv_hd) == 0) {
12971				VI_UNLOCK(lvp);
12972				continue;
12973			}
12974			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
12975			    curthread))
12976				continue;
12977			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
12978				vput(lvp);
12979				continue;
12980			}
12981			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
12982			vput(lvp);
12983		}
12984		lvp = ump->um_devvp;
12985		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
12986			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
12987			VOP_UNLOCK(lvp, 0);
12988		}
12989		if (ump->softdep_on_worklist > 0) {
12990			stat_cleanup_retries += 1;
12991			goto retry;
12992		}
12993		stat_cleanup_failures += 1;
12994	}
12995	if (time_second - starttime > stat_cleanup_high_delay)
12996		stat_cleanup_high_delay = time_second - starttime;
12997	UFS_LOCK(ump);
12998	return (1);
12999}
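/*
 * Illustrative sketch, not part of the original code: the block
 * allocator's ENOSPC path (assumed to be in ffs_alloc()) retries a
 * failing allocation once after asking for a cleanup, along the lines
 * of:
 *
 *	nospace:
 *		if (reclaimed == 0 &&
 *		    softdep_request_cleanup(fs, vp, cred, FLUSH_BLOCKS_WAIT)) {
 *			reclaimed = 1;
 *			goto retry;
 *		}
 *		return (ENOSPC);
 */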
13000
13001/*
13002 * If memory utilization has gotten too high, deliberately slow things
13003 * down and speed up the I/O processing.
13004 */
13005extern struct thread *syncertd;
13006static int
13007request_cleanup(mp, resource)
13008	struct mount *mp;
13009	int resource;
13010{
13011	struct thread *td = curthread;
13012	struct ufsmount *ump;
13013
13014	rw_assert(&lk, RA_WLOCKED);
13015	/*
13016	 * We never hold up the filesystem syncer or buf daemon.
13017	 */
13018	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
13019		return (0);
13020	ump = VFSTOUFS(mp);
13021	/*
13022	 * First check to see if the work list has gotten backlogged.
13023	 * If it has, co-opt this process to help clean up two entries.
13024	 * Because this process may hold inodes locked, we cannot
13025	 * handle any remove requests that might block on a locked
13026	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
13027	 * to avoid recursively processing the worklist.
13028	 */
13029	if (ump->softdep_on_worklist > max_softdeps / 10) {
13030		td->td_pflags |= TDP_SOFTDEP;
13031		process_worklist_item(mp, 2, LK_NOWAIT);
13032		td->td_pflags &= ~TDP_SOFTDEP;
13033		stat_worklist_push += 2;
13034		return (1);
13035	}
13036	/*
13037	 * Next, we attempt to speed up the syncer process. If that
13038	 * is successful, then we allow the process to continue.
13039	 */
13040	if (softdep_speedup() &&
13041	    resource != FLUSH_BLOCKS_WAIT &&
13042	    resource != FLUSH_INODES_WAIT)
13043		return (0);
13044	/*
13045	 * If we are resource constrained on inode dependencies, try
13046	 * flushing some dirty inodes. Otherwise, we are constrained
13047	 * by file deletions, so try accelerating flushes of directories
13048	 * with removal dependencies. We would like to do the cleanup
13049	 * here, but we probably hold an inode locked at this point and
13050	 * that might deadlock against one that we try to clean. So,
13051	 * the best that we can do is request the syncer daemon to do
13052	 * the cleanup for us.
13053	 */
13054	switch (resource) {
13055
13056	case FLUSH_INODES:
13057	case FLUSH_INODES_WAIT:
13058		stat_ino_limit_push += 1;
13059		req_clear_inodedeps += 1;
13060		stat_countp = &stat_ino_limit_hit;
13061		break;
13062
13063	case FLUSH_BLOCKS:
13064	case FLUSH_BLOCKS_WAIT:
13065		stat_blk_limit_push += 1;
13066		req_clear_remove += 1;
13067		stat_countp = &stat_blk_limit_hit;
13068		break;
13069
13070	default:
13071		panic("request_cleanup: unknown type");
13072	}
13073	/*
13074	 * Hopefully the syncer daemon will catch up and awaken us.
13075	 * We wait at most tickdelay before proceeding in any case.
13076	 */
13077	proc_waiting += 1;
13078	if (callout_pending(&softdep_callout) == FALSE)
13079		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13080		    pause_timer, 0);
13081
13082	msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13083	proc_waiting -= 1;
13084	return (1);
13085}
13086
13087/*
13088 * Awaken processes pausing in request_cleanup and clear proc_waiting
13089 * to indicate that there is no longer a timer running.
13090 */
13091static void
13092pause_timer(arg)
13093	void *arg;
13094{
13095
13096	/*
13097	 * The callout_ API has acquired mtx and will hold it around this
13098	 * function call.
13099	 */
13100	*stat_countp += 1;
13101	wakeup_one(&proc_waiting);
13102	if (proc_waiting > 0)
13103		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13104		    pause_timer, 0);
13105}
13106
13107/*
13108 * Flush out a directory with at least one removal dependency in an effort to
13109 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13110 */
13111static void
13112clear_remove(void)
13113{
13114	struct pagedep_hashhead *pagedephd;
13115	struct pagedep *pagedep;
13116	static int next = 0;
13117	struct mount *mp;
13118	struct vnode *vp;
13119	struct bufobj *bo;
13120	int error, cnt;
13121	ino_t ino;
13122
13123	rw_assert(&lk, RA_WLOCKED);
13124
13125	for (cnt = 0; cnt <= pagedep_hash; cnt++) {
13126		pagedephd = &pagedep_hashtbl[next++];
13127		if (next > pagedep_hash)
13128			next = 0;
13129		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13130			if (LIST_EMPTY(&pagedep->pd_dirremhd))
13131				continue;
13132			mp = pagedep->pd_list.wk_mp;
13133			ino = pagedep->pd_ino;
13134			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13135				continue;
13136			FREE_LOCK(&lk);
13137
13138			/*
13139			 * Let unmount clear deps
13140			 */
13141			error = vfs_busy(mp, MBF_NOWAIT);
13142			if (error != 0)
13143				goto finish_write;
13144			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13145			     FFSV_FORCEINSMQ);
13146			vfs_unbusy(mp);
13147			if (error != 0) {
13148				softdep_error("clear_remove: vget", error);
13149				goto finish_write;
13150			}
13151			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13152				softdep_error("clear_remove: fsync", error);
13153			bo = &vp->v_bufobj;
13154			BO_LOCK(bo);
13155			drain_output(vp);
13156			BO_UNLOCK(bo);
13157			vput(vp);
13158		finish_write:
13159			vn_finished_write(mp);
13160			ACQUIRE_LOCK(&lk);
13161			return;
13162		}
13163	}
13164}
13165
13166/*
13167 * Clear out a block of dirty inodes in an effort to reduce
13168 * the number of inodedep dependency structures.
13169 */
13170static void
13171clear_inodedeps(void)
13172{
13173	struct inodedep_hashhead *inodedephd;
13174	struct inodedep *inodedep;
13175	static int next = 0;
13176	struct mount *mp;
13177	struct vnode *vp;
13178	struct fs *fs;
13179	int error, cnt;
13180	ino_t firstino, lastino, ino;
13181
13182	rw_assert(&lk, RA_WLOCKED);
13183	/*
13184	 * Pick a random inode dependency to be cleared.
13185	 * We will then gather up all the inodes in its block
13186	 * that have dependencies and flush them out.
13187	 */
13188	for (cnt = 0; cnt <= inodedep_hash; cnt++) {
13189		inodedephd = &inodedep_hashtbl[next++];
13190		if (next > inodedep_hash)
13191			next = 0;
13192		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13193			break;
13194	}
13195	if (inodedep == NULL)
13196		return;
13197	fs = inodedep->id_fs;
13198	mp = inodedep->id_list.wk_mp;
13199	/*
13200	 * Find the last inode in the block with dependencies.
13201	 */
13202	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13203	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13204		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
13205			break;
13206	/*
13207	 * Asynchronously push all but the last inode with dependencies.
13208	 * Synchronously push the last inode with dependencies to ensure
13209	 * that the inode block gets written to free up the inodedeps.
13210	 */
13211	for (ino = firstino; ino <= lastino; ino++) {
13212		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13213			continue;
13214		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13215			continue;
13216		FREE_LOCK(&lk);
13217		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13218		if (error != 0) {
13219			vn_finished_write(mp);
13220			ACQUIRE_LOCK(&lk);
13221			return;
13222		}
13223		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13224		    FFSV_FORCEINSMQ)) != 0) {
13225			softdep_error("clear_inodedeps: vget", error);
13226			vfs_unbusy(mp);
13227			vn_finished_write(mp);
13228			ACQUIRE_LOCK(&lk);
13229			return;
13230		}
13231		vfs_unbusy(mp);
13232		if (ino == lastino) {
13233			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13234				softdep_error("clear_inodedeps: fsync1", error);
13235		} else {
13236			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13237				softdep_error("clear_inodedeps: fsync2", error);
13238			BO_LOCK(&vp->v_bufobj);
13239			drain_output(vp);
13240			BO_UNLOCK(&vp->v_bufobj);
13241		}
13242		vput(vp);
13243		vn_finished_write(mp);
13244		ACQUIRE_LOCK(&lk);
13245	}
13246}
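/*
 * Illustrative sketch, not part of the original code: the softdep
 * worklist processor is assumed to honor the requests posted by
 * request_cleanup() by running the two routines above, roughly:
 *
 *	if (req_clear_inodedeps) {
 *		clear_inodedeps();
 *		req_clear_inodedeps -= 1;
 *		wakeup_one(&proc_waiting);
 *	}
 *	if (req_clear_remove) {
 *		clear_remove();
 *		req_clear_remove -= 1;
 *		wakeup_one(&proc_waiting);
 *	}
 */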
13247
13248void
13249softdep_buf_append(bp, wkhd)
13250	struct buf *bp;
13251	struct workhead *wkhd;
13252{
13253	struct worklist *wk;
13254
13255	if ((wk = LIST_FIRST(wkhd)) == NULL)
13256		return;
13257	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13258	    ("softdep_buf_append called on non-softdep filesystem"));
13259	ACQUIRE_LOCK(&lk);
13260	while ((wk = LIST_FIRST(wkhd)) != NULL) {
13261		WORKLIST_REMOVE(wk);
13262		WORKLIST_INSERT(&bp->b_dep, wk);
13263	}
13264	FREE_LOCK(&lk);
13266}
13267
13268void
13269softdep_inode_append(ip, cred, wkhd)
13270	struct inode *ip;
13271	struct ucred *cred;
13272	struct workhead *wkhd;
13273{
13274	struct buf *bp;
13275	struct fs *fs;
13276	int error;
13277
13278	KASSERT(MOUNTEDSOFTDEP(UFSTOVFS(ip->i_ump)) != 0,
13279	    ("softdep_inode_append called on non-softdep filesystem"));
13280	fs = ip->i_fs;
13281	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13282	    (int)fs->fs_bsize, cred, &bp);
13283	if (error) {
13284		bqrelse(bp);
13285		softdep_freework(wkhd);
13286		return;
13287	}
13288	softdep_buf_append(bp, wkhd);
13289	bqrelse(bp);
13290}
13291
13292void
13293softdep_freework(wkhd)
13294	struct workhead *wkhd;
13295{
13296	struct worklist *wk;
13297
13298	if ((wk = LIST_FIRST(wkhd)) == NULL)
13299		return;
13300	KASSERT(MOUNTEDSOFTDEP(wk->wk_mp) != 0,
13301	    ("softdep_freework called on non-softdep filesystem"));
13302	ACQUIRE_LOCK(&lk);
13303	handle_jwork(wkhd);
13304	FREE_LOCK(&lk);
13305}
13306
13307/*
13308 * Function to determine if the buffer has outstanding dependencies
13309 * that will cause a roll-back if the buffer is written. If wantcount
13310 * is set, return number of dependencies, otherwise just yes or no.
13311 */
13312static int
13313softdep_count_dependencies(bp, wantcount)
13314	struct buf *bp;
13315	int wantcount;
13316{
13317	struct worklist *wk;
13318	struct bmsafemap *bmsafemap;
13319	struct freework *freework;
13320	struct inodedep *inodedep;
13321	struct indirdep *indirdep;
13322	struct freeblks *freeblks;
13323	struct allocindir *aip;
13324	struct pagedep *pagedep;
13325	struct dirrem *dirrem;
13326	struct newblk *newblk;
13327	struct mkdir *mkdir;
13328	struct diradd *dap;
13329	int i, retval;
13330
13331	retval = 0;
13332	ACQUIRE_LOCK(&lk);
13333	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13334		switch (wk->wk_type) {
13335
13336		case D_INODEDEP:
13337			inodedep = WK_INODEDEP(wk);
13338			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13339				/* bitmap allocation dependency */
13340				retval += 1;
13341				if (!wantcount)
13342					goto out;
13343			}
13344			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13345				/* direct block pointer dependency */
13346				retval += 1;
13347				if (!wantcount)
13348					goto out;
13349			}
13350			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
13351				/* direct block pointer dependency */
13352				retval += 1;
13353				if (!wantcount)
13354					goto out;
13355			}
13356			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13357				/* Add reference dependency. */
13358				retval += 1;
13359				if (!wantcount)
13360					goto out;
13361			}
13362			continue;
13363
13364		case D_INDIRDEP:
13365			indirdep = WK_INDIRDEP(wk);
13366
13367			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13368				/* indirect truncation dependency */
13369				retval += 1;
13370				if (!wantcount)
13371					goto out;
13372			}
13373
13374			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13375				/* indirect block pointer dependency */
13376				retval += 1;
13377				if (!wantcount)
13378					goto out;
13379			}
13380			continue;
13381
13382		case D_PAGEDEP:
13383			pagedep = WK_PAGEDEP(wk);
13384			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13385				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13386					/* Journal remove ref dependency. */
13387					retval += 1;
13388					if (!wantcount)
13389						goto out;
13390				}
13391			}
13392			for (i = 0; i < DAHASHSZ; i++) {
13393
13394				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13395					/* directory entry dependency */
13396					retval += 1;
13397					if (!wantcount)
13398						goto out;
13399				}
13400			}
13401			continue;
13402
13403		case D_BMSAFEMAP:
13404			bmsafemap = WK_BMSAFEMAP(wk);
13405			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13406				/* Add reference dependency. */
13407				retval += 1;
13408				if (!wantcount)
13409					goto out;
13410			}
13411			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13412				/* Allocate block dependency. */
13413				retval += 1;
13414				if (!wantcount)
13415					goto out;
13416			}
13417			continue;
13418
13419		case D_FREEBLKS:
13420			freeblks = WK_FREEBLKS(wk);
13421			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13422				/* Freeblk journal dependency. */
13423				retval += 1;
13424				if (!wantcount)
13425					goto out;
13426			}
13427			continue;
13428
13429		case D_ALLOCDIRECT:
13430		case D_ALLOCINDIR:
13431			newblk = WK_NEWBLK(wk);
13432			if (newblk->nb_jnewblk) {
13433				/* Journal allocate dependency. */
13434				retval += 1;
13435				if (!wantcount)
13436					goto out;
13437			}
13438			continue;
13439
13440		case D_MKDIR:
13441			mkdir = WK_MKDIR(wk);
13442			if (mkdir->md_jaddref) {
13443				/* Journal reference dependency. */
13444				retval += 1;
13445				if (!wantcount)
13446					goto out;
13447			}
13448			continue;
13449
13450		case D_FREEWORK:
13451		case D_FREEDEP:
13452		case D_JSEGDEP:
13453		case D_JSEG:
13454		case D_SBDEP:
13455			/* never a dependency on these blocks */
13456			continue;
13457
13458		default:
13459			panic("softdep_count_dependencies: Unexpected type %s",
13460			    TYPENAME(wk->wk_type));
13461			/* NOTREACHED */
13462		}
13463	}
13464out:
13465	FREE_LOCK(&lk);
13466	return (retval);
13467}
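/*
 * Illustrative note (an assumption, not stated in this section): this
 * routine is reached through the io_countdeps hook of the bioops vector
 * via buf_countdeps(), so buffer-flushing code can avoid writes that
 * would only be rolled back, e.g.:
 *
 *	if (!LIST_EMPTY(&bp->b_dep) && buf_countdeps(bp, 0) != 0)
 *		continue;
 */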
13468
13469/*
13470 * Acquire exclusive access to a buffer.
13471 * Must be called with the lock parameter write-locked.
13472 * Return acquired buffer or NULL on failure.
13473 */
13474static struct buf *
13475getdirtybuf(bp, lock, waitfor)
13476	struct buf *bp;
13477	struct rwlock *lock;
13478	int waitfor;
13479{
13480	int error;
13481
13482	rw_assert(lock, RA_WLOCKED);
13483	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13484		if (waitfor != MNT_WAIT)
13485			return (NULL);
13486		error = BUF_LOCK(bp,
13487		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13488		/*
13489		 * Even if we successfully acquire bp here, we have dropped
13490		 * the lock, which may violate our guarantee.
13491		 */
13492		if (error == 0)
13493			BUF_UNLOCK(bp);
13494		else if (error != ENOLCK)
13495			panic("getdirtybuf: inconsistent lock: %d", error);
13496		rw_wlock(lock);
13497		return (NULL);
13498	}
13499	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13500		if (lock == &lk && waitfor == MNT_WAIT) {
13501			rw_wunlock(lock);
13502			BO_LOCK(bp->b_bufobj);
13503			BUF_UNLOCK(bp);
13504			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13505				bp->b_vflags |= BV_BKGRDWAIT;
13506				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13507				       PRIBIO | PDROP, "getbuf", 0);
13508			} else
13509				BO_UNLOCK(bp->b_bufobj);
13510			rw_wlock(lock);
13511			return (NULL);
13512		}
13513		BUF_UNLOCK(bp);
13514		if (waitfor != MNT_WAIT)
13515			return (NULL);
13516		/*
13517		 * The lock argument must be the lock of bp->b_vp's
13518		 * buffer object in this case.
13519		 */
13520#ifdef	DEBUG_VFS_LOCKS
13521		if (bp->b_vp->v_type != VCHR)
13522			ASSERT_BO_WLOCKED(bp->b_bufobj);
13523#endif
13524		bp->b_vflags |= BV_BKGRDWAIT;
13525		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13526		return (NULL);
13527	}
13528	if ((bp->b_flags & B_DELWRI) == 0) {
13529		BUF_UNLOCK(bp);
13530		return (NULL);
13531	}
13532	bremfree(bp);
13533	return (bp);
13534}
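/*
 * Typical usage pattern within this file (an illustrative recap of the
 * retry loops above, e.g. in softdep_update_inodeblock()): because
 * getdirtybuf() may sleep and drop the lock, callers re-look up their
 * dependency and retry whenever NULL comes back:
 *
 *	retry:
 *		...re-look up the dependency; it may have been freed...
 *		bp = inodedep->id_bmsafemap->sm_buf;
 *		bp = getdirtybuf(bp, &lk, MNT_WAIT);
 *		if (bp == NULL)
 *			goto retry;
 *		FREE_LOCK(&lk);
 *		error = bwrite(bp);
 *		ACQUIRE_LOCK(&lk);
 */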
13535
13536
13537/*
13538 * Check if it is safe to suspend the file system now.  On entry,
13539 * the vnode interlock for devvp should be held.  Return 0 with
13540 * the mount interlock held if the file system can be suspended now,
13541 * otherwise return EAGAIN with the mount interlock held.
13542 */
13543int
13544softdep_check_suspend(struct mount *mp,
13545		      struct vnode *devvp,
13546		      int softdep_deps,
13547		      int softdep_accdeps,
13548		      int secondary_writes,
13549		      int secondary_accwrites)
13550{
13551	struct bufobj *bo;
13552	struct ufsmount *ump;
13553	int error;
13554
13555	ump = VFSTOUFS(mp);
13556	bo = &devvp->v_bufobj;
13557	ASSERT_BO_WLOCKED(bo);
13558
13559	for (;;) {
13560		if (!TRY_ACQUIRE_LOCK(&lk)) {
13561			BO_UNLOCK(bo);
13562			ACQUIRE_LOCK(&lk);
13563			FREE_LOCK(&lk);
13564			BO_LOCK(bo);
13565			continue;
13566		}
13567		MNT_ILOCK(mp);
13568		if (mp->mnt_secondary_writes != 0) {
13569			FREE_LOCK(&lk);
13570			BO_UNLOCK(bo);
13571			msleep(&mp->mnt_secondary_writes,
13572			       MNT_MTX(mp),
13573			       (PUSER - 1) | PDROP, "secwr", 0);
13574			BO_LOCK(bo);
13575			continue;
13576		}
13577		break;
13578	}
13579
13580	/*
13581	 * Reasons for needing more work before suspend:
13582	 * - Dirty buffers on devvp.
13583	 * - Softdep activity occurred after the start of the vnode sync loop.
13584	 * - Secondary writes occurred after the start of the vnode sync loop.
13585	 */
13586	error = 0;
13587	if (bo->bo_numoutput > 0 ||
13588	    bo->bo_dirty.bv_cnt > 0 ||
13589	    softdep_deps != 0 ||
13590	    ump->softdep_deps != 0 ||
13591	    softdep_accdeps != ump->softdep_accdeps ||
13592	    secondary_writes != 0 ||
13593	    mp->mnt_secondary_writes != 0 ||
13594	    secondary_accwrites != mp->mnt_secondary_accwrites)
13595		error = EAGAIN;
13596	FREE_LOCK(&lk);
13597	BO_UNLOCK(bo);
13598	return (error);
13599}
13600
13601
13602/*
13603 * Get the number of dependency structures for the file system, both
13604 * the current number and the total number allocated.  These will
13605 * later be used to detect that softdep processing has occurred.
13606 */
13607void
13608softdep_get_depcounts(struct mount *mp,
13609		      int *softdep_depsp,
13610		      int *softdep_accdepsp)
13611{
13612	struct ufsmount *ump;
13613
13614	ump = VFSTOUFS(mp);
13615	ACQUIRE_LOCK(&lk);
13616	*softdep_depsp = ump->softdep_deps;
13617	*softdep_accdepsp = ump->softdep_accdeps;
13618	FREE_LOCK(&lk);
13619}
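/*
 * Illustrative sketch, not part of the original code: the suspension
 * path in ffs_sync() is assumed to use the two routines above as a
 * before/after pair around its vnode sync loop, roughly:
 *
 *	softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
 *	secondary_writes = mp->mnt_secondary_writes;
 *	secondary_accwrites = mp->mnt_secondary_accwrites;
 *	...sync all vnodes and the device vnode...
 *	BO_LOCK(&ump->um_devvp->v_bufobj);
 *	error = softdep_check_suspend(mp, ump->um_devvp, softdep_deps,
 *	    softdep_accdeps, secondary_writes, secondary_accwrites);
 *	...EAGAIN means new activity arrived; redo the loop...
 */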
13620
13621/*
13622 * Wait for pending output on a vnode to complete.
13623 * Must be called with vnode lock and interlock locked.
13624 *
13625 * XXX: Should just be a call to bufobj_wwait().
13626 */
13627static void
13628drain_output(vp)
13629	struct vnode *vp;
13630{
13631	struct bufobj *bo;
13632
13633	bo = &vp->v_bufobj;
13634	ASSERT_VOP_LOCKED(vp, "drain_output");
13635	ASSERT_BO_WLOCKED(bo);
13636
13637	while (bo->bo_numoutput) {
13638		bo->bo_flag |= BO_WWAIT;
13639		msleep((caddr_t)&bo->bo_numoutput,
13640		    BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0);
13641	}
13642}
13643
13644/*
13645 * Called whenever a buffer that is being invalidated or reallocated
13646 * contains dependencies. This should only happen if an I/O error has
13647 * occurred. The routine is called with the buffer locked.
13648 */
13649static void
13650softdep_deallocate_dependencies(bp)
13651	struct buf *bp;
13652{
13653
13654	if ((bp->b_ioflags & BIO_ERROR) == 0)
13655		panic("softdep_deallocate_dependencies: dangling deps");
13656	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
13657		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
13658	else
13659		printf("softdep_deallocate_dependencies: "
13660		    "got error %d while accessing filesystem\n", bp->b_error);
13661	if (bp->b_error != ENXIO)
13662		panic("softdep_deallocate_dependencies: unrecovered I/O error");
13663}
13664
13665/*
13666 * Function to handle asynchronous write errors in the filesystem.
13667 */
13668static void
13669softdep_error(func, error)
13670	char *func;
13671	int error;
13672{
13673
13674	/* XXX should do something better! */
13675	printf("%s: got error %d while accessing filesystem\n", func, error);
13676}
13677
13678#ifdef DDB
13679
13680static void
13681inodedep_print(struct inodedep *inodedep, int verbose)
13682{
13683	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
13684	    " saveino %p\n",
13685	    inodedep, inodedep->id_fs, inodedep->id_state,
13686	    (intmax_t)inodedep->id_ino,
13687	    (intmax_t)fsbtodb(inodedep->id_fs,
13688	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
13689	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
13690	    inodedep->id_savedino1);
13691
13692	if (verbose == 0)
13693		return;
13694
13695	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
13696	    "mkdiradd %p\n",
13697	    LIST_FIRST(&inodedep->id_pendinghd),
13698	    LIST_FIRST(&inodedep->id_bufwait),
13699	    LIST_FIRST(&inodedep->id_inowait),
13700	    TAILQ_FIRST(&inodedep->id_inoreflst),
13701	    inodedep->id_mkdiradd);
13702	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
13703	    TAILQ_FIRST(&inodedep->id_inoupdt),
13704	    TAILQ_FIRST(&inodedep->id_newinoupdt),
13705	    TAILQ_FIRST(&inodedep->id_extupdt),
13706	    TAILQ_FIRST(&inodedep->id_newextupdt));
13707}
13708
13709DB_SHOW_COMMAND(inodedep, db_show_inodedep)
13710{
13711
13712	if (have_addr == 0) {
13713		db_printf("Address required\n");
13714		return;
13715	}
13716	inodedep_print((struct inodedep*)addr, 1);
13717}
13718
13719DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
13720{
13721	struct inodedep_hashhead *inodedephd;
13722	struct inodedep *inodedep;
13723	struct fs *fs;
13724	int cnt;
13725
13726	fs = have_addr ? (struct fs *)addr : NULL;
13727	for (cnt = 0; cnt < inodedep_hash; cnt++) {
13728		inodedephd = &inodedep_hashtbl[cnt];
13729		LIST_FOREACH(inodedep, inodedephd, id_hash) {
13730			if (fs != NULL && fs != inodedep->id_fs)
13731				continue;
13732			inodedep_print(inodedep, 0);
13733		}
13734	}
13735}
13736
13737DB_SHOW_COMMAND(worklist, db_show_worklist)
13738{
13739	struct worklist *wk;
13740
13741	if (have_addr == 0) {
13742		db_printf("Address required\n");
13743		return;
13744	}
13745	wk = (struct worklist *)addr;
13746	db_printf("worklist: %p type %s state 0x%X\n",
13747	    wk, TYPENAME(wk->wk_type), wk->wk_state);
13748}
13749
13750DB_SHOW_COMMAND(workhead, db_show_workhead)
13751{
13752	struct workhead *wkhd;
13753	struct worklist *wk;
13754	int i;
13755
13756	if (have_addr == 0) {
13757		db_printf("Address required\n");
13758		return;
13759	}
13760	wkhd = (struct workhead *)addr;
13761	wk = LIST_FIRST(wkhd);
13762	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
13763		db_printf("worklist: %p type %s state 0x%X",
13764		    wk, TYPENAME(wk->wk_type), wk->wk_state);
13765	if (i == 100)
13766		db_printf("workhead overflow");
13767	printf("\n");
13768	db_printf("\n");
13769
13770
13771DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
13772{
13773	struct jaddref *jaddref;
13774	struct diradd *diradd;
13775	struct mkdir *mkdir;
13776
13777	LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
13778		diradd = mkdir->md_diradd;
13779		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
13780		    mkdir, mkdir->md_state, diradd, diradd->da_state);
13781		if ((jaddref = mkdir->md_jaddref) != NULL)
13782			db_printf(" jaddref %p jaddref state 0x%X",
13783			    jaddref, jaddref->ja_state);
13784		db_printf("\n");
13785	}
13786}
13787
13788/* exported to ffs_vfsops.c */
13789extern void db_print_ffs(struct ufsmount *ump);
13790void
13791db_print_ffs(struct ufsmount *ump)
13792{
13793	db_printf("mp %p %s devvp %p fs %p su_wl %d su_deps %d su_req %d\n",
13794	    ump->um_mountp, ump->um_mountp->mnt_stat.f_mntonname,
13795	    ump->um_devvp, ump->um_fs, ump->softdep_on_worklist,
13796	    ump->softdep_deps, ump->softdep_req);
13797}
13798
13799#endif /* DDB */
13800
13801#endif /* SOFTUPDATES */
13802