/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick.
 * Copyright 2009, 2010 Jeffrey W. Roberson <jeff@FreeBSD.org>
 * All rights reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR
 * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
 * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ffs.h"
#include "opt_quota.h"
#include "opt_ddb.h"

/*
 * For now we want the safety net that the DEBUG flag provides.
 */
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/ktr.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/rwlock.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>

#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>
#include <vm/vm_extern.h>
#include <vm/vm_object.h>

#include <geom/geom.h>

#include <ddb/ddb.h>

#define	KTR_SUJ	0	/* Define to KTR_SPARE. */

#ifndef SOFTUPDATES

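/*
 * Stub routines for kernels compiled without SOFTUPDATES.  They let the
 * rest of FFS link unchanged: entry points that are harmless without
 * soft updates quietly succeed, while those that imply dependency
 * tracking panic to catch a misconfigured kernel.
 */
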
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_unmount(mp)
	struct mount *mp;
{

}

void
softdep_setup_sbupdate(ump, fs, bp)
	struct ufsmount *ump;
	struct fs *fs;
	struct buf *bp;
{
}

void
softdep_setup_inomapdep(bp, ip, newinum, mode)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
	int mode;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
	int frags;
	int oldfrags;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_journal_freeblocks(ip, cred, length, flags)
	struct inode *ip;
	struct ucred *cred;
	off_t length;
	int flags;
{

	panic("softdep_journal_freeblocks called");
}

void
softdep_journal_fsync(ip)
	struct inode *ip;
{

	panic("softdep_journal_fsync called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
	struct vnode *pvp;
	ino_t ino;
	int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
	struct buf *bp;
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

	panic("softdep_change_directoryentry_offset called");
}

void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	int isrmdir;
{

	panic("softdep_setup_remove called");
}

void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	ino_t newinum;
	int isrmdir;
{

	panic("softdep_setup_directory_change called");
}

void
softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
	struct mount *mp;
	struct buf *bp;
	ufs2_daddr_t blkno;
	int frags;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_inofree(mp, bp, ino, wkhd)
	struct mount *mp;
	struct buf *bp;
	ino_t ino;
	struct workhead *wkhd;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_unlink(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_rmdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_create(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_revert_mkdir(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

void
softdep_setup_dotdot_link(dp, ip)
	struct inode *dp;
	struct inode *ip;
{

	panic("%s called", __FUNCTION__);
}

int
softdep_prealloc(vp, waitok)
	struct vnode *vp;
	int waitok;
{

	panic("%s called", __FUNCTION__);

	return (0);
}

int
softdep_journal_lookup(mp, vpp)
	struct mount *mp;
	struct vnode **vpp;
{

	return (ENOENT);
}

void
softdep_change_linkcnt(ip)
	struct inode *ip;
{

	panic("softdep_change_linkcnt called");
}

void
softdep_load_inodeblock(ip)
	struct inode *ip;
{

	panic("softdep_load_inodeblock called");
}

void
softdep_update_inodeblock(ip, bp, waitfor)
	struct inode *ip;
	struct buf *bp;
	int waitfor;
{

	panic("softdep_update_inodeblock called");
}

int
softdep_fsync(vp)
	struct vnode *vp;	/* the "in_core" copy of the inode */
{

	return (0);
}

void
softdep_fsync_mountdev(vp)
	struct vnode *vp;
{

	return;
}

int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{

	*countp = 0;
	return (0);
}

int
softdep_sync_metadata(struct vnode *vp)
{

	return (0);
}

int
softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
{

	return (0);
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

void
softdep_releasefile(ip)
	struct inode *ip;	/* inode with zero effective link count */
{

	panic("softdep_releasefile called");
}

int
softdep_request_cleanup(fs, vp, cred, resource)
	struct fs *fs;
	struct vnode *vp;
	struct ucred *cred;
	int resource;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_deps,
		      int softdep_accdeps,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_deps;
	(void) softdep_accdeps;

	bo = &devvp->v_bufobj;
	ASSERT_BO_WLOCKED(bo);

	MNT_ILOCK(mp);
	while (mp->mnt_secondary_writes != 0) {
		BO_UNLOCK(bo);
		msleep(&mp->mnt_secondary_writes, MNT_MTX(mp),
		    (PUSER - 1) | PDROP, "secwr", 0);
		BO_LOCK(bo);
		MNT_ILOCK(mp);
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop.
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	BO_UNLOCK(bo);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

void
softdep_buf_append(bp, wkhd)
	struct buf *bp;
	struct workhead *wkhd;
{

	panic("softdep_buf_append called");
}

void
softdep_inode_append(ip, cred, wkhd)
	struct inode *ip;
	struct ucred *cred;
	struct workhead *wkhd;
{

	panic("softdep_inode_append called");
}

void
softdep_freework(wkhd)
	struct workhead *wkhd;
{

	panic("softdep_freework called");
}

#else

FEATURE(softupdates, "FFS soft-updates support");

/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */

#define M_SOFTDEP_FLAGS	(M_WAITOK)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_BMSAFEMAP	2
#define	D_NEWBLK	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_NEWDIRBLK	13
#define	D_FREEWORK	14
#define	D_FREEDEP	15
#define	D_JADDREF	16
#define	D_JREMREF	17
#define	D_JMVREF	18
#define	D_JNEWBLK	19
#define	D_JFREEBLK	20
#define	D_JFREEFRAG	21
#define	D_JSEG		22
#define	D_JSEGDEP	23
#define	D_SBDEP		24
#define	D_JTRUNC	25
#define	D_JFSYNC	26
#define	D_SENTINEL	27
#define	D_LAST		D_SENTINEL

unsigned long dep_current[D_LAST + 1];
unsigned long dep_highuse[D_LAST + 1];
unsigned long dep_total[D_LAST + 1];
unsigned long dep_write[D_LAST + 1];

static SYSCTL_NODE(_debug, OID_AUTO, softdep, CTLFLAG_RW, 0,
    "soft updates stats");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, total, CTLFLAG_RW, 0,
    "total dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, highuse, CTLFLAG_RW, 0,
    "high use dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, current, CTLFLAG_RW, 0,
    "current dependencies allocated");
static SYSCTL_NODE(_debug_softdep, OID_AUTO, write, CTLFLAG_RW, 0,
    "current dependencies written");

#define	SOFTDEP_TYPE(type, str, long)					\
    static MALLOC_DEFINE(M_ ## type, #str, long);			\
    SYSCTL_ULONG(_debug_softdep_total, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_total[D_ ## type], 0, "");					\
    SYSCTL_ULONG(_debug_softdep_current, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_current[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_highuse, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_highuse[D_ ## type], 0, "");				\
    SYSCTL_ULONG(_debug_softdep_write, OID_AUTO, str, CTLFLAG_RD,	\
	&dep_write[D_ ## type], 0, "");
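
/*
 * As a reader's aid (not compiled), SOFTDEP_TYPE(PAGEDEP, pagedep,
 * "File page dependencies") expands to the malloc type
 *
 *	static MALLOC_DEFINE(M_PAGEDEP, "pagedep", "File page dependencies");
 *
 * plus four read-only SYSCTL_ULONG entries exporting the counters as
 * debug.softdep.total.pagedep, debug.softdep.current.pagedep,
 * debug.softdep.highuse.pagedep and debug.softdep.write.pagedep.
 */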

SOFTDEP_TYPE(PAGEDEP, pagedep, "File page dependencies");
SOFTDEP_TYPE(INODEDEP, inodedep, "Inode dependencies");
SOFTDEP_TYPE(BMSAFEMAP, bmsafemap,
    "Block or frag allocated from cyl group map");
SOFTDEP_TYPE(NEWBLK, newblk, "New block or frag allocation dependency");
SOFTDEP_TYPE(ALLOCDIRECT, allocdirect, "Block or frag dependency for an inode");
SOFTDEP_TYPE(INDIRDEP, indirdep, "Indirect block dependencies");
SOFTDEP_TYPE(ALLOCINDIR, allocindir, "Block dependency for an indirect block");
SOFTDEP_TYPE(FREEFRAG, freefrag, "Previously used frag for an inode");
SOFTDEP_TYPE(FREEBLKS, freeblks, "Blocks freed from an inode");
SOFTDEP_TYPE(FREEFILE, freefile, "Inode deallocated");
SOFTDEP_TYPE(DIRADD, diradd, "New directory entry");
SOFTDEP_TYPE(MKDIR, mkdir, "New directory");
SOFTDEP_TYPE(DIRREM, dirrem, "Directory entry deleted");
SOFTDEP_TYPE(NEWDIRBLK, newdirblk, "Unclaimed new directory block");
SOFTDEP_TYPE(FREEWORK, freework, "Free an inode block");
SOFTDEP_TYPE(FREEDEP, freedep, "Track a block free");
SOFTDEP_TYPE(JADDREF, jaddref, "Journal inode ref add");
SOFTDEP_TYPE(JREMREF, jremref, "Journal inode ref remove");
SOFTDEP_TYPE(JMVREF, jmvref, "Journal inode ref move");
SOFTDEP_TYPE(JNEWBLK, jnewblk, "Journal new block");
SOFTDEP_TYPE(JFREEBLK, jfreeblk, "Journal free block");
SOFTDEP_TYPE(JFREEFRAG, jfreefrag, "Journal free frag");
SOFTDEP_TYPE(JSEG, jseg, "Journal segment");
SOFTDEP_TYPE(JSEGDEP, jsegdep, "Journal segment complete");
SOFTDEP_TYPE(SBDEP, sbdep, "Superblock write dependency");
SOFTDEP_TYPE(JTRUNC, jtrunc, "Journal inode truncation");
SOFTDEP_TYPE(JFSYNC, jfsync, "Journal fsync complete");

static MALLOC_DEFINE(M_SENTINEL, "sentinel", "Worklist sentinel");

static MALLOC_DEFINE(M_SAVEDINO, "savedino", "Saved inodes");
static MALLOC_DEFINE(M_JBLOCKS, "jblocks", "Journal block locations");

/*
 * Translate from workitem type to memory type.
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX.
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_BMSAFEMAP,
	M_NEWBLK,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK,
	M_FREEWORK,
	M_FREEDEP,
	M_JADDREF,
	M_JREMREF,
	M_JMVREF,
	M_JNEWBLK,
	M_JFREEBLK,
	M_JFREEFRAG,
	M_JSEG,
	M_JSEGDEP,
	M_SBDEP,
	M_JTRUNC,
	M_JFSYNC,
	M_SENTINEL
};

static LIST_HEAD(mkdirlist, mkdir) mkdirlisthd;

#define DtoM(type) (memtype[type])

/*
 * Names of malloc types.
 */
#define TYPENAME(type)  \
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
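
/*
 * Thus, provided memtype[] above stays in step with the D_* constants,
 * DtoM(D_PAGEDEP) yields M_PAGEDEP and TYPENAME(D_PAGEDEP) yields the
 * malloc short description "pagedep".
 */
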
/*
 * End system adaptation definitions.
 */

#define	DOTDOT_OFFSET	offsetof(struct dirtemplate, dotdot_ino)
#define	DOT_OFFSET	offsetof(struct dirtemplate, dot_ino)

/*
 * Forward declarations.
 */
struct inodedep_hashhead;
struct newblk_hashhead;
struct pagedep_hashhead;
struct bmsafemap_hashhead;

/*
 * Private journaling structures.
 */
struct jblocks {
	struct jseglst	jb_segs;	/* TAILQ of current segments. */
	struct jseg	*jb_writeseg;	/* Next write to complete. */
	struct jseg	*jb_oldestseg;	/* Oldest segment with valid entries. */
	struct jextent	*jb_extent;	/* Extent array. */
	uint64_t	jb_nextseq;	/* Next sequence number. */
	uint64_t	jb_oldestwrseq;	/* Oldest written sequence number. */
	uint8_t		jb_needseg;	/* Need a forced segment. */
	uint8_t		jb_suspended;	/* Did journal suspend writes? */
	int		jb_avail;	/* Available extents. */
	int		jb_used;	/* Last used extent. */
	int		jb_head;	/* Allocator head. */
	int		jb_off;		/* Allocator extent offset. */
	int		jb_blocks;	/* Total disk blocks covered. */
	int		jb_free;	/* Total disk blocks free. */
	int		jb_min;		/* Minimum free space. */
	int		jb_low;		/* Low on space. */
	int		jb_age;		/* Insertion time of oldest rec. */
};

struct jextent {
	ufs2_daddr_t	je_daddr;	/* Disk block address. */
	int		je_blocks;	/* Disk block count. */
};
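
/*
 * Each jextent describes one physically contiguous run of disk blocks
 * donated to the journal; jblocks strings those extents together and
 * hands out space from them as a single logical circular log, with
 * jb_head and jb_off tracking the allocator's position.
 */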

/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct rwlock *, int);
static	void clear_remove(void);
static	void clear_inodedeps(void);
static	void unlinked_inodedep(struct mount *, struct inodedep *);
static	void clear_unlinked_inodedep(struct inodedep *);
static	struct inodedep *first_unlinked_inodedep(struct ufsmount *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int free_pagedep(struct pagedep *);
static	int flush_newblk_dep(struct vnode *, struct mount *, ufs_lbn_t);
static	int flush_inodedep_deps(struct vnode *, struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int sync_cgs(struct mount *, int);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static	int handle_written_sbdep(struct sbdep *, struct buf *);
static	void initiate_write_sbdep(struct sbdep *);
static	void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_indirdep(struct indirdep *, struct buf *,
	    struct buf **);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	int jnewblk_rollforward(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	int handle_written_bmsafemap(struct bmsafemap *, struct buf *);
static	void handle_written_jaddref(struct jaddref *);
static	void handle_written_jremref(struct jremref *);
static	void handle_written_jseg(struct jseg *, struct buf *);
static	void handle_written_jnewblk(struct jnewblk *);
static	void handle_written_jblkdep(struct jblkdep *);
static	void handle_written_jfreefrag(struct jfreefrag *);
static	void complete_jseg(struct jseg *);
static	void complete_jsegs(struct jseg *);
static	void jseg_write(struct ufsmount *ump, struct jseg *, uint8_t *);
static	void jaddref_write(struct jaddref *, struct jseg *, uint8_t *);
static	void jremref_write(struct jremref *, struct jseg *, uint8_t *);
static	void jmvref_write(struct jmvref *, struct jseg *, uint8_t *);
static	void jtrunc_write(struct jtrunc *, struct jseg *, uint8_t *);
static	void jfsync_write(struct jfsync *, struct jseg *, uint8_t *data);
static	void jnewblk_write(struct jnewblk *, struct jseg *, uint8_t *);
static	void jfreeblk_write(struct jfreeblk *, struct jseg *, uint8_t *);
static	void jfreefrag_write(struct jfreefrag *, struct jseg *, uint8_t *);
static	inline void inoref_write(struct inoref *, struct jseg *,
	    struct jrefrec *);
static	void handle_allocdirect_partdone(struct allocdirect *,
	    struct workhead *);
static	struct jnewblk *cancel_newblk(struct newblk *, struct worklist *,
	    struct workhead *);
static	void indirdep_complete(struct indirdep *);
static	int indirblk_lookup(struct mount *, ufs2_daddr_t);
static	void indirblk_insert(struct freework *);
static	void indirblk_remove(struct freework *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void initiate_write_indirdep(struct indirdep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	int jnewblk_rollback(struct jnewblk *, struct fs *, struct cg *,
	    uint8_t *);
static	void initiate_write_bmsafemap(struct bmsafemap *, struct buf *);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	int handle_workitem_remove(struct dirrem *, int);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	struct indirdep *indirdep_lookup(struct mount *, struct inode *,
	    struct buf *);
static	void cancel_indirdep(struct indirdep *, struct buf *,
	    struct freeblks *);
static	void free_indirdep(struct indirdep *);
static	void free_diradd(struct diradd *, struct workhead *);
static	void merge_diradd(struct inodedep *, struct diradd *);
static	void complete_diradd(struct diradd *);
static	struct diradd *diradd_lookup(struct pagedep *, int);
static	struct jremref *cancel_diradd_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	struct jremref *cancel_mkdir_dotdot(struct inode *, struct dirrem *,
	    struct jremref *);
static	void cancel_diradd(struct diradd *, struct dirrem *, struct jremref *,
	    struct jremref *, struct jremref *);
static	void dirrem_journal(struct dirrem *, struct jremref *, struct jremref *,
	    struct jremref *);
static	void cancel_allocindir(struct allocindir *, struct buf *bp,
	    struct freeblks *, int);
static	int setup_trunc_indir(struct freeblks *, struct inode *,
	    ufs_lbn_t, ufs_lbn_t, ufs2_daddr_t);
static	void complete_trunc_indir(struct freework *);
static	void trunc_indirdep(struct indirdep *, struct freeblks *, struct buf *,
	    int);
static	void complete_mkdir(struct mkdir *);
static	void free_newdirblk(struct newdirblk *);
static	void free_jremref(struct jremref *);
static	void free_jaddref(struct jaddref *);
static	void free_jsegdep(struct jsegdep *);
static	void free_jsegs(struct jblocks *);
static	void rele_jseg(struct jseg *);
static	void free_jseg(struct jseg *, struct jblocks *);
static	void free_jnewblk(struct jnewblk *);
static	void free_jblkdep(struct jblkdep *);
static	void free_jfreefrag(struct jfreefrag *);
static	void free_freedep(struct freedep *);
static	void journal_jremref(struct dirrem *, struct jremref *,
	    struct inodedep *);
static	void cancel_jnewblk(struct jnewblk *, struct workhead *);
static	int cancel_jaddref(struct jaddref *, struct inodedep *,
	    struct workhead *);
static	void cancel_jfreefrag(struct jfreefrag *);
static	inline void setup_freedirect(struct freeblks *, struct inode *,
	    int, int);
static	inline void setup_freeext(struct freeblks *, struct inode *, int, int);
static	inline void setup_freeindir(struct freeblks *, struct inode *, int,
	    ufs_lbn_t, int);
static	inline struct freeblks *newfreeblks(struct mount *, struct inode *);
static	void freeblks_free(struct ufsmount *, struct freeblks *, int);
static	void indir_trunc(struct freework *, ufs2_daddr_t, ufs_lbn_t);
ufs2_daddr_t blkcount(struct fs *, ufs2_daddr_t, off_t);
static	int trunc_check_buf(struct buf *, int *, ufs_lbn_t, int, int);
static	void trunc_dependencies(struct inode *, struct freeblks *, ufs_lbn_t,
	    int, int);
static	void trunc_pages(struct inode *, off_t, ufs2_daddr_t, int);
static	int cancel_pagedep(struct pagedep *, struct freeblks *, int);
static	int deallocate_dependencies(struct buf *, struct freeblks *, int);
static	void newblk_freefrag(struct newblk *);
static	void free_newblk(struct newblk *);
static	void cancel_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, struct freeblks *);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void freework_freeblock(struct freework *);
static	void freework_enqueue(struct freework *);
static	int handle_workitem_freeblocks(struct freeblks *, int);
static	int handle_complete_freeblocks(struct freeblks *, int);
static	void handle_workitem_indirblk(struct freework *);
static	void handle_written_freework(struct freework *);
static	void merge_inode_lists(struct allocdirectlst *, struct allocdirectlst *);
static	struct worklist *jnewblk_merge(struct worklist *, struct worklist *,
	    struct workhead *);
static	struct freefrag *setup_allocindir_phase2(struct buf *, struct inode *,
	    struct inodedep *, struct allocindir *, ufs_lbn_t);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t, ufs_lbn_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long,
	    ufs_lbn_t);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct freefrag *allocindir_merge(struct allocindir *,
	    struct allocindir *);
static	int bmsafemap_find(struct bmsafemap_hashhead *, struct mount *, int,
	    struct bmsafemap **);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *,
	    int cg, struct bmsafemap *);
static	int newblk_find(struct newblk_hashhead *, struct mount *, ufs2_daddr_t,
	    int, struct newblk **);
static	int newblk_lookup(struct mount *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, struct fs *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct mount *, struct buf *bp, ino_t, ufs_lbn_t,
	    int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct mount *mp, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int, int);
static	void process_removes(struct vnode *);
static	void process_truncates(struct vnode *);
static	void jwork_move(struct workhead *, struct workhead *);
static	void jwork_insert(struct workhead *, struct jsegdep *);
static	void add_to_worklist(struct worklist *, int);
static	void wake_worklist(struct worklist *);
static	void wait_worklist(struct worklist *, char *);
static	void remove_from_worklist(struct worklist *);
static	void softdep_flush(void);
static	void softdep_flushjournal(struct mount *);
static	int softdep_speedup(void);
static	void worklist_speedup(void);
static	int journal_mount(struct mount *, struct fs *, struct ucred *);
static	void journal_unmount(struct mount *);
static	int journal_space(struct ufsmount *, int);
static	void journal_suspend(struct ufsmount *);
static	int journal_unsuspend(struct ufsmount *ump);
static	void softdep_prelink(struct vnode *, struct vnode *);
static	void add_to_journal(struct worklist *);
static	void remove_from_journal(struct worklist *);
static	void softdep_process_journal(struct mount *, struct worklist *, int);
static	struct jremref *newjremref(struct dirrem *, struct inode *,
	    struct inode *ip, off_t, nlink_t);
static	struct jaddref *newjaddref(struct inode *, ino_t, off_t, int16_t,
	    uint16_t);
static	inline void newinoref(struct inoref *, ino_t, ino_t, off_t, nlink_t,
	    uint16_t);
static	inline struct jsegdep *inoref_jseg(struct inoref *);
static	struct jmvref *newjmvref(struct inode *, ino_t, off_t, off_t);
static	struct jfreeblk *newjfreeblk(struct freeblks *, ufs_lbn_t,
	    ufs2_daddr_t, int);
static	struct jtrunc *newjtrunc(struct freeblks *, off_t, int);
static	void move_newblock_dep(struct jaddref *, struct inodedep *);
static	void cancel_jfreeblk(struct freeblks *, ufs2_daddr_t);
static	struct jfreefrag *newjfreefrag(struct freefrag *, struct inode *,
	    ufs2_daddr_t, long, ufs_lbn_t);
static	struct freework *newfreework(struct ufsmount *, struct freeblks *,
	    struct freework *, ufs_lbn_t, ufs2_daddr_t, int, int, int);
static	int jwait(struct worklist *, int);
static	struct inodedep *inodedep_lookup_ip(struct inode *);
static	int bmsafemap_backgroundwrite(struct bmsafemap *, struct buf *);
static	struct freefile *handle_bufwait(struct inodedep *, struct workhead *);
static	void handle_jwork(struct workhead *);
static	struct mkdir *setup_newdir(struct diradd *, ino_t, ino_t, struct buf *,
	    struct mkdir **);
static	struct jblocks *jblocks_create(void);
static	ufs2_daddr_t jblocks_alloc(struct jblocks *, int, int *);
static	void jblocks_free(struct jblocks *, struct mount *, int);
static	void jblocks_destroy(struct jblocks *);
static	void jblocks_add(struct jblocks *, ufs2_daddr_t, int);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

static struct rwlock lk;
RW_SYSINIT(softdep_lock, &lk, "Softdep Lock");

#define TRY_ACQUIRE_LOCK(lk)		rw_try_wlock(lk)
#define ACQUIRE_LOCK(lk)		rw_wlock(lk)
#define FREE_LOCK(lk)			rw_wunlock(lk)

#define	BUF_AREC(bp)			lockallowrecurse(&(bp)->b_lock)
#define	BUF_NOREC(bp)			lockdisablerecurse(&(bp)->b_lock)

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#define WORKLIST_INSERT_UNLOCKED	WORKLIST_INSERT
#define WORKLIST_REMOVE_UNLOCKED	WORKLIST_REMOVE

#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *, int);
static	void worklist_remove(struct worklist *, int);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item, 1)
#define WORKLIST_INSERT_UNLOCKED(head, item) worklist_insert(head, item, 0)
#define WORKLIST_REMOVE(item) worklist_remove(item, 1)
#define WORKLIST_REMOVE_UNLOCKED(item) worklist_remove(item, 0)

static void
worklist_insert(head, item, locked)
	struct workhead *head;
	struct worklist *item;
	int locked;
{

	if (locked)
		rw_assert(&lk, RA_WLOCKED);
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: %p %s(0x%X) already on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item, locked)
	struct worklist *item;
	int locked;
{

	if (locked)
		rw_assert(&lk, RA_WLOCKED);
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: %p %s(0x%X) not on list",
		    item, TYPENAME(item->wk_type), item->wk_state);
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */

/*
 * Merge two jsegdeps, keeping only the oldest one, as newer references
 * can't be discarded until after older references.
 */
static inline struct jsegdep *
jsegdep_merge(struct jsegdep *one, struct jsegdep *two)
{
	struct jsegdep *swp;

	if (two == NULL)
		return (one);

	if (one->jd_seg->js_seq > two->jd_seg->js_seq) {
		swp = one;
		one = two;
		two = swp;
	}
	WORKLIST_REMOVE(&two->jd_list);
	free_jsegdep(two);

	return (one);
}

/*
 * If two freedeps are compatible, free one to reduce list size.
 */
static inline struct freedep *
freedep_merge(struct freedep *one, struct freedep *two)
{
	if (two == NULL)
		return (one);

	if (one->fd_freework == two->fd_freework) {
		WORKLIST_REMOVE(&two->fd_list);
		free_freedep(two);
	}
	return (one);
}

/*
 * Move journal work from one list to another.  Duplicate freedeps and
 * jsegdeps are coalesced to keep the lists as small as possible.
 */
static void
jwork_move(dst, src)
	struct workhead *dst;
	struct workhead *src;
{
	struct freedep *freedep;
	struct jsegdep *jsegdep;
	struct worklist *wkn;
	struct worklist *wk;

	KASSERT(dst != src,
	    ("jwork_move: dst == src"));
	freedep = NULL;
	jsegdep = NULL;
	LIST_FOREACH_SAFE(wk, dst, wk_list, wkn) {
		if (wk->wk_type == D_JSEGDEP)
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}

	rw_assert(&lk, RA_WLOCKED);
	while ((wk = LIST_FIRST(src)) != NULL) {
		WORKLIST_REMOVE(wk);
		WORKLIST_INSERT(dst, wk);
		if (wk->wk_type == D_JSEGDEP) {
			jsegdep = jsegdep_merge(WK_JSEGDEP(wk), jsegdep);
			continue;
		}
		if (wk->wk_type == D_FREEDEP)
			freedep = freedep_merge(WK_FREEDEP(wk), freedep);
	}
}

static void
jwork_insert(dst, jsegdep)
	struct workhead *dst;
	struct jsegdep *jsegdep;
{
	struct jsegdep *jsegdepn;
	struct worklist *wk;

	LIST_FOREACH(wk, dst, wk_list)
		if (wk->wk_type == D_JSEGDEP)
			break;
	if (wk == NULL) {
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
		return;
	}
	jsegdepn = WK_JSEGDEP(wk);
	if (jsegdep->jd_seg->js_seq < jsegdepn->jd_seg->js_seq) {
		WORKLIST_REMOVE(wk);
		free_jsegdep(jsegdepn);
		WORKLIST_INSERT(dst, &jsegdep->jd_list);
	} else
		free_jsegdep(jsegdep);
}

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);
static	void workitem_reassign(struct worklist *, int);

#define	WORKITEM_FREE(item, type) \
	workitem_free((struct worklist *)(item), (type))
#define	WORKITEM_REASSIGN(item, type) \
	workitem_reassign((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;

	rw_assert(&lk, RA_WLOCKED);

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: %s(0x%X) still on list",
		    TYPENAME(item->wk_type), item->wk_state);
	if (item->wk_type != type && type != D_NEWBLK)
		panic("workitem_free: type mismatch %s != %s",
		    TYPENAME(item->wk_type), TYPENAME(type));
#endif
	if (item->wk_state & IOWAITING)
		wakeup(item);
	ump = VFSTOUFS(item->wk_mp);
	KASSERT(ump->softdep_deps > 0,
	    ("workitem_free: %s: softdep_deps going negative",
	    ump->um_fs->fs_fsmnt));
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_free: %s: dep_current[%s] going negative",
	    ump->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	dep_current[item->wk_type]--;
	free(item, DtoM(type));
}

static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	struct ufsmount *ump;

	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;

	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(&lk);
	dep_current[type]++;
	if (dep_current[type] > dep_highuse[type])
		dep_highuse[type] = dep_current[type];
	dep_total[type]++;
	ump->softdep_deps++;
	ump->softdep_accdeps++;
	FREE_LOCK(&lk);
}

static void
workitem_reassign(item, newtype)
	struct worklist *item;
	int newtype;
{

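	/*
	 * Only the statistics move: the item itself survives with a new
	 * type, so unlike workitem_free()/workitem_alloc() the per-mount
	 * softdep_deps count is left untouched.
	 */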
	KASSERT(dep_current[item->wk_type] > 0,
	    ("workitem_reassign: %s: dep_current[%s] going negative",
	    VFSTOUFS(item->wk_mp)->um_fs->fs_fsmnt, TYPENAME(item->wk_type)));
	dep_current[item->wk_type]--;
	dep_current[newtype]++;
	if (dep_current[newtype] > dep_highuse[newtype])
		dep_highuse[newtype] = dep_current[newtype];
	dep_total[newtype]++;
	item->wk_type = newtype;
}

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout softdep_callout;
static int req_pending;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
static int req_clear_remove;	/* syncer process flush some freeblks */
static int softdep_flushcache = 0; /* Should we do BIO_FLUSH? */

/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */
static int stat_jaddref;	/* bufs redirtied as ino bitmap can not write */
static int stat_jnewblk;	/* bufs redirtied as blk bitmap can not write */
static int stat_journal_min;	/* Times hit journal min threshold */
static int stat_journal_low;	/* Times hit journal low threshold */
static int stat_journal_wait;	/* Times blocked in jwait(). */
static int stat_jwait_filepage;	/* Times blocked in jwait() for filepage. */
static int stat_jwait_freeblks;	/* Times blocked in jwait() for freeblks. */
static int stat_jwait_inode;	/* Times blocked in jwait() for inodes. */
static int stat_jwait_newblk;	/* Times blocked in jwait() for newblks. */
static int stat_cleanup_high_delay; /* Maximum cleanup delay (in ticks) */
static int stat_cleanup_blkrequests; /* Number of block cleanup requests */
static int stat_cleanup_inorequests; /* Number of inode cleanup requests */
static int stat_cleanup_retries; /* Number of cleanups that needed to flush */
static int stat_cleanup_failures; /* Number of cleanup requests that failed */

SYSCTL_INT(_debug_softdep, OID_AUTO, max_softdeps, CTLFLAG_RW,
    &max_softdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, tickdelay, CTLFLAG_RW,
    &tickdelay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, maxindirdeps, CTLFLAG_RW,
    &maxindirdeps, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, worklist_push, CTLFLAG_RW,
    &stat_worklist_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_push, CTLFLAG_RW,
    &stat_blk_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_push, CTLFLAG_RW,
    &stat_ino_limit_push, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, blk_limit_hit, CTLFLAG_RW,
    &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, ino_limit_hit, CTLFLAG_RW,
    &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, sync_limit_hit, CTLFLAG_RW,
    &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW,
    &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, inode_bitmap, CTLFLAG_RW,
    &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW,
    &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, dir_entry, CTLFLAG_RW,
    &stat_dir_entry, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jaddref_rollback, CTLFLAG_RW,
    &stat_jaddref, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jnewblk_rollback, CTLFLAG_RW,
    &stat_jnewblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_low, CTLFLAG_RW,
    &stat_journal_low, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_min, CTLFLAG_RW,
    &stat_journal_min, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, journal_wait, CTLFLAG_RW,
    &stat_journal_wait, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_filepage, CTLFLAG_RW,
    &stat_jwait_filepage, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_freeblks, CTLFLAG_RW,
    &stat_jwait_freeblks, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_inode, CTLFLAG_RW,
    &stat_jwait_inode, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, jwait_newblk, CTLFLAG_RW,
    &stat_jwait_newblk, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_blkrequests, CTLFLAG_RW,
    &stat_cleanup_blkrequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_inorequests, CTLFLAG_RW,
    &stat_cleanup_inorequests, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_high_delay, CTLFLAG_RW,
    &stat_cleanup_high_delay, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_retries, CTLFLAG_RW,
    &stat_cleanup_retries, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, cleanup_failures, CTLFLAG_RW,
    &stat_cleanup_failures, 0, "");
SYSCTL_INT(_debug_softdep, OID_AUTO, flushcache, CTLFLAG_RW,
    &softdep_flushcache, 0, "");

SYSCTL_DECL(_vfs_ffs);

LIST_HEAD(bmsafemap_hashhead, bmsafemap) *bmsafemap_hashtbl;
static u_long	bmsafemap_hash;	/* size of hash table - 1 */

static int compute_summary_at_mount = 0;	/* Whether to recompute the summary at mount time */
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
    &compute_summary_at_mount, 0, "Recompute summary at mount");

static struct proc *softdepproc;
static struct kproc_desc softdep_kp = {
	"softdepflush",
	softdep_flush,
	&softdepproc
};
SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start,
    &softdep_kp);

static void
softdep_flush(void)
{
	struct mount *nmp;
	struct mount *mp;
	struct ufsmount *ump;
	struct thread *td;
	int remaining;
	int progress;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		kproc_suspend_check(softdepproc);
		ACQUIRE_LOCK(&lk);
		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps();
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove();
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		FREE_LOCK(&lk);
		remaining = progress = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp) {
			nmp = TAILQ_NEXT(mp, mnt_list);
			if (MOUNTEDSOFTDEP(mp) == 0)
				continue;
			if (vfs_busy(mp, MBF_NOWAIT | MBF_MNTLSTLOCK))
				continue;
			progress += softdep_process_worklist(mp, 0);
			ump = VFSTOUFS(mp);
			remaining += ump->softdep_on_worklist;
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp);
		}
		mtx_unlock(&mountlist_mtx);
		if (remaining && progress)
			continue;
		ACQUIRE_LOCK(&lk);
		if (!req_pending)
			msleep(&req_pending, &lk, PVM, "sdflush", hz);
		req_pending = 0;
		FREE_LOCK(&lk);
	}
}

static void
worklist_speedup(void)
{
	rw_assert(&lk, RA_WLOCKED);
	if (req_pending == 0) {
		req_pending = 1;
		wakeup(&req_pending);
	}
}

static int
softdep_speedup(void)
{

	worklist_speedup();
	bd_speedup();
	return (speedup_syncer());
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */

#define	WK_HEAD		0x0001	/* Add to HEAD. */
#define	WK_NODELAY	0x0002	/* Process immediately. */

static void
add_to_worklist(wk, flags)
	struct worklist *wk;
	int flags;
{
	struct ufsmount *ump;

	rw_assert(&lk, RA_WLOCKED);
	ump = VFSTOUFS(wk->wk_mp);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: %s(0x%X) already on list",
		    TYPENAME(wk->wk_type), wk->wk_state);
	wk->wk_state |= ONWORKLIST;
	if (ump->softdep_on_worklist == 0) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	} else if (flags & WK_HEAD) {
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	} else {
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
		ump->softdep_worklist_tail = wk;
	}
	ump->softdep_on_worklist += 1;
	if (flags & WK_NODELAY)
		worklist_speedup();
}

/*
 * Remove the item to be processed.  If we are removing the last
 * item on the list, we need to recalculate the tail pointer.
 */
static void
remove_from_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	ump = VFSTOUFS(wk->wk_mp);
	WORKLIST_REMOVE(wk);
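	/*
	 * If wk was the tail, back the tail pointer up to wk's
	 * predecessor.  le_prev points at the previous element's le_next
	 * field, so the cast below relies on wk_list being the first
	 * member of struct worklist.
	 */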
	if (ump->softdep_worklist_tail == wk)
		ump->softdep_worklist_tail =
		    (struct worklist *)wk->wk_list.le_prev;
	ump->softdep_on_worklist -= 1;
}

static void
wake_worklist(wk)
	struct worklist *wk;
{
	if (wk->wk_state & IOWAITING) {
		wk->wk_state &= ~IOWAITING;
		wakeup(wk);
	}
}

static void
wait_worklist(wk, wmesg)
	struct worklist *wk;
	char *wmesg;
{

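	/*
	 * Sleep until the item's owner notices us: both wake_worklist()
	 * and workitem_free() check IOWAITING and issue the wakeup.
	 */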
	wk->wk_state |= IOWAITING;
	msleep(wk, &lk, PVM, wmesg, 0);
}

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	int cnt, matchcnt;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(&lk);
	starttime = time_second;
	softdep_process_journal(mp, NULL, full ? MNT_WAIT : 0);
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 10, LK_NOWAIT)) == 0)
			break;
		else
			matchcnt += cnt;
		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps();
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove();
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (should_yield()) {
			FREE_LOCK(&lk);
			kern_yield(PRI_USER);
			bwillwrite();
			ACQUIRE_LOCK(&lk);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other mountpoints may get
		 * excessively backlogged.
		 */
		if (!full && starttime != time_second)
			break;
	}
	if (full == 0)
		journal_unsuspend(ump);
	FREE_LOCK(&lk);
	return (matchcnt);
}

/*
 * Process all removes associated with a vnode if we are running out of
 * journal space.  Any other process which attempts to flush these will
 * be unable to do so as we have the vnodes locked.
 */
static void
process_removes(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct dirrem *dirrem;
	struct mount *mp;
	ino_t inum;

	rw_assert(&lk, RA_WLOCKED);

	mp = vp->v_mount;
	inum = VTOI(vp)->i_number;
	for (;;) {
top:
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		LIST_FOREACH(dirrem, &inodedep->id_dirremhd, dm_inonext) {
			/*
			 * If another thread is trying to lock this vnode
			 * it will fail, but we must wait for it to do so
			 * before we can proceed.
			 */
			if (dirrem->dm_state & INPROGRESS) {
				wait_worklist(&dirrem->dm_list, "pwrwait");
				goto top;
			}
			if ((dirrem->dm_state & (COMPLETE | ONWORKLIST)) ==
			    (COMPLETE | ONWORKLIST))
				break;
		}
		if (dirrem == NULL)
			return;
		remove_from_worklist(&dirrem->dm_list);
		FREE_LOCK(&lk);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_removes: suspended filesystem");
		handle_workitem_remove(dirrem, 0);
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(&lk);
	}
}

/*
 * Process all truncations associated with a vnode if we are running out
 * of journal space.  This is called when the vnode lock is already held
 * and no other process can clear the truncation.
 */
static void
process_truncates(vp)
	struct vnode *vp;
{
	struct inodedep *inodedep;
	struct freeblks *freeblks;
	struct mount *mp;
	ino_t inum;
	int cgwait;

	rw_assert(&lk, RA_WLOCKED);

	mp = vp->v_mount;
	inum = VTOI(vp)->i_number;
	for (;;) {
		if (inodedep_lookup(mp, inum, 0, &inodedep) == 0)
			return;
		cgwait = 0;
		TAILQ_FOREACH(freeblks, &inodedep->id_freeblklst, fb_next) {
			/* Journal entries not yet written. */
			if (!LIST_EMPTY(&freeblks->fb_jblkdephd)) {
				jwait(&LIST_FIRST(
				    &freeblks->fb_jblkdephd)->jb_list,
				    MNT_WAIT);
				break;
			}
			/* Another thread is executing this item. */
			if (freeblks->fb_state & INPROGRESS) {
				wait_worklist(&freeblks->fb_list, "ptrwait");
				break;
			}
			/* Freeblks is waiting on an inode write. */
			if ((freeblks->fb_state & COMPLETE) == 0) {
				FREE_LOCK(&lk);
				ffs_update(vp, 1);
				ACQUIRE_LOCK(&lk);
				break;
			}
			if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST)) ==
			    (ALLCOMPLETE | ONWORKLIST)) {
				remove_from_worklist(&freeblks->fb_list);
				freeblks->fb_state |= INPROGRESS;
				FREE_LOCK(&lk);
				if (vn_start_secondary_write(NULL, &mp,
				    V_NOWAIT))
					panic("process_truncates: "
					    "suspended filesystem");
				handle_workitem_freeblocks(freeblks, 0);
				vn_finished_secondary_write(mp);
				ACQUIRE_LOCK(&lk);
				break;
			}
			if (freeblks->fb_cgwait)
				cgwait++;
		}
		if (cgwait) {
			FREE_LOCK(&lk);
			sync_cgs(mp, MNT_WAIT);
			ffs_sync_snap(mp, MNT_WAIT);
			ACQUIRE_LOCK(&lk);
			continue;
		}
		if (freeblks == NULL)
			break;
	}
	return;
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, target, flags)
	struct mount *mp;
	int target;
	int flags;
{
	struct worklist sentinel;
	struct worklist *wk;
	struct ufsmount *ump;
	int matchcnt;
	int error;

	rw_assert(&lk, RA_WLOCKED);
	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	PHOLD(curproc);	/* Don't let the stack go away. */
	ump = VFSTOUFS(mp);
	matchcnt = 0;
	sentinel.wk_mp = NULL;
	sentinel.wk_type = D_SENTINEL;
	LIST_INSERT_HEAD(&ump->softdep_workitem_pending, &sentinel, wk_list);
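	/*
	 * The sentinel marks our position in the list, letting us drop
	 * the lock while each item is processed.  If we run into another
	 * thread's sentinel we simply hop ours past it and continue.
	 */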
	for (wk = LIST_NEXT(&sentinel, wk_list); wk != NULL;
	    wk = LIST_NEXT(&sentinel, wk_list)) {
		if (wk->wk_type == D_SENTINEL) {
			LIST_REMOVE(&sentinel, wk_list);
			LIST_INSERT_AFTER(wk, &sentinel, wk_list);
			continue;
		}
		if (wk->wk_state & INPROGRESS)
			panic("process_worklist_item: %p already in progress.",
			    wk);
		wk->wk_state |= INPROGRESS;
		remove_from_worklist(wk);
		FREE_LOCK(&lk);
		if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
			panic("process_worklist_item: suspended filesystem");
		switch (wk->wk_type) {
		case D_DIRREM:
			/* removal of a directory entry */
			error = handle_workitem_remove(WK_DIRREM(wk), flags);
			break;

		case D_FREEBLKS:
			/* releasing blocks and/or fragments from a file */
			error = handle_workitem_freeblocks(WK_FREEBLKS(wk),
			    flags);
			break;

		case D_FREEFRAG:
			/* releasing a fragment when replaced as a file grows */
			handle_workitem_freefrag(WK_FREEFRAG(wk));
			error = 0;
			break;

		case D_FREEFILE:
			/* releasing an inode when its link count drops to 0 */
			handle_workitem_freefile(WK_FREEFILE(wk));
			error = 0;
			break;

		default:
			panic("%s_process_worklist: Unknown type %s",
			    "softdep", TYPENAME(wk->wk_type));
			/* NOTREACHED */
		}
		vn_finished_secondary_write(mp);
		ACQUIRE_LOCK(&lk);
		if (error == 0) {
			if (++matchcnt == target)
				break;
			continue;
		}
		/*
		 * We have to retry the worklist item later.  Wake up any
		 * waiters who may be able to complete it immediately and
		 * add the item back to the head so we don't try to execute
		 * it again.
		 */
		wk->wk_state &= ~INPROGRESS;
		wake_worklist(wk);
		add_to_worklist(wk, WK_HEAD);
	}
	LIST_REMOVE(&sentinel, wk_list);
1841	/* Sentinel could've become the tail from remove_from_worklist. */
1842	if (ump->softdep_worklist_tail == &sentinel)
1843		ump->softdep_worklist_tail =
1844		    (struct worklist *)sentinel.wk_list.le_prev;
1845	PRELE(curproc);
1846	return (matchcnt);
1847}
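
/*
 * Illustrative sketch (not part of the original source): a simplified,
 * single-threaded rendering of the moving-sentinel iteration used by
 * process_worklist_item() above.  The sentinel is stepped past each
 * item so that iteration can resume correctly after the lock has been
 * dropped and re-acquired while the item is processed.  The structure
 * and function names here are hypothetical.
 */
#if 0
#include <sys/queue.h>

struct item {
	LIST_ENTRY(item) i_next;
};
LIST_HEAD(itemhead, item);

static void
walk_with_sentinel(struct itemhead *head)
{
	struct item sentinel, *it;

	LIST_INSERT_HEAD(head, &sentinel, i_next);
	while ((it = LIST_NEXT(&sentinel, i_next)) != NULL) {
		/* Advance the sentinel past the item we are visiting. */
		LIST_REMOVE(&sentinel, i_next);
		LIST_INSERT_AFTER(it, &sentinel, i_next);
		/* ...drop the lock, process "it", re-acquire the lock... */
	}
	LIST_REMOVE(&sentinel, i_next);
}
#endif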
1848
1849/*
1850 * Move dependencies from one buffer to another.
1851 */
1852int
1853softdep_move_dependencies(oldbp, newbp)
1854	struct buf *oldbp;
1855	struct buf *newbp;
1856{
1857	struct worklist *wk, *wktail;
1858	int dirty;
1859
1860	dirty = 0;
1861	wktail = NULL;
1862	ACQUIRE_LOCK(&lk);
1863	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
1864		LIST_REMOVE(wk, wk_list);
1865		if (wk->wk_type == D_BMSAFEMAP &&
1866		    bmsafemap_backgroundwrite(WK_BMSAFEMAP(wk), newbp))
1867			dirty = 1;
1868		if (wktail == NULL)
1869			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
1870		else
1871			LIST_INSERT_AFTER(wktail, wk, wk_list);
1872		wktail = wk;
1873	}
1874	FREE_LOCK(&lk);
1875
1876	return (dirty);
1877}
1878
1879/*
1880 * Purge the work list of all items associated with a particular mount point.
1881 */
1882int
1883softdep_flushworklist(oldmnt, countp, td)
1884	struct mount *oldmnt;
1885	int *countp;
1886	struct thread *td;
1887{
1888	struct vnode *devvp;
1889	int count, error = 0;
1890	struct ufsmount *ump;
1891
1892	/*
1893	 * Alternately flush the block device associated with the mount
1894	 * point and process any dependencies that the flushing
1895	 * creates. We continue until no more worklist dependencies
1896	 * are found.
1897	 */
1898	*countp = 0;
1899	ump = VFSTOUFS(oldmnt);
1900	devvp = ump->um_devvp;
1901	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
1902		*countp += count;
1903		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
1904		error = VOP_FSYNC(devvp, MNT_WAIT, td);
1905		VOP_UNLOCK(devvp, 0);
1906		if (error)
1907			break;
1908	}
1909	return (error);
1910}
1911
1912int
1913softdep_waitidle(struct mount *mp)
1914{
1915	struct ufsmount *ump;
1916	int error;
1917	int i;
1918
1919	ump = VFSTOUFS(mp);
1920	ACQUIRE_LOCK(&lk);
1921	for (i = 0; i < 10 && ump->softdep_deps; i++) {
1922		ump->softdep_req = 1;
1923		if (ump->softdep_on_worklist)
1924			panic("softdep_waitidle: work added after flush.");
1925		msleep(&ump->softdep_deps, &lk, PVM, "softdeps", 1);
1926	}
1927	ump->softdep_req = 0;
1928	FREE_LOCK(&lk);
1929	error = 0;
1930	if (i == 10) {
1931		error = EBUSY;
1932		printf("softdep_waitidle: Failed to flush worklist for %p\n",
1933		    mp);
1934	}
1935
1936	return (error);
1937}
1938
1939/*
1940 * Flush all vnodes and worklist items associated with a specified mount point.
1941 */
1942int
1943softdep_flushfiles(oldmnt, flags, td)
1944	struct mount *oldmnt;
1945	int flags;
1946	struct thread *td;
1947{
1948#ifdef QUOTA
1949	struct ufsmount *ump;
1950	int i;
1951#endif
1952	int error, early, depcount, loopcnt, retry_flush_count, retry;
1953	int morework;
1954
1955	loopcnt = 10;
1956	retry_flush_count = 3;
1957retry_flush:
1958	error = 0;
1959
1960	/*
1961	 * Alternately flush the vnodes associated with the mount
1962	 * point and process any dependencies that the flushing
1963	 * creates. In theory, this loop can iterate at most twice,
1964	 * but we give it a few extra passes just to be sure.
1965	 */
1966	for (; loopcnt > 0; loopcnt--) {
1967		/*
1968		 * Do another flush in case any vnodes were brought in
1969		 * as part of the cleanup operations.
1970		 */
1971		early = retry_flush_count == 1 || (oldmnt->mnt_kern_flag &
1972		    MNTK_UNMOUNT) == 0 ? 0 : EARLYFLUSH;
1973		if ((error = ffs_flushfiles(oldmnt, flags | early, td)) != 0)
1974			break;
1975		if ((error = softdep_flushworklist(oldmnt, &depcount, td)) != 0 ||
1976		    depcount == 0)
1977			break;
1978	}
1979	/*
1980	 * If we are unmounting then it is an error to fail. If we
1981	 * are simply trying to downgrade to read-only, then filesystem
1982	 * activity can keep us busy forever, so we just fail with EBUSY.
1983	 */
1984	if (loopcnt == 0) {
1985		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
1986			panic("softdep_flushfiles: looping");
1987		error = EBUSY;
1988	}
1989	if (!error)
1990		error = softdep_waitidle(oldmnt);
1991	if (!error) {
1992		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT) {
1993			retry = 0;
1994			MNT_ILOCK(oldmnt);
1995			KASSERT((oldmnt->mnt_kern_flag & MNTK_NOINSMNTQ) != 0,
1996			    ("softdep_flushfiles: !MNTK_NOINSMNTQ"));
1997			morework = oldmnt->mnt_nvnodelistsize > 0;
1998#ifdef QUOTA
1999			ump = VFSTOUFS(oldmnt);
2000			UFS_LOCK(ump);
2001			for (i = 0; i < MAXQUOTAS; i++) {
2002				if (ump->um_quotas[i] != NULLVP)
2003					morework = 1;
2004			}
2005			UFS_UNLOCK(ump);
2006#endif
2007			if (morework) {
2008				if (--retry_flush_count > 0) {
2009					retry = 1;
2010					loopcnt = 3;
2011				} else
2012					error = EBUSY;
2013			}
2014			MNT_IUNLOCK(oldmnt);
2015			if (retry)
2016				goto retry_flush;
2017		}
2018	}
2019	return (error);
2020}
2021
2022/*
2023 * Structure hashing.
2024 *
2025 * There are three types of structures that can be looked up:
2026 *	1) pagedep structures identified by mount point, inode number,
2027 *	   and logical block.
2028 *	2) inodedep structures identified by mount point and inode number.
2029 *	3) newblk structures identified by mount point and
2030 *	   physical block number.
2031 *
2032 * The "pagedep" and "inodedep" dependency structures are hashed
2033 * separately from the file blocks and inodes to which they correspond.
2034 * This separation helps when the in-memory copy of an inode or
2035 * file block must be replaced. It also obviates the need to access
2036 * an inode or file page when simply updating (or de-allocating)
2037 * dependency structures. Lookup of newblk structures is needed to
2038 * find newly allocated blocks when trying to associate them with
2039 * their allocdirect or allocindir structure.
2040 *
2041 * The lookup routines optionally create and hash a new instance when
2042 * an existing entry is not found.
2043 */
2044#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
2045#define NODELAY		0x0002	/* cannot do background work */
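
/*
 * Illustrative sketch (not part of the original source): the contract
 * shared by the lookup routines below.  With DEPALLOC set, the caller
 * is guaranteed a structure in *pagedeppp on return; the return value
 * only distinguishes a pre-existing entry (1) from a freshly allocated
 * one (0).  Hypothetical caller, with invented variable names:
 */
#if 0
	struct pagedep *pagedep;

	ACQUIRE_LOCK(&lk);
	if (pagedep_lookup(mp, bp, inum, lbn, DEPALLOC, &pagedep) == 0) {
		/* A new pagedep was allocated, hashed, and returned. */
	} else {
		/* An existing pagedep was found in the hash. */
	}
	FREE_LOCK(&lk);
#endif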
2046
2047/*
2048 * Structures and routines associated with pagedep caching.
2049 */
2050LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
2051u_long	pagedep_hash;		/* size of hash table - 1 */
2052#define	PAGEDEP_HASH(mp, inum, lbn) \
2053	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
2054	    pagedep_hash])
2055
2056static int
2057pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp)
2058	struct pagedep_hashhead *pagedephd;
2059	ino_t ino;
2060	ufs_lbn_t lbn;
2061	struct mount *mp;
2062	int flags;
2063	struct pagedep **pagedeppp;
2064{
2065	struct pagedep *pagedep;
2066
2067	LIST_FOREACH(pagedep, pagedephd, pd_hash) {
2068		if (ino == pagedep->pd_ino && lbn == pagedep->pd_lbn &&
2069		    mp == pagedep->pd_list.wk_mp) {
2070			*pagedeppp = pagedep;
2071			return (1);
2072		}
2073	}
2074	*pagedeppp = NULL;
2075	return (0);
2076}
2077/*
2078 * Look up a pagedep. Return 1 if found, 0 otherwise.
2079 * If not found, allocate if DEPALLOC flag is passed.
2080 * Found or allocated entry is returned in pagedeppp.
2081 * This routine must be called with the softdep lock held.
2082 */
2083static int
2084pagedep_lookup(mp, bp, ino, lbn, flags, pagedeppp)
2085	struct mount *mp;
2086	struct buf *bp;
2087	ino_t ino;
2088	ufs_lbn_t lbn;
2089	int flags;
2090	struct pagedep **pagedeppp;
2091{
2092	struct pagedep *pagedep;
2093	struct pagedep_hashhead *pagedephd;
2094	struct worklist *wk;
2095	int ret;
2096	int i;
2097
2098	rw_assert(&lk, RA_WLOCKED);
2099	if (bp) {
2100		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2101			if (wk->wk_type == D_PAGEDEP) {
2102				*pagedeppp = WK_PAGEDEP(wk);
2103				return (1);
2104			}
2105		}
2106	}
2107	pagedephd = PAGEDEP_HASH(mp, ino, lbn);
2108	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
2109	if (ret) {
2110		if (((*pagedeppp)->pd_state & ONWORKLIST) == 0 && bp)
2111			WORKLIST_INSERT(&bp->b_dep, &(*pagedeppp)->pd_list);
2112		return (1);
2113	}
2114	if ((flags & DEPALLOC) == 0)
2115		return (0);
2116	FREE_LOCK(&lk);
2117	pagedep = malloc(sizeof(struct pagedep),
2118	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
2119	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
2120	ACQUIRE_LOCK(&lk);
2121	ret = pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp);
2122	if (*pagedeppp) {
2123		/*
2124		 * This should never happen since we only create pagedeps
2125		 * with the vnode lock held.  Could be an assert.
2126		 */
2127		WORKITEM_FREE(pagedep, D_PAGEDEP);
2128		return (ret);
2129	}
2130	pagedep->pd_ino = ino;
2131	pagedep->pd_lbn = lbn;
2132	LIST_INIT(&pagedep->pd_dirremhd);
2133	LIST_INIT(&pagedep->pd_pendinghd);
2134	for (i = 0; i < DAHASHSZ; i++)
2135		LIST_INIT(&pagedep->pd_diraddhd[i]);
2136	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
2137	WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
2138	*pagedeppp = pagedep;
2139	return (0);
2140}
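
/*
 * Illustrative sketch (not part of the original source): the
 * drop-allocate-recheck pattern used by pagedep_lookup() above and by
 * the other lookup routines.  malloc() may sleep, so the lock is
 * released around it and the hash chain must be searched a second time
 * in case another thread installed the entry first.  Generic shape
 * with hypothetical find() and init() helpers:
 */
#if 0
	if (find(hashhead, key, entrypp))
		return (1);			/* Pre-existing entry. */
	if ((flags & DEPALLOC) == 0)
		return (0);
	FREE_LOCK(&lk);
	entry = malloc(sizeof(*entry), M_TYPE, M_SOFTDEP_FLAGS | M_ZERO);
	ACQUIRE_LOCK(&lk);
	if (find(hashhead, key, entrypp)) {	/* Lost the race. */
		WORKITEM_FREE(entry, D_TYPE);
		return (1);
	}
	init(entry, key);
	LIST_INSERT_HEAD(hashhead, entry, hashlinks);
	*entrypp = entry;
	return (0);
#endif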
2141
2142/*
2143 * Structures and routines associated with inodedep caching.
2144 */
2145LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
2146static u_long	inodedep_hash;	/* size of hash table - 1 */
2147#define	INODEDEP_HASH(fs, inum) \
2148      (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])
2149
2150static int
2151inodedep_find(inodedephd, fs, inum, inodedeppp)
2152	struct inodedep_hashhead *inodedephd;
2153	struct fs *fs;
2154	ino_t inum;
2155	struct inodedep **inodedeppp;
2156{
2157	struct inodedep *inodedep;
2158
2159	LIST_FOREACH(inodedep, inodedephd, id_hash)
2160		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
2161			break;
2162	if (inodedep) {
2163		*inodedeppp = inodedep;
2164		return (1);
2165	}
2166	*inodedeppp = NULL;
2167
2168	return (0);
2169}
2170/*
2171 * Look up an inodedep. Return 1 if found, 0 if not found.
2172 * If not found, allocate if DEPALLOC flag is passed.
2173 * Found or allocated entry is returned in inodedeppp.
2174 * This routine must be called with the softdep lock held.
2175 */
2176static int
2177inodedep_lookup(mp, inum, flags, inodedeppp)
2178	struct mount *mp;
2179	ino_t inum;
2180	int flags;
2181	struct inodedep **inodedeppp;
2182{
2183	struct inodedep *inodedep;
2184	struct inodedep_hashhead *inodedephd;
2185	struct fs *fs;
2186
2187	rw_assert(&lk, RA_WLOCKED);
2188	fs = VFSTOUFS(mp)->um_fs;
2189	inodedephd = INODEDEP_HASH(fs, inum);
2190
2191	if (inodedep_find(inodedephd, fs, inum, inodedeppp))
2192		return (1);
2193	if ((flags & DEPALLOC) == 0)
2194		return (0);
2195	/*
2196	 * If we are over our limit, try to improve the situation.
2197	 */
2198	if (dep_current[D_INODEDEP] > max_softdeps && (flags & NODELAY) == 0)
2199		request_cleanup(mp, FLUSH_INODES);
2200	FREE_LOCK(&lk);
2201	inodedep = malloc(sizeof(struct inodedep),
2202		M_INODEDEP, M_SOFTDEP_FLAGS);
2203	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
2204	ACQUIRE_LOCK(&lk);
2205	if (inodedep_find(inodedephd, fs, inum, inodedeppp)) {
2206		WORKITEM_FREE(inodedep, D_INODEDEP);
2207		return (1);
2208	}
2209	inodedep->id_fs = fs;
2210	inodedep->id_ino = inum;
2211	inodedep->id_state = ALLCOMPLETE;
2212	inodedep->id_nlinkdelta = 0;
2213	inodedep->id_savedino1 = NULL;
2214	inodedep->id_savedsize = -1;
2215	inodedep->id_savedextsize = -1;
2216	inodedep->id_savednlink = -1;
2217	inodedep->id_bmsafemap = NULL;
2218	inodedep->id_mkdiradd = NULL;
2219	LIST_INIT(&inodedep->id_dirremhd);
2220	LIST_INIT(&inodedep->id_pendinghd);
2221	LIST_INIT(&inodedep->id_inowait);
2222	LIST_INIT(&inodedep->id_bufwait);
2223	TAILQ_INIT(&inodedep->id_inoreflst);
2224	TAILQ_INIT(&inodedep->id_inoupdt);
2225	TAILQ_INIT(&inodedep->id_newinoupdt);
2226	TAILQ_INIT(&inodedep->id_extupdt);
2227	TAILQ_INIT(&inodedep->id_newextupdt);
2228	TAILQ_INIT(&inodedep->id_freeblklst);
2229	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
2230	*inodedeppp = inodedep;
2231	return (0);
2232}
2233
2234/*
2235 * Structures and routines associated with newblk caching.
2236 */
2237LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
2238u_long	newblk_hash;		/* size of hash table - 1 */
2239#define	NEWBLK_HASH(fs, inum) \
2240	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])
2241
2242static int
2243newblk_find(newblkhd, mp, newblkno, flags, newblkpp)
2244	struct newblk_hashhead *newblkhd;
2245	struct mount *mp;
2246	ufs2_daddr_t newblkno;
2247	int flags;
2248	struct newblk **newblkpp;
2249{
2250	struct newblk *newblk;
2251
2252	LIST_FOREACH(newblk, newblkhd, nb_hash) {
2253		if (newblkno != newblk->nb_newblkno)
2254			continue;
2255		if (mp != newblk->nb_list.wk_mp)
2256			continue;
2257		/*
2258		 * If we're creating a new dependency don't match those that
2259		 * have already been converted to allocdirects.  This is for
2260		 * a frag extend.
2261		 */
2262		if ((flags & DEPALLOC) && newblk->nb_list.wk_type != D_NEWBLK)
2263			continue;
2264		break;
2265	}
2266	if (newblk) {
2267		*newblkpp = newblk;
2268		return (1);
2269	}
2270	*newblkpp = NULL;
2271	return (0);
2272}
2273
2274/*
2275 * Look up a newblk. Return 1 if found, 0 if not found.
2276 * If not found, allocate if DEPALLOC flag is passed.
2277 * Found or allocated entry is returned in newblkpp.
2278 */
2279static int
2280newblk_lookup(mp, newblkno, flags, newblkpp)
2281	struct mount *mp;
2282	ufs2_daddr_t newblkno;
2283	int flags;
2284	struct newblk **newblkpp;
2285{
2286	struct newblk *newblk;
2287	struct newblk_hashhead *newblkhd;
2288
2289	newblkhd = NEWBLK_HASH(VFSTOUFS(mp)->um_fs, newblkno);
2290	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp))
2291		return (1);
2292	if ((flags & DEPALLOC) == 0)
2293		return (0);
2294	FREE_LOCK(&lk);
2295	newblk = malloc(sizeof(union allblk), M_NEWBLK,
2296	    M_SOFTDEP_FLAGS | M_ZERO);
2297	workitem_alloc(&newblk->nb_list, D_NEWBLK, mp);
2298	ACQUIRE_LOCK(&lk);
2299	if (newblk_find(newblkhd, mp, newblkno, flags, newblkpp)) {
2300		WORKITEM_FREE(newblk, D_NEWBLK);
2301		return (1);
2302	}
2303	newblk->nb_freefrag = NULL;
2304	LIST_INIT(&newblk->nb_indirdeps);
2305	LIST_INIT(&newblk->nb_newdirblk);
2306	LIST_INIT(&newblk->nb_jwork);
2307	newblk->nb_state = ATTACHED;
2308	newblk->nb_newblkno = newblkno;
2309	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
2310	*newblkpp = newblk;
2311	return (0);
2312}
2313
2314/*
2315 * Structures and routines associated with freed indirect block caching.
2316 */
2317struct freeworklst *indir_hashtbl;
2318u_long	indir_hash;		/* size of hash table - 1 */
2319#define	INDIR_HASH(mp, blkno) \
2320	(&indir_hashtbl[((((register_t)(mp)) >> 13) + (blkno)) & indir_hash])
2321
2322/*
2323 * Lookup an indirect block in the indir hash table.  The freework is
2324 * removed and potentially freed.  The caller must do a blocking journal
2325 * write before writing to the blkno.
2326 */
2327static int
2328indirblk_lookup(mp, blkno)
2329	struct mount *mp;
2330	ufs2_daddr_t blkno;
2331{
2332	struct freework *freework;
2333	struct freeworklst *wkhd;
2334
2335	wkhd = INDIR_HASH(mp, blkno);
2336	TAILQ_FOREACH(freework, wkhd, fw_next) {
2337		if (freework->fw_blkno != blkno)
2338			continue;
2339		if (freework->fw_list.wk_mp != mp)
2340			continue;
2341		indirblk_remove(freework);
2342		return (1);
2343	}
2344	return (0);
2345}
2346
2347/*
2348 * Insert an indirect block represented by freework into the indirblk
2349 * hash table so that it may prevent the block from being re-used prior
2350 * to the journal being written.
2351 */
2352static void
2353indirblk_insert(freework)
2354	struct freework *freework;
2355{
2356	struct jblocks *jblocks;
2357	struct jseg *jseg;
2358
2359	jblocks = VFSTOUFS(freework->fw_list.wk_mp)->softdep_jblocks;
2360	jseg = TAILQ_LAST(&jblocks->jb_segs, jseglst);
2361	if (jseg == NULL)
2362		return;
2363
2364	LIST_INSERT_HEAD(&jseg->js_indirs, freework, fw_segs);
2365	TAILQ_INSERT_HEAD(INDIR_HASH(freework->fw_list.wk_mp,
2366	    freework->fw_blkno), freework, fw_next);
2367	freework->fw_state &= ~DEPCOMPLETE;
2368}
2369
2370static void
2371indirblk_remove(freework)
2372	struct freework *freework;
2373{
2374
2375	LIST_REMOVE(freework, fw_segs);
2376	TAILQ_REMOVE(INDIR_HASH(freework->fw_list.wk_mp,
2377	    freework->fw_blkno), freework, fw_next);
2378	freework->fw_state |= DEPCOMPLETE;
2379	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
2380		WORKITEM_FREE(freework, D_FREEWORK);
2381}
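
/*
 * Illustrative sketch (not part of the original source): the lifecycle
 * of a freed indirect block in the hash above.  indirblk_insert() pins
 * the block number until its journal segment reaches stable store; a
 * hit from indirblk_lookup() tells an allocator that it must force the
 * journal before reusing the block.  Hypothetical caller:
 */
#if 0
	if (indirblk_lookup(mp, blkno)) {
		/*
		 * The block came from a freed indirect whose journal
		 * record may not be stable yet; force a blocking journal
		 * write before new data is written to blkno.
		 */
		softdep_process_journal(mp, NULL, MNT_WAIT);
	}
#endif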
2382
2383/*
2384 * Executed during filesystem initialization before
2385 * mounting any filesystems.
2386 */
2387void
2388softdep_initialize()
2389{
2390	int i;
2391
2392	LIST_INIT(&mkdirlisthd);
2393	max_softdeps = desiredvnodes * 4;
2394	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP, &pagedep_hash);
2395	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
2396	newblk_hashtbl = hashinit(max_softdeps / 2,  M_NEWBLK, &newblk_hash);
2397	bmsafemap_hashtbl = hashinit(1024, M_BMSAFEMAP, &bmsafemap_hash);
2398	i = 1 << (ffs(desiredvnodes / 10) - 1);
2399	indir_hashtbl = malloc(i * sizeof(indir_hashtbl[0]), M_FREEWORK,
2400	    M_WAITOK);
2401	indir_hash = i - 1;
2402	for (i = 0; i <= indir_hash; i++)
2403		TAILQ_INIT(&indir_hashtbl[i]);
2404
2405	/* initialize bioops hack */
2406	bioops.io_start = softdep_disk_io_initiation;
2407	bioops.io_complete = softdep_disk_write_complete;
2408	bioops.io_deallocate = softdep_deallocate_dependencies;
2409	bioops.io_countdeps = softdep_count_dependencies;
2410
2411	/* Initialize the callout with an mtx. */
2412	callout_init_mtx(&softdep_callout, &lk, 0);
2413}
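
/*
 * Illustrative example (not part of the original source): the indir
 * table length is a power of two, so "indir_hash" is an all-ones mask
 * and INDIR_HASH() can index with '&' instead of a modulus.  The mount
 * pointer is shifted right by 13 bits first so that its low,
 * allocation-aligned bits do not steer every mount into the same
 * buckets.  E.g. with 16 buckets (indir_hash == 0xf):
 *
 *	bucket = ((((register_t)mp) >> 13) + blkno) & 0xf;
 */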
2414
2415/*
2416 * Executed after all filesystems have been unmounted during
2417 * filesystem module unload.
2418 */
2419void
2420softdep_uninitialize()
2421{
2422
2423	callout_drain(&softdep_callout);
2424	hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash);
2425	hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash);
2426	hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash);
2427	hashdestroy(bmsafemap_hashtbl, M_BMSAFEMAP, bmsafemap_hash);
2428	free(indir_hashtbl, M_FREEWORK);
2429}
2430
2431/*
2432 * Called at mount time to notify the dependency code that a
2433 * filesystem wishes to use it.
2434 */
2435int
2436softdep_mount(devvp, mp, fs, cred)
2437	struct vnode *devvp;
2438	struct mount *mp;
2439	struct fs *fs;
2440	struct ucred *cred;
2441{
2442	struct csum_total cstotal;
2443	struct ufsmount *ump;
2444	struct cg *cgp;
2445	struct buf *bp;
2446	int error, cyl;
2447
2448	MNT_ILOCK(mp);
2449	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
2450	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
2451		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
2452			MNTK_SOFTDEP | MNTK_NOASYNC;
2453	}
2454	MNT_IUNLOCK(mp);
2455	ump = VFSTOUFS(mp);
2456	LIST_INIT(&ump->softdep_workitem_pending);
2457	LIST_INIT(&ump->softdep_journal_pending);
2458	TAILQ_INIT(&ump->softdep_unlinked);
2459	LIST_INIT(&ump->softdep_dirtycg);
2460	ump->softdep_worklist_tail = NULL;
2461	ump->softdep_on_worklist = 0;
2462	ump->softdep_deps = 0;
2463	if ((fs->fs_flags & FS_SUJ) &&
2464	    (error = journal_mount(mp, fs, cred)) != 0) {
2465		printf("Failed to start journal: %d\n", error);
2466		return (error);
2467	}
2468	/*
2469	 * When doing soft updates, the counters in the
2470	 * superblock may have gotten out of sync. Recomputation
2471	 * can take a long time and can be deferred for background
2472	 * fsck.  However, the old behavior of scanning the cylinder
2473	 * groups and recalculating them at mount time is available
2474	 * by setting vfs.ffs.compute_summary_at_mount to one.
2475	 */
2476	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
2477		return (0);
2478	bzero(&cstotal, sizeof cstotal);
2479	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
2480		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
2481		    fs->fs_cgsize, cred, &bp)) != 0) {
2482			brelse(bp);
2483			return (error);
2484		}
2485		cgp = (struct cg *)bp->b_data;
2486		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
2487		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
2488		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
2489		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
2490		fs->fs_cs(fs, cyl) = cgp->cg_cs;
2491		brelse(bp);
2492	}
2493#ifdef DEBUG
2494	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
2495		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
2496#endif
2497	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
2498	return (0);
2499}
2500
2501void
2502softdep_unmount(mp)
2503	struct mount *mp;
2504{
2505
2506	MNT_ILOCK(mp);
2507	mp->mnt_flag &= ~MNT_SOFTDEP;
2508	if (MOUNTEDSUJ(mp) == 0) {
2509		MNT_IUNLOCK(mp);
2510		return;
2511	}
2512	mp->mnt_flag &= ~MNT_SUJ;
2513	MNT_IUNLOCK(mp);
2514	journal_unmount(mp);
2515}
2516
2517static struct jblocks *
2518jblocks_create(void)
2519{
2520	struct jblocks *jblocks;
2521
2522	jblocks = malloc(sizeof(*jblocks), M_JBLOCKS, M_WAITOK | M_ZERO);
2523	TAILQ_INIT(&jblocks->jb_segs);
2524	jblocks->jb_avail = 10;
2525	jblocks->jb_extent = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2526	    M_JBLOCKS, M_WAITOK | M_ZERO);
2527
2528	return (jblocks);
2529}
2530
2531static ufs2_daddr_t
2532jblocks_alloc(jblocks, bytes, actual)
2533	struct jblocks *jblocks;
2534	int bytes;
2535	int *actual;
2536{
2537	ufs2_daddr_t daddr;
2538	struct jextent *jext;
2539	int freecnt;
2540	int blocks;
2541
2542	blocks = bytes / DEV_BSIZE;
2543	jext = &jblocks->jb_extent[jblocks->jb_head];
2544	freecnt = jext->je_blocks - jblocks->jb_off;
2545	if (freecnt == 0) {
2546		jblocks->jb_off = 0;
2547		if (++jblocks->jb_head > jblocks->jb_used)
2548			jblocks->jb_head = 0;
2549		jext = &jblocks->jb_extent[jblocks->jb_head];
2550		freecnt = jext->je_blocks;
2551	}
2552	if (freecnt > blocks)
2553		freecnt = blocks;
2554	*actual = freecnt * DEV_BSIZE;
2555	daddr = jext->je_daddr + jblocks->jb_off;
2556	jblocks->jb_off += freecnt;
2557	jblocks->jb_free -= freecnt;
2558
2559	return (daddr);
2560}
2561
2562static void
2563jblocks_free(jblocks, mp, bytes)
2564	struct jblocks *jblocks;
2565	struct mount *mp;
2566	int bytes;
2567{
2568
2569	jblocks->jb_free += bytes / DEV_BSIZE;
2570	if (jblocks->jb_suspended)
2571		worklist_speedup();
2572	wakeup(jblocks);
2573}
2574
2575static void
2576jblocks_destroy(jblocks)
2577	struct jblocks *jblocks;
2578{
2579
2580	if (jblocks->jb_extent)
2581		free(jblocks->jb_extent, M_JBLOCKS);
2582	free(jblocks, M_JBLOCKS);
2583}
2584
2585static void
2586jblocks_add(jblocks, daddr, blocks)
2587	struct jblocks *jblocks;
2588	ufs2_daddr_t daddr;
2589	int blocks;
2590{
2591	struct jextent *jext;
2592
2593	jblocks->jb_blocks += blocks;
2594	jblocks->jb_free += blocks;
2595	jext = &jblocks->jb_extent[jblocks->jb_used];
2596	/* Adding the first block. */
2597	if (jext->je_daddr == 0) {
2598		jext->je_daddr = daddr;
2599		jext->je_blocks = blocks;
2600		return;
2601	}
2602	/* Extending the last extent. */
2603	if (jext->je_daddr + jext->je_blocks == daddr) {
2604		jext->je_blocks += blocks;
2605		return;
2606	}
2607	/* Adding a new extent. */
2608	if (++jblocks->jb_used == jblocks->jb_avail) {
2609		jblocks->jb_avail *= 2;
2610		jext = malloc(sizeof(struct jextent) * jblocks->jb_avail,
2611		    M_JBLOCKS, M_WAITOK | M_ZERO);
2612		memcpy(jext, jblocks->jb_extent,
2613		    sizeof(struct jextent) * jblocks->jb_used);
2614		free(jblocks->jb_extent, M_JBLOCKS);
2615		jblocks->jb_extent = jext;
2616	}
2617	jext = &jblocks->jb_extent[jblocks->jb_used];
2618	jext->je_daddr = daddr;
2619	jext->je_blocks = blocks;
2620	return;
2621}
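
/*
 * Illustrative sketch (not part of the original source): how
 * jblocks_add() coalesces the journal file's block runs into extents.
 * The daddr values are hypothetical; jblocks_add() is fed one run of
 * fsbtodb(fs, fs->fs_frag) disk blocks per journal file block.
 */
#if 0
	struct jblocks *jb;

	jb = jblocks_create();
	jblocks_add(jb, 1000, 16);	/* First extent: [1000, 16]. */
	jblocks_add(jb, 1016, 16);	/* Contiguous: grows to [1000, 32]. */
	jblocks_add(jb, 2048, 16);	/* Gap: starts new extent [2048, 16]. */
	/* jb_used is now 1: extents occupy indices 0 and 1. */
	jblocks_destroy(jb);
#endif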
2622
2623int
2624softdep_journal_lookup(mp, vpp)
2625	struct mount *mp;
2626	struct vnode **vpp;
2627{
2628	struct componentname cnp;
2629	struct vnode *dvp;
2630	ino_t sujournal;
2631	int error;
2632
2633	error = VFS_VGET(mp, ROOTINO, LK_EXCLUSIVE, &dvp);
2634	if (error)
2635		return (error);
2636	bzero(&cnp, sizeof(cnp));
2637	cnp.cn_nameiop = LOOKUP;
2638	cnp.cn_flags = ISLASTCN;
2639	cnp.cn_thread = curthread;
2640	cnp.cn_cred = curthread->td_ucred;
2641	cnp.cn_pnbuf = SUJ_FILE;
2642	cnp.cn_nameptr = SUJ_FILE;
2643	cnp.cn_namelen = strlen(SUJ_FILE);
2644	error = ufs_lookup_ino(dvp, NULL, &cnp, &sujournal);
2645	vput(dvp);
2646	if (error != 0)
2647		return (error);
2648	error = VFS_VGET(mp, sujournal, LK_EXCLUSIVE, vpp);
2649	return (error);
2650}
2651
2652/*
2653 * Open and verify the journal file.
2654 */
2655static int
2656journal_mount(mp, fs, cred)
2657	struct mount *mp;
2658	struct fs *fs;
2659	struct ucred *cred;
2660{
2661	struct jblocks *jblocks;
2662	struct vnode *vp;
2663	struct inode *ip;
2664	ufs2_daddr_t blkno;
2665	int bcount;
2666	int error;
2667	int i;
2668
2669	error = softdep_journal_lookup(mp, &vp);
2670	if (error != 0) {
2671		printf("Failed to find journal.  Use tunefs to create one\n");
2672		return (error);
2673	}
2674	ip = VTOI(vp);
2675	if (ip->i_size < SUJ_MIN) {
2676		error = ENOSPC;
2677		goto out;
2678	}
2679	bcount = lblkno(fs, ip->i_size);	/* Only use whole blocks. */
2680	jblocks = jblocks_create();
2681	for (i = 0; i < bcount; i++) {
2682		error = ufs_bmaparray(vp, i, &blkno, NULL, NULL, NULL);
2683		if (error)
2684			break;
2685		jblocks_add(jblocks, blkno, fsbtodb(fs, fs->fs_frag));
2686	}
2687	if (error) {
2688		jblocks_destroy(jblocks);
2689		goto out;
2690	}
2691	jblocks->jb_low = jblocks->jb_free / 3;	/* Reserve 33%. */
2692	jblocks->jb_min = jblocks->jb_free / 10; /* Suspend at 10%. */
2693	VFSTOUFS(mp)->softdep_jblocks = jblocks;
2694out:
2695	if (error == 0) {
2696		MNT_ILOCK(mp);
2697		mp->mnt_flag |= MNT_SUJ;
2698		mp->mnt_flag &= ~MNT_SOFTDEP;
2699		MNT_IUNLOCK(mp);
2700		/*
2701		 * Only validate the journal contents if the
2702		 * filesystem is clean, otherwise we write the logs
2703		 * but they'll never be used.  If the filesystem was
2704		 * still dirty when we mounted it the journal is
2705		 * invalid and a new journal can only be valid if it
2706		 * starts from a clean mount.
2707		 */
2708		if (fs->fs_clean) {
2709			DIP_SET(ip, i_modrev, fs->fs_mtime);
2710			ip->i_flags |= IN_MODIFIED;
2711			ffs_update(vp, 1);
2712		}
2713	}
2714	vput(vp);
2715	return (error);
2716}
2717
2718static void
2719journal_unmount(mp)
2720	struct mount *mp;
2721{
2722	struct ufsmount *ump;
2723
2724	ump = VFSTOUFS(mp);
2725	if (ump->softdep_jblocks)
2726		jblocks_destroy(ump->softdep_jblocks);
2727	ump->softdep_jblocks = NULL;
2728}
2729
2730/*
2731 * Called when a journal record is ready to be written.  Space is allocated
2732 * and the journal entry is created when the journal is flushed to stable
2733 * store.
2734 */
2735static void
2736add_to_journal(wk)
2737	struct worklist *wk;
2738{
2739	struct ufsmount *ump;
2740
2741	rw_assert(&lk, RA_WLOCKED);
2742	ump = VFSTOUFS(wk->wk_mp);
2743	if (wk->wk_state & ONWORKLIST)
2744		panic("add_to_journal: %s(0x%X) already on list",
2745		    TYPENAME(wk->wk_type), wk->wk_state);
2746	wk->wk_state |= ONWORKLIST | DEPCOMPLETE;
2747	if (LIST_EMPTY(&ump->softdep_journal_pending)) {
2748		ump->softdep_jblocks->jb_age = ticks;
2749		LIST_INSERT_HEAD(&ump->softdep_journal_pending, wk, wk_list);
2750	} else
2751		LIST_INSERT_AFTER(ump->softdep_journal_tail, wk, wk_list);
2752	ump->softdep_journal_tail = wk;
2753	ump->softdep_on_journal += 1;
2754}
2755
2756/*
2757 * Remove an arbitrary item from the journal worklist, maintaining the tail
2758 * pointer.  This happens when a new operation obviates the need to
2759 * journal an old operation.
2760 */
2761static void
2762remove_from_journal(wk)
2763	struct worklist *wk;
2764{
2765	struct ufsmount *ump;
2766
2767	rw_assert(&lk, RA_WLOCKED);
2768	ump = VFSTOUFS(wk->wk_mp);
2769#ifdef SUJ_DEBUG
2770	{
2771		struct worklist *wkn;
2772
2773		LIST_FOREACH(wkn, &ump->softdep_journal_pending, wk_list)
2774			if (wkn == wk)
2775				break;
2776		if (wkn == NULL)
2777			panic("remove_from_journal: %p is not in journal", wk);
2778	}
2779#endif
2780	/*
2781	 * We emulate a TAILQ to save space in most structures which do not
2782	 * require TAILQ semantics.  Here we must update the tail position
2783	 * when removing the tail which is not the final entry. This works
2784 * only if the worklist linkage is at the beginning of the structure.
2785	 */
2786	if (ump->softdep_journal_tail == wk)
2787		ump->softdep_journal_tail =
2788		    (struct worklist *)wk->wk_list.le_prev;
2789
2790	WORKLIST_REMOVE(wk);
2791	ump->softdep_on_journal -= 1;
2792}
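
/*
 * Illustrative sketch (not part of the original source): why the
 * le_prev cast above recovers the previous entry.  For a LIST, le_prev
 * holds the address of the previous element's le_next field (or of the
 * list head's first pointer).  Because wk_list is the first member of
 * struct worklist, that address is also the address of the previous
 * work item itself, so a single cast stands in for the back pointer a
 * real TAILQ would carry:
 */
#if 0
	/*
	 * wk->wk_list.le_prev == &prev->wk_list.le_next, and with
	 * offsetof(struct worklist, wk_list) == 0 that address equals
	 * (struct worklist *)prev whenever prev is a real entry.
	 */
	prev = (struct worklist *)wk->wk_list.le_prev;
#endif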
2793
2794/*
2795 * Check for journal space as well as dependency limits so the prelink
2796 * code can throttle both journaled and non-journaled filesystems.
2797 * Threshold is 0 for low and 1 for min.
2798 */
2799static int
2800journal_space(ump, thresh)
2801	struct ufsmount *ump;
2802	int thresh;
2803{
2804	struct jblocks *jblocks;
2805	int avail;
2806
2807	jblocks = ump->softdep_jblocks;
2808	if (jblocks == NULL)
2809		return (1);
2810	/*
2811	 * We use a tighter restriction here to prevent request_cleanup(),
2812	 * running in other threads, from blocking on locks we currently hold.
2813	 */
2814	if (dep_current[D_INODEDEP] > (max_softdeps / 10) * 9)
2815		return (0);
2816	if (thresh)
2817		thresh = jblocks->jb_min;
2818	else
2819		thresh = jblocks->jb_low;
2820	avail = (ump->softdep_on_journal * JREC_SIZE) / DEV_BSIZE;
2821	avail = jblocks->jb_free - avail;
2822
2823	return (avail > thresh);
2824}
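
/*
 * Illustrative example (not part of the original source): for a 1MB
 * journal (2048 DEV_BSIZE blocks) with jb_free == 400 and 1000 records
 * pending on the journal worklist, the pending records will consume
 * (1000 * JREC_SIZE) / DEV_BSIZE = (1000 * 32) / 512 = 62 blocks,
 * leaving 400 - 62 = 338.  journal_space() then reports whether 338
 * clears jb_low (roughly a third of the journal) or jb_min (roughly a
 * tenth of it), depending on the thresh argument.
 */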
2825
2826static void
2827journal_suspend(ump)
2828	struct ufsmount *ump;
2829{
2830	struct jblocks *jblocks;
2831	struct mount *mp;
2832
2833	mp = UFSTOVFS(ump);
2834	jblocks = ump->softdep_jblocks;
2835	MNT_ILOCK(mp);
2836	if ((mp->mnt_kern_flag & MNTK_SUSPEND) == 0) {
2837		stat_journal_min++;
2838		mp->mnt_kern_flag |= MNTK_SUSPEND;
2839		mp->mnt_susp_owner = FIRST_THREAD_IN_PROC(softdepproc);
2840	}
2841	jblocks->jb_suspended = 1;
2842	MNT_IUNLOCK(mp);
2843}
2844
2845static int
2846journal_unsuspend(struct ufsmount *ump)
2847{
2848	struct jblocks *jblocks;
2849	struct mount *mp;
2850
2851	mp = UFSTOVFS(ump);
2852	jblocks = ump->softdep_jblocks;
2853
2854	if (jblocks != NULL && jblocks->jb_suspended &&
2855	    journal_space(ump, jblocks->jb_min)) {
2856		jblocks->jb_suspended = 0;
2857		FREE_LOCK(&lk);
2858		mp->mnt_susp_owner = curthread;
2859		vfs_write_resume(mp, 0);
2860		ACQUIRE_LOCK(&lk);
2861		return (1);
2862	}
2863	return (0);
2864}
2865
2866/*
2867 * Called before any allocation function to be certain that there is
2868 * sufficient space in the journal prior to creating any new records.
2869 * Since in the case of block allocation we may have multiple locked
2870 * buffers at the time of the actual allocation we can not block
2871 * when the journal records are created.  Doing so would create a deadlock
2872 * if any of these buffers needed to be flushed to reclaim space.  Instead
2873 * we require a sufficiently large amount of available space such that
2874 * each thread in the system could have passed this allocation check and
2875 * still have sufficient free space.  With 20% of a minimum journal size
2876 * of 1MB we have 6553 records available.
2877 */
2878int
2879softdep_prealloc(vp, waitok)
2880	struct vnode *vp;
2881	int waitok;
2882{
2883	struct ufsmount *ump;
2884
2885	/*
2886	 * Nothing to do if we are not running journaled soft updates.
2887	 * If we currently hold the snapshot lock, we must avoid handling
2888	 * other resources that could cause deadlock.
2889	 */
2890	if (DOINGSUJ(vp) == 0 || IS_SNAPSHOT(VTOI(vp)))
2891		return (0);
2892	ump = VFSTOUFS(vp->v_mount);
2893	ACQUIRE_LOCK(&lk);
2894	if (journal_space(ump, 0)) {
2895		FREE_LOCK(&lk);
2896		return (0);
2897	}
2898	stat_journal_low++;
2899	FREE_LOCK(&lk);
2900	if (waitok == MNT_NOWAIT)
2901		return (ENOSPC);
2902	/*
2903	 * Attempt to sync this vnode once to flush any journal
2904	 * work attached to it.
2905	 */
2906	if ((curthread->td_pflags & TDP_COWINPROGRESS) == 0)
2907		ffs_syncvnode(vp, waitok, 0);
2908	ACQUIRE_LOCK(&lk);
2909	process_removes(vp);
2910	process_truncates(vp);
2911	if (journal_space(ump, 0) == 0) {
2912		softdep_speedup();
2913		if (journal_space(ump, 1) == 0)
2914			journal_suspend(ump);
2915	}
2916	FREE_LOCK(&lk);
2917
2918	return (0);
2919}
2920
2921/*
2922 * Before adjusting a link count on a vnode verify that we have sufficient
2923 * journal space.  If not, process operations that depend on the currently
2924 * locked pair of vnodes to try to flush space as the syncer, buf daemon,
2925 * and softdep flush threads can not acquire these locks to reclaim space.
2926 */
2927static void
2928softdep_prelink(dvp, vp)
2929	struct vnode *dvp;
2930	struct vnode *vp;
2931{
2932	struct ufsmount *ump;
2933
2934	ump = VFSTOUFS(dvp->v_mount);
2935	rw_assert(&lk, RA_WLOCKED);
2936	/*
2937	 * Nothing to do if we have sufficient journal space.
2938	 * If we currently hold the snapshot lock, we must avoid
2939	 * handling other resources that could cause deadlock.
2940	 */
2941	if (journal_space(ump, 0) || (vp && IS_SNAPSHOT(VTOI(vp))))
2942		return;
2943	stat_journal_low++;
2944	FREE_LOCK(&lk);
2945	if (vp)
2946		ffs_syncvnode(vp, MNT_NOWAIT, 0);
2947	ffs_syncvnode(dvp, MNT_WAIT, 0);
2948	ACQUIRE_LOCK(&lk);
2949	/* Process vp before dvp as it may create .. removes. */
2950	if (vp) {
2951		process_removes(vp);
2952		process_truncates(vp);
2953	}
2954	process_removes(dvp);
2955	process_truncates(dvp);
2956	softdep_speedup();
2957	process_worklist_item(UFSTOVFS(ump), 2, LK_NOWAIT);
2958	if (journal_space(ump, 0) == 0) {
2959		softdep_speedup();
2960		if (journal_space(ump, 1) == 0)
2961			journal_suspend(ump);
2962	}
2963}
2964
2965static void
2966jseg_write(ump, jseg, data)
2967	struct ufsmount *ump;
2968	struct jseg *jseg;
2969	uint8_t *data;
2970{
2971	struct jsegrec *rec;
2972
2973	rec = (struct jsegrec *)data;
2974	rec->jsr_seq = jseg->js_seq;
2975	rec->jsr_oldest = jseg->js_oldseq;
2976	rec->jsr_cnt = jseg->js_cnt;
2977	rec->jsr_blocks = jseg->js_size / ump->um_devvp->v_bufobj.bo_bsize;
2978	rec->jsr_crc = 0;
2979	rec->jsr_time = ump->um_fs->fs_mtime;
2980}
2981
2982static inline void
2983inoref_write(inoref, jseg, rec)
2984	struct inoref *inoref;
2985	struct jseg *jseg;
2986	struct jrefrec *rec;
2987{
2988
2989	inoref->if_jsegdep->jd_seg = jseg;
2990	rec->jr_ino = inoref->if_ino;
2991	rec->jr_parent = inoref->if_parent;
2992	rec->jr_nlink = inoref->if_nlink;
2993	rec->jr_mode = inoref->if_mode;
2994	rec->jr_diroff = inoref->if_diroff;
2995}
2996
2997static void
2998jaddref_write(jaddref, jseg, data)
2999	struct jaddref *jaddref;
3000	struct jseg *jseg;
3001	uint8_t *data;
3002{
3003	struct jrefrec *rec;
3004
3005	rec = (struct jrefrec *)data;
3006	rec->jr_op = JOP_ADDREF;
3007	inoref_write(&jaddref->ja_ref, jseg, rec);
3008}
3009
3010static void
3011jremref_write(jremref, jseg, data)
3012	struct jremref *jremref;
3013	struct jseg *jseg;
3014	uint8_t *data;
3015{
3016	struct jrefrec *rec;
3017
3018	rec = (struct jrefrec *)data;
3019	rec->jr_op = JOP_REMREF;
3020	inoref_write(&jremref->jr_ref, jseg, rec);
3021}
3022
3023static void
3024jmvref_write(jmvref, jseg, data)
3025	struct jmvref *jmvref;
3026	struct jseg *jseg;
3027	uint8_t *data;
3028{
3029	struct jmvrec *rec;
3030
3031	rec = (struct jmvrec *)data;
3032	rec->jm_op = JOP_MVREF;
3033	rec->jm_ino = jmvref->jm_ino;
3034	rec->jm_parent = jmvref->jm_parent;
3035	rec->jm_oldoff = jmvref->jm_oldoff;
3036	rec->jm_newoff = jmvref->jm_newoff;
3037}
3038
3039static void
3040jnewblk_write(jnewblk, jseg, data)
3041	struct jnewblk *jnewblk;
3042	struct jseg *jseg;
3043	uint8_t *data;
3044{
3045	struct jblkrec *rec;
3046
3047	jnewblk->jn_jsegdep->jd_seg = jseg;
3048	rec = (struct jblkrec *)data;
3049	rec->jb_op = JOP_NEWBLK;
3050	rec->jb_ino = jnewblk->jn_ino;
3051	rec->jb_blkno = jnewblk->jn_blkno;
3052	rec->jb_lbn = jnewblk->jn_lbn;
3053	rec->jb_frags = jnewblk->jn_frags;
3054	rec->jb_oldfrags = jnewblk->jn_oldfrags;
3055}
3056
3057static void
3058jfreeblk_write(jfreeblk, jseg, data)
3059	struct jfreeblk *jfreeblk;
3060	struct jseg *jseg;
3061	uint8_t *data;
3062{
3063	struct jblkrec *rec;
3064
3065	jfreeblk->jf_dep.jb_jsegdep->jd_seg = jseg;
3066	rec = (struct jblkrec *)data;
3067	rec->jb_op = JOP_FREEBLK;
3068	rec->jb_ino = jfreeblk->jf_ino;
3069	rec->jb_blkno = jfreeblk->jf_blkno;
3070	rec->jb_lbn = jfreeblk->jf_lbn;
3071	rec->jb_frags = jfreeblk->jf_frags;
3072	rec->jb_oldfrags = 0;
3073}
3074
3075static void
3076jfreefrag_write(jfreefrag, jseg, data)
3077	struct jfreefrag *jfreefrag;
3078	struct jseg *jseg;
3079	uint8_t *data;
3080{
3081	struct jblkrec *rec;
3082
3083	jfreefrag->fr_jsegdep->jd_seg = jseg;
3084	rec = (struct jblkrec *)data;
3085	rec->jb_op = JOP_FREEBLK;
3086	rec->jb_ino = jfreefrag->fr_ino;
3087	rec->jb_blkno = jfreefrag->fr_blkno;
3088	rec->jb_lbn = jfreefrag->fr_lbn;
3089	rec->jb_frags = jfreefrag->fr_frags;
3090	rec->jb_oldfrags = 0;
3091}
3092
3093static void
3094jtrunc_write(jtrunc, jseg, data)
3095	struct jtrunc *jtrunc;
3096	struct jseg *jseg;
3097	uint8_t *data;
3098{
3099	struct jtrncrec *rec;
3100
3101	jtrunc->jt_dep.jb_jsegdep->jd_seg = jseg;
3102	rec = (struct jtrncrec *)data;
3103	rec->jt_op = JOP_TRUNC;
3104	rec->jt_ino = jtrunc->jt_ino;
3105	rec->jt_size = jtrunc->jt_size;
3106	rec->jt_extsize = jtrunc->jt_extsize;
3107}
3108
3109static void
3110jfsync_write(jfsync, jseg, data)
3111	struct jfsync *jfsync;
3112	struct jseg *jseg;
3113	uint8_t *data;
3114{
3115	struct jtrncrec *rec;
3116
3117	rec = (struct jtrncrec *)data;
3118	rec->jt_op = JOP_SYNC;
3119	rec->jt_ino = jfsync->jfs_ino;
3120	rec->jt_size = jfsync->jfs_size;
3121	rec->jt_extsize = jfsync->jfs_extsize;
3122}
3123
3124static void
3125softdep_flushjournal(mp)
3126	struct mount *mp;
3127{
3128	struct jblocks *jblocks;
3129	struct ufsmount *ump;
3130
3131	if (MOUNTEDSUJ(mp) == 0)
3132		return;
3133	ump = VFSTOUFS(mp);
3134	jblocks = ump->softdep_jblocks;
3135	ACQUIRE_LOCK(&lk);
3136	while (ump->softdep_on_journal) {
3137		jblocks->jb_needseg = 1;
3138		softdep_process_journal(mp, NULL, MNT_WAIT);
3139	}
3140	FREE_LOCK(&lk);
3141}
3142
3143static void softdep_synchronize_completed(struct bio *);
3144static void softdep_synchronize(struct bio *, struct ufsmount *, void *);
3145
3146static void
3147softdep_synchronize_completed(bp)
3148        struct bio *bp;
3149{
3150	struct jseg *oldest;
3151	struct jseg *jseg;
3152
3153	/*
3154	 * caller1 marks the last segment written before we issued the
3155	 * synchronize cache.
3156	 */
3157	jseg = bp->bio_caller1;
3158	oldest = NULL;
3159	ACQUIRE_LOCK(&lk);
3160	/*
3161	 * Mark all the journal entries waiting on the synchronize cache
3162	 * as completed so they may continue on.
3163	 */
3164	while (jseg != NULL && (jseg->js_state & COMPLETE) == 0) {
3165		jseg->js_state |= COMPLETE;
3166		oldest = jseg;
3167		jseg = TAILQ_PREV(jseg, jseglst, js_next);
3168	}
3169	/*
3170	 * Restart deferred journal entry processing from the oldest
3171	 * completed jseg.
3172	 */
3173	if (oldest)
3174		complete_jsegs(oldest);
3175
3176	FREE_LOCK(&lk);
3177	g_destroy_bio(bp);
3178}
3179
3180/*
3181 * Send BIO_FLUSH/SYNCHRONIZE CACHE to the device to enforce write ordering
3182 * barriers.  The journal must be written prior to any blocks that depend
3183 * on it and the journal can not be released until the blocks have been
3184 * written.  This code handles both barriers simultaneously.
3185 */
3186static void
3187softdep_synchronize(bp, ump, caller1)
3188	struct bio *bp;
3189	struct ufsmount *ump;
3190	void *caller1;
3191{
3192
3193	bp->bio_cmd = BIO_FLUSH;
3194	bp->bio_flags |= BIO_ORDERED;
3195	bp->bio_data = NULL;
3196	bp->bio_offset = ump->um_cp->provider->mediasize;
3197	bp->bio_length = 0;
3198	bp->bio_done = softdep_synchronize_completed;
3199	bp->bio_caller1 = caller1;
3200	g_io_request(bp,
3201	    (struct g_consumer *)ump->um_devvp->v_bufobj.bo_private);
3202}
3203
3204/*
3205 * Flush some journal records to disk.
3206 */
3207static void
3208softdep_process_journal(mp, needwk, flags)
3209	struct mount *mp;
3210	struct worklist *needwk;
3211	int flags;
3212{
3213	struct jblocks *jblocks;
3214	struct ufsmount *ump;
3215	struct worklist *wk;
3216	struct jseg *jseg;
3217	struct buf *bp;
3218	struct bio *bio;
3219	uint8_t *data;
3220	struct fs *fs;
3221	int shouldflush;
3222	int segwritten;
3223	int jrecmin;	/* Minimum records per block. */
3224	int jrecmax;	/* Maximum records per block. */
3225	int size;
3226	int cnt;
3227	int off;
3228	int devbsize;
3229
3230	if (MOUNTEDSUJ(mp) == 0)
3231		return;
3232	shouldflush = softdep_flushcache;
3233	bio = NULL;
3234	jseg = NULL;
3235	ump = VFSTOUFS(mp);
3236	fs = ump->um_fs;
3237	jblocks = ump->softdep_jblocks;
3238	devbsize = ump->um_devvp->v_bufobj.bo_bsize;
3239	/*
3240	 * We write anywhere between a disk block and an fs block.  The upper
3241	 * bound is picked to prevent buffer cache fragmentation and limit
3242	 * processing time per I/O.
3243	 */
3244	jrecmin = (devbsize / JREC_SIZE) - 1; /* -1 for seg header */
3245	jrecmax = (fs->fs_bsize / devbsize) * jrecmin;
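	/*
	 * Illustrative example (not part of the original source): with
	 * 512-byte device blocks and 32-byte journal records, jrecmin is
	 * 512 / 32 - 1 = 15, the -1 reserving one slot per device block
	 * for the segment header.  A 16K fs block then allows jrecmax =
	 * (16384 / 512) * 15 = 480 records in a single segment write.
	 */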
3246	segwritten = 0;
3247	for (;;) {
3248		cnt = ump->softdep_on_journal;
3249		/*
3250		 * Criteria for writing a segment:
3251		 * 1) We have a full block.
3252		 * 2) We're called from jwait() and haven't found the
3253		 *    journal item yet.
3254		 * 3) Always write if needseg is set.
3255		 * 4) If we are called from process_worklist and have
3256		 *    not yet written anything we write a partial block
3257		 *    to enforce a 1 second maximum latency on journal
3258		 *    entries.
3259		 */
3260		if (cnt < (jrecmax - 1) && needwk == NULL &&
3261		    jblocks->jb_needseg == 0 && (segwritten || cnt == 0))
3262			break;
3263		cnt++;
3264		/*
3265		 * Verify some free journal space.  softdep_prealloc() should
3266		 * guarantee that we don't run out, so this is indicative of
3267		 * a problem with the flow control.  Try to recover
3268		 * gracefully in any event.
3269		 */
3270		while (jblocks->jb_free == 0) {
3271			if (flags != MNT_WAIT)
3272				break;
3273			printf("softdep: Out of journal space!\n");
3274			softdep_speedup();
3275			msleep(jblocks, &lk, PRIBIO, "jblocks", hz);
3276		}
3277		FREE_LOCK(&lk);
3278		jseg = malloc(sizeof(*jseg), M_JSEG, M_SOFTDEP_FLAGS);
3279		workitem_alloc(&jseg->js_list, D_JSEG, mp);
3280		LIST_INIT(&jseg->js_entries);
3281		LIST_INIT(&jseg->js_indirs);
3282		jseg->js_state = ATTACHED;
3283		if (shouldflush == 0)
3284			jseg->js_state |= COMPLETE;
3285		else if (bio == NULL)
3286			bio = g_alloc_bio();
3287		jseg->js_jblocks = jblocks;
3288		bp = geteblk(fs->fs_bsize, 0);
3289		ACQUIRE_LOCK(&lk);
3290		/*
3291		 * If there was a race while we were allocating the block
3292		 * and jseg, the entry we care about was likely written.
3293		 * We bail out in both the WAIT and NOWAIT case and assume
3294		 * the caller will loop if the entry it cares about is
3295		 * not written.
3296		 */
3297		cnt = ump->softdep_on_journal;
3298		if (cnt + jblocks->jb_needseg == 0 || jblocks->jb_free == 0) {
3299			bp->b_flags |= B_INVAL | B_NOCACHE;
3300			WORKITEM_FREE(jseg, D_JSEG);
3301			FREE_LOCK(&lk);
3302			brelse(bp);
3303			ACQUIRE_LOCK(&lk);
3304			break;
3305		}
3306		/*
3307		 * Calculate the disk block size required for the available
3308		 * records rounded to the min size.
3309		 */
3310		if (cnt == 0)
3311			size = devbsize;
3312		else if (cnt < jrecmax)
3313			size = howmany(cnt, jrecmin) * devbsize;
3314		else
3315			size = fs->fs_bsize;
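		/*
		 * Illustrative example (not part of the original source):
		 * continuing the numbers above (jrecmin == 15), 20 pending
		 * records round up to howmany(20, 15) * devbsize = 2 device
		 * blocks, since each block also carries a segment header.
		 */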
3316		/*
3317		 * Allocate a disk block for this journal data and account
3318		 * for truncation of the requested size if enough contiguous
3319		 * space was not available.
3320		 */
3321		bp->b_blkno = jblocks_alloc(jblocks, size, &size);
3322		bp->b_lblkno = bp->b_blkno;
3323		bp->b_offset = bp->b_blkno * DEV_BSIZE;
3324		bp->b_bcount = size;
3325		bp->b_flags &= ~B_INVAL;
3326		bp->b_flags |= B_VALIDSUSPWRT | B_NOCOPY;
3327		/*
3328		 * Initialize our jseg with cnt records.  Assign the next
3329		 * sequence number to it and link it in-order.
3330		 */
3331		cnt = MIN(cnt, (size / devbsize) * jrecmin);
3332		jseg->js_buf = bp;
3333		jseg->js_cnt = cnt;
3334		jseg->js_refs = cnt + 1;	/* Self ref. */
3335		jseg->js_size = size;
3336		jseg->js_seq = jblocks->jb_nextseq++;
3337		if (jblocks->jb_oldestseg == NULL)
3338			jblocks->jb_oldestseg = jseg;
3339		jseg->js_oldseq = jblocks->jb_oldestseg->js_seq;
3340		TAILQ_INSERT_TAIL(&jblocks->jb_segs, jseg, js_next);
3341		if (jblocks->jb_writeseg == NULL)
3342			jblocks->jb_writeseg = jseg;
3343		/*
3344		 * Start filling in records from the pending list.
3345		 */
3346		data = bp->b_data;
3347		off = 0;
3348		while ((wk = LIST_FIRST(&ump->softdep_journal_pending))
3349		    != NULL) {
3350			if (cnt == 0)
3351				break;
3352			/* Place a segment header on every device block. */
3353			if ((off % devbsize) == 0) {
3354				jseg_write(ump, jseg, data);
3355				off += JREC_SIZE;
3356				data = bp->b_data + off;
3357			}
3358			if (wk == needwk)
3359				needwk = NULL;
3360			remove_from_journal(wk);
3361			wk->wk_state |= INPROGRESS;
3362			WORKLIST_INSERT(&jseg->js_entries, wk);
3363			switch (wk->wk_type) {
3364			case D_JADDREF:
3365				jaddref_write(WK_JADDREF(wk), jseg, data);
3366				break;
3367			case D_JREMREF:
3368				jremref_write(WK_JREMREF(wk), jseg, data);
3369				break;
3370			case D_JMVREF:
3371				jmvref_write(WK_JMVREF(wk), jseg, data);
3372				break;
3373			case D_JNEWBLK:
3374				jnewblk_write(WK_JNEWBLK(wk), jseg, data);
3375				break;
3376			case D_JFREEBLK:
3377				jfreeblk_write(WK_JFREEBLK(wk), jseg, data);
3378				break;
3379			case D_JFREEFRAG:
3380				jfreefrag_write(WK_JFREEFRAG(wk), jseg, data);
3381				break;
3382			case D_JTRUNC:
3383				jtrunc_write(WK_JTRUNC(wk), jseg, data);
3384				break;
3385			case D_JFSYNC:
3386				jfsync_write(WK_JFSYNC(wk), jseg, data);
3387				break;
3388			default:
3389				panic("process_journal: Unknown type %s",
3390				    TYPENAME(wk->wk_type));
3391				/* NOTREACHED */
3392			}
3393			off += JREC_SIZE;
3394			data = bp->b_data + off;
3395			cnt--;
3396		}
3397		/*
3398		 * Write this one buffer and continue.
3399		 */
3400		segwritten = 1;
3401		jblocks->jb_needseg = 0;
3402		WORKLIST_INSERT(&bp->b_dep, &jseg->js_list);
3403		FREE_LOCK(&lk);
3404		pbgetvp(ump->um_devvp, bp);
3405		/*
3406		 * We only do the blocking wait once we find the journal
3407		 * entry we're looking for.
3408		 */
3409		if (needwk == NULL && flags == MNT_WAIT)
3410			bwrite(bp);
3411		else
3412			bawrite(bp);
3413		ACQUIRE_LOCK(&lk);
3414	}
3415	/*
3416	 * If we wrote a segment issue a synchronize cache so the journal
3417	 * is reflected on disk before the data is written.  Since reclaiming
3418	 * journal space also requires writing a journal record, this
3419	 * process also enforces a barrier before reclamation.
3420	 */
3421	if (segwritten && shouldflush) {
3422		softdep_synchronize(bio, ump,
3423		    TAILQ_LAST(&jblocks->jb_segs, jseglst));
3424	} else if (bio)
3425		g_destroy_bio(bio);
3426	/*
3427	 * If we've suspended the filesystem because we ran out of journal
3428	 * space either try to sync it here to make some progress or
3429	 * unsuspend it if we already have.
3430	 */
3431	if (flags == 0 && jblocks->jb_suspended) {
3432		if (journal_unsuspend(ump))
3433			return;
3434		FREE_LOCK(&lk);
3435		VFS_SYNC(mp, MNT_NOWAIT);
3436		ffs_sbupdate(ump, MNT_WAIT, 0);
3437		ACQUIRE_LOCK(&lk);
3438	}
3439}
3440
3441/*
3442 * Complete a jseg, allowing all dependencies awaiting journal writes
3443 * to proceed.  Each journal dependency also attaches a jsegdep to dependent
3444 * structures so that the journal segment can be freed to reclaim space.
3445 */
3446static void
3447complete_jseg(jseg)
3448	struct jseg *jseg;
3449{
3450	struct worklist *wk;
3451	struct jmvref *jmvref;
3452	int waiting;
3453#ifdef INVARIANTS
3454	int i = 0;
3455#endif
3456
3457	while ((wk = LIST_FIRST(&jseg->js_entries)) != NULL) {
3458		WORKLIST_REMOVE(wk);
3459		waiting = wk->wk_state & IOWAITING;
3460		wk->wk_state &= ~(INPROGRESS | IOWAITING);
3461		wk->wk_state |= COMPLETE;
3462		KASSERT(i++ < jseg->js_cnt,
3463		    ("complete_jseg: overflow %d >= %d",
3464		    i - 1, jseg->js_cnt));
3465		switch (wk->wk_type) {
3466		case D_JADDREF:
3467			handle_written_jaddref(WK_JADDREF(wk));
3468			break;
3469		case D_JREMREF:
3470			handle_written_jremref(WK_JREMREF(wk));
3471			break;
3472		case D_JMVREF:
3473			rele_jseg(jseg);	/* No jsegdep. */
3474			jmvref = WK_JMVREF(wk);
3475			LIST_REMOVE(jmvref, jm_deps);
3476			if ((jmvref->jm_pagedep->pd_state & ONWORKLIST) == 0)
3477				free_pagedep(jmvref->jm_pagedep);
3478			WORKITEM_FREE(jmvref, D_JMVREF);
3479			break;
3480		case D_JNEWBLK:
3481			handle_written_jnewblk(WK_JNEWBLK(wk));
3482			break;
3483		case D_JFREEBLK:
3484			handle_written_jblkdep(&WK_JFREEBLK(wk)->jf_dep);
3485			break;
3486		case D_JTRUNC:
3487			handle_written_jblkdep(&WK_JTRUNC(wk)->jt_dep);
3488			break;
3489		case D_JFSYNC:
3490			rele_jseg(jseg);	/* No jsegdep. */
3491			WORKITEM_FREE(wk, D_JFSYNC);
3492			break;
3493		case D_JFREEFRAG:
3494			handle_written_jfreefrag(WK_JFREEFRAG(wk));
3495			break;
3496		default:
3497			panic("complete_jseg: Unknown type %s",
3498			    TYPENAME(wk->wk_type));
3499			/* NOTREACHED */
3500		}
3501		if (waiting)
3502			wakeup(wk);
3503	}
3504	/* Release the self reference so the structure may be freed. */
3505	rele_jseg(jseg);
3506}
3507
3508/*
3509 * Determine which jsegs are ready for completion processing.  Waits for
3510 * synchronize cache to complete as well as forcing in-order completion
3511 * of journal entries.
3512 */
3513static void
3514complete_jsegs(jseg)
3515	struct jseg *jseg;
3516{
3517	struct jblocks *jblocks;
3518	struct jseg *jsegn;
3519
3520	jblocks = jseg->js_jblocks;
3521	/*
3522	 * Don't allow out of order completions.  If this isn't the first
3523 * block, wait for it to be written before we're done.
3524	 */
3525	if (jseg != jblocks->jb_writeseg)
3526		return;
3527	/* Iterate through available jsegs processing their entries. */
3528	while (jseg && (jseg->js_state & ALLCOMPLETE) == ALLCOMPLETE) {
3529		jblocks->jb_oldestwrseq = jseg->js_oldseq;
3530		jsegn = TAILQ_NEXT(jseg, js_next);
3531		complete_jseg(jseg);
3532		jseg = jsegn;
3533	}
3534	jblocks->jb_writeseg = jseg;
3535	/*
3536	 * Attempt to free jsegs now that oldestwrseq may have advanced.
3537	 */
3538	free_jsegs(jblocks);
3539}
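
/*
 * Illustrative example (not part of the original source): segment
 * writes may finish out of order, but completion is only released over
 * an in-order prefix.  If segments A, B, C were issued in that order
 * and only B and C have reached ALLCOMPLETE, jb_writeseg stays at A and
 * nothing is processed; once A finishes, all three are completed in
 * sequence by the loop above.
 */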
3540
3541/*
3542 * Mark a jseg as DEPCOMPLETE and throw away the buffer.  Attempt to handle
3543 * the final completions.
3544 */
3545static void
3546handle_written_jseg(jseg, bp)
3547	struct jseg *jseg;
3548	struct buf *bp;
3549{
3550
3551	if (jseg->js_refs == 0)
3552		panic("handle_written_jseg: No self-reference on %p", jseg);
3553	jseg->js_state |= DEPCOMPLETE;
3554	/*
3555	 * We'll never need this buffer again, set flags so it will be
3556	 * discarded.
3557	 */
3558	bp->b_flags |= B_INVAL | B_NOCACHE;
3559	pbrelvp(bp);
3560	complete_jsegs(jseg);
3561}
3562
3563static inline struct jsegdep *
3564inoref_jseg(inoref)
3565	struct inoref *inoref;
3566{
3567	struct jsegdep *jsegdep;
3568
3569	jsegdep = inoref->if_jsegdep;
3570	inoref->if_jsegdep = NULL;
3571
3572	return (jsegdep);
3573}
3574
3575/*
3576 * Called once a jremref has made it to stable store.  The jremref is marked
3577 * complete and we attempt to free it.  Any pagedep writes sleeping while
3578 * waiting for the jremref to complete will be awoken by free_jremref.
3579 */
3580static void
3581handle_written_jremref(jremref)
3582	struct jremref *jremref;
3583{
3584	struct inodedep *inodedep;
3585	struct jsegdep *jsegdep;
3586	struct dirrem *dirrem;
3587
3588	/* Grab the jsegdep. */
3589	jsegdep = inoref_jseg(&jremref->jr_ref);
3590	/*
3591	 * Remove us from the inoref list.
3592	 */
3593	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino,
3594	    0, &inodedep) == 0)
3595		panic("handle_written_jremref: Lost inodedep");
3596	TAILQ_REMOVE(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
3597	/*
3598	 * Complete the dirrem.
3599	 */
3600	dirrem = jremref->jr_dirrem;
3601	jremref->jr_dirrem = NULL;
3602	LIST_REMOVE(jremref, jr_deps);
3603	jsegdep->jd_state |= jremref->jr_state & MKDIR_PARENT;
3604	jwork_insert(&dirrem->dm_jwork, jsegdep);
3605	if (LIST_EMPTY(&dirrem->dm_jremrefhd) &&
3606	    (dirrem->dm_state & COMPLETE) != 0)
3607		add_to_worklist(&dirrem->dm_list, 0);
3608	free_jremref(jremref);
3609}
3610
3611/*
3612 * Called once a jaddref has made it to stable store.  The dependency is
3613 * marked complete and any dependent structures are added to the inode
3614 * bufwait list to be completed as soon as it is written.  If a bitmap write
3615 * depends on this entry we move the inode into the inodedephd of the
3616 * bmsafemap dependency and attempt to remove the jaddref from the bmsafemap.
3617 */
3618static void
3619handle_written_jaddref(jaddref)
3620	struct jaddref *jaddref;
3621{
3622	struct jsegdep *jsegdep;
3623	struct inodedep *inodedep;
3624	struct diradd *diradd;
3625	struct mkdir *mkdir;
3626
3627	/* Grab the jsegdep. */
3628	jsegdep = inoref_jseg(&jaddref->ja_ref);
3629	mkdir = NULL;
3630	diradd = NULL;
3631	if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
3632	    0, &inodedep) == 0)
3633		panic("handle_written_jaddref: Lost inodedep.");
3634	if (jaddref->ja_diradd == NULL)
3635		panic("handle_written_jaddref: No dependency");
3636	if (jaddref->ja_diradd->da_list.wk_type == D_DIRADD) {
3637		diradd = jaddref->ja_diradd;
3638		WORKLIST_INSERT(&inodedep->id_bufwait, &diradd->da_list);
3639	} else if (jaddref->ja_state & MKDIR_PARENT) {
3640		mkdir = jaddref->ja_mkdir;
3641		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir->md_list);
3642	} else if (jaddref->ja_state & MKDIR_BODY)
3643		mkdir = jaddref->ja_mkdir;
3644	else
3645		panic("handle_written_jaddref: Unknown dependency %p",
3646		    jaddref->ja_diradd);
3647	jaddref->ja_diradd = NULL;	/* also clears ja_mkdir */
3648	/*
3649	 * Remove us from the inode list.
3650	 */
3651	TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref, if_deps);
3652	/*
3653	 * The mkdir may be waiting on the jaddref to clear before freeing.
3654	 */
3655	if (mkdir) {
3656		KASSERT(mkdir->md_list.wk_type == D_MKDIR,
3657		    ("handle_written_jaddref: Incorrect type for mkdir %s",
3658		    TYPENAME(mkdir->md_list.wk_type)));
3659		mkdir->md_jaddref = NULL;
3660		diradd = mkdir->md_diradd;
3661		mkdir->md_state |= DEPCOMPLETE;
3662		complete_mkdir(mkdir);
3663	}
3664	jwork_insert(&diradd->da_jwork, jsegdep);
3665	if (jaddref->ja_state & NEWBLOCK) {
3666		inodedep->id_state |= ONDEPLIST;
3667		LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_inodedephd,
3668		    inodedep, id_deps);
3669	}
3670	free_jaddref(jaddref);
3671}
3672
3673/*
3674 * Called once a jnewblk journal is written.  The allocdirect or allocindir
3675 * is placed in the bmsafemap to await notification of a written bitmap.  If
3676 * the operation was canceled we add the segdep to the appropriate
3677 * dependency to free the journal space once the canceling operation
3678 * completes.
3679 */
3680static void
3681handle_written_jnewblk(jnewblk)
3682	struct jnewblk *jnewblk;
3683{
3684	struct bmsafemap *bmsafemap;
3685	struct freefrag *freefrag;
3686	struct freework *freework;
3687	struct jsegdep *jsegdep;
3688	struct newblk *newblk;
3689
3690	/* Grab the jsegdep. */
3691	jsegdep = jnewblk->jn_jsegdep;
3692	jnewblk->jn_jsegdep = NULL;
3693	if (jnewblk->jn_dep == NULL)
3694		panic("handle_written_jnewblk: No dependency for the segdep.");
3695	switch (jnewblk->jn_dep->wk_type) {
3696	case D_NEWBLK:
3697	case D_ALLOCDIRECT:
3698	case D_ALLOCINDIR:
3699		/*
3700		 * Add the written block to the bmsafemap so it can
3701		 * be notified when the bitmap is on disk.
3702		 */
3703		newblk = WK_NEWBLK(jnewblk->jn_dep);
3704		newblk->nb_jnewblk = NULL;
3705		if ((newblk->nb_state & GOINGAWAY) == 0) {
3706			bmsafemap = newblk->nb_bmsafemap;
3707			newblk->nb_state |= ONDEPLIST;
3708			LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk,
3709			    nb_deps);
3710		}
3711		jwork_insert(&newblk->nb_jwork, jsegdep);
3712		break;
3713	case D_FREEFRAG:
3714		/*
3715		 * A new block is being removed by a freefrag after being
3716		 * replaced by a frag extension.
3717		 */
3718		freefrag = WK_FREEFRAG(jnewblk->jn_dep);
3719		freefrag->ff_jdep = NULL;
3720		jwork_insert(&freefrag->ff_jwork, jsegdep);
3721		break;
3722	case D_FREEWORK:
3723		/*
3724		 * A direct block was removed by truncate.
3725		 */
3726		freework = WK_FREEWORK(jnewblk->jn_dep);
3727		freework->fw_jnewblk = NULL;
3728		jwork_insert(&freework->fw_freeblks->fb_jwork, jsegdep);
3729		break;
3730	default:
3731		panic("handle_written_jnewblk: Unknown type %d.",
3732		    jnewblk->jn_dep->wk_type);
3733	}
3734	jnewblk->jn_dep = NULL;
3735	free_jnewblk(jnewblk);
3736}
3737
3738/*
3739 * Cancel a jfreefrag that won't be needed, probably due to colliding with
3740 * an in-flight allocation that has not yet been committed.  Divorce us
3741 * from the freefrag and mark it DEPCOMPLETE so that it may be added
3742 * to the worklist.
3743 */
3744static void
3745cancel_jfreefrag(jfreefrag)
3746	struct jfreefrag *jfreefrag;
3747{
3748	struct freefrag *freefrag;
3749
3750	if (jfreefrag->fr_jsegdep) {
3751		free_jsegdep(jfreefrag->fr_jsegdep);
3752		jfreefrag->fr_jsegdep = NULL;
3753	}
3754	freefrag = jfreefrag->fr_freefrag;
3755	jfreefrag->fr_freefrag = NULL;
3756	free_jfreefrag(jfreefrag);
3757	freefrag->ff_state |= DEPCOMPLETE;
3758	CTR1(KTR_SUJ, "cancel_jfreefrag: blkno %jd", freefrag->ff_blkno);
3759}
3760
3761/*
3762 * Free a jfreefrag when the parent freefrag is rendered obsolete.
3763 */
3764static void
3765free_jfreefrag(jfreefrag)
3766	struct jfreefrag *jfreefrag;
3767{
3768
3769	if (jfreefrag->fr_state & INPROGRESS)
3770		WORKLIST_REMOVE(&jfreefrag->fr_list);
3771	else if (jfreefrag->fr_state & ONWORKLIST)
3772		remove_from_journal(&jfreefrag->fr_list);
3773	if (jfreefrag->fr_freefrag != NULL)
		panic("free_jfreefrag: Still attached to a freefrag.");
3775	WORKITEM_FREE(jfreefrag, D_JFREEFRAG);
3776}
3777
3778/*
3779 * Called when the journal write for a jfreefrag completes.  The parent
3780 * freefrag is added to the worklist if this completes its dependencies.
3781 */
3782static void
3783handle_written_jfreefrag(jfreefrag)
3784	struct jfreefrag *jfreefrag;
3785{
3786	struct jsegdep *jsegdep;
3787	struct freefrag *freefrag;
3788
3789	/* Grab the jsegdep. */
3790	jsegdep = jfreefrag->fr_jsegdep;
3791	jfreefrag->fr_jsegdep = NULL;
3792	freefrag = jfreefrag->fr_freefrag;
3793	if (freefrag == NULL)
3794		panic("handle_written_jfreefrag: No freefrag.");
3795	freefrag->ff_state |= DEPCOMPLETE;
3796	freefrag->ff_jdep = NULL;
3797	jwork_insert(&freefrag->ff_jwork, jsegdep);
3798	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
3799		add_to_worklist(&freefrag->ff_list, 0);
3800	jfreefrag->fr_freefrag = NULL;
3801	free_jfreefrag(jfreefrag);
3802}
3803
3804/*
3805 * Called when the journal write for a jfreeblk completes.  The jfreeblk
3806 * is removed from the freeblks list of pending journal writes and the
3807 * jsegdep is moved to the freeblks jwork to be completed when all blocks
3808 * have been reclaimed.
3809 */
3810static void
3811handle_written_jblkdep(jblkdep)
3812	struct jblkdep *jblkdep;
3813{
3814	struct freeblks *freeblks;
3815	struct jsegdep *jsegdep;
3816
3817	/* Grab the jsegdep. */
3818	jsegdep = jblkdep->jb_jsegdep;
3819	jblkdep->jb_jsegdep = NULL;
3820	freeblks = jblkdep->jb_freeblks;
3821	LIST_REMOVE(jblkdep, jb_deps);
3822	jwork_insert(&freeblks->fb_jwork, jsegdep);
3823	/*
3824	 * If the freeblks is all journaled, we can add it to the worklist.
3825	 */
3826	if (LIST_EMPTY(&freeblks->fb_jblkdephd) &&
3827	    (freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
3828		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
3829
3830	free_jblkdep(jblkdep);
3831}
3832
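/*
 * Allocate a jsegdep that will track the journal segment holding the
 * record for 'wk' and pin that segment until the jsegdep is freed.
 */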
3833static struct jsegdep *
3834newjsegdep(struct worklist *wk)
3835{
3836	struct jsegdep *jsegdep;
3837
3838	jsegdep = malloc(sizeof(*jsegdep), M_JSEGDEP, M_SOFTDEP_FLAGS);
3839	workitem_alloc(&jsegdep->jd_list, D_JSEGDEP, wk->wk_mp);
3840	jsegdep->jd_seg = NULL;
3841
3842	return (jsegdep);
3843}
3844
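/*
 * Allocate a jmvref to journal the move of the directory entry for 'ino'
 * within 'dp' from 'oldoff' to 'newoff'.  The record requires no further
 * initialization and so is born ATTACHED and DEPCOMPLETE.
 */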
3845static struct jmvref *
3846newjmvref(dp, ino, oldoff, newoff)
3847	struct inode *dp;
3848	ino_t ino;
3849	off_t oldoff;
3850	off_t newoff;
3851{
3852	struct jmvref *jmvref;
3853
3854	jmvref = malloc(sizeof(*jmvref), M_JMVREF, M_SOFTDEP_FLAGS);
3855	workitem_alloc(&jmvref->jm_list, D_JMVREF, UFSTOVFS(dp->i_ump));
3856	jmvref->jm_list.wk_state = ATTACHED | DEPCOMPLETE;
3857	jmvref->jm_parent = dp->i_number;
3858	jmvref->jm_ino = ino;
3859	jmvref->jm_oldoff = oldoff;
3860	jmvref->jm_newoff = newoff;
3861
3862	return (jmvref);
3863}
3864
3865/*
3866 * Allocate a new jremref that tracks the removal of ip from dp with the
3867 * directory entry offset of diroff.  Mark the entry as ATTACHED and
3868 * DEPCOMPLETE as we have all the information required for the journal write
3869 * and the directory has already been removed from the buffer.  The caller
3870 * is responsible for linking the jremref into the pagedep and adding it
3871 * to the journal to write.  The MKDIR_PARENT flag is set if we're doing
3872 * a DOTDOT addition so handle_workitem_remove() can properly assign
3873 * the jsegdep when we're done.
3874 */
3875static struct jremref *
3876newjremref(struct dirrem *dirrem, struct inode *dp, struct inode *ip,
3877    off_t diroff, nlink_t nlink)
3878{
3879	struct jremref *jremref;
3880
3881	jremref = malloc(sizeof(*jremref), M_JREMREF, M_SOFTDEP_FLAGS);
3882	workitem_alloc(&jremref->jr_list, D_JREMREF, UFSTOVFS(dp->i_ump));
3883	jremref->jr_state = ATTACHED;
3884	newinoref(&jremref->jr_ref, ip->i_number, dp->i_number, diroff,
3885	   nlink, ip->i_mode);
3886	jremref->jr_dirrem = dirrem;
3887
3888	return (jremref);
3889}
3890
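/*
 * Initialize the inode reference fields common to jaddref and jremref
 * and allocate the jsegdep that will later pin the journal segment
 * holding the written record.
 */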
3891static inline void
3892newinoref(struct inoref *inoref, ino_t ino, ino_t parent, off_t diroff,
3893    nlink_t nlink, uint16_t mode)
3894{
3895
3896	inoref->if_jsegdep = newjsegdep(&inoref->if_list);
3897	inoref->if_diroff = diroff;
3898	inoref->if_ino = ino;
3899	inoref->if_parent = parent;
3900	inoref->if_nlink = nlink;
3901	inoref->if_mode = mode;
3902}
3903
3904/*
3905 * Allocate a new jaddref to track the addition of ino to dp at diroff.  The
 * directory offset may not be known until later.  The caller is
 * responsible for adding the entry to the journal when this information
 * is available.  nlink should be the link count prior to the addition
 * and mode is only required to have the correct IFMT.
3910 */
3911static struct jaddref *
3912newjaddref(struct inode *dp, ino_t ino, off_t diroff, int16_t nlink,
3913    uint16_t mode)
3914{
3915	struct jaddref *jaddref;
3916
3917	jaddref = malloc(sizeof(*jaddref), M_JADDREF, M_SOFTDEP_FLAGS);
3918	workitem_alloc(&jaddref->ja_list, D_JADDREF, UFSTOVFS(dp->i_ump));
3919	jaddref->ja_state = ATTACHED;
3920	jaddref->ja_mkdir = NULL;
3921	newinoref(&jaddref->ja_ref, ino, dp->i_number, diroff, nlink, mode);
3922
3923	return (jaddref);
3924}
3925
3926/*
3927 * Create a new free dependency for a freework.  The caller is responsible
3928 * for adjusting the reference count when it has the lock held.  The freedep
3929 * will track an outstanding bitmap write that will ultimately clear the
3930 * freework to continue.
3931 */
3932static struct freedep *
3933newfreedep(struct freework *freework)
3934{
3935	struct freedep *freedep;
3936
3937	freedep = malloc(sizeof(*freedep), M_FREEDEP, M_SOFTDEP_FLAGS);
3938	workitem_alloc(&freedep->fd_list, D_FREEDEP, freework->fw_list.wk_mp);
3939	freedep->fd_freework = freework;
3940
3941	return (freedep);
3942}
3943
3944/*
3945 * Free a freedep structure once the buffer it is linked to is written.  If
3946 * this is the last reference to the freework schedule it for completion.
3947 */
3948static void
3949free_freedep(freedep)
3950	struct freedep *freedep;
3951{
3952	struct freework *freework;
3953
3954	freework = freedep->fd_freework;
3955	freework->fw_freeblks->fb_cgwait--;
3956	if (--freework->fw_ref == 0)
3957		freework_enqueue(freework);
3958	WORKITEM_FREE(freedep, D_FREEDEP);
3959}
3960
3961/*
 * Allocate a new freework structure.  It tracks the freeing of an
 * indirect-block level when parent is not NULL, or a top-level block
 * when parent is NULL.  Top-level freework structures are allocated
 * without lk held, before the freeblks becomes visible outside of
 * softdep_setup_freeblocks().
3966 */
3967static struct freework *
3968newfreework(ump, freeblks, parent, lbn, nb, frags, off, journal)
3969	struct ufsmount *ump;
3970	struct freeblks *freeblks;
3971	struct freework *parent;
3972	ufs_lbn_t lbn;
3973	ufs2_daddr_t nb;
3974	int frags;
3975	int off;
3976	int journal;
3977{
3978	struct freework *freework;
3979
3980	freework = malloc(sizeof(*freework), M_FREEWORK, M_SOFTDEP_FLAGS);
3981	workitem_alloc(&freework->fw_list, D_FREEWORK, freeblks->fb_list.wk_mp);
3982	freework->fw_state = ATTACHED;
3983	freework->fw_jnewblk = NULL;
3984	freework->fw_freeblks = freeblks;
3985	freework->fw_parent = parent;
3986	freework->fw_lbn = lbn;
3987	freework->fw_blkno = nb;
3988	freework->fw_frags = frags;
3989	freework->fw_indir = NULL;
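	/*
	 * Under SUJ an indirect level holds one reference per child
	 * pointer plus one for the level itself; direct and ext attr
	 * blocks (lbn >= -NXADDR) need no such count.
	 */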
3990	freework->fw_ref = (MOUNTEDSUJ(UFSTOVFS(ump)) == 0 || lbn >= -NXADDR)
3991		? 0 : NINDIR(ump->um_fs) + 1;
3992	freework->fw_start = freework->fw_off = off;
3993	if (journal)
3994		newjfreeblk(freeblks, lbn, nb, frags);
3995	if (parent == NULL) {
3996		ACQUIRE_LOCK(&lk);
3997		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
3998		freeblks->fb_ref++;
3999		FREE_LOCK(&lk);
4000	}
4001
4002	return (freework);
4003}
4004
4005/*
4006 * Eliminate a jfreeblk for a block that does not need journaling.
4007 */
4008static void
4009cancel_jfreeblk(freeblks, blkno)
4010	struct freeblks *freeblks;
4011	ufs2_daddr_t blkno;
4012{
4013	struct jfreeblk *jfreeblk;
4014	struct jblkdep *jblkdep;
4015
4016	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps) {
4017		if (jblkdep->jb_list.wk_type != D_JFREEBLK)
4018			continue;
4019		jfreeblk = WK_JFREEBLK(&jblkdep->jb_list);
4020		if (jfreeblk->jf_blkno == blkno)
4021			break;
4022	}
4023	if (jblkdep == NULL)
4024		return;
4025	CTR1(KTR_SUJ, "cancel_jfreeblk: blkno %jd", blkno);
4026	free_jsegdep(jblkdep->jb_jsegdep);
4027	LIST_REMOVE(jblkdep, jb_deps);
4028	WORKITEM_FREE(jfreeblk, D_JFREEBLK);
4029}
4030
4031/*
4032 * Allocate a new jfreeblk to journal top level block pointer when truncating
4033 * a file.  The caller must add this to the worklist when lk is held.
4034 */
4035static struct jfreeblk *
4036newjfreeblk(freeblks, lbn, blkno, frags)
4037	struct freeblks *freeblks;
4038	ufs_lbn_t lbn;
4039	ufs2_daddr_t blkno;
4040	int frags;
4041{
4042	struct jfreeblk *jfreeblk;
4043
4044	jfreeblk = malloc(sizeof(*jfreeblk), M_JFREEBLK, M_SOFTDEP_FLAGS);
4045	workitem_alloc(&jfreeblk->jf_dep.jb_list, D_JFREEBLK,
4046	    freeblks->fb_list.wk_mp);
4047	jfreeblk->jf_dep.jb_jsegdep = newjsegdep(&jfreeblk->jf_dep.jb_list);
4048	jfreeblk->jf_dep.jb_freeblks = freeblks;
4049	jfreeblk->jf_ino = freeblks->fb_inum;
4050	jfreeblk->jf_lbn = lbn;
4051	jfreeblk->jf_blkno = blkno;
4052	jfreeblk->jf_frags = frags;
4053	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jfreeblk->jf_dep, jb_deps);
4054
4055	return (jfreeblk);
4056}
4057
4058/*
4059 * Allocate a new jtrunc to track a partial truncation.
4060 */
4061static struct jtrunc *
4062newjtrunc(freeblks, size, extsize)
4063	struct freeblks *freeblks;
4064	off_t size;
4065	int extsize;
4066{
4067	struct jtrunc *jtrunc;
4068
4069	jtrunc = malloc(sizeof(*jtrunc), M_JTRUNC, M_SOFTDEP_FLAGS);
4070	workitem_alloc(&jtrunc->jt_dep.jb_list, D_JTRUNC,
4071	    freeblks->fb_list.wk_mp);
4072	jtrunc->jt_dep.jb_jsegdep = newjsegdep(&jtrunc->jt_dep.jb_list);
4073	jtrunc->jt_dep.jb_freeblks = freeblks;
4074	jtrunc->jt_ino = freeblks->fb_inum;
4075	jtrunc->jt_size = size;
4076	jtrunc->jt_extsize = extsize;
4077	LIST_INSERT_HEAD(&freeblks->fb_jblkdephd, &jtrunc->jt_dep, jb_deps);
4078
4079	return (jtrunc);
4080}
4081
4082/*
4083 * If we're canceling a new bitmap we have to search for another ref
4084 * to move into the bmsafemap dep.  This might be better expressed
4085 * with another structure.
4086 */
4087static void
4088move_newblock_dep(jaddref, inodedep)
4089	struct jaddref *jaddref;
4090	struct inodedep *inodedep;
4091{
4092	struct inoref *inoref;
4093	struct jaddref *jaddrefn;
4094
4095	jaddrefn = NULL;
4096	for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4097	    inoref = TAILQ_NEXT(inoref, if_deps)) {
4098		if ((jaddref->ja_state & NEWBLOCK) &&
4099		    inoref->if_list.wk_type == D_JADDREF) {
4100			jaddrefn = (struct jaddref *)inoref;
4101			break;
4102		}
4103	}
4104	if (jaddrefn == NULL)
4105		return;
4106	jaddrefn->ja_state &= ~(ATTACHED | UNDONE);
4107	jaddrefn->ja_state |= jaddref->ja_state &
4108	    (ATTACHED | UNDONE | NEWBLOCK);
4109	jaddref->ja_state &= ~(ATTACHED | UNDONE | NEWBLOCK);
4110	jaddref->ja_state |= ATTACHED;
4111	LIST_REMOVE(jaddref, ja_bmdeps);
4112	LIST_INSERT_HEAD(&inodedep->id_bmsafemap->sm_jaddrefhd, jaddrefn,
4113	    ja_bmdeps);
4114}
4115
4116/*
4117 * Cancel a jaddref either before it has been written or while it is being
4118 * written.  This happens when a link is removed before the add reaches
4119 * the disk.  The jaddref dependency is kept linked into the bmsafemap
4120 * and inode to prevent the link count or bitmap from reaching the disk
4121 * until handle_workitem_remove() re-adjusts the counts and bitmaps as
4122 * required.
4123 *
4124 * Returns 1 if the canceled addref requires journaling of the remove and
4125 * 0 otherwise.
4126 */
4127static int
4128cancel_jaddref(jaddref, inodedep, wkhd)
4129	struct jaddref *jaddref;
4130	struct inodedep *inodedep;
4131	struct workhead *wkhd;
4132{
4133	struct inoref *inoref;
4134	struct jsegdep *jsegdep;
4135	int needsj;
4136
4137	KASSERT((jaddref->ja_state & COMPLETE) == 0,
4138	    ("cancel_jaddref: Canceling complete jaddref"));
4139	if (jaddref->ja_state & (INPROGRESS | COMPLETE))
4140		needsj = 1;
4141	else
4142		needsj = 0;
4143	if (inodedep == NULL)
4144		if (inodedep_lookup(jaddref->ja_list.wk_mp, jaddref->ja_ino,
4145		    0, &inodedep) == 0)
4146			panic("cancel_jaddref: Lost inodedep");
4147	/*
4148	 * We must adjust the nlink of any reference operation that follows
4149	 * us so that it is consistent with the in-memory reference.  This
	 * ensures that inode nlink rollbacks always have the correct link
	 * count.
4151	 */
4152	if (needsj == 0) {
4153		for (inoref = TAILQ_NEXT(&jaddref->ja_ref, if_deps); inoref;
4154		    inoref = TAILQ_NEXT(inoref, if_deps)) {
4155			if (inoref->if_state & GOINGAWAY)
4156				break;
4157			inoref->if_nlink--;
4158		}
4159	}
4160	jsegdep = inoref_jseg(&jaddref->ja_ref);
4161	if (jaddref->ja_state & NEWBLOCK)
4162		move_newblock_dep(jaddref, inodedep);
4163	wake_worklist(&jaddref->ja_list);
4164	jaddref->ja_mkdir = NULL;
4165	if (jaddref->ja_state & INPROGRESS) {
4166		jaddref->ja_state &= ~INPROGRESS;
4167		WORKLIST_REMOVE(&jaddref->ja_list);
4168		jwork_insert(wkhd, jsegdep);
4169	} else {
4170		free_jsegdep(jsegdep);
4171		if (jaddref->ja_state & DEPCOMPLETE)
4172			remove_from_journal(&jaddref->ja_list);
4173	}
4174	jaddref->ja_state |= (GOINGAWAY | DEPCOMPLETE);
4175	/*
4176	 * Leave NEWBLOCK jaddrefs on the inodedep so handle_workitem_remove
4177	 * can arrange for them to be freed with the bitmap.  Otherwise we
4178	 * no longer need this addref attached to the inoreflst and it
4179	 * will incorrectly adjust nlink if we leave it.
4180	 */
4181	if ((jaddref->ja_state & NEWBLOCK) == 0) {
4182		TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
4183		    if_deps);
4184		jaddref->ja_state |= COMPLETE;
4185		free_jaddref(jaddref);
4186		return (needsj);
4187	}
4188	/*
4189	 * Leave the head of the list for jsegdeps for fast merging.
4190	 */
4191	if (LIST_FIRST(wkhd) != NULL) {
4192		jaddref->ja_state |= ONWORKLIST;
4193		LIST_INSERT_AFTER(LIST_FIRST(wkhd), &jaddref->ja_list, wk_list);
4194	} else
4195		WORKLIST_INSERT(wkhd, &jaddref->ja_list);
4196
4197	return (needsj);
4198}
4199
4200/*
4201 * Attempt to free a jaddref structure when some work completes.  This
4202 * should only succeed once the entry is written and all dependencies have
4203 * been notified.
4204 */
4205static void
4206free_jaddref(jaddref)
4207	struct jaddref *jaddref;
4208{
4209
4210	if ((jaddref->ja_state & ALLCOMPLETE) != ALLCOMPLETE)
4211		return;
4212	if (jaddref->ja_ref.if_jsegdep)
4213		panic("free_jaddref: segdep attached to jaddref %p(0x%X)\n",
4214		    jaddref, jaddref->ja_state);
4215	if (jaddref->ja_state & NEWBLOCK)
4216		LIST_REMOVE(jaddref, ja_bmdeps);
4217	if (jaddref->ja_state & (INPROGRESS | ONWORKLIST))
4218		panic("free_jaddref: Bad state %p(0x%X)",
4219		    jaddref, jaddref->ja_state);
4220	if (jaddref->ja_mkdir != NULL)
4221		panic("free_jaddref: Work pending, 0x%X\n", jaddref->ja_state);
4222	WORKITEM_FREE(jaddref, D_JADDREF);
4223}
4224
4225/*
4226 * Free a jremref structure once it has been written or discarded.
4227 */
4228static void
4229free_jremref(jremref)
4230	struct jremref *jremref;
4231{
4232
4233	if (jremref->jr_ref.if_jsegdep)
4234		free_jsegdep(jremref->jr_ref.if_jsegdep);
4235	if (jremref->jr_state & INPROGRESS)
4236		panic("free_jremref: IO still pending");
4237	WORKITEM_FREE(jremref, D_JREMREF);
4238}
4239
4240/*
4241 * Free a jnewblk structure.
4242 */
4243static void
4244free_jnewblk(jnewblk)
4245	struct jnewblk *jnewblk;
4246{
4247
4248	if ((jnewblk->jn_state & ALLCOMPLETE) != ALLCOMPLETE)
4249		return;
4250	LIST_REMOVE(jnewblk, jn_deps);
4251	if (jnewblk->jn_dep != NULL)
4252		panic("free_jnewblk: Dependency still attached.");
4253	WORKITEM_FREE(jnewblk, D_JNEWBLK);
4254}
4255
4256/*
 * Cancel a jnewblk which has been made redundant by frag extension.
4258 */
4259static void
4260cancel_jnewblk(jnewblk, wkhd)
4261	struct jnewblk *jnewblk;
4262	struct workhead *wkhd;
4263{
4264	struct jsegdep *jsegdep;
4265
4266	CTR1(KTR_SUJ, "cancel_jnewblk: blkno %jd", jnewblk->jn_blkno);
4267	jsegdep = jnewblk->jn_jsegdep;
4268	if (jnewblk->jn_jsegdep == NULL || jnewblk->jn_dep == NULL)
4269		panic("cancel_jnewblk: Invalid state");
	jnewblk->jn_jsegdep = NULL;
4271	jnewblk->jn_dep = NULL;
4272	jnewblk->jn_state |= GOINGAWAY;
4273	if (jnewblk->jn_state & INPROGRESS) {
4274		jnewblk->jn_state &= ~INPROGRESS;
4275		WORKLIST_REMOVE(&jnewblk->jn_list);
4276		jwork_insert(wkhd, jsegdep);
4277	} else {
4278		free_jsegdep(jsegdep);
4279		remove_from_journal(&jnewblk->jn_list);
4280	}
4281	wake_worklist(&jnewblk->jn_list);
4282	WORKLIST_INSERT(wkhd, &jnewblk->jn_list);
4283}
4284
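/*
 * Free a jfreeblk or jtrunc via the common jblkdep header once it is no
 * longer needed.
 */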
4285static void
4286free_jblkdep(jblkdep)
4287	struct jblkdep *jblkdep;
4288{
4289
4290	if (jblkdep->jb_list.wk_type == D_JFREEBLK)
4291		WORKITEM_FREE(jblkdep, D_JFREEBLK);
4292	else if (jblkdep->jb_list.wk_type == D_JTRUNC)
4293		WORKITEM_FREE(jblkdep, D_JTRUNC);
4294	else
4295		panic("free_jblkdep: Unexpected type %s",
4296		    TYPENAME(jblkdep->jb_list.wk_type));
4297}
4298
4299/*
4300 * Free a single jseg once it is no longer referenced in memory or on
4301 * disk.  Reclaim journal blocks and dependencies waiting for the segment
4302 * to disappear.
4303 */
4304static void
4305free_jseg(jseg, jblocks)
4306	struct jseg *jseg;
4307	struct jblocks *jblocks;
4308{
4309	struct freework *freework;
4310
4311	/*
4312	 * Free freework structures that were lingering to indicate freed
4313	 * indirect blocks that forced journal write ordering on reallocate.
4314	 */
4315	while ((freework = LIST_FIRST(&jseg->js_indirs)) != NULL)
4316		indirblk_remove(freework);
4317	if (jblocks->jb_oldestseg == jseg)
4318		jblocks->jb_oldestseg = TAILQ_NEXT(jseg, js_next);
4319	TAILQ_REMOVE(&jblocks->jb_segs, jseg, js_next);
4320	jblocks_free(jblocks, jseg->js_list.wk_mp, jseg->js_size);
4321	KASSERT(LIST_EMPTY(&jseg->js_entries),
4322	    ("free_jseg: Freed jseg has valid entries."));
4323	WORKITEM_FREE(jseg, D_JSEG);
4324}
4325
4326/*
4327 * Free all jsegs that meet the criteria for being reclaimed and update
4328 * oldestseg.
4329 */
4330static void
4331free_jsegs(jblocks)
4332	struct jblocks *jblocks;
4333{
4334	struct jseg *jseg;
4335
4336	/*
4337	 * Free only those jsegs which have none allocated before them to
4338	 * preserve the journal space ordering.
4339	 */
4340	while ((jseg = TAILQ_FIRST(&jblocks->jb_segs)) != NULL) {
4341		/*
4342		 * Only reclaim space when nothing depends on this journal
4343		 * set and another set has written that it is no longer
4344		 * valid.
4345		 */
4346		if (jseg->js_refs != 0) {
4347			jblocks->jb_oldestseg = jseg;
4348			return;
4349		}
4350		if ((jseg->js_state & ALLCOMPLETE) != ALLCOMPLETE)
4351			break;
4352		if (jseg->js_seq > jblocks->jb_oldestwrseq)
4353			break;
4354		/*
4355		 * We can free jsegs that didn't write entries when
4356		 * oldestwrseq == js_seq.
4357		 */
4358		if (jseg->js_seq == jblocks->jb_oldestwrseq &&
4359		    jseg->js_cnt != 0)
4360			break;
4361		free_jseg(jseg, jblocks);
4362	}
4363	/*
4364	 * If we exited the loop above we still must discover the
4365	 * oldest valid segment.
4366	 */
4367	if (jseg)
4368		for (jseg = jblocks->jb_oldestseg; jseg != NULL;
4369		     jseg = TAILQ_NEXT(jseg, js_next))
4370			if (jseg->js_refs != 0)
4371				break;
4372	jblocks->jb_oldestseg = jseg;
4373	/*
4374	 * The journal has no valid records but some jsegs may still be
4375	 * waiting on oldestwrseq to advance.  We force a small record
4376	 * out to permit these lingering records to be reclaimed.
4377	 */
4378	if (jblocks->jb_oldestseg == NULL && !TAILQ_EMPTY(&jblocks->jb_segs))
4379		jblocks->jb_needseg = 1;
4380}
4381
4382/*
4383 * Release one reference to a jseg and free it if the count reaches 0.  This
4384 * should eventually reclaim journal space as well.
4385 */
4386static void
4387rele_jseg(jseg)
4388	struct jseg *jseg;
4389{
4390
4391	KASSERT(jseg->js_refs > 0,
	    ("rele_jseg: Invalid refcnt %d", jseg->js_refs));
4393	if (--jseg->js_refs != 0)
4394		return;
4395	free_jsegs(jseg->js_jblocks);
4396}
4397
4398/*
4399 * Release a jsegdep and decrement the jseg count.
4400 */
4401static void
4402free_jsegdep(jsegdep)
4403	struct jsegdep *jsegdep;
4404{
4405
4406	if (jsegdep->jd_seg)
4407		rele_jseg(jsegdep->jd_seg);
4408	WORKITEM_FREE(jsegdep, D_JSEGDEP);
4409}
4410
4411/*
4412 * Wait for a journal item to make it to disk.  Initiate journal processing
4413 * if required.
4414 */
4415static int
4416jwait(wk, waitfor)
4417	struct worklist *wk;
4418	int waitfor;
4419{
4420
4421	/*
4422	 * Blocking journal waits cause slow synchronous behavior.  Record
4423	 * stats on the frequency of these blocking operations.
4424	 */
4425	if (waitfor == MNT_WAIT) {
4426		stat_journal_wait++;
4427		switch (wk->wk_type) {
4428		case D_JREMREF:
4429		case D_JMVREF:
4430			stat_jwait_filepage++;
4431			break;
4432		case D_JTRUNC:
4433		case D_JFREEBLK:
4434			stat_jwait_freeblks++;
4435			break;
4436		case D_JNEWBLK:
4437			stat_jwait_newblk++;
4438			break;
4439		case D_JADDREF:
4440			stat_jwait_inode++;
4441			break;
4442		default:
4443			break;
4444		}
4445	}
4446	/*
4447	 * If IO has not started we process the journal.  We can't mark the
4448	 * worklist item as IOWAITING because we drop the lock while
4449	 * processing the journal and the worklist entry may be freed after
4450	 * this point.  The caller may call back in and re-issue the request.
4451	 */
4452	if ((wk->wk_state & INPROGRESS) == 0) {
4453		softdep_process_journal(wk->wk_mp, wk, waitfor);
4454		if (waitfor != MNT_WAIT)
4455			return (EBUSY);
4456		return (0);
4457	}
4458	if (waitfor != MNT_WAIT)
4459		return (EBUSY);
4460	wait_worklist(wk, "jwait");
4461	return (0);
4462}
4463
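/*
 * A sketch of the usual caller pattern for jwait() (illustrative only):
 * because the worklist entry may be freed once the lock is dropped,
 * callers re-find the dependency and retry after each wait, e.g.
 *
 *	restart:
 *	LIST_FOREACH(wk, ..., wk_list)
 *		if (still_pending(wk)) {
 *			jwait(wk, MNT_WAIT);
 *			goto restart;
 *		}
 *
 * where still_pending() stands in for the caller-specific check.
 */
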
4464/*
4465 * Lookup an inodedep based on an inode pointer and set the nlinkdelta as
4466 * appropriate.  This is a convenience function to reduce duplicate code
4467 * for the setup and revert functions below.
4468 */
4469static struct inodedep *
4470inodedep_lookup_ip(ip)
4471	struct inode *ip;
4472{
4473	struct inodedep *inodedep;
4474	int dflags;
4475
4476	KASSERT(ip->i_nlink >= ip->i_effnlink,
4477	    ("inodedep_lookup_ip: bad delta"));
4478	dflags = DEPALLOC;
4479	if (IS_SNAPSHOT(ip))
4480		dflags |= NODELAY;
4481	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags,
4482	    &inodedep);
4483	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
4484	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
4485
4486	return (inodedep);
4487}
4488
4489/*
4490 * Called prior to creating a new inode and linking it to a directory.  The
4491 * jaddref structure must already be allocated by softdep_setup_inomapdep
4492 * and it is discovered here so we can initialize the mode and update
4493 * nlinkdelta.
4494 */
4495void
4496softdep_setup_create(dp, ip)
4497	struct inode *dp;
4498	struct inode *ip;
4499{
4500	struct inodedep *inodedep;
4501	struct jaddref *jaddref;
4502	struct vnode *dvp;
4503
4504	KASSERT(ip->i_nlink == 1,
4505	    ("softdep_setup_create: Invalid link count."));
4506	dvp = ITOV(dp);
4507	ACQUIRE_LOCK(&lk);
4508	inodedep = inodedep_lookup_ip(ip);
4509	if (DOINGSUJ(dvp)) {
4510		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4511		    inoreflst);
4512		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
4513		    ("softdep_setup_create: No addref structure present."));
4514	}
4515	softdep_prelink(dvp, NULL);
4516	FREE_LOCK(&lk);
4517}
4518
4519/*
4520 * Create a jaddref structure to track the addition of a DOTDOT link when
4521 * we are reparenting an inode as part of a rename.  This jaddref will be
4522 * found by softdep_setup_directory_change.  Adjusts nlinkdelta for
4523 * non-journaling softdep.
4524 */
4525void
4526softdep_setup_dotdot_link(dp, ip)
4527	struct inode *dp;
4528	struct inode *ip;
4529{
4530	struct inodedep *inodedep;
4531	struct jaddref *jaddref;
4532	struct vnode *dvp;
4533	struct vnode *vp;
4534
4535	dvp = ITOV(dp);
4536	vp = ITOV(ip);
4537	jaddref = NULL;
4538	/*
4539	 * We don't set MKDIR_PARENT as this is not tied to a mkdir and
4540	 * is used as a normal link would be.
4541	 */
4542	if (DOINGSUJ(dvp))
4543		jaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4544		    dp->i_effnlink - 1, dp->i_mode);
4545	ACQUIRE_LOCK(&lk);
4546	inodedep = inodedep_lookup_ip(dp);
4547	if (jaddref)
4548		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4549		    if_deps);
4550	softdep_prelink(dvp, ITOV(ip));
4551	FREE_LOCK(&lk);
4552}
4553
4554/*
4555 * Create a jaddref structure to track a new link to an inode.  The directory
4556 * offset is not known until softdep_setup_directory_add or
4557 * softdep_setup_directory_change.  Adjusts nlinkdelta for non-journaling
4558 * softdep.
4559 */
4560void
4561softdep_setup_link(dp, ip)
4562	struct inode *dp;
4563	struct inode *ip;
4564{
4565	struct inodedep *inodedep;
4566	struct jaddref *jaddref;
4567	struct vnode *dvp;
4568
4569	dvp = ITOV(dp);
4570	jaddref = NULL;
4571	if (DOINGSUJ(dvp))
4572		jaddref = newjaddref(dp, ip->i_number, 0, ip->i_effnlink - 1,
4573		    ip->i_mode);
4574	ACQUIRE_LOCK(&lk);
4575	inodedep = inodedep_lookup_ip(ip);
4576	if (jaddref)
4577		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4578		    if_deps);
4579	softdep_prelink(dvp, ITOV(ip));
4580	FREE_LOCK(&lk);
4581}
4582
4583/*
4584 * Called to create the jaddref structures to track . and .. references as
4585 * well as lookup and further initialize the incomplete jaddref created
4586 * by softdep_setup_inomapdep when the inode was allocated.  Adjusts
4587 * nlinkdelta for non-journaling softdep.
4588 */
4589void
4590softdep_setup_mkdir(dp, ip)
4591	struct inode *dp;
4592	struct inode *ip;
4593{
4594	struct inodedep *inodedep;
4595	struct jaddref *dotdotaddref;
4596	struct jaddref *dotaddref;
4597	struct jaddref *jaddref;
4598	struct vnode *dvp;
4599
4600	dvp = ITOV(dp);
4601	dotaddref = dotdotaddref = NULL;
4602	if (DOINGSUJ(dvp)) {
4603		dotaddref = newjaddref(ip, ip->i_number, DOT_OFFSET, 1,
4604		    ip->i_mode);
4605		dotaddref->ja_state |= MKDIR_BODY;
4606		dotdotaddref = newjaddref(ip, dp->i_number, DOTDOT_OFFSET,
4607		    dp->i_effnlink - 1, dp->i_mode);
4608		dotdotaddref->ja_state |= MKDIR_PARENT;
4609	}
4610	ACQUIRE_LOCK(&lk);
4611	inodedep = inodedep_lookup_ip(ip);
4612	if (DOINGSUJ(dvp)) {
4613		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4614		    inoreflst);
4615		KASSERT(jaddref != NULL,
4616		    ("softdep_setup_mkdir: No addref structure present."));
4617		KASSERT(jaddref->ja_parent == dp->i_number,
4618		    ("softdep_setup_mkdir: bad parent %ju",
4619		    (uintmax_t)jaddref->ja_parent));
4620		TAILQ_INSERT_BEFORE(&jaddref->ja_ref, &dotaddref->ja_ref,
4621		    if_deps);
4622	}
4623	inodedep = inodedep_lookup_ip(dp);
4624	if (DOINGSUJ(dvp))
4625		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst,
4626		    &dotdotaddref->ja_ref, if_deps);
4627	softdep_prelink(ITOV(dp), NULL);
4628	FREE_LOCK(&lk);
4629}
4630
4631/*
4632 * Called to track nlinkdelta of the inode and parent directories prior to
4633 * unlinking a directory.
4634 */
4635void
4636softdep_setup_rmdir(dp, ip)
4637	struct inode *dp;
4638	struct inode *ip;
4639{
4640	struct vnode *dvp;
4641
4642	dvp = ITOV(dp);
4643	ACQUIRE_LOCK(&lk);
4644	(void) inodedep_lookup_ip(ip);
4645	(void) inodedep_lookup_ip(dp);
4646	softdep_prelink(dvp, ITOV(ip));
4647	FREE_LOCK(&lk);
4648}
4649
4650/*
4651 * Called to track nlinkdelta of the inode and parent directories prior to
4652 * unlink.
4653 */
4654void
4655softdep_setup_unlink(dp, ip)
4656	struct inode *dp;
4657	struct inode *ip;
4658{
4659	struct vnode *dvp;
4660
4661	dvp = ITOV(dp);
4662	ACQUIRE_LOCK(&lk);
4663	(void) inodedep_lookup_ip(ip);
4664	(void) inodedep_lookup_ip(dp);
4665	softdep_prelink(dvp, ITOV(ip));
4666	FREE_LOCK(&lk);
4667}
4668
4669/*
4670 * Called to release the journal structures created by a failed non-directory
4671 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4672 */
4673void
4674softdep_revert_create(dp, ip)
4675	struct inode *dp;
4676	struct inode *ip;
4677{
4678	struct inodedep *inodedep;
4679	struct jaddref *jaddref;
4680	struct vnode *dvp;
4681
4682	dvp = ITOV(dp);
4683	ACQUIRE_LOCK(&lk);
4684	inodedep = inodedep_lookup_ip(ip);
4685	if (DOINGSUJ(dvp)) {
4686		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4687		    inoreflst);
4688		KASSERT(jaddref->ja_parent == dp->i_number,
4689		    ("softdep_revert_create: addref parent mismatch"));
4690		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4691	}
4692	FREE_LOCK(&lk);
4693}
4694
4695/*
4696 * Called to release the journal structures created by a failed dotdot link
4697 * creation.  Adjusts nlinkdelta for non-journaling softdep.
4698 */
4699void
4700softdep_revert_dotdot_link(dp, ip)
4701	struct inode *dp;
4702	struct inode *ip;
4703{
4704	struct inodedep *inodedep;
4705	struct jaddref *jaddref;
4706	struct vnode *dvp;
4707
4708	dvp = ITOV(dp);
4709	ACQUIRE_LOCK(&lk);
4710	inodedep = inodedep_lookup_ip(dp);
4711	if (DOINGSUJ(dvp)) {
4712		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4713		    inoreflst);
4714		KASSERT(jaddref->ja_parent == ip->i_number,
4715		    ("softdep_revert_dotdot_link: addref parent mismatch"));
4716		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4717	}
4718	FREE_LOCK(&lk);
4719}
4720
4721/*
4722 * Called to release the journal structures created by a failed link
4723 * addition.  Adjusts nlinkdelta for non-journaling softdep.
4724 */
4725void
4726softdep_revert_link(dp, ip)
4727	struct inode *dp;
4728	struct inode *ip;
4729{
4730	struct inodedep *inodedep;
4731	struct jaddref *jaddref;
4732	struct vnode *dvp;
4733
4734	dvp = ITOV(dp);
4735	ACQUIRE_LOCK(&lk);
4736	inodedep = inodedep_lookup_ip(ip);
4737	if (DOINGSUJ(dvp)) {
4738		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4739		    inoreflst);
4740		KASSERT(jaddref->ja_parent == dp->i_number,
4741		    ("softdep_revert_link: addref parent mismatch"));
4742		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4743	}
4744	FREE_LOCK(&lk);
4745}
4746
4747/*
4748 * Called to release the journal structures created by a failed mkdir
4749 * attempt.  Adjusts nlinkdelta for non-journaling softdep.
4750 */
4751void
4752softdep_revert_mkdir(dp, ip)
4753	struct inode *dp;
4754	struct inode *ip;
4755{
4756	struct inodedep *inodedep;
4757	struct jaddref *jaddref;
4758	struct jaddref *dotaddref;
4759	struct vnode *dvp;
4760
4761	dvp = ITOV(dp);
4762
4763	ACQUIRE_LOCK(&lk);
4764	inodedep = inodedep_lookup_ip(dp);
4765	if (DOINGSUJ(dvp)) {
4766		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4767		    inoreflst);
4768		KASSERT(jaddref->ja_parent == ip->i_number,
4769		    ("softdep_revert_mkdir: dotdot addref parent mismatch"));
4770		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4771	}
4772	inodedep = inodedep_lookup_ip(ip);
4773	if (DOINGSUJ(dvp)) {
4774		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
4775		    inoreflst);
4776		KASSERT(jaddref->ja_parent == dp->i_number,
4777		    ("softdep_revert_mkdir: addref parent mismatch"));
4778		dotaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
4779		    inoreflst, if_deps);
4780		cancel_jaddref(jaddref, inodedep, &inodedep->id_inowait);
4781		KASSERT(dotaddref->ja_parent == ip->i_number,
4782		    ("softdep_revert_mkdir: dot addref parent mismatch"));
4783		cancel_jaddref(dotaddref, inodedep, &inodedep->id_inowait);
4784	}
4785	FREE_LOCK(&lk);
4786}
4787
4788/*
4789 * Called to correct nlinkdelta after a failed rmdir.
4790 */
4791void
4792softdep_revert_rmdir(dp, ip)
4793	struct inode *dp;
4794	struct inode *ip;
4795{
4796
4797	ACQUIRE_LOCK(&lk);
4798	(void) inodedep_lookup_ip(ip);
4799	(void) inodedep_lookup_ip(dp);
4800	FREE_LOCK(&lk);
4801}
4802
4803/*
4804 * Protecting the freemaps (or bitmaps).
4805 *
4806 * To eliminate the need to execute fsck before mounting a filesystem
4807 * after a power failure, one must (conservatively) guarantee that the
4808 * on-disk copy of the bitmaps never indicate that a live inode or block is
4809 * free.  So, when a block or inode is allocated, the bitmap should be
4810 * updated (on disk) before any new pointers.  When a block or inode is
4811 * freed, the bitmap should not be updated until all pointers have been
4812 * reset.  The latter dependency is handled by the delayed de-allocation
4813 * approach described below for block and inode de-allocation.  The former
4814 * dependency is handled by calling the following procedure when a block or
4815 * inode is allocated. When an inode is allocated an "inodedep" is created
4816 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
4817 * Each "inodedep" is also inserted into the hash indexing structure so
4818 * that any additional link additions can be made dependent on the inode
4819 * allocation.
4820 *
4821 * The ufs filesystem maintains a number of free block counts (e.g., per
4822 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
4823 * in addition to the bitmaps.  These counts are used to improve efficiency
4824 * during allocation and therefore must be consistent with the bitmaps.
4825 * There is no convenient way to guarantee post-crash consistency of these
4826 * counts with simple update ordering, for two main reasons: (1) The counts
4827 * and bitmaps for a single cylinder group block are not in the same disk
4828 * sector.  If a disk write is interrupted (e.g., by power failure), one may
4829 * be written and the other not.  (2) Some of the counts are located in the
4830 * superblock rather than the cylinder group block. So, we focus our soft
4831 * updates implementation on protecting the bitmaps. When mounting a
4832 * filesystem, we recompute the auxiliary counts from the bitmaps.
4833 */
4834
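/*
 * To make the ordering concrete (an illustration, not code): an
 * allocation reaches the disk as
 *
 *	1) cylinder group block (bitmap marks the inode or block in use)
 *	2) the initialized inode or written data block
 *	3) the pointer to it (directory entry or block pointer)
 *
 * and a deallocation reverses this, clearing every on-disk pointer
 * before the bitmap may show the inode or block as free.
 */
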
4835/*
4836 * Called just after updating the cylinder group block to allocate an inode.
4837 */
4838void
4839softdep_setup_inomapdep(bp, ip, newinum, mode)
4840	struct buf *bp;		/* buffer for cylgroup block with inode map */
4841	struct inode *ip;	/* inode related to allocation */
4842	ino_t newinum;		/* new inode number being allocated */
4843	int mode;
4844{
4845	struct inodedep *inodedep;
4846	struct bmsafemap *bmsafemap;
4847	struct jaddref *jaddref;
4848	struct mount *mp;
4849	struct fs *fs;
4850
4851	mp = UFSTOVFS(ip->i_ump);
4852	fs = ip->i_ump->um_fs;
4853	jaddref = NULL;
4854
4855	/*
4856	 * Allocate the journal reference add structure so that the bitmap
4857	 * can be dependent on it.
4858	 */
4859	if (MOUNTEDSUJ(mp)) {
4860		jaddref = newjaddref(ip, newinum, 0, 0, mode);
4861		jaddref->ja_state |= NEWBLOCK;
4862	}
4863
4864	/*
4865	 * Create a dependency for the newly allocated inode.
4866	 * Panic if it already exists as something is seriously wrong.
4867	 * Otherwise add it to the dependency list for the buffer holding
4868	 * the cylinder group map from which it was allocated.
4869	 *
4870	 * We have to preallocate a bmsafemap entry in case it is needed
4871	 * in bmsafemap_lookup since once we allocate the inodedep, we
4872	 * have to finish initializing it before we can FREE_LOCK().
4873	 * By preallocating, we avoid FREE_LOCK() while doing a malloc
4874	 * in bmsafemap_lookup. We cannot call bmsafemap_lookup before
4875	 * creating the inodedep as it can be freed during the time
4876	 * that we FREE_LOCK() while allocating the inodedep. We must
4877	 * call workitem_alloc() before entering the locked section as
	 * it also acquires the lock and we must avoid trying to do so
	 * recursively.
4880	 */
4881	bmsafemap = malloc(sizeof(struct bmsafemap),
4882	    M_BMSAFEMAP, M_SOFTDEP_FLAGS);
4883	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
4884	ACQUIRE_LOCK(&lk);
4885	if ((inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep)))
		panic("softdep_setup_inomapdep: dependency %p for new "
		    "inode already exists", inodedep);
4888	bmsafemap = bmsafemap_lookup(mp, bp, ino_to_cg(fs, newinum), bmsafemap);
4889	if (jaddref) {
4890		LIST_INSERT_HEAD(&bmsafemap->sm_jaddrefhd, jaddref, ja_bmdeps);
4891		TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jaddref->ja_ref,
4892		    if_deps);
4893	} else {
4894		inodedep->id_state |= ONDEPLIST;
4895		LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
4896	}
4897	inodedep->id_bmsafemap = bmsafemap;
4898	inodedep->id_state &= ~DEPCOMPLETE;
4899	FREE_LOCK(&lk);
4900}
4901
4902/*
4903 * Called just after updating the cylinder group block to
4904 * allocate block or fragment.
4905 */
4906void
4907softdep_setup_blkmapdep(bp, mp, newblkno, frags, oldfrags)
4908	struct buf *bp;		/* buffer for cylgroup block with block map */
4909	struct mount *mp;	/* filesystem doing allocation */
4910	ufs2_daddr_t newblkno;	/* number of newly allocated block */
4911	int frags;		/* Number of fragments. */
4912	int oldfrags;		/* Previous number of fragments for extend. */
4913{
4914	struct newblk *newblk;
4915	struct bmsafemap *bmsafemap;
4916	struct jnewblk *jnewblk;
4917	struct fs *fs;
4918
4919	fs = VFSTOUFS(mp)->um_fs;
4920	jnewblk = NULL;
4921	/*
4922	 * Create a dependency for the newly allocated block.
4923	 * Add it to the dependency list for the buffer holding
4924	 * the cylinder group map from which it was allocated.
4925	 */
4926	if (MOUNTEDSUJ(mp)) {
4927		jnewblk = malloc(sizeof(*jnewblk), M_JNEWBLK, M_SOFTDEP_FLAGS);
4928		workitem_alloc(&jnewblk->jn_list, D_JNEWBLK, mp);
4929		jnewblk->jn_jsegdep = newjsegdep(&jnewblk->jn_list);
4930		jnewblk->jn_state = ATTACHED;
4931		jnewblk->jn_blkno = newblkno;
4932		jnewblk->jn_frags = frags;
4933		jnewblk->jn_oldfrags = oldfrags;
4934#ifdef SUJ_DEBUG
4935		{
4936			struct cg *cgp;
4937			uint8_t *blksfree;
4938			long bno;
4939			int i;
4940
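			/*
			 * Verify that none of the fragments recorded as
			 * newly allocated is still marked free in the
			 * cylinder group map.
			 */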
4941			cgp = (struct cg *)bp->b_data;
4942			blksfree = cg_blksfree(cgp);
4943			bno = dtogd(fs, jnewblk->jn_blkno);
4944			for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags;
4945			    i++) {
4946				if (isset(blksfree, bno + i))
4947					panic("softdep_setup_blkmapdep: "
4948					    "free fragment %d from %d-%d "
4949					    "state 0x%X dep %p", i,
4950					    jnewblk->jn_oldfrags,
4951					    jnewblk->jn_frags,
4952					    jnewblk->jn_state,
4953					    jnewblk->jn_dep);
4954			}
4955		}
4956#endif
4957	}
4958
4959	CTR3(KTR_SUJ,
4960	    "softdep_setup_blkmapdep: blkno %jd frags %d oldfrags %d",
4961	    newblkno, frags, oldfrags);
4962	ACQUIRE_LOCK(&lk);
4963	if (newblk_lookup(mp, newblkno, DEPALLOC, &newblk) != 0)
4964		panic("softdep_setup_blkmapdep: found block");
	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, newblkno), NULL);
4967	if (jnewblk) {
4968		jnewblk->jn_dep = (struct worklist *)newblk;
4969		LIST_INSERT_HEAD(&bmsafemap->sm_jnewblkhd, jnewblk, jn_deps);
4970	} else {
4971		newblk->nb_state |= ONDEPLIST;
4972		LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
4973	}
4974	newblk->nb_bmsafemap = bmsafemap;
4975	newblk->nb_jnewblk = jnewblk;
4976	FREE_LOCK(&lk);
4977}
4978
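/*
 * Hash a cylinder group number, perturbed by the filesystem pointer,
 * into the bmsafemap hash table.
 */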
4979#define	BMSAFEMAP_HASH(fs, cg) \
4980      (&bmsafemap_hashtbl[((((register_t)(fs)) >> 13) + (cg)) & bmsafemap_hash])
4981
4982static int
4983bmsafemap_find(bmsafemaphd, mp, cg, bmsafemapp)
4984	struct bmsafemap_hashhead *bmsafemaphd;
4985	struct mount *mp;
4986	int cg;
4987	struct bmsafemap **bmsafemapp;
4988{
4989	struct bmsafemap *bmsafemap;
4990
4991	LIST_FOREACH(bmsafemap, bmsafemaphd, sm_hash)
4992		if (bmsafemap->sm_list.wk_mp == mp && bmsafemap->sm_cg == cg)
4993			break;
4994	if (bmsafemap) {
4995		*bmsafemapp = bmsafemap;
4996		return (1);
4997	}
4998	*bmsafemapp = NULL;
4999
5000	return (0);
5001}
5002
5003/*
5004 * Find the bmsafemap associated with a cylinder group buffer.
5005 * If none exists, create one. The buffer must be locked when
 * If none exists, create one.  The buffer must be locked and the
 * softdep lock held when this routine is called.  To avoid giving
 * up the lock while
5009 * provided. If it is provided but not needed, it is freed.
5010 */
5011static struct bmsafemap *
5012bmsafemap_lookup(mp, bp, cg, newbmsafemap)
5013	struct mount *mp;
5014	struct buf *bp;
5015	int cg;
5016	struct bmsafemap *newbmsafemap;
5017{
5018	struct bmsafemap_hashhead *bmsafemaphd;
5019	struct bmsafemap *bmsafemap, *collision;
5020	struct worklist *wk;
5021	struct fs *fs;
5022
5023	rw_assert(&lk, RA_WLOCKED);
5024	KASSERT(bp != NULL, ("bmsafemap_lookup: missing buffer"));
5025	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5026		if (wk->wk_type == D_BMSAFEMAP) {
5027			if (newbmsafemap)
5028				WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5029			return (WK_BMSAFEMAP(wk));
5030		}
5031	}
5032	fs = VFSTOUFS(mp)->um_fs;
5033	bmsafemaphd = BMSAFEMAP_HASH(fs, cg);
5034	if (bmsafemap_find(bmsafemaphd, mp, cg, &bmsafemap) == 1) {
5035		if (newbmsafemap)
5036			WORKITEM_FREE(newbmsafemap, D_BMSAFEMAP);
5037		return (bmsafemap);
5038	}
5039	if (newbmsafemap) {
5040		bmsafemap = newbmsafemap;
5041	} else {
5042		FREE_LOCK(&lk);
5043		bmsafemap = malloc(sizeof(struct bmsafemap),
5044			M_BMSAFEMAP, M_SOFTDEP_FLAGS);
5045		workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
5046		ACQUIRE_LOCK(&lk);
5047	}
5048	bmsafemap->sm_buf = bp;
5049	LIST_INIT(&bmsafemap->sm_inodedephd);
5050	LIST_INIT(&bmsafemap->sm_inodedepwr);
5051	LIST_INIT(&bmsafemap->sm_newblkhd);
5052	LIST_INIT(&bmsafemap->sm_newblkwr);
5053	LIST_INIT(&bmsafemap->sm_jaddrefhd);
5054	LIST_INIT(&bmsafemap->sm_jnewblkhd);
5055	LIST_INIT(&bmsafemap->sm_freehd);
5056	LIST_INIT(&bmsafemap->sm_freewr);
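	/*
	 * The lock may have been dropped above to allocate; check whether
	 * another thread installed a bmsafemap for this cg meanwhile.
	 */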
5057	if (bmsafemap_find(bmsafemaphd, mp, cg, &collision) == 1) {
5058		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
5059		return (collision);
5060	}
5061	bmsafemap->sm_cg = cg;
5062	LIST_INSERT_HEAD(bmsafemaphd, bmsafemap, sm_hash);
5063	LIST_INSERT_HEAD(&VFSTOUFS(mp)->softdep_dirtycg, bmsafemap, sm_next);
5064	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
5065	return (bmsafemap);
5066}
5067
5068/*
5069 * Direct block allocation dependencies.
5070 *
5071 * When a new block is allocated, the corresponding disk locations must be
5072 * initialized (with zeros or new data) before the on-disk inode points to
5073 * them.  Also, the freemap from which the block was allocated must be
5074 * updated (on disk) before the inode's pointer. These two dependencies are
5075 * independent of each other and are needed for all file blocks and indirect
5076 * blocks that are pointed to directly by the inode.  Just before the
5077 * "in-core" version of the inode is updated with a newly allocated block
5078 * number, a procedure (below) is called to setup allocation dependency
5079 * structures.  These structures are removed when the corresponding
5080 * dependencies are satisfied or when the block allocation becomes obsolete
5081 * (i.e., the file is deleted, the block is de-allocated, or the block is a
5082 * fragment that gets upgraded).  All of these cases are handled in
5083 * procedures described later.
5084 *
5085 * When a file extension causes a fragment to be upgraded, either to a larger
5086 * fragment or to a full block, the on-disk location may change (if the
5087 * previous fragment could not simply be extended). In this case, the old
5088 * fragment must be de-allocated, but not until after the inode's pointer has
5089 * been updated. In most cases, this is handled by later procedures, which
5090 * will construct a "freefrag" structure to be added to the workitem queue
5091 * when the inode update is complete (or obsolete).  The main exception to
5092 * this is when an allocation occurs while a pending allocation dependency
5093 * (for the same block pointer) remains.  This case is handled in the main
5094 * allocation dependency setup procedure by immediately freeing the
5095 * unreferenced fragments.
5096 */
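/*
 * A minimal sketch of the resulting write ordering for one new file block
 * (illustrative only):
 *
 *	written data block, updated bitmap  -->  inode block
 *
 * Until both prerequisites are on disk, the allocdirect created below
 * causes the inode buffer to be rolled back to the previous (old) block
 * pointer before it is written.
 */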
5097void
5098softdep_setup_allocdirect(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5099	struct inode *ip;	/* inode to which block is being added */
5100	ufs_lbn_t off;		/* block pointer within inode */
5101	ufs2_daddr_t newblkno;	/* disk block number being added */
5102	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
5103	long newsize;		/* size of new block */
	long oldsize;		/* size of old block */
5105	struct buf *bp;		/* bp for allocated block */
5106{
5107	struct allocdirect *adp, *oldadp;
5108	struct allocdirectlst *adphead;
5109	struct freefrag *freefrag;
5110	struct inodedep *inodedep;
5111	struct pagedep *pagedep;
5112	struct jnewblk *jnewblk;
5113	struct newblk *newblk;
5114	struct mount *mp;
5115	ufs_lbn_t lbn;
5116
5117	lbn = bp->b_lblkno;
5118	mp = UFSTOVFS(ip->i_ump);
5119	if (oldblkno && oldblkno != newblkno)
5120		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5121	else
5122		freefrag = NULL;
5123
5124	CTR6(KTR_SUJ,
5125	    "softdep_setup_allocdirect: ino %d blkno %jd oldblkno %jd "
	    "off %jd newsize %ld oldsize %ld",
5127	    ip->i_number, newblkno, oldblkno, off, newsize, oldsize);
5128	ACQUIRE_LOCK(&lk);
5129	if (off >= NDADDR) {
5130		if (lbn > 0)
5131			panic("softdep_setup_allocdirect: bad lbn %jd, off %jd",
5132			    lbn, off);
5133		/* allocating an indirect block */
5134		if (oldblkno != 0)
5135			panic("softdep_setup_allocdirect: non-zero indir");
5136	} else {
5137		if (off != lbn)
5138			panic("softdep_setup_allocdirect: lbn %jd != off %jd",
5139			    lbn, off);
5140		/*
5141		 * Allocating a direct block.
5142		 *
5143		 * If we are allocating a directory block, then we must
5144		 * allocate an associated pagedep to track additions and
5145		 * deletions.
5146		 */
5147		if ((ip->i_mode & IFMT) == IFDIR)
5148			pagedep_lookup(mp, bp, ip->i_number, off, DEPALLOC,
5149			    &pagedep);
5150	}
5151	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5152		panic("softdep_setup_allocdirect: lost block");
5153	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5154	    ("softdep_setup_allocdirect: newblk already initialized"));
5155	/*
5156	 * Convert the newblk to an allocdirect.
5157	 */
5158	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5159	adp = (struct allocdirect *)newblk;
5160	newblk->nb_freefrag = freefrag;
5161	adp->ad_offset = off;
5162	adp->ad_oldblkno = oldblkno;
5163	adp->ad_newsize = newsize;
5164	adp->ad_oldsize = oldsize;
5165
5166	/*
5167	 * Finish initializing the journal.
5168	 */
5169	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5170		jnewblk->jn_ino = ip->i_number;
5171		jnewblk->jn_lbn = lbn;
5172		add_to_journal(&jnewblk->jn_list);
5173	}
5174	if (freefrag && freefrag->ff_jdep != NULL &&
5175	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5176		add_to_journal(freefrag->ff_jdep);
5177	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5178	adp->ad_inodedep = inodedep;
5179
5180	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5181	/*
5182	 * The list of allocdirects must be kept in sorted and ascending
5183	 * order so that the rollback routines can quickly determine the
5184	 * first uncommitted block (the size of the file stored on disk
5185	 * ends at the end of the lowest committed fragment, or if there
5186	 * are no fragments, at the end of the highest committed block).
5187	 * Since files generally grow, the typical case is that the new
5188	 * block is to be added at the end of the list. We speed this
5189	 * special case by checking against the last allocdirect in the
5190	 * list before laboriously traversing the list looking for the
5191	 * insertion point.
5192	 */
5193	adphead = &inodedep->id_newinoupdt;
5194	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5195	if (oldadp == NULL || oldadp->ad_offset <= off) {
5196		/* insert at end of list */
5197		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5198		if (oldadp != NULL && oldadp->ad_offset == off)
5199			allocdirect_merge(adphead, adp, oldadp);
5200		FREE_LOCK(&lk);
5201		return;
5202	}
5203	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5204		if (oldadp->ad_offset >= off)
5205			break;
5206	}
5207	if (oldadp == NULL)
5208		panic("softdep_setup_allocdirect: lost entry");
5209	/* insert in middle of list */
5210	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5211	if (oldadp->ad_offset == off)
5212		allocdirect_merge(adphead, adp, oldadp);
5213
5214	FREE_LOCK(&lk);
5215}
5216
5217/*
5218 * Merge a newer and older journal record to be stored either in a
5219 * newblock or freefrag.  This handles aggregating journal records for
5220 * fragment allocation into a second record as well as replacing a
5221 * journal free with an aborted journal allocation.  A segment for the
 * oldest record will be placed on wkhd if it has been written.  If not,
 * the segment for the newer record will suffice.
5224 */
5225static struct worklist *
5226jnewblk_merge(new, old, wkhd)
5227	struct worklist *new;
5228	struct worklist *old;
5229	struct workhead *wkhd;
5230{
5231	struct jnewblk *njnewblk;
5232	struct jnewblk *jnewblk;
5233
5234	/* Handle NULLs to simplify callers. */
5235	if (new == NULL)
5236		return (old);
5237	if (old == NULL)
5238		return (new);
5239	/* Replace a jfreefrag with a jnewblk. */
5240	if (new->wk_type == D_JFREEFRAG) {
5241		if (WK_JNEWBLK(old)->jn_blkno != WK_JFREEFRAG(new)->fr_blkno)
5242			panic("jnewblk_merge: blkno mismatch: %p, %p",
5243			    old, new);
5244		cancel_jfreefrag(WK_JFREEFRAG(new));
5245		return (old);
5246	}
5247	if (old->wk_type != D_JNEWBLK || new->wk_type != D_JNEWBLK)
5248		panic("jnewblk_merge: Bad type: old %d new %d\n",
5249		    old->wk_type, new->wk_type);
5250	/*
5251	 * Handle merging of two jnewblk records that describe
5252	 * different sets of fragments in the same block.
5253	 */
5254	jnewblk = WK_JNEWBLK(old);
5255	njnewblk = WK_JNEWBLK(new);
5256	if (jnewblk->jn_blkno != njnewblk->jn_blkno)
5257		panic("jnewblk_merge: Merging disparate blocks.");
5258	/*
5259	 * The record may be rolled back in the cg.
5260	 */
5261	if (jnewblk->jn_state & UNDONE) {
5262		jnewblk->jn_state &= ~UNDONE;
5263		njnewblk->jn_state |= UNDONE;
5264		njnewblk->jn_state &= ~ATTACHED;
5265	}
5266	/*
5267	 * We modify the newer addref and free the older so that if neither
5268	 * has been written the most up-to-date copy will be on disk.  If
5269	 * both have been written but rolled back we only temporarily need
5270	 * one of them to fix the bits when the cg write completes.
5271	 */
5272	jnewblk->jn_state |= ATTACHED | COMPLETE;
5273	njnewblk->jn_oldfrags = jnewblk->jn_oldfrags;
5274	cancel_jnewblk(jnewblk, wkhd);
5275	WORKLIST_REMOVE(&jnewblk->jn_list);
5276	free_jnewblk(jnewblk);
5277	return (new);
5278}
5279
5280/*
5281 * Replace an old allocdirect dependency with a newer one.
 * This routine must be called with the soft updates lock held.
5283 */
5284static void
5285allocdirect_merge(adphead, newadp, oldadp)
5286	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
5287	struct allocdirect *newadp;	/* allocdirect being added */
5288	struct allocdirect *oldadp;	/* existing allocdirect being checked */
5289{
5290	struct worklist *wk;
5291	struct freefrag *freefrag;
5292
5293	freefrag = NULL;
5294	rw_assert(&lk, RA_WLOCKED);
5295	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
5296	    newadp->ad_oldsize != oldadp->ad_newsize ||
5297	    newadp->ad_offset >= NDADDR)
5298		panic("%s %jd != new %jd || old size %ld != new %ld",
5299		    "allocdirect_merge: old blkno",
5300		    (intmax_t)newadp->ad_oldblkno,
5301		    (intmax_t)oldadp->ad_newblkno,
5302		    newadp->ad_oldsize, oldadp->ad_newsize);
5303	newadp->ad_oldblkno = oldadp->ad_oldblkno;
5304	newadp->ad_oldsize = oldadp->ad_oldsize;
5305	/*
5306	 * If the old dependency had a fragment to free or had never
5307	 * previously had a block allocated, then the new dependency
5308	 * can immediately post its freefrag and adopt the old freefrag.
5309	 * This action is done by swapping the freefrag dependencies.
5310	 * The new dependency gains the old one's freefrag, and the
5311	 * old one gets the new one and then immediately puts it on
5312	 * the worklist when it is freed by free_newblk. It is
5313	 * not possible to do this swap when the old dependency had a
5314	 * non-zero size but no previous fragment to free. This condition
5315	 * arises when the new block is an extension of the old block.
5316	 * Here, the first part of the fragment allocated to the new
5317	 * dependency is part of the block currently claimed on disk by
5318	 * the old dependency, so cannot legitimately be freed until the
5319	 * conditions for the new dependency are fulfilled.
5320	 */
5321	freefrag = newadp->ad_freefrag;
5322	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
5323		newadp->ad_freefrag = oldadp->ad_freefrag;
5324		oldadp->ad_freefrag = freefrag;
5325	}
5326	/*
5327	 * If we are tracking a new directory-block allocation,
5328	 * move it from the old allocdirect to the new allocdirect.
5329	 */
5330	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
5331		WORKLIST_REMOVE(wk);
5332		if (!LIST_EMPTY(&oldadp->ad_newdirblk))
5333			panic("allocdirect_merge: extra newdirblk");
5334		WORKLIST_INSERT(&newadp->ad_newdirblk, wk);
5335	}
5336	TAILQ_REMOVE(adphead, oldadp, ad_next);
5337	/*
5338	 * We need to move any journal dependencies over to the freefrag
5339	 * that releases this block if it exists.  Otherwise we are
5340	 * extending an existing block and we'll wait until that is
5341	 * complete to release the journal space and extend the
5342	 * new journal to cover this old space as well.
5343	 */
5344	if (freefrag == NULL) {
5345		if (oldadp->ad_newblkno != newadp->ad_newblkno)
5346			panic("allocdirect_merge: %jd != %jd",
5347			    oldadp->ad_newblkno, newadp->ad_newblkno);
5348		newadp->ad_block.nb_jnewblk = (struct jnewblk *)
5349		    jnewblk_merge(&newadp->ad_block.nb_jnewblk->jn_list,
5350		    &oldadp->ad_block.nb_jnewblk->jn_list,
5351		    &newadp->ad_block.nb_jwork);
5352		oldadp->ad_block.nb_jnewblk = NULL;
5353		cancel_newblk(&oldadp->ad_block, NULL,
5354		    &newadp->ad_block.nb_jwork);
5355	} else {
5356		wk = (struct worklist *) cancel_newblk(&oldadp->ad_block,
5357		    &freefrag->ff_list, &freefrag->ff_jwork);
5358		freefrag->ff_jdep = jnewblk_merge(freefrag->ff_jdep, wk,
5359		    &freefrag->ff_jwork);
5360	}
5361	free_newblk(&oldadp->ad_block);
5362}
5363
5364/*
5365 * Allocate a jfreefrag structure to journal a single block free.
5366 */
5367static struct jfreefrag *
5368newjfreefrag(freefrag, ip, blkno, size, lbn)
5369	struct freefrag *freefrag;
5370	struct inode *ip;
5371	ufs2_daddr_t blkno;
5372	long size;
5373	ufs_lbn_t lbn;
5374{
5375	struct jfreefrag *jfreefrag;
5376	struct fs *fs;
5377
5378	fs = ip->i_fs;
5379	jfreefrag = malloc(sizeof(struct jfreefrag), M_JFREEFRAG,
5380	    M_SOFTDEP_FLAGS);
5381	workitem_alloc(&jfreefrag->fr_list, D_JFREEFRAG, UFSTOVFS(ip->i_ump));
5382	jfreefrag->fr_jsegdep = newjsegdep(&jfreefrag->fr_list);
5383	jfreefrag->fr_state = ATTACHED | DEPCOMPLETE;
5384	jfreefrag->fr_ino = ip->i_number;
5385	jfreefrag->fr_lbn = lbn;
5386	jfreefrag->fr_blkno = blkno;
5387	jfreefrag->fr_frags = numfrags(fs, size);
5388	jfreefrag->fr_freefrag = freefrag;
5389
5390	return (jfreefrag);
5391}
5392
5393/*
5394 * Allocate a new freefrag structure.
5395 */
5396static struct freefrag *
5397newfreefrag(ip, blkno, size, lbn)
5398	struct inode *ip;
5399	ufs2_daddr_t blkno;
5400	long size;
5401	ufs_lbn_t lbn;
5402{
5403	struct freefrag *freefrag;
5404	struct fs *fs;
5405
5406	CTR4(KTR_SUJ, "newfreefrag: ino %d blkno %jd size %ld lbn %jd",
5407	    ip->i_number, blkno, size, lbn);
5408	fs = ip->i_fs;
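	/* Sanity check: the freed fragments must lie within one block. */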
5409	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
5410		panic("newfreefrag: frag size");
5411	freefrag = malloc(sizeof(struct freefrag),
5412	    M_FREEFRAG, M_SOFTDEP_FLAGS);
5413	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
5414	freefrag->ff_state = ATTACHED;
5415	LIST_INIT(&freefrag->ff_jwork);
5416	freefrag->ff_inum = ip->i_number;
5417	freefrag->ff_vtype = ITOV(ip)->v_type;
5418	freefrag->ff_blkno = blkno;
5419	freefrag->ff_fragsize = size;
5420
5421	if (MOUNTEDSUJ(UFSTOVFS(ip->i_ump))) {
5422		freefrag->ff_jdep = (struct worklist *)
5423		    newjfreefrag(freefrag, ip, blkno, size, lbn);
5424	} else {
5425		freefrag->ff_state |= DEPCOMPLETE;
5426		freefrag->ff_jdep = NULL;
5427	}
5428
5429	return (freefrag);
5430}
5431
5432/*
5433 * This workitem de-allocates fragments that were replaced during
5434 * file block allocation.
5435 */
5436static void
5437handle_workitem_freefrag(freefrag)
5438	struct freefrag *freefrag;
5439{
5440	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
5441	struct workhead wkhd;
5442
5443	CTR3(KTR_SUJ,
5444	    "handle_workitem_freefrag: ino %d blkno %jd size %ld",
5445	    freefrag->ff_inum, freefrag->ff_blkno, freefrag->ff_fragsize);
5446	/*
5447	 * It would be illegal to add new completion items to the
	 * freefrag after it was scheduled to be done, so it must be
5449	 * safe to modify the list head here.
5450	 */
5451	LIST_INIT(&wkhd);
5452	ACQUIRE_LOCK(&lk);
5453	LIST_SWAP(&freefrag->ff_jwork, &wkhd, worklist, wk_list);
5454	/*
5455	 * If the journal has not been written we must cancel it here.
5456	 */
5457	if (freefrag->ff_jdep) {
5458		if (freefrag->ff_jdep->wk_type != D_JNEWBLK)
			panic("handle_workitem_freefrag: Unexpected type %d",
			    freefrag->ff_jdep->wk_type);
5461		cancel_jnewblk(WK_JNEWBLK(freefrag->ff_jdep), &wkhd);
5462	}
5463	FREE_LOCK(&lk);
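	/*
	 * The softdep lock must be dropped across ffs_blkfree(), which
	 * may sleep reading the cylinder group block that holds the
	 * free map.
	 */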
5464	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
5465	   freefrag->ff_fragsize, freefrag->ff_inum, freefrag->ff_vtype, &wkhd);
5466	ACQUIRE_LOCK(&lk);
5467	WORKITEM_FREE(freefrag, D_FREEFRAG);
5468	FREE_LOCK(&lk);
5469}
5470
5471/*
5472 * Set up a dependency structure for an external attributes data block.
5473 * This routine follows much of the structure of softdep_setup_allocdirect.
5474 * See the description of softdep_setup_allocdirect above for details.
5475 */
5476void
5477softdep_setup_allocext(ip, off, newblkno, oldblkno, newsize, oldsize, bp)
5478	struct inode *ip;
5479	ufs_lbn_t off;
5480	ufs2_daddr_t newblkno;
5481	ufs2_daddr_t oldblkno;
5482	long newsize;
5483	long oldsize;
5484	struct buf *bp;
5485{
5486	struct allocdirect *adp, *oldadp;
5487	struct allocdirectlst *adphead;
5488	struct freefrag *freefrag;
5489	struct inodedep *inodedep;
5490	struct jnewblk *jnewblk;
5491	struct newblk *newblk;
5492	struct mount *mp;
5493	ufs_lbn_t lbn;
5494
	if (off >= NXADDR)
		panic("softdep_setup_allocext: lbn %lld >= NXADDR",
		    (long long)off);
5498
5499	lbn = bp->b_lblkno;
5500	mp = UFSTOVFS(ip->i_ump);
5501	if (oldblkno && oldblkno != newblkno)
5502		freefrag = newfreefrag(ip, oldblkno, oldsize, lbn);
5503	else
5504		freefrag = NULL;
5505
5506	ACQUIRE_LOCK(&lk);
5507	if (newblk_lookup(mp, newblkno, 0, &newblk) == 0)
5508		panic("softdep_setup_allocext: lost block");
5509	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5510	    ("softdep_setup_allocext: newblk already initialized"));
5511	/*
5512	 * Convert the newblk to an allocdirect.
5513	 */
5514	WORKITEM_REASSIGN(newblk, D_ALLOCDIRECT);
5515	adp = (struct allocdirect *)newblk;
5516	newblk->nb_freefrag = freefrag;
5517	adp->ad_offset = off;
5518	adp->ad_oldblkno = oldblkno;
5519	adp->ad_newsize = newsize;
5520	adp->ad_oldsize = oldsize;
	adp->ad_state |= EXTDATA;
5522
5523	/*
5524	 * Finish initializing the journal.
5525	 */
5526	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5527		jnewblk->jn_ino = ip->i_number;
5528		jnewblk->jn_lbn = lbn;
5529		add_to_journal(&jnewblk->jn_list);
5530	}
5531	if (freefrag && freefrag->ff_jdep != NULL &&
5532	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5533		add_to_journal(freefrag->ff_jdep);
5534	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
5535	adp->ad_inodedep = inodedep;
5536
5537	WORKLIST_INSERT(&bp->b_dep, &newblk->nb_list);
5538	/*
	 * The list of allocdirects must be kept in sorted, ascending
5540	 * order so that the rollback routines can quickly determine the
5541	 * first uncommitted block (the size of the file stored on disk
5542	 * ends at the end of the lowest committed fragment, or if there
5543	 * are no fragments, at the end of the highest committed block).
5544	 * Since files generally grow, the typical case is that the new
5545	 * block is to be added at the end of the list. We speed this
5546	 * special case by checking against the last allocdirect in the
5547	 * list before laboriously traversing the list looking for the
5548	 * insertion point.
5549	 */
5550	adphead = &inodedep->id_newextupdt;
5551	oldadp = TAILQ_LAST(adphead, allocdirectlst);
5552	if (oldadp == NULL || oldadp->ad_offset <= off) {
5553		/* insert at end of list */
5554		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
5555		if (oldadp != NULL && oldadp->ad_offset == off)
5556			allocdirect_merge(adphead, adp, oldadp);
5557		FREE_LOCK(&lk);
5558		return;
5559	}
5560	TAILQ_FOREACH(oldadp, adphead, ad_next) {
5561		if (oldadp->ad_offset >= off)
5562			break;
5563	}
5564	if (oldadp == NULL)
5565		panic("softdep_setup_allocext: lost entry");
5566	/* insert in middle of list */
5567	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
5568	if (oldadp->ad_offset == off)
5569		allocdirect_merge(adphead, adp, oldadp);
5570	FREE_LOCK(&lk);
5571}
5572
5573/*
5574 * Indirect block allocation dependencies.
5575 *
5576 * The same dependencies that exist for a direct block also exist when
5577 * a new block is allocated and pointed to by an entry in a block of
5578 * indirect pointers. The undo/redo states described above are also
5579 * used here. Because an indirect block contains many pointers that
5580 * may have dependencies, a second copy of the entire in-memory indirect
5581 * block is kept. The buffer cache copy is always completely up-to-date.
5582 * The second copy, which is used only as a source for disk writes,
5583 * contains only the safe pointers (i.e., those that have no remaining
5584 * update dependencies). The second copy is freed when all pointers
5585 * are safe. The cache is not allowed to replace indirect blocks with
5586 * pending update dependencies. If a buffer containing an indirect
5587 * block with dependencies is written, these routines will mark it
5588 * dirty again. It can only be successfully written once all the
 * dependencies are removed. The ffs_fsync routine and
 * softdep_sync_metadata work together to get all the dependencies
5591 * removed so that a file can be successfully written to disk. Three
5592 * procedures are used when setting up indirect block pointer
5593 * dependencies. The division is necessary because of the organization
5594 * of the "balloc" routine and because of the distinction between file
5595 * pages and file metadata blocks.
5596 */
5597
5598/*
5599 * Allocate a new allocindir structure.
5600 */
5601static struct allocindir *
5602newallocindir(ip, ptrno, newblkno, oldblkno, lbn)
5603	struct inode *ip;	/* inode for file being extended */
5604	int ptrno;		/* offset of pointer in indirect block */
5605	ufs2_daddr_t newblkno;	/* disk block number being added */
5606	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5607	ufs_lbn_t lbn;
5608{
5609	struct newblk *newblk;
5610	struct allocindir *aip;
5611	struct freefrag *freefrag;
5612	struct jnewblk *jnewblk;
5613
5614	if (oldblkno)
5615		freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize, lbn);
5616	else
5617		freefrag = NULL;
5618	ACQUIRE_LOCK(&lk);
5619	if (newblk_lookup(UFSTOVFS(ip->i_ump), newblkno, 0, &newblk) == 0)
		panic("newallocindir: lost block");
5621	KASSERT(newblk->nb_list.wk_type == D_NEWBLK,
5622	    ("newallocindir: newblk already initialized"));
5623	WORKITEM_REASSIGN(newblk, D_ALLOCINDIR);
5624	newblk->nb_freefrag = freefrag;
5625	aip = (struct allocindir *)newblk;
5626	aip->ai_offset = ptrno;
5627	aip->ai_oldblkno = oldblkno;
5628	aip->ai_lbn = lbn;
5629	if ((jnewblk = newblk->nb_jnewblk) != NULL) {
5630		jnewblk->jn_ino = ip->i_number;
5631		jnewblk->jn_lbn = lbn;
5632		add_to_journal(&jnewblk->jn_list);
5633	}
5634	if (freefrag && freefrag->ff_jdep != NULL &&
5635	    freefrag->ff_jdep->wk_type == D_JFREEFRAG)
5636		add_to_journal(freefrag->ff_jdep);
5637	return (aip);
5638}
5639
5640/*
5641 * Called just before setting an indirect block pointer
5642 * to a newly allocated file page.
5643 */
5644void
5645softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
5646	struct inode *ip;	/* inode for file being extended */
5647	ufs_lbn_t lbn;		/* allocated block number within file */
5648	struct buf *bp;		/* buffer with indirect blk referencing page */
5649	int ptrno;		/* offset of pointer in indirect block */
5650	ufs2_daddr_t newblkno;	/* disk block number being added */
5651	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
5652	struct buf *nbp;	/* buffer holding allocated page */
5653{
5654	struct inodedep *inodedep;
5655	struct freefrag *freefrag;
5656	struct allocindir *aip;
5657	struct pagedep *pagedep;
5658	struct mount *mp;
5659	int dflags;
5660
	if (lbn != nbp->b_lblkno)
		panic("softdep_setup_allocindir_page: lbn %jd != lblkno %jd",
		    lbn, nbp->b_lblkno);
5664	CTR4(KTR_SUJ,
5665	    "softdep_setup_allocindir_page: ino %d blkno %jd oldblkno %jd "
5666	    "lbn %jd", ip->i_number, newblkno, oldblkno, lbn);
5667	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
5668	mp = UFSTOVFS(ip->i_ump);
5669	aip = newallocindir(ip, ptrno, newblkno, oldblkno, lbn);
5670	dflags = DEPALLOC;
5671	if (IS_SNAPSHOT(ip))
5672		dflags |= NODELAY;
5673	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
5674	/*
5675	 * If we are allocating a directory page, then we must
5676	 * allocate an associated pagedep to track additions and
5677	 * deletions.
5678	 */
5679	if ((ip->i_mode & IFMT) == IFDIR)
5680		pagedep_lookup(mp, nbp, ip->i_number, lbn, DEPALLOC, &pagedep);
5681	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5682	freefrag = setup_allocindir_phase2(bp, ip, inodedep, aip, lbn);
5683	FREE_LOCK(&lk);
5684	if (freefrag)
5685		handle_workitem_freefrag(freefrag);
5686}
5687
5688/*
5689 * Called just before setting an indirect block pointer to a
5690 * newly allocated indirect block.
5691 */
5692void
5693softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
5694	struct buf *nbp;	/* newly allocated indirect block */
5695	struct inode *ip;	/* inode for file being extended */
5696	struct buf *bp;		/* indirect block referencing allocated block */
5697	int ptrno;		/* offset of pointer in indirect block */
5698	ufs2_daddr_t newblkno;	/* disk block number being added */
5699{
5700	struct inodedep *inodedep;
5701	struct allocindir *aip;
5702	ufs_lbn_t lbn;
5703	int dflags;
5704
5705	CTR3(KTR_SUJ,
5706	    "softdep_setup_allocindir_meta: ino %d blkno %jd ptrno %d",
5707	    ip->i_number, newblkno, ptrno);
5708	lbn = nbp->b_lblkno;
5709	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
5710	aip = newallocindir(ip, ptrno, newblkno, 0, lbn);
5711	dflags = DEPALLOC;
5712	if (IS_SNAPSHOT(ip))
5713		dflags |= NODELAY;
5714	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
5715	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_block.nb_list);
5716	if (setup_allocindir_phase2(bp, ip, inodedep, aip, lbn))
5717		panic("softdep_setup_allocindir_meta: Block already existed");
5718	FREE_LOCK(&lk);
5719}
5720
5721static void
5722indirdep_complete(indirdep)
5723	struct indirdep *indirdep;
5724{
5725	struct allocindir *aip;
5726
5727	LIST_REMOVE(indirdep, ir_next);
5728	indirdep->ir_state |= DEPCOMPLETE;
5729
5730	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != NULL) {
5731		LIST_REMOVE(aip, ai_next);
5732		free_newblk(&aip->ai_block);
5733	}
5734	/*
5735	 * If this indirdep is not attached to a buf it was simply waiting
5736	 * on completion to clear completehd.  free_indirdep() asserts
5737	 * that nothing is dangling.
5738	 */
5739	if ((indirdep->ir_state & ONWORKLIST) == 0)
5740		free_indirdep(indirdep);
5741}
5742
5743static struct indirdep *
5744indirdep_lookup(mp, ip, bp)
5745	struct mount *mp;
5746	struct inode *ip;
5747	struct buf *bp;
5748{
5749	struct indirdep *indirdep, *newindirdep;
5750	struct newblk *newblk;
5751	struct worklist *wk;
5752	struct fs *fs;
5753	ufs2_daddr_t blkno;
5754
5755	rw_assert(&lk, RA_WLOCKED);
5756	indirdep = NULL;
5757	newindirdep = NULL;
5758	fs = ip->i_fs;
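	/*
	 * Look for an existing indirdep on this buffer.  If none is
	 * found the lock must be dropped to allocate one, so the search
	 * is retried until it completes with a structure in hand.
	 */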
5759	for (;;) {
5760		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5761			if (wk->wk_type != D_INDIRDEP)
5762				continue;
5763			indirdep = WK_INDIRDEP(wk);
5764			break;
5765		}
5766		/* Found on the buffer worklist, no new structure to free. */
5767		if (indirdep != NULL && newindirdep == NULL)
5768			return (indirdep);
5769		if (indirdep != NULL && newindirdep != NULL)
5770			panic("indirdep_lookup: simultaneous create");
5771		/* None found on the buffer and a new structure is ready. */
5772		if (indirdep == NULL && newindirdep != NULL)
5773			break;
5774		/* None found and no new structure available. */
5775		FREE_LOCK(&lk);
5776		newindirdep = malloc(sizeof(struct indirdep),
5777		    M_INDIRDEP, M_SOFTDEP_FLAGS);
5778		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP, mp);
5779		newindirdep->ir_state = ATTACHED;
5780		if (ip->i_ump->um_fstype == UFS1)
5781			newindirdep->ir_state |= UFS1FMT;
5782		TAILQ_INIT(&newindirdep->ir_trunc);
5783		newindirdep->ir_saveddata = NULL;
5784		LIST_INIT(&newindirdep->ir_deplisthd);
5785		LIST_INIT(&newindirdep->ir_donehd);
5786		LIST_INIT(&newindirdep->ir_writehd);
5787		LIST_INIT(&newindirdep->ir_completehd);
5788		if (bp->b_blkno == bp->b_lblkno) {
5789			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
5790			    NULL, NULL);
5791			bp->b_blkno = blkno;
5792		}
5793		newindirdep->ir_freeblks = NULL;
5794		newindirdep->ir_savebp =
5795		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
5796		newindirdep->ir_bp = bp;
5797		BUF_KERNPROC(newindirdep->ir_savebp);
5798		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
5799		ACQUIRE_LOCK(&lk);
5800	}
5801	indirdep = newindirdep;
5802	WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
5803	/*
5804	 * If the block is not yet allocated we don't set DEPCOMPLETE so
5805	 * that we don't free dependencies until the pointers are valid.
5806	 * This could search b_dep for D_ALLOCDIRECT/D_ALLOCINDIR rather
5807	 * than using the hash.
5808	 */
5809	if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk))
5810		LIST_INSERT_HEAD(&newblk->nb_indirdeps, indirdep, ir_next);
5811	else
5812		indirdep->ir_state |= DEPCOMPLETE;
5813	return (indirdep);
5814}
5815
5816/*
5817 * Called to finish the allocation of the "aip" allocated
5818 * by one of the two routines above.
5819 */
5820static struct freefrag *
5821setup_allocindir_phase2(bp, ip, inodedep, aip, lbn)
5822	struct buf *bp;		/* in-memory copy of the indirect block */
5823	struct inode *ip;	/* inode for file being extended */
5824	struct inodedep *inodedep; /* Inodedep for ip */
5825	struct allocindir *aip;	/* allocindir allocated by the above routines */
5826	ufs_lbn_t lbn;		/* Logical block number for this block. */
5827{
5828	struct fs *fs;
5829	struct indirdep *indirdep;
5830	struct allocindir *oldaip;
5831	struct freefrag *freefrag;
5832	struct mount *mp;
5833
5834	rw_assert(&lk, RA_WLOCKED);
5835	mp = UFSTOVFS(ip->i_ump);
5836	fs = ip->i_fs;
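	/* Indirect block buffers are identified by negative lblknos. */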
5837	if (bp->b_lblkno >= 0)
5838		panic("setup_allocindir_phase2: not indir blk");
5839	KASSERT(aip->ai_offset >= 0 && aip->ai_offset < NINDIR(fs),
5840	    ("setup_allocindir_phase2: Bad offset %d", aip->ai_offset));
5841	indirdep = indirdep_lookup(mp, ip, bp);
5842	KASSERT(indirdep->ir_savebp != NULL,
5843	    ("setup_allocindir_phase2 NULL ir_savebp"));
5844	aip->ai_indirdep = indirdep;
5845	/*
5846	 * Check for an unwritten dependency for this indirect offset.  If
	 * there is one, merge the old dependency into the new one.  This happens
5848	 * as a result of reallocblk only.
5849	 */
5850	freefrag = NULL;
5851	if (aip->ai_oldblkno != 0) {
5852		LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next) {
5853			if (oldaip->ai_offset == aip->ai_offset) {
5854				freefrag = allocindir_merge(aip, oldaip);
5855				goto done;
5856			}
5857		}
5858		LIST_FOREACH(oldaip, &indirdep->ir_donehd, ai_next) {
5859			if (oldaip->ai_offset == aip->ai_offset) {
5860				freefrag = allocindir_merge(aip, oldaip);
5861				goto done;
5862			}
5863		}
5864	}
5865done:
5866	LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
5867	return (freefrag);
5868}
5869
5870/*
5871 * Merge two allocindirs which refer to the same block.  Move newblock
 * dependencies and set up the freefrags appropriately.
5873 */
5874static struct freefrag *
5875allocindir_merge(aip, oldaip)
5876	struct allocindir *aip;
5877	struct allocindir *oldaip;
5878{
5879	struct freefrag *freefrag;
5880	struct worklist *wk;
5881
5882	if (oldaip->ai_newblkno != aip->ai_oldblkno)
5883		panic("allocindir_merge: blkno");
5884	aip->ai_oldblkno = oldaip->ai_oldblkno;
5885	freefrag = aip->ai_freefrag;
5886	aip->ai_freefrag = oldaip->ai_freefrag;
5887	oldaip->ai_freefrag = NULL;
	KASSERT(freefrag != NULL, ("allocindir_merge: No freefrag"));
5889	/*
5890	 * If we are tracking a new directory-block allocation,
5891	 * move it from the old allocindir to the new allocindir.
5892	 */
5893	if ((wk = LIST_FIRST(&oldaip->ai_newdirblk)) != NULL) {
5894		WORKLIST_REMOVE(wk);
5895		if (!LIST_EMPTY(&oldaip->ai_newdirblk))
5896			panic("allocindir_merge: extra newdirblk");
5897		WORKLIST_INSERT(&aip->ai_newdirblk, wk);
5898	}
5899	/*
5900	 * We can skip journaling for this freefrag and just complete
5901	 * any pending journal work for the allocindir that is being
5902	 * removed after the freefrag completes.
5903	 */
5904	if (freefrag->ff_jdep)
5905		cancel_jfreefrag(WK_JFREEFRAG(freefrag->ff_jdep));
5906	LIST_REMOVE(oldaip, ai_next);
5907	freefrag->ff_jdep = (struct worklist *)cancel_newblk(&oldaip->ai_block,
5908	    &freefrag->ff_list, &freefrag->ff_jwork);
5909	free_newblk(&oldaip->ai_block);
5910
5911	return (freefrag);
5912}
5913
5914static inline void
5915setup_freedirect(freeblks, ip, i, needj)
5916	struct freeblks *freeblks;
5917	struct inode *ip;
5918	int i;
5919	int needj;
5920{
5921	ufs2_daddr_t blkno;
5922	int frags;
5923
5924	blkno = DIP(ip, i_db[i]);
5925	if (blkno == 0)
5926		return;
5927	DIP_SET(ip, i_db[i], 0);
5928	frags = sblksize(ip->i_fs, ip->i_size, i);
5929	frags = numfrags(ip->i_fs, frags);
5930	newfreework(ip->i_ump, freeblks, NULL, i, blkno, frags, 0, needj);
5931}
5932
5933static inline void
5934setup_freeext(freeblks, ip, i, needj)
5935	struct freeblks *freeblks;
5936	struct inode *ip;
5937	int i;
5938	int needj;
5939{
5940	ufs2_daddr_t blkno;
5941	int frags;
5942
5943	blkno = ip->i_din2->di_extb[i];
5944	if (blkno == 0)
5945		return;
5946	ip->i_din2->di_extb[i] = 0;
5947	frags = sblksize(ip->i_fs, ip->i_din2->di_extsize, i);
5948	frags = numfrags(ip->i_fs, frags);
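	/* Ext data blocks are identified by the negative lbns -1 - i. */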
5949	newfreework(ip->i_ump, freeblks, NULL, -1 - i, blkno, frags, 0, needj);
5950}
5951
5952static inline void
5953setup_freeindir(freeblks, ip, i, lbn, needj)
5954	struct freeblks *freeblks;
5955	struct inode *ip;
5956	int i;
5957	ufs_lbn_t lbn;
5958	int needj;
5959{
5960	ufs2_daddr_t blkno;
5961
5962	blkno = DIP(ip, i_ib[i]);
5963	if (blkno == 0)
5964		return;
5965	DIP_SET(ip, i_ib[i], 0);
5966	newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, ip->i_fs->fs_frag,
5967	    0, needj);
5968}
5969
5970static inline struct freeblks *
5971newfreeblks(mp, ip)
5972	struct mount *mp;
5973	struct inode *ip;
5974{
5975	struct freeblks *freeblks;
5976
5977	freeblks = malloc(sizeof(struct freeblks),
5978		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
5979	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
5980	LIST_INIT(&freeblks->fb_jblkdephd);
5981	LIST_INIT(&freeblks->fb_jwork);
5982	freeblks->fb_ref = 0;
5983	freeblks->fb_cgwait = 0;
5984	freeblks->fb_state = ATTACHED;
5985	freeblks->fb_uid = ip->i_uid;
5986	freeblks->fb_inum = ip->i_number;
5987	freeblks->fb_vtype = ITOV(ip)->v_type;
5988	freeblks->fb_modrev = DIP(ip, i_modrev);
5989	freeblks->fb_devvp = ip->i_devvp;
5990	freeblks->fb_chkcnt = 0;
5991	freeblks->fb_len = 0;
5992
5993	return (freeblks);
5994}
5995
5996static void
5997trunc_indirdep(indirdep, freeblks, bp, off)
5998	struct indirdep *indirdep;
5999	struct freeblks *freeblks;
6000	struct buf *bp;
6001	int off;
6002{
6003	struct allocindir *aip, *aipn;
6004
6005	/*
6006	 * The first set of allocindirs won't be in savedbp.
6007	 */
6008	LIST_FOREACH_SAFE(aip, &indirdep->ir_deplisthd, ai_next, aipn)
6009		if (aip->ai_offset > off)
6010			cancel_allocindir(aip, bp, freeblks, 1);
6011	LIST_FOREACH_SAFE(aip, &indirdep->ir_donehd, ai_next, aipn)
6012		if (aip->ai_offset > off)
6013			cancel_allocindir(aip, bp, freeblks, 1);
6014	/*
6015	 * These will exist in savedbp.
6016	 */
6017	LIST_FOREACH_SAFE(aip, &indirdep->ir_writehd, ai_next, aipn)
6018		if (aip->ai_offset > off)
6019			cancel_allocindir(aip, NULL, freeblks, 0);
6020	LIST_FOREACH_SAFE(aip, &indirdep->ir_completehd, ai_next, aipn)
6021		if (aip->ai_offset > off)
6022			cancel_allocindir(aip, NULL, freeblks, 0);
6023}
6024
6025/*
6026 * Follow the chain of indirects down to lastlbn creating a freework
6027 * structure for each.  This will be used to start indir_trunc() at
 * the right offset and create the journal records for the partial
6029 * truncation.  A second step will handle the truncated dependencies.
6030 */
6031static int
6032setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno)
6033	struct freeblks *freeblks;
6034	struct inode *ip;
6035	ufs_lbn_t lbn;
6036	ufs_lbn_t lastlbn;
6037	ufs2_daddr_t blkno;
6038{
6039	struct indirdep *indirdep;
6040	struct indirdep *indirn;
6041	struct freework *freework;
6042	struct newblk *newblk;
6043	struct mount *mp;
6044	struct buf *bp;
6045	uint8_t *start;
6046	uint8_t *end;
6047	ufs_lbn_t lbnadd;
6048	int level;
6049	int error;
6050	int off;
6051
6053	freework = NULL;
6054	if (blkno == 0)
6055		return (0);
6056	mp = freeblks->fb_list.wk_mp;
6057	bp = getblk(ITOV(ip), lbn, mp->mnt_stat.f_iosize, 0, 0, 0);
6058	if ((bp->b_flags & B_CACHE) == 0) {
6059		bp->b_blkno = blkptrtodb(VFSTOUFS(mp), blkno);
6060		bp->b_iocmd = BIO_READ;
6061		bp->b_flags &= ~B_INVAL;
6062		bp->b_ioflags &= ~BIO_ERROR;
6063		vfs_busy_pages(bp, 0);
6064		bp->b_iooffset = dbtob(bp->b_blkno);
6065		bstrategy(bp);
6066		curthread->td_ru.ru_inblock++;
6067		error = bufwait(bp);
6068		if (error) {
6069			brelse(bp);
6070			return (error);
6071		}
6072	}
6073	level = lbn_level(lbn);
6074	lbnadd = lbn_offset(ip->i_fs, level);
	/*
	 * Compute the offset of the last block we want to keep.  Store
	 * in the freework the first block we want to completely free.
	 * Here -(lbn + level) is the first data lbn mapped by this
	 * indirect and lbnadd is the number of lbns spanned by each of
	 * its pointers.
	 */
6079	off = (lastlbn - -(lbn + level)) / lbnadd;
6080	if (off + 1 == NINDIR(ip->i_fs))
6081		goto nowork;
6082	freework = newfreework(ip->i_ump, freeblks, NULL, lbn, blkno, 0, off+1,
6083	    0);
6084	/*
6085	 * Link the freework into the indirdep.  This will prevent any new
6086	 * allocations from proceeding until we are finished with the
6087	 * truncate and the block is written.
6088	 */
6089	ACQUIRE_LOCK(&lk);
6090	indirdep = indirdep_lookup(mp, ip, bp);
6091	if (indirdep->ir_freeblks)
6092		panic("setup_trunc_indir: indirdep already truncated.");
6093	TAILQ_INSERT_TAIL(&indirdep->ir_trunc, freework, fw_next);
6094	freework->fw_indir = indirdep;
6095	/*
6096	 * Cancel any allocindirs that will not make it to disk.
6097	 * We have to do this for all copies of the indirdep that
6098	 * live on this newblk.
6099	 */
6100	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
6101		newblk_lookup(mp, dbtofsb(ip->i_fs, bp->b_blkno), 0, &newblk);
6102		LIST_FOREACH(indirn, &newblk->nb_indirdeps, ir_next)
6103			trunc_indirdep(indirn, freeblks, bp, off);
6104	} else
6105		trunc_indirdep(indirdep, freeblks, bp, off);
6106	FREE_LOCK(&lk);
6107	/*
	 * Creation is protected by the buf lock.  The saveddata is only
	 * needed if a full truncation follows a partial truncation, but
	 * it is difficult to allocate in that case so we fetch it anyway.
6111	 */
6112	if (indirdep->ir_saveddata == NULL)
6113		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
6114		    M_SOFTDEP_FLAGS);
6115nowork:
6116	/* Fetch the blkno of the child and the zero start offset. */
6117	if (ip->i_ump->um_fstype == UFS1) {
6118		blkno = ((ufs1_daddr_t *)bp->b_data)[off];
6119		start = (uint8_t *)&((ufs1_daddr_t *)bp->b_data)[off+1];
6120	} else {
6121		blkno = ((ufs2_daddr_t *)bp->b_data)[off];
6122		start = (uint8_t *)&((ufs2_daddr_t *)bp->b_data)[off+1];
6123	}
6124	if (freework) {
6125		/* Zero the truncated pointers. */
6126		end = bp->b_data + bp->b_bcount;
6127		bzero(start, end - start);
6128		bdwrite(bp);
6129	} else
6130		bqrelse(bp);
6131	if (level == 0)
6132		return (0);
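	/* Compute the lbn of the child indirect selected by off. */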
6133	lbn++; /* adjust level */
6134	lbn -= (off * lbnadd);
	return (setup_trunc_indir(freeblks, ip, lbn, lastlbn, blkno));
6136}
6137
6138/*
6139 * Complete the partial truncation of an indirect block setup by
6140 * setup_trunc_indir().  This zeros the truncated pointers in the saved
6141 * copy and writes them to disk before the freeblks is allowed to complete.
6142 */
6143static void
6144complete_trunc_indir(freework)
6145	struct freework *freework;
6146{
6147	struct freework *fwn;
6148	struct indirdep *indirdep;
6149	struct buf *bp;
6150	uintptr_t start;
6151	int count;
6152
6153	indirdep = freework->fw_indir;
6154	for (;;) {
6155		bp = indirdep->ir_bp;
6156		/* See if the block was discarded. */
6157		if (bp == NULL)
6158			break;
		/* Inline part of getdirtybuf().  We don't want bremfree. */
6160		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) == 0)
6161			break;
6162		if (BUF_LOCK(bp,
6163		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, &lk) == 0)
6164			BUF_UNLOCK(bp);
6165		ACQUIRE_LOCK(&lk);
6166	}
6167	rw_assert(&lk, RA_WLOCKED);
6168	freework->fw_state |= DEPCOMPLETE;
6169	TAILQ_REMOVE(&indirdep->ir_trunc, freework, fw_next);
6170	/*
6171	 * Zero the pointers in the saved copy.
6172	 */
6173	if (indirdep->ir_state & UFS1FMT)
6174		start = sizeof(ufs1_daddr_t);
6175	else
6176		start = sizeof(ufs2_daddr_t);
6177	start *= freework->fw_start;
6178	count = indirdep->ir_savebp->b_bcount - start;
6179	start += (uintptr_t)indirdep->ir_savebp->b_data;
6180	bzero((char *)start, count);
6181	/*
6182	 * We need to start the next truncation in the list if it has not
6183	 * been started yet.
6184	 */
6185	fwn = TAILQ_FIRST(&indirdep->ir_trunc);
6186	if (fwn != NULL) {
6187		if (fwn->fw_freeblks == indirdep->ir_freeblks)
6188			TAILQ_REMOVE(&indirdep->ir_trunc, fwn, fw_next);
6189		if ((fwn->fw_state & ONWORKLIST) == 0)
6190			freework_enqueue(fwn);
6191	}
6192	/*
	 * If bp is NULL the block was fully truncated, so restore the
	 * saved block list.  Otherwise free it as it is no longer
	 * needed.
6196	 */
6197	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
6198		if (bp == NULL)
6199			bcopy(indirdep->ir_saveddata,
6200			    indirdep->ir_savebp->b_data,
6201			    indirdep->ir_savebp->b_bcount);
6202		free(indirdep->ir_saveddata, M_INDIRDEP);
6203		indirdep->ir_saveddata = NULL;
6204	}
6205	/*
6206	 * When bp is NULL there is a full truncation pending.  We
6207	 * must wait for this full truncation to be journaled before
6208	 * we can release this freework because the disk pointers will
6209	 * never be written as zero.
6210	 */
	if (bp == NULL) {
6212		if (LIST_EMPTY(&indirdep->ir_freeblks->fb_jblkdephd))
6213			handle_written_freework(freework);
6214		else
6215			WORKLIST_INSERT(&indirdep->ir_freeblks->fb_freeworkhd,
6216			   &freework->fw_list);
6217	} else {
6218		/* Complete when the real copy is written. */
6219		WORKLIST_INSERT(&bp->b_dep, &freework->fw_list);
6220		BUF_UNLOCK(bp);
6221	}
6222}
6223
6224/*
6225 * Calculate the number of blocks we are going to release where datablocks
6226 * is the current total and length is the new file size.
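 *
 * For example, with 16K blocks, 2K frags, an NDADDR of 12 and an
 * NINDIR of 2048, a new length of 1MB spans 64 blocks: 512 frags of
 * data plus one single indirect (8 frags), or 520 frags in all,
 * which fsbtodb() converts to 2080; we then return datablocks - 2080.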
6227 */
6228ufs2_daddr_t
6229blkcount(fs, datablocks, length)
6230	struct fs *fs;
6231	ufs2_daddr_t datablocks;
6232	off_t length;
6233{
6234	off_t totblks, numblks;
6235
6236	totblks = 0;
6237	numblks = howmany(length, fs->fs_bsize);
6238	if (numblks <= NDADDR) {
6239		totblks = howmany(length, fs->fs_fsize);
6240		goto out;
6241	}
	totblks = blkstofrags(fs, numblks);
6243	numblks -= NDADDR;
6244	/*
6245	 * Count all single, then double, then triple indirects required.
	 * Subtracting one indirect's worth of blocks for each pass
	 * acknowledges the one of each level pointed to by the inode.
6248	 */
6249	for (;;) {
6250		totblks += blkstofrags(fs, howmany(numblks, NINDIR(fs)));
6251		numblks -= NINDIR(fs);
6252		if (numblks <= 0)
6253			break;
6254		numblks = howmany(numblks, NINDIR(fs));
6255	}
6256out:
6257	totblks = fsbtodb(fs, totblks);
6258	/*
6259	 * Handle sparse files.  We can't reclaim more blocks than the inode
6260	 * references.  We will correct it later in handle_complete_freeblks()
6261	 * when we know the real count.
6262	 */
6263	if (totblks > datablocks)
6264		return (0);
6265	return (datablocks - totblks);
6266}
6267
6268/*
6269 * Handle freeblocks for journaled softupdate filesystems.
6270 *
6271 * Contrary to normal softupdates, we must preserve the block pointers in
6272 * indirects until their subordinates are free.  This is to avoid journaling
6273 * every block that is freed which may consume more space than the journal
6274 * itself.  The recovery program will see the free block journals at the
6275 * base of the truncated area and traverse them to reclaim space.  The
6276 * pointers in the inode may be cleared immediately after the journal
6277 * records are written because each direct and indirect pointer in the
6278 * inode is recorded in a journal.  This permits full truncation to proceed
6279 * asynchronously.  The write order is journal -> inode -> cgs -> indirects.
6280 *
6281 * The algorithm is as follows:
6282 * 1) Traverse the in-memory state and create journal entries to release
6283 *    the relevant blocks and full indirect trees.
6284 * 2) Traverse the indirect block chain adding partial truncation freework
6285 *    records to indirects in the path to lastlbn.  The freework will
6286 *    prevent new allocation dependencies from being satisfied in this
6287 *    indirect until the truncation completes.
6288 * 3) Read and lock the inode block, performing an update with the new size
6289 *    and pointers.  This prevents truncated data from becoming valid on
6290 *    disk through step 4.
 * 4) Reap unsatisfied dependencies that are beyond the truncated area
 *    and eliminate journal work for those records that do not require it.
6293 * 5) Schedule the journal records to be written followed by the inode block.
6294 * 6) Allocate any necessary frags for the end of file.
6295 * 7) Zero any partially truncated blocks.
6296 *
 * From this point truncation proceeds asynchronously using the freework and
6298 * indir_trunc machinery.  The file will not be extended again into a
6299 * partially truncated indirect block until all work is completed but
6300 * the normal dependency mechanism ensures that it is rolled back/forward
6301 * as appropriate.  Further truncation may occur without delay and is
6302 * serialized in indir_trunc().
6303 */
6304void
6305softdep_journal_freeblocks(ip, cred, length, flags)
6306	struct inode *ip;	/* The inode whose length is to be reduced */
6307	struct ucred *cred;
6308	off_t length;		/* The new length for the file */
6309	int flags;		/* IO_EXT and/or IO_NORMAL */
6310{
6311	struct freeblks *freeblks, *fbn;
6312	struct worklist *wk, *wkn;
6313	struct inodedep *inodedep;
6314	struct jblkdep *jblkdep;
6315	struct allocdirect *adp, *adpn;
6316	struct fs *fs;
6317	struct buf *bp;
6318	struct vnode *vp;
6319	struct mount *mp;
6320	ufs2_daddr_t extblocks, datablocks;
6321	ufs_lbn_t tmpval, lbn, lastlbn;
6322	int frags, lastoff, iboff, allocblock, needj, dflags, error, i;
6323
6324	fs = ip->i_fs;
6325	mp = UFSTOVFS(ip->i_ump);
6326	vp = ITOV(ip);
6327	needj = 1;
6328	iboff = -1;
6329	allocblock = 0;
6330	extblocks = 0;
6331	datablocks = 0;
6332	frags = 0;
6333	freeblks = newfreeblks(mp, ip);
6334	ACQUIRE_LOCK(&lk);
6335	/*
6336	 * If we're truncating a removed file that will never be written
6337	 * we don't need to journal the block frees.  The canceled journals
6338	 * for the allocations will suffice.
6339	 */
6340	dflags = DEPALLOC;
6341	if (IS_SNAPSHOT(ip))
6342		dflags |= NODELAY;
6343	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6344	if ((inodedep->id_state & (UNLINKED | DEPCOMPLETE)) == UNLINKED &&
6345	    length == 0)
6346		needj = 0;
6347	CTR3(KTR_SUJ, "softdep_journal_freeblks: ip %d length %ld needj %d",
6348	    ip->i_number, length, needj);
6349	FREE_LOCK(&lk);
6350	/*
6351	 * Calculate the lbn that we are truncating to.  This results in -1
	 * if we're truncating to 0 bytes.  So it is the last lbn we want
6353	 * to keep, not the first lbn we want to truncate.
6354	 */
6355	lastlbn = lblkno(fs, length + fs->fs_bsize - 1) - 1;
6356	lastoff = blkoff(fs, length);
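	/*
	 * For example, with 16K blocks a length of 16K yields lastlbn 0
	 * and lastoff 0: lbn 0 is kept in its entirety and everything
	 * beyond it is released.
	 */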
6357	/*
6358	 * Compute frags we are keeping in lastlbn.  0 means all.
6359	 */
6360	if (lastlbn >= 0 && lastlbn < NDADDR) {
6361		frags = fragroundup(fs, lastoff);
6362		/* adp offset of last valid allocdirect. */
6363		iboff = lastlbn;
6364	} else if (lastlbn > 0)
6365		iboff = NDADDR;
6366	if (fs->fs_magic == FS_UFS2_MAGIC)
6367		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6368	/*
6369	 * Handle normal data blocks and indirects.  This section saves
6370	 * values used after the inode update to complete frag and indirect
6371	 * truncation.
6372	 */
6373	if ((flags & IO_NORMAL) != 0) {
6374		/*
6375		 * Handle truncation of whole direct and indirect blocks.
6376		 */
6377		for (i = iboff + 1; i < NDADDR; i++)
6378			setup_freedirect(freeblks, ip, i, needj);
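		/*
		 * Each indirect i_ib[i] maps the lbn range [lbn, lbn +
		 * tmpval), where tmpval is NINDIR(fs) raised to the
		 * (i + 1)th power.
		 */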
6379		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6380		    i++, lbn += tmpval, tmpval *= NINDIR(fs)) {
6381			/* Release a whole indirect tree. */
6382			if (lbn > lastlbn) {
				setup_freeindir(freeblks, ip, i, -lbn - i,
6384				    needj);
6385				continue;
6386			}
6387			iboff = i + NDADDR;
6388			/*
6389			 * Traverse partially truncated indirect tree.
6390			 */
6391			if (lbn <= lastlbn && lbn + tmpval - 1 > lastlbn)
6392				setup_trunc_indir(freeblks, ip, -lbn - i,
6393				    lastlbn, DIP(ip, i_ib[i]));
6394		}
6395		/*
6396		 * Handle partial truncation to a frag boundary.
6397		 */
6398		if (frags) {
6399			ufs2_daddr_t blkno;
6400			long oldfrags;
6401
6402			oldfrags = blksize(fs, ip, lastlbn);
6403			blkno = DIP(ip, i_db[lastlbn]);
6404			if (blkno && oldfrags != frags) {
6405				oldfrags -= frags;
6406				oldfrags = numfrags(ip->i_fs, oldfrags);
6407				blkno += numfrags(ip->i_fs, frags);
6408				newfreework(ip->i_ump, freeblks, NULL, lastlbn,
6409				    blkno, oldfrags, 0, needj);
6410			} else if (blkno == 0)
6411				allocblock = 1;
6412		}
6413		/*
6414		 * Add a journal record for partial truncate if we are
6415		 * handling indirect blocks.  Non-indirects need no extra
6416		 * journaling.
6417		 */
6418		if (length != 0 && lastlbn >= NDADDR) {
6419			ip->i_flag |= IN_TRUNCATED;
6420			newjtrunc(freeblks, length, 0);
6421		}
6422		ip->i_size = length;
6423		DIP_SET(ip, i_size, ip->i_size);
6424		datablocks = DIP(ip, i_blocks) - extblocks;
6425		if (length != 0)
6426			datablocks = blkcount(ip->i_fs, datablocks, length);
6427		freeblks->fb_len = length;
6428	}
6429	if ((flags & IO_EXT) != 0) {
6430		for (i = 0; i < NXADDR; i++)
6431			setup_freeext(freeblks, ip, i, needj);
6432		ip->i_din2->di_extsize = 0;
6433		datablocks += extblocks;
6434	}
6435#ifdef QUOTA
6436	/* Reference the quotas in case the block count is wrong in the end. */
6437	quotaref(vp, freeblks->fb_quota);
6438	(void) chkdq(ip, -datablocks, NOCRED, 0);
6439#endif
6440	freeblks->fb_chkcnt = -datablocks;
6441	UFS_LOCK(ip->i_ump);
6442	fs->fs_pendingblocks += datablocks;
6443	UFS_UNLOCK(ip->i_ump);
6444	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6445	/*
6446	 * Handle truncation of incomplete alloc direct dependencies.  We
6447	 * hold the inode block locked to prevent incomplete dependencies
6448	 * from reaching the disk while we are eliminating those that
6449	 * have been truncated.  This is a partially inlined ffs_update().
6450	 */
6451	ufs_itimes(vp);
6452	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
6453	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6454	    (int)fs->fs_bsize, cred, &bp);
6455	if (error) {
6456		brelse(bp);
6457		softdep_error("softdep_journal_freeblocks", error);
6458		return;
6459	}
6460	if (bp->b_bufsize == fs->fs_bsize)
6461		bp->b_flags |= B_CLUSTEROK;
6462	softdep_update_inodeblock(ip, bp, 0);
6463	if (ip->i_ump->um_fstype == UFS1)
6464		*((struct ufs1_dinode *)bp->b_data +
6465		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
6466	else
6467		*((struct ufs2_dinode *)bp->b_data +
6468		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
6469	ACQUIRE_LOCK(&lk);
6470	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6471	if ((inodedep->id_state & IOSTARTED) != 0)
		panic("softdep_journal_freeblocks: inode busy");
6473	/*
6474	 * Add the freeblks structure to the list of operations that
6475	 * must await the zero'ed inode being written to disk. If we
6476	 * still have a bitmap dependency (needj), then the inode
6477	 * has never been written to disk, so we can process the
6478	 * freeblks below once we have deleted the dependencies.
6479	 */
6480	if (needj)
6481		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6482	else
6483		freeblks->fb_state |= COMPLETE;
6484	if ((flags & IO_NORMAL) != 0) {
6485		TAILQ_FOREACH_SAFE(adp, &inodedep->id_inoupdt, ad_next, adpn) {
6486			if (adp->ad_offset > iboff)
6487				cancel_allocdirect(&inodedep->id_inoupdt, adp,
6488				    freeblks);
6489			/*
6490			 * Truncate the allocdirect.  We could eliminate
6491			 * or modify journal records as well.
6492			 */
6493			else if (adp->ad_offset == iboff && frags)
6494				adp->ad_newsize = frags;
6495		}
6496	}
6497	if ((flags & IO_EXT) != 0)
6498		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6499			cancel_allocdirect(&inodedep->id_extupdt, adp,
6500			    freeblks);
6501	/*
6502	 * Scan the bufwait list for newblock dependencies that will never
6503	 * make it to disk.
6504	 */
6505	LIST_FOREACH_SAFE(wk, &inodedep->id_bufwait, wk_list, wkn) {
6506		if (wk->wk_type != D_ALLOCDIRECT)
6507			continue;
6508		adp = WK_ALLOCDIRECT(wk);
6509		if (((flags & IO_NORMAL) != 0 && (adp->ad_offset > iboff)) ||
6510		    ((flags & IO_EXT) != 0 && (adp->ad_state & EXTDATA))) {
6511			cancel_jfreeblk(freeblks, adp->ad_newblkno);
6512			cancel_newblk(WK_NEWBLK(wk), NULL, &freeblks->fb_jwork);
6513			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
6514		}
6515	}
6516	/*
6517	 * Add journal work.
6518	 */
6519	LIST_FOREACH(jblkdep, &freeblks->fb_jblkdephd, jb_deps)
6520		add_to_journal(&jblkdep->jb_list);
6521	FREE_LOCK(&lk);
6522	bdwrite(bp);
6523	/*
6524	 * Truncate dependency structures beyond length.
6525	 */
6526	trunc_dependencies(ip, freeblks, lastlbn, frags, flags);
6527	/*
6528	 * This is only set when we need to allocate a fragment because
6529	 * none existed at the end of a frag-sized file.  It handles only
	 * allocating a new, zero-filled block.
6531	 */
6532	if (allocblock) {
6533		ip->i_size = length - lastoff;
6534		DIP_SET(ip, i_size, ip->i_size);
6535		error = UFS_BALLOC(vp, length - 1, 1, cred, BA_CLRBUF, &bp);
6536		if (error != 0) {
			softdep_error("softdep_journal_freeblocks", error);
6538			return;
6539		}
6540		ip->i_size = length;
6541		DIP_SET(ip, i_size, length);
6542		ip->i_flag |= IN_CHANGE | IN_UPDATE;
6543		allocbuf(bp, frags);
6544		ffs_update(vp, 0);
6545		bawrite(bp);
6546	} else if (lastoff != 0 && vp->v_type != VDIR) {
6547		int size;
6548
6549		/*
6550		 * Zero the end of a truncated frag or block.
6551		 */
6552		size = sblksize(fs, length, lastlbn);
6553		error = bread(vp, lastlbn, size, cred, &bp);
6554		if (error) {
			softdep_error("softdep_journal_freeblocks", error);
6556			return;
6557		}
6558		bzero((char *)bp->b_data + lastoff, size - lastoff);
6559		bawrite(bp);
6561	}
6562	ACQUIRE_LOCK(&lk);
6563	inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6564	TAILQ_INSERT_TAIL(&inodedep->id_freeblklst, freeblks, fb_next);
6565	freeblks->fb_state |= DEPCOMPLETE | ONDEPLIST;
6566	/*
6567	 * We zero earlier truncations so they don't erroneously
6568	 * update i_blocks.
6569	 */
6570	if (freeblks->fb_len == 0 && (flags & IO_NORMAL) != 0)
6571		TAILQ_FOREACH(fbn, &inodedep->id_freeblklst, fb_next)
6572			fbn->fb_len = 0;
6573	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE &&
6574	    LIST_EMPTY(&freeblks->fb_jblkdephd))
6575		freeblks->fb_state |= INPROGRESS;
6576	else
6577		freeblks = NULL;
6578	FREE_LOCK(&lk);
6579	if (freeblks)
6580		handle_workitem_freeblocks(freeblks, 0);
6581	trunc_pages(ip, length, extblocks, flags);
6583}
6584
6585/*
6586 * Flush a JOP_SYNC to the journal.
6587 */
6588void
6589softdep_journal_fsync(ip)
6590	struct inode *ip;
6591{
6592	struct jfsync *jfsync;
6593
6594	if ((ip->i_flag & IN_TRUNCATED) == 0)
6595		return;
6596	ip->i_flag &= ~IN_TRUNCATED;
6597	jfsync = malloc(sizeof(*jfsync), M_JFSYNC, M_SOFTDEP_FLAGS | M_ZERO);
6598	workitem_alloc(&jfsync->jfs_list, D_JFSYNC, UFSTOVFS(ip->i_ump));
6599	jfsync->jfs_size = ip->i_size;
6600	jfsync->jfs_ino = ip->i_number;
6601	ACQUIRE_LOCK(&lk);
6602	add_to_journal(&jfsync->jfs_list);
6603	jwait(&jfsync->jfs_list, MNT_WAIT);
6604	FREE_LOCK(&lk);
6605}
6606
6607/*
6608 * Block de-allocation dependencies.
6609 *
6610 * When blocks are de-allocated, the on-disk pointers must be nullified before
6611 * the blocks are made available for use by other files.  (The true
6612 * requirement is that old pointers must be nullified before new on-disk
6613 * pointers are set.  We chose this slightly more stringent requirement to
6614 * reduce complexity.) Our implementation handles this dependency by updating
6615 * the inode (or indirect block) appropriately but delaying the actual block
6616 * de-allocation (i.e., freemap and free space count manipulation) until
6617 * after the updated versions reach stable storage.  After the disk is
6618 * updated, the blocks can be safely de-allocated whenever it is convenient.
6619 * This implementation handles only the common case of reducing a file's
6620 * length to zero. Other cases are handled by the conventional synchronous
6621 * write approach.
6622 *
6623 * The ffs implementation with which we worked double-checks
6624 * the state of the block pointers and file size as it reduces
6625 * a file's length.  Some of this code is replicated here in our
6626 * soft updates implementation.  The freeblks->fb_chkcnt field is
6627 * used to transfer a part of this information to the procedure
6628 * that eventually de-allocates the blocks.
6629 *
6630 * This routine should be called from the routine that shortens
6631 * a file's length, before the inode's size or block pointers
6632 * are modified. It will save the block pointer information for
6633 * later release and zero the inode so that the calling routine
6634 * can release it.
6635 */
6636void
6637softdep_setup_freeblocks(ip, length, flags)
6638	struct inode *ip;	/* The inode whose length is to be reduced */
6639	off_t length;		/* The new length for the file */
6640	int flags;		/* IO_EXT and/or IO_NORMAL */
6641{
6642	struct ufs1_dinode *dp1;
6643	struct ufs2_dinode *dp2;
6644	struct freeblks *freeblks;
6645	struct inodedep *inodedep;
6646	struct allocdirect *adp;
6647	struct buf *bp;
6648	struct fs *fs;
6649	ufs2_daddr_t extblocks, datablocks;
6650	struct mount *mp;
6651	int i, delay, error, dflags;
6652	ufs_lbn_t tmpval;
6653	ufs_lbn_t lbn;
6654
6655	CTR2(KTR_SUJ, "softdep_setup_freeblks: ip %d length %ld",
6656	    ip->i_number, length);
6657	fs = ip->i_fs;
6658	mp = UFSTOVFS(ip->i_ump);
6659	if (length != 0)
6660		panic("softdep_setup_freeblocks: non-zero length");
6661	freeblks = newfreeblks(mp, ip);
6662	extblocks = 0;
6663	datablocks = 0;
6664	if (fs->fs_magic == FS_UFS2_MAGIC)
6665		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
6666	if ((flags & IO_NORMAL) != 0) {
6667		for (i = 0; i < NDADDR; i++)
6668			setup_freedirect(freeblks, ip, i, 0);
6669		for (i = 0, tmpval = NINDIR(fs), lbn = NDADDR; i < NIADDR;
6670		    i++, lbn += tmpval, tmpval *= NINDIR(fs))
			setup_freeindir(freeblks, ip, i, -lbn - i, 0);
6672		ip->i_size = 0;
6673		DIP_SET(ip, i_size, 0);
6674		datablocks = DIP(ip, i_blocks) - extblocks;
6675	}
6676	if ((flags & IO_EXT) != 0) {
6677		for (i = 0; i < NXADDR; i++)
6678			setup_freeext(freeblks, ip, i, 0);
6679		ip->i_din2->di_extsize = 0;
6680		datablocks += extblocks;
6681	}
6682#ifdef QUOTA
6683	/* Reference the quotas in case the block count is wrong in the end. */
6684	quotaref(ITOV(ip), freeblks->fb_quota);
6685	(void) chkdq(ip, -datablocks, NOCRED, 0);
6686#endif
6687	freeblks->fb_chkcnt = -datablocks;
6688	UFS_LOCK(ip->i_ump);
6689	fs->fs_pendingblocks += datablocks;
6690	UFS_UNLOCK(ip->i_ump);
6691	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - datablocks);
6692	/*
	 * Push the zero'ed inode to its disk buffer so that we are free
6694	 * to delete its dependencies below. Once the dependencies are gone
6695	 * the buffer can be safely released.
6696	 */
6697	if ((error = bread(ip->i_devvp,
6698	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
6699	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
6700		brelse(bp);
6701		softdep_error("softdep_setup_freeblocks", error);
6702	}
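	/*
	 * Adopt the on-disk freelink before copying the inode out so
	 * that the unlinked-inode list linkage is not clobbered.
	 */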
6703	if (ip->i_ump->um_fstype == UFS1) {
6704		dp1 = ((struct ufs1_dinode *)bp->b_data +
6705		    ino_to_fsbo(fs, ip->i_number));
6706		ip->i_din1->di_freelink = dp1->di_freelink;
6707		*dp1 = *ip->i_din1;
6708	} else {
6709		dp2 = ((struct ufs2_dinode *)bp->b_data +
6710		    ino_to_fsbo(fs, ip->i_number));
6711		ip->i_din2->di_freelink = dp2->di_freelink;
6712		*dp2 = *ip->i_din2;
6713	}
6714	/*
6715	 * Find and eliminate any inode dependencies.
6716	 */
6717	ACQUIRE_LOCK(&lk);
6718	dflags = DEPALLOC;
6719	if (IS_SNAPSHOT(ip))
6720		dflags |= NODELAY;
6721	(void) inodedep_lookup(mp, ip->i_number, dflags, &inodedep);
6722	if ((inodedep->id_state & IOSTARTED) != 0)
6723		panic("softdep_setup_freeblocks: inode busy");
6724	/*
6725	 * Add the freeblks structure to the list of operations that
6726	 * must await the zero'ed inode being written to disk. If we
6727	 * still have a bitmap dependency (delay == 0), then the inode
6728	 * has never been written to disk, so we can process the
6729	 * freeblks below once we have deleted the dependencies.
6730	 */
6731	delay = (inodedep->id_state & DEPCOMPLETE);
6732	if (delay)
6733		WORKLIST_INSERT(&bp->b_dep, &freeblks->fb_list);
6734	else
6735		freeblks->fb_state |= COMPLETE;
6736	/*
6737	 * Because the file length has been truncated to zero, any
6738	 * pending block allocation dependency structures associated
6739	 * with this inode are obsolete and can simply be de-allocated.
6740	 * We must first merge the two dependency lists to get rid of
6741	 * any duplicate freefrag structures, then purge the merged list.
6742	 * If we still have a bitmap dependency, then the inode has never
6743	 * been written to disk, so we can free any fragments without delay.
6744	 */
6745	if (flags & IO_NORMAL) {
6746		merge_inode_lists(&inodedep->id_newinoupdt,
6747		    &inodedep->id_inoupdt);
6748		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
6749			cancel_allocdirect(&inodedep->id_inoupdt, adp,
6750			    freeblks);
6751	}
6752	if (flags & IO_EXT) {
6753		merge_inode_lists(&inodedep->id_newextupdt,
6754		    &inodedep->id_extupdt);
6755		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
6756			cancel_allocdirect(&inodedep->id_extupdt, adp,
6757			    freeblks);
6758	}
6759	FREE_LOCK(&lk);
6760	bdwrite(bp);
6761	trunc_dependencies(ip, freeblks, -1, 0, flags);
6762	ACQUIRE_LOCK(&lk);
6763	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
6764		(void) free_inodedep(inodedep);
6765	freeblks->fb_state |= DEPCOMPLETE;
6766	/*
6767	 * If the inode with zeroed block pointers is now on disk
6768	 * we can start freeing blocks.
6769	 */
6770	if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
6771		freeblks->fb_state |= INPROGRESS;
6772	else
6773		freeblks = NULL;
6774	FREE_LOCK(&lk);
6775	if (freeblks)
6776		handle_workitem_freeblocks(freeblks, 0);
6777	trunc_pages(ip, length, extblocks, flags);
6778}
6779
6780/*
6781 * Eliminate pages from the page cache that back parts of this inode and
6782 * adjust the vnode pager's idea of our size.  This prevents stale data
6783 * from hanging around in the page cache.
6784 */
6785static void
6786trunc_pages(ip, length, extblocks, flags)
6787	struct inode *ip;
6788	off_t length;
6789	ufs2_daddr_t extblocks;
6790	int flags;
6791{
6792	struct vnode *vp;
6793	struct fs *fs;
6794	ufs_lbn_t lbn;
6795	off_t end, extend;
6796
6797	vp = ITOV(ip);
6798	fs = ip->i_fs;
6799	extend = OFF_TO_IDX(lblktosize(fs, -extblocks));
6800	if ((flags & IO_EXT) != 0)
6801		vn_pages_remove(vp, extend, 0);
6802	if ((flags & IO_NORMAL) == 0)
6803		return;
6804	BO_LOCK(&vp->v_bufobj);
6805	drain_output(vp);
6806	BO_UNLOCK(&vp->v_bufobj);
6807	/*
	 * The vnode pager eliminates file pages; we eliminate indirects
6809	 * below.
6810	 */
6811	vnode_pager_setsize(vp, length);
6812	/*
6813	 * Calculate the end based on the last indirect we want to keep.  If
6814	 * the block extends into indirects we can just use the negative of
6815	 * its lbn.  Doubles and triples exist at lower numbers so we must
	 * be careful not to remove those, if they exist.  Double and triple
6817	 * indirect lbns do not overlap with others so it is not important
6818	 * to verify how many levels are required.
6819	 */
6820	lbn = lblkno(fs, length);
6821	if (lbn >= NDADDR) {
6822		/* Calculate the virtual lbn of the triple indirect. */
6823		lbn = -lbn - (NIADDR - 1);
6824		end = OFF_TO_IDX(lblktosize(fs, lbn));
6825	} else
6826		end = extend;
6827	vn_pages_remove(vp, OFF_TO_IDX(OFF_MAX), end);
6828}
6829
6830/*
6831 * See if the buf bp is in the range eliminated by truncation.
6832 */
6833static int
6834trunc_check_buf(bp, blkoffp, lastlbn, lastoff, flags)
6835	struct buf *bp;
6836	int *blkoffp;
6837	ufs_lbn_t lastlbn;
6838	int lastoff;
6839	int flags;
6840{
6841	ufs_lbn_t lbn;
6842
6843	*blkoffp = 0;
6844	/* Only match ext/normal blocks as appropriate. */
6845	if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
6846	    ((flags & IO_NORMAL) == 0 && (bp->b_xflags & BX_ALTDATA) == 0))
6847		return (0);
6848	/* ALTDATA is always a full truncation. */
6849	if ((bp->b_xflags & BX_ALTDATA) != 0)
6850		return (1);
6851	/* -1 is full truncation. */
6852	if (lastlbn == -1)
6853		return (1);
6854	/*
6855	 * If this is a partial truncate we only want those
6856	 * blocks and indirect blocks that cover the range
6857	 * we're after.
6858	 */
6859	lbn = bp->b_lblkno;
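	/* Map an indirect lbn to the first data lbn that it covers. */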
6860	if (lbn < 0)
6861		lbn = -(lbn + lbn_level(lbn));
6862	if (lbn < lastlbn)
6863		return (0);
6864	/* Here we only truncate lblkno if it's partial. */
6865	if (lbn == lastlbn) {
6866		if (lastoff == 0)
6867			return (0);
6868		*blkoffp = lastoff;
6869	}
6870	return (1);
6871}
6872
6873/*
6874 * Eliminate any dependencies that exist in memory beyond lblkno:off
6875 */
6876static void
6877trunc_dependencies(ip, freeblks, lastlbn, lastoff, flags)
6878	struct inode *ip;
6879	struct freeblks *freeblks;
6880	ufs_lbn_t lastlbn;
6881	int lastoff;
6882	int flags;
6883{
6884	struct bufobj *bo;
6885	struct vnode *vp;
6886	struct buf *bp;
6887	struct fs *fs;
6888	int blkoff;
6889
6890	/*
6891	 * We must wait for any I/O in progress to finish so that
6892	 * all potential buffers on the dirty list will be visible.
6893	 * Once they are all there, walk the list and get rid of
6894	 * any dependencies.
6895	 */
6896	fs = ip->i_fs;
6897	vp = ITOV(ip);
6898	bo = &vp->v_bufobj;
6899	BO_LOCK(bo);
6900	drain_output(vp);
6901	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs)
6902		bp->b_vflags &= ~BV_SCANNED;
6903restart:
6904	TAILQ_FOREACH(bp, &bo->bo_dirty.bv_hd, b_bobufs) {
6905		if (bp->b_vflags & BV_SCANNED)
6906			continue;
6907		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6908			bp->b_vflags |= BV_SCANNED;
6909			continue;
6910		}
6911		if ((bp = getdirtybuf(bp, BO_LOCKPTR(bo), MNT_WAIT)) == NULL)
6912			goto restart;
6913		BO_UNLOCK(bo);
6914		if (deallocate_dependencies(bp, freeblks, blkoff))
6915			bqrelse(bp);
6916		else
6917			brelse(bp);
6918		BO_LOCK(bo);
6919		goto restart;
6920	}
6921	/*
6922	 * Now do the work of vtruncbuf while also matching indirect blocks.
6923	 */
6924	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs)
6925		bp->b_vflags &= ~BV_SCANNED;
6926cleanrestart:
6927	TAILQ_FOREACH(bp, &bo->bo_clean.bv_hd, b_bobufs) {
6928		if (bp->b_vflags & BV_SCANNED)
6929			continue;
6930		if (!trunc_check_buf(bp, &blkoff, lastlbn, lastoff, flags)) {
6931			bp->b_vflags |= BV_SCANNED;
6932			continue;
6933		}
6934		if (BUF_LOCK(bp,
6935		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK,
6936		    BO_LOCKPTR(bo)) == ENOLCK) {
6937			BO_LOCK(bo);
6938			goto cleanrestart;
6939		}
6940		bp->b_vflags |= BV_SCANNED;
6941		bremfree(bp);
6942		if (blkoff != 0) {
6943			allocbuf(bp, blkoff);
6944			bqrelse(bp);
6945		} else {
6946			bp->b_flags |= B_INVAL | B_NOCACHE | B_RELBUF;
6947			brelse(bp);
6948		}
6949		BO_LOCK(bo);
6950		goto cleanrestart;
6951	}
6952	drain_output(vp);
6953	BO_UNLOCK(bo);
6954}
6955
6956static int
6957cancel_pagedep(pagedep, freeblks, blkoff)
6958	struct pagedep *pagedep;
6959	struct freeblks *freeblks;
6960	int blkoff;
6961{
6962	struct jremref *jremref;
6963	struct jmvref *jmvref;
6964	struct dirrem *dirrem, *tmp;
6965	int i;
6966
6967	/*
6968	 * Copy any directory remove dependencies to the list
6969	 * to be processed after the freeblks proceeds.  If
	 * the directory entries never made it to disk they
6971	 * can be dumped directly onto the work list.
6972	 */
6973	LIST_FOREACH_SAFE(dirrem, &pagedep->pd_dirremhd, dm_next, tmp) {
6974		/* Skip this directory removal if it is intended to remain. */
6975		if (dirrem->dm_offset < blkoff)
6976			continue;
6977		/*
6978		 * If there are any dirrems we wait for the journal write
6979		 * to complete and then restart the buf scan as the lock
6980		 * has been dropped.
6981		 */
6982		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL) {
6983			jwait(&jremref->jr_list, MNT_WAIT);
6984			return (ERESTART);
6985		}
6986		LIST_REMOVE(dirrem, dm_next);
6987		dirrem->dm_dirinum = pagedep->pd_ino;
6988		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &dirrem->dm_list);
6989	}
6990	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL) {
6991		jwait(&jmvref->jm_list, MNT_WAIT);
6992		return (ERESTART);
6993	}
6994	/*
6995	 * When we're partially truncating a pagedep we just want to flush
	 * journal entries and return.  There cannot be any adds in the
	 * truncated portion of the directory, and the newblk must remain if
6998	 * part of the block remains.
6999	 */
7000	if (blkoff != 0) {
7001		struct diradd *dap;
7002
7003		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
7004			if (dap->da_offset > blkoff)
7005				panic("cancel_pagedep: diradd %p off %d > %d",
7006				    dap, dap->da_offset, blkoff);
7007		for (i = 0; i < DAHASHSZ; i++)
7008			LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist)
7009				if (dap->da_offset > blkoff)
7010					panic("cancel_pagedep: diradd %p off %d > %d",
7011					    dap, dap->da_offset, blkoff);
7012		return (0);
7013	}
7014	/*
7015	 * There should be no directory add dependencies present
7016	 * as the directory could not be truncated until all
7017	 * children were removed.
7018	 */
7019	KASSERT(LIST_FIRST(&pagedep->pd_pendinghd) == NULL,
	    ("cancel_pagedep: pendinghd != NULL"));
7021	for (i = 0; i < DAHASHSZ; i++)
7022		KASSERT(LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL,
7023		    ("deallocate_dependencies: diraddhd != NULL"));
7024	if ((pagedep->pd_state & NEWBLOCK) != 0)
7025		free_newdirblk(pagedep->pd_newdirblk);
7026	if (free_pagedep(pagedep) == 0)
7027		panic("Failed to free pagedep %p", pagedep);
7028	return (0);
7029}
7030
7031/*
7032 * Reclaim any dependency structures from a buffer that is about to
7033 * be reallocated to a new vnode. The buffer must be locked; thus,
7034 * no I/O completion operations can occur while we are manipulating
7035 * its associated dependencies. The mutex is held so that other I/Os
7036 * associated with related dependencies do not occur.
7037 */
7038static int
7039deallocate_dependencies(bp, freeblks, off)
7040	struct buf *bp;
7041	struct freeblks *freeblks;
7042	int off;
7043{
7044	struct indirdep *indirdep;
7045	struct pagedep *pagedep;
7046	struct allocdirect *adp;
7047	struct worklist *wk, *wkn;
7048
7049	ACQUIRE_LOCK(&lk);
7050	LIST_FOREACH_SAFE(wk, &bp->b_dep, wk_list, wkn) {
7051		switch (wk->wk_type) {
7052		case D_INDIRDEP:
7053			indirdep = WK_INDIRDEP(wk);
7054			if (bp->b_lblkno >= 0 ||
7055			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
7056				panic("deallocate_dependencies: not indir");
7057			cancel_indirdep(indirdep, bp, freeblks);
7058			continue;
7059
7060		case D_PAGEDEP:
7061			pagedep = WK_PAGEDEP(wk);
7062			if (cancel_pagedep(pagedep, freeblks, off)) {
7063				FREE_LOCK(&lk);
7064				return (ERESTART);
7065			}
7066			continue;
7067
7068		case D_ALLOCINDIR:
7069			/*
7070			 * Simply remove the allocindir, we'll find it via
7071			 * the indirdep where we can clear pointers if
7072			 * needed.
7073			 */
7074			WORKLIST_REMOVE(wk);
7075			continue;
7076
7077		case D_FREEWORK:
7078			/*
7079			 * A truncation is waiting for the zero'd pointers
7080			 * to be written.  It can be freed when the freeblks
7081			 * is journaled.
7082			 */
7083			WORKLIST_REMOVE(wk);
7084			wk->wk_state |= ONDEPLIST;
7085			WORKLIST_INSERT(&freeblks->fb_freeworkhd, wk);
7086			break;
7087
7088		case D_ALLOCDIRECT:
7089			adp = WK_ALLOCDIRECT(wk);
7090			if (off != 0)
7091				continue;
7092			/* FALLTHROUGH */
7093		default:
7094			panic("deallocate_dependencies: Unexpected type %s",
7095			    TYPENAME(wk->wk_type));
7096			/* NOTREACHED */
7097		}
7098	}
7099	FREE_LOCK(&lk);
7100	/*
7101	 * Don't throw away this buf; we were partially truncating and
7102	 * some deps may always remain.
7103	 */
7104	if (off) {
7105		allocbuf(bp, off);
7106		bp->b_vflags |= BV_SCANNED;
7107		return (EBUSY);
7108	}
7109	bp->b_flags |= B_INVAL | B_NOCACHE;
7110
7111	return (0);
7112}
7113
7114/*
7115 * An allocdirect is being canceled due to a truncate.  We must make sure
7116 * the journal entry is released in concert with the blkfree that releases
7117 * the storage.  Completed journal entries must not be released until the
7118 * space is no longer pointed to by the inode or in the bitmap.
7119 */
7120static void
7121cancel_allocdirect(adphead, adp, freeblks)
7122	struct allocdirectlst *adphead;
7123	struct allocdirect *adp;
7124	struct freeblks *freeblks;
7125{
7126	struct freework *freework;
7127	struct newblk *newblk;
7128	struct worklist *wk;
7129
7130	TAILQ_REMOVE(adphead, adp, ad_next);
7131	newblk = (struct newblk *)adp;
7132	freework = NULL;
7133	/*
7134	 * Find the correct freework structure.
7135	 */
7136	LIST_FOREACH(wk, &freeblks->fb_freeworkhd, wk_list) {
7137		if (wk->wk_type != D_FREEWORK)
7138			continue;
7139		freework = WK_FREEWORK(wk);
7140		if (freework->fw_blkno == newblk->nb_newblkno)
7141			break;
7142	}
7143	if (freework == NULL)
7144		panic("cancel_allocdirect: Freework not found");
7145	/*
7146	 * If a newblk exists at all, we still have the journal entry that
7147	 * initiated the allocation, so we do not need to journal the free.
7148	 */
7149	cancel_jfreeblk(freeblks, freework->fw_blkno);
7150	/*
7151	 * If the journal hasn't been written the jnewblk must be passed
7152	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
7153	 * this by linking the journal dependency into the freework to be
7154	 * freed when freework_freeblock() is called.  If the journal has
7155	 * been written we can simply reclaim the journal space when the
7156	 * freeblks work is complete.
7157	 */
7158	freework->fw_jnewblk = cancel_newblk(newblk, &freework->fw_list,
7159	    &freeblks->fb_jwork);
7160	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
7161}
7162
7164/*
7165 * Cancel a new block allocation.  May be an indirect or direct block.  We
7166 * remove it from various lists and return any journal record that needs to
7167 * be resolved by the caller.
7168 *
7169 * A special consideration is made for indirects which were never pointed
7170 * at on disk and will never be found once this block is released.
7171 */
7172static struct jnewblk *
7173cancel_newblk(newblk, wk, wkhd)
7174	struct newblk *newblk;
7175	struct worklist *wk;
7176	struct workhead *wkhd;
7177{
7178	struct jnewblk *jnewblk;
7179
7180	CTR1(KTR_SUJ, "cancel_newblk: blkno %jd", newblk->nb_newblkno);
7181
7182	newblk->nb_state |= GOINGAWAY;
7183	/*
7184	 * Previously we traversed the completedhd on each indirdep
7185	 * attached to this newblk to cancel them and gather journal
7186	 * work.  Since we need only the oldest journal segment, and
7187	 * the lowest point on the tree will always have the oldest
7188	 * journal segment, we are free to release the segments
7189	 * of any subordinates and may leave the indirdep list to
7190	 * indirdep_complete() when this newblk is freed.
7191	 */
7192	if (newblk->nb_state & ONDEPLIST) {
7193		newblk->nb_state &= ~ONDEPLIST;
7194		LIST_REMOVE(newblk, nb_deps);
7195	}
7196	if (newblk->nb_state & ONWORKLIST)
7197		WORKLIST_REMOVE(&newblk->nb_list);
7198	/*
7199	 * If the journal entry hasn't been written we save a pointer to
7200	 * the dependency that frees it until it is written or the
7201	 * superseding operation completes.
7202	 */
7203	jnewblk = newblk->nb_jnewblk;
7204	if (jnewblk != NULL && wk != NULL) {
7205		newblk->nb_jnewblk = NULL;
7206		jnewblk->jn_dep = wk;
7207	}
7208	if (!LIST_EMPTY(&newblk->nb_jwork))
7209		jwork_move(wkhd, &newblk->nb_jwork);
7210	/*
7211	 * When truncating we must free the newdirblk early to remove
7212	 * the pagedep from the hash before returning.
7213	 */
7214	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7215		free_newdirblk(WK_NEWDIRBLK(wk));
7216	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7217		panic("cancel_newblk: extra newdirblk");
7218
7219	return (jnewblk);
7220}
7221
7222/*
7223 * Schedule the freefrag associated with a newblk to be released once
7224 * the pointers are written and the previous block is no longer needed.
7225 */
7226static void
7227newblk_freefrag(newblk)
7228	struct newblk *newblk;
7229{
7230	struct freefrag *freefrag;
7231
7232	if (newblk->nb_freefrag == NULL)
7233		return;
7234	freefrag = newblk->nb_freefrag;
7235	newblk->nb_freefrag = NULL;
7236	freefrag->ff_state |= COMPLETE;
7237	if ((freefrag->ff_state & ALLCOMPLETE) == ALLCOMPLETE)
7238		add_to_worklist(&freefrag->ff_list, 0);
7239}
7240
7241/*
7242 * Free a newblk. Generate a new freefrag work request if appropriate.
7243 * This must be called after the inode pointer and any direct block pointers
7244 * are valid or fully removed via truncate or frag extension.
7245 */
7246static void
7247free_newblk(newblk)
7248	struct newblk *newblk;
7249{
7250	struct indirdep *indirdep;
7251	struct worklist *wk;
7252
7253	KASSERT(newblk->nb_jnewblk == NULL,
7254	    ("free_newblk: jnewblk %p still attached", newblk->nb_jnewblk));
7255	KASSERT(newblk->nb_list.wk_type != D_NEWBLK,
7256	    ("free_newblk: unclaimed newblk"));
7257	rw_assert(&lk, RA_WLOCKED);
7258	newblk_freefrag(newblk);
7259	if (newblk->nb_state & ONDEPLIST)
7260		LIST_REMOVE(newblk, nb_deps);
7261	if (newblk->nb_state & ONWORKLIST)
7262		WORKLIST_REMOVE(&newblk->nb_list);
7263	LIST_REMOVE(newblk, nb_hash);
7264	if ((wk = LIST_FIRST(&newblk->nb_newdirblk)) != NULL)
7265		free_newdirblk(WK_NEWDIRBLK(wk));
7266	if (!LIST_EMPTY(&newblk->nb_newdirblk))
7267		panic("free_newblk: extra newdirblk");
7268	while ((indirdep = LIST_FIRST(&newblk->nb_indirdeps)) != NULL)
7269		indirdep_complete(indirdep);
7270	handle_jwork(&newblk->nb_jwork);
7271	WORKITEM_FREE(newblk, D_NEWBLK);
7272}
7273
7274/*
7275 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
7276 * This routine must be called with the soft dependency lock held.
7277 */
7278static void
7279free_newdirblk(newdirblk)
7280	struct newdirblk *newdirblk;
7281{
7282	struct pagedep *pagedep;
7283	struct diradd *dap;
7284	struct worklist *wk;
7285
7286	rw_assert(&lk, RA_WLOCKED);
7287	WORKLIST_REMOVE(&newdirblk->db_list);
7288	/*
7289	 * If the pagedep is still linked onto the directory buffer
7290	 * dependency chain, then some of the entries on the
7291	 * pd_pendinghd list may not be committed to disk yet. In
7292	 * this case, we will simply clear the NEWBLOCK flag and
7293	 * let the pd_pendinghd list be processed when the pagedep
7294	 * is next written. If the pagedep is no longer on the buffer
7295	 * dependency chain, then all the entries on the pd_pending
7296	 * list are committed to disk and we can free them here.
7297	 */
7298	pagedep = newdirblk->db_pagedep;
7299	pagedep->pd_state &= ~NEWBLOCK;
7300	if ((pagedep->pd_state & ONWORKLIST) == 0) {
7301		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
7302			free_diradd(dap, NULL);
7303		/*
7304		 * If no dependencies remain, the pagedep will be freed.
7305		 */
7306		free_pagedep(pagedep);
7307	}
7308	/* Should only ever be one item in the list. */
7309	while ((wk = LIST_FIRST(&newdirblk->db_mkdir)) != NULL) {
7310		WORKLIST_REMOVE(wk);
7311		handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
7312	}
7313	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
7314}
7315
7316/*
7317 * Prepare an inode to be freed. The actual free operation is not
7318 * done until the zero'ed inode has been written to disk.
7319 */
7320void
7321softdep_freefile(pvp, ino, mode)
7322	struct vnode *pvp;
7323	ino_t ino;
7324	int mode;
7325{
7326	struct inode *ip = VTOI(pvp);
7327	struct inodedep *inodedep;
7328	struct freefile *freefile;
7329	struct freeblks *freeblks;
7330
7331	/*
7332	 * This sets up the inode de-allocation dependency.
7333	 */
7334	freefile = malloc(sizeof(struct freefile),
7335		M_FREEFILE, M_SOFTDEP_FLAGS);
7336	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
7337	freefile->fx_mode = mode;
7338	freefile->fx_oldinum = ino;
7339	freefile->fx_devvp = ip->i_devvp;
7340	LIST_INIT(&freefile->fx_jwork);
7341	UFS_LOCK(ip->i_ump);
7342	ip->i_fs->fs_pendinginodes += 1;
7343	UFS_UNLOCK(ip->i_ump);
7344
7345	/*
7346	 * If the inodedep does not exist, then the zero'ed inode has
7347	 * been written to disk. If the allocated inode has never been
7348	 * written to disk, then the on-disk inode is zero'ed. In either
7349	 * case we can free the file immediately.  If the journal was
7350	 * canceled before being written, the inode will never make it to
7351	 * disk and we must send the canceled journal entries to
7352	 * ffs_freefile() to be cleared in conjunction with the bitmap.
7353	 * Any blocks waiting on the inode write can be safely freed
7354	 * here as the inode will never be written.
7355	 */
7356	ACQUIRE_LOCK(&lk);
7357	inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7358	if (inodedep) {
7359		/*
7360		 * Clear out freeblks that no longer need to reference
7361		 * this inode.
7362		 */
7363		while ((freeblks =
7364		    TAILQ_FIRST(&inodedep->id_freeblklst)) != NULL) {
7365			TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks,
7366			    fb_next);
7367			freeblks->fb_state &= ~ONDEPLIST;
7368		}
7369		/*
7370		 * Remove this inode from the unlinked list.
7371		 */
7372		if (inodedep->id_state & UNLINKED) {
7373			/*
7374			 * Save the journal work to be freed with the bitmap
7375			 * before we clear UNLINKED.  Otherwise it can be lost
7376			 * if the inode block is written.
7377			 */
7378			handle_bufwait(inodedep, &freefile->fx_jwork);
7379			clear_unlinked_inodedep(inodedep);
7380			/* Re-acquire inodedep as we've dropped lk. */
7381			inodedep_lookup(pvp->v_mount, ino, 0, &inodedep);
7382		}
7383	}
7384	if (inodedep == NULL || check_inode_unwritten(inodedep)) {
7385		FREE_LOCK(&lk);
7386		handle_workitem_freefile(freefile);
7387		return;
7388	}
7389	if ((inodedep->id_state & DEPCOMPLETE) == 0)
7390		inodedep->id_state |= GOINGAWAY;
7391	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
7392	FREE_LOCK(&lk);
7393	if (ip->i_number == ino)
7394		ip->i_flag |= IN_MODIFIED;
7395}
7396
7397/*
7398 * Check to see if an inode has never been written to disk. If
7399 * so, free the inodedep and return success; otherwise return failure.
7400 * This routine must be called with the soft dependency lock held.
7401 *
7402 * If we still have a bitmap dependency, then the inode has never
7403 * been written to disk. Drop the dependency as it is no longer
7404 * necessary since the inode is being deallocated. We set the
7405 * ALLCOMPLETE flags since the bitmap now properly shows that the
7406 * inode is not allocated. Even if the inode is actively being
7407 * written, it has been rolled back to its zero'ed state, so we
7408 * are ensured that a zero inode is what is on the disk. For short
7409 * lived files, this change will usually result in removing all the
7410 * dependencies from the inode so that it can be freed immediately.
7411 */
7412static int
7413check_inode_unwritten(inodedep)
7414	struct inodedep *inodedep;
7415{
7416
7417	rw_assert(&lk, RA_WLOCKED);
7418
7419	if ((inodedep->id_state & (DEPCOMPLETE | UNLINKED)) != 0 ||
7420	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7421	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7422	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7423	    !LIST_EMPTY(&inodedep->id_inowait) ||
7424	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7425	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7426	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7427	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7428	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7429	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7430	    inodedep->id_mkdiradd != NULL ||
7431	    inodedep->id_nlinkdelta != 0)
7432		return (0);
7433	/*
7434	 * Another process might be in initiate_write_inodeblock_ufs[12]
7435	 * trying to allocate memory without holding "Softdep Lock".
7436	 */
7437	if ((inodedep->id_state & IOSTARTED) != 0 &&
7438	    inodedep->id_savedino1 == NULL)
7439		return (0);
7440
7441	if (inodedep->id_state & ONDEPLIST)
7442		LIST_REMOVE(inodedep, id_deps);
7443	inodedep->id_state &= ~ONDEPLIST;
7444	inodedep->id_state |= ALLCOMPLETE;
7445	inodedep->id_bmsafemap = NULL;
7446	if (inodedep->id_state & ONWORKLIST)
7447		WORKLIST_REMOVE(&inodedep->id_list);
7448	if (inodedep->id_savedino1 != NULL) {
7449		free(inodedep->id_savedino1, M_SAVEDINO);
7450		inodedep->id_savedino1 = NULL;
7451	}
7452	if (free_inodedep(inodedep) == 0)
7453		panic("check_inode_unwritten: busy inode");
7454	return (1);
7455}
7456
7457/*
7458 * Try to free an inodedep structure. Return 1 if it could be freed.
7459 */
7460static int
7461free_inodedep(inodedep)
7462	struct inodedep *inodedep;
7463{
7464
7465	rw_assert(&lk, RA_WLOCKED);
7466	if ((inodedep->id_state & (ONWORKLIST | UNLINKED)) != 0 ||
7467	    (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
7468	    !LIST_EMPTY(&inodedep->id_dirremhd) ||
7469	    !LIST_EMPTY(&inodedep->id_pendinghd) ||
7470	    !LIST_EMPTY(&inodedep->id_bufwait) ||
7471	    !LIST_EMPTY(&inodedep->id_inowait) ||
7472	    !TAILQ_EMPTY(&inodedep->id_inoreflst) ||
7473	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
7474	    !TAILQ_EMPTY(&inodedep->id_newinoupdt) ||
7475	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
7476	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
7477	    !TAILQ_EMPTY(&inodedep->id_freeblklst) ||
7478	    inodedep->id_mkdiradd != NULL ||
7479	    inodedep->id_nlinkdelta != 0 ||
7480	    inodedep->id_savedino1 != NULL)
7481		return (0);
7482	if (inodedep->id_state & ONDEPLIST)
7483		LIST_REMOVE(inodedep, id_deps);
7484	LIST_REMOVE(inodedep, id_hash);
7485	WORKITEM_FREE(inodedep, D_INODEDEP);
7486	return (1);
7487}
7488
7489/*
7490 * Free the block referenced by a freework structure.  The parent freeblks
7491 * structure is released and completed when the final cg bitmap reaches
7492 * the disk.  This routine may be freeing a jnewblk which never made it to
7493 * disk in which case we do not have to wait as the operation is undone
7494 * in memory immediately.
7495 */
7496static void
7497freework_freeblock(freework)
7498	struct freework *freework;
7499{
7500	struct freeblks *freeblks;
7501	struct jnewblk *jnewblk;
7502	struct ufsmount *ump;
7503	struct workhead wkhd;
7504	struct fs *fs;
7505	int bsize;
7506	int needj;
7507
7508	rw_assert(&lk, RA_WLOCKED);
7509	/*
7510	 * Handle partial truncate separately.
7511	 */
7512	if (freework->fw_indir) {
7513		complete_trunc_indir(freework);
7514		return;
7515	}
7516	freeblks = freework->fw_freeblks;
7517	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7518	fs = ump->um_fs;
7519	needj = MOUNTEDSUJ(freeblks->fb_list.wk_mp) != 0;
7520	bsize = lfragtosize(fs, freework->fw_frags);
7521	LIST_INIT(&wkhd);
7522	/*
7523	 * indirblk_insert() clears DEPCOMPLETE if the block must live on
7524	 * the indirblk hashtable; this prevents premature freeing.
7525	 */
7526	freework->fw_state |= DEPCOMPLETE;
7527	/*
7528	 * SUJ needs to wait for the segment referencing freed indirect
7529	 * blocks to expire so that we know the checker will not confuse
7530	 * a re-allocated indirect block with its old contents.
7531	 */
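	/*
	 * The fw_lbn <= -NDADDR test below identifies an indirect
	 * block, as indirect blocks are addressed with negative
	 * logical block numbers.
	 */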
7532	if (needj && freework->fw_lbn <= -NDADDR)
7533		indirblk_insert(freework);
7534	/*
7535	 * If we are canceling an existing jnewblk pass it to the free
7536	 * routine, otherwise pass the freeblk which will ultimately
7537	 * release the freeblks.  If we're not journaling, we can just
7538	 * free the freeblks immediately.
7539	 */
7540	jnewblk = freework->fw_jnewblk;
7541	if (jnewblk != NULL) {
7542		cancel_jnewblk(jnewblk, &wkhd);
7543		needj = 0;
7544	} else if (needj) {
7545		freework->fw_state |= DELAYEDFREE;
7546		freeblks->fb_cgwait++;
7547		WORKLIST_INSERT(&wkhd, &freework->fw_list);
7548	}
7549	FREE_LOCK(&lk);
7550	freeblks_free(ump, freeblks, btodb(bsize));
7551	CTR4(KTR_SUJ,
7552	    "freework_freeblock: ino %d blkno %jd lbn %jd size %ld",
7553	    freeblks->fb_inum, freework->fw_blkno, freework->fw_lbn, bsize);
7554	ffs_blkfree(ump, fs, freeblks->fb_devvp, freework->fw_blkno, bsize,
7555	    freeblks->fb_inum, freeblks->fb_vtype, &wkhd);
7556	ACQUIRE_LOCK(&lk);
7557	/*
7558	 * The jnewblk will be discarded and the bits in the map will
7559	 * never make it to disk.  We can immediately free the freework.
7560	 */
7561	if (needj == 0)
7562		handle_written_freework(freework);
7563}
7564
7565/*
7566 * We enqueue freework items that need processing back on the freeblks and
7567 * add the freeblks to the worklist.  This makes it easier to find all work
7568 * required to flush a truncation in process_truncates().
7569 */
7570static void
7571freework_enqueue(freework)
7572	struct freework *freework;
7573{
7574	struct freeblks *freeblks;
7575
7576	freeblks = freework->fw_freeblks;
7577	if ((freework->fw_state & INPROGRESS) == 0)
7578		WORKLIST_INSERT(&freeblks->fb_freeworkhd, &freework->fw_list);
7579	if ((freeblks->fb_state &
7580	    (ONWORKLIST | INPROGRESS | ALLCOMPLETE)) == ALLCOMPLETE &&
7581	    LIST_EMPTY(&freeblks->fb_jblkdephd))
7582		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7583}
7584
7585/*
7586 * Start, continue, or finish the process of freeing an indirect block tree.
7587 * The free operation may be paused at any point with fw_off containing the
7588 * offset to restart from.  This enables us to implement some flow control
7589 * for large truncates which may fan out and generate a huge number of
7590 * dependencies.
7591 */
7592static void
7593handle_workitem_indirblk(freework)
7594	struct freework *freework;
7595{
7596	struct freeblks *freeblks;
7597	struct ufsmount *ump;
7598	struct fs *fs;
7599
7600	freeblks = freework->fw_freeblks;
7601	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7602	fs = ump->um_fs;
7603	if (freework->fw_state & DEPCOMPLETE) {
7604		handle_written_freework(freework);
7605		return;
7606	}
7607	if (freework->fw_off == NINDIR(fs)) {
7608		freework_freeblock(freework);
7609		return;
7610	}
7611	freework->fw_state |= INPROGRESS;
7612	FREE_LOCK(&lk);
7613	indir_trunc(freework, fsbtodb(fs, freework->fw_blkno),
7614	    freework->fw_lbn);
7615	ACQUIRE_LOCK(&lk);
7616}
7617
7618/*
7619 * Called when a freework structure attached to a cg buf is written.  The
7620 * ref on either the parent or the freeblks structure is released and
7621 * the freeblks is added back to the worklist if there is more work to do.
7622 */
7623static void
7624handle_written_freework(freework)
7625	struct freework *freework;
7626{
7627	struct freeblks *freeblks;
7628	struct freework *parent;
7629
7630	freeblks = freework->fw_freeblks;
7631	parent = freework->fw_parent;
7632	if (freework->fw_state & DELAYEDFREE)
7633		freeblks->fb_cgwait--;
7634	freework->fw_state |= COMPLETE;
7635	if ((freework->fw_state & ALLCOMPLETE) == ALLCOMPLETE)
7636		WORKITEM_FREE(freework, D_FREEWORK);
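	/*
	 * Note that freework may have been freed just above; freeblks
	 * and parent were cached beforehand and remain valid here.
	 */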
7637	if (parent) {
7638		if (--parent->fw_ref == 0)
7639			freework_enqueue(parent);
7640		return;
7641	}
7642	if (--freeblks->fb_ref != 0)
7643		return;
7644	if ((freeblks->fb_state & (ALLCOMPLETE | ONWORKLIST | INPROGRESS)) ==
7645	    ALLCOMPLETE && LIST_EMPTY(&freeblks->fb_jblkdephd))
7646		add_to_worklist(&freeblks->fb_list, WK_NODELAY);
7647}
7648
7649/*
7650 * This workitem routine performs the block de-allocation.
7651 * The workitem is added to the pending list after the updated
7652 * inode block has been written to disk.  Checks regarding the
7653 * number of blocks de-allocated (compared to the number of
7654 * blocks allocated for the file) are begun here and completed
7655 * in handle_complete_freeblocks().
7656 */
7657static int
7658handle_workitem_freeblocks(freeblks, flags)
7659	struct freeblks *freeblks;
7660	int flags;
7661{
7662	struct freework *freework;
7663	struct newblk *newblk;
7664	struct allocindir *aip;
7665	struct ufsmount *ump;
7666	struct worklist *wk;
7667
7668	KASSERT(LIST_EMPTY(&freeblks->fb_jblkdephd),
7669	    ("handle_workitem_freeblocks: Journal entries not written."));
7670	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7671	ACQUIRE_LOCK(&lk);
7672	while ((wk = LIST_FIRST(&freeblks->fb_freeworkhd)) != NULL) {
7673		WORKLIST_REMOVE(wk);
7674		switch (wk->wk_type) {
7675		case D_DIRREM:
7676			wk->wk_state |= COMPLETE;
7677			add_to_worklist(wk, 0);
7678			continue;
7679
7680		case D_ALLOCDIRECT:
7681			free_newblk(WK_NEWBLK(wk));
7682			continue;
7683
7684		case D_ALLOCINDIR:
7685			aip = WK_ALLOCINDIR(wk);
7686			freework = NULL;
7687			if (aip->ai_state & DELAYEDFREE) {
7688				FREE_LOCK(&lk);
7689				freework = newfreework(ump, freeblks, NULL,
7690				    aip->ai_lbn, aip->ai_newblkno,
7691				    ump->um_fs->fs_frag, 0, 0);
7692				ACQUIRE_LOCK(&lk);
7693			}
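			/*
			 * If the allocindir was not DELAYEDFREE its
			 * journal entry must already have been resolved,
			 * so nb_jnewblk should be NULL below and the
			 * NULL freework is never dereferenced.
			 */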
7694			newblk = WK_NEWBLK(wk);
7695			if (newblk->nb_jnewblk) {
7696				freework->fw_jnewblk = newblk->nb_jnewblk;
7697				newblk->nb_jnewblk->jn_dep = &freework->fw_list;
7698				newblk->nb_jnewblk = NULL;
7699			}
7700			free_newblk(newblk);
7701			continue;
7702
7703		case D_FREEWORK:
7704			freework = WK_FREEWORK(wk);
7705			if (freework->fw_lbn <= -NDADDR)
7706				handle_workitem_indirblk(freework);
7707			else
7708				freework_freeblock(freework);
7709			continue;
7710		default:
7711			panic("handle_workitem_freeblocks: Unknown type %s",
7712			    TYPENAME(wk->wk_type));
7713		}
7714	}
7715	if (freeblks->fb_ref != 0) {
7716		freeblks->fb_state &= ~INPROGRESS;
7717		wake_worklist(&freeblks->fb_list);
7718		freeblks = NULL;
7719	}
7720	FREE_LOCK(&lk);
7721	if (freeblks)
7722		return handle_complete_freeblocks(freeblks, flags);
7723	return (0);
7724}
7725
7726/*
7727 * Handle completion of block free via truncate.  This allows
7728 * fs_pendingblocks to track the actual free block count more
7729 * closely than if we only updated it at the end.  We must be
7730 * careful to handle cases where the block count on free was incorrect.
7731 */
7732static void
7733freeblks_free(ump, freeblks, blocks)
7734	struct ufsmount *ump;
7735	struct freeblks *freeblks;
7736	int blocks;
7737{
7738	struct fs *fs;
7739	ufs2_daddr_t remain;
7740
7741	UFS_LOCK(ump);
7742	remain = -freeblks->fb_chkcnt;
7743	freeblks->fb_chkcnt += blocks;
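	/*
	 * fb_chkcnt was primed with the negative of the block count we
	 * expect to release, so it climbs toward zero as blocks are
	 * freed.  For example, with 100 blocks outstanding (fb_chkcnt
	 * of -100), freeing 8 blocks leaves -92 and removes 8 from
	 * fs_pendingblocks below.
	 */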
7744	if (remain > 0) {
7745		if (remain < blocks)
7746			blocks = remain;
7747		fs = ump->um_fs;
7748		fs->fs_pendingblocks -= blocks;
7749	}
7750	UFS_UNLOCK(ump);
7751}
7752
7753/*
7754 * Once all of the freework workitems are complete we can retire the
7755 * freeblocks dependency and any journal work awaiting completion.  This
7756 * can not be called until all other dependencies are stable on disk.
7757 */
7758static int
7759handle_complete_freeblocks(freeblks, flags)
7760	struct freeblks *freeblks;
7761	int flags;
7762{
7763	struct inodedep *inodedep;
7764	struct inode *ip;
7765	struct vnode *vp;
7766	struct fs *fs;
7767	struct ufsmount *ump;
7768	ufs2_daddr_t spare;
7769
7770	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7771	fs = ump->um_fs;
7772	flags = LK_EXCLUSIVE | flags;
7773	spare = freeblks->fb_chkcnt;
7774
7775	/*
7776	 * If we did not release the expected number of blocks we may have
7777	 * to adjust the inode block count here.  Only do so if it wasn't
7778	 * a truncation to zero and the modrev still matches.
7779	 */
7780	if (spare && freeblks->fb_len != 0) {
7781		if (ffs_vgetf(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7782		    flags, &vp, FFSV_FORCEINSMQ) != 0)
7783			return (EBUSY);
7784		ip = VTOI(vp);
7785		if (DIP(ip, i_modrev) == freeblks->fb_modrev) {
7786			DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - spare);
7787			ip->i_flag |= IN_CHANGE;
7788			/*
7789			 * We must wait so this happens before the
7790			 * journal is reclaimed.
7791			 */
7792			ffs_update(vp, 1);
7793		}
7794		vput(vp);
7795	}
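	/*
	 * Here spare is the signed difference between the blocks the
	 * freeblks expected to release and those actually released; a
	 * negative value means fs_pendingblocks still carries blocks
	 * that were never freed and is corrected below.
	 */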
7796	if (spare < 0) {
7797		UFS_LOCK(ump);
7798		fs->fs_pendingblocks += spare;
7799		UFS_UNLOCK(ump);
7800	}
7801#ifdef QUOTA
7802	/* Handle spare. */
7803	if (spare)
7804		quotaadj(freeblks->fb_quota, ump, -spare);
7805	quotarele(freeblks->fb_quota);
7806#endif
7807	ACQUIRE_LOCK(&lk);
7808	if (freeblks->fb_state & ONDEPLIST) {
7809		inodedep_lookup(freeblks->fb_list.wk_mp, freeblks->fb_inum,
7810		    0, &inodedep);
7811		TAILQ_REMOVE(&inodedep->id_freeblklst, freeblks, fb_next);
7812		freeblks->fb_state &= ~ONDEPLIST;
7813		if (TAILQ_EMPTY(&inodedep->id_freeblklst))
7814			free_inodedep(inodedep);
7815	}
7816	/*
7817	 * All of the freeblock deps must be complete prior to this call
7818	 * so it's now safe to complete earlier outstanding journal entries.
7819	 */
7820	handle_jwork(&freeblks->fb_jwork);
7821	WORKITEM_FREE(freeblks, D_FREEBLKS);
7822	FREE_LOCK(&lk);
7823	return (0);
7824}
7825
7826/*
7827 * Release blocks associated with the freeblks and stored in the indirect
7828 * block dbn. If level is greater than SINGLE, the block is an indirect block
7829 * and recursive calls to indir_trunc must be used to cleanse other indirect
7830 * blocks.
7831 *
7832 * This handles partial and complete truncation of blocks.  Partial is noted
7833 * with goingaway == 0.  In this case the freework is completed after the
7834 * zero'd indirects are written to disk.  For full truncation the freework
7835 * is completed after the block is freed.
7836 */
7837static void
7838indir_trunc(freework, dbn, lbn)
7839	struct freework *freework;
7840	ufs2_daddr_t dbn;
7841	ufs_lbn_t lbn;
7842{
7843	struct freework *nfreework;
7844	struct workhead wkhd;
7845	struct freeblks *freeblks;
7846	struct buf *bp;
7847	struct fs *fs;
7848	struct indirdep *indirdep;
7849	struct ufsmount *ump;
7850	ufs1_daddr_t *bap1 = NULL;
7851	ufs2_daddr_t nb, nnb, *bap2 = NULL;
7852	ufs_lbn_t lbnadd, nlbn;
7853	int i, nblocks, ufs1fmt;
7854	int freedblocks;
7855	int goingaway;
7856	int freedeps;
7857	int needj;
7858	int level;
7859	int cnt;
7860
7861	freeblks = freework->fw_freeblks;
7862	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
7863	fs = ump->um_fs;
7864	/*
7865	 * Get buffer of block pointers to be freed.  There are three cases:
7866	 *
7867	 * 1) Partial truncate caches the indirdep pointer in the freework
7868	 *    which gives us a link back to the saved bp that holds the
7869	 *    pointers we want to clear.  When this completes, the zeroed
7870	 *    pointers are written to the real copy.
7871	 * 2) The indirect is being completely truncated, cancel_indirdep()
7872	 *    eliminated the real copy and placed the indirdep on the saved
7873	 *    copy.  The indirdep and buf are discarded when this completes.
7874	 * 3) The indirect was not in memory, we read a copy off of the disk
7875	 *    using the devvp and drop and invalidate the buffer when we're
7876	 *    done.
7877	 */
7878	goingaway = 1;
7879	indirdep = NULL;
7880	if (freework->fw_indir != NULL) {
7881		goingaway = 0;
7882		indirdep = freework->fw_indir;
7883		bp = indirdep->ir_savebp;
7884		if (bp == NULL || bp->b_blkno != dbn)
7885			panic("indir_trunc: Bad saved buf %p blkno %jd",
7886			    bp, (intmax_t)dbn);
7887	} else if ((bp = incore(&freeblks->fb_devvp->v_bufobj, dbn)) != NULL) {
7888		/*
7889		 * The lock prevents the buf dep list from changing and
7890		 * indirects on devvp should only ever have one dependency.
7891		 */
7892		indirdep = WK_INDIRDEP(LIST_FIRST(&bp->b_dep));
7893		if (indirdep == NULL || (indirdep->ir_state & GOINGAWAY) == 0)
7894			panic("indir_trunc: Bad indirdep %p from buf %p",
7895			    indirdep, bp);
7896	} else if (bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
7897	    NOCRED, &bp) != 0) {
7898		brelse(bp);
7899		return;
7900	}
7901	ACQUIRE_LOCK(&lk);
7902	/* Protects against a race with complete_trunc_indir(). */
7903	freework->fw_state &= ~INPROGRESS;
7904	/*
7905	 * If we have an indirdep we need to enforce the truncation order
7906	 * and discard it when it is complete.
7907	 */
7908	if (indirdep) {
7909		if (freework != TAILQ_FIRST(&indirdep->ir_trunc) &&
7910		    !TAILQ_EMPTY(&indirdep->ir_trunc)) {
7911			/*
7912			 * Add the complete truncate to the list on the
7913			 * indirdep to enforce in-order processing.
7914			 */
7915			if (freework->fw_indir == NULL)
7916				TAILQ_INSERT_TAIL(&indirdep->ir_trunc,
7917				    freework, fw_next);
7918			FREE_LOCK(&lk);
7919			return;
7920		}
7921		/*
7922		 * If we're goingaway, free the indirdep.  Otherwise it will
7923		 * linger until the write completes.
7924		 */
7925		if (goingaway) {
7926			free_indirdep(indirdep);
7927			ump->um_numindirdeps -= 1;
7928		}
7929	}
7930	FREE_LOCK(&lk);
7931	/* Initialize pointers depending on block size. */
7932	if (ump->um_fstype == UFS1) {
7933		bap1 = (ufs1_daddr_t *)bp->b_data;
7934		nb = bap1[freework->fw_off];
7935		ufs1fmt = 1;
7936	} else {
7937		bap2 = (ufs2_daddr_t *)bp->b_data;
7938		nb = bap2[freework->fw_off];
7939		ufs1fmt = 0;
7940	}
7941	level = lbn_level(lbn);
7942	needj = MOUNTEDSUJ(UFSTOVFS(ump)) != 0;
7943	lbnadd = lbn_offset(fs, level);
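	/*
	 * lbnadd is the span, in logical blocks, of one pointer slot at
	 * this indirect level: NINDIR(fs) raised to the level.  On UFS2
	 * with 32K blocks (NINDIR of 4096), a slot in a second-level
	 * indirect spans 4096 * 4096 logical blocks.
	 */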
7944	nblocks = btodb(fs->fs_bsize);
7945	nfreework = freework;
7946	freedeps = 0;
7947	cnt = 0;
7948	/*
7949	 * Reclaim blocks.  Traverses into nested indirect levels and
7950	 * arranges for the current level to be freed when subordinates
7951	 * are free when journaling.
7952	 */
7953	for (i = freework->fw_off; i < NINDIR(fs); i++, nb = nnb) {
7954		if (i != NINDIR(fs) - 1) {
7955			if (ufs1fmt)
7956				nnb = bap1[i+1];
7957			else
7958				nnb = bap2[i+1];
7959		} else
7960			nnb = 0;
7961		if (nb == 0)
7962			continue;
7963		cnt++;
7964		if (level != 0) {
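			/*
			 * This slot names another indirect; child i
			 * lies i * lbnadd logical blocks further down
			 * the (negative) lbn range from this block.
			 */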
7965			nlbn = (lbn + 1) - (i * lbnadd);
7966			if (needj != 0) {
7967				nfreework = newfreework(ump, freeblks, freework,
7968				    nlbn, nb, fs->fs_frag, 0, 0);
7969				freedeps++;
7970			}
7971			indir_trunc(nfreework, fsbtodb(fs, nb), nlbn);
7972		} else {
7973			struct freedep *freedep;
7974
7975			/*
7976			 * Attempt to aggregate freedep dependencies for
7977			 * all blocks being released to the same CG.
7978			 */
7979			LIST_INIT(&wkhd);
7980			if (needj != 0 &&
7981			    (nnb == 0 || (dtog(fs, nb) != dtog(fs, nnb)))) {
7982				freedep = newfreedep(freework);
7983				WORKLIST_INSERT_UNLOCKED(&wkhd,
7984				    &freedep->fd_list);
7985				freedeps++;
7986			}
7987			CTR3(KTR_SUJ,
7988			    "indir_trunc: ino %d blkno %jd size %ld",
7989			    freeblks->fb_inum, nb, fs->fs_bsize);
7990			ffs_blkfree(ump, fs, freeblks->fb_devvp, nb,
7991			    fs->fs_bsize, freeblks->fb_inum,
7992			    freeblks->fb_vtype, &wkhd);
7993		}
7994	}
7995	if (goingaway) {
7996		bp->b_flags |= B_INVAL | B_NOCACHE;
7997		brelse(bp);
7998	}
7999	freedblocks = 0;
8000	if (level == 0)
8001		freedblocks = (nblocks * cnt);
8002	if (needj == 0)
8003		freedblocks += nblocks;
8004	freeblks_free(ump, freeblks, freedblocks);
8005	/*
8006	 * If we are journaling set up the ref counts and offset so this
8007	 * indirect can be completed when its children are free.
8008	 */
8009	if (needj) {
8010		ACQUIRE_LOCK(&lk);
8011		freework->fw_off = i;
8012		freework->fw_ref += freedeps;
8013		freework->fw_ref -= NINDIR(fs) + 1;
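		/*
		 * fw_ref was primed with NINDIR(fs) + 1 references when
		 * the freework was created, so children completing
		 * during the scan above could not drop it to zero
		 * prematurely.  Swapping that bias for freedeps leaves
		 * one reference per child dependency still outstanding.
		 */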
8014		if (level == 0)
8015			freeblks->fb_cgwait += freedeps;
8016		if (freework->fw_ref == 0)
8017			freework_freeblock(freework);
8018		FREE_LOCK(&lk);
8019		return;
8020	}
8021	/*
8022	 * If we're not journaling we can free the indirect now.
8023	 */
8024	dbn = dbtofsb(fs, dbn);
8025	CTR3(KTR_SUJ,
8026	    "indir_trunc 2: ino %d blkno %jd size %ld",
8027	    freeblks->fb_inum, dbn, fs->fs_bsize);
8028	ffs_blkfree(ump, fs, freeblks->fb_devvp, dbn, fs->fs_bsize,
8029	    freeblks->fb_inum, freeblks->fb_vtype, NULL);
8030	/* Non-SUJ softdep does single-threaded truncations. */
8031	if (freework->fw_blkno == dbn) {
8032		freework->fw_state |= ALLCOMPLETE;
8033		ACQUIRE_LOCK(&lk);
8034		handle_written_freework(freework);
8035		FREE_LOCK(&lk);
8036	}
8037	return;
8038}
8039
8040/*
8041 * Cancel an allocindir when it is removed via truncation.  When bp is not
8042 * NULL the indirect never appeared on disk and is scheduled to be freed
8043 * independently of the indir so we can more easily track journal work.
8044 */
8045static void
8046cancel_allocindir(aip, bp, freeblks, trunc)
8047	struct allocindir *aip;
8048	struct buf *bp;
8049	struct freeblks *freeblks;
8050	int trunc;
8051{
8052	struct indirdep *indirdep;
8053	struct freefrag *freefrag;
8054	struct newblk *newblk;
8055
8056	newblk = (struct newblk *)aip;
8057	LIST_REMOVE(aip, ai_next);
8058	/*
8059	 * We must eliminate the pointer in bp if it must be freed on its
8060	 * own due to partial truncate or pending journal work.
8061	 */
8062	if (bp && (trunc || newblk->nb_jnewblk)) {
8063		/*
8064		 * Clear the pointer and mark the aip to be freed
8065		 * directly if it never existed on disk.
8066		 */
8067		aip->ai_state |= DELAYEDFREE;
8068		indirdep = aip->ai_indirdep;
8069		if (indirdep->ir_state & UFS1FMT)
8070			((ufs1_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8071		else
8072			((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = 0;
8073	}
8074	/*
8075	 * When truncating, the previous pointer will be freed via the
8076	 * saved bp.  Eliminate the freefrag, which would duplicate the free.
8077	 */
8078	if (trunc && (freefrag = newblk->nb_freefrag) != NULL) {
8079		newblk->nb_freefrag = NULL;
8080		if (freefrag->ff_jdep)
8081			cancel_jfreefrag(
8082			    WK_JFREEFRAG(freefrag->ff_jdep));
8083		jwork_move(&freeblks->fb_jwork, &freefrag->ff_jwork);
8084		WORKITEM_FREE(freefrag, D_FREEFRAG);
8085	}
8086	/*
8087	 * If the journal hasn't been written the jnewblk must be passed
8088	 * to the call to ffs_blkfree that reclaims the space.  We accomplish
8089	 * this by leaving the journal dependency on the newblk to be freed
8090	 * when a freework is created in handle_workitem_freeblocks().
8091	 */
8092	cancel_newblk(newblk, NULL, &freeblks->fb_jwork);
8093	WORKLIST_INSERT(&freeblks->fb_freeworkhd, &newblk->nb_list);
8094}
8095
8096/*
8097 * Create the mkdir dependencies for . and .. in a new directory.  Link them
8098 * in to a newdirblk so any subsequent additions are tracked properly.  The
8099 * caller is responsible for adding the mkdir1 dependency to the journal
8100 * and updating id_mkdiradd.  This function returns with lk held.
8101 */
8102static struct mkdir *
8103setup_newdir(dap, newinum, dinum, newdirbp, mkdirp)
8104	struct diradd *dap;
8105	ino_t newinum;
8106	ino_t dinum;
8107	struct buf *newdirbp;
8108	struct mkdir **mkdirp;
8109{
8110	struct newblk *newblk;
8111	struct pagedep *pagedep;
8112	struct inodedep *inodedep;
8113	struct newdirblk *newdirblk = NULL;
8114	struct mkdir *mkdir1, *mkdir2;
8115	struct worklist *wk;
8116	struct jaddref *jaddref;
8117	struct mount *mp;
8118
8119	mp = dap->da_list.wk_mp;
8120	newdirblk = malloc(sizeof(struct newdirblk), M_NEWDIRBLK,
8121	    M_SOFTDEP_FLAGS);
8122	workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8123	LIST_INIT(&newdirblk->db_mkdir);
8124	mkdir1 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8125	workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
8126	mkdir1->md_state = ATTACHED | MKDIR_BODY;
8127	mkdir1->md_diradd = dap;
8128	mkdir1->md_jaddref = NULL;
8129	mkdir2 = malloc(sizeof(struct mkdir), M_MKDIR, M_SOFTDEP_FLAGS);
8130	workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
8131	mkdir2->md_state = ATTACHED | MKDIR_PARENT;
8132	mkdir2->md_diradd = dap;
8133	mkdir2->md_jaddref = NULL;
8134	if (MOUNTEDSUJ(mp) == 0) {
8135		mkdir1->md_state |= DEPCOMPLETE;
8136		mkdir2->md_state |= DEPCOMPLETE;
8137	}
8138	/*
8139	 * Dependency on "." and ".." being written to disk.
8140	 */
8141	mkdir1->md_buf = newdirbp;
8142	ACQUIRE_LOCK(&lk);
8143	LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
8144	/*
8145	 * We must link the pagedep, allocdirect, and newdirblk for
8146	 * the initial file page so the pointer to the new directory
8147	 * is not written until the directory contents are live and
8148	 * any subsequent additions are not marked live until the
8149	 * block is reachable via the inode.
8150	 */
8151	if (pagedep_lookup(mp, newdirbp, newinum, 0, 0, &pagedep) == 0)
8152		panic("setup_newdir: lost pagedep");
8153	LIST_FOREACH(wk, &newdirbp->b_dep, wk_list)
8154		if (wk->wk_type == D_ALLOCDIRECT)
8155			break;
8156	if (wk == NULL)
8157		panic("setup_newdir: lost allocdirect");
8158	if (pagedep->pd_state & NEWBLOCK)
8159		panic("setup_newdir: NEWBLOCK already set");
8160	newblk = WK_NEWBLK(wk);
8161	pagedep->pd_state |= NEWBLOCK;
8162	pagedep->pd_newdirblk = newdirblk;
8163	newdirblk->db_pagedep = pagedep;
8164	WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8165	WORKLIST_INSERT(&newdirblk->db_mkdir, &mkdir1->md_list);
8166	/*
8167	 * Look up the inodedep for the parent directory so that we
8168	 * can link mkdir2 into the pending dotdot jaddref or
8169	 * the inode write if there is none.  If the inode is
8170	 * ALLCOMPLETE and no jaddref is present all dependencies have
8171	 * been satisfied and mkdir2 can be freed.
8172	 */
8173	inodedep_lookup(mp, dinum, 0, &inodedep);
8174	if (MOUNTEDSUJ(mp)) {
8175		if (inodedep == NULL)
8176			panic("setup_newdir: Lost parent.");
8177		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8178		    inoreflst);
8179		KASSERT(jaddref != NULL && jaddref->ja_parent == newinum &&
8180		    (jaddref->ja_state & MKDIR_PARENT),
8181		    ("setup_newdir: bad dotdot jaddref %p", jaddref));
8182		LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8183		mkdir2->md_jaddref = jaddref;
8184		jaddref->ja_mkdir = mkdir2;
8185	} else if (inodedep == NULL ||
8186	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
8187		dap->da_state &= ~MKDIR_PARENT;
8188		WORKITEM_FREE(mkdir2, D_MKDIR);
8189		mkdir2 = NULL;
8190	} else {
8191		LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
8192		WORKLIST_INSERT(&inodedep->id_bufwait, &mkdir2->md_list);
8193	}
8194	*mkdirp = mkdir2;
8195
8196	return (mkdir1);
8197}
8198
8199/*
8200 * Directory entry addition dependencies.
8201 *
8202 * When adding a new directory entry, the inode (with its incremented link
8203 * count) must be written to disk before the directory entry's pointer to it.
8204 * Also, if the inode is newly allocated, the corresponding freemap must be
8205 * updated (on disk) before the directory entry's pointer. These requirements
8206 * are met via undo/redo on the directory entry's pointer, which consists
8207 * simply of the inode number.
8208 *
8209 * As directory entries are added and deleted, the free space within a
8210 * directory block can become fragmented.  The ufs filesystem will compact
8211 * a fragmented directory block to make space for a new entry. When this
8212 * occurs, the offsets of previously added entries change. Any "diradd"
8213 * dependency structures corresponding to these entries must be updated with
8214 * the new offsets.
8215 */
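/*
 * For example, when an entry for inode 1234 is added, the on-disk copy
 * of the entry is rolled back to d_ino == 0 (the undo) until the
 * initialized inode, and any freemap update, have been written; only
 * then is the inode number allowed out to disk (the redo).
 */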
8216
8217/*
8218 * This routine is called after the in-memory inode's link
8219 * count has been incremented, but before the directory entry's
8220 * pointer to the inode has been set.
8221 */
8222int
8223softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
8224	struct buf *bp;		/* buffer containing directory block */
8225	struct inode *dp;	/* inode for directory */
8226	off_t diroffset;	/* offset of new entry in directory */
8227	ino_t newinum;		/* inode referenced by new directory entry */
8228	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
8229	int isnewblk;		/* entry is in a newly allocated block */
8230{
8231	int offset;		/* offset of new entry within directory block */
8232	ufs_lbn_t lbn;		/* block in directory containing new entry */
8233	struct fs *fs;
8234	struct diradd *dap;
8235	struct newblk *newblk;
8236	struct pagedep *pagedep;
8237	struct inodedep *inodedep;
8238	struct newdirblk *newdirblk = NULL;
8239	struct mkdir *mkdir1, *mkdir2;
8240	struct jaddref *jaddref;
8241	struct mount *mp;
8242	int isindir;
8243
8244	/*
8245	 * Whiteouts have no dependencies.
8246	 */
8247	if (newinum == WINO) {
8248		if (newdirbp != NULL)
8249			bdwrite(newdirbp);
8250		return (0);
8251	}
8252	jaddref = NULL;
8253	mkdir1 = mkdir2 = NULL;
8254	mp = UFSTOVFS(dp->i_ump);
8255	fs = dp->i_fs;
8256	lbn = lblkno(fs, diroffset);
8257	offset = blkoff(fs, diroffset);
8258	dap = malloc(sizeof(struct diradd), M_DIRADD,
8259		M_SOFTDEP_FLAGS|M_ZERO);
8260	workitem_alloc(&dap->da_list, D_DIRADD, mp);
8261	dap->da_offset = offset;
8262	dap->da_newinum = newinum;
8263	dap->da_state = ATTACHED;
8264	LIST_INIT(&dap->da_jwork);
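	/*
	 * Decide whether this entry begins a newly allocated directory
	 * block.  Blocks reached through indirects are always full fs
	 * blocks, so test blkoff; direct blocks may be allocated a
	 * fragment at a time, so test fragoff.
	 */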
8265	isindir = bp->b_lblkno >= NDADDR;
8266	if (isnewblk &&
8267	    (isindir ? blkoff(fs, diroffset) : fragoff(fs, diroffset)) == 0) {
8268		newdirblk = malloc(sizeof(struct newdirblk),
8269		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
8270		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
8271		LIST_INIT(&newdirblk->db_mkdir);
8272	}
8273	/*
8274	 * If we're creating a new directory setup the dependencies and set
8275	 * the dap state to wait for them.  Otherwise it's COMPLETE and
8276	 * we can move on.
8277	 */
8278	if (newdirbp == NULL) {
8279		dap->da_state |= DEPCOMPLETE;
8280		ACQUIRE_LOCK(&lk);
8281	} else {
8282		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
8283		mkdir1 = setup_newdir(dap, newinum, dp->i_number, newdirbp,
8284		    &mkdir2);
8285	}
8286	/*
8287	 * Link into parent directory pagedep to await its being written.
8288	 */
8289	pagedep_lookup(mp, bp, dp->i_number, lbn, DEPALLOC, &pagedep);
8290#ifdef DEBUG
8291	if (diradd_lookup(pagedep, offset) != NULL)
8292		panic("softdep_setup_directory_add: %p already at off %d",
8293		    diradd_lookup(pagedep, offset), offset);
8294#endif
8295	dap->da_pagedep = pagedep;
8296	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
8297	    da_pdlist);
8298	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
8299	/*
8300	 * If we're journaling, link the diradd into the jaddref so it
8301	 * may be completed after the journal entry is written.  Otherwise,
8302	 * link the diradd into its inodedep.  If the inode is not yet
8303	 * written place it on the bufwait list, otherwise do the post-inode
8304	 * write processing to put it on the id_pendinghd list.
8305	 */
8306	if (MOUNTEDSUJ(mp)) {
8307		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
8308		    inoreflst);
8309		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
8310		    ("softdep_setup_directory_add: bad jaddref %p", jaddref));
8311		jaddref->ja_diroff = diroffset;
8312		jaddref->ja_diradd = dap;
8313		add_to_journal(&jaddref->ja_list);
8314	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
8315		diradd_inode_written(dap, inodedep);
8316	else
8317		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
8318	/*
8319	 * Add the journal entries for . and .. links now that the primary
8320	 * link is written.
8321	 */
8322	if (mkdir1 != NULL && MOUNTEDSUJ(mp)) {
8323		jaddref = (struct jaddref *)TAILQ_PREV(&jaddref->ja_ref,
8324		    inoreflst, if_deps);
8325		KASSERT(jaddref != NULL &&
8326		    jaddref->ja_ino == jaddref->ja_parent &&
8327		    (jaddref->ja_state & MKDIR_BODY),
8328		    ("softdep_setup_directory_add: bad dot jaddref %p",
8329		    jaddref));
8330		mkdir1->md_jaddref = jaddref;
8331		jaddref->ja_mkdir = mkdir1;
8332		/*
8333		 * It is important that the dotdot journal entry
8334		 * is added prior to the dot entry since dot writes
8335		 * both the dot and dotdot links.  These both must
8336		 * be added after the primary link for the journal
8337		 * to remain consistent.
8338		 */
8339		add_to_journal(&mkdir2->md_jaddref->ja_list);
8340		add_to_journal(&jaddref->ja_list);
8341	}
8342	/*
8343	 * If we are adding a new directory remember this diradd so that if
8344	 * we rename it we can keep the dot and dotdot dependencies.  If
8345	 * we are adding a new name for an inode that has a mkdiradd we
8346	 * must be in rename and we have to move the dot and dotdot
8347	 * dependencies to this new name.  The old name is being orphaned
8348	 * soon.
8349	 */
8350	if (mkdir1 != NULL) {
8351		if (inodedep->id_mkdiradd != NULL)
8352			panic("softdep_setup_directory_add: Existing mkdir");
8353		inodedep->id_mkdiradd = dap;
8354	} else if (inodedep->id_mkdiradd)
8355		merge_diradd(inodedep, dap);
8356	if (newdirblk) {
8357		/*
8358		 * There is nothing to do if we are already tracking
8359		 * this block.
8360		 */
8361		if ((pagedep->pd_state & NEWBLOCK) != 0) {
8362			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
8363			FREE_LOCK(&lk);
8364			return (0);
8365		}
8366		if (newblk_lookup(mp, dbtofsb(fs, bp->b_blkno), 0, &newblk)
8367		    == 0)
8368			panic("softdep_setup_directory_add: lost entry");
8369		WORKLIST_INSERT(&newblk->nb_newdirblk, &newdirblk->db_list);
8370		pagedep->pd_state |= NEWBLOCK;
8371		pagedep->pd_newdirblk = newdirblk;
8372		newdirblk->db_pagedep = pagedep;
8373		FREE_LOCK(&lk);
8374		/*
8375		 * If we extended into an indirect, signal direnter to sync.
8376		 */
8377		if (isindir)
8378			return (1);
8379		return (0);
8380	}
8381	FREE_LOCK(&lk);
8382	return (0);
8383}
8384
8385/*
8386 * This procedure is called to change the offset of a directory
8387 * entry when compacting a directory block which must be owned
8388 * exclusively by the caller. Note that the actual entry movement
8389 * must be done in this procedure to ensure that no I/O completions
8390 * occur while the move is in progress.
8391 */
8392void
8393softdep_change_directoryentry_offset(bp, dp, base, oldloc, newloc, entrysize)
8394	struct buf *bp;		/* Buffer holding directory block. */
8395	struct inode *dp;	/* inode for directory */
8396	caddr_t base;		/* address of dp->i_offset */
8397	caddr_t oldloc;		/* address of old directory location */
8398	caddr_t newloc;		/* address of new directory location */
8399	int entrysize;		/* size of directory entry */
8400{
8401	int offset, oldoffset, newoffset;
8402	struct pagedep *pagedep;
8403	struct jmvref *jmvref;
8404	struct diradd *dap;
8405	struct direct *de;
8406	struct mount *mp;
8407	ufs_lbn_t lbn;
8408	int flags;
8409
8410	mp = UFSTOVFS(dp->i_ump);
8411	de = (struct direct *)oldloc;
8412	jmvref = NULL;
8413	flags = 0;
8414	/*
8415	 * Moves are always journaled as it would be too complex to
8416	 * determine if any affected adds or removes are present in the
8417	 * journal.
8418	 */
8419	if (MOUNTEDSUJ(mp)) {
8420		flags = DEPALLOC;
8421		jmvref = newjmvref(dp, de->d_ino,
8422		    dp->i_offset + (oldloc - base),
8423		    dp->i_offset + (newloc - base));
8424	}
8425	lbn = lblkno(dp->i_fs, dp->i_offset);
8426	offset = blkoff(dp->i_fs, dp->i_offset);
8427	oldoffset = offset + (oldloc - base);
8428	newoffset = offset + (newloc - base);
8429	ACQUIRE_LOCK(&lk);
8430	if (pagedep_lookup(mp, bp, dp->i_number, lbn, flags, &pagedep) == 0)
8431		goto done;
8432	dap = diradd_lookup(pagedep, oldoffset);
8433	if (dap) {
8434		dap->da_offset = newoffset;
8435		newoffset = DIRADDHASH(newoffset);
8436		oldoffset = DIRADDHASH(oldoffset);
8437		if ((dap->da_state & ALLCOMPLETE) != ALLCOMPLETE &&
8438		    newoffset != oldoffset) {
8439			LIST_REMOVE(dap, da_pdlist);
8440			LIST_INSERT_HEAD(&pagedep->pd_diraddhd[newoffset],
8441			    dap, da_pdlist);
8442		}
8443	}
8444done:
8445	if (jmvref) {
8446		jmvref->jm_pagedep = pagedep;
8447		LIST_INSERT_HEAD(&pagedep->pd_jmvrefhd, jmvref, jm_deps);
8448		add_to_journal(&jmvref->jm_list);
8449	}
8450	bcopy(oldloc, newloc, entrysize);
8451	FREE_LOCK(&lk);
8452}
8453
8454/*
8455 * Move the mkdir dependencies and journal work from one diradd to another
8456 * when renaming a directory.  The new name must depend on the mkdir deps
8457 * completing as the old name did.  Directories can only have one valid link
8458 * at a time so one must be canonical.
8459 */
8460static void
8461merge_diradd(inodedep, newdap)
8462	struct inodedep *inodedep;
8463	struct diradd *newdap;
8464{
8465	struct diradd *olddap;
8466	struct mkdir *mkdir, *nextmd;
8467	short state;
8468
8469	olddap = inodedep->id_mkdiradd;
8470	inodedep->id_mkdiradd = newdap;
8471	if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8472		newdap->da_state &= ~DEPCOMPLETE;
8473		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8474			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8475			if (mkdir->md_diradd != olddap)
8476				continue;
8477			mkdir->md_diradd = newdap;
8478			state = mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY);
8479			newdap->da_state |= state;
8480			olddap->da_state &= ~state;
8481			if ((olddap->da_state &
8482			    (MKDIR_PARENT | MKDIR_BODY)) == 0)
8483				break;
8484		}
8485		if ((olddap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8486			panic("merge_diradd: unfound ref");
8487	}
8488	/*
8489	 * Any mkdir related journal items are not safe to be freed until
8490	 * the new name is stable.
8491	 */
8492	jwork_move(&newdap->da_jwork, &olddap->da_jwork);
8493	olddap->da_state |= DEPCOMPLETE;
8494	complete_diradd(olddap);
8495}
8496
8497/*
8498 * Move the diradd to the pending list when all diradd dependencies are
8499 * complete.
8500 */
8501static void
8502complete_diradd(dap)
8503	struct diradd *dap;
8504{
8505	struct pagedep *pagedep;
8506
8507	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
8508		if (dap->da_state & DIRCHG)
8509			pagedep = dap->da_previous->dm_pagedep;
8510		else
8511			pagedep = dap->da_pagedep;
8512		LIST_REMOVE(dap, da_pdlist);
8513		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
8514	}
8515}
8516
8517/*
8518 * Cancel a diradd when a dirrem overlaps with it.  We must cancel the journal
8519 * add entries and conditionally journal the remove.
8520 */
8521static void
8522cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref)
8523	struct diradd *dap;
8524	struct dirrem *dirrem;
8525	struct jremref *jremref;
8526	struct jremref *dotremref;
8527	struct jremref *dotdotremref;
8528{
8529	struct inodedep *inodedep;
8530	struct jaddref *jaddref;
8531	struct inoref *inoref;
8532	struct mkdir *mkdir;
8533
8534	/*
8535	 * If no remove references were allocated we're on a non-journaled
8536	 * filesystem and can skip the cancel step.
8537	 */
8538	if (jremref == NULL) {
8539		free_diradd(dap, NULL);
8540		return;
8541	}
8542	/*
8543	 * Cancel the primary name and free it if it does not require
8544	 * journaling.
8545	 */
8546	if (inodedep_lookup(dap->da_list.wk_mp, dap->da_newinum,
8547	    0, &inodedep) != 0) {
8548		/* Abort the addref that references this diradd.  */
8549		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
8550			if (inoref->if_list.wk_type != D_JADDREF)
8551				continue;
8552			jaddref = (struct jaddref *)inoref;
8553			if (jaddref->ja_diradd != dap)
8554				continue;
8555			if (cancel_jaddref(jaddref, inodedep,
8556			    &dirrem->dm_jwork) == 0) {
8557				free_jremref(jremref);
8558				jremref = NULL;
8559			}
8560			break;
8561		}
8562	}
8563	/*
8564	 * Cancel subordinate names and free them if they do not require
8565	 * journaling.
8566	 */
8567	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8568		LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
8569			if (mkdir->md_diradd != dap)
8570				continue;
8571			if ((jaddref = mkdir->md_jaddref) == NULL)
8572				continue;
8573			mkdir->md_jaddref = NULL;
8574			if (mkdir->md_state & MKDIR_PARENT) {
8575				if (cancel_jaddref(jaddref, NULL,
8576				    &dirrem->dm_jwork) == 0) {
8577					free_jremref(dotdotremref);
8578					dotdotremref = NULL;
8579				}
8580			} else {
8581				if (cancel_jaddref(jaddref, inodedep,
8582				    &dirrem->dm_jwork) == 0) {
8583					free_jremref(dotremref);
8584					dotremref = NULL;
8585				}
8586			}
8587		}
8588	}
8589
8590	if (jremref)
8591		journal_jremref(dirrem, jremref, inodedep);
8592	if (dotremref)
8593		journal_jremref(dirrem, dotremref, inodedep);
8594	if (dotdotremref)
8595		journal_jremref(dirrem, dotdotremref, NULL);
8596	jwork_move(&dirrem->dm_jwork, &dap->da_jwork);
8597	free_diradd(dap, &dirrem->dm_jwork);
8598}
8599
8600/*
8601 * Free a diradd dependency structure. This routine must be called
8602 * with the soft dependency lock held.
8603 */
8604static void
8605free_diradd(dap, wkhd)
8606	struct diradd *dap;
8607	struct workhead *wkhd;
8608{
8609	struct dirrem *dirrem;
8610	struct pagedep *pagedep;
8611	struct inodedep *inodedep;
8612	struct mkdir *mkdir, *nextmd;
8613
8614	rw_assert(&lk, RA_WLOCKED);
8615	LIST_REMOVE(dap, da_pdlist);
8616	if (dap->da_state & ONWORKLIST)
8617		WORKLIST_REMOVE(&dap->da_list);
8618	if ((dap->da_state & DIRCHG) == 0) {
8619		pagedep = dap->da_pagedep;
8620	} else {
8621		dirrem = dap->da_previous;
8622		pagedep = dirrem->dm_pagedep;
8623		dirrem->dm_dirinum = pagedep->pd_ino;
8624		dirrem->dm_state |= COMPLETE;
8625		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
8626			add_to_worklist(&dirrem->dm_list, 0);
8627	}
8628	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
8629	    0, &inodedep) != 0)
8630		if (inodedep->id_mkdiradd == dap)
8631			inodedep->id_mkdiradd = NULL;
8632	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
8633		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
8634			nextmd = LIST_NEXT(mkdir, md_mkdirs);
8635			if (mkdir->md_diradd != dap)
8636				continue;
8637			dap->da_state &=
8638			    ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
8639			LIST_REMOVE(mkdir, md_mkdirs);
8640			if (mkdir->md_state & ONWORKLIST)
8641				WORKLIST_REMOVE(&mkdir->md_list);
8642			if (mkdir->md_jaddref != NULL)
8643				panic("free_diradd: Unexpected jaddref");
8644			WORKITEM_FREE(mkdir, D_MKDIR);
8645			if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
8646				break;
8647		}
8648		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
8649			panic("free_diradd: unfound ref");
8650	}
8651	if (inodedep)
8652		free_inodedep(inodedep);
8653	/*
8654	 * Free any journal segments waiting for the directory write.
8655	 */
8656	handle_jwork(&dap->da_jwork);
8657	WORKITEM_FREE(dap, D_DIRADD);
8658}
8659
8660/*
8661 * Directory entry removal dependencies.
8662 *
8663 * When removing a directory entry, the entry's inode pointer must be
8664 * zero'ed on disk before the corresponding inode's link count is decremented
8665 * (possibly freeing the inode for re-use). This dependency is handled by
8666 * updating the directory entry but delaying the inode count reduction until
8667 * after the directory block has been written to disk. After this point, the
8668 * inode count can be decremented whenever it is convenient.
8669 */
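/*
 * For example, when the entry for inode 1234 is removed, the directory
 * block is first written with the entry's d_ino zeroed; only after that
 * write completes may the on-disk link count of inode 1234 drop.
 */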
8670
8671/*
8672 * This routine should be called immediately after removing
8673 * a directory entry.  The inode's link count should not be
8674 * decremented by the calling procedure -- the soft updates
8675 * code will do this task when it is safe.
8676 */
8677void
8678softdep_setup_remove(bp, dp, ip, isrmdir)
8679	struct buf *bp;		/* buffer containing directory block */
8680	struct inode *dp;	/* inode for the directory being modified */
8681	struct inode *ip;	/* inode for directory entry being removed */
8682	int isrmdir;		/* indicates if doing RMDIR */
8683{
8684	struct dirrem *dirrem, *prevdirrem;
8685	struct inodedep *inodedep;
8686	int direct;
8687
8688	/*
8689	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.  We want
8690	 * newdirrem() to set up the full directory remove, which requires
8691	 * isrmdir > 1.
8692	 */
8693	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
8694	/*
8695	 * Add the dirrem to the inodedep's pending remove list for quick
8696	 * discovery later.
8697	 */
8698	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8699	    &inodedep) == 0)
8700		panic("softdep_setup_remove: Lost inodedep.");
8701	KASSERT((inodedep->id_state & UNLINKED) == 0, ("inode unlinked"));
8702	dirrem->dm_state |= ONDEPLIST;
8703	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
8704
8705	/*
8706	 * If the COMPLETE flag is clear, then there were no active
8707	 * entries and we want to roll back to a zeroed entry until
8708	 * the new inode is committed to disk. If the COMPLETE flag is
8709	 * set then we have deleted an entry that never made it to
8710	 * disk. If the entry we deleted resulted from a name change,
8711	 * then the old name still resides on disk. We cannot delete
8712	 * its inode (returned to us in prevdirrem) until the zeroed
8713	 * directory entry gets to disk. The new inode has never been
8714	 * referenced on the disk, so it can be deleted immediately.
8715	 */
8716	if ((dirrem->dm_state & COMPLETE) == 0) {
8717		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
8718		    dm_next);
8719		FREE_LOCK(&lk);
8720	} else {
8721		if (prevdirrem != NULL)
8722			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
8723			    prevdirrem, dm_next);
8724		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
8725		direct = LIST_EMPTY(&dirrem->dm_jremrefhd);
8726		FREE_LOCK(&lk);
8727		if (direct)
8728			handle_workitem_remove(dirrem, 0);
8729	}
8730}
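
/*
 * Sketch of the expected calling sequence (simplified; in the real
 * tree the caller is the directory code, e.g. ufs_dirremove()).  This
 * is an illustration of the contract above, not a verbatim copy.
 */
#if 0
	ip->i_effnlink--;		/* reflect the remove immediately */
	softdep_change_linkcnt(ip);	/* record the new nlink delta */
	softdep_setup_remove(bp, dp, ip, isrmdir); /* defer i_nlink-- */
	bdwrite(bp);			/* directory block written later */
#endif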
8731
8732/*
8733 * Check for an entry matching 'offset' on both the pd_diraddhd list and the
8734 * pd_pendinghd list of a pagedep.
8735 */
8736static struct diradd *
8737diradd_lookup(pagedep, offset)
8738	struct pagedep *pagedep;
8739	int offset;
8740{
8741	struct diradd *dap;
8742
8743	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
8744		if (dap->da_offset == offset)
8745			return (dap);
8746	LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
8747		if (dap->da_offset == offset)
8748			return (dap);
8749	return (NULL);
8750}
8751
8752/*
8753 * Search for a .. diradd dependency in a directory that is being removed.
8754 * If the directory was renamed to a new parent we have a diradd rather
8755 * than a mkdir for the .. entry.  We need to cancel it now before
8756 * it is found in truncate().
8757 */
8758static struct jremref *
8759cancel_diradd_dotdot(ip, dirrem, jremref)
8760	struct inode *ip;
8761	struct dirrem *dirrem;
8762	struct jremref *jremref;
8763{
8764	struct pagedep *pagedep;
8765	struct diradd *dap;
8766	struct worklist *wk;
8767
8768	if (pagedep_lookup(UFSTOVFS(ip->i_ump), NULL, ip->i_number, 0, 0,
8769	    &pagedep) == 0)
8770		return (jremref);
8771	dap = diradd_lookup(pagedep, DOTDOT_OFFSET);
8772	if (dap == NULL)
8773		return (jremref);
8774	cancel_diradd(dap, dirrem, jremref, NULL, NULL);
8775	/*
8776	 * Mark any journal work as belonging to the parent so it is freed
8777	 * with the .. reference.
8778	 */
8779	LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
8780		wk->wk_state |= MKDIR_PARENT;
8781	return (NULL);
8782}
8783
8784/*
8785 * Cancel the MKDIR_PARENT mkdir component of a diradd when we're going to
8786 * replace it with a dirrem/diradd pair as a result of re-parenting a
8787 * directory.  This ensures that we don't simultaneously have a mkdir and
8788 * a diradd for the same .. entry.
8789 */
8790static struct jremref *
8791cancel_mkdir_dotdot(ip, dirrem, jremref)
8792	struct inode *ip;
8793	struct dirrem *dirrem;
8794	struct jremref *jremref;
8795{
8796	struct inodedep *inodedep;
8797	struct jaddref *jaddref;
8798	struct mkdir *mkdir;
8799	struct diradd *dap;
8800
8801	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
8802	    &inodedep) == 0)
8803		return (jremref);
8804	dap = inodedep->id_mkdiradd;
8805	if (dap == NULL || (dap->da_state & MKDIR_PARENT) == 0)
8806		return (jremref);
8807	for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir;
8808	    mkdir = LIST_NEXT(mkdir, md_mkdirs))
8809		if (mkdir->md_diradd == dap && mkdir->md_state & MKDIR_PARENT)
8810			break;
8811	if (mkdir == NULL)
8812		panic("cancel_mkdir_dotdot: Unable to find mkdir");
8813	if ((jaddref = mkdir->md_jaddref) != NULL) {
8814		mkdir->md_jaddref = NULL;
8815		jaddref->ja_state &= ~MKDIR_PARENT;
8816		if (inodedep_lookup(UFSTOVFS(ip->i_ump), jaddref->ja_ino, 0,
8817		    &inodedep) == 0)
8818			panic("cancel_mkdir_dotdot: Lost parent inodedep");
8819		if (cancel_jaddref(jaddref, inodedep, &dirrem->dm_jwork)) {
8820			journal_jremref(dirrem, jremref, inodedep);
8821			jremref = NULL;
8822		}
8823	}
8824	if (mkdir->md_state & ONWORKLIST)
8825		WORKLIST_REMOVE(&mkdir->md_list);
8826	mkdir->md_state |= ALLCOMPLETE;
8827	complete_mkdir(mkdir);
8828	return (jremref);
8829}
8830
8831static void
8832journal_jremref(dirrem, jremref, inodedep)
8833	struct dirrem *dirrem;
8834	struct jremref *jremref;
8835	struct inodedep *inodedep;
8836{
8837
8838	if (inodedep == NULL)
8839		if (inodedep_lookup(jremref->jr_list.wk_mp,
8840		    jremref->jr_ref.if_ino, 0, &inodedep) == 0)
8841			panic("journal_jremref: Lost inodedep");
8842	LIST_INSERT_HEAD(&dirrem->dm_jremrefhd, jremref, jr_deps);
8843	TAILQ_INSERT_TAIL(&inodedep->id_inoreflst, &jremref->jr_ref, if_deps);
8844	add_to_journal(&jremref->jr_list);
8845}
8846
8847static void
8848dirrem_journal(dirrem, jremref, dotremref, dotdotremref)
8849	struct dirrem *dirrem;
8850	struct jremref *jremref;
8851	struct jremref *dotremref;
8852	struct jremref *dotdotremref;
8853{
8854	struct inodedep *inodedep;
8855
8857	if (inodedep_lookup(jremref->jr_list.wk_mp, jremref->jr_ref.if_ino, 0,
8858	    &inodedep) == 0)
8859		panic("dirrem_journal: Lost inodedep");
8860	journal_jremref(dirrem, jremref, inodedep);
8861	if (dotremref)
8862		journal_jremref(dirrem, dotremref, inodedep);
8863	if (dotdotremref)
8864		journal_jremref(dirrem, dotdotremref, NULL);
8865}
8866
8867/*
8868 * Allocate a new dirrem if appropriate and return it along with
8869 * its associated pagedep. Called without a lock, returns with lock.
8870 */
8871static struct dirrem *
8872newdirrem(bp, dp, ip, isrmdir, prevdirremp)
8873	struct buf *bp;		/* buffer containing directory block */
8874	struct inode *dp;	/* inode for the directory being modified */
8875	struct inode *ip;	/* inode for directory entry being removed */
8876	int isrmdir;		/* indicates if doing RMDIR */
8877	struct dirrem **prevdirremp; /* previously referenced inode, if any */
8878{
8879	int offset;
8880	ufs_lbn_t lbn;
8881	struct diradd *dap;
8882	struct dirrem *dirrem;
8883	struct pagedep *pagedep;
8884	struct jremref *jremref;
8885	struct jremref *dotremref;
8886	struct jremref *dotdotremref;
8887	struct vnode *dvp;
8888
8889	/*
8890	 * Whiteouts have no deletion dependencies.
8891	 */
8892	if (ip == NULL)
8893		panic("newdirrem: whiteout");
8894	dvp = ITOV(dp);
8895	/*
8896	 * If we are over our limit, try to improve the situation.
8897	 * Limiting the number of dirrem structures will also limit
8898	 * the number of freefile and freeblks structures.
8899	 */
8900	ACQUIRE_LOCK(&lk);
8901	if (!IS_SNAPSHOT(ip) && dep_current[D_DIRREM] > max_softdeps / 2)
8902		(void) request_cleanup(ITOV(dp)->v_mount, FLUSH_BLOCKS);
8903	FREE_LOCK(&lk);
8904	dirrem = malloc(sizeof(struct dirrem),
8905		M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
8906	workitem_alloc(&dirrem->dm_list, D_DIRREM, dvp->v_mount);
8907	LIST_INIT(&dirrem->dm_jremrefhd);
8908	LIST_INIT(&dirrem->dm_jwork);
8909	dirrem->dm_state = isrmdir ? RMDIR : 0;
8910	dirrem->dm_oldinum = ip->i_number;
8911	*prevdirremp = NULL;
8912	/*
8913	 * Allocate remove reference structures to track journal write
8914	 * dependencies.  We will always have one for the link and
8915	 * when doing directories we will always have one more for dot.
8916	 * When renaming a directory we skip the dotdot link change so
8917	 * this is not needed.
8918	 */
8919	jremref = dotremref = dotdotremref = NULL;
8920	if (DOINGSUJ(dvp)) {
8921		if (isrmdir) {
8922			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8923			    ip->i_effnlink + 2);
8924			dotremref = newjremref(dirrem, ip, ip, DOT_OFFSET,
8925			    ip->i_effnlink + 1);
8926			dotdotremref = newjremref(dirrem, ip, dp, DOTDOT_OFFSET,
8927			    dp->i_effnlink + 1);
8928			dotdotremref->jr_state |= MKDIR_PARENT;
8929		} else
8930			jremref = newjremref(dirrem, dp, ip, dp->i_offset,
8931			    ip->i_effnlink + 1);
8932	}
8933	ACQUIRE_LOCK(&lk);
8934	lbn = lblkno(dp->i_fs, dp->i_offset);
8935	offset = blkoff(dp->i_fs, dp->i_offset);
8936	pagedep_lookup(UFSTOVFS(dp->i_ump), bp, dp->i_number, lbn, DEPALLOC,
8937	    &pagedep);
8938	dirrem->dm_pagedep = pagedep;
8939	dirrem->dm_offset = offset;
8940	/*
8941	 * If we're renaming a .. link to a new directory, cancel any
8942	 * existing MKDIR_PARENT mkdir.  If it has already been canceled,
8943	 * the jremref is preserved for any potential diradd in this
8944	 * location.  This cannot coincide with an rmdir.
8945	 */
8946	if (dp->i_offset == DOTDOT_OFFSET) {
8947		if (isrmdir)
8948			panic("newdirrem: .. directory change during remove?");
8949		jremref = cancel_mkdir_dotdot(dp, dirrem, jremref);
8950	}
8951	/*
8952	 * If we're removing a directory search for the .. dependency now and
8953	 * cancel it.  Any pending journal work will be added to the dirrem
8954	 * to be completed when the workitem remove completes.
8955	 */
8956	if (isrmdir)
8957		dotdotremref = cancel_diradd_dotdot(ip, dirrem, dotdotremref);
8958	/*
8959	 * Check for a diradd dependency for the same directory entry.
8960	 * If present, then both dependencies become obsolete and can
8961	 * be de-allocated.
8962	 */
8963	dap = diradd_lookup(pagedep, offset);
8964	if (dap == NULL) {
8965		/*
8966		 * Link the jremref structures into the dirrem so they are
8967		 * written prior to the pagedep.
8968		 */
8969		if (jremref)
8970			dirrem_journal(dirrem, jremref, dotremref,
8971			    dotdotremref);
8972		return (dirrem);
8973	}
8974	/*
8975	 * Must be ATTACHED at this point.
8976	 */
8977	if ((dap->da_state & ATTACHED) == 0)
8978		panic("newdirrem: not ATTACHED");
8979	if (dap->da_newinum != ip->i_number)
8980		panic("newdirrem: inum %ju should be %ju",
8981		    (uintmax_t)ip->i_number, (uintmax_t)dap->da_newinum);
8982	/*
8983	 * If we are deleting a changed name that never made it to disk,
8984	 * then return the dirrem describing the previous inode (which
8985	 * represents the inode currently referenced from this entry on disk).
8986	 */
8987	if ((dap->da_state & DIRCHG) != 0) {
8988		*prevdirremp = dap->da_previous;
8989		dap->da_state &= ~DIRCHG;
8990		dap->da_pagedep = pagedep;
8991	}
8992	/*
8993	 * We are deleting an entry that never made it to disk.
8994	 * Mark it COMPLETE so we can delete its inode immediately.
8995	 */
8996	dirrem->dm_state |= COMPLETE;
8997	cancel_diradd(dap, dirrem, jremref, dotremref, dotdotremref);
8998#ifdef SUJ_DEBUG
8999	if (isrmdir == 0) {
9000		struct worklist *wk;
9001
9002		LIST_FOREACH(wk, &dirrem->dm_jwork, wk_list)
9003			if (wk->wk_state & (MKDIR_BODY | MKDIR_PARENT))
9004				panic("bad wk %p (0x%X)", wk, wk->wk_state);
9005	}
9006#endif
9007
9008	return (dirrem);
9009}
9010
9011/*
9012 * Directory entry change dependencies.
9013 *
9014 * Changing an existing directory entry requires that an add operation
9015 * be completed first followed by a deletion. The semantics for the addition
9016 * are identical to the description of adding a new entry above except
9017 * that the rollback is to the old inode number rather than zero. Once
9018 * the addition dependency is completed, the removal is done as described
9019 * in the removal routine above.
9020 */
9021
9022/*
9023 * This routine should be called immediately after changing
9024 * a directory entry.  The inode's link count should not be
9025 * decremented by the calling procedure -- the soft updates
9026 * code will perform this task when it is safe.
9027 */
9028void
9029softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
9030	struct buf *bp;		/* buffer containing directory block */
9031	struct inode *dp;	/* inode for the directory being modified */
9032	struct inode *ip;	/* inode for directory entry being removed */
9033	ino_t newinum;		/* new inode number for changed entry */
9034	int isrmdir;		/* indicates if doing RMDIR */
9035{
9036	int offset;
9037	struct diradd *dap = NULL;
9038	struct dirrem *dirrem, *prevdirrem;
9039	struct pagedep *pagedep;
9040	struct inodedep *inodedep;
9041	struct jaddref *jaddref;
9042	struct mount *mp;
9043
9044	offset = blkoff(dp->i_fs, dp->i_offset);
9045	mp = UFSTOVFS(dp->i_ump);
9046
9047	/*
9048	 * Whiteouts do not need diradd dependencies.
9049	 */
9050	if (newinum != WINO) {
9051		dap = malloc(sizeof(struct diradd),
9052		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
9053		workitem_alloc(&dap->da_list, D_DIRADD, mp);
9054		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
9055		dap->da_offset = offset;
9056		dap->da_newinum = newinum;
9057		LIST_INIT(&dap->da_jwork);
9058	}
9059
9060	/*
9061	 * Allocate a new dirrem and ACQUIRE_LOCK.
9062	 */
9063	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
9064	pagedep = dirrem->dm_pagedep;
9065	/*
9066	 * The possible values for isrmdir:
9067	 *	0 - non-directory file rename
9068	 *	1 - directory rename within same directory
9069	 *   inum - directory rename to new directory of given inode number
9070	 * When renaming to a new directory, we are both deleting and
9071	 * creating a new directory entry, so the link count on the new
9072	 * directory should not change. Thus we do not need the followup
9073	 * dirrem which is usually done in handle_workitem_remove. We set
9074	 * the DIRCHG flag to tell handle_workitem_remove to skip the
9075	 * followup dirrem.
9076	 */
9077	if (isrmdir > 1)
9078		dirrem->dm_state |= DIRCHG;
9079
9080	/*
9081	 * Whiteouts have no additional dependencies,
9082	 * so just put the dirrem on the correct list.
9083	 */
9084	if (newinum == WINO) {
9085		if ((dirrem->dm_state & COMPLETE) == 0) {
9086			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
9087			    dm_next);
9088		} else {
9089			dirrem->dm_dirinum = pagedep->pd_ino;
9090			if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9091				add_to_worklist(&dirrem->dm_list, 0);
9092		}
9093		FREE_LOCK(&lk);
9094		return;
9095	}
9096	/*
9097	 * Add the dirrem to the inodedep's pending remove list for quick
9098	 * discovery later.  A valid nlinkdelta ensures that this lookup
9099	 * will not fail.
9100	 */
9101	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
9102		panic("softdep_setup_directory_change: Lost inodedep.");
9103	dirrem->dm_state |= ONDEPLIST;
9104	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9105
9106	/*
9107	 * If the COMPLETE flag is clear, then there were no active
9108	 * entries and we want to roll back to the previous inode until
9109	 * the new inode is committed to disk. If the COMPLETE flag is
9110	 * set, then we have deleted an entry that never made it to disk.
9111	 * If the entry we deleted resulted from a name change, then the old
9112	 * inode reference still resides on disk. Any rollback that we do
9113	 * needs to be to that old inode (returned to us in prevdirrem). If
9114	 * the entry we deleted resulted from a create, then there is
9115	 * no entry on the disk, so we want to roll back to zero rather
9116	 * than the uncommitted inode. In either of the COMPLETE cases we
9117	 * want to immediately free the unwritten and unreferenced inode.
9118	 */
9119	if ((dirrem->dm_state & COMPLETE) == 0) {
9120		dap->da_previous = dirrem;
9121	} else {
9122		if (prevdirrem != NULL) {
9123			dap->da_previous = prevdirrem;
9124		} else {
9125			dap->da_state &= ~DIRCHG;
9126			dap->da_pagedep = pagedep;
9127		}
9128		dirrem->dm_dirinum = pagedep->pd_ino;
9129		if (LIST_EMPTY(&dirrem->dm_jremrefhd))
9130			add_to_worklist(&dirrem->dm_list, 0);
9131	}
9132	/*
9133	 * Lookup the jaddref for this journal entry.  We must finish
9134	 * initializing it and make the diradd write dependent on it.
9135	 * If we're not journaling, put it on the id_bufwait list if the
9136	 * inode is not yet written. If it is written, do the post-inode
9137	 * write processing to put it on the id_pendinghd list.
9138	 */
9139	inodedep_lookup(mp, newinum, DEPALLOC | NODELAY, &inodedep);
9140	if (MOUNTEDSUJ(mp)) {
9141		jaddref = (struct jaddref *)TAILQ_LAST(&inodedep->id_inoreflst,
9142		    inoreflst);
9143		KASSERT(jaddref != NULL && jaddref->ja_parent == dp->i_number,
9144		    ("softdep_setup_directory_change: bad jaddref %p",
9145		    jaddref));
9146		jaddref->ja_diroff = dp->i_offset;
9147		jaddref->ja_diradd = dap;
9148		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9149		    dap, da_pdlist);
9150		add_to_journal(&jaddref->ja_list);
9151	} else if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
9152		dap->da_state |= COMPLETE;
9153		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
9154		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
9155	} else {
9156		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
9157		    dap, da_pdlist);
9158		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
9159	}
9160	/*
9161	 * If we're making a new name for a directory that has not been
9162	 * committed we need to move the dot and dotdot references to
9163	 * this new name.
9164	 */
9165	if (inodedep->id_mkdiradd && dp->i_offset != DOTDOT_OFFSET)
9166		merge_diradd(inodedep, dap);
9167	FREE_LOCK(&lk);
9168}
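
/*
 * Sketch of the expected calling sequence when a rename overwrites an
 * existing entry (simplified; in the real tree this is done by the
 * directory code, e.g. ufs_dirrewrite()).  Illustrative only.
 */
#if 0
	oip->i_effnlink--;		/* the old target loses this ref */
	softdep_change_linkcnt(oip);
	softdep_setup_directory_change(bp, dp, oip, newinum, isrmdir);
	bdwrite(bp);
#endif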
9169
9170/*
9171 * Called whenever the link count on an inode is changed.
9172 * It creates an inode dependency so that the new reference(s)
9173 * to the inode cannot be committed to disk until the updated
9174 * inode has been written.
9175 */
9176void
9177softdep_change_linkcnt(ip)
9178	struct inode *ip;	/* the inode with the increased link count */
9179{
9180	struct inodedep *inodedep;
9181	int dflags;
9182
9183	ACQUIRE_LOCK(&lk);
9184	dflags = DEPALLOC;
9185	if (IS_SNAPSHOT(ip))
9186		dflags |= NODELAY;
9187	inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, dflags, &inodedep);
9188	if (ip->i_nlink < ip->i_effnlink)
9189		panic("softdep_change_linkcnt: bad delta");
9190	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9191	FREE_LOCK(&lk);
9192}
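
/*
 * Sketch of a typical caller (e.g. link creation): i_effnlink is
 * updated immediately to reflect the pending operation while i_nlink
 * tracks the count that may safely reach the disk.  Illustrative only.
 */
#if 0
	ip->i_effnlink++;
	ip->i_nlink++;
	DIP_SET(ip, i_nlink, ip->i_nlink);
	if (DOINGSOFTDEP(vp))
		softdep_change_linkcnt(ip);
#endif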
9193
9194/*
9195 * Attach a sbdep dependency to the superblock buf so that we can keep
9196 * track of the head of the linked list of referenced but unlinked inodes.
9197 */
9198void
9199softdep_setup_sbupdate(ump, fs, bp)
9200	struct ufsmount *ump;
9201	struct fs *fs;
9202	struct buf *bp;
9203{
9204	struct sbdep *sbdep;
9205	struct worklist *wk;
9206
9207	if (MOUNTEDSUJ(UFSTOVFS(ump)) == 0)
9208		return;
9209	LIST_FOREACH(wk, &bp->b_dep, wk_list)
9210		if (wk->wk_type == D_SBDEP)
9211			break;
9212	if (wk != NULL)
9213		return;
9214	sbdep = malloc(sizeof(struct sbdep), M_SBDEP, M_SOFTDEP_FLAGS);
9215	workitem_alloc(&sbdep->sb_list, D_SBDEP, UFSTOVFS(ump));
9216	sbdep->sb_fs = fs;
9217	sbdep->sb_ump = ump;
9218	ACQUIRE_LOCK(&lk);
9219	WORKLIST_INSERT(&bp->b_dep, &sbdep->sb_list);
9220	FREE_LOCK(&lk);
9221}
9222
9223/*
9224 * Return the first unlinked inodedep which is ready to be the head of the
9225 * list.  The inodedep and all those after it must have valid next pointers.
9226 */
9227static struct inodedep *
9228first_unlinked_inodedep(ump)
9229	struct ufsmount *ump;
9230{
9231	struct inodedep *inodedep;
9232	struct inodedep *idp;
9233
9234	rw_assert(&lk, RA_WLOCKED);
9235	for (inodedep = TAILQ_LAST(&ump->softdep_unlinked, inodedeplst);
9236	    inodedep; inodedep = idp) {
9237		if ((inodedep->id_state & UNLINKNEXT) == 0)
9238			return (NULL);
9239		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9240		if (idp == NULL || (idp->id_state & UNLINKNEXT) == 0)
9241			break;
9242		if ((inodedep->id_state & UNLINKPREV) == 0)
9243			break;
9244	}
9245	return (inodedep);
9246}
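
/*
 * For reference (flags defined in softdep.h): UNLINKNEXT means this
 * inodedep's on-disk di_freelink (its next pointer) has been written;
 * UNLINKPREV means the on-disk predecessor (an earlier inode or the
 * superblock's fs_sujfree) has been written pointing at it.
 */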
9247
9248/*
9249 * Set the sujfree unlinked head pointer prior to writing a superblock.
9250 */
9251static void
9252initiate_write_sbdep(sbdep)
9253	struct sbdep *sbdep;
9254{
9255	struct inodedep *inodedep;
9256	struct fs *bpfs;
9257	struct fs *fs;
9258
9259	bpfs = sbdep->sb_fs;
9260	fs = sbdep->sb_ump->um_fs;
9261	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9262	if (inodedep) {
9263		fs->fs_sujfree = inodedep->id_ino;
9264		inodedep->id_state |= UNLINKPREV;
9265	} else
9266		fs->fs_sujfree = 0;
9267	bpfs->fs_sujfree = fs->fs_sujfree;
9268}
9269
9270/*
9271 * After a superblock is written, determine whether it must be written again
9272 * due to a changing unlinked list head.
9273 */
9274static int
9275handle_written_sbdep(sbdep, bp)
9276	struct sbdep *sbdep;
9277	struct buf *bp;
9278{
9279	struct inodedep *inodedep;
9280	struct mount *mp;
9281	struct fs *fs;
9282
9283	rw_assert(&lk, RA_WLOCKED);
9284	fs = sbdep->sb_fs;
9285	mp = UFSTOVFS(sbdep->sb_ump);
9286	/*
9287	 * If the superblock doesn't match the in-memory list, start over.
9288	 */
9289	inodedep = first_unlinked_inodedep(sbdep->sb_ump);
9290	if ((inodedep && fs->fs_sujfree != inodedep->id_ino) ||
9291	    (inodedep == NULL && fs->fs_sujfree != 0)) {
9292		bdirty(bp);
9293		return (1);
9294	}
9295	WORKITEM_FREE(sbdep, D_SBDEP);
9296	if (fs->fs_sujfree == 0)
9297		return (0);
9298	/*
9299	 * Now that we have a record of this inode in stable store, allow it
9300	 * to be written to free up pending work.  Inodes may see a lot of
9301	 * write activity after they are unlinked, which we must not hold up.
9302	 */
9303	for (; inodedep != NULL; inodedep = TAILQ_NEXT(inodedep, id_unlinked)) {
9304		if ((inodedep->id_state & UNLINKLINKS) != UNLINKLINKS)
9305			panic("handle_written_sbdep: Bad inodedep %p (0x%X)",
9306			    inodedep, inodedep->id_state);
9307		if (inodedep->id_state & UNLINKONLIST)
9308			break;
9309		inodedep->id_state |= DEPCOMPLETE | UNLINKONLIST;
9310	}
9311
9312	return (0);
9313}
9314
9315/*
9316 * Mark an inodedep as unlinked and insert it into the in-memory unlinked list.
9317 */
9318static void
9319unlinked_inodedep(mp, inodedep)
9320	struct mount *mp;
9321	struct inodedep *inodedep;
9322{
9323	struct ufsmount *ump;
9324
9325	rw_assert(&lk, RA_WLOCKED);
9326	if (MOUNTEDSUJ(mp) == 0)
9327		return;
9328	ump = VFSTOUFS(mp);
9329	ump->um_fs->fs_fmod = 1;
9330	if (inodedep->id_state & UNLINKED)
9331		panic("unlinked_inodedep: %p already unlinked", inodedep);
9332	inodedep->id_state |= UNLINKED;
9333	TAILQ_INSERT_HEAD(&ump->softdep_unlinked, inodedep, id_unlinked);
9334}
9335
9336/*
9337 * Remove an inodedep from the unlinked inodedep list.  This may require
9338 * disk writes if the inode has made it that far.
9339 */
9340static void
9341clear_unlinked_inodedep(inodedep)
9342	struct inodedep *inodedep;
9343{
9344	struct ufsmount *ump;
9345	struct inodedep *idp;
9346	struct inodedep *idn;
9347	struct fs *fs;
9348	struct buf *bp;
9349	ino_t ino;
9350	ino_t nino;
9351	ino_t pino;
9352	int error;
9353
9354	ump = VFSTOUFS(inodedep->id_list.wk_mp);
9355	fs = ump->um_fs;
9356	ino = inodedep->id_ino;
9357	error = 0;
9358	for (;;) {
9359		rw_assert(&lk, RA_WLOCKED);
9360		KASSERT((inodedep->id_state & UNLINKED) != 0,
9361		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9362		    inodedep));
9363		/*
9364		 * If nothing has yet been written, simply remove us from
9365		 * the in-memory list and return.  This is the most common
9366		 * case where handle_workitem_remove() loses the final
9367		 * reference.
9368		 */
9369		if ((inodedep->id_state & UNLINKLINKS) == 0)
9370			break;
9371		/*
9372		 * If we have a NEXT pointer and no PREV pointer, we can simply
9373		 * clear NEXT's PREV and remove ourselves from the list.  Be
9374		 * careful not to clear PREV if the superblock points at
9375		 * next as well.
9376		 */
9377		idn = TAILQ_NEXT(inodedep, id_unlinked);
9378		if ((inodedep->id_state & UNLINKLINKS) == UNLINKNEXT) {
9379			if (idn && fs->fs_sujfree != idn->id_ino)
9380				idn->id_state &= ~UNLINKPREV;
9381			break;
9382		}
9383		/*
9384		 * Here we have an inodedep which is actually linked into
9385		 * the list.  We must remove it by forcing a write to the
9386		 * link before us, whether it be the superblock or an inode.
9387		 * Unfortunately the list may change while we're waiting
9388		 * on the buf lock for either resource, so we must loop until
9389		 * we lock the right one.  If both the superblock and an
9390		 * inode point to this inode we must clear the inode first
9391		 * followed by the superblock.
9392		 */
9393		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9394		pino = 0;
9395		if (idp && (idp->id_state & UNLINKNEXT))
9396			pino = idp->id_ino;
9397		FREE_LOCK(&lk);
9398		if (pino == 0) {
9399			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9400			    (int)fs->fs_sbsize, 0, 0, 0);
9401		} else {
9402			error = bread(ump->um_devvp,
9403			    fsbtodb(fs, ino_to_fsba(fs, pino)),
9404			    (int)fs->fs_bsize, NOCRED, &bp);
9405			if (error)
9406				brelse(bp);
9407		}
9408		ACQUIRE_LOCK(&lk);
9409		if (error)
9410			break;
9411		/* If the list has changed restart the loop. */
9412		idp = TAILQ_PREV(inodedep, inodedeplst, id_unlinked);
9413		nino = 0;
9414		if (idp && (idp->id_state & UNLINKNEXT))
9415			nino = idp->id_ino;
9416		if (nino != pino ||
9417		    (inodedep->id_state & UNLINKPREV) != UNLINKPREV) {
9418			FREE_LOCK(&lk);
9419			brelse(bp);
9420			ACQUIRE_LOCK(&lk);
9421			continue;
9422		}
9423		nino = 0;
9424		idn = TAILQ_NEXT(inodedep, id_unlinked);
9425		if (idn)
9426			nino = idn->id_ino;
9427		/*
9428		 * Remove us from the in-memory list.  After this we cannot
9429		 * access the inodedep.
9430		 */
9431		KASSERT((inodedep->id_state & UNLINKED) != 0,
9432		    ("clear_unlinked_inodedep: inodedep %p not unlinked",
9433		    inodedep));
9434		inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9435		TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9436		FREE_LOCK(&lk);
9437		/*
9438		 * The predecessor's next pointer is manually updated here
9439		 * so that the NEXT flag is never cleared for an element
9440		 * that is in the list.
9441		 */
9442		if (pino == 0) {
9443			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9444			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9445			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9446			    bp);
9447		} else if (fs->fs_magic == FS_UFS1_MAGIC)
9448			((struct ufs1_dinode *)bp->b_data +
9449			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9450		else
9451			((struct ufs2_dinode *)bp->b_data +
9452			    ino_to_fsbo(fs, pino))->di_freelink = nino;
9453		/*
9454		 * If the bwrite fails we have no recourse to recover.  The
9455		 * filesystem is corrupted already.
9456		 */
9457		bwrite(bp);
9458		ACQUIRE_LOCK(&lk);
9459		/*
9460		 * If the superblock pointer still needs to be cleared, force
9461		 * a write here.
9462		 */
9463		if (fs->fs_sujfree == ino) {
9464			FREE_LOCK(&lk);
9465			bp = getblk(ump->um_devvp, btodb(fs->fs_sblockloc),
9466			    (int)fs->fs_sbsize, 0, 0, 0);
9467			bcopy((caddr_t)fs, bp->b_data, (u_int)fs->fs_sbsize);
9468			ffs_oldfscompat_write((struct fs *)bp->b_data, ump);
9469			softdep_setup_sbupdate(ump, (struct fs *)bp->b_data,
9470			    bp);
9471			bwrite(bp);
9472			ACQUIRE_LOCK(&lk);
9473		}
9474
9475		if (fs->fs_sujfree != ino)
9476			return;
9477		panic("clear_unlinked_inodedep: Failed to clear free head");
9478	}
9479	if (inodedep->id_ino == fs->fs_sujfree)
9480		panic("clear_unlinked_inodedep: Freeing head of free list");
9481	inodedep->id_state &= ~(UNLINKED | UNLINKLINKS | UNLINKONLIST);
9482	TAILQ_REMOVE(&ump->softdep_unlinked, inodedep, id_unlinked);
9483	return;
9484}
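
/*
 * Illustrative sketch (not compiled): the on-disk singly linked list
 * maintained above, walked the way a recovery pass would walk it.
 * read_dinode() is a hypothetical helper that returns the on-disk
 * inode for a given inode number.
 */
#if 0
	ino_t ino;

	for (ino = fs->fs_sujfree; ino != 0;
	    ino = read_dinode(ino)->di_freelink)
		;	/* each ino was unlinked but still referenced */
#endif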
9485
9486/*
9487 * This workitem decrements the inode's link count.
9488 * If the link count reaches zero, the file is removed.
9489 */
9490static int
9491handle_workitem_remove(dirrem, flags)
9492	struct dirrem *dirrem;
9493	int flags;
9494{
9495	struct inodedep *inodedep;
9496	struct workhead dotdotwk;
9497	struct worklist *wk;
9498	struct ufsmount *ump;
9499	struct mount *mp;
9500	struct vnode *vp;
9501	struct inode *ip;
9502	ino_t oldinum;
9503
9504	if (dirrem->dm_state & ONWORKLIST)
9505		panic("handle_workitem_remove: dirrem %p still on worklist",
9506		    dirrem);
9507	oldinum = dirrem->dm_oldinum;
9508	mp = dirrem->dm_list.wk_mp;
9509	ump = VFSTOUFS(mp);
9510	flags |= LK_EXCLUSIVE;
9511	if (ffs_vgetf(mp, oldinum, flags, &vp, FFSV_FORCEINSMQ) != 0)
9512		return (EBUSY);
9513	ip = VTOI(vp);
9514	ACQUIRE_LOCK(&lk);
9515	if ((inodedep_lookup(mp, oldinum, 0, &inodedep)) == 0)
9516		panic("handle_workitem_remove: lost inodedep");
9517	if (dirrem->dm_state & ONDEPLIST)
9518		LIST_REMOVE(dirrem, dm_inonext);
9519	KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
9520	    ("handle_workitem_remove: Journal entries not written."));
9521
9522	/*
9523	 * Move all dependencies waiting on the remove to complete
9524	 * from the dirrem to the inode inowait list to be completed
9525	 * after the inode has been updated and written to disk.  Any
9526	 * marked MKDIR_PARENT are saved to be completed when the .. ref
9527	 * is removed.
9528	 */
9529	LIST_INIT(&dotdotwk);
9530	while ((wk = LIST_FIRST(&dirrem->dm_jwork)) != NULL) {
9531		WORKLIST_REMOVE(wk);
9532		if (wk->wk_state & MKDIR_PARENT) {
9533			wk->wk_state &= ~MKDIR_PARENT;
9534			WORKLIST_INSERT(&dotdotwk, wk);
9535			continue;
9536		}
9537		WORKLIST_INSERT(&inodedep->id_inowait, wk);
9538	}
9539	LIST_SWAP(&dirrem->dm_jwork, &dotdotwk, worklist, wk_list);
9540	/*
9541	 * Normal file deletion.
9542	 */
9543	if ((dirrem->dm_state & RMDIR) == 0) {
9544		ip->i_nlink--;
9545		DIP_SET(ip, i_nlink, ip->i_nlink);
9546		ip->i_flag |= IN_CHANGE;
9547		if (ip->i_nlink < ip->i_effnlink)
9548			panic("handle_workitem_remove: bad file delta");
9549		if (ip->i_nlink == 0)
9550			unlinked_inodedep(mp, inodedep);
9551		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9552		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9553		    ("handle_workitem_remove: worklist not empty. %s",
9554		    TYPENAME(LIST_FIRST(&dirrem->dm_jwork)->wk_type)));
9555		WORKITEM_FREE(dirrem, D_DIRREM);
9556		FREE_LOCK(&lk);
9557		goto out;
9558	}
9559	/*
9560	 * Directory deletion. Decrement reference count for both the
9561	 * just deleted parent directory entry and the reference for ".".
9562	 * Arrange to have the reference count on the parent decremented
9563	 * to account for the loss of "..".
9564	 */
9565	ip->i_nlink -= 2;
9566	DIP_SET(ip, i_nlink, ip->i_nlink);
9567	ip->i_flag |= IN_CHANGE;
9568	if (ip->i_nlink < ip->i_effnlink)
9569		panic("handle_workitem_remove: bad dir delta");
9570	if (ip->i_nlink == 0)
9571		unlinked_inodedep(mp, inodedep);
9572	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
9573	/*
9574	 * Rename a directory to a new parent. Since we are both deleting
9575	 * and creating a new directory entry, the link count on the new
9576	 * directory should not change. Thus we skip the followup dirrem.
9577	 */
9578	if (dirrem->dm_state & DIRCHG) {
9579		KASSERT(LIST_EMPTY(&dirrem->dm_jwork),
9580		    ("handle_workitem_remove: DIRCHG and worklist not empty."));
9581		WORKITEM_FREE(dirrem, D_DIRREM);
9582		FREE_LOCK(&lk);
9583		goto out;
9584	}
9585	dirrem->dm_state = ONDEPLIST;
9586	dirrem->dm_oldinum = dirrem->dm_dirinum;
9587	/*
9588	 * Place the dirrem on the parent's diremhd list.
9589	 */
9590	if (inodedep_lookup(mp, dirrem->dm_oldinum, 0, &inodedep) == 0)
9591		panic("handle_workitem_remove: lost dir inodedep");
9592	LIST_INSERT_HEAD(&inodedep->id_dirremhd, dirrem, dm_inonext);
9593	/*
9594	 * If the allocated inode has never been written to disk, then
9595	 * the on-disk inode is zero'ed and we can remove the file
9596	 * immediately.  When journaling, if the inode has been marked
9597	 * unlinked and not DEPCOMPLETE we know it can never be written.
9598	 */
9599	inodedep_lookup(mp, oldinum, 0, &inodedep);
9600	if (inodedep == NULL ||
9601	    (inodedep->id_state & (DEPCOMPLETE | UNLINKED)) == UNLINKED ||
9602	    check_inode_unwritten(inodedep)) {
9603		FREE_LOCK(&lk);
9604		vput(vp);
9605		return (handle_workitem_remove(dirrem, flags));
9606	}
9607	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
9608	FREE_LOCK(&lk);
9609	ip->i_flag |= IN_CHANGE;
9610out:
9611	ffs_update(vp, 0);
9612	vput(vp);
9613	return (0);
9614}
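
/*
 * Note that a directory removal is processed in two passes through the
 * routine above.  The first pass (RMDIR set) drops the victim's name
 * and "." references, then re-targets dm_oldinum at the parent and
 * queues the dirrem on the victim's id_inowait (or re-runs it at once
 * if the victim inode was never written).  The second pass takes the
 * non-RMDIR path and drops the parent's ".." reference.
 */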
9615
9616/*
9617 * Inode de-allocation dependencies.
9618 *
9619 * When an inode's link count is reduced to zero, it can be de-allocated. We
9620 * found it convenient to postpone de-allocation until after the inode is
9621 * written to disk with its new link count (zero).  At this point, all of the
9622 * on-disk inode's block pointers are nullified and, with careful dependency
9623 * list ordering, all dependencies related to the inode will be satisfied and
9624 * the corresponding dependency structures de-allocated.  So, if/when the
9625 * inode is reused, there will be no mixing of old dependencies with new
9626 * ones.  This artificial dependency is set up by the block de-allocation
9627 * procedure above (softdep_setup_freeblocks) and completed by the
9628 * following procedure.
9629 */
9630static void
9631handle_workitem_freefile(freefile)
9632	struct freefile *freefile;
9633{
9634	struct workhead wkhd;
9635	struct fs *fs;
9636	struct inodedep *idp;
9637	struct ufsmount *ump;
9638	int error;
9639
9640	ump = VFSTOUFS(freefile->fx_list.wk_mp);
9641	fs = ump->um_fs;
9642#ifdef DEBUG
9643	ACQUIRE_LOCK(&lk);
9644	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
9645	FREE_LOCK(&lk);
9646	if (error)
9647		panic("handle_workitem_freefile: inodedep %p survived", idp);
9648#endif
9649	UFS_LOCK(ump);
9650	fs->fs_pendinginodes -= 1;
9651	UFS_UNLOCK(ump);
9652	LIST_INIT(&wkhd);
9653	LIST_SWAP(&freefile->fx_jwork, &wkhd, worklist, wk_list);
9654	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
9655	    freefile->fx_oldinum, freefile->fx_mode, &wkhd)) != 0)
9656		softdep_error("handle_workitem_freefile", error);
9657	ACQUIRE_LOCK(&lk);
9658	WORKITEM_FREE(freefile, D_FREEFILE);
9659	FREE_LOCK(&lk);
9660}
9661
9663/*
9664 * Helper function which unlinks marker element from work list and returns
9665 * the next element on the list.
9666 */
9667static __inline struct worklist *
9668markernext(struct worklist *marker)
9669{
9670	struct worklist *next;
9671
9672	next = LIST_NEXT(marker, wk_list);
9673	LIST_REMOVE(marker, wk_list);
9674	return (next);
9675}
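
/*
 * Illustrative use of the marker idiom (see softdep_disk_io_initiation
 * below): keeping the marker linked after the current element lets the
 * scan resume safely even if the list changes while lk is dropped.
 */
#if 0
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	    wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		/* ... process wk; lk may be dropped and re-acquired ... */
	}
#endif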
9676
9677/*
9678 * Disk writes.
9679 *
9680 * The dependency structures constructed above are most actively used when file
9681 * system blocks are written to disk.  No constraints are placed on when a
9682 * block can be written, but unsatisfied update dependencies are made safe by
9683 * modifying (or replacing) the source memory for the duration of the disk
9684 * write.  When the disk write completes, the memory block is again brought
9685 * up-to-date.
9686 *
9687 * In-core inode structure reclamation.
9688 *
9689 * Because there are a finite number of "in-core" inode structures, they are
9690 * reused regularly.  By transferring all inode-related dependencies to the
9691 * in-memory inode block and indexing them separately (via "inodedep"s), we
9692 * can allow "in-core" inode structures to be reused at any time and avoid
9693 * any increase in contention.
9694 *
9695 * Called just before entering the device driver to initiate a new disk I/O.
9696 * The buffer must be locked, thus, no I/O completion operations can occur
9697 * while we are manipulating its associated dependencies.
9698 */
9699static void
9700softdep_disk_io_initiation(bp)
9701	struct buf *bp;		/* structure describing disk write to occur */
9702{
9703	struct worklist *wk;
9704	struct worklist marker;
9705	struct inodedep *inodedep;
9706	struct freeblks *freeblks;
9707	struct jblkdep *jblkdep;
9708	struct newblk *newblk;
9709
9710	/*
9711	 * We only care about write operations. There should never
9712	 * be dependencies for reads.
9713	 */
9714	if (bp->b_iocmd != BIO_WRITE)
9715		panic("softdep_disk_io_initiation: not write");
9716
9717	if (bp->b_vflags & BV_BKGRDINPROG)
9718		panic("softdep_disk_io_initiation: Writing buffer with "
9719		    "background write in progress: %p", bp);
9720
9721	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
9722	PHOLD(curproc);			/* Don't swap out kernel stack */
9723
9724	ACQUIRE_LOCK(&lk);
9725	/*
9726	 * Do any necessary pre-I/O processing.
9727	 */
9728	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
9729	     wk = markernext(&marker)) {
9730		LIST_INSERT_AFTER(wk, &marker, wk_list);
9731		switch (wk->wk_type) {
9732
9733		case D_PAGEDEP:
9734			initiate_write_filepage(WK_PAGEDEP(wk), bp);
9735			continue;
9736
9737		case D_INODEDEP:
9738			inodedep = WK_INODEDEP(wk);
9739			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
9740				initiate_write_inodeblock_ufs1(inodedep, bp);
9741			else
9742				initiate_write_inodeblock_ufs2(inodedep, bp);
9743			continue;
9744
9745		case D_INDIRDEP:
9746			initiate_write_indirdep(WK_INDIRDEP(wk), bp);
9747			continue;
9748
9749		case D_BMSAFEMAP:
9750			initiate_write_bmsafemap(WK_BMSAFEMAP(wk), bp);
9751			continue;
9752
9753		case D_JSEG:
9754			WK_JSEG(wk)->js_buf = NULL;
9755			continue;
9756
9757		case D_FREEBLKS:
9758			freeblks = WK_FREEBLKS(wk);
9759			jblkdep = LIST_FIRST(&freeblks->fb_jblkdephd);
9760			/*
9761			 * We have to wait for the freeblks to be journaled
9762			 * before we can write an inodeblock with updated
9763			 * pointers.  Be careful to arrange the marker so
9764			 * we revisit the freeblks if it's not removed by
9765			 * the first jwait().
9766			 */
9767			if (jblkdep != NULL) {
9768				LIST_REMOVE(&marker, wk_list);
9769				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9770				jwait(&jblkdep->jb_list, MNT_WAIT);
9771			}
9772			continue;
9773		case D_ALLOCDIRECT:
9774		case D_ALLOCINDIR:
9775			/*
9776			 * We have to wait for the jnewblk to be journaled
9777			 * before we can write to a block if the contents
9778			 * may be confused with an earlier file's indirect
9779			 * at recovery time.  Handle the marker as described
9780			 * above.
9781			 */
9782			newblk = WK_NEWBLK(wk);
9783			if (newblk->nb_jnewblk != NULL &&
9784			    indirblk_lookup(newblk->nb_list.wk_mp,
9785			    newblk->nb_newblkno)) {
9786				LIST_REMOVE(&marker, wk_list);
9787				LIST_INSERT_BEFORE(wk, &marker, wk_list);
9788				jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
9789			}
9790			continue;
9791
9792		case D_SBDEP:
9793			initiate_write_sbdep(WK_SBDEP(wk));
9794			continue;
9795
9796		case D_MKDIR:
9797		case D_FREEWORK:
9798		case D_FREEDEP:
9799		case D_JSEGDEP:
9800			continue;
9801
9802		default:
9803			panic("softdep_disk_io_initiation: Unexpected type %s",
9804			    TYPENAME(wk->wk_type));
9805			/* NOTREACHED */
9806		}
9807	}
9808	FREE_LOCK(&lk);
9809	PRELE(curproc);			/* Allow swapout of kernel stack */
9810}
9811
9812/*
9813 * Called from within the procedure above to deal with unsatisfied
9814 * allocation dependencies in a directory. The buffer must be locked,
9815 * thus, no I/O completion operations can occur while we are
9816 * manipulating its associated dependencies.
9817 */
9818static void
9819initiate_write_filepage(pagedep, bp)
9820	struct pagedep *pagedep;
9821	struct buf *bp;
9822{
9823	struct jremref *jremref;
9824	struct jmvref *jmvref;
9825	struct dirrem *dirrem;
9826	struct diradd *dap;
9827	struct direct *ep;
9828	int i;
9829
9830	if (pagedep->pd_state & IOSTARTED) {
9831		/*
9832		 * This can only happen if there is a driver that does not
9833		 * understand chaining. Here biodone will reissue the call
9834		 * to strategy for the incomplete buffers.
9835		 */
9836		printf("initiate_write_filepage: already started\n");
9837		return;
9838	}
9839	pagedep->pd_state |= IOSTARTED;
9840	/*
9841	 * Wait for all journal remove dependencies to hit the disk.
9842	 * We cannot allow any potentially conflicting directory adds
9843	 * to be visible before removes, and rollback is too difficult.
9844	 * lk may be dropped and re-acquired; however, we hold the buf
9845	 * locked so the dependency cannot go away.
9846	 */
9847	LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next)
9848		while ((jremref = LIST_FIRST(&dirrem->dm_jremrefhd)) != NULL)
9849			jwait(&jremref->jr_list, MNT_WAIT);
9850	while ((jmvref = LIST_FIRST(&pagedep->pd_jmvrefhd)) != NULL)
9851		jwait(&jmvref->jm_list, MNT_WAIT);
9852	for (i = 0; i < DAHASHSZ; i++) {
9853		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
9854			ep = (struct direct *)
9855			    ((char *)bp->b_data + dap->da_offset);
9856			if (ep->d_ino != dap->da_newinum)
9857				panic("%s: dir inum %ju != new %ju",
9858				    "initiate_write_filepage",
9859				    (uintmax_t)ep->d_ino,
9860				    (uintmax_t)dap->da_newinum);
9861			if (dap->da_state & DIRCHG)
9862				ep->d_ino = dap->da_previous->dm_oldinum;
9863			else
9864				ep->d_ino = 0;
9865			dap->da_state &= ~ATTACHED;
9866			dap->da_state |= UNDONE;
9867		}
9868	}
9869}
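
/*
 * Note on the rollback above: entries whose inodes are not yet
 * committed go to disk with d_ino zeroed (or, in the DIRCHG case,
 * holding the previous inode number), and their diradds are marked
 * UNDONE.  The I/O completion handler rolls the new inode numbers
 * forward again once this write has finished.
 */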
9870
9871/*
9872 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
9873 * Note that any bug fixes made to this routine must be done in the
9874 * version found below.
9875 *
9876 * Called from within the procedure above to deal with unsatisfied
9877 * allocation dependencies in an inodeblock. The buffer must be
9878 * locked, thus, no I/O completion operations can occur while we
9879 * are manipulating its associated dependencies.
9880 */
9881static void
9882initiate_write_inodeblock_ufs1(inodedep, bp)
9883	struct inodedep *inodedep;
9884	struct buf *bp;			/* The inode block */
9885{
9886	struct allocdirect *adp, *lastadp;
9887	struct ufs1_dinode *dp;
9888	struct ufs1_dinode *sip;
9889	struct inoref *inoref;
9890	struct fs *fs;
9891	ufs_lbn_t i;
9892#ifdef INVARIANTS
9893	ufs_lbn_t prevlbn = 0;
9894#endif
9895	int deplist;
9896
9897	if (inodedep->id_state & IOSTARTED)
9898		panic("initiate_write_inodeblock_ufs1: already started");
9899	inodedep->id_state |= IOSTARTED;
9900	fs = inodedep->id_fs;
9901	dp = (struct ufs1_dinode *)bp->b_data +
9902	    ino_to_fsbo(fs, inodedep->id_ino);
9903
9904	/*
9905	 * If we're on the unlinked list but have not yet written our
9906	 * next pointer, initialize it here.
9907	 */
9908	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
9909		struct inodedep *inon;
9910
9911		inon = TAILQ_NEXT(inodedep, id_unlinked);
9912		dp->di_freelink = inon ? inon->id_ino : 0;
9913	}
9914	/*
9915	 * If the bitmap is not yet written, then the allocated
9916	 * inode cannot be written to disk.
9917	 */
9918	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
9919		if (inodedep->id_savedino1 != NULL)
9920			panic("initiate_write_inodeblock_ufs1: I/O underway");
9921		FREE_LOCK(&lk);
9922		sip = malloc(sizeof(struct ufs1_dinode),
9923		    M_SAVEDINO, M_SOFTDEP_FLAGS);
9924		ACQUIRE_LOCK(&lk);
9925		inodedep->id_savedino1 = sip;
9926		*inodedep->id_savedino1 = *dp;
9927		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
9928		dp->di_gen = inodedep->id_savedino1->di_gen;
9929		dp->di_freelink = inodedep->id_savedino1->di_freelink;
9930		return;
9931	}
9932	/*
9933	 * If no dependencies, then there is nothing to roll back.
9934	 */
9935	inodedep->id_savedsize = dp->di_size;
9936	inodedep->id_savedextsize = 0;
9937	inodedep->id_savednlink = dp->di_nlink;
9938	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
9939	    TAILQ_EMPTY(&inodedep->id_inoreflst))
9940		return;
9941	/*
9942	 * Revert the link count to that of the first unwritten journal entry.
9943	 */
9944	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
9945	if (inoref)
9946		dp->di_nlink = inoref->if_nlink;
9947	/*
9948	 * Set the dependencies to busy.
9949	 */
9950	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
9951	     adp = TAILQ_NEXT(adp, ad_next)) {
9952#ifdef INVARIANTS
9953		if (deplist != 0 && prevlbn >= adp->ad_offset)
9954			panic("softdep_write_inodeblock: lbn order");
9955		prevlbn = adp->ad_offset;
9956		if (adp->ad_offset < NDADDR &&
9957		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
9958			panic("%s: direct pointer #%jd mismatch %d != %jd",
9959			    "softdep_write_inodeblock",
9960			    (intmax_t)adp->ad_offset,
9961			    dp->di_db[adp->ad_offset],
9962			    (intmax_t)adp->ad_newblkno);
9963		if (adp->ad_offset >= NDADDR &&
9964		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
9965			panic("%s: indirect pointer #%jd mismatch %d != %jd",
9966			    "softdep_write_inodeblock",
9967			    (intmax_t)adp->ad_offset - NDADDR,
9968			    dp->di_ib[adp->ad_offset - NDADDR],
9969			    (intmax_t)adp->ad_newblkno);
9970		deplist |= 1 << adp->ad_offset;
9971		if ((adp->ad_state & ATTACHED) == 0)
9972			panic("softdep_write_inodeblock: Unknown state 0x%x",
9973			    adp->ad_state);
9974#endif /* INVARIANTS */
9975		adp->ad_state &= ~ATTACHED;
9976		adp->ad_state |= UNDONE;
9977	}
9978	/*
9979	 * The on-disk inode cannot claim to be any larger than the last
9980	 * fragment that has been written. Otherwise, the on-disk inode
9981	 * might have fragments that were not the last block in the file
9982	 * which would corrupt the filesystem.
9983	 */
9984	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
9985	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
9986		if (adp->ad_offset >= NDADDR)
9987			break;
9988		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
9989		/* keep going until hitting a rollback to a frag */
9990		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
9991			continue;
9992		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
9993		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
9994#ifdef INVARIANTS
9995			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
9996				panic("softdep_write_inodeblock: lost dep1");
9997#endif /* INVARIANTS */
9998			dp->di_db[i] = 0;
9999		}
10000		for (i = 0; i < NIADDR; i++) {
10001#ifdef INVARIANTS
10002			if (dp->di_ib[i] != 0 &&
10003			    (deplist & ((1 << NDADDR) << i)) == 0)
10004				panic("softdep_write_inodeblock: lost dep2");
10005#endif /* INVARIANTS */
10006			dp->di_ib[i] = 0;
10007		}
10008		return;
10009	}
10010	/*
10011	 * If we have zero'ed out the last allocated block of the file,
10012	 * roll back the size to the last currently allocated block.
10013	 * We know that this last allocated block is full-sized, as
10014	 * we already checked for fragments in the loop above.
10015	 */
10016	if (lastadp != NULL &&
10017	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10018		for (i = lastadp->ad_offset; i >= 0; i--)
10019			if (dp->di_db[i] != 0)
10020				break;
10021		dp->di_size = (i + 1) * fs->fs_bsize;
10022	}
10023	/*
10024	 * The only dependencies are for indirect blocks.
10025	 *
10026	 * The file size for indirect block additions is not guaranteed.
10027	 * Such a guarantee would be non-trivial to achieve. The conventional
10028	 * synchronous write implementation also does not make this guarantee.
10029	 * Fsck should catch and fix discrepancies. Arguably, the file size
10030	 * can be over-estimated without destroying integrity when the file
10031	 * moves into the indirect blocks (i.e., is large). If we want to
10032	 * postpone fsck, we are stuck with this argument.
10033	 */
10034	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10035		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10036}
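
/*
 * Worked example of the deplist bitmask used above (and in the UFS2
 * version below): with NDADDR == 12, a dependency on direct block 3
 * sets bit 3 (1 << 3), while a dependency on the second indirect
 * block (ad_offset == NDADDR + 1) sets bit 13 ((1 << NDADDR) << 1),
 * which is exactly what the "lost dep" checks test for.
 */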
10037
10038/*
10039 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
10040 * Note that any bug fixes made to this routine must be done in the
10041 * version found above.
10042 *
10043 * Called from within the procedure above to deal with unsatisfied
10044 * allocation dependencies in an inodeblock. The buffer must be
10045 * locked, thus, no I/O completion operations can occur while we
10046 * are manipulating its associated dependencies.
10047 */
10048static void
10049initiate_write_inodeblock_ufs2(inodedep, bp)
10050	struct inodedep *inodedep;
10051	struct buf *bp;			/* The inode block */
10052{
10053	struct allocdirect *adp, *lastadp;
10054	struct ufs2_dinode *dp;
10055	struct ufs2_dinode *sip;
10056	struct inoref *inoref;
10057	struct fs *fs;
10058	ufs_lbn_t i;
10059#ifdef INVARIANTS
10060	ufs_lbn_t prevlbn = 0;
10061#endif
10062	int deplist;
10063
10064	if (inodedep->id_state & IOSTARTED)
10065		panic("initiate_write_inodeblock_ufs2: already started");
10066	inodedep->id_state |= IOSTARTED;
10067	fs = inodedep->id_fs;
10068	dp = (struct ufs2_dinode *)bp->b_data +
10069	    ino_to_fsbo(fs, inodedep->id_ino);
10070
10071	/*
10072	 * If we're on the unlinked list but have not yet written our
10073	 * next pointer, initialize it here.
10074	 */
10075	if ((inodedep->id_state & (UNLINKED | UNLINKNEXT)) == UNLINKED) {
10076		struct inodedep *inon;
10077
10078		inon = TAILQ_NEXT(inodedep, id_unlinked);
10079		dp->di_freelink = inon ? inon->id_ino : 0;
10080	}
10081	/*
10082	 * If the bitmap is not yet written, then the allocated
10083	 * inode cannot be written to disk.
10084	 */
10085	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
10086		if (inodedep->id_savedino2 != NULL)
10087			panic("initiate_write_inodeblock_ufs2: I/O underway");
10088		FREE_LOCK(&lk);
10089		sip = malloc(sizeof(struct ufs2_dinode),
10090		    M_SAVEDINO, M_SOFTDEP_FLAGS);
10091		ACQUIRE_LOCK(&lk);
10092		inodedep->id_savedino2 = sip;
10093		*inodedep->id_savedino2 = *dp;
10094		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
10095		dp->di_gen = inodedep->id_savedino2->di_gen;
10096		dp->di_freelink = inodedep->id_savedino2->di_freelink;
10097		return;
10098	}
10099	/*
10100	 * If no dependencies, then there is nothing to roll back.
10101	 */
10102	inodedep->id_savedsize = dp->di_size;
10103	inodedep->id_savedextsize = dp->di_extsize;
10104	inodedep->id_savednlink = dp->di_nlink;
10105	if (TAILQ_EMPTY(&inodedep->id_inoupdt) &&
10106	    TAILQ_EMPTY(&inodedep->id_extupdt) &&
10107	    TAILQ_EMPTY(&inodedep->id_inoreflst))
10108		return;
10109	/*
10110	 * Revert the link count to that of the first unwritten journal entry.
10111	 */
10112	inoref = TAILQ_FIRST(&inodedep->id_inoreflst);
10113	if (inoref)
10114		dp->di_nlink = inoref->if_nlink;
10115
10116	/*
10117	 * Set the ext data dependencies to busy.
10118	 */
10119	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10120	     adp = TAILQ_NEXT(adp, ad_next)) {
10121#ifdef INVARIANTS
10122		if (deplist != 0 && prevlbn >= adp->ad_offset)
10123			panic("softdep_write_inodeblock: lbn order");
10124		prevlbn = adp->ad_offset;
10125		if (dp->di_extb[adp->ad_offset] != adp->ad_newblkno)
10126			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10127			    "softdep_write_inodeblock",
10128			    (intmax_t)adp->ad_offset,
10129			    (intmax_t)dp->di_extb[adp->ad_offset],
10130			    (intmax_t)adp->ad_newblkno);
10131		deplist |= 1 << adp->ad_offset;
10132		if ((adp->ad_state & ATTACHED) == 0)
10133			panic("softdep_write_inodeblock: Unknown state 0x%x",
10134			    adp->ad_state);
10135#endif /* INVARIANTS */
10136		adp->ad_state &= ~ATTACHED;
10137		adp->ad_state |= UNDONE;
10138	}
10139	/*
10140	 * The on-disk inode cannot claim to be any larger than the last
10141	 * fragment that has been written. Otherwise, the on-disk inode
10142	 * might have fragments that were not the last block in the ext
10143	 * data which would corrupt the filesystem.
10144	 */
10145	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
10146	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10147		dp->di_extb[adp->ad_offset] = adp->ad_oldblkno;
10148		/* keep going until hitting a rollback to a frag */
10149		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10150			continue;
10151		dp->di_extsize = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10152		for (i = adp->ad_offset + 1; i < NXADDR; i++) {
10153#ifdef INVARIANTS
10154			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
10155				panic("softdep_write_inodeblock: lost dep1");
10156#endif /* INVARIANTS */
10157			dp->di_extb[i] = 0;
10158		}
10159		lastadp = NULL;
10160		break;
10161	}
10162	/*
10163	 * If we have zero'ed out the last allocated block of the ext
10164	 * data, roll back the size to the last currently allocated block.
10165	 * We know that this last allocated block is full-sized, as
10166	 * we already checked for fragments in the loop above.
10167	 */
10168	if (lastadp != NULL &&
10169	    dp->di_extsize <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10170		for (i = lastadp->ad_offset; i >= 0; i--)
10171			if (dp->di_extb[i] != 0)
10172				break;
10173		dp->di_extsize = (i + 1) * fs->fs_bsize;
10174	}
10175	/*
10176	 * Set the file data dependencies to busy.
10177	 */
10178	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10179	     adp = TAILQ_NEXT(adp, ad_next)) {
10180#ifdef INVARIANTS
10181		if (deplist != 0 && prevlbn >= adp->ad_offset)
10182			panic("softdep_write_inodeblock: lbn order");
10183		if ((adp->ad_state & ATTACHED) == 0)
10184			panic("inodedep %p and adp %p not attached", inodedep, adp);
10185		prevlbn = adp->ad_offset;
10186		if (adp->ad_offset < NDADDR &&
10187		    dp->di_db[adp->ad_offset] != adp->ad_newblkno)
10188			panic("%s: direct pointer #%jd mismatch %jd != %jd",
10189			    "softdep_write_inodeblock",
10190			    (intmax_t)adp->ad_offset,
10191			    (intmax_t)dp->di_db[adp->ad_offset],
10192			    (intmax_t)adp->ad_newblkno);
10193		if (adp->ad_offset >= NDADDR &&
10194		    dp->di_ib[adp->ad_offset - NDADDR] != adp->ad_newblkno)
10195			panic("%s indirect pointer #%jd mismatch %jd != %jd",
10196			    "softdep_write_inodeblock:",
10197			    (intmax_t)adp->ad_offset - NDADDR,
10198			    (intmax_t)dp->di_ib[adp->ad_offset - NDADDR],
10199			    (intmax_t)adp->ad_newblkno);
10200		deplist |= 1 << adp->ad_offset;
10201		if ((adp->ad_state & ATTACHED) == 0)
10202			panic("softdep_write_inodeblock: Unknown state 0x%x",
10203			    adp->ad_state);
10204#endif /* INVARIANTS */
10205		adp->ad_state &= ~ATTACHED;
10206		adp->ad_state |= UNDONE;
10207	}
10208	/*
10209	 * The on-disk inode cannot claim to be any larger than the last
10210	 * fragment that has been written. Otherwise, the on-disk inode
10211	 * might have fragments that were not the last block in the file
10212	 * which would corrupt the filesystem.
10213	 */
10214	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
10215	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
10216		if (adp->ad_offset >= NDADDR)
10217			break;
10218		dp->di_db[adp->ad_offset] = adp->ad_oldblkno;
10219		/* keep going until hitting a rollback to a frag */
10220		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
10221			continue;
10222		dp->di_size = fs->fs_bsize * adp->ad_offset + adp->ad_oldsize;
10223		for (i = adp->ad_offset + 1; i < NDADDR; i++) {
10224#ifdef INVARIANTS
10225			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
10226				panic("softdep_write_inodeblock: lost dep2");
10227#endif /* INVARIANTS */
10228			dp->di_db[i] = 0;
10229		}
10230		for (i = 0; i < NIADDR; i++) {
10231#ifdef INVARIANTS
10232			if (dp->di_ib[i] != 0 &&
10233			    (deplist & ((1 << NDADDR) << i)) == 0)
10234				panic("softdep_write_inodeblock: lost dep3");
10235#endif /* INVARIANTS */
10236			dp->di_ib[i] = 0;
10237		}
10238		return;
10239	}
10240	/*
10241	 * If we have zero'ed out the last allocated block of the file,
10242	 * roll back the size to the last currently allocated block.
10243	 * We know that this last allocated block is full-sized, as
10244	 * we already checked for fragments in the loop above.
10245	 */
10246	if (lastadp != NULL &&
10247	    dp->di_size <= (lastadp->ad_offset + 1) * fs->fs_bsize) {
10248		for (i = lastadp->ad_offset; i >= 0; i--)
10249			if (dp->di_db[i] != 0)
10250				break;
10251		dp->di_size = (i + 1) * fs->fs_bsize;
10252	}
10253	/*
10254	 * The only dependencies are for indirect blocks.
10255	 *
10256	 * The file size for indirect block additions is not guaranteed.
10257	 * Such a guarantee would be non-trivial to achieve. The conventional
10258	 * synchronous write implementation also does not make this guarantee.
10259	 * Fsck should catch and fix discrepancies. Arguably, the file size
10260	 * can be over-estimated without destroying integrity when the file
10261	 * moves into the indirect blocks (i.e., is large). If we want to
10262	 * postpone fsck, we are stuck with this argument.
10263	 */
10264	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
10265		dp->di_ib[adp->ad_offset - NDADDR] = 0;
10266}
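
/*
 * To make the size rollback above concrete: if the first allocdirect
 * whose old contents were a fragment sits at logical block "offset",
 * then "offset" full-sized blocks precede it and only "oldsize" bytes
 * of the fragment itself are known to be on disk.  A minimal sketch
 * of that arithmetic, not compiled into the kernel; the function and
 * parameter names are hypothetical:
 */
#if 0
static int64_t
example_rollback_size(int64_t bsize, int64_t offset, int64_t oldsize)
{

	/* e.g., bsize 16384, offset 2, oldsize 4096 => 36864 bytes */
	return (bsize * offset + oldsize);
}
#endif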
10267
10268/*
10269 * Cancel an indirdep as a result of truncation.  Release all of the
10270 * children allocindirs and place their journal work on the appropriate
10271 * list.
10272 */
10273static void
10274cancel_indirdep(indirdep, bp, freeblks)
10275	struct indirdep *indirdep;
10276	struct buf *bp;
10277	struct freeblks *freeblks;
10278{
10279	struct allocindir *aip;
10280
10281	/*
10282	 * None of the indirect pointers will ever be visible,
10283	 * so they can simply be tossed. GOINGAWAY ensures
10284	 * that allocated pointers will be saved in the buffer
10285	 * cache until they are freed. Note that they will
10286	 * only be able to be found by their physical address
10287	 * since the inode mapping the logical address will
10288	 * be gone. The save buffer used for the safe copy
10289	 * was allocated in setup_allocindir_phase2 using
10290	 * the physical address so it could be used for this
10291	 * purpose. Hence we swap the safe copy with the real
10292	 * copy, allowing the safe copy to be freed and holding
10293	 * on to the real copy for later use in indir_trunc.
10294	 */
10295	if (indirdep->ir_state & GOINGAWAY)
10296		panic("cancel_indirdep: already gone");
10297	if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
10298		indirdep->ir_state |= DEPCOMPLETE;
10299		LIST_REMOVE(indirdep, ir_next);
10300	}
10301	indirdep->ir_state |= GOINGAWAY;
10302	VFSTOUFS(indirdep->ir_list.wk_mp)->um_numindirdeps += 1;
10303	/*
10304	 * Pass in bp for blocks that still have journal writes
10305	 * pending so we can cancel them on their own.
10306	 */
10307	while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
10308		cancel_allocindir(aip, bp, freeblks, 0);
10309	while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0)
10310		cancel_allocindir(aip, NULL, freeblks, 0);
10311	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0)
10312		cancel_allocindir(aip, NULL, freeblks, 0);
10313	while ((aip = LIST_FIRST(&indirdep->ir_completehd)) != 0)
10314		cancel_allocindir(aip, NULL, freeblks, 0);
10315	/*
10316	 * If there are pending partial truncations we need to keep the
10317	 * old block copy around until they complete.  This is because
10318	 * the current b_data is not a perfect superset of the available
10319	 * blocks.
10320	 */
10321	if (TAILQ_EMPTY(&indirdep->ir_trunc))
10322		bcopy(bp->b_data, indirdep->ir_savebp->b_data, bp->b_bcount);
10323	else
10324		bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10325	WORKLIST_REMOVE(&indirdep->ir_list);
10326	WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, &indirdep->ir_list);
10327	indirdep->ir_bp = NULL;
10328	indirdep->ir_freeblks = freeblks;
10329}
10330
10331/*
10332 * Free an indirdep once it no longer has new pointers to track.
10333 */
10334static void
10335free_indirdep(indirdep)
10336	struct indirdep *indirdep;
10337{
10338
10339	KASSERT(TAILQ_EMPTY(&indirdep->ir_trunc),
10340	    ("free_indirdep: Indir trunc list not empty."));
10341	KASSERT(LIST_EMPTY(&indirdep->ir_completehd),
10342	    ("free_indirdep: Complete head not empty."));
10343	KASSERT(LIST_EMPTY(&indirdep->ir_writehd),
10344	    ("free_indirdep: write head not empty."));
10345	KASSERT(LIST_EMPTY(&indirdep->ir_donehd),
10346	    ("free_indirdep: done head not empty."));
10347	KASSERT(LIST_EMPTY(&indirdep->ir_deplisthd),
10348	    ("free_indirdep: deplist head not empty."));
10349	KASSERT((indirdep->ir_state & DEPCOMPLETE),
10350	    ("free_indirdep: %p still on newblk list.", indirdep));
10351	KASSERT(indirdep->ir_saveddata == NULL,
10352	    ("free_indirdep: %p still has saved data.", indirdep));
10353	if (indirdep->ir_state & ONWORKLIST)
10354		WORKLIST_REMOVE(&indirdep->ir_list);
10355	WORKITEM_FREE(indirdep, D_INDIRDEP);
10356}
10357
10358/*
10359 * Called before a write to an indirdep.  This routine is responsible for
10360 * rolling back pointers to a safe state which includes only those
10361 * allocindirs which have been completed.
10362 */
10363static void
10364initiate_write_indirdep(indirdep, bp)
10365	struct indirdep *indirdep;
10366	struct buf *bp;
10367{
10368
10369	indirdep->ir_state |= IOSTARTED;
10370	if (indirdep->ir_state & GOINGAWAY)
10371		panic("disk_io_initiation: indirdep gone");
10372	/*
10373	 * If there are no remaining dependencies, this will be writing
10374	 * the real pointers.
10375	 */
10376	if (LIST_EMPTY(&indirdep->ir_deplisthd) &&
10377	    TAILQ_EMPTY(&indirdep->ir_trunc))
10378		return;
10379	/*
10380	 * Replace up-to-date version with safe version.
10381	 */
10382	if (indirdep->ir_saveddata == NULL) {
10383		FREE_LOCK(&lk);
10384		indirdep->ir_saveddata = malloc(bp->b_bcount, M_INDIRDEP,
10385		    M_SOFTDEP_FLAGS);
10386		ACQUIRE_LOCK(&lk);
10387	}
10388	indirdep->ir_state &= ~ATTACHED;
10389	indirdep->ir_state |= UNDONE;
10390	bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
10391	bcopy(indirdep->ir_savebp->b_data, bp->b_data,
10392	    bp->b_bcount);
10393}
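
/*
 * The save/substitute step above follows a general pattern: preserve
 * the up-to-date contents, then hand the driver a copy holding only
 * committed pointers.  A minimal sketch of that pattern, not
 * compiled; the names are hypothetical stand-ins for the buf and
 * indirdep fields used above:
 */
#if 0
static void
example_substitute_safe_copy(char *live, char *saved, char *safe, long len)
{

	bcopy(live, saved, len);	/* stash the up-to-date copy */
	bcopy(safe, live, len);		/* write only committed pointers */
}
#endif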
10394
10395/*
10396 * Called when an inode has been cleared in a cg bitmap.  This finally
10397 * eliminates any canceled jaddrefs.
10398 */
10399void
10400softdep_setup_inofree(mp, bp, ino, wkhd)
10401	struct mount *mp;
10402	struct buf *bp;
10403	ino_t ino;
10404	struct workhead *wkhd;
10405{
10406	struct worklist *wk, *wkn;
10407	struct inodedep *inodedep;
10408	uint8_t *inosused;
10409	struct cg *cgp;
10410	struct fs *fs;
10411
10412	ACQUIRE_LOCK(&lk);
10413	fs = VFSTOUFS(mp)->um_fs;
10414	cgp = (struct cg *)bp->b_data;
10415	inosused = cg_inosused(cgp);
10416	if (isset(inosused, ino % fs->fs_ipg))
10417		panic("softdep_setup_inofree: inode %ju not freed.",
10418		    (uintmax_t)ino);
10419	if (inodedep_lookup(mp, ino, 0, &inodedep))
10420		panic("softdep_setup_inofree: ino %ju has existing inodedep %p",
10421		    (uintmax_t)ino, inodedep);
10422	if (wkhd) {
10423		LIST_FOREACH_SAFE(wk, wkhd, wk_list, wkn) {
10424			if (wk->wk_type != D_JADDREF)
10425				continue;
10426			WORKLIST_REMOVE(wk);
10427			/*
10428			 * We can free immediately even if the jaddref
10429			 * isn't attached in a background write, as the
10430			 * bitmaps are now reconciled.
10431			 */
10432			wk->wk_state |= COMPLETE | ATTACHED;
10433			free_jaddref(WK_JADDREF(wk));
10434		}
10435		jwork_move(&bp->b_dep, wkhd);
10436	}
10437	FREE_LOCK(&lk);
10438}
10439
10441/*
10442 * Called via ffs_blkfree() after a set of frags has been cleared from a cg
10443 * map.  Any dependencies waiting for the write to clear are added to the
10444 * buf's list and any jnewblks that are being canceled are discarded
10445 * immediately.
10446 */
10447void
10448softdep_setup_blkfree(mp, bp, blkno, frags, wkhd)
10449	struct mount *mp;
10450	struct buf *bp;
10451	ufs2_daddr_t blkno;
10452	int frags;
10453	struct workhead *wkhd;
10454{
10455	struct bmsafemap *bmsafemap;
10456	struct jnewblk *jnewblk;
10457	struct worklist *wk;
10458	struct fs *fs;
10459#ifdef SUJ_DEBUG
10460	uint8_t *blksfree;
10461	struct cg *cgp;
10462	ufs2_daddr_t jstart;
10463	ufs2_daddr_t jend;
10464	ufs2_daddr_t end;
10465	long bno;
10466	int i;
10467#endif
10468
10469	CTR3(KTR_SUJ,
10470	    "softdep_setup_blkfree: blkno %jd frags %d wk head %p",
10471	    blkno, frags, wkhd);
10472
10473	ACQUIRE_LOCK(&lk);
10474	/* Lookup the bmsafemap so we track when it is dirty. */
10475	fs = VFSTOUFS(mp)->um_fs;
10476	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10477	/*
10478	 * Detach any jnewblks which have been canceled.  They must linger
10479	 * until the bitmap is cleared again by ffs_blkfree() to prevent
10480	 * an unjournaled allocation from hitting the disk.
10481	 */
10482	if (wkhd) {
10483		while ((wk = LIST_FIRST(wkhd)) != NULL) {
10484			CTR2(KTR_SUJ,
10485			    "softdep_setup_blkfree: blkno %jd wk type %d",
10486			    blkno, wk->wk_type);
10487			WORKLIST_REMOVE(wk);
10488			if (wk->wk_type != D_JNEWBLK) {
10489				WORKLIST_INSERT(&bmsafemap->sm_freehd, wk);
10490				continue;
10491			}
10492			jnewblk = WK_JNEWBLK(wk);
10493			KASSERT(jnewblk->jn_state & GOINGAWAY,
10494			    ("softdep_setup_blkfree: jnewblk not canceled."));
10495#ifdef SUJ_DEBUG
10496			/*
10497			 * Assert that this block is free in the bitmap
10498			 * before we discard the jnewblk.
10499			 */
10500			cgp = (struct cg *)bp->b_data;
10501			blksfree = cg_blksfree(cgp);
10502			bno = dtogd(fs, jnewblk->jn_blkno);
10503			for (i = jnewblk->jn_oldfrags;
10504			    i < jnewblk->jn_frags; i++) {
10505				if (isset(blksfree, bno + i))
10506					continue;
10507				panic("softdep_setup_blkfree: not free");
10508			}
10509#endif
10510			/*
10511			 * Even if it's not attached we can free immediately
10512			 * as the new bitmap is correct.
10513			 */
10514			wk->wk_state |= COMPLETE | ATTACHED;
10515			free_jnewblk(jnewblk);
10516		}
10517	}
10518
10519#ifdef SUJ_DEBUG
10520	/*
10521	 * Assert that we are not freeing a block which has an outstanding
10522	 * allocation dependency.
10523	 */
10524	fs = VFSTOUFS(mp)->um_fs;
10525	bmsafemap = bmsafemap_lookup(mp, bp, dtog(fs, blkno), NULL);
10526	end = blkno + frags;
10527	LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10528		/*
10529		 * Don't match against blocks that will be freed when the
10530		 * background write is done.
10531		 */
10532		if ((jnewblk->jn_state & (ATTACHED | COMPLETE | DEPCOMPLETE)) ==
10533		    (COMPLETE | DEPCOMPLETE))
10534			continue;
10535		jstart = jnewblk->jn_blkno + jnewblk->jn_oldfrags;
10536		jend = jnewblk->jn_blkno + jnewblk->jn_frags;
10537		if ((blkno >= jstart && blkno < jend) ||
10538		    (end > jstart && end <= jend)) {
10539			printf("state 0x%X %jd - %d %d dep %p\n",
10540			    jnewblk->jn_state, (intmax_t)jnewblk->jn_blkno,
10541			    jnewblk->jn_oldfrags, jnewblk->jn_frags,
10542			    jnewblk->jn_dep);
10543			panic("softdep_setup_blkfree: "
10544			    "%jd-%jd(%d) overlaps with %jd-%jd",
10545			    (intmax_t)blkno, (intmax_t)end, frags,
			    (intmax_t)jstart, (intmax_t)jend);
10546		}
10547	}
10548#endif
10549	FREE_LOCK(&lk);
10550}
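
/*
 * The SUJ_DEBUG check above tests whether either endpoint of the
 * freed range [blkno, end) falls inside a journaled allocation
 * [jstart, jend).  The same test in isolation, as a minimal
 * not-compiled sketch with hypothetical names:
 */
#if 0
static int
example_endpoint_overlap(int64_t blkno, int64_t end, int64_t jstart,
	int64_t jend)
{

	return ((blkno >= jstart && blkno < jend) ||
	    (end > jstart && end <= jend));
}
#endif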
10551
10552/*
10553 * Revert a block allocation when the journal record that describes it
10554 * is not yet written.
10555 */
10556int
10557jnewblk_rollback(jnewblk, fs, cgp, blksfree)
10558	struct jnewblk *jnewblk;
10559	struct fs *fs;
10560	struct cg *cgp;
10561	uint8_t *blksfree;
10562{
10563	ufs1_daddr_t fragno;
10564	long cgbno, bbase;
10565	int frags, blk;
10566	int i;
10567
10568	frags = 0;
10569	cgbno = dtogd(fs, jnewblk->jn_blkno);
10570	/*
10571	 * We have to test which frags need to be rolled back.  We may
10572	 * be operating on a stale copy when doing background writes.
10573	 */
10574	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++)
10575		if (isclr(blksfree, cgbno + i))
10576			frags++;
10577	if (frags == 0)
10578		return (0);
10579	/*
10580	 * This is mostly ffs_blkfree() sans some validation and
10581	 * superblock updates.
10582	 */
10583	if (frags == fs->fs_frag) {
10584		fragno = fragstoblks(fs, cgbno);
10585		ffs_setblock(fs, blksfree, fragno);
10586		ffs_clusteracct(fs, cgp, fragno, 1);
10587		cgp->cg_cs.cs_nbfree++;
10588	} else {
10589		cgbno += jnewblk->jn_oldfrags;
10590		bbase = cgbno - fragnum(fs, cgbno);
10591		/* Decrement the old frags.  */
10592		blk = blkmap(fs, blksfree, bbase);
10593		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
10594		/* Deallocate the fragment */
10595		for (i = 0; i < frags; i++)
10596			setbit(blksfree, cgbno + i);
10597		cgp->cg_cs.cs_nffree += frags;
10598		/* Add back in counts associated with the new frags */
10599		blk = blkmap(fs, blksfree, bbase);
10600		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
10601		/* If a complete block has been reassembled, account for it. */
10602		fragno = fragstoblks(fs, bbase);
10603		if (ffs_isblock(fs, blksfree, fragno)) {
10604			cgp->cg_cs.cs_nffree -= fs->fs_frag;
10605			ffs_clusteracct(fs, cgp, fragno, 1);
10606			cgp->cg_cs.cs_nbfree++;
10607		}
10608	}
10609	stat_jnewblk++;
10610	jnewblk->jn_state &= ~ATTACHED;
10611	jnewblk->jn_state |= UNDONE;
10612
10613	return (frags);
10614}
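
/*
 * The frag count above only reverts frags the (possibly stale) bitmap
 * still shows as allocated, i.e., whose bits are clear in blksfree.
 * That scan in isolation, as a minimal not-compiled sketch; isclr()
 * is the <sys/param.h> bitmap macro and the other names are
 * hypothetical:
 */
#if 0
static int
example_frags_to_rollback(uint8_t *blksfree, long cgbno, int oldfrags,
	int nfrags)
{
	int i, frags;

	frags = 0;
	for (i = oldfrags; i < nfrags; i++)
		if (isclr(blksfree, cgbno + i))
			frags++;
	return (frags);
}
#endif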
10615
10616static void
10617initiate_write_bmsafemap(bmsafemap, bp)
10618	struct bmsafemap *bmsafemap;
10619	struct buf *bp;			/* The cg block. */
10620{
10621	struct jaddref *jaddref;
10622	struct jnewblk *jnewblk;
10623	uint8_t *inosused;
10624	uint8_t *blksfree;
10625	struct cg *cgp;
10626	struct fs *fs;
10627	ino_t ino;
10628
10629	if (bmsafemap->sm_state & IOSTARTED)
10630		return;
10631	bmsafemap->sm_state |= IOSTARTED;
10632	/*
10633	 * Clear any inode allocations which are pending journal writes.
10634	 */
10635	if (LIST_FIRST(&bmsafemap->sm_jaddrefhd) != NULL) {
10636		cgp = (struct cg *)bp->b_data;
10637		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10638		inosused = cg_inosused(cgp);
10639		LIST_FOREACH(jaddref, &bmsafemap->sm_jaddrefhd, ja_bmdeps) {
10640			ino = jaddref->ja_ino % fs->fs_ipg;
10641			if (isset(inosused, ino)) {
10642				if ((jaddref->ja_mode & IFMT) == IFDIR)
10643					cgp->cg_cs.cs_ndir--;
10644				cgp->cg_cs.cs_nifree++;
10645				clrbit(inosused, ino);
10646				jaddref->ja_state &= ~ATTACHED;
10647				jaddref->ja_state |= UNDONE;
10648				stat_jaddref++;
10649			} else
10650				panic("initiate_write_bmsafemap: inode %ju "
10651				    "marked free", (uintmax_t)jaddref->ja_ino);
10652		}
10653	}
10654	/*
10655	 * Clear any block allocations which are pending journal writes.
10656	 */
10657	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
10658		cgp = (struct cg *)bp->b_data;
10659		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
10660		blksfree = cg_blksfree(cgp);
10661		LIST_FOREACH(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps) {
10662			if (jnewblk_rollback(jnewblk, fs, cgp, blksfree))
10663				continue;
10664			panic("initiate_write_bmsafemap: block %jd "
10665			    "marked free", jnewblk->jn_blkno);
10666		}
10667	}
10668	/*
10669	 * Move allocation lists to the written lists so they can be
10670	 * cleared once the block write is complete.
10671	 */
10672	LIST_SWAP(&bmsafemap->sm_inodedephd, &bmsafemap->sm_inodedepwr,
10673	    inodedep, id_deps);
10674	LIST_SWAP(&bmsafemap->sm_newblkhd, &bmsafemap->sm_newblkwr,
10675	    newblk, nb_deps);
10676	LIST_SWAP(&bmsafemap->sm_freehd, &bmsafemap->sm_freewr, worklist,
10677	    wk_list);
10678}
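
/*
 * The LIST_SWAP() calls above move every entry between the pending
 * and written heads in constant time by exchanging the list heads
 * themselves rather than walking the entries.  A minimal not-compiled
 * sketch with a hypothetical element type; LIST_SWAP() is the
 * <sys/queue.h> macro:
 */
#if 0
struct example_elem {
	LIST_ENTRY(example_elem) link;
};
LIST_HEAD(example_head, example_elem);

static void
example_exchange(struct example_head *pending, struct example_head *written)
{

	/* Afterward "written" holds what "pending" held, and vice versa. */
	LIST_SWAP(pending, written, example_elem, link);
}
#endif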
10679
10680/*
10681 * This routine is called during the completion interrupt
10682 * service routine for a disk write (from the procedure called
10683 * by the device driver to inform the filesystem caches of
10684 * a request completion).  It should be called early in this
10685 * procedure, before the block is made available to other
10686 * processes or other routines are called.
10687 */
10689static void
10690softdep_disk_write_complete(bp)
10691	struct buf *bp;		/* describes the completed disk write */
10692{
10693	struct worklist *wk;
10694	struct worklist *owk;
10695	struct workhead reattach;
10696	struct freeblks *freeblks;
10697	struct buf *sbp;
10698
10699	/*
10700	 * If an error occurred while doing the write, then the data
10701	 * has not hit the disk and the dependencies cannot be unrolled.
10702	 */
10703	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
10704		return;
10705	LIST_INIT(&reattach);
10706	/*
10707	 * This lock must not be released anywhere in this code segment.
10708	 */
10709	sbp = NULL;
10710	owk = NULL;
10711	ACQUIRE_LOCK(&lk);
10712	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
10713		WORKLIST_REMOVE(wk);
10714		dep_write[wk->wk_type]++;
10715		if (wk == owk)
10716			panic("duplicate worklist: %p\n", wk);
10717		owk = wk;
10718		switch (wk->wk_type) {
10719
10720		case D_PAGEDEP:
10721			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
10722				WORKLIST_INSERT(&reattach, wk);
10723			continue;
10724
10725		case D_INODEDEP:
10726			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
10727				WORKLIST_INSERT(&reattach, wk);
10728			continue;
10729
10730		case D_BMSAFEMAP:
10731			if (handle_written_bmsafemap(WK_BMSAFEMAP(wk), bp))
10732				WORKLIST_INSERT(&reattach, wk);
10733			continue;
10734
10735		case D_MKDIR:
10736			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
10737			continue;
10738
10739		case D_ALLOCDIRECT:
10740			wk->wk_state |= COMPLETE;
10741			handle_allocdirect_partdone(WK_ALLOCDIRECT(wk), NULL);
10742			continue;
10743
10744		case D_ALLOCINDIR:
10745			wk->wk_state |= COMPLETE;
10746			handle_allocindir_partdone(WK_ALLOCINDIR(wk));
10747			continue;
10748
10749		case D_INDIRDEP:
10750			if (handle_written_indirdep(WK_INDIRDEP(wk), bp, &sbp))
10751				WORKLIST_INSERT(&reattach, wk);
10752			continue;
10753
10754		case D_FREEBLKS:
10755			wk->wk_state |= COMPLETE;
10756			freeblks = WK_FREEBLKS(wk);
10757			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE &&
10758			    LIST_EMPTY(&freeblks->fb_jblkdephd))
10759				add_to_worklist(wk, WK_NODELAY);
10760			continue;
10761
10762		case D_FREEWORK:
10763			handle_written_freework(WK_FREEWORK(wk));
10764			break;
10765
10766		case D_JSEGDEP:
10767			free_jsegdep(WK_JSEGDEP(wk));
10768			continue;
10769
10770		case D_JSEG:
10771			handle_written_jseg(WK_JSEG(wk), bp);
10772			continue;
10773
10774		case D_SBDEP:
10775			if (handle_written_sbdep(WK_SBDEP(wk), bp))
10776				WORKLIST_INSERT(&reattach, wk);
10777			continue;
10778
10779		case D_FREEDEP:
10780			free_freedep(WK_FREEDEP(wk));
10781			continue;
10782
10783		default:
10784			panic("softdep_disk_write_complete: Unknown type %s",
10785			    TYPENAME(wk->wk_type));
10786			/* NOTREACHED */
10787		}
10788	}
10789	/*
10790	 * Reattach any requests that must be redone.
10791	 */
10792	while ((wk = LIST_FIRST(&reattach)) != NULL) {
10793		WORKLIST_REMOVE(wk);
10794		WORKLIST_INSERT(&bp->b_dep, wk);
10795	}
10796	FREE_LOCK(&lk);
10797	if (sbp)
10798		brelse(sbp);
10799}
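
/*
 * The loop above is a drain-and-reattach pattern: every dependency is
 * unhooked from the buffer, dispatched by type, and re-inserted only
 * if its handler reports more work pending a future write.  Reduced
 * to its skeleton as a minimal not-compiled sketch (the handler
 * callback is hypothetical; the WORKLIST_* macros are this file's):
 */
#if 0
static void
example_drain(struct workhead *deps, struct workhead *reattach,
	int (*handler)(struct worklist *))
{
	struct worklist *wk;

	while ((wk = LIST_FIRST(deps)) != NULL) {
		WORKLIST_REMOVE(wk);
		if ((*handler)(wk))		/* nonzero: redo later */
			WORKLIST_INSERT(reattach, wk);
	}
}
#endif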
10800
10801/*
10802 * Called from within softdep_disk_write_complete above. Note that
10803 * this routine is always called from interrupt level with further
10804 * splbio interrupts blocked.
10805 */
10806static void
10807handle_allocdirect_partdone(adp, wkhd)
10808	struct allocdirect *adp;	/* the completed allocdirect */
10809	struct workhead *wkhd;		/* Work to do when inode is written. */
10810{
10811	struct allocdirectlst *listhead;
10812	struct allocdirect *listadp;
10813	struct inodedep *inodedep;
10814	long bsize;
10815
10816	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10817		return;
10818	/*
10819	 * The on-disk inode cannot claim to be any larger than the last
10820	 * fragment that has been written. Otherwise, the on-disk inode
10821	 * might have fragments that were not the last block in the file
10822	 * which would corrupt the filesystem. Thus, we cannot free any
10823	 * allocdirects after one whose ad_oldblkno claims a fragment as
10824	 * these blocks must be rolled back to zero before writing the inode.
10825	 * We check the currently active set of allocdirects in id_inoupdt
10826	 * or id_extupdt as appropriate.
10827	 */
10828	inodedep = adp->ad_inodedep;
10829	bsize = inodedep->id_fs->fs_bsize;
10830	if (adp->ad_state & EXTDATA)
10831		listhead = &inodedep->id_extupdt;
10832	else
10833		listhead = &inodedep->id_inoupdt;
10834	TAILQ_FOREACH(listadp, listhead, ad_next) {
10835		/* found our block */
10836		if (listadp == adp)
10837			break;
10838		/* continue if ad_oldsize is not a fragment */
10839		if (listadp->ad_oldsize == 0 ||
10840		    listadp->ad_oldsize == bsize)
10841			continue;
10842		/* hit a fragment */
10843		return;
10844	}
10845	/*
10846	 * If we have reached the end of the current list without
10847	 * finding the just finished dependency, then it must be
10848	 * on the future dependency list. Future dependencies cannot
10849	 * be freed until they are moved to the current list.
10850	 */
10851	if (listadp == NULL) {
10852#ifdef DEBUG
10853		if (adp->ad_state & EXTDATA)
10854			listhead = &inodedep->id_newextupdt;
10855		else
10856			listhead = &inodedep->id_newinoupdt;
10857		TAILQ_FOREACH(listadp, listhead, ad_next)
10858			/* found our block */
10859			if (listadp == adp)
10860				break;
10861		if (listadp == NULL)
10862			panic("handle_allocdirect_partdone: lost dep");
10863#endif /* DEBUG */
10864		return;
10865	}
10866	/*
10867	 * If we have found the just finished dependency, then queue
10868	 * it along with anything that follows it that is complete.
10869	 * Since the pointer has not yet been written in the inode
10870	 * as the dependency prevents it, place the allocdirect on the
10871	 * bufwait list where it will be freed once the pointer is
10872	 * valid.
10873	 */
10874	if (wkhd == NULL)
10875		wkhd = &inodedep->id_bufwait;
10876	for (; adp; adp = listadp) {
10877		listadp = TAILQ_NEXT(adp, ad_next);
10878		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
10879			return;
10880		TAILQ_REMOVE(listhead, adp, ad_next);
10881		WORKLIST_INSERT(wkhd, &adp->ad_block.nb_list);
10882	}
10883}
10884
10885/*
10886 * Called from within softdep_disk_write_complete above.  This routine
10887 * completes successfully written allocindirs.
10888 */
10889static void
10890handle_allocindir_partdone(aip)
10891	struct allocindir *aip;		/* the completed allocindir */
10892{
10893	struct indirdep *indirdep;
10894
10895	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
10896		return;
10897	indirdep = aip->ai_indirdep;
10898	LIST_REMOVE(aip, ai_next);
10899	/*
10900	 * Don't set a pointer while the buffer is undergoing IO or while
10901	 * we have active truncations.
10902	 */
10903	if (indirdep->ir_state & UNDONE || !TAILQ_EMPTY(&indirdep->ir_trunc)) {
10904		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
10905		return;
10906	}
10907	if (indirdep->ir_state & UFS1FMT)
10908		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10909		    aip->ai_newblkno;
10910	else
10911		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
10912		    aip->ai_newblkno;
10913	/*
10914	 * Await the pointer write before freeing the allocindir.
10915	 */
10916	LIST_INSERT_HEAD(&indirdep->ir_writehd, aip, ai_next);
10917}
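
/*
 * The UFS1FMT branch above exists because an indirect block is an
 * array of 32-bit block pointers on UFS1 and 64-bit pointers on UFS2,
 * so the same byte buffer is indexed at different widths.  A minimal
 * not-compiled sketch with hypothetical names; int32_t and int64_t
 * stand in for ufs1_daddr_t and ufs2_daddr_t:
 */
#if 0
static void
example_set_indir_ptr(void *data, int offset, int64_t blkno, int ufs1fmt)
{

	if (ufs1fmt)
		((int32_t *)data)[offset] = (int32_t)blkno;
	else
		((int64_t *)data)[offset] = blkno;
}
#endif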
10918
10919/*
10920 * Release segments held on a jwork list.
10921 */
10922static void
10923handle_jwork(wkhd)
10924	struct workhead *wkhd;
10925{
10926	struct worklist *wk;
10927
10928	while ((wk = LIST_FIRST(wkhd)) != NULL) {
10929		WORKLIST_REMOVE(wk);
10930		switch (wk->wk_type) {
10931		case D_JSEGDEP:
10932			free_jsegdep(WK_JSEGDEP(wk));
10933			continue;
10934		case D_FREEDEP:
10935			free_freedep(WK_FREEDEP(wk));
10936			continue;
10937		case D_FREEFRAG:
10938			rele_jseg(WK_JSEG(WK_FREEFRAG(wk)->ff_jdep));
10939			WORKITEM_FREE(wk, D_FREEFRAG);
10940			continue;
10941		case D_FREEWORK:
10942			handle_written_freework(WK_FREEWORK(wk));
10943			continue;
10944		default:
10945			panic("handle_jwork: Unknown type %s\n",
10946			    TYPENAME(wk->wk_type));
10947		}
10948	}
10949}
10950
10951/*
10952 * Handle the bufwait list on an inode when it is safe to release items
10953 * held there.  This normally happens after an inode block is written but
10954 * may be delayed and handled later if there are pending journal items that
10955 * are not yet safe to be released.
10956 */
10957static struct freefile *
10958handle_bufwait(inodedep, refhd)
10959	struct inodedep *inodedep;
10960	struct workhead *refhd;
10961{
10962	struct jaddref *jaddref;
10963	struct freefile *freefile;
10964	struct worklist *wk;
10965
10966	freefile = NULL;
10967	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
10968		WORKLIST_REMOVE(wk);
10969		switch (wk->wk_type) {
10970		case D_FREEFILE:
10971			/*
10972			 * We defer adding freefile to the worklist
10973			 * until all other additions have been made to
10974			 * ensure that it will be done after all the
10975			 * old blocks have been freed.
10976			 */
10977			if (freefile != NULL)
10978				panic("handle_bufwait: freefile");
10979			freefile = WK_FREEFILE(wk);
10980			continue;
10981
10982		case D_MKDIR:
10983			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
10984			continue;
10985
10986		case D_DIRADD:
10987			diradd_inode_written(WK_DIRADD(wk), inodedep);
10988			continue;
10989
10990		case D_FREEFRAG:
10991			wk->wk_state |= COMPLETE;
10992			if ((wk->wk_state & ALLCOMPLETE) == ALLCOMPLETE)
10993				add_to_worklist(wk, 0);
10994			continue;
10995
10996		case D_DIRREM:
10997			wk->wk_state |= COMPLETE;
10998			add_to_worklist(wk, 0);
10999			continue;
11000
11001		case D_ALLOCDIRECT:
11002		case D_ALLOCINDIR:
11003			free_newblk(WK_NEWBLK(wk));
11004			continue;
11005
11006		case D_JNEWBLK:
11007			wk->wk_state |= COMPLETE;
11008			free_jnewblk(WK_JNEWBLK(wk));
11009			continue;
11010
11011		/*
11012		 * Save freed journal segments and add references on
11013		 * the supplied list which will delay their release
11014		 * until the cg bitmap is cleared on disk.
11015		 */
11016		case D_JSEGDEP:
11017			if (refhd == NULL)
11018				free_jsegdep(WK_JSEGDEP(wk));
11019			else
11020				WORKLIST_INSERT(refhd, wk);
11021			continue;
11022
11023		case D_JADDREF:
11024			jaddref = WK_JADDREF(wk);
11025			TAILQ_REMOVE(&inodedep->id_inoreflst, &jaddref->ja_ref,
11026			    if_deps);
11027			/*
11028			 * Transfer any jaddrefs to the list to be freed with
11029			 * the bitmap if we're handling a removed file.
11030			 */
11031			if (refhd == NULL) {
11032				wk->wk_state |= COMPLETE;
11033				free_jaddref(jaddref);
11034			} else
11035				WORKLIST_INSERT(refhd, wk);
11036			continue;
11037
11038		default:
11039			panic("handle_bufwait: Unknown type %p(%s)",
11040			    wk, TYPENAME(wk->wk_type));
11041			/* NOTREACHED */
11042		}
11043	}
11044	return (freefile);
11045}

11046/*
11047 * Called from within softdep_disk_write_complete above to restore
11048 * in-memory inode block contents to their most up-to-date state. Note
11049 * that this routine is always called from interrupt level with further
11050 * splbio interrupts blocked.
11051 */
11052static int
11053handle_written_inodeblock(inodedep, bp)
11054	struct inodedep *inodedep;
11055	struct buf *bp;		/* buffer containing the inode block */
11056{
11057	struct freefile *freefile;
11058	struct allocdirect *adp, *nextadp;
11059	struct ufs1_dinode *dp1 = NULL;
11060	struct ufs2_dinode *dp2 = NULL;
11061	struct workhead wkhd;
11062	int hadchanges, fstype;
11063	ino_t freelink;
11064
11065	LIST_INIT(&wkhd);
11066	hadchanges = 0;
11067	freefile = NULL;
11068	if ((inodedep->id_state & IOSTARTED) == 0)
11069		panic("handle_written_inodeblock: not started");
11070	inodedep->id_state &= ~IOSTARTED;
11071	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
11072		fstype = UFS1;
11073		dp1 = (struct ufs1_dinode *)bp->b_data +
11074		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11075		freelink = dp1->di_freelink;
11076	} else {
11077		fstype = UFS2;
11078		dp2 = (struct ufs2_dinode *)bp->b_data +
11079		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
11080		freelink = dp2->di_freelink;
11081	}
11082	/*
11083	 * Leave this inodeblock dirty until it's in the list.
11084	 */
11085	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) == UNLINKED) {
11086		struct inodedep *inon;
11087
11088		inon = TAILQ_NEXT(inodedep, id_unlinked);
11089		if ((inon == NULL && freelink == 0) ||
11090		    (inon && inon->id_ino == freelink)) {
11091			if (inon)
11092				inon->id_state |= UNLINKPREV;
11093			inodedep->id_state |= UNLINKNEXT;
11094		}
11095		hadchanges = 1;
11096	}
11097	/*
11098	 * If we had to rollback the inode allocation because of
11099	 * bitmaps being incomplete, then simply restore it.
11100	 * Keep the block dirty so that it will not be reclaimed until
11101	 * all associated dependencies have been cleared and the
11102	 * corresponding updates written to disk.
11103	 */
11104	if (inodedep->id_savedino1 != NULL) {
11105		hadchanges = 1;
11106		if (fstype == UFS1)
11107			*dp1 = *inodedep->id_savedino1;
11108		else
11109			*dp2 = *inodedep->id_savedino2;
11110		free(inodedep->id_savedino1, M_SAVEDINO);
11111		inodedep->id_savedino1 = NULL;
11112		if ((bp->b_flags & B_DELWRI) == 0)
11113			stat_inode_bitmap++;
11114		bdirty(bp);
11115		/*
11116		 * If the inode is clear here and GOINGAWAY it will never
11117		 * be written.  Process the bufwait and clear any pending
11118		 * work which may include the freefile.
11119		 */
11120		if (inodedep->id_state & GOINGAWAY)
11121			goto bufwait;
11122		return (1);
11123	}
11124	inodedep->id_state |= COMPLETE;
11125	/*
11126	 * Roll forward anything that had to be rolled back before
11127	 * the inode could be updated.
11128	 */
11129	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
11130		nextadp = TAILQ_NEXT(adp, ad_next);
11131		if (adp->ad_state & ATTACHED)
11132			panic("handle_written_inodeblock: new entry");
11133		if (fstype == UFS1) {
11134			if (adp->ad_offset < NDADDR) {
11135				if (dp1->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11136					panic("%s: %s #%jd mismatch %d != %jd",
11137					    "handle_written_inodeblock",
11138					    "direct pointer",
11139					    (intmax_t)adp->ad_offset,
11140					    dp1->di_db[adp->ad_offset],
11141					    (intmax_t)adp->ad_oldblkno);
11142				dp1->di_db[adp->ad_offset] = adp->ad_newblkno;
11143			} else {
11144				if (dp1->di_ib[adp->ad_offset - NDADDR] != 0)
11145					panic("%s: %s #%jd allocated as %d",
11146					    "handle_written_inodeblock",
11147					    "indirect pointer",
11148					    (intmax_t)adp->ad_offset - NDADDR,
11149					    dp1->di_ib[adp->ad_offset - NDADDR]);
11150				dp1->di_ib[adp->ad_offset - NDADDR] =
11151				    adp->ad_newblkno;
11152			}
11153		} else {
11154			if (adp->ad_offset < NDADDR) {
11155				if (dp2->di_db[adp->ad_offset]!=adp->ad_oldblkno)
11156					panic("%s: %s #%jd %s %jd != %jd",
11157					    "handle_written_inodeblock",
11158					    "direct pointer",
11159					    (intmax_t)adp->ad_offset, "mismatch",
11160					    (intmax_t)dp2->di_db[adp->ad_offset],
11161					    (intmax_t)adp->ad_oldblkno);
11162				dp2->di_db[adp->ad_offset] = adp->ad_newblkno;
11163			} else {
11164				if (dp2->di_ib[adp->ad_offset - NDADDR] != 0)
11165					panic("%s: %s #%jd allocated as %jd",
11166					    "handle_written_inodeblock",
11167					    "indirect pointer",
11168					    (intmax_t)adp->ad_offset - NDADDR,
11169					    (intmax_t)
11170					    dp2->di_ib[adp->ad_offset - NDADDR]);
11171				dp2->di_ib[adp->ad_offset - NDADDR] =
11172				    adp->ad_newblkno;
11173			}
11174		}
11175		adp->ad_state &= ~UNDONE;
11176		adp->ad_state |= ATTACHED;
11177		hadchanges = 1;
11178	}
11179	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
11180		nextadp = TAILQ_NEXT(adp, ad_next);
11181		if (adp->ad_state & ATTACHED)
11182			panic("handle_written_inodeblock: new entry");
11183		if (dp2->di_extb[adp->ad_offset] != adp->ad_oldblkno)
11184			panic("%s: direct pointer #%jd %s %jd != %jd",
11185			    "handle_written_inodeblock",
11186			    (intmax_t)adp->ad_offset, "mismatch",
11187			    (intmax_t)dp2->di_extb[adp->ad_offset],
11188			    (intmax_t)adp->ad_oldblkno);
11189		dp2->di_extb[adp->ad_offset] = adp->ad_newblkno;
11190		adp->ad_state &= ~UNDONE;
11191		adp->ad_state |= ATTACHED;
11192		hadchanges = 1;
11193	}
11194	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
11195		stat_direct_blk_ptrs++;
11196	/*
11197	 * Reset the file size to its most up-to-date value.
11198	 */
11199	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
11200		panic("handle_written_inodeblock: bad size");
11201	if (inodedep->id_savednlink > LINK_MAX)
11202		panic("handle_written_inodeblock: Invalid link count "
11203		    "%d for inodedep %p", inodedep->id_savednlink, inodedep);
11204	if (fstype == UFS1) {
11205		if (dp1->di_nlink != inodedep->id_savednlink) {
11206			dp1->di_nlink = inodedep->id_savednlink;
11207			hadchanges = 1;
11208		}
11209		if (dp1->di_size != inodedep->id_savedsize) {
11210			dp1->di_size = inodedep->id_savedsize;
11211			hadchanges = 1;
11212		}
11213	} else {
11214		if (dp2->di_nlink != inodedep->id_savednlink) {
11215			dp2->di_nlink = inodedep->id_savednlink;
11216			hadchanges = 1;
11217		}
11218		if (dp2->di_size != inodedep->id_savedsize) {
11219			dp2->di_size = inodedep->id_savedsize;
11220			hadchanges = 1;
11221		}
11222		if (dp2->di_extsize != inodedep->id_savedextsize) {
11223			dp2->di_extsize = inodedep->id_savedextsize;
11224			hadchanges = 1;
11225		}
11226	}
11227	inodedep->id_savedsize = -1;
11228	inodedep->id_savedextsize = -1;
11229	inodedep->id_savednlink = -1;
11230	/*
11231	 * If there were any rollbacks in the inode block, then it must be
11232	 * marked dirty so that it will eventually get written back in
11233	 * its correct form.
11234	 */
11235	if (hadchanges)
11236		bdirty(bp);
11237bufwait:
11238	/*
11239	 * Process any allocdirects that completed during the update.
11240	 */
11241	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
11242		handle_allocdirect_partdone(adp, &wkhd);
11243	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
11244		handle_allocdirect_partdone(adp, &wkhd);
11245	/*
11246	 * Process deallocations that were held pending until the
11247	 * inode had been written to disk. Freeing of the inode
11248	 * is delayed until after all blocks have been freed to
11249	 * avoid creation of new <vfsid, inum, lbn> triples
11250	 * before the old ones have been deleted.  Completely
11251	 * unlinked inodes are not processed until the unlinked
11252	 * inode list is written or the last reference is removed.
11253	 */
11254	if ((inodedep->id_state & (UNLINKED | UNLINKONLIST)) != UNLINKED) {
11255		freefile = handle_bufwait(inodedep, NULL);
11256		if (freefile && !LIST_EMPTY(&wkhd)) {
11257			WORKLIST_INSERT(&wkhd, &freefile->fx_list);
11258			freefile = NULL;
11259		}
11260	}
11261	/*
11262	 * Move rolled forward dependency completions to the bufwait list
11263	 * now that those that were already written have been processed.
11264	 */
11265	if (!LIST_EMPTY(&wkhd) && hadchanges == 0)
11266		panic("handle_written_inodeblock: bufwait but no changes");
11267	jwork_move(&inodedep->id_bufwait, &wkhd);
11268
11269	if (freefile != NULL) {
11270		/*
11271		 * If the inode is goingaway it was never written.  Fake up
11272		 * the state here so free_inodedep() can succeed.
11273		 */
11274		if (inodedep->id_state & GOINGAWAY)
11275			inodedep->id_state |= COMPLETE | DEPCOMPLETE;
11276		if (free_inodedep(inodedep) == 0)
11277			panic("handle_written_inodeblock: live inodedep %p",
11278			    inodedep);
11279		add_to_worklist(&freefile->fx_list, 0);
11280		return (0);
11281	}
11282
11283	/*
11284	 * If no outstanding dependencies, free it.
11285	 */
11286	if (free_inodedep(inodedep) ||
11287	    (TAILQ_FIRST(&inodedep->id_inoreflst) == 0 &&
11288	     TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
11289	     TAILQ_FIRST(&inodedep->id_extupdt) == 0 &&
11290	     LIST_FIRST(&inodedep->id_bufwait) == 0))
11291		return (0);
11292	return (hadchanges);
11293}
11294
11295static int
11296handle_written_indirdep(indirdep, bp, bpp)
11297	struct indirdep *indirdep;
11298	struct buf *bp;
11299	struct buf **bpp;
11300{
11301	struct allocindir *aip;
11302	struct buf *sbp;
11303	int chgs;
11304
11305	if (indirdep->ir_state & GOINGAWAY)
11306		panic("handle_written_indirdep: indirdep gone");
11307	if ((indirdep->ir_state & IOSTARTED) == 0)
11308		panic("handle_written_indirdep: IO not started");
11309	chgs = 0;
11310	/*
11311	 * If there were rollbacks revert them here.
11312	 */
11313	if (indirdep->ir_saveddata) {
11314		bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
11315		if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11316			free(indirdep->ir_saveddata, M_INDIRDEP);
11317			indirdep->ir_saveddata = NULL;
11318		}
11319		chgs = 1;
11320	}
11321	indirdep->ir_state &= ~(UNDONE | IOSTARTED);
11322	indirdep->ir_state |= ATTACHED;
11323	/*
11324	 * Move allocindirs with written pointers to the completehd if
11325	 * the indirdep's pointer is not yet written.  Otherwise
11326	 * free them here.
11327	 */
11328	while ((aip = LIST_FIRST(&indirdep->ir_writehd)) != 0) {
11329		LIST_REMOVE(aip, ai_next);
11330		if ((indirdep->ir_state & DEPCOMPLETE) == 0) {
11331			LIST_INSERT_HEAD(&indirdep->ir_completehd, aip,
11332			    ai_next);
11333			newblk_freefrag(&aip->ai_block);
11334			continue;
11335		}
11336		free_newblk(&aip->ai_block);
11337	}
11338	/*
11339	 * Move allocindirs that have finished dependency processing from
11340	 * the done list to the write list after updating the pointers.
11341	 */
11342	if (TAILQ_EMPTY(&indirdep->ir_trunc)) {
11343		while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
11344			handle_allocindir_partdone(aip);
11345			if (aip == LIST_FIRST(&indirdep->ir_donehd))
11346				panic("disk_write_complete: not gone");
11347			chgs = 1;
11348		}
11349	}
11350	/*
11351	 * Preserve the indirdep if there were any changes or if it is not
11352	 * yet valid on disk.
11353	 */
11354	if (chgs) {
11355		stat_indir_blk_ptrs++;
11356		bdirty(bp);
11357		return (1);
11358	}
11359	/*
11360	 * If there were no changes we can discard the savedbp and detach
11361	 * ourselves from the buf.  We are only carrying completed pointers
11362	 * in this case.
11363	 */
11364	sbp = indirdep->ir_savebp;
11365	sbp->b_flags |= B_INVAL | B_NOCACHE;
11366	indirdep->ir_savebp = NULL;
11367	indirdep->ir_bp = NULL;
11368	if (*bpp != NULL)
11369		panic("handle_written_indirdep: bp already exists.");
11370	*bpp = sbp;
11371	/*
11372	 * The indirdep may not be freed until its parent points at it.
11373	 */
11374	if (indirdep->ir_state & DEPCOMPLETE)
11375		free_indirdep(indirdep);
11376
11377	return (0);
11378}
11379
11380/*
11381 * Process a diradd entry after its dependent inode has been written.
11382 * This routine must be called with splbio interrupts blocked.
11383 */
11384static void
11385diradd_inode_written(dap, inodedep)
11386	struct diradd *dap;
11387	struct inodedep *inodedep;
11388{
11389
11390	dap->da_state |= COMPLETE;
11391	complete_diradd(dap);
11392	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
11393}
11394
11395/*
11396 * Returns true if the bmsafemap will have rollbacks when written.  Must
11397 * only be called with lk and the buf lock on the cg held.
11398 */
11399static int
11400bmsafemap_backgroundwrite(bmsafemap, bp)
11401	struct bmsafemap *bmsafemap;
11402	struct buf *bp;
11403{
11404	int dirty;
11405
11406	dirty = !LIST_EMPTY(&bmsafemap->sm_jaddrefhd) |
11407	    !LIST_EMPTY(&bmsafemap->sm_jnewblkhd);
11408	/*
11409	 * If we're initiating a background write we need to process the
11410	 * rollbacks as they exist now, not as they exist when IO starts.
11411	 * No other consumers will look at the contents of the shadowed
11412	 * buf so this is safe to do here.
11413	 */
11414	if (bp->b_xflags & BX_BKGRDMARKER)
11415		initiate_write_bmsafemap(bmsafemap, bp);
11416
11417	return (dirty);
11418}
11419
11420/*
11421 * Re-apply an allocation when a cg write is complete.
11422 */
11423static int
11424jnewblk_rollforward(jnewblk, fs, cgp, blksfree)
11425	struct jnewblk *jnewblk;
11426	struct fs *fs;
11427	struct cg *cgp;
11428	uint8_t *blksfree;
11429{
11430	ufs1_daddr_t fragno;
11431	ufs2_daddr_t blkno;
11432	long cgbno, bbase;
11433	int frags, blk;
11434	int i;
11435
11436	frags = 0;
11437	cgbno = dtogd(fs, jnewblk->jn_blkno);
11438	for (i = jnewblk->jn_oldfrags; i < jnewblk->jn_frags; i++) {
11439		if (isclr(blksfree, cgbno + i))
11440			panic("jnewblk_rollforward: re-allocated fragment");
11441		frags++;
11442	}
11443	if (frags == fs->fs_frag) {
11444		blkno = fragstoblks(fs, cgbno);
11445		ffs_clrblock(fs, blksfree, (long)blkno);
11446		ffs_clusteracct(fs, cgp, blkno, -1);
11447		cgp->cg_cs.cs_nbfree--;
11448	} else {
11449		bbase = cgbno - fragnum(fs, cgbno);
11450		cgbno += jnewblk->jn_oldfrags;
11451		/* If a complete block had been reassembled, account for it. */
11452		fragno = fragstoblks(fs, bbase);
11453		if (ffs_isblock(fs, blksfree, fragno)) {
11454			cgp->cg_cs.cs_nffree += fs->fs_frag;
11455			ffs_clusteracct(fs, cgp, fragno, -1);
11456			cgp->cg_cs.cs_nbfree--;
11457		}
11458		/* Decrement the old frags.  */
11459		blk = blkmap(fs, blksfree, bbase);
11460		ffs_fragacct(fs, blk, cgp->cg_frsum, -1);
11461		/* Allocate the fragment */
11462		for (i = 0; i < frags; i++)
11463			clrbit(blksfree, cgbno + i);
11464		cgp->cg_cs.cs_nffree -= frags;
11465		/* Add back in counts associated with the new frags */
11466		blk = blkmap(fs, blksfree, bbase);
11467		ffs_fragacct(fs, blk, cgp->cg_frsum, 1);
11468	}
11469	return (frags);
11470}
11471
11472/*
11473 * Complete a write to a bmsafemap structure.  Roll forward any bitmap
11474 * changes if it's not a background write.  Set all written dependencies
11475 * to DEPCOMPLETE and free the structure if possible.
11476 */
11477static int
11478handle_written_bmsafemap(bmsafemap, bp)
11479	struct bmsafemap *bmsafemap;
11480	struct buf *bp;
11481{
11482	struct newblk *newblk;
11483	struct inodedep *inodedep;
11484	struct jaddref *jaddref, *jatmp;
11485	struct jnewblk *jnewblk, *jntmp;
11486	struct ufsmount *ump;
11487	uint8_t *inosused;
11488	uint8_t *blksfree;
11489	struct cg *cgp;
11490	struct fs *fs;
11491	ino_t ino;
11492	int foreground;
11493	int chgs;
11494
11495	if ((bmsafemap->sm_state & IOSTARTED) == 0)
11496		panic("handle_written_bmsafemap: Not started");
11497	ump = VFSTOUFS(bmsafemap->sm_list.wk_mp);
11498	chgs = 0;
11499	bmsafemap->sm_state &= ~IOSTARTED;
11500	foreground = (bp->b_xflags & BX_BKGRDMARKER) == 0;
11501	/*
11502	 * Release journal work that was waiting on the write.
11503	 */
11504	handle_jwork(&bmsafemap->sm_freewr);
11505
11506	/*
11507	 * Restore unwritten inode allocation pending jaddref writes.
11508	 */
11509	if (!LIST_EMPTY(&bmsafemap->sm_jaddrefhd)) {
11510		cgp = (struct cg *)bp->b_data;
11511		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11512		inosused = cg_inosused(cgp);
11513		LIST_FOREACH_SAFE(jaddref, &bmsafemap->sm_jaddrefhd,
11514		    ja_bmdeps, jatmp) {
11515			if ((jaddref->ja_state & UNDONE) == 0)
11516				continue;
11517			ino = jaddref->ja_ino % fs->fs_ipg;
11518			if (isset(inosused, ino))
11519				panic("handle_written_bmsafemap: "
11520				    "re-allocated inode");
11521			/* Do the roll-forward only if it's a real copy. */
11522			if (foreground) {
11523				if ((jaddref->ja_mode & IFMT) == IFDIR)
11524					cgp->cg_cs.cs_ndir++;
11525				cgp->cg_cs.cs_nifree--;
11526				setbit(inosused, ino);
11527				chgs = 1;
11528			}
11529			jaddref->ja_state &= ~UNDONE;
11530			jaddref->ja_state |= ATTACHED;
11531			free_jaddref(jaddref);
11532		}
11533	}
11534	/*
11535	 * Restore any block allocations which are pending journal writes.
11536	 */
11537	if (LIST_FIRST(&bmsafemap->sm_jnewblkhd) != NULL) {
11538		cgp = (struct cg *)bp->b_data;
11539		fs = VFSTOUFS(bmsafemap->sm_list.wk_mp)->um_fs;
11540		blksfree = cg_blksfree(cgp);
11541		LIST_FOREACH_SAFE(jnewblk, &bmsafemap->sm_jnewblkhd, jn_deps,
11542		    jntmp) {
11543			if ((jnewblk->jn_state & UNDONE) == 0)
11544				continue;
11545			/* Do the roll-forward only if it's a real copy. */
11546			if (foreground &&
11547			    jnewblk_rollforward(jnewblk, fs, cgp, blksfree))
11548				chgs = 1;
11549			jnewblk->jn_state &= ~(UNDONE | NEWBLOCK);
11550			jnewblk->jn_state |= ATTACHED;
11551			free_jnewblk(jnewblk);
11552		}
11553	}
11554	while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkwr))) {
11555		newblk->nb_state |= DEPCOMPLETE;
11556		newblk->nb_state &= ~ONDEPLIST;
11557		newblk->nb_bmsafemap = NULL;
11558		LIST_REMOVE(newblk, nb_deps);
11559		if (newblk->nb_list.wk_type == D_ALLOCDIRECT)
11560			handle_allocdirect_partdone(
11561			    WK_ALLOCDIRECT(&newblk->nb_list), NULL);
11562		else if (newblk->nb_list.wk_type == D_ALLOCINDIR)
11563			handle_allocindir_partdone(
11564			    WK_ALLOCINDIR(&newblk->nb_list));
11565		else if (newblk->nb_list.wk_type != D_NEWBLK)
11566			panic("handle_written_bmsafemap: Unexpected type: %s",
11567			    TYPENAME(newblk->nb_list.wk_type));
11568	}
11569	while ((inodedep = LIST_FIRST(&bmsafemap->sm_inodedepwr)) != NULL) {
11570		inodedep->id_state |= DEPCOMPLETE;
11571		inodedep->id_state &= ~ONDEPLIST;
11572		LIST_REMOVE(inodedep, id_deps);
11573		inodedep->id_bmsafemap = NULL;
11574	}
11575	LIST_REMOVE(bmsafemap, sm_next);
11576	if (chgs == 0 && LIST_EMPTY(&bmsafemap->sm_jaddrefhd) &&
11577	    LIST_EMPTY(&bmsafemap->sm_jnewblkhd) &&
11578	    LIST_EMPTY(&bmsafemap->sm_newblkhd) &&
11579	    LIST_EMPTY(&bmsafemap->sm_inodedephd) &&
11580	    LIST_EMPTY(&bmsafemap->sm_freehd)) {
11581		LIST_REMOVE(bmsafemap, sm_hash);
11582		WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
11583		return (0);
11584	}
11585	LIST_INSERT_HEAD(&ump->softdep_dirtycg, bmsafemap, sm_next);
11586	if (foreground)
11587		bdirty(bp);
11588	return (1);
11589}
11590
11591/*
11592 * Try to free a mkdir dependency.
11593 */
11594static void
11595complete_mkdir(mkdir)
11596	struct mkdir *mkdir;
11597{
11598	struct diradd *dap;
11599
11600	if ((mkdir->md_state & ALLCOMPLETE) != ALLCOMPLETE)
11601		return;
11602	LIST_REMOVE(mkdir, md_mkdirs);
11603	dap = mkdir->md_diradd;
11604	dap->da_state &= ~(mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY));
11605	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0) {
11606		dap->da_state |= DEPCOMPLETE;
11607		complete_diradd(dap);
11608	}
11609	WORKITEM_FREE(mkdir, D_MKDIR);
11610}
11611
11612/*
11613 * Handle the completion of a mkdir dependency.
11614 */
11615static void
11616handle_written_mkdir(mkdir, type)
11617	struct mkdir *mkdir;
11618	int type;
11619{
11620
11621	if ((mkdir->md_state & (MKDIR_PARENT | MKDIR_BODY)) != type)
11622		panic("handle_written_mkdir: bad type");
11623	mkdir->md_state |= COMPLETE;
11624	complete_mkdir(mkdir);
11625}
11626
11627static int
11628free_pagedep(pagedep)
11629	struct pagedep *pagedep;
11630{
11631	int i;
11632
11633	if (pagedep->pd_state & NEWBLOCK)
11634		return (0);
11635	if (!LIST_EMPTY(&pagedep->pd_dirremhd))
11636		return (0);
11637	for (i = 0; i < DAHASHSZ; i++)
11638		if (!LIST_EMPTY(&pagedep->pd_diraddhd[i]))
11639			return (0);
11640	if (!LIST_EMPTY(&pagedep->pd_pendinghd))
11641		return (0);
11642	if (!LIST_EMPTY(&pagedep->pd_jmvrefhd))
11643		return (0);
11644	if (pagedep->pd_state & ONWORKLIST)
11645		WORKLIST_REMOVE(&pagedep->pd_list);
11646	LIST_REMOVE(pagedep, pd_hash);
11647	WORKITEM_FREE(pagedep, D_PAGEDEP);
11648
11649	return (1);
11650}
11651
11652/*
11653 * Called from within softdep_disk_write_complete above.
11654 * A write operation was just completed. Removed inodes can
11655 * now be freed and associated block pointers may be committed.
11656 * Note that this routine is always called from interrupt level
11657 * with further splbio interrupts blocked.
11658 */
11659static int
11660handle_written_filepage(pagedep, bp)
11661	struct pagedep *pagedep;
11662	struct buf *bp;		/* buffer containing the written page */
11663{
11664	struct dirrem *dirrem;
11665	struct diradd *dap, *nextdap;
11666	struct direct *ep;
11667	int i, chgs;
11668
11669	if ((pagedep->pd_state & IOSTARTED) == 0)
11670		panic("handle_written_filepage: not started");
11671	pagedep->pd_state &= ~IOSTARTED;
11672	/*
11673	 * Process any directory removals that have been committed.
11674	 */
11675	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
11676		LIST_REMOVE(dirrem, dm_next);
11677		dirrem->dm_state |= COMPLETE;
11678		dirrem->dm_dirinum = pagedep->pd_ino;
11679		KASSERT(LIST_EMPTY(&dirrem->dm_jremrefhd),
11680		    ("handle_written_filepage: Journal entries not written."));
11681		add_to_worklist(&dirrem->dm_list, 0);
11682	}
11683	/*
11684	 * Free any directory additions that have been committed.
11685	 * If it is a newly allocated block, we have to wait until
11686	 * the on-disk directory inode claims the new block.
11687	 */
11688	if ((pagedep->pd_state & NEWBLOCK) == 0)
11689		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
11690			free_diradd(dap, NULL);
11691	/*
11692	 * Uncommitted directory entries must be restored.
11693	 */
11694	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
11695		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
11696		     dap = nextdap) {
11697			nextdap = LIST_NEXT(dap, da_pdlist);
11698			if (dap->da_state & ATTACHED)
11699				panic("handle_written_filepage: attached");
11700			ep = (struct direct *)
11701			    ((char *)bp->b_data + dap->da_offset);
11702			ep->d_ino = dap->da_newinum;
11703			dap->da_state &= ~UNDONE;
11704			dap->da_state |= ATTACHED;
11705			chgs = 1;
11706			/*
11707			 * If the inode referenced by the directory has
11708			 * been written out, then the dependency can be
11709			 * moved to the pending list.
11710			 */
11711			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
11712				LIST_REMOVE(dap, da_pdlist);
11713				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
11714				    da_pdlist);
11715			}
11716		}
11717	}
11718	/*
11719	 * If there were any rollbacks in the directory, then it must be
11720	 * marked dirty so that it will eventually get written back in
11721	 * its correct form.
11722	 */
11723	if (chgs) {
11724		if ((bp->b_flags & B_DELWRI) == 0)
11725			stat_dir_entry++;
11726		bdirty(bp);
11727		return (1);
11728	}
11729	/*
11730	 * If we are not waiting for a new directory block to be
11731	 * claimed by its inode, then the pagedep will be freed.
11732	 * Otherwise it will remain to track any new entries on
11733	 * the page in case they are fsync'ed.
11734	 */
11735	free_pagedep(pagedep);
11736	return (0);
11737}
11738
11739/*
11740 * Writing back in-core inode structures.
11741 *
11742 * The filesystem only accesses an inode's contents when it occupies an
11743 * "in-core" inode structure.  These "in-core" structures are separate from
11744 * the page frames used to cache inode blocks.  Only the latter are
11745 * transferred to/from the disk.  So, when the updated contents of the
11746 * "in-core" inode structure are copied to the corresponding in-memory inode
11747 * block, the dependencies are also transferred.  The following procedure is
11748 * called when copying a dirty "in-core" inode to a cached inode block.
11749 */
11750
11751/*
11752 * Called when an inode is loaded from disk. If the effective link count
11753 * differed from the actual link count when it was last flushed, then we
11754 * need to ensure that the correct effective link count is put back.
11755 */
11756void
11757softdep_load_inodeblock(ip)
11758	struct inode *ip;	/* the "in_core" copy of the inode */
11759{
11760	struct inodedep *inodedep;
11761
11762	/*
11763	 * Check for alternate nlink count.
11764	 */
11765	ip->i_effnlink = ip->i_nlink;
11766	ACQUIRE_LOCK(&lk);
11767	if (inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0,
11768	    &inodedep) == 0) {
11769		FREE_LOCK(&lk);
11770		return;
11771	}
11772	ip->i_effnlink -= inodedep->id_nlinkdelta;
11773	FREE_LOCK(&lk);
11774}
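
/*
 * The adjustment above keeps the invariant i_effnlink = i_nlink -
 * id_nlinkdelta: the effective count omits removals whose directory
 * updates have not yet committed.  A minimal not-compiled sketch with
 * hypothetical names and values:
 */
#if 0
static int
example_effnlink(int nlink, int nlinkdelta)
{

	/* e.g., nlink 3 on disk with 1 uncommitted unlink => 2 */
	return (nlink - nlinkdelta);
}
#endif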
11775
11776/*
11777 * This routine is called just before the "in-core" inode
11778 * information is to be copied to the in-memory inode block.
11779 * Recall that an inode block contains several inodes. If
11780 * the force flag is set, then the dependencies will be
11781 * cleared so that the update can always be made. Note that
11782 * the buffer is locked when this routine is called, so we
11783 * will never be in the middle of writing the inode block
11784 * to disk.
11785 */
11786void
11787softdep_update_inodeblock(ip, bp, waitfor)
11788	struct inode *ip;	/* the "in_core" copy of the inode */
11789	struct buf *bp;		/* the buffer containing the inode block */
11790	int waitfor;		/* nonzero => update must be allowed */
11791{
11792	struct inodedep *inodedep;
11793	struct inoref *inoref;
11794	struct worklist *wk;
11795	struct mount *mp;
11796	struct buf *ibp;
11797	struct fs *fs;
11798	int error;
11799
11800	mp = UFSTOVFS(ip->i_ump);
11801	fs = ip->i_fs;
11802	/*
11803	 * Preserve the freelink that is on disk.  clear_unlinked_inodedep()
11804	 * does not have access to the in-core ip so must write directly into
11805	 * the inode block buffer when setting freelink.
11806	 */
11807	if (fs->fs_magic == FS_UFS1_MAGIC)
11808		DIP_SET(ip, i_freelink, ((struct ufs1_dinode *)bp->b_data +
11809		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
11810	else
11811		DIP_SET(ip, i_freelink, ((struct ufs2_dinode *)bp->b_data +
11812		    ino_to_fsbo(fs, ip->i_number))->di_freelink);
11813	/*
11814	 * If the effective link count is not equal to the actual link
11815	 * count, then we must track the difference in an inodedep while
11816	 * the inode is (potentially) tossed out of the cache. Otherwise,
11817	 * if there is no existing inodedep, then there are no dependencies
11818	 * to track.
11819	 */
11820	ACQUIRE_LOCK(&lk);
11821again:
11822	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
11823		FREE_LOCK(&lk);
11824		if (ip->i_effnlink != ip->i_nlink)
11825			panic("softdep_update_inodeblock: bad link count");
11826		return;
11827	}
11828	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
11829		panic("softdep_update_inodeblock: bad delta");
11830	/*
11831	 * If we're flushing all dependencies we must also move any waiting
11832	 * for journal writes onto the bufwait list prior to I/O.
11833	 */
11834	if (waitfor) {
11835		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
11836			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
11837			    == DEPCOMPLETE) {
11838				jwait(&inoref->if_list, MNT_WAIT);
11839				goto again;
11840			}
11841		}
11842	}
11843	/*
11844	 * Changes have been initiated. Anything depending on these
11845	 * changes cannot occur until this inode has been written.
11846	 */
11847	inodedep->id_state &= ~COMPLETE;
11848	if ((inodedep->id_state & ONWORKLIST) == 0)
11849		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
11850	/*
11851	 * Any new dependencies associated with the incore inode must
11852	 * now be moved to the list associated with the buffer holding
11853	 * the in-memory copy of the inode. Once merged process any
11854	 * allocdirects that are completed by the merger.
11855	 */
11856	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
11857	if (!TAILQ_EMPTY(&inodedep->id_inoupdt))
11858		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt),
11859		    NULL);
11860	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
11861	if (!TAILQ_EMPTY(&inodedep->id_extupdt))
11862		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt),
11863		    NULL);
11864	/*
11865	 * Now that the inode has been pushed into the buffer, the
11866	 * operations dependent on the inode being written to disk
11867	 * can be moved to the id_bufwait so that they will be
11868	 * processed when the buffer I/O completes.
11869	 */
11870	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
11871		WORKLIST_REMOVE(wk);
11872		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
11873	}
11874	/*
11875	 * Newly allocated inodes cannot be written until the bitmap
11876	 * that allocates them has been written (indicated by
11877	 * DEPCOMPLETE being set in id_state). If we are doing a
11878	 * forced sync (e.g., an fsync on a file), we force the bitmap
11879	 * to be written so that the update can be done.
11880	 */
11881	if (waitfor == 0) {
11882		FREE_LOCK(&lk);
11883		return;
11884	}
11885retry:
11886	if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) != 0) {
11887		FREE_LOCK(&lk);
11888		return;
11889	}
11890	ibp = inodedep->id_bmsafemap->sm_buf;
11891	ibp = getdirtybuf(ibp, &lk, MNT_WAIT);
11892	if (ibp == NULL) {
11893		/*
11894		 * If ibp came back as NULL, the dependency could have been
11895		 * freed while we slept.  Look it up again, and check to see
11896		 * that it has completed.
11897		 */
11898		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
11899			goto retry;
11900		FREE_LOCK(&lk);
11901		return;
11902	}
11903	FREE_LOCK(&lk);
11904	if ((error = bwrite(ibp)) != 0)
11905		softdep_error("softdep_update_inodeblock: bwrite", error);
11906}
11907
11908/*
11909 * Merge a new inode dependency list (such as id_newinoupdt) into an
11910 * old inode dependency list (such as id_inoupdt). This routine must be
11911 * called with splbio interrupts blocked.
11912 */
11913static void
11914merge_inode_lists(newlisthead, oldlisthead)
11915	struct allocdirectlst *newlisthead;
11916	struct allocdirectlst *oldlisthead;
11917{
11918	struct allocdirect *listadp, *newadp;
11919
11920	newadp = TAILQ_FIRST(newlisthead);
11921	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
11922		if (listadp->ad_offset < newadp->ad_offset) {
11923			listadp = TAILQ_NEXT(listadp, ad_next);
11924			continue;
11925		}
11926		TAILQ_REMOVE(newlisthead, newadp, ad_next);
11927		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
11928		if (listadp->ad_offset == newadp->ad_offset) {
11929			allocdirect_merge(oldlisthead, newadp,
11930			    listadp);
11931			listadp = newadp;
11932		}
11933		newadp = TAILQ_FIRST(newlisthead);
11934	}
11935	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
11936		TAILQ_REMOVE(newlisthead, newadp, ad_next);
11937		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
11938	}
11939}
11940
11941/*
11942 * If we are doing an fsync, then we must ensure that any directory
11943 * entries for the inode have been written after the inode gets to disk.
11944 */
11945int
11946softdep_fsync(vp)
11947	struct vnode *vp;	/* the "in_core" copy of the inode */
11948{
11949	struct inodedep *inodedep;
11950	struct pagedep *pagedep;
11951	struct inoref *inoref;
11952	struct worklist *wk;
11953	struct diradd *dap;
11954	struct mount *mp;
11955	struct vnode *pvp;
11956	struct inode *ip;
11957	struct buf *bp;
11958	struct fs *fs;
11959	struct thread *td = curthread;
11960	int error, flushparent, pagedep_new_block;
11961	ino_t parentino;
11962	ufs_lbn_t lbn;
11963
11964	ip = VTOI(vp);
11965	fs = ip->i_fs;
11966	mp = vp->v_mount;
11967	ACQUIRE_LOCK(&lk);
11968restart:
11969	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
11970		FREE_LOCK(&lk);
11971		return (0);
11972	}
11973	TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
11974		if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
11975		    == DEPCOMPLETE) {
11976			jwait(&inoref->if_list, MNT_WAIT);
11977			goto restart;
11978		}
11979	}
11980	if (!LIST_EMPTY(&inodedep->id_inowait) ||
11981	    !TAILQ_EMPTY(&inodedep->id_extupdt) ||
11982	    !TAILQ_EMPTY(&inodedep->id_newextupdt) ||
11983	    !TAILQ_EMPTY(&inodedep->id_inoupdt) ||
11984	    !TAILQ_EMPTY(&inodedep->id_newinoupdt))
11985		panic("softdep_fsync: pending ops %p", inodedep);
11986	for (error = 0, flushparent = 0; ; ) {
11987		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
11988			break;
11989		if (wk->wk_type != D_DIRADD)
11990			panic("softdep_fsync: Unexpected type %s",
11991			    TYPENAME(wk->wk_type));
11992		dap = WK_DIRADD(wk);
11993		/*
11994		 * Flush our parent if this directory entry has a MKDIR_PARENT
11995		 * dependency or is contained in a newly allocated block.
11996		 */
11997		if (dap->da_state & DIRCHG)
11998			pagedep = dap->da_previous->dm_pagedep;
11999		else
12000			pagedep = dap->da_pagedep;
12001		parentino = pagedep->pd_ino;
12002		lbn = pagedep->pd_lbn;
12003		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
12004			panic("softdep_fsync: dirty");
12005		if ((dap->da_state & MKDIR_PARENT) ||
12006		    (pagedep->pd_state & NEWBLOCK))
12007			flushparent = 1;
12008		else
12009			flushparent = 0;
12010		/*
12011		 * If we are being fsync'ed as part of vgone'ing this vnode,
12012		 * then we will not be able to release and recover the
12013		 * vnode below, so we just have to give up on writing its
12014		 * directory entry out. It will eventually be written, just
12015		 * not now, but then the user was not asking to have it
12016		 * written, so we are not breaking any promises.
12017		 */
12018		if (vp->v_iflag & VI_DOOMED)
12019			break;
12020		/*
12021		 * We prevent deadlock by always fetching inodes from the
12022		 * root, moving down the directory tree. Thus, when fetching
12023		 * our parent directory, we first try to get the lock. If
12024		 * that fails, we must unlock ourselves before requesting
12025		 * the lock on our parent. See the comment in ufs_lookup
12026		 * for details on possible races.
12027		 */
12028		FREE_LOCK(&lk);
12029		if (ffs_vgetf(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp,
12030		    FFSV_FORCEINSMQ)) {
12031			error = vfs_busy(mp, MBF_NOWAIT);
12032			if (error != 0) {
12033				vfs_ref(mp);
12034				VOP_UNLOCK(vp, 0);
12035				error = vfs_busy(mp, 0);
12036				vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12037				vfs_rel(mp);
12038				if (error != 0)
12039					return (ENOENT);
12040				if (vp->v_iflag & VI_DOOMED) {
12041					vfs_unbusy(mp);
12042					return (ENOENT);
12043				}
12044			}
12045			VOP_UNLOCK(vp, 0);
12046			error = ffs_vgetf(mp, parentino, LK_EXCLUSIVE,
12047			    &pvp, FFSV_FORCEINSMQ);
12048			vfs_unbusy(mp);
12049			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
12050			if (vp->v_iflag & VI_DOOMED) {
12051				if (error == 0)
12052					vput(pvp);
12053				error = ENOENT;
12054			}
12055			if (error != 0)
12056				return (error);
12057		}
12058		/*
12059		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
12060		 * that are contained in direct blocks will be resolved by
12061		 * doing a ffs_update. Pagedeps contained in indirect blocks
12062		 * may require a complete sync'ing of the directory. So, we
12063		 * try the cheap and fast ffs_update first, and if that fails,
12064		 * then we do the slower ffs_syncvnode of the directory.
12065		 */
12066		if (flushparent) {
12067			int locked;
12068
12069			if ((error = ffs_update(pvp, 1)) != 0) {
12070				vput(pvp);
12071				return (error);
12072			}
12073			ACQUIRE_LOCK(&lk);
12074			locked = 1;
12075			if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0) {
12076				if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) != NULL) {
12077					if (wk->wk_type != D_DIRADD)
12078						panic("softdep_fsync: Unexpected type %s",
12079						      TYPENAME(wk->wk_type));
12080					dap = WK_DIRADD(wk);
12081					if (dap->da_state & DIRCHG)
12082						pagedep = dap->da_previous->dm_pagedep;
12083					else
12084						pagedep = dap->da_pagedep;
12085					pagedep_new_block = pagedep->pd_state & NEWBLOCK;
12086					FREE_LOCK(&lk);
12087					locked = 0;
12088					if (pagedep_new_block && (error =
12089					    ffs_syncvnode(pvp, MNT_WAIT, 0))) {
12090						vput(pvp);
12091						return (error);
12092					}
12093				}
12094			}
12095			if (locked)
12096				FREE_LOCK(&lk);
12097		}
12098		/*
12099		 * Flush directory page containing the inode's name.
12100		 */
12101		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
12102		    &bp);
12103		if (error == 0)
12104			error = bwrite(bp);
12105		else
12106			brelse(bp);
12107		vput(pvp);
12108		if (error != 0)
12109			return (error);
12110		ACQUIRE_LOCK(&lk);
12111		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
12112			break;
12113	}
12114	FREE_LOCK(&lk);
12115	return (0);
12116}
12117
12118/*
12119 * Flush all the dirty bitmaps associated with the block device
12120 * before flushing the rest of the dirty blocks so as to reduce
12121 * the number of dependencies that will have to be rolled back.
12122 *
12123 * XXX Unused?
12124 */
12125void
12126softdep_fsync_mountdev(vp)
12127	struct vnode *vp;
12128{
12129	struct buf *bp, *nbp;
12130	struct worklist *wk;
12131	struct bufobj *bo;
12132
12133	if (!vn_isdisk(vp, NULL))
12134		panic("softdep_fsync_mountdev: vnode not a disk");
12135	bo = &vp->v_bufobj;
12136restart:
12137	BO_LOCK(bo);
12138	ACQUIRE_LOCK(&lk);
12139	TAILQ_FOREACH_SAFE(bp, &bo->bo_dirty.bv_hd, b_bobufs, nbp) {
12140		/*
12141		 * If it is already scheduled, skip to the next buffer.
12142		 */
12143		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
12144			continue;
12145
12146		if ((bp->b_flags & B_DELWRI) == 0)
12147			panic("softdep_fsync_mountdev: not dirty");
12148		/*
12149		 * We are only interested in bitmaps with outstanding
12150		 * dependencies.
12151		 */
12152		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
12153		    wk->wk_type != D_BMSAFEMAP ||
12154		    (bp->b_vflags & BV_BKGRDINPROG)) {
12155			BUF_UNLOCK(bp);
12156			continue;
12157		}
12158		FREE_LOCK(&lk);
12159		BO_UNLOCK(bo);
12160		bremfree(bp);
12161		(void) bawrite(bp);
12162		goto restart;
12163	}
12164	FREE_LOCK(&lk);
12165	drain_output(vp);
12166	BO_UNLOCK(bo);
12167}
12168
12169/*
12170 * Sync all cylinder groups that were dirty at the time this function is
12171 * called.  Newly dirtied cgs will be inserted before the sentinel.  This
 * is used to flush freedep activity that may be holding up writes to
 * an indirect block.
12174 */
12175static int
12176sync_cgs(mp, waitfor)
12177	struct mount *mp;
12178	int waitfor;
12179{
12180	struct bmsafemap *bmsafemap;
12181	struct bmsafemap *sentinel;
12182	struct ufsmount *ump;
12183	struct buf *bp;
12184	int error;
12185
12186	sentinel = malloc(sizeof(*sentinel), M_BMSAFEMAP, M_ZERO | M_WAITOK);
12187	sentinel->sm_cg = -1;
12188	ump = VFSTOUFS(mp);
12189	error = 0;
12190	ACQUIRE_LOCK(&lk);
12191	LIST_INSERT_HEAD(&ump->softdep_dirtycg, sentinel, sm_next);
12192	for (bmsafemap = LIST_NEXT(sentinel, sm_next); bmsafemap != NULL;
12193	    bmsafemap = LIST_NEXT(sentinel, sm_next)) {
12194		/* Skip sentinels and cgs with no work to release. */
12195		if (bmsafemap->sm_cg == -1 ||
12196		    (LIST_EMPTY(&bmsafemap->sm_freehd) &&
12197		    LIST_EMPTY(&bmsafemap->sm_freewr))) {
12198			LIST_REMOVE(sentinel, sm_next);
12199			LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12200			continue;
12201		}
12202		/*
		 * If we don't get the lock and we're waiting, try again.
		 * Otherwise move on to the next buf and try to sync it.
12205		 */
12206		bp = getdirtybuf(bmsafemap->sm_buf, &lk, waitfor);
12207		if (bp == NULL && waitfor == MNT_WAIT)
12208			continue;
12209		LIST_REMOVE(sentinel, sm_next);
12210		LIST_INSERT_AFTER(bmsafemap, sentinel, sm_next);
12211		if (bp == NULL)
12212			continue;
12213		FREE_LOCK(&lk);
12214		if (waitfor == MNT_NOWAIT)
12215			bawrite(bp);
12216		else
12217			error = bwrite(bp);
12218		ACQUIRE_LOCK(&lk);
12219		if (error)
12220			break;
12221	}
12222	LIST_REMOVE(sentinel, sm_next);
12223	FREE_LOCK(&lk);
12224	free(sentinel, M_BMSAFEMAP);
12225	return (error);
12226}
12227
12228/*
12229 * This routine is called when we are trying to synchronously flush a
12230 * file. This routine must eliminate any filesystem metadata dependencies
12231 * so that the syncing routine can succeed.
12232 */
12233int
12234softdep_sync_metadata(struct vnode *vp)
12235{
12236	int error;
12237
12238	/*
12239	 * Ensure that any direct block dependencies have been cleared,
12240	 * truncations are started, and inode references are journaled.
12241	 */
12242	ACQUIRE_LOCK(&lk);
12243	/*
12244	 * Write all journal records to prevent rollbacks on devvp.
12245	 */
12246	if (vp->v_type == VCHR)
12247		softdep_flushjournal(vp->v_mount);
12248	error = flush_inodedep_deps(vp, vp->v_mount, VTOI(vp)->i_number);
12249	/*
12250	 * Ensure that all truncates are written so we won't find deps on
12251	 * indirect blocks.
12252	 */
12253	process_truncates(vp);
12254	FREE_LOCK(&lk);
12255
12256	return (error);
12257}
12258
12259/*
12260 * This routine is called when we are attempting to sync a buf with
12261 * dependencies.  If waitfor is MNT_NOWAIT it attempts to schedule any
 * other I/O it can, but returns EBUSY if the buffer is not yet able to
 * be written.  Buffers whose dependencies will not cause rollbacks
 * always yield 0.
12265 */
12266int
12267softdep_sync_buf(struct vnode *vp, struct buf *bp, int waitfor)
12268{
12269	struct indirdep *indirdep;
12270	struct pagedep *pagedep;
12271	struct allocindir *aip;
12272	struct newblk *newblk;
12273	struct buf *nbp;
12274	struct worklist *wk;
12275	int i, error;
12276
12277	/*
12278	 * For VCHR we just don't want to force flush any dependencies that
12279	 * will cause rollbacks.
12280	 */
12281	if (vp->v_type == VCHR) {
12282		if (waitfor == MNT_NOWAIT && softdep_count_dependencies(bp, 0))
12283			return (EBUSY);
12284		return (0);
12285	}
12286	ACQUIRE_LOCK(&lk);
12287	/*
12288	 * As we hold the buffer locked, none of its dependencies
12289	 * will disappear.
12290	 */
12291	error = 0;
12292top:
12293	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
12294		switch (wk->wk_type) {
12295
12296		case D_ALLOCDIRECT:
12297		case D_ALLOCINDIR:
12298			newblk = WK_NEWBLK(wk);
12299			if (newblk->nb_jnewblk != NULL) {
12300				if (waitfor == MNT_NOWAIT) {
12301					error = EBUSY;
12302					goto out_unlock;
12303				}
12304				jwait(&newblk->nb_jnewblk->jn_list, waitfor);
12305				goto top;
12306			}
12307			if (newblk->nb_state & DEPCOMPLETE ||
12308			    waitfor == MNT_NOWAIT)
12309				continue;
12310			nbp = newblk->nb_bmsafemap->sm_buf;
12311			nbp = getdirtybuf(nbp, &lk, waitfor);
12312			if (nbp == NULL)
12313				goto top;
12314			FREE_LOCK(&lk);
12315			if ((error = bwrite(nbp)) != 0)
12316				goto out;
12317			ACQUIRE_LOCK(&lk);
12318			continue;
12319
12320		case D_INDIRDEP:
12321			indirdep = WK_INDIRDEP(wk);
12322			if (waitfor == MNT_NOWAIT) {
12323				if (!TAILQ_EMPTY(&indirdep->ir_trunc) ||
12324				    !LIST_EMPTY(&indirdep->ir_deplisthd)) {
12325					error = EBUSY;
12326					goto out_unlock;
12327				}
12328			}
12329			if (!TAILQ_EMPTY(&indirdep->ir_trunc))
12330				panic("softdep_sync_buf: truncation pending.");
12331		restart:
12332			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
12333				newblk = (struct newblk *)aip;
12334				if (newblk->nb_jnewblk != NULL) {
12335					jwait(&newblk->nb_jnewblk->jn_list,
12336					    waitfor);
12337					goto restart;
12338				}
12339				if (newblk->nb_state & DEPCOMPLETE)
12340					continue;
12341				nbp = newblk->nb_bmsafemap->sm_buf;
12342				nbp = getdirtybuf(nbp, &lk, waitfor);
12343				if (nbp == NULL)
12344					goto restart;
12345				FREE_LOCK(&lk);
12346				if ((error = bwrite(nbp)) != 0)
12347					goto out;
12348				ACQUIRE_LOCK(&lk);
12349				goto restart;
12350			}
12351			continue;
12352
12353		case D_PAGEDEP:
12354			/*
12355			 * Only flush directory entries in synchronous passes.
12356			 */
12357			if (waitfor != MNT_WAIT) {
12358				error = EBUSY;
12359				goto out_unlock;
12360			}
12361			/*
12362			 * While syncing snapshots, we must allow recursive
12363			 * lookups.
12364			 */
12365			BUF_AREC(bp);
12366			/*
12367			 * We are trying to sync a directory that may
12368			 * have dependencies on both its own metadata
12369			 * and/or dependencies on the inodes of any
12370			 * recently allocated files. We walk its diradd
12371			 * lists pushing out the associated inode.
12372			 */
12373			pagedep = WK_PAGEDEP(wk);
12374			for (i = 0; i < DAHASHSZ; i++) {
				if (LIST_EMPTY(&pagedep->pd_diraddhd[i]))
12376					continue;
12377				if ((error = flush_pagedep_deps(vp, wk->wk_mp,
12378				    &pagedep->pd_diraddhd[i]))) {
12379					BUF_NOREC(bp);
12380					goto out_unlock;
12381				}
12382			}
12383			BUF_NOREC(bp);
12384			continue;
12385
12386		case D_FREEWORK:
12387		case D_FREEDEP:
12388		case D_JSEGDEP:
12389		case D_JNEWBLK:
12390			continue;
12391
12392		default:
12393			panic("softdep_sync_buf: Unknown type %s",
12394			    TYPENAME(wk->wk_type));
12395			/* NOTREACHED */
12396		}
12397	}
12398out_unlock:
12399	FREE_LOCK(&lk);
12400out:
12401	return (error);
12402}
12403
12404/*
12405 * Flush the dependencies associated with an inodedep.
12406 * Called with splbio blocked.
12407 */
12408static int
12409flush_inodedep_deps(vp, mp, ino)
12410	struct vnode *vp;
12411	struct mount *mp;
12412	ino_t ino;
12413{
12414	struct inodedep *inodedep;
12415	struct inoref *inoref;
12416	int error, waitfor;
12417
12418	/*
12419	 * This work is done in two passes. The first pass grabs most
12420	 * of the buffers and begins asynchronously writing them. The
12421	 * only way to wait for these asynchronous writes is to sleep
12422	 * on the filesystem vnode which may stay busy for a long time
12423	 * if the filesystem is active. So, instead, we make a second
12424	 * pass over the dependencies blocking on each write. In the
12425	 * usual case we will be blocking against a write that we
12426	 * initiated, so when it is done the dependency will have been
12427	 * resolved. Thus the second pass is expected to end quickly.
12428	 * We give a brief window at the top of the loop to allow
12429	 * any pending I/O to complete.
12430	 */
12431	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
12432		if (error)
12433			return (error);
12434		FREE_LOCK(&lk);
12435		ACQUIRE_LOCK(&lk);
12436restart:
12437		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
12438			return (0);
12439		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12440			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12441			    == DEPCOMPLETE) {
12442				jwait(&inoref->if_list, MNT_WAIT);
12443				goto restart;
12444			}
12445		}
12446		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
12447		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
12448		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
12449		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
12450			continue;
12451		/*
		 * If this was pass 2, we are done; otherwise, do pass 2.
12453		 */
12454		if (waitfor == MNT_WAIT)
12455			break;
12456		waitfor = MNT_WAIT;
12457	}
12458	/*
12459	 * Try freeing inodedep in case all dependencies have been removed.
12460	 */
12461	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
12462		(void) free_inodedep(inodedep);
12463	return (0);
12464}
12465
12466/*
12467 * Flush an inode dependency list.
12468 * Called with splbio blocked.
12469 */
12470static int
12471flush_deplist(listhead, waitfor, errorp)
12472	struct allocdirectlst *listhead;
12473	int waitfor;
12474	int *errorp;
12475{
12476	struct allocdirect *adp;
12477	struct newblk *newblk;
12478	struct buf *bp;
12479
12480	rw_assert(&lk, RA_WLOCKED);
12481	TAILQ_FOREACH(adp, listhead, ad_next) {
12482		newblk = (struct newblk *)adp;
12483		if (newblk->nb_jnewblk != NULL) {
12484			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12485			return (1);
12486		}
12487		if (newblk->nb_state & DEPCOMPLETE)
12488			continue;
12489		bp = newblk->nb_bmsafemap->sm_buf;
12490		bp = getdirtybuf(bp, &lk, waitfor);
12491		if (bp == NULL) {
12492			if (waitfor == MNT_NOWAIT)
12493				continue;
12494			return (1);
12495		}
12496		FREE_LOCK(&lk);
12497		if (waitfor == MNT_NOWAIT)
12498			bawrite(bp);
12499		else
12500			*errorp = bwrite(bp);
12501		ACQUIRE_LOCK(&lk);
12502		return (1);
12503	}
12504	return (0);
12505}
12506
12507/*
12508 * Flush dependencies associated with an allocdirect block.
12509 */
12510static int
12511flush_newblk_dep(vp, mp, lbn)
12512	struct vnode *vp;
12513	struct mount *mp;
12514	ufs_lbn_t lbn;
12515{
12516	struct newblk *newblk;
12517	struct bufobj *bo;
12518	struct inode *ip;
12519	struct buf *bp;
12520	ufs2_daddr_t blkno;
12521	int error;
12522
12523	error = 0;
12524	bo = &vp->v_bufobj;
12525	ip = VTOI(vp);
12526	blkno = DIP(ip, i_db[lbn]);
12527	if (blkno == 0)
12528		panic("flush_newblk_dep: Missing block");
12529	ACQUIRE_LOCK(&lk);
12530	/*
12531	 * Loop until all dependencies related to this block are satisfied.
12532	 * We must be careful to restart after each sleep in case a write
12533	 * completes some part of this process for us.
12534	 */
12535	for (;;) {
12536		if (newblk_lookup(mp, blkno, 0, &newblk) == 0) {
12537			FREE_LOCK(&lk);
12538			break;
12539		}
12540		if (newblk->nb_list.wk_type != D_ALLOCDIRECT)
12541			panic("flush_newblk_deps: Bad newblk %p", newblk);
12542		/*
12543		 * Flush the journal.
12544		 */
12545		if (newblk->nb_jnewblk != NULL) {
12546			jwait(&newblk->nb_jnewblk->jn_list, MNT_WAIT);
12547			continue;
12548		}
12549		/*
12550		 * Write the bitmap dependency.
12551		 */
12552		if ((newblk->nb_state & DEPCOMPLETE) == 0) {
12553			bp = newblk->nb_bmsafemap->sm_buf;
12554			bp = getdirtybuf(bp, &lk, MNT_WAIT);
12555			if (bp == NULL)
12556				continue;
12557			FREE_LOCK(&lk);
12558			error = bwrite(bp);
12559			if (error)
12560				break;
12561			ACQUIRE_LOCK(&lk);
12562			continue;
12563		}
12564		/*
12565		 * Write the buffer.
12566		 */
12567		FREE_LOCK(&lk);
12568		BO_LOCK(bo);
12569		bp = gbincore(bo, lbn);
12570		if (bp != NULL) {
12571			error = BUF_LOCK(bp, LK_EXCLUSIVE | LK_SLEEPFAIL |
12572			    LK_INTERLOCK, BO_LOCKPTR(bo));
12573			if (error == ENOLCK) {
12574				ACQUIRE_LOCK(&lk);
12575				continue; /* Slept, retry */
12576			}
12577			if (error != 0)
12578				break;	/* Failed */
12579			if (bp->b_flags & B_DELWRI) {
12580				bremfree(bp);
12581				error = bwrite(bp);
12582				if (error)
12583					break;
12584			} else
12585				BUF_UNLOCK(bp);
12586		} else
12587			BO_UNLOCK(bo);
12588		/*
12589		 * We have to wait for the direct pointers to
12590		 * point at the newdirblk before the dependency
12591		 * will go away.
12592		 */
12593		error = ffs_update(vp, 1);
12594		if (error)
12595			break;
12596		ACQUIRE_LOCK(&lk);
12597	}
12598	return (error);
12599}
12600
12601/*
12602 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
12603 * Called with splbio blocked.
12604 */
12605static int
12606flush_pagedep_deps(pvp, mp, diraddhdp)
12607	struct vnode *pvp;
12608	struct mount *mp;
12609	struct diraddhd *diraddhdp;
12610{
12611	struct inodedep *inodedep;
12612	struct inoref *inoref;
12613	struct ufsmount *ump;
12614	struct diradd *dap;
12615	struct vnode *vp;
12616	int error = 0;
12617	struct buf *bp;
12618	ino_t inum;
12619
12620	ump = VFSTOUFS(mp);
12621restart:
12622	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
12623		/*
12624		 * Flush ourselves if this directory entry
12625		 * has a MKDIR_PARENT dependency.
12626		 */
12627		if (dap->da_state & MKDIR_PARENT) {
12628			FREE_LOCK(&lk);
12629			if ((error = ffs_update(pvp, 1)) != 0)
12630				break;
12631			ACQUIRE_LOCK(&lk);
12632			/*
12633			 * If that cleared dependencies, go on to next.
12634			 */
12635			if (dap != LIST_FIRST(diraddhdp))
12636				continue;
12637			if (dap->da_state & MKDIR_PARENT)
12638				panic("flush_pagedep_deps: MKDIR_PARENT");
12639		}
12640		/*
12641		 * A newly allocated directory must have its "." and
12642		 * ".." entries written out before its name can be
12643		 * committed in its parent.
12644		 */
12645		inum = dap->da_newinum;
12646		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12647			panic("flush_pagedep_deps: lost inode1");
12648		/*
12649		 * Wait for any pending journal adds to complete so we don't
12650		 * cause rollbacks while syncing.
12651		 */
12652		TAILQ_FOREACH(inoref, &inodedep->id_inoreflst, if_deps) {
12653			if ((inoref->if_state & (DEPCOMPLETE | GOINGAWAY))
12654			    == DEPCOMPLETE) {
12655				jwait(&inoref->if_list, MNT_WAIT);
12656				goto restart;
12657			}
12658		}
12659		if (dap->da_state & MKDIR_BODY) {
12660			FREE_LOCK(&lk);
12661			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12662			    FFSV_FORCEINSMQ)))
12663				break;
12664			error = flush_newblk_dep(vp, mp, 0);
12665			/*
			 * If we still have the dependency, we might need to
12667			 * update the vnode to sync the new link count to
12668			 * disk.
12669			 */
12670			if (error == 0 && dap == LIST_FIRST(diraddhdp))
12671				error = ffs_update(vp, 1);
12672			vput(vp);
12673			if (error != 0)
12674				break;
12675			ACQUIRE_LOCK(&lk);
12676			/*
12677			 * If that cleared dependencies, go on to next.
12678			 */
12679			if (dap != LIST_FIRST(diraddhdp))
12680				continue;
12681			if (dap->da_state & MKDIR_BODY) {
12682				inodedep_lookup(UFSTOVFS(ump), inum, 0,
12683				    &inodedep);
12684				panic("flush_pagedep_deps: MKDIR_BODY "
12685				    "inodedep %p dap %p vp %p",
12686				    inodedep, dap, vp);
12687			}
12688		}
12689		/*
12690		 * Flush the inode on which the directory entry depends.
12691		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
12692		 * the only remaining dependency is that the updated inode
12693		 * count must get pushed to disk. The inode has already
12694		 * been pushed into its inode buffer (via VOP_UPDATE) at
12695		 * the time of the reference count change. So we need only
12696		 * locate that buffer, ensure that there will be no rollback
12697		 * caused by a bitmap dependency, then write the inode buffer.
12698		 */
12699retry:
12700		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
12701			panic("flush_pagedep_deps: lost inode");
12702		/*
12703		 * If the inode still has bitmap dependencies,
12704		 * push them to disk.
12705		 */
12706		if ((inodedep->id_state & (DEPCOMPLETE | GOINGAWAY)) == 0) {
12707			bp = inodedep->id_bmsafemap->sm_buf;
12708			bp = getdirtybuf(bp, &lk, MNT_WAIT);
12709			if (bp == NULL)
12710				goto retry;
12711			FREE_LOCK(&lk);
12712			if ((error = bwrite(bp)) != 0)
12713				break;
12714			ACQUIRE_LOCK(&lk);
12715			if (dap != LIST_FIRST(diraddhdp))
12716				continue;
12717		}
12718		/*
12719		 * If the inode is still sitting in a buffer waiting
12720		 * to be written or waiting for the link count to be
		 * adjusted, update it here to flush it to disk.
12722		 */
12723		if (dap == LIST_FIRST(diraddhdp)) {
12724			FREE_LOCK(&lk);
12725			if ((error = ffs_vgetf(mp, inum, LK_EXCLUSIVE, &vp,
12726			    FFSV_FORCEINSMQ)))
12727				break;
12728			error = ffs_update(vp, 1);
12729			vput(vp);
12730			if (error)
12731				break;
12732			ACQUIRE_LOCK(&lk);
12733		}
12734		/*
12735		 * If we have failed to get rid of all the dependencies
12736		 * then something is seriously wrong.
12737		 */
12738		if (dap == LIST_FIRST(diraddhdp)) {
12739			inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep);
12740			panic("flush_pagedep_deps: failed to flush "
12741			    "inodedep %p ino %ju dap %p",
12742			    inodedep, (uintmax_t)inum, dap);
12743		}
12744	}
12745	if (error)
12746		ACQUIRE_LOCK(&lk);
12747	return (error);
12748}
12749
12750/*
12751 * A large burst of file addition or deletion activity can drive the
 * memory load excessively high. We first attempt to slow things down
 * using the techniques below. If that fails, this routine requests
 * that the offending operations fall back to running synchronously
 * until the memory load returns to a reasonable level.
12756 */
12757int
12758softdep_slowdown(vp)
12759	struct vnode *vp;
12760{
12761	struct ufsmount *ump;
12762	int jlow;
12763	int max_softdeps_hard;
12764
12765	ACQUIRE_LOCK(&lk);
12766	jlow = 0;
12767	/*
12768	 * Check for journal space if needed.
12769	 */
12770	if (DOINGSUJ(vp)) {
12771		ump = VFSTOUFS(vp->v_mount);
12772		if (journal_space(ump, 0) == 0)
12773			jlow = 1;
12774	}
12775	max_softdeps_hard = max_softdeps * 11 / 10;
12776	if (dep_current[D_DIRREM] < max_softdeps_hard / 2 &&
12777	    dep_current[D_INODEDEP] < max_softdeps_hard &&
12778	    VFSTOUFS(vp->v_mount)->um_numindirdeps < maxindirdeps &&
12779	    dep_current[D_FREEBLKS] < max_softdeps_hard && jlow == 0) {
12780		FREE_LOCK(&lk);
		return (0);
12782	}
12783	if (VFSTOUFS(vp->v_mount)->um_numindirdeps >= maxindirdeps || jlow)
12784		softdep_speedup();
12785	stat_sync_limit_hit += 1;
12786	FREE_LOCK(&lk);
12787	if (DOINGSUJ(vp))
12788		return (0);
12789	return (1);
12790}
12791
12792/*
12793 * Called by the allocation routines when they are about to fail
12794 * in the hope that we can free up the requested resource (inodes
12795 * or disk space).
12796 *
12797 * First check to see if the work list has anything on it. If it has,
12798 * clean up entries until we successfully free the requested resource.
12799 * Because this process holds inodes locked, we cannot handle any remove
12800 * requests that might block on a locked inode as that could lead to
12801 * deadlock. If the worklist yields none of the requested resource,
12802 * start syncing out vnodes to free up the needed space.
12803 */
12804int
12805softdep_request_cleanup(fs, vp, cred, resource)
12806	struct fs *fs;
12807	struct vnode *vp;
12808	struct ucred *cred;
12809	int resource;
12810{
12811	struct ufsmount *ump;
12812	struct mount *mp;
12813	struct vnode *lvp, *mvp;
12814	long starttime;
12815	ufs2_daddr_t needed;
12816	int error;
12817
12818	/*
12819	 * If we are being called because of a process doing a
12820	 * copy-on-write, then it is not safe to process any
12821	 * worklist items as we will recurse into the copyonwrite
12822	 * routine.  This will result in an incoherent snapshot.
12823	 * If the vnode that we hold is a snapshot, we must avoid
12824	 * handling other resources that could cause deadlock.
12825	 */
12826	if ((curthread->td_pflags & TDP_COWINPROGRESS) || IS_SNAPSHOT(VTOI(vp)))
12827		return (0);
12828
12829	if (resource == FLUSH_BLOCKS_WAIT)
12830		stat_cleanup_blkrequests += 1;
12831	else
12832		stat_cleanup_inorequests += 1;
12833
12834	mp = vp->v_mount;
12835	ump = VFSTOUFS(mp);
12836	mtx_assert(UFS_MTX(ump), MA_OWNED);
12837	UFS_UNLOCK(ump);
12838	error = ffs_update(vp, 1);
12839	if (error != 0) {
12840		UFS_LOCK(ump);
12841		return (0);
12842	}
12843	/*
12844	 * If we are in need of resources, consider pausing for
12845	 * tickdelay to give ourselves some breathing room.
12846	 */
12847	ACQUIRE_LOCK(&lk);
12848	process_removes(vp);
12849	process_truncates(vp);
12850	request_cleanup(UFSTOVFS(ump), resource);
12851	FREE_LOCK(&lk);
12852	/*
12853	 * Now clean up at least as many resources as we will need.
12854	 *
12855	 * When requested to clean up inodes, the number that are needed
12856	 * is set by the number of simultaneous writers (mnt_writeopcount)
12857	 * plus a bit of slop (2) in case some more writers show up while
12858	 * we are cleaning.
12859	 *
12860	 * When requested to free up space, the amount of space that
12861	 * we need is enough blocks to allocate a full-sized segment
12862	 * (fs_contigsumsize). The number of such segments that will
12863	 * be needed is set by the number of simultaneous writers
12864	 * (mnt_writeopcount) plus a bit of slop (2) in case some more
12865	 * writers show up while we are cleaning.
12866	 *
	 * Additionally, if we are unprivileged and allocating space,
	 * we need to ensure that we clean up enough blocks to get the
	 * needed number of blocks over the threshold of the minimum
12870	 * number of blocks required to be kept free by the filesystem
12871	 * (fs_minfree).
12872	 */
12873	if (resource == FLUSH_INODES_WAIT) {
12874		needed = vp->v_mount->mnt_writeopcount + 2;
12875	} else if (resource == FLUSH_BLOCKS_WAIT) {
12876		needed = (vp->v_mount->mnt_writeopcount + 2) *
12877		    fs->fs_contigsumsize;
12878		if (priv_check_cred(cred, PRIV_VFS_BLOCKRESERVE, 0))
12879			needed += fragstoblks(fs,
12880			    roundup((fs->fs_dsize * fs->fs_minfree / 100) -
12881			    fs->fs_cstotal.cs_nffree, fs->fs_frag));
12882	} else {
12883		UFS_LOCK(ump);
12884		printf("softdep_request_cleanup: Unknown resource type %d\n",
12885		    resource);
12886		return (0);
12887	}
12888	starttime = time_second;
12889retry:
12890	if ((resource == FLUSH_BLOCKS_WAIT && ump->softdep_on_worklist > 0 &&
12891	    fs->fs_cstotal.cs_nbfree <= needed) ||
12892	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12893	    fs->fs_cstotal.cs_nifree <= needed)) {
12894		ACQUIRE_LOCK(&lk);
12895		if (ump->softdep_on_worklist > 0 &&
12896		    process_worklist_item(UFSTOVFS(ump),
12897		    ump->softdep_on_worklist, LK_NOWAIT) != 0)
12898			stat_worklist_push += 1;
12899		FREE_LOCK(&lk);
12900	}
12901	/*
12902	 * If we still need resources and there are no more worklist
12903	 * entries to process to obtain them, we have to start flushing
12904	 * the dirty vnodes to force the release of additional requests
	 * to the worklist that we can then process to reap additional
12906	 * resources. We walk the vnodes associated with the mount point
12907	 * until we get the needed worklist requests that we can reap.
12908	 */
12909	if ((resource == FLUSH_BLOCKS_WAIT &&
12910	     fs->fs_cstotal.cs_nbfree <= needed) ||
12911	    (resource == FLUSH_INODES_WAIT && fs->fs_pendinginodes > 0 &&
12912	     fs->fs_cstotal.cs_nifree <= needed)) {
12913		MNT_VNODE_FOREACH_ALL(lvp, mp, mvp) {
			if (TAILQ_EMPTY(&lvp->v_bufobj.bo_dirty.bv_hd)) {
12915				VI_UNLOCK(lvp);
12916				continue;
12917			}
12918			if (vget(lvp, LK_EXCLUSIVE | LK_INTERLOCK | LK_NOWAIT,
12919			    curthread))
12920				continue;
12921			if (lvp->v_vflag & VV_NOSYNC) {	/* unlinked */
12922				vput(lvp);
12923				continue;
12924			}
12925			(void) ffs_syncvnode(lvp, MNT_NOWAIT, 0);
12926			vput(lvp);
12927		}
12928		lvp = ump->um_devvp;
12929		if (vn_lock(lvp, LK_EXCLUSIVE | LK_NOWAIT) == 0) {
12930			VOP_FSYNC(lvp, MNT_NOWAIT, curthread);
12931			VOP_UNLOCK(lvp, 0);
12932		}
12933		if (ump->softdep_on_worklist > 0) {
12934			stat_cleanup_retries += 1;
12935			goto retry;
12936		}
12937		stat_cleanup_failures += 1;
12938	}
12939	if (time_second - starttime > stat_cleanup_high_delay)
12940		stat_cleanup_high_delay = time_second - starttime;
12941	UFS_LOCK(ump);
12942	return (1);
12943}
12944
12945/*
12946 * If memory utilization has gotten too high, deliberately slow things
12947 * down and speed up the I/O processing.
12948 */
12949extern struct thread *syncertd;
12950static int
12951request_cleanup(mp, resource)
12952	struct mount *mp;
12953	int resource;
12954{
12955	struct thread *td = curthread;
12956	struct ufsmount *ump;
12957
12958	rw_assert(&lk, RA_WLOCKED);
12959	/*
12960	 * We never hold up the filesystem syncer or buf daemon.
12961	 */
12962	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
12963		return (0);
12964	ump = VFSTOUFS(mp);
12965	/*
12966	 * First check to see if the work list has gotten backlogged.
12967	 * If it has, co-opt this process to help clean up two entries.
12968	 * Because this process may hold inodes locked, we cannot
12969	 * handle any remove requests that might block on a locked
12970	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
12971	 * to avoid recursively processing the worklist.
12972	 */
12973	if (ump->softdep_on_worklist > max_softdeps / 10) {
12974		td->td_pflags |= TDP_SOFTDEP;
12975		process_worklist_item(mp, 2, LK_NOWAIT);
12976		td->td_pflags &= ~TDP_SOFTDEP;
12977		stat_worklist_push += 2;
		return (1);
12979	}
12980	/*
12981	 * Next, we attempt to speed up the syncer process. If that
12982	 * is successful, then we allow the process to continue.
12983	 */
12984	if (softdep_speedup() &&
12985	    resource != FLUSH_BLOCKS_WAIT &&
12986	    resource != FLUSH_INODES_WAIT)
		return (0);
12988	/*
12989	 * If we are resource constrained on inode dependencies, try
12990	 * flushing some dirty inodes. Otherwise, we are constrained
12991	 * by file deletions, so try accelerating flushes of directories
12992	 * with removal dependencies. We would like to do the cleanup
12993	 * here, but we probably hold an inode locked at this point and
12994	 * that might deadlock against one that we try to clean. So,
12995	 * the best that we can do is request the syncer daemon to do
12996	 * the cleanup for us.
12997	 */
12998	switch (resource) {
12999
13000	case FLUSH_INODES:
13001	case FLUSH_INODES_WAIT:
13002		stat_ino_limit_push += 1;
13003		req_clear_inodedeps += 1;
13004		stat_countp = &stat_ino_limit_hit;
13005		break;
13006
13007	case FLUSH_BLOCKS:
13008	case FLUSH_BLOCKS_WAIT:
13009		stat_blk_limit_push += 1;
13010		req_clear_remove += 1;
13011		stat_countp = &stat_blk_limit_hit;
13012		break;
13013
13014	default:
13015		panic("request_cleanup: unknown type");
13016	}
13017	/*
13018	 * Hopefully the syncer daemon will catch up and awaken us.
13019	 * We wait at most tickdelay before proceeding in any case.
13020	 */
13021	proc_waiting += 1;
13022	if (callout_pending(&softdep_callout) == FALSE)
13023		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13024		    pause_timer, 0);
13025
13026	msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
13027	proc_waiting -= 1;
13028	return (1);
13029}
13030
13031/*
13032 * Awaken processes pausing in request_cleanup and clear proc_waiting
13033 * to indicate that there is no longer a timer running.
13034 */
13035static void
13036pause_timer(arg)
13037	void *arg;
13038{
13039
13040	/*
	 * The callout_ API has acquired the lock and will hold it around
	 * this function call.
13043	 */
13044	*stat_countp += 1;
13045	wakeup_one(&proc_waiting);
13046	if (proc_waiting > 0)
13047		callout_reset(&softdep_callout, tickdelay > 2 ? tickdelay : 2,
13048		    pause_timer, 0);
13049}
13050
13051/*
13052 * Flush out a directory with at least one removal dependency in an effort to
13053 * reduce the number of dirrem, freefile, and freeblks dependency structures.
13054 */
13055static void
13056clear_remove(void)
13057{
13058	struct pagedep_hashhead *pagedephd;
13059	struct pagedep *pagedep;
13060	static int next = 0;
13061	struct mount *mp;
13062	struct vnode *vp;
13063	struct bufobj *bo;
13064	int error, cnt;
13065	ino_t ino;
13066
13067	rw_assert(&lk, RA_WLOCKED);
13068
13069	for (cnt = 0; cnt <= pagedep_hash; cnt++) {
13070		pagedephd = &pagedep_hashtbl[next++];
13071		if (next > pagedep_hash)
13072			next = 0;
13073		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
13074			if (LIST_EMPTY(&pagedep->pd_dirremhd))
13075				continue;
13076			mp = pagedep->pd_list.wk_mp;
13077			ino = pagedep->pd_ino;
13078			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13079				continue;
13080			FREE_LOCK(&lk);
13081
13082			/*
13083			 * Let unmount clear deps
13084			 */
13085			error = vfs_busy(mp, MBF_NOWAIT);
13086			if (error != 0)
13087				goto finish_write;
13088			error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13089			     FFSV_FORCEINSMQ);
13090			vfs_unbusy(mp);
13091			if (error != 0) {
13092				softdep_error("clear_remove: vget", error);
13093				goto finish_write;
13094			}
13095			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13096				softdep_error("clear_remove: fsync", error);
13097			bo = &vp->v_bufobj;
13098			BO_LOCK(bo);
13099			drain_output(vp);
13100			BO_UNLOCK(bo);
13101			vput(vp);
13102		finish_write:
13103			vn_finished_write(mp);
13104			ACQUIRE_LOCK(&lk);
13105			return;
13106		}
13107	}
13108}
13109
13110/*
13111 * Clear out a block of dirty inodes in an effort to reduce
13112 * the number of inodedep dependency structures.
13113 */
13114static void
13115clear_inodedeps(void)
13116{
13117	struct inodedep_hashhead *inodedephd;
13118	struct inodedep *inodedep;
13119	static int next = 0;
13120	struct mount *mp;
13121	struct vnode *vp;
13122	struct fs *fs;
13123	int error, cnt;
13124	ino_t firstino, lastino, ino;
13125
13126	rw_assert(&lk, RA_WLOCKED);
13127	/*
13128	 * Pick a random inode dependency to be cleared.
13129	 * We will then gather up all the inodes in its block
13130	 * that have dependencies and flush them out.
13131	 */
13132	for (cnt = 0; cnt <= inodedep_hash; cnt++) {
13133		inodedephd = &inodedep_hashtbl[next++];
13134		if (next > inodedep_hash)
13135			next = 0;
13136		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
13137			break;
13138	}
13139	if (inodedep == NULL)
13140		return;
13141	fs = inodedep->id_fs;
13142	mp = inodedep->id_list.wk_mp;
13143	/*
13144	 * Find the last inode in the block with dependencies.
13145	 */
13146	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
13147	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
13148		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
13149			break;
13150	/*
13151	 * Asynchronously push all but the last inode with dependencies.
13152	 * Synchronously push the last inode with dependencies to ensure
13153	 * that the inode block gets written to free up the inodedeps.
13154	 */
13155	for (ino = firstino; ino <= lastino; ino++) {
13156		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
13157			continue;
13158		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
13159			continue;
13160		FREE_LOCK(&lk);
13161		error = vfs_busy(mp, MBF_NOWAIT); /* Let unmount clear deps */
13162		if (error != 0) {
13163			vn_finished_write(mp);
13164			ACQUIRE_LOCK(&lk);
13165			return;
13166		}
13167		if ((error = ffs_vgetf(mp, ino, LK_EXCLUSIVE, &vp,
13168		    FFSV_FORCEINSMQ)) != 0) {
13169			softdep_error("clear_inodedeps: vget", error);
13170			vfs_unbusy(mp);
13171			vn_finished_write(mp);
13172			ACQUIRE_LOCK(&lk);
13173			return;
13174		}
13175		vfs_unbusy(mp);
13176		if (ino == lastino) {
13177			if ((error = ffs_syncvnode(vp, MNT_WAIT, 0)))
13178				softdep_error("clear_inodedeps: fsync1", error);
13179		} else {
13180			if ((error = ffs_syncvnode(vp, MNT_NOWAIT, 0)))
13181				softdep_error("clear_inodedeps: fsync2", error);
13182			BO_LOCK(&vp->v_bufobj);
13183			drain_output(vp);
13184			BO_UNLOCK(&vp->v_bufobj);
13185		}
13186		vput(vp);
13187		vn_finished_write(mp);
13188		ACQUIRE_LOCK(&lk);
13189	}
13190}
13191
13192void
13193softdep_buf_append(bp, wkhd)
13194	struct buf *bp;
13195	struct workhead *wkhd;
13196{
13197	struct worklist *wk;
13198
13199	ACQUIRE_LOCK(&lk);
13200	while ((wk = LIST_FIRST(wkhd)) != NULL) {
13201		WORKLIST_REMOVE(wk);
13202		WORKLIST_INSERT(&bp->b_dep, wk);
13203	}
	FREE_LOCK(&lk);
}
13207
13208void
13209softdep_inode_append(ip, cred, wkhd)
13210	struct inode *ip;
13211	struct ucred *cred;
13212	struct workhead *wkhd;
13213{
13214	struct buf *bp;
13215	struct fs *fs;
13216	int error;
13217
13218	fs = ip->i_fs;
13219	error = bread(ip->i_devvp, fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
13220	    (int)fs->fs_bsize, cred, &bp);
13221	if (error) {
13222		bqrelse(bp);
13223		softdep_freework(wkhd);
13224		return;
13225	}
13226	softdep_buf_append(bp, wkhd);
13227	bqrelse(bp);
13228}
13229
13230void
13231softdep_freework(wkhd)
13232	struct workhead *wkhd;
13233{
13234
13235	ACQUIRE_LOCK(&lk);
13236	handle_jwork(wkhd);
13237	FREE_LOCK(&lk);
13238}
13239
13240/*
13241 * Function to determine if the buffer has outstanding dependencies
13242 * that will cause a roll-back if the buffer is written. If wantcount
 * is set, return the number of dependencies; otherwise just yes or no.
13244 */
13245static int
13246softdep_count_dependencies(bp, wantcount)
13247	struct buf *bp;
13248	int wantcount;
13249{
13250	struct worklist *wk;
13251	struct bmsafemap *bmsafemap;
13252	struct freework *freework;
13253	struct inodedep *inodedep;
13254	struct indirdep *indirdep;
13255	struct freeblks *freeblks;
13256	struct allocindir *aip;
13257	struct pagedep *pagedep;
13258	struct dirrem *dirrem;
13259	struct newblk *newblk;
13260	struct mkdir *mkdir;
13261	struct diradd *dap;
13262	int i, retval;
13263
13264	retval = 0;
13265	ACQUIRE_LOCK(&lk);
13266	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
13267		switch (wk->wk_type) {
13268
13269		case D_INODEDEP:
13270			inodedep = WK_INODEDEP(wk);
13271			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
13272				/* bitmap allocation dependency */
13273				retval += 1;
13274				if (!wantcount)
13275					goto out;
13276			}
13277			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
13278				/* direct block pointer dependency */
13279				retval += 1;
13280				if (!wantcount)
13281					goto out;
13282			}
13283			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
				/* ext block pointer dependency */
13285				retval += 1;
13286				if (!wantcount)
13287					goto out;
13288			}
13289			if (TAILQ_FIRST(&inodedep->id_inoreflst)) {
13290				/* Add reference dependency. */
13291				retval += 1;
13292				if (!wantcount)
13293					goto out;
13294			}
13295			continue;
13296
13297		case D_INDIRDEP:
13298			indirdep = WK_INDIRDEP(wk);
13299
13300			TAILQ_FOREACH(freework, &indirdep->ir_trunc, fw_next) {
13301				/* indirect truncation dependency */
13302				retval += 1;
13303				if (!wantcount)
13304					goto out;
13305			}
13306
13307			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
13308				/* indirect block pointer dependency */
13309				retval += 1;
13310				if (!wantcount)
13311					goto out;
13312			}
13313			continue;
13314
13315		case D_PAGEDEP:
13316			pagedep = WK_PAGEDEP(wk);
13317			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
13318				if (LIST_FIRST(&dirrem->dm_jremrefhd)) {
13319					/* Journal remove ref dependency. */
13320					retval += 1;
13321					if (!wantcount)
13322						goto out;
13323				}
13324			}
13325			for (i = 0; i < DAHASHSZ; i++) {
13326
13327				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
13328					/* directory entry dependency */
13329					retval += 1;
13330					if (!wantcount)
13331						goto out;
13332				}
13333			}
13334			continue;
13335
13336		case D_BMSAFEMAP:
13337			bmsafemap = WK_BMSAFEMAP(wk);
13338			if (LIST_FIRST(&bmsafemap->sm_jaddrefhd)) {
13339				/* Add reference dependency. */
13340				retval += 1;
13341				if (!wantcount)
13342					goto out;
13343			}
13344			if (LIST_FIRST(&bmsafemap->sm_jnewblkhd)) {
13345				/* Allocate block dependency. */
13346				retval += 1;
13347				if (!wantcount)
13348					goto out;
13349			}
13350			continue;
13351
13352		case D_FREEBLKS:
13353			freeblks = WK_FREEBLKS(wk);
13354			if (LIST_FIRST(&freeblks->fb_jblkdephd)) {
13355				/* Freeblk journal dependency. */
13356				retval += 1;
13357				if (!wantcount)
13358					goto out;
13359			}
13360			continue;
13361
13362		case D_ALLOCDIRECT:
13363		case D_ALLOCINDIR:
13364			newblk = WK_NEWBLK(wk);
13365			if (newblk->nb_jnewblk) {
13366				/* Journal allocate dependency. */
13367				retval += 1;
13368				if (!wantcount)
13369					goto out;
13370			}
13371			continue;
13372
13373		case D_MKDIR:
13374			mkdir = WK_MKDIR(wk);
13375			if (mkdir->md_jaddref) {
13376				/* Journal reference dependency. */
13377				retval += 1;
13378				if (!wantcount)
13379					goto out;
13380			}
13381			continue;
13382
13383		case D_FREEWORK:
13384		case D_FREEDEP:
13385		case D_JSEGDEP:
13386		case D_JSEG:
13387		case D_SBDEP:
13388			/* never a dependency on these blocks */
13389			continue;
13390
13391		default:
13392			panic("softdep_count_dependencies: Unexpected type %s",
13393			    TYPENAME(wk->wk_type));
13394			/* NOTREACHED */
13395		}
13396	}
13397out:
13398	FREE_LOCK(&lk);
	return (retval);
13400}
13401
13402/*
13403 * Acquire exclusive access to a buffer.
 * Must be called with the lock parameter write-locked.
13405 * Return acquired buffer or NULL on failure.
13406 */
13407static struct buf *
13408getdirtybuf(bp, lock, waitfor)
13409	struct buf *bp;
13410	struct rwlock *lock;
13411	int waitfor;
13412{
13413	int error;
13414
13415	rw_assert(lock, RA_WLOCKED);
13416	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
13417		if (waitfor != MNT_WAIT)
13418			return (NULL);
13419		error = BUF_LOCK(bp,
13420		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, lock);
13421		/*
		 * Even if we successfully acquire bp here, we have dropped
		 * the lock, which may violate our guarantee.
13424		 */
13425		if (error == 0)
13426			BUF_UNLOCK(bp);
13427		else if (error != ENOLCK)
13428			panic("getdirtybuf: inconsistent lock: %d", error);
13429		rw_wlock(lock);
13430		return (NULL);
13431	}
13432	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13433		if (lock == &lk && waitfor == MNT_WAIT) {
13434			rw_wunlock(lock);
13435			BO_LOCK(bp->b_bufobj);
13436			BUF_UNLOCK(bp);
13437			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
13438				bp->b_vflags |= BV_BKGRDWAIT;
13439				msleep(&bp->b_xflags, BO_LOCKPTR(bp->b_bufobj),
13440				       PRIBIO | PDROP, "getbuf", 0);
13441			} else
13442				BO_UNLOCK(bp->b_bufobj);
13443			rw_wlock(lock);
13444			return (NULL);
13445		}
13446		BUF_UNLOCK(bp);
13447		if (waitfor != MNT_WAIT)
13448			return (NULL);
13449		/*
13450		 * The lock argument must be bp->b_vp's mutex in
13451		 * this case.
13452		 */
13453#ifdef	DEBUG_VFS_LOCKS
13454		if (bp->b_vp->v_type != VCHR)
13455			ASSERT_BO_WLOCKED(bp->b_bufobj);
13456#endif
13457		bp->b_vflags |= BV_BKGRDWAIT;
13458		rw_sleep(&bp->b_xflags, lock, PRIBIO, "getbuf", 0);
13459		return (NULL);
13460	}
13461	if ((bp->b_flags & B_DELWRI) == 0) {
13462		BUF_UNLOCK(bp);
13463		return (NULL);
13464	}
13465	bremfree(bp);
13466	return (bp);
13467}
13468
13469
13470/*
13471 * Check if it is safe to suspend the file system now.  On entry,
13472 * the vnode interlock for devvp should be held.  Return 0 with
13473 * the mount interlock held if the file system can be suspended now,
13474 * otherwise return EAGAIN with the mount interlock held.
13475 */
13476int
13477softdep_check_suspend(struct mount *mp,
13478		      struct vnode *devvp,
13479		      int softdep_deps,
13480		      int softdep_accdeps,
13481		      int secondary_writes,
13482		      int secondary_accwrites)
13483{
13484	struct bufobj *bo;
13485	struct ufsmount *ump;
13486	int error;
13487
13488	ump = VFSTOUFS(mp);
13489	bo = &devvp->v_bufobj;
13490	ASSERT_BO_WLOCKED(bo);
13491
13492	for (;;) {
13493		if (!TRY_ACQUIRE_LOCK(&lk)) {
13494			BO_UNLOCK(bo);
13495			ACQUIRE_LOCK(&lk);
13496			FREE_LOCK(&lk);
13497			BO_LOCK(bo);
13498			continue;
13499		}
13500		MNT_ILOCK(mp);
13501		if (mp->mnt_secondary_writes != 0) {
13502			FREE_LOCK(&lk);
13503			BO_UNLOCK(bo);
13504			msleep(&mp->mnt_secondary_writes,
13505			       MNT_MTX(mp),
13506			       (PUSER - 1) | PDROP, "secwr", 0);
13507			BO_LOCK(bo);
13508			continue;
13509		}
13510		break;
13511	}
13512
13513	/*
13514	 * Reasons for needing more work before suspend:
13515	 * - Dirty buffers on devvp.
	 * - Softdep activity occurred after start of vnode sync loop.
	 * - Secondary writes occurred after start of vnode sync loop.
13518	 */
13519	error = 0;
13520	if (bo->bo_numoutput > 0 ||
13521	    bo->bo_dirty.bv_cnt > 0 ||
13522	    softdep_deps != 0 ||
13523	    ump->softdep_deps != 0 ||
13524	    softdep_accdeps != ump->softdep_accdeps ||
13525	    secondary_writes != 0 ||
13526	    mp->mnt_secondary_writes != 0 ||
13527	    secondary_accwrites != mp->mnt_secondary_accwrites)
13528		error = EAGAIN;
13529	FREE_LOCK(&lk);
13530	BO_UNLOCK(bo);
13531	return (error);
13532}
13533
13534
13535/*
13536 * Get the number of dependency structures for the file system, both
13537 * the current number and the total number allocated.  These will
13538 * later be used to detect that softdep processing has occurred.
13539 */
13540void
13541softdep_get_depcounts(struct mount *mp,
13542		      int *softdep_depsp,
13543		      int *softdep_accdepsp)
13544{
13545	struct ufsmount *ump;
13546
13547	ump = VFSTOUFS(mp);
13548	ACQUIRE_LOCK(&lk);
13549	*softdep_depsp = ump->softdep_deps;
13550	*softdep_accdepsp = ump->softdep_accdeps;
13551	FREE_LOCK(&lk);
13552}
13553
13554/*
13555 * Wait for pending output on a vnode to complete.
13556 * Must be called with vnode lock and interlock locked.
13557 *
13558 * XXX: Should just be a call to bufobj_wwait().
13559 */
13560static void
13561drain_output(vp)
13562	struct vnode *vp;
13563{
13564	struct bufobj *bo;
13565
13566	bo = &vp->v_bufobj;
13567	ASSERT_VOP_LOCKED(vp, "drain_output");
13568	ASSERT_BO_WLOCKED(bo);
13569
13570	while (bo->bo_numoutput) {
13571		bo->bo_flag |= BO_WWAIT;
13572		msleep((caddr_t)&bo->bo_numoutput,
13573		    BO_LOCKPTR(bo), PRIBIO + 1, "drainvp", 0);
13574	}
13575}
13576
13577/*
13578 * Called whenever a buffer that is being invalidated or reallocated
13579 * contains dependencies. This should only happen if an I/O error has
13580 * occurred. The routine is called with the buffer locked.
13581 */
13582static void
13583softdep_deallocate_dependencies(bp)
13584	struct buf *bp;
13585{
13586
13587	if ((bp->b_ioflags & BIO_ERROR) == 0)
13588		panic("softdep_deallocate_dependencies: dangling deps");
13589	if (bp->b_vp != NULL && bp->b_vp->v_mount != NULL)
13590		softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
13591	else
13592		printf("softdep_deallocate_dependencies: "
13593		    "got error %d while accessing filesystem\n", bp->b_error);
13594	if (bp->b_error != ENXIO)
13595		panic("softdep_deallocate_dependencies: unrecovered I/O error");
13596}
13597
13598/*
13599 * Function to handle asynchronous write errors in the filesystem.
13600 */
13601static void
13602softdep_error(func, error)
13603	char *func;
13604	int error;
13605{
13606
13607	/* XXX should do something better! */
13608	printf("%s: got error %d while accessing filesystem\n", func, error);
13609}
13610
13611#ifdef DDB
13612
13613static void
13614inodedep_print(struct inodedep *inodedep, int verbose)
13615{
13616	db_printf("%p fs %p st %x ino %jd inoblk %jd delta %d nlink %d"
13617	    " saveino %p\n",
13618	    inodedep, inodedep->id_fs, inodedep->id_state,
13619	    (intmax_t)inodedep->id_ino,
13620	    (intmax_t)fsbtodb(inodedep->id_fs,
13621	    ino_to_fsba(inodedep->id_fs, inodedep->id_ino)),
13622	    inodedep->id_nlinkdelta, inodedep->id_savednlink,
13623	    inodedep->id_savedino1);
13624
13625	if (verbose == 0)
13626		return;
13627
13628	db_printf("\tpendinghd %p, bufwait %p, inowait %p, inoreflst %p, "
13629	    "mkdiradd %p\n",
13630	    LIST_FIRST(&inodedep->id_pendinghd),
13631	    LIST_FIRST(&inodedep->id_bufwait),
13632	    LIST_FIRST(&inodedep->id_inowait),
13633	    TAILQ_FIRST(&inodedep->id_inoreflst),
13634	    inodedep->id_mkdiradd);
13635	db_printf("\tinoupdt %p, newinoupdt %p, extupdt %p, newextupdt %p\n",
13636	    TAILQ_FIRST(&inodedep->id_inoupdt),
13637	    TAILQ_FIRST(&inodedep->id_newinoupdt),
13638	    TAILQ_FIRST(&inodedep->id_extupdt),
13639	    TAILQ_FIRST(&inodedep->id_newextupdt));
13640}
13641
13642DB_SHOW_COMMAND(inodedep, db_show_inodedep)
13643{
13644
13645	if (have_addr == 0) {
13646		db_printf("Address required\n");
13647		return;
13648	}
13649	inodedep_print((struct inodedep*)addr, 1);
13650}
13651
13652DB_SHOW_COMMAND(inodedeps, db_show_inodedeps)
13653{
13654	struct inodedep_hashhead *inodedephd;
13655	struct inodedep *inodedep;
13656	struct fs *fs;
13657	int cnt;
13658
13659	fs = have_addr ? (struct fs *)addr : NULL;
13660	for (cnt = 0; cnt < inodedep_hash; cnt++) {
13661		inodedephd = &inodedep_hashtbl[cnt];
13662		LIST_FOREACH(inodedep, inodedephd, id_hash) {
13663			if (fs != NULL && fs != inodedep->id_fs)
13664				continue;
13665			inodedep_print(inodedep, 0);
13666		}
13667	}
13668}
13669
13670DB_SHOW_COMMAND(worklist, db_show_worklist)
13671{
13672	struct worklist *wk;
13673
13674	if (have_addr == 0) {
13675		db_printf("Address required\n");
13676		return;
13677	}
13678	wk = (struct worklist *)addr;
13679	printf("worklist: %p type %s state 0x%X\n",
13680	    wk, TYPENAME(wk->wk_type), wk->wk_state);
13681}
13682
13683DB_SHOW_COMMAND(workhead, db_show_workhead)
13684{
13685	struct workhead *wkhd;
13686	struct worklist *wk;
13687	int i;
13688
13689	if (have_addr == 0) {
13690		db_printf("Address required\n");
13691		return;
13692	}
13693	wkhd = (struct workhead *)addr;
13694	wk = LIST_FIRST(wkhd);
13695	for (i = 0; i < 100 && wk != NULL; i++, wk = LIST_NEXT(wk, wk_list))
13696		db_printf("worklist: %p type %s state 0x%X",
13697		    wk, TYPENAME(wk->wk_type), wk->wk_state);
13698	if (i == 100)
13699		db_printf("workhead overflow");
13700	printf("\n");
13701}
13702
13703
13704DB_SHOW_COMMAND(mkdirs, db_show_mkdirs)
13705{
13706	struct jaddref *jaddref;
13707	struct diradd *diradd;
13708	struct mkdir *mkdir;
13709
13710	LIST_FOREACH(mkdir, &mkdirlisthd, md_mkdirs) {
13711		diradd = mkdir->md_diradd;
13712		db_printf("mkdir: %p state 0x%X dap %p state 0x%X",
13713		    mkdir, mkdir->md_state, diradd, diradd->da_state);
13714		if ((jaddref = mkdir->md_jaddref) != NULL)
13715			db_printf(" jaddref %p jaddref state 0x%X",
13716			    jaddref, jaddref->ja_state);
13717		db_printf("\n");
13718	}
13719}
13720
13721#endif /* DDB */
13722
13723#endif /* SOFTUPDATES */
13724