/*-
 * Copyright 1998, 2000 Marshall Kirk McKusick. All Rights Reserved.
 *
 * The soft updates code is derived from the appendix of a University
 * of Michigan technical report (Gregory R. Ganger and Yale N. Patt,
 * "Soft Updates: A Solution to the Metadata Update Problem in File
 * Systems", CSE-TR-254-95, August 1995).
 *
 * Further information about soft updates can be obtained from:
 *
 *	Marshall Kirk McKusick		http://www.mckusick.com/softdep/
 *	1614 Oxford Street		mckusick@mckusick.com
 *	Berkeley, CA 94709-1608		+1-510-843-9542
 *	USA
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY MARSHALL KIRK MCKUSICK ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED.  IN NO EVENT SHALL MARSHALL KIRK MCKUSICK BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	from: @(#)ffs_softdep.c	9.59 (McKusick) 6/21/00
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD: head/sys/ufs/ffs/ffs_softdep.c 162653 2006-09-26 04:20:09Z tegge $");

/*
 * For now we want the safety net that the DIAGNOSTIC and DEBUG flags provide.
 */
#ifndef DIAGNOSTIC
#define DIAGNOSTIC
#endif
#ifndef DEBUG
#define DEBUG
#endif

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/buf.h>
#include <sys/kdb.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/mutex.h>
#include <sys/proc.h>
#include <sys/stat.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/vnode.h>
#include <sys/conf.h>
#include <ufs/ufs/dir.h>
#include <ufs/ufs/extattr.h>
#include <ufs/ufs/quota.h>
#include <ufs/ufs/inode.h>
#include <ufs/ufs/ufsmount.h>
#include <ufs/ffs/fs.h>
#include <ufs/ffs/softdep.h>
#include <ufs/ffs/ffs_extern.h>
#include <ufs/ufs/ufs_extern.h>

#include <vm/vm.h>

#include "opt_ffs.h"

#ifndef SOFTUPDATES

int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{

	panic("softdep_flushfiles called");
}

int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{

	return (0);
}

void
softdep_initialize()
{

	return;
}

void
softdep_uninitialize()
{

	return;
}

void
softdep_setup_inomapdep(bp, ip, newinum)
	struct buf *bp;
	struct inode *ip;
	ino_t newinum;
{

	panic("softdep_setup_inomapdep called");
}

void
softdep_setup_blkmapdep(bp, mp, newblkno)
	struct buf *bp;
	struct mount *mp;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_blkmapdep called");
}

void
softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocdirect called");
}

void
softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
	struct inode *ip;
	ufs_lbn_t lbn;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	long newsize;
	long oldsize;
	struct buf *bp;
{

	panic("softdep_setup_allocext called");
}

void
softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
	struct inode *ip;
	ufs_lbn_t lbn;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
	ufs2_daddr_t oldblkno;
	struct buf *nbp;
{

	panic("softdep_setup_allocindir_page called");
}

void
softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
	struct buf *nbp;
	struct inode *ip;
	struct buf *bp;
	int ptrno;
	ufs2_daddr_t newblkno;
{

	panic("softdep_setup_allocindir_meta called");
}

void
softdep_setup_freeblocks(ip, length, flags)
	struct inode *ip;
	off_t length;
	int flags;
{

	panic("softdep_setup_freeblocks called");
}

void
softdep_freefile(pvp, ino, mode)
		struct vnode *pvp;
		ino_t ino;
		int mode;
{

	panic("softdep_freefile called");
}

int
softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	off_t diroffset;
	ino_t newinum;
	struct buf *newdirbp;
	int isnewblk;
{

	panic("softdep_setup_directory_add called");
}

void
softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize)
	struct inode *dp;
	caddr_t base;
	caddr_t oldloc;
	caddr_t newloc;
	int entrysize;
{

	panic("softdep_change_directoryentry_offset called");
}

void
softdep_setup_remove(bp, dp, ip, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	int isrmdir;
{

	panic("softdep_setup_remove called");
}

void
softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	ino_t newinum;
	int isrmdir;
{

	panic("softdep_setup_directory_change called");
}

void
softdep_change_linkcnt(ip)
	struct inode *ip;
{

	panic("softdep_change_linkcnt called");
}

void
softdep_load_inodeblock(ip)
	struct inode *ip;
{

	panic("softdep_load_inodeblock called");
}

void
softdep_update_inodeblock(ip, bp, waitfor)
	struct inode *ip;
	struct buf *bp;
	int waitfor;
{

	panic("softdep_update_inodeblock called");
}

int
softdep_fsync(vp)
	struct vnode *vp;	/* the "in_core" copy of the inode */
{

	return (0);
}

void
softdep_fsync_mountdev(vp)
	struct vnode *vp;
{

	return;
}

int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{

	*countp = 0;
	return (0);
}

int
softdep_sync_metadata(struct vnode *vp)
{

	return (0);
}

int
softdep_slowdown(vp)
	struct vnode *vp;
{

	panic("softdep_slowdown called");
}

void
softdep_releasefile(ip)
	struct inode *ip;	/* inode with the zero effective link count */
{

	panic("softdep_releasefile called");
}

int
softdep_request_cleanup(fs, vp)
	struct fs *fs;
	struct vnode *vp;
{

	return (0);
}

int
softdep_check_suspend(struct mount *mp,
		      struct vnode *devvp,
		      int softdep_deps,
		      int softdep_accdeps,
		      int secondary_writes,
		      int secondary_accwrites)
{
	struct bufobj *bo;
	int error;

	(void) softdep_deps,
	(void) softdep_accdeps;

	ASSERT_VI_LOCKED(devvp, "softdep_check_suspend");
	bo = &devvp->v_bufobj;

	for (;;) {
		if (!MNT_ITRYLOCK(mp)) {
			VI_UNLOCK(devvp);
			MNT_ILOCK(mp);
			MNT_IUNLOCK(mp);
			VI_LOCK(devvp);
			continue;
		}
		if (mp->mnt_secondary_writes != 0) {
			VI_UNLOCK(devvp);
			msleep(&mp->mnt_secondary_writes,
			       MNT_MTX(mp),
			       (PUSER - 1) | PDROP, "secwr", 0);
			VI_LOCK(devvp);
			continue;
		}
		break;
	}

	/*
	 * Reasons for needing more work before suspend:
	 * - Dirty buffers on devvp.
	 * - Secondary writes occurred after start of vnode sync loop
	 */
	error = 0;
	if (bo->bo_numoutput > 0 ||
	    bo->bo_dirty.bv_cnt > 0 ||
	    secondary_writes != 0 ||
	    mp->mnt_secondary_writes != 0 ||
	    secondary_accwrites != mp->mnt_secondary_accwrites)
		error = EAGAIN;
	VI_UNLOCK(devvp);
	return (error);
}

void
softdep_get_depcounts(struct mount *mp,
		      int *softdepactivep,
		      int *softdepactiveaccp)
{
	(void) mp;
	*softdepactivep = 0;
	*softdepactiveaccp = 0;
}

#else
/*
 * These definitions need to be adapted to the system to which
 * this file is being ported.
 */
/*
 * malloc types defined for the softdep system.
 */
static MALLOC_DEFINE(M_PAGEDEP, "pagedep","File page dependencies");
static MALLOC_DEFINE(M_INODEDEP, "inodedep","Inode dependencies");
static MALLOC_DEFINE(M_NEWBLK, "newblk","New block allocation");
static MALLOC_DEFINE(M_BMSAFEMAP, "bmsafemap","Block or frag allocated from cyl group map");
static MALLOC_DEFINE(M_ALLOCDIRECT, "allocdirect","Block or frag dependency for an inode");
static MALLOC_DEFINE(M_INDIRDEP, "indirdep","Indirect block dependencies");
static MALLOC_DEFINE(M_ALLOCINDIR, "allocindir","Block dependency for an indirect block");
static MALLOC_DEFINE(M_FREEFRAG, "freefrag","Previously used frag for an inode");
static MALLOC_DEFINE(M_FREEBLKS, "freeblks","Blocks freed from an inode");
static MALLOC_DEFINE(M_FREEFILE, "freefile","Inode deallocated");
static MALLOC_DEFINE(M_DIRADD, "diradd","New directory entry");
static MALLOC_DEFINE(M_MKDIR, "mkdir","New directory");
static MALLOC_DEFINE(M_DIRREM, "dirrem","Directory entry deleted");
static MALLOC_DEFINE(M_NEWDIRBLK, "newdirblk","Unclaimed new directory block");
static MALLOC_DEFINE(M_SAVEDINO, "savedino","Saved inodes");

#define M_SOFTDEP_FLAGS	(M_WAITOK | M_USE_RESERVE)

#define	D_PAGEDEP	0
#define	D_INODEDEP	1
#define	D_NEWBLK	2
#define	D_BMSAFEMAP	3
#define	D_ALLOCDIRECT	4
#define	D_INDIRDEP	5
#define	D_ALLOCINDIR	6
#define	D_FREEFRAG	7
#define	D_FREEBLKS	8
#define	D_FREEFILE	9
#define	D_DIRADD	10
#define	D_MKDIR		11
#define	D_DIRREM	12
#define	D_NEWDIRBLK	13
#define	D_LAST		D_NEWDIRBLK

/*
 * translate from workitem type to memory type
 * MUST match the defines above, such that memtype[D_XXX] == M_XXX
 */
static struct malloc_type *memtype[] = {
	M_PAGEDEP,
	M_INODEDEP,
	M_NEWBLK,
	M_BMSAFEMAP,
	M_ALLOCDIRECT,
	M_INDIRDEP,
	M_ALLOCINDIR,
	M_FREEFRAG,
	M_FREEBLKS,
	M_FREEFILE,
	M_DIRADD,
	M_MKDIR,
	M_DIRREM,
	M_NEWDIRBLK
};

#define DtoM(type) (memtype[type])

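/*
 * Editorial suggestion (not in the original source): because the table
 * above must track the D_* defines exactly, a compile-time guard such as
 *
 *	CTASSERT(sizeof(memtype) / sizeof(memtype[0]) == D_LAST + 1);
 *
 * would catch a new D_* define added without a matching memtype[] entry.
 */
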
/*
 * Names of malloc types.
 */
#define TYPENAME(type)  \
	((unsigned)(type) <= D_LAST ? memtype[type]->ks_shortdesc : "???")
/*
 * End system adaptation definitions.
 */

/*
 * Forward declarations.
 */
struct inodedep_hashhead;
struct newblk_hashhead;
struct pagedep_hashhead;

/*
 * Internal function prototypes.
 */
static	void softdep_error(char *, int);
static	void drain_output(struct vnode *);
static	struct buf *getdirtybuf(struct buf *, struct mtx *, int);
static	void clear_remove(struct thread *);
static	void clear_inodedeps(struct thread *);
static	int flush_pagedep_deps(struct vnode *, struct mount *,
	    struct diraddhd *);
static	int flush_inodedep_deps(struct mount *, ino_t);
static	int flush_deplist(struct allocdirectlst *, int, int *);
static	int handle_written_filepage(struct pagedep *, struct buf *);
static  void diradd_inode_written(struct diradd *, struct inodedep *);
static	int handle_written_inodeblock(struct inodedep *, struct buf *);
static	void handle_allocdirect_partdone(struct allocdirect *);
static	void handle_allocindir_partdone(struct allocindir *);
static	void initiate_write_filepage(struct pagedep *, struct buf *);
static	void handle_written_mkdir(struct mkdir *, int);
static	void initiate_write_inodeblock_ufs1(struct inodedep *, struct buf *);
static	void initiate_write_inodeblock_ufs2(struct inodedep *, struct buf *);
static	void handle_workitem_freefile(struct freefile *);
static	void handle_workitem_remove(struct dirrem *, struct vnode *);
static	struct dirrem *newdirrem(struct buf *, struct inode *,
	    struct inode *, int, struct dirrem **);
static	void free_diradd(struct diradd *);
static	void free_allocindir(struct allocindir *, struct inodedep *);
static	void free_newdirblk(struct newdirblk *);
static	int indir_trunc(struct freeblks *, ufs2_daddr_t, int, ufs_lbn_t,
	    ufs2_daddr_t *);
static	void deallocate_dependencies(struct buf *, struct inodedep *);
static	void free_allocdirect(struct allocdirectlst *,
	    struct allocdirect *, int);
static	int check_inode_unwritten(struct inodedep *);
static	int free_inodedep(struct inodedep *);
static	void handle_workitem_freeblocks(struct freeblks *, int);
static	void merge_inode_lists(struct allocdirectlst *,struct allocdirectlst *);
static	void setup_allocindir_phase2(struct buf *, struct inode *,
	    struct allocindir *);
static	struct allocindir *newallocindir(struct inode *, int, ufs2_daddr_t,
	    ufs2_daddr_t);
static	void handle_workitem_freefrag(struct freefrag *);
static	struct freefrag *newfreefrag(struct inode *, ufs2_daddr_t, long);
static	void allocdirect_merge(struct allocdirectlst *,
	    struct allocdirect *, struct allocdirect *);
static	struct bmsafemap *bmsafemap_lookup(struct mount *, struct buf *);
static	int newblk_find(struct newblk_hashhead *, struct fs *, ufs2_daddr_t,
	    struct newblk **);
static	int newblk_lookup(struct fs *, ufs2_daddr_t, int, struct newblk **);
static	int inodedep_find(struct inodedep_hashhead *, struct fs *, ino_t,
	    struct inodedep **);
static	int inodedep_lookup(struct mount *, ino_t, int, struct inodedep **);
static	int pagedep_lookup(struct inode *, ufs_lbn_t, int, struct pagedep **);
static	int pagedep_find(struct pagedep_hashhead *, ino_t, ufs_lbn_t,
	    struct mount *mp, int, struct pagedep **);
static	void pause_timer(void *);
static	int request_cleanup(struct mount *, int);
static	int process_worklist_item(struct mount *, int);
static	void add_to_worklist(struct worklist *);
static	void softdep_flush(void);
static	int softdep_speedup(void);

/*
 * Exported softdep operations.
 */
static	void softdep_disk_io_initiation(struct buf *);
static	void softdep_disk_write_complete(struct buf *);
static	void softdep_deallocate_dependencies(struct buf *);
static	int softdep_count_dependencies(struct buf *bp, int);

static struct mtx lk;
MTX_SYSINIT(softdep_lock, &lk, "Softdep Lock", MTX_DEF);

#define TRY_ACQUIRE_LOCK(lk)		mtx_trylock(lk)
#define ACQUIRE_LOCK(lk)		mtx_lock(lk)
#define FREE_LOCK(lk)			mtx_unlock(lk)

/*
 * Worklist queue management.
 * These routines require that the lock be held.
 */
#ifndef /* NOT */ DEBUG
#define WORKLIST_INSERT(head, item) do {	\
	(item)->wk_state |= ONWORKLIST;		\
	LIST_INSERT_HEAD(head, item, wk_list);	\
} while (0)
#define WORKLIST_REMOVE(item) do {		\
	(item)->wk_state &= ~ONWORKLIST;	\
	LIST_REMOVE(item, wk_list);		\
} while (0)
#else /* DEBUG */
static	void worklist_insert(struct workhead *, struct worklist *);
static	void worklist_remove(struct worklist *);

#define WORKLIST_INSERT(head, item) worklist_insert(head, item)
#define WORKLIST_REMOVE(item) worklist_remove(item)

static void
worklist_insert(head, item)
	struct workhead *head;
	struct worklist *item;
{

	mtx_assert(&lk, MA_OWNED);
	if (item->wk_state & ONWORKLIST)
		panic("worklist_insert: already on list");
	item->wk_state |= ONWORKLIST;
	LIST_INSERT_HEAD(head, item, wk_list);
}

static void
worklist_remove(item)
	struct worklist *item;
{

	mtx_assert(&lk, MA_OWNED);
	if ((item->wk_state & ONWORKLIST) == 0)
		panic("worklist_remove: not on list");
	item->wk_state &= ~ONWORKLIST;
	LIST_REMOVE(item, wk_list);
}
#endif /* DEBUG */
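
/*
 * Illustrative sketch (editorial, not original text): a typical caller
 * attaches a dependency "item" (hypothetical name) to a buffer's b_dep
 * chain while holding the softdep lock:
 *
 *	ACQUIRE_LOCK(&lk);
 *	WORKLIST_INSERT(&bp->b_dep, &item->wk_list);
 *	...
 *	WORKLIST_REMOVE(&item->wk_list);
 *	FREE_LOCK(&lk);
 *
 * With DEBUG defined, the function versions above additionally verify
 * the ONWORKLIST state bit and panic on a double insert or on removal
 * of an item that is not listed.
 */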

/*
 * Routines for tracking and managing workitems.
 */
static	void workitem_free(struct worklist *, int);
static	void workitem_alloc(struct worklist *, int, struct mount *);

#define	WORKITEM_FREE(item, type) workitem_free((struct worklist *)(item), (type))

static void
workitem_free(item, type)
	struct worklist *item;
	int type;
{
	struct ufsmount *ump;
	mtx_assert(&lk, MA_OWNED);

#ifdef DEBUG
	if (item->wk_state & ONWORKLIST)
		panic("workitem_free: still on list");
	if (item->wk_type != type)
		panic("workitem_free: type mismatch");
#endif
	ump = VFSTOUFS(item->wk_mp);
	if (--ump->softdep_deps == 0 && ump->softdep_req)
		wakeup(&ump->softdep_deps);
	FREE(item, DtoM(type));
}

static void
workitem_alloc(item, type, mp)
	struct worklist *item;
	int type;
	struct mount *mp;
{
	item->wk_type = type;
	item->wk_mp = mp;
	item->wk_state = 0;
	ACQUIRE_LOCK(&lk);
	VFSTOUFS(mp)->softdep_deps++;
	VFSTOUFS(mp)->softdep_accdeps++;
	FREE_LOCK(&lk);
}
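
/*
 * Editorial sketch (not original text): each dependency structure embeds
 * a struct worklist as its first member, so a typical lifecycle pairs the
 * two helpers above, e.g. for an allocdirect "adp":
 *
 *	workitem_alloc(&adp->ad_list, D_ALLOCDIRECT, mp);	(at creation)
 *	...
 *	WORKITEM_FREE(adp, D_ALLOCDIRECT);			(at teardown)
 *
 * The per-mount softdep_deps counter they maintain is what
 * softdep_waitidle() below sleeps on when quiescing a filesystem.
 */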

/*
 * Workitem queue management
 */
static int max_softdeps;	/* maximum number of structs before slowdown */
static int maxindirdeps = 50;	/* max number of indirdeps before slowdown */
static int tickdelay = 2;	/* number of ticks to pause during slowdown */
static int proc_waiting;	/* tracks whether we have a timeout posted */
static int *stat_countp;	/* statistic to count in proc_waiting timeout */
static struct callout_handle handle; /* handle on posted proc_waiting timeout */
static int req_pending;
static int req_clear_inodedeps;	/* syncer process flush some inodedeps */
#define FLUSH_INODES		1
static int req_clear_remove;	/* syncer process flush some freeblks */
#define FLUSH_REMOVE		2
#define FLUSH_REMOVE_WAIT	3
/*
 * runtime statistics
 */
static int stat_worklist_push;	/* number of worklist cleanups */
static int stat_blk_limit_push;	/* number of times block limit neared */
static int stat_ino_limit_push;	/* number of times inode limit neared */
static int stat_blk_limit_hit;	/* number of times block slowdown imposed */
static int stat_ino_limit_hit;	/* number of times inode slowdown imposed */
static int stat_sync_limit_hit;	/* number of synchronous slowdowns imposed */
static int stat_indir_blk_ptrs;	/* bufs redirtied as indir ptrs not written */
static int stat_inode_bitmap;	/* bufs redirtied as inode bitmap not written */
static int stat_direct_blk_ptrs;/* bufs redirtied as direct ptrs not written */
static int stat_dir_entry;	/* bufs redirtied as dir entry cannot write */

SYSCTL_INT(_debug, OID_AUTO, max_softdeps, CTLFLAG_RW, &max_softdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, tickdelay, CTLFLAG_RW, &tickdelay, 0, "");
SYSCTL_INT(_debug, OID_AUTO, maxindirdeps, CTLFLAG_RW, &maxindirdeps, 0, "");
SYSCTL_INT(_debug, OID_AUTO, worklist_push, CTLFLAG_RW, &stat_worklist_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_push, CTLFLAG_RW, &stat_blk_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_push, CTLFLAG_RW, &stat_ino_limit_push, 0,"");
SYSCTL_INT(_debug, OID_AUTO, blk_limit_hit, CTLFLAG_RW, &stat_blk_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, ino_limit_hit, CTLFLAG_RW, &stat_ino_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, sync_limit_hit, CTLFLAG_RW, &stat_sync_limit_hit, 0, "");
SYSCTL_INT(_debug, OID_AUTO, indir_blk_ptrs, CTLFLAG_RW, &stat_indir_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, inode_bitmap, CTLFLAG_RW, &stat_inode_bitmap, 0, "");
SYSCTL_INT(_debug, OID_AUTO, direct_blk_ptrs, CTLFLAG_RW, &stat_direct_blk_ptrs, 0, "");
SYSCTL_INT(_debug, OID_AUTO, dir_entry, CTLFLAG_RW, &stat_dir_entry, 0, "");
/* SYSCTL_INT(_debug, OID_AUTO, worklist_num, CTLFLAG_RD, &softdep_on_worklist, 0, ""); */

SYSCTL_DECL(_vfs_ffs);

static int compute_summary_at_mount = 0;	/* Whether to recompute the summary at mount time */
SYSCTL_INT(_vfs_ffs, OID_AUTO, compute_summary_at_mount, CTLFLAG_RW,
	   &compute_summary_at_mount, 0, "Recompute summary at mount");

static struct proc *softdepproc;
static struct kproc_desc softdep_kp = {
	"softdepflush",
	softdep_flush,
	&softdepproc
};
SYSINIT(sdproc, SI_SUB_KTHREAD_UPDATE, SI_ORDER_ANY, kproc_start, &softdep_kp)

static void
softdep_flush(void)
{
	struct mount *nmp;
	struct mount *mp;
	struct ufsmount *ump;
	struct thread *td;
	int remaining;
	int vfslocked;

	td = curthread;
	td->td_pflags |= TDP_NORUNNINGBUF;

	for (;;) {
		kthread_suspend_check(softdepproc);
		ACQUIRE_LOCK(&lk);
		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		FREE_LOCK(&lk);
		remaining = 0;
		mtx_lock(&mountlist_mtx);
		for (mp = TAILQ_FIRST(&mountlist); mp != NULL; mp = nmp)  {
			nmp = TAILQ_NEXT(mp, mnt_list);
			if ((mp->mnt_flag & MNT_SOFTDEP) == 0)
				continue;
			if (vfs_busy(mp, LK_NOWAIT, &mountlist_mtx, td))
				continue;
			vfslocked = VFS_LOCK_GIANT(mp);
			softdep_process_worklist(mp, 0);
			ump = VFSTOUFS(mp);
			remaining += ump->softdep_on_worklist -
				ump->softdep_on_worklist_inprogress;
			VFS_UNLOCK_GIANT(vfslocked);
			mtx_lock(&mountlist_mtx);
			nmp = TAILQ_NEXT(mp, mnt_list);
			vfs_unbusy(mp, td);
		}
		mtx_unlock(&mountlist_mtx);
		if (remaining)
			continue;
		ACQUIRE_LOCK(&lk);
		if (!req_pending)
			msleep(&req_pending, &lk, PVM, "sdflush", hz);
		req_pending = 0;
		FREE_LOCK(&lk);
	}
}

static int
softdep_speedup(void)
{

	mtx_assert(&lk, MA_OWNED);
	if (req_pending == 0) {
		req_pending = 1;
		wakeup(&req_pending);
	}

	return speedup_syncer();
}

/*
 * Add an item to the end of the work queue.
 * This routine requires that the lock be held.
 * This is the only routine that adds items to the list.
 * The following routine is the only one that removes items
 * and does so in order from first to last.
 */
static void
add_to_worklist(wk)
	struct worklist *wk;
{
	struct ufsmount *ump;

	mtx_assert(&lk, MA_OWNED);
	ump = VFSTOUFS(wk->wk_mp);
	if (wk->wk_state & ONWORKLIST)
		panic("add_to_worklist: already on list");
	wk->wk_state |= ONWORKLIST;
	if (LIST_FIRST(&ump->softdep_workitem_pending) == NULL)
		LIST_INSERT_HEAD(&ump->softdep_workitem_pending, wk, wk_list);
	else
		LIST_INSERT_AFTER(ump->softdep_worklist_tail, wk, wk_list);
	ump->softdep_worklist_tail = wk;
	ump->softdep_on_worklist += 1;
}
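
/*
 * Example (editorial sketch, not original text): freeing a file queues
 * its freeblks item before its freefile item, so the FIFO discipline
 * above guarantees the blocks are released before the inode itself:
 *
 *	add_to_worklist(&freeblks->fb_list);	(queued first, runs first)
 *	add_to_worklist(&freefile->fx_list);	(queued later, runs later)
 */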

/*
 * Process that runs once per second to handle items in the background queue.
 *
 * Note that we ensure that items are processed in the order in which they
 * appear in the queue. The code below depends on this property to ensure
 * that blocks of a file are freed before the inode itself is freed. This
 * ordering ensures that no new <vfsid, inum, lbn> triples will be generated
 * until all the old ones have been purged from the dependency lists.
 */
int
softdep_process_worklist(mp, full)
	struct mount *mp;
	int full;
{
	struct thread *td = curthread;
	int cnt, matchcnt, loopcount;
	struct ufsmount *ump;
	long starttime;

	KASSERT(mp != NULL, ("softdep_process_worklist: NULL mp"));
	/*
	 * Record the process identifier of our caller so that we can give
	 * this process preferential treatment in request_cleanup below.
	 */
	matchcnt = 0;
	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(&lk);
	loopcount = 1;
	starttime = time_second;
	while (ump->softdep_on_worklist > 0) {
		if ((cnt = process_worklist_item(mp, 0)) == -1)
			break;
		else
			matchcnt += cnt;
		/*
		 * If requested, try removing inode or removal dependencies.
		 */
		if (req_clear_inodedeps) {
			clear_inodedeps(td);
			req_clear_inodedeps -= 1;
			wakeup_one(&proc_waiting);
		}
		if (req_clear_remove) {
			clear_remove(td);
			req_clear_remove -= 1;
			wakeup_one(&proc_waiting);
		}
		/*
		 * We do not generally want to stop for buffer space, but if
		 * we are really being a buffer hog, we will stop and wait.
		 */
		if (loopcount++ % 128 == 0) {
			FREE_LOCK(&lk);
			bwillwrite();
			ACQUIRE_LOCK(&lk);
		}
		/*
		 * Never allow processing to run for more than one
		 * second. Otherwise the other mountpoints may get
		 * excessively backlogged.
		 */
		if (!full && starttime != time_second) {
			matchcnt = -1;
			break;
		}
	}
	FREE_LOCK(&lk);
	return (matchcnt);
}

/*
 * Process one item on the worklist.
 */
static int
process_worklist_item(mp, flags)
	struct mount *mp;
	int flags;
{
	struct worklist *wk, *wkend;
	struct ufsmount *ump;
	struct vnode *vp;
	int matchcnt = 0;

	mtx_assert(&lk, MA_OWNED);
	KASSERT(mp != NULL, ("process_worklist_item: NULL mp"));
	/*
	 * If we are being called because of a process doing a
	 * copy-on-write, then it is not safe to write as we may
	 * recurse into the copy-on-write routine.
	 */
	if (curthread->td_pflags & TDP_COWINPROGRESS)
		return (-1);
	/*
	 * Normally we just process each item on the worklist in order.
	 * However, if we are in a situation where we cannot lock any
	 * inodes, we have to skip over any dirrem requests whose
	 * vnodes are resident and locked.
	 */
	ump = VFSTOUFS(mp);
	vp = NULL;
	LIST_FOREACH(wk, &ump->softdep_workitem_pending, wk_list) {
		if (wk->wk_state & INPROGRESS)
			continue;
		if ((flags & LK_NOWAIT) == 0 || wk->wk_type != D_DIRREM)
			break;
		wk->wk_state |= INPROGRESS;
		ump->softdep_on_worklist_inprogress++;
		FREE_LOCK(&lk);
		ffs_vget(mp, WK_DIRREM(wk)->dm_oldinum,
		    LK_NOWAIT | LK_EXCLUSIVE, &vp);
		ACQUIRE_LOCK(&lk);
		wk->wk_state &= ~INPROGRESS;
		ump->softdep_on_worklist_inprogress--;
		if (vp != NULL)
			break;
	}
	if (wk == 0)
		return (-1);
	/*
	 * Remove the item to be processed. If we are removing the last
	 * item on the list, we need to recalculate the tail pointer.
	 * As this happens rarely and usually when the list is short,
	 * we just run down the list to find it rather than tracking it
	 * in the above loop.
	 */
	WORKLIST_REMOVE(wk);
	if (wk == ump->softdep_worklist_tail) {
		LIST_FOREACH(wkend, &ump->softdep_workitem_pending, wk_list)
			if (LIST_NEXT(wkend, wk_list) == NULL)
				break;
		ump->softdep_worklist_tail = wkend;
	}
	ump->softdep_on_worklist -= 1;
	FREE_LOCK(&lk);
	if (vn_start_secondary_write(NULL, &mp, V_NOWAIT))
		panic("process_worklist_item: suspended filesystem");
	matchcnt++;
	switch (wk->wk_type) {

	case D_DIRREM:
		/* removal of a directory entry */
		handle_workitem_remove(WK_DIRREM(wk), vp);
		break;

	case D_FREEBLKS:
		/* releasing blocks and/or fragments from a file */
		handle_workitem_freeblocks(WK_FREEBLKS(wk), flags & LK_NOWAIT);
		break;

	case D_FREEFRAG:
		/* releasing a fragment when replaced as a file grows */
		handle_workitem_freefrag(WK_FREEFRAG(wk));
		break;

	case D_FREEFILE:
		/* releasing an inode when its link count drops to 0 */
		handle_workitem_freefile(WK_FREEFILE(wk));
		break;

	default:
		panic("%s_process_worklist: Unknown type %s",
		    "softdep", TYPENAME(wk->wk_type));
		/* NOTREACHED */
	}
	vn_finished_secondary_write(mp);
	ACQUIRE_LOCK(&lk);
	return (matchcnt);
}

/*
 * Move dependencies from one buffer to another.
 */
void
softdep_move_dependencies(oldbp, newbp)
	struct buf *oldbp;
	struct buf *newbp;
{
	struct worklist *wk, *wktail;

	if (LIST_FIRST(&newbp->b_dep) != NULL)
		panic("softdep_move_dependencies: need merge code");
	wktail = 0;
	ACQUIRE_LOCK(&lk);
	while ((wk = LIST_FIRST(&oldbp->b_dep)) != NULL) {
		LIST_REMOVE(wk, wk_list);
		if (wktail == 0)
			LIST_INSERT_HEAD(&newbp->b_dep, wk, wk_list);
		else
			LIST_INSERT_AFTER(wktail, wk, wk_list);
		wktail = wk;
	}
	FREE_LOCK(&lk);
}

/*
 * Purge the work list of all items associated with a particular mount point.
 */
int
softdep_flushworklist(oldmnt, countp, td)
	struct mount *oldmnt;
	int *countp;
	struct thread *td;
{
	struct vnode *devvp;
	int count, error = 0;
	struct ufsmount *ump;

	/*
	 * Alternately flush the block device associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. We continue until no more worklist dependencies
	 * are found.
	 */
	*countp = 0;
	ump = VFSTOUFS(oldmnt);
	devvp = ump->um_devvp;
	while ((count = softdep_process_worklist(oldmnt, 1)) > 0) {
		*countp += count;
		vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY, td);
		error = VOP_FSYNC(devvp, MNT_WAIT, td);
		VOP_UNLOCK(devvp, 0, td);
		if (error)
			break;
	}
	return (error);
}

int
softdep_waitidle(struct mount *mp)
{
	struct ufsmount *ump;
	int error;
	int i;

	ump = VFSTOUFS(mp);
	ACQUIRE_LOCK(&lk);
	for (i = 0; i < 10 && ump->softdep_deps; i++) {
		ump->softdep_req = 1;
		if (ump->softdep_on_worklist)
			panic("softdep_waitidle: work added after flush.");
		msleep(&ump->softdep_deps, &lk, PVM, "softdeps", 1);
	}
	ump->softdep_req = 0;
	FREE_LOCK(&lk);
	error = 0;
	if (i == 10) {
		error = EBUSY;
		printf("softdep_waitidle: Failed to flush worklist for %p\n",
		    mp);
	}

	return (error);
}

/*
 * Flush all vnodes and worklist items associated with a specified mount point.
 */
int
softdep_flushfiles(oldmnt, flags, td)
	struct mount *oldmnt;
	int flags;
	struct thread *td;
{
	int error, count, loopcnt;

	error = 0;

	/*
	 * Alternately flush the vnodes associated with the mount
	 * point and process any dependencies that the flushing
	 * creates. In theory, this loop can happen at most twice,
	 * but we give it a few extra just to be sure.
	 */
	for (loopcnt = 10; loopcnt > 0; loopcnt--) {
		/*
		 * Do another flush in case any vnodes were brought in
		 * as part of the cleanup operations.
		 */
		if ((error = ffs_flushfiles(oldmnt, flags, td)) != 0)
			break;
		if ((error = softdep_flushworklist(oldmnt, &count, td)) != 0 ||
		    count == 0)
			break;
	}
	/*
	 * If we are unmounting then it is an error to fail. If we
	 * are simply trying to downgrade to read-only, then filesystem
	 * activity can keep us busy forever, so we just fail with EBUSY.
	 */
	if (loopcnt == 0) {
		if (oldmnt->mnt_kern_flag & MNTK_UNMOUNT)
			panic("softdep_flushfiles: looping");
		error = EBUSY;
	}
	if (!error)
		error = softdep_waitidle(oldmnt);
	return (error);
}

/*
 * Structure hashing.
 *
 * There are three types of structures that can be looked up:
 *	1) pagedep structures identified by mount point, inode number,
 *	   and logical block.
 *	2) inodedep structures identified by mount point and inode number.
 *	3) newblk structures identified by mount point and
 *	   physical block number.
 *
 * The "pagedep" and "inodedep" dependency structures are hashed
 * separately from the file blocks and inodes to which they correspond.
 * This separation helps when the in-memory copy of an inode or
 * file block must be replaced. It also obviates the need to access
 * an inode or file page when simply updating (or de-allocating)
 * dependency structures. Lookup of newblk structures is needed to
 * find newly allocated blocks when trying to associate them with
 * their allocdirect or allocindir structure.
 *
 * The lookup routines optionally create and hash a new instance when
 * an existing entry is not found.
 */
#define DEPALLOC	0x0001	/* allocate structure if lookup fails */
#define NODELAY		0x0002	/* cannot do background work */
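
/*
 * Illustrative sketch of the lookup-or-allocate pattern shared by the
 * routines below (editorial, not original text; the inodedep case is one
 * possible instance):
 *
 *	ACQUIRE_LOCK(&lk);
 *	if (inodedep_lookup(mp, inum, DEPALLOC, &inodedep) == 0)
 *		... a new entry was created and hashed ...
 *	else
 *		... an existing entry was found ...
 *	FREE_LOCK(&lk);
 *
 * Because each allocating path must drop the lock to call MALLOC, every
 * lookup routine re-checks its hash chain after reacquiring the lock
 * and frees the fresh allocation if it lost the race.
 */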

/*
 * Structures and routines associated with pagedep caching.
 */
LIST_HEAD(pagedep_hashhead, pagedep) *pagedep_hashtbl;
u_long	pagedep_hash;		/* size of hash table - 1 */
#define	PAGEDEP_HASH(mp, inum, lbn) \
	(&pagedep_hashtbl[((((register_t)(mp)) >> 13) + (inum) + (lbn)) & \
	    pagedep_hash])

static int
pagedep_find(pagedephd, ino, lbn, mp, flags, pagedeppp)
	struct pagedep_hashhead *pagedephd;
	ino_t ino;
	ufs_lbn_t lbn;
	struct mount *mp;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;

	LIST_FOREACH(pagedep, pagedephd, pd_hash)
		if (ino == pagedep->pd_ino &&
		    lbn == pagedep->pd_lbn &&
		    mp == pagedep->pd_list.wk_mp)
			break;
	if (pagedep) {
		*pagedeppp = pagedep;
		if ((flags & DEPALLOC) != 0 &&
		    (pagedep->pd_state & ONWORKLIST) == 0)
			return (0);
		return (1);
	}
	*pagedeppp = NULL;
	return (0);
}
/*
 * Look up a pagedep. Return 1 if found, 0 if not found or found
 * when asked to allocate but not associated with any buffer.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in pagedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
pagedep_lookup(ip, lbn, flags, pagedeppp)
	struct inode *ip;
	ufs_lbn_t lbn;
	int flags;
	struct pagedep **pagedeppp;
{
	struct pagedep *pagedep;
	struct pagedep_hashhead *pagedephd;
	struct mount *mp;
	int ret;
	int i;

	mtx_assert(&lk, MA_OWNED);
	mp = ITOV(ip)->v_mount;
	pagedephd = PAGEDEP_HASH(mp, ip->i_number, lbn);

	ret = pagedep_find(pagedephd, ip->i_number, lbn, mp, flags, pagedeppp);
	if (*pagedeppp || (flags & DEPALLOC) == 0)
		return (ret);
	FREE_LOCK(&lk);
	MALLOC(pagedep, struct pagedep *, sizeof(struct pagedep),
	    M_PAGEDEP, M_SOFTDEP_FLAGS|M_ZERO);
	workitem_alloc(&pagedep->pd_list, D_PAGEDEP, mp);
	ACQUIRE_LOCK(&lk);
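	/*
	 * The lock was dropped for the allocation above, so another
	 * thread may have entered an identical pagedep in the interim;
	 * re-check the hash chain before inserting ours.
	 */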
	ret = pagedep_find(pagedephd, ip->i_number, lbn, mp, flags, pagedeppp);
	if (*pagedeppp) {
		WORKITEM_FREE(pagedep, D_PAGEDEP);
		return (ret);
	}
	pagedep->pd_ino = ip->i_number;
	pagedep->pd_lbn = lbn;
	LIST_INIT(&pagedep->pd_dirremhd);
	LIST_INIT(&pagedep->pd_pendinghd);
	for (i = 0; i < DAHASHSZ; i++)
		LIST_INIT(&pagedep->pd_diraddhd[i]);
	LIST_INSERT_HEAD(pagedephd, pagedep, pd_hash);
	*pagedeppp = pagedep;
	return (0);
}

/*
 * Structures and routines associated with inodedep caching.
 */
LIST_HEAD(inodedep_hashhead, inodedep) *inodedep_hashtbl;
static u_long	inodedep_hash;	/* size of hash table - 1 */
static long	num_inodedep;	/* number of inodedep allocated */
#define	INODEDEP_HASH(fs, inum) \
      (&inodedep_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & inodedep_hash])

static int
inodedep_find(inodedephd, fs, inum, inodedeppp)
	struct inodedep_hashhead *inodedephd;
	struct fs *fs;
	ino_t inum;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;

	LIST_FOREACH(inodedep, inodedephd, id_hash)
		if (inum == inodedep->id_ino && fs == inodedep->id_fs)
			break;
	if (inodedep) {
		*inodedeppp = inodedep;
		return (1);
	}
	*inodedeppp = NULL;

	return (0);
}
/*
 * Look up an inodedep. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in inodedeppp.
 * This routine must be called with splbio interrupts blocked.
 */
static int
inodedep_lookup(mp, inum, flags, inodedeppp)
	struct mount *mp;
	ino_t inum;
	int flags;
	struct inodedep **inodedeppp;
{
	struct inodedep *inodedep;
	struct inodedep_hashhead *inodedephd;
	struct fs *fs;

	mtx_assert(&lk, MA_OWNED);
	fs = VFSTOUFS(mp)->um_fs;
	inodedephd = INODEDEP_HASH(fs, inum);

	if (inodedep_find(inodedephd, fs, inum, inodedeppp))
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	/*
	 * If we are over our limit, try to improve the situation.
	 */
	if (num_inodedep > max_softdeps && (flags & NODELAY) == 0)
		request_cleanup(mp, FLUSH_INODES);
	FREE_LOCK(&lk);
	MALLOC(inodedep, struct inodedep *, sizeof(struct inodedep),
		M_INODEDEP, M_SOFTDEP_FLAGS);
	workitem_alloc(&inodedep->id_list, D_INODEDEP, mp);
	ACQUIRE_LOCK(&lk);
	if (inodedep_find(inodedephd, fs, inum, inodedeppp)) {
		WORKITEM_FREE(inodedep, D_INODEDEP);
		return (1);
	}
	num_inodedep += 1;
	inodedep->id_fs = fs;
	inodedep->id_ino = inum;
	inodedep->id_state = ALLCOMPLETE;
	inodedep->id_nlinkdelta = 0;
	inodedep->id_savedino1 = NULL;
	inodedep->id_savedsize = -1;
	inodedep->id_savedextsize = -1;
	inodedep->id_buf = NULL;
	LIST_INIT(&inodedep->id_pendinghd);
	LIST_INIT(&inodedep->id_inowait);
	LIST_INIT(&inodedep->id_bufwait);
	TAILQ_INIT(&inodedep->id_inoupdt);
	TAILQ_INIT(&inodedep->id_newinoupdt);
	TAILQ_INIT(&inodedep->id_extupdt);
	TAILQ_INIT(&inodedep->id_newextupdt);
	LIST_INSERT_HEAD(inodedephd, inodedep, id_hash);
	*inodedeppp = inodedep;
	return (0);
}

/*
 * Structures and routines associated with newblk caching.
 */
LIST_HEAD(newblk_hashhead, newblk) *newblk_hashtbl;
u_long	newblk_hash;		/* size of hash table - 1 */
#define	NEWBLK_HASH(fs, inum) \
	(&newblk_hashtbl[((((register_t)(fs)) >> 13) + (inum)) & newblk_hash])

static int
newblk_find(newblkhd, fs, newblkno, newblkpp)
	struct newblk_hashhead *newblkhd;
	struct fs *fs;
	ufs2_daddr_t newblkno;
	struct newblk **newblkpp;
{
	struct newblk *newblk;

	LIST_FOREACH(newblk, newblkhd, nb_hash)
		if (newblkno == newblk->nb_newblkno && fs == newblk->nb_fs)
			break;
	if (newblk) {
		*newblkpp = newblk;
		return (1);
	}
	*newblkpp = NULL;
	return (0);
}

/*
 * Look up a newblk. Return 1 if found, 0 if not found.
 * If not found, allocate if DEPALLOC flag is passed.
 * Found or allocated entry is returned in newblkpp.
 */
static int
newblk_lookup(fs, newblkno, flags, newblkpp)
	struct fs *fs;
	ufs2_daddr_t newblkno;
	int flags;
	struct newblk **newblkpp;
{
	struct newblk *newblk;
	struct newblk_hashhead *newblkhd;

	newblkhd = NEWBLK_HASH(fs, newblkno);
	if (newblk_find(newblkhd, fs, newblkno, newblkpp))
		return (1);
	if ((flags & DEPALLOC) == 0)
		return (0);
	FREE_LOCK(&lk);
	MALLOC(newblk, struct newblk *, sizeof(struct newblk),
		M_NEWBLK, M_SOFTDEP_FLAGS);
	ACQUIRE_LOCK(&lk);
	if (newblk_find(newblkhd, fs, newblkno, newblkpp)) {
		FREE(newblk, M_NEWBLK);
		return (1);
	}
	newblk->nb_state = 0;
	newblk->nb_fs = fs;
	newblk->nb_newblkno = newblkno;
	LIST_INSERT_HEAD(newblkhd, newblk, nb_hash);
	*newblkpp = newblk;
	return (0);
}

/*
 * Executed during filesystem initialization before
 * mounting any filesystems.
 */
void
softdep_initialize()
{

	LIST_INIT(&mkdirlisthd);
	max_softdeps = desiredvnodes * 4;
	pagedep_hashtbl = hashinit(desiredvnodes / 5, M_PAGEDEP,
	    &pagedep_hash);
	inodedep_hashtbl = hashinit(desiredvnodes, M_INODEDEP, &inodedep_hash);
	newblk_hashtbl = hashinit(64, M_NEWBLK, &newblk_hash);

	/* initialise bioops hack */
	bioops.io_start = softdep_disk_io_initiation;
	bioops.io_complete = softdep_disk_write_complete;
	bioops.io_deallocate = softdep_deallocate_dependencies;
	bioops.io_countdeps = softdep_count_dependencies;
}
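
/*
 * Note (editorial): filling in the global bioops table above is the hook
 * by which the generic buffer cache calls back into soft updates when
 * buffer I/O is initiated or completed, and when a buffer's dependencies
 * must be deallocated or counted.
 */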

/*
 * Executed after all filesystems have been unmounted during
 * filesystem module unload.
 */
void
softdep_uninitialize()
{

	hashdestroy(pagedep_hashtbl, M_PAGEDEP, pagedep_hash);
	hashdestroy(inodedep_hashtbl, M_INODEDEP, inodedep_hash);
	hashdestroy(newblk_hashtbl, M_NEWBLK, newblk_hash);
}

/*
 * Called at mount time to notify the dependency code that a
 * filesystem wishes to use it.
 */
int
softdep_mount(devvp, mp, fs, cred)
	struct vnode *devvp;
	struct mount *mp;
	struct fs *fs;
	struct ucred *cred;
{
	struct csum_total cstotal;
	struct ufsmount *ump;
	struct cg *cgp;
	struct buf *bp;
	int error, cyl;

	MNT_ILOCK(mp);
	mp->mnt_flag = (mp->mnt_flag & ~MNT_ASYNC) | MNT_SOFTDEP;
	if ((mp->mnt_kern_flag & MNTK_SOFTDEP) == 0) {
		mp->mnt_kern_flag = (mp->mnt_kern_flag & ~MNTK_ASYNC) |
			MNTK_SOFTDEP;
		mp->mnt_noasync++;
	}
	MNT_IUNLOCK(mp);
	ump = VFSTOUFS(mp);
	LIST_INIT(&ump->softdep_workitem_pending);
	ump->softdep_worklist_tail = NULL;
	ump->softdep_on_worklist = 0;
	ump->softdep_deps = 0;
	/*
	 * When doing soft updates, the counters in the
	 * superblock may have gotten out of sync. Recomputation
	 * can take a long time and can be deferred for background
	 * fsck.  However, the old behavior of scanning the cylinder
	 * groups and recalculating them at mount time is available
	 * by setting vfs.ffs.compute_summary_at_mount to one.
	 */
	if (compute_summary_at_mount == 0 || fs->fs_clean != 0)
		return (0);
	bzero(&cstotal, sizeof cstotal);
	for (cyl = 0; cyl < fs->fs_ncg; cyl++) {
		if ((error = bread(devvp, fsbtodb(fs, cgtod(fs, cyl)),
		    fs->fs_cgsize, cred, &bp)) != 0) {
			brelse(bp);
			return (error);
		}
		cgp = (struct cg *)bp->b_data;
		cstotal.cs_nffree += cgp->cg_cs.cs_nffree;
		cstotal.cs_nbfree += cgp->cg_cs.cs_nbfree;
		cstotal.cs_nifree += cgp->cg_cs.cs_nifree;
		cstotal.cs_ndir += cgp->cg_cs.cs_ndir;
		fs->fs_cs(fs, cyl) = cgp->cg_cs;
		brelse(bp);
	}
#ifdef DEBUG
	if (bcmp(&cstotal, &fs->fs_cstotal, sizeof cstotal))
		printf("%s: superblock summary recomputed\n", fs->fs_fsmnt);
#endif
	bcopy(&cstotal, &fs->fs_cstotal, sizeof cstotal);
	return (0);
}
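
/*
 * Usage example (editorial, not original text): an administrator can
 * force the cylinder-group scan above when mounting an unclean
 * filesystem by setting the tunable defined earlier in this file:
 *
 *	sysctl vfs.ffs.compute_summary_at_mount=1
 *
 * at the cost of reading every cylinder group block at mount time.
 */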

/*
 * Protecting the freemaps (or bitmaps).
 *
 * To eliminate the need to execute fsck before mounting a filesystem
 * after a power failure, one must (conservatively) guarantee that the
 * on-disk copy of the bitmaps never indicate that a live inode or block is
 * free.  So, when a block or inode is allocated, the bitmap should be
 * updated (on disk) before any new pointers.  When a block or inode is
 * freed, the bitmap should not be updated until all pointers have been
 * reset.  The latter dependency is handled by the delayed de-allocation
 * approach described below for block and inode de-allocation.  The former
 * dependency is handled by calling the following procedure when a block or
 * inode is allocated. When an inode is allocated an "inodedep" is created
 * with its DEPCOMPLETE flag cleared until its bitmap is written to disk.
 * Each "inodedep" is also inserted into the hash indexing structure so
 * that any additional link additions can be made dependent on the inode
 * allocation.
 *
 * The ufs filesystem maintains a number of free block counts (e.g., per
 * cylinder group, per cylinder and per <cylinder, rotational position> pair)
 * in addition to the bitmaps.  These counts are used to improve efficiency
 * during allocation and therefore must be consistent with the bitmaps.
 * There is no convenient way to guarantee post-crash consistency of these
 * counts with simple update ordering, for two main reasons: (1) The counts
 * and bitmaps for a single cylinder group block are not in the same disk
 * sector.  If a disk write is interrupted (e.g., by power failure), one may
 * be written and the other not.  (2) Some of the counts are located in the
 * superblock rather than the cylinder group block. So, we focus our soft
 * updates implementation on protecting the bitmaps. When mounting a
 * filesystem, we recompute the auxiliary counts from the bitmaps.
 */
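
/*
 * The above, restated as an ordering rule (editorial summary):
 *
 *	allocate:  write bitmap (in use)  -->  write pointer to block/inode
 *	free:      clear all pointers     -->  write bitmap (free)
 *
 * Either sequence may be interrupted by a crash without ever leaving an
 * on-disk pointer to a block or inode that the bitmaps claim is free.
 */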
1503
1504/*
1505 * Called just after updating the cylinder group block to allocate an inode.
1506 */
1507void
1508softdep_setup_inomapdep(bp, ip, newinum)
1509	struct buf *bp;		/* buffer for cylgroup block with inode map */
1510	struct inode *ip;	/* inode related to allocation */
1511	ino_t newinum;		/* new inode number being allocated */
1512{
1513	struct inodedep *inodedep;
1514	struct bmsafemap *bmsafemap;
1515
1516	/*
1517	 * Create a dependency for the newly allocated inode.
1518	 * Panic if it already exists as something is seriously wrong.
1519	 * Otherwise add it to the dependency list for the buffer holding
1520	 * the cylinder group map from which it was allocated.
1521	 */
1522	ACQUIRE_LOCK(&lk);
1523	if ((inodedep_lookup(UFSTOVFS(ip->i_ump), newinum, DEPALLOC|NODELAY,
1524	    &inodedep)))
1525		panic("softdep_setup_inomapdep: dependency for new inode "
1526		    "already exists");
1527	inodedep->id_buf = bp;
1528	inodedep->id_state &= ~DEPCOMPLETE;
1529	bmsafemap = bmsafemap_lookup(inodedep->id_list.wk_mp, bp);
1530	LIST_INSERT_HEAD(&bmsafemap->sm_inodedephd, inodedep, id_deps);
1531	FREE_LOCK(&lk);
1532}
1533
1534/*
1535 * Called just after updating the cylinder group block to
1536 * allocate block or fragment.
1537 */
1538void
1539softdep_setup_blkmapdep(bp, mp, newblkno)
1540	struct buf *bp;		/* buffer for cylgroup block with block map */
1541	struct mount *mp;	/* filesystem doing allocation */
1542	ufs2_daddr_t newblkno;	/* number of newly allocated block */
1543{
1544	struct newblk *newblk;
1545	struct bmsafemap *bmsafemap;
1546	struct fs *fs;
1547
1548	fs = VFSTOUFS(mp)->um_fs;
1549	/*
1550	 * Create a dependency for the newly allocated block.
1551	 * Add it to the dependency list for the buffer holding
1552	 * the cylinder group map from which it was allocated.
1553	 */
1554	ACQUIRE_LOCK(&lk);
1555	if (newblk_lookup(fs, newblkno, DEPALLOC, &newblk) != 0)
1556		panic("softdep_setup_blkmapdep: found block");
1557	newblk->nb_bmsafemap = bmsafemap = bmsafemap_lookup(mp, bp);
1558	LIST_INSERT_HEAD(&bmsafemap->sm_newblkhd, newblk, nb_deps);
1559	FREE_LOCK(&lk);
1560}
1561
1562/*
1563 * Find the bmsafemap associated with a cylinder group buffer.
1564 * If none exists, create one. The buffer must be locked when
1565 * this routine is called and this routine must be called with
1566 * splbio interrupts blocked.
1567 */
1568static struct bmsafemap *
1569bmsafemap_lookup(mp, bp)
1570	struct mount *mp;
1571	struct buf *bp;
1572{
1573	struct bmsafemap *bmsafemap;
1574	struct worklist *wk;
1575
1576	mtx_assert(&lk, MA_OWNED);
1577	LIST_FOREACH(wk, &bp->b_dep, wk_list)
1578		if (wk->wk_type == D_BMSAFEMAP)
1579			return (WK_BMSAFEMAP(wk));
1580	FREE_LOCK(&lk);
1581	MALLOC(bmsafemap, struct bmsafemap *, sizeof(struct bmsafemap),
1582		M_BMSAFEMAP, M_SOFTDEP_FLAGS);
1583	workitem_alloc(&bmsafemap->sm_list, D_BMSAFEMAP, mp);
1584	bmsafemap->sm_buf = bp;
1585	LIST_INIT(&bmsafemap->sm_allocdirecthd);
1586	LIST_INIT(&bmsafemap->sm_allocindirhd);
1587	LIST_INIT(&bmsafemap->sm_inodedephd);
1588	LIST_INIT(&bmsafemap->sm_newblkhd);
1589	ACQUIRE_LOCK(&lk);
1590	WORKLIST_INSERT(&bp->b_dep, &bmsafemap->sm_list);
1591	return (bmsafemap);
1592}
1593
1594/*
1595 * Direct block allocation dependencies.
1596 *
1597 * When a new block is allocated, the corresponding disk locations must be
1598 * initialized (with zeros or new data) before the on-disk inode points to
1599 * them.  Also, the freemap from which the block was allocated must be
1600 * updated (on disk) before the inode's pointer. These two dependencies are
1601 * independent of each other and are needed for all file blocks and indirect
1602 * blocks that are pointed to directly by the inode.  Just before the
1603 * "in-core" version of the inode is updated with a newly allocated block
1604 * number, a procedure (below) is called to setup allocation dependency
1605 * structures.  These structures are removed when the corresponding
1606 * dependencies are satisfied or when the block allocation becomes obsolete
1607 * (i.e., the file is deleted, the block is de-allocated, or the block is a
1608 * fragment that gets upgraded).  All of these cases are handled in
1609 * procedures described later.
1610 *
1611 * When a file extension causes a fragment to be upgraded, either to a larger
1612 * fragment or to a full block, the on-disk location may change (if the
1613 * previous fragment could not simply be extended). In this case, the old
1614 * fragment must be de-allocated, but not until after the inode's pointer has
1615 * been updated. In most cases, this is handled by later procedures, which
1616 * will construct a "freefrag" structure to be added to the workitem queue
1617 * when the inode update is complete (or obsolete).  The main exception to
1618 * this is when an allocation occurs while a pending allocation dependency
1619 * (for the same block pointer) remains.  This case is handled in the main
1620 * allocation dependency setup procedure by immediately freeing the
1621 * unreferenced fragments.
1622 */
1623void
1624softdep_setup_allocdirect(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
1625	struct inode *ip;	/* inode to which block is being added */
1626	ufs_lbn_t lbn;		/* block pointer within inode */
1627	ufs2_daddr_t newblkno;	/* disk block number being added */
1628	ufs2_daddr_t oldblkno;	/* previous block number, 0 unless frag */
1629	long newsize;		/* size of new block */
1630	long oldsize;		/* size of new block */
1631	struct buf *bp;		/* bp for allocated block */
1632{
1633	struct allocdirect *adp, *oldadp;
1634	struct allocdirectlst *adphead;
1635	struct bmsafemap *bmsafemap;
1636	struct inodedep *inodedep;
1637	struct pagedep *pagedep;
1638	struct newblk *newblk;
1639	struct mount *mp;
1640
1641	mp = UFSTOVFS(ip->i_ump);
1642	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
1643		M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
1644	workitem_alloc(&adp->ad_list, D_ALLOCDIRECT, mp);
1645	adp->ad_lbn = lbn;
1646	adp->ad_newblkno = newblkno;
1647	adp->ad_oldblkno = oldblkno;
1648	adp->ad_newsize = newsize;
1649	adp->ad_oldsize = oldsize;
1650	adp->ad_state = ATTACHED;
1651	LIST_INIT(&adp->ad_newdirblk);
1652	if (newblkno == oldblkno)
1653		adp->ad_freefrag = NULL;
1654	else
1655		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);
1656
1657	ACQUIRE_LOCK(&lk);
1658	if (lbn >= NDADDR) {
1659		/* allocating an indirect block */
1660		if (oldblkno != 0)
1661			panic("softdep_setup_allocdirect: non-zero indir");
1662	} else {
1663		/*
1664		 * Allocating a direct block.
1665		 *
1666		 * If we are allocating a directory block, then we must
1667		 * allocate an associated pagedep to track additions and
1668		 * deletions.
1669		 */
1670		if ((ip->i_mode & IFMT) == IFDIR &&
1671		    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
1672			WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
1673	}
1674	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
1675		panic("softdep_setup_allocdirect: lost block");
1676	if (newblk->nb_state == DEPCOMPLETE) {
1677		adp->ad_state |= DEPCOMPLETE;
1678		adp->ad_buf = NULL;
1679	} else {
1680		bmsafemap = newblk->nb_bmsafemap;
1681		adp->ad_buf = bmsafemap->sm_buf;
1682		LIST_REMOVE(newblk, nb_deps);
1683		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
1684	}
1685	LIST_REMOVE(newblk, nb_hash);
1686	FREE(newblk, M_NEWBLK);
1687
1688	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
1689	adp->ad_inodedep = inodedep;
1690	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
1691	/*
1692	 * The list of allocdirects must be kept sorted in ascending
1693	 * order so that the rollback routines can quickly determine the
1694	 * first uncommitted block (the size of the file stored on disk
1695	 * ends at the end of the lowest committed fragment, or if there
1696	 * are no fragments, at the end of the highest committed block).
1697	 * Since files generally grow, the typical case is that the new
1698	 * block is to be added at the end of the list. We speed this
1699	 * special case by checking against the last allocdirect in the
1700	 * list before laboriously traversing the list looking for the
1701	 * insertion point.
1702	 */
1703	adphead = &inodedep->id_newinoupdt;
1704	oldadp = TAILQ_LAST(adphead, allocdirectlst);
1705	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
1706		/* insert at end of list */
1707		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
1708		if (oldadp != NULL && oldadp->ad_lbn == lbn)
1709			allocdirect_merge(adphead, adp, oldadp);
1710		FREE_LOCK(&lk);
1711		return;
1712	}
1713	TAILQ_FOREACH(oldadp, adphead, ad_next) {
1714		if (oldadp->ad_lbn >= lbn)
1715			break;
1716	}
1717	if (oldadp == NULL)
1718		panic("softdep_setup_allocdirect: lost entry");
1719	/* insert in middle of list */
1720	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
1721	if (oldadp->ad_lbn == lbn)
1722		allocdirect_merge(adphead, adp, oldadp);
1723	FREE_LOCK(&lk);
1724}
1725
1726/*
1727 * Replace an old allocdirect dependency with a newer one.
1728 * This routine must be called with splbio interrupts blocked.
1729 */
1730static void
1731allocdirect_merge(adphead, newadp, oldadp)
1732	struct allocdirectlst *adphead;	/* head of list holding allocdirects */
1733	struct allocdirect *newadp;	/* allocdirect being added */
1734	struct allocdirect *oldadp;	/* existing allocdirect being checked */
1735{
1736	struct worklist *wk;
1737	struct freefrag *freefrag;
1738	struct newdirblk *newdirblk;
1739
1740	mtx_assert(&lk, MA_OWNED);
1741	if (newadp->ad_oldblkno != oldadp->ad_newblkno ||
1742	    newadp->ad_oldsize != oldadp->ad_newsize ||
1743	    newadp->ad_lbn >= NDADDR)
1744		panic("%s %jd != new %jd || old size %ld != new %ld",
1745		    "allocdirect_merge: old blkno",
1746		    (intmax_t)newadp->ad_oldblkno,
1747		    (intmax_t)oldadp->ad_newblkno,
1748		    newadp->ad_oldsize, oldadp->ad_newsize);
1749	newadp->ad_oldblkno = oldadp->ad_oldblkno;
1750	newadp->ad_oldsize = oldadp->ad_oldsize;
1751	/*
1752	 * If the old dependency had a fragment to free or had never
1753	 * previously had a block allocated, then the new dependency
1754	 * can immediately post its freefrag and adopt the old freefrag.
1755	 * This action is done by swapping the freefrag dependencies.
1756	 * The new dependency gains the old one's freefrag, and the
1757	 * old one gets the new one and then immediately puts it on
1758	 * the worklist when it is freed by free_allocdirect. It is
1759	 * not possible to do this swap when the old dependency had a
1760	 * non-zero size but no previous fragment to free. This condition
1761	 * arises when the new block is an extension of the old block.
1762	 * Here, the first part of the fragment allocated to the new
1763	 * dependency is part of the block currently claimed on disk by
1764	 * the old dependency, so cannot legitimately be freed until the
1765	 * conditions for the new dependency are fulfilled.
1766	 */
1767	if (oldadp->ad_freefrag != NULL || oldadp->ad_oldblkno == 0) {
1768		freefrag = newadp->ad_freefrag;
1769		newadp->ad_freefrag = oldadp->ad_freefrag;
1770		oldadp->ad_freefrag = freefrag;
1771	}
1772	/*
1773	 * If we are tracking a new directory-block allocation,
1774	 * move it from the old allocdirect to the new allocdirect.
1775	 */
1776	if ((wk = LIST_FIRST(&oldadp->ad_newdirblk)) != NULL) {
1777		newdirblk = WK_NEWDIRBLK(wk);
1778		WORKLIST_REMOVE(&newdirblk->db_list);
1779		if (LIST_FIRST(&oldadp->ad_newdirblk) != NULL)
1780			panic("allocdirect_merge: extra newdirblk");
1781		WORKLIST_INSERT(&newadp->ad_newdirblk, &newdirblk->db_list);
1782	}
1783	free_allocdirect(adphead, oldadp, 0);
1784}
1785
1786/*
1787 * Allocate a new freefrag structure if needed.
1788 */
1789static struct freefrag *
1790newfreefrag(ip, blkno, size)
1791	struct inode *ip;
1792	ufs2_daddr_t blkno;
1793	long size;
1794{
1795	struct freefrag *freefrag;
1796	struct fs *fs;
1797
1798	if (blkno == 0)
1799		return (NULL);
1800	fs = ip->i_fs;
1801	if (fragnum(fs, blkno) + numfrags(fs, size) > fs->fs_frag)
1802		panic("newfreefrag: frag size");
1803	MALLOC(freefrag, struct freefrag *, sizeof(struct freefrag),
1804		M_FREEFRAG, M_SOFTDEP_FLAGS);
1805	workitem_alloc(&freefrag->ff_list, D_FREEFRAG, UFSTOVFS(ip->i_ump));
1806	freefrag->ff_inum = ip->i_number;
1807	freefrag->ff_blkno = blkno;
1808	freefrag->ff_fragsize = size;
1809	return (freefrag);
1810}
1811
1812/*
1813 * This workitem de-allocates fragments that were replaced during
1814 * file block allocation.
1815 */
1816static void
1817handle_workitem_freefrag(freefrag)
1818	struct freefrag *freefrag;
1819{
1820	struct ufsmount *ump = VFSTOUFS(freefrag->ff_list.wk_mp);
1821
1822	ffs_blkfree(ump, ump->um_fs, ump->um_devvp, freefrag->ff_blkno,
1823	    freefrag->ff_fragsize, freefrag->ff_inum);
1824	ACQUIRE_LOCK(&lk);
1825	WORKITEM_FREE(freefrag, D_FREEFRAG);
1826	FREE_LOCK(&lk);
1827}
1828
1829/*
1830 * Set up a dependency structure for an external attributes data block.
1831 * This routine follows much of the structure of softdep_setup_allocdirect.
1832 * See the description of softdep_setup_allocdirect above for details.
1833 */
1834void
1835softdep_setup_allocext(ip, lbn, newblkno, oldblkno, newsize, oldsize, bp)
1836	struct inode *ip;
1837	ufs_lbn_t lbn;
1838	ufs2_daddr_t newblkno;
1839	ufs2_daddr_t oldblkno;
1840	long newsize;
1841	long oldsize;
1842	struct buf *bp;
1843{
1844	struct allocdirect *adp, *oldadp;
1845	struct allocdirectlst *adphead;
1846	struct bmsafemap *bmsafemap;
1847	struct inodedep *inodedep;
1848	struct newblk *newblk;
1849	struct mount *mp;
1850
1851	mp = UFSTOVFS(ip->i_ump);
1852	MALLOC(adp, struct allocdirect *, sizeof(struct allocdirect),
1853		M_ALLOCDIRECT, M_SOFTDEP_FLAGS|M_ZERO);
1854	workitem_alloc(&adp->ad_list, D_ALLOCDIRECT, mp);
1855	adp->ad_lbn = lbn;
1856	adp->ad_newblkno = newblkno;
1857	adp->ad_oldblkno = oldblkno;
1858	adp->ad_newsize = newsize;
1859	adp->ad_oldsize = oldsize;
1860	adp->ad_state = ATTACHED | EXTDATA;
1861	LIST_INIT(&adp->ad_newdirblk);
1862	if (newblkno == oldblkno)
1863		adp->ad_freefrag = NULL;
1864	else
1865		adp->ad_freefrag = newfreefrag(ip, oldblkno, oldsize);
1866
1867	ACQUIRE_LOCK(&lk);
1868	if (newblk_lookup(ip->i_fs, newblkno, 0, &newblk) == 0)
1869		panic("softdep_setup_allocext: lost block");
1870
1871	inodedep_lookup(mp, ip->i_number, DEPALLOC | NODELAY, &inodedep);
1872	adp->ad_inodedep = inodedep;
1873
1874	if (newblk->nb_state == DEPCOMPLETE) {
1875		adp->ad_state |= DEPCOMPLETE;
1876		adp->ad_buf = NULL;
1877	} else {
1878		bmsafemap = newblk->nb_bmsafemap;
1879		adp->ad_buf = bmsafemap->sm_buf;
1880		LIST_REMOVE(newblk, nb_deps);
1881		LIST_INSERT_HEAD(&bmsafemap->sm_allocdirecthd, adp, ad_deps);
1882	}
1883	LIST_REMOVE(newblk, nb_hash);
1884	FREE(newblk, M_NEWBLK);
1885
1886	WORKLIST_INSERT(&bp->b_dep, &adp->ad_list);
1887	if (lbn >= NXADDR)
1888		panic("softdep_setup_allocext: lbn %lld > NXADDR",
1889		    (long long)lbn);
1890	/*
1891	 * The list of allocdirects must be kept sorted in ascending
1892	 * order so that the rollback routines can quickly determine the
1893	 * first uncommitted block (the size of the file stored on disk
1894	 * ends at the end of the lowest committed fragment, or if there
1895	 * are no fragments, at the end of the highest committed block).
1896	 * Since files generally grow, the typical case is that the new
1897	 * block is to be added at the end of the list. We speed this
1898	 * special case by checking against the last allocdirect in the
1899	 * list before laboriously traversing the list looking for the
1900	 * insertion point.
1901	 */
1902	adphead = &inodedep->id_newextupdt;
1903	oldadp = TAILQ_LAST(adphead, allocdirectlst);
1904	if (oldadp == NULL || oldadp->ad_lbn <= lbn) {
1905		/* insert at end of list */
1906		TAILQ_INSERT_TAIL(adphead, adp, ad_next);
1907		if (oldadp != NULL && oldadp->ad_lbn == lbn)
1908			allocdirect_merge(adphead, adp, oldadp);
1909		FREE_LOCK(&lk);
1910		return;
1911	}
1912	TAILQ_FOREACH(oldadp, adphead, ad_next) {
1913		if (oldadp->ad_lbn >= lbn)
1914			break;
1915	}
1916	if (oldadp == NULL)
1917		panic("softdep_setup_allocext: lost entry");
1918	/* insert in middle of list */
1919	TAILQ_INSERT_BEFORE(oldadp, adp, ad_next);
1920	if (oldadp->ad_lbn == lbn)
1921		allocdirect_merge(adphead, adp, oldadp);
1922	FREE_LOCK(&lk);
1923}
1924
1925/*
1926 * Indirect block allocation dependencies.
1927 *
1928 * The same dependencies that exist for a direct block also exist when
1929 * a new block is allocated and pointed to by an entry in a block of
1930 * indirect pointers. The undo/redo states described above are also
1931 * used here. Because an indirect block contains many pointers that
1932 * may have dependencies, a second copy of the entire in-memory indirect
1933 * block is kept. The buffer cache copy is always completely up-to-date.
1934 * The second copy, which is used only as a source for disk writes,
1935 * contains only the safe pointers (i.e., those that have no remaining
1936 * update dependencies). The second copy is freed when all pointers
1937 * are safe. The cache is not allowed to replace indirect blocks with
1938 * pending update dependencies. If a buffer containing an indirect
1939 * block with dependencies is written, these routines will mark it
1940 * dirty again. It can only be successfully written once all the
1941 * dependencies are removed. The ffs_fsync routine and
1942 * softdep_sync_metadata work together to get all the dependencies
1943 * removed so that a file can be successfully written to disk. Three
1944 * procedures are used when setting up indirect block pointer
1945 * dependencies. The division is necessary because of the organization
1946 * of the "balloc" routine and because of the distinction between file
1947 * pages and file metadata blocks.
1948 */
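
/*
 * Illustrative sketch only (not compiled): the relationship between
 * the two copies of an indirect block. The buffer cache copy (bp)
 * always holds the new pointer; the save buffer (ir_savebp), the one
 * actually written to disk, keeps the old, safe value until the
 * dependency completes. This mirrors the UFS2 branch of
 * setup_allocindir_phase2() below; the function name is invented.
 */
#if 0
static void
example_rollback_pointer(bp, indirdep, aip)
	struct buf *bp;
	struct indirdep *indirdep;
	struct allocindir *aip;
{

	/* In-core (cache) copy: always completely up-to-date. */
	((ufs2_daddr_t *)bp->b_data)[aip->ai_offset] = aip->ai_newblkno;
	/* Safe copy used for disk writes: committed pointers only. */
	((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
	    aip->ai_oldblkno;
}
#endif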
1949
1950/*
1951 * Allocate a new allocindir structure.
1952 */
1953static struct allocindir *
1954newallocindir(ip, ptrno, newblkno, oldblkno)
1955	struct inode *ip;	/* inode for file being extended */
1956	int ptrno;		/* offset of pointer in indirect block */
1957	ufs2_daddr_t newblkno;	/* disk block number being added */
1958	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
1959{
1960	struct allocindir *aip;
1961
1962	MALLOC(aip, struct allocindir *, sizeof(struct allocindir),
1963		M_ALLOCINDIR, M_SOFTDEP_FLAGS|M_ZERO);
1964	workitem_alloc(&aip->ai_list, D_ALLOCINDIR, UFSTOVFS(ip->i_ump));
1965	aip->ai_state = ATTACHED;
1966	aip->ai_offset = ptrno;
1967	aip->ai_newblkno = newblkno;
1968	aip->ai_oldblkno = oldblkno;
1969	aip->ai_freefrag = newfreefrag(ip, oldblkno, ip->i_fs->fs_bsize);
1970	return (aip);
1971}
1972
1973/*
1974 * Called just before setting an indirect block pointer
1975 * to a newly allocated file page.
1976 */
1977void
1978softdep_setup_allocindir_page(ip, lbn, bp, ptrno, newblkno, oldblkno, nbp)
1979	struct inode *ip;	/* inode for file being extended */
1980	ufs_lbn_t lbn;		/* allocated block number within file */
1981	struct buf *bp;		/* buffer with indirect blk referencing page */
1982	int ptrno;		/* offset of pointer in indirect block */
1983	ufs2_daddr_t newblkno;	/* disk block number being added */
1984	ufs2_daddr_t oldblkno;	/* previous block number, 0 if none */
1985	struct buf *nbp;	/* buffer holding allocated page */
1986{
1987	struct allocindir *aip;
1988	struct pagedep *pagedep;
1989
1990	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_page");
1991	aip = newallocindir(ip, ptrno, newblkno, oldblkno);
1992	ACQUIRE_LOCK(&lk);
1993	/*
1994	 * If we are allocating a directory page, then we must
1995	 * allocate an associated pagedep to track additions and
1996	 * deletions.
1997	 */
1998	if ((ip->i_mode & IFMT) == IFDIR &&
1999	    pagedep_lookup(ip, lbn, DEPALLOC, &pagedep) == 0)
2000		WORKLIST_INSERT(&nbp->b_dep, &pagedep->pd_list);
2001	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
2002	setup_allocindir_phase2(bp, ip, aip);
2003	FREE_LOCK(&lk);
2004}
2005
2006/*
2007 * Called just before setting an indirect block pointer to a
2008 * newly allocated indirect block.
2009 */
2010void
2011softdep_setup_allocindir_meta(nbp, ip, bp, ptrno, newblkno)
2012	struct buf *nbp;	/* newly allocated indirect block */
2013	struct inode *ip;	/* inode for file being extended */
2014	struct buf *bp;		/* indirect block referencing allocated block */
2015	int ptrno;		/* offset of pointer in indirect block */
2016	ufs2_daddr_t newblkno;	/* disk block number being added */
2017{
2018	struct allocindir *aip;
2019
2020	ASSERT_VOP_LOCKED(ITOV(ip), "softdep_setup_allocindir_meta");
2021	aip = newallocindir(ip, ptrno, newblkno, 0);
2022	ACQUIRE_LOCK(&lk);
2023	WORKLIST_INSERT(&nbp->b_dep, &aip->ai_list);
2024	setup_allocindir_phase2(bp, ip, aip);
2025	FREE_LOCK(&lk);
2026}
2027
2028/*
2029 * Called to finish setting up the "aip" allocindir allocated
2030 * by one of the two routines above.
2031 */
2032static void
2033setup_allocindir_phase2(bp, ip, aip)
2034	struct buf *bp;		/* in-memory copy of the indirect block */
2035	struct inode *ip;	/* inode for file being extended */
2036	struct allocindir *aip;	/* allocindir allocated by the above routines */
2037{
2038	struct worklist *wk;
2039	struct indirdep *indirdep, *newindirdep;
2040	struct bmsafemap *bmsafemap;
2041	struct allocindir *oldaip;
2042	struct freefrag *freefrag;
2043	struct newblk *newblk;
2044	ufs2_daddr_t blkno;
2045
2046	mtx_assert(&lk, MA_OWNED);
2047	if (bp->b_lblkno >= 0)
2048		panic("setup_allocindir_phase2: not indir blk");
2049	for (indirdep = NULL, newindirdep = NULL; ; ) {
2050		LIST_FOREACH(wk, &bp->b_dep, wk_list) {
2051			if (wk->wk_type != D_INDIRDEP)
2052				continue;
2053			indirdep = WK_INDIRDEP(wk);
2054			break;
2055		}
2056		if (indirdep == NULL && newindirdep) {
2057			indirdep = newindirdep;
2058			WORKLIST_INSERT(&bp->b_dep, &indirdep->ir_list);
2059			newindirdep = NULL;
2060		}
2061		if (indirdep) {
2062			if (newblk_lookup(ip->i_fs, aip->ai_newblkno, 0,
2063			    &newblk) == 0)
2064				panic("setup_allocindir: lost block");
2065			if (newblk->nb_state == DEPCOMPLETE) {
2066				aip->ai_state |= DEPCOMPLETE;
2067				aip->ai_buf = NULL;
2068			} else {
2069				bmsafemap = newblk->nb_bmsafemap;
2070				aip->ai_buf = bmsafemap->sm_buf;
2071				LIST_REMOVE(newblk, nb_deps);
2072				LIST_INSERT_HEAD(&bmsafemap->sm_allocindirhd,
2073				    aip, ai_deps);
2074			}
2075			LIST_REMOVE(newblk, nb_hash);
2076			FREE(newblk, M_NEWBLK);
2077			aip->ai_indirdep = indirdep;
2078			/*
2079			 * Check to see if there is an existing dependency
2080			 * for this block. If there is, merge the old
2081			 * dependency into the new one.
2082			 */
2083			if (aip->ai_oldblkno == 0)
2084				oldaip = NULL;
2085			else
2087				LIST_FOREACH(oldaip, &indirdep->ir_deplisthd, ai_next)
2088					if (oldaip->ai_offset == aip->ai_offset)
2089						break;
2090			freefrag = NULL;
2091			if (oldaip != NULL) {
2092				if (oldaip->ai_newblkno != aip->ai_oldblkno)
2093					panic("setup_allocindir_phase2: blkno");
2094				aip->ai_oldblkno = oldaip->ai_oldblkno;
2095				freefrag = aip->ai_freefrag;
2096				aip->ai_freefrag = oldaip->ai_freefrag;
2097				oldaip->ai_freefrag = NULL;
2098				free_allocindir(oldaip, NULL);
2099			}
2100			LIST_INSERT_HEAD(&indirdep->ir_deplisthd, aip, ai_next);
2101			if (ip->i_ump->um_fstype == UFS1)
2102				((ufs1_daddr_t *)indirdep->ir_savebp->b_data)
2103				    [aip->ai_offset] = aip->ai_oldblkno;
2104			else
2105				((ufs2_daddr_t *)indirdep->ir_savebp->b_data)
2106				    [aip->ai_offset] = aip->ai_oldblkno;
2107			FREE_LOCK(&lk);
2108			if (freefrag != NULL)
2109				handle_workitem_freefrag(freefrag);
2110		} else
2111			FREE_LOCK(&lk);
2112		if (newindirdep) {
2113			newindirdep->ir_savebp->b_flags |= B_INVAL | B_NOCACHE;
2114			brelse(newindirdep->ir_savebp);
2115			ACQUIRE_LOCK(&lk);
2116			WORKITEM_FREE((caddr_t)newindirdep, D_INDIRDEP);
2117			if (indirdep)
2118				break;
2119			FREE_LOCK(&lk);
2120		}
2121		if (indirdep) {
2122			ACQUIRE_LOCK(&lk);
2123			break;
2124		}
2125		MALLOC(newindirdep, struct indirdep *, sizeof(struct indirdep),
2126			M_INDIRDEP, M_SOFTDEP_FLAGS);
2127		workitem_alloc(&newindirdep->ir_list, D_INDIRDEP,
2128		    UFSTOVFS(ip->i_ump));
2129		newindirdep->ir_state = ATTACHED;
2130		if (ip->i_ump->um_fstype == UFS1)
2131			newindirdep->ir_state |= UFS1FMT;
2132		LIST_INIT(&newindirdep->ir_deplisthd);
2133		LIST_INIT(&newindirdep->ir_donehd);
2134		if (bp->b_blkno == bp->b_lblkno) {
2135			ufs_bmaparray(bp->b_vp, bp->b_lblkno, &blkno, bp,
2136			    NULL, NULL);
2137			bp->b_blkno = blkno;
2138		}
2139		newindirdep->ir_savebp =
2140		    getblk(ip->i_devvp, bp->b_blkno, bp->b_bcount, 0, 0, 0);
2141		BUF_KERNPROC(newindirdep->ir_savebp);
2142		bcopy(bp->b_data, newindirdep->ir_savebp->b_data, bp->b_bcount);
2143		ACQUIRE_LOCK(&lk);
2144	}
2145}
2146
2147/*
2148 * Block de-allocation dependencies.
2149 *
2150 * When blocks are de-allocated, the on-disk pointers must be nullified before
2151 * the blocks are made available for use by other files.  (The true
2152 * requirement is that old pointers must be nullified before new on-disk
2153 * pointers are set.  We chose this slightly more stringent requirement to
2154 * reduce complexity.) Our implementation handles this dependency by updating
2155 * the inode (or indirect block) appropriately but delaying the actual block
2156 * de-allocation (i.e., freemap and free space count manipulation) until
2157 * after the updated versions reach stable storage.  After the disk is
2158 * updated, the blocks can be safely de-allocated whenever it is convenient.
2159 * This implementation handles only the common case of reducing a file's
2160 * length to zero. Other cases are handled by the conventional synchronous
2161 * write approach.
2162 *
2163 * The ffs implementation with which we worked double-checks
2164 * the state of the block pointers and file size as it reduces
2165 * a file's length.  Some of this code is replicated here in our
2166 * soft updates implementation.  The freeblks->fb_chkcnt field is
2167 * used to transfer a part of this information to the procedure
2168 * that eventually de-allocates the blocks.
2169 *
2170 * This routine should be called from the routine that shortens
2171 * a file's length, before the inode's size or block pointers
2172 * are modified. It will save the block pointer information for
2173 * later release and zero the inode so that the calling routine
2174 * can release it.
2175 */
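
/*
 * Illustrative sketch only (not compiled): the ordering a truncation
 * path follows when soft updates are enabled. In the real kernel
 * ffs_truncate() makes this call when the length is zero; the helper
 * shown here is invented for illustration.
 */
#if 0
static void
example_truncate_to_zero(ip)
	struct inode *ip;
{

	/*
	 * Save the block pointers and zero them in the in-core inode
	 * before the inode's size or pointers are modified. The
	 * blocks themselves are freed only after the zero'ed inode
	 * has reached stable storage.
	 */
	softdep_setup_freeblocks(ip, (off_t)0, IO_NORMAL | IO_EXT);
	ip->i_flag |= IN_CHANGE | IN_UPDATE;
}
#endif
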
2176void
2177softdep_setup_freeblocks(ip, length, flags)
2178	struct inode *ip;	/* The inode whose length is to be reduced */
2179	off_t length;		/* The new length for the file */
2180	int flags;		/* IO_EXT and/or IO_NORMAL */
2181{
2182	struct freeblks *freeblks;
2183	struct inodedep *inodedep;
2184	struct allocdirect *adp;
2185	struct vnode *vp;
2186	struct buf *bp;
2187	struct fs *fs;
2188	ufs2_daddr_t extblocks, datablocks;
2189	struct mount *mp;
2190	int i, delay, error;
2191
2192	fs = ip->i_fs;
2193	mp = UFSTOVFS(ip->i_ump);
2194	if (length != 0)
2195		panic("softdep_setup_freeblocks: non-zero length");
2196	MALLOC(freeblks, struct freeblks *, sizeof(struct freeblks),
2197		M_FREEBLKS, M_SOFTDEP_FLAGS|M_ZERO);
2198	workitem_alloc(&freeblks->fb_list, D_FREEBLKS, mp);
2199	freeblks->fb_state = ATTACHED;
2200	freeblks->fb_uid = ip->i_uid;
2201	freeblks->fb_previousinum = ip->i_number;
2202	freeblks->fb_devvp = ip->i_devvp;
2203	extblocks = 0;
2204	if (fs->fs_magic == FS_UFS2_MAGIC)
2205		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
2206	datablocks = DIP(ip, i_blocks) - extblocks;
2207	if ((flags & IO_NORMAL) == 0) {
2208		freeblks->fb_oldsize = 0;
2209		freeblks->fb_chkcnt = 0;
2210	} else {
2211		freeblks->fb_oldsize = ip->i_size;
2212		ip->i_size = 0;
2213		DIP_SET(ip, i_size, 0);
2214		freeblks->fb_chkcnt = datablocks;
2215		for (i = 0; i < NDADDR; i++) {
2216			freeblks->fb_dblks[i] = DIP(ip, i_db[i]);
2217			DIP_SET(ip, i_db[i], 0);
2218		}
2219		for (i = 0; i < NIADDR; i++) {
2220			freeblks->fb_iblks[i] = DIP(ip, i_ib[i]);
2221			DIP_SET(ip, i_ib[i], 0);
2222		}
2223		/*
2224		 * If the file was removed, then the space being freed was
2225		 * accounted for then (see softdep_filereleased()). If the
2226		 * file is merely being truncated, then we account for it now.
2227		 */
2228		if ((ip->i_flag & IN_SPACECOUNTED) == 0) {
2229			UFS_LOCK(ip->i_ump);
2230			fs->fs_pendingblocks += datablocks;
2231			UFS_UNLOCK(ip->i_ump);
2232		}
2233	}
2234	if ((flags & IO_EXT) == 0) {
2235		freeblks->fb_oldextsize = 0;
2236	} else {
2237		freeblks->fb_oldextsize = ip->i_din2->di_extsize;
2238		ip->i_din2->di_extsize = 0;
2239		freeblks->fb_chkcnt += extblocks;
2240		for (i = 0; i < NXADDR; i++) {
2241			freeblks->fb_eblks[i] = ip->i_din2->di_extb[i];
2242			ip->i_din2->di_extb[i] = 0;
2243		}
2244	}
2245	DIP_SET(ip, i_blocks, DIP(ip, i_blocks) - freeblks->fb_chkcnt);
2246	/*
2247	 * Push the zero'ed inode to its disk buffer so that we are free
2248	 * to delete its dependencies below. Once the dependencies are gone
2249	 * the buffer can be safely released.
2250	 */
2251	if ((error = bread(ip->i_devvp,
2252	    fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
2253	    (int)fs->fs_bsize, NOCRED, &bp)) != 0) {
2254		brelse(bp);
2255		softdep_error("softdep_setup_freeblocks", error);
2256	}
2257	if (ip->i_ump->um_fstype == UFS1)
2258		*((struct ufs1_dinode *)bp->b_data +
2259		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
2260	else
2261		*((struct ufs2_dinode *)bp->b_data +
2262		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
2263	/*
2264	 * Find and eliminate any inode dependencies.
2265	 */
2266	ACQUIRE_LOCK(&lk);
2267	(void) inodedep_lookup(mp, ip->i_number, DEPALLOC, &inodedep);
2268	if ((inodedep->id_state & IOSTARTED) != 0)
2269		panic("softdep_setup_freeblocks: inode busy");
2270	/*
2271	 * Add the freeblks structure to the list of operations that
2272	 * must await the zero'ed inode being written to disk. If we
2273	 * still have a bitmap dependency (delay == 0), then the inode
2274	 * has never been written to disk, so we can process the
2275	 * freeblks below once we have deleted the dependencies.
2276	 */
2277	delay = (inodedep->id_state & DEPCOMPLETE);
2278	if (delay)
2279		WORKLIST_INSERT(&inodedep->id_bufwait, &freeblks->fb_list);
2280	/*
2281	 * Because the file length has been truncated to zero, any
2282	 * pending block allocation dependency structures associated
2283	 * with this inode are obsolete and can simply be de-allocated.
2284	 * We must first merge the two dependency lists to get rid of
2285	 * any duplicate freefrag structures, then purge the merged list.
2286	 * If we still have a bitmap dependency, then the inode has never
2287	 * been written to disk, so we can free any fragments without delay.
2288	 */
2289	if (flags & IO_NORMAL) {
2290		merge_inode_lists(&inodedep->id_newinoupdt,
2291		    &inodedep->id_inoupdt);
2292		while ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != 0)
2293			free_allocdirect(&inodedep->id_inoupdt, adp, delay);
2294	}
2295	if (flags & IO_EXT) {
2296		merge_inode_lists(&inodedep->id_newextupdt,
2297		    &inodedep->id_extupdt);
2298		while ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != 0)
2299			free_allocdirect(&inodedep->id_extupdt, adp, delay);
2300	}
2301	FREE_LOCK(&lk);
2302	bdwrite(bp);
2303	/*
2304	 * We must wait for any I/O in progress to finish so that
2305	 * all potential buffers on the dirty list will be visible.
2306	 * Once they are all there, walk the list and get rid of
2307	 * any dependencies.
2308	 */
2309	vp = ITOV(ip);
2310	VI_LOCK(vp);
2311	drain_output(vp);
2312restart:
2313	TAILQ_FOREACH(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs) {
2314		if (((flags & IO_EXT) == 0 && (bp->b_xflags & BX_ALTDATA)) ||
2315		    ((flags & IO_NORMAL) == 0 &&
2316		      (bp->b_xflags & BX_ALTDATA) == 0))
2317			continue;
2318		if ((bp = getdirtybuf(bp, VI_MTX(vp), MNT_WAIT)) == NULL)
2319			goto restart;
2320		VI_UNLOCK(vp);
2321		ACQUIRE_LOCK(&lk);
2322		(void) inodedep_lookup(mp, ip->i_number, 0, &inodedep);
2323		deallocate_dependencies(bp, inodedep);
2324		FREE_LOCK(&lk);
2325		bp->b_flags |= B_INVAL | B_NOCACHE;
2326		brelse(bp);
2327		VI_LOCK(vp);
2328		goto restart;
2329	}
2330	VI_UNLOCK(vp);
2331	ACQUIRE_LOCK(&lk);
2332	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
2333		(void) free_inodedep(inodedep);
2334
2335	if (delay) {
2336		freeblks->fb_state |= DEPCOMPLETE;
2337		/*
2338		 * If the inode with zeroed block pointers is now on disk
2339		 * we can start freeing blocks. Add freeblks to the worklist
2340		 * instead of calling handle_workitem_freeblocks directly as
2341		 * it is more likely that additional I/O is needed to complete
2342		 * the request here than in the !delay case.
2343		 */
2344		if ((freeblks->fb_state & ALLCOMPLETE) == ALLCOMPLETE)
2345			add_to_worklist(&freeblks->fb_list);
2346	}
2347
2348	FREE_LOCK(&lk);
2349	/*
2350	 * If the inode has never been written to disk (delay == 0),
2351	 * then we can process the freeblks now that we have deleted
2352	 * the dependencies.
2353	 */
2354	if (!delay)
2355		handle_workitem_freeblocks(freeblks, 0);
2356}
2357
2358/*
2359 * Reclaim any dependency structures from a buffer that is about to
2360 * be reallocated to a new vnode. The buffer must be locked; thus,
2361 * no I/O completion operations can occur while we are manipulating
2362 * its associated dependencies. The mutex is held so that other I/Os
2363 * associated with related dependencies do not occur.
2364 */
2365static void
2366deallocate_dependencies(bp, inodedep)
2367	struct buf *bp;
2368	struct inodedep *inodedep;
2369{
2370	struct worklist *wk;
2371	struct indirdep *indirdep;
2372	struct allocindir *aip;
2373	struct pagedep *pagedep;
2374	struct dirrem *dirrem;
2375	struct diradd *dap;
2376	int i;
2377
2378	mtx_assert(&lk, MA_OWNED);
2379	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2380		switch (wk->wk_type) {
2381
2382		case D_INDIRDEP:
2383			indirdep = WK_INDIRDEP(wk);
2384			/*
2385			 * None of the indirect pointers will ever be visible,
2386			 * so they can simply be tossed. GOINGAWAY ensures
2387			 * that allocated pointers will be saved in the buffer
2388			 * cache until they are freed. Note that they will
2389			 * only be able to be found by their physical address
2390			 * since the inode mapping the logical address will
2391			 * be gone. The save buffer used for the safe copy
2392			 * was allocated in setup_allocindir_phase2 using
2393			 * the physical address so it could be used for this
2394			 * purpose. Hence we swap the safe copy with the real
2395			 * copy, allowing the safe copy to be freed and holding
2396			 * on to the real copy for later use in indir_trunc.
2397			 */
2398			if (indirdep->ir_state & GOINGAWAY)
2399				panic("deallocate_dependencies: already gone");
2400			indirdep->ir_state |= GOINGAWAY;
2401			VFSTOUFS(bp->b_vp->v_mount)->um_numindirdeps += 1;
2402			while ((aip = LIST_FIRST(&indirdep->ir_deplisthd)) != 0)
2403				free_allocindir(aip, inodedep);
2404			if (bp->b_lblkno >= 0 ||
2405			    bp->b_blkno != indirdep->ir_savebp->b_lblkno)
2406				panic("deallocate_dependencies: not indir");
2407			bcopy(bp->b_data, indirdep->ir_savebp->b_data,
2408			    bp->b_bcount);
2409			WORKLIST_REMOVE(wk);
2410			WORKLIST_INSERT(&indirdep->ir_savebp->b_dep, wk);
2411			continue;
2412
2413		case D_PAGEDEP:
2414			pagedep = WK_PAGEDEP(wk);
2415			/*
2416			 * None of the directory additions will ever be
2417			 * visible, so they can simply be tossed.
2418			 */
2419			for (i = 0; i < DAHASHSZ; i++)
2420				while ((dap =
2421				    LIST_FIRST(&pagedep->pd_diraddhd[i])))
2422					free_diradd(dap);
2423			while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != 0)
2424				free_diradd(dap);
2425			/*
2426			 * Copy any directory remove dependencies to the list
2427			 * to be processed after the zero'ed inode is written.
2428			 * If the inode has already been written, then they
2429			 * can be dumped directly onto the work list.
2430			 */
2431			LIST_FOREACH(dirrem, &pagedep->pd_dirremhd, dm_next) {
2432				LIST_REMOVE(dirrem, dm_next);
2433				dirrem->dm_dirinum = pagedep->pd_ino;
2434				if (inodedep == NULL ||
2435				    (inodedep->id_state & ALLCOMPLETE) ==
2436				     ALLCOMPLETE)
2437					add_to_worklist(&dirrem->dm_list);
2438				else
2439					WORKLIST_INSERT(&inodedep->id_bufwait,
2440					    &dirrem->dm_list);
2441			}
2442			if ((pagedep->pd_state & NEWBLOCK) != 0) {
2443				LIST_FOREACH(wk, &inodedep->id_bufwait, wk_list)
2444					if (wk->wk_type == D_NEWDIRBLK &&
2445					    WK_NEWDIRBLK(wk)->db_pagedep ==
2446					      pagedep)
2447						break;
2448				if (wk != NULL) {
2449					WORKLIST_REMOVE(wk);
2450					free_newdirblk(WK_NEWDIRBLK(wk));
2451				} else
2452					panic("deallocate_dependencies: "
2453					      "lost pagedep");
2454			}
2455			WORKLIST_REMOVE(&pagedep->pd_list);
2456			LIST_REMOVE(pagedep, pd_hash);
2457			WORKITEM_FREE(pagedep, D_PAGEDEP);
2458			continue;
2459
2460		case D_ALLOCINDIR:
2461			free_allocindir(WK_ALLOCINDIR(wk), inodedep);
2462			continue;
2463
2464		case D_ALLOCDIRECT:
2465		case D_INODEDEP:
2466			panic("deallocate_dependencies: Unexpected type %s",
2467			    TYPENAME(wk->wk_type));
2468			/* NOTREACHED */
2469
2470		default:
2471			panic("deallocate_dependencies: Unknown type %s",
2472			    TYPENAME(wk->wk_type));
2473			/* NOTREACHED */
2474		}
2475	}
2476}
2477
2478/*
2479 * Free an allocdirect. Generate a new freefrag work request if appropriate.
2480 * This routine must be called with splbio interrupts blocked.
2481 */
2482static void
2483free_allocdirect(adphead, adp, delay)
2484	struct allocdirectlst *adphead;
2485	struct allocdirect *adp;
2486	int delay;
2487{
2488	struct newdirblk *newdirblk;
2489	struct worklist *wk;
2490
2491	mtx_assert(&lk, MA_OWNED);
2492	if ((adp->ad_state & DEPCOMPLETE) == 0)
2493		LIST_REMOVE(adp, ad_deps);
2494	TAILQ_REMOVE(adphead, adp, ad_next);
2495	if ((adp->ad_state & COMPLETE) == 0)
2496		WORKLIST_REMOVE(&adp->ad_list);
2497	if (adp->ad_freefrag != NULL) {
2498		if (delay)
2499			WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
2500			    &adp->ad_freefrag->ff_list);
2501		else
2502			add_to_worklist(&adp->ad_freefrag->ff_list);
2503	}
2504	if ((wk = LIST_FIRST(&adp->ad_newdirblk)) != NULL) {
2505		newdirblk = WK_NEWDIRBLK(wk);
2506		WORKLIST_REMOVE(&newdirblk->db_list);
2507		if (LIST_FIRST(&adp->ad_newdirblk) != NULL)
2508			panic("free_allocdirect: extra newdirblk");
2509		if (delay)
2510			WORKLIST_INSERT(&adp->ad_inodedep->id_bufwait,
2511			    &newdirblk->db_list);
2512		else
2513			free_newdirblk(newdirblk);
2514	}
2515	WORKITEM_FREE(adp, D_ALLOCDIRECT);
2516}
2517
2518/*
2519 * Free a newdirblk. Clear the NEWBLOCK flag on its associated pagedep.
2520 * This routine must be called with splbio interrupts blocked.
2521 */
2522static void
2523free_newdirblk(newdirblk)
2524	struct newdirblk *newdirblk;
2525{
2526	struct pagedep *pagedep;
2527	struct diradd *dap;
2528	int i;
2529
2530	mtx_assert(&lk, MA_OWNED);
2531	/*
2532	 * If the pagedep is still linked onto the directory buffer
2533	 * dependency chain, then some of the entries on the
2534	 * pd_pendinghd list may not be committed to disk yet. In
2535	 * this case, we will simply clear the NEWBLOCK flag and
2536	 * let the pd_pendinghd list be processed when the pagedep
2537	 * is next written. If the pagedep is no longer on the buffer
2538	 * dependency chain, then all the entries on the pd_pendinghd
2539	 * list are committed to disk and we can free them here.
2540	 */
2541	pagedep = newdirblk->db_pagedep;
2542	pagedep->pd_state &= ~NEWBLOCK;
2543	if ((pagedep->pd_state & ONWORKLIST) == 0)
2544		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
2545			free_diradd(dap);
2546	/*
2547	 * If no dependencies remain, the pagedep will be freed.
2548	 */
2549	for (i = 0; i < DAHASHSZ; i++)
2550		if (LIST_FIRST(&pagedep->pd_diraddhd[i]) != NULL)
2551			break;
2552	if (i == DAHASHSZ && (pagedep->pd_state & ONWORKLIST) == 0) {
2553		LIST_REMOVE(pagedep, pd_hash);
2554		WORKITEM_FREE(pagedep, D_PAGEDEP);
2555	}
2556	WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
2557}
2558
2559/*
2560 * Prepare an inode to be freed. The actual free operation is not
2561 * done until the zero'ed inode has been written to disk.
2562 */
2563void
2564softdep_freefile(pvp, ino, mode)
2565	struct vnode *pvp;
2566	ino_t ino;
2567	int mode;
2568{
2569	struct inode *ip = VTOI(pvp);
2570	struct inodedep *inodedep;
2571	struct freefile *freefile;
2572
2573	/*
2574	 * This sets up the inode de-allocation dependency.
2575	 */
2576	MALLOC(freefile, struct freefile *, sizeof(struct freefile),
2577		M_FREEFILE, M_SOFTDEP_FLAGS);
2578	workitem_alloc(&freefile->fx_list, D_FREEFILE, pvp->v_mount);
2579	freefile->fx_mode = mode;
2580	freefile->fx_oldinum = ino;
2581	freefile->fx_devvp = ip->i_devvp;
2582	if ((ip->i_flag & IN_SPACECOUNTED) == 0) {
2583		UFS_LOCK(ip->i_ump);
2584		ip->i_fs->fs_pendinginodes += 1;
2585		UFS_UNLOCK(ip->i_ump);
2586	}
2587
2588	/*
2589	 * If the inodedep does not exist, then the zero'ed inode has
2590	 * been written to disk. If the allocated inode has never been
2591	 * written to disk, then the on-disk inode is zero'ed. In either
2592	 * case we can free the file immediately.
2593	 */
2594	ACQUIRE_LOCK(&lk);
2595	if (inodedep_lookup(pvp->v_mount, ino, 0, &inodedep) == 0 ||
2596	    check_inode_unwritten(inodedep)) {
2597		FREE_LOCK(&lk);
2598		handle_workitem_freefile(freefile);
2599		return;
2600	}
2601	WORKLIST_INSERT(&inodedep->id_inowait, &freefile->fx_list);
2602	FREE_LOCK(&lk);
2603}
2604
2605/*
2606 * Check to see if an inode has never been written to disk. If
2607 * so free the inodedep and return success, otherwise return failure.
2608 * This routine must be called with splbio interrupts blocked.
2609 *
2610 * If we still have a bitmap dependency, then the inode has never
2611 * been written to disk. Drop the dependency as it is no longer
2612 * necessary since the inode is being deallocated. We set the
2613 * ALLCOMPLETE flags since the bitmap now properly shows that the
2614 * inode is not allocated. Even if the inode is actively being
2615 * written, it has been rolled back to its zero'ed state, so we
2616 * are ensured that a zero inode is what is on the disk. For short
2617 * are assured that a zero inode is what is on the disk. For short
2618 * dependencies from the inode so that it can be freed immediately.
2619 */
2620static int
2621check_inode_unwritten(inodedep)
2622	struct inodedep *inodedep;
2623{
2624
2625	mtx_assert(&lk, MA_OWNED);
2626	if ((inodedep->id_state & DEPCOMPLETE) != 0 ||
2627	    LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2628	    LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2629	    LIST_FIRST(&inodedep->id_inowait) != NULL ||
2630	    TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2631	    TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2632	    TAILQ_FIRST(&inodedep->id_extupdt) != NULL ||
2633	    TAILQ_FIRST(&inodedep->id_newextupdt) != NULL ||
2634	    inodedep->id_nlinkdelta != 0)
2635		return (0);
2636
2637	/*
2638	 * Another process might be in initiate_write_inodeblock_ufs[12]
2639	 * trying to allocate memory without holding "Softdep Lock".
2640	 */
2641	if ((inodedep->id_state & IOSTARTED) != 0 &&
2642	    inodedep->id_savedino1 == NULL)
2643		return (0);
2644
2645	inodedep->id_state |= ALLCOMPLETE;
2646	LIST_REMOVE(inodedep, id_deps);
2647	inodedep->id_buf = NULL;
2648	if (inodedep->id_state & ONWORKLIST)
2649		WORKLIST_REMOVE(&inodedep->id_list);
2650	if (inodedep->id_savedino1 != NULL) {
2651		FREE(inodedep->id_savedino1, M_SAVEDINO);
2652		inodedep->id_savedino1 = NULL;
2653	}
2654	if (free_inodedep(inodedep) == 0)
2655		panic("check_inode_unwritten: busy inode");
2656	return (1);
2657}
2658
2659/*
2660 * Try to free an inodedep structure. Return 1 if it could be freed.
2661 */
2662static int
2663free_inodedep(inodedep)
2664	struct inodedep *inodedep;
2665{
2666
2667	mtx_assert(&lk, MA_OWNED);
2668	if ((inodedep->id_state & ONWORKLIST) != 0 ||
2669	    (inodedep->id_state & ALLCOMPLETE) != ALLCOMPLETE ||
2670	    LIST_FIRST(&inodedep->id_pendinghd) != NULL ||
2671	    LIST_FIRST(&inodedep->id_bufwait) != NULL ||
2672	    LIST_FIRST(&inodedep->id_inowait) != NULL ||
2673	    TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
2674	    TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL ||
2675	    TAILQ_FIRST(&inodedep->id_extupdt) != NULL ||
2676	    TAILQ_FIRST(&inodedep->id_newextupdt) != NULL ||
2677	    inodedep->id_nlinkdelta != 0 || inodedep->id_savedino1 != NULL)
2678		return (0);
2679	LIST_REMOVE(inodedep, id_hash);
2680	WORKITEM_FREE(inodedep, D_INODEDEP);
2681	num_inodedep -= 1;
2682	return (1);
2683}
2684
2685/*
2686 * This workitem routine performs the block de-allocation.
2687 * The workitem is added to the pending list after the updated
2688 * inode block has been written to disk.  As mentioned above,
2689 * checks regarding the number of blocks de-allocated (compared
2690 * to the number of blocks allocated for the file) are also
2691 * performed in this function.
2692 */
2693static void
2694handle_workitem_freeblocks(freeblks, flags)
2695	struct freeblks *freeblks;
2696	int flags;
2697{
2698	struct inode *ip;
2699	struct vnode *vp;
2700	struct fs *fs;
2701	struct ufsmount *ump;
2702	int i, nblocks, level, bsize;
2703	ufs2_daddr_t bn, blocksreleased = 0;
2704	int error, allerror = 0;
2705	ufs_lbn_t baselbns[NIADDR], tmpval;
2706	int fs_pendingblocks;
2707
2708	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
2709	fs = ump->um_fs;
2710	fs_pendingblocks = 0;
2711	tmpval = 1;
2712	baselbns[0] = NDADDR;
2713	for (i = 1; i < NIADDR; i++) {
2714		tmpval *= NINDIR(fs);
2715		baselbns[i] = baselbns[i - 1] + tmpval;
2716	}
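	/*
	 * Worked example (values assumed for illustration): with
	 * NDADDR == 12 and NINDIR(fs) == 4096, the loop above leaves
	 * baselbns[] = { 12, 12 + 4096, 12 + 4096 + 4096 * 4096 },
	 * i.e., the first logical block number reached through each
	 * level of indirection.
	 */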
2717	nblocks = btodb(fs->fs_bsize);
2718	blocksreleased = 0;
2719	/*
2720	 * Release all extended attribute blocks or frags.
2721	 */
2722	if (freeblks->fb_oldextsize > 0) {
2723		for (i = (NXADDR - 1); i >= 0; i--) {
2724			if ((bn = freeblks->fb_eblks[i]) == 0)
2725				continue;
2726			bsize = sblksize(fs, freeblks->fb_oldextsize, i);
2727			ffs_blkfree(ump, fs, freeblks->fb_devvp, bn, bsize,
2728			    freeblks->fb_previousinum);
2729			blocksreleased += btodb(bsize);
2730		}
2731	}
2732	/*
2733	 * Release all data blocks or frags.
2734	 */
2735	if (freeblks->fb_oldsize > 0) {
2736		/*
2737		 * Indirect blocks first.
2738		 */
2739		for (level = (NIADDR - 1); level >= 0; level--) {
2740			if ((bn = freeblks->fb_iblks[level]) == 0)
2741				continue;
2742			if ((error = indir_trunc(freeblks, fsbtodb(fs, bn),
2743			    level, baselbns[level], &blocksreleased)) != 0)
2744				allerror = error;
2745			ffs_blkfree(ump, fs, freeblks->fb_devvp, bn,
2746			    fs->fs_bsize, freeblks->fb_previousinum);
2747			fs_pendingblocks += nblocks;
2748			blocksreleased += nblocks;
2749		}
2750		/*
2751		 * All direct blocks or frags.
2752		 */
2753		for (i = (NDADDR - 1); i >= 0; i--) {
2754			if ((bn = freeblks->fb_dblks[i]) == 0)
2755				continue;
2756			bsize = sblksize(fs, freeblks->fb_oldsize, i);
2757			ffs_blkfree(ump, fs, freeblks->fb_devvp, bn, bsize,
2758			    freeblks->fb_previousinum);
2759			fs_pendingblocks += btodb(bsize);
2760			blocksreleased += btodb(bsize);
2761		}
2762	}
2763	UFS_LOCK(ump);
2764	fs->fs_pendingblocks -= fs_pendingblocks;
2765	UFS_UNLOCK(ump);
2766	/*
2767	 * If we still have not finished background cleanup, then check
2768	 * to see if the block count needs to be adjusted.
2769	 */
2770	if (freeblks->fb_chkcnt != blocksreleased &&
2771	    (fs->fs_flags & FS_UNCLEAN) != 0 &&
2772	    ffs_vget(freeblks->fb_list.wk_mp, freeblks->fb_previousinum,
2773	    (flags & LK_NOWAIT) | LK_EXCLUSIVE, &vp) == 0) {
2774		ip = VTOI(vp);
2775		DIP_SET(ip, i_blocks, DIP(ip, i_blocks) +
2776		    freeblks->fb_chkcnt - blocksreleased);
2777		ip->i_flag |= IN_CHANGE;
2778		vput(vp);
2779	}
2780
2781#ifdef DIAGNOSTIC
2782	if (freeblks->fb_chkcnt != blocksreleased &&
2783	    ((fs->fs_flags & FS_UNCLEAN) == 0 || (flags & LK_NOWAIT) != 0))
2784		printf("handle_workitem_freeblocks: block count\n");
2785	if (allerror)
2786		softdep_error("handle_workitem_freeblocks", allerror);
2787#endif /* DIAGNOSTIC */
2788
2789	ACQUIRE_LOCK(&lk);
2790	WORKITEM_FREE(freeblks, D_FREEBLKS);
2791	FREE_LOCK(&lk);
2792}
2793
2794/*
2795 * Release blocks associated with the inode ip and stored in the indirect
2796 * block dbn. If level is greater than SINGLE, the block is an indirect block
2797 * and recursive calls to indir_trunc must be used to cleanse other indirect
2798 * blocks.
2799 */
2800static int
2801indir_trunc(freeblks, dbn, level, lbn, countp)
2802	struct freeblks *freeblks;
2803	ufs2_daddr_t dbn;
2804	int level;
2805	ufs_lbn_t lbn;
2806	ufs2_daddr_t *countp;
2807{
2808	struct buf *bp;
2809	struct fs *fs;
2810	struct worklist *wk;
2811	struct indirdep *indirdep;
2812	struct ufsmount *ump;
2813	ufs1_daddr_t *bap1 = 0;
2814	ufs2_daddr_t nb, *bap2 = 0;
2815	ufs_lbn_t lbnadd;
2816	int i, nblocks, ufs1fmt;
2817	int error, allerror = 0;
2818	int fs_pendingblocks;
2819
2820	ump = VFSTOUFS(freeblks->fb_list.wk_mp);
2821	fs = ump->um_fs;
2822	fs_pendingblocks = 0;
2823	lbnadd = 1;
2824	for (i = level; i > 0; i--)
2825		lbnadd *= NINDIR(fs);
2826	/*
2827	 * Get buffer of block pointers to be freed. This routine is not
2828	 * called until the zero'ed inode has been written, so it is safe
2829	 * to free blocks as they are encountered. Because the inode has
2830	 * been zero'ed, calls to bmap on these blocks will fail. So, we
2831	 * have to use the on-disk address and the block device for the
2832	 * filesystem to look them up. If the file was deleted before its
2833	 * indirect blocks were all written to disk, the routine that set
2834	 * us up (deallocate_dependencies) will have arranged to leave
2835	 * a complete copy of the indirect block in memory for our use.
2836	 * Otherwise we have to read the blocks in from the disk.
2837	 */
2838#ifdef notyet
2839	bp = getblk(freeblks->fb_devvp, dbn, (int)fs->fs_bsize, 0, 0,
2840	    GB_NOCREAT);
2841#else
2842	bp = incore(&freeblks->fb_devvp->v_bufobj, dbn);
2843#endif
2844	ACQUIRE_LOCK(&lk);
2845	if (bp != NULL && (wk = LIST_FIRST(&bp->b_dep)) != NULL) {
2846		if (wk->wk_type != D_INDIRDEP ||
2847		    (indirdep = WK_INDIRDEP(wk))->ir_savebp != bp ||
2848		    (indirdep->ir_state & GOINGAWAY) == 0)
2849			panic("indir_trunc: lost indirdep");
2850		WORKLIST_REMOVE(wk);
2851		WORKITEM_FREE(indirdep, D_INDIRDEP);
2852		if (LIST_FIRST(&bp->b_dep) != NULL)
2853			panic("indir_trunc: dangling dep");
2854		ump->um_numindirdeps -= 1;
2855		FREE_LOCK(&lk);
2856	} else {
2857#ifdef notyet
2858		if (bp)
2859			brelse(bp);
2860#endif
2861		FREE_LOCK(&lk);
2862		error = bread(freeblks->fb_devvp, dbn, (int)fs->fs_bsize,
2863		    NOCRED, &bp);
2864		if (error) {
2865			brelse(bp);
2866			return (error);
2867		}
2868	}
2869	/*
2870	 * Recursively free indirect blocks.
2871	 */
2872	if (ump->um_fstype == UFS1) {
2873		ufs1fmt = 1;
2874		bap1 = (ufs1_daddr_t *)bp->b_data;
2875	} else {
2876		ufs1fmt = 0;
2877		bap2 = (ufs2_daddr_t *)bp->b_data;
2878	}
2879	nblocks = btodb(fs->fs_bsize);
2880	for (i = NINDIR(fs) - 1; i >= 0; i--) {
2881		if (ufs1fmt)
2882			nb = bap1[i];
2883		else
2884			nb = bap2[i];
2885		if (nb == 0)
2886			continue;
2887		if (level != 0) {
2888			if ((error = indir_trunc(freeblks, fsbtodb(fs, nb),
2889			     level - 1, lbn + (i * lbnadd), countp)) != 0)
2890				allerror = error;
2891		}
2892		ffs_blkfree(ump, fs, freeblks->fb_devvp, nb, fs->fs_bsize,
2893		    freeblks->fb_previousinum);
2894		fs_pendingblocks += nblocks;
2895		*countp += nblocks;
2896	}
2897	UFS_LOCK(ump);
2898	fs->fs_pendingblocks -= fs_pendingblocks;
2899	UFS_UNLOCK(ump);
2900	bp->b_flags |= B_INVAL | B_NOCACHE;
2901	brelse(bp);
2902	return (allerror);
2903}
2904
2905/*
2906 * Free an allocindir.
2907 * This routine must be called with splbio interrupts blocked.
2908 */
2909static void
2910free_allocindir(aip, inodedep)
2911	struct allocindir *aip;
2912	struct inodedep *inodedep;
2913{
2914	struct freefrag *freefrag;
2915
2916	mtx_assert(&lk, MA_OWNED);
2917	if ((aip->ai_state & DEPCOMPLETE) == 0)
2918		LIST_REMOVE(aip, ai_deps);
2919	if (aip->ai_state & ONWORKLIST)
2920		WORKLIST_REMOVE(&aip->ai_list);
2921	LIST_REMOVE(aip, ai_next);
2922	if ((freefrag = aip->ai_freefrag) != NULL) {
2923		if (inodedep == NULL)
2924			add_to_worklist(&freefrag->ff_list);
2925		else
2926			WORKLIST_INSERT(&inodedep->id_bufwait,
2927			    &freefrag->ff_list);
2928	}
2929	WORKITEM_FREE(aip, D_ALLOCINDIR);
2930}
2931
2932/*
2933 * Directory entry addition dependencies.
2934 *
2935 * When adding a new directory entry, the inode (with its incremented link
2936 * count) must be written to disk before the directory entry's pointer to it.
2937 * Also, if the inode is newly allocated, the corresponding freemap must be
2938 * updated (on disk) before the directory entry's pointer. These requirements
2939 * are met via undo/redo on the directory entry's pointer, which consists
2940 * simply of the inode number.
2941 *
2942 * As directory entries are added and deleted, the free space within a
2943 * directory block can become fragmented.  The ufs filesystem will compact
2944 * a fragmented directory block to make space for a new entry. When this
2945 * occurs, the offsets of previously added entries change. Any "diradd"
2946 * dependency structures corresponding to these entries must be updated with
2947 * the new offsets.
2948 */
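
/*
 * Illustrative sketch only (not compiled): the ordering a hypothetical
 * caller (ufs_direnter() in the real kernel) follows when creating an
 * entry. The helper name and the explicit d_ino store are invented
 * for illustration; a nonzero return from
 * softdep_setup_directory_add() asks the caller to force the new
 * entry to disk (see the indirect-block case below).
 */
#if 0
static int
example_add_entry(bp, dp, ep, diroffset, ip, newdirbp, isnewblk)
	struct buf *bp;
	struct inode *dp;
	struct direct *ep;
	off_t diroffset;
	struct inode *ip;
	struct buf *newdirbp;
	int isnewblk;
{
	int flush;

	/*
	 * The in-core link count of ip has already been incremented.
	 * Record the dependency before the entry's inode pointer is
	 * set; the undo/redo code keeps d_ino zero'ed on disk until
	 * the inode (and, if new, its bitmap) has been written.
	 */
	flush = softdep_setup_directory_add(bp, dp, diroffset,
	    ip->i_number, newdirbp, isnewblk);
	ep->d_ino = ip->i_number;
	return (flush);
}
#endif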
2949
2950/*
2951 * This routine is called after the in-memory inode's link
2952 * count has been incremented, but before the directory entry's
2953 * pointer to the inode has been set.
2954 */
2955int
2956softdep_setup_directory_add(bp, dp, diroffset, newinum, newdirbp, isnewblk)
2957	struct buf *bp;		/* buffer containing directory block */
2958	struct inode *dp;	/* inode for directory */
2959	off_t diroffset;	/* offset of new entry in directory */
2960	ino_t newinum;		/* inode referenced by new directory entry */
2961	struct buf *newdirbp;	/* non-NULL => contents of new mkdir */
2962	int isnewblk;		/* entry is in a newly allocated block */
2963{
2964	int offset;		/* offset of new entry within directory block */
2965	ufs_lbn_t lbn;		/* block in directory containing new entry */
2966	struct fs *fs;
2967	struct diradd *dap;
2968	struct allocdirect *adp;
2969	struct pagedep *pagedep;
2970	struct inodedep *inodedep;
2971	struct newdirblk *newdirblk = 0;
2972	struct mkdir *mkdir1, *mkdir2;
2973	struct mount *mp;
2974
2975	/*
2976	 * Whiteouts have no dependencies.
2977	 */
2978	if (newinum == WINO) {
2979		if (newdirbp != NULL)
2980			bdwrite(newdirbp);
2981		return (0);
2982	}
2983	mp = UFSTOVFS(dp->i_ump);
2984	fs = dp->i_fs;
2985	lbn = lblkno(fs, diroffset);
2986	offset = blkoff(fs, diroffset);
2987	MALLOC(dap, struct diradd *, sizeof(struct diradd), M_DIRADD,
2988		M_SOFTDEP_FLAGS|M_ZERO);
2989	workitem_alloc(&dap->da_list, D_DIRADD, mp);
2990	dap->da_offset = offset;
2991	dap->da_newinum = newinum;
2992	dap->da_state = ATTACHED;
2993	if (isnewblk && lbn < NDADDR && fragoff(fs, diroffset) == 0) {
2994		MALLOC(newdirblk, struct newdirblk *, sizeof(struct newdirblk),
2995		    M_NEWDIRBLK, M_SOFTDEP_FLAGS);
2996		workitem_alloc(&newdirblk->db_list, D_NEWDIRBLK, mp);
2997	}
2998	if (newdirbp == NULL) {
2999		dap->da_state |= DEPCOMPLETE;
3000		ACQUIRE_LOCK(&lk);
3001	} else {
3002		dap->da_state |= MKDIR_BODY | MKDIR_PARENT;
3003		MALLOC(mkdir1, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
3004		    M_SOFTDEP_FLAGS);
3005		workitem_alloc(&mkdir1->md_list, D_MKDIR, mp);
3006		mkdir1->md_state = MKDIR_BODY;
3007		mkdir1->md_diradd = dap;
3008		MALLOC(mkdir2, struct mkdir *, sizeof(struct mkdir), M_MKDIR,
3009		    M_SOFTDEP_FLAGS);
3010		workitem_alloc(&mkdir2->md_list, D_MKDIR, mp);
3011		mkdir2->md_state = MKDIR_PARENT;
3012		mkdir2->md_diradd = dap;
3013		/*
3014		 * Dependency on "." and ".." being written to disk.
3015		 */
3016		mkdir1->md_buf = newdirbp;
3017		ACQUIRE_LOCK(&lk);
3018		LIST_INSERT_HEAD(&mkdirlisthd, mkdir1, md_mkdirs);
3019		WORKLIST_INSERT(&newdirbp->b_dep, &mkdir1->md_list);
3020		FREE_LOCK(&lk);
3021		bdwrite(newdirbp);
3022		/*
3023		 * Dependency on link count increase for parent directory
3024		 */
3025		ACQUIRE_LOCK(&lk);
3026		if (inodedep_lookup(mp, dp->i_number, 0, &inodedep) == 0
3027		    || (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
3028			dap->da_state &= ~MKDIR_PARENT;
3029			WORKITEM_FREE(mkdir2, D_MKDIR);
3030		} else {
3031			LIST_INSERT_HEAD(&mkdirlisthd, mkdir2, md_mkdirs);
3032			WORKLIST_INSERT(&inodedep->id_bufwait,&mkdir2->md_list);
3033		}
3034	}
3035	/*
3036	 * Link into parent directory pagedep to await its being written.
3037	 */
3038	if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
3039		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
3040	dap->da_pagedep = pagedep;
3041	LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)], dap,
3042	    da_pdlist);
3043	/*
3044	 * Link into its inodedep. Put it on the id_bufwait list if the inode
3045	 * is not yet written. If it is written, do the post-inode write
3046	 * processing to put it on the id_pendinghd list.
3047	 */
3048	(void) inodedep_lookup(mp, newinum, DEPALLOC, &inodedep);
3049	if ((inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE)
3050		diradd_inode_written(dap, inodedep);
3051	else
3052		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
3053	if (isnewblk) {
3054		/*
3055		 * Directories growing into indirect blocks are rare
3056		 * enough, and new block allocation in those cases is
3057		 * rarer still, so we choose not to bother tracking
3058		 * them. Rather we simply force the new directory
3059		 * entry to disk.
3060		 */
3061		if (lbn >= NDADDR) {
3062			FREE_LOCK(&lk);
3063			/*
3064			 * We only have a new allocation when at the
3065			 * beginning of a new block, not when we are
3066			 * expanding into an existing block.
3067			 */
3068			if (blkoff(fs, diroffset) == 0)
3069				return (1);
3070			return (0);
3071		}
3072		/*
3073		 * We only have a new allocation when at the beginning
3074		 * of a new fragment, not when we are expanding into an
3075		 * existing fragment. Also, there is nothing to do if we
3076		 * are already tracking this block.
3077		 */
3078		if (fragoff(fs, diroffset) != 0) {
3079			FREE_LOCK(&lk);
3080			return (0);
3081		}
3082		if ((pagedep->pd_state & NEWBLOCK) != 0) {
3083			WORKITEM_FREE(newdirblk, D_NEWDIRBLK);
3084			FREE_LOCK(&lk);
3085			return (0);
3086		}
3087		/*
3088		 * Find our associated allocdirect and have it track us.
3089		 */
3090		if (inodedep_lookup(mp, dp->i_number, 0, &inodedep) == 0)
3091			panic("softdep_setup_directory_add: lost inodedep");
3092		adp = TAILQ_LAST(&inodedep->id_newinoupdt, allocdirectlst);
3093		if (adp == NULL || adp->ad_lbn != lbn)
3094			panic("softdep_setup_directory_add: lost entry");
3095		pagedep->pd_state |= NEWBLOCK;
3096		newdirblk->db_pagedep = pagedep;
3097		WORKLIST_INSERT(&adp->ad_newdirblk, &newdirblk->db_list);
3098	}
3099	FREE_LOCK(&lk);
3100	return (0);
3101}
3102
3103/*
3104 * This procedure is called to change the offset of a directory
3105 * entry when compacting a directory block, which must be owned
3106 * exclusively by the caller. Note that the actual entry movement
3107 * must be done in this procedure to ensure that no I/O completions
3108 * occur while the move is in progress.
3109 */
3110void
3111softdep_change_directoryentry_offset(dp, base, oldloc, newloc, entrysize)
3112	struct inode *dp;	/* inode for directory */
3113	caddr_t base;		/* address of dp->i_offset */
3114	caddr_t oldloc;		/* address of old directory location */
3115	caddr_t newloc;		/* address of new directory location */
3116	int entrysize;		/* size of directory entry */
3117{
3118	int offset, oldoffset, newoffset;
3119	struct pagedep *pagedep;
3120	struct diradd *dap;
3121	ufs_lbn_t lbn;
3122
3123	ACQUIRE_LOCK(&lk);
3124	lbn = lblkno(dp->i_fs, dp->i_offset);
3125	offset = blkoff(dp->i_fs, dp->i_offset);
3126	if (pagedep_lookup(dp, lbn, 0, &pagedep) == 0)
3127		goto done;
3128	oldoffset = offset + (oldloc - base);
3129	newoffset = offset + (newloc - base);
3130
3131	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(oldoffset)], da_pdlist) {
3132		if (dap->da_offset != oldoffset)
3133			continue;
3134		dap->da_offset = newoffset;
3135		if (DIRADDHASH(newoffset) == DIRADDHASH(oldoffset))
3136			break;
3137		LIST_REMOVE(dap, da_pdlist);
3138		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(newoffset)],
3139		    dap, da_pdlist);
3140		break;
3141	}
3142	if (dap == NULL) {
3144		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist) {
3145			if (dap->da_offset == oldoffset) {
3146				dap->da_offset = newoffset;
3147				break;
3148			}
3149		}
3150	}
3151done:
3152	bcopy(oldloc, newloc, entrysize);
3153	FREE_LOCK(&lk);
3154}
3155
3156/*
3157 * Free a diradd dependency structure. This routine must be called
3158 * with splbio interrupts blocked.
3159 */
3160static void
3161free_diradd(dap)
3162	struct diradd *dap;
3163{
3164	struct dirrem *dirrem;
3165	struct pagedep *pagedep;
3166	struct inodedep *inodedep;
3167	struct mkdir *mkdir, *nextmd;
3168
3169	mtx_assert(&lk, MA_OWNED);
3170	WORKLIST_REMOVE(&dap->da_list);
3171	LIST_REMOVE(dap, da_pdlist);
3172	if ((dap->da_state & DIRCHG) == 0) {
3173		pagedep = dap->da_pagedep;
3174	} else {
3175		dirrem = dap->da_previous;
3176		pagedep = dirrem->dm_pagedep;
3177		dirrem->dm_dirinum = pagedep->pd_ino;
3178		add_to_worklist(&dirrem->dm_list);
3179	}
3180	if (inodedep_lookup(pagedep->pd_list.wk_mp, dap->da_newinum,
3181	    0, &inodedep) != 0)
3182		(void) free_inodedep(inodedep);
3183	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0) {
3184		for (mkdir = LIST_FIRST(&mkdirlisthd); mkdir; mkdir = nextmd) {
3185			nextmd = LIST_NEXT(mkdir, md_mkdirs);
3186			if (mkdir->md_diradd != dap)
3187				continue;
3188			dap->da_state &= ~mkdir->md_state;
3189			WORKLIST_REMOVE(&mkdir->md_list);
3190			LIST_REMOVE(mkdir, md_mkdirs);
3191			WORKITEM_FREE(mkdir, D_MKDIR);
3192		}
3193		if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) != 0)
3194			panic("free_diradd: unfound ref");
3195	}
3196	WORKITEM_FREE(dap, D_DIRADD);
3197}
3198
3199/*
3200 * Directory entry removal dependencies.
3201 *
3202 * When removing a directory entry, the entry's inode pointer must be
3203 * zero'ed on disk before the corresponding inode's link count is decremented
3204 * (possibly freeing the inode for re-use). This dependency is handled by
3205 * updating the directory entry but delaying the inode count reduction until
3206 * after the directory block has been written to disk. After this point, the
3207 * inode count can be decremented whenever it is convenient.
3208 */
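
/*
 * Illustrative sketch only (not compiled): a hypothetical removal
 * path. The entry in "bp" has already been zero'ed (or merged into
 * its predecessor) in core; the helper name is invented.
 */
#if 0
static void
example_remove_entry(bp, dp, ip, isrmdir)
	struct buf *bp;
	struct inode *dp;
	struct inode *ip;
	int isrmdir;
{

	/*
	 * Record the dependency. The soft updates code, not the
	 * caller, decrements ip's link count once the updated
	 * directory block is safely on disk.
	 */
	softdep_setup_remove(bp, dp, ip, isrmdir);
	/* Do NOT decrement ip's link count here. */
}
#endif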
3209
3210/*
3211 * This routine should be called immediately after removing
3212 * a directory entry.  The inode's link count should not be
3213 * decremented by the calling procedure -- the soft updates
3214 * code will do this task when it is safe.
3215 */
3216void
3217softdep_setup_remove(bp, dp, ip, isrmdir)
3218	struct buf *bp;		/* buffer containing directory block */
3219	struct inode *dp;	/* inode for the directory being modified */
3220	struct inode *ip;	/* inode for directory entry being removed */
3221	int isrmdir;		/* indicates if doing RMDIR */
3222{
3223	struct dirrem *dirrem, *prevdirrem;
3224
3225	/*
3226	 * Allocate a new dirrem if appropriate and ACQUIRE_LOCK.
3227	 */
3228	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
3229
3230	/*
3231	 * If the COMPLETE flag is clear, then there were no active
3232	 * entries and we want to roll back to a zeroed entry until
3233	 * the new inode is committed to disk. If the COMPLETE flag is
3234	 * set, then we have deleted an entry that never made it to
3235	 * disk. If the entry we deleted resulted from a name change,
3236	 * then the old name still resides on disk. We cannot delete
3237	 * its inode (returned to us in prevdirrem) until the zeroed
3238	 * directory entry gets to disk. The new inode has never been
3239	 * referenced on the disk, so it can be deleted immediately.
3240	 */
3241	if ((dirrem->dm_state & COMPLETE) == 0) {
3242		LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd, dirrem,
3243		    dm_next);
3244		FREE_LOCK(&lk);
3245	} else {
3246		if (prevdirrem != NULL)
3247			LIST_INSERT_HEAD(&dirrem->dm_pagedep->pd_dirremhd,
3248			    prevdirrem, dm_next);
3249		dirrem->dm_dirinum = dirrem->dm_pagedep->pd_ino;
3250		FREE_LOCK(&lk);
3251		handle_workitem_remove(dirrem, NULL);
3252	}
3253}
3254
3255/*
3256 * Allocate a new dirrem if appropriate and return it along with
3257 * its associated pagedep. Called without a lock, returns with lock.
3258 */
3259static long num_dirrem;		/* number of dirrem allocated */
3260static struct dirrem *
3261newdirrem(bp, dp, ip, isrmdir, prevdirremp)
3262	struct buf *bp;		/* buffer containing directory block */
3263	struct inode *dp;	/* inode for the directory being modified */
3264	struct inode *ip;	/* inode for directory entry being removed */
3265	int isrmdir;		/* indicates if doing RMDIR */
3266	struct dirrem **prevdirremp; /* previously referenced inode, if any */
3267{
3268	int offset;
3269	ufs_lbn_t lbn;
3270	struct diradd *dap;
3271	struct dirrem *dirrem;
3272	struct pagedep *pagedep;
3273
3274	/*
3275	 * Whiteouts have no deletion dependencies.
3276	 */
3277	if (ip == NULL)
3278		panic("newdirrem: whiteout");
3279	/*
3280	 * If we are over our limit, try to improve the situation.
3281	 * Limiting the number of dirrem structures will also limit
3282	 * the number of freefile and freeblks structures.
3283	 */
3284	ACQUIRE_LOCK(&lk);
3285	if (num_dirrem > max_softdeps / 2)
3286		(void) request_cleanup(ITOV(dp)->v_mount, FLUSH_REMOVE);
3287	num_dirrem += 1;
3288	FREE_LOCK(&lk);
3289	MALLOC(dirrem, struct dirrem *, sizeof(struct dirrem),
3290		M_DIRREM, M_SOFTDEP_FLAGS|M_ZERO);
3291	workitem_alloc(&dirrem->dm_list, D_DIRREM, ITOV(dp)->v_mount);
3292	dirrem->dm_state = isrmdir ? RMDIR : 0;
3293	dirrem->dm_oldinum = ip->i_number;
3294	*prevdirremp = NULL;
3295
3296	ACQUIRE_LOCK(&lk);
3297	lbn = lblkno(dp->i_fs, dp->i_offset);
3298	offset = blkoff(dp->i_fs, dp->i_offset);
3299	if (pagedep_lookup(dp, lbn, DEPALLOC, &pagedep) == 0)
3300		WORKLIST_INSERT(&bp->b_dep, &pagedep->pd_list);
3301	dirrem->dm_pagedep = pagedep;
3302	/*
3303	 * Check for a diradd dependency for the same directory entry.
3304	 * If present, then both dependencies become obsolete and can
3305	 * be de-allocated. Check for an entry on both the pd_diraddhd
3306	 * list and the pd_pendinghd list.
3307	 */
3308
3309	LIST_FOREACH(dap, &pagedep->pd_diraddhd[DIRADDHASH(offset)], da_pdlist)
3310		if (dap->da_offset == offset)
3311			break;
3312	if (dap == NULL) {
3313
3314		LIST_FOREACH(dap, &pagedep->pd_pendinghd, da_pdlist)
3315			if (dap->da_offset == offset)
3316				break;
3317		if (dap == NULL)
3318			return (dirrem);
3319	}
3320	/*
3321	 * Must be ATTACHED at this point.
3322	 */
3323	if ((dap->da_state & ATTACHED) == 0)
3324		panic("newdirrem: not ATTACHED");
3325	if (dap->da_newinum != ip->i_number)
3326		panic("newdirrem: inum %d should be %d",
3327		    ip->i_number, dap->da_newinum);
3328	/*
3329	 * If we are deleting a changed name that never made it to disk,
3330	 * then return the dirrem describing the previous inode (which
3331	 * represents the inode currently referenced from this entry on disk).
3332	 */
3333	if ((dap->da_state & DIRCHG) != 0) {
3334		*prevdirremp = dap->da_previous;
3335		dap->da_state &= ~DIRCHG;
3336		dap->da_pagedep = pagedep;
3337	}
3338	/*
3339	 * We are deleting an entry that never made it to disk.
3340	 * Mark it COMPLETE so we can delete its inode immediately.
3341	 */
3342	dirrem->dm_state |= COMPLETE;
3343	free_diradd(dap);
3344	return (dirrem);
3345}
3346
3347/*
3348 * Directory entry change dependencies.
3349 *
3350 * Changing an existing directory entry requires that an add operation
3351 * be completed first followed by a deletion. The semantics for the addition
3352 * are identical to the description of adding a new entry above except
3353 * that the rollback is to the old inode number rather than zero. Once
3354 * the addition dependency is completed, the removal is done as described
3355 * in the removal routine above.
3356 */
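
/*
 * A sketch of the rollback rule above: when a directory page with an
 * unsatisfied diradd is written, the entry reverts to its previous
 * on-disk contents -- zero for a plain addition, the pre-rename inode
 * number for a change (DIRCHG).  Compare initiate_write_filepage()
 * below, which applies exactly this rule.
 */
#if 0
static ino_t
rollback_inum(dap)
	struct diradd *dap;
{

	if (dap->da_state & DIRCHG)
		return (dap->da_previous->dm_oldinum);
	return (0);
}
#endif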
3357
3358/*
3359 * This routine should be called immediately after changing
3360 * a directory entry.  The inode's link count should not be
3361 * decremented by the calling procedure -- the soft updates
3362 * code will perform this task when it is safe.
3363 */
3364void
3365softdep_setup_directory_change(bp, dp, ip, newinum, isrmdir)
3366	struct buf *bp;		/* buffer containing directory block */
3367	struct inode *dp;	/* inode for the directory being modified */
3368	struct inode *ip;	/* inode for directory entry being removed */
3369	ino_t newinum;		/* new inode number for changed entry */
3370	int isrmdir;		/* indicates if doing RMDIR */
3371{
3372	int offset;
3373	struct diradd *dap = NULL;
3374	struct dirrem *dirrem, *prevdirrem;
3375	struct pagedep *pagedep;
3376	struct inodedep *inodedep;
3377	struct mount *mp;
3378
3379	offset = blkoff(dp->i_fs, dp->i_offset);
3380	mp = UFSTOVFS(dp->i_ump);
3381
3382	/*
3383	 * Whiteouts do not need diradd dependencies.
3384	 */
3385	if (newinum != WINO) {
3386		MALLOC(dap, struct diradd *, sizeof(struct diradd),
3387		    M_DIRADD, M_SOFTDEP_FLAGS|M_ZERO);
3388		workitem_alloc(&dap->da_list, D_DIRADD, mp);
3389		dap->da_state = DIRCHG | ATTACHED | DEPCOMPLETE;
3390		dap->da_offset = offset;
3391		dap->da_newinum = newinum;
3392	}
3393
3394	/*
3395	 * Allocate a new dirrem and ACQUIRE_LOCK.
3396	 */
3397	dirrem = newdirrem(bp, dp, ip, isrmdir, &prevdirrem);
3398	pagedep = dirrem->dm_pagedep;
3399	/*
3400	 * The possible values for isrmdir:
3401	 *	0 - non-directory file rename
3402	 *	1 - directory rename within same directory
3403	 *   inum - directory rename to new directory of given inode number
3404	 * When renaming to a new directory, we are both deleting and
3405	 * creating a new directory entry, so the link count on the new
3406	 * directory should not change. Thus we do not need the followup
3407	 * dirrem which is usually done in handle_workitem_remove. We set
3408	 * the DIRCHG flag to tell handle_workitem_remove to skip the
3409	 * followup dirrem.
3410	 */
3411	if (isrmdir > 1)
3412		dirrem->dm_state |= DIRCHG;
3413
3414	/*
3415	 * Whiteouts have no additional dependencies,
3416	 * so just put the dirrem on the correct list.
3417	 */
3418	if (newinum == WINO) {
3419		if ((dirrem->dm_state & COMPLETE) == 0) {
3420			LIST_INSERT_HEAD(&pagedep->pd_dirremhd, dirrem,
3421			    dm_next);
3422		} else {
3423			dirrem->dm_dirinum = pagedep->pd_ino;
3424			add_to_worklist(&dirrem->dm_list);
3425		}
3426		FREE_LOCK(&lk);
3427		return;
3428	}
3429
3430	/*
3431	 * If the COMPLETE flag is clear, then there were no active
3432	 * entries and we want to roll back to the previous inode until
3433	 * the new inode is committed to disk. If the COMPLETE flag is
3434	 * set, then we have deleted an entry that never made it to disk.
3435	 * If the entry we deleted resulted from a name change, then the old
3436	 * inode reference still resides on disk. Any rollback that we do
3437	 * needs to be to that old inode (returned to us in prevdirrem). If
3438	 * the entry we deleted resulted from a create, then there is
3439	 * no entry on the disk, so we want to roll back to zero rather
3440	 * than the uncommitted inode. In either of the COMPLETE cases we
3441	 * want to immediately free the unwritten and unreferenced inode.
3442	 */
3443	if ((dirrem->dm_state & COMPLETE) == 0) {
3444		dap->da_previous = dirrem;
3445	} else {
3446		if (prevdirrem != NULL) {
3447			dap->da_previous = prevdirrem;
3448		} else {
3449			dap->da_state &= ~DIRCHG;
3450			dap->da_pagedep = pagedep;
3451		}
3452		dirrem->dm_dirinum = pagedep->pd_ino;
3453		add_to_worklist(&dirrem->dm_list);
3454	}
3455	/*
3456	 * Link into its inodedep. Put it on the id_bufwait list if the inode
3457	 * is not yet written. If it is written, do the post-inode write
3458	 * processing to put it on the id_pendinghd list.
3459	 */
3460	if (inodedep_lookup(mp, newinum, DEPALLOC, &inodedep) == 0 ||
3461	    (inodedep->id_state & ALLCOMPLETE) == ALLCOMPLETE) {
3462		dap->da_state |= COMPLETE;
3463		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
3464		WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
3465	} else {
3466		LIST_INSERT_HEAD(&pagedep->pd_diraddhd[DIRADDHASH(offset)],
3467		    dap, da_pdlist);
3468		WORKLIST_INSERT(&inodedep->id_bufwait, &dap->da_list);
3469	}
3470	FREE_LOCK(&lk);
3471}
3472
3473/*
3474 * Called whenever the link count on an inode is changed.
3475 * It creates an inode dependency so that the new reference(s)
3476 * to the inode cannot be committed to disk until the updated
3477 * inode has been written.
3478 */
3479void
3480softdep_change_linkcnt(ip)
3481	struct inode *ip;	/* the inode with the increased link count */
3482{
3483	struct inodedep *inodedep;
3484
3485	ACQUIRE_LOCK(&lk);
3486	(void) inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number,
3487	    DEPALLOC, &inodedep);
3488	if (ip->i_nlink < ip->i_effnlink)
3489		panic("softdep_change_linkcnt: bad delta");
3490	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
3491	FREE_LOCK(&lk);
3492}
3493
3494/*
3495 * Called when the effective link count and the reference count
3496 * on an inode drops to zero. At this point there are no names
3497 * referencing the file in the filesystem and no active file
3498 * references. The space associated with the file will be freed
3499 * as soon as the necessary soft dependencies are cleared.
3500 */
3501void
3502softdep_releasefile(ip)
3503	struct inode *ip;	/* inode with the zero effective link count */
3504{
3505	struct inodedep *inodedep;
3506	struct fs *fs;
3507	int extblocks;
3508
3509	if (ip->i_effnlink > 0)
3510		panic("softdep_releasefile: file still referenced");
3511	/*
3512	 * We may be called several times as the real reference count
3513	 * drops to zero. We only want to account for the space once.
3514	 */
3515	if (ip->i_flag & IN_SPACECOUNTED)
3516		return;
3517	/*
3518	 * We have to deactivate a snapshot, otherwise copy-on-writes may
3519	 * add blocks and the cleanup may remove blocks after we have
3520	 * tried to account for them.
3521	 */
3522	if ((ip->i_flags & SF_SNAPSHOT) != 0)
3523		ffs_snapremove(ITOV(ip));
3524	/*
3525	 * If we are tracking an nlinkdelta, we have to also remember
3526	 * whether we accounted for the freed space yet.
3527	 */
3528	ACQUIRE_LOCK(&lk);
3529	if ((inodedep_lookup(UFSTOVFS(ip->i_ump), ip->i_number, 0, &inodedep)))
3530		inodedep->id_state |= SPACECOUNTED;
3531	FREE_LOCK(&lk);
3532	fs = ip->i_fs;
3533	extblocks = 0;
3534	if (fs->fs_magic == FS_UFS2_MAGIC)
3535		extblocks = btodb(fragroundup(fs, ip->i_din2->di_extsize));
3536	UFS_LOCK(ip->i_ump);
3537	ip->i_fs->fs_pendingblocks += DIP(ip, i_blocks) - extblocks;
3538	ip->i_fs->fs_pendinginodes += 1;
3539	UFS_UNLOCK(ip->i_ump);
3540	ip->i_flag |= IN_SPACECOUNTED;
3541}
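
/*
 * A worked example of the accounting above, with illustrative numbers
 * on a UFS2 filesystem with 2K fragments: di_extsize = 1500 rounds up
 * to one 2K fragment, so extblocks = btodb(2048) = 4 DEV_BSIZE sectors.
 * With DIP(ip, i_blocks) = 100, the file then contributes 100 - 4 = 96
 * sectors to fs_pendingblocks and one inode to fs_pendinginodes until
 * the space is finally released.
 */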
3542
3543/*
3544 * This workitem decrements the inode's link count.
3545 * If the link count reaches zero, the file is removed.
3546 */
3547static void
3548handle_workitem_remove(dirrem, xp)
3549	struct dirrem *dirrem;
3550	struct vnode *xp;
3551{
3552	struct thread *td = curthread;
3553	struct inodedep *inodedep;
3554	struct vnode *vp;
3555	struct inode *ip;
3556	ino_t oldinum;
3557	int error;
3558
3559	if ((vp = xp) == NULL &&
3560	    (error = ffs_vget(dirrem->dm_list.wk_mp,
3561	    dirrem->dm_oldinum, LK_EXCLUSIVE, &vp)) != 0) {
3562		softdep_error("handle_workitem_remove: vget", error);
3563		return;
3564	}
3565	ip = VTOI(vp);
3566	ACQUIRE_LOCK(&lk);
3567	if ((inodedep_lookup(dirrem->dm_list.wk_mp,
3568	    dirrem->dm_oldinum, 0, &inodedep)) == 0)
3569		panic("handle_workitem_remove: lost inodedep");
3570	/*
3571	 * Normal file deletion.
3572	 */
3573	if ((dirrem->dm_state & RMDIR) == 0) {
3574		ip->i_nlink--;
3575		DIP_SET(ip, i_nlink, ip->i_nlink);
3576		ip->i_flag |= IN_CHANGE;
3577		if (ip->i_nlink < ip->i_effnlink)
3578			panic("handle_workitem_remove: bad file delta");
3579		inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
3580		num_dirrem -= 1;
3581		WORKITEM_FREE(dirrem, D_DIRREM);
3582		FREE_LOCK(&lk);
3583		vput(vp);
3584		return;
3585	}
3586	/*
3587	 * Directory deletion. Decrement reference count for both the
3588	 * just deleted parent directory entry and the reference for ".".
3589	 * Next truncate the directory to length zero. When the
3590	 * truncation completes, arrange to have the reference count on
3591	 * the parent decremented to account for the loss of "..".
3592	 */
3593	ip->i_nlink -= 2;
3594	DIP_SET(ip, i_nlink, ip->i_nlink);
3595	ip->i_flag |= IN_CHANGE;
3596	if (ip->i_nlink < ip->i_effnlink)
3597		panic("handle_workitem_remove: bad dir delta");
3598	inodedep->id_nlinkdelta = ip->i_nlink - ip->i_effnlink;
3599	FREE_LOCK(&lk);
3600	if ((error = ffs_truncate(vp, (off_t)0, 0, td->td_ucred, td)) != 0)
3601		softdep_error("handle_workitem_remove: truncate", error);
3602	ACQUIRE_LOCK(&lk);
3603	/*
3604	 * Rename a directory to a new parent. Since we are both deleting
3605	 * and creating a new directory entry, the link count on the new
3606	 * directory should not change. Thus we skip the followup dirrem.
3607	 */
3608	if (dirrem->dm_state & DIRCHG) {
3609		num_dirrem -= 1;
3610		WORKITEM_FREE(dirrem, D_DIRREM);
3611		FREE_LOCK(&lk);
3612		vput(vp);
3613		return;
3614	}
3615	/*
3616	 * If the inodedep does not exist, then the zero'ed inode has
3617	 * been written to disk. If the allocated inode has never been
3618	 * written to disk, then the on-disk inode is zero'ed. In either
3619	 * case we can remove the file immediately.
3620	 */
3621	dirrem->dm_state = 0;
3622	oldinum = dirrem->dm_oldinum;
3623	dirrem->dm_oldinum = dirrem->dm_dirinum;
3624	if (inodedep_lookup(dirrem->dm_list.wk_mp, oldinum,
3625	    0, &inodedep) == 0 || check_inode_unwritten(inodedep)) {
3626		FREE_LOCK(&lk);
3627		vput(vp);
3628		handle_workitem_remove(dirrem, NULL);
3629		return;
3630	}
3631	WORKLIST_INSERT(&inodedep->id_inowait, &dirrem->dm_list);
3632	FREE_LOCK(&lk);
3633	ip->i_flag |= IN_CHANGE;
3634	ffs_update(vp, 0);
3635	vput(vp);
3636}
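
/*
 * A worked example of the directory deletion arithmetic above: an
 * empty directory has nlink 2 (its "." entry plus the entry in its
 * parent), so the "ip->i_nlink -= 2" rolls it to zero.  The parent's
 * loss of ".." is not applied here; it is deferred to the follow-up
 * dirrem, which runs once the zeroed inode has reached the disk.
 */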
3637
3638/*
3639 * Inode de-allocation dependencies.
3640 *
3641 * When an inode's link count is reduced to zero, it can be de-allocated. We
3642 * found it convenient to postpone de-allocation until after the inode is
3643 * written to disk with its new link count (zero).  At this point, all of the
3644 * on-disk inode's block pointers are nullified and, with careful dependency
3645 * list ordering, all dependencies related to the inode will be satisfied and
3646 * the corresponding dependency structures de-allocated.  So, if/when the
3647 * inode is reused, there will be no mixing of old dependencies with new
3648 * ones.  This artificial dependency is set up by the block de-allocation
3649 * procedure above (softdep_setup_freeblocks) and completed by the
3650 * following procedure.
3651 */
3652static void
3653handle_workitem_freefile(freefile)
3654	struct freefile *freefile;
3655{
3656	struct fs *fs;
3657	struct inodedep *idp;
3658	struct ufsmount *ump;
3659	int error;
3660
3661	ump = VFSTOUFS(freefile->fx_list.wk_mp);
3662	fs = ump->um_fs;
3663#ifdef DEBUG
3664	ACQUIRE_LOCK(&lk);
3665	error = inodedep_lookup(UFSTOVFS(ump), freefile->fx_oldinum, 0, &idp);
3666	FREE_LOCK(&lk);
3667	if (error)
3668		panic("handle_workitem_freefile: inodedep survived");
3669#endif
3670	UFS_LOCK(ump);
3671	fs->fs_pendinginodes -= 1;
3672	UFS_UNLOCK(ump);
3673	if ((error = ffs_freefile(ump, fs, freefile->fx_devvp,
3674	    freefile->fx_oldinum, freefile->fx_mode)) != 0)
3675		softdep_error("handle_workitem_freefile", error);
3676	ACQUIRE_LOCK(&lk);
3677	WORKITEM_FREE(freefile, D_FREEFILE);
3678	FREE_LOCK(&lk);
3679}
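
/*
 * An ordering sketch for the artificial dependency above; the steps
 * name the real routines involved:
 *
 *	softdep_setup_freeblocks()	nullify the in-memory block
 *					pointers and queue the freefile
 *	inode block write		zeroed inode reaches the disk
 *	handle_workitem_freefile()	only now hand the inode back to
 *					the allocation bitmaps
 */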
3680
3681
3682/*
3683 * Helper function which unlinks marker element from work list and returns
3684 * the next element on the list.
3685 */
3686static __inline struct worklist *
3687markernext(struct worklist *marker)
3688{
3689	struct worklist *next;
3690
3691	next = LIST_NEXT(marker, wk_list);
3692	LIST_REMOVE(marker, wk_list);
3693	return next;
3694}
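
/*
 * A usage sketch for markernext(): park the marker after the current
 * element so the walk can survive a lock drop, then resume from it.
 * This is precisely the traversal pattern of
 * softdep_disk_io_initiation() below.
 */
#if 0
	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
	     wk = markernext(&marker)) {
		LIST_INSERT_AFTER(wk, &marker, wk_list);
		/* ... may FREE_LOCK(&lk)/ACQUIRE_LOCK(&lk) in here ... */
	}
#endif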
3695
3696/*
3697 * Disk writes.
3698 *
3699 * The dependency structures constructed above are most actively used when file
3700 * system blocks are written to disk.  No constraints are placed on when a
3701 * block can be written, but unsatisfied update dependencies are made safe by
3702 * modifying (or replacing) the source memory for the duration of the disk
3703 * write.  When the disk write completes, the memory block is again brought
3704 * up-to-date.
3705 *
3706 * In-core inode structure reclamation.
3707 *
3708 * Because there are a finite number of "in-core" inode structures, they are
3709 * reused regularly.  By transferring all inode-related dependencies to the
3710 * in-memory inode block and indexing them separately (via "inodedep"s), we
3711 * can allow "in-core" inode structures to be reused at any time and avoid
3712 * any increase in contention.
3713 *
3714 * Called just before entering the device driver to initiate a new disk I/O.
3715 * The buffer must be locked, thus, no I/O completion operations can occur
3716 * while we are manipulating its associated dependencies.
3717 */
3718static void
3719softdep_disk_io_initiation(bp)
3720	struct buf *bp;		/* structure describing disk write to occur */
3721{
3722	struct worklist *wk;
3723	struct worklist marker;
3724	struct indirdep *indirdep;
3725	struct inodedep *inodedep;
3726
3727	/*
3728	 * We only care about write operations. There should never
3729	 * be dependencies for reads.
3730	 */
3731	if (bp->b_iocmd != BIO_WRITE)
3732		panic("softdep_disk_io_initiation: not write");
3733
3734	marker.wk_type = D_LAST + 1;	/* Not a normal workitem */
3735	PHOLD(curproc);			/* Don't swap out kernel stack */
3736
3737	ACQUIRE_LOCK(&lk);
3738	/*
3739	 * Do any necessary pre-I/O processing.
3740	 */
3741	for (wk = LIST_FIRST(&bp->b_dep); wk != NULL;
3742	     wk = markernext(&marker)) {
3743		LIST_INSERT_AFTER(wk, &marker, wk_list);
3744		switch (wk->wk_type) {
3745
3746		case D_PAGEDEP:
3747			initiate_write_filepage(WK_PAGEDEP(wk), bp);
3748			continue;
3749
3750		case D_INODEDEP:
3751			inodedep = WK_INODEDEP(wk);
3752			if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC)
3753				initiate_write_inodeblock_ufs1(inodedep, bp);
3754			else
3755				initiate_write_inodeblock_ufs2(inodedep, bp);
3756			continue;
3757
3758		case D_INDIRDEP:
3759			indirdep = WK_INDIRDEP(wk);
3760			if (indirdep->ir_state & GOINGAWAY)
3761				panic("disk_io_initiation: indirdep gone");
3762			/*
3763			 * If there are no remaining dependencies, this
3764			 * will be writing the real pointers, so the
3765			 * dependency can be freed.
3766			 */
3767			if (LIST_FIRST(&indirdep->ir_deplisthd) == NULL) {
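				/*
				 * Note that this bp shadows the
				 * function argument: below it names
				 * the saved indirect block
				 * (ir_savebp), not the buffer being
				 * written.
				 */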
3768				struct buf *bp;
3769
3770				bp = indirdep->ir_savebp;
3771				bp->b_flags |= B_INVAL | B_NOCACHE;
3772				/* inline expand WORKLIST_REMOVE(wk); */
3773				wk->wk_state &= ~ONWORKLIST;
3774				LIST_REMOVE(wk, wk_list);
3775				WORKITEM_FREE(indirdep, D_INDIRDEP);
3776				FREE_LOCK(&lk);
3777				brelse(bp);
3778				ACQUIRE_LOCK(&lk);
3779				continue;
3780			}
3781			/*
3782			 * Replace up-to-date version with safe version.
3783			 */
3784			FREE_LOCK(&lk);
3785			MALLOC(indirdep->ir_saveddata, caddr_t, bp->b_bcount,
3786			    M_INDIRDEP, M_SOFTDEP_FLAGS);
3787			ACQUIRE_LOCK(&lk);
3788			indirdep->ir_state &= ~ATTACHED;
3789			indirdep->ir_state |= UNDONE;
3790			bcopy(bp->b_data, indirdep->ir_saveddata, bp->b_bcount);
3791			bcopy(indirdep->ir_savebp->b_data, bp->b_data,
3792			    bp->b_bcount);
3793			continue;
3794
3795		case D_MKDIR:
3796		case D_BMSAFEMAP:
3797		case D_ALLOCDIRECT:
3798		case D_ALLOCINDIR:
3799			continue;
3800
3801		default:
3802			panic("handle_disk_io_initiation: Unexpected type %s",
3803			    TYPENAME(wk->wk_type));
3804			/* NOTREACHED */
3805		}
3806	}
3807	FREE_LOCK(&lk);
3808	PRELE(curproc);			/* Allow swapout of kernel stack */
3809}
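
/*
 * A data-flow sketch for the indirect-block safe copy above and its
 * undo in softdep_disk_write_complete():
 *
 *	initiation:	ir_saveddata <- bp->b_data	(stash new ptrs)
 *			bp->b_data <- ir_savebp data	(write safe copy)
 *	completion:	bp->b_data <- ir_saveddata	(restore, bdirty)
 */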
3810
3811/*
3812 * Called from within the procedure above to deal with unsatisfied
3813 * allocation dependencies in a directory. The buffer must be locked,
3814 * thus, no I/O completion operations can occur while we are
3815 * manipulating its associated dependencies.
3816 */
3817static void
3818initiate_write_filepage(pagedep, bp)
3819	struct pagedep *pagedep;
3820	struct buf *bp;
3821{
3822	struct diradd *dap;
3823	struct direct *ep;
3824	int i;
3825
3826	if (pagedep->pd_state & IOSTARTED) {
3827		/*
3828		 * This can only happen if there is a driver that does not
3829		 * understand chaining. Here biodone will reissue the call
3830		 * to strategy for the incomplete buffers.
3831		 */
3832		printf("initiate_write_filepage: already started\n");
3833		return;
3834	}
3835	pagedep->pd_state |= IOSTARTED;
3836	for (i = 0; i < DAHASHSZ; i++) {
3837		LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
3838			ep = (struct direct *)
3839			    ((char *)bp->b_data + dap->da_offset);
3840			if (ep->d_ino != dap->da_newinum)
3841				panic("%s: dir inum %d != new %d",
3842				    "initiate_write_filepage",
3843				    ep->d_ino, dap->da_newinum);
3844			if (dap->da_state & DIRCHG)
3845				ep->d_ino = dap->da_previous->dm_oldinum;
3846			else
3847				ep->d_ino = 0;
3848			dap->da_state &= ~ATTACHED;
3849			dap->da_state |= UNDONE;
3850		}
3851	}
3852}
3853
3854/*
3855 * Version of initiate_write_inodeblock that handles UFS1 dinodes.
3856 * Note that any bug fixes made to this routine must be done in the
3857 * version found below.
3858 *
3859 * Called from within the procedure above to deal with unsatisfied
3860 * allocation dependencies in an inodeblock. The buffer must be
3861 * locked, thus, no I/O completion operations can occur while we
3862 * are manipulating its associated dependencies.
3863 */
3864static void
3865initiate_write_inodeblock_ufs1(inodedep, bp)
3866	struct inodedep *inodedep;
3867	struct buf *bp;			/* The inode block */
3868{
3869	struct allocdirect *adp, *lastadp;
3870	struct ufs1_dinode *dp;
3871	struct ufs1_dinode *sip;
3872	struct fs *fs;
3873	ufs_lbn_t i, prevlbn = 0;
3874	int deplist;
3875
3876	if (inodedep->id_state & IOSTARTED)
3877		panic("initiate_write_inodeblock_ufs1: already started");
3878	inodedep->id_state |= IOSTARTED;
3879	fs = inodedep->id_fs;
3880	dp = (struct ufs1_dinode *)bp->b_data +
3881	    ino_to_fsbo(fs, inodedep->id_ino);
3882	/*
3883	 * If the bitmap is not yet written, then the allocated
3884	 * inode cannot be written to disk.
3885	 */
3886	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
3887		if (inodedep->id_savedino1 != NULL)
3888			panic("initiate_write_inodeblock_ufs1: I/O underway");
3889		FREE_LOCK(&lk);
3890		MALLOC(sip, struct ufs1_dinode *,
3891		    sizeof(struct ufs1_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS);
3892		ACQUIRE_LOCK(&lk);
3893		inodedep->id_savedino1 = sip;
3894		*inodedep->id_savedino1 = *dp;
3895		bzero((caddr_t)dp, sizeof(struct ufs1_dinode));
3896		dp->di_gen = inodedep->id_savedino1->di_gen;
3897		return;
3898	}
3899	/*
3900	 * If no dependencies, then there is nothing to roll back.
3901	 */
3902	inodedep->id_savedsize = dp->di_size;
3903	inodedep->id_savedextsize = 0;
3904	if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL)
3905		return;
3906	/*
3907	 * Set the dependencies to busy.
3908	 */
3909	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3910	     adp = TAILQ_NEXT(adp, ad_next)) {
3911#ifdef DIAGNOSTIC
3912		if (deplist != 0 && prevlbn >= adp->ad_lbn)
3913			panic("softdep_write_inodeblock: lbn order");
3914		prevlbn = adp->ad_lbn;
3915		if (adp->ad_lbn < NDADDR &&
3916		    dp->di_db[adp->ad_lbn] != adp->ad_newblkno)
3917			panic("%s: direct pointer #%jd mismatch %d != %jd",
3918			    "softdep_write_inodeblock",
3919			    (intmax_t)adp->ad_lbn,
3920			    dp->di_db[adp->ad_lbn],
3921			    (intmax_t)adp->ad_newblkno);
3922		if (adp->ad_lbn >= NDADDR &&
3923		    dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno)
3924			panic("%s: indirect pointer #%jd mismatch %d != %jd",
3925			    "softdep_write_inodeblock",
3926			    (intmax_t)adp->ad_lbn - NDADDR,
3927			    dp->di_ib[adp->ad_lbn - NDADDR],
3928			    (intmax_t)adp->ad_newblkno);
3929		deplist |= 1 << adp->ad_lbn;
3930		if ((adp->ad_state & ATTACHED) == 0)
3931			panic("softdep_write_inodeblock: Unknown state 0x%x",
3932			    adp->ad_state);
3933#endif /* DIAGNOSTIC */
3934		adp->ad_state &= ~ATTACHED;
3935		adp->ad_state |= UNDONE;
3936	}
3937	/*
3938	 * The on-disk inode cannot claim to be any larger than the last
3939	 * fragment that has been written. Otherwise, the on-disk inode
3940	 * might have fragments that were not the last block in the file
3941	 * which would corrupt the filesystem.
3942	 */
3943	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
3944	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
3945		if (adp->ad_lbn >= NDADDR)
3946			break;
3947		dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
3948		/* keep going until hitting a rollback to a frag */
3949		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
3950			continue;
3951		dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
3952		for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
3953#ifdef DIAGNOSTIC
3954			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
3955				panic("softdep_write_inodeblock: lost dep1");
3956#endif /* DIAGNOSTIC */
3957			dp->di_db[i] = 0;
3958		}
3959		for (i = 0; i < NIADDR; i++) {
3960#ifdef DIAGNOSTIC
3961			if (dp->di_ib[i] != 0 &&
3962			    (deplist & ((1 << NDADDR) << i)) == 0)
3963				panic("softdep_write_inodeblock: lost dep2");
3964#endif /* DIAGNOSTIC */
3965			dp->di_ib[i] = 0;
3966		}
3967		return;
3968	}
3969	/*
3970	 * If we have zero'ed out the last allocated block of the file,
3971	 * roll back the size to the last currently allocated block.
3972	 * We know that this last allocated block is full-sized, as
3973	 * we already checked for fragments in the loop above.
3974	 */
3975	if (lastadp != NULL &&
3976	    dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
3977		for (i = lastadp->ad_lbn; i >= 0; i--)
3978			if (dp->di_db[i] != 0)
3979				break;
3980		dp->di_size = (i + 1) * fs->fs_bsize;
3981	}
3982	/*
3983	 * The only dependencies are for indirect blocks.
3984	 *
3985	 * The file size for indirect block additions is not guaranteed.
3986	 * Such a guarantee would be non-trivial to achieve. The conventional
3987	 * synchronous write implementation also does not make this guarantee.
3988	 * Fsck should catch and fix discrepancies. Arguably, the file size
3989	 * can be over-estimated without destroying integrity when the file
3990	 * moves into the indirect blocks (i.e., is large). If we want to
3991	 * postpone fsck, we are stuck with this argument.
3992	 */
3993	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
3994		dp->di_ib[adp->ad_lbn - NDADDR] = 0;
3995}
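
/*
 * A worked example of the size rollback above, assuming fs_bsize is
 * 16384: rolling back to a 4096-byte fragment at ad_lbn 3 sets
 * di_size = 16384 * 3 + 4096 = 53248, and every direct and indirect
 * pointer beyond lbn 3 is zeroed so that the on-disk inode never
 * claims blocks past its last written fragment.
 */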
3996
3997/*
3998 * Version of initiate_write_inodeblock that handles UFS2 dinodes.
3999 * Note that any bug fixes made to this routine must be done in the
4000 * version found above.
4001 *
4002 * Called from within the procedure above to deal with unsatisfied
4003 * allocation dependencies in an inodeblock. The buffer must be
4004 * locked, thus, no I/O completion operations can occur while we
4005 * are manipulating its associated dependencies.
4006 */
4007static void
4008initiate_write_inodeblock_ufs2(inodedep, bp)
4009	struct inodedep *inodedep;
4010	struct buf *bp;			/* The inode block */
4011{
4012	struct allocdirect *adp, *lastadp;
4013	struct ufs2_dinode *dp;
4014	struct ufs2_dinode *sip;
4015	struct fs *fs;
4016	ufs_lbn_t i, prevlbn = 0;
4017	int deplist;
4018
4019	if (inodedep->id_state & IOSTARTED)
4020		panic("initiate_write_inodeblock_ufs2: already started");
4021	inodedep->id_state |= IOSTARTED;
4022	fs = inodedep->id_fs;
4023	dp = (struct ufs2_dinode *)bp->b_data +
4024	    ino_to_fsbo(fs, inodedep->id_ino);
4025	/*
4026	 * If the bitmap is not yet written, then the allocated
4027	 * inode cannot be written to disk.
4028	 */
4029	if ((inodedep->id_state & DEPCOMPLETE) == 0) {
4030		if (inodedep->id_savedino2 != NULL)
4031			panic("initiate_write_inodeblock_ufs2: I/O underway");
4032		FREE_LOCK(&lk);
4033		MALLOC(sip, struct ufs2_dinode *,
4034		    sizeof(struct ufs2_dinode), M_SAVEDINO, M_SOFTDEP_FLAGS);
4035		ACQUIRE_LOCK(&lk);
4036		inodedep->id_savedino2 = sip;
4037		*inodedep->id_savedino2 = *dp;
4038		bzero((caddr_t)dp, sizeof(struct ufs2_dinode));
4039		dp->di_gen = inodedep->id_savedino2->di_gen;
4040		return;
4041	}
4042	/*
4043	 * If no dependencies, then there is nothing to roll back.
4044	 */
4045	inodedep->id_savedsize = dp->di_size;
4046	inodedep->id_savedextsize = dp->di_extsize;
4047	if (TAILQ_FIRST(&inodedep->id_inoupdt) == NULL &&
4048	    TAILQ_FIRST(&inodedep->id_extupdt) == NULL)
4049		return;
4050	/*
4051	 * Set the ext data dependencies to busy.
4052	 */
4053	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
4054	     adp = TAILQ_NEXT(adp, ad_next)) {
4055#ifdef DIAGNOSTIC
4056		if (deplist != 0 && prevlbn >= adp->ad_lbn)
4057			panic("softdep_write_inodeblock: lbn order");
4058		prevlbn = adp->ad_lbn;
4059		if (dp->di_extb[adp->ad_lbn] != adp->ad_newblkno)
4060			panic("%s: direct pointer #%jd mismatch %jd != %jd",
4061			    "softdep_write_inodeblock",
4062			    (intmax_t)adp->ad_lbn,
4063			    (intmax_t)dp->di_extb[adp->ad_lbn],
4064			    (intmax_t)adp->ad_newblkno);
4065		deplist |= 1 << adp->ad_lbn;
4066		if ((adp->ad_state & ATTACHED) == 0)
4067			panic("softdep_write_inodeblock: Unknown state 0x%x",
4068			    adp->ad_state);
4069#endif /* DIAGNOSTIC */
4070		adp->ad_state &= ~ATTACHED;
4071		adp->ad_state |= UNDONE;
4072	}
4073	/*
4074	 * The on-disk inode cannot claim to be any larger than the last
4075	 * fragment that has been written. Otherwise, the on-disk inode
4076	 * might have fragments that were not the last block in the ext
4077	 * data which would corrupt the filesystem.
4078	 */
4079	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_extupdt); adp;
4080	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
4081		dp->di_extb[adp->ad_lbn] = adp->ad_oldblkno;
4082		/* keep going until hitting a rollback to a frag */
4083		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
4084			continue;
4085		dp->di_extsize = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
4086		for (i = adp->ad_lbn + 1; i < NXADDR; i++) {
4087#ifdef DIAGNOSTIC
4088			if (dp->di_extb[i] != 0 && (deplist & (1 << i)) == 0)
4089				panic("softdep_write_inodeblock: lost dep1");
4090#endif /* DIAGNOSTIC */
4091			dp->di_extb[i] = 0;
4092		}
4093		lastadp = NULL;
4094		break;
4095	}
4096	/*
4097	 * If we have zero'ed out the last allocated block of the ext
4098	 * data, roll back the size to the last currently allocated block.
4099	 * We know that this last allocated block is full-sized, as
4100	 * we already checked for fragments in the loop above.
4101	 */
4102	if (lastadp != NULL &&
4103	    dp->di_extsize <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
4104		for (i = lastadp->ad_lbn; i >= 0; i--)
4105			if (dp->di_extb[i] != 0)
4106				break;
4107		dp->di_extsize = (i + 1) * fs->fs_bsize;
4108	}
4109	/*
4110	 * Set the file data dependencies to busy.
4111	 */
4112	for (deplist = 0, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
4113	     adp = TAILQ_NEXT(adp, ad_next)) {
4114#ifdef DIAGNOSTIC
4115		if (deplist != 0 && prevlbn >= adp->ad_lbn)
4116			panic("softdep_write_inodeblock: lbn order");
4117		prevlbn = adp->ad_lbn;
4118		if (adp->ad_lbn < NDADDR &&
4119		    dp->di_db[adp->ad_lbn] != adp->ad_newblkno)
4120			panic("%s: direct pointer #%jd mismatch %jd != %jd",
4121			    "softdep_write_inodeblock",
4122			    (intmax_t)adp->ad_lbn,
4123			    (intmax_t)dp->di_db[adp->ad_lbn],
4124			    (intmax_t)adp->ad_newblkno);
4125		if (adp->ad_lbn >= NDADDR &&
4126		    dp->di_ib[adp->ad_lbn - NDADDR] != adp->ad_newblkno)
4127			panic("%s: indirect pointer #%jd mismatch %jd != %jd",
4128			    "softdep_write_inodeblock",
4129			    (intmax_t)adp->ad_lbn - NDADDR,
4130			    (intmax_t)dp->di_ib[adp->ad_lbn - NDADDR],
4131			    (intmax_t)adp->ad_newblkno);
4132		deplist |= 1 << adp->ad_lbn;
4133		if ((adp->ad_state & ATTACHED) == 0)
4134			panic("softdep_write_inodeblock: Unknown state 0x%x",
4135			    adp->ad_state);
4136#endif /* DIAGNOSTIC */
4137		adp->ad_state &= ~ATTACHED;
4138		adp->ad_state |= UNDONE;
4139	}
4140	/*
4141	 * The on-disk inode cannot claim to be any larger than the last
4142	 * fragment that has been written. Otherwise, the on-disk inode
4143	 * might have fragments that were not the last block in the file
4144	 * which would corrupt the filesystem.
4145	 */
4146	for (lastadp = NULL, adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp;
4147	     lastadp = adp, adp = TAILQ_NEXT(adp, ad_next)) {
4148		if (adp->ad_lbn >= NDADDR)
4149			break;
4150		dp->di_db[adp->ad_lbn] = adp->ad_oldblkno;
4151		/* keep going until hitting a rollback to a frag */
4152		if (adp->ad_oldsize == 0 || adp->ad_oldsize == fs->fs_bsize)
4153			continue;
4154		dp->di_size = fs->fs_bsize * adp->ad_lbn + adp->ad_oldsize;
4155		for (i = adp->ad_lbn + 1; i < NDADDR; i++) {
4156#ifdef DIAGNOSTIC
4157			if (dp->di_db[i] != 0 && (deplist & (1 << i)) == 0)
4158				panic("softdep_write_inodeblock: lost dep2");
4159#endif /* DIAGNOSTIC */
4160			dp->di_db[i] = 0;
4161		}
4162		for (i = 0; i < NIADDR; i++) {
4163#ifdef DIAGNOSTIC
4164			if (dp->di_ib[i] != 0 &&
4165			    (deplist & ((1 << NDADDR) << i)) == 0)
4166				panic("softdep_write_inodeblock: lost dep3");
4167#endif /* DIAGNOSTIC */
4168			dp->di_ib[i] = 0;
4169		}
4170		return;
4171	}
4172	/*
4173	 * If we have zero'ed out the last allocated block of the file,
4174	 * roll back the size to the last currently allocated block.
4175	 * We know that this last allocated block is full-sized, as
4176	 * we already checked for fragments in the loop above.
4177	 */
4178	if (lastadp != NULL &&
4179	    dp->di_size <= (lastadp->ad_lbn + 1) * fs->fs_bsize) {
4180		for (i = lastadp->ad_lbn; i >= 0; i--)
4181			if (dp->di_db[i] != 0)
4182				break;
4183		dp->di_size = (i + 1) * fs->fs_bsize;
4184	}
4185	/*
4186	 * The only dependencies are for indirect blocks.
4187	 *
4188	 * The file size for indirect block additions is not guaranteed.
4189	 * Such a guarantee would be non-trivial to achieve. The conventional
4190	 * synchronous write implementation also does not make this guarantee.
4191	 * Fsck should catch and fix discrepancies. Arguably, the file size
4192	 * can be over-estimated without destroying integrity when the file
4193	 * moves into the indirect blocks (i.e., is large). If we want to
4194	 * postpone fsck, we are stuck with this argument.
4195	 */
4196	for (; adp; adp = TAILQ_NEXT(adp, ad_next))
4197		dp->di_ib[adp->ad_lbn - NDADDR] = 0;
4198}
4199
4200/*
4201 * This routine is called during the completion interrupt
4202 * service routine for a disk write (from the procedure called
4203 * by the device driver to inform the filesystem caches of
4204 * a request completion).  It should be called early in this
4205 * procedure, before the block is made available to other
4206 * processes or other routines are called.
4207 */
4208static void
4209softdep_disk_write_complete(bp)
4210	struct buf *bp;		/* describes the completed disk write */
4211{
4212	struct worklist *wk;
4213	struct worklist *owk;
4214	struct workhead reattach;
4215	struct newblk *newblk;
4216	struct allocindir *aip;
4217	struct allocdirect *adp;
4218	struct indirdep *indirdep;
4219	struct inodedep *inodedep;
4220	struct bmsafemap *bmsafemap;
4221
4222	/*
4223	 * If an error occurred while doing the write, then the data
4224	 * has not hit the disk and the dependencies cannot be unrolled.
4225	 */
4226	if ((bp->b_ioflags & BIO_ERROR) != 0 && (bp->b_flags & B_INVAL) == 0)
4227		return;
4228	LIST_INIT(&reattach);
4229	/*
4230	 * This lock must not be released anywhere in this code segment.
4231	 */
4232	ACQUIRE_LOCK(&lk);
4233	owk = NULL;
4234	while ((wk = LIST_FIRST(&bp->b_dep)) != NULL) {
4235		WORKLIST_REMOVE(wk);
4236		if (wk == owk)
4237			panic("duplicate worklist: %p\n", wk);
4238		owk = wk;
4239		switch (wk->wk_type) {
4240
4241		case D_PAGEDEP:
4242			if (handle_written_filepage(WK_PAGEDEP(wk), bp))
4243				WORKLIST_INSERT(&reattach, wk);
4244			continue;
4245
4246		case D_INODEDEP:
4247			if (handle_written_inodeblock(WK_INODEDEP(wk), bp))
4248				WORKLIST_INSERT(&reattach, wk);
4249			continue;
4250
4251		case D_BMSAFEMAP:
4252			bmsafemap = WK_BMSAFEMAP(wk);
4253			while ((newblk = LIST_FIRST(&bmsafemap->sm_newblkhd))) {
4254				newblk->nb_state |= DEPCOMPLETE;
4255				newblk->nb_bmsafemap = NULL;
4256				LIST_REMOVE(newblk, nb_deps);
4257			}
4258			while ((adp =
4259			   LIST_FIRST(&bmsafemap->sm_allocdirecthd))) {
4260				adp->ad_state |= DEPCOMPLETE;
4261				adp->ad_buf = NULL;
4262				LIST_REMOVE(adp, ad_deps);
4263				handle_allocdirect_partdone(adp);
4264			}
4265			while ((aip =
4266			    LIST_FIRST(&bmsafemap->sm_allocindirhd))) {
4267				aip->ai_state |= DEPCOMPLETE;
4268				aip->ai_buf = NULL;
4269				LIST_REMOVE(aip, ai_deps);
4270				handle_allocindir_partdone(aip);
4271			}
4272			while ((inodedep =
4273			     LIST_FIRST(&bmsafemap->sm_inodedephd)) != NULL) {
4274				inodedep->id_state |= DEPCOMPLETE;
4275				LIST_REMOVE(inodedep, id_deps);
4276				inodedep->id_buf = NULL;
4277			}
4278			WORKITEM_FREE(bmsafemap, D_BMSAFEMAP);
4279			continue;
4280
4281		case D_MKDIR:
4282			handle_written_mkdir(WK_MKDIR(wk), MKDIR_BODY);
4283			continue;
4284
4285		case D_ALLOCDIRECT:
4286			adp = WK_ALLOCDIRECT(wk);
4287			adp->ad_state |= COMPLETE;
4288			handle_allocdirect_partdone(adp);
4289			continue;
4290
4291		case D_ALLOCINDIR:
4292			aip = WK_ALLOCINDIR(wk);
4293			aip->ai_state |= COMPLETE;
4294			handle_allocindir_partdone(aip);
4295			continue;
4296
4297		case D_INDIRDEP:
4298			indirdep = WK_INDIRDEP(wk);
4299			if (indirdep->ir_state & GOINGAWAY)
4300				panic("disk_write_complete: indirdep gone");
4301			bcopy(indirdep->ir_saveddata, bp->b_data, bp->b_bcount);
4302			FREE(indirdep->ir_saveddata, M_INDIRDEP);
4303			indirdep->ir_saveddata = NULL;
4304			indirdep->ir_state &= ~UNDONE;
4305			indirdep->ir_state |= ATTACHED;
4306			while ((aip = LIST_FIRST(&indirdep->ir_donehd)) != 0) {
4307				handle_allocindir_partdone(aip);
4308				if (aip == LIST_FIRST(&indirdep->ir_donehd))
4309					panic("disk_write_complete: not gone");
4310			}
4311			WORKLIST_INSERT(&reattach, wk);
4312			if ((bp->b_flags & B_DELWRI) == 0)
4313				stat_indir_blk_ptrs++;
4314			bdirty(bp);
4315			continue;
4316
4317		default:
4318			panic("handle_disk_write_complete: Unknown type %s",
4319			    TYPENAME(wk->wk_type));
4320			/* NOTREACHED */
4321		}
4322	}
4323	/*
4324	 * Reattach any requests that must be redone.
4325	 */
4326	while ((wk = LIST_FIRST(&reattach)) != NULL) {
4327		WORKLIST_REMOVE(wk);
4328		WORKLIST_INSERT(&bp->b_dep, wk);
4329	}
4330	FREE_LOCK(&lk);
4331}
4332
4333/*
4334 * Called from within softdep_disk_write_complete above. Note that
4335 * this routine is always called from interrupt level with further
4336 * splbio interrupts blocked.
4337 */
4338static void
4339handle_allocdirect_partdone(adp)
4340	struct allocdirect *adp;	/* the completed allocdirect */
4341{
4342	struct allocdirectlst *listhead;
4343	struct allocdirect *listadp;
4344	struct inodedep *inodedep;
4345	long bsize, delay;
4346
4347	if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
4348		return;
4349	if (adp->ad_buf != NULL)
4350		panic("handle_allocdirect_partdone: dangling dep");
4351	/*
4352	 * The on-disk inode cannot claim to be any larger than the last
4353	 * fragment that has been written. Otherwise, the on-disk inode
4354	 * might have fragments that were not the last block in the file
4355	 * which would corrupt the filesystem. Thus, we cannot free any
4356	 * allocdirects after one whose ad_oldblkno claims a fragment as
4357	 * these blocks must be rolled back to zero before writing the inode.
4358	 * We check the currently active set of allocdirects in id_inoupdt
4359	 * or id_extupdt as appropriate.
4360	 */
4361	inodedep = adp->ad_inodedep;
4362	bsize = inodedep->id_fs->fs_bsize;
4363	if (adp->ad_state & EXTDATA)
4364		listhead = &inodedep->id_extupdt;
4365	else
4366		listhead = &inodedep->id_inoupdt;
4367	TAILQ_FOREACH(listadp, listhead, ad_next) {
4368		/* found our block */
4369		if (listadp == adp)
4370			break;
4371		/* continue if the old block is not a fragment */
4372		if (listadp->ad_oldsize == 0 ||
4373		    listadp->ad_oldsize == bsize)
4374			continue;
4375		/* hit a fragment */
4376		return;
4377	}
4378	/*
4379	 * If we have reached the end of the current list without
4380	 * finding the just finished dependency, then it must be
4381	 * on the future dependency list. Future dependencies cannot
4382	 * be freed until they are moved to the current list.
4383	 */
4384	if (listadp == NULL) {
4385#ifdef DEBUG
4386		if (adp->ad_state & EXTDATA)
4387			listhead = &inodedep->id_newextupdt;
4388		else
4389			listhead = &inodedep->id_newinoupdt;
4390		TAILQ_FOREACH(listadp, listhead, ad_next)
4391			/* found our block */
4392			if (listadp == adp)
4393				break;
4394		if (listadp == NULL)
4395			panic("handle_allocdirect_partdone: lost dep");
4396#endif /* DEBUG */
4397		return;
4398	}
4399	/*
4400	 * If we have found the just finished dependency, then free
4401	 * it along with anything that follows it that is complete.
4402	 * If the inode still has a bitmap dependency, then it has
4403	 * never been written to disk, hence the on-disk inode cannot
4404	 * reference the old fragment so we can free it without delay.
4405	 */
4406	delay = (inodedep->id_state & DEPCOMPLETE);
4407	for (; adp; adp = listadp) {
4408		listadp = TAILQ_NEXT(adp, ad_next);
4409		if ((adp->ad_state & ALLCOMPLETE) != ALLCOMPLETE)
4410			return;
4411		free_allocdirect(listhead, adp, delay);
4412	}
4413}
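
/*
 * An example of the scan above: if id_inoupdt holds allocdirects for
 * lbns 0, 1 and 2 and lbn 1 rolls back to a fragment, then completion
 * of lbn 2 frees nothing -- the walk from the list head reaches the
 * fragment at lbn 1 before finding lbn 2 and returns, because that
 * rollback must reach the disk before anything beyond it can be freed.
 */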
4414
4415/*
4416 * Called from within softdep_disk_write_complete above. Note that
4417 * this routine is always called from interrupt level with further
4418 * splbio interrupts blocked.
4419 */
4420static void
4421handle_allocindir_partdone(aip)
4422	struct allocindir *aip;		/* the completed allocindir */
4423{
4424	struct indirdep *indirdep;
4425
4426	if ((aip->ai_state & ALLCOMPLETE) != ALLCOMPLETE)
4427		return;
4428	if (aip->ai_buf != NULL)
4429		panic("handle_allocindir_partdone: dangling dependency");
4430	indirdep = aip->ai_indirdep;
4431	if (indirdep->ir_state & UNDONE) {
4432		LIST_REMOVE(aip, ai_next);
4433		LIST_INSERT_HEAD(&indirdep->ir_donehd, aip, ai_next);
4434		return;
4435	}
4436	if (indirdep->ir_state & UFS1FMT)
4437		((ufs1_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
4438		    aip->ai_newblkno;
4439	else
4440		((ufs2_daddr_t *)indirdep->ir_savebp->b_data)[aip->ai_offset] =
4441		    aip->ai_newblkno;
4442	LIST_REMOVE(aip, ai_next);
4443	if (aip->ai_freefrag != NULL)
4444		add_to_worklist(&aip->ai_freefrag->ff_list);
4445	WORKITEM_FREE(aip, D_ALLOCINDIR);
4446}
4447
4448/*
4449 * Called from within softdep_disk_write_complete above to restore
4450 * in-memory inode block contents to their most up-to-date state. Note
4451 * that this routine is always called from interrupt level with further
4452 * splbio interrupts blocked.
4453 */
4454static int
4455handle_written_inodeblock(inodedep, bp)
4456	struct inodedep *inodedep;
4457	struct buf *bp;		/* buffer containing the inode block */
4458{
4459	struct worklist *wk, *filefree;
4460	struct allocdirect *adp, *nextadp;
4461	struct ufs1_dinode *dp1 = NULL;
4462	struct ufs2_dinode *dp2 = NULL;
4463	int hadchanges, fstype;
4464
4465	if ((inodedep->id_state & IOSTARTED) == 0)
4466		panic("handle_written_inodeblock: not started");
4467	inodedep->id_state &= ~IOSTARTED;
4468	if (inodedep->id_fs->fs_magic == FS_UFS1_MAGIC) {
4469		fstype = UFS1;
4470		dp1 = (struct ufs1_dinode *)bp->b_data +
4471		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
4472	} else {
4473		fstype = UFS2;
4474		dp2 = (struct ufs2_dinode *)bp->b_data +
4475		    ino_to_fsbo(inodedep->id_fs, inodedep->id_ino);
4476	}
4477	/*
4478	 * If we had to rollback the inode allocation because of
4479	 * bitmaps being incomplete, then simply restore it.
4480	 * Keep the block dirty so that it will not be reclaimed until
4481	 * all associated dependencies have been cleared and the
4482	 * corresponding updates written to disk.
4483	 */
4484	if (inodedep->id_savedino1 != NULL) {
4485		if (fstype == UFS1)
4486			*dp1 = *inodedep->id_savedino1;
4487		else
4488			*dp2 = *inodedep->id_savedino2;
4489		FREE(inodedep->id_savedino1, M_SAVEDINO);
4490		inodedep->id_savedino1 = NULL;
4491		if ((bp->b_flags & B_DELWRI) == 0)
4492			stat_inode_bitmap++;
4493		bdirty(bp);
4494		return (1);
4495	}
4496	inodedep->id_state |= COMPLETE;
4497	/*
4498	 * Roll forward anything that had to be rolled back before
4499	 * the inode could be updated.
4500	 */
4501	hadchanges = 0;
4502	for (adp = TAILQ_FIRST(&inodedep->id_inoupdt); adp; adp = nextadp) {
4503		nextadp = TAILQ_NEXT(adp, ad_next);
4504		if (adp->ad_state & ATTACHED)
4505			panic("handle_written_inodeblock: new entry");
4506		if (fstype == UFS1) {
4507			if (adp->ad_lbn < NDADDR) {
4508				if (dp1->di_db[adp->ad_lbn]!=adp->ad_oldblkno)
4509					panic("%s %s #%jd mismatch %d != %jd",
4510					    "handle_written_inodeblock:",
4511					    "direct pointer",
4512					    (intmax_t)adp->ad_lbn,
4513					    dp1->di_db[adp->ad_lbn],
4514					    (intmax_t)adp->ad_oldblkno);
4515				dp1->di_db[adp->ad_lbn] = adp->ad_newblkno;
4516			} else {
4517				if (dp1->di_ib[adp->ad_lbn - NDADDR] != 0)
4518					panic("%s: %s #%jd allocated as %d",
4519					    "handle_written_inodeblock",
4520					    "indirect pointer",
4521					    (intmax_t)adp->ad_lbn - NDADDR,
4522					    dp1->di_ib[adp->ad_lbn - NDADDR]);
4523				dp1->di_ib[adp->ad_lbn - NDADDR] =
4524				    adp->ad_newblkno;
4525			}
4526		} else {
4527			if (adp->ad_lbn < NDADDR) {
4528				if (dp2->di_db[adp->ad_lbn]!=adp->ad_oldblkno)
4529					panic("%s: %s #%jd %s %jd != %jd",
4530					    "handle_written_inodeblock",
4531					    "direct pointer",
4532					    (intmax_t)adp->ad_lbn, "mismatch",
4533					    (intmax_t)dp2->di_db[adp->ad_lbn],
4534					    (intmax_t)adp->ad_oldblkno);
4535				dp2->di_db[adp->ad_lbn] = adp->ad_newblkno;
4536			} else {
4537				if (dp2->di_ib[adp->ad_lbn - NDADDR] != 0)
4538					panic("%s: %s #%jd allocated as %jd",
4539					    "handle_written_inodeblock",
4540					    "indirect pointer",
4541					    (intmax_t)adp->ad_lbn - NDADDR,
4542					    (intmax_t)
4543					    dp2->di_ib[adp->ad_lbn - NDADDR]);
4544				dp2->di_ib[adp->ad_lbn - NDADDR] =
4545				    adp->ad_newblkno;
4546			}
4547		}
4548		adp->ad_state &= ~UNDONE;
4549		adp->ad_state |= ATTACHED;
4550		hadchanges = 1;
4551	}
4552	for (adp = TAILQ_FIRST(&inodedep->id_extupdt); adp; adp = nextadp) {
4553		nextadp = TAILQ_NEXT(adp, ad_next);
4554		if (adp->ad_state & ATTACHED)
4555			panic("handle_written_inodeblock: new entry");
4556		if (dp2->di_extb[adp->ad_lbn] != adp->ad_oldblkno)
4557			panic("%s: direct pointers #%jd %s %jd != %jd",
4558			    "handle_written_inodeblock",
4559			    (intmax_t)adp->ad_lbn, "mismatch",
4560			    (intmax_t)dp2->di_extb[adp->ad_lbn],
4561			    (intmax_t)adp->ad_oldblkno);
4562		dp2->di_extb[adp->ad_lbn] = adp->ad_newblkno;
4563		adp->ad_state &= ~UNDONE;
4564		adp->ad_state |= ATTACHED;
4565		hadchanges = 1;
4566	}
4567	if (hadchanges && (bp->b_flags & B_DELWRI) == 0)
4568		stat_direct_blk_ptrs++;
4569	/*
4570	 * Reset the file size to its most up-to-date value.
4571	 */
4572	if (inodedep->id_savedsize == -1 || inodedep->id_savedextsize == -1)
4573		panic("handle_written_inodeblock: bad size");
4574	if (fstype == UFS1) {
4575		if (dp1->di_size != inodedep->id_savedsize) {
4576			dp1->di_size = inodedep->id_savedsize;
4577			hadchanges = 1;
4578		}
4579	} else {
4580		if (dp2->di_size != inodedep->id_savedsize) {
4581			dp2->di_size = inodedep->id_savedsize;
4582			hadchanges = 1;
4583		}
4584		if (dp2->di_extsize != inodedep->id_savedextsize) {
4585			dp2->di_extsize = inodedep->id_savedextsize;
4586			hadchanges = 1;
4587		}
4588	}
4589	inodedep->id_savedsize = -1;
4590	inodedep->id_savedextsize = -1;
4591	/*
4592	 * If there were any rollbacks in the inode block, then it must be
4593	 * marked dirty so that it will eventually get written back in
4594	 * its correct form.
4595	 */
4596	if (hadchanges)
4597		bdirty(bp);
4598	/*
4599	 * Process any allocdirects that completed during the update.
4600	 */
4601	if ((adp = TAILQ_FIRST(&inodedep->id_inoupdt)) != NULL)
4602		handle_allocdirect_partdone(adp);
4603	if ((adp = TAILQ_FIRST(&inodedep->id_extupdt)) != NULL)
4604		handle_allocdirect_partdone(adp);
4605	/*
4606	 * Process deallocations that were held pending until the
4607	 * inode had been written to disk. Freeing of the inode
4608	 * is delayed until after all blocks have been freed to
4609	 * avoid creation of new <vfsid, inum, lbn> triples
4610	 * before the old ones have been deleted.
4611	 */
4612	filefree = NULL;
4613	while ((wk = LIST_FIRST(&inodedep->id_bufwait)) != NULL) {
4614		WORKLIST_REMOVE(wk);
4615		switch (wk->wk_type) {
4616
4617		case D_FREEFILE:
4618			/*
4619			 * We defer adding filefree to the worklist until
4620			 * all other additions have been made to ensure
4621			 * that it will be done after all the old blocks
4622			 * have been freed.
4623			 */
4624			if (filefree != NULL)
4625				panic("handle_written_inodeblock: filefree");
4626			filefree = wk;
4627			continue;
4628
4629		case D_MKDIR:
4630			handle_written_mkdir(WK_MKDIR(wk), MKDIR_PARENT);
4631			continue;
4632
4633		case D_DIRADD:
4634			diradd_inode_written(WK_DIRADD(wk), inodedep);
4635			continue;
4636
4637		case D_FREEBLKS:
4638			wk->wk_state |= COMPLETE;
4639			if ((wk->wk_state & ALLCOMPLETE) != ALLCOMPLETE)
4640				continue;
4641			 /* -- fall through -- */
4642		case D_FREEFRAG:
4643		case D_DIRREM:
4644			add_to_worklist(wk);
4645			continue;
4646
4647		case D_NEWDIRBLK:
4648			free_newdirblk(WK_NEWDIRBLK(wk));
4649			continue;
4650
4651		default:
4652			panic("handle_written_inodeblock: Unknown type %s",
4653			    TYPENAME(wk->wk_type));
4654			/* NOTREACHED */
4655		}
4656	}
4657	if (filefree != NULL) {
4658		if (free_inodedep(inodedep) == 0)
4659			panic("handle_written_inodeblock: live inodedep");
4660		add_to_worklist(filefree);
4661		return (0);
4662	}
4663
4664	/*
4665	 * If no outstanding dependencies, free it.
4666	 */
4667	if (free_inodedep(inodedep) ||
4668	    (TAILQ_FIRST(&inodedep->id_inoupdt) == 0 &&
4669	     TAILQ_FIRST(&inodedep->id_extupdt) == 0))
4670		return (0);
4671	return (hadchanges);
4672}
4673
4674/*
4675 * Process a diradd entry after its dependent inode has been written.
4676 * This routine must be called with splbio interrupts blocked.
4677 */
4678static void
4679diradd_inode_written(dap, inodedep)
4680	struct diradd *dap;
4681	struct inodedep *inodedep;
4682{
4683	struct pagedep *pagedep;
4684
4685	dap->da_state |= COMPLETE;
4686	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
4687		if (dap->da_state & DIRCHG)
4688			pagedep = dap->da_previous->dm_pagedep;
4689		else
4690			pagedep = dap->da_pagedep;
4691		LIST_REMOVE(dap, da_pdlist);
4692		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
4693	}
4694	WORKLIST_INSERT(&inodedep->id_pendinghd, &dap->da_list);
4695}
4696
4697/*
4698 * Handle the completion of a mkdir dependency.
4699 */
4700static void
4701handle_written_mkdir(mkdir, type)
4702	struct mkdir *mkdir;
4703	int type;
4704{
4705	struct diradd *dap;
4706	struct pagedep *pagedep;
4707
4708	if (mkdir->md_state != type)
4709		panic("handle_written_mkdir: bad type");
4710	dap = mkdir->md_diradd;
4711	dap->da_state &= ~type;
4712	if ((dap->da_state & (MKDIR_PARENT | MKDIR_BODY)) == 0)
4713		dap->da_state |= DEPCOMPLETE;
4714	if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
4715		if (dap->da_state & DIRCHG)
4716			pagedep = dap->da_previous->dm_pagedep;
4717		else
4718			pagedep = dap->da_pagedep;
4719		LIST_REMOVE(dap, da_pdlist);
4720		LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap, da_pdlist);
4721	}
4722	LIST_REMOVE(mkdir, md_mkdirs);
4723	WORKITEM_FREE(mkdir, D_MKDIR);
4724}
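
/*
 * A state sketch for the mkdir dependency above: a new directory's
 * diradd starts with both MKDIR_PARENT (the parent inode must be
 * written) and MKDIR_BODY (the block holding "." and ".." must be
 * written) set.  Each completed write clears its flag here, and only
 * when both are gone does the diradd gain DEPCOMPLETE and become a
 * candidate for ALLCOMPLETE processing.
 */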
4725
4726/*
4727 * Called from within softdep_disk_write_complete above.
4728 * A write operation was just completed. Removed inodes can
4729 * now be freed and associated block pointers may be committed.
4730 * Note that this routine is always called from interrupt level
4731 * with further splbio interrupts blocked.
4732 */
4733static int
4734handle_written_filepage(pagedep, bp)
4735	struct pagedep *pagedep;
4736	struct buf *bp;		/* buffer containing the written page */
4737{
4738	struct dirrem *dirrem;
4739	struct diradd *dap, *nextdap;
4740	struct direct *ep;
4741	int i, chgs;
4742
4743	if ((pagedep->pd_state & IOSTARTED) == 0)
4744		panic("handle_written_filepage: not started");
4745	pagedep->pd_state &= ~IOSTARTED;
4746	/*
4747	 * Process any directory removals that have been committed.
4748	 */
4749	while ((dirrem = LIST_FIRST(&pagedep->pd_dirremhd)) != NULL) {
4750		LIST_REMOVE(dirrem, dm_next);
4751		dirrem->dm_dirinum = pagedep->pd_ino;
4752		add_to_worklist(&dirrem->dm_list);
4753	}
4754	/*
4755	 * Free any directory additions that have been committed.
4756	 * If it is a newly allocated block, we have to wait until
4757	 * the on-disk directory inode claims the new block.
4758	 */
4759	if ((pagedep->pd_state & NEWBLOCK) == 0)
4760		while ((dap = LIST_FIRST(&pagedep->pd_pendinghd)) != NULL)
4761			free_diradd(dap);
4762	/*
4763	 * Uncommitted directory entries must be restored.
4764	 */
4765	for (chgs = 0, i = 0; i < DAHASHSZ; i++) {
4766		for (dap = LIST_FIRST(&pagedep->pd_diraddhd[i]); dap;
4767		     dap = nextdap) {
4768			nextdap = LIST_NEXT(dap, da_pdlist);
4769			if (dap->da_state & ATTACHED)
4770				panic("handle_written_filepage: attached");
4771			ep = (struct direct *)
4772			    ((char *)bp->b_data + dap->da_offset);
4773			ep->d_ino = dap->da_newinum;
4774			dap->da_state &= ~UNDONE;
4775			dap->da_state |= ATTACHED;
4776			chgs = 1;
4777			/*
4778			 * If the inode referenced by the directory has
4779			 * been written out, then the dependency can be
4780			 * moved to the pending list.
4781			 */
4782			if ((dap->da_state & ALLCOMPLETE) == ALLCOMPLETE) {
4783				LIST_REMOVE(dap, da_pdlist);
4784				LIST_INSERT_HEAD(&pagedep->pd_pendinghd, dap,
4785				    da_pdlist);
4786			}
4787		}
4788	}
4789	/*
4790	 * If there were any rollbacks in the directory, then it must be
4791	 * marked dirty so that it will eventually get written back in
4792	 * its correct form.
4793	 */
4794	if (chgs) {
4795		if ((bp->b_flags & B_DELWRI) == 0)
4796			stat_dir_entry++;
4797		bdirty(bp);
4798		return (1);
4799	}
4800	/*
4801	 * If we are not waiting for a new directory block to be
4802	 * claimed by its inode, then the pagedep will be freed.
4803	 * Otherwise it will remain to track any new entries on
4804	 * the page in case they are fsync'ed.
4805	 */
4806	if ((pagedep->pd_state & NEWBLOCK) == 0) {
4807		LIST_REMOVE(pagedep, pd_hash);
4808		WORKITEM_FREE(pagedep, D_PAGEDEP);
4809	}
4810	return (0);
4811}
4812
4813/*
4814 * Writing back in-core inode structures.
4815 *
4816 * The filesystem only accesses an inode's contents when it occupies an
4817 * "in-core" inode structure.  These "in-core" structures are separate from
4818 * the page frames used to cache inode blocks.  Only the latter are
4819 * transferred to/from the disk.  So, when the updated contents of the
4820 * "in-core" inode structure are copied to the corresponding in-memory inode
4821 * block, the dependencies are also transferred.  The following procedure is
4822 * called when copying a dirty "in-core" inode to a cached inode block.
4823 */
4824
4825/*
4826 * Called when an inode is loaded from disk. If the effective link count
4827 * differed from the actual link count when it was last flushed, then we
4828 * need to ensure that the correct effective link count is put back.
4829 */
4830void
4831softdep_load_inodeblock(ip)
4832	struct inode *ip;	/* the "in_core" copy of the inode */
4833{
4834	struct inodedep *inodedep;
4835
4836	/*
4837	 * Check for alternate nlink count.
4838	 */
4839	ip->i_effnlink = ip->i_nlink;
4840	ACQUIRE_LOCK(&lk);
4841	if (inodedep_lookup(UFSTOVFS(ip->i_ump),
4842	    ip->i_number, 0, &inodedep) == 0) {
4843		FREE_LOCK(&lk);
4844		return;
4845	}
4846	ip->i_effnlink -= inodedep->id_nlinkdelta;
4847	if (inodedep->id_state & SPACECOUNTED)
4848		ip->i_flag |= IN_SPACECOUNTED;
4849	FREE_LOCK(&lk);
4850}
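
/*
 * Worked example for the adjustment above: an inode whose on-disk link
 * count (i_nlink) is 2, with one unlink not yet committed, carries an
 * id_nlinkdelta of 1, so i_effnlink is restored to 1 here.  The reverse
 * invariant is checked in softdep_update_inodeblock() below, which
 * requires id_nlinkdelta == i_nlink - i_effnlink.
 */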
4851
4852/*
4853 * This routine is called just before the "in-core" inode
4854 * information is to be copied to the in-memory inode block.
4855 * Recall that an inode block contains several inodes. If
4856 * the force flag is set, then the dependencies will be
4857 * cleared so that the update can always be made. Note that
4858 * the buffer is locked when this routine is called, so we
4859 * will never be in the middle of writing the inode block
4860 * to disk.
4861 */
4862void
4863softdep_update_inodeblock(ip, bp, waitfor)
4864	struct inode *ip;	/* the "in_core" copy of the inode */
4865	struct buf *bp;		/* the buffer containing the inode block */
4866	int waitfor;		/* nonzero => update must be allowed */
4867{
4868	struct inodedep *inodedep;
4869	struct worklist *wk;
4870	struct mount *mp;
4871	struct buf *ibp;
4872	int error;
4873
4874	/*
4875	 * If the effective link count is not equal to the actual link
4876	 * count, then we must track the difference in an inodedep while
4877	 * the inode is (potentially) tossed out of the cache. Otherwise,
4878	 * if there is no existing inodedep, then there are no dependencies
4879	 * to track.
4880	 */
4881	mp = UFSTOVFS(ip->i_ump);
4882	ACQUIRE_LOCK(&lk);
4883	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
4884		FREE_LOCK(&lk);
4885		if (ip->i_effnlink != ip->i_nlink)
4886			panic("softdep_update_inodeblock: bad link count");
4887		return;
4888	}
4889	if (inodedep->id_nlinkdelta != ip->i_nlink - ip->i_effnlink)
4890		panic("softdep_update_inodeblock: bad delta");
4891	/*
4892	 * Changes have been initiated. Anything depending on these
4893	 * changes cannot occur until this inode has been written.
4894	 */
4895	inodedep->id_state &= ~COMPLETE;
4896	if ((inodedep->id_state & ONWORKLIST) == 0)
4897		WORKLIST_INSERT(&bp->b_dep, &inodedep->id_list);
4898	/*
4899	 * Any new dependencies associated with the incore inode must
4900	 * now be moved to the list associated with the buffer holding
4901	 * the in-memory copy of the inode. Once merged, process any
4902	 * allocdirects that are completed by the merge.
4903	 */
4904	merge_inode_lists(&inodedep->id_newinoupdt, &inodedep->id_inoupdt);
4905	if (TAILQ_FIRST(&inodedep->id_inoupdt) != NULL)
4906		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_inoupdt));
4907	merge_inode_lists(&inodedep->id_newextupdt, &inodedep->id_extupdt);
4908	if (TAILQ_FIRST(&inodedep->id_extupdt) != NULL)
4909		handle_allocdirect_partdone(TAILQ_FIRST(&inodedep->id_extupdt));
4910	/*
4911	 * Now that the inode has been pushed into the buffer, the
4912	 * operations dependent on the inode being written to disk
4913	 * can be moved to the id_bufwait so that they will be
4914	 * processed when the buffer I/O completes.
4915	 */
4916	while ((wk = LIST_FIRST(&inodedep->id_inowait)) != NULL) {
4917		WORKLIST_REMOVE(wk);
4918		WORKLIST_INSERT(&inodedep->id_bufwait, wk);
4919	}
4920	/*
4921	 * Newly allocated inodes cannot be written until the bitmap
4922	 * that allocates them has been written (indicated by
4923	 * DEPCOMPLETE being set in id_state). If we are doing a
4924	 * forced sync (e.g., an fsync on a file), we force the bitmap
4925	 * to be written so that the update can be done.
4926	 */
4927	if (waitfor == 0) {
4928		FREE_LOCK(&lk);
4929		return;
4930	}
4931retry:
4932	if ((inodedep->id_state & DEPCOMPLETE) != 0) {
4933		FREE_LOCK(&lk);
4934		return;
4935	}
4936	ibp = inodedep->id_buf;
4937	ibp = getdirtybuf(ibp, &lk, MNT_WAIT);
4938	if (ibp == NULL) {
4939		/*
4940		 * If ibp came back as NULL, the dependency could have been
4941		 * freed while we slept.  Look it up again, and check to see
4942		 * that it has completed.
4943		 */
4944		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) != 0)
4945			goto retry;
4946		FREE_LOCK(&lk);
4947		return;
4948	}
4949	FREE_LOCK(&lk);
4950	if ((error = bwrite(ibp)) != 0)
4951		softdep_error("softdep_update_inodeblock: bwrite", error);
4952}
4953
4954/*
4955 * Merge a new inode dependency list (such as id_newinoupdt) into an
4956 * old inode dependency list (such as id_inoupdt). This routine must be
4957 * called with splbio interrupts blocked.
4958 */
4959static void
4960merge_inode_lists(newlisthead, oldlisthead)
4961	struct allocdirectlst *newlisthead;
4962	struct allocdirectlst *oldlisthead;
4963{
4964	struct allocdirect *listadp, *newadp;
4965
4966	newadp = TAILQ_FIRST(newlisthead);
4967	for (listadp = TAILQ_FIRST(oldlisthead); listadp && newadp;) {
4968		if (listadp->ad_lbn < newadp->ad_lbn) {
4969			listadp = TAILQ_NEXT(listadp, ad_next);
4970			continue;
4971		}
4972		TAILQ_REMOVE(newlisthead, newadp, ad_next);
4973		TAILQ_INSERT_BEFORE(listadp, newadp, ad_next);
4974		if (listadp->ad_lbn == newadp->ad_lbn) {
4975			allocdirect_merge(oldlisthead, newadp,
4976			    listadp);
4977			listadp = newadp;
4978		}
4979		newadp = TAILQ_FIRST(newlisthead);
4980	}
4981	while ((newadp = TAILQ_FIRST(newlisthead)) != NULL) {
4982		TAILQ_REMOVE(newlisthead, newadp, ad_next);
4983		TAILQ_INSERT_TAIL(oldlisthead, newadp, ad_next);
4984	}
4985}
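
/*
 * Worked example for the merge above: with the old list holding
 * allocdirects for logical blocks { 0, 2, 5 } and the new list holding
 * { 1, 2 }, the first loop inserts lbn 1 before lbn 2, then detects the
 * duplicate lbn 2 and hands it to allocdirect_merge(), leaving
 * { 0, 1, 2, 5 }.  New entries beyond the largest old lbn would be
 * appended by the trailing while loop.
 */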
4986
4987/*
4988 * If we are doing an fsync, then we must ensure that any directory
4989 * entries for the inode have been written after the inode gets to disk.
4990 */
4991int
4992softdep_fsync(vp)
4993	struct vnode *vp;	/* the "in_core" copy of the inode */
4994{
4995	struct inodedep *inodedep;
4996	struct pagedep *pagedep;
4997	struct worklist *wk;
4998	struct diradd *dap;
4999	struct mount *mp;
5000	struct vnode *pvp;
5001	struct inode *ip;
5002	struct buf *bp;
5003	struct fs *fs;
5004	struct thread *td = curthread;
5005	int error, flushparent;
5006	ino_t parentino;
5007	ufs_lbn_t lbn;
5008
5009	ip = VTOI(vp);
5010	fs = ip->i_fs;
5011	mp = vp->v_mount;
5012	ACQUIRE_LOCK(&lk);
5013	if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0) {
5014		FREE_LOCK(&lk);
5015		return (0);
5016	}
5017	if (LIST_FIRST(&inodedep->id_inowait) != NULL ||
5018	    LIST_FIRST(&inodedep->id_bufwait) != NULL ||
5019	    TAILQ_FIRST(&inodedep->id_extupdt) != NULL ||
5020	    TAILQ_FIRST(&inodedep->id_newextupdt) != NULL ||
5021	    TAILQ_FIRST(&inodedep->id_inoupdt) != NULL ||
5022	    TAILQ_FIRST(&inodedep->id_newinoupdt) != NULL)
5023		panic("softdep_fsync: pending ops");
5024	for (error = 0, flushparent = 0; ; ) {
5025		if ((wk = LIST_FIRST(&inodedep->id_pendinghd)) == NULL)
5026			break;
5027		if (wk->wk_type != D_DIRADD)
5028			panic("softdep_fsync: Unexpected type %s",
5029			    TYPENAME(wk->wk_type));
5030		dap = WK_DIRADD(wk);
5031		/*
5032		 * Flush our parent if this directory entry has a MKDIR_PARENT
5033		 * dependency or is contained in a newly allocated block.
5034		 */
5035		if (dap->da_state & DIRCHG)
5036			pagedep = dap->da_previous->dm_pagedep;
5037		else
5038			pagedep = dap->da_pagedep;
5039		parentino = pagedep->pd_ino;
5040		lbn = pagedep->pd_lbn;
5041		if ((dap->da_state & (MKDIR_BODY | COMPLETE)) != COMPLETE)
5042			panic("softdep_fsync: dirty");
5043		if ((dap->da_state & MKDIR_PARENT) ||
5044		    (pagedep->pd_state & NEWBLOCK))
5045			flushparent = 1;
5046		else
5047			flushparent = 0;
5048		/*
5049		 * If we are being fsync'ed as part of vgone'ing this vnode,
5050		 * then we will not be able to release and recover the
5051		 * vnode below, so we just have to give up on writing its
5052		 * directory entry out. It will eventually be written, just
5053		 * not now, but then the user was not asking to have it
5054		 * written, so we are not breaking any promises.
5055		 */
5056		if (vp->v_iflag & VI_DOOMED)
5057			break;
5058		/*
5059		 * We prevent deadlock by always fetching inodes from the
5060		 * root, moving down the directory tree. Thus, when fetching
5061		 * our parent directory, we first try to get the lock. If
5062		 * that fails, we must unlock ourselves before requesting
5063		 * the lock on our parent. See the comment in ufs_lookup
5064		 * for details on possible races.
5065		 */
5066		FREE_LOCK(&lk);
5067		if (ffs_vget(mp, parentino, LK_NOWAIT | LK_EXCLUSIVE, &pvp)) {
5068			VOP_UNLOCK(vp, 0, td);
5069			error = ffs_vget(mp, parentino, LK_EXCLUSIVE, &pvp);
5070			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, td);
5071			if (error != 0)
5072				return (error);
5073		}
5074		/*
5075		 * All MKDIR_PARENT dependencies and all the NEWBLOCK pagedeps
5076		 * that are contained in direct blocks will be resolved by
5077	 * doing an ffs_update. Pagedeps contained in indirect blocks
5078		 * may require a complete sync'ing of the directory. So, we
5079		 * try the cheap and fast ffs_update first, and if that fails,
5080		 * then we do the slower ffs_syncvnode of the directory.
5081		 */
5082		if (flushparent) {
5083			if ((error = ffs_update(pvp, 1)) != 0) {
5084				vput(pvp);
5085				return (error);
5086			}
5087			if ((pagedep->pd_state & NEWBLOCK) &&
5088			    (error = ffs_syncvnode(pvp, MNT_WAIT))) {
5089				vput(pvp);
5090				return (error);
5091			}
5092		}
5093		/*
5094		 * Flush directory page containing the inode's name.
5095		 */
5096		error = bread(pvp, lbn, blksize(fs, VTOI(pvp), lbn), td->td_ucred,
5097		    &bp);
5098		if (error == 0)
5099			error = bwrite(bp);
5100		else
5101			brelse(bp);
5102		vput(pvp);
5103		if (error != 0)
5104			return (error);
5105		ACQUIRE_LOCK(&lk);
5106		if (inodedep_lookup(mp, ip->i_number, 0, &inodedep) == 0)
5107			break;
5108	}
5109	FREE_LOCK(&lk);
5110	return (0);
5111}
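
/*
 * A hedged sketch of the expected caller (ffs_fsync() in ffs_vnops.c,
 * paraphrased from memory rather than quoted): the vnode's own dirty
 * blocks are flushed first, and only then is softdep_fsync() asked to
 * push out the directory entries that name the inode.
 */
#if 0
	error = ffs_syncvnode(vp, waitfor);
	if (error == 0 && waitfor == MNT_WAIT && DOINGSOFTDEP(vp))
		error = softdep_fsync(vp);
#endif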
5112
5113/*
5114 * Flush all the dirty bitmaps associated with the block device
5115 * before flushing the rest of the dirty blocks so as to reduce
5116 * the number of dependencies that will have to be rolled back.
5117 */
5118void
5119softdep_fsync_mountdev(vp)
5120	struct vnode *vp;
5121{
5122	struct buf *bp, *nbp;
5123	struct worklist *wk;
5124
5125	if (!vn_isdisk(vp, NULL))
5126		panic("softdep_fsync_mountdev: vnode not a disk");
5127restart:
5128	ACQUIRE_LOCK(&lk);
5129	VI_LOCK(vp);
5130	TAILQ_FOREACH_SAFE(bp, &vp->v_bufobj.bo_dirty.bv_hd, b_bobufs, nbp) {
5131		/*
5132		 * If it is already scheduled, skip to the next buffer.
5133		 */
5134		if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL))
5135			continue;
5136
5137		if ((bp->b_flags & B_DELWRI) == 0)
5138			panic("softdep_fsync_mountdev: not dirty");
5139		/*
5140		 * We are only interested in bitmaps with outstanding
5141		 * dependencies.
5142		 */
5143		if ((wk = LIST_FIRST(&bp->b_dep)) == NULL ||
5144		    wk->wk_type != D_BMSAFEMAP ||
5145		    (bp->b_vflags & BV_BKGRDINPROG)) {
5146			BUF_UNLOCK(bp);
5147			continue;
5148		}
5149		VI_UNLOCK(vp);
5150		FREE_LOCK(&lk);
5151		bremfree(bp);
5152		(void) bawrite(bp);
5153		goto restart;
5154	}
5155	FREE_LOCK(&lk);
5156	drain_output(vp);
5157	VI_UNLOCK(vp);
5158}
5159
5160/*
5161 * This routine is called when we are trying to synchronously flush a
5162 * file. This routine must eliminate any filesystem metadata dependencies
5163 * so that the syncing routine can succeed by pushing the dirty blocks
5164 * associated with the file. If any I/O errors occur, they are returned.
5165 */
5166int
5167softdep_sync_metadata(struct vnode *vp)
5168{
5169	struct pagedep *pagedep;
5170	struct allocdirect *adp;
5171	struct allocindir *aip;
5172	struct buf *bp, *nbp;
5173	struct worklist *wk;
5174	int i, error, waitfor;
5175
5176	if (!DOINGSOFTDEP(vp))
5177		return (0);
5178	/*
5179	 * Ensure that any direct block dependencies have been cleared.
5180	 */
5181	ACQUIRE_LOCK(&lk);
5182	if ((error = flush_inodedep_deps(vp->v_mount, VTOI(vp)->i_number))) {
5183		FREE_LOCK(&lk);
5184		return (error);
5185	}
5186	FREE_LOCK(&lk);
5187	/*
5188	 * For most files, the only metadata dependencies are the
5189	 * cylinder group maps that allocate their inode or blocks.
5190	 * The block allocation dependencies can be found by traversing
5191	 * the dependency lists for any buffers that remain on their
5192	 * dirty buffer list. The inode allocation dependency will
5193	 * be resolved when the inode is updated with MNT_WAIT.
5194	 * This work is done in two passes. The first pass grabs most
5195	 * of the buffers and begins asynchronously writing them. The
5196	 * only way to wait for these asynchronous writes is to sleep
5197	 * on the filesystem vnode which may stay busy for a long time
5198	 * if the filesystem is active. So, instead, we make a second
5199	 * pass over the dependencies blocking on each write. In the
5200	 * usual case we will be blocking against a write that we
5201	 * initiated, so when it is done the dependency will have been
5202	 * resolved. Thus the second pass is expected to end quickly.
5203	 */
5204	waitfor = MNT_NOWAIT;
5205
5206top:
5207	/*
5208	 * We must wait for any I/O in progress to finish so that
5209	 * all potential buffers on the dirty list will be visible.
5210	 */
5211	VI_LOCK(vp);
5212	drain_output(vp);
5213	while ((bp = TAILQ_FIRST(&vp->v_bufobj.bo_dirty.bv_hd)) != NULL) {
5214		bp = getdirtybuf(bp, VI_MTX(vp), MNT_WAIT);
5215		if (bp)
5216			break;
5217	}
5218	VI_UNLOCK(vp);
5219	if (bp == NULL)
5220		return (0);
5221loop:
5222	/* While syncing snapshots, we must allow recursive lookups */
5223	bp->b_lock.lk_flags |= LK_CANRECURSE;
5224	ACQUIRE_LOCK(&lk);
5225	/*
5226	 * As we hold the buffer locked, none of its dependencies
5227	 * will disappear.
5228	 */
5229	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5230		switch (wk->wk_type) {
5231
5232		case D_ALLOCDIRECT:
5233			adp = WK_ALLOCDIRECT(wk);
5234			if (adp->ad_state & DEPCOMPLETE)
5235				continue;
5236			nbp = adp->ad_buf;
5237			nbp = getdirtybuf(nbp, &lk, waitfor);
5238			if (nbp == NULL)
5239				continue;
5240			FREE_LOCK(&lk);
5241			if (waitfor == MNT_NOWAIT) {
5242				bawrite(nbp);
5243			} else if ((error = bwrite(nbp)) != 0) {
5244				break;
5245			}
5246			ACQUIRE_LOCK(&lk);
5247			continue;
5248
5249		case D_ALLOCINDIR:
5250			aip = WK_ALLOCINDIR(wk);
5251			if (aip->ai_state & DEPCOMPLETE)
5252				continue;
5253			nbp = aip->ai_buf;
5254			nbp = getdirtybuf(nbp, &lk, waitfor);
5255			if (nbp == NULL)
5256				continue;
5257			FREE_LOCK(&lk);
5258			if (waitfor == MNT_NOWAIT) {
5259				bawrite(nbp);
5260			} else if ((error = bwrite(nbp)) != 0) {
5261				break;
5262			}
5263			ACQUIRE_LOCK(&lk);
5264			continue;
5265
5266		case D_INDIRDEP:
5267		restart:
5268
5269			LIST_FOREACH(aip, &WK_INDIRDEP(wk)->ir_deplisthd, ai_next) {
5270				if (aip->ai_state & DEPCOMPLETE)
5271					continue;
5272				nbp = aip->ai_buf;
5273				nbp = getdirtybuf(nbp, &lk, MNT_WAIT);
5274				if (nbp == NULL)
5275					goto restart;
5276				FREE_LOCK(&lk);
5277				if ((error = bwrite(nbp)) != 0) {
5278					goto loop_end;
5279				}
5280				ACQUIRE_LOCK(&lk);
5281				goto restart;
5282			}
5283			continue;
5284
5285		case D_INODEDEP:
5286			if ((error = flush_inodedep_deps(wk->wk_mp,
5287			    WK_INODEDEP(wk)->id_ino)) != 0) {
5288				FREE_LOCK(&lk);
5289				break;
5290			}
5291			continue;
5292
5293		case D_PAGEDEP:
5294			/*
5295			 * We are trying to sync a directory that may
5296			 * have dependencies on both its own metadata
5297			 * and/or dependencies on the inodes of any
5298			 * recently allocated files. We walk its diradd
5299			 * lists pushing out the associated inode.
5300			 */
5301			pagedep = WK_PAGEDEP(wk);
5302			for (i = 0; i < DAHASHSZ; i++) {
5303				if (LIST_FIRST(&pagedep->pd_diraddhd[i]) == NULL)
5304					continue;
5305				if ((error =
5306				    flush_pagedep_deps(vp, wk->wk_mp,
5307						&pagedep->pd_diraddhd[i]))) {
5308					FREE_LOCK(&lk);
5309					goto loop_end;
5310				}
5311			}
5312			continue;
5313
5314		case D_MKDIR:
5315			/*
5316			 * This case should never happen if the vnode has
5317			 * been properly sync'ed. However, if this function
5318			 * is used at a place where the vnode has not yet
5319			 * been sync'ed, this dependency can show up. So,
5320			 * rather than panic, just flush it.
5321			 */
5322			nbp = WK_MKDIR(wk)->md_buf;
5323			nbp = getdirtybuf(nbp, &lk, waitfor);
5324			if (nbp == NULL)
5325				continue;
5326			FREE_LOCK(&lk);
5327			if (waitfor == MNT_NOWAIT) {
5328				bawrite(nbp);
5329			} else if ((error = bwrite(nbp)) != 0) {
5330				break;
5331			}
5332			ACQUIRE_LOCK(&lk);
5333			continue;
5334
5335		case D_BMSAFEMAP:
5336			/*
5337			 * This case should never happen if the vnode has
5338			 * been properly sync'ed. However, if this function
5339			 * is used at a place where the vnode has not yet
5340			 * been sync'ed, this dependency can show up. So,
5341			 * rather than panic, just flush it.
5342			 */
5343			nbp = WK_BMSAFEMAP(wk)->sm_buf;
5344			nbp = getdirtybuf(nbp, &lk, waitfor);
5345			if (nbp == NULL)
5346				continue;
5347			FREE_LOCK(&lk);
5348			if (waitfor == MNT_NOWAIT) {
5349				bawrite(nbp);
5350			} else if ((error = bwrite(nbp)) != 0) {
5351				break;
5352			}
5353			ACQUIRE_LOCK(&lk);
5354			continue;
5355
5356		default:
5357			panic("softdep_sync_metadata: Unknown type %s",
5358			    TYPENAME(wk->wk_type));
5359			/* NOTREACHED */
5360		}
5361	loop_end:
5362		/* We reach here only on error, with lk released */
5363		if (error == 0)
5364			panic("softdep_sync_metadata: zero error");
5365		bp->b_lock.lk_flags &= ~LK_CANRECURSE;
5366		bawrite(bp);
5367		return (error);
5368	}
5369	FREE_LOCK(&lk);
5370	VI_LOCK(vp);
5371	while ((nbp = TAILQ_NEXT(bp, b_bobufs)) != NULL) {
5372		nbp = getdirtybuf(nbp, VI_MTX(vp), MNT_WAIT);
5373		if (nbp)
5374			break;
5375	}
5376	VI_UNLOCK(vp);
5377	bp->b_lock.lk_flags &= ~LK_CANRECURSE;
5378	bawrite(bp);
5379	if (nbp != NULL) {
5380		bp = nbp;
5381		goto loop;
5382	}
5383	/*
5384	 * The brief unlock is to allow any pent up dependency
5385	 * processing to be done. Then proceed with the second pass.
5386	 */
5387	if (waitfor == MNT_NOWAIT) {
5388		waitfor = MNT_WAIT;
5389		goto top;
5390	}
5391
5392	/*
5393	 * If we have managed to get rid of all the dirty buffers,
5394	 * then we are done. For certain directories and block
5395	 * devices, we may need to do further work.
5396	 *
5397	 * We must wait for any I/O in progress to finish so that
5398	 * all potential buffers on the dirty list will be visible.
5399	 */
5400	VI_LOCK(vp);
5401	drain_output(vp);
5402	VI_UNLOCK(vp);
5403	return (0);
5404}
5405
5406/*
5407 * Flush the dependencies associated with an inodedep.
5408 * Called with splbio blocked.
5409 */
5410static int
5411flush_inodedep_deps(mp, ino)
5412	struct mount *mp;
5413	ino_t ino;
5414{
5415	struct inodedep *inodedep;
5416	int error, waitfor;
5417
5418	/*
5419	 * This work is done in two passes. The first pass grabs most
5420	 * of the buffers and begins asynchronously writing them. The
5421	 * only way to wait for these asynchronous writes is to sleep
5422	 * on the filesystem vnode which may stay busy for a long time
5423	 * if the filesystem is active. So, instead, we make a second
5424	 * pass over the dependencies blocking on each write. In the
5425	 * usual case we will be blocking against a write that we
5426	 * initiated, so when it is done the dependency will have been
5427	 * resolved. Thus the second pass is expected to end quickly.
5428	 * We give a brief window at the top of the loop to allow
5429	 * any pending I/O to complete.
5430	 */
5431	for (error = 0, waitfor = MNT_NOWAIT; ; ) {
5432		if (error)
5433			return (error);
5434		FREE_LOCK(&lk);
5435		ACQUIRE_LOCK(&lk);
5436		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
5437			return (0);
5438		if (flush_deplist(&inodedep->id_inoupdt, waitfor, &error) ||
5439		    flush_deplist(&inodedep->id_newinoupdt, waitfor, &error) ||
5440		    flush_deplist(&inodedep->id_extupdt, waitfor, &error) ||
5441		    flush_deplist(&inodedep->id_newextupdt, waitfor, &error))
5442			continue;
5443		/*
5444		 * If this was the second pass, we are done; otherwise, do pass 2.
5445		 */
5446		if (waitfor == MNT_WAIT)
5447			break;
5448		waitfor = MNT_WAIT;
5449	}
5450	/*
5451	 * Try freeing inodedep in case all dependencies have been removed.
5452	 */
5453	if (inodedep_lookup(mp, ino, 0, &inodedep) != 0)
5454		(void) free_inodedep(inodedep);
5455	return (0);
5456}
5457
5458/*
5459 * Flush an inode dependency list.
5460 * Called with splbio blocked.
5461 */
5462static int
5463flush_deplist(listhead, waitfor, errorp)
5464	struct allocdirectlst *listhead;
5465	int waitfor;
5466	int *errorp;
5467{
5468	struct allocdirect *adp;
5469	struct buf *bp;
5470
5471	mtx_assert(&lk, MA_OWNED);
5472	TAILQ_FOREACH(adp, listhead, ad_next) {
5473		if (adp->ad_state & DEPCOMPLETE)
5474			continue;
5475		bp = adp->ad_buf;
5476		bp = getdirtybuf(bp, &lk, waitfor);
5477		if (bp == NULL) {
5478			if (waitfor == MNT_NOWAIT)
5479				continue;
5480			return (1);
5481		}
5482		FREE_LOCK(&lk);
5483		if (waitfor == MNT_NOWAIT) {
5484			bawrite(bp);
5485		} else if ((*errorp = bwrite(bp)) != 0) {
5486			ACQUIRE_LOCK(&lk);
5487			return (1);
5488		}
5489		ACQUIRE_LOCK(&lk);
5490		return (1);
5491	}
5492	return (0);
5493}
5494
5495/*
5496 * Eliminate a pagedep dependency by flushing out all its diradd dependencies.
5497 * Called with splbio blocked.
5498 */
5499static int
5500flush_pagedep_deps(pvp, mp, diraddhdp)
5501	struct vnode *pvp;
5502	struct mount *mp;
5503	struct diraddhd *diraddhdp;
5504{
5505	struct inodedep *inodedep;
5506	struct ufsmount *ump;
5507	struct diradd *dap;
5508	struct vnode *vp;
5509	int error = 0;
5510	struct buf *bp;
5511	ino_t inum;
5512	struct worklist *wk;
5513
5514	ump = VFSTOUFS(mp);
5515	while ((dap = LIST_FIRST(diraddhdp)) != NULL) {
5516		/*
5517		 * Flush ourselves if this directory entry
5518		 * has a MKDIR_PARENT dependency.
5519		 */
5520		if (dap->da_state & MKDIR_PARENT) {
5521			FREE_LOCK(&lk);
5522			if ((error = ffs_update(pvp, 1)) != 0)
5523				break;
5524			ACQUIRE_LOCK(&lk);
5525			/*
5526			 * If that cleared dependencies, go on to next.
5527			 */
5528			if (dap != LIST_FIRST(diraddhdp))
5529				continue;
5530			if (dap->da_state & MKDIR_PARENT)
5531				panic("flush_pagedep_deps: MKDIR_PARENT");
5532		}
5533		/*
5534		 * A newly allocated directory must have its "." and
5535		 * ".." entries written out before its name can be
5536		 * committed in its parent. We do not want or need
5537		 * the full semantics of a synchronous ffs_syncvnode as
5538		 * that may end up here again, once for each directory
5539		 * level in the filesystem. Instead, we push the blocks
5540		 * and wait for them to clear. We have to fsync twice
5541		 * because the first call may choose to defer blocks
5542		 * that still have dependencies, but deferral will
5543		 * happen at most once.
5544		 */
5545		inum = dap->da_newinum;
5546		if (dap->da_state & MKDIR_BODY) {
5547			FREE_LOCK(&lk);
5548			if ((error = ffs_vget(mp, inum, LK_EXCLUSIVE, &vp)))
5549				break;
5550			if ((error=ffs_syncvnode(vp, MNT_NOWAIT)) ||
5551			    (error=ffs_syncvnode(vp, MNT_NOWAIT))) {
5552				vput(vp);
5553				break;
5554			}
5555			VI_LOCK(vp);
5556			drain_output(vp);
5557			/*
5558			 * If first block is still dirty with a D_MKDIR
5559			 * dependency then it needs to be written now.
5560			 */
5561			for (;;) {
5562				error = 0;
5563				bp = gbincore(&vp->v_bufobj, 0);
5564				if (bp == NULL)
5565					break;	/* First block not present */
5566				error = BUF_LOCK(bp,
5567						 LK_EXCLUSIVE |
5568						 LK_SLEEPFAIL |
5569						 LK_INTERLOCK,
5570						 VI_MTX(vp));
5571				VI_LOCK(vp);
5572				if (error == ENOLCK)
5573					continue;	/* Slept, retry */
5574				if (error != 0)
5575					break;		/* Failed */
5576				if ((bp->b_flags & B_DELWRI) == 0) {
5577					BUF_UNLOCK(bp);
5578					break;	/* Buffer not dirty */
5579				}
5580				for (wk = LIST_FIRST(&bp->b_dep);
5581				     wk != NULL;
5582				     wk = LIST_NEXT(wk, wk_list))
5583					if (wk->wk_type == D_MKDIR)
5584						break;
5585				if (wk == NULL)
5586					BUF_UNLOCK(bp);	/* Dependency gone */
5587				else {
5588					/*
5589					 * D_MKDIR dependency remains,
5590					 * must write buffer to stable
5591					 * storage.
5592					 */
5593					VI_UNLOCK(vp);
5594					bremfree(bp);
5595					error = bwrite(bp);
5596					VI_LOCK(vp);
5597				}
5598				break;
5599			}
5600			VI_UNLOCK(vp);
5601			vput(vp);
5602			if (error != 0)
5603				break;	/* Flushing of first block failed */
5604			ACQUIRE_LOCK(&lk);
5605			/*
5606			 * If that cleared dependencies, go on to next.
5607			 */
5608			if (dap != LIST_FIRST(diraddhdp))
5609				continue;
5610			if (dap->da_state & MKDIR_BODY)
5611				panic("flush_pagedep_deps: MKDIR_BODY");
5612		}
5613		/*
5614		 * Flush the inode on which the directory entry depends.
5615		 * Having accounted for MKDIR_PARENT and MKDIR_BODY above,
5616		 * the only remaining dependency is that the updated inode
5617		 * count must get pushed to disk. The inode has already
5618		 * been pushed into its inode buffer (via VOP_UPDATE) at
5619		 * the time of the reference count change. So we need only
5620		 * locate that buffer, ensure that there will be no rollback
5621		 * caused by a bitmap dependency, then write the inode buffer.
5622		 */
5623retry:
5624		if (inodedep_lookup(UFSTOVFS(ump), inum, 0, &inodedep) == 0)
5625			panic("flush_pagedep_deps: lost inode");
5626		/*
5627		 * If the inode still has bitmap dependencies,
5628		 * push them to disk.
5629		 */
5630		if ((inodedep->id_state & DEPCOMPLETE) == 0) {
5631			bp = inodedep->id_buf;
5632			bp = getdirtybuf(bp, &lk, MNT_WAIT);
5633			if (bp == NULL)
5634				goto retry;
5635			FREE_LOCK(&lk);
5636			if ((error = bwrite(bp)) != 0)
5637				break;
5638			ACQUIRE_LOCK(&lk);
5639			if (dap != LIST_FIRST(diraddhdp))
5640				continue;
5641		}
5642		/*
5643		 * If the inode is still sitting in a buffer waiting
5644		 * to be written, push it to disk.
5645		 */
5646		FREE_LOCK(&lk);
5647		if ((error = bread(ump->um_devvp,
5648		    fsbtodb(ump->um_fs, ino_to_fsba(ump->um_fs, inum)),
5649		    (int)ump->um_fs->fs_bsize, NOCRED, &bp)) != 0) {
5650			brelse(bp);
5651			break;
5652		}
5653		if ((error = bwrite(bp)) != 0)
5654			break;
5655		ACQUIRE_LOCK(&lk);
5656		/*
5657		 * If we have failed to get rid of all the dependencies
5658		 * then something is seriously wrong.
5659		 */
5660		if (dap == LIST_FIRST(diraddhdp))
5661			panic("flush_pagedep_deps: flush failed");
5662	}
5663	if (error)
5664		ACQUIRE_LOCK(&lk);
5665	return (error);
5666}
5667
5668/*
5669 * A large burst of file addition or deletion activity can drive the
5670 * memory load excessively high. First attempt to slow things down
5671 * using the techniques below. If that fails, this routine requests
5672 * the offending operations to fall back to running synchronously
5673 * until the memory load returns to a reasonable level.
5674 */
5675int
5676softdep_slowdown(vp)
5677	struct vnode *vp;
5678{
5679	int max_softdeps_hard;
5680
5681	ACQUIRE_LOCK(&lk);
5682	max_softdeps_hard = max_softdeps * 11 / 10;
5683	if (num_dirrem < max_softdeps_hard / 2 &&
5684	    num_inodedep < max_softdeps_hard &&
5685	    VFSTOUFS(vp->v_mount)->um_numindirdeps < maxindirdeps) {
5686		FREE_LOCK(&lk);
5687		return (0);
5688	}
5689	if (VFSTOUFS(vp->v_mount)->um_numindirdeps >= maxindirdeps)
5690		softdep_speedup();
5691	stat_sync_limit_hit += 1;
5692	FREE_LOCK(&lk);
5693	return (1);
5694}
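
/*
 * To make the thresholds above concrete: with max_softdeps of, say,
 * 1000, max_softdeps_hard is 1100, so the fast path is taken while
 * fewer than 550 dirrems and 1100 inodedeps are outstanding and the
 * mount's indirdep count stays under maxindirdeps; beyond that, the
 * caller is asked to fall back to synchronous operation.
 */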
5695
5696/*
5697 * Called by the allocation routines when they are about to fail
5698 * in the hope that we can free up some disk space.
5699 *
5700 * First check to see if the work list has anything on it. If it has,
5701 * clean up entries until we successfully free some space. Because this
5702 * process holds inodes locked, we cannot handle any remove requests
5703 * that might block on a locked inode as that could lead to deadlock.
5704 * If the worklist yields no free space, encourage the syncer daemon
5705 * to help us. In no event will we try for longer than tickdelay seconds.
5706 */
5707int
5708softdep_request_cleanup(fs, vp)
5709	struct fs *fs;
5710	struct vnode *vp;
5711{
5712	struct ufsmount *ump;
5713	long starttime;
5714	ufs2_daddr_t needed;
5715	int error;
5716
5717	ump = VTOI(vp)->i_ump;
5718	mtx_assert(UFS_MTX(ump), MA_OWNED);
5719	needed = fs->fs_cstotal.cs_nbfree + fs->fs_contigsumsize;
5720	starttime = time_second + tickdelay;
5721	/*
5722	 * If we are being called because of a process doing a
5723	 * copy-on-write, then it is not safe to update the vnode
5724	 * as we may recurse into the copy-on-write routine.
5725	 */
5726	if (!(curthread->td_pflags & TDP_COWINPROGRESS)) {
5727		UFS_UNLOCK(ump);
5728		error = ffs_update(vp, 1);
5729		UFS_LOCK(ump);
5730		if (error != 0)
5731			return (0);
5732	}
5733	while (fs->fs_pendingblocks > 0 && fs->fs_cstotal.cs_nbfree <= needed) {
5734		if (time_second > starttime)
5735			return (0);
5736		UFS_UNLOCK(ump);
5737		ACQUIRE_LOCK(&lk);
5738		if (ump->softdep_on_worklist > 0 &&
5739		    process_worklist_item(UFSTOVFS(ump), LK_NOWAIT) != -1) {
5740			stat_worklist_push += 1;
5741			FREE_LOCK(&lk);
5742			UFS_LOCK(ump);
5743			continue;
5744		}
5745		request_cleanup(UFSTOVFS(ump), FLUSH_REMOVE_WAIT);
5746		FREE_LOCK(&lk);
5747		UFS_LOCK(ump);
5748	}
5749	return (1);
5750}
5751
5752/*
5753 * If memory utilization has gotten too high, deliberately slow things
5754 * down and speed up the I/O processing.
5755 */
5756extern struct thread *syncertd;
5757static int
5758request_cleanup(mp, resource)
5759	struct mount *mp;
5760	int resource;
5761{
5762	struct thread *td = curthread;
5763	struct ufsmount *ump;
5764
5765	mtx_assert(&lk, MA_OWNED);
5766	/*
5767	 * We never hold up the filesystem syncer or buf daemon.
5768	 */
5769	if (td->td_pflags & (TDP_SOFTDEP|TDP_NORUNNINGBUF))
5770		return (0);
5771	ump = VFSTOUFS(mp);
5772	/*
5773	 * First check to see if the work list has gotten backlogged.
5774	 * If it has, co-opt this process to help clean up two entries.
5775	 * Because this process may hold inodes locked, we cannot
5776	 * handle any remove requests that might block on a locked
5777	 * inode as that could lead to deadlock.  We set TDP_SOFTDEP
5778	 * to avoid recursively processing the worklist.
5779	 */
5780	if (ump->softdep_on_worklist > max_softdeps / 10) {
5781		td->td_pflags |= TDP_SOFTDEP;
5782		process_worklist_item(mp, LK_NOWAIT);
5783		process_worklist_item(mp, LK_NOWAIT);
5784		td->td_pflags &= ~TDP_SOFTDEP;
5785		stat_worklist_push += 2;
5786		return(1);
5787	}
5788	/*
5789	 * Next, we attempt to speed up the syncer process. If that
5790	 * is successful, then we allow the process to continue.
5791	 */
5792	if (softdep_speedup() && resource != FLUSH_REMOVE_WAIT)
5793		return(0);
5794	/*
5795	 * If we are resource constrained on inode dependencies, try
5796	 * flushing some dirty inodes. Otherwise, we are constrained
5797	 * by file deletions, so try accelerating flushes of directories
5798	 * with removal dependencies. We would like to do the cleanup
5799	 * here, but we probably hold an inode locked at this point and
5800	 * that might deadlock against one that we try to clean. So,
5801	 * the best that we can do is request the syncer daemon to do
5802	 * the cleanup for us.
5803	 */
5804	switch (resource) {
5805
5806	case FLUSH_INODES:
5807		stat_ino_limit_push += 1;
5808		req_clear_inodedeps += 1;
5809		stat_countp = &stat_ino_limit_hit;
5810		break;
5811
5812	case FLUSH_REMOVE:
5813	case FLUSH_REMOVE_WAIT:
5814		stat_blk_limit_push += 1;
5815		req_clear_remove += 1;
5816		stat_countp = &stat_blk_limit_hit;
5817		break;
5818
5819	default:
5820		panic("request_cleanup: unknown type");
5821	}
5822	/*
5823	 * Hopefully the syncer daemon will catch up and awaken us.
5824	 * We wait at most tickdelay before proceeding in any case.
5825	 */
5826	proc_waiting += 1;
5827	if (handle.callout == NULL)
5828		handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
5829	msleep((caddr_t)&proc_waiting, &lk, PPAUSE, "softupdate", 0);
5830	proc_waiting -= 1;
5831	return (1);
5832}
5833
5834/*
5835 * Awaken processes pausing in request_cleanup and clear proc_waiting
5836 * to indicate that there is no longer a timer running.
5837 */
5838static void
5839pause_timer(arg)
5840	void *arg;
5841{
5842
5843	ACQUIRE_LOCK(&lk);
5844	*stat_countp += 1;
5845	wakeup_one(&proc_waiting);
5846	if (proc_waiting > 0)
5847		handle = timeout(pause_timer, 0, tickdelay > 2 ? tickdelay : 2);
5848	else
5849		handle.callout = NULL;
5850	FREE_LOCK(&lk);
5851}
5852
5853/*
5854 * Flush out a directory with at least one removal dependency in an effort to
5855 * reduce the number of dirrem, freefile, and freeblks dependency structures.
5856 */
5857static void
5858clear_remove(td)
5859	struct thread *td;
5860{
5861	struct pagedep_hashhead *pagedephd;
5862	struct pagedep *pagedep;
5863	static int next = 0;
5864	struct mount *mp;
5865	struct vnode *vp;
5866	int error, cnt;
5867	ino_t ino;
5868
5869	mtx_assert(&lk, MA_OWNED);
5870
5871	for (cnt = 0; cnt < pagedep_hash; cnt++) {
5872		pagedephd = &pagedep_hashtbl[next++];
5873		if (next >= pagedep_hash)
5874			next = 0;
5875		LIST_FOREACH(pagedep, pagedephd, pd_hash) {
5876			if (LIST_FIRST(&pagedep->pd_dirremhd) == NULL)
5877				continue;
5878			mp = pagedep->pd_list.wk_mp;
5879			ino = pagedep->pd_ino;
5880			if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
5881				continue;
5882			FREE_LOCK(&lk);
5883			if ((error = ffs_vget(mp, ino, LK_EXCLUSIVE, &vp))) {
5884				softdep_error("clear_remove: vget", error);
5885				vn_finished_write(mp);
5886				ACQUIRE_LOCK(&lk);
5887				return;
5888			}
5889			if ((error = ffs_syncvnode(vp, MNT_NOWAIT)))
5890				softdep_error("clear_remove: fsync", error);
5891			VI_LOCK(vp);
5892			drain_output(vp);
5893			VI_UNLOCK(vp);
5894			vput(vp);
5895			vn_finished_write(mp);
5896			ACQUIRE_LOCK(&lk);
5897			return;
5898		}
5899	}
5900}
5901
5902/*
5903 * Clear out a block of dirty inodes in an effort to reduce
5904 * the number of inodedep dependency structures.
5905 */
5906static void
5907clear_inodedeps(td)
5908	struct thread *td;
5909{
5910	struct inodedep_hashhead *inodedephd;
5911	struct inodedep *inodedep;
5912	static int next = 0;
5913	struct mount *mp;
5914	struct vnode *vp;
5915	struct fs *fs;
5916	int error, cnt;
5917	ino_t firstino, lastino, ino;
5918
5919	mtx_assert(&lk, MA_OWNED);
5920	/*
5921	 * Pick an inode dependency to be cleared (round-robin by bucket).
5922	 * We will then gather up all the inodes in its block
5923	 * that have dependencies and flush them out.
5924	 */
5925	for (cnt = 0; cnt < inodedep_hash; cnt++) {
5926		inodedephd = &inodedep_hashtbl[next++];
5927		if (next >= inodedep_hash)
5928			next = 0;
5929		if ((inodedep = LIST_FIRST(inodedephd)) != NULL)
5930			break;
5931	}
5932	if (inodedep == NULL)
5933		return;
5934	fs = inodedep->id_fs;
5935	mp = inodedep->id_list.wk_mp;
5936	/*
5937	 * Find the last inode in the block with dependencies.
5938	 */
5939	firstino = inodedep->id_ino & ~(INOPB(fs) - 1);
5940	for (lastino = firstino + INOPB(fs) - 1; lastino > firstino; lastino--)
5941		if (inodedep_lookup(mp, lastino, 0, &inodedep) != 0)
5942			break;
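	/*
	 * To make the masking above concrete: INOPB(fs) is a power of
	 * two (for example, 64 inodes per 16K block with 256-byte
	 * inodes), so for inode 200 firstino becomes 192 and the scan
	 * for lastino starts at 255.
	 */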
5943	/*
5944	 * Asynchronously push all but the last inode with dependencies.
5945	 * Synchronously push the last inode with dependencies to ensure
5946	 * that the inode block gets written to free up the inodedeps.
5947	 */
5948	for (ino = firstino; ino <= lastino; ino++) {
5949		if (inodedep_lookup(mp, ino, 0, &inodedep) == 0)
5950			continue;
5951		if (vn_start_write(NULL, &mp, V_NOWAIT) != 0)
5952			continue;
5953		FREE_LOCK(&lk);
5954		if ((error = ffs_vget(mp, ino, LK_EXCLUSIVE, &vp)) != 0) {
5955			softdep_error("clear_inodedeps: vget", error);
5956			vn_finished_write(mp);
5957			ACQUIRE_LOCK(&lk);
5958			return;
5959		}
5960		if (ino == lastino) {
5961			if ((error = ffs_syncvnode(vp, MNT_WAIT)))
5962				softdep_error("clear_inodedeps: fsync1", error);
5963		} else {
5964			if ((error = ffs_syncvnode(vp, MNT_NOWAIT)))
5965				softdep_error("clear_inodedeps: fsync2", error);
5966			VI_LOCK(vp);
5967			drain_output(vp);
5968			VI_UNLOCK(vp);
5969		}
5970		vput(vp);
5971		vn_finished_write(mp);
5972		ACQUIRE_LOCK(&lk);
5973	}
5974}
5975
5976/*
5977 * Function to determine if the buffer has outstanding dependencies
5978 * that will cause a roll-back if the buffer is written. If wantcount
5979 * is set, return the number of dependencies; otherwise just yes or no.
5980 */
5981static int
5982softdep_count_dependencies(bp, wantcount)
5983	struct buf *bp;
5984	int wantcount;
5985{
5986	struct worklist *wk;
5987	struct inodedep *inodedep;
5988	struct indirdep *indirdep;
5989	struct allocindir *aip;
5990	struct pagedep *pagedep;
5991	struct diradd *dap;
5992	int i, retval;
5993
5994	retval = 0;
5995	ACQUIRE_LOCK(&lk);
5996	LIST_FOREACH(wk, &bp->b_dep, wk_list) {
5997		switch (wk->wk_type) {
5998
5999		case D_INODEDEP:
6000			inodedep = WK_INODEDEP(wk);
6001			if ((inodedep->id_state & DEPCOMPLETE) == 0) {
6002				/* bitmap allocation dependency */
6003				retval += 1;
6004				if (!wantcount)
6005					goto out;
6006			}
6007			if (TAILQ_FIRST(&inodedep->id_inoupdt)) {
6008				/* direct block pointer dependency */
6009				retval += 1;
6010				if (!wantcount)
6011					goto out;
6012			}
6013			if (TAILQ_FIRST(&inodedep->id_extupdt)) {
6014				/* direct block pointer dependency */
6015				retval += 1;
6016				if (!wantcount)
6017					goto out;
6018			}
6019			continue;
6020
6021		case D_INDIRDEP:
6022			indirdep = WK_INDIRDEP(wk);
6023
6024			LIST_FOREACH(aip, &indirdep->ir_deplisthd, ai_next) {
6025				/* indirect block pointer dependency */
6026				retval += 1;
6027				if (!wantcount)
6028					goto out;
6029			}
6030			continue;
6031
6032		case D_PAGEDEP:
6033			pagedep = WK_PAGEDEP(wk);
6034			for (i = 0; i < DAHASHSZ; i++) {
6035
6036				LIST_FOREACH(dap, &pagedep->pd_diraddhd[i], da_pdlist) {
6037					/* directory entry dependency */
6038					retval += 1;
6039					if (!wantcount)
6040						goto out;
6041				}
6042			}
6043			continue;
6044
6045		case D_BMSAFEMAP:
6046		case D_ALLOCDIRECT:
6047		case D_ALLOCINDIR:
6048		case D_MKDIR:
6049			/* never a dependency on these blocks */
6050			continue;
6051
6052		default:
6053			panic("softdep_count_dependencies: Unexpected type %s",
6054			    TYPENAME(wk->wk_type));
6055			/* NOTREACHED */
6056		}
6057	}
6058out:
6059	FREE_LOCK(&lk);
6060	return (retval);
6061}
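
/*
 * A hedged note on how this routine is reached: it is installed as the
 * io_countdeps hook in the bioops vector (set up when soft updates are
 * initialized), so the buffer cache can ask via buf_countdeps() whether
 * writing a buffer now would only be rolled back.
 */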
6062
6063/*
6064 * Acquire exclusive access to a buffer.
6065 * Must be called with a locked mtx parameter.
6066 * Return acquired buffer or NULL on failure.
6067 */
6068static struct buf *
6069getdirtybuf(bp, mtx, waitfor)
6070	struct buf *bp;
6071	struct mtx *mtx;
6072	int waitfor;
6073{
6074	int error;
6075
6076	mtx_assert(mtx, MA_OWNED);
6077	if (BUF_LOCK(bp, LK_EXCLUSIVE | LK_NOWAIT, NULL) != 0) {
6078		if (waitfor != MNT_WAIT)
6079			return (NULL);
6080		error = BUF_LOCK(bp,
6081		    LK_EXCLUSIVE | LK_SLEEPFAIL | LK_INTERLOCK, mtx);
6082		/*
6083		 * Even if we successfully acquire bp here, we have dropped
6084		 * mtx, which may violate our guarantee.
6085		 */
6086		if (error == 0)
6087			BUF_UNLOCK(bp);
6088		else if (error != ENOLCK)
6089			panic("getdirtybuf: inconsistent lock: %d", error);
6090		mtx_lock(mtx);
6091		return (NULL);
6092	}
6093	if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
6094		if (mtx == &lk && waitfor == MNT_WAIT) {
6095			mtx_unlock(mtx);
6096			BO_LOCK(bp->b_bufobj);
6097			BUF_UNLOCK(bp);
6098			if ((bp->b_vflags & BV_BKGRDINPROG) != 0) {
6099				bp->b_vflags |= BV_BKGRDWAIT;
6100				msleep(&bp->b_xflags, BO_MTX(bp->b_bufobj),
6101				       PRIBIO | PDROP, "getbuf", 0);
6102			} else
6103				BO_UNLOCK(bp->b_bufobj);
6104			mtx_lock(mtx);
6105			return (NULL);
6106		}
6107		BUF_UNLOCK(bp);
6108		if (waitfor != MNT_WAIT)
6109			return (NULL);
6110		/*
6111		 * The mtx argument must be bp->b_vp's mutex in
6112		 * this case.
6113		 */
6114#ifdef	DEBUG_VFS_LOCKS
6115		if (bp->b_vp->v_type != VCHR)
6116			ASSERT_VI_LOCKED(bp->b_vp, "getdirtybuf");
6117#endif
6118		bp->b_vflags |= BV_BKGRDWAIT;
6119		msleep(&bp->b_xflags, mtx, PRIBIO, "getbuf", 0);
6120		return (NULL);
6121	}
6122	if ((bp->b_flags & B_DELWRI) == 0) {
6123		BUF_UNLOCK(bp);
6124		return (NULL);
6125	}
6126	bremfree(bp);
6127	return (bp);
6128}
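
/*
 * Typical calling pattern, summarizing how this file uses the routine
 * (a restatement, not a new interface): the soft updates lock is passed
 * in, may be dropped and retaken internally, and a non-NULL result is a
 * locked buffer removed from its queue, ready to be written.
 */
#if 0
	bp = getdirtybuf(bp, &lk, MNT_WAIT);
	if (bp != NULL) {
		FREE_LOCK(&lk);
		if ((error = bwrite(bp)) != 0)
			softdep_error("example: bwrite", error);
		ACQUIRE_LOCK(&lk);
	}
#endif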
6129
6130
6131/*
6132 * Check if it is safe to suspend the file system now.  On entry,
6133 * the vnode interlock for devvp should be held.  Return 0 with
6134 * the mount interlock held if the file system can be suspended now,
6135 * otherwise return EAGAIN with the mount interlock held.
6136 */
6137int
6138softdep_check_suspend(struct mount *mp,
6139		      struct vnode *devvp,
6140		      int softdep_deps,
6141		      int softdep_accdeps,
6142		      int secondary_writes,
6143		      int secondary_accwrites)
6144{
6145	struct bufobj *bo;
6146	struct ufsmount *ump;
6147	int error;
6148
6149	ASSERT_VI_LOCKED(devvp, "softdep_check_suspend");
6150	ump = VFSTOUFS(mp);
6151	bo = &devvp->v_bufobj;
6152
6153	for (;;) {
6154		if (!TRY_ACQUIRE_LOCK(&lk)) {
6155			VI_UNLOCK(devvp);
6156			ACQUIRE_LOCK(&lk);
6157			FREE_LOCK(&lk);
6158			VI_LOCK(devvp);
6159			continue;
6160		}
6161		if (!MNT_ITRYLOCK(mp)) {
6162			FREE_LOCK(&lk);
6163			VI_UNLOCK(devvp);
6164			MNT_ILOCK(mp);
6165			MNT_IUNLOCK(mp);
6166			VI_LOCK(devvp);
6167			continue;
6168		}
6169		if (mp->mnt_secondary_writes != 0) {
6170			FREE_LOCK(&lk);
6171			VI_UNLOCK(devvp);
6172			msleep(&mp->mnt_secondary_writes,
6173			       MNT_MTX(mp),
6174			       (PUSER - 1) | PDROP, "secwr", 0);
6175			VI_LOCK(devvp);
6176			continue;
6177		}
6178		break;
6179	}
6180
6181	/*
6182	 * Reasons for needing more work before suspend:
6183	 * - Dirty buffers on devvp.
6184	 * - Softdep activity occurred after start of vnode sync loop.
6185	 * - Secondary writes occurred after start of vnode sync loop.
6186	 */
6187	error = 0;
6188	if (bo->bo_numoutput > 0 ||
6189	    bo->bo_dirty.bv_cnt > 0 ||
6190	    softdep_deps != 0 ||
6191	    ump->softdep_deps != 0 ||
6192	    softdep_accdeps != ump->softdep_accdeps ||
6193	    secondary_writes != 0 ||
6194	    mp->mnt_secondary_writes != 0 ||
6195	    secondary_accwrites != mp->mnt_secondary_accwrites)
6196		error = EAGAIN;
6197	FREE_LOCK(&lk);
6198	VI_UNLOCK(devvp);
6199	return (error);
6200}
6201
6202
6203/*
6204 * Get the number of dependency structures for the file system, both
6205 * the current number and the total number allocated.  These will
6206 * later be used to detect that softdep processing has occurred.
6207 */
6208void
6209softdep_get_depcounts(struct mount *mp,
6210		      int *softdep_depsp,
6211		      int *softdep_accdepsp)
6212{
6213	struct ufsmount *ump;
6214
6215	ump = VFSTOUFS(mp);
6216	ACQUIRE_LOCK(&lk);
6217	*softdep_depsp = ump->softdep_deps;
6218	*softdep_accdepsp = ump->softdep_accdeps;
6219	FREE_LOCK(&lk);
6220}
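
/*
 * How the two routines above fit together, as a hedged sketch of the
 * intended handshake (the real caller is the file system suspension
 * code, which is not part of this file): the counts are snapshotted
 * before the vnode sync loop, and softdep_check_suspend() then returns
 * EAGAIN if any dependency or secondary-write activity happened in the
 * meantime, in which case the whole pass must be retried.
 */
#if 0
	for (;;) {
		softdep_get_depcounts(mp, &softdep_deps, &softdep_accdeps);
		/* ... sync every vnode on mp, tracking secondary writes ... */
		VI_LOCK(devvp);
		error = softdep_check_suspend(mp, devvp, softdep_deps,
		    softdep_accdeps, secondary_writes, secondary_accwrites);
		if (error != EAGAIN)
			break;		/* mount interlock held on return */
		MNT_IUNLOCK(mp);
	}
#endif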
6221
6222/*
6223 * Wait for pending output on a vnode to complete.
6224 * Must be called with vnode lock and interlock locked.
6225 *
6226 * XXX: Should just be a call to bufobj_wwait().
6227 */
6228static void
6229drain_output(vp)
6230	struct vnode *vp;
6231{
6232	ASSERT_VOP_LOCKED(vp, "drain_output");
6233	ASSERT_VI_LOCKED(vp, "drain_output");
6234
6235	while (vp->v_bufobj.bo_numoutput) {
6236		vp->v_bufobj.bo_flag |= BO_WWAIT;
6237		msleep((caddr_t)&vp->v_bufobj.bo_numoutput,
6238		    VI_MTX(vp), PRIBIO + 1, "drainvp", 0);
6239	}
6240}
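
/*
 * Assuming bufobj_wwait()'s (bo, slpflag, timeo) signature, the
 * replacement suggested by the XXX above would reduce the loop to:
 */
#if 0
	(void) bufobj_wwait(&vp->v_bufobj, 0, 0);
#endif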
6241
6242/*
6243 * Called whenever a buffer that is being invalidated or reallocated
6244 * contains dependencies. This should only happen if an I/O error has
6245 * occurred. The routine is called with the buffer locked.
6246 */
6247static void
6248softdep_deallocate_dependencies(bp)
6249	struct buf *bp;
6250{
6251
6252	if ((bp->b_ioflags & BIO_ERROR) == 0)
6253		panic("softdep_deallocate_dependencies: dangling deps");
6254	softdep_error(bp->b_vp->v_mount->mnt_stat.f_mntonname, bp->b_error);
6255	panic("softdep_deallocate_dependencies: unrecovered I/O error");
6256}
6257
6258/*
6259 * Function to handle asynchronous write errors in the filesystem.
6260 */
6261static void
6262softdep_error(func, error)
6263	char *func;
6264	int error;
6265{
6266
6267	/* XXX should do something better! */
6268	printf("%s: got error %d while accessing filesystem\n", func, error);
6269}
6270
6271#endif /* SOFTUPDATES */
6272