1/*
2 * Copyright (c) 1999-2008 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1991, 1993, 1994
30 *	The Regents of the University of California.  All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 *    notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 *    notice, this list of conditions and the following disclaimer in the
44 *    documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 *    must display the following acknowledgement:
47 *	This product includes software developed by the University of
48 *	California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 *    may be used to endorse or promote products derived from this software
51 *    without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 *      hfs_vfsops.c
66 *  derived from	@(#)ufs_vfsops.c	8.8 (Berkeley) 5/20/95
67 *
68 *      (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
69 *
70 *      hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/kauth.h>
76
77#include <sys/ubc.h>
78#include <sys/ubc_internal.h>
79#include <sys/vnode_internal.h>
80#include <sys/mount_internal.h>
81#include <sys/sysctl.h>
82#include <sys/malloc.h>
83#include <sys/stat.h>
84#include <sys/quota.h>
85#include <sys/disk.h>
86#include <sys/paths.h>
87#include <sys/utfconv.h>
88#include <sys/kdebug.h>
89#include <sys/fslog.h>
90
91#include <kern/locks.h>
92
93#include <vfs/vfs_journal.h>
94
95#include <miscfs/specfs/specdev.h>
96#include <hfs/hfs_mount.h>
97
98#include "hfs.h"
99#include "hfs_catalog.h"
100#include "hfs_cnode.h"
101#include "hfs_dbg.h"
102#include "hfs_endian.h"
103#include "hfs_hotfiles.h"
104#include "hfs_quota.h"
105
106#include "hfscommon/headers/FileMgrInternal.h"
107#include "hfscommon/headers/BTreesInternal.h"
108
109#if	HFS_DIAGNOSTIC
110int hfs_dbg_all = 0;
111int hfs_dbg_err = 0;
112#endif
113
114
115lck_grp_attr_t *  hfs_group_attr;
116lck_attr_t *  hfs_lock_attr;
117lck_grp_t *  hfs_mutex_group;
118lck_grp_t *  hfs_rwlock_group;
119
120extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
121/* not static so we can re-use in hfs_readwrite.c for build_path */
122int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
123
124
125static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
126static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
127static int hfs_flushfiles(struct mount *, int, struct proc *);
128static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
129static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp);
130static int hfs_init(struct vfsconf *vfsp);
131static int hfs_mount(struct mount *mp, vnode_t  devvp, user_addr_t data, vfs_context_t context);
132static int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
133static int hfs_reload(struct mount *mp);
134static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context);
135static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
136static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
137static int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
138static int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
139static int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
140                      user_addr_t newp, size_t newlen, vfs_context_t context);
141static int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
142static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
143
144static int hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context);
145static int hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk,
146                                           u_int32_t catblks, u_int32_t fileID, int rsrcfork);
147static int hfs_journal_replay(const char *devnode, vfs_context_t context);
148
149
150/*
151 * Called by vfs_mountroot when mounting HFS Plus as root.
152 */
153
154__private_extern__
155int
156hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
157{
158	struct hfsmount *hfsmp;
159	ExtendedVCB *vcb;
160	struct vfsstatfs *vfsp;
161	int error;
162
163	hfs_chashinit_finish();
164
165	if ((error = hfs_mountfs(rvp, mp, NULL, 0, context)))
166		return (error);
167
168	/* Init hfsmp */
169	hfsmp = VFSTOHFS(mp);
170
171	hfsmp->hfs_uid = UNKNOWNUID;
172	hfsmp->hfs_gid = UNKNOWNGID;
173	hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
174	hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
175
176	/* Establish the free block reserve. */
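	/*
	 * The reserve is HFS_MINFREE percent of the volume, capped at
	 * HFS_MAXRESERVE bytes' worth of allocation blocks so that very
	 * large volumes do not hold back an excessive amount of space.
	 */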
177	vcb = HFSTOVCB(hfsmp);
178	vcb->reserveBlocks = ((u_int64_t)vcb->totalBlocks * HFS_MINFREE) / 100;
179	vcb->reserveBlocks = MIN(vcb->reserveBlocks, HFS_MAXRESERVE / vcb->blockSize);
180
181	vfsp = vfs_statfs(mp);
182	(void)hfs_statfs(mp, vfsp, NULL);
183
184	return (0);
185}
186
187
188/*
189 * VFS Operations.
190 *
191 * mount system call
192 */
193
194static int
195hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
196{
197	struct proc *p = vfs_context_proc(context);
198	struct hfsmount *hfsmp = NULL;
199	struct hfs_mount_args args;
200	int retval = E_NONE;
201	u_int32_t cmdflags;
202
203	if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
204		return (retval);
205	}
206	cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
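	/*
	 * An update request is handled in stages below: a reload of in-core
	 * data after fsck (MNT_RELOAD, read-only mounts only), a downgrade
	 * from read-write to read-only, or an upgrade back to read-write;
	 * any remaining argument changes then go through hfs_changefs.
	 */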
207	if (cmdflags & MNT_UPDATE) {
208		hfsmp = VFSTOHFS(mp);
209
210		/* Reload incore data after an fsck. */
211		if (cmdflags & MNT_RELOAD) {
212			if (vfs_isrdonly(mp))
213				return hfs_reload(mp);
214			else
215				return (EINVAL);
216		}
217
218		/* Change to a read-only file system. */
219		if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
220		    vfs_isrdonly(mp)) {
221			int flags;
222
			/* Set a flag to indicate that a downgrade to read-only
			 * is in progress, which blocks any further
			 * modifications to the file system.
			 */
227			hfs_global_exclusive_lock_acquire(hfsmp);
228			hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
229			hfsmp->hfs_downgrading_proc = current_thread();
230			hfs_global_exclusive_lock_release(hfsmp);
231
232			/* use VFS_SYNC to push out System (btree) files */
233			retval = VFS_SYNC(mp, MNT_WAIT, context);
234			if (retval && ((cmdflags & MNT_FORCE) == 0)) {
235				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
236				hfsmp->hfs_downgrading_proc = NULL;
237				goto out;
238			}
239
240			flags = WRITECLOSE;
241			if (cmdflags & MNT_FORCE)
242				flags |= FORCECLOSE;
243
244			if ((retval = hfs_flushfiles(mp, flags, p))) {
245				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
246				hfsmp->hfs_downgrading_proc = NULL;
247				goto out;
248			}
249
250			/* mark the volume cleanly unmounted */
251			hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
252			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
253			hfsmp->hfs_flags |= HFS_READ_ONLY;
254
255			/* also get the volume bitmap blocks */
256			if (!retval) {
257				if (vnode_mount(hfsmp->hfs_devvp) == mp) {
258					retval = hfs_fsync(hfsmp->hfs_devvp, MNT_WAIT, 0, p);
259				} else {
260					vnode_get(hfsmp->hfs_devvp);
261					retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
262					vnode_put(hfsmp->hfs_devvp);
263				}
264			}
265			if (retval) {
266				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
267				hfsmp->hfs_downgrading_proc = NULL;
268				hfsmp->hfs_flags &= ~HFS_READ_ONLY;
269				goto out;
270			}
271			if (hfsmp->jnl) {
272			    hfs_global_exclusive_lock_acquire(hfsmp);
273
274			    journal_close(hfsmp->jnl);
275			    hfsmp->jnl = NULL;
276
277			    // Note: we explicitly don't want to shutdown
278			    //       access to the jvp because we may need
279			    //       it later if we go back to being read-write.
280
281			    hfs_global_exclusive_lock_release(hfsmp);
282			}
283
284			hfsmp->hfs_downgrading_proc = NULL;
285		}
286
287		/* Change to a writable file system. */
288		if (vfs_iswriteupgrade(mp)) {
289
290			/*
291			 * On inconsistent disks, do not allow read-write mount
292			 * unless it is the boot volume being mounted.
293			 */
294			if (!(vfs_flags(mp) & MNT_ROOTFS) &&
295					(hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
296				retval = EINVAL;
297				goto out;
298			}
299
			// If the journal was shut down previously because we were
			// asked to be read-only, let's start it back up again now.
302
303			if (   (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
304			    && hfsmp->jnl == NULL
305			    && hfsmp->jvp != NULL) {
306			    int jflags;
307
308			    if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
309					jflags = JOURNAL_RESET;
310			    } else {
311					jflags = 0;
312			    }
313
314			    hfs_global_exclusive_lock_acquire(hfsmp);
315
316			    hfsmp->jnl = journal_open(hfsmp->jvp,
317						      (hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
318						      hfsmp->jnl_size,
319						      hfsmp->hfs_devvp,
320						      hfsmp->hfs_logical_block_size,
321						      jflags,
322						      0,
323						      hfs_sync_metadata, hfsmp->hfs_mp);
324
325			    hfs_global_exclusive_lock_release(hfsmp);
326
327			    if (hfsmp->jnl == NULL) {
328				retval = EINVAL;
329				goto out;
330			    } else {
331				hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
332			    }
333
334			}
335
			/* Only clear HFS_READ_ONLY after a successful write */
337			hfsmp->hfs_flags &= ~HFS_READ_ONLY;
338
339			/* If this mount point was downgraded from read-write
340			 * to read-only, clear that information as we are now
341			 * moving back to read-write.
342			 */
343			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
344			hfsmp->hfs_downgrading_proc = NULL;
345
346			/* mark the volume dirty (clear clean unmount bit) */
347			hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;
348
349			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
350			if (retval != E_NONE)
351				goto out;
352
353			if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
354				/* Setup private/hidden directories for hardlinks. */
355				hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
356				hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
357
358				hfs_remove_orphans(hfsmp);
359
360				/*
361				 * Allow hot file clustering if conditions allow.
362				 */
363				if (hfsmp->hfs_flags & HFS_METADATA_ZONE) {
364					(void) hfs_recording_init(hfsmp);
365				}
366				/* Force ACLs on HFS+ file systems. */
367				if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
368					vfs_setextendedsecurity(HFSTOVFS(hfsmp));
369				}
370			}
371		}
372
373		/* Update file system parameters. */
374		retval = hfs_changefs(mp, &args);
375
376	} else /* not an update request */ {
377
378		/* Set the mount flag to indicate that we support volfs  */
379		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));
380
381		hfs_chashinit_finish();
382
383		retval = hfs_mountfs(devvp, mp, &args, 0, context);
384	}
385out:
386	if (retval == 0) {
387		(void)hfs_statfs(mp, vfs_statfs(mp), context);
388	}
389	return (retval);
390}
391
392
393struct hfs_changefs_cargs {
394	struct hfsmount *hfsmp;
395        int		namefix;
396        int		permfix;
397        int		permswitch;
398};
399
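/*
 * vnode_iterate() callback used by hfs_changefs(): re-reads each cnode's
 * catalog entry and refreshes its ownership/mode and/or its name,
 * depending on which mount arguments changed.
 */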
400static int
401hfs_changefs_callback(struct vnode *vp, void *cargs)
402{
403	ExtendedVCB *vcb;
404	struct cnode *cp;
405	struct cat_desc cndesc;
406	struct cat_attr cnattr;
407	struct hfs_changefs_cargs *args;
408	int lockflags;
409	int error;
410
411	args = (struct hfs_changefs_cargs *)cargs;
412
413	cp = VTOC(vp);
414	vcb = HFSTOVCB(args->hfsmp);
415
416	lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
417	error = cat_lookup(args->hfsmp, &cp->c_desc, 0, &cndesc, &cnattr, NULL, NULL);
418	hfs_systemfile_unlock(args->hfsmp, lockflags);
419	if (error) {
420	        /*
		 * If we couldn't find this guy, skip to the next one
422		 */
423	        if (args->namefix)
424		        cache_purge(vp);
425
426		return (VNODE_RETURNED);
427	}
428	/*
429	 * Get the real uid/gid and perm mask from disk.
430	 */
431	if (args->permswitch || args->permfix) {
432	        cp->c_uid = cnattr.ca_uid;
433		cp->c_gid = cnattr.ca_gid;
434		cp->c_mode = cnattr.ca_mode;
435	}
436	/*
437	 * If we're switching name converters then...
438	 *   Remove the existing entry from the namei cache.
439	 *   Update name to one based on new encoder.
440	 */
441	if (args->namefix) {
442	        cache_purge(vp);
443		replace_desc(cp, &cndesc);
444
445		if (cndesc.cd_cnid == kHFSRootFolderID) {
446		        strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
447			cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
448		}
449	} else {
450	        cat_releasedesc(&cndesc);
451	}
452	return (VNODE_RETURNED);
453}
454
455/* Change fs mount parameters */
456static int
457hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
458{
459	int retval = 0;
460	int namefix, permfix, permswitch;
461	struct hfsmount *hfsmp;
462	ExtendedVCB *vcb;
463	hfs_to_unicode_func_t	get_unicode_func;
464	unicode_to_hfs_func_t	get_hfsname_func;
465	u_long old_encoding = 0;
466	struct hfs_changefs_cargs cargs;
467	u_int32_t mount_flags;
468
469	hfsmp = VFSTOHFS(mp);
470	vcb = HFSTOVCB(hfsmp);
471	mount_flags = (unsigned int)vfs_flags(mp);
472
473	hfsmp->hfs_flags |= HFS_IN_CHANGEFS;
474
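	/*
	 * permswitch is set when MNT_UNKNOWNPERMISSIONS is being toggled,
	 * in either direction, relative to the volume's current
	 * HFS_UNKNOWN_PERMS state.
	 */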
475	permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
476	               ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
477	              (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
478	               (mount_flags & MNT_UNKNOWNPERMISSIONS)));
479
480	/* The root filesystem must operate with actual permissions: */
481	if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
482		vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS));	/* Just say "No". */
483		retval = EINVAL;
484		goto exit;
485	}
486	if (mount_flags & MNT_UNKNOWNPERMISSIONS)
487		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
488	else
489		hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;
490
491	namefix = permfix = 0;
492
493	/*
494	 * Tracking of hot files requires up-to-date access times.  So if
495	 * access time updates are disabled, we must also disable hot files.
496	 */
497	if (mount_flags & MNT_NOATIME) {
498		(void) hfs_recording_suspend(hfsmp);
499	}
500
501	/* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
502	if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
503		gTimeZone = args->hfs_timezone;
504	}
505
506	/* Change the default uid, gid and/or mask */
507	if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
508		hfsmp->hfs_uid = args->hfs_uid;
509		if (vcb->vcbSigWord == kHFSPlusSigWord)
510			++permfix;
511	}
512	if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
513		hfsmp->hfs_gid = args->hfs_gid;
514		if (vcb->vcbSigWord == kHFSPlusSigWord)
515			++permfix;
516	}
517	if (args->hfs_mask != (mode_t)VNOVAL) {
518		if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
519			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
520			hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
521			if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
522				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
523			if (vcb->vcbSigWord == kHFSPlusSigWord)
524				++permfix;
525		}
526	}
527
528	/* Change the hfs encoding value (hfs only) */
529	if ((vcb->vcbSigWord == kHFSSigWord)	&&
530	    (args->hfs_encoding != (u_long)VNOVAL)              &&
531	    (hfsmp->hfs_encoding != args->hfs_encoding)) {
532
533		retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
534		if (retval)
535			goto exit;
536
537		/*
538		 * Connect the new hfs_get_unicode converter but leave
539		 * the old hfs_get_hfsname converter in place so that
540		 * we can lookup existing vnodes to get their correctly
541		 * encoded names.
542		 *
543		 * When we're all finished, we can then connect the new
544		 * hfs_get_hfsname converter and release our interest
545		 * in the old converters.
546		 */
547		hfsmp->hfs_get_unicode = get_unicode_func;
548		old_encoding = hfsmp->hfs_encoding;
549		hfsmp->hfs_encoding = args->hfs_encoding;
550		++namefix;
551	}
552
553	if (!(namefix || permfix || permswitch))
554		goto exit;
555
556	/* XXX 3762912 hack to support HFS filesystem 'owner' */
557	if (permfix)
558		vfs_setowner(mp,
559		    hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
560		    hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);
561
562	/*
563	 * For each active vnode fix things that changed
564	 *
565	 * Note that we can visit a vnode more than once
566	 * and we can race with fsync.
567	 *
568	 * hfs_changefs_callback will be called for each vnode
569	 * hung off of this mount point
570	 *
571	 * The vnode will be properly referenced and unreferenced
572	 * around the callback
573	 */
574	cargs.hfsmp = hfsmp;
575	cargs.namefix = namefix;
576	cargs.permfix = permfix;
577	cargs.permswitch = permswitch;
578
579	vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);
580
581	/*
582	 * If we're switching name converters we can now
583	 * connect the new hfs_get_hfsname converter and
584	 * release our interest in the old converters.
585	 */
586	if (namefix) {
587		hfsmp->hfs_get_hfsname = get_hfsname_func;
588		vcb->volumeNameEncodingHint = args->hfs_encoding;
589		(void) hfs_relconverter(old_encoding);
590	}
591exit:
592	hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
593	return (retval);
594}
595
596
597struct hfs_reload_cargs {
598	struct hfsmount *hfsmp;
599        int		error;
600};
601
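/*
 * vnode_iterate() callback used by hfs_reload(): invalidates buffers,
 * drops directory hints, and re-reads catalog data (by file ID, since
 * the name may have changed) for each non-system, non-resource-fork vnode.
 */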
602static int
603hfs_reload_callback(struct vnode *vp, void *cargs)
604{
605	struct cnode *cp;
606	struct hfs_reload_cargs *args;
607	int lockflags;
608
609	args = (struct hfs_reload_cargs *)cargs;
610	/*
611	 * flush all the buffers associated with this node
612	 */
613	(void) buf_invalidateblks(vp, 0, 0, 0);
614
615	cp = VTOC(vp);
616	/*
617	 * Remove any directory hints
618	 */
619	if (vnode_isdir(vp))
620	        hfs_reldirhints(cp, 0);
621
622	/*
623	 * Re-read cnode data for all active vnodes (non-metadata files).
624	 */
625	if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp)) {
626	        struct cat_fork *datafork;
627		struct cat_desc desc;
628
629		datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;
630
631		/* lookup by fileID since name could have changed */
632		lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
633		args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, &desc, &cp->c_attr, datafork);
634		hfs_systemfile_unlock(args->hfsmp, lockflags);
635		if (args->error) {
636		        return (VNODE_RETURNED_DONE);
637		}
638
639		/* update cnode's catalog descriptor */
640		(void) replace_desc(cp, &desc);
641	}
642	return (VNODE_RETURNED);
643}
644
645/*
646 * Reload all incore data for a filesystem (used after running fsck on
647 * the root filesystem and finding things to fix). The filesystem must
648 * be mounted read-only.
649 *
650 * Things to do to update the mount:
651 *	invalidate all cached meta-data.
652 *	invalidate all inactive vnodes.
653 *	invalidate all cached file data.
654 *	re-read volume header from disk.
655 *	re-load meta-file info (extents, file size).
656 *	re-load B-tree header data.
657 *	re-read cnode data for all active vnodes.
658 */
659static int
660hfs_reload(struct mount *mountp)
661{
662	register struct vnode *devvp;
663	struct buf *bp;
664	int error, i;
665	struct hfsmount *hfsmp;
666	struct HFSPlusVolumeHeader *vhp;
667	ExtendedVCB *vcb;
668	struct filefork *forkp;
669    	struct cat_desc cndesc;
670	struct hfs_reload_cargs args;
671	daddr64_t priIDSector;
672
673    	hfsmp = VFSTOHFS(mountp);
674	vcb = HFSTOVCB(hfsmp);
675
676	if (vcb->vcbSigWord == kHFSSigWord)
677		return (EINVAL);	/* rooting from HFS is not supported! */
678
679	/*
680	 * Invalidate all cached meta-data.
681	 */
682	devvp = hfsmp->hfs_devvp;
683	if (buf_invalidateblks(devvp, 0, 0, 0))
684		panic("hfs_reload: dirty1");
685
686	args.hfsmp = hfsmp;
687	args.error = 0;
688	/*
689	 * hfs_reload_callback will be called for each vnode
690	 * hung off of this mount point that can't be recycled...
691	 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
692	 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
693	 * properly referenced and unreferenced around the callback
694	 */
695	vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);
696
697	if (args.error)
698	        return (args.error);
699
700	/*
701	 * Re-read VolumeHeader from disk.
702	 */
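	/*
	 * The primary volume header lives 1024 bytes into the volume;
	 * hfsPlusIOPosOffset accounts for an embedded (HFS-wrapped) volume.
	 */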
703	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
704			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
705
706	error = (int)buf_meta_bread(hfsmp->hfs_devvp,
707			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
708			hfsmp->hfs_physical_block_size, NOCRED, &bp);
709	if (error) {
710        	if (bp != NULL)
711        		buf_brelse(bp);
712		return (error);
713	}
714
715	vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
716
717	/* Do a quick sanity check */
718	if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
719	     SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
720	    (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
721	     SWAP_BE16(vhp->version) != kHFSXVersion) ||
722	    SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
723		buf_brelse(bp);
724		return (EIO);
725	}
726
727	vcb->vcbLsMod		= to_bsd_time(SWAP_BE32(vhp->modifyDate));
728	vcb->vcbAtrb		= SWAP_BE32 (vhp->attributes);
729	vcb->vcbJinfoBlock  = SWAP_BE32(vhp->journalInfoBlock);
730	vcb->vcbClpSiz		= SWAP_BE32 (vhp->rsrcClumpSize);
731	vcb->vcbNxtCNID		= SWAP_BE32 (vhp->nextCatalogID);
732	vcb->vcbVolBkUp		= to_bsd_time(SWAP_BE32(vhp->backupDate));
733	vcb->vcbWrCnt		= SWAP_BE32 (vhp->writeCount);
734	vcb->vcbFilCnt		= SWAP_BE32 (vhp->fileCount);
735	vcb->vcbDirCnt		= SWAP_BE32 (vhp->folderCount);
736	HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
737	vcb->totalBlocks	= SWAP_BE32 (vhp->totalBlocks);
738	vcb->freeBlocks		= SWAP_BE32 (vhp->freeBlocks);
739	vcb->encodingsBitmap	= SWAP_BE64 (vhp->encodingsBitmap);
740	bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
741	vcb->localCreateDate	= SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */
742
743	/*
744	 * Re-load meta-file vnode data (extent info, file size, etc).
745	 */
746	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
747	for (i = 0; i < kHFSPlusExtentDensity; i++) {
748		forkp->ff_extents[i].startBlock =
749			SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
750		forkp->ff_extents[i].blockCount =
751			SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
752	}
753	forkp->ff_size      = SWAP_BE64 (vhp->extentsFile.logicalSize);
754	forkp->ff_blocks    = SWAP_BE32 (vhp->extentsFile.totalBlocks);
755	forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);
756
757
758	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
759	for (i = 0; i < kHFSPlusExtentDensity; i++) {
760		forkp->ff_extents[i].startBlock	=
761			SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
762		forkp->ff_extents[i].blockCount	=
763			SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
764	}
765	forkp->ff_size      = SWAP_BE64 (vhp->catalogFile.logicalSize);
766	forkp->ff_blocks    = SWAP_BE32 (vhp->catalogFile.totalBlocks);
767	forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);
768
769	if (hfsmp->hfs_attribute_vp) {
770		forkp = VTOF(hfsmp->hfs_attribute_vp);
771		for (i = 0; i < kHFSPlusExtentDensity; i++) {
772			forkp->ff_extents[i].startBlock	=
773				SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
774			forkp->ff_extents[i].blockCount	=
775				SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
776		}
777		forkp->ff_size      = SWAP_BE64 (vhp->attributesFile.logicalSize);
778		forkp->ff_blocks    = SWAP_BE32 (vhp->attributesFile.totalBlocks);
779		forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
780	}
781
782	forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
783	for (i = 0; i < kHFSPlusExtentDensity; i++) {
784		forkp->ff_extents[i].startBlock	=
785			SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
786		forkp->ff_extents[i].blockCount	=
787			SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
788	}
789	forkp->ff_size      = SWAP_BE64 (vhp->allocationFile.logicalSize);
790	forkp->ff_blocks    = SWAP_BE32 (vhp->allocationFile.totalBlocks);
791	forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);
792
793	buf_brelse(bp);
794	vhp = NULL;
795
796	/*
797	 * Re-load B-tree header data
798	 */
799	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
800	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
801		return (error);
802
803	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
804	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
805		return (error);
806
807	if (hfsmp->hfs_attribute_vp) {
808		forkp = VTOF(hfsmp->hfs_attribute_vp);
809		if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
810			return (error);
811	}
812
813	/* Reload the volume name */
814	if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, &cndesc, NULL, NULL)))
815		return (error);
816	vcb->volumeNameEncodingHint = cndesc.cd_encoding;
817	bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
818	cat_releasedesc(&cndesc);
819
820	/* Re-establish private/hidden directories. */
821	hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
822	hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
823
824	/* In case any volume information changed to trigger a notification */
825	hfs_generate_volume_notifications(hfsmp);
826
827	return (0);
828}
829
830int hfs_last_io_wait_time = 125000;
831SYSCTL_INT (_kern, OID_AUTO, hfs_last_io_wait_time, CTLFLAG_RW, &hfs_last_io_wait_time, 0, "number of usecs to wait after an i/o before syncing ejectable media");
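/* Writable at run time, e.g.:  sysctl -w kern.hfs_last_io_wait_time=250000 */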
832
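/*
 * Thread_call callback, scheduled in hfs_mountfs only for ejectable,
 * non-virtual, non-root media: flushes the journal once the device has
 * been idle long enough, or defers the sync and reschedules itself.
 */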
833static void
834hfs_syncer(void *arg0, void *unused)
835{
836#pragma unused(unused)
837
838    struct hfsmount *hfsmp = arg0;
839    uint32_t secs, usecs, delay = HFS_META_DELAY;
840    uint64_t now;
841    struct timeval nowtv, last_io;
842
843    clock_get_calendar_microtime(&secs, &usecs);
844    now = ((uint64_t)secs * 1000000LL) + usecs;
845    //
846    // If we have put off the last sync for more than
847    // 5 seconds, force it so that we don't let too
848    // much i/o queue up (since flushing the journal
849    // causes the i/o queue to drain)
850    //
851    if ((now - hfsmp->hfs_last_sync_time) >= 5000000LL) {
852	    goto doit;
853    }
854
855    //
856    // Find out when the last i/o was done to this device (read or write).
857    //
858    throttle_info_get_last_io_time(hfsmp->hfs_mp, &last_io);
859    microuptime(&nowtv);
860    timevalsub(&nowtv, &last_io);
861
862    //
863    // If the last i/o was too recent, defer this sync until later.
    // The limit chosen (125 milliseconds, tunable via the
    // kern.hfs_last_io_wait_time sysctl) was picked based on
    // some experiments copying data to an SD card and seems to
866    // prevent us from issuing too many syncs.
867    //
868    if (nowtv.tv_sec >= 0 && nowtv.tv_usec > 0 && nowtv.tv_usec < hfs_last_io_wait_time) {
869	    delay /= 2;
870	    goto resched;
871    }
872
873    //
874    // If there's pending i/o, also skip the sync.
875    //
876    if (hfsmp->hfs_devvp && hfsmp->hfs_devvp->v_numoutput > 0) {
877	    goto resched;
878    }
879
880
881    //
    // Only flush the journal if we have not synced recently,
    // the last sync request was more than 100 milliseconds ago,
    // and there is no one in the middle of a
885    // transaction right now.  Else we defer the sync and
886    // reschedule it for later.
887    //
888    if (  ((now - hfsmp->hfs_last_sync_time) >= 100000LL)
889       && ((now - hfsmp->hfs_last_sync_request_time) >= 100000LL)
890       && (hfsmp->hfs_active_threads == 0)
891       && (hfsmp->hfs_global_lock_nesting == 0)) {
892
893    doit:
894	    OSAddAtomic(1, (SInt32 *)&hfsmp->hfs_active_threads);
895	    if (hfsmp->jnl) {
896		    journal_flush(hfsmp->jnl);
897	    }
898	    OSAddAtomic(-1, (SInt32 *)&hfsmp->hfs_active_threads);
899
900	    clock_get_calendar_microtime(&secs, &usecs);
901	    hfsmp->hfs_last_sync_time = ((int64_t)secs * 1000000) + usecs;
902
903    } else if (hfsmp->hfs_active_threads == 0) {
904	    uint64_t deadline;
905
906    resched:
907	    clock_interval_to_deadline(delay, HFS_MILLISEC_SCALE, &deadline);
908	    thread_call_enter_delayed(hfsmp->hfs_syncer, deadline);
909	    return;
910    }
911
912    //
    // NOTE: we decrement these *after* we're done with the journal_flush() since
    // it can take a significant amount of time and so we don't want more
    // callbacks scheduled until we're done with this one.
916    //
917    OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_scheduled);
918    OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
919    wakeup((caddr_t)&hfsmp->hfs_sync_incomplete);
920}
921
922extern int IOBSDIsMediaEjectable( const char *cdev_name );
923
924/*
925 * Common code for mount and mountroot
926 */
927static int
928hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
929            int journal_replay_only, vfs_context_t context)
930{
931	struct proc *p = vfs_context_proc(context);
932	int retval = E_NONE;
933	struct hfsmount	*hfsmp;
934	struct buf *bp;
935	dev_t dev;
936	HFSMasterDirectoryBlock *mdbp;
937	int ronly;
938#if QUOTA
939	int i;
940#endif
941	int mntwrapper;
942	kauth_cred_t cred;
943	u_int64_t disksize;
944	daddr64_t log_blkcnt;
945	u_int32_t log_blksize;
946	u_int32_t phys_blksize;
947	u_int32_t minblksize;
948	u_int32_t iswritable;
949	daddr64_t mdb_offset;
950	int isvirtual = 0;
951	int isroot = 0;
952
953	ronly = vfs_isrdonly(mp);
954	dev = vnode_specrdev(devvp);
955	cred = p ? vfs_context_ucred(context) : NOCRED;
956	mntwrapper = 0;
957
958	if (args == NULL) {
959		/* only hfs_mountroot passes us NULL as the 'args' argument */
960		isroot = 1;
961	}
962
963	bp = NULL;
964	hfsmp = NULL;
965	mdbp = NULL;
966	minblksize = kHFSBlockSize;
967
968	/* Advisory locking should be handled at the VFS layer */
969	vfs_setlocklocal(mp);
970
971	/* Get the logical block size (treated as physical block size everywhere) */
972	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
973		retval = ENXIO;
974		goto error_exit;
975	}
976	/* Get the physical block size. */
977	retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
978	if (retval) {
979		if ((retval != ENOTSUP) && (retval != ENOTTY)) {
980			retval = ENXIO;
981			goto error_exit;
982		}
		/* If the device does not support this ioctl, assume that the
		 * physical block size is the same as the logical block size.
		 */
986		phys_blksize = log_blksize;
987	}
988	/* Switch to 512 byte sectors (temporarily) */
989	if (log_blksize > 512) {
990		u_int32_t size512 = 512;
991
992		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
993			retval = ENXIO;
994			goto error_exit;
995		}
996	}
997	/* Get the number of 512 byte physical blocks. */
998	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
999		/* resetting block size may fail if getting block count did */
1000		(void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
1001
1002		retval = ENXIO;
1003		goto error_exit;
1004	}
1005	/* Compute an accurate disk size (i.e. within 512 bytes) */
1006	disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
1007
1008	/*
1009	 * On Tiger it is not necessary to switch the device
	 * block size to be 4k if there are more than 31 bits'
	 * worth of blocks, but to ensure compatibility with
	 * pre-Tiger systems we have to do it.
1013	 *
1014	 * If the device size is not a multiple of 4K (8 * 512), then
1015	 * switching the logical block size isn't going to help because
1016	 * we will be unable to write the alternate volume header.
1017	 * In this case, just leave the logical block size unchanged.
1018	 */
1019	if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
1020		minblksize = log_blksize = 4096;
1021		if (phys_blksize < log_blksize)
1022			phys_blksize = log_blksize;
1023	}
1024
1025	/* Now switch to our preferred physical block size. */
1026	if (log_blksize > 512) {
1027		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1028			retval = ENXIO;
1029			goto error_exit;
1030		}
1031		/* Get the count of physical blocks. */
1032		if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1033			retval = ENXIO;
1034			goto error_exit;
1035		}
1036	}
1037	/*
1038	 * At this point:
1039	 *   minblksize is the minimum physical block size
1040	 *   log_blksize has our preferred physical block size
1041	 *   log_blkcnt has the total number of physical blocks
1042	 */
1043
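	/*
	 * Read the block that holds the classic Master Directory Block /
	 * HFS Plus volume header signature so the code below can tell
	 * plain HFS, wrapped HFS Plus, and pure HFS Plus volumes apart.
	 */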
1044	mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
1045	if ((retval = (int)buf_meta_bread(devvp,
1046				HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
1047				phys_blksize, cred, &bp))) {
1048		goto error_exit;
1049	}
1050	MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK);
1051	bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
1052	buf_brelse(bp);
1053	bp = NULL;
1054
1055	MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
1056	bzero(hfsmp, sizeof(struct hfsmount));
1057
1058	/*
1059	 *  Init the volume information structure
1060	 */
1061
1062	lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
1063	lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
1064	lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
1065	lck_rw_init(&hfsmp->hfs_insync, hfs_rwlock_group, hfs_lock_attr);
1066
1067	vfs_setfsprivate(mp, hfsmp);
1068	hfsmp->hfs_mp = mp;			/* Make VFSTOHFS work */
1069	hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
1070	hfsmp->hfs_devvp = devvp;
1071	vnode_ref(devvp);  /* Hold a ref on the device, dropped when hfsmp is freed. */
1072	hfsmp->hfs_logical_block_size = log_blksize;
1073	hfsmp->hfs_logical_block_count = log_blkcnt;
1074	hfsmp->hfs_physical_block_size = phys_blksize;
1075	hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
1076	hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1077	if (ronly)
1078		hfsmp->hfs_flags |= HFS_READ_ONLY;
1079	if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
1080		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
1081
1082#if QUOTA
1083	for (i = 0; i < MAXQUOTAS; i++)
1084		dqfileinit(&hfsmp->hfs_qfiles[i]);
1085#endif
1086
1087	if (args) {
1088		hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
1089		if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
1090		hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
1091		if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
1092		vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid);				/* tell the VFS */
1093		if (args->hfs_mask != (mode_t)VNOVAL) {
1094			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
1095			if (args->flags & HFSFSMNT_NOXONFILES) {
1096				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
1097			} else {
1098				hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1099			}
1100		} else {
1101			hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS;		/* 0777: rwx---rwx */
1102			hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE;	/* 0666: no --x by default? */
1103		}
1104		if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1105			mntwrapper = 1;
1106	} else {
1107		/* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1108		if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1109			hfsmp->hfs_uid = UNKNOWNUID;
1110			hfsmp->hfs_gid = UNKNOWNGID;
1111			vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid);			/* tell the VFS */
1112			hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS;		/* 0777: rwx---rwx */
1113			hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE;	/* 0666: no --x by default? */
1114		}
1115	}
1116
1117	/* Find out if disk media is writable. */
1118	if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1119		if (iswritable)
1120			hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1121		else
1122			hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1123	}
1124
1125	// record the current time at which we're mounting this volume
1126	struct timeval tv;
1127	microtime(&tv);
1128	hfsmp->hfs_mount_time = tv.tv_sec;
1129
1130	/* Mount a standard HFS disk */
1131	if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1132	    (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1133
1134	    	/* If only journal replay is requested, exit immediately */
1135		if (journal_replay_only) {
1136			retval = 0;
1137			goto error_exit;
1138		}
1139
1140	        if ((vfs_flags(mp) & MNT_ROOTFS)) {
1141			retval = EINVAL;  /* Cannot root from HFS standard disks */
1142			goto error_exit;
1143		}
1144		/* HFS disks can only use 512 byte physical blocks */
1145		if (log_blksize > kHFSBlockSize) {
1146			log_blksize = kHFSBlockSize;
1147			if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1148				retval = ENXIO;
1149				goto error_exit;
1150			}
1151			if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1152				retval = ENXIO;
1153				goto error_exit;
1154			}
1155			hfsmp->hfs_logical_block_size = log_blksize;
1156			hfsmp->hfs_logical_block_count = log_blkcnt;
1157			hfsmp->hfs_physical_block_size = log_blksize;
1158			hfsmp->hfs_log_per_phys = 1;
1159		}
1160		if (args) {
1161			hfsmp->hfs_encoding = args->hfs_encoding;
1162			HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1163
1164			/* establish the timezone */
1165			gTimeZone = args->hfs_timezone;
1166		}
1167
1168		retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1169					&hfsmp->hfs_get_hfsname);
1170		if (retval)
1171			goto error_exit;
1172
1173		retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1174		if (retval)
1175			(void) hfs_relconverter(hfsmp->hfs_encoding);
1176
1177	} else /* Mount an HFS Plus disk */ {
1178		HFSPlusVolumeHeader *vhp;
1179		off_t embeddedOffset;
1180		int   jnl_disable = 0;
1181
1182		/* Get the embedded Volume Header */
1183		if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
1184			embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1185			embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1186			                  (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1187
1188			/*
1189			 * If the embedded volume doesn't start on a block
1190			 * boundary, then switch the device to a 512-byte
1191			 * block size so everything will line up on a block
1192			 * boundary.
1193			 */
1194			if ((embeddedOffset % log_blksize) != 0) {
1195				printf("HFS Mount: embedded volume offset not"
1196				    " a multiple of physical block size (%d);"
1197				    " switching to 512\n", log_blksize);
1198				log_blksize = 512;
1199				if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1200				    (caddr_t)&log_blksize, FWRITE, context)) {
1201					retval = ENXIO;
1202					goto error_exit;
1203				}
1204				if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1205				    (caddr_t)&log_blkcnt, 0, context)) {
1206					retval = ENXIO;
1207					goto error_exit;
1208				}
1209				/* Note: relative block count adjustment */
1210				hfsmp->hfs_logical_block_count *=
1211				    hfsmp->hfs_logical_block_size / log_blksize;
1212				hfsmp->hfs_logical_block_size = log_blksize;
1213
1214				/* Update logical/physical block size */
1215				hfsmp->hfs_physical_block_size = log_blksize;
1216				phys_blksize = log_blksize;
1217				hfsmp->hfs_log_per_phys = 1;
1218			}
1219
1220			disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1221			           (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1222
1223			hfsmp->hfs_logical_block_count = disksize / log_blksize;
1224
1225			mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1226			retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1227					phys_blksize, cred, &bp);
1228			if (retval)
1229				goto error_exit;
1230			bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1231			buf_brelse(bp);
1232			bp = NULL;
1233			vhp = (HFSPlusVolumeHeader*) mdbp;
1234
1235		} else /* pure HFS+ */ {
1236			embeddedOffset = 0;
1237			vhp = (HFSPlusVolumeHeader*) mdbp;
1238		}
1239
1240		/*
1241		 * On inconsistent disks, do not allow read-write mount
1242		 * unless it is the boot volume being mounted.
1243		 */
1244		if (!(vfs_flags(mp) & MNT_ROOTFS) &&
1245				(SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask) &&
1246				!(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1247			retval = EINVAL;
1248			goto error_exit;
1249		}
1250
1251
1252		// XXXdbg
1253		//
1254		hfsmp->jnl = NULL;
1255		hfsmp->jvp = NULL;
1256		if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1257		    args->journal_disable) {
1258		    jnl_disable = 1;
1259		}
1260
1261		//
1262		// We only initialize the journal here if the last person
1263		// to mount this volume was journaling aware.  Otherwise
1264		// we delay journal initialization until later at the end
1265		// of hfs_MountHFSPlusVolume() because the last person who
1266		// mounted it could have messed things up behind our back
1267		// (so we need to go find the .journal file, make sure it's
1268		// the right size, re-sync up if it was moved, etc).
1269		//
1270		if (   (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1271			&& (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1272			&& !jnl_disable) {
1273
1274			// if we're able to init the journal, mark the mount
1275			// point as journaled.
1276			//
1277			if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1278				vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1279			} else {
1280				// if the journal failed to open, then set the lastMountedVersion
1281				// to be "FSK!" which fsck_hfs will see and force the fsck instead
1282				// of just bailing out because the volume is journaled.
1283				if (!ronly) {
1284				    HFSPlusVolumeHeader *jvhp;
1285
1286				    hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1287
1288				    if (mdb_offset == 0) {
1289					mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1290				    }
1291
1292				    bp = NULL;
1293				    retval = (int)buf_meta_bread(devvp,
1294						    HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1295						    phys_blksize, cred, &bp);
1296				    if (retval == 0) {
1297					jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1298
1299					if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1300						printf ("hfs(1): Journal replay fail.  Writing lastMountVersion as FSK!\n");
1301					    jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1302					    buf_bwrite(bp);
1303					} else {
1304					    buf_brelse(bp);
1305					}
1306					bp = NULL;
1307				    } else if (bp) {
1308					buf_brelse(bp);
1309					// clear this so the error exit path won't try to use it
1310					bp = NULL;
1311				    }
1312				}
1313
1314				// if this isn't the root device just bail out.
1315				// If it is the root device we just continue on
1316				// in the hopes that fsck_hfs will be able to
1317				// fix any damage that exists on the volume.
1318				if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1319				    retval = EINVAL;
1320				    goto error_exit;
1321				}
1322			}
1323		}
1324		// XXXdbg
1325
1326		/* Either the journal is replayed successfully, or there
1327		 * was nothing to replay, or no journal exists.  In any case,
1328		 * return success.
1329		 */
1330		if (journal_replay_only) {
1331			retval = 0;
1332			goto error_exit;
1333		}
1334
1335		(void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1336
1337		retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1338		/*
1339		 * If the backend didn't like our physical blocksize
1340		 * then retry with physical blocksize of 512.
1341		 */
1342		if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1343			printf("HFS Mount: could not use physical block size "
				"(%d); switching to 512\n", log_blksize);
1345			log_blksize = 512;
1346			if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1347				retval = ENXIO;
1348				goto error_exit;
1349			}
1350			if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1351				retval = ENXIO;
1352				goto error_exit;
1353			}
1354			devvp->v_specsize = log_blksize;
1355			/* Note: relative block count adjustment (in case this is an embedded volume). */
1356    			hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1357     			hfsmp->hfs_logical_block_size = log_blksize;
1358     			hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1359
1360			if (hfsmp->jnl) {
1361			    // close and re-open this with the new block size
1362			    journal_close(hfsmp->jnl);
1363			    hfsmp->jnl = NULL;
1364			    if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1365					vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1366				} else {
1367					// if the journal failed to open, then set the lastMountedVersion
1368					// to be "FSK!" which fsck_hfs will see and force the fsck instead
1369					// of just bailing out because the volume is journaled.
1370					if (!ronly) {
1371				    	HFSPlusVolumeHeader *jvhp;
1372
1373				    	hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1374
1375				    	if (mdb_offset == 0) {
1376							mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1377				    	}
1378
1379				   	 	bp = NULL;
1380				    	retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1381							phys_blksize, cred, &bp);
1382				    	if (retval == 0) {
1383							jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1384
1385							if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1386								printf ("hfs(2): Journal replay fail.  Writing lastMountVersion as FSK!\n");
1387					    		jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1388					    		buf_bwrite(bp);
1389							} else {
1390					    		buf_brelse(bp);
1391							}
1392							bp = NULL;
1393				    	} else if (bp) {
1394							buf_brelse(bp);
1395							// clear this so the error exit path won't try to use it
1396							bp = NULL;
1397				    	}
1398					}
1399
1400					// if this isn't the root device just bail out.
1401					// If it is the root device we just continue on
1402					// in the hopes that fsck_hfs will be able to
1403					// fix any damage that exists on the volume.
1404					if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1405				    	retval = EINVAL;
1406				    	goto error_exit;
1407					}
1408				}
1409			}
1410
1411			/* Try again with a smaller block size... */
1412			retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1413		}
1414		if (retval)
1415			(void) hfs_relconverter(0);
1416	}
1417
1418	// save off a snapshot of the mtime from the previous mount
1419	// (for matador).
1420	hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1421
1422	if ( retval ) {
1423		goto error_exit;
1424	}
1425
1426	mp->mnt_vfsstat.f_fsid.val[0] = (long)dev;
1427	mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
1428	vfs_setmaxsymlen(mp, 0);
1429	mp->mnt_vtable->vfc_threadsafe = TRUE;
1430	mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1431#if NAMEDSTREAMS
1432	mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1433#endif
1434	if (!(hfsmp->hfs_flags & HFS_STANDARD)) {
1435		/* Tell VFS that we support directory hard links. */
1436		mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS;
1437	} else {
1438		/* HFS standard doesn't support extended readdir! */
1439		mp->mnt_vtable->vfc_vfsflags &= ~VFC_VFSREADDIR_EXTENDED;
1440	}
1441
1442	if (args) {
1443		/*
1444		 * Set the free space warning levels for a non-root volume:
1445		 *
1446		 * Set the lower freespace limit (the level that will trigger a warning)
1447		 * to 5% of the volume size or 250MB, whichever is less, and the desired
1448		 * level (which will cancel the alert request) to 1/2 above that limit.
1449		 * Start looking for free space to drop below this level and generate a
1450		 * warning immediately if needed:
1451		 */
1452		hfsmp->hfs_freespace_notify_warninglimit =
1453			MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1454				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1455		hfsmp->hfs_freespace_notify_desiredlevel =
1456			MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1457				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1458	} else {
1459		/*
1460		 * Set the free space warning levels for the root volume:
1461		 *
1462		 * Set the lower freespace limit (the level that will trigger a warning)
1463		 * to 1% of the volume size or 50MB, whichever is less, and the desired
1464		 * level (which will cancel the alert request) to 2% or 75MB, whichever is less.
1465		 */
1466		hfsmp->hfs_freespace_notify_warninglimit =
1467			MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1468				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1469		hfsmp->hfs_freespace_notify_desiredlevel =
1470			MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1471				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
	}
1473
1474	/* Check if the file system exists on virtual device, like disk image */
1475	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1476		if (isvirtual) {
1477			hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1478		}
1479	}
1480
1481	/* ejectability checks will time out when the device is root_device, so skip them */
1482	if (isroot == 0) {
1483		if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 &&
1484				IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) {
1485			hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp);
1486			if (hfsmp->hfs_syncer == NULL) {
1487				printf("hfs: failed to allocate syncer thread callback for %s (%s)\n",
1488						mp->mnt_vfsstat.f_mntfromname, mp->mnt_vfsstat.f_mntonname);
1489			}
1490		}
1491	}
1492
1493	/*
	 * Start watching for free space dropping below the warning level, and
	 * generate a warning immediately if it already has:
1496	 */
1497	hfsmp->hfs_notification_conditions = 0;
1498	hfs_generate_volume_notifications(hfsmp);
1499
1500	if (ronly == 0) {
1501		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1502	}
1503	FREE(mdbp, M_TEMP);
1504	return (0);
1505
1506error_exit:
1507	if (bp)
1508		buf_brelse(bp);
1509	if (mdbp)
1510		FREE(mdbp, M_TEMP);
1511
1512	if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1513	    (void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, context);
1514		hfsmp->jvp = NULL;
1515	}
1516	if (hfsmp) {
1517		if (hfsmp->hfs_devvp) {
1518			vnode_rele(hfsmp->hfs_devvp);
1519		}
1520		FREE(hfsmp, M_HFSMNT);
1521		vfs_setfsprivate(mp, NULL);
1522	}
1523        return (retval);
1524}
1525
1526
1527/*
1528 * Make a filesystem operational.
1529 * Nothing to do at the moment.
1530 */
1531/* ARGSUSED */
1532static int
1533hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
1534{
1535	return (0);
1536}
1537
1538
1539/*
1540 * unmount system call
1541 */
1542static int
1543hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
1544{
1545	struct proc *p = vfs_context_proc(context);
1546	struct hfsmount *hfsmp = VFSTOHFS(mp);
1547	int retval = E_NONE;
1548	int flags;
1549	int force;
1550	int started_tr = 0;
1551
1552	flags = 0;
1553	force = 0;
1554	if (mntflags & MNT_FORCE) {
1555		flags |= FORCECLOSE;
1556		force = 1;
1557	}
1558
1559	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
1560 		return (retval);
1561
1562	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
1563		(void) hfs_recording_suspend(hfsmp);
1564
1565	/*
1566	 * Cancel any pending timers for this volume.  Then wait for any timers
1567	 * which have fired, but whose callbacks have not yet completed.
1568	 */
1569	if (hfsmp->hfs_syncer)
1570	{
1571		struct timespec ts = {0, 100000000};	/* 0.1 seconds */
1572
1573		/*
1574		 * Cancel any timers that have been scheduled, but have not
1575		 * fired yet.  NOTE: The kernel considers a timer complete as
1576		 * soon as it starts your callback, so the kernel does not
1577		 * keep track of the number of callbacks in progress.
1578		 */
1579		if (thread_call_cancel(hfsmp->hfs_syncer))
1580			OSDecrementAtomic((volatile SInt32 *)&hfsmp->hfs_sync_incomplete);
1581		thread_call_free(hfsmp->hfs_syncer);
1582		hfsmp->hfs_syncer = NULL;
1583
1584		/*
1585		 * This waits for all of the callbacks that were entered before
1586		 * we did thread_call_cancel above, but have not completed yet.
1587		 */
1588		while(hfsmp->hfs_sync_incomplete > 0)
1589		{
1590			msleep((caddr_t)&hfsmp->hfs_sync_incomplete, NULL, PWAIT, "hfs_unmount", &ts);
1591		}
1592
1593		if (hfsmp->hfs_sync_incomplete < 0)
			printf("hfs_unmount: hfs_sync_incomplete underflow (%d)!\n", hfsmp->hfs_sync_incomplete);
1595	}
1596
1597	/*
1598	 * Flush out the b-trees, volume bitmap and Volume Header
1599	 */
1600	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
1601		retval = hfs_start_transaction(hfsmp);
1602		if (retval == 0) {
1603		    started_tr = 1;
1604		} else if (!force) {
1605		    goto err_exit;
1606		}
1607
1608		if (hfsmp->hfs_startup_vp) {
1609			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK);
1610			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
1611			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
1612			if (retval && !force)
1613				goto err_exit;
1614		}
1615
1616		if (hfsmp->hfs_attribute_vp) {
1617			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK);
1618			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
1619			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
1620			if (retval && !force)
1621				goto err_exit;
1622		}
1623
1624		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK);
1625		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
1626		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
1627		if (retval && !force)
1628			goto err_exit;
1629
1630		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK);
1631		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
1632		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
1633		if (retval && !force)
1634			goto err_exit;
1635
1636		if (hfsmp->hfs_allocation_vp) {
1637			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK);
1638			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
1639			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
1640			if (retval && !force)
1641				goto err_exit;
1642		}
1643
1644		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
1645			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
1646			if (retval && !force)
1647				goto err_exit;
1648		}
1649
1650		/* If runtime corruption was detected, indicate that the volume
1651		 * was not unmounted cleanly.
1652		 */
1653		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
1654			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
1655		} else {
1656			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
1657		}
1658
1659		retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1660		if (retval) {
1661			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
1662			if (!force)
1663				goto err_exit;	/* could not flush everything */
1664		}
1665
1666		if (started_tr) {
1667		    hfs_end_transaction(hfsmp);
1668		    started_tr = 0;
1669		}
1670	}
1671
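	/*
	 * Push any open transactions out to the on-disk journal before we
	 * start invalidating caches and releasing the metadata vnodes.
	 */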
1672	if (hfsmp->jnl) {
1673		journal_flush(hfsmp->jnl);
1674	}
1675
1676	/*
1677	 *	Invalidate our caches and release metadata vnodes
1678	 */
1679	(void) hfsUnmount(hfsmp, p);
1680
1681	/*
1682	 * Last chance to dump unreferenced system files.
1683	 */
1684	(void) vflush(mp, NULLVP, FORCECLOSE);
1685
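	/* Plain HFS volumes hold a text-encoding converter; release it now. */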
1686	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
1687		(void) hfs_relconverter(hfsmp->hfs_encoding);
1688
1689	// XXXdbg
1690	if (hfsmp->jnl) {
1691	    journal_close(hfsmp->jnl);
1692	    hfsmp->jnl = NULL;
1693	}
1694
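	/* Synchronously flush any remaining dirty buffers on the device vnode. */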
1695	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
1696
1697	if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1698	    retval = VNOP_CLOSE(hfsmp->jvp,
1699	                       hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE,
1700			       context);
1701	    vnode_put(hfsmp->jvp);
1702	    hfsmp->jvp = NULL;
1703	}
1704	// XXXdbg
1705
1706#ifdef HFS_SPARSE_DEV
1707	/* Drop our reference on the backing fs (if any). */
1708	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) {
1709		struct vnode * tmpvp;
1710
1711		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
1712		tmpvp = hfsmp->hfs_backingfs_rootvp;
1713		hfsmp->hfs_backingfs_rootvp = NULLVP;
1714		vnode_rele(tmpvp);
1715	}
1716#endif /* HFS_SPARSE_DEV */
1717	lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
1718	vnode_rele(hfsmp->hfs_devvp);
1719	FREE(hfsmp, M_HFSMNT);
1720
1721	return (0);
1722
1723  err_exit:
1724	if (started_tr) {
1725		hfs_end_transaction(hfsmp);
1726	}
1727	return retval;
1728}
1729
1730
1731/*
1732 * Return the root of a filesystem.
1733 */
1734static int
1735hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
1736{
1737	return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1);
1738}
1739
1740
1741/*
1742 * Do operations associated with quotas
1743 */
1744#if !QUOTA
1745static int
1746hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
1747{
1748	return (ENOTSUP);
1749}
1750#else
1751static int
1752hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
1753{
1754	struct proc *p = vfs_context_proc(context);
1755	int cmd, type, error;
1756
1757	if (uid == ~0U)
1758		uid = vfs_context_ucred(context)->cr_ruid;
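	/* The quota command lives in the high bits of cmds; the quota type is in the low bits. */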
1759	cmd = cmds >> SUBCMDSHIFT;
1760
1761	switch (cmd) {
1762	case Q_SYNC:
1763	case Q_QUOTASTAT:
1764		break;
1765	case Q_GETQUOTA:
1766		if (uid == vfs_context_ucred(context)->cr_ruid)
1767			break;
1768		/* fall through */
1769	default:
1770		if ( (error = vfs_context_suser(context)) )
1771			return (error);
1772	}
1773
1774	type = cmds & SUBCMDMASK;
1775	if ((u_int)type >= MAXQUOTAS)
1776		return (EINVAL);
1777	if (vfs_busy(mp, LK_NOWAIT))
1778		return (0);
1779
1780	switch (cmd) {
1781
1782	case Q_QUOTAON:
1783		error = hfs_quotaon(p, mp, type, datap);
1784		break;
1785
1786	case Q_QUOTAOFF:
1787		error = hfs_quotaoff(p, mp, type);
1788		break;
1789
1790	case Q_SETQUOTA:
1791		error = hfs_setquota(mp, uid, type, datap);
1792		break;
1793
1794	case Q_SETUSE:
1795		error = hfs_setuse(mp, uid, type, datap);
1796		break;
1797
1798	case Q_GETQUOTA:
1799		error = hfs_getquota(mp, uid, type, datap);
1800		break;
1801
1802	case Q_SYNC:
1803		error = hfs_qsync(mp);
1804		break;
1805
1806	case Q_QUOTASTAT:
1807		error = hfs_quotastat(mp, type, datap);
1808		break;
1809
1810	default:
1811		error = EINVAL;
1812		break;
1813	}
1814	vfs_unbusy(mp);
1815
1816	return (error);
1817}
1818#endif /* QUOTA */
1819
1820/* Subtype is composite of bits */
1821#define HFS_SUBTYPE_JOURNALED      0x01
1822#define HFS_SUBTYPE_CASESENSITIVE  0x02
1823/* bits 2 - 6 reserved */
1824#define HFS_SUBTYPE_STANDARDHFS    0x80
1825
1826/*
1827 * Get file system statistics.
1828 */
1829static int
1830hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
1831{
1832	ExtendedVCB *vcb = VFSTOVCB(mp);
1833	struct hfsmount *hfsmp = VFSTOHFS(mp);
1834	u_long freeCNIDs;
1835	u_int16_t subtype = 0;
1836
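	/* CNIDs are 32 bits, so estimate free file IDs as the distance from the next CNID to the 32-bit limit. */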
1837	freeCNIDs = (u_long)0xFFFFFFFF - (u_long)vcb->vcbNxtCNID;
1838
1839	sbp->f_bsize = (u_int32_t)vcb->blockSize;
1840	sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
1841	sbp->f_blocks = (u_int64_t)((unsigned long)vcb->totalBlocks);
1842	sbp->f_bfree = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 0));
1843	sbp->f_bavail = (u_int64_t)((unsigned long )hfs_freeblks(hfsmp, 1));
1844	sbp->f_files = (u_int64_t)((unsigned long )(vcb->totalBlocks - 2));  /* max files is constrained by total blocks */
1845	sbp->f_ffree = (u_int64_t)((unsigned long )(MIN(freeCNIDs, sbp->f_bavail)));
1846
1847	/*
1848	 * Subtypes (flavors) for HFS
1849	 *   0:   Mac OS Extended
1850	 *   1:   Mac OS Extended (Journaled)
1851	 *   2:   Mac OS Extended (Case Sensitive)
1852	 *   3:   Mac OS Extended (Case Sensitive, Journaled)
1853	 *   4 - 127:   Reserved
1854	 * 128:   Mac OS Standard
1855	 *
1856	 */
1857	if (hfsmp->hfs_flags & HFS_STANDARD) {
1858		subtype = HFS_SUBTYPE_STANDARDHFS;
1859	} else /* HFS Plus */ {
1860		if (hfsmp->jnl)
1861			subtype |= HFS_SUBTYPE_JOURNALED;
1862		if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)
1863			subtype |= HFS_SUBTYPE_CASESENSITIVE;
1864	}
1865	sbp->f_fssubtype = subtype;
1866
1867	return (0);
1868}
1869
1870
1871//
1872// XXXdbg -- this is a callback to be used by the journal to
1873//           get meta data blocks flushed out to disk.
1874//
1875// XXXdbg -- be smarter and don't flush *every* block on each
1876//           call.  try to only flush some so we don't wind up
1877//           being too synchronous.
1878//
1879__private_extern__
1880void
1881hfs_sync_metadata(void *arg)
1882{
1883	struct mount *mp = (struct mount *)arg;
1884	struct hfsmount *hfsmp;
1885	ExtendedVCB *vcb;
1886	buf_t	bp;
1887	int  retval;
1888	daddr64_t priIDSector;
1889	hfsmp = VFSTOHFS(mp);
1890	vcb = HFSTOVCB(hfsmp);
1891
1892	// now make sure the super block is flushed
1893	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
1894				  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
1895
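	// Write the primary volume header back only if its buffer is dirty
	// (delayed-write) and not locked.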
1896	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
1897			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
1898			hfsmp->hfs_physical_block_size, NOCRED, &bp);
1899	if ((retval != 0 ) && (retval != ENXIO)) {
1900		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
1901		       (int)priIDSector, retval);
1902	}
1903
1904	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
1905	    buf_bwrite(bp);
1906	} else if (bp) {
1907	    buf_brelse(bp);
1908	}
1909
1910	// the alternate super block...
1911	// XXXdbg - we probably don't need to do this each and every time.
1912	//          hfs_btreeio.c:FlushAlternate() should flag when it was
1913	//          written...
1914	if (hfsmp->hfs_alt_id_sector) {
1915		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
1916				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
1917				hfsmp->hfs_physical_block_size, NOCRED, &bp);
1918		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
1919		    buf_bwrite(bp);
1920		} else if (bp) {
1921		    buf_brelse(bp);
1922		}
1923	}
1924}
1925
1926
1927struct hfs_sync_cargs {
1928        kauth_cred_t cred;
1929        struct proc  *p;
1930        int    waitfor;
1931        int    error;
1932};
1933
1934
1935static int
1936hfs_sync_callback(struct vnode *vp, void *cargs)
1937{
1938	struct cnode *cp;
1939	struct hfs_sync_cargs *args;
1940	int error;
1941
1942	args = (struct hfs_sync_cargs *)cargs;
1943
1944	if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK) != 0) {
1945		return (VNODE_RETURNED);
1946	}
1947	cp = VTOC(vp);
1948
1949	if ((cp->c_flag & C_MODIFIED) ||
1950	    (cp->c_touch_acctime | cp->c_touch_chgtime | cp->c_touch_modtime) ||
1951	    vnode_hasdirtyblks(vp)) {
1952	        error = hfs_fsync(vp, args->waitfor, 0, args->p);
1953
1954		if (error)
1955		        args->error = error;
1956	}
1957	hfs_unlock(cp);
1958	return (VNODE_RETURNED);
1959}
1960
1961
1962
1963/*
1964 * Go through the disk queues to initiate sandbagged IO;
1965 * go through the inodes to write those that have been modified;
1966 * initiate the writing of the super block if it has been modified.
1967 *
1968 * Note: we are always called with the filesystem marked `MPBUSY'.
1969 */
1970static int
1971hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
1972{
1973	struct proc *p = vfs_context_proc(context);
1974	struct cnode *cp;
1975	struct hfsmount *hfsmp;
1976	ExtendedVCB *vcb;
1977	struct vnode *meta_vp[4];
1978	int i;
1979	int error, allerror = 0;
1980	struct hfs_sync_cargs args;
1981
1982	hfsmp = VFSTOHFS(mp);
1983
1984	/*
1985	 * hfs_changefs might be manipulating vnodes so back off
1986	 */
1987	if (hfsmp->hfs_flags & HFS_IN_CHANGEFS)
1988		return (0);
1989
1990	if (hfsmp->hfs_flags & HFS_READ_ONLY)
1991		return (EROFS);
1992
1993	/* skip over frozen volumes */
1994	if (!lck_rw_try_lock_shared(&hfsmp->hfs_insync))
1995		return 0;
1996
1997	args.cred = kauth_cred_get();
1998	args.waitfor = waitfor;
1999	args.p = p;
2000	args.error = 0;
2001	/*
2002	 * hfs_sync_callback will be called for each vnode
2003	 * hung off of this mount point... the vnode will be
2004	 * properly referenced and unreferenced around the callback
2005	 */
2006	vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
2007
2008	if (args.error)
2009	        allerror = args.error;
2010
2011	vcb = HFSTOVCB(hfsmp);
2012
2013	meta_vp[0] = vcb->extentsRefNum;
2014	meta_vp[1] = vcb->catalogRefNum;
2015	meta_vp[2] = vcb->allocationsRefNum;  /* This is NULL for standard HFS */
2016	meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
2017
2018	/* Now sync our four metadata files */
2019	for (i = 0; i < 4; ++i) {
2020		struct vnode *btvp;
2021
2022		btvp = meta_vp[i];
2023		if ((btvp == 0) || (vnode_mount(btvp) != mp))
2024			continue;
2025
2026		/* XXX use hfs_systemfile_lock instead ? */
2027		(void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK);
2028		cp = VTOC(btvp);
2029
2030		if (((cp->c_flag &  C_MODIFIED) == 0) &&
2031		    (cp->c_touch_acctime == 0) &&
2032		    (cp->c_touch_chgtime == 0) &&
2033		    (cp->c_touch_modtime == 0) &&
2034		    vnode_hasdirtyblks(btvp) == 0) {
2035			hfs_unlock(VTOC(btvp));
2036			continue;
2037		}
2038		error = vnode_get(btvp);
2039		if (error) {
2040			hfs_unlock(VTOC(btvp));
2041			continue;
2042		}
2043		if ((error = hfs_fsync(btvp, waitfor, 0, p)))
2044			allerror = error;
2045
2046		hfs_unlock(cp);
2047		vnode_put(btvp);
2048	}
2049
2050	/*
2051	 * Force stale file system control information to be flushed.
2052	 */
2053	if (vcb->vcbSigWord == kHFSSigWord) {
2054		if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
2055			allerror = error;
2056		}
2057	}
2058#if QUOTA
2059	hfs_qsync(mp);
2060#endif /* QUOTA */
2061
2062	hfs_hotfilesync(hfsmp, vfs_context_kernel());
2063
2064	/*
2065	 * Write back modified superblock.
2066	 */
2067	if (IsVCBDirty(vcb)) {
2068		error = hfs_flushvolumeheader(hfsmp, waitfor, 0);
2069		if (error)
2070			allerror = error;
2071	}
2072
2073	if (hfsmp->jnl) {
2074	    journal_flush(hfsmp->jnl);
2075	}
2076
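	/* Record the wall-clock time of this sync, in microseconds since the epoch. */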
2077	{
2078		uint32_t secs, usecs;
2079		uint64_t now;
2080
2081		clock_get_calendar_microtime(&secs, &usecs);
2082		now = ((uint64_t)secs * 1000000LL) + usecs;
2083		hfsmp->hfs_last_sync_time = now;
2084	}
2085
2086	lck_rw_unlock_shared(&hfsmp->hfs_insync);
2087	return (allerror);
2088}
2089
2090
2091/*
2092 * File handle to vnode
2093 *
2094 * Have to be really careful about stale file handles:
2095 * - check that the cnode id is valid
2096 * - call hfs_vget() to get the locked cnode
2097 * - check for an unallocated cnode (i_mode == 0)
2098 * - check that the given client host has export rights and return
2099 *   those rights via exflagsp and credanonp
2100 */
2101static int
2102hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
2103{
2104	struct hfsfid *hfsfhp;
2105	struct vnode *nvp;
2106	int result;
2107
2108	*vpp = NULL;
2109	hfsfhp = (struct hfsfid *)fhp;
2110
2111	if (fhlen < (int)sizeof(struct hfsfid))
2112		return (EINVAL);
2113
2114	result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0);
2115	if (result) {
2116		if (result == ENOENT)
2117			result = ESTALE;
2118		return result;
2119	}
2120
2121	/* The createtime can be changed by hfs_setattr or hfs_setattrlist.
2122	 * For NFS, we assume the fileID was reused (by CNID wrapping) in
2123	 * this session only if the createtime was moved forward. We don't
2124	 * have a volume ID or other unique identifier to use here for a
2125	 * generation ID across reboots, crashes where metadata noting the
2126	 * lastFileID didn't make it to disk but the client still has it, or
2127	 * volume erasures where fileIDs start over again. Lastly, now that
2128	 * HFS allows fileIDs to "wrap", this check becomes more error prone.
2129	 * A future improvement would be to change the "wrap bit" to a unique
2130	 * wrap number and use that as the generation number. For now do this.
2131	 */
2132	if (((time_t)(ntohl(hfsfhp->hfsfid_gen)) < VTOC(nvp)->c_itime)) {
2133		hfs_unlock(VTOC(nvp));
2134		vnode_put(nvp);
2135		return (ESTALE);
2136	}
2137	*vpp = nvp;
2138
2139	hfs_unlock(VTOC(nvp));
2140	return (0);
2141}
2142
2143
2144/*
2145 * Vnode pointer to File handle
2146 */
2147/* ARGSUSED */
2148static int
2149hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
2150{
2151	struct cnode *cp;
2152	struct hfsfid *hfsfhp;
2153
2154	if (ISHFS(VTOVCB(vp)))
2155		return (ENOTSUP);	/* hfs standard is not exportable */
2156
2157	if (*fhlenp < (int)sizeof(struct hfsfid))
2158		return (EOVERFLOW);
2159
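	/* Pack the file ID and create time (used as a generation number) in network byte order. */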
2160	cp = VTOC(vp);
2161	hfsfhp = (struct hfsfid *)fhp;
2162	hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2163	hfsfhp->hfsfid_gen = htonl(cp->c_itime);
2164	*fhlenp = sizeof(struct hfsfid);
2165
2166	return (0);
2167}
2168
2169
2170/*
2171 * Initialize the HFS filesystem; done only once.
2172 */
2173static int
2174hfs_init(__unused struct vfsconf *vfsp)
2175{
2176	static int done = 0;
2177
2178	if (done)
2179		return (0);
2180	done = 1;
2181	hfs_chashinit();
2182	hfs_converterinit();
2183
2184	BTReserveSetup();
2185
2186
2187	hfs_lock_attr    = lck_attr_alloc_init();
2188	hfs_group_attr   = lck_grp_attr_alloc_init();
2189	hfs_mutex_group  = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
2190	hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
2191
2192
2193	return (0);
2194}
2195
2196static int
2197hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2198{
2199	struct hfsmount * hfsmp;
2200	char fstypename[MFSNAMELEN];
2201
2202	if (vp == NULL)
2203		return (EINVAL);
2204
2205	if (!vnode_isvroot(vp))
2206		return (EINVAL);
2207
2208	vnode_vfsname(vp, fstypename);
2209	if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2210		return (EINVAL);
2211
2212	hfsmp = VTOHFS(vp);
2213
2214	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2215		return (EINVAL);
2216
2217	*hfsmpp = hfsmp;
2218
2219	return (0);
2220}
2221
2222// XXXdbg
2223#include <sys/filedesc.h>
2224
2225/*
2226 * HFS filesystem related variables.
2227 */
2228static int
2229hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
2230			user_addr_t newp, size_t newlen, vfs_context_t context)
2231{
2232	struct proc *p = vfs_context_proc(context);
2233	int error;
2234	struct hfsmount *hfsmp;
2235
2236	/* all sysctl names at this level are terminal */
2237
2238	if (name[0] == HFS_ENCODINGBIAS) {
2239		int bias;
2240
2241		bias = hfs_getencodingbias();
2242		error = sysctl_int(oldp, oldlenp, newp, newlen, &bias);
2243		if (error == 0 && newp)
2244			hfs_setencodingbias(bias);
2245		return (error);
2246
2247	} else if (name[0] == HFS_EXTEND_FS) {
2248        u_int64_t  newsize;
2249		vnode_t vp = vfs_context_cwd(context);
2250
2251		if (newp == USER_ADDR_NULL || vp == NULLVP)
2252			return (EINVAL);
2253		if ((error = hfs_getmountpoint(vp, &hfsmp)))
2254			return (error);
2255		error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize);
2256		if (error)
2257			return (error);
2258
2259		error = hfs_extendfs(hfsmp, newsize, context);
2260		return (error);
2261
2262	} else if (name[0] == HFS_ENCODINGHINT) {
2263		size_t bufsize;
2264		size_t bytes;
2265		u_int32_t hint;
2266		u_int16_t *unicode_name;
2267		char *filename;
2268
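		/*
		 * Convert the caller-supplied UTF-8 name to UTF-16 and return
		 * a suggested Mac text encoding for it.
		 */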
2269		if ((newlen <= 0) || (newlen > MAXPATHLEN))
2270			return (EINVAL);
2271
2272		bufsize = MAX(newlen * 3, MAXPATHLEN);
2273		MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK);
2274		MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK);
2275
2276		error = copyin(newp, (caddr_t)filename, newlen);
2277		if (error == 0) {
2278			error = utf8_decodestr((u_int8_t *)filename, newlen - 1, unicode_name,
2279			                       &bytes, bufsize, 0, UTF_DECOMPOSED);
2280			if (error == 0) {
2281				hint = hfs_pickencoding(unicode_name, bytes / 2);
2282				error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint);
2283			}
2284		}
2285		FREE(unicode_name, M_TEMP);
2286		FREE(filename, M_TEMP);
2287		return (error);
2288
2289	} else if (name[0] == HFS_ENABLE_JOURNALING) {
2290		// make the file system journaled...
2291		vnode_t vp = vfs_context_cwd(context);
2292		vnode_t jvp;
2293		ExtendedVCB *vcb;
2294		struct cat_attr jnl_attr, jinfo_attr;
2295		struct cat_fork jnl_fork, jinfo_fork;
2296		void *jnl = NULL;
2297		int lockflags;
2298
2299		/* Only root can enable journaling */
2300		if (!is_suser()) {
2301			return (EPERM);
2302		}
2303		if (vp == NULLVP)
2304		        return EINVAL;
2305
2306		hfsmp = VTOHFS(vp);
2307		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2308			return EROFS;
2309		}
2310		if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2311			printf("hfs: can't make a plain hfs volume journaled.\n");
2312			return EINVAL;
2313		}
2314
2315		if (hfsmp->jnl) {
2316		    printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp));
2317		    return EAGAIN;
2318		}
2319
2320		vcb = HFSTOVCB(hfsmp);
2321		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2322		if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2323			BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2324
2325			printf("hfs: volume has a btree w/non-contiguous nodes.  can not enable journaling.\n");
2326			hfs_systemfile_unlock(hfsmp, lockflags);
2327			return EINVAL;
2328		}
2329		hfs_systemfile_unlock(hfsmp, lockflags);
2330
2331		// make sure these both exist!
2332		if (   GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2333			|| GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2334
2335			return EINVAL;
2336		}
2337
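		/* Flush the volume before initializing the journal. */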
2338		hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
2339
2340		printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2341			   (off_t)name[2], (off_t)name[3]);
2342
2343		jvp = hfsmp->hfs_devvp;
2344		jnl = journal_create(jvp,
2345							 (off_t)name[2] * (off_t)HFSTOVCB(hfsmp)->blockSize
2346							 + HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
2347							 (off_t)((unsigned)name[3]),
2348							 hfsmp->hfs_devvp,
2349							 hfsmp->hfs_logical_block_size,
2350							 0,
2351							 0,
2352							 hfs_sync_metadata, hfsmp->hfs_mp);
2353
2354		if (jnl == NULL) {
2355			printf("hfs: FAILED to create the journal!\n");
2356			if (jvp && jvp != hfsmp->hfs_devvp) {
2357				VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context);
2358			}
2359			jvp = NULL;
2360
2361			return EINVAL;
2362		}
2363
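		/* Take the global exclusive lock while switching the volume into journaled mode. */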
2364		hfs_global_exclusive_lock_acquire(hfsmp);
2365
2366		/*
2367		 * Flush all dirty metadata buffers.
2368		 */
2369		buf_flushdirtyblks(hfsmp->hfs_devvp, MNT_WAIT, 0, "hfs_sysctl");
2370		buf_flushdirtyblks(hfsmp->hfs_extents_vp, MNT_WAIT, 0, "hfs_sysctl");
2371		buf_flushdirtyblks(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, "hfs_sysctl");
2372		buf_flushdirtyblks(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, "hfs_sysctl");
2373		if (hfsmp->hfs_attribute_vp)
2374			buf_flushdirtyblks(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, "hfs_sysctl");
2375
2376		HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
2377		HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
2378		hfsmp->jvp = jvp;
2379		hfsmp->jnl = jnl;
2380
2381		// save this off for the hack-y check in hfs_remove()
2382		hfsmp->jnl_start        = (u_int32_t)name[2];
2383		hfsmp->jnl_size         = (off_t)((unsigned)name[3]);
2384		hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
2385		hfsmp->hfs_jnlfileid    = jnl_attr.ca_fileid;
2386
2387		vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
2388
2389		hfs_global_exclusive_lock_release(hfsmp);
2390		hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
2391
2392		return 0;
2393	} else if (name[0] == HFS_DISABLE_JOURNALING) {
2394		// clear the journaling bit
2395		vnode_t vp = vfs_context_cwd(context);
2396
2397		/* Only root can disable journaling */
2398		if (!is_suser()) {
2399			return (EPERM);
2400		}
2401		if (vp == NULLVP)
2402		        return EINVAL;
2403
2404		hfsmp = VTOHFS(vp);
2405
2406		/*
2407		 * Disabling journaling is disallowed on volumes with directory hard links
2408		 * because we have not tested the relevant code path.
2409		 */
2410		if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
2411			printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
2412			return EPERM;
2413		}
2414
2415		printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp));
2416
2417		hfs_global_exclusive_lock_acquire(hfsmp);
2418
2419		// Lights out for you buddy!
2420		journal_close(hfsmp->jnl);
2421		hfsmp->jnl = NULL;
2422
2423		if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
2424			VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, context);
2425		}
2426		hfsmp->jvp = NULL;
2427		vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
2428		hfsmp->jnl_start        = 0;
2429		hfsmp->hfs_jnlinfoblkid = 0;
2430		hfsmp->hfs_jnlfileid    = 0;
2431
2432		HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
2433
2434		hfs_global_exclusive_lock_release(hfsmp);
2435		hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
2436
2437		return 0;
2438	} else if (name[0] == HFS_GET_JOURNAL_INFO) {
2439		vnode_t vp = vfs_context_cwd(context);
2440		off_t jnl_start, jnl_size;
2441
2442		if (vp == NULLVP)
2443		        return EINVAL;
2444
2445		hfsmp = VTOHFS(vp);
2446	    if (hfsmp->jnl == NULL) {
2447			jnl_start = 0;
2448			jnl_size  = 0;
2449	    } else {
2450			jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
2451			jnl_size  = (off_t)hfsmp->jnl_size;
2452	    }
2453
2454	    if ((error = copyout((caddr_t)&jnl_start, CAST_USER_ADDR_T(name[1]), sizeof(off_t))) != 0) {
2455			return error;
2456		}
2457	    if ((error = copyout((caddr_t)&jnl_size, CAST_USER_ADDR_T(name[2]), sizeof(off_t))) != 0) {
2458			return error;
2459		}
2460
2461		return 0;
2462	} else if (name[0] == HFS_SET_PKG_EXTENSIONS) {
2463
2464	    return set_package_extensions_table((void *)name[1], name[2], name[3]);
2465
2466	} else if (name[0] == VFS_CTL_QUERY) {
2467    	struct sysctl_req *req;
2468    	struct vfsidctl vc;
2469	    struct user_vfsidctl user_vc;
2470    	struct mount *mp;
2471 	    struct vfsquery vq;
2472	    boolean_t is_64_bit;
2473
2474    	is_64_bit = proc_is64bit(p);
2475		req = CAST_DOWN(struct sysctl_req *, oldp);	/* we're new style vfs sysctl. */
2476
2477        if (is_64_bit) {
2478            error = SYSCTL_IN(req, &user_vc, sizeof(user_vc));
2479            if (error) return (error);
2480
2481            mp = vfs_getvfs(&user_vc.vc_fsid);
2482        }
2483        else {
2484            error = SYSCTL_IN(req, &vc, sizeof(vc));
2485            if (error) return (error);
2486
2487            mp = vfs_getvfs(&vc.vc_fsid);
2488        }
2489        if (mp == NULL) return (ENOENT);
2490
2491		hfsmp = VFSTOHFS(mp);
2492		bzero(&vq, sizeof(vq));
2493		vq.vq_flags = hfsmp->hfs_notification_conditions;
2494		return SYSCTL_OUT(req, &vq, sizeof(vq));
2495	} else if (name[0] == HFS_REPLAY_JOURNAL) {
2496		char *devnode = NULL;
2497		size_t devnode_len;
2498
2499		devnode_len = *oldlenp;
2500		MALLOC(devnode, char *, devnode_len + 1, M_TEMP, M_WAITOK);
2501		if (devnode == NULL) {
2502			return ENOMEM;
2503		}
2504
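		/* Copy in the device node path from user space and NUL-terminate it. */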
2505		error = copyin(oldp, (caddr_t)devnode, devnode_len);
2506		if (error) {
2507			FREE(devnode, M_TEMP);
2508			return error;
2509		}
2510		devnode[devnode_len] = 0;
2511
2512		error = hfs_journal_replay(devnode, context);
2513		FREE(devnode, M_TEMP);
2514		return error;
2515	}
2516
2517	return (ENOTSUP);
2518}
2519
2520/* hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support the
2521 * build_path ioctl.  We use it to leverage the code below that updates the origin
2522 * cache if necessary.
2523 */
2524int
2525hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
2526{
2527	int error;
2528	int lockflags;
2529	struct hfsmount *hfsmp;
2530
2531	hfsmp = VFSTOHFS(mp);
2532
2533	error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1);
2534	if (error)
2535		return (error);
2536
2537	/*
2538	 * ADLs may need to have their origin state updated
2539	 * since build_path needs a valid parent. The same is true
2540	 * for hardlinked files as well. There isn't a race window here in re-acquiring
2541	 * the cnode lock since we aren't pulling any data out of the cnode; instead, we're
2542	 * going back to the catalog.
2543	 */
2544	if ((VTOC(*vpp)->c_flag & C_HARDLINK) &&
2545	    (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK) == 0)) {
2546		cnode_t *cp = VTOC(*vpp);
2547		struct cat_desc cdesc;
2548
2549		if (!hfs_haslinkorigin(cp)) {
2550			lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2551		    error = cat_findname(hfsmp, (cnid_t)ino, &cdesc);
2552			hfs_systemfile_unlock(hfsmp, lockflags);
2553			if (error == 0) {
2554				if ((cdesc.cd_parentcnid !=
2555			    	hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
2556			   		(cdesc.cd_parentcnid !=
2557					hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) {
2558					hfs_savelinkorigin(cp, cdesc.cd_parentcnid);
2559				}
2560				cat_releasedesc(&cdesc);
2561			}
2562		}
2563		hfs_unlock(cp);
2564	}
2565	return (0);
2566}
2567
2568
2569/*
2570 * Look up an HFS object by ID.
2571 *
2572 * The object is returned with an iocount reference and the cnode locked.
2573 *
2574 * If the object is a file then it will represent the data fork.
2575 */
2576__private_extern__
2577int
2578hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock)
2579{
2580	struct vnode *vp = NULLVP;
2581	struct cat_desc cndesc;
2582	struct cat_attr cnattr;
2583	struct cat_fork cnfork;
2584	u_int32_t linkref = 0;
2585	int error;
2586
2587	/* Check for cnids that shouldn't be exported. */
2588	if ((cnid < kHFSFirstUserCatalogNodeID) &&
2589	    (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
2590		return (ENOENT);
2591	}
2592	/* Don't export our private directories. */
2593	if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
2594	    cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
2595		return (ENOENT);
2596	}
2597	/*
2598	 * Check the hash first
2599	 */
2600	vp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, cnid, 0, skiplock);
2601	if (vp) {
2602		*vpp = vp;
2603		return(0);
2604	}
2605
2606	bzero(&cndesc, sizeof(cndesc));
2607	bzero(&cnattr, sizeof(cnattr));
2608	bzero(&cnfork, sizeof(cnfork));
2609
2610	/*
2611	 * Not in hash, lookup in catalog
2612	 */
2613	if (cnid == kHFSRootParentID) {
2614		static char hfs_rootname[] = "/";
2615
2616		cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
2617		cndesc.cd_namelen = 1;
2618		cndesc.cd_parentcnid = kHFSRootParentID;
2619		cndesc.cd_cnid = kHFSRootFolderID;
2620		cndesc.cd_flags = CD_ISDIR;
2621
2622		cnattr.ca_fileid = kHFSRootFolderID;
2623		cnattr.ca_linkcount = 1;
2624		cnattr.ca_entries = 1;
2625		cnattr.ca_dircount = 1;
2626		cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
2627	} else {
2628		int lockflags;
2629		cnid_t pid;
2630		const char *nameptr;
2631
2632		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2633		error = cat_idlookup(hfsmp, cnid, 0, &cndesc, &cnattr, &cnfork);
2634		hfs_systemfile_unlock(hfsmp, lockflags);
2635
2636		if (error) {
2637			*vpp = NULL;
2638			return (error);
2639		}
2640
2641		/*
2642		 * Check for a raw hardlink inode and save its linkref.
2643		 */
2644		pid = cndesc.cd_parentcnid;
2645		nameptr = (const char *)cndesc.cd_nameptr;
2646
2647		if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
2648		    (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
2649			linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);
2650
2651		} else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
2652		           (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
2653			linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);
2654
2655		} else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
2656		           (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
2657			*vpp = NULL;
2658			cat_releasedesc(&cndesc);
2659			return (ENOENT);  /* open unlinked file */
2660		}
2661	}
2662
2663	/*
2664	 * Finish initializing cnode descriptor for hardlinks.
2665	 *
2666	 * We need a valid name and parent for reverse lookups.
2667	 */
2668	if (linkref) {
2669		cnid_t nextlinkid;
2670		cnid_t prevlinkid;
2671		struct cat_desc linkdesc;
2672		int lockflags;
2673
2674		cnattr.ca_linkref = linkref;
2675
2676		/*
2677		 * Pick up the first link in the chain and get a descriptor for it.
2678		 * This allows blind volfs paths to work for hardlinks.
2679		 */
2680		if ((hfs_lookuplink(hfsmp, linkref, &prevlinkid,  &nextlinkid) == 0) &&
2681		    (nextlinkid != 0)) {
2682			lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
2683			error = cat_findname(hfsmp, nextlinkid, &linkdesc);
2684			hfs_systemfile_unlock(hfsmp, lockflags);
2685			if (error == 0) {
2686				cat_releasedesc(&cndesc);
2687				bcopy(&linkdesc, &cndesc, sizeof(linkdesc));
2688			}
2689		}
2690	}
2691
2692	if (linkref) {
2693		error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr, &cnfork, &vp);
2694		if (error == 0) {
2695			VTOC(vp)->c_flag |= C_HARDLINK;
2696			vnode_setmultipath(vp);
2697		}
2698	} else {
2699		struct componentname cn;
2700
2701		/* Supply hfs_getnewvnode with a component name. */
2702		MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
2703		cn.cn_nameiop = LOOKUP;
2704		cn.cn_flags = ISLASTCN | HASBUF;
2705		cn.cn_context = NULL;
2706		cn.cn_pnlen = MAXPATHLEN;
2707		cn.cn_nameptr = cn.cn_pnbuf;
2708		cn.cn_namelen = cndesc.cd_namelen;
2709		cn.cn_hash = 0;
2710		cn.cn_consume = 0;
2711		bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);
2712
2713		error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr, &cnfork, &vp);
2714
2715		if ((error == 0) && (VTOC(vp)->c_flag & C_HARDLINK)) {
2716			hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
2717		}
2718		FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
2719	}
2720	cat_releasedesc(&cndesc);
2721
2722	*vpp = vp;
2723	if (vp && skiplock) {
2724		hfs_unlock(VTOC(vp));
2725	}
2726	return (error);
2727}
2728
2729
2730/*
2731 * Flush out all the files in a filesystem.
2732 */
2733static int
2734#if QUOTA
2735hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
2736#else
2737hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
2738#endif /* QUOTA */
2739{
2740	struct hfsmount *hfsmp;
2741	struct vnode *skipvp = NULLVP;
2742	int error;
2743#if QUOTA
2744	int quotafilecnt;
2745	int i;
2746#endif
2747
2748	hfsmp = VFSTOHFS(mp);
2749
2750#if QUOTA
2751	/*
2752	 * The open quota files have an indirect reference on
2753	 * the root directory vnode.  We must account for this
2754	 * extra reference when doing the initial vflush.
2755	 */
2756	quotafilecnt = 0;
2757	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
2758
2759		/* Find out how many quota files we have open. */
2760		for (i = 0; i < MAXQUOTAS; i++) {
2761			if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
2762				++quotafilecnt;
2763		}
2764
2765		/* Obtain the root vnode so we can skip over it. */
2766		skipvp = hfs_chash_getvnode(hfsmp->hfs_raw_dev, kHFSRootFolderID, 0, 0);
2767	}
2768#endif /* QUOTA */
2769
2770	error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
2771	if (error != 0)
2772		return(error);
2773
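	/* Second pass: flush swap files too (system files are still skipped). */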
2774	error = vflush(mp, skipvp, SKIPSYSTEM | flags);
2775
2776#if QUOTA
2777	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
2778		if (skipvp) {
2779			/*
2780			 * See if there are additional references on the
2781			 * root vp besides the ones obtained from the open
2782			 * quota files and the hfs_chash_getvnode call above.
2783			 */
2784			if ((error == 0) &&
2785			    (vnode_isinuse(skipvp,  quotafilecnt))) {
2786				error = EBUSY;  /* root directory is still open */
2787			}
2788			hfs_unlock(VTOC(skipvp));
2789			vnode_put(skipvp);
2790		}
2791		if (error && (flags & FORCECLOSE) == 0)
2792			return (error);
2793
2794		for (i = 0; i < MAXQUOTAS; i++) {
2795			if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
2796				continue;
2797			hfs_quotaoff(p, mp, i);
2798		}
2799		error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
2800	}
2801#endif /* QUOTA */
2802
2803	return (error);
2804}
2805
2806/*
2807 * Update volume encoding bitmap (HFS Plus only)
2808 */
2809__private_extern__
2810void
2811hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
2812{
2813#define  kIndexMacUkrainian	48  /* MacUkrainian encoding is 152 */
2814#define  kIndexMacFarsi		49  /* MacFarsi encoding is 140 */
2815
2816	u_int32_t	index;
2817
2818	switch (encoding) {
2819	case kTextEncodingMacUkrainian:
2820		index = kIndexMacUkrainian;
2821		break;
2822	case kTextEncodingMacFarsi:
2823		index = kIndexMacFarsi;
2824		break;
2825	default:
2826		index = encoding;
2827		break;
2828	}
2829
2830	if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
2831		HFS_MOUNT_LOCK(hfsmp, TRUE)
2832		hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
2833		MarkVCBDirty(hfsmp);
2834		HFS_MOUNT_UNLOCK(hfsmp, TRUE);
2835	}
2836}
2837
2838/*
2839 * Update volume stats
2840 *
2841 * On journal volumes this will cause a volume header flush
2842 */
2843__private_extern__
2844int
2845hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
2846{
2847	struct timeval tv;
2848
2849	microtime(&tv);
2850
2851	lck_mtx_lock(&hfsmp->hfs_mutex);
2852
2853	MarkVCBDirty(hfsmp);
2854	hfsmp->hfs_mtime = tv.tv_sec;
2855
2856	switch (op) {
2857	case VOL_UPDATE:
2858		break;
2859	case VOL_MKDIR:
2860		if (hfsmp->hfs_dircount != 0xFFFFFFFF)
2861			++hfsmp->hfs_dircount;
2862		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
2863			++hfsmp->vcbNmRtDirs;
2864		break;
2865	case VOL_RMDIR:
2866		if (hfsmp->hfs_dircount != 0)
2867			--hfsmp->hfs_dircount;
2868		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
2869			--hfsmp->vcbNmRtDirs;
2870		break;
2871	case VOL_MKFILE:
2872		if (hfsmp->hfs_filecount != 0xFFFFFFFF)
2873			++hfsmp->hfs_filecount;
2874		if (inroot && hfsmp->vcbNmFls != 0xFFFF)
2875			++hfsmp->vcbNmFls;
2876		break;
2877	case VOL_RMFILE:
2878		if (hfsmp->hfs_filecount != 0)
2879			--hfsmp->hfs_filecount;
2880		if (inroot && hfsmp->vcbNmFls != 0xFFFF)
2881			--hfsmp->vcbNmFls;
2882		break;
2883	}
2884
2885	lck_mtx_unlock(&hfsmp->hfs_mutex);
2886
2887	if (hfsmp->jnl) {
2888		hfs_flushvolumeheader(hfsmp, 0, 0);
2889	}
2890
2891	return (0);
2892}
2893
2894
2895static int
2896hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
2897{
2898	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
2899	struct filefork *fp;
2900	HFSMasterDirectoryBlock	*mdb;
2901	struct buf *bp = NULL;
2902	int retval;
2903	int sectorsize;
2904	ByteCount namelen;
2905
2906	sectorsize = hfsmp->hfs_logical_block_size;
2907	retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sectorsize), sectorsize, NOCRED, &bp);
2908	if (retval) {
2909		if (bp)
2910			buf_brelse(bp);
2911		return retval;
2912	}
2913
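	/*
	 * Copy the in-memory volume state into the on-disk MDB, byte-swapping
	 * to big-endian and converting dates to local HFS time.
	 */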
2914	lck_mtx_lock(&hfsmp->hfs_mutex);
2915
2916	mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sectorsize));
2917
2918	mdb->drCrDate	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbCrDate)));
2919	mdb->drLsMod	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
2920	mdb->drAtrb	= SWAP_BE16 (vcb->vcbAtrb);
2921	mdb->drNmFls	= SWAP_BE16 (vcb->vcbNmFls);
2922	mdb->drAllocPtr	= SWAP_BE16 (vcb->nextAllocation);
2923	mdb->drClpSiz	= SWAP_BE32 (vcb->vcbClpSiz);
2924	mdb->drNxtCNID	= SWAP_BE32 (vcb->vcbNxtCNID);
2925	mdb->drFreeBks	= SWAP_BE16 (vcb->freeBlocks);
2926
2927	namelen = strlen((char *)vcb->vcbVN);
2928	retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
2929	/* Retry with MacRoman in case that's how it was exported. */
2930	if (retval)
2931		retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);
2932
2933	mdb->drVolBkUp	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
2934	mdb->drWrCnt	= SWAP_BE32 (vcb->vcbWrCnt);
2935	mdb->drNmRtDirs	= SWAP_BE16 (vcb->vcbNmRtDirs);
2936	mdb->drFilCnt	= SWAP_BE32 (vcb->vcbFilCnt);
2937	mdb->drDirCnt	= SWAP_BE32 (vcb->vcbDirCnt);
2938
2939	bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));
2940
2941	fp = VTOF(vcb->extentsRefNum);
2942	mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
2943	mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
2944	mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
2945	mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
2946	mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
2947	mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
2948	mdb->drXTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
2949	mdb->drXTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
2950	FTOC(fp)->c_flag &= ~C_MODIFIED;
2951
2952	fp = VTOF(vcb->catalogRefNum);
2953	mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
2954	mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
2955	mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
2956	mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
2957	mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
2958	mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
2959	mdb->drCTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
2960	mdb->drCTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
2961	FTOC(fp)->c_flag &= ~C_MODIFIED;
2962
2963	MarkVCBClean( vcb );
2964
2965	lck_mtx_unlock(&hfsmp->hfs_mutex);
2966
2967	/* If requested, flush out the alternate MDB */
2968	if (altflush) {
2969		struct buf *alt_bp = NULL;
2970
2971		if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_alt_id_sector, sectorsize, NOCRED, &alt_bp) == 0) {
2972			bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sectorsize), kMDBSize);
2973
2974			(void) VNOP_BWRITE(alt_bp);
2975		} else if (alt_bp)
2976			buf_brelse(alt_bp);
2977	}
2978
2979	if (waitfor != MNT_WAIT)
2980		buf_bawrite(bp);
2981	else
2982		retval = VNOP_BWRITE(bp);
2983
2984	return (retval);
2985}
2986
2987/*
2988 *  Flush any dirty in-memory mount data to the on-disk
2989 *  volume header.
2990 *
2991 *  Note: the on-disk volume signature is intentionally
2992 *  not flushed since the on-disk "H+" and "HX" signatures
2993 *  are always stored in-memory as "H+".
2994 */
2995__private_extern__
2996int
2997hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
2998{
2999	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3000	struct filefork *fp;
3001	HFSPlusVolumeHeader *volumeHeader;
3002	int retval;
3003	struct buf *bp;
3004	int i;
3005	daddr64_t priIDSector;
3006	int critical;
3007	u_int16_t  signature;
3008	u_int16_t  hfsversion;
3009
3010	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
3011		return(0);
3012	}
3013	if (hfsmp->hfs_flags & HFS_STANDARD) {
3014		return hfs_flushMDB(hfsmp, waitfor, altflush);
3015	}
3016	critical = altflush;
3017	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3018				  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
3019
3020	if (hfs_start_transaction(hfsmp) != 0) {
3021	    return EINVAL;
3022	}
3023
3024	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3025			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
3026			hfsmp->hfs_physical_block_size, NOCRED, &bp);
3027	if (retval) {
3028		if (bp)
3029			buf_brelse(bp);
3030
3031		hfs_end_transaction(hfsmp);
3032
3033		printf("HFS: err %d reading VH blk (%s)\n", retval, vcb->vcbVN);
3034		return (retval);
3035	}
3036
3037	if (hfsmp->jnl) {
3038		journal_modify_block_start(hfsmp->jnl, bp);
3039	}
3040
3041	volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
3042			HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3043
3044	/*
3045	 * Sanity check what we just read.
3046	 */
3047	signature = SWAP_BE16 (volumeHeader->signature);
3048	hfsversion   = SWAP_BE16 (volumeHeader->version);
3049	if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3050	    (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3051	    (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
3052#if 1
3053		panic("HFS: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d",
3054		      vcb->vcbVN, signature, hfsversion,
3055		      SWAP_BE32 (volumeHeader->blockSize));
3056#endif
3057		printf("HFS: corrupt VH blk (%s)\n", vcb->vcbVN);
3058		buf_brelse(bp);
3059		return (EIO);
3060	}
3061
3062	/*
3063	 * For embedded HFS+ volumes, update create date if it changed
3064	 * (ie from a setattrlist call)
3065	 */
3066	if ((vcb->hfsPlusIOPosOffset != 0) &&
3067	    (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
3068		struct buf *bp2;
3069		HFSMasterDirectoryBlock	*mdb;
3070
3071		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3072				HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
3073				hfsmp->hfs_physical_block_size, NOCRED, &bp2);
3074		if (retval) {
3075			if (bp2)
3076				buf_brelse(bp2);
3077			retval = 0;
3078		} else {
3079			mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
3080				HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3081
3082			if ( SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate )
3083			  {
3084				if (hfsmp->jnl) {
3085				    journal_modify_block_start(hfsmp->jnl, bp2);
3086				}
3087
3088				mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate);	/* pick up the new create date */
3089
3090				if (hfsmp->jnl) {
3091					journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
3092				} else {
3093					(void) VNOP_BWRITE(bp2);		/* write out the changes */
3094				}
3095			  }
3096			else
3097			  {
3098				buf_brelse(bp2);						/* just release it */
3099			  }
3100		  }
3101	}
3102
3103	lck_mtx_lock(&hfsmp->hfs_mutex);
3104
3105	/* Note: only update the lower 16 bits worth of attributes */
3106	volumeHeader->attributes       = SWAP_BE32 (vcb->vcbAtrb);
3107	volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
3108	if (hfsmp->jnl) {
3109		volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
3110	} else {
3111		volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
3112	}
3113	volumeHeader->createDate	= SWAP_BE32 (vcb->localCreateDate);  /* volume create date is in local time */
3114	volumeHeader->modifyDate	= SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
3115	volumeHeader->backupDate	= SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
3116	volumeHeader->fileCount		= SWAP_BE32 (vcb->vcbFilCnt);
3117	volumeHeader->folderCount	= SWAP_BE32 (vcb->vcbDirCnt);
3118	volumeHeader->totalBlocks	= SWAP_BE32 (vcb->totalBlocks);
3119	volumeHeader->freeBlocks	= SWAP_BE32 (vcb->freeBlocks);
3120	volumeHeader->nextAllocation	= SWAP_BE32 (vcb->nextAllocation);
3121	volumeHeader->rsrcClumpSize	= SWAP_BE32 (vcb->vcbClpSiz);
3122	volumeHeader->dataClumpSize	= SWAP_BE32 (vcb->vcbClpSiz);
3123	volumeHeader->nextCatalogID	= SWAP_BE32 (vcb->vcbNxtCNID);
3124	volumeHeader->writeCount	= SWAP_BE32 (vcb->vcbWrCnt);
3125	volumeHeader->encodingsBitmap	= SWAP_BE64 (vcb->encodingsBitmap);
3126
3127	if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
3128		bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
3129		critical = 1;
3130	}
3131
3132	/*
3133	 * System files are only dirty when altflush is set.
3134	 */
3135	if (altflush == 0) {
3136		goto done;
3137	}
3138
3139	/* Sync Extents overflow file meta data */
3140	fp = VTOF(vcb->extentsRefNum);
3141	if (FTOC(fp)->c_flag & C_MODIFIED) {
3142		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3143			volumeHeader->extentsFile.extents[i].startBlock	=
3144				SWAP_BE32 (fp->ff_extents[i].startBlock);
3145			volumeHeader->extentsFile.extents[i].blockCount	=
3146				SWAP_BE32 (fp->ff_extents[i].blockCount);
3147		}
3148		volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
3149		volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3150		volumeHeader->extentsFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3151		FTOC(fp)->c_flag &= ~C_MODIFIED;
3152	}
3153
3154	/* Sync Catalog file meta data */
3155	fp = VTOF(vcb->catalogRefNum);
3156	if (FTOC(fp)->c_flag & C_MODIFIED) {
3157		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3158			volumeHeader->catalogFile.extents[i].startBlock	=
3159				SWAP_BE32 (fp->ff_extents[i].startBlock);
3160			volumeHeader->catalogFile.extents[i].blockCount	=
3161				SWAP_BE32 (fp->ff_extents[i].blockCount);
3162		}
3163		volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
3164		volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3165		volumeHeader->catalogFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3166		FTOC(fp)->c_flag &= ~C_MODIFIED;
3167	}
3168
3169	/* Sync Allocation file meta data */
3170	fp = VTOF(vcb->allocationsRefNum);
3171	if (FTOC(fp)->c_flag & C_MODIFIED) {
3172		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3173			volumeHeader->allocationFile.extents[i].startBlock =
3174				SWAP_BE32 (fp->ff_extents[i].startBlock);
3175			volumeHeader->allocationFile.extents[i].blockCount =
3176				SWAP_BE32 (fp->ff_extents[i].blockCount);
3177		}
3178		volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
3179		volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3180		volumeHeader->allocationFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3181		FTOC(fp)->c_flag &= ~C_MODIFIED;
3182	}
3183
3184	/* Sync Attribute file meta data */
3185	if (hfsmp->hfs_attribute_vp) {
3186		fp = VTOF(hfsmp->hfs_attribute_vp);
3187		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3188			volumeHeader->attributesFile.extents[i].startBlock =
3189				SWAP_BE32 (fp->ff_extents[i].startBlock);
3190			volumeHeader->attributesFile.extents[i].blockCount =
3191				SWAP_BE32 (fp->ff_extents[i].blockCount);
3192		}
3193		FTOC(fp)->c_flag &= ~C_MODIFIED;
3194		volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
3195		volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3196		volumeHeader->attributesFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3197	}
3198
3199	/* Sync Startup file meta data */
3200	if (hfsmp->hfs_startup_vp) {
3201		fp = VTOF(hfsmp->hfs_startup_vp);
3202		if (FTOC(fp)->c_flag & C_MODIFIED) {
3203			for (i = 0; i < kHFSPlusExtentDensity; i++) {
3204				volumeHeader->startupFile.extents[i].startBlock =
3205					SWAP_BE32 (fp->ff_extents[i].startBlock);
3206				volumeHeader->startupFile.extents[i].blockCount =
3207					SWAP_BE32 (fp->ff_extents[i].blockCount);
3208			}
3209			volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
3210			volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3211			volumeHeader->startupFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3212			FTOC(fp)->c_flag &= ~C_MODIFIED;
3213		}
3214	}
3215
3216done:
3217	MarkVCBClean(hfsmp);
3218	lck_mtx_unlock(&hfsmp->hfs_mutex);
3219
3220	/* If requested, flush out the alternate volume header */
3221	if (altflush && hfsmp->hfs_alt_id_sector) {
3222		struct buf *alt_bp = NULL;
3223
3224		if (buf_meta_bread(hfsmp->hfs_devvp,
3225				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
3226				hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
3227			if (hfsmp->jnl) {
3228				journal_modify_block_start(hfsmp->jnl, alt_bp);
3229			}
3230
3231			bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
3232					HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
3233					kMDBSize);
3234
3235			if (hfsmp->jnl) {
3236				journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
3237			} else {
3238				(void) VNOP_BWRITE(alt_bp);
3239			}
3240		} else if (alt_bp)
3241			buf_brelse(alt_bp);
3242	}
3243
3244	if (hfsmp->jnl) {
3245		journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
3246	} else {
3247		if (waitfor != MNT_WAIT)
3248			buf_bawrite(bp);
3249		else {
3250		    retval = VNOP_BWRITE(bp);
3251		    /* When critical data changes, flush the device cache */
3252		    if (critical && (retval == 0)) {
3253			(void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
3254					 NULL, FWRITE, NULL);
3255		    }
3256		}
3257	}
3258	hfs_end_transaction(hfsmp);
3259
3260	return (retval);
3261}
3262
3263
3264/*
3265 * Extend a file system.
3266 */
3267__private_extern__
3268int
3269hfs_extendfs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
3270{
3271	struct proc *p = vfs_context_proc(context);
3272	kauth_cred_t cred = vfs_context_ucred(context);
3273	struct  vnode *vp;
3274	struct  vnode *devvp;
3275	struct  buf *bp;
3276	struct  filefork *fp = NULL;
3277	ExtendedVCB  *vcb;
3278	struct  cat_fork forkdata;
3279	u_int64_t  oldsize;
3280	u_int64_t  newblkcnt;
3281	u_int64_t  prev_phys_block_count;
3282	u_int32_t  addblks;
3283	u_int64_t  sectorcnt;
3284	u_int32_t  sectorsize;
3285	u_int32_t  phys_sectorsize;
3286	daddr64_t  prev_alt_sector;
3287	daddr_t	   bitmapblks;
3288	int  lockflags;
3289	int  error;
3290	int64_t oldBitmapSize;
3291	Boolean  usedExtendFileC = false;
3292
3293	devvp = hfsmp->hfs_devvp;
3294	vcb = HFSTOVCB(hfsmp);
3295
3296	/*
3297	 * - HFS Plus file systems only.
3298	 * - Journaling must be enabled.
3299	 * - No embedded volumes.
3300	 */
3301	if ((vcb->vcbSigWord == kHFSSigWord) ||
3302	     (hfsmp->jnl == NULL) ||
3303	     (vcb->hfsPlusIOPosOffset != 0)) {
3304		return (EPERM);
3305	}
3306	/*
3307	 * If extending file system by non-root, then verify
3308	 * ownership and check permissions.
3309	 */
3310	if (suser(cred, NULL)) {
3311		error = hfs_vget(hfsmp, kHFSRootFolderID, &vp, 0);
3312
3313		if (error)
3314			return (error);
3315		error = hfs_owner_rights(hfsmp, VTOC(vp)->c_uid, cred, p, 0);
3316		if (error == 0) {
3317			error = hfs_write_access(vp, cred, p, false);
3318		}
3319		hfs_unlock(VTOC(vp));
3320		vnode_put(vp);
3321		if (error)
3322			return (error);
3323
3324		error = vnode_authorize(devvp, NULL, KAUTH_VNODE_READ_DATA | KAUTH_VNODE_WRITE_DATA, context);
3325		if (error)
3326			return (error);
3327	}
3328	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&sectorsize, 0, context)) {
3329		return (ENXIO);
3330	}
3331	if (sectorsize != hfsmp->hfs_logical_block_size) {
3332		return (ENXIO);
3333	}
3334	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&sectorcnt, 0, context)) {
3335		return (ENXIO);
3336	}
3337	if ((sectorsize * sectorcnt) < newsize) {
3338		printf("hfs_extendfs: not enough space on device\n");
3339		return (ENOSPC);
3340	}
3341	error = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_sectorsize, 0, context);
3342	if (error) {
3343		if ((error != ENOTSUP) && (error != ENOTTY)) {
3344			return (ENXIO);
3345		}
3346		/* If ioctl is not supported, force physical and logical sector size to be same */
3347		phys_sectorsize = sectorsize;
3348	}
3349	oldsize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
3350
3351	/*
3352	 * Validate new size.
3353	 */
3354	if ((newsize <= oldsize) || (newsize % sectorsize) || (newsize % phys_sectorsize)) {
3355		printf("hfs_extendfs: invalid size\n");
3356		return (EINVAL);
3357	}
3358	newblkcnt = newsize / vcb->blockSize;
3359	if (newblkcnt > (u_int64_t)0xFFFFFFFF)
3360		return (EOVERFLOW);
3361
3362	addblks = newblkcnt - vcb->totalBlocks;
3363
3364	printf("hfs_extendfs: growing %s by %d blocks\n", vcb->vcbVN, addblks);
3365	/*
3366	 * Enclose changes inside a transaction.
3367	 */
3368	if (hfs_start_transaction(hfsmp) != 0) {
3369		return (EINVAL);
3370	}
3371
3372	/*
3373	 * Note: we take the attributes lock in case we have an attribute data vnode
3374	 * which needs to change size.
3375	 */
3376	lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3377	vp = vcb->allocationsRefNum;
3378	fp = VTOF(vp);
3379	bcopy(&fp->ff_data, &forkdata, sizeof(forkdata));
3380
3381	/*
3382	 * Calculate additional space required (if any) by allocation bitmap.
3383	 */
3384	oldBitmapSize = fp->ff_size;
3385	bitmapblks = roundup((newblkcnt+7) / 8, vcb->vcbVBMIOSize) / vcb->blockSize;
3386	if (bitmapblks > (daddr_t)fp->ff_blocks)
3387		bitmapblks -= fp->ff_blocks;
3388	else
3389		bitmapblks = 0;
3390
3391	if (bitmapblks > 0) {
3392		daddr64_t blkno;
3393		daddr_t blkcnt;
3394		off_t bytesAdded;
3395
3396		/*
3397		 * Get the bitmap's current size (in allocation blocks) so we know
3398		 * where to start zero filling once the new space is added.  We've
3399		 * got to do this before the bitmap is grown.
3400		 */
3401		blkno  = (daddr64_t)fp->ff_blocks;
3402
3403		/*
3404		 * Try to grow the allocation file in the normal way, using allocation
3405		 * blocks already existing in the file system.  This way, we might be
3406		 * able to grow the bitmap contiguously, or at least in the metadata
3407		 * zone.
3408		 */
3409		error = ExtendFileC(vcb, fp, bitmapblks * vcb->blockSize, 0,
3410				kEFAllMask | kEFNoClumpMask | kEFReserveMask | kEFMetadataMask,
3411				&bytesAdded);
3412
3413		if (error == 0) {
3414			usedExtendFileC = true;
3415		} else {
3416			/*
3417			 * If the above allocation failed, fall back to allocating the new
3418			 * extent of the bitmap from the space we're going to add.  Since those
3419			 * blocks don't yet belong to the file system, we have to update the
3420			 * extent list directly, and manually adjust the file size.
3421			 */
3422			bytesAdded = 0;
3423			error = AddFileExtent(vcb, fp, vcb->totalBlocks, bitmapblks);
3424			if (error) {
3425				printf("hfs_extendfs: error %d adding extents\n", error);
3426				goto out;
3427			}
3428			fp->ff_blocks += bitmapblks;
3429			VTOC(vp)->c_blocks = fp->ff_blocks;
3430			VTOC(vp)->c_flag |= C_MODIFIED;
3431		}
3432
3433		/*
3434		 * Update the allocation file's size to include the newly allocated
3435		 * blocks.  Note that ExtendFileC doesn't do this, which is why this
3436		 * statement is outside the above "if" statement.
3437		 */
3438		fp->ff_size += (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize;
3439
3440		/*
3441		 * Zero out the new bitmap blocks.
3442		 */
3443		{
3444
3445			bp = NULL;
3446			blkcnt = bitmapblks;
3447			while (blkcnt > 0) {
3448				error = (int)buf_meta_bread(vp, blkno, vcb->blockSize, NOCRED, &bp);
3449				if (error) {
3450					if (bp) {
3451						buf_brelse(bp);
3452					}
3453					break;
3454				}
3455				bzero((char *)buf_dataptr(bp), vcb->blockSize);
3456				buf_markaged(bp);
3457				error = (int)buf_bwrite(bp);
3458				if (error)
3459					break;
3460				--blkcnt;
3461				++blkno;
3462			}
3463		}
3464		if (error) {
3465			printf("hfs_extendfs: error %d  clearing blocks\n", error);
3466			goto out;
3467		}
3468		/*
3469		 * Mark the new bitmap space as allocated.
3470		 *
3471		 * Note that ExtendFileC will have marked any blocks it allocated, so
3472		 * this is only needed if we used AddFileExtent.  Also note that this
3473		 * has to come *after* the zero filling of new blocks in the case where
3474		 * we used AddFileExtent (since the part of the bitmap we're touching
3475		 * is in those newly allocated blocks).
3476		 */
3477		if (!usedExtendFileC) {
3478			error = BlockMarkAllocated(vcb, vcb->totalBlocks, bitmapblks);
3479			if (error) {
3480				printf("hfs_extendfs: error %d setting bitmap\n", error);
3481				goto out;
3482			}
3483			vcb->freeBlocks -= bitmapblks;
3484		}
3485	}
3486	/*
3487	 * Mark the new alternate VH as allocated.
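	 *
	 * The alternate volume header occupies the last 1024 bytes of the
	 * volume, so it takes two allocation blocks when the block size is
	 * 512 bytes and one block otherwise.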
3488	 */
3489	if (vcb->blockSize == 512)
3490		error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 2, 2);
3491	else
3492		error = BlockMarkAllocated(vcb, vcb->totalBlocks + addblks - 1, 1);
3493	if (error) {
3494		printf("hfs_extendfs: error %d setting bitmap (VH)\n", error);
3495		goto out;
3496	}
3497	/*
3498	 * Mark the old alternate VH as free.
3499	 */
3500	if (vcb->blockSize == 512)
3501		(void) BlockMarkFree(vcb, vcb->totalBlocks - 2, 2);
3502	else
3503		(void) BlockMarkFree(vcb, vcb->totalBlocks - 1, 1);
3504	/*
3505	 * Adjust file system variables for new space.
3506	 */
3507	prev_phys_block_count = hfsmp->hfs_logical_block_count;
3508	prev_alt_sector = hfsmp->hfs_alt_id_sector;
3509
3510	vcb->totalBlocks += addblks;
3511	vcb->freeBlocks += addblks;
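	/*
	 * Recompute the logical block count from the new size and reposition
	 * the alternate volume header sector relative to the start of the
	 * embedded HFS Plus volume (hfsPlusIOPosOffset).
	 */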
3512	hfsmp->hfs_logical_block_count = newsize / sectorsize;
3513	hfsmp->hfs_alt_id_sector = (hfsmp->hfsPlusIOPosOffset / sectorsize) +
3514	                          HFS_ALT_SECTOR(sectorsize, hfsmp->hfs_logical_block_count);
3515	MarkVCBDirty(vcb);
3516	error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
3517	if (error) {
3518		printf("hfs_extendfs: couldn't flush volume headers (%d)", error);
3519		/*
3520		 * Restore to old state.
3521		 */
3522		if (usedExtendFileC) {
3523			(void) TruncateFileC(vcb, fp, oldBitmapSize, false);
3524		} else {
3525			fp->ff_blocks -= bitmapblks;
3526			fp->ff_size -= (u_int64_t)bitmapblks * (u_int64_t)vcb->blockSize;
3527			/*
3528			 * No need to mark the excess blocks free since those bitmap blocks
3529			 * are no longer part of the bitmap.  But we do need to undo the
3530			 * effect of the "vcb->freeBlocks -= bitmapblks" above.
3531			 */
3532			vcb->freeBlocks += bitmapblks;
3533		}
3534		vcb->totalBlocks -= addblks;
3535		vcb->freeBlocks -= addblks;
3536		hfsmp->hfs_logical_block_count = prev_phys_block_count;
3537		hfsmp->hfs_alt_id_sector = prev_alt_sector;
3538		MarkVCBDirty(vcb);
3539		if (vcb->blockSize == 512)
3540			(void) BlockMarkAllocated(vcb, vcb->totalBlocks - 2, 2);
3541		else
3542			(void) BlockMarkAllocated(vcb, vcb->totalBlocks - 1, 1);
3543		goto out;
3544	}
3545	/*
3546	 * Invalidate the old alternate volume header.
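	 *
	 * Unlike the shrink path in hfs_truncatefs, the old header still lies
	 * inside the (now larger) file system, so this update can safely go
	 * through the journal.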
3547	 */
3548	bp = NULL;
3549	if (prev_alt_sector) {
3550		if (buf_meta_bread(hfsmp->hfs_devvp,
3551				HFS_PHYSBLK_ROUNDDOWN(prev_alt_sector, hfsmp->hfs_log_per_phys),
3552				hfsmp->hfs_physical_block_size, NOCRED, &bp) == 0) {
3553			journal_modify_block_start(hfsmp->jnl, bp);
3554
3555			bzero((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size), kMDBSize);
3556
3557			journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
3558		} else if (bp) {
3559			buf_brelse(bp);
3560		}
3561	}
3562
3563	/*
3564	 * TODO: Adjust the size of the metadata zone based on new volume size?
3565	 */
3566
3567	/*
3568	 * Adjust the size of hfsmp->hfs_attrdata_vp
3569	 */
3570	if (hfsmp->hfs_attrdata_vp) {
3571		struct cnode *attr_cp;
3572		struct filefork *attr_fp;
3573
3574		if (vnode_get(hfsmp->hfs_attrdata_vp) == 0) {
3575			attr_cp = VTOC(hfsmp->hfs_attrdata_vp);
3576			attr_fp = VTOF(hfsmp->hfs_attrdata_vp);
3577
3578			attr_cp->c_blocks = newblkcnt;
3579			attr_fp->ff_blocks = newblkcnt;
3580			attr_fp->ff_extents[0].blockCount = newblkcnt;
3581			attr_fp->ff_size = (off_t) newblkcnt * hfsmp->blockSize;
3582			ubc_setsize(hfsmp->hfs_attrdata_vp, attr_fp->ff_size);
3583			vnode_put(hfsmp->hfs_attrdata_vp);
3584		}
3585	}
3586
3587out:
3588	if (error && fp) {
3589		/* Restore allocation fork. */
3590		bcopy(&forkdata, &fp->ff_data, sizeof(forkdata));
3591		VTOC(vp)->c_blocks = fp->ff_blocks;
3592
3593	}
3594	/*
3595	   Regardless of whether or not the totalblocks actually increased,
3596	   we should reset the allocLimit field. If it changed, it will
3597	   get updated; if not, it will remain the same.
3598	*/
3599	hfsmp->allocLimit = vcb->totalBlocks;
3600	hfs_systemfile_unlock(hfsmp, lockflags);
3601	hfs_end_transaction(hfsmp);
3602
3603	return (error);
3604}
3605
3606#define HFS_MIN_SIZE  (32LL * 1024LL * 1024LL)
3607
3608/*
3609 * Truncate a file system (while still mounted).
3610 */
3611__private_extern__
3612int
3613hfs_truncatefs(struct hfsmount *hfsmp, u_int64_t newsize, vfs_context_t context)
3614{
3615	struct  buf *bp = NULL;
3616	u_int64_t oldsize;
3617	u_int32_t newblkcnt;
3618	u_int32_t reclaimblks = 0;
3619	int lockflags = 0;
3620	int transaction_begun = 0;
3621	int error;
3622
3623	lck_mtx_lock(&hfsmp->hfs_mutex);
3624	if (hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) {
3625		lck_mtx_unlock(&hfsmp->hfs_mutex);
3626		return (EALREADY);
3627	}
3628	hfsmp->hfs_flags |= HFS_RESIZE_IN_PROGRESS;
3629	hfsmp->hfs_resize_filesmoved = 0;
3630	hfsmp->hfs_resize_totalfiles = 0;
3631	lck_mtx_unlock(&hfsmp->hfs_mutex);
3632
3633	/*
3634	 * - Journaled HFS Plus volumes only.
3635	 * - No embedded volumes.
3636	 */
3637	if ((hfsmp->jnl == NULL) ||
3638	    (hfsmp->hfsPlusIOPosOffset != 0)) {
3639		error = EPERM;
3640		goto out;
3641	}
3642	oldsize = (u_int64_t)hfsmp->totalBlocks * (u_int64_t)hfsmp->blockSize;
3643	newblkcnt = newsize / hfsmp->blockSize;
3644	reclaimblks = hfsmp->totalBlocks - newblkcnt;
3645
3646	/* Make sure new size is valid. */
3647	if ((newsize < HFS_MIN_SIZE) ||
3648	    (newsize >= oldsize) ||
3649	    (newsize % hfsmp->hfs_logical_block_size) ||
3650	    (newsize % hfsmp->hfs_physical_block_size)) {
3651		printf ("hfs_truncatefs: invalid size\n");
3652		error = EINVAL;
3653		goto out;
3654	}
3655	/* Make sure there's enough space to work with. */
3656	if (reclaimblks >= hfs_freeblks(hfsmp, 1)) {
3657		printf("hfs_truncatefs: insufficient space (need %u blocks; have %u blocks)\n", reclaimblks, hfs_freeblks(hfsmp, 1));
3658		error = ENOSPC;
3659		goto out;
3660	}
3661
3662	/* Start with a clean journal. */
3663	journal_flush(hfsmp->jnl);
3664
3665	if (hfs_start_transaction(hfsmp) != 0) {
3666		error = EINVAL;
3667		goto out;
3668	}
3669	transaction_begun = 1;
3670
3671	/*
3672	 * Prevent new allocations from using the part we're trying to truncate.
3673	 *
3674	 * NOTE: allocLimit is set to the allocation block number where the new
3675	 * alternate volume header will be.  That way there will be no files to
3676	 * interfere with allocating the new alternate volume header, and no files
3677	 * in the allocation blocks beyond (i.e. the blocks we're trying to
	 * truncate away).
3679	 */
3680	lck_mtx_lock(&hfsmp->hfs_mutex);
3681	if (hfsmp->blockSize == 512)
3682		hfsmp->allocLimit = newblkcnt - 2;
3683	else
3684		hfsmp->allocLimit = newblkcnt - 1;
3685	hfsmp->freeBlocks -= reclaimblks;
3686	lck_mtx_unlock(&hfsmp->hfs_mutex);
3687
3688	/*
3689	 * Look for files that have blocks at or beyond the location of the
3690	 * new alternate volume header.
3691	 */
3692	if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) {
3693		/*
3694		 * hfs_reclaimspace will use separate transactions when
3695		 * relocating files (so we don't overwhelm the journal).
3696		 */
3697		hfs_end_transaction(hfsmp);
3698		transaction_begun = 0;
3699
3700		/* Attempt to reclaim some space. */
3701		if (hfs_reclaimspace(hfsmp, hfsmp->allocLimit, reclaimblks, context) != 0) {
3702			printf("hfs_truncatefs: couldn't reclaim space on %s\n", hfsmp->vcbVN);
3703			error = ENOSPC;
3704			goto out;
3705		}
3706		if (hfs_start_transaction(hfsmp) != 0) {
3707			error = EINVAL;
3708			goto out;
3709		}
3710		transaction_begun = 1;
3711
3712		/* Check if we're clear now. */
3713		if (hfs_isallocated(hfsmp, hfsmp->allocLimit, reclaimblks)) {
3714			printf("hfs_truncatefs: didn't reclaim enough space on %s\n", hfsmp->vcbVN);
3715			error = EAGAIN;  /* tell client to try again */
3716			goto out;
3717		}
3718	}
3719
3720	/*
3721	 * Note: we take the attributes lock in case we have an attribute data vnode
3722	 * which needs to change size.
3723	 */
3724	lockflags = hfs_systemfile_lock(hfsmp, SFL_ATTRIBUTE | SFL_EXTENTS | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
3725
3726	/*
3727	 * Mark the old alternate volume header as free.
3728	 * We don't bother shrinking allocation bitmap file.
3729	 */
3730	if (hfsmp->blockSize == 512)
3731		(void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 2, 2);
3732	else
3733		(void) BlockMarkFree(hfsmp, hfsmp->totalBlocks - 1, 1);
3734
3735	/*
3736	 * Allocate last 1KB for alternate volume header.
3737	 */
3738	error = BlockMarkAllocated(hfsmp, hfsmp->allocLimit, (hfsmp->blockSize == 512) ? 2 : 1);
3739	if (error) {
3740		printf("hfs_truncatefs: Error %d allocating new alternate volume header\n", error);
3741		goto out;
3742	}
3743
3744	/*
3745	 * Invalidate the existing alternate volume header.
3746	 *
3747	 * Don't include this in a transaction (don't call journal_modify_block)
3748	 * since this block will be outside of the truncated file system!
3749	 */
3750	if (hfsmp->hfs_alt_id_sector) {
3751		if (buf_meta_bread(hfsmp->hfs_devvp,
3752				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_alt_id_sector, hfsmp->hfs_log_per_phys),
3753				hfsmp->hfs_physical_block_size, NOCRED, &bp) == 0) {
3754
3755			bzero((void*)((char *)buf_dataptr(bp) + HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size)), kMDBSize);
3756			(void) VNOP_BWRITE(bp);
3757		} else if (bp) {
3758			buf_brelse(bp);
3759		}
3760		bp = NULL;
3761	}
3762
3763	/* Log successful shrinking. */
3764	printf("hfs_truncatefs: shrank \"%s\" to %d blocks (was %d blocks)\n",
3765	       hfsmp->vcbVN, newblkcnt, hfsmp->totalBlocks);
3766
3767	/*
3768	 * Adjust file system variables and flush them to disk.
3769	 */
3770	hfsmp->totalBlocks = newblkcnt;
3771	hfsmp->hfs_logical_block_count = newsize / hfsmp->hfs_logical_block_size;
3772	hfsmp->hfs_alt_id_sector = HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, hfsmp->hfs_logical_block_count);
3773	MarkVCBDirty(hfsmp);
3774	error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
3775	if (error)
3776		panic("hfs_truncatefs: unexpected error flushing volume header (%d)\n", error);
3777
3778	/*
3779	 * TODO: Adjust the size of the metadata zone based on new volume size?
3780	 */
3781
3782	/*
3783	 * Adjust the size of hfsmp->hfs_attrdata_vp
3784	 */
3785	if (hfsmp->hfs_attrdata_vp) {
3786		struct cnode *cp;
3787		struct filefork *fp;
3788
3789		if (vnode_get(hfsmp->hfs_attrdata_vp) == 0) {
3790			cp = VTOC(hfsmp->hfs_attrdata_vp);
3791			fp = VTOF(hfsmp->hfs_attrdata_vp);
3792
3793			cp->c_blocks = newblkcnt;
3794			fp->ff_blocks = newblkcnt;
3795			fp->ff_extents[0].blockCount = newblkcnt;
3796			fp->ff_size = (off_t) newblkcnt * hfsmp->blockSize;
3797			ubc_setsize(hfsmp->hfs_attrdata_vp, fp->ff_size);
3798			vnode_put(hfsmp->hfs_attrdata_vp);
3799		}
3800	}
3801
3802out:
3803	if (error)
3804		hfsmp->freeBlocks += reclaimblks;
3805
3806	lck_mtx_lock(&hfsmp->hfs_mutex);
3807	hfsmp->allocLimit = hfsmp->totalBlocks;
3808	if (hfsmp->nextAllocation >= hfsmp->allocLimit)
3809		hfsmp->nextAllocation = hfsmp->hfs_metazone_end + 1;
3810	hfsmp->hfs_flags &= ~HFS_RESIZE_IN_PROGRESS;
3811	lck_mtx_unlock(&hfsmp->hfs_mutex);
3812
3813	if (lockflags) {
3814		hfs_systemfile_unlock(hfsmp, lockflags);
3815	}
3816	if (transaction_begun) {
3817		hfs_end_transaction(hfsmp);
3818		journal_flush(hfsmp->jnl);
3819	}
3820
3821	return (error);
3822}
3823
3824
3825/*
3826 * Invalidate the physical block numbers associated with buffer cache blocks
3827 * in the given extent of the given vnode.
3828 */
3829struct hfs_inval_blk_no {
3830	daddr64_t sectorStart;
3831	daddr64_t sectorCount;
3832};
3833static int
3834hfs_invalidate_block_numbers_callback(buf_t bp, void *args_in)
3835{
3836	daddr64_t blkno;
3837	struct hfs_inval_blk_no *args;
3838
3839	blkno = buf_blkno(bp);
3840	args = args_in;
3841
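	/*
	 * Setting the physical block number back to the logical block number
	 * marks the buffer as unmapped, so a subsequent write will go through
	 * VNOP_BLOCKMAP and pick up the extent's new location.
	 */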
3842	if (blkno >= args->sectorStart && blkno < args->sectorStart+args->sectorCount)
3843		buf_setblkno(bp, buf_lblkno(bp));
3844
3845	return BUF_RETURNED;
3846}
3847static void
3848hfs_invalidate_sectors(struct vnode *vp, daddr64_t sectorStart, daddr64_t sectorCount)
3849{
3850	struct hfs_inval_blk_no args;
3851	args.sectorStart = sectorStart;
3852	args.sectorCount = sectorCount;
3853
3854	buf_iterate(vp, hfs_invalidate_block_numbers_callback, BUF_SCAN_DIRTY|BUF_SCAN_CLEAN, &args);
3855}
3856
3857
3858/*
3859 * Copy the contents of an extent to a new location.  Also invalidates the
3860 * physical block number of any buffer cache block in the copied extent
3861 * (so that if the block is written, it will go through VNOP_BLOCKMAP to
3862 * determine the new physical block number).
3863 */
3864static int
3865hfs_copy_extent(
3866	struct hfsmount *hfsmp,
3867	struct vnode *vp,		/* The file whose extent is being copied. */
3868	u_int32_t oldStart,		/* The start of the source extent. */
3869	u_int32_t newStart,		/* The start of the destination extent. */
3870	u_int32_t blockCount,	/* The number of allocation blocks to copy. */
3871	vfs_context_t context)
3872{
3873	int err = 0;
3874	size_t bufferSize;
3875	void *buffer = NULL;
3876	struct vfsioattr ioattr;
3877	buf_t bp = NULL;
3878	off_t resid;
3879	size_t ioSize;
3880	u_int32_t ioSizeSectors;	/* Device sectors in this I/O */
3881	daddr64_t srcSector, destSector;
3882	u_int32_t sectorsPerBlock = hfsmp->blockSize / hfsmp->hfs_logical_block_size;
3883
3884	/*
3885	 * Sanity check that we have locked the vnode of the file we're copying.
3886	 *
3887	 * But since hfs_systemfile_lock() doesn't actually take the lock on
3888	 * the allocation file if a journal is active, ignore the check if the
3889	 * file being copied is the allocation file.
3890	 */
3891	struct cnode *cp = VTOC(vp);
3892	if (cp != hfsmp->hfs_allocation_cp && cp->c_lockowner != current_thread())
3893		panic("hfs_copy_extent: vp=%p (cp=%p) not owned?\n", vp, cp);
3894
3895	/*
3896	 * Wait for any in-progress writes to this vnode to complete, so that we'll
3897	 * be copying consistent bits.  (Otherwise, it's possible that an async
3898	 * write will complete to the old extent after we read from it.  That
3899	 * could lead to corruption.)
3900	 */
3901	err = vnode_waitforwrites(vp, 0, 0, 0, "hfs_copy_extent");
3902	if (err) {
3903		printf("hfs_copy_extent: Error %d from vnode_waitforwrites\n", err);
3904		return err;
3905	}
3906
3907	/*
3908	 * Determine the I/O size to use
3909	 *
3910	 * NOTE: Many external drives will result in an ioSize of 128KB.
3911	 * TODO: Should we use a larger buffer, doing several consecutive
3912	 * reads, then several consecutive writes?
3913	 */
3914	vfs_ioattr(hfsmp->hfs_mp, &ioattr);
3915	bufferSize = MIN(ioattr.io_maxreadcnt, ioattr.io_maxwritecnt);
3916	if (kmem_alloc(kernel_map, (vm_offset_t*) &buffer, bufferSize))
3917		return ENOMEM;
3918
3919	/* Get a buffer for doing the I/O */
3920	bp = buf_alloc(hfsmp->hfs_devvp);
3921	buf_setdataptr(bp, (uintptr_t)buffer);
3922
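	/* Convert the allocation-block extent into device sectors for the raw transfers below. */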
3923	resid = (off_t) blockCount * (off_t) hfsmp->blockSize;
3924	srcSector = (daddr64_t) oldStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
3925	destSector = (daddr64_t) newStart * hfsmp->blockSize / hfsmp->hfs_logical_block_size;
3926	while (resid > 0) {
3927		ioSize = MIN(bufferSize, resid);
3928		ioSizeSectors = ioSize / hfsmp->hfs_logical_block_size;
3929
3930		/* Prepare the buffer for reading */
3931		buf_reset(bp, B_READ);
3932		buf_setsize(bp, ioSize);
3933		buf_setcount(bp, ioSize);
3934		buf_setblkno(bp, srcSector);
3935		buf_setlblkno(bp, srcSector);
3936
3937		/* Do the read */
3938		err = VNOP_STRATEGY(bp);
3939		if (!err)
3940			err = buf_biowait(bp);
3941		if (err) {
3942			printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (read)\n", err);
3943			break;
3944		}
3945
3946		/* Prepare the buffer for writing */
3947		buf_reset(bp, B_WRITE);
3948		buf_setsize(bp, ioSize);
3949		buf_setcount(bp, ioSize);
3950		buf_setblkno(bp, destSector);
3951		buf_setlblkno(bp, destSector);
3952		if (journal_uses_fua(hfsmp->jnl))
3953			buf_markfua(bp);
3954
3955		/* Do the write */
3956		vnode_startwrite(hfsmp->hfs_devvp);
3957		err = VNOP_STRATEGY(bp);
3958		if (!err)
3959			err = buf_biowait(bp);
3960		if (err) {
3961			printf("hfs_copy_extent: Error %d from VNOP_STRATEGY (write)\n", err);
3962			break;
3963		}
3964
3965		resid -= ioSize;
3966		srcSector += ioSizeSectors;
3967		destSector += ioSizeSectors;
3968	}
3969	if (bp)
3970		buf_free(bp);
3971	if (buffer)
3972		kmem_free(kernel_map, (vm_offset_t)buffer, bufferSize);
3973
3974	/* Make sure all writes have been flushed to disk. */
3975	if (!journal_uses_fua(hfsmp->jnl)) {
3976		err = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
3977		if (err) {
3978			printf("hfs_copy_extent: DKIOCSYNCHRONIZECACHE failed (%d)\n", err);
3979			err = 0;	/* Don't fail the copy. */
3980		}
3981	}
3982
3983	if (!err)
3984		hfs_invalidate_sectors(vp, (daddr64_t)oldStart*sectorsPerBlock, (daddr64_t)blockCount*sectorsPerBlock);
3985
3986	return err;
3987}
3988
3989
3990/*
3991 * Reclaim space at the end of a volume, used by a given system file.
3992 *
3993 * This routine attempts to move any extent which contains allocation blocks
3994 * at or after "startblk."  A separate transaction is used to do the move.
3995 * The contents of any moved extents are read and written via the volume's
3996 * device vnode -- NOT via "vp."  During the move, moved blocks which are part
3997 * of a transaction have their physical block numbers invalidated so they will
3998 * eventually be written to their new locations.
3999 *
4000 * This routine can be used to move overflow extents for the allocation file.
4001 *
4002 * Inputs:
4003 *    hfsmp       The volume being resized.
4004 *    startblk    Blocks >= this allocation block need to be moved.
4005 *    locks       Which locks need to be taken for the given system file.
4006 *    vp          The vnode for the system file.
4007 *
4008 * Outputs:
4009 *    moved       Set to true if any extents were moved.
4010 */
4011static int
4012hfs_relocate_callback(__unused HFSPlusExtentKey *key, HFSPlusExtentRecord *record, HFSPlusExtentRecord *state)
4013{
4014	bcopy(state, record, sizeof(HFSPlusExtentRecord));
4015	return 0;
4016}
4017static int
4018hfs_reclaim_sys_file(struct hfsmount *hfsmp, struct vnode *vp, u_long startblk, int locks, Boolean *moved, vfs_context_t context)
4019{
4020	int error;
4021	int lockflags;
4022	int i;
4023	u_long datablks;
4024	u_long block;
4025	u_int32_t oldStartBlock;
4026	u_int32_t newStartBlock;
4027	u_int32_t blockCount;
4028	struct filefork *fp;
4029
4030	/* If there is no vnode for this file, then there's nothing to do. */
4031	if (vp == NULL)
4032		return 0;
4033
4034	/* printf("hfs_reclaim_sys_file: %.*s\n", VTOC(vp)->c_desc.cd_namelen, VTOC(vp)->c_desc.cd_nameptr); */
4035
4036	/* We always need the allocation bitmap and extents B-tree */
4037	locks |= SFL_BITMAP | SFL_EXTENTS;
4038
4039	error = hfs_start_transaction(hfsmp);
4040	if (error) {
4041		printf("hfs_reclaim_sys_file: hfs_start_transaction returned %d\n", error);
4042		return error;
4043	}
4044	lockflags = hfs_systemfile_lock(hfsmp, locks, HFS_EXCLUSIVE_LOCK);
4045	fp = VTOF(vp);
4046	datablks = 0;
4047
4048	/* Relocate non-overflow extents */
4049	for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4050		if (fp->ff_extents[i].blockCount == 0)
4051			break;
4052		oldStartBlock = fp->ff_extents[i].startBlock;
4053		blockCount = fp->ff_extents[i].blockCount;
4054		datablks += blockCount;
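		/*
		 * The extent needs to move if any of its blocks lie at or
		 * beyond startblk (i.e. its end, oldStartBlock + blockCount,
		 * is past startblk).
		 */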
4055		block = oldStartBlock + blockCount;
4056		if (block > startblk) {
4057			error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
4058			if (error) {
4059				printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
4060				goto fail;
4061			}
4062			if (blockCount != fp->ff_extents[i].blockCount) {
4063				printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
4064				goto free_fail;
4065			}
4066			error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
4067			if (error) {
4068				printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
4069				goto free_fail;
4070			}
4071			fp->ff_extents[i].startBlock = newStartBlock;
4072			VTOC(vp)->c_flag |= C_MODIFIED;
4073			*moved = true;
4074			error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
4075			if (error) {
4076				/* TODO: Mark volume inconsistent? */
4077				printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
4078				goto fail;
4079			}
4080			error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
4081			if (error) {
4082				/* TODO: Mark volume inconsistent? */
4083				printf("hfs_reclaim_sys_file: hfs_flushvolumeheader returned %d\n", error);
4084				goto fail;
4085			}
4086		}
4087	}
4088
4089	/* Relocate overflow extents (if any) */
4090	if (i == kHFSPlusExtentDensity && fp->ff_blocks > datablks) {
4091		struct BTreeIterator *iterator = NULL;
4092		struct FSBufferDescriptor btdata;
4093		HFSPlusExtentRecord record;
4094		HFSPlusExtentKey *key;
4095		FCB *fcb;
4096		u_int32_t fileID;
4097		u_int8_t forktype;
4098
4099		forktype = VNODE_IS_RSRC(vp) ? 0xFF : 0;
4100		fileID = VTOC(vp)->c_cnid;
4101		if (kmem_alloc(kernel_map, (vm_offset_t*) &iterator, sizeof(*iterator))) {
4102			printf("hfs_reclaim_sys_file: kmem_alloc failed!\n");
4103			error = ENOMEM;
4104			goto fail;
4105		}
4106
4107		bzero(iterator, sizeof(*iterator));
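		/*
		 * Overflow extent records are keyed by <fileID, forkType,
		 * startBlock>, where startBlock is the fork-relative block at
		 * which the record begins.  The first overflow record starts at
		 * datablks, the total covered by the eight in-line extents.
		 */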
4108		key = (HFSPlusExtentKey *) &iterator->key;
4109		key->keyLength = kHFSPlusExtentKeyMaximumLength;
4110		key->forkType = forktype;
4111		key->fileID = fileID;
4112		key->startBlock = datablks;
4113
4114		btdata.bufferAddress = &record;
4115		btdata.itemSize = sizeof(record);
4116		btdata.itemCount = 1;
4117
4118		fcb = VTOF(hfsmp->hfs_extents_vp);
4119
4120		error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
4121		while (error == 0) {
4122			/* Stop when we encounter a different file or fork. */
4123			if ((key->fileID != fileID) ||
4124				(key->forkType != forktype)) {
4125				break;
4126			}
4127			/*
4128			 * Check if the file overlaps target space.
4129			 */
4130			for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4131				if (record[i].blockCount == 0) {
4132					goto overflow_done;
4133				}
4134				oldStartBlock = record[i].startBlock;
4135				blockCount = record[i].blockCount;
4136				block = oldStartBlock + blockCount;
4137				if (block > startblk) {
4138					error = BlockAllocate(hfsmp, 1, blockCount, blockCount, true, true, &newStartBlock, &blockCount);
4139					if (error) {
4140						printf("hfs_reclaim_sys_file: BlockAllocate returned %d\n", error);
4141						goto overflow_done;
4142					}
4143					if (blockCount != record[i].blockCount) {
4144						printf("hfs_reclaim_sys_file: new blockCount=%u, original blockCount=%u", blockCount, fp->ff_extents[i].blockCount);
4145						kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4146						goto free_fail;
4147					}
4148					error = hfs_copy_extent(hfsmp, vp, oldStartBlock, newStartBlock, blockCount, context);
4149					if (error) {
4150						printf("hfs_reclaim_sys_file: hfs_copy_extent returned %d\n", error);
4151						kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4152						goto free_fail;
4153					}
4154					record[i].startBlock = newStartBlock;
4155					VTOC(vp)->c_flag |= C_MODIFIED;
4156					*moved = true;
4157					/*
4158					 * NOTE: To support relocating overflow extents of the
4159					 * allocation file, we must update the BTree record BEFORE
4160					 * deallocating the old extent so that BlockDeallocate will
4161					 * use the extent's new location to calculate physical block
4162					 * numbers.  (This is for the case where the old extent's
4163					 * bitmap bits actually reside in the extent being moved.)
4164					 */
4165					error = BTUpdateRecord(fcb, iterator, (IterateCallBackProcPtr) hfs_relocate_callback, &record);
4166					if (error) {
4167						/* TODO: Mark volume inconsistent? */
4168						printf("hfs_reclaim_sys_file: BTUpdateRecord returned %d\n", error);
4169						goto overflow_done;
4170					}
4171					error = BlockDeallocate(hfsmp, oldStartBlock, blockCount);
4172					if (error) {
4173						/* TODO: Mark volume inconsistent? */
4174						printf("hfs_reclaim_sys_file: BlockDeallocate returned %d\n", error);
4175						goto overflow_done;
4176					}
4177				}
4178			}
4179			/* Look for more records. */
4180			error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4181			if (error == btNotFound) {
4182				error = 0;
4183				break;
4184			}
4185		}
4186overflow_done:
4187		kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4188		if (error) {
4189			goto fail;
4190		}
4191	}
4192
4193	hfs_systemfile_unlock(hfsmp, lockflags);
4194	error = hfs_end_transaction(hfsmp);
4195	if (error) {
4196		printf("hfs_reclaim_sys_file: hfs_end_transaction returned %d\n", error);
4197	}
4198
4199	return error;
4200
4201free_fail:
4202	(void) BlockDeallocate(hfsmp, newStartBlock, blockCount);
4203fail:
4204	(void) hfs_systemfile_unlock(hfsmp, lockflags);
4205	(void) hfs_end_transaction(hfsmp);
4206	return error;
4207}
4208
4209
4210/*
4211 * This journal_relocate callback updates the journal info block to point
4212 * at the new journal location.  This write must NOT be done using the
4213 * transaction.  We must write the block immediately.  We must also force
4214 * it to get to the media so that the new journal location will be seen by
4215 * the replay code before we can safely let journaled blocks be written
4216 * to their normal locations.
4217 *
4218 * The tests for journal_uses_fua below are mildly hacky.  Since the journal
4219 * and the file system are both on the same device, I'm leveraging what
4220 * the journal has decided about FUA.
4221 */
4222struct hfs_journal_relocate_args {
4223	struct hfsmount *hfsmp;
4224	vfs_context_t context;
4225	u_int32_t newStartBlock;
4226};
4227
4228static errno_t
4229hfs_journal_relocate_callback(void *_args)
4230{
4231	int error;
4232	struct hfs_journal_relocate_args *args = _args;
4233	struct hfsmount *hfsmp = args->hfsmp;
4234	buf_t bp;
4235	JournalInfoBlock *jibp;
4236
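	/*
	 * Read the journal info block; the allocation block number is scaled
	 * by blockSize / hfs_logical_block_size to get the device block.
	 */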
4237	error = buf_meta_bread(hfsmp->hfs_devvp,
4238		hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4239		hfsmp->blockSize, vfs_context_ucred(args->context), &bp);
4240	if (error) {
4241		printf("hfs_reclaim_journal_file: failed to read JIB (%d)\n", error);
4242		return error;
4243	}
4244	jibp = (JournalInfoBlock*) buf_dataptr(bp);
4245	jibp->offset = SWAP_BE64((u_int64_t)args->newStartBlock * hfsmp->blockSize);
4246	jibp->size = SWAP_BE64(hfsmp->jnl_size);
4247	if (journal_uses_fua(hfsmp->jnl))
4248		buf_markfua(bp);
4249	error = buf_bwrite(bp);
4250	if (error) {
4251		printf("hfs_reclaim_journal_file: failed to write JIB (%d)\n", error);
4252		return error;
4253	}
4254	if (!journal_uses_fua(hfsmp->jnl)) {
4255		error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, args->context);
4256		if (error) {
4257			printf("hfs_reclaim_journal_file: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
4258			error = 0;		/* Don't fail the operation. */
4259		}
4260	}
4261
4262	return error;
4263}
4264
4265
4266static int
4267hfs_reclaim_journal_file(struct hfsmount *hfsmp, vfs_context_t context)
4268{
4269	int error;
4270	int lockflags;
4271	u_int32_t newStartBlock;
4272	u_int32_t oldBlockCount;
4273	u_int32_t newBlockCount;
4274	struct cat_desc journal_desc;
4275	struct cat_attr journal_attr;
4276	struct cat_fork journal_fork;
4277	struct hfs_journal_relocate_args callback_args;
4278
4279	error = hfs_start_transaction(hfsmp);
4280	if (error) {
4281		printf("hfs_reclaim_journal_file: hfs_start_transaction returned %d\n", error);
4282		return error;
4283	}
4284	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4285
4286	oldBlockCount = hfsmp->jnl_size / hfsmp->blockSize;
4287
4288	/* TODO: Allow the journal to change size based on the new volume size. */
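	/* The journal is kept as one contiguous extent, so request exactly oldBlockCount blocks. */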
4289	error = BlockAllocate(hfsmp, 1, oldBlockCount, oldBlockCount, true, true, &newStartBlock, &newBlockCount);
4290	if (error) {
4291		printf("hfs_reclaim_journal_file: BlockAllocate returned %d\n", error);
4292		goto fail;
4293	}
4294	if (newBlockCount != oldBlockCount) {
4295		printf("hfs_reclaim_journal_file: newBlockCount != oldBlockCount (%u, %u)\n", newBlockCount, oldBlockCount);
4296		goto free_fail;
4297	}
4298
4299	error = BlockDeallocate(hfsmp, hfsmp->jnl_start, oldBlockCount);
4300	if (error) {
4301		printf("hfs_reclaim_journal_file: BlockDeallocate returned %d\n", error);
4302		goto free_fail;
4303	}
4304
4305	/* Update the catalog record for .journal */
4306	error = cat_idlookup(hfsmp, hfsmp->hfs_jnlfileid, 1, &journal_desc, &journal_attr, &journal_fork);
4307	if (error) {
4308		printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
4309		goto free_fail;
4310	}
4311	journal_fork.cf_size = newBlockCount * hfsmp->blockSize;
4312	journal_fork.cf_extents[0].startBlock = newStartBlock;
4313	journal_fork.cf_extents[0].blockCount = newBlockCount;
4314	journal_fork.cf_blocks = newBlockCount;
4315	error = cat_update(hfsmp, &journal_desc, &journal_attr, &journal_fork, NULL);
4316	cat_releasedesc(&journal_desc);  /* all done with cat descriptor */
4317	if (error) {
4318		printf("hfs_reclaim_journal_file: cat_update returned %d\n", error);
4319		goto free_fail;
4320	}
4321	callback_args.hfsmp = hfsmp;
4322	callback_args.context = context;
4323	callback_args.newStartBlock = newStartBlock;
4324
4325	error = journal_relocate(hfsmp->jnl, (off_t)newStartBlock*hfsmp->blockSize,
4326		(off_t)newBlockCount*hfsmp->blockSize, 0,
4327		hfs_journal_relocate_callback, &callback_args);
4328	if (error) {
4329		/* NOTE: journal_relocate will mark the journal invalid. */
4330		printf("hfs_reclaim_journal_file: journal_relocate returned %d\n", error);
4331		goto fail;
4332	}
4333	hfsmp->jnl_start = newStartBlock;
4334	hfsmp->jnl_size = (off_t)newBlockCount * hfsmp->blockSize;
4335
4336	hfs_systemfile_unlock(hfsmp, lockflags);
4337	error = hfs_end_transaction(hfsmp);
4338	if (error) {
4339		printf("hfs_reclaim_journal_file: hfs_end_transaction returned %d\n", error);
4340	}
4341
4342	return error;
4343
4344free_fail:
4345	(void) BlockDeallocate(hfsmp, newStartBlock, newBlockCount);
4346fail:
4347	hfs_systemfile_unlock(hfsmp, lockflags);
4348	(void) hfs_end_transaction(hfsmp);
4349	return error;
4350}
4351
4352
4353/*
4354 * Move the journal info block to a new location.  We have to make sure the
4355 * new copy of the journal info block gets to the media first, then change
4356 * the field in the volume header and the catalog record.
4357 */
4358static int
4359hfs_reclaim_journal_info_block(struct hfsmount *hfsmp, vfs_context_t context)
4360{
4361	int error;
4362	int lockflags;
4363	u_int32_t newBlock;
4364	u_int32_t blockCount;
4365	struct cat_desc jib_desc;
4366	struct cat_attr jib_attr;
4367	struct cat_fork jib_fork;
4368	buf_t old_bp, new_bp;
4369
4370	error = hfs_start_transaction(hfsmp);
4371	if (error) {
4372		printf("hfs_reclaim_journal_info_block: hfs_start_transaction returned %d\n", error);
4373		return error;
4374	}
4375	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
4376
4377	error = BlockAllocate(hfsmp, 1, 1, 1, true, true, &newBlock, &blockCount);
4378	if (error) {
4379		printf("hfs_reclaim_journal_info_block: BlockAllocate returned %d\n", error);
4380		goto fail;
4381	}
4382	if (blockCount != 1) {
4383		printf("hfs_reclaim_journal_info_block: blockCount != 1 (%u)\n", blockCount);
4384		goto free_fail;
4385	}
4386	error = BlockDeallocate(hfsmp, hfsmp->vcbJinfoBlock, 1);
4387	if (error) {
4388		printf("hfs_reclaim_journal_info_block: BlockDeallocate returned %d\n", error);
4389		goto free_fail;
4390	}
4391
4392	/* Copy the old journal info block content to the new location */
4393	error = buf_meta_bread(hfsmp->hfs_devvp,
4394		hfsmp->vcbJinfoBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4395		hfsmp->blockSize, vfs_context_ucred(context), &old_bp);
4396	if (error) {
4397		printf("hfs_reclaim_journal_info_block: failed to read JIB (%d)\n", error);
4398		goto free_fail;
4399	}
4400	new_bp = buf_getblk(hfsmp->hfs_devvp,
4401		newBlock * (hfsmp->blockSize/hfsmp->hfs_logical_block_size),
4402		hfsmp->blockSize, 0, 0, BLK_META);
4403	bcopy((char*)buf_dataptr(old_bp), (char*)buf_dataptr(new_bp), hfsmp->blockSize);
4404	buf_brelse(old_bp);
4405	if (journal_uses_fua(hfsmp->jnl))
4406		buf_markfua(new_bp);
4407	error = buf_bwrite(new_bp);
4408	if (error) {
4409		printf("hfs_reclaim_journal_info_block: failed to write new JIB (%d)\n", error);
4410		goto free_fail;
4411	}
4412	if (!journal_uses_fua(hfsmp->jnl)) {
4413		error = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
4414		if (error) {
4415			printf("hfs_reclaim_journal_info_block: DKIOCSYNCHRONIZECACHE failed (%d)\n", error);
4416			/* Don't fail the operation. */
4417		}
4418	}
4419
4420	/* Update the catalog record for .journal_info_block */
4421	error = cat_idlookup(hfsmp, hfsmp->hfs_jnlinfoblkid, 1, &jib_desc, &jib_attr, &jib_fork);
4422	if (error) {
4423		printf("hfs_reclaim_journal_file: cat_idlookup returned %d\n", error);
4424		goto fail;
4425	}
4426	jib_fork.cf_size = hfsmp->blockSize;
4427	jib_fork.cf_extents[0].startBlock = newBlock;
4428	jib_fork.cf_extents[0].blockCount = 1;
4429	jib_fork.cf_blocks = 1;
4430	error = cat_update(hfsmp, &jib_desc, &jib_attr, &jib_fork, NULL);
4431	cat_releasedesc(&jib_desc);  /* all done with cat descriptor */
4432	if (error) {
4433		printf("hfs_reclaim_journal_info_block: cat_update returned %d\n", error);
4434		goto fail;
4435	}
4436
4437	/* Update the pointer to the journal info block in the volume header. */
4438	hfsmp->vcbJinfoBlock = newBlock;
4439	error = hfs_flushvolumeheader(hfsmp, MNT_WAIT, HFS_ALTFLUSH);
4440	if (error) {
4441		printf("hfs_reclaim_journal_info_block: hfs_flushvolumeheader returned %d\n", error);
4442		goto fail;
4443	}
4444	hfs_systemfile_unlock(hfsmp, lockflags);
4445	error = hfs_end_transaction(hfsmp);
4446	if (error) {
4447		printf("hfs_reclaim_journal_info_block: hfs_end_transaction returned %d\n", error);
4448	}
4449	error = journal_flush(hfsmp->jnl);
4450	if (error) {
4451		printf("hfs_reclaim_journal_info_block: journal_flush returned %d\n", error);
4452	}
4453	return error;
4454
4455free_fail:
4456	(void) BlockDeallocate(hfsmp, newBlock, blockCount);
4457fail:
4458	hfs_systemfile_unlock(hfsmp, lockflags);
4459	(void) hfs_end_transaction(hfsmp);
4460	return error;
4461}
4462
4463
4464/*
4465 * Reclaim space at the end of a file system.
4466 */
4467static int
4468hfs_reclaimspace(struct hfsmount *hfsmp, u_long startblk, u_long reclaimblks, vfs_context_t context)
4469{
4470	struct vnode *vp = NULL;
4471	FCB *fcb;
4472	struct BTreeIterator * iterator = NULL;
4473	struct FSBufferDescriptor btdata;
4474	struct HFSPlusCatalogFile filerec;
4475	u_int32_t  saved_next_allocation;
4476	cnid_t * cnidbufp;
4477	size_t cnidbufsize;
4478	int filecnt = 0;
4479	int maxfilecnt;
4480	u_long block;
4481	u_long datablks;
4482	u_long rsrcblks;
4483	u_long blkstomove = 0;
4484	int lockflags;
4485	int i;
4486	int error;
4487	int lastprogress = 0;
4488	Boolean system_file_moved = false;
4489
4490	/* Relocate extents of the Allocation file if they're in the way. */
4491	error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_allocation_vp, startblk, SFL_BITMAP, &system_file_moved, context);
4492	if (error) {
4493		printf("hfs_reclaimspace: reclaim allocation file returned %d\n", error);
4494		return error;
4495	}
4496	/* Relocate extents of the Extents B-tree if they're in the way. */
4497	error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_extents_vp, startblk, SFL_EXTENTS, &system_file_moved, context);
4498	if (error) {
4499		printf("hfs_reclaimspace: reclaim extents b-tree returned %d\n", error);
4500		return error;
4501	}
4502	/* Relocate extents of the Catalog B-tree if they're in the way. */
4503	error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_catalog_vp, startblk, SFL_CATALOG, &system_file_moved, context);
4504	if (error) {
4505		printf("hfs_reclaimspace: reclaim catalog b-tree returned %d\n", error);
4506		return error;
4507	}
4508	/* Relocate extents of the Attributes B-tree if they're in the way. */
4509	error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_attribute_vp, startblk, SFL_ATTRIBUTE, &system_file_moved, context);
4510	if (error) {
4511		printf("hfs_reclaimspace: reclaim attribute b-tree returned %d\n", error);
4512		return error;
4513	}
4514	/* Relocate extents of the Startup File if there is one and they're in the way. */
4515	error = hfs_reclaim_sys_file(hfsmp, hfsmp->hfs_startup_vp, startblk, SFL_STARTUP, &system_file_moved, context);
4516	if (error) {
4517		printf("hfs_reclaimspace: reclaim startup file returned %d\n", error);
4518		return error;
4519	}
4520
4521	/*
4522	 * We need to make sure the alternate volume header gets flushed if we moved
4523	 * any extents in the volume header.  But we need to do that before
4524	 * shrinking the size of the volume, or else the journal code will panic
4525	 * with an invalid (too large) block number.
4526	 *
4527	 * Note that system_file_moved will be set if ANY extent was moved, even
4528	 * if it was just an overflow extent.  In this case, the journal_flush isn't
4529	 * strictly required, but shouldn't hurt.
4530	 */
4531	if (system_file_moved)
4532		journal_flush(hfsmp->jnl);
4533
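	/* Relocate the journal file if any of it lies in the space being reclaimed. */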
4534	if (hfsmp->jnl_start + (hfsmp->jnl_size / hfsmp->blockSize) > startblk) {
4535		error = hfs_reclaim_journal_file(hfsmp, context);
4536		if (error) {
4537			printf("hfs_reclaimspace: hfs_reclaim_journal_file failed (%d)\n", error);
4538			return error;
4539		}
4540	}
4541
4542	if (hfsmp->vcbJinfoBlock >= startblk) {
4543		error = hfs_reclaim_journal_info_block(hfsmp, context);
4544		if (error) {
4545			printf("hfs_reclaimspace: hfs_reclaim_journal_info_block failed (%d)\n", error);
4546			return error;
4547		}
4548	}
4549
4550	/* For now move a maximum of 250,000 files. */
4551	maxfilecnt = MIN(hfsmp->hfs_filecount, 250000);
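	/* Each file that must move owns at least one block in the reclaim area, so reclaimblks is also an upper bound. */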
4552	maxfilecnt = MIN((u_long)maxfilecnt, reclaimblks);
4553	cnidbufsize = maxfilecnt * sizeof(cnid_t);
4554	if (kmem_alloc(kernel_map, (vm_offset_t *)&cnidbufp, cnidbufsize)) {
4555		return (ENOMEM);
4556	}
4557	if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
4558		kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
4559		return (ENOMEM);
4560	}
4561
4562	saved_next_allocation = hfsmp->nextAllocation;
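	/*
	 * Temporarily point the roving allocation pointer at the start of the
	 * metadata zone so new allocations favor the front of the volume; it
	 * is restored below if no files end up being moved.
	 */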
4563	HFS_UPDATE_NEXT_ALLOCATION(hfsmp, hfsmp->hfs_metazone_start);
4564
4565	fcb = VTOF(hfsmp->hfs_catalog_vp);
4566	bzero(iterator, sizeof(*iterator));
4567
4568	btdata.bufferAddress = &filerec;
4569	btdata.itemSize = sizeof(filerec);
4570	btdata.itemCount = 1;
4571
4572	/* Keep the Catalog and extents files locked during iteration. */
4573	lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_SHARED_LOCK);
4574
4575	error = BTIterateRecord(fcb, kBTreeFirstRecord, iterator, NULL, NULL);
4576	if (error) {
4577		goto end_iteration;
4578	}
4579	/*
4580	 * Iterate over all the catalog records looking for files
4581	 * that overlap into the space we're trying to free up.
4582	 */
4583	for (filecnt = 0; filecnt < maxfilecnt; ) {
4584		error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4585		if (error) {
4586			if (error == fsBTRecordNotFoundErr || error == fsBTEndOfIterationErr) {
4587				error = 0;
4588			}
4589			break;
4590		}
4591		if (filerec.recordType != kHFSPlusFileRecord) {
4592			continue;
4593		}
4594		datablks = rsrcblks = 0;
4595		/*
4596		 * Check if either fork overlaps target space.
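		 *
		 * A file is queued at most once: as soon as one of its extents
		 * ends at or beyond startblk we record its fileID and count the
		 * whole fork's blocks toward blkstomove.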
4597		 */
4598		for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4599			if (filerec.dataFork.extents[i].blockCount != 0) {
4600				datablks += filerec.dataFork.extents[i].blockCount;
4601				block = filerec.dataFork.extents[i].startBlock +
4602						filerec.dataFork.extents[i].blockCount;
4603				if (block >= startblk) {
4604					if ((filerec.fileID == hfsmp->hfs_jnlfileid) ||
4605						(filerec.fileID == hfsmp->hfs_jnlinfoblkid)) {
4606						printf("hfs_reclaimspace: cannot move active journal\n");
4607						error = EPERM;
4608						goto end_iteration;
4609					}
4610					cnidbufp[filecnt++] = filerec.fileID;
4611					blkstomove += filerec.dataFork.totalBlocks;
4612					break;
4613				}
4614			}
4615			if (filerec.resourceFork.extents[i].blockCount != 0) {
4616				rsrcblks += filerec.resourceFork.extents[i].blockCount;
4617				block = filerec.resourceFork.extents[i].startBlock +
4618						filerec.resourceFork.extents[i].blockCount;
4619				if (block >= startblk) {
4620					cnidbufp[filecnt++] = filerec.fileID;
4621					blkstomove += filerec.resourceFork.totalBlocks;
4622					break;
4623				}
4624			}
4625		}
4626		/*
4627		 * Check for any overflow extents that overlap.
4628		 */
4629		if (i == kHFSPlusExtentDensity) {
4630			if (filerec.dataFork.totalBlocks > datablks) {
4631				if (hfs_overlapped_overflow_extents(hfsmp, startblk, datablks, filerec.fileID, 0)) {
4632					cnidbufp[filecnt++] = filerec.fileID;
4633					blkstomove += filerec.dataFork.totalBlocks;
4634				}
4635			} else if (filerec.resourceFork.totalBlocks > rsrcblks) {
4636				if (hfs_overlapped_overflow_extents(hfsmp, startblk, rsrcblks, filerec.fileID, 1)) {
4637					cnidbufp[filecnt++] = filerec.fileID;
4638					blkstomove += filerec.resourceFork.totalBlocks;
4639				}
4640			}
4641		}
4642	}
4643
4644end_iteration:
4645	if (filecnt == 0 && !system_file_moved) {
4646		printf("hfs_reclaimspace: no files moved\n");
4647		error = ENOSPC;
4648	}
4649	/* All done with catalog. */
4650	hfs_systemfile_unlock(hfsmp, lockflags);
4651	if (error || filecnt == 0)
4652		goto out;
4653
4654	/*
4655	 * Double check space requirements to make sure
4656	 * there is enough space to relocate any files
4657	 * that reside in the reclaim area.
4658	 *
4659	 *                                          Blocks To Move --------------
4660	 *                                                            |    |    |
4661	 *                                                            V    V    V
4662	 * ------------------------------------------------------------------------
4663	 * |                                                        | /   ///  // |
4664	 * |                                                        | /   ///  // |
4665	 * |                                                        | /   ///  // |
4666	 * ------------------------------------------------------------------------
4667	 *
4668	 * <------------------- New Total Blocks ------------------><-- Reclaim -->
4669	 *
4670	 * <------------------------ Original Total Blocks ----------------------->
4671	 *
4672	 */
4673	if (blkstomove >= hfs_freeblks(hfsmp, 1)) {
4674		printf("hfs_truncatefs: insufficient space (need %lu blocks; have %u blocks)\n", blkstomove, hfs_freeblks(hfsmp, 1));
4675		error = ENOSPC;
4676		goto out;
4677	}
4678	hfsmp->hfs_resize_filesmoved = 0;
4679	hfsmp->hfs_resize_totalfiles = filecnt;
4680
4681	/* Now move any files that are in the way. */
4682	for (i = 0; i < filecnt; ++i) {
4683		struct vnode * rvp;
		struct cnode * cp;
4685
4686		if (hfs_vget(hfsmp, cnidbufp[i], &vp, 0) != 0)
4687			continue;
4688
		/* Relocating directory hard links is not supported, so we
		 * punt (see radar 6217026). */
		cp = VTOC(vp);
		if ((cp->c_flag & C_HARDLINK) && vnode_isdir(vp)) {
			printf("hfs_reclaimspace: unable to relocate directory hard link %d\n", cp->c_cnid);
			error = EINVAL;
			goto out;
		}
4697
4698		/* Relocate any data fork blocks. */
4699		if (VTOF(vp) && VTOF(vp)->ff_blocks > 0) {
4700			error = hfs_relocate(vp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
4701		}
4702		if (error)
4703			break;
4704
4705		/* Relocate any resource fork blocks. */
4706		if ((cp->c_blocks - (VTOF(vp) ? VTOF((vp))->ff_blocks : 0)) > 0) {
4707			error = hfs_vgetrsrc(hfsmp, vp, &rvp, TRUE);
4708			if (error)
4709				break;
4710			error = hfs_relocate(rvp, hfsmp->hfs_metazone_end + 1, kauth_cred_get(), current_proc());
4711			VTOC(rvp)->c_flag |= C_NEED_RVNODE_PUT;
4712			if (error)
4713				break;
4714		}
4715		hfs_unlock(cp);
4716		vnode_put(vp);
4717		vp = NULL;
4718
4719		++hfsmp->hfs_resize_filesmoved;
4720
4721		/* Report intermediate progress. */
4722		if (filecnt > 100) {
4723			int progress;
4724
4725			progress = (i * 100) / filecnt;
4726			if (progress > (lastprogress + 9)) {
4727				printf("hfs_reclaimspace: %d%% done...\n", progress);
4728				lastprogress = progress;
4729			}
4730		}
4731	}
4732	if (vp) {
4733		hfs_unlock(VTOC(vp));
4734		vnode_put(vp);
4735		vp = NULL;
4736	}
4737	if (hfsmp->hfs_resize_filesmoved != 0) {
4738		printf("hfs_reclaimspace: relocated %d files on \"%s\"\n",
4739		       (int)hfsmp->hfs_resize_filesmoved, hfsmp->vcbVN);
4740	}
4741out:
4742	kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4743	kmem_free(kernel_map, (vm_offset_t)cnidbufp, cnidbufsize);
4744
4745	/*
4746	 * Restore the roving allocation pointer on errors.
4747	 * (but only if we didn't move any files)
4748	 */
4749	if (error && hfsmp->hfs_resize_filesmoved == 0) {
4750		HFS_UPDATE_NEXT_ALLOCATION(hfsmp, saved_next_allocation);
4751	}
4752	return (error);
4753}
4754
4755
4756/*
4757 * Check if there are any overflow extents that overlap.
4758 */
4759static int
4760hfs_overlapped_overflow_extents(struct hfsmount *hfsmp, u_int32_t startblk, u_int32_t catblks, u_int32_t fileID, int rsrcfork)
4761{
4762	struct BTreeIterator * iterator = NULL;
4763	struct FSBufferDescriptor btdata;
4764	HFSPlusExtentRecord extrec;
4765	HFSPlusExtentKey *extkeyptr;
4766	FCB *fcb;
4767	u_int32_t block;
4768	u_int8_t forktype;
4769	int overlapped = 0;
4770	int i;
4771	int error;
4772
4773	forktype = rsrcfork ? 0xFF : 0;
4774	if (kmem_alloc(kernel_map, (vm_offset_t *)&iterator, sizeof(*iterator))) {
4775		return (0);
4776	}
4777	bzero(iterator, sizeof(*iterator));
4778	extkeyptr = (HFSPlusExtentKey *)&iterator->key;
4779	extkeyptr->keyLength = kHFSPlusExtentKeyMaximumLength;
4780	extkeyptr->forkType = forktype;
4781	extkeyptr->fileID = fileID;
4782	extkeyptr->startBlock = catblks;
4783
4784	btdata.bufferAddress = &extrec;
4785	btdata.itemSize = sizeof(extrec);
4786	btdata.itemCount = 1;
4787
4788	fcb = VTOF(hfsmp->hfs_extents_vp);
4789
4790	error = BTSearchRecord(fcb, iterator, &btdata, NULL, iterator);
4791	while (error == 0) {
4792		/* Stop when we encounter a different file. */
4793		if ((extkeyptr->fileID != fileID) ||
4794		    (extkeyptr->forkType != forktype)) {
4795			break;
4796		}
4797		/*
4798		 * Check if the file overlaps target space.
4799		 */
4800		for (i = 0; i < kHFSPlusExtentDensity; ++i) {
4801			if (extrec[i].blockCount == 0) {
4802				break;
4803			}
4804			block = extrec[i].startBlock + extrec[i].blockCount;
4805			if (block >= startblk) {
4806				overlapped = 1;
4807				break;
4808			}
4809		}
4810		/* Look for more records. */
4811		error = BTIterateRecord(fcb, kBTreeNextRecord, iterator, &btdata, NULL);
4812	}
4813
4814	kmem_free(kernel_map, (vm_offset_t)iterator, sizeof(*iterator));
4815	return (overlapped);
4816}
4817
4818
4819/*
4820 * Calculate the progress of a file system resize operation.
4821 */
4822__private_extern__
4823int
4824hfs_resize_progress(struct hfsmount *hfsmp, u_int32_t *progress)
4825{
4826	if ((hfsmp->hfs_flags & HFS_RESIZE_IN_PROGRESS) == 0) {
4827		return (ENXIO);
4828	}
4829
4830	if (hfsmp->hfs_resize_totalfiles > 0)
4831		*progress = (hfsmp->hfs_resize_filesmoved * 100) / hfsmp->hfs_resize_totalfiles;
4832	else
4833		*progress = 0;
4834
4835	return (0);
4836}
4837
4838
4839/*
4840 * Get file system attributes.
4841 */
4842static int
4843hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4844{
4845#define HFS_ATTR_CMN_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_NAMEDATTRCOUNT | ATTR_CMN_NAMEDATTRLIST))
4846#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
4847
4848	ExtendedVCB *vcb = VFSTOVCB(mp);
4849	struct hfsmount *hfsmp = VFSTOHFS(mp);
4850	u_long freeCNIDs;
4851
4852	freeCNIDs = (u_long)0xFFFFFFFF - (u_long)hfsmp->vcbNxtCNID;
4853
4854	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
4855	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
4856	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
4857	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
4858	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
4859	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
4860	VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
4861	VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
4862	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
4863	/* XXX needs clarification */
4864	VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
4865	/* Maximum files is constrained by total blocks. */
4866	VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2));
4867	VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1)));
4868
4869	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
4870	fsap->f_fsid.val[1] = vfs_typenum(mp);
4871	VFSATTR_SET_SUPPORTED(fsap, f_fsid);
4872
4873	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
4874	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);
4875
4876	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
4877		vol_capabilities_attr_t *cap;
4878
4879		cap = &fsap->f_capabilities;
4880
4881		if (hfsmp->hfs_flags & HFS_STANDARD) {
4882			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4883				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4884				VOL_CAP_FMT_CASE_PRESERVING |
4885				VOL_CAP_FMT_FAST_STATFS |
4886				VOL_CAP_FMT_HIDDEN_FILES |
4887				VOL_CAP_FMT_PATH_FROM_ID;
4888		} else {
4889			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4890				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4891				VOL_CAP_FMT_SYMBOLICLINKS |
4892				VOL_CAP_FMT_HARDLINKS |
4893				VOL_CAP_FMT_JOURNAL |
4894				VOL_CAP_FMT_ZERO_RUNS |
4895				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
4896				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
4897				VOL_CAP_FMT_CASE_PRESERVING |
4898				VOL_CAP_FMT_FAST_STATFS |
4899				VOL_CAP_FMT_2TB_FILESIZE |
4900				VOL_CAP_FMT_HIDDEN_FILES |
4901				VOL_CAP_FMT_PATH_FROM_ID;
4902		}
4903		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
4904			VOL_CAP_INT_SEARCHFS |
4905			VOL_CAP_INT_ATTRLIST |
4906			VOL_CAP_INT_NFSEXPORT |
4907			VOL_CAP_INT_READDIRATTR |
4908			VOL_CAP_INT_EXCHANGEDATA |
4909			VOL_CAP_INT_ALLOCATE |
4910			VOL_CAP_INT_VOL_RENAME |
4911			VOL_CAP_INT_ADVLOCK |
4912			VOL_CAP_INT_FLOCK |
4913#if NAMEDSTREAMS
4914			VOL_CAP_INT_EXTENDED_ATTR |
4915			VOL_CAP_INT_NAMEDSTREAMS;
4916#else
4917			VOL_CAP_INT_EXTENDED_ATTR;
4918#endif
4919		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
4920		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
4921
4922		cap->valid[VOL_CAPABILITIES_FORMAT] =
4923			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4924			VOL_CAP_FMT_SYMBOLICLINKS |
4925			VOL_CAP_FMT_HARDLINKS |
4926			VOL_CAP_FMT_JOURNAL |
4927			VOL_CAP_FMT_JOURNAL_ACTIVE |
4928			VOL_CAP_FMT_NO_ROOT_TIMES |
4929			VOL_CAP_FMT_SPARSE_FILES |
4930			VOL_CAP_FMT_ZERO_RUNS |
4931			VOL_CAP_FMT_CASE_SENSITIVE |
4932			VOL_CAP_FMT_CASE_PRESERVING |
4933			VOL_CAP_FMT_FAST_STATFS |
4934			VOL_CAP_FMT_2TB_FILESIZE |
4935			VOL_CAP_FMT_OPENDENYMODES |
4936			VOL_CAP_FMT_HIDDEN_FILES |
4937			VOL_CAP_FMT_PATH_FROM_ID;
4938		cap->valid[VOL_CAPABILITIES_INTERFACES] =
4939			VOL_CAP_INT_SEARCHFS |
4940			VOL_CAP_INT_ATTRLIST |
4941			VOL_CAP_INT_NFSEXPORT |
4942			VOL_CAP_INT_READDIRATTR |
4943			VOL_CAP_INT_EXCHANGEDATA |
4944			VOL_CAP_INT_COPYFILE |
4945			VOL_CAP_INT_ALLOCATE |
4946			VOL_CAP_INT_VOL_RENAME |
4947			VOL_CAP_INT_ADVLOCK |
4948			VOL_CAP_INT_FLOCK |
4949			VOL_CAP_INT_MANLOCK |
4950#if NAMEDSTREAMS
4951			VOL_CAP_INT_EXTENDED_ATTR |
4952			VOL_CAP_INT_NAMEDSTREAMS;
4953#else
4954			VOL_CAP_INT_EXTENDED_ATTR;
4955#endif
4956		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
4957		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
4958		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
4959	}
4960	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
4961		vol_attributes_attr_t *attrp = &fsap->f_attributes;
4962
		attrp->validattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->validattr.forkattr = 0;

		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VALIDMASK;
		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
		attrp->nativeattr.forkattr = 0;
4974		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
4975	}
4976	fsap->f_create_time.tv_sec = hfsmp->vcbCrDate;
4977	fsap->f_create_time.tv_nsec = 0;
4978	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
4979	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
4980	fsap->f_modify_time.tv_nsec = 0;
4981	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
4982
4983	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
4984	fsap->f_backup_time.tv_nsec = 0;
4985	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
4986	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
4987		u_int16_t subtype = 0;
4988
4989		/*
4990		 * Subtypes (flavors) for HFS
4991		 *   0:   Mac OS Extended
4992		 *   1:   Mac OS Extended (Journaled)
4993		 *   2:   Mac OS Extended (Case Sensitive)
4994		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
4995		 *   4 - 127:   Reserved
4996		 * 128:   Mac OS Standard
4997		 *
4998		 */
4999		if (hfsmp->hfs_flags & HFS_STANDARD) {
5000			subtype = HFS_SUBTYPE_STANDARDHFS;
5001		} else /* HFS Plus */ {
5002			if (hfsmp->jnl)
5003				subtype |= HFS_SUBTYPE_JOURNALED;
5004			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE)
5005				subtype |= HFS_SUBTYPE_CASESENSITIVE;
5006		}
5007		fsap->f_fssubtype = subtype;
5008		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
5009	}

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}
	return (0);
}

/*
 * Perform a volume rename.  Requires the filesystem's root vnode.
 */
static int
hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
{
	ExtendedVCB *vcb = VTOVCB(vp);
	struct cnode *cp = VTOC(vp);
	struct hfsmount *hfsmp = VTOHFS(vp);
	struct cat_desc to_desc;
	struct cat_desc todir_desc;
	struct cat_desc new_desc;
	cat_cookie_t cookie;
	int lockflags;
	int error = 0;

	/*
	 * Ignore attempts to rename a volume to a zero-length name.
	 */
	if (name[0] == 0)
		return(0);

	bzero(&to_desc, sizeof(to_desc));
	bzero(&todir_desc, sizeof(todir_desc));
	bzero(&new_desc, sizeof(new_desc));
	bzero(&cookie, sizeof(cookie));

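	/*
	 * The volume name is stored as the root folder's name in the catalog,
	 * so a volume rename amounts to renaming the root folder in place.
	 */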
	todir_desc.cd_parentcnid = kHFSRootParentID;
	todir_desc.cd_cnid = kHFSRootFolderID;
	todir_desc.cd_flags = CD_ISDIR;

	to_desc.cd_nameptr = (const u_int8_t *)name;
	to_desc.cd_namelen = strlen(name);
	to_desc.cd_parentcnid = kHFSRootParentID;
	to_desc.cd_cnid = cp->c_cnid;
	to_desc.cd_flags = CD_ISDIR;

	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK)) == 0) {
		if ((error = hfs_start_transaction(hfsmp)) == 0) {
			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);

				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);

				/*
				 * If successful, update the name in the VCB and ensure it is
				 * NUL-terminated (strlcpy always terminates).
				 */
				if (!error) {
					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));
				}

				hfs_systemfile_unlock(hfsmp, lockflags);
				cat_postflight(hfsmp, &cookie, p);

				if (error)
					MarkVCBDirty(vcb);
				(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
			}
			hfs_end_transaction(hfsmp);
		}
		if (!error) {
			/* Release old allocated name buffer */
			if (cp->c_desc.cd_flags & CD_HASBUF) {
				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;

				cp->c_desc.cd_nameptr = 0;
				cp->c_desc.cd_namelen = 0;
				cp->c_desc.cd_flags &= ~CD_HASBUF;
				vfs_removename(tmp_name);
			}
			/* Update cnode's catalog descriptor */
			replace_desc(cp, &new_desc);
			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
			cp->c_touch_chgtime = TRUE;
		}

		hfs_unlock(cp);
	}

	return(error);
}

/*
 * Set file system attributes.
 */
static int
hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, vfs_context_t context)
{
	kauth_cred_t cred = vfs_context_ucred(context);
	int error = 0;

	/*
	 * Must be superuser or the owner of the filesystem to change volume attributes.
	 */
	if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
		return(EACCES);

	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
		vnode_t root_vp;

		error = hfs_vfs_root(mp, &root_vp, context);
		if (error)
			goto out;

		error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
		(void) vnode_put(root_vp);
		if (error)
			goto out;

		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
	}

out:
	return error;
}
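
/*
 * Illustrative sketch (not part of this file): the rename path above is
 * normally reached from user space via setattrlist(2) with ATTR_VOL_NAME;
 * the caller must be the superuser or the volume owner (see the check in
 * hfs_vfs_setattr above).  The buffer layout (an attrreference_t followed by
 * the name bytes) follows the setattrlist documentation; the path and the
 * new name below are examples only.
 *
 *	#include <sys/attr.h>
 *	#include <string.h>
 *	#include <unistd.h>
 *
 *	struct {
 *		attrreference_t ref;
 *		char            name[256];
 *	} __attribute__((aligned(4), packed)) buf;
 *
 *	struct attrlist al = {
 *		.bitmapcount = ATTR_BIT_MAP_COUNT,
 *		.volattr     = ATTR_VOL_INFO | ATTR_VOL_NAME,
 *	};
 *
 *	buf.ref.attr_dataoffset = sizeof(attrreference_t);
 *	buf.ref.attr_length     = (u_int32_t)strlen("NewName") + 1;
 *	strlcpy(buf.name, "NewName", sizeof(buf.name));
 *
 *	(void) setattrlist("/Volumes/OldName", &al, &buf,
 *	                   sizeof(attrreference_t) + buf.ref.attr_length, 0);
 */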

/*
 * If runtime corruption is detected, set the volume inconsistent bit in the
 * volume attributes.  This bit is persistent and indicates that the volume
 * is corrupt and needs repair.  It can be set by the kernel when it detects
 * runtime corruption, or by a repair utility such as fsck_hfs when a repair
 * operation fails; it should be cleared only by a verify/repair utility such
 * as fsck_hfs after a successful verify or repair.
 */
void
hfs_mark_volume_inconsistent(struct hfsmount *hfsmp)
{
	HFS_MOUNT_LOCK(hfsmp, TRUE);
	if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
		hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
		MarkVCBDirty(hfsmp);
	}
	/* Log information to ASL log */
	fslog_fs_corrupt(hfsmp->hfs_mp);
	printf("HFS: Runtime corruption detected on %s, fsck will be forced on next mount.\n", hfsmp->vcbVN);
	HFS_MOUNT_UNLOCK(hfsmp, TRUE);
}
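
/*
 * Illustrative only (not part of this file): a typical call site.  The check
 * below is hypothetical; any code path that finds an on-disk structure in an
 * impossible state can flag the volume this way and fail the operation.
 *
 *	if (found_corrupt_catalog_record) {
 *		hfs_mark_volume_inconsistent(hfsmp);
 *		return (EIO);
 *	}
 */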

/*
 * Replay the journal on the device node provided.  Returns zero if the
 * journal replay succeeded, or if there was no journal to replay.
 */
static int
hfs_journal_replay(const char *devnode, vfs_context_t context)
{
	int retval = 0;
	struct vnode *devvp = NULL;
	struct mount *mp = NULL;
	struct hfs_mount_args *args = NULL;

	/* Look up the vnode for the given raw device path */
	retval = vnode_open(devnode, FREAD|FWRITE, 0, 0, &devvp, NULL);
	if (retval) {
		goto out;
	}

	/* Replay allowed only on raw devices */
	if (!vnode_ischr(devvp)) {
		retval = EINVAL;
		goto out;
	}

	/* Create dummy mount structures */
	MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK);
	bzero(mp, sizeof(struct mount));
	mount_lock_init(mp);

	MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK);
	bzero(args, sizeof(struct hfs_mount_args));

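	/*
	 * The non-zero fourth argument asks hfs_mountfs() to replay the journal
	 * only and return without completing a full mount.
	 */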
	retval = hfs_mountfs(devvp, mp, args, 1, context);
	buf_flushdirtyblks(devvp, MNT_WAIT, 0, "hfs_journal_replay");

out:
	if (mp) {
		mount_lock_destroy(mp);
		FREE(mp, M_TEMP);
	}
	if (args) {
		FREE(args, M_TEMP);
	}
	if (devvp) {
		vnode_close(devvp, FREAD|FWRITE, NULL);
	}
	return retval;
}

/*
 * hfs vfs operations.
 */
struct vfsops hfs_vfsops = {
	hfs_mount,
	hfs_start,
	hfs_unmount,
	hfs_vfs_root,
	hfs_quotactl,
	hfs_vfs_getattr,	/* was hfs_statfs */
	hfs_sync,
	hfs_vfs_vget,
	hfs_fhtovp,
	hfs_vptofh,
	hfs_init,
	hfs_sysctl,
	hfs_vfs_setattr,
	{NULL}
};
