1/*
2 * Copyright (c) 1999-2014 Apple Inc. All rights reserved.
3 *
4 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
5 *
6 * This file contains Original Code and/or Modifications of Original Code
7 * as defined in and that are subject to the Apple Public Source License
8 * Version 2.0 (the 'License'). You may not use this file except in
9 * compliance with the License. The rights granted to you under the License
10 * may not be used to create, or enable the creation or redistribution of,
11 * unlawful or unlicensed copies of an Apple operating system, or to
12 * circumvent, violate, or enable the circumvention or violation of, any
13 * terms of an Apple operating system software license agreement.
14 *
15 * Please obtain a copy of the License at
16 * http://www.opensource.apple.com/apsl/ and read it before using this file.
17 *
18 * The Original Code and all software distributed under the License are
19 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
20 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
21 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
23 * Please see the License for the specific language governing rights and
24 * limitations under the License.
25 *
26 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
27 */
28/*
29 * Copyright (c) 1991, 1993, 1994
30 *	The Regents of the University of California.  All rights reserved.
31 * (c) UNIX System Laboratories, Inc.
32 * All or some portions of this file are derived from material licensed
33 * to the University of California by American Telephone and Telegraph
34 * Co. or Unix System Laboratories, Inc. and are reproduced herein with
35 * the permission of UNIX System Laboratories, Inc.
36 *
37 * Redistribution and use in source and binary forms, with or without
38 * modification, are permitted provided that the following conditions
39 * are met:
40 * 1. Redistributions of source code must retain the above copyright
41 *    notice, this list of conditions and the following disclaimer.
42 * 2. Redistributions in binary form must reproduce the above copyright
43 *    notice, this list of conditions and the following disclaimer in the
44 *    documentation and/or other materials provided with the distribution.
45 * 3. All advertising materials mentioning features or use of this software
46 *    must display the following acknowledgement:
47 *	This product includes software developed by the University of
48 *	California, Berkeley and its contributors.
49 * 4. Neither the name of the University nor the names of its contributors
50 *    may be used to endorse or promote products derived from this software
51 *    without specific prior written permission.
52 *
53 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
54 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
55 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
56 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
57 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
58 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
59 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
60 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
61 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
62 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
63 * SUCH DAMAGE.
64 *
65 *      hfs_vfsops.c
66 *  derived from	@(#)ufs_vfsops.c	8.8 (Berkeley) 5/20/95
67 *
68 *      (c) Copyright 1997-2002 Apple Computer, Inc. All rights reserved.
69 *
70 *      hfs_vfsops.c -- VFS layer for loadable HFS file system.
71 *
72 */
73#include <sys/param.h>
74#include <sys/systm.h>
75#include <sys/kauth.h>
76
77#include <sys/ubc.h>
78#include <sys/ubc_internal.h>
79#include <sys/vnode_internal.h>
80#include <sys/mount_internal.h>
81#include <sys/sysctl.h>
82#include <sys/malloc.h>
83#include <sys/stat.h>
84#include <sys/quota.h>
85#include <sys/disk.h>
86#include <sys/paths.h>
87#include <sys/utfconv.h>
88#include <sys/kdebug.h>
89#include <sys/fslog.h>
90#include <sys/ubc.h>
91#include <sys/buf_internal.h>
92
93/* for parsing boot-args */
94#include <pexpert/pexpert.h>
95
96
97#include <kern/locks.h>
98
99#include <vfs/vfs_journal.h>
100
101#include <miscfs/specfs/specdev.h>
102#include <hfs/hfs_mount.h>
103
104#include <libkern/crypto/md5.h>
105#include <uuid/uuid.h>
106
107#include "hfs.h"
108#include "hfs_catalog.h"
109#include "hfs_cnode.h"
110#include "hfs_dbg.h"
111#include "hfs_endian.h"
112#include "hfs_hotfiles.h"
113#include "hfs_quota.h"
114#include "hfs_btreeio.h"
115#include "hfs_kdebug.h"
116
117#include "hfscommon/headers/FileMgrInternal.h"
118#include "hfscommon/headers/BTreesInternal.h"
119
120#if CONFIG_PROTECT
121#include <sys/cprotect.h>
122#endif
123
124#define HFS_MOUNT_DEBUG 1
125
126#if	HFS_DIAGNOSTIC
127int hfs_dbg_all = 0;
128int hfs_dbg_err = 0;
129#endif
130
131/* Enable/disable debugging code for live volume resizing, defined in hfs_resize.c */
132extern int hfs_resize_debug;
133
134lck_grp_attr_t *  hfs_group_attr;
135lck_attr_t *  hfs_lock_attr;
136lck_grp_t *  hfs_mutex_group;
137lck_grp_t *  hfs_rwlock_group;
138lck_grp_t *  hfs_spinlock_group;
139
140extern struct vnodeopv_desc hfs_vnodeop_opv_desc;
141
142#if CONFIG_HFS_STD
143extern struct vnodeopv_desc hfs_std_vnodeop_opv_desc;
144static int hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush);
145#endif
146
147/* not static so we can re-use in hfs_readwrite.c for build_path calls */
148int hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, vfs_context_t context);
149
150static int hfs_changefs(struct mount *mp, struct hfs_mount_args *args);
151static int hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, vfs_context_t context);
152static int hfs_flushfiles(struct mount *, int, struct proc *);
153static int hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp);
154static int hfs_init(struct vfsconf *vfsp);
155static void hfs_locks_destroy(struct hfsmount *hfsmp);
156static int hfs_vfs_root(struct mount *mp, struct vnode **vpp, vfs_context_t context);
157static int hfs_quotactl(struct mount *, int, uid_t, caddr_t, vfs_context_t context);
158static int hfs_start(struct mount *mp, int flags, vfs_context_t context);
159static int hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, vfs_context_t context);
160static int hfs_journal_replay(vnode_t devvp, vfs_context_t context);
161static void hfs_syncer_free(struct hfsmount *hfsmp);
162
163void hfs_initialize_allocator (struct hfsmount *hfsmp);
164int hfs_teardown_allocator (struct hfsmount *hfsmp);
165
166int hfs_mount(struct mount *mp, vnode_t  devvp, user_addr_t data, vfs_context_t context);
167int hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args, int journal_replay_only, vfs_context_t context);
168int hfs_reload(struct mount *mp);
169int hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, vfs_context_t context);
170int hfs_sync(struct mount *mp, int waitfor, vfs_context_t context);
171int hfs_sysctl(int *name, u_int namelen, user_addr_t oldp, size_t *oldlenp,
172                      user_addr_t newp, size_t newlen, vfs_context_t context);
173int hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context);
174
175/*
176 * Called by vfs_mountroot when mounting HFS Plus as root.
177 */
178
179int
180hfs_mountroot(mount_t mp, vnode_t rvp, vfs_context_t context)
181{
182	struct hfsmount *hfsmp;
183	ExtendedVCB *vcb;
184	struct vfsstatfs *vfsp;
185	int error;
186
187	if ((error = hfs_mountfs(rvp, mp, NULL, 0, context))) {
188		if (HFS_MOUNT_DEBUG) {
189			printf("hfs_mountroot: hfs_mountfs returned %d, rvp (%p) name (%s) \n",
190					error, rvp, (rvp->v_name ? rvp->v_name : "unknown device"));
191		}
192		return (error);
193	}
194
195	/* Init hfsmp */
196	hfsmp = VFSTOHFS(mp);
197
198	hfsmp->hfs_uid = UNKNOWNUID;
199	hfsmp->hfs_gid = UNKNOWNGID;
200	hfsmp->hfs_dir_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
201	hfsmp->hfs_file_mask = (S_IRWXU | S_IRGRP|S_IXGRP | S_IROTH|S_IXOTH); /* 0755 */
202
203	/* Establish the free block reserve. */
204	vcb = HFSTOVCB(hfsmp);
205	vcb->reserveBlocks = ((u_int64_t)vcb->totalBlocks * HFS_MINFREE) / 100;
206	vcb->reserveBlocks = MIN(vcb->reserveBlocks, HFS_MAXRESERVE / vcb->blockSize);
207
208	vfsp = vfs_statfs(mp);
209	(void)hfs_statfs(mp, vfsp, NULL);
210
211	/* Invoke ioctl that asks if the underlying device is Core Storage or not */
212	error = VNOP_IOCTL(rvp, _DKIOCCORESTORAGE, NULL, 0, context);
213	if (error == 0) {
214		hfsmp->hfs_flags |= HFS_CS;
215	}
216	return (0);
217}
218
219
220/*
221 * VFS Operations.
222 *
223 * mount system call
224 */
225
226int
227hfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
228{
229	struct proc *p = vfs_context_proc(context);
230	struct hfsmount *hfsmp = NULL;
231	struct hfs_mount_args args;
232	int retval = E_NONE;
233	u_int32_t cmdflags;
234
235	if ((retval = copyin(data, (caddr_t)&args, sizeof(args)))) {
236		if (HFS_MOUNT_DEBUG) {
237			printf("hfs_mount: copyin returned %d for fs\n", retval);
238		}
239		return (retval);
240	}
241	cmdflags = (u_int32_t)vfs_flags(mp) & MNT_CMDFLAGS;
242	if (cmdflags & MNT_UPDATE) {
243		hfsmp = VFSTOHFS(mp);
244
245		/* Reload incore data after an fsck. */
246		if (cmdflags & MNT_RELOAD) {
247			if (vfs_isrdonly(mp)) {
248				int error = hfs_reload(mp);
249				if (error && HFS_MOUNT_DEBUG) {
250					printf("hfs_mount: hfs_reload returned %d on %s \n", error, hfsmp->vcbVN);
251				}
252				return error;
253			}
254			else {
255				if (HFS_MOUNT_DEBUG) {
256					printf("hfs_mount: MNT_RELOAD not supported on rdwr filesystem %s\n", hfsmp->vcbVN);
257				}
258				return (EINVAL);
259			}
260		}
261
262		/* Change to a read-only file system. */
263		if (((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) &&
264		    vfs_isrdonly(mp)) {
265			int flags;
266
267			/* Set flag to indicate that a downgrade to read-only
268			 * is in progress and therefore block any further
269			 * modifications to the file system.
270			 */
271			hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
272			hfsmp->hfs_flags |= HFS_RDONLY_DOWNGRADE;
273			hfsmp->hfs_downgrading_thread = current_thread();
274			hfs_unlock_global (hfsmp);
275			hfs_syncer_free(hfsmp);
276
277			/* use VFS_SYNC to push out System (btree) files */
278			retval = VFS_SYNC(mp, MNT_WAIT, context);
279			if (retval && ((cmdflags & MNT_FORCE) == 0)) {
280				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
281				hfsmp->hfs_downgrading_thread = NULL;
282				if (HFS_MOUNT_DEBUG) {
283					printf("hfs_mount: VFS_SYNC returned %d during b-tree sync of %s \n", retval, hfsmp->vcbVN);
284				}
285				goto out;
286			}
287
288			flags = WRITECLOSE;
289			if (cmdflags & MNT_FORCE)
290				flags |= FORCECLOSE;
291
292			if ((retval = hfs_flushfiles(mp, flags, p))) {
293				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
294				hfsmp->hfs_downgrading_thread = NULL;
295				if (HFS_MOUNT_DEBUG) {
296					printf("hfs_mount: hfs_flushfiles returned %d on %s \n", retval, hfsmp->vcbVN);
297				}
298				goto out;
299			}
300
301			/* mark the volume cleanly unmounted */
302			hfsmp->vcbAtrb |= kHFSVolumeUnmountedMask;
303			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
304			hfsmp->hfs_flags |= HFS_READ_ONLY;
305
306			/*
307			 * Close down the journal.
308			 *
309			 * NOTE: It is critically important to close down the journal
310			 * and have it issue all pending I/O prior to calling VNOP_FSYNC below.
311			 * In a journaled environment it is expected that the journal be
312			 * the only actor permitted to issue I/O for metadata blocks in HFS.
313			 * If we were to call VNOP_FSYNC prior to closing down the journal,
314			 * we would inadvertantly issue (and wait for) the I/O we just
315			 * initiated above as part of the flushvolumeheader call.
316			 *
317			 * To avoid this, we follow the same order of operations as in
318			 * unmount and issue the journal_close prior to calling VNOP_FSYNC.
319			 */
320
321			if (hfsmp->jnl) {
322				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
323
324			    journal_close(hfsmp->jnl);
325			    hfsmp->jnl = NULL;
326
327			    // Note: we explicitly don't want to shutdown
328			    //       access to the jvp because we may need
329			    //       it later if we go back to being read-write.
330
331				hfs_unlock_global (hfsmp);
332
333                vfs_clearflags(hfsmp->hfs_mp, MNT_JOURNALED);
334			}
335
336			/*
337			 * Write out any pending I/O still outstanding against the device node
338			 * now that the journal has been closed.
339			 */
340			if (retval == 0) {
341				vnode_get(hfsmp->hfs_devvp);
342				retval = VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
343				vnode_put(hfsmp->hfs_devvp);
344			}
345
346			if (retval) {
347				if (HFS_MOUNT_DEBUG) {
348					printf("hfs_mount: FSYNC on devvp returned %d for fs %s\n", retval, hfsmp->vcbVN);
349				}
350				hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
351				hfsmp->hfs_downgrading_thread = NULL;
352				hfsmp->hfs_flags &= ~HFS_READ_ONLY;
353				goto out;
354			}
355
356			if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
357				if (hfsmp->hfs_summary_table) {
358					int err = 0;
359					/*
360					 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
361					 */
362					if (hfsmp->hfs_allocation_vp) {
363						err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
364					}
365					FREE (hfsmp->hfs_summary_table, M_TEMP);
366					hfsmp->hfs_summary_table = NULL;
367					hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
368					if (err == 0 && hfsmp->hfs_allocation_vp){
369						hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
370					}
371				}
372			}
373
374			hfsmp->hfs_downgrading_thread = NULL;
375		}
376
377		/* Change to a writable file system. */
378		if (vfs_iswriteupgrade(mp)) {
379			/*
380			 * On inconsistent disks, do not allow read-write mount
381			 * unless it is the boot volume being mounted.
382			 */
383			if (!(vfs_flags(mp) & MNT_ROOTFS) &&
384					(hfsmp->vcbAtrb & kHFSVolumeInconsistentMask)) {
385				if (HFS_MOUNT_DEBUG) {
386					printf("hfs_mount: attempting to mount inconsistent non-root volume %s\n",  (hfsmp->vcbVN));
387				}
388				retval = EINVAL;
389				goto out;
390			}
391
392			// If the journal was shut-down previously because we were
393			// asked to be read-only, let's start it back up again now
394
395			if (   (HFSTOVCB(hfsmp)->vcbAtrb & kHFSVolumeJournaledMask)
396			    && hfsmp->jnl == NULL
397			    && hfsmp->jvp != NULL) {
398			    int jflags;
399
400			    if (hfsmp->hfs_flags & HFS_NEED_JNL_RESET) {
401					jflags = JOURNAL_RESET;
402				} else {
403					jflags = 0;
404				}
405
406				hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
407
408				/* We provide the mount point twice here: The first is used as
409				 * an opaque argument to be passed back when hfs_sync_metadata
410				 * is called.  The second is provided to the throttling code to
411				 * indicate which mount's device should be used when accounting
412				 * for metadata writes.
413				 */
414				hfsmp->jnl = journal_open(hfsmp->jvp,
415						(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset,
416						hfsmp->jnl_size,
417						hfsmp->hfs_devvp,
418						hfsmp->hfs_logical_block_size,
419						jflags,
420						0,
421						hfs_sync_metadata, hfsmp->hfs_mp,
422						hfsmp->hfs_mp);
423
424				/*
425				 * Set up the trim callback function so that we can add
426				 * recently freed extents to the free extent cache once
427				 * the transaction that freed them is written to the
428				 * journal on disk.
429				 */
430				if (hfsmp->jnl)
431					journal_trim_set_callback(hfsmp->jnl, hfs_trim_callback, hfsmp);
432
433				hfs_unlock_global (hfsmp);
434
435				if (hfsmp->jnl == NULL) {
436					if (HFS_MOUNT_DEBUG) {
437						printf("hfs_mount: journal_open == NULL; couldn't be opened on %s \n", (hfsmp->vcbVN));
438					}
439					retval = EINVAL;
440					goto out;
441				} else {
442					hfsmp->hfs_flags &= ~HFS_NEED_JNL_RESET;
443                    vfs_setflags(hfsmp->hfs_mp, MNT_JOURNALED);
444				}
445			}
446
447			/* See if we need to erase unused Catalog nodes due to <rdar://problem/6947811>. */
448			retval = hfs_erase_unused_nodes(hfsmp);
449			if (retval != E_NONE) {
450				if (HFS_MOUNT_DEBUG) {
451					printf("hfs_mount: hfs_erase_unused_nodes returned %d for fs %s\n", retval, hfsmp->vcbVN);
452				}
453				goto out;
454			}
455
456			/* If this mount point was downgraded from read-write
457			 * to read-only, clear that information as we are now
458			 * moving back to read-write.
459			 */
460			hfsmp->hfs_flags &= ~HFS_RDONLY_DOWNGRADE;
461			hfsmp->hfs_downgrading_thread = NULL;
462
463			/* mark the volume dirty (clear clean unmount bit) */
464			hfsmp->vcbAtrb &= ~kHFSVolumeUnmountedMask;
465
466			retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
467			if (retval != E_NONE) {
468				if (HFS_MOUNT_DEBUG) {
469					printf("hfs_mount: hfs_flushvolumeheader returned %d for fs %s\n", retval, hfsmp->vcbVN);
470				}
471				goto out;
472			}
473
474			/* Only clear HFS_READ_ONLY after a successful write */
475			hfsmp->hfs_flags &= ~HFS_READ_ONLY;
476
477
478			if (!(hfsmp->hfs_flags & (HFS_READ_ONLY | HFS_STANDARD))) {
479				/* Setup private/hidden directories for hardlinks. */
480				hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
481				hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
482
483				hfs_remove_orphans(hfsmp);
484
485				/*
486				 * Allow hot file clustering if conditions allow.
487				 */
488				if ((hfsmp->hfs_flags & HFS_METADATA_ZONE) &&
489					   ((hfsmp->hfs_mp->mnt_kern_flag & MNTK_SSD) == 0))	{
490					(void) hfs_recording_init(hfsmp);
491				}
492				/* Force ACLs on HFS+ file systems. */
493				if (vfs_extendedsecurity(HFSTOVFS(hfsmp)) == 0) {
494					vfs_setextendedsecurity(HFSTOVFS(hfsmp));
495				}
496			}
497		}
498
499		/* Update file system parameters. */
500		retval = hfs_changefs(mp, &args);
501		if (retval &&  HFS_MOUNT_DEBUG) {
502			printf("hfs_mount: hfs_changefs returned %d for %s\n", retval, hfsmp->vcbVN);
503		}
504
505	} else /* not an update request */ {
506
507		/* Set the mount flag to indicate that we support volfs  */
508		vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DOVOLFS));
509
510		retval = hfs_mountfs(devvp, mp, &args, 0, context);
511		if (retval) {
512			const char *name = vnode_getname(devvp);
513			printf("hfs_mount: hfs_mountfs returned error=%d for device %s\n", retval, (name ? name : "unknown-dev"));
514			if (name) {
515				vnode_putname(name);
516			}
517			goto out;
518		}
519
520		/* After hfs_mountfs succeeds, we should have valid hfsmp */
521		hfsmp = VFSTOHFS(mp);
522
523		/*
524		 * Check to see if the file system exists on CoreStorage.
525		 *
526		 * This must be done after examining the root folder's CP EA since
527		 * hfs_vfs_root will create a vnode (which must not occur until after
528		 * we've established the CP level of the FS).
529		 */
530		if (retval == 0) {
531			errno_t err;
532			/* Invoke ioctl that asks if the underlying device is Core Storage or not */
533			err = VNOP_IOCTL(devvp, _DKIOCCORESTORAGE, NULL, 0, context);
534			if (err == 0) {
535				hfsmp->hfs_flags |= HFS_CS;
536			}
537		}
538	}
539
540out:
541	if (retval == 0) {
542		(void)hfs_statfs(mp, vfs_statfs(mp), context);
543	}
544	return (retval);
545}
546
547
548struct hfs_changefs_cargs {
549	struct hfsmount *hfsmp;
550        int		namefix;
551        int		permfix;
552        int		permswitch;
553};
554
555static int
556hfs_changefs_callback(struct vnode *vp, void *cargs)
557{
558	ExtendedVCB *vcb;
559	struct cnode *cp;
560	struct cat_desc cndesc;
561	struct cat_attr cnattr;
562	struct hfs_changefs_cargs *args;
563	int lockflags;
564	int error;
565
566	args = (struct hfs_changefs_cargs *)cargs;
567
568	cp = VTOC(vp);
569	vcb = HFSTOVCB(args->hfsmp);
570
571	lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
572	error = cat_lookup(args->hfsmp, &cp->c_desc, 0, 0, &cndesc, &cnattr, NULL, NULL);
573	hfs_systemfile_unlock(args->hfsmp, lockflags);
574	if (error) {
575	        /*
576		 * If we couldn't find this guy skip to the next one
577		 */
578	        if (args->namefix)
579		        cache_purge(vp);
580
581		return (VNODE_RETURNED);
582	}
583	/*
584	 * Get the real uid/gid and perm mask from disk.
585	 */
586	if (args->permswitch || args->permfix) {
587	        cp->c_uid = cnattr.ca_uid;
588		cp->c_gid = cnattr.ca_gid;
589		cp->c_mode = cnattr.ca_mode;
590	}
591	/*
592	 * If we're switching name converters then...
593	 *   Remove the existing entry from the namei cache.
594	 *   Update name to one based on new encoder.
595	 */
596	if (args->namefix) {
597	        cache_purge(vp);
598		replace_desc(cp, &cndesc);
599
600		if (cndesc.cd_cnid == kHFSRootFolderID) {
601		        strlcpy((char *)vcb->vcbVN, (const char *)cp->c_desc.cd_nameptr, NAME_MAX+1);
602			cp->c_desc.cd_encoding = args->hfsmp->hfs_encoding;
603		}
604	} else {
605	        cat_releasedesc(&cndesc);
606	}
607	return (VNODE_RETURNED);
608}
609
610/* Change fs mount parameters */
611static int
612hfs_changefs(struct mount *mp, struct hfs_mount_args *args)
613{
614	int retval = 0;
615	int namefix, permfix, permswitch;
616	struct hfsmount *hfsmp;
617	ExtendedVCB *vcb;
618	struct hfs_changefs_cargs cargs;
619	u_int32_t mount_flags;
620
621#if CONFIG_HFS_STD
622	u_int32_t old_encoding = 0;
623	hfs_to_unicode_func_t	get_unicode_func;
624	unicode_to_hfs_func_t	get_hfsname_func;
625#endif
626
627	hfsmp = VFSTOHFS(mp);
628	vcb = HFSTOVCB(hfsmp);
629	mount_flags = (unsigned int)vfs_flags(mp);
630
631	hfsmp->hfs_flags |= HFS_IN_CHANGEFS;
632
633	permswitch = (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) &&
634	               ((mount_flags & MNT_UNKNOWNPERMISSIONS) == 0)) ||
635	              (((hfsmp->hfs_flags & HFS_UNKNOWN_PERMS) == 0) &&
636	               (mount_flags & MNT_UNKNOWNPERMISSIONS)));
637
638	/* The root filesystem must operate with actual permissions: */
639	if (permswitch && (mount_flags & MNT_ROOTFS) && (mount_flags & MNT_UNKNOWNPERMISSIONS)) {
640		vfs_clearflags(mp, (u_int64_t)((unsigned int)MNT_UNKNOWNPERMISSIONS));	/* Just say "No". */
641		retval = EINVAL;
642		goto exit;
643	}
644	if (mount_flags & MNT_UNKNOWNPERMISSIONS)
645		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
646	else
647		hfsmp->hfs_flags &= ~HFS_UNKNOWN_PERMS;
648
649	namefix = permfix = 0;
650
651	/*
652	 * Tracking of hot files requires up-to-date access times.  So if
653	 * access time updates are disabled, we must also disable hot files.
654	 */
655	if (mount_flags & MNT_NOATIME) {
656		(void) hfs_recording_suspend(hfsmp);
657	}
658
659	/* Change the timezone (Note: this affects all hfs volumes and hfs+ volume create dates) */
660	if (args->hfs_timezone.tz_minuteswest != VNOVAL) {
661		gTimeZone = args->hfs_timezone;
662	}
663
664	/* Change the default uid, gid and/or mask */
665	if ((args->hfs_uid != (uid_t)VNOVAL) && (hfsmp->hfs_uid != args->hfs_uid)) {
666		hfsmp->hfs_uid = args->hfs_uid;
667		if (vcb->vcbSigWord == kHFSPlusSigWord)
668			++permfix;
669	}
670	if ((args->hfs_gid != (gid_t)VNOVAL) && (hfsmp->hfs_gid != args->hfs_gid)) {
671		hfsmp->hfs_gid = args->hfs_gid;
672		if (vcb->vcbSigWord == kHFSPlusSigWord)
673			++permfix;
674	}
675	if (args->hfs_mask != (mode_t)VNOVAL) {
676		if (hfsmp->hfs_dir_mask != (args->hfs_mask & ALLPERMS)) {
677			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
678			hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
679			if ((args->flags != VNOVAL) && (args->flags & HFSFSMNT_NOXONFILES))
680				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
681			if (vcb->vcbSigWord == kHFSPlusSigWord)
682				++permfix;
683		}
684	}
685
686#if CONFIG_HFS_STD
687	/* Change the hfs encoding value (hfs only) */
688	if ((vcb->vcbSigWord == kHFSSigWord)	&&
689	    (args->hfs_encoding != (u_int32_t)VNOVAL)              &&
690	    (hfsmp->hfs_encoding != args->hfs_encoding)) {
691
692		retval = hfs_getconverter(args->hfs_encoding, &get_unicode_func, &get_hfsname_func);
693		if (retval)
694			goto exit;
695
696		/*
697		 * Connect the new hfs_get_unicode converter but leave
698		 * the old hfs_get_hfsname converter in place so that
699		 * we can lookup existing vnodes to get their correctly
700		 * encoded names.
701		 *
702		 * When we're all finished, we can then connect the new
703		 * hfs_get_hfsname converter and release our interest
704		 * in the old converters.
705		 */
706		hfsmp->hfs_get_unicode = get_unicode_func;
707		old_encoding = hfsmp->hfs_encoding;
708		hfsmp->hfs_encoding = args->hfs_encoding;
709		++namefix;
710	}
711#endif
712
713	if (!(namefix || permfix || permswitch))
714		goto exit;
715
716	/* XXX 3762912 hack to support HFS filesystem 'owner' */
717	if (permfix)
718		vfs_setowner(mp,
719		    hfsmp->hfs_uid == UNKNOWNUID ? KAUTH_UID_NONE : hfsmp->hfs_uid,
720		    hfsmp->hfs_gid == UNKNOWNGID ? KAUTH_GID_NONE : hfsmp->hfs_gid);
721
722	/*
723	 * For each active vnode fix things that changed
724	 *
725	 * Note that we can visit a vnode more than once
726	 * and we can race with fsync.
727	 *
728	 * hfs_changefs_callback will be called for each vnode
729	 * hung off of this mount point
730	 *
731	 * The vnode will be properly referenced and unreferenced
732	 * around the callback
733	 */
734	cargs.hfsmp = hfsmp;
735	cargs.namefix = namefix;
736	cargs.permfix = permfix;
737	cargs.permswitch = permswitch;
738
739	vnode_iterate(mp, 0, hfs_changefs_callback, (void *)&cargs);
740
741#if CONFIG_HFS_STD
742	/*
743	 * If we're switching name converters we can now
744	 * connect the new hfs_get_hfsname converter and
745	 * release our interest in the old converters.
746	 */
747	if (namefix) {
748		/* HFS standard only */
749		hfsmp->hfs_get_hfsname = get_hfsname_func;
750		vcb->volumeNameEncodingHint = args->hfs_encoding;
751		(void) hfs_relconverter(old_encoding);
752	}
753#endif
754
755exit:
756	hfsmp->hfs_flags &= ~HFS_IN_CHANGEFS;
757	return (retval);
758}
759
760
761struct hfs_reload_cargs {
762	struct hfsmount *hfsmp;
763        int		error;
764};
765
766static int
767hfs_reload_callback(struct vnode *vp, void *cargs)
768{
769	struct cnode *cp;
770	struct hfs_reload_cargs *args;
771	int lockflags;
772
773	args = (struct hfs_reload_cargs *)cargs;
774	/*
775	 * flush all the buffers associated with this node
776	 */
777	(void) buf_invalidateblks(vp, 0, 0, 0);
778
779	cp = VTOC(vp);
780	/*
781	 * Remove any directory hints
782	 */
783	if (vnode_isdir(vp))
784	        hfs_reldirhints(cp, 0);
785
786	/*
787	 * Re-read cnode data for all active vnodes (non-metadata files).
788	 */
789	if (!vnode_issystem(vp) && !VNODE_IS_RSRC(vp) && (cp->c_fileid >= kHFSFirstUserCatalogNodeID)) {
790	        struct cat_fork *datafork;
791		struct cat_desc desc;
792
793		datafork = cp->c_datafork ? &cp->c_datafork->ff_data : NULL;
794
795		/* lookup by fileID since name could have changed */
796		lockflags = hfs_systemfile_lock(args->hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
797		args->error = cat_idlookup(args->hfsmp, cp->c_fileid, 0, 0, &desc, &cp->c_attr, datafork);
798		hfs_systemfile_unlock(args->hfsmp, lockflags);
799		if (args->error) {
800		        return (VNODE_RETURNED_DONE);
801		}
802
803		/* update cnode's catalog descriptor */
804		(void) replace_desc(cp, &desc);
805	}
806	return (VNODE_RETURNED);
807}
808
809/*
810 * Reload all incore data for a filesystem (used after running fsck on
811 * the root filesystem and finding things to fix). The filesystem must
812 * be mounted read-only.
813 *
814 * Things to do to update the mount:
815 *	invalidate all cached meta-data.
816 *	invalidate all inactive vnodes.
817 *	invalidate all cached file data.
818 *	re-read volume header from disk.
819 *	re-load meta-file info (extents, file size).
820 *	re-load B-tree header data.
821 *	re-read cnode data for all active vnodes.
822 */
823int
824hfs_reload(struct mount *mountp)
825{
826	register struct vnode *devvp;
827	struct buf *bp;
828	int error, i;
829	struct hfsmount *hfsmp;
830	struct HFSPlusVolumeHeader *vhp;
831	ExtendedVCB *vcb;
832	struct filefork *forkp;
833    	struct cat_desc cndesc;
834	struct hfs_reload_cargs args;
835	daddr64_t priIDSector;
836
837    	hfsmp = VFSTOHFS(mountp);
838	vcb = HFSTOVCB(hfsmp);
839
840	if (vcb->vcbSigWord == kHFSSigWord)
841		return (EINVAL);	/* rooting from HFS is not supported! */
842
843	/*
844	 * Invalidate all cached meta-data.
845	 */
846	devvp = hfsmp->hfs_devvp;
847	if (buf_invalidateblks(devvp, 0, 0, 0))
848		panic("hfs_reload: dirty1");
849
850	args.hfsmp = hfsmp;
851	args.error = 0;
852	/*
853	 * hfs_reload_callback will be called for each vnode
854	 * hung off of this mount point that can't be recycled...
855	 * vnode_iterate will recycle those that it can (the VNODE_RELOAD option)
856	 * the vnode will be in an 'unbusy' state (VNODE_WAIT) and
857	 * properly referenced and unreferenced around the callback
858	 */
859	vnode_iterate(mountp, VNODE_RELOAD | VNODE_WAIT, hfs_reload_callback, (void *)&args);
860
861	if (args.error)
862	        return (args.error);
863
864	/*
865	 * Re-read VolumeHeader from disk.
866	 */
867	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
868			HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
869
870	error = (int)buf_meta_bread(hfsmp->hfs_devvp,
871			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
872			hfsmp->hfs_physical_block_size, NOCRED, &bp);
873	if (error) {
874        	if (bp != NULL)
875        		buf_brelse(bp);
876		return (error);
877	}
878
879	vhp = (HFSPlusVolumeHeader *) (buf_dataptr(bp) + HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
880
881	/* Do a quick sanity check */
882	if ((SWAP_BE16(vhp->signature) != kHFSPlusSigWord &&
883	     SWAP_BE16(vhp->signature) != kHFSXSigWord) ||
884	    (SWAP_BE16(vhp->version) != kHFSPlusVersion &&
885	     SWAP_BE16(vhp->version) != kHFSXVersion) ||
886	    SWAP_BE32(vhp->blockSize) != vcb->blockSize) {
887		buf_brelse(bp);
888		return (EIO);
889	}
890
891	vcb->vcbLsMod		= to_bsd_time(SWAP_BE32(vhp->modifyDate));
892	vcb->vcbAtrb		= SWAP_BE32 (vhp->attributes);
893	vcb->vcbJinfoBlock  = SWAP_BE32(vhp->journalInfoBlock);
894	vcb->vcbClpSiz		= SWAP_BE32 (vhp->rsrcClumpSize);
895	vcb->vcbNxtCNID		= SWAP_BE32 (vhp->nextCatalogID);
896	vcb->vcbVolBkUp		= to_bsd_time(SWAP_BE32(vhp->backupDate));
897	vcb->vcbWrCnt		= SWAP_BE32 (vhp->writeCount);
898	vcb->vcbFilCnt		= SWAP_BE32 (vhp->fileCount);
899	vcb->vcbDirCnt		= SWAP_BE32 (vhp->folderCount);
900	HFS_UPDATE_NEXT_ALLOCATION(vcb, SWAP_BE32 (vhp->nextAllocation));
901	vcb->totalBlocks	= SWAP_BE32 (vhp->totalBlocks);
902	vcb->freeBlocks		= SWAP_BE32 (vhp->freeBlocks);
903	vcb->encodingsBitmap	= SWAP_BE64 (vhp->encodingsBitmap);
904	bcopy(vhp->finderInfo, vcb->vcbFndrInfo, sizeof(vhp->finderInfo));
905	vcb->localCreateDate	= SWAP_BE32 (vhp->createDate); /* hfs+ create date is in local time */
906
907	/*
908	 * Re-load meta-file vnode data (extent info, file size, etc).
909	 */
910	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
911	for (i = 0; i < kHFSPlusExtentDensity; i++) {
912		forkp->ff_extents[i].startBlock =
913			SWAP_BE32 (vhp->extentsFile.extents[i].startBlock);
914		forkp->ff_extents[i].blockCount =
915			SWAP_BE32 (vhp->extentsFile.extents[i].blockCount);
916	}
917	forkp->ff_size      = SWAP_BE64 (vhp->extentsFile.logicalSize);
918	forkp->ff_blocks    = SWAP_BE32 (vhp->extentsFile.totalBlocks);
919	forkp->ff_clumpsize = SWAP_BE32 (vhp->extentsFile.clumpSize);
920
921
922	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
923	for (i = 0; i < kHFSPlusExtentDensity; i++) {
924		forkp->ff_extents[i].startBlock	=
925			SWAP_BE32 (vhp->catalogFile.extents[i].startBlock);
926		forkp->ff_extents[i].blockCount	=
927			SWAP_BE32 (vhp->catalogFile.extents[i].blockCount);
928	}
929	forkp->ff_size      = SWAP_BE64 (vhp->catalogFile.logicalSize);
930	forkp->ff_blocks    = SWAP_BE32 (vhp->catalogFile.totalBlocks);
931	forkp->ff_clumpsize = SWAP_BE32 (vhp->catalogFile.clumpSize);
932
933	if (hfsmp->hfs_attribute_vp) {
934		forkp = VTOF(hfsmp->hfs_attribute_vp);
935		for (i = 0; i < kHFSPlusExtentDensity; i++) {
936			forkp->ff_extents[i].startBlock	=
937				SWAP_BE32 (vhp->attributesFile.extents[i].startBlock);
938			forkp->ff_extents[i].blockCount	=
939				SWAP_BE32 (vhp->attributesFile.extents[i].blockCount);
940		}
941		forkp->ff_size      = SWAP_BE64 (vhp->attributesFile.logicalSize);
942		forkp->ff_blocks    = SWAP_BE32 (vhp->attributesFile.totalBlocks);
943		forkp->ff_clumpsize = SWAP_BE32 (vhp->attributesFile.clumpSize);
944	}
945
946	forkp = VTOF((struct vnode *)vcb->allocationsRefNum);
947	for (i = 0; i < kHFSPlusExtentDensity; i++) {
948		forkp->ff_extents[i].startBlock	=
949			SWAP_BE32 (vhp->allocationFile.extents[i].startBlock);
950		forkp->ff_extents[i].blockCount	=
951			SWAP_BE32 (vhp->allocationFile.extents[i].blockCount);
952	}
953	forkp->ff_size      = SWAP_BE64 (vhp->allocationFile.logicalSize);
954	forkp->ff_blocks    = SWAP_BE32 (vhp->allocationFile.totalBlocks);
955	forkp->ff_clumpsize = SWAP_BE32 (vhp->allocationFile.clumpSize);
956
957	buf_brelse(bp);
958	vhp = NULL;
959
960	/*
961	 * Re-load B-tree header data
962	 */
963	forkp = VTOF((struct vnode *)vcb->extentsRefNum);
964	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
965		return (error);
966
967	forkp = VTOF((struct vnode *)vcb->catalogRefNum);
968	if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
969		return (error);
970
971	if (hfsmp->hfs_attribute_vp) {
972		forkp = VTOF(hfsmp->hfs_attribute_vp);
973		if ( (error = MacToVFSError( BTReloadData((FCB*)forkp) )) )
974			return (error);
975	}
976
977	/* Reload the volume name */
978	if ((error = cat_idlookup(hfsmp, kHFSRootFolderID, 0, 0, &cndesc, NULL, NULL)))
979		return (error);
980	vcb->volumeNameEncodingHint = cndesc.cd_encoding;
981	bcopy(cndesc.cd_nameptr, vcb->vcbVN, min(255, cndesc.cd_namelen));
982	cat_releasedesc(&cndesc);
983
984	/* Re-establish private/hidden directories. */
985	hfs_privatedir_init(hfsmp, FILE_HARDLINKS);
986	hfs_privatedir_init(hfsmp, DIR_HARDLINKS);
987
988	/* In case any volume information changed to trigger a notification */
989	hfs_generate_volume_notifications(hfsmp);
990
991	return (0);
992}
993
994__unused
995static uint64_t tv_to_usecs(struct timeval *tv)
996{
997	return tv->tv_sec * 1000000ULL + tv->tv_usec;
998}
999
1000// Returns TRUE if b - a >= usecs
1001static boolean_t hfs_has_elapsed (const struct timeval *a,
1002                                  const struct timeval *b,
1003                                  uint64_t usecs)
1004{
1005    struct timeval diff;
1006    timersub(b, a, &diff);
1007    return diff.tv_sec * 1000000ULL + diff.tv_usec >= usecs;
1008}
1009
1010static void
1011hfs_syncer(void *arg0, void *unused)
1012{
1013#pragma unused(unused)
1014
1015    struct hfsmount *hfsmp = arg0;
1016    struct timeval   now;
1017
1018    microuptime(&now);
1019
1020    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_START, hfsmp,
1021                          tv_to_usecs(&now),
1022                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
1023                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
1024
1025    hfs_syncer_lock(hfsmp);
1026
1027    if (!hfsmp->hfs_syncer) {
1028        // hfs_unmount is waiting for us leave now and let it do the sync
1029        hfsmp->hfs_sync_incomplete = FALSE;
1030        hfs_syncer_unlock(hfsmp);
1031        hfs_syncer_wakeup(hfsmp);
1032        return;
1033    }
1034
1035    /* Check to see whether we should flush now: either the oldest is
1036       > HFS_MAX_META_DELAY or HFS_META_DELAY has elapsed since the
1037       request and there are no pending writes. */
1038
1039    boolean_t flush_now = FALSE;
1040
1041    if (hfs_has_elapsed(&hfsmp->hfs_sync_req_oldest, &now, HFS_MAX_META_DELAY))
1042        flush_now = TRUE;
1043    else if (!hfsmp->hfs_mp->mnt_pending_write_size) {
1044        /* N.B. accessing mnt_last_write_completed_timestamp is not thread safe, but
1045           it won't matter for what we're using it for. */
1046        if (hfs_has_elapsed(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp,
1047                            &now,
1048                            HFS_META_DELAY)) {
1049            flush_now = TRUE;
1050        }
1051    }
1052
1053    if (!flush_now) {
1054        thread_call_t syncer = hfsmp->hfs_syncer;
1055
1056        hfs_syncer_unlock(hfsmp);
1057
1058        hfs_syncer_queue(syncer);
1059
1060        return;
1061    }
1062
1063    timerclear(&hfsmp->hfs_sync_req_oldest);
1064
1065    hfs_syncer_unlock(hfsmp);
1066
1067    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_START,
1068                          tv_to_usecs(&now),
1069                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
1070                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
1071                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
1072
1073    if (hfsmp->hfs_syncer_thread) {
1074        printf("hfs: syncer already running!\n");
1075		return;
1076	}
1077
1078    hfsmp->hfs_syncer_thread = current_thread();
1079
1080    if (hfs_start_transaction(hfsmp) != 0)    // so we hold off any new writes
1081        goto out;
1082
1083    /*
1084     * We intentionally do a synchronous flush (of the journal or entire volume) here.
1085     * For journaled volumes, this means we wait until the metadata blocks are written
1086     * to both the journal and their final locations (in the B-trees, etc.).
1087     *
1088     * This tends to avoid interleaving the metadata writes with other writes (for
1089     * example, user data, or to the journal when a later transaction notices that
1090     * an earlier transaction has finished its async writes, and then updates the
1091     * journal start in the journal header).  Avoiding interleaving of writes is
1092     * very good for performance on simple flash devices like SD cards, thumb drives;
1093     * and on devices like floppies.  Since removable devices tend to be this kind of
1094     * simple device, doing a synchronous flush actually improves performance in
1095     * practice.
1096     *
1097     * NOTE: For non-journaled volumes, the call to hfs_sync will also cause dirty
1098     * user data to be written.
1099     */
1100    if (hfsmp->jnl) {
1101        hfs_journal_flush(hfsmp, TRUE);
1102    } else {
1103        hfs_sync(hfsmp->hfs_mp, MNT_WAIT, vfs_context_kernel());
1104    }
1105
1106    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER_TIMED | DBG_FUNC_END,
1107                          (microuptime(&now), tv_to_usecs(&now)),
1108                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_completed_timestamp),
1109                          tv_to_usecs(&hfsmp->hfs_mp->mnt_last_write_issued_timestamp),
1110                          hfsmp->hfs_mp->mnt_pending_write_size, 0);
1111
1112    hfs_end_transaction(hfsmp);
1113
1114out:
1115
1116    hfsmp->hfs_syncer_thread = NULL;
1117
1118    hfs_syncer_lock(hfsmp);
1119
1120    // If hfs_unmount lets us and we missed a sync, schedule again
1121    if (hfsmp->hfs_syncer && timerisset(&hfsmp->hfs_sync_req_oldest)) {
1122        thread_call_t syncer = hfsmp->hfs_syncer;
1123
1124        hfs_syncer_unlock(hfsmp);
1125
1126        hfs_syncer_queue(syncer);
1127    } else {
1128        hfsmp->hfs_sync_incomplete = FALSE;
1129        hfs_syncer_unlock(hfsmp);
1130        hfs_syncer_wakeup(hfsmp);
1131    }
1132
1133    /* BE CAREFUL WHAT YOU ADD HERE: at this point hfs_unmount is free
1134       to continue and therefore hfsmp might be invalid. */
1135
1136    KERNEL_DEBUG_CONSTANT(HFSDBG_SYNCER | DBG_FUNC_END, 0, 0, 0, 0, 0);
1137}
1138
1139
1140extern int IOBSDIsMediaEjectable( const char *cdev_name );
1141
1142/*
1143 * Call into the allocator code and perform a full scan of the bitmap file.
1144 *
1145 * This allows us to TRIM unallocated ranges if needed, and also to build up
1146 * an in-memory summary table of the state of the allocated blocks.
1147 */
1148void hfs_scan_blocks (struct hfsmount *hfsmp) {
1149	/*
1150	 * Take the allocation file lock.  Journal transactions will block until
1151	 * we're done here.
1152	 */
1153
1154	int flags = hfs_systemfile_lock(hfsmp, SFL_BITMAP, HFS_EXCLUSIVE_LOCK);
1155
1156	/*
1157	 * We serialize here with the HFS mount lock as we're mounting.
1158	 *
1159	 * The mount can only proceed once this thread has acquired the bitmap
1160	 * lock, since we absolutely do not want someone else racing in and
1161	 * getting the bitmap lock, doing a read/write of the bitmap file,
1162	 * then us getting the bitmap lock.
1163	 *
1164	 * To prevent this, the mount thread takes the HFS mount mutex, starts us
1165	 * up, then immediately msleeps on the scan_var variable in the mount
1166	 * point as a condition variable.  This serialization is safe since
1167	 * if we race in and try to proceed while they're still holding the lock,
1168	 * we'll block trying to acquire the global lock.  Since the mount thread
1169	 * acquires the HFS mutex before starting this function in a new thread,
1170	 * any lock acquisition on our part must be linearizably AFTER the mount thread's.
1171	 *
1172	 * Note that the HFS mount mutex is always taken last, and always for only
1173	 * a short time.  In this case, we just take it long enough to mark the
1174	 * scan-in-flight bit.
1175	 */
1176	(void) hfs_lock_mount (hfsmp);
1177	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_INFLIGHT;
1178	wakeup((caddr_t) &hfsmp->scan_var);
1179	hfs_unlock_mount (hfsmp);
1180
1181	/* Initialize the summary table */
1182	if (hfs_init_summary (hfsmp)) {
1183		printf("hfs: could not initialize summary table for %s\n", hfsmp->vcbVN);
1184	}
1185
1186	/*
1187	 * ScanUnmapBlocks assumes that the bitmap lock is held when you
1188	 * call the function. We don't care if there were any errors issuing unmaps.
1189	 *
1190	 * It will also attempt to build up the summary table for subsequent
1191	 * allocator use, as configured.
1192	 */
1193	(void) ScanUnmapBlocks(hfsmp);
1194
1195	hfsmp->scan_var |= HFS_ALLOCATOR_SCAN_COMPLETED;
1196
1197	hfs_systemfile_unlock(hfsmp, flags);
1198}
1199
1200static int hfs_root_unmounted_cleanly = 0;
1201
1202SYSCTL_DECL(_vfs_generic);
1203SYSCTL_INT(_vfs_generic, OID_AUTO, root_unmounted_cleanly, CTLFLAG_RD, &hfs_root_unmounted_cleanly, 0, "Root filesystem was unmounted cleanly");
1204
1205/*
1206 * Common code for mount and mountroot
1207 */
1208int
1209hfs_mountfs(struct vnode *devvp, struct mount *mp, struct hfs_mount_args *args,
1210            int journal_replay_only, vfs_context_t context)
1211{
1212	struct proc *p = vfs_context_proc(context);
1213	int retval = E_NONE;
1214	struct hfsmount	*hfsmp = NULL;
1215	struct buf *bp;
1216	dev_t dev;
1217	HFSMasterDirectoryBlock *mdbp = NULL;
1218	int ronly;
1219#if QUOTA
1220	int i;
1221#endif
1222	int mntwrapper;
1223	kauth_cred_t cred;
1224	u_int64_t disksize;
1225	daddr64_t log_blkcnt;
1226	u_int32_t log_blksize;
1227	u_int32_t phys_blksize;
1228	u_int32_t minblksize;
1229	u_int32_t iswritable;
1230	daddr64_t mdb_offset;
1231	int isvirtual = 0;
1232	int isroot = 0;
1233	u_int32_t device_features = 0;
1234	int isssd;
1235
1236	if (args == NULL) {
1237		/* only hfs_mountroot passes us NULL as the 'args' argument */
1238		isroot = 1;
1239	}
1240
1241	ronly = vfs_isrdonly(mp);
1242	dev = vnode_specrdev(devvp);
1243	cred = p ? vfs_context_ucred(context) : NOCRED;
1244	mntwrapper = 0;
1245
1246	bp = NULL;
1247	hfsmp = NULL;
1248	mdbp = NULL;
1249	minblksize = kHFSBlockSize;
1250
1251	/* Advisory locking should be handled at the VFS layer */
1252	vfs_setlocklocal(mp);
1253
1254	/* Get the logical block size (treated as physical block size everywhere) */
1255	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKSIZE, (caddr_t)&log_blksize, 0, context)) {
1256		if (HFS_MOUNT_DEBUG) {
1257			printf("hfs_mountfs: DKIOCGETBLOCKSIZE failed\n");
1258		}
1259		retval = ENXIO;
1260		goto error_exit;
1261	}
1262	if (log_blksize == 0 || log_blksize > 1024*1024*1024) {
1263		printf("hfs: logical block size 0x%x looks bad.  Not mounting.\n", log_blksize);
1264		retval = ENXIO;
1265		goto error_exit;
1266	}
1267
1268	/* Get the physical block size. */
1269	retval = VNOP_IOCTL(devvp, DKIOCGETPHYSICALBLOCKSIZE, (caddr_t)&phys_blksize, 0, context);
1270	if (retval) {
1271		if ((retval != ENOTSUP) && (retval != ENOTTY)) {
1272			if (HFS_MOUNT_DEBUG) {
1273				printf("hfs_mountfs: DKIOCGETPHYSICALBLOCKSIZE failed\n");
1274			}
1275			retval = ENXIO;
1276			goto error_exit;
1277		}
1278		/* If device does not support this ioctl, assume that physical
1279		 * block size is same as logical block size
1280		 */
1281		phys_blksize = log_blksize;
1282	}
1283	if (phys_blksize == 0 || phys_blksize > MAXBSIZE) {
1284		printf("hfs: physical block size 0x%x looks bad.  Not mounting.\n", phys_blksize);
1285		retval = ENXIO;
1286		goto error_exit;
1287	}
1288
1289	/* Switch to 512 byte sectors (temporarily) */
1290	if (log_blksize > 512) {
1291		u_int32_t size512 = 512;
1292
1293		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&size512, FWRITE, context)) {
1294			if (HFS_MOUNT_DEBUG) {
1295				printf("hfs_mountfs: DKIOCSETBLOCKSIZE failed \n");
1296			}
1297			retval = ENXIO;
1298			goto error_exit;
1299		}
1300	}
1301	/* Get the number of 512 byte physical blocks. */
1302	if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1303		/* resetting block size may fail if getting block count did */
1304		(void)VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context);
1305		if (HFS_MOUNT_DEBUG) {
1306			printf("hfs_mountfs: DKIOCGETBLOCKCOUNT failed\n");
1307		}
1308		retval = ENXIO;
1309		goto error_exit;
1310	}
1311	/* Compute an accurate disk size (i.e. within 512 bytes) */
1312	disksize = (u_int64_t)log_blkcnt * (u_int64_t)512;
1313
1314	/*
1315	 * On Tiger it is not necessary to switch the device
1316	 * block size to be 4k if there are more than 31-bits
1317	 * worth of blocks but to insure compatibility with
1318	 * pre-Tiger systems we have to do it.
1319	 *
1320	 * If the device size is not a multiple of 4K (8 * 512), then
1321	 * switching the logical block size isn't going to help because
1322	 * we will be unable to write the alternate volume header.
1323	 * In this case, just leave the logical block size unchanged.
1324	 */
1325	if (log_blkcnt > 0x000000007fffffff && (log_blkcnt & 7) == 0) {
1326		minblksize = log_blksize = 4096;
1327		if (phys_blksize < log_blksize)
1328			phys_blksize = log_blksize;
1329	}
1330
1331	/*
1332	 * The cluster layer is not currently prepared to deal with a logical
1333	 * block size larger than the system's page size.  (It can handle
1334	 * blocks per page, but not multiple pages per block.)  So limit the
1335	 * logical block size to the page size.
1336	 */
1337	if (log_blksize > PAGE_SIZE) {
1338		log_blksize = PAGE_SIZE;
1339	}
1340
1341	/* Now switch to our preferred physical block size. */
1342	if (log_blksize > 512) {
1343		if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1344			if (HFS_MOUNT_DEBUG) {
1345				printf("hfs_mountfs: DKIOCSETBLOCKSIZE (2) failed\n");
1346			}
1347			retval = ENXIO;
1348			goto error_exit;
1349		}
1350		/* Get the count of physical blocks. */
1351		if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1352			if (HFS_MOUNT_DEBUG) {
1353				printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (2) failed\n");
1354			}
1355			retval = ENXIO;
1356			goto error_exit;
1357		}
1358	}
1359	/*
1360	 * At this point:
1361	 *   minblksize is the minimum physical block size
1362	 *   log_blksize has our preferred physical block size
1363	 *   log_blkcnt has the total number of physical blocks
1364	 */
1365
1366	mdb_offset = (daddr64_t)HFS_PRI_SECTOR(log_blksize);
1367	if ((retval = (int)buf_meta_bread(devvp,
1368				HFS_PHYSBLK_ROUNDDOWN(mdb_offset, (phys_blksize/log_blksize)),
1369				phys_blksize, cred, &bp))) {
1370		if (HFS_MOUNT_DEBUG) {
1371			printf("hfs_mountfs: buf_meta_bread failed with %d\n", retval);
1372		}
1373		goto error_exit;
1374	}
1375	MALLOC(mdbp, HFSMasterDirectoryBlock *, kMDBSize, M_TEMP, M_WAITOK);
1376	if (mdbp == NULL) {
1377		retval = ENOMEM;
1378		if (HFS_MOUNT_DEBUG) {
1379			printf("hfs_mountfs: MALLOC failed\n");
1380		}
1381		goto error_exit;
1382	}
1383	bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, kMDBSize);
1384	buf_brelse(bp);
1385	bp = NULL;
1386
1387	MALLOC(hfsmp, struct hfsmount *, sizeof(struct hfsmount), M_HFSMNT, M_WAITOK);
1388	if (hfsmp == NULL) {
1389		if (HFS_MOUNT_DEBUG) {
1390			printf("hfs_mountfs: MALLOC (2) failed\n");
1391		}
1392		retval = ENOMEM;
1393		goto error_exit;
1394	}
1395	bzero(hfsmp, sizeof(struct hfsmount));
1396
1397	hfs_chashinit_finish(hfsmp);
1398
1399	/* Init the ID lookup hashtable */
1400	hfs_idhash_init (hfsmp);
1401
1402	/*
1403	 * See if the disk supports unmap (trim).
1404	 *
1405	 * NOTE: vfs_init_io_attributes has not been called yet, so we can't use the io_flags field
1406	 * returned by vfs_ioattr.  We need to call VNOP_IOCTL ourselves.
1407	 */
1408	if (VNOP_IOCTL(devvp, DKIOCGETFEATURES, (caddr_t)&device_features, 0, context) == 0) {
1409		if (device_features & DK_FEATURE_UNMAP) {
1410			hfsmp->hfs_flags |= HFS_UNMAP;
1411		}
1412	}
1413
1414	/*
1415	 * See if the disk is a solid state device, too.  We need this to decide what to do about
1416	 * hotfiles.
1417	 */
1418	if (VNOP_IOCTL(devvp, DKIOCISSOLIDSTATE, (caddr_t)&isssd, 0, context) == 0) {
1419		if (isssd) {
1420			hfsmp->hfs_flags |= HFS_SSD;
1421		}
1422	}
1423
1424
1425	/*
1426	 *  Init the volume information structure
1427	 */
1428
1429	lck_mtx_init(&hfsmp->hfs_mutex, hfs_mutex_group, hfs_lock_attr);
1430	lck_mtx_init(&hfsmp->hfc_mutex, hfs_mutex_group, hfs_lock_attr);
1431	lck_rw_init(&hfsmp->hfs_global_lock, hfs_rwlock_group, hfs_lock_attr);
1432	lck_spin_init(&hfsmp->vcbFreeExtLock, hfs_spinlock_group, hfs_lock_attr);
1433
1434	vfs_setfsprivate(mp, hfsmp);
1435	hfsmp->hfs_mp = mp;			/* Make VFSTOHFS work */
1436	hfsmp->hfs_raw_dev = vnode_specrdev(devvp);
1437	hfsmp->hfs_devvp = devvp;
1438	vnode_ref(devvp);  /* Hold a ref on the device, dropped when hfsmp is freed. */
1439	hfsmp->hfs_logical_block_size = log_blksize;
1440	hfsmp->hfs_logical_block_count = log_blkcnt;
1441	hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1442	hfsmp->hfs_physical_block_size = phys_blksize;
1443	hfsmp->hfs_log_per_phys = (phys_blksize / log_blksize);
1444	hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1445	if (ronly)
1446		hfsmp->hfs_flags |= HFS_READ_ONLY;
1447	if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS)
1448		hfsmp->hfs_flags |= HFS_UNKNOWN_PERMS;
1449
1450#if QUOTA
1451	for (i = 0; i < MAXQUOTAS; i++)
1452		dqfileinit(&hfsmp->hfs_qfiles[i]);
1453#endif
1454
1455	if (args) {
1456		hfsmp->hfs_uid = (args->hfs_uid == (uid_t)VNOVAL) ? UNKNOWNUID : args->hfs_uid;
1457		if (hfsmp->hfs_uid == 0xfffffffd) hfsmp->hfs_uid = UNKNOWNUID;
1458		hfsmp->hfs_gid = (args->hfs_gid == (gid_t)VNOVAL) ? UNKNOWNGID : args->hfs_gid;
1459		if (hfsmp->hfs_gid == 0xfffffffd) hfsmp->hfs_gid = UNKNOWNGID;
1460		vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid);				/* tell the VFS */
1461		if (args->hfs_mask != (mode_t)VNOVAL) {
1462			hfsmp->hfs_dir_mask = args->hfs_mask & ALLPERMS;
1463			if (args->flags & HFSFSMNT_NOXONFILES) {
1464				hfsmp->hfs_file_mask = (args->hfs_mask & DEFFILEMODE);
1465			} else {
1466				hfsmp->hfs_file_mask = args->hfs_mask & ALLPERMS;
1467			}
1468		} else {
1469			hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS;		/* 0777: rwx---rwx */
1470			hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE;	/* 0666: no --x by default? */
1471		}
1472		if ((args->flags != (int)VNOVAL) && (args->flags & HFSFSMNT_WRAPPER))
1473			mntwrapper = 1;
1474	} else {
1475		/* Even w/o explicit mount arguments, MNT_UNKNOWNPERMISSIONS requires setting up uid, gid, and mask: */
1476		if (((unsigned int)vfs_flags(mp)) & MNT_UNKNOWNPERMISSIONS) {
1477			hfsmp->hfs_uid = UNKNOWNUID;
1478			hfsmp->hfs_gid = UNKNOWNGID;
1479			vfs_setowner(mp, hfsmp->hfs_uid, hfsmp->hfs_gid);			/* tell the VFS */
1480			hfsmp->hfs_dir_mask = UNKNOWNPERMISSIONS & ALLPERMS;		/* 0777: rwx---rwx */
1481			hfsmp->hfs_file_mask = UNKNOWNPERMISSIONS & DEFFILEMODE;	/* 0666: no --x by default? */
1482		}
1483	}
1484
1485	/* Find out if disk media is writable. */
1486	if (VNOP_IOCTL(devvp, DKIOCISWRITABLE, (caddr_t)&iswritable, 0, context) == 0) {
1487		if (iswritable)
1488			hfsmp->hfs_flags |= HFS_WRITEABLE_MEDIA;
1489		else
1490			hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1491	}
1492
1493	// record the current time at which we're mounting this volume
1494	struct timeval tv;
1495	microtime(&tv);
1496	hfsmp->hfs_mount_time = tv.tv_sec;
1497
1498	/* Mount a standard HFS disk */
1499	if ((SWAP_BE16(mdbp->drSigWord) == kHFSSigWord) &&
1500	    (mntwrapper || (SWAP_BE16(mdbp->drEmbedSigWord) != kHFSPlusSigWord))) {
1501#if CONFIG_HFS_STD
1502		/* On 10.6 and beyond, non read-only mounts for HFS standard vols get rejected */
1503		if (vfs_isrdwr(mp)) {
1504			retval = EROFS;
1505			goto error_exit;
1506		}
1507
1508		printf("hfs_mountfs: Mounting HFS Standard volumes was deprecated in Mac OS 10.7 \n");
1509
1510		/* Treat it as if it's read-only and not writeable */
1511		hfsmp->hfs_flags |= HFS_READ_ONLY;
1512		hfsmp->hfs_flags &= ~HFS_WRITEABLE_MEDIA;
1513
1514	   	/* If only journal replay is requested, exit immediately */
1515		if (journal_replay_only) {
1516			retval = 0;
1517			goto error_exit;
1518		}
1519
1520	        if ((vfs_flags(mp) & MNT_ROOTFS)) {
1521			retval = EINVAL;  /* Cannot root from HFS standard disks */
1522			goto error_exit;
1523		}
1524		/* HFS disks can only use 512 byte physical blocks */
1525		if (log_blksize > kHFSBlockSize) {
1526			log_blksize = kHFSBlockSize;
1527			if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1528				retval = ENXIO;
1529				goto error_exit;
1530			}
1531			if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1532				retval = ENXIO;
1533				goto error_exit;
1534			}
1535			hfsmp->hfs_logical_block_size = log_blksize;
1536			hfsmp->hfs_logical_block_count = log_blkcnt;
1537			hfsmp->hfs_logical_bytes = (uint64_t) log_blksize * (uint64_t) log_blkcnt;
1538			hfsmp->hfs_physical_block_size = log_blksize;
1539			hfsmp->hfs_log_per_phys = 1;
1540		}
1541		if (args) {
1542			hfsmp->hfs_encoding = args->hfs_encoding;
1543			HFSTOVCB(hfsmp)->volumeNameEncodingHint = args->hfs_encoding;
1544
1545			/* establish the timezone */
1546			gTimeZone = args->hfs_timezone;
1547		}
1548
1549		retval = hfs_getconverter(hfsmp->hfs_encoding, &hfsmp->hfs_get_unicode,
1550					&hfsmp->hfs_get_hfsname);
1551		if (retval)
1552			goto error_exit;
1553
1554		retval = hfs_MountHFSVolume(hfsmp, mdbp, p);
1555		if (retval)
1556			(void) hfs_relconverter(hfsmp->hfs_encoding);
1557#else
1558		/* On platforms where HFS Standard is not supported, deny the mount altogether */
1559		retval = EINVAL;
1560		goto error_exit;
1561#endif
1562
1563	}
1564	else { /* Mount an HFS Plus disk */
1565		HFSPlusVolumeHeader *vhp;
1566		off_t embeddedOffset;
1567		int   jnl_disable = 0;
1568
1569		/* Get the embedded Volume Header */
1570		if (SWAP_BE16(mdbp->drEmbedSigWord) == kHFSPlusSigWord) {
1571			embeddedOffset = SWAP_BE16(mdbp->drAlBlSt) * kHFSBlockSize;
1572			embeddedOffset += (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.startBlock) *
1573			                  (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1574
1575			/*
1576			 * If the embedded volume doesn't start on a block
1577			 * boundary, then switch the device to a 512-byte
1578			 * block size so everything will line up on a block
1579			 * boundary.
1580			 */
1581			if ((embeddedOffset % log_blksize) != 0) {
1582				printf("hfs_mountfs: embedded volume offset not"
1583				    " a multiple of physical block size (%d);"
1584				    " switching to 512\n", log_blksize);
1585				log_blksize = 512;
1586				if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE,
1587				    (caddr_t)&log_blksize, FWRITE, context)) {
1588
1589					if (HFS_MOUNT_DEBUG) {
1590						printf("hfs_mountfs: DKIOCSETBLOCKSIZE (3) failed\n");
1591					}
1592					retval = ENXIO;
1593					goto error_exit;
1594				}
1595				if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT,
1596				    (caddr_t)&log_blkcnt, 0, context)) {
1597					if (HFS_MOUNT_DEBUG) {
1598						printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (3) failed\n");
1599					}
1600					retval = ENXIO;
1601					goto error_exit;
1602				}
				/*
				 * Note: relative block count adjustment -- e.g. dropping from
				 * 4096- to 512-byte logical blocks multiplies the count by 8,
				 * leaving the device's size in bytes unchanged.
				 */
1604				hfsmp->hfs_logical_block_count *=
1605				    hfsmp->hfs_logical_block_size / log_blksize;
1606
				/* Update logical/physical block size */
1608				hfsmp->hfs_logical_block_size = log_blksize;
1609				hfsmp->hfs_physical_block_size = log_blksize;
1610
1611				phys_blksize = log_blksize;
1612				hfsmp->hfs_log_per_phys = 1;
1613			}
1614
1615			disksize = (u_int64_t)SWAP_BE16(mdbp->drEmbedExtent.blockCount) *
1616			           (u_int64_t)SWAP_BE32(mdbp->drAlBlkSiz);
1617
1618			hfsmp->hfs_logical_block_count = disksize / log_blksize;
1619
1620			hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1621
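			/*
			 * The primary volume header sits 1024 bytes into the (embedded)
			 * volume; HFS_PRI_SECTOR() converts that byte offset into units
			 * of the current logical block size.
			 */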
1622			mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1623			retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1624					phys_blksize, cred, &bp);
1625			if (retval) {
1626				if (HFS_MOUNT_DEBUG) {
1627					printf("hfs_mountfs: buf_meta_bread (2) failed with %d\n", retval);
1628				}
1629				goto error_exit;
1630			}
1631			bcopy((char *)buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize), mdbp, 512);
1632			buf_brelse(bp);
1633			bp = NULL;
1634			vhp = (HFSPlusVolumeHeader*) mdbp;
1635
1636		}
1637		else { /* pure HFS+ */
1638			embeddedOffset = 0;
1639			vhp = (HFSPlusVolumeHeader*) mdbp;
1640		}
1641
1642		if (isroot) {
1643			hfs_root_unmounted_cleanly = ((SWAP_BE32(vhp->attributes) & kHFSVolumeUnmountedMask) != 0);
1644		}
1645
1646		/*
1647		 * On inconsistent disks, do not allow read-write mount
1648		 * unless it is the boot volume being mounted.  We also
1649		 * always want to replay the journal if the journal_replay_only
1650		 * flag is set because that will (most likely) get the
1651		 * disk into a consistent state before fsck_hfs starts
1652		 * looking at it.
1653		 */
1654		if (  !(vfs_flags(mp) & MNT_ROOTFS)
1655		   && (SWAP_BE32(vhp->attributes) & kHFSVolumeInconsistentMask)
1656		   && !journal_replay_only
1657		   && !(hfsmp->hfs_flags & HFS_READ_ONLY)) {
1658
1659			if (HFS_MOUNT_DEBUG) {
1660				printf("hfs_mountfs: failed to mount non-root inconsistent disk\n");
1661			}
1662			retval = EINVAL;
1663			goto error_exit;
1664		}
1665
1666
1667		// XXXdbg
1668		//
1669		hfsmp->jnl = NULL;
1670		hfsmp->jvp = NULL;
1671		if (args != NULL && (args->flags & HFSFSMNT_EXTENDED_ARGS) &&
1672		    args->journal_disable) {
1673		    jnl_disable = 1;
1674		}
1675
1676		//
1677		// We only initialize the journal here if the last person
1678		// to mount this volume was journaling aware.  Otherwise
1679		// we delay journal initialization until later at the end
1680		// of hfs_MountHFSPlusVolume() because the last person who
1681		// mounted it could have messed things up behind our back
1682		// (so we need to go find the .journal file, make sure it's
1683		// the right size, re-sync up if it was moved, etc).
1684		//
1685		if (   (SWAP_BE32(vhp->lastMountedVersion) == kHFSJMountVersion)
1686			&& (SWAP_BE32(vhp->attributes) & kHFSVolumeJournaledMask)
1687			&& !jnl_disable) {
1688
1689			// if we're able to init the journal, mark the mount
1690			// point as journaled.
1691			//
1692			if ((retval = hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred)) == 0) {
1693				vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1694			} else {
1695				if (retval == EROFS) {
1696					// EROFS is a special error code that means the volume has an external
1697					// journal which we couldn't find.  in that case we do not want to
1698					// rewrite the volume header - we'll just refuse to mount the volume.
1699					if (HFS_MOUNT_DEBUG) {
1700						printf("hfs_mountfs: hfs_early_journal_init indicated external jnl \n");
1701					}
1702					retval = EINVAL;
1703					goto error_exit;
1704				}
1705
1706				// if the journal failed to open, then set the lastMountedVersion
1707				// to be "FSK!" which fsck_hfs will see and force the fsck instead
1708				// of just bailing out because the volume is journaled.
1709				if (!ronly) {
1710					if (HFS_MOUNT_DEBUG) {
1711						printf("hfs_mountfs: hfs_early_journal_init failed, setting to FSK \n");
1712					}
1713
1714					HFSPlusVolumeHeader *jvhp;
1715
1716				    hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1717
1718				    if (mdb_offset == 0) {
1719					mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1720				    }
1721
1722				    bp = NULL;
1723				    retval = (int)buf_meta_bread(devvp,
1724						    HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1725						    phys_blksize, cred, &bp);
1726				    if (retval == 0) {
1727					jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1728
1729					if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1730						printf ("hfs(1): Journal replay fail.  Writing lastMountVersion as FSK!\n");
1731					    jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1732					    buf_bwrite(bp);
1733					} else {
1734					    buf_brelse(bp);
1735					}
1736					bp = NULL;
1737				    } else if (bp) {
1738					buf_brelse(bp);
1739					// clear this so the error exit path won't try to use it
1740					bp = NULL;
1741				    }
1742				}
1743
1744				// if this isn't the root device just bail out.
1745				// If it is the root device we just continue on
1746				// in the hopes that fsck_hfs will be able to
1747				// fix any damage that exists on the volume.
1748				if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1749					if (HFS_MOUNT_DEBUG) {
1750						printf("hfs_mountfs: hfs_early_journal_init failed, erroring out \n");
1751					}
1752				    retval = EINVAL;
1753				    goto error_exit;
1754				}
1755			}
1756		}
1757		// XXXdbg
1758
1759		/* Either the journal is replayed successfully, or there
1760		 * was nothing to replay, or no journal exists.  In any case,
1761		 * return success.
1762		 */
1763		if (journal_replay_only) {
1764			retval = 0;
1765			goto error_exit;
1766		}
1767
1768		(void) hfs_getconverter(0, &hfsmp->hfs_get_unicode, &hfsmp->hfs_get_hfsname);
1769
1770		retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1771		/*
1772		 * If the backend didn't like our physical blocksize
1773		 * then retry with physical blocksize of 512.
1774		 */
1775		if ((retval == ENXIO) && (log_blksize > 512) && (log_blksize != minblksize)) {
1776			printf("hfs_mountfs: could not use physical block size "
1777					"(%d) switching to 512\n", log_blksize);
1778			log_blksize = 512;
1779			if (VNOP_IOCTL(devvp, DKIOCSETBLOCKSIZE, (caddr_t)&log_blksize, FWRITE, context)) {
1780				if (HFS_MOUNT_DEBUG) {
1781					printf("hfs_mountfs: DKIOCSETBLOCKSIZE (4) failed \n");
1782				}
1783				retval = ENXIO;
1784				goto error_exit;
1785			}
1786			if (VNOP_IOCTL(devvp, DKIOCGETBLOCKCOUNT, (caddr_t)&log_blkcnt, 0, context)) {
1787				if (HFS_MOUNT_DEBUG) {
1788					printf("hfs_mountfs: DKIOCGETBLOCKCOUNT (4) failed \n");
1789				}
1790				retval = ENXIO;
1791				goto error_exit;
1792			}
1793			devvp->v_specsize = log_blksize;
1794			/* Note: relative block count adjustment (in case this is an embedded volume). */
1795			hfsmp->hfs_logical_block_count *= hfsmp->hfs_logical_block_size / log_blksize;
1796			hfsmp->hfs_logical_block_size = log_blksize;
1797			hfsmp->hfs_log_per_phys = hfsmp->hfs_physical_block_size / log_blksize;
1798
1799			hfsmp->hfs_logical_bytes = (uint64_t) hfsmp->hfs_logical_block_count * (uint64_t) hfsmp->hfs_logical_block_size;
1800
1801			if (hfsmp->jnl && hfsmp->jvp == devvp) {
1802			    // close and re-open this with the new block size
1803			    journal_close(hfsmp->jnl);
1804			    hfsmp->jnl = NULL;
1805			    if (hfs_early_journal_init(hfsmp, vhp, args, embeddedOffset, mdb_offset, mdbp, cred) == 0) {
1806					vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
1807				} else {
1808					// if the journal failed to open, then set the lastMountedVersion
1809					// to be "FSK!" which fsck_hfs will see and force the fsck instead
1810					// of just bailing out because the volume is journaled.
1811					if (!ronly) {
1812						if (HFS_MOUNT_DEBUG) {
1813							printf("hfs_mountfs: hfs_early_journal_init (2) resetting.. \n");
1814						}
1815				    	HFSPlusVolumeHeader *jvhp;
1816
1817				    	hfsmp->hfs_flags |= HFS_NEED_JNL_RESET;
1818
1819				    	if (mdb_offset == 0) {
1820							mdb_offset = (daddr64_t)((embeddedOffset / log_blksize) + HFS_PRI_SECTOR(log_blksize));
1821				    	}
1822
1823				   	 	bp = NULL;
1824				    	retval = (int)buf_meta_bread(devvp, HFS_PHYSBLK_ROUNDDOWN(mdb_offset, hfsmp->hfs_log_per_phys),
1825							phys_blksize, cred, &bp);
1826				    	if (retval == 0) {
1827							jvhp = (HFSPlusVolumeHeader *)(buf_dataptr(bp) + HFS_PRI_OFFSET(phys_blksize));
1828
1829							if (SWAP_BE16(jvhp->signature) == kHFSPlusSigWord || SWAP_BE16(jvhp->signature) == kHFSXSigWord) {
1830								printf ("hfs(2): Journal replay fail.  Writing lastMountVersion as FSK!\n");
1831					    		jvhp->lastMountedVersion = SWAP_BE32(kFSKMountVersion);
1832					    		buf_bwrite(bp);
1833							} else {
1834					    		buf_brelse(bp);
1835							}
1836							bp = NULL;
1837				    	} else if (bp) {
1838							buf_brelse(bp);
1839							// clear this so the error exit path won't try to use it
1840							bp = NULL;
1841				    	}
1842					}
1843
1844					// if this isn't the root device just bail out.
1845					// If it is the root device we just continue on
1846					// in the hopes that fsck_hfs will be able to
1847					// fix any damage that exists on the volume.
1848					if ( !(vfs_flags(mp) & MNT_ROOTFS)) {
1849						if (HFS_MOUNT_DEBUG) {
1850							printf("hfs_mountfs: hfs_early_journal_init (2) failed \n");
1851						}
1852				    	retval = EINVAL;
1853				    	goto error_exit;
1854					}
1855				}
1856			}
1857
1858			/* Try again with a smaller block size... */
1859			retval = hfs_MountHFSPlusVolume(hfsmp, vhp, embeddedOffset, disksize, p, args, cred);
1860			if (retval && HFS_MOUNT_DEBUG) {
1861				printf("hfs_MountHFSPlusVolume (late) returned %d\n",retval);
1862			}
1863		}
1864		if (retval)
1865			(void) hfs_relconverter(0);
1866	}
1867
1868	// save off a snapshot of the mtime from the previous mount
1869	// (for matador).
1870	hfsmp->hfs_last_mounted_mtime = hfsmp->hfs_mtime;
1871
1872	if ( retval ) {
1873		if (HFS_MOUNT_DEBUG) {
1874			printf("hfs_mountfs: encountered failure %d \n", retval);
1875		}
1876		goto error_exit;
1877	}
1878
1879	mp->mnt_vfsstat.f_fsid.val[0] = dev;
1880	mp->mnt_vfsstat.f_fsid.val[1] = vfs_typenum(mp);
1881	vfs_setmaxsymlen(mp, 0);
1882
1883	mp->mnt_vtable->vfc_vfsflags |= VFC_VFSNATIVEXATTR;
1884#if NAMEDSTREAMS
1885	mp->mnt_kern_flag |= MNTK_NAMED_STREAMS;
1886#endif
1887	if ((hfsmp->hfs_flags & HFS_STANDARD) == 0 ) {
1888		/* Tell VFS that we support directory hard links. */
1889		mp->mnt_vtable->vfc_vfsflags |= VFC_VFSDIRLINKS;
1890	}
1891#if CONFIG_HFS_STD
1892	else {
1893		/* HFS standard doesn't support extended readdir! */
1894		mount_set_noreaddirext (mp);
1895	}
1896#endif
1897
1898	if (args) {
1899		/*
1900		 * Set the free space warning levels for a non-root volume:
1901		 *
1902		 * Set the "danger" limit to 1% of the volume size or 100MB, whichever
1903		 * is less.  Set the "warning" limit to 2% of the volume size or 150MB,
		 * whichever is less.  And last, set the "desired" freespace level to
		 * 3% of the volume size or 200MB, whichever is less.
1906		 */
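		/*
		 * For example (illustrative numbers only): on a 500 GB volume with
		 * 4 KB allocation blocks, 1% of the blocks is far more than 100 MB
		 * worth (25,600 blocks), so the 100 MB cap is what takes effect.
		 */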
1907		hfsmp->hfs_freespace_notify_dangerlimit =
1908			MIN(HFS_VERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1909				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_VERYLOWDISKTRIGGERFRACTION);
1910		hfsmp->hfs_freespace_notify_warninglimit =
1911			MIN(HFS_LOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1912				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKTRIGGERFRACTION);
1913		hfsmp->hfs_freespace_notify_desiredlevel =
1914			MIN(HFS_LOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1915				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_LOWDISKSHUTOFFFRACTION);
1916	} else {
1917		/*
1918		 * Set the free space warning levels for the root volume:
1919		 *
1920		 * Set the "danger" limit to 5% of the volume size or 512MB, whichever
1921		 * is less.  Set the "warning" limit to 10% of the volume size or 1GB,
		 * whichever is less.  And last, set the "desired" freespace level to
		 * 11% of the volume size or 1.25GB, whichever is less.
1924		 */
1925		hfsmp->hfs_freespace_notify_dangerlimit =
1926			MIN(HFS_ROOTVERYLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1927				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTVERYLOWDISKTRIGGERFRACTION);
1928		hfsmp->hfs_freespace_notify_warninglimit =
1929			MIN(HFS_ROOTLOWDISKTRIGGERLEVEL / HFSTOVCB(hfsmp)->blockSize,
1930				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKTRIGGERFRACTION);
1931		hfsmp->hfs_freespace_notify_desiredlevel =
1932			MIN(HFS_ROOTLOWDISKSHUTOFFLEVEL / HFSTOVCB(hfsmp)->blockSize,
1933				(HFSTOVCB(hfsmp)->totalBlocks / 100) * HFS_ROOTLOWDISKSHUTOFFFRACTION);
	}
1935
1936	/* Check if the file system exists on virtual device, like disk image */
1937	if (VNOP_IOCTL(devvp, DKIOCISVIRTUAL, (caddr_t)&isvirtual, 0, context) == 0) {
1938		if (isvirtual) {
1939			hfsmp->hfs_flags |= HFS_VIRTUAL_DEVICE;
1940		}
1941	}
1942
	/* Only set up the syncer callback for ejectable, non-virtual, non-root devices */
1944	if (isroot == 0) {
1945		if ((hfsmp->hfs_flags & HFS_VIRTUAL_DEVICE) == 0 &&
1946				IOBSDIsMediaEjectable(mp->mnt_vfsstat.f_mntfromname)) {
1947			hfsmp->hfs_syncer = thread_call_allocate(hfs_syncer, hfsmp);
1948			if (hfsmp->hfs_syncer == NULL) {
1949				printf("hfs: failed to allocate syncer thread callback for %s (%s)\n",
1950						mp->mnt_vfsstat.f_mntfromname, mp->mnt_vfsstat.f_mntonname);
1951			}
1952		}
1953	}
1954
1955	printf("hfs: mounted %s on device %s\n", (hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"),
1956            (devvp->v_name ? devvp->v_name : (isroot ? "root_device": "unknown device")));
1957
1958	/*
1959	 * Start looking for free space to drop below this level and generate a
1960	 * warning immediately if needed:
1961	 */
1962	hfsmp->hfs_notification_conditions = 0;
1963	hfs_generate_volume_notifications(hfsmp);
1964
1965	if (ronly == 0) {
1966		(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
1967	}
1968	FREE(mdbp, M_TEMP);
1969	return (0);
1970
1971error_exit:
1972	if (bp)
1973		buf_brelse(bp);
1974	if (mdbp)
1975		FREE(mdbp, M_TEMP);
1976
1977	if (hfsmp && hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
1978		vnode_clearmountedon(hfsmp->jvp);
1979		(void)VNOP_CLOSE(hfsmp->jvp, ronly ? FREAD : FREAD|FWRITE, vfs_context_kernel());
1980		hfsmp->jvp = NULL;
1981	}
1982	if (hfsmp) {
1983		if (hfsmp->hfs_devvp) {
1984			vnode_rele(hfsmp->hfs_devvp);
1985		}
1986		hfs_locks_destroy(hfsmp);
1987		hfs_delete_chash(hfsmp);
1988		hfs_idhash_destroy (hfsmp);
1989
1990		FREE(hfsmp, M_HFSMNT);
1991		vfs_setfsprivate(mp, NULL);
1992	}
1993        return (retval);
1994}
1995
1996
1997/*
1998 * Make a filesystem operational.
1999 * Nothing to do at the moment.
2000 */
2001/* ARGSUSED */
2002static int
2003hfs_start(__unused struct mount *mp, __unused int flags, __unused vfs_context_t context)
2004{
2005	return (0);
2006}
2007
2008
2009/*
2010 * unmount system call
2011 */
2012int
2013hfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
2014{
2015	struct proc *p = vfs_context_proc(context);
2016	struct hfsmount *hfsmp = VFSTOHFS(mp);
2017	int retval = E_NONE;
2018	int flags;
2019	int force;
2020	int started_tr = 0;
2021
2022	flags = 0;
2023	force = 0;
2024	if (mntflags & MNT_FORCE) {
2025		flags |= FORCECLOSE;
2026		force = 1;
2027	}
2028
2029	printf("hfs: unmount initiated on %s on device %s\n",
2030			(hfsmp->vcbVN ? (const char*) hfsmp->vcbVN : "unknown"),
2031			(hfsmp->hfs_devvp ? ((hfsmp->hfs_devvp->v_name ? hfsmp->hfs_devvp->v_name : "unknown device")) : "unknown device"));
2032
2033	if ((retval = hfs_flushfiles(mp, flags, p)) && !force)
2034 		return (retval);
2035
2036	if (hfsmp->hfs_flags & HFS_METADATA_ZONE)
2037		(void) hfs_recording_suspend(hfsmp);
2038
2039	hfs_syncer_free(hfsmp);
2040
2041	if (hfsmp->hfs_flags & HFS_SUMMARY_TABLE) {
2042		if (hfsmp->hfs_summary_table) {
2043			int err = 0;
2044			/*
			 * Take the bitmap lock to serialize against a concurrent bitmap scan still in progress
2046			 */
2047			if (hfsmp->hfs_allocation_vp) {
2048				err = hfs_lock (VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2049			}
2050			FREE (hfsmp->hfs_summary_table, M_TEMP);
2051			hfsmp->hfs_summary_table = NULL;
2052			hfsmp->hfs_flags &= ~HFS_SUMMARY_TABLE;
2053
2054			if (err == 0 && hfsmp->hfs_allocation_vp){
2055				hfs_unlock (VTOC(hfsmp->hfs_allocation_vp));
2056			}
2057
2058		}
2059	}
2060
2061	/*
2062	 * Flush out the b-trees, volume bitmap and Volume Header
2063	 */
2064	if ((hfsmp->hfs_flags & HFS_READ_ONLY) == 0) {
2065		retval = hfs_start_transaction(hfsmp);
2066		if (retval == 0) {
2067		    started_tr = 1;
2068		} else if (!force) {
2069		    goto err_exit;
2070		}
2071
2072		if (hfsmp->hfs_startup_vp) {
2073			(void) hfs_lock(VTOC(hfsmp->hfs_startup_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2074			retval = hfs_fsync(hfsmp->hfs_startup_vp, MNT_WAIT, 0, p);
2075			hfs_unlock(VTOC(hfsmp->hfs_startup_vp));
2076			if (retval && !force)
2077				goto err_exit;
2078		}
2079
2080		if (hfsmp->hfs_attribute_vp) {
2081			(void) hfs_lock(VTOC(hfsmp->hfs_attribute_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2082			retval = hfs_fsync(hfsmp->hfs_attribute_vp, MNT_WAIT, 0, p);
2083			hfs_unlock(VTOC(hfsmp->hfs_attribute_vp));
2084			if (retval && !force)
2085				goto err_exit;
2086		}
2087
2088		(void) hfs_lock(VTOC(hfsmp->hfs_catalog_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2089		retval = hfs_fsync(hfsmp->hfs_catalog_vp, MNT_WAIT, 0, p);
2090		hfs_unlock(VTOC(hfsmp->hfs_catalog_vp));
2091		if (retval && !force)
2092			goto err_exit;
2093
2094		(void) hfs_lock(VTOC(hfsmp->hfs_extents_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2095		retval = hfs_fsync(hfsmp->hfs_extents_vp, MNT_WAIT, 0, p);
2096		hfs_unlock(VTOC(hfsmp->hfs_extents_vp));
2097		if (retval && !force)
2098			goto err_exit;
2099
2100		if (hfsmp->hfs_allocation_vp) {
2101			(void) hfs_lock(VTOC(hfsmp->hfs_allocation_vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2102			retval = hfs_fsync(hfsmp->hfs_allocation_vp, MNT_WAIT, 0, p);
2103			hfs_unlock(VTOC(hfsmp->hfs_allocation_vp));
2104			if (retval && !force)
2105				goto err_exit;
2106		}
2107
2108		if (hfsmp->hfc_filevp && vnode_issystem(hfsmp->hfc_filevp)) {
2109			retval = hfs_fsync(hfsmp->hfc_filevp, MNT_WAIT, 0, p);
2110			if (retval && !force)
2111				goto err_exit;
2112		}
2113
2114		/* If runtime corruption was detected, indicate that the volume
2115		 * was not unmounted cleanly.
2116		 */
2117		if (hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) {
2118			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
2119		} else {
2120			HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeUnmountedMask;
2121		}
2122
2123		if (hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) {
2124			int i;
2125			u_int32_t min_start = hfsmp->totalBlocks;
2126
2127			// set the nextAllocation pointer to the smallest free block number
2128			// we've seen so on the next mount we won't rescan unnecessarily
2129			lck_spin_lock(&hfsmp->vcbFreeExtLock);
2130			for(i=0; i < (int)hfsmp->vcbFreeExtCnt; i++) {
2131				if (hfsmp->vcbFreeExt[i].startBlock < min_start) {
2132					min_start = hfsmp->vcbFreeExt[i].startBlock;
2133				}
2134			}
2135			lck_spin_unlock(&hfsmp->vcbFreeExtLock);
2136			if (min_start < hfsmp->nextAllocation) {
2137				hfsmp->nextAllocation = min_start;
2138			}
2139		}
2140
2141		retval = hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
2142		if (retval) {
2143			HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeUnmountedMask;
2144			if (!force)
2145				goto err_exit;	/* could not flush everything */
2146		}
2147
2148		if (started_tr) {
2149		    hfs_end_transaction(hfsmp);
2150		    started_tr = 0;
2151		}
2152	}
2153
2154	if (hfsmp->jnl) {
2155		hfs_journal_flush(hfsmp, FALSE);
2156	}
2157
2158	/*
2159	 *	Invalidate our caches and release metadata vnodes
2160	 */
2161	(void) hfsUnmount(hfsmp, p);
2162
2163#if CONFIG_HFS_STD
2164	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2165		(void) hfs_relconverter(hfsmp->hfs_encoding);
2166	}
2167#endif
2168
2169	// XXXdbg
2170	if (hfsmp->jnl) {
2171	    journal_close(hfsmp->jnl);
2172	    hfsmp->jnl = NULL;
2173	}
2174
2175	VNOP_FSYNC(hfsmp->hfs_devvp, MNT_WAIT, context);
2176
2177	if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
2178	    vnode_clearmountedon(hfsmp->jvp);
2179	    retval = VNOP_CLOSE(hfsmp->jvp,
2180	                       hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE,
2181			       vfs_context_kernel());
2182	    vnode_put(hfsmp->jvp);
2183	    hfsmp->jvp = NULL;
2184	}
2185	// XXXdbg
2186
2187	/*
2188	 * Last chance to dump unreferenced system files.
2189	 */
2190	(void) vflush(mp, NULLVP, FORCECLOSE);
2191
2192#if HFS_SPARSE_DEV
2193	/* Drop our reference on the backing fs (if any). */
2194	if ((hfsmp->hfs_flags & HFS_HAS_SPARSE_DEVICE) && hfsmp->hfs_backingfs_rootvp) {
2195		struct vnode * tmpvp;
2196
2197		hfsmp->hfs_flags &= ~HFS_HAS_SPARSE_DEVICE;
2198		tmpvp = hfsmp->hfs_backingfs_rootvp;
2199		hfsmp->hfs_backingfs_rootvp = NULLVP;
2200		vnode_rele(tmpvp);
2201	}
2202#endif /* HFS_SPARSE_DEV */
2203
2204	vnode_rele(hfsmp->hfs_devvp);
2205
2206	hfs_locks_destroy(hfsmp);
2207	hfs_delete_chash(hfsmp);
2208	hfs_idhash_destroy(hfsmp);
2209	FREE(hfsmp, M_HFSMNT);
2210
2211	return (0);
2212
2213  err_exit:
2214	if (started_tr) {
2215		hfs_end_transaction(hfsmp);
2216	}
2217	return retval;
2218}
2219
2220
2221/*
2222 * Return the root of a filesystem.
2223 */
2224static int
2225hfs_vfs_root(struct mount *mp, struct vnode **vpp, __unused vfs_context_t context)
2226{
2227	return hfs_vget(VFSTOHFS(mp), (cnid_t)kHFSRootFolderID, vpp, 1, 0);
2228}
2229
2230
2231/*
2232 * Do operations associated with quotas
2233 */
2234#if !QUOTA
2235static int
2236hfs_quotactl(__unused struct mount *mp, __unused int cmds, __unused uid_t uid, __unused caddr_t datap, __unused vfs_context_t context)
2237{
2238	return (ENOTSUP);
2239}
2240#else
2241static int
2242hfs_quotactl(struct mount *mp, int cmds, uid_t uid, caddr_t datap, vfs_context_t context)
2243{
2244	struct proc *p = vfs_context_proc(context);
2245	int cmd, type, error;
2246
2247	if (uid == ~0U)
2248		uid = kauth_cred_getuid(vfs_context_ucred(context));
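	/*
	 * quotactl(2) packs the command into the high bits and the quota type
	 * (user or group) into the low SUBCMDMASK bits; they are split back
	 * apart here and below.
	 */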
2249	cmd = cmds >> SUBCMDSHIFT;
2250
2251	switch (cmd) {
2252	case Q_SYNC:
2253	case Q_QUOTASTAT:
2254		break;
2255	case Q_GETQUOTA:
2256		if (uid == kauth_cred_getuid(vfs_context_ucred(context)))
2257			break;
2258		/* fall through */
2259	default:
2260		if ( (error = vfs_context_suser(context)) )
2261			return (error);
2262	}
2263
2264	type = cmds & SUBCMDMASK;
2265	if ((u_int)type >= MAXQUOTAS)
2266		return (EINVAL);
2267	if (vfs_busy(mp, LK_NOWAIT))
2268		return (0);
2269
2270	switch (cmd) {
2271
2272	case Q_QUOTAON:
2273		error = hfs_quotaon(p, mp, type, datap);
2274		break;
2275
2276	case Q_QUOTAOFF:
2277		error = hfs_quotaoff(p, mp, type);
2278		break;
2279
2280	case Q_SETQUOTA:
2281		error = hfs_setquota(mp, uid, type, datap);
2282		break;
2283
2284	case Q_SETUSE:
2285		error = hfs_setuse(mp, uid, type, datap);
2286		break;
2287
2288	case Q_GETQUOTA:
2289		error = hfs_getquota(mp, uid, type, datap);
2290		break;
2291
2292	case Q_SYNC:
2293		error = hfs_qsync(mp);
2294		break;
2295
2296	case Q_QUOTASTAT:
2297		error = hfs_quotastat(mp, type, datap);
2298		break;
2299
2300	default:
2301		error = EINVAL;
2302		break;
2303	}
2304	vfs_unbusy(mp);
2305
2306	return (error);
2307}
2308#endif /* QUOTA */
2309
2310/* Subtype is composite of bits */
2311#define HFS_SUBTYPE_JOURNALED      0x01
2312#define HFS_SUBTYPE_CASESENSITIVE  0x02
2313/* bits 2 - 6 reserved */
2314#define HFS_SUBTYPE_STANDARDHFS    0x80
2315
2316/*
2317 * Get file system statistics.
2318 */
2319int
2320hfs_statfs(struct mount *mp, register struct vfsstatfs *sbp, __unused vfs_context_t context)
2321{
2322	ExtendedVCB *vcb = VFSTOVCB(mp);
2323	struct hfsmount *hfsmp = VFSTOHFS(mp);
2324	u_int32_t freeCNIDs;
2325	u_int16_t subtype = 0;
2326
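	/*
	 * Catalog node IDs are 32 bits and normally handed out monotonically
	 * from vcbNxtCNID, so the IDs remaining before wraparound bound the
	 * free file count reported below.
	 */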
2327	freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)vcb->vcbNxtCNID;
2328
2329	sbp->f_bsize = (u_int32_t)vcb->blockSize;
2330	sbp->f_iosize = (size_t)cluster_max_io_size(mp, 0);
2331	sbp->f_blocks = (u_int64_t)((u_int32_t)vcb->totalBlocks);
2332	sbp->f_bfree = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 0));
2333	sbp->f_bavail = (u_int64_t)((u_int32_t )hfs_freeblks(hfsmp, 1));
2334	sbp->f_files = (u_int64_t)((u_int32_t )(vcb->totalBlocks - 2));  /* max files is constrained by total blocks */
2335	sbp->f_ffree = (u_int64_t)((u_int32_t )(MIN(freeCNIDs, sbp->f_bavail)));
2336
2337	/*
2338	 * Subtypes (flavors) for HFS
2339	 *   0:   Mac OS Extended
2340	 *   1:   Mac OS Extended (Journaled)
2341	 *   2:   Mac OS Extended (Case Sensitive)
2342	 *   3:   Mac OS Extended (Case Sensitive, Journaled)
2343	 *   4 - 127:   Reserved
2344	 * 128:   Mac OS Standard
2345	 *
2346	 */
2347	if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
2348		/* HFS+ & variants */
2349		if (hfsmp->jnl) {
2350			subtype |= HFS_SUBTYPE_JOURNALED;
2351		}
2352		if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
2353			subtype |= HFS_SUBTYPE_CASESENSITIVE;
2354		}
2355	}
2356#if CONFIG_HFS_STD
2357	else {
2358		/* HFS standard */
2359		subtype = HFS_SUBTYPE_STANDARDHFS;
2360	}
2361#endif
2362	sbp->f_fssubtype = subtype;
2363
2364	return (0);
2365}
2366
2367
2368//
2369// XXXdbg -- this is a callback to be used by the journal to
2370//           get meta data blocks flushed out to disk.
2371//
2372// XXXdbg -- be smarter and don't flush *every* block on each
2373//           call.  try to only flush some so we don't wind up
2374//           being too synchronous.
2375//
2376__private_extern__
2377void
2378hfs_sync_metadata(void *arg)
2379{
2380	struct mount *mp = (struct mount *)arg;
2381	struct hfsmount *hfsmp;
2382	ExtendedVCB *vcb;
2383	buf_t	bp;
2384	int  retval;
2385	daddr64_t priIDSector;
2386	hfsmp = VFSTOHFS(mp);
2387	vcb = HFSTOVCB(hfsmp);
2388
2389	// now make sure the super block is flushed
2390	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
2391				  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
2392
2393	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
2394			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
2395			hfsmp->hfs_physical_block_size, NOCRED, &bp);
2396	if ((retval != 0 ) && (retval != ENXIO)) {
2397		printf("hfs_sync_metadata: can't read volume header at %d! (retval 0x%x)\n",
2398		       (int)priIDSector, retval);
2399	}
2400
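	/*
	 * Only push the header if it is a delayed write that is not locked
	 * down (e.g. held by the journal); otherwise just release the buffer.
	 */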
2401	if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
2402	    buf_bwrite(bp);
2403	} else if (bp) {
2404	    buf_brelse(bp);
2405	}
2406
2407	/* Note that these I/Os bypass the journal (no calls to journal_start_modify_block) */
2408
2409	// the alternate super block...
2410	// XXXdbg - we probably don't need to do this each and every time.
2411	//          hfs_btreeio.c:FlushAlternate() should flag when it was
2412	//          written...
2413	if (hfsmp->hfs_partition_avh_sector) {
2414		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
2415				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
2416				hfsmp->hfs_physical_block_size, NOCRED, &bp);
2417		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
2418		    /*
2419			 * note this I/O can fail if the partition shrank behind our backs!
2420			 * So failure should be OK here.
2421			 */
2422			buf_bwrite(bp);
2423		} else if (bp) {
2424		    buf_brelse(bp);
2425		}
2426	}
2427
2428	/* Is the FS's idea of the AVH different than the partition ? */
2429	if ((hfsmp->hfs_fs_avh_sector) && (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
2430		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
2431				HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
2432				hfsmp->hfs_physical_block_size, NOCRED, &bp);
2433		if (retval == 0 && ((buf_flags(bp) & (B_DELWRI | B_LOCKED)) == B_DELWRI)) {
2434		    buf_bwrite(bp);
2435		} else if (bp) {
2436		    buf_brelse(bp);
2437		}
2438	}
2439
2440}
2441
2442
2443struct hfs_sync_cargs {
2444        kauth_cred_t cred;
2445        struct proc  *p;
2446        int    waitfor;
2447        int    error;
2448};
2449
2450
2451static int
2452hfs_sync_callback(struct vnode *vp, void *cargs)
2453{
2454	struct cnode *cp;
2455	struct hfs_sync_cargs *args;
2456	int error;
2457
2458	args = (struct hfs_sync_cargs *)cargs;
2459
2460	if (hfs_lock(VTOC(vp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) != 0) {
2461		return (VNODE_RETURNED);
2462	}
2463	cp = VTOC(vp);
2464
2465	if ((cp->c_flag & C_MODIFIED) ||
2466	    (cp->c_touch_acctime | cp->c_touch_chgtime | cp->c_touch_modtime) ||
2467	    vnode_hasdirtyblks(vp)) {
2468	        error = hfs_fsync(vp, args->waitfor, 0, args->p);
2469
2470		if (error)
2471		        args->error = error;
2472	}
2473	hfs_unlock(cp);
2474	return (VNODE_RETURNED);
2475}
2476
2477
2478
2479/*
2480 * Go through the disk queues to initiate sandbagged IO;
2481 * go through the inodes to write those that have been modified;
2482 * initiate the writing of the super block if it has been modified.
2483 *
2484 * Note: we are always called with the filesystem marked `MPBUSY'.
2485 */
2486int
2487hfs_sync(struct mount *mp, int waitfor, vfs_context_t context)
2488{
2489	struct proc *p = vfs_context_proc(context);
2490	struct cnode *cp;
2491	struct hfsmount *hfsmp;
2492	ExtendedVCB *vcb;
2493	struct vnode *meta_vp[4];
2494	int i;
2495	int error, allerror = 0;
2496	struct hfs_sync_cargs args;
2497
2498	hfsmp = VFSTOHFS(mp);
2499
2500	// Back off if hfs_changefs or a freeze is underway
2501	hfs_lock_mount(hfsmp);
2502	if ((hfsmp->hfs_flags & HFS_IN_CHANGEFS)
2503	    || hfsmp->hfs_freeze_state != HFS_THAWED) {
2504		hfs_unlock_mount(hfsmp);
2505		return 0;
2506	}
2507
2508	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2509		hfs_unlock_mount(hfsmp);
2510		return (EROFS);
2511	}
2512
2513	++hfsmp->hfs_syncers;
2514	hfs_unlock_mount(hfsmp);
2515
2516	args.cred = kauth_cred_get();
2517	args.waitfor = waitfor;
2518	args.p = p;
2519	args.error = 0;
2520	/*
2521	 * hfs_sync_callback will be called for each vnode
2522	 * hung off of this mount point... the vnode will be
2523	 * properly referenced and unreferenced around the callback
2524	 */
2525	vnode_iterate(mp, 0, hfs_sync_callback, (void *)&args);
2526
2527	if (args.error)
2528	        allerror = args.error;
2529
2530	vcb = HFSTOVCB(hfsmp);
2531
2532	meta_vp[0] = vcb->extentsRefNum;
2533	meta_vp[1] = vcb->catalogRefNum;
2534	meta_vp[2] = vcb->allocationsRefNum;  /* This is NULL for standard HFS */
2535	meta_vp[3] = hfsmp->hfs_attribute_vp; /* Optional file */
2536
	/* Now sync our metadata files (extents, catalog, allocation bitmap, and attributes) */
2538	for (i = 0; i < 4; ++i) {
2539		struct vnode *btvp;
2540
		btvp = meta_vp[i];
2542		if ((btvp==0) || (vnode_mount(btvp) != mp))
2543			continue;
2544
2545		/* XXX use hfs_systemfile_lock instead ? */
2546		(void) hfs_lock(VTOC(btvp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
2547		cp = VTOC(btvp);
2548
2549		if (((cp->c_flag &  C_MODIFIED) == 0) &&
2550		    (cp->c_touch_acctime == 0) &&
2551		    (cp->c_touch_chgtime == 0) &&
2552		    (cp->c_touch_modtime == 0) &&
2553		    vnode_hasdirtyblks(btvp) == 0) {
2554			hfs_unlock(VTOC(btvp));
2555			continue;
2556		}
2557		error = vnode_get(btvp);
2558		if (error) {
2559			hfs_unlock(VTOC(btvp));
2560			continue;
2561		}
2562		if ((error = hfs_fsync(btvp, waitfor, 0, p)))
2563			allerror = error;
2564
2565		hfs_unlock(cp);
2566		vnode_put(btvp);
	}
2568
2569
2570#if CONFIG_HFS_STD
2571	/*
2572	 * Force stale file system control information to be flushed.
2573	 */
2574	if (vcb->vcbSigWord == kHFSSigWord) {
2575		if ((error = VNOP_FSYNC(hfsmp->hfs_devvp, waitfor, context))) {
2576			allerror = error;
2577		}
2578	}
2579#endif
2580
2581#if QUOTA
2582	hfs_qsync(mp);
2583#endif /* QUOTA */
2584
2585	hfs_hotfilesync(hfsmp, vfs_context_kernel());
2586
2587	/*
2588	 * Write back modified superblock.
2589	 */
2590	if (IsVCBDirty(vcb)) {
2591		error = hfs_flushvolumeheader(hfsmp, waitfor, 0);
2592		if (error)
2593			allerror = error;
2594	}
2595
2596	if (hfsmp->jnl) {
2597	    hfs_journal_flush(hfsmp, FALSE);
2598	}
2599
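	/*
	 * Drop our syncer reference; the last syncer out wakes any thread
	 * waiting to freeze the volume.
	 */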
2600	hfs_lock_mount(hfsmp);
2601	boolean_t wake = (!--hfsmp->hfs_syncers
2602					  && hfsmp->hfs_freeze_state == HFS_WANT_TO_FREEZE);
2603	hfs_unlock_mount(hfsmp);
2604	if (wake)
2605		wakeup(&hfsmp->hfs_freeze_state);
2606
2607	return (allerror);
2608}
2609
2610
2611/*
2612 * File handle to vnode
2613 *
2614 * Have to be really careful about stale file handles:
2615 * - check that the cnode id is valid
2616 * - call hfs_vget() to get the locked cnode
2617 * - check for an unallocated cnode (i_mode == 0)
2618 * - check that the given client host has export rights and return
 *   those rights via exflagsp and credanonp
2620 */
2621static int
2622hfs_fhtovp(struct mount *mp, int fhlen, unsigned char *fhp, struct vnode **vpp, __unused vfs_context_t context)
2623{
2624	struct hfsfid *hfsfhp;
2625	struct vnode *nvp;
2626	int result;
2627
2628	*vpp = NULL;
2629	hfsfhp = (struct hfsfid *)fhp;
2630
2631	if (fhlen < (int)sizeof(struct hfsfid))
2632		return (EINVAL);
2633
2634	result = hfs_vget(VFSTOHFS(mp), ntohl(hfsfhp->hfsfid_cnid), &nvp, 0, 0);
2635	if (result) {
2636		if (result == ENOENT)
2637			result = ESTALE;
2638		return result;
2639	}
2640
2641	/*
2642	 * We used to use the create time as the gen id of the file handle,
2643	 * but it is not static enough because it can change at any point
2644	 * via system calls.  We still don't have another volume ID or other
2645	 * unique identifier to use for a generation ID across reboots that
2646	 * persists until the file is removed.  Using only the CNID exposes
2647	 * us to the potential wrap-around case, but as of 2/2008, it would take
2648	 * over 2 months to wrap around if the machine did nothing but allocate
2649	 * CNIDs.  Using some kind of wrap counter would only be effective if
2650	 * each file had the wrap counter associated with it.  For now,
2651	 * we use only the CNID to identify the file as it's good enough.
2652	 */
2653
2654	*vpp = nvp;
2655
2656	hfs_unlock(VTOC(nvp));
2657	return (0);
2658}
2659
2660
2661/*
2662 * Vnode pointer to File handle
2663 */
2664/* ARGSUSED */
2665static int
2666hfs_vptofh(struct vnode *vp, int *fhlenp, unsigned char *fhp, __unused vfs_context_t context)
2667{
2668	struct cnode *cp;
2669	struct hfsfid *hfsfhp;
2670
2671	if (ISHFS(VTOVCB(vp)))
2672		return (ENOTSUP);	/* hfs standard is not exportable */
2673
2674	if (*fhlenp < (int)sizeof(struct hfsfid))
2675		return (EOVERFLOW);
2676
2677	cp = VTOC(vp);
2678	hfsfhp = (struct hfsfid *)fhp;
2679	/* only the CNID is used to identify the file now */
2680	hfsfhp->hfsfid_cnid = htonl(cp->c_fileid);
2681	hfsfhp->hfsfid_gen = htonl(cp->c_fileid);
2682	*fhlenp = sizeof(struct hfsfid);
2683
2684	return (0);
2685}
2686
2687
2688/*
2689 * Initialize HFS filesystems, done only once per boot.
2690 *
2691 * HFS is not a kext-based file system.  This makes it difficult to find
2692 * out when the last HFS file system was unmounted and call hfs_uninit()
2693 * to deallocate data structures allocated in hfs_init().  Therefore we
2694 * never deallocate memory allocated by lock attribute and group initializations
2695 * in this function.
2696 */
2697static int
2698hfs_init(__unused struct vfsconf *vfsp)
2699{
2700	static int done = 0;
2701
2702	if (done)
2703		return (0);
2704	done = 1;
2705	hfs_chashinit();
2706	hfs_converterinit();
2707
2708	BTReserveSetup();
2709
2710	hfs_lock_attr    = lck_attr_alloc_init();
2711	hfs_group_attr   = lck_grp_attr_alloc_init();
2712	hfs_mutex_group  = lck_grp_alloc_init("hfs-mutex", hfs_group_attr);
2713	hfs_rwlock_group = lck_grp_alloc_init("hfs-rwlock", hfs_group_attr);
2714	hfs_spinlock_group = lck_grp_alloc_init("hfs-spinlock", hfs_group_attr);
2715
2716#if HFS_COMPRESSION
2717	decmpfs_init();
2718#endif
2719
2720	return (0);
2721}
2722
2723
2724/*
2725 * Destroy all locks, mutexes and spinlocks in hfsmp on unmount or failed mount
2726 */
2727static void
2728hfs_locks_destroy(struct hfsmount *hfsmp)
2729{
2730
2731	lck_mtx_destroy(&hfsmp->hfs_mutex, hfs_mutex_group);
2732	lck_mtx_destroy(&hfsmp->hfc_mutex, hfs_mutex_group);
2733	lck_rw_destroy(&hfsmp->hfs_global_lock, hfs_rwlock_group);
2734	lck_spin_destroy(&hfsmp->vcbFreeExtLock, hfs_spinlock_group);
2735
2736	return;
2737}
2738
2739
2740static int
2741hfs_getmountpoint(struct vnode *vp, struct hfsmount **hfsmpp)
2742{
2743	struct hfsmount * hfsmp;
2744	char fstypename[MFSNAMELEN];
2745
2746	if (vp == NULL)
2747		return (EINVAL);
2748
2749	if (!vnode_isvroot(vp))
2750		return (EINVAL);
2751
2752	vnode_vfsname(vp, fstypename);
2753	if (strncmp(fstypename, "hfs", sizeof(fstypename)) != 0)
2754		return (EINVAL);
2755
2756	hfsmp = VTOHFS(vp);
2757
2758	if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord)
2759		return (EINVAL);
2760
2761	*hfsmpp = hfsmp;
2762
2763	return (0);
2764}
2765
2766// XXXdbg
2767#include <sys/filedesc.h>
2768
2769/*
2770 * HFS filesystem related variables.
2771 */
2772int
2773hfs_sysctl(int *name, __unused u_int namelen, user_addr_t oldp, size_t *oldlenp,
2774			user_addr_t newp, size_t newlen, vfs_context_t context)
2775{
2776	struct proc *p = vfs_context_proc(context);
2777	int error;
2778	struct hfsmount *hfsmp;
2779
2780	/* all sysctl names at this level are terminal */
2781
2782	if (name[0] == HFS_ENCODINGBIAS) {
2783		int bias;
2784
2785		bias = hfs_getencodingbias();
2786		error = sysctl_int(oldp, oldlenp, newp, newlen, &bias);
2787		if (error == 0 && newp)
2788			hfs_setencodingbias(bias);
2789		return (error);
2790
2791	} else if (name[0] == HFS_EXTEND_FS) {
2792		u_int64_t  newsize;
2793		vnode_t vp = vfs_context_cwd(context);
2794
2795		if (newp == USER_ADDR_NULL || vp == NULLVP)
2796			return (EINVAL);
2797		if ((error = hfs_getmountpoint(vp, &hfsmp)))
2798			return (error);
2799		error = sysctl_quad(oldp, oldlenp, newp, newlen, (quad_t *)&newsize);
2800		if (error)
2801			return (error);
2802
2803		error = hfs_extendfs(hfsmp, newsize, context);
2804		return (error);
2805
2806	} else if (name[0] == HFS_ENCODINGHINT) {
2807		size_t bufsize;
2808		size_t bytes;
2809		u_int32_t hint;
2810		u_int16_t *unicode_name = NULL;
2811		char *filename = NULL;
2812
2813		if ((newlen <= 0) || (newlen > MAXPATHLEN))
2814			return (EINVAL);
2815
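		/*
		 * Decoding UTF-8 into UTF-16 can grow the name, so size the
		 * unicode buffer generously before converting.
		 */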
2816		bufsize = MAX(newlen * 3, MAXPATHLEN);
2817		MALLOC(filename, char *, newlen, M_TEMP, M_WAITOK);
2818		if (filename == NULL) {
2819			error = ENOMEM;
2820			goto encodinghint_exit;
2821		}
2822		MALLOC(unicode_name, u_int16_t *, bufsize, M_TEMP, M_WAITOK);
2823		if (unicode_name == NULL) {
2824			error = ENOMEM;
2825			goto encodinghint_exit;
2826		}
2827
2828		error = copyin(newp, (caddr_t)filename, newlen);
2829		if (error == 0) {
2830			error = utf8_decodestr((u_int8_t *)filename, newlen - 1, unicode_name,
2831			                       &bytes, bufsize, 0, UTF_DECOMPOSED);
2832			if (error == 0) {
2833				hint = hfs_pickencoding(unicode_name, bytes / 2);
2834				error = sysctl_int(oldp, oldlenp, USER_ADDR_NULL, 0, (int32_t *)&hint);
2835			}
2836		}
2837
2838encodinghint_exit:
2839		if (unicode_name)
2840			FREE(unicode_name, M_TEMP);
2841		if (filename)
2842			FREE(filename, M_TEMP);
2843		return (error);
2844
2845	} else if (name[0] == HFS_ENABLE_JOURNALING) {
2846		// make the file system journaled...
2847		vnode_t vp = vfs_context_cwd(context);
2848		vnode_t jvp;
2849		ExtendedVCB *vcb;
2850		struct cat_attr jnl_attr;
		struct cat_attr jinfo_attr;
2852		struct cat_fork jnl_fork;
2853		struct cat_fork jinfo_fork;
2854		buf_t jib_buf;
2855		uint64_t jib_blkno;
2856		uint32_t tmpblkno;
2857		uint64_t journal_byte_offset;
2858		uint64_t journal_size;
2859		vnode_t jib_vp = NULLVP;
2860		struct JournalInfoBlock local_jib;
2861		int err = 0;
2862		void *jnl = NULL;
2863		int lockflags;
2864
2865		/* Only root can enable journaling */
2866		if (!kauth_cred_issuser(kauth_cred_get())) {
2867			return (EPERM);
2868		}
2869		if (vp == NULLVP)
2870		        return EINVAL;
2871
2872		hfsmp = VTOHFS(vp);
2873		if (hfsmp->hfs_flags & HFS_READ_ONLY) {
2874			return EROFS;
2875		}
2876		if (HFSTOVCB(hfsmp)->vcbSigWord == kHFSSigWord) {
2877			printf("hfs: can't make a plain hfs volume journaled.\n");
2878			return EINVAL;
2879		}
2880
2881		if (hfsmp->jnl) {
2882		    printf("hfs: volume @ mp %p is already journaled!\n", vnode_mount(vp));
2883		    return EAGAIN;
2884		}
2885		vcb = HFSTOVCB(hfsmp);
2886
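		/*
		 * The caller (typically hfs.util) passes the journal info block's
		 * allocation block in name[1], the journal's starting allocation
		 * block in name[2], and the journal size in bytes in name[3].
		 */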
2887		/* Set up local copies of the initialization info */
2888		tmpblkno = (uint32_t) name[1];
2889		jib_blkno = (uint64_t) tmpblkno;
2890		journal_byte_offset = (uint64_t) name[2];
2891		journal_byte_offset *= hfsmp->blockSize;
2892		journal_byte_offset += hfsmp->hfsPlusIOPosOffset;
2893		journal_size = (uint64_t)((unsigned)name[3]);
2894
2895		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG | SFL_EXTENTS, HFS_EXCLUSIVE_LOCK);
2896		if (BTHasContiguousNodes(VTOF(vcb->catalogRefNum)) == 0 ||
2897			BTHasContiguousNodes(VTOF(vcb->extentsRefNum)) == 0) {
2898
2899			printf("hfs: volume has a btree w/non-contiguous nodes.  can not enable journaling.\n");
2900			hfs_systemfile_unlock(hfsmp, lockflags);
2901			return EINVAL;
2902		}
2903		hfs_systemfile_unlock(hfsmp, lockflags);
2904
2905		// make sure these both exist!
2906		if (   GetFileInfo(vcb, kHFSRootFolderID, ".journal_info_block", &jinfo_attr, &jinfo_fork) == 0
2907			|| GetFileInfo(vcb, kHFSRootFolderID, ".journal", &jnl_attr, &jnl_fork) == 0) {
2908
2909			return EINVAL;
2910		}
2911
2912		/*
2913		 * At this point, we have a copy of the metadata that lives in the catalog for the
2914		 * journal info block.  Compare that the journal info block's single extent matches
2915		 * that which was passed into this sysctl.
2916		 *
2917		 * If it is different, deny the journal enable call.
2918		 */
2919		if (jinfo_fork.cf_blocks > 1) {
2920			/* too many blocks */
2921			return EINVAL;
2922		}
2923
2924		if (jinfo_fork.cf_extents[0].startBlock != jib_blkno) {
2925			/* Wrong block */
2926			return EINVAL;
2927		}
2928
2929		/*
2930		 * We want to immediately purge the vnode for the JIB.
2931		 *
2932		 * Because it was written to from userland, there's probably
2933		 * a vnode somewhere in the vnode cache (possibly with UBC backed blocks).
2934		 * So we bring the vnode into core, then immediately do whatever
2935		 * we can to flush/vclean it out.  This is because those blocks will be
2936		 * interpreted as user data, which may be treated separately on some platforms
2937		 * than metadata.  If the vnode is gone, then there cannot be backing blocks
2938		 * in the UBC.
2939		 */
2940		if (hfs_vget (hfsmp, jinfo_attr.ca_fileid, &jib_vp, 1, 0)) {
2941			return EINVAL;
2942		}
2943		/*
2944		 * Now we have a vnode for the JIB. recycle it. Because we hold an iocount
2945		 * on the vnode, we'll just mark it for termination when the last iocount
2946		 * (hopefully ours), is dropped.
2947		 */
2948		vnode_recycle (jib_vp);
2949		err = vnode_put (jib_vp);
2950		if (err) {
2951			return EINVAL;
2952		}
2953
2954		/* Initialize the local copy of the JIB (just like hfs.util) */
2955		memset (&local_jib, 'Z', sizeof(struct JournalInfoBlock));
2956		local_jib.flags = SWAP_BE32(kJIJournalInFSMask);
2957		/* Note that the JIB's offset is in bytes */
2958		local_jib.offset = SWAP_BE64(journal_byte_offset);
2959		local_jib.size = SWAP_BE64(journal_size);
2960
2961		/*
2962		 * Now write out the local JIB.  This essentially overwrites the userland
2963		 * copy of the JIB.  Read it as BLK_META to treat it as a metadata read/write.
2964		 */
2965		jib_buf = buf_getblk (hfsmp->hfs_devvp,
2966				jib_blkno * (hfsmp->blockSize / hfsmp->hfs_logical_block_size),
2967				hfsmp->blockSize, 0, 0, BLK_META);
2968		char* buf_ptr = (char*) buf_dataptr (jib_buf);
2969
2970		/* Zero out the portion of the block that won't contain JIB data */
2971		memset (buf_ptr, 0, hfsmp->blockSize);
2972
2973		bcopy(&local_jib, buf_ptr, sizeof(local_jib));
2974		if (buf_bwrite (jib_buf)) {
2975			return EIO;
2976		}
2977
2978		/* Force a flush track cache */
2979		(void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE, NULL, FWRITE, context);
2980
2981
2982		/* Now proceed with full volume sync */
2983		hfs_sync(hfsmp->hfs_mp, MNT_WAIT, context);
2984
2985		printf("hfs: Initializing the journal (joffset 0x%llx sz 0x%llx)...\n",
2986			   (off_t)name[2], (off_t)name[3]);
2987
2988		//
2989		// XXXdbg - note that currently (Sept, 08) hfs_util does not support
2990		//          enabling the journal on a separate device so it is safe
2991		//          to just copy hfs_devvp here.  If hfs_util gets the ability
2992		//          to dynamically enable the journal on a separate device then
2993		//          we will have to do the same thing as hfs_early_journal_init()
2994		//          to locate and open the journal device.
2995		//
2996		jvp = hfsmp->hfs_devvp;
2997		jnl = journal_create(jvp, journal_byte_offset, journal_size,
2998							 hfsmp->hfs_devvp,
2999							 hfsmp->hfs_logical_block_size,
3000							 0,
3001							 0,
3002							 hfs_sync_metadata, hfsmp->hfs_mp,
3003							 hfsmp->hfs_mp);
3004
3005		/*
3006		 * Set up the trim callback function so that we can add
3007		 * recently freed extents to the free extent cache once
3008		 * the transaction that freed them is written to the
3009		 * journal on disk.
3010		 */
3011		if (jnl)
3012			journal_trim_set_callback(jnl, hfs_trim_callback, hfsmp);
3013
3014		if (jnl == NULL) {
3015			printf("hfs: FAILED to create the journal!\n");
3016			if (jvp && jvp != hfsmp->hfs_devvp) {
3017				vnode_clearmountedon(jvp);
3018				VNOP_CLOSE(jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel());
3019			}
3020			jvp = NULL;
3021
3022			return EINVAL;
3023		}
3024
3025		hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3026
3027		/*
3028		 * Flush all dirty metadata buffers.
3029		 */
3030		buf_flushdirtyblks(hfsmp->hfs_devvp, TRUE, 0, "hfs_sysctl");
3031		buf_flushdirtyblks(hfsmp->hfs_extents_vp, TRUE, 0, "hfs_sysctl");
3032		buf_flushdirtyblks(hfsmp->hfs_catalog_vp, TRUE, 0, "hfs_sysctl");
3033		buf_flushdirtyblks(hfsmp->hfs_allocation_vp, TRUE, 0, "hfs_sysctl");
3034		if (hfsmp->hfs_attribute_vp)
3035			buf_flushdirtyblks(hfsmp->hfs_attribute_vp, TRUE, 0, "hfs_sysctl");
3036
3037		HFSTOVCB(hfsmp)->vcbJinfoBlock = name[1];
3038		HFSTOVCB(hfsmp)->vcbAtrb |= kHFSVolumeJournaledMask;
3039		hfsmp->jvp = jvp;
3040		hfsmp->jnl = jnl;
3041
3042		// save this off for the hack-y check in hfs_remove()
3043		hfsmp->jnl_start        = (u_int32_t)name[2];
3044		hfsmp->jnl_size         = (off_t)((unsigned)name[3]);
3045		hfsmp->hfs_jnlinfoblkid = jinfo_attr.ca_fileid;
3046		hfsmp->hfs_jnlfileid    = jnl_attr.ca_fileid;
3047
3048		vfs_setflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3049
3050		hfs_unlock_global (hfsmp);
3051		hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
3052
3053		{
3054			fsid_t fsid;
3055
3056			fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3057			fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3058			vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3059		}
3060		return 0;
3061	} else if (name[0] == HFS_DISABLE_JOURNALING) {
3062		// clear the journaling bit
3063		vnode_t vp = vfs_context_cwd(context);
3064
3065		/* Only root can disable journaling */
3066		if (!kauth_cred_issuser(kauth_cred_get())) {
3067			return (EPERM);
3068		}
3069		if (vp == NULLVP)
3070		        return EINVAL;
3071
3072		hfsmp = VTOHFS(vp);
3073
3074		/*
3075		 * Disabling journaling is disallowed on volumes with directory hard links
3076		 * because we have not tested the relevant code path.
3077		 */
3078		if (hfsmp->hfs_private_attr[DIR_HARDLINKS].ca_entries != 0){
3079			printf("hfs: cannot disable journaling on volumes with directory hardlinks\n");
3080			return EPERM;
3081		}
3082
3083		printf("hfs: disabling journaling for mount @ %p\n", vnode_mount(vp));
3084
3085		hfs_lock_global (hfsmp, HFS_EXCLUSIVE_LOCK);
3086
3087		// Lights out for you buddy!
3088		journal_close(hfsmp->jnl);
3089		hfsmp->jnl = NULL;
3090
3091		if (hfsmp->jvp && hfsmp->jvp != hfsmp->hfs_devvp) {
3092			vnode_clearmountedon(hfsmp->jvp);
3093			VNOP_CLOSE(hfsmp->jvp, hfsmp->hfs_flags & HFS_READ_ONLY ? FREAD : FREAD|FWRITE, vfs_context_kernel());
3094			vnode_put(hfsmp->jvp);
3095		}
3096		hfsmp->jvp = NULL;
3097		vfs_clearflags(hfsmp->hfs_mp, (u_int64_t)((unsigned int)MNT_JOURNALED));
3098		hfsmp->jnl_start        = 0;
3099		hfsmp->hfs_jnlinfoblkid = 0;
3100		hfsmp->hfs_jnlfileid    = 0;
3101
3102		HFSTOVCB(hfsmp)->vcbAtrb &= ~kHFSVolumeJournaledMask;
3103
3104		hfs_unlock_global (hfsmp);
3105
3106		hfs_flushvolumeheader(hfsmp, MNT_WAIT, 1);
3107
3108		{
3109			fsid_t fsid;
3110
3111			fsid.val[0] = (int32_t)hfsmp->hfs_raw_dev;
3112			fsid.val[1] = (int32_t)vfs_typenum(HFSTOVFS(hfsmp));
3113			vfs_event_signal(&fsid, VQ_UPDATE, (intptr_t)NULL);
3114		}
3115		return 0;
3116	} else if (name[0] == HFS_GET_JOURNAL_INFO) {
3117		vnode_t vp = vfs_context_cwd(context);
3118		off_t jnl_start, jnl_size;
3119
3120		if (vp == NULLVP)
3121		        return EINVAL;
3122
3123		/* 64-bit processes won't work with this sysctl -- can't fit a pointer into an int! */
3124		if (proc_is64bit(current_proc()))
3125			return EINVAL;
3126
3127		hfsmp = VTOHFS(vp);
3128	    if (hfsmp->jnl == NULL) {
3129			jnl_start = 0;
3130			jnl_size  = 0;
3131	    } else {
3132			jnl_start = (off_t)(hfsmp->jnl_start * HFSTOVCB(hfsmp)->blockSize) + (off_t)HFSTOVCB(hfsmp)->hfsPlusIOPosOffset;
3133			jnl_size  = (off_t)hfsmp->jnl_size;
3134	    }
3135
3136	    if ((error = copyout((caddr_t)&jnl_start, CAST_USER_ADDR_T(name[1]), sizeof(off_t))) != 0) {
3137			return error;
3138		}
3139	    if ((error = copyout((caddr_t)&jnl_size, CAST_USER_ADDR_T(name[2]), sizeof(off_t))) != 0) {
3140			return error;
3141		}
3142
3143		return 0;
3144	} else if (name[0] == HFS_SET_PKG_EXTENSIONS) {
3145
3146	    return set_package_extensions_table((user_addr_t)((unsigned)name[1]), name[2], name[3]);
3147
3148	} else if (name[0] == VFS_CTL_QUERY) {
3149    	struct sysctl_req *req;
3150    	union union_vfsidctl vc;
3151    	struct mount *mp;
3152 	    struct vfsquery vq;
3153
3154		req = CAST_DOWN(struct sysctl_req *, oldp);	/* we're new style vfs sysctl. */
3155		if (req == NULL) {
3156			return EFAULT;
3157		}
3158
3159        error = SYSCTL_IN(req, &vc, proc_is64bit(p)? sizeof(vc.vc64):sizeof(vc.vc32));
3160		if (error) return (error);
3161
3162		mp = vfs_getvfs(&vc.vc32.vc_fsid); /* works for 32 and 64 */
3163        if (mp == NULL) return (ENOENT);
3164
3165		hfsmp = VFSTOHFS(mp);
3166		bzero(&vq, sizeof(vq));
3167		vq.vq_flags = hfsmp->hfs_notification_conditions;
		return SYSCTL_OUT(req, &vq, sizeof(vq));
3169	} else if (name[0] == HFS_REPLAY_JOURNAL) {
3170		vnode_t devvp = NULL;
3171		int device_fd;
3172		if (namelen != 2) {
3173			return (EINVAL);
3174		}
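		/* name[1] is an open file descriptor for the device whose journal should be replayed */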
3175		device_fd = name[1];
3176		error = file_vnode(device_fd, &devvp);
3177		if (error) {
3178			return error;
3179		}
3180		error = vnode_getwithref(devvp);
3181		if (error) {
3182			file_drop(device_fd);
3183			return error;
3184		}
3185		error = hfs_journal_replay(devvp, context);
3186		file_drop(device_fd);
3187		vnode_put(devvp);
3188		return error;
3189	} else if (name[0] == HFS_ENABLE_RESIZE_DEBUG) {
3190		hfs_resize_debug = 1;
3191		printf ("hfs_sysctl: Enabled volume resize debugging.\n");
3192		return 0;
3193	}
3194
3195	return (ENOTSUP);
3196}
3197
3198/*
3199 * hfs_vfs_vget is not static since it is used in hfs_readwrite.c to support
3200 * the build_path ioctl.  We use it to leverage the code below that updates
3201 * the origin list cache if necessary
3202 */
3203
3204int
3205hfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
3206{
3207	int error;
3208	int lockflags;
3209	struct hfsmount *hfsmp;
3210
3211	hfsmp = VFSTOHFS(mp);
3212
3213	error = hfs_vget(hfsmp, (cnid_t)ino, vpp, 1, 0);
3214	if (error)
3215		return (error);
3216
3217	/*
3218	 * ADLs may need to have their origin state updated
3219	 * since build_path needs a valid parent.  The same is true
3220	 * for hardlinked files as well.  There isn't a race window here
3221	 * in re-acquiring the cnode lock since we aren't pulling any data
3222	 * out of the cnode; instead, we're going to the catalog.
3223	 */
3224	if ((VTOC(*vpp)->c_flag & C_HARDLINK) &&
3225	    (hfs_lock(VTOC(*vpp), HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT) == 0)) {
3226		cnode_t *cp = VTOC(*vpp);
3227		struct cat_desc cdesc;
3228
3229		if (!hfs_haslinkorigin(cp)) {
3230			lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3231			error = cat_findname(hfsmp, (cnid_t)ino, &cdesc);
3232			hfs_systemfile_unlock(hfsmp, lockflags);
3233			if (error == 0) {
3234				if ((cdesc.cd_parentcnid != hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
3235					(cdesc.cd_parentcnid != hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid)) {
3236					hfs_savelinkorigin(cp, cdesc.cd_parentcnid);
3237				}
3238				cat_releasedesc(&cdesc);
3239			}
3240		}
3241		hfs_unlock(cp);
3242	}
3243	return (0);
3244}
3245
3246
3247/*
3248 * Look up an HFS object by ID.
3249 *
3250 * The object is returned with an iocount reference and the cnode locked.
3251 *
3252 * If the object is a file then it will represent the data fork.
3253 */
3254int
3255hfs_vget(struct hfsmount *hfsmp, cnid_t cnid, struct vnode **vpp, int skiplock, int allow_deleted)
3256{
3257	struct vnode *vp = NULLVP;
3258	struct cat_desc cndesc;
3259	struct cat_attr cnattr;
3260	struct cat_fork cnfork;
3261	u_int32_t linkref = 0;
3262	int error;
3263
	/* Check for cnids that shouldn't be exported. */
3265	if ((cnid < kHFSFirstUserCatalogNodeID) &&
3266	    (cnid != kHFSRootFolderID && cnid != kHFSRootParentID)) {
3267		return (ENOENT);
3268	}
3269	/* Don't export our private directories. */
3270	if (cnid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid ||
3271	    cnid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) {
3272		return (ENOENT);
3273	}
3274	/*
3275	 * Check the hash first
3276	 */
3277	vp = hfs_chash_getvnode(hfsmp, cnid, 0, skiplock, allow_deleted);
3278	if (vp) {
3279		*vpp = vp;
3280		return(0);
3281	}
3282
3283	bzero(&cndesc, sizeof(cndesc));
3284	bzero(&cnattr, sizeof(cnattr));
3285	bzero(&cnfork, sizeof(cnfork));
3286
3287	/*
3288	 * Not in hash, lookup in catalog
3289	 */
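	/*
	 * kHFSRootParentID has no catalog record of its own, so synthesize a
	 * descriptor that resolves to the root folder rather than consulting
	 * the catalog.
	 */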
3290	if (cnid == kHFSRootParentID) {
3291		static char hfs_rootname[] = "/";
3292
3293		cndesc.cd_nameptr = (const u_int8_t *)&hfs_rootname[0];
3294		cndesc.cd_namelen = 1;
3295		cndesc.cd_parentcnid = kHFSRootParentID;
3296		cndesc.cd_cnid = kHFSRootFolderID;
3297		cndesc.cd_flags = CD_ISDIR;
3298
3299		cnattr.ca_fileid = kHFSRootFolderID;
3300		cnattr.ca_linkcount = 1;
3301		cnattr.ca_entries = 1;
3302		cnattr.ca_dircount = 1;
3303		cnattr.ca_mode = (S_IFDIR | S_IRWXU | S_IRWXG | S_IRWXO);
3304	} else {
3305		int lockflags;
3306		cnid_t pid;
3307		const char *nameptr;
3308
3309		lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_SHARED_LOCK);
3310		error = cat_idlookup(hfsmp, cnid, 0, 0, &cndesc, &cnattr, &cnfork);
3311		hfs_systemfile_unlock(hfsmp, lockflags);
3312
3313		if (error) {
3314			*vpp = NULL;
3315			return (error);
3316		}
3317
3318		/*
3319		 * Check for a raw hardlink inode and save its linkref.
3320		 */
3321		pid = cndesc.cd_parentcnid;
3322		nameptr = (const char *)cndesc.cd_nameptr;
3323
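		/*
		 * Raw hardlink inodes live in the private metadata directories and
		 * are named with a fixed prefix followed by the decimal link
		 * reference, which is parsed out here.
		 */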
3324		if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3325		    (bcmp(nameptr, HFS_INODE_PREFIX, HFS_INODE_PREFIX_LEN) == 0)) {
3326			linkref = strtoul(&nameptr[HFS_INODE_PREFIX_LEN], NULL, 10);
3327
3328		} else if ((pid == hfsmp->hfs_private_desc[DIR_HARDLINKS].cd_cnid) &&
3329		           (bcmp(nameptr, HFS_DIRINODE_PREFIX, HFS_DIRINODE_PREFIX_LEN) == 0)) {
3330			linkref = strtoul(&nameptr[HFS_DIRINODE_PREFIX_LEN], NULL, 10);
3331
3332		} else if ((pid == hfsmp->hfs_private_desc[FILE_HARDLINKS].cd_cnid) &&
3333		           (bcmp(nameptr, HFS_DELETE_PREFIX, HFS_DELETE_PREFIX_LEN) == 0)) {
3334			*vpp = NULL;
3335			cat_releasedesc(&cndesc);
3336			return (ENOENT);  /* open unlinked file */
3337		}
3338	}
3339
3340	/*
3341	 * Finish initializing cnode descriptor for hardlinks.
3342	 *
3343	 * We need a valid name and parent for reverse lookups.
3344	 */
3345	if (linkref) {
3346		cnid_t lastid;
3347		struct cat_desc linkdesc;
3348		int linkerr = 0;
3349
3350		cnattr.ca_linkref = linkref;
3351		bzero (&linkdesc, sizeof (linkdesc));
3352
3353		/*
3354		 * If the caller supplied the raw inode value, then we don't know exactly
3355		 * which hardlink they wanted. It's likely that they acquired the raw inode
3356		 * value BEFORE the item became a hardlink, in which case, they probably
3357		 * want the oldest link.  So request the oldest link from the catalog.
3358		 *
3359		 * Unfortunately, this requires that we iterate through all N hardlinks.  On the
3360		 * plus side, since we know we want the last link ID, the same call can also hand
3361		 * back the name that goes with it, since it will already have it in hand.
3362		 */
3363		linkerr = hfs_lookup_lastlink (hfsmp, linkref, &lastid, &linkdesc);
3364		if ((linkerr == 0) && (lastid != 0)) {
3365			/*
3366			 * Release any lingering buffers attached to our local descriptor.
3367			 * Then copy the name and other business into the cndesc
3368			 */
3369			cat_releasedesc (&cndesc);
3370			bcopy (&linkdesc, &cndesc, sizeof(linkdesc));
3371		}
3372		/* If it failed, the linkref code will just use whatever it had in-hand below. */
3373	}
3374
3375	if (linkref) {
3376		int newvnode_flags = 0;
3377
3378		error = hfs_getnewvnode(hfsmp, NULL, NULL, &cndesc, 0, &cnattr,
3379								&cnfork, &vp, &newvnode_flags);
3380		if (error == 0) {
3381			VTOC(vp)->c_flag |= C_HARDLINK;
3382			vnode_setmultipath(vp);
3383		}
3384	} else {
3385		struct componentname cn;
3386		int newvnode_flags = 0;
3387
3388		/* Supply hfs_getnewvnode with a component name. */
3389		MALLOC_ZONE(cn.cn_pnbuf, caddr_t, MAXPATHLEN, M_NAMEI, M_WAITOK);
3390		cn.cn_nameiop = LOOKUP;
3391		cn.cn_flags = ISLASTCN | HASBUF;
3392		cn.cn_context = NULL;
3393		cn.cn_pnlen = MAXPATHLEN;
3394		cn.cn_nameptr = cn.cn_pnbuf;
3395		cn.cn_namelen = cndesc.cd_namelen;
3396		cn.cn_hash = 0;
3397		cn.cn_consume = 0;
3398		bcopy(cndesc.cd_nameptr, cn.cn_nameptr, cndesc.cd_namelen + 1);
3399
3400		error = hfs_getnewvnode(hfsmp, NULLVP, &cn, &cndesc, 0, &cnattr,
3401								&cnfork, &vp, &newvnode_flags);
3402
3403		if (error == 0 && (VTOC(vp)->c_flag & C_HARDLINK)) {
3404			hfs_savelinkorigin(VTOC(vp), cndesc.cd_parentcnid);
3405		}
3406		FREE_ZONE(cn.cn_pnbuf, cn.cn_pnlen, M_NAMEI);
3407	}
3408	cat_releasedesc(&cndesc);
3409
3410	*vpp = vp;
3411	if (vp && skiplock) {
3412		hfs_unlock(VTOC(vp));
3413	}
3414	return (error);
3415}
3416
3417
3418/*
3419 * Flush out all the files in a filesystem.
3420 */
3421static int
3422#if QUOTA
3423hfs_flushfiles(struct mount *mp, int flags, struct proc *p)
3424#else
3425hfs_flushfiles(struct mount *mp, int flags, __unused struct proc *p)
3426#endif /* QUOTA */
3427{
3428	struct hfsmount *hfsmp;
3429	struct vnode *skipvp = NULLVP;
3430	int error;
3431	int accounted_root_usecounts;
3432#if QUOTA
3433	int i;
3434#endif
3435
3436	hfsmp = VFSTOHFS(mp);
3437
3438	accounted_root_usecounts = 0;
3439#if QUOTA
3440	/*
3441	 * The open quota files have an indirect reference on
3442	 * the root directory vnode.  We must account for this
3443	 * extra reference when doing the initial vflush.
3444	 */
3445	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
3446		/* Find out how many quota files we have open. */
3447		for (i = 0; i < MAXQUOTAS; i++) {
3448			if (hfsmp->hfs_qfiles[i].qf_vp != NULLVP)
3449				++accounted_root_usecounts;
3450		}
3451	}
3452#endif /* QUOTA */
3453
3454	if (accounted_root_usecounts > 0) {
3455		/* Obtain the root vnode so we can skip over it. */
3456		skipvp = hfs_chash_getvnode(hfsmp, kHFSRootFolderID, 0, 0, 0);
3457	}
3458
3459	error = vflush(mp, skipvp, SKIPSYSTEM | SKIPSWAP | flags);
3460	if (error != 0)
3461		return(error);
3462
3463	error = vflush(mp, skipvp, SKIPSYSTEM | flags);
3464
3465	if (skipvp) {
3466		/*
3467		 * See if there are additional references on the
3468		 * root vp besides the ones obtained from the open
3469		 * quota files and CoreStorage.
3470		 */
3471		if ((error == 0) &&
3472		    (vnode_isinuse(skipvp,  accounted_root_usecounts))) {
3473			error = EBUSY;  /* root directory is still open */
3474		}
3475		hfs_unlock(VTOC(skipvp));
3476		/* release the iocount from the hfs_chash_getvnode call above. */
3477		vnode_put(skipvp);
3478	}
3479	if (error && (flags & FORCECLOSE) == 0)
3480		return (error);
3481
3482#if QUOTA
3483	if (((unsigned int)vfs_flags(mp)) & MNT_QUOTA) {
3484		for (i = 0; i < MAXQUOTAS; i++) {
3485			if (hfsmp->hfs_qfiles[i].qf_vp == NULLVP)
3486				continue;
3487			hfs_quotaoff(p, mp, i);
3488		}
3489	}
3490#endif /* QUOTA */
3491
3492	if (skipvp) {
3493		error = vflush(mp, NULLVP, SKIPSYSTEM | flags);
3494	}
3495
3496	return (error);
3497}
3498
3499/*
3500 * Update volume encoding bitmap (HFS Plus only)
3501 *
3502 * Mark a legacy text encoding as in-use (as needed)
3503 * in the volume header of this HFS+ filesystem.
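 *
 * Worked example (illustrative): the bitmap has only 64 bits, so encodings
 * whose numeric value is below 64 map directly to their own bit, while
 * MacUkrainian (152) and MacFarsi (140) are folded onto the spare indices
 * 48 and 49 defined below.  Marking MacFarsi as in-use therefore sets
 * bit 49, i.e. encodingsBitmap |= 1ULL << 49.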
3504 */
3505__private_extern__
3506void
3507hfs_setencodingbits(struct hfsmount *hfsmp, u_int32_t encoding)
3508{
3509#define  kIndexMacUkrainian	48  /* MacUkrainian encoding is 152 */
3510#define  kIndexMacFarsi		49  /* MacFarsi encoding is 140 */
3511
3512	u_int32_t	index;
3513
3514	switch (encoding) {
3515	case kTextEncodingMacUkrainian:
3516		index = kIndexMacUkrainian;
3517		break;
3518	case kTextEncodingMacFarsi:
3519		index = kIndexMacFarsi;
3520		break;
3521	default:
3522		index = encoding;
3523		break;
3524	}
3525
3526	/* Only mark the encoding as in-use if it wasn't already set */
3527	if (index < 64 && (hfsmp->encodingsBitmap & (u_int64_t)(1ULL << index)) == 0) {
3528		hfs_lock_mount (hfsmp);
3529		hfsmp->encodingsBitmap |= (u_int64_t)(1ULL << index);
3530		MarkVCBDirty(hfsmp);
3531		hfs_unlock_mount(hfsmp);
3532	}
3533}
3534
3535/*
3536 * Update volume stats
3537 *
3538 * On journal volumes this will cause a volume header flush
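 *
 * Illustrative call (hypothetical call site, not taken from this file):
 * after creating a file in the root directory a caller would typically use
 * hfs_volupdate(hfsmp, VOL_MKFILE, 1) so that both hfs_filecount and the
 * root-directory valence (vcbNmFls) are bumped below.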
3539 */
3540int
3541hfs_volupdate(struct hfsmount *hfsmp, enum volop op, int inroot)
3542{
3543	struct timeval tv;
3544
3545	microtime(&tv);
3546
3547	hfs_lock_mount (hfsmp);
3548
3549	MarkVCBDirty(hfsmp);
3550	hfsmp->hfs_mtime = tv.tv_sec;
3551
3552	switch (op) {
3553	case VOL_UPDATE:
3554		break;
3555	case VOL_MKDIR:
3556		if (hfsmp->hfs_dircount != 0xFFFFFFFF)
3557			++hfsmp->hfs_dircount;
3558		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3559			++hfsmp->vcbNmRtDirs;
3560		break;
3561	case VOL_RMDIR:
3562		if (hfsmp->hfs_dircount != 0)
3563			--hfsmp->hfs_dircount;
3564		if (inroot && hfsmp->vcbNmRtDirs != 0xFFFF)
3565			--hfsmp->vcbNmRtDirs;
3566		break;
3567	case VOL_MKFILE:
3568		if (hfsmp->hfs_filecount != 0xFFFFFFFF)
3569			++hfsmp->hfs_filecount;
3570		if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3571			++hfsmp->vcbNmFls;
3572		break;
3573	case VOL_RMFILE:
3574		if (hfsmp->hfs_filecount != 0)
3575			--hfsmp->hfs_filecount;
3576		if (inroot && hfsmp->vcbNmFls != 0xFFFF)
3577			--hfsmp->vcbNmFls;
3578		break;
3579	}
3580
3581	hfs_unlock_mount (hfsmp);
3582
3583	if (hfsmp->jnl) {
3584		hfs_flushvolumeheader(hfsmp, 0, 0);
3585	}
3586
3587	return (0);
3588}
3589
3590
3591#if CONFIG_HFS_STD
3592/* HFS Standard MDB flush */
3593static int
3594hfs_flushMDB(struct hfsmount *hfsmp, int waitfor, int altflush)
3595{
3596	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3597	struct filefork *fp;
3598	HFSMasterDirectoryBlock	*mdb;
3599	struct buf *bp = NULL;
3600	int retval;
3601	int sector_size;
3602	ByteCount namelen;
3603
3604	sector_size = hfsmp->hfs_logical_block_size;
3605	retval = (int)buf_bread(hfsmp->hfs_devvp, (daddr64_t)HFS_PRI_SECTOR(sector_size), sector_size, NOCRED, &bp);
3606	if (retval) {
3607		if (bp)
3608			buf_brelse(bp);
3609		return retval;
3610	}
3611
3612	hfs_lock_mount (hfsmp);
3613
3614	mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp) + HFS_PRI_OFFSET(sector_size));
3615
3616	mdb->drCrDate	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->hfs_itime)));
3617	mdb->drLsMod	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbLsMod)));
3618	mdb->drAtrb	= SWAP_BE16 (vcb->vcbAtrb);
3619	mdb->drNmFls	= SWAP_BE16 (vcb->vcbNmFls);
3620	mdb->drAllocPtr	= SWAP_BE16 (vcb->nextAllocation);
3621	mdb->drClpSiz	= SWAP_BE32 (vcb->vcbClpSiz);
3622	mdb->drNxtCNID	= SWAP_BE32 (vcb->vcbNxtCNID);
3623	mdb->drFreeBks	= SWAP_BE16 (vcb->freeBlocks);
3624
3625	namelen = strlen((char *)vcb->vcbVN);
3626	retval = utf8_to_hfs(vcb, namelen, vcb->vcbVN, mdb->drVN);
3627	/* Retry with MacRoman in case that's how it was exported. */
3628	if (retval)
3629		retval = utf8_to_mac_roman(namelen, vcb->vcbVN, mdb->drVN);
3630
3631	mdb->drVolBkUp	= SWAP_BE32 (UTCToLocal(to_hfs_time(vcb->vcbVolBkUp)));
3632	mdb->drWrCnt	= SWAP_BE32 (vcb->vcbWrCnt);
3633	mdb->drNmRtDirs	= SWAP_BE16 (vcb->vcbNmRtDirs);
3634	mdb->drFilCnt	= SWAP_BE32 (vcb->vcbFilCnt);
3635	mdb->drDirCnt	= SWAP_BE32 (vcb->vcbDirCnt);
3636
3637	bcopy(vcb->vcbFndrInfo, mdb->drFndrInfo, sizeof(mdb->drFndrInfo));
3638
3639	fp = VTOF(vcb->extentsRefNum);
3640	mdb->drXTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
3641	mdb->drXTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
3642	mdb->drXTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
3643	mdb->drXTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
3644	mdb->drXTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
3645	mdb->drXTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
3646	mdb->drXTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
3647	mdb->drXTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
3648	FTOC(fp)->c_flag &= ~C_MODIFIED;
3649
3650	fp = VTOF(vcb->catalogRefNum);
3651	mdb->drCTExtRec[0].startBlock = SWAP_BE16 (fp->ff_extents[0].startBlock);
3652	mdb->drCTExtRec[0].blockCount = SWAP_BE16 (fp->ff_extents[0].blockCount);
3653	mdb->drCTExtRec[1].startBlock = SWAP_BE16 (fp->ff_extents[1].startBlock);
3654	mdb->drCTExtRec[1].blockCount = SWAP_BE16 (fp->ff_extents[1].blockCount);
3655	mdb->drCTExtRec[2].startBlock = SWAP_BE16 (fp->ff_extents[2].startBlock);
3656	mdb->drCTExtRec[2].blockCount = SWAP_BE16 (fp->ff_extents[2].blockCount);
3657	mdb->drCTFlSize	= SWAP_BE32 (fp->ff_blocks * vcb->blockSize);
3658	mdb->drCTClpSiz	= SWAP_BE32 (fp->ff_clumpsize);
3659	FTOC(fp)->c_flag &= ~C_MODIFIED;
3660
3661	MarkVCBClean( vcb );
3662
3663	hfs_unlock_mount (hfsmp);
3664
3665	/* If requested, flush out the alternate MDB */
3666	if (altflush) {
3667		struct buf *alt_bp = NULL;
3668
3669		if (buf_meta_bread(hfsmp->hfs_devvp, hfsmp->hfs_partition_avh_sector, sector_size, NOCRED, &alt_bp) == 0) {
3670			bcopy(mdb, (char *)buf_dataptr(alt_bp) + HFS_ALT_OFFSET(sector_size), kMDBSize);
3671
3672			(void) VNOP_BWRITE(alt_bp);
3673		} else if (alt_bp)
3674			buf_brelse(alt_bp);
3675	}
3676
3677	if (waitfor != MNT_WAIT)
3678		buf_bawrite(bp);
3679	else
3680		retval = VNOP_BWRITE(bp);
3681
3682	return (retval);
3683}
3684#endif
3685
3686/*
3687 *  Flush any dirty in-memory mount data to the on-disk
3688 *  volume header.
3689 *
3690 *  Note: the on-disk volume signature is intentionally
3691 *  not flushed since the on-disk "H+" and "HX" signatures
3692 *  are always stored in-memory as "H+".
3693 */
3694int
3695hfs_flushvolumeheader(struct hfsmount *hfsmp, int waitfor, int altflush)
3696{
3697	ExtendedVCB *vcb = HFSTOVCB(hfsmp);
3698	struct filefork *fp;
3699	HFSPlusVolumeHeader *volumeHeader, *altVH;
3700	int retval;
3701	struct buf *bp, *alt_bp;
3702	int i;
3703	daddr64_t priIDSector;
3704	int critical;
3705	u_int16_t  signature;
3706	u_int16_t  hfsversion;
3707	daddr64_t avh_sector;
3708
3709	if (hfsmp->hfs_flags & HFS_READ_ONLY) {
3710		return(0);
3711	}
3712#if CONFIG_HFS_STD
3713	if (hfsmp->hfs_flags & HFS_STANDARD) {
3714		return hfs_flushMDB(hfsmp, waitfor, altflush);
3715	}
3716#endif
3717	critical = altflush;
3718	priIDSector = (daddr64_t)((vcb->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3719				  HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size));
3720
3721	if (hfs_start_transaction(hfsmp) != 0) {
3722	    return EINVAL;
3723	}
3724
3725	bp = NULL;
3726	alt_bp = NULL;
3727
3728	retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3729			HFS_PHYSBLK_ROUNDDOWN(priIDSector, hfsmp->hfs_log_per_phys),
3730			hfsmp->hfs_physical_block_size, NOCRED, &bp);
3731	if (retval) {
3732		printf("hfs: err %d reading VH blk (vol=%s)\n", retval, vcb->vcbVN);
3733		goto err_exit;
3734	}
3735
3736	volumeHeader = (HFSPlusVolumeHeader *)((char *)buf_dataptr(bp) +
3737			HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3738
3739	/*
3740	 * Sanity check what we just read.  If it's bad, try the alternate
3741	 * instead.
3742	 */
3743	signature = SWAP_BE16 (volumeHeader->signature);
3744	hfsversion   = SWAP_BE16 (volumeHeader->version);
3745	if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3746	    (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3747	    (SWAP_BE32 (volumeHeader->blockSize) != vcb->blockSize)) {
3748		printf("hfs: corrupt VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3749			       	vcb->vcbVN, signature, hfsversion,
3750				SWAP_BE32 (volumeHeader->blockSize));
3751		hfs_mark_inconsistent(hfsmp, HFS_INCONSISTENCY_DETECTED);
3752
3753		/* Almost always we read AVH relative to the partition size */
3754		avh_sector = hfsmp->hfs_partition_avh_sector;
3755
3756		if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
3757			/*
3758			 * The two altVH offsets do not match --- which means that a smaller file
3759			 * system exists in a larger partition.  Verify that we have the correct
3760			 * alternate volume header sector as per the current partition size.
3761			 * The GPT device that we are mounted on top of could have changed size
3762			 * without us knowing.
3763			 *
3764			 * We're in a transaction, so it's safe to modify the partition_avh_sector
3765			 * field if necessary.
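			 *
			 * Worked example (illustrative): with 512-byte logical blocks and a
			 * device that now reports 2,000,000 sectors, the partition-relative
			 * AVH lives 1024 bytes before the device end, i.e. at sector
			 * 1,999,998; that is what hfs_partition_avh_sector is recomputed to
			 * below (plus any embedded-volume offset from hfsPlusIOPosOffset).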
3766			 */
3767
3768			uint64_t sector_count;
3769
3770			/* Get underlying device block count */
3771			if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
3772							(caddr_t)&sector_count, 0, vfs_context_current()))) {
3773				printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
3774				retval = ENXIO;
3775				goto err_exit;
3776			}
3777
3778			/* Partition size was changed without our knowledge */
3779			if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
3780				hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
3781					HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
3782				/* Note: hfs_fs_avh_sector will remain unchanged */
3783				printf ("hfs_flushVH: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
3784						hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
3785
3786				/*
3787				 * We just updated the offset for AVH relative to
3788				 * the partition size, so the content of that AVH
3789				 * will be invalid.  But since we also maintain
3790				 * a valid AVH relative to the file system size, we
3791				 * can fall back to reading that one, given that the
3792				 * primary VH and the partition-relative AVH are unusable.
3793				 */
3794				avh_sector = hfsmp->hfs_fs_avh_sector;
3795			}
3796		}
3797
3798		printf ("hfs: trying alternate (for %s) avh_sector=%qu\n",
3799				(avh_sector == hfsmp->hfs_fs_avh_sector) ? "file system" : "partition", avh_sector);
3800
3801		if (avh_sector) {
3802			retval = buf_meta_bread(hfsmp->hfs_devvp,
3803			    HFS_PHYSBLK_ROUNDDOWN(avh_sector, hfsmp->hfs_log_per_phys),
3804			    hfsmp->hfs_physical_block_size, NOCRED, &alt_bp);
3805			if (retval) {
3806				printf("hfs: err %d reading alternate VH (%s)\n", retval, vcb->vcbVN);
3807				goto err_exit;
3808			}
3809
3810			altVH = (HFSPlusVolumeHeader *)((char *)buf_dataptr(alt_bp) +
3811				HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size));
3812			signature = SWAP_BE16(altVH->signature);
3813			hfsversion = SWAP_BE16(altVH->version);
3814
3815			if ((signature != kHFSPlusSigWord && signature != kHFSXSigWord) ||
3816			    (hfsversion < kHFSPlusVersion) || (hfsversion > 100) ||
3817			    (SWAP_BE32(altVH->blockSize) != vcb->blockSize)) {
3818				printf("hfs: corrupt alternate VH on %s, sig 0x%04x, ver %d, blksize %d\n",
3819				    vcb->vcbVN, signature, hfsversion,
3820				    SWAP_BE32(altVH->blockSize));
3821				retval = EIO;
3822				goto err_exit;
3823			}
3824
3825			/* The alternate is plausible, so use it. */
3826			bcopy(altVH, volumeHeader, kMDBSize);
3827			buf_brelse(alt_bp);
3828			alt_bp = NULL;
3829		} else {
3830			/* No alternate VH, nothing more we can do. */
3831			retval = EIO;
3832			goto err_exit;
3833		}
3834	}
3835
3836	if (hfsmp->jnl) {
3837		journal_modify_block_start(hfsmp->jnl, bp);
3838	}
3839
3840	/*
3841	 * For embedded HFS+ volumes, update create date if it changed
3842	 * (ie from a setattrlist call)
3843	 */
3844	if ((vcb->hfsPlusIOPosOffset != 0) &&
3845	    (SWAP_BE32 (volumeHeader->createDate) != vcb->localCreateDate)) {
3846		struct buf *bp2;
3847		HFSMasterDirectoryBlock	*mdb;
3848
3849		retval = (int)buf_meta_bread(hfsmp->hfs_devvp,
3850				HFS_PHYSBLK_ROUNDDOWN(HFS_PRI_SECTOR(hfsmp->hfs_logical_block_size), hfsmp->hfs_log_per_phys),
3851				hfsmp->hfs_physical_block_size, NOCRED, &bp2);
3852		if (retval) {
3853			if (bp2)
3854				buf_brelse(bp2);
3855			retval = 0;
3856		} else {
3857			mdb = (HFSMasterDirectoryBlock *)(buf_dataptr(bp2) +
3858				HFS_PRI_OFFSET(hfsmp->hfs_physical_block_size));
3859
3860			if (SWAP_BE32 (mdb->drCrDate) != vcb->localCreateDate) {
3861				if (hfsmp->jnl) {
3862					journal_modify_block_start(hfsmp->jnl, bp2);
3863				}
3864
3865				mdb->drCrDate = SWAP_BE32 (vcb->localCreateDate);	/* pick up the new create date */
3866
3867				if (hfsmp->jnl) {
3868					journal_modify_block_end(hfsmp->jnl, bp2, NULL, NULL);
3869				} else {
3870					(void) VNOP_BWRITE(bp2);		/* write out the changes */
3871				}
3872			} else {
3873				buf_brelse(bp2);		/* just release it */
3874			}
3875		}
3879	}
3880
3881	hfs_lock_mount (hfsmp);
3882
3883	/* Note: only update the lower 16 bits worth of attributes */
3884	volumeHeader->attributes       = SWAP_BE32 (vcb->vcbAtrb);
3885	volumeHeader->journalInfoBlock = SWAP_BE32 (vcb->vcbJinfoBlock);
3886	if (hfsmp->jnl) {
3887		volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSJMountVersion);
3888	} else {
3889		volumeHeader->lastMountedVersion = SWAP_BE32 (kHFSPlusMountVersion);
3890	}
3891	volumeHeader->createDate	= SWAP_BE32 (vcb->localCreateDate);  /* volume create date is in local time */
3892	volumeHeader->modifyDate	= SWAP_BE32 (to_hfs_time(vcb->vcbLsMod));
3893	volumeHeader->backupDate	= SWAP_BE32 (to_hfs_time(vcb->vcbVolBkUp));
3894	volumeHeader->fileCount		= SWAP_BE32 (vcb->vcbFilCnt);
3895	volumeHeader->folderCount	= SWAP_BE32 (vcb->vcbDirCnt);
3896	volumeHeader->totalBlocks	= SWAP_BE32 (vcb->totalBlocks);
3897	volumeHeader->freeBlocks	= SWAP_BE32 (vcb->freeBlocks);
3898	volumeHeader->nextAllocation	= SWAP_BE32 (vcb->nextAllocation);
3899	volumeHeader->rsrcClumpSize	= SWAP_BE32 (vcb->vcbClpSiz);
3900	volumeHeader->dataClumpSize	= SWAP_BE32 (vcb->vcbClpSiz);
3901	volumeHeader->nextCatalogID	= SWAP_BE32 (vcb->vcbNxtCNID);
3902	volumeHeader->writeCount	= SWAP_BE32 (vcb->vcbWrCnt);
3903	volumeHeader->encodingsBitmap	= SWAP_BE64 (vcb->encodingsBitmap);
3904
3905	if (bcmp(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo)) != 0) {
3906		bcopy(vcb->vcbFndrInfo, volumeHeader->finderInfo, sizeof(volumeHeader->finderInfo));
3907		critical = 1;
3908	}
3909
3910	/*
3911	 * System files are only dirty when altflush is set.
3912	 */
3913	if (altflush == 0) {
3914		goto done;
3915	}
3916
3917	/* Sync Extents over-flow file meta data */
3918	fp = VTOF(vcb->extentsRefNum);
3919	if (FTOC(fp)->c_flag & C_MODIFIED) {
3920		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3921			volumeHeader->extentsFile.extents[i].startBlock	=
3922				SWAP_BE32 (fp->ff_extents[i].startBlock);
3923			volumeHeader->extentsFile.extents[i].blockCount	=
3924				SWAP_BE32 (fp->ff_extents[i].blockCount);
3925		}
3926		volumeHeader->extentsFile.logicalSize = SWAP_BE64 (fp->ff_size);
3927		volumeHeader->extentsFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3928		volumeHeader->extentsFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3929		FTOC(fp)->c_flag &= ~C_MODIFIED;
3930	}
3931
3932	/* Sync Catalog file meta data */
3933	fp = VTOF(vcb->catalogRefNum);
3934	if (FTOC(fp)->c_flag & C_MODIFIED) {
3935		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3936			volumeHeader->catalogFile.extents[i].startBlock	=
3937				SWAP_BE32 (fp->ff_extents[i].startBlock);
3938			volumeHeader->catalogFile.extents[i].blockCount	=
3939				SWAP_BE32 (fp->ff_extents[i].blockCount);
3940		}
3941		volumeHeader->catalogFile.logicalSize = SWAP_BE64 (fp->ff_size);
3942		volumeHeader->catalogFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3943		volumeHeader->catalogFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3944		FTOC(fp)->c_flag &= ~C_MODIFIED;
3945	}
3946
3947	/* Sync Allocation file meta data */
3948	fp = VTOF(vcb->allocationsRefNum);
3949	if (FTOC(fp)->c_flag & C_MODIFIED) {
3950		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3951			volumeHeader->allocationFile.extents[i].startBlock =
3952				SWAP_BE32 (fp->ff_extents[i].startBlock);
3953			volumeHeader->allocationFile.extents[i].blockCount =
3954				SWAP_BE32 (fp->ff_extents[i].blockCount);
3955		}
3956		volumeHeader->allocationFile.logicalSize = SWAP_BE64 (fp->ff_size);
3957		volumeHeader->allocationFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3958		volumeHeader->allocationFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3959		FTOC(fp)->c_flag &= ~C_MODIFIED;
3960	}
3961
3962	/* Sync Attribute file meta data */
3963	if (hfsmp->hfs_attribute_vp) {
3964		fp = VTOF(hfsmp->hfs_attribute_vp);
3965		for (i = 0; i < kHFSPlusExtentDensity; i++) {
3966			volumeHeader->attributesFile.extents[i].startBlock =
3967				SWAP_BE32 (fp->ff_extents[i].startBlock);
3968			volumeHeader->attributesFile.extents[i].blockCount =
3969				SWAP_BE32 (fp->ff_extents[i].blockCount);
3970		}
3971		FTOC(fp)->c_flag &= ~C_MODIFIED;
3972		volumeHeader->attributesFile.logicalSize = SWAP_BE64 (fp->ff_size);
3973		volumeHeader->attributesFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3974		volumeHeader->attributesFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3975	}
3976
3977	/* Sync Startup file meta data */
3978	if (hfsmp->hfs_startup_vp) {
3979		fp = VTOF(hfsmp->hfs_startup_vp);
3980		if (FTOC(fp)->c_flag & C_MODIFIED) {
3981			for (i = 0; i < kHFSPlusExtentDensity; i++) {
3982				volumeHeader->startupFile.extents[i].startBlock =
3983					SWAP_BE32 (fp->ff_extents[i].startBlock);
3984				volumeHeader->startupFile.extents[i].blockCount =
3985					SWAP_BE32 (fp->ff_extents[i].blockCount);
3986			}
3987			volumeHeader->startupFile.logicalSize = SWAP_BE64 (fp->ff_size);
3988			volumeHeader->startupFile.totalBlocks = SWAP_BE32 (fp->ff_blocks);
3989			volumeHeader->startupFile.clumpSize   = SWAP_BE32 (fp->ff_clumpsize);
3990			FTOC(fp)->c_flag &= ~C_MODIFIED;
3991		}
3992	}
3993
3994done:
3995	MarkVCBClean(hfsmp);
3996	hfs_unlock_mount (hfsmp);
3997
3998	/* If requested, flush out the alternate volume header */
3999	if (altflush) {
4000		/*
4001		 * The two altVH offsets do not match --- which means that a smaller file
4002		 * system exists in a larger partition.  Verify that we have the correct
4003		 * alternate volume header sector as per the current partition size.
4004		 * The GPT device that we are mounted on top of could have changed size
4005		 * without us knowing.
4006		 *
4007		 * We're in a transaction, so it's safe to modify the partition_avh_sector
4008		 * field if necessary.
4009		 */
4010		if (hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector) {
4011			uint64_t sector_count;
4012
4013			/* Get underlying device block count */
4014			if ((retval = VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCGETBLOCKCOUNT,
4015							(caddr_t)&sector_count, 0, vfs_context_current()))) {
4016				printf("hfs_flushVH: err %d getting block count (%s) \n", retval, vcb->vcbVN);
4017				retval = ENXIO;
4018				goto err_exit;
4019			}
4020
4021			/* Partition size was changed without our knowledge */
4022			if (sector_count != (uint64_t)hfsmp->hfs_logical_block_count) {
4023				hfsmp->hfs_partition_avh_sector = (hfsmp->hfsPlusIOPosOffset / hfsmp->hfs_logical_block_size) +
4024					HFS_ALT_SECTOR(hfsmp->hfs_logical_block_size, sector_count);
4025				/* Note: hfs_fs_avh_sector will remain unchanged */
4026				printf ("hfs_flushVH: altflush: partition size changed, partition_avh_sector=%qu, fs_avh_sector=%qu\n",
4027						hfsmp->hfs_partition_avh_sector, hfsmp->hfs_fs_avh_sector);
4028			}
4029		}
4030
4031		/*
4032		 * First see if we need to write I/O to the "secondary" AVH
4033		 * located at FS Size - 1024 bytes, because this one will
4034		 * always go into the journal.  We put this AVH into the journal
4035		 * because even if the filesystem size has shrunk, this LBA should be
4036		 * reachable after the partition-size modification has occurred.
4037		 * The one we need to be careful with is the AVH at partition size - 1024,
4038		 * since the partition size is expected to shrink, which can leave that sector unreachable.
4039		 *
4040		 * Most of the time this block will not execute.
4041		 */
4042		if ((hfsmp->hfs_fs_avh_sector) &&
4043				(hfsmp->hfs_partition_avh_sector != hfsmp->hfs_fs_avh_sector)) {
4044			if (buf_meta_bread(hfsmp->hfs_devvp,
4045						HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_fs_avh_sector, hfsmp->hfs_log_per_phys),
4046						hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4047				if (hfsmp->jnl) {
4048					journal_modify_block_start(hfsmp->jnl, alt_bp);
4049				}
4050
4051				bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4052						HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4053						kMDBSize);
4054
4055				if (hfsmp->jnl) {
4056					journal_modify_block_end(hfsmp->jnl, alt_bp, NULL, NULL);
4057				} else {
4058					(void) VNOP_BWRITE(alt_bp);
4059				}
4060			} else if (alt_bp) {
4061				buf_brelse(alt_bp);
4062			}
4063		}
4064
4065		/*
4066		 * Flush out alternate volume header located at 1024 bytes before
4067		 * end of the partition as part of journal transaction.  In
4068		 * most cases, this will be the only alternate volume header
4069		 * that we need to worry about because the file system size is
4070		 * same as the partition size, therefore hfs_fs_avh_sector is
4071		 * same as hfs_partition_avh_sector. This is the "priority" AVH.
4072		 *
4073		 * However, do not always put this I/O into the journal.  If we skipped the
4074		 * FS-Size AVH write above, then we will put this I/O into the journal as
4075		 * that indicates the two were in sync.  However, if the FS size is
4076		 * not the same as the partition size, we are tracking two.  We don't
4077		 * put it in the journal in that case, since if the partition
4078		 * size changes between uptimes, and we need to replay the journal,
4079		 * this I/O could generate an EIO if during replay it is now trying
4080		 * to access blocks beyond the device EOF.
4081		 */
4082		if (hfsmp->hfs_partition_avh_sector) {
4083			if (buf_meta_bread(hfsmp->hfs_devvp,
4084						HFS_PHYSBLK_ROUNDDOWN(hfsmp->hfs_partition_avh_sector, hfsmp->hfs_log_per_phys),
4085						hfsmp->hfs_physical_block_size, NOCRED, &alt_bp) == 0) {
4086
4087				/* only one AVH, put this I/O in the journal. */
4088				if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4089					journal_modify_block_start(hfsmp->jnl, alt_bp);
4090				}
4091
4092				bcopy(volumeHeader, (char *)buf_dataptr(alt_bp) +
4093						HFS_ALT_OFFSET(hfsmp->hfs_physical_block_size),
4094						kMDBSize);
4095
4096				/* If journaled and we only have one AVH to track */
4097				if ((hfsmp->jnl) && (hfsmp->hfs_partition_avh_sector == hfsmp->hfs_fs_avh_sector)) {
4098					journal_modify_block_end (hfsmp->jnl, alt_bp, NULL, NULL);
4099				} else {
4100					/*
4101					 * If we don't have a journal or there are two AVH's at the
4102					 * moment, then this one doesn't go in the journal.  Note that
4103					 * this one may generate I/O errors, since the partition
4104					 * can be resized behind our backs at any moment and this I/O
4105					 * may now appear to be beyond the device EOF.
4106					 */
4107					(void) VNOP_BWRITE(alt_bp);
4108					(void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
4109							NULL, FWRITE, NULL);
4110				}
4111			} else if (alt_bp) {
4112				buf_brelse(alt_bp);
4113			}
4114		}
4115	}
4116
4117	/* Finish modifying the block for the primary VH */
4118	if (hfsmp->jnl) {
4119		journal_modify_block_end(hfsmp->jnl, bp, NULL, NULL);
4120	} else {
4121		if (waitfor != MNT_WAIT) {
4122			buf_bawrite(bp);
4123		} else {
4124			retval = VNOP_BWRITE(bp);
4125			/* When critical data changes, flush the device cache */
4126			if (critical && (retval == 0)) {
4127				(void) VNOP_IOCTL(hfsmp->hfs_devvp, DKIOCSYNCHRONIZECACHE,
4128						NULL, FWRITE, NULL);
4129			}
4130		}
4131	}
4132	hfs_end_transaction(hfsmp);
4133
4134	return (retval);
4135
4136err_exit:
4137	if (alt_bp)
4138		buf_brelse(alt_bp);
4139	if (bp)
4140		buf_brelse(bp);
4141	hfs_end_transaction(hfsmp);
4142	return retval;
4143}
4144
4145
4146/*
4147 * Creates a UUID from a unique "name" in the HFS UUID Name space.
4148 * See version 3 UUID.
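 *
 * Sketch of the construction (per the version-3 UUID scheme): MD5 the HFS
 * name-space UUID followed by the 64-bit volume identifier taken from
 * finder-info words 6 and 7, then force the version nibble to 3 and the
 * variant bits to 10; the two masking statements at the end of the
 * function do exactly that.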
4149 */
4150static void
4151hfs_getvoluuid(struct hfsmount *hfsmp, uuid_t result)
4152{
4153	MD5_CTX  md5c;
4154	uint8_t  rawUUID[8];
4155
4156	((uint32_t *)rawUUID)[0] = hfsmp->vcbFndrInfo[6];
4157	((uint32_t *)rawUUID)[1] = hfsmp->vcbFndrInfo[7];
4158
4159	MD5Init( &md5c );
4160	MD5Update( &md5c, HFS_UUID_NAMESPACE_ID, sizeof( uuid_t ) );
4161	MD5Update( &md5c, rawUUID, sizeof (rawUUID) );
4162	MD5Final( result, &md5c );
4163
4164	result[6] = 0x30 | ( result[6] & 0x0F );
4165	result[8] = 0x80 | ( result[8] & 0x3F );
4166}
4167
4168/*
4169 * Get file system attributes.
4170 */
4171static int
4172hfs_vfs_getattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4173{
4174#define HFS_ATTR_CMN_VALIDMASK ATTR_CMN_VALIDMASK
4175#define HFS_ATTR_FILE_VALIDMASK (ATTR_FILE_VALIDMASK & ~(ATTR_FILE_FILETYPE | ATTR_FILE_FORKCOUNT | ATTR_FILE_FORKLIST))
4176#define HFS_ATTR_CMN_VOL_VALIDMASK (ATTR_CMN_VALIDMASK & ~(ATTR_CMN_ACCTIME))
4177
4178	ExtendedVCB *vcb = VFSTOVCB(mp);
4179	struct hfsmount *hfsmp = VFSTOHFS(mp);
4180	u_int32_t freeCNIDs;
4181
4182	int searchfs_on = 0;
4183	int exchangedata_on = 1;
4184
4185#if CONFIG_SEARCHFS
4186	searchfs_on = 1;
4187#endif
4188
4189#if CONFIG_PROTECT
4190	if (cp_fs_protected(mp)) {
4191		exchangedata_on = 0;
4192	}
4193#endif
4194
4195	freeCNIDs = (u_int32_t)0xFFFFFFFF - (u_int32_t)hfsmp->vcbNxtCNID;
4196
4197	VFSATTR_RETURN(fsap, f_objcount, (u_int64_t)hfsmp->vcbFilCnt + (u_int64_t)hfsmp->vcbDirCnt);
4198	VFSATTR_RETURN(fsap, f_filecount, (u_int64_t)hfsmp->vcbFilCnt);
4199	VFSATTR_RETURN(fsap, f_dircount, (u_int64_t)hfsmp->vcbDirCnt);
4200	VFSATTR_RETURN(fsap, f_maxobjcount, (u_int64_t)0xFFFFFFFF);
4201	VFSATTR_RETURN(fsap, f_iosize, (size_t)cluster_max_io_size(mp, 0));
4202	VFSATTR_RETURN(fsap, f_blocks, (u_int64_t)hfsmp->totalBlocks);
4203	VFSATTR_RETURN(fsap, f_bfree, (u_int64_t)hfs_freeblks(hfsmp, 0));
4204	VFSATTR_RETURN(fsap, f_bavail, (u_int64_t)hfs_freeblks(hfsmp, 1));
4205	VFSATTR_RETURN(fsap, f_bsize, (u_int32_t)vcb->blockSize);
4206	/* XXX needs clarification */
4207	VFSATTR_RETURN(fsap, f_bused, hfsmp->totalBlocks - hfs_freeblks(hfsmp, 1));
4208	/* Maximum files is constrained by total blocks. */
4209	VFSATTR_RETURN(fsap, f_files, (u_int64_t)(hfsmp->totalBlocks - 2));
4210	VFSATTR_RETURN(fsap, f_ffree, MIN((u_int64_t)freeCNIDs, (u_int64_t)hfs_freeblks(hfsmp, 1)));
4211
4212	fsap->f_fsid.val[0] = hfsmp->hfs_raw_dev;
4213	fsap->f_fsid.val[1] = vfs_typenum(mp);
4214	VFSATTR_SET_SUPPORTED(fsap, f_fsid);
4215
4216	VFSATTR_RETURN(fsap, f_signature, vcb->vcbSigWord);
4217	VFSATTR_RETURN(fsap, f_carbon_fsid, 0);
4218
4219	if (VFSATTR_IS_ACTIVE(fsap, f_capabilities)) {
4220		vol_capabilities_attr_t *cap;
4221
4222		cap = &fsap->f_capabilities;
4223
4224		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
4225			/* HFS+ & variants */
4226			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4227				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4228				VOL_CAP_FMT_SYMBOLICLINKS |
4229				VOL_CAP_FMT_HARDLINKS |
4230				VOL_CAP_FMT_JOURNAL |
4231				VOL_CAP_FMT_ZERO_RUNS |
4232				(hfsmp->jnl ? VOL_CAP_FMT_JOURNAL_ACTIVE : 0) |
4233				(hfsmp->hfs_flags & HFS_CASE_SENSITIVE ? VOL_CAP_FMT_CASE_SENSITIVE : 0) |
4234				VOL_CAP_FMT_CASE_PRESERVING |
4235				VOL_CAP_FMT_FAST_STATFS |
4236				VOL_CAP_FMT_2TB_FILESIZE |
4237				VOL_CAP_FMT_HIDDEN_FILES |
4238#if HFS_COMPRESSION
4239				VOL_CAP_FMT_PATH_FROM_ID |
4240				VOL_CAP_FMT_DECMPFS_COMPRESSION;
4241#else
4242				VOL_CAP_FMT_PATH_FROM_ID;
4243#endif
4244		}
4245#if CONFIG_HFS_STD
4246		else {
4247			/* HFS standard */
4248			cap->capabilities[VOL_CAPABILITIES_FORMAT] =
4249				VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4250				VOL_CAP_FMT_CASE_PRESERVING |
4251				VOL_CAP_FMT_FAST_STATFS |
4252				VOL_CAP_FMT_HIDDEN_FILES |
4253				VOL_CAP_FMT_PATH_FROM_ID;
4254		}
4255#endif
4256
4257		/*
4258		 * The capabilities word in 'cap' tells you whether or not
4259		 * this particular filesystem instance has feature X enabled.
4260		 */
4261
4262		cap->capabilities[VOL_CAPABILITIES_INTERFACES] =
4263			VOL_CAP_INT_ATTRLIST |
4264			VOL_CAP_INT_NFSEXPORT |
4265			VOL_CAP_INT_READDIRATTR |
4266			VOL_CAP_INT_ALLOCATE |
4267			VOL_CAP_INT_VOL_RENAME |
4268			VOL_CAP_INT_ADVLOCK |
4269			VOL_CAP_INT_FLOCK |
4270#if NAMEDSTREAMS
4271			VOL_CAP_INT_EXTENDED_ATTR |
4272			VOL_CAP_INT_NAMEDSTREAMS;
4273#else
4274			VOL_CAP_INT_EXTENDED_ATTR;
4275#endif
4276
4277		/* HFS may conditionally support searchfs and exchangedata depending on the runtime */
4278
4279		if (searchfs_on) {
4280			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_SEARCHFS;
4281		}
4282		if (exchangedata_on) {
4283			cap->capabilities[VOL_CAPABILITIES_INTERFACES] |= VOL_CAP_INT_EXCHANGEDATA;
4284		}
4285
4286		cap->capabilities[VOL_CAPABILITIES_RESERVED1] = 0;
4287		cap->capabilities[VOL_CAPABILITIES_RESERVED2] = 0;
4288
4289		cap->valid[VOL_CAPABILITIES_FORMAT] =
4290			VOL_CAP_FMT_PERSISTENTOBJECTIDS |
4291			VOL_CAP_FMT_SYMBOLICLINKS |
4292			VOL_CAP_FMT_HARDLINKS |
4293			VOL_CAP_FMT_JOURNAL |
4294			VOL_CAP_FMT_JOURNAL_ACTIVE |
4295			VOL_CAP_FMT_NO_ROOT_TIMES |
4296			VOL_CAP_FMT_SPARSE_FILES |
4297			VOL_CAP_FMT_ZERO_RUNS |
4298			VOL_CAP_FMT_CASE_SENSITIVE |
4299			VOL_CAP_FMT_CASE_PRESERVING |
4300			VOL_CAP_FMT_FAST_STATFS |
4301			VOL_CAP_FMT_2TB_FILESIZE |
4302			VOL_CAP_FMT_OPENDENYMODES |
4303			VOL_CAP_FMT_HIDDEN_FILES |
4304#if HFS_COMPRESSION
4305			VOL_CAP_FMT_PATH_FROM_ID |
4306			VOL_CAP_FMT_DECMPFS_COMPRESSION;
4307#else
4308			VOL_CAP_FMT_PATH_FROM_ID;
4309#endif
4310
4311		/*
4312		 * Bits in the "valid" field tell you whether or not the on-disk
4313		 * format supports feature X.
4314		 */
4315
4316		cap->valid[VOL_CAPABILITIES_INTERFACES] =
4317			VOL_CAP_INT_ATTRLIST |
4318			VOL_CAP_INT_NFSEXPORT |
4319			VOL_CAP_INT_READDIRATTR |
4320			VOL_CAP_INT_COPYFILE |
4321			VOL_CAP_INT_ALLOCATE |
4322			VOL_CAP_INT_VOL_RENAME |
4323			VOL_CAP_INT_ADVLOCK |
4324			VOL_CAP_INT_FLOCK |
4325			VOL_CAP_INT_MANLOCK |
4326#if NAMEDSTREAMS
4327			VOL_CAP_INT_EXTENDED_ATTR |
4328			VOL_CAP_INT_NAMEDSTREAMS;
4329#else
4330			VOL_CAP_INT_EXTENDED_ATTR;
4331#endif
4332
4333		/* HFS always supports exchangedata and searchfs in the on-disk format natively */
4334		cap->valid[VOL_CAPABILITIES_INTERFACES] |= (VOL_CAP_INT_SEARCHFS | VOL_CAP_INT_EXCHANGEDATA);
4335
4336
4337		cap->valid[VOL_CAPABILITIES_RESERVED1] = 0;
4338		cap->valid[VOL_CAPABILITIES_RESERVED2] = 0;
4339		VFSATTR_SET_SUPPORTED(fsap, f_capabilities);
4340	}
4341	if (VFSATTR_IS_ACTIVE(fsap, f_attributes)) {
4342		vol_attributes_attr_t *attrp = &fsap->f_attributes;
4343
4344		attrp->validattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
4345		attrp->validattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
4346		attrp->validattr.dirattr = ATTR_DIR_VALIDMASK;
4347		attrp->validattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
4348		attrp->validattr.forkattr = 0;
4349
4350		attrp->nativeattr.commonattr = HFS_ATTR_CMN_VOL_VALIDMASK;
4351		attrp->nativeattr.volattr = ATTR_VOL_VALIDMASK & ~ATTR_VOL_INFO;
4352		attrp->nativeattr.dirattr = ATTR_DIR_VALIDMASK;
4353		attrp->nativeattr.fileattr = HFS_ATTR_FILE_VALIDMASK;
4354		attrp->nativeattr.forkattr = 0;
4355		VFSATTR_SET_SUPPORTED(fsap, f_attributes);
4356	}
4357	fsap->f_create_time.tv_sec = hfsmp->hfs_itime;
4358	fsap->f_create_time.tv_nsec = 0;
4359	VFSATTR_SET_SUPPORTED(fsap, f_create_time);
4360	fsap->f_modify_time.tv_sec = hfsmp->vcbLsMod;
4361	fsap->f_modify_time.tv_nsec = 0;
4362	VFSATTR_SET_SUPPORTED(fsap, f_modify_time);
4363
4364	fsap->f_backup_time.tv_sec = hfsmp->vcbVolBkUp;
4365	fsap->f_backup_time.tv_nsec = 0;
4366	VFSATTR_SET_SUPPORTED(fsap, f_backup_time);
4367	if (VFSATTR_IS_ACTIVE(fsap, f_fssubtype)) {
4368		u_int16_t subtype = 0;
4369
4370		/*
4371		 * Subtypes (flavors) for HFS
4372		 *   0:   Mac OS Extended
4373		 *   1:   Mac OS Extended (Journaled)
4374		 *   2:   Mac OS Extended (Case Sensitive)
4375		 *   3:   Mac OS Extended (Case Sensitive, Journaled)
4376		 *   4 - 127:   Reserved
4377		 * 128:   Mac OS Standard
4378		 *
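		 * For example (illustrative): a journaled, case-sensitive HFS+ volume
		 * reports f_fssubtype == 3, i.e. flavor 1 (Journaled) combined with
		 * flavor 2 (Case Sensitive) by the flag bits set below.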
4379		 */
4380		if ((hfsmp->hfs_flags & HFS_STANDARD) == 0) {
4381			if (hfsmp->jnl) {
4382				subtype |= HFS_SUBTYPE_JOURNALED;
4383			}
4384			if (hfsmp->hfs_flags & HFS_CASE_SENSITIVE) {
4385				subtype |= HFS_SUBTYPE_CASESENSITIVE;
4386			}
4387		}
4388#if CONFIG_HFS_STD
4389		else {
4390			subtype = HFS_SUBTYPE_STANDARDHFS;
4391		}
4392#endif
4393		fsap->f_fssubtype = subtype;
4394		VFSATTR_SET_SUPPORTED(fsap, f_fssubtype);
4395	}
4396
4397	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4398		strlcpy(fsap->f_vol_name, (char *) hfsmp->vcbVN, MAXPATHLEN);
4399		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4400	}
4401	if (VFSATTR_IS_ACTIVE(fsap, f_uuid)) {
4402		hfs_getvoluuid(hfsmp, fsap->f_uuid);
4403		VFSATTR_SET_SUPPORTED(fsap, f_uuid);
4404	}
4405	return (0);
4406}
4407
4408/*
4409 * Perform a volume rename.  Requires the FS' root vp.
4410 */
4411static int
4412hfs_rename_volume(struct vnode *vp, const char *name, proc_t p)
4413{
4414	ExtendedVCB *vcb = VTOVCB(vp);
4415	struct cnode *cp = VTOC(vp);
4416	struct hfsmount *hfsmp = VTOHFS(vp);
4417	struct cat_desc to_desc;
4418	struct cat_desc todir_desc;
4419	struct cat_desc new_desc;
4420	cat_cookie_t cookie;
4421	int lockflags;
4422	int error = 0;
4423	char converted_volname[256];
4424	size_t volname_length = 0;
4425	size_t conv_volname_length = 0;
4426
4427
4428	/*
4429	 * Ignore attempts to rename a volume to a zero-length name.
4430	 */
4431	if (name[0] == 0)
4432		return(0);
4433
4434	bzero(&to_desc, sizeof(to_desc));
4435	bzero(&todir_desc, sizeof(todir_desc));
4436	bzero(&new_desc, sizeof(new_desc));
4437	bzero(&cookie, sizeof(cookie));
4438
4439	todir_desc.cd_parentcnid = kHFSRootParentID;
4440	todir_desc.cd_cnid = kHFSRootFolderID;
4441	todir_desc.cd_flags = CD_ISDIR;
4442
4443	to_desc.cd_nameptr = (const u_int8_t *)name;
4444	to_desc.cd_namelen = strlen(name);
4445	to_desc.cd_parentcnid = kHFSRootParentID;
4446	to_desc.cd_cnid = cp->c_cnid;
4447	to_desc.cd_flags = CD_ISDIR;
4448
4449	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
4450		if ((error = hfs_start_transaction(hfsmp)) == 0) {
4451			if ((error = cat_preflight(hfsmp, CAT_RENAME, &cookie, p)) == 0) {
4452				lockflags = hfs_systemfile_lock(hfsmp, SFL_CATALOG, HFS_EXCLUSIVE_LOCK);
4453
4454				error = cat_rename(hfsmp, &cp->c_desc, &todir_desc, &to_desc, &new_desc);
4455
4456				/*
4457				 * If successful, update the name in the VCB, ensure it's terminated.
4458				 */
4459				if (error == 0) {
4460					strlcpy((char *)vcb->vcbVN, name, sizeof(vcb->vcbVN));
4461
4462					volname_length = strlen ((const char*)vcb->vcbVN);
4463#define DKIOCCSSETLVNAME _IOW('d', 198, char[256])
4464					/* Send the volume name down to CoreStorage if necessary */
4465					error = utf8_normalizestr(vcb->vcbVN, volname_length, (u_int8_t*)converted_volname, &conv_volname_length, 256, UTF_PRECOMPOSED);
4466					if (error == 0) {
4467						(void) VNOP_IOCTL (hfsmp->hfs_devvp, DKIOCCSSETLVNAME, converted_volname, 0, vfs_context_current());
4468					}
4469					error = 0;
4470				}
4471
4472				hfs_systemfile_unlock(hfsmp, lockflags);
4473				cat_postflight(hfsmp, &cookie, p);
4474
4475				if (error)
4476					MarkVCBDirty(vcb);
4477				(void) hfs_flushvolumeheader(hfsmp, MNT_WAIT, 0);
4478			}
4479			hfs_end_transaction(hfsmp);
4480		}
4481		if (!error) {
4482			/* Release old allocated name buffer */
4483			if (cp->c_desc.cd_flags & CD_HASBUF) {
4484				const char *tmp_name = (const char *)cp->c_desc.cd_nameptr;
4485
4486				cp->c_desc.cd_nameptr = 0;
4487				cp->c_desc.cd_namelen = 0;
4488				cp->c_desc.cd_flags &= ~CD_HASBUF;
4489				vfs_removename(tmp_name);
4490			}
4491			/* Update cnode's catalog descriptor */
4492			replace_desc(cp, &new_desc);
4493			vcb->volumeNameEncodingHint = new_desc.cd_encoding;
4494			cp->c_touch_chgtime = TRUE;
4495		}
4496
4497		hfs_unlock(cp);
4498	}
4499
4500	return(error);
4501}
4502
4503/*
4504 * Set file system attributes.
4505 */
4506static int
4507hfs_vfs_setattr(struct mount *mp, struct vfs_attr *fsap, __unused vfs_context_t context)
4508{
4509	kauth_cred_t cred = vfs_context_ucred(context);
4510	int error = 0;
4511
4512	/*
4513	 * Must be superuser or owner of filesystem to change volume attributes
4514	 */
4515	if (!kauth_cred_issuser(cred) && (kauth_cred_getuid(cred) != vfs_statfs(mp)->f_owner))
4516		return(EACCES);
4517
4518	if (VFSATTR_IS_ACTIVE(fsap, f_vol_name)) {
4519		vnode_t root_vp;
4520
4521		error = hfs_vfs_root(mp, &root_vp, context);
4522		if (error)
4523			goto out;
4524
4525		error = hfs_rename_volume(root_vp, fsap->f_vol_name, vfs_context_proc(context));
4526		(void) vnode_put(root_vp);
4527		if (error)
4528			goto out;
4529
4530		VFSATTR_SET_SUPPORTED(fsap, f_vol_name);
4531	}
4532
4533out:
4534	return error;
4535}
4536
4537/* If runtime corruption is detected, set the volume inconsistent
4538 * bit in the volume attributes.  The volume inconsistent bit is a persistent
4539 * bit which represents that the volume is corrupt and needs repair.
4540 * The volume inconsistent bit can be set from the kernel when it detects
4541 * runtime corruption or from file system repair utilities like fsck_hfs when
4542 * a repair operation fails.  The bit should be cleared only from file system
4543 * verify/repair utility like fsck_hfs when a verify/repair succeeds.
4544 */
4545__private_extern__
4546void hfs_mark_inconsistent(struct hfsmount *hfsmp,
4547								  hfs_inconsistency_reason_t reason)
4548{
4549	hfs_lock_mount (hfsmp);
4550	if ((hfsmp->vcbAtrb & kHFSVolumeInconsistentMask) == 0) {
4551		hfsmp->vcbAtrb |= kHFSVolumeInconsistentMask;
4552		MarkVCBDirty(hfsmp);
4553	}
4554	if ((hfsmp->hfs_flags & HFS_READ_ONLY)==0) {
4555		switch (reason) {
4556		case HFS_INCONSISTENCY_DETECTED:
4557			printf("hfs_mark_inconsistent: Runtime corruption detected on %s, fsck will be forced on next mount.\n",
4558				   hfsmp->vcbVN);
4559			break;
4560		case HFS_ROLLBACK_FAILED:
4561			printf("hfs_mark_inconsistent: Failed to roll back; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4562				   hfsmp->vcbVN);
4563			break;
4564		case HFS_OP_INCOMPLETE:
4565			printf("hfs_mark_inconsistent: Failed to complete operation; volume `%s' might be inconsistent; fsck will be forced on next mount.\n",
4566				   hfsmp->vcbVN);
4567			break;
4568		case HFS_FSCK_FORCED:
4569			printf("hfs_mark_inconsistent: fsck requested for `%s'; fsck will be forced on next mount.\n",
4570				   hfsmp->vcbVN);
4571			break;
4572		}
4573	}
4574	hfs_unlock_mount (hfsmp);
4575}
4576
4577/* Replay the journal on the device node provided.  Returns zero if
4578 * journal replay succeeded or no journal needed to be replayed.
4579 */
4580static int hfs_journal_replay(vnode_t devvp, vfs_context_t context)
4581{
4582	int retval = 0;
4583	int error = 0;
4584	struct mount *mp = NULL;
4585	struct hfs_mount_args *args = NULL;
4586
4587	/* Replay allowed only on raw devices */
4588	if (!vnode_ischr(devvp) && !vnode_isblk(devvp)) {
4589		retval = EINVAL;
4590		goto out;
4591	}
4592
4593	/* Create dummy mount structures */
4594	MALLOC(mp, struct mount *, sizeof(struct mount), M_TEMP, M_WAITOK);
4595	if (mp == NULL) {
4596		retval = ENOMEM;
4597		goto out;
4598	}
4599	bzero(mp, sizeof(struct mount));
4600	mount_lock_init(mp);
4601
4602	MALLOC(args, struct hfs_mount_args *, sizeof(struct hfs_mount_args), M_TEMP, M_WAITOK);
4603	if (args == NULL) {
4604		retval = ENOMEM;
4605		goto out;
4606	}
4607	bzero(args, sizeof(struct hfs_mount_args));
4608
4609	retval = hfs_mountfs(devvp, mp, args, 1, context);
4610	buf_flushdirtyblks(devvp, TRUE, 0, "hfs_journal_replay");
4611
4612	/* FSYNC the devnode to be sure all data has been flushed */
4613	error = VNOP_FSYNC(devvp, MNT_WAIT, context);
4614	if (error) {
4615		retval = error;
4616	}
4617
4618out:
4619	if (mp) {
4620		mount_lock_destroy(mp);
4621		FREE(mp, M_TEMP);
4622	}
4623	if (args) {
4624		FREE(args, M_TEMP);
4625	}
4626	return retval;
4627}
4628
4629
4630/*
4631 * Cancel the syncer
4632 */
4633static void
4634hfs_syncer_free(struct hfsmount *hfsmp)
4635{
4636    if (hfsmp && hfsmp->hfs_syncer) {
4637        hfs_syncer_lock(hfsmp);
4638
4639        /*
4640         * First, make sure everything else knows we don't want any more
4641         * requests queued.
4642         */
4643        thread_call_t syncer = hfsmp->hfs_syncer;
4644        hfsmp->hfs_syncer = NULL;
4645
4646        hfs_syncer_unlock(hfsmp);
4647
4648        // Now deal with requests that are outstanding
4649        if (hfsmp->hfs_sync_incomplete) {
4650            if (thread_call_cancel(syncer)) {
4651                // We managed to cancel the timer so we're done
4652                hfsmp->hfs_sync_incomplete = FALSE;
4653            } else {
4654                // Syncer must be running right now so we have to wait
4655                hfs_syncer_lock(hfsmp);
4656                while (hfsmp->hfs_sync_incomplete)
4657                    hfs_syncer_wait(hfsmp);
4658                hfs_syncer_unlock(hfsmp);
4659            }
4660        }
4661
4662        // Now we're safe to free the syncer
4663        thread_call_free(syncer);
4664    }
4665}
4666
4667/*
4668 * hfs vfs operations.
4669 */
4670struct vfsops hfs_vfsops = {
4671	hfs_mount,
4672	hfs_start,
4673	hfs_unmount,
4674	hfs_vfs_root,
4675	hfs_quotactl,
4676	hfs_vfs_getattr, 	/* was hfs_statfs */
4677	hfs_sync,
4678	hfs_vfs_vget,
4679	hfs_fhtovp,
4680	hfs_vptofh,
4681	hfs_init,
4682	hfs_sysctl,
4683	hfs_vfs_setattr,
4684	{NULL}
4685};
4686