/*
 * Copyright (c) 2002-2014 Apple Inc. All rights reserved.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_START@
 *
 * This file contains Original Code and/or Modifications of Original Code
 * as defined in and that are subject to the Apple Public Source License
 * Version 2.0 (the 'License'). You may not use this file except in
 * compliance with the License. The rights granted to you under the License
 * may not be used to create, or enable the creation or redistribution of,
 * unlawful or unlicensed copies of an Apple operating system, or to
 * circumvent, violate, or enable the circumvention or violation of, any
 * terms of an Apple operating system software license agreement.
 *
 * Please obtain a copy of the License at
 * http://www.opensource.apple.com/apsl/ and read it before using this file.
 *
 * The Original Code and all software distributed under the License are
 * distributed on an 'AS IS' basis, WITHOUT WARRANTY OF ANY KIND, EITHER
 * EXPRESS OR IMPLIED, AND APPLE HEREBY DISCLAIMS ALL SUCH WARRANTIES,
 * INCLUDING WITHOUT LIMITATION, ANY WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE, QUIET ENJOYMENT OR NON-INFRINGEMENT.
 * Please see the License for the specific language governing rights and
 * limitations under the License.
 *
 * @APPLE_OSREFERENCE_LICENSE_HEADER_END@
 */
#ifndef _HFS_CNODE_H_
#define _HFS_CNODE_H_

#include <sys/appleapiopts.h>

#ifdef KERNEL
#ifdef __APPLE_API_PRIVATE
#include <stdbool.h>
#include <sys/types.h>
#include <sys/queue.h>
#include <sys/stat.h>
#include <sys/vnode.h>
#include <sys/quota.h>

#include <kern/locks.h>

#include <hfs/hfs_catalog.h>
#include <hfs/rangelist.h>
#if HFS_COMPRESSION
#include <sys/decmpfs.h>
#endif
#if CONFIG_PROTECT
#include <sys/cprotect.h>
#endif

/*
 * The filefork is used to represent an HFS file fork (data or resource).
 * Reading or writing any of these fields requires holding the cnode lock.
 */
struct filefork {
	struct cnode   *ff_cp;               /* cnode associated with this fork */
	struct rl_head  ff_invalidranges;    /* Areas of disk that should read back as zeroes */
	union {
	   void        *ffu_sysfileinfo;     /* additional info for system files */
	   char        *ffu_symlinkptr;      /* symbolic link pathname */
	} ff_union;
	struct cat_fork ff_data;             /* fork data (size, extents) */
};
typedef struct filefork filefork_t;


#define HFS_TEMPLOOKUP_NAMELEN 32

/*
 * Catalog Lookup struct (runtime)
 *
 * This is used so that when we need to malloc a container for a catalog
 * lookup operation, we can acquire memory for everything in one fell swoop
 * as opposed to putting many of these objects on the stack.  The cat_fork
 * data structure alone can easily take up 100+ bytes, and that can add to
 * stack overhead.
 *
 * As a result, we use this to easily pass around the memory needed for a
 * lookup operation.
 */
struct cat_lookup_buffer {
	struct cat_desc lookup_desc;
	struct cat_attr lookup_attr;
	struct filefork lookup_fork;
	struct componentname lookup_cn;
	char lookup_name[HFS_TEMPLOOKUP_NAMELEN]; /* for open-unlinked paths only */
};
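
/*
 * Illustrative sketch (not verbatim from the HFS sources): callers are
 * expected to heap-allocate one cat_lookup_buffer instead of placing the
 * embedded cat_desc/cat_attr/filefork on the stack.  The allocator shown
 * (the kernel MALLOC/FREE macros with M_TEMP) is an assumption made for
 * illustration only:
 *
 *	struct cat_lookup_buffer *cl_buf;
 *
 *	MALLOC(cl_buf, struct cat_lookup_buffer *, sizeof(*cl_buf),
 *	       M_TEMP, M_WAITOK | M_ZERO);
 *	// ... pass &cl_buf->lookup_desc / &cl_buf->lookup_attr /
 *	//     &cl_buf->lookup_fork to the catalog lookup ...
 *	FREE(cl_buf, M_TEMP);
 */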


/* Aliases for common fields */
#define ff_size          ff_data.cf_size
#define ff_new_size      ff_data.cf_new_size
#define ff_clumpsize     ff_data.cf_clump
#define ff_bytesread     ff_data.cf_bytesread
#define ff_blocks        ff_data.cf_blocks
#define ff_extents       ff_data.cf_extents
#define ff_unallocblocks ff_data.cf_vblocks

#define ff_symlinkptr    ff_union.ffu_symlinkptr
#define ff_sysfileinfo   ff_union.ffu_sysfileinfo
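
/*
 * Illustrative sketch (not part of the original header text): because these
 * aliases resolve into ff_data, a fork's logical size and allocation can be
 * read directly once the owning cnode lock is held, e.g.:
 *
 *	filefork_t *fp  = VTOF(vp);		// VTOF() is defined below
 *	off_t       eof = fp->ff_size;		// logical EOF in bytes
 *	u_int32_t  blks = fp->ff_blocks;	// allocation blocks in use
 */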


/* The btree code still needs these... */
#define fcbEOF           ff_size
#define fcbExtents       ff_extents
#define	fcbBTCBPtr       ff_sysfileinfo

typedef u_int8_t atomicflag_t;


/*
 * Hardlink Origin (for hardlinked directories).
 */
struct linkorigin {
	TAILQ_ENTRY(linkorigin)  lo_link;  /* chain */
	void *  lo_thread;      /* thread that performed the lookup */
	cnid_t  lo_cnid;        /* hardlink's cnid */
	cnid_t  lo_parentcnid;  /* hardlink's parent cnid */
};
typedef struct linkorigin linkorigin_t;

#define MAX_CACHED_ORIGINS  10
#define MAX_CACHED_FILE_ORIGINS 8

/*
 * The cnode is used to represent each active (or recently active)
 * file or directory in the HFS filesystem.
 *
 * Reading or writing any of these fields requires holding the cnode
 * lock (c_rwlock).
 */
struct cnode {
	lck_rw_t                c_rwlock;       /* cnode's lock */
	thread_t                c_lockowner;    /* cnode's lock owner (exclusive case only) */
	lck_rw_t                c_truncatelock; /* protects file from truncation during read/write */
	thread_t                c_truncatelockowner;    /* truncate lock owner (exclusive case only) */
	LIST_ENTRY(cnode)	c_hash;		/* cnode's hash chain */
	u_int32_t		c_flag;		/* cnode's runtime flags */
	u_int32_t		c_hflag;	/* cnode's flags for maintaining hash - protected by global hash lock */
	struct vnode		*c_vp;		/* vnode for data fork or dir */
	struct vnode		*c_rsrc_vp;	/* vnode for resource fork */
	struct dquot		*c_dquot[MAXQUOTAS]; /* cnode's quota info */
	u_int32_t		c_childhint;	 /* catalog hint for children (small dirs only) */
	u_int32_t		c_dirthreadhint; /* catalog hint for directory's thread rec */
	struct cat_desc		c_desc;		/* cnode's descriptor */
	struct cat_attr		c_attr;		/* cnode's attributes */
	TAILQ_HEAD(hfs_originhead, linkorigin) c_originlist;  /* hardlink origin cache */
	TAILQ_HEAD(hfs_hinthead, directoryhint) c_hintlist;  /* readdir directory hint list */
	int16_t			c_dirhinttag;	/* directory hint tag */
	union {
	    int16_t     cu_dirhintcnt;          /* directory hint count */
	    int16_t     cu_syslockcount;        /* system file use only */
	} c_union;
	u_int32_t		c_dirchangecnt; /* changes each insert/delete (in-core only) */
	struct filefork		*c_datafork;	/* cnode's data fork */
	struct filefork		*c_rsrcfork;	/* cnode's rsrc fork */
	atomicflag_t	c_touch_acctime;
	atomicflag_t	c_touch_chgtime;
	atomicflag_t	c_touch_modtime;

	// The following flags are protected by the truncate lock
	union {
		struct {
			bool	c_need_dvnode_put_after_truncate_unlock : 1;
			bool	c_need_rvnode_put_after_truncate_unlock : 1;
#if HFS_COMPRESSION
			bool	c_need_decmpfs_reset			: 1;
#endif
		};
		uint8_t c_tflags;
	};

#if HFS_COMPRESSION
	decmpfs_cnode  *c_decmp;
#endif /* HFS_COMPRESSION */
#if CONFIG_PROTECT
	cprotect_t		c_cpentry;	/* content protection data */
#endif

};
typedef struct cnode cnode_t;

/* Aliases for common cnode fields */
#define c_cnid		c_desc.cd_cnid
#define c_hint		c_desc.cd_hint
#define c_parentcnid	c_desc.cd_parentcnid
#define c_encoding	c_desc.cd_encoding

#define c_fileid	c_attr.ca_fileid
#define c_mode		c_attr.ca_mode
#define c_linkcount	c_attr.ca_linkcount
#define c_uid		c_attr.ca_uid
#define c_gid		c_attr.ca_gid
#define c_rdev		c_attr.ca_union1.cau_rdev
#define c_atime		c_attr.ca_atime
#define c_mtime		c_attr.ca_mtime
#define c_ctime		c_attr.ca_ctime
#define c_itime		c_attr.ca_itime
#define c_btime		c_attr.ca_btime
#define c_bsdflags	c_attr.ca_flags
#define c_finderinfo	c_attr.ca_finderinfo
#define c_blocks	c_attr.ca_union2.cau_blocks
#define c_entries	c_attr.ca_union2.cau_entries
#define c_zftimeout	c_childhint

#define c_dirhintcnt    c_union.cu_dirhintcnt
#define c_syslockcount  c_union.cu_syslockcount


/* hash maintenance flags kept in c_hflag and protected by hfs_chash_mutex */
#define H_ALLOC		0x00001	/* CNode is being allocated */
#define H_ATTACH	0x00002	/* CNode is being attached to by another vnode */
#define	H_TRANSIT	0x00004	/* CNode is getting recycled */
#define H_WAITING	0x00008	/* CNode is being waited for */


/*
 * Runtime cnode flags (kept in c_flag)
 */
#define C_NEED_RVNODE_PUT   0x0000001  /* Need to do a vnode_put on c_rsrc_vp after the unlock */
#define C_NEED_DVNODE_PUT   0x0000002  /* Need to do a vnode_put on c_vp after the unlock */
#define C_ZFWANTSYNC	    0x0000004  /* fsync requested and file has holes */
#define C_FROMSYNC          0x0000008  /* fsync was called from sync */

#define C_MODIFIED          0x0000010  /* CNode has been modified */
#define C_NOEXISTS          0x0000020  /* CNode has been deleted, catalog entry is gone */
#define C_DELETED           0x0000040  /* CNode has been marked to be deleted */
#define C_HARDLINK          0x0000080  /* CNode is a hard link (file or dir) */

#define C_FORCEUPDATE       0x0000100  /* force the catalog entry update */
#define C_HASXATTRS         0x0000200  /* cnode has extended attributes */
#define C_NEG_ENTRIES       0x0000400  /* directory has negative name entries */
/*
 * For C_SSD_STATIC: SSDs may want to deal with the file payload data in a
 * different manner knowing that the content is not likely to be modified. This is
 * purely advisory at the HFS level, and is not maintained after the cnode goes out of core.
 */
#define C_SSD_STATIC        0x0000800  /* Assume future writes contain static content */

#define C_NEED_DATA_SETSIZE 0x0001000  /* Do a ubc_setsize(0) on c_vp after the unlock */
#define C_NEED_RSRC_SETSIZE 0x0002000  /* Do a ubc_setsize(0) on c_rsrc_vp after the unlock */
#define C_DIR_MODIFICATION  0x0004000  /* Directory is being modified, wait for lookups */
#define C_ALWAYS_ZEROFILL   0x0008000  /* Always zero-fill the file on an fsync */

#define C_RENAMED           0x0010000  /* cnode was deleted as part of rename; C_DELETED should also be set */
#define C_NEEDS_DATEADDED   0x0020000  /* cnode needs date-added written to the finderinfo bit */
#define C_BACKINGSTORE      0x0040000  /* cnode is a backing store for an existing or currently-mounting filesystem */

/*
 * This flag indicates the cnode might be dirty because it
 * was mapped writable so if we get any page-outs, update
 * the modification and change times.
 */
#define C_MIGHT_BE_DIRTY_FROM_MAPPING   0x0080000

/*
 * For C_SSD_GREEDY_MODE: SSDs may want to write the file payload data using the greedy mode knowing
 * that the content needs to be written out to the disk more quickly than normal at the expense of storage efficiency.
 * This is purely advisory at the HFS level, and is not maintained after the cnode goes out of core.
 */
#define C_SSD_GREEDY_MODE   0x0100000  /* Assume future writes are recommended to be written in SLC mode */

/* 0x0200000 is currently unused */

#define C_IO_ISOCHRONOUS    0x0400000  /* device-specific isochronous throughput I/O */

#define ZFTIMELIMIT	(5 * 60)

/*
 * The following is the "invisible" bit from the fdFlags field
 * in the FndrFileInfo.
 */
enum { kFinderInvisibleMask = 1 << 14 };


/*
 * Convert between cnode pointers and vnode pointers
 */
#define VTOC(vp)	((struct cnode *)vnode_fsnode((vp)))

#define CTOV(cp,rsrc)	(((rsrc) && S_ISREG((cp)->c_mode)) ? \
			(cp)->c_rsrc_vp : (cp)->c_vp)

/*
 * Convert between vnode pointers and file forks
 *
 * Note: no CTOF since that is ambiguous
 */

#define FTOC(fp)	((fp)->ff_cp)

#define VTOF(vp)	((vp) == VTOC((vp))->c_rsrc_vp ?	\
			 VTOC((vp))->c_rsrcfork :		\
			 VTOC((vp))->c_datafork)

#define VCTOF(vp, cp)	((vp) == (cp)->c_rsrc_vp ?	\
			 (cp)->c_rsrcfork :		\
			 (cp)->c_datafork)

#define FTOV(fp)	((fp) == FTOC(fp)->c_rsrcfork ?		\
			 FTOC(fp)->c_rsrc_vp :			\
			 FTOC(fp)->c_vp)

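/*
 * Illustrative sketch (not part of the original header text): the conversion
 * macros compose, so a VNOP can move between the three views of one object:
 *
 *	struct cnode  *cp = VTOC(vp);		// vnode -> cnode
 *	filefork_t    *fp = VTOF(vp);		// vnode -> fork (data or rsrc)
 *	struct vnode *dvp = CTOV(cp, 0);	// cnode -> data-fork/dir vnode
 *	struct vnode *fvp = FTOV(fp);		// fork  -> its vnode
 *
 *	// fp == VCTOF(vp, cp) when cp was obtained from the same vp.
 */
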
/*
 * Helper function for determining whether a cnode has become open-unlinked
 * between the time we acquired its vnode and the time we acquire the cnode
 * lock to start manipulating it.  Because VFS runs concurrently on SMP
 * systems, this check is usually needed every time a cnode lock is taken,
 * as the contents of the cnode may have been modified between the lookup
 * and the VNOP.  Whether to call it depends on the VNOP in question:
 * sometimes it is OK to use an open-unlinked file, for example when reading,
 * but other times, such as on the source of a VNOP_RENAME, it should be
 * disallowed.
 */
int hfs_checkdeleted(struct cnode *cp);
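
/*
 * Illustrative sketch (assumed error handling, not verbatim from a VNOP):
 * the typical pattern is to take the cnode lock first and bail out if the
 * cnode turned out to be deleted while it was unlocked:
 *
 *	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)))
 *		return (error);
 *	if ((error = hfs_checkdeleted(cp))) {
 *		hfs_unlock(cp);
 *		return (error);		// open-unlinked; typically ENOENT
 *	}
 */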

/*
 * Test for a resource fork
 */
#define FORK_IS_RSRC(fp)	((fp) == FTOC(fp)->c_rsrcfork)

#define VNODE_IS_RSRC(vp)	((vp) == VTOC((vp))->c_rsrc_vp)

#if HFS_COMPRESSION
/*
 * VTOCMP(vp) returns a pointer to vp's decmpfs_cnode; this could be NULL
 * if the file is not compressed or if hfs_file_is_compressed() hasn't
 * yet been called on this file.
 */
#define VTOCMP(vp) (VTOC((vp))->c_decmp)
int hfs_file_is_compressed(struct cnode *cp, int skiplock);
int hfs_uncompressed_size_of_compressed_file(struct hfsmount *hfsmp, struct vnode *vp, cnid_t fid, off_t *size, int skiplock);
int hfs_hides_rsrc(vfs_context_t ctx, struct cnode *cp, int skiplock);
int hfs_hides_xattr(vfs_context_t ctx, struct cnode *cp, const char *name, int skiplock);
#endif
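
/*
 * Illustrative sketch (not verbatim from the HFS sources; the skiplock
 * convention shown is an assumption): a read path can consult the
 * compression state before deciding how large the logical file really is:
 *
 *	#if HFS_COMPRESSION
 *	if (hfs_file_is_compressed(cp, 0)) {
 *		off_t uncompressed_size;
 *		error = hfs_uncompressed_size_of_compressed_file(hfsmp, vp,
 *				cp->c_fileid, &uncompressed_size, 0);
 *	}
 *	#endif
 */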

#define ATIME_ONDISK_ACCURACY	300


/* This overlays the FileID portion of NFS file handles. */
struct hfsfid {
	u_int32_t hfsfid_cnid;	/* Catalog node ID. */
	u_int32_t hfsfid_gen;	/* Generation number (create date). */
};


/* Get new default vnode */
extern int hfs_getnewvnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp,
                           struct cat_desc *descp, int flags, struct cat_attr *attrp,
                           struct cat_fork *forkp, struct vnode **vpp, int *out_flags);

/* Input flags for hfs_getnewvnode */

#define GNV_WANTRSRC   0x01  /* Request the resource fork vnode. */
#define GNV_SKIPLOCK   0x02  /* Skip taking the cnode lock (when getting resource fork). */
#define GNV_CREATE     0x04  /* The vnode is for a newly created item. */
#define GNV_NOCACHE    0x08  /* Delay entering this item in the name cache */

/* Output flags for hfs_getnewvnode */
#define GNV_CHASH_RENAMED	0x01	/* The cnode was renamed in-flight */
#define GNV_CAT_DELETED		0x02	/* The cnode was deleted from the catalog */
#define GNV_NEW_CNODE		0x04	/* We are vending out a newly initialized cnode */
#define GNV_CAT_ATTRCHANGED	0x08	/* Something in struct cat_attr changed in between cat_lookups */
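
/*
 * Illustrative sketch (not verbatim from the HFS sources): after a catalog
 * lookup has filled in a cat_desc/cat_attr/cat_fork, a vnode is obtained and
 * the output flags are checked for races against delete/rename:
 *
 *	int gnv_flags = 0, out_flags = 0;
 *
 *	error = hfs_getnewvnode(hfsmp, dvp, cnp, &desc, gnv_flags, &attr,
 *	                        &fork, &vp, &out_flags);
 *	if (error == 0 && (out_flags & GNV_CAT_DELETED)) {
 *		// catalog entry vanished while the vnode was being created;
 *		// treat the result like an open-unlinked cnode
 *	}
 */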

/* Touch cnode times based on c_touch_xxx flags */
extern void hfs_touchtimes(struct hfsmount *, struct cnode *);
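
/*
 * Illustrative sketch (not verbatim from the HFS sources): operations mark
 * which timestamps they dirtied and then let hfs_touchtimes() fold them in:
 *
 *	cp->c_touch_chgtime = 1;
 *	cp->c_touch_modtime = 1;
 *	hfs_touchtimes(hfsmp, cp);	// cnode lock is assumed to be held
 */
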
extern void hfs_write_dateadded (struct cat_attr *cattrp, u_int32_t dateadded);
extern u_int32_t hfs_get_dateadded (struct cnode *cp);
extern u_int32_t hfs_get_dateadded_from_blob(const uint8_t * /* finderinfo */, mode_t /* mode */);

/* Gen counter methods */
extern void hfs_write_gencount(struct cat_attr *cattrp, uint32_t gencount);
extern uint32_t hfs_get_gencount(struct cnode *cp);
extern uint32_t hfs_incr_gencount (struct cnode *cp);
extern uint32_t hfs_get_gencount_from_blob(const uint8_t * /* finderinfo */, mode_t /* mode */);

/* Document id methods */
extern uint32_t hfs_get_document_id(struct cnode * /* cp */);
extern uint32_t hfs_get_document_id_from_blob(const uint8_t * /* finderinfo */, mode_t /* mode */);

/* Zero-fill file and push regions out to disk */
enum {
	// Use this flag if you're going to sync later
	HFS_FILE_DONE_NO_SYNC 	= 1,
};
typedef uint32_t hfs_file_done_opts_t;
extern int  hfs_filedone(struct vnode *vp, vfs_context_t context,
						 hfs_file_done_opts_t opts);
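
/*
 * Illustrative sketch (assumed context, not verbatim from the HFS sources):
 * a writer that has finished filling a file can push it out, passing
 * HFS_FILE_DONE_NO_SYNC when a sync will follow later anyway (per the flag's
 * comment above):
 *
 *	error = hfs_filedone(vp, ctx, HFS_FILE_DONE_NO_SYNC);
 */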

/*
 * HFS cnode hash functions.
 */
extern void  hfs_chashinit(void);
extern void  hfs_chashinit_finish(struct hfsmount *hfsmp);
extern void  hfs_delete_chash(struct hfsmount *hfsmp);
extern int   hfs_chashremove(struct hfsmount *hfsmp, struct cnode *cp);
extern void  hfs_chash_abort(struct hfsmount *hfsmp, struct cnode *cp);
extern void  hfs_chash_rehash(struct hfsmount *hfsmp, struct cnode *cp1, struct cnode *cp2);
extern void  hfs_chashwakeup(struct hfsmount *hfsmp, struct cnode *cp, int flags);
extern void  hfs_chash_mark_in_transit(struct hfsmount *hfsmp, struct cnode *cp);

extern struct vnode * hfs_chash_getvnode(struct hfsmount *hfsmp, ino_t inum, int wantrsrc,
										int skiplock, int allow_deleted);
extern struct cnode * hfs_chash_getcnode(struct hfsmount *hfsmp, ino_t inum, struct vnode **vpp,
										 int wantrsrc, int skiplock, int *out_flags, int *hflags);
extern int hfs_chash_snoop(struct hfsmount *, ino_t, int, int (*)(const cnode_t *, void *), void *);
extern int hfs_valid_cnode(struct hfsmount *hfsmp, struct vnode *dvp, struct componentname *cnp,
							cnid_t cnid, struct cat_attr *cattr, int *error);

extern int hfs_chash_set_childlinkbit(struct hfsmount *hfsmp, cnid_t cnid);

/*
 * HFS cnode lock functions.
 *
 *  HFS Locking Order:
 *
 *  1. cnode truncate lock (if needed) -- see below for more on this
 *
 *     + hfs_vnop_pagein/out handles recursive use of this lock (by
 *       using flag option HFS_LOCK_SKIP_IF_EXCLUSIVE) although there
 *       are issues with this (see #16620278).
 *
 *     + If locking multiple cnodes then the truncate lock must be taken on
 *       both (in address order), before taking the cnode locks.
 *
 *  2. cnode lock (in parent-child order if related, otherwise by address order)
 *
 *  3. journal (if needed)
 *
 *  4. system files (as needed)
 *
 *       A. Catalog B-tree file
 *       B. Attributes B-tree file
 *       C. Startup file (if there is one)
 *       D. Allocation Bitmap file (always exclusive, supports recursion)
 *       E. Overflow Extents B-tree file (always exclusive, supports recursion)
 *
 *  5. hfs mount point (always last)
 *
 *
 * I. HFS cnode hash lock (must not acquire any new locks while holding this lock, always taken last)
 */
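
/*
 * Illustrative sketch (not verbatim from the HFS sources): a two-cnode
 * operation would follow the order above by taking both truncate locks in
 * address order before the cnode locks; hfs_lockpair() is assumed here to
 * handle the cnode-lock ordering itself:
 *
 *	cnode_t *first  = (cp1 < cp2) ? cp1 : cp2;
 *	cnode_t *second = (cp1 < cp2) ? cp2 : cp1;
 *
 *	hfs_lock_truncate(first,  HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
 *	hfs_lock_truncate(second, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
 *	if ((error = hfs_lockpair(cp1, cp2, HFS_EXCLUSIVE_LOCK)) == 0) {
 *		// ... modify both cnodes, then journal/system files as needed ...
 *		hfs_unlockpair(cp1, cp2);
 *	}
 *	hfs_unlock_truncate(second, HFS_LOCK_DEFAULT);
 *	hfs_unlock_truncate(first,  HFS_LOCK_DEFAULT);
 */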

/*
 * -- The Truncate Lock --
 *
 * The truncate lock is used for a few purposes (more than its name
 * might suggest).  The first thing to note is that the cnode lock
 * cannot be held whilst issuing any I/O other than metadata changes,
 * so the truncate lock, in either shared or exclusive form, must
 * usually be held in these cases.  This includes calls to ubc_setsize
 * where the new size is less than the current size known to the VM
 * subsystem (for two reasons: a) because reaping pages can block
 * (e.g. on pages that are busy or being cleaned); b) reaping pages
 * might require page-in for tasks that have that region mapped
 * privately).  The same applies to other calls into the VM subsystem.
 *
 * Here are some (but not necessarily all) cases that the truncate
 * lock protects for:
 *
 *  + When reading and writing a file, we hold the truncate lock
 *    shared to ensure that the underlying blocks cannot be deleted;
 *    on systems that use content protection, this also ensures that
 *    the keys remain valid (they may be in use by the underlying
 *    layers).
 *
 *  + We need to protect against the following sequence of events:
 *
 *      A file is initially size X.  A thread issues an append to that
 *      file.  Another thread truncates the file and then extends it
 *      to a new size Y.  Now the append can be applied at offset X
 *      and then the data is lost when the file is truncated; or it
 *      could be applied after the truncate, i.e. at offset 0; or it
 *      can be applied at offset Y.  What we *cannot* do is apply the
 *      append at offset X and have the data remain visible at the end.
 *      (Note that we are free to choose when we apply the append
 *      operation.)
 *
 *    To solve this, we keep things simple and take the truncate lock
 *    exclusively in order to sequence the append with other size
 *    changes.  Therefore any size change must take the truncate lock
 *    exclusively.
 *
 *    (N.B. we could do better and allow readers to run concurrently
 *    during the append and other size changes.)
 *
 * So here are the rules:
 *
 *  + If you plan to change ff_size, you must take the truncate lock
 *    exclusively, *but* be careful what I/O you do whilst you hold
 *    the truncate lock exclusively, and try to avoid it if you can:
 *    if the VM subsystem tries to do something with some pages on a
 *    different thread and you try to do some I/O with those same
 *    pages, we will deadlock.  (See #16620278.)
 *
 *  + If you do anything that requires blocks to not be deleted or
 *    encryption keys to remain valid, you must take the truncate lock
 *    shared.
 *
 *  + It follows, therefore, that if you want to delete blocks or
 *    delete keys, you must take the truncate lock exclusively.
 *
 * N.B. ff_size is actually protected by the cnode lock and so you
 * must hold the cnode lock exclusively to change it and shared to
 * read it.
 *
 */
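
/*
 * Illustrative sketch (not verbatim from the HFS sources) of the rules above
 * for a size change: the truncate lock is taken exclusively to serialize
 * against other size changes and I/O, and the cnode lock is taken
 * exclusively to update ff_size itself:
 *
 *	hfs_lock_truncate(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT);
 *	if ((error = hfs_lock(cp, HFS_EXCLUSIVE_LOCK, HFS_LOCK_DEFAULT)) == 0) {
 *		// ... deallocate/extend blocks (metadata changes), then ...
 *		VTOF(vp)->ff_size = new_size;
 *		hfs_unlock(cp);
 *	}
 *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
 */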

enum hfs_locktype {
	HFS_SHARED_LOCK = 1,
	HFS_EXCLUSIVE_LOCK = 2
};

/* Option flags for cnode and truncate lock functions */
enum hfs_lockflags {
	HFS_LOCK_DEFAULT           = 0x0,    /* Default flag, no options provided */
	HFS_LOCK_ALLOW_NOEXISTS    = 0x1,    /* Allow locking of all cnodes, including cnodes marked deleted with no catalog entry */
	HFS_LOCK_SKIP_IF_EXCLUSIVE = 0x2,    /* Skip locking if the current thread already holds the lock exclusive */

	// Used when you do not want to check return from hfs_lock
	HFS_LOCK_ALWAYS			   = HFS_LOCK_ALLOW_NOEXISTS,
};
#define HFS_SHARED_OWNER  (void *)0xffffffff

void hfs_lock_always(cnode_t *cnode, enum hfs_locktype);
int hfs_lock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
int hfs_lockpair(struct cnode *, struct cnode *, enum hfs_locktype);
int hfs_lockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *,
                        enum hfs_locktype, struct cnode **);

void hfs_unlock(struct cnode *);
void hfs_unlockpair(struct cnode *, struct cnode *);
void hfs_unlockfour(struct cnode *, struct cnode *, struct cnode *, struct cnode *);

void hfs_lock_truncate(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
void hfs_unlock_truncate(struct cnode *, enum hfs_lockflags);
int hfs_try_trunclock(struct cnode *, enum hfs_locktype, enum hfs_lockflags);
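
/*
 * Illustrative sketch (not verbatim from the HFS sources): a read-style path
 * takes both locks shared, per the ordering and truncate-lock rules above,
 * and drops the cnode lock before issuing I/O:
 *
 *	hfs_lock_truncate(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT);
 *	if ((error = hfs_lock(cp, HFS_SHARED_LOCK, HFS_LOCK_DEFAULT)) == 0) {
 *		filesize = VTOF(vp)->ff_size;	// cnode lock protects ff_size
 *		hfs_unlock(cp);			// drop before issuing I/O
 *		// ... issue the read while the shared truncate lock keeps
 *		//     the underlying blocks (and CP keys) valid ...
 *	}
 *	hfs_unlock_truncate(cp, HFS_LOCK_DEFAULT);
 */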

void hfs_clear_might_be_dirty_flag(cnode_t *cp);

// cnode must be locked
static inline __attribute__((pure))
bool hfs_has_rsrc(const cnode_t *cp)
{
	if (cp->c_rsrcfork)
		return cp->c_rsrcfork->ff_blocks > 0;
	else
		return cp->c_datafork && cp->c_blocks > cp->c_datafork->ff_blocks;
}
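
/*
 * Illustrative sketch (not part of the original header text): with the cnode
 * lock held, hfs_has_rsrc() tells a caller whether a resource-fork vnode is
 * worth instantiating (e.g. before requesting one with GNV_WANTRSRC):
 *
 *	if (hfs_has_rsrc(cp)) {
 *		// resource fork has allocated blocks; operate on c_rsrc_vp
 *	}
 */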

#endif /* __APPLE_API_PRIVATE */
#endif /* KERNEL */

#endif /* ! _HFS_CNODE_H_ */