/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * ocfs2.h
 *
 * Defines macros and structures used in OCFS2
 *
 * Copyright (C) 2002, 2004 Oracle.  All rights reserved.
 */

#ifndef OCFS2_H
#define OCFS2_H

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <linux/rbtree.h>
#include <linux/workqueue.h>
#include <linux/kref.h>
#include <linux/mutex.h>
#include <linux/lockdep.h>
#include <linux/jbd2.h>

/* For union ocfs2_dlm_lksb */
#include "stackglue.h"

#include "ocfs2_fs.h"
#include "ocfs2_lockid.h"
#include "ocfs2_ioctl.h"

/* For struct ocfs2_blockcheck_stats */
#include "blockcheck.h"

#include "reservations.h"

#include "filecheck.h"

/* Caching of metadata buffers */

/* Most user-visible OCFS2 inodes will have very few pieces of
 * metadata, but larger files (including bitmaps, etc) must be taken
 * into account when designing an access scheme. We allow a small
 * number of inline blocks to be stored in an array and grow the
 * structure into an rb tree when necessary. */
#define OCFS2_CACHE_INFO_MAX_ARRAY 2
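
/*
 * Illustrative sketch (not part of the on-disk format): with
 * OCFS2_CACHE_INFO_MAX_ARRAY == 2, the first two cached block numbers
 * live directly in ci_cache.ci_array below.  Caching a third block is
 * expected to migrate the array entries into ci_cache.ci_tree and
 * clear OCFS2_CACHE_FL_INLINE; uptodate.c holds the real logic.
 */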

/* Flags for ocfs2_caching_info */

enum ocfs2_caching_info_flags {
	/* Indicates that the metadata cache is using the inline array */
	OCFS2_CACHE_FL_INLINE	= 1<<1,
};

struct ocfs2_caching_operations;
struct ocfs2_caching_info {
	/*
	 * The parent structure provides the locks, but because the
	 * parent structure can differ, it provides locking operations
	 * to struct ocfs2_caching_info.
	 */
	const struct ocfs2_caching_operations *ci_ops;

	/* next two are protected by trans_inc_lock */
	/* which transaction were we created on? Zero if none. */
	unsigned long		ci_created_trans;
	/* last transaction we were a part of. */
	unsigned long		ci_last_trans;

	/* Cache structures */
	unsigned int		ci_flags;
	unsigned int		ci_num_cached;
	union {
		sector_t	ci_array[OCFS2_CACHE_INFO_MAX_ARRAY];
		struct rb_root	ci_tree;
	} ci_cache;
};
/*
 * Need this prototype here instead of in uptodate.h because journal.h
 * uses it.
 */
struct super_block *ocfs2_metadata_cache_get_super(struct ocfs2_caching_info *ci);

/* This limits us to 256 nodes; if we need more, we can kmalloc
 * the map. */
#define OCFS2_NODE_MAP_MAX_NODES    256
struct ocfs2_node_map {
	u16 num_nodes;
	unsigned long map[BITS_TO_LONGS(OCFS2_NODE_MAP_MAX_NODES)];
};
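
/*
 * Sizing example (assuming a 64-bit build): BITS_TO_LONGS(256) == 4,
 * so the map is four unsigned longs.  A node is typically marked
 * present with set_bit(node_num, map->map) and queried with
 * test_bit(node_num, map->map); the ocfs2 node map helpers wrap these
 * primitives.
 */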

enum ocfs2_ast_action {
	OCFS2_AST_INVALID = 0,
	OCFS2_AST_ATTACH,
	OCFS2_AST_CONVERT,
	OCFS2_AST_DOWNCONVERT,
};

/* actions for an unlockast function to take. */
enum ocfs2_unlock_action {
	OCFS2_UNLOCK_INVALID = 0,
	OCFS2_UNLOCK_CANCEL_CONVERT,
	OCFS2_UNLOCK_DROP_LOCK,
};

/* ocfs2_lock_res->l_flags flags. */
#define OCFS2_LOCK_ATTACHED      (0x00000001) /* we have initialized
					       * the lvb */
#define OCFS2_LOCK_BUSY          (0x00000002) /* we are currently in
					       * dlm_lock */
#define OCFS2_LOCK_BLOCKED       (0x00000004) /* blocked waiting to
					       * downconvert */
#define OCFS2_LOCK_LOCAL         (0x00000008) /* newly created inode */
#define OCFS2_LOCK_NEEDS_REFRESH (0x00000010)
#define OCFS2_LOCK_REFRESHING    (0x00000020)
#define OCFS2_LOCK_INITIALIZED   (0x00000040) /* track initialization
					       * for shutdown paths */
#define OCFS2_LOCK_FREEING       (0x00000080) /* help dlmglue track
					       * when to skip queueing
					       * a lock because it's
					       * about to be
					       * dropped. */
#define OCFS2_LOCK_QUEUED        (0x00000100) /* queued for downconvert */
#define OCFS2_LOCK_NOCACHE       (0x00000200) /* don't use a holder count */
#define OCFS2_LOCK_PENDING       (0x00000400) /* This lockres is pending a
						 call to dlm_lock.  Only
						 exists with BUSY set. */
#define OCFS2_LOCK_UPCONVERT_FINISHING (0x00000800) /* blocks the dc thread
						     * from downconverting
						     * before the upconvert
						     * has completed */

#define OCFS2_LOCK_NONBLOCK_FINISHED (0x00001000) /* NONBLOCK cluster
						   * lock has already
						   * returned, do not block
						   * dc thread from
						   * downconverting */
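
/*
 * Illustrative only: these bits are read and modified under the
 * lockres spinlock, e.g.
 *
 *	spin_lock(&lockres->l_lock);
 *	if (lockres->l_flags & OCFS2_LOCK_BUSY)
 *		... wait for or requeue the lock ...
 *	spin_unlock(&lockres->l_lock);
 *
 * dlmglue.c is the authoritative user of these flags.
 */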

struct ocfs2_lock_res_ops;

typedef void (*ocfs2_lock_callback)(int status, unsigned long data);

#ifdef CONFIG_OCFS2_FS_STATS
struct ocfs2_lock_stats {
	u64		ls_total;	/* Total wait in NSEC */
	u32		ls_gets;	/* Num acquires */
	u32		ls_fail;	/* Num failed acquires */

	/* Storing max wait in usecs saves 24 bytes per inode */
	u32		ls_max;		/* Max wait in USEC */
	u64		ls_last;	/* Last unlock time in USEC */
};
#endif
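
/*
 * Informational note: a mean acquire latency can be derived from the
 * fields above as ls_total / ls_gets (nanoseconds); the locking_state
 * debugfs output in dlmglue.c is the usual consumer of these counters.
 */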

struct ocfs2_lock_res {
	void                    *l_priv;
	struct ocfs2_lock_res_ops *l_ops;

	struct list_head         l_blocked_list;
	struct list_head         l_mask_waiters;
	struct list_head	 l_holders;

	unsigned long		 l_flags;
	char                     l_name[OCFS2_LOCK_ID_MAX_LEN];
	unsigned int             l_ro_holders;
	unsigned int             l_ex_holders;
	signed char		 l_level;
	signed char		 l_requested;
	signed char		 l_blocking;

	/* Data packed - type enum ocfs2_lock_type */
	unsigned char            l_type;

	/* used from AST/BAST funcs. */
	/* Data packed - enum type ocfs2_ast_action */
	unsigned char            l_action;
	/* Data packed - enum type ocfs2_unlock_action */
	unsigned char            l_unlock_action;
	unsigned int             l_pending_gen;

	spinlock_t               l_lock;

	struct ocfs2_dlm_lksb    l_lksb;

	wait_queue_head_t        l_event;

	struct list_head         l_debug_list;

#ifdef CONFIG_OCFS2_FS_STATS
	struct ocfs2_lock_stats  l_lock_prmode;		/* PR mode stats */
	u32                      l_lock_refresh;	/* Disk refreshes */
	u64                      l_lock_wait;	/* First lock wait time */
	struct ocfs2_lock_stats  l_lock_exmode;		/* EX mode stats */
#endif
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	 l_lockdep_map;
#endif
};

enum ocfs2_orphan_reco_type {
	ORPHAN_NO_NEED_TRUNCATE = 0,
	ORPHAN_NEED_TRUNCATE,
};

enum ocfs2_orphan_scan_state {
	ORPHAN_SCAN_ACTIVE,
	ORPHAN_SCAN_INACTIVE
};

struct ocfs2_orphan_scan {
	struct mutex		os_lock;
	struct ocfs2_super	*os_osb;
	struct ocfs2_lock_res	os_lockres;     /* lock to synchronize scans */
	struct delayed_work	os_orphan_scan_work;
	time64_t		os_scantime;  /* time this node ran the scan */
	u32			os_count;      /* tracks node specific scans */
	u32			os_seqno;       /* tracks cluster wide scans */
	atomic_t		os_state;              /* ACTIVE or INACTIVE */
};

struct ocfs2_dlm_debug {
	struct kref d_refcnt;
	u32 d_filter_secs;
	struct list_head d_lockres_tracking;
};

enum ocfs2_vol_state
{
	VOLUME_INIT = 0,
	VOLUME_MOUNTED,
	VOLUME_MOUNTED_QUOTAS,
	VOLUME_DISMOUNTED,
	VOLUME_DISABLED
};

struct ocfs2_alloc_stats
{
	atomic_t moves;
	atomic_t local_data;
	atomic_t bitmap_data;
	atomic_t bg_allocs;
	atomic_t bg_extends;
};

enum ocfs2_local_alloc_state
{
	OCFS2_LA_UNUSED = 0,	/* Local alloc will never be used for
				 * this mountpoint. */
	OCFS2_LA_ENABLED,	/* Local alloc is in use. */
	OCFS2_LA_THROTTLED,	/* Local alloc is in use, but number
				 * of bits has been reduced. */
	OCFS2_LA_DISABLED	/* Local alloc has temporarily been
				 * disabled. */
};

enum ocfs2_mount_options
{
	OCFS2_MOUNT_HB_LOCAL = 1 << 0, /* Local heartbeat */
	OCFS2_MOUNT_BARRIER = 1 << 1,	/* Use block barriers */
	OCFS2_MOUNT_NOINTR  = 1 << 2,   /* Don't catch signals */
	OCFS2_MOUNT_ERRORS_PANIC = 1 << 3, /* Panic on errors */
	OCFS2_MOUNT_DATA_WRITEBACK = 1 << 4, /* No data ordering */
	OCFS2_MOUNT_LOCALFLOCKS = 1 << 5, /* No cluster aware user file locks */
	OCFS2_MOUNT_NOUSERXATTR = 1 << 6, /* No user xattr */
	OCFS2_MOUNT_INODE64 = 1 << 7,	/* Allow inode numbers > 2^32 */
	OCFS2_MOUNT_POSIX_ACL = 1 << 8,	/* Force POSIX access control lists */
	OCFS2_MOUNT_NO_POSIX_ACL = 1 << 9,	/* Disable POSIX access
						   control lists */
	OCFS2_MOUNT_USRQUOTA = 1 << 10, /* We support user quotas */
	OCFS2_MOUNT_GRPQUOTA = 1 << 11, /* We support group quotas */
	OCFS2_MOUNT_COHERENCY_BUFFERED = 1 << 12, /* Allow concurrent O_DIRECT
						     writes */
	OCFS2_MOUNT_HB_NONE = 1 << 13, /* No heartbeat */
	OCFS2_MOUNT_HB_GLOBAL = 1 << 14, /* Global heartbeat */

	OCFS2_MOUNT_JOURNAL_ASYNC_COMMIT = 1 << 15,  /* Journal Async Commit */
	OCFS2_MOUNT_ERRORS_CONT = 1 << 16, /* Return EIO to the calling process on error */
	OCFS2_MOUNT_ERRORS_ROFS = 1 << 17, /* Change filesystem to read-only on error */
};

#define OCFS2_OSB_SOFT_RO	0x0001
#define OCFS2_OSB_HARD_RO	0x0002
#define OCFS2_OSB_ERROR_FS	0x0004
#define OCFS2_DEFAULT_ATIME_QUANTUM	60

struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
struct ocfs2_replay_map;
struct ocfs2_quota_recovery;
struct ocfs2_super
{
	struct task_struct *commit_task;
	struct super_block *sb;
	struct inode *root_inode;
	struct inode *sys_root_inode;
	struct inode *global_system_inodes[NUM_GLOBAL_SYSTEM_INODES];
	struct inode **local_system_inodes;

	struct ocfs2_slot_info *slot_info;

	u32 *slot_recovery_generations;

	spinlock_t node_map_lock;

	u64 root_blkno;
	u64 system_dir_blkno;
	u64 bitmap_blkno;
	u32 bitmap_cpg;
	char *uuid_str;
	u32 uuid_hash;
	u8 *vol_label;
	u64 first_cluster_group_blkno;
	u32 fs_generation;

	u32 s_feature_compat;
	u32 s_feature_incompat;
	u32 s_feature_ro_compat;

	/* Protects s_next_generation, osb_flags and s_inode_steal_slot.
	 * Could protect more on osb as it's very short lived.
	 */
	spinlock_t osb_lock;
	u32 s_next_generation;
	unsigned long osb_flags;
	u16 s_inode_steal_slot;
	u16 s_meta_steal_slot;
	atomic_t s_num_inodes_stolen;
	atomic_t s_num_meta_stolen;

	unsigned long s_mount_opt;
	unsigned int s_atime_quantum;

	unsigned int max_slots;
	unsigned int node_num;
	int slot_num;
	int preferred_slot;
	int s_sectsize_bits;
	int s_clustersize;
	int s_clustersize_bits;
	unsigned int s_xattr_inline_size;

	atomic_t vol_state;
	struct mutex recovery_lock;
	struct ocfs2_recovery_map *recovery_map;
	struct ocfs2_replay_map *replay_map;
	struct task_struct *recovery_thread_task;
	int disable_recovery;
	wait_queue_head_t checkpoint_event;
	struct ocfs2_journal *journal;
	unsigned long osb_commit_interval;

	struct delayed_work		la_enable_wq;

	/*
	 * Must hold local alloc i_rwsem and osb->osb_lock to change
	 * local_alloc_bits. Reads can be done under either lock.
	 */
	unsigned int local_alloc_bits;
	unsigned int local_alloc_default_bits;
	/* osb_clusters_at_boot can become stale! Do not trust it to
	 * be up to date. */
	unsigned int osb_clusters_at_boot;

	enum ocfs2_local_alloc_state local_alloc_state; /* protected
							 * by osb_lock */

	struct buffer_head *local_alloc_bh;

	u64 la_last_gd;

	struct ocfs2_reservation_map	osb_la_resmap;

	unsigned int	osb_resv_level;
	unsigned int	osb_dir_resv_level;

	/* Next two fields are for local node slot recovery during
	 * mount. */
	struct ocfs2_dinode *local_alloc_copy;
	struct ocfs2_quota_recovery *quota_rec;

	struct ocfs2_blockcheck_stats osb_ecc_stats;
	struct ocfs2_alloc_stats alloc_stats;
	char dev_str[20];		/* "major,minor" of the device */

	u8 osb_stackflags;

	char osb_cluster_stack[OCFS2_STACK_LABEL_LEN + 1];
	char osb_cluster_name[OCFS2_CLUSTER_NAME_LEN + 1];
	struct ocfs2_cluster_connection *cconn;
	struct ocfs2_lock_res osb_super_lockres;
	struct ocfs2_lock_res osb_rename_lockres;
	struct ocfs2_lock_res osb_nfs_sync_lockres;
	struct rw_semaphore nfs_sync_rwlock;
	struct ocfs2_lock_res osb_trim_fs_lockres;
	struct mutex obs_trim_fs_mutex;
	struct ocfs2_dlm_debug *osb_dlm_debug;

	struct dentry *osb_debug_root;

	wait_queue_head_t recovery_event;

	spinlock_t dc_task_lock;
	struct task_struct *dc_task;
	wait_queue_head_t dc_event;
	unsigned long dc_wake_sequence;
	unsigned long dc_work_sequence;

	/*
	 * Any thread can add locks to the list, but the downconvert
	 * thread is the only one allowed to remove locks. Any change
	 * to this rule requires updating
	 * ocfs2_downconvert_thread_do_work().
	 */
	struct list_head blocked_lock_list;
	unsigned long blocked_lock_count;

	/* List of dquot structures to drop last reference to */
	struct llist_head dquot_drop_list;
	struct work_struct dquot_drop_work;

	wait_queue_head_t		osb_mount_event;

	/* Truncate log info */
	struct inode			*osb_tl_inode;
	struct buffer_head		*osb_tl_bh;
	struct delayed_work		osb_truncate_log_wq;
	atomic_t			osb_tl_disable;
	/*
	 * How many clusters in our truncate log.
	 * It must be protected by osb_tl_inode->i_rwsem.
	 */
	unsigned int truncated_clusters;

	struct ocfs2_node_map		osb_recovering_orphan_dirs;
	unsigned int			*osb_orphan_wipes;
	wait_queue_head_t		osb_wipe_event;

	struct ocfs2_orphan_scan	osb_orphan_scan;

	/* used to protect metaecc calculation check of xattr. */
	spinlock_t osb_xattr_lock;

	unsigned int			osb_dx_mask;
	u32				osb_dx_seed[4];

	/* the group we used to allocate inodes. */
	u64				osb_inode_alloc_group;

	/* rb tree root for refcount lock. */
	struct rb_root	osb_rf_lock_tree;
	struct ocfs2_refcount_tree *osb_ref_tree_lru;

	struct mutex system_file_mutex;

	/*
	 * OCFS2 needs to schedule several different types of work which
	 * require cluster locking, disk I/O, recovery waits, etc. Since these
	 * types of work tend to be heavy we avoid using the kernel events
	 * workqueue and schedule on our own.
	 */
	struct workqueue_struct *ocfs2_wq;

	/* sysfs directory per partition */
	struct kset *osb_dev_kset;

	/* file check related stuff */
	struct ocfs2_filecheck_sysfs_entry osb_fc_ent;
};

#define OCFS2_SB(sb)	    ((struct ocfs2_super *)(sb)->s_fs_info)

/* Useful typedef for passing around journal access functions */
typedef int (*ocfs2_journal_access_func)(handle_t *handle,
					 struct ocfs2_caching_info *ci,
					 struct buffer_head *bh, int type);
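
/*
 * Usage sketch (see journal.h for the actual declarations): callers
 * typically pass one of the type-specific wrappers, e.g.
 *
 *	ocfs2_journal_access_func access = ocfs2_journal_access_di;
 *	status = access(handle, INODE_CACHE(inode), di_bh,
 *			OCFS2_JOURNAL_ACCESS_WRITE);
 *
 * so generic code can journal a buffer without knowing which metadata
 * type it holds.
 */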

static inline int ocfs2_should_order_data(struct inode *inode)
{
	if (!S_ISREG(inode->i_mode))
		return 0;
	if (OCFS2_SB(inode->i_sb)->s_mount_opt & OCFS2_MOUNT_DATA_WRITEBACK)
		return 0;
	return 1;
}

static inline int ocfs2_sparse_alloc(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_SPARSE_ALLOC)
		return 1;
	return 0;
}

static inline int ocfs2_writes_unwritten_extents(struct ocfs2_super *osb)
{
	/*
	 * Support for sparse files is a prerequisite.
	 */
	if (!ocfs2_sparse_alloc(osb))
		return 0;

	if (osb->s_feature_ro_compat & OCFS2_FEATURE_RO_COMPAT_UNWRITTEN)
		return 1;
	return 0;
}

static inline int ocfs2_supports_append_dio(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_APPEND_DIO)
		return 1;
	return 0;
}

static inline int ocfs2_supports_inline_data(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INLINE_DATA)
		return 1;
	return 0;
}

static inline int ocfs2_supports_xattr(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_XATTR)
		return 1;
	return 0;
}

static inline int ocfs2_meta_ecc(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_META_ECC)
		return 1;
	return 0;
}

static inline int ocfs2_supports_indexed_dirs(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_INDEXED_DIRS)
		return 1;
	return 0;
}

static inline int ocfs2_supports_discontig_bg(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_DISCONTIG_BG)
		return 1;
	return 0;
}

static inline unsigned int ocfs2_link_max(struct ocfs2_super *osb)
{
	if (ocfs2_supports_indexed_dirs(osb))
		return OCFS2_DX_LINK_MAX;
	return OCFS2_LINK_MAX;
}

static inline unsigned int ocfs2_read_links_count(struct ocfs2_dinode *di)
{
	u32 nlink = le16_to_cpu(di->i_links_count);
	u32 hi = le16_to_cpu(di->i_links_count_hi);

	nlink |= (hi << OCFS2_LINKS_HI_SHIFT);

	return nlink;
}

static inline void ocfs2_set_links_count(struct ocfs2_dinode *di, u32 nlink)
{
	u16 lo, hi;

	lo = nlink;
	hi = nlink >> OCFS2_LINKS_HI_SHIFT;

	di->i_links_count = cpu_to_le16(lo);
	di->i_links_count_hi = cpu_to_le16(hi);
}
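
/*
 * Worked example: for nlink == 0x12345, ocfs2_set_links_count() stores
 * lo == 0x2345 in i_links_count and hi == 0x1 in i_links_count_hi
 * (OCFS2_LINKS_HI_SHIFT is 16), and ocfs2_read_links_count()
 * reassembles 0x12345 from the two halves.
 */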

static inline void ocfs2_add_links_count(struct ocfs2_dinode *di, int n)
{
	u32 links = ocfs2_read_links_count(di);

	links += n;

	ocfs2_set_links_count(di, links);
}

static inline int ocfs2_refcount_tree(struct ocfs2_super *osb)
{
	if (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_REFCOUNT_TREE)
		return 1;
	return 0;
}

/* Set / clear functions, because cluster events can make these happen
 * in parallel and we want the transitions to be atomic. This also
 * means that any future flags added to osb_flags must be protected by
 * the spinlock too! */
static inline void ocfs2_set_osb_flag(struct ocfs2_super *osb,
				      unsigned long flag)
{
	spin_lock(&osb->osb_lock);
	osb->osb_flags |= flag;
	spin_unlock(&osb->osb_lock);
}

static inline void ocfs2_set_ro_flag(struct ocfs2_super *osb,
				     int hard)
{
	spin_lock(&osb->osb_lock);
	osb->osb_flags &= ~(OCFS2_OSB_SOFT_RO|OCFS2_OSB_HARD_RO);
	if (hard)
		osb->osb_flags |= OCFS2_OSB_HARD_RO;
	else
		osb->osb_flags |= OCFS2_OSB_SOFT_RO;
	spin_unlock(&osb->osb_lock);
}

static inline int ocfs2_is_hard_readonly(struct ocfs2_super *osb)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = osb->osb_flags & OCFS2_OSB_HARD_RO;
	spin_unlock(&osb->osb_lock);

	return ret;
}

static inline int ocfs2_is_soft_readonly(struct ocfs2_super *osb)
{
	int ret;

	spin_lock(&osb->osb_lock);
	ret = osb->osb_flags & OCFS2_OSB_SOFT_RO;
	spin_unlock(&osb->osb_lock);

	return ret;
}

static inline int ocfs2_clusterinfo_valid(struct ocfs2_super *osb)
{
	return (osb->s_feature_incompat &
		(OCFS2_FEATURE_INCOMPAT_USERSPACE_STACK |
		 OCFS2_FEATURE_INCOMPAT_CLUSTERINFO));
}

static inline int ocfs2_userspace_stack(struct ocfs2_super *osb)
{
	if (ocfs2_clusterinfo_valid(osb) &&
	    memcmp(osb->osb_cluster_stack, OCFS2_CLASSIC_CLUSTER_STACK,
		   OCFS2_STACK_LABEL_LEN))
		return 1;
	return 0;
}

static inline int ocfs2_o2cb_stack(struct ocfs2_super *osb)
{
	if (ocfs2_clusterinfo_valid(osb) &&
	    !memcmp(osb->osb_cluster_stack, OCFS2_CLASSIC_CLUSTER_STACK,
		   OCFS2_STACK_LABEL_LEN))
		return 1;
	return 0;
}

static inline int ocfs2_cluster_o2cb_global_heartbeat(struct ocfs2_super *osb)
{
	return ocfs2_o2cb_stack(osb) &&
		(osb->osb_stackflags & OCFS2_CLUSTER_O2CB_GLOBAL_HEARTBEAT);
}

static inline int ocfs2_mount_local(struct ocfs2_super *osb)
{
	return (osb->s_feature_incompat & OCFS2_FEATURE_INCOMPAT_LOCAL_MOUNT);
}

static inline int ocfs2_uses_extended_slot_map(struct ocfs2_super *osb)
{
	return (osb->s_feature_incompat &
		OCFS2_FEATURE_INCOMPAT_EXTENDED_SLOT_MAP);
}

#define OCFS2_IS_VALID_DINODE(ptr)					\
	(!strcmp((ptr)->i_signature, OCFS2_INODE_SIGNATURE))

#define OCFS2_IS_VALID_EXTENT_BLOCK(ptr)				\
	(!strcmp((ptr)->h_signature, OCFS2_EXTENT_BLOCK_SIGNATURE))

#define OCFS2_IS_VALID_GROUP_DESC(ptr)					\
	(!strcmp((ptr)->bg_signature, OCFS2_GROUP_DESC_SIGNATURE))

#define OCFS2_IS_VALID_XATTR_BLOCK(ptr)					\
	(!strcmp((ptr)->xb_signature, OCFS2_XATTR_BLOCK_SIGNATURE))

#define OCFS2_IS_VALID_DIR_TRAILER(ptr)					\
	(!strcmp((ptr)->db_signature, OCFS2_DIR_TRAILER_SIGNATURE))

#define OCFS2_IS_VALID_DX_ROOT(ptr)					\
	(!strcmp((ptr)->dr_signature, OCFS2_DX_ROOT_SIGNATURE))

#define OCFS2_IS_VALID_DX_LEAF(ptr)					\
	(!strcmp((ptr)->dl_signature, OCFS2_DX_LEAF_SIGNATURE))

#define OCFS2_IS_VALID_REFCOUNT_BLOCK(ptr)				\
	(!strcmp((ptr)->rf_signature, OCFS2_REFCOUNT_BLOCK_SIGNATURE))

static inline unsigned long ino_from_blkno(struct super_block *sb,
					   u64 blkno)
{
	return (unsigned long)(blkno & (u64)ULONG_MAX);
}

static inline u64 ocfs2_clusters_to_blocks(struct super_block *sb,
					   u32 clusters)
{
	int c_to_b_bits = OCFS2_SB(sb)->s_clustersize_bits -
		sb->s_blocksize_bits;

	return (u64)clusters << c_to_b_bits;
}
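
/*
 * Example: with 4K blocks (s_blocksize_bits == 12) and 32K clusters
 * (s_clustersize_bits == 15), c_to_b_bits is 3, so 10 clusters map to
 * 10 << 3 == 80 blocks.
 */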

static inline u32 ocfs2_clusters_for_blocks(struct super_block *sb,
		u64 blocks)
{
	int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
			sb->s_blocksize_bits;

	blocks += (1 << b_to_c_bits) - 1;
	return (u32)(blocks >> b_to_c_bits);
}

static inline u32 ocfs2_blocks_to_clusters(struct super_block *sb,
					   u64 blocks)
{
	int b_to_c_bits = OCFS2_SB(sb)->s_clustersize_bits -
		sb->s_blocksize_bits;

	return (u32)(blocks >> b_to_c_bits);
}
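
/*
 * Note the rounding difference between the two helpers above: with the
 * 4K-block / 32K-cluster example, 9 blocks give
 * ocfs2_clusters_for_blocks() == 2 (rounds up), while
 * ocfs2_blocks_to_clusters() == 1 (truncates).
 */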

static inline unsigned int ocfs2_clusters_for_bytes(struct super_block *sb,
						    u64 bytes)
{
	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
	unsigned int clusters;

	bytes += OCFS2_SB(sb)->s_clustersize - 1;
	/* OCFS2 just cannot have enough clusters to overflow this */
	clusters = (unsigned int)(bytes >> cl_bits);

	return clusters;
}

static inline unsigned int ocfs2_bytes_to_clusters(struct super_block *sb,
		u64 bytes)
{
	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
	unsigned int clusters;

	clusters = (unsigned int)(bytes >> cl_bits);
	return clusters;
}
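
/*
 * The same pattern applies to byte counts: with a 32K cluster size,
 * ocfs2_clusters_for_bytes(sb, 1) == 1 (rounds up), while
 * ocfs2_bytes_to_clusters(sb, 1) == 0 (truncates).
 */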

static inline u64 ocfs2_blocks_for_bytes(struct super_block *sb,
					 u64 bytes)
{
	bytes += sb->s_blocksize - 1;
	return bytes >> sb->s_blocksize_bits;
}

static inline u64 ocfs2_clusters_to_bytes(struct super_block *sb,
					  u32 clusters)
{
	return (u64)clusters << OCFS2_SB(sb)->s_clustersize_bits;
}

static inline u64 ocfs2_block_to_cluster_start(struct super_block *sb,
					       u64 blocks)
{
	int bits = OCFS2_SB(sb)->s_clustersize_bits - sb->s_blocksize_bits;
	unsigned int clusters;

	clusters = ocfs2_blocks_to_clusters(sb, blocks);
	return (u64)clusters << bits;
}

static inline u64 ocfs2_align_bytes_to_clusters(struct super_block *sb,
						u64 bytes)
{
	int cl_bits = OCFS2_SB(sb)->s_clustersize_bits;
	unsigned int clusters;

	clusters = ocfs2_clusters_for_bytes(sb, bytes);
	return (u64)clusters << cl_bits;
}

static inline u64 ocfs2_align_bytes_to_blocks(struct super_block *sb,
					      u64 bytes)
{
	u64 blocks;

	blocks = ocfs2_blocks_for_bytes(sb, bytes);
	return blocks << sb->s_blocksize_bits;
}

static inline unsigned long ocfs2_align_bytes_to_sectors(u64 bytes)
{
	return (unsigned long)((bytes + 511) >> 9);
}

static inline unsigned int ocfs2_page_index_to_clusters(struct super_block *sb,
							unsigned long pg_index)
{
	u32 clusters = pg_index;
	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;

	if (unlikely(PAGE_SHIFT > cbits))
		clusters = pg_index << (PAGE_SHIFT - cbits);
	else if (PAGE_SHIFT < cbits)
		clusters = pg_index >> (cbits - PAGE_SHIFT);

	return clusters;
}
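
/*
 * Example (assuming 4K pages, PAGE_SHIFT == 12): with 32K clusters
 * (cbits == 15) the PAGE_SHIFT < cbits branch applies and page index 8
 * maps to cluster 8 >> 3 == 1; with 4K clusters the page index and the
 * cluster number are identical.
 */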

/*
 * Find the 1st page index which covers the given clusters.
 */
static inline pgoff_t ocfs2_align_clusters_to_page_index(struct super_block *sb,
							u32 clusters)
{
	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
	pgoff_t index = clusters;

	if (PAGE_SHIFT > cbits) {
		index = (pgoff_t)clusters >> (PAGE_SHIFT - cbits);
	} else if (PAGE_SHIFT < cbits) {
		index = (pgoff_t)clusters << (cbits - PAGE_SHIFT);
	}

	return index;
}

static inline unsigned int ocfs2_pages_per_cluster(struct super_block *sb)
{
	unsigned int cbits = OCFS2_SB(sb)->s_clustersize_bits;
	unsigned int pages_per_cluster = 1;

	if (PAGE_SHIFT < cbits)
		pages_per_cluster = 1 << (cbits - PAGE_SHIFT);

	return pages_per_cluster;
}
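
/*
 * Continuing the 4K-page / 32K-cluster example for the two helpers
 * above: ocfs2_pages_per_cluster() == 1 << 3 == 8, and cluster 1 is
 * first covered by page index 1 << 3 == 8 from
 * ocfs2_align_clusters_to_page_index().
 */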

static inline unsigned int ocfs2_megabytes_to_clusters(struct super_block *sb,
						       unsigned int megs)
{
	BUILD_BUG_ON(OCFS2_MAX_CLUSTERSIZE > 1048576);

	return megs << (20 - OCFS2_SB(sb)->s_clustersize_bits);
}

static inline unsigned int ocfs2_clusters_to_megabytes(struct super_block *sb,
						       unsigned int clusters)
{
	return clusters >> (20 - OCFS2_SB(sb)->s_clustersize_bits);
}
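
/*
 * Example: with 32K clusters, 1 MB is 1 << (20 - 15) == 32 clusters,
 * so ocfs2_megabytes_to_clusters(sb, 1) == 32 and
 * ocfs2_clusters_to_megabytes(sb, 32) == 1.
 */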

static inline void _ocfs2_set_bit(unsigned int bit, unsigned long *bitmap)
{
	__set_bit_le(bit, bitmap);
}
#define ocfs2_set_bit(bit, addr) _ocfs2_set_bit((bit), (unsigned long *)(addr))

static inline void _ocfs2_clear_bit(unsigned int bit, unsigned long *bitmap)
{
	__clear_bit_le(bit, bitmap);
}
#define ocfs2_clear_bit(bit, addr) _ocfs2_clear_bit((bit), (unsigned long *)(addr))

#define ocfs2_test_bit test_bit_le
#define ocfs2_find_next_zero_bit find_next_zero_bit_le
#define ocfs2_find_next_bit find_next_bit_le

static inline void *correct_addr_and_bit_unaligned(int *bit, void *addr)
{
#if BITS_PER_LONG == 64
	*bit += ((unsigned long) addr & 7UL) << 3;
	addr = (void *) ((unsigned long) addr & ~7UL);
#elif BITS_PER_LONG == 32
	*bit += ((unsigned long) addr & 3UL) << 3;
	addr = (void *) ((unsigned long) addr & ~3UL);
#else
#error "How many bits are you?!"
#endif
	return addr;
}

static inline void ocfs2_set_bit_unaligned(int bit, void *bitmap)
{
	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
	ocfs2_set_bit(bit, bitmap);
}

static inline void ocfs2_clear_bit_unaligned(int bit, void *bitmap)
{
	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
	ocfs2_clear_bit(bit, bitmap);
}

static inline int ocfs2_test_bit_unaligned(int bit, void *bitmap)
{
	bitmap = correct_addr_and_bit_unaligned(&bit, bitmap);
	return ocfs2_test_bit(bit, bitmap);
}

static inline int ocfs2_find_next_zero_bit_unaligned(void *bitmap, int max,
							int start)
{
	int fix = 0, ret, tmpmax;
	bitmap = correct_addr_and_bit_unaligned(&fix, bitmap);
	tmpmax = max + fix;
	start += fix;

	ret = ocfs2_find_next_zero_bit(bitmap, tmpmax, start) - fix;
	if (ret > max)
		return max;
	return ret;
}

#endif  /* OCFS2_H */