1/*
2 *  linux/fs/ext4/super.c
3 *
4 * Copyright (C) 1992, 1993, 1994, 1995
5 * Remy Card (card@masi.ibp.fr)
6 * Laboratoire MASI - Institut Blaise Pascal
7 * Universite Pierre et Marie Curie (Paris VI)
8 *
9 *  from
10 *
11 *  linux/fs/minix/inode.c
12 *
13 *  Copyright (C) 1991, 1992  Linus Torvalds
14 *
15 *  Big-endian to little-endian byte-swapping/bitmaps by
16 *        David S. Miller (davem@caip.rutgers.edu), 1995
17 */
18
19#include <linux/module.h>
20#include <linux/string.h>
21#include <linux/fs.h>
22#include <linux/time.h>
23#include <linux/vmalloc.h>
24#include <linux/jbd2.h>
25#include <linux/slab.h>
26#include <linux/init.h>
27#include <linux/blkdev.h>
28#include <linux/parser.h>
29#include <linux/smp_lock.h>
30#include <linux/buffer_head.h>
31#include <linux/exportfs.h>
32#include <linux/vfs.h>
33#include <linux/random.h>
34#include <linux/mount.h>
35#include <linux/namei.h>
36#include <linux/quotaops.h>
37#include <linux/seq_file.h>
38#include <linux/proc_fs.h>
39#include <linux/ctype.h>
40#include <linux/log2.h>
41#include <linux/crc16.h>
42#include <asm/uaccess.h>
43
44#include "ext4.h"
45#include "ext4_jbd2.h"
46#include "xattr.h"
47#include "acl.h"
48#include "mballoc.h"
49
50#define CREATE_TRACE_POINTS
51#include <trace/events/ext4.h>
52
53struct proc_dir_entry *ext4_proc_root;
54static struct kset *ext4_kset;
55
56static int ext4_load_journal(struct super_block *, struct ext4_super_block *,
57			     unsigned long journal_devnum);
58static int ext4_commit_super(struct super_block *sb, int sync);
59static void ext4_mark_recovery_complete(struct super_block *sb,
60					struct ext4_super_block *es);
61static void ext4_clear_journal_err(struct super_block *sb,
62				   struct ext4_super_block *es);
63static int ext4_sync_fs(struct super_block *sb, int wait);
64static const char *ext4_decode_error(struct super_block *sb, int errno,
65				     char nbuf[16]);
66static int ext4_remount(struct super_block *sb, int *flags, char *data);
67static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf);
68static int ext4_unfreeze(struct super_block *sb);
69static void ext4_write_super(struct super_block *sb);
70static int ext4_freeze(struct super_block *sb);
71static int ext4_get_sb(struct file_system_type *fs_type, int flags,
72		       const char *dev_name, void *data, struct vfsmount *mnt);
73
74#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && \
75	defined(CONFIG_EXT4_USE_FOR_EXT23)
76static struct file_system_type ext3_fs_type = {
77	.owner		= THIS_MODULE,
78	.name		= "ext3",
79	.get_sb		= ext4_get_sb,
80	.kill_sb	= kill_block_super,
81	.fs_flags	= FS_REQUIRES_DEV,
82};
83#define IS_EXT3_SB(sb) ((sb)->s_bdev->bd_holder == &ext3_fs_type)
84#else
85#define IS_EXT3_SB(sb) (0)
86#endif
87
88ext4_fsblk_t ext4_block_bitmap(struct super_block *sb,
89			       struct ext4_group_desc *bg)
90{
91	return le32_to_cpu(bg->bg_block_bitmap_lo) |
92		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
93		 (ext4_fsblk_t)le32_to_cpu(bg->bg_block_bitmap_hi) << 32 : 0);
94}
95
96ext4_fsblk_t ext4_inode_bitmap(struct super_block *sb,
97			       struct ext4_group_desc *bg)
98{
99	return le32_to_cpu(bg->bg_inode_bitmap_lo) |
100		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
101		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_bitmap_hi) << 32 : 0);
102}
103
104ext4_fsblk_t ext4_inode_table(struct super_block *sb,
105			      struct ext4_group_desc *bg)
106{
107	return le32_to_cpu(bg->bg_inode_table_lo) |
108		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
109		 (ext4_fsblk_t)le32_to_cpu(bg->bg_inode_table_hi) << 32 : 0);
110}
111
112__u32 ext4_free_blks_count(struct super_block *sb,
113			      struct ext4_group_desc *bg)
114{
115	return le16_to_cpu(bg->bg_free_blocks_count_lo) |
116		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
117		 (__u32)le16_to_cpu(bg->bg_free_blocks_count_hi) << 16 : 0);
118}
119
120__u32 ext4_free_inodes_count(struct super_block *sb,
121			      struct ext4_group_desc *bg)
122{
123	return le16_to_cpu(bg->bg_free_inodes_count_lo) |
124		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
125		 (__u32)le16_to_cpu(bg->bg_free_inodes_count_hi) << 16 : 0);
126}
127
128__u32 ext4_used_dirs_count(struct super_block *sb,
129			      struct ext4_group_desc *bg)
130{
131	return le16_to_cpu(bg->bg_used_dirs_count_lo) |
132		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
133		 (__u32)le16_to_cpu(bg->bg_used_dirs_count_hi) << 16 : 0);
134}
135
136__u32 ext4_itable_unused_count(struct super_block *sb,
137			      struct ext4_group_desc *bg)
138{
139	return le16_to_cpu(bg->bg_itable_unused_lo) |
140		(EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT ?
141		 (__u32)le16_to_cpu(bg->bg_itable_unused_hi) << 16 : 0);
142}
143
144void ext4_block_bitmap_set(struct super_block *sb,
145			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
146{
147	bg->bg_block_bitmap_lo = cpu_to_le32((u32)blk);
148	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
149		bg->bg_block_bitmap_hi = cpu_to_le32(blk >> 32);
150}
151
152void ext4_inode_bitmap_set(struct super_block *sb,
153			   struct ext4_group_desc *bg, ext4_fsblk_t blk)
154{
155	bg->bg_inode_bitmap_lo  = cpu_to_le32((u32)blk);
156	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
157		bg->bg_inode_bitmap_hi = cpu_to_le32(blk >> 32);
158}
159
160void ext4_inode_table_set(struct super_block *sb,
161			  struct ext4_group_desc *bg, ext4_fsblk_t blk)
162{
163	bg->bg_inode_table_lo = cpu_to_le32((u32)blk);
164	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
165		bg->bg_inode_table_hi = cpu_to_le32(blk >> 32);
166}
167
168void ext4_free_blks_set(struct super_block *sb,
169			  struct ext4_group_desc *bg, __u32 count)
170{
171	bg->bg_free_blocks_count_lo = cpu_to_le16((__u16)count);
172	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
173		bg->bg_free_blocks_count_hi = cpu_to_le16(count >> 16);
174}
175
176void ext4_free_inodes_set(struct super_block *sb,
177			  struct ext4_group_desc *bg, __u32 count)
178{
179	bg->bg_free_inodes_count_lo = cpu_to_le16((__u16)count);
180	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
181		bg->bg_free_inodes_count_hi = cpu_to_le16(count >> 16);
182}
183
184void ext4_used_dirs_set(struct super_block *sb,
185			  struct ext4_group_desc *bg, __u32 count)
186{
187	bg->bg_used_dirs_count_lo = cpu_to_le16((__u16)count);
188	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
189		bg->bg_used_dirs_count_hi = cpu_to_le16(count >> 16);
190}
191
192void ext4_itable_unused_set(struct super_block *sb,
193			  struct ext4_group_desc *bg, __u32 count)
194{
195	bg->bg_itable_unused_lo = cpu_to_le16((__u16)count);
196	if (EXT4_DESC_SIZE(sb) >= EXT4_MIN_DESC_SIZE_64BIT)
197		bg->bg_itable_unused_hi = cpu_to_le16(count >> 16);
198}
199
200
201/* Just increment the non-pointer handle value */
202static handle_t *ext4_get_nojournal(void)
203{
204	handle_t *handle = current->journal_info;
205	unsigned long ref_cnt = (unsigned long)handle;
206
207	BUG_ON(ref_cnt >= EXT4_NOJOURNAL_MAX_REF_COUNT);
208
209	ref_cnt++;
210	handle = (handle_t *)ref_cnt;
211
212	current->journal_info = handle;
213	return handle;
214}
215
216
217/* Decrement the non-pointer handle value */
218static void ext4_put_nojournal(handle_t *handle)
219{
220	unsigned long ref_cnt = (unsigned long)handle;
221
222	BUG_ON(ref_cnt == 0);
223
224	ref_cnt--;
225	handle = (handle_t *)ref_cnt;
226
227	current->journal_info = handle;
228}
229
230/*
231 * Wrappers for jbd2_journal_start/end.
232 *
233 * The only special thing we need to do here is to make sure that all
234 * journal_end calls result in the superblock being marked dirty, so
235 * that sync() will call the filesystem's write_super callback if
236 * appropriate.
237 */
238handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks)
239{
240	journal_t *journal;
241
242	if (sb->s_flags & MS_RDONLY)
243		return ERR_PTR(-EROFS);
244
245	vfs_check_frozen(sb, SB_FREEZE_TRANS);
246	/* Special case here: if the journal has aborted behind our
247	 * backs (eg. EIO in the commit thread), then we still need to
248	 * take the FS itself readonly cleanly. */
249	journal = EXT4_SB(sb)->s_journal;
250	if (journal) {
251		if (is_journal_aborted(journal)) {
252			ext4_abort(sb, "Detected aborted journal");
253			return ERR_PTR(-EROFS);
254		}
255		return jbd2_journal_start(journal, nblocks);
256	}
257	return ext4_get_nojournal();
258}
259
260/*
261 * The only special thing we need to do here is to make sure that all
262 * jbd2_journal_stop calls result in the superblock being marked dirty, so
263 * that sync() will call the filesystem's write_super callback if
264 * appropriate.
265 */
266int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
267{
268	struct super_block *sb;
269	int err;
270	int rc;
271
272	if (!ext4_handle_valid(handle)) {
273		ext4_put_nojournal(handle);
274		return 0;
275	}
276	sb = handle->h_transaction->t_journal->j_private;
277	err = handle->h_err;
278	rc = jbd2_journal_stop(handle);
279
280	if (!err)
281		err = rc;
282	if (err)
283		__ext4_std_error(sb, where, line, err);
284	return err;
285}
286
287void ext4_journal_abort_handle(const char *caller, unsigned int line,
288			       const char *err_fn, struct buffer_head *bh,
289			       handle_t *handle, int err)
290{
291	char nbuf[16];
292	const char *errstr = ext4_decode_error(NULL, err, nbuf);
293
294	BUG_ON(!ext4_handle_valid(handle));
295
296	if (bh)
297		BUFFER_TRACE(bh, "abort");
298
299	if (!handle->h_err)
300		handle->h_err = err;
301
302	if (is_handle_aborted(handle))
303		return;
304
305	printk(KERN_ERR "%s:%d: aborting transaction: %s in %s\n",
306	       caller, line, errstr, err_fn);
307
308	jbd2_journal_abort_handle(handle);
309}
310
311static void __save_error_info(struct super_block *sb, const char *func,
312			    unsigned int line)
313{
314	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
315
316	EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
317	es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
318	es->s_last_error_time = cpu_to_le32(get_seconds());
319	strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
320	es->s_last_error_line = cpu_to_le32(line);
321	if (!es->s_first_error_time) {
322		es->s_first_error_time = es->s_last_error_time;
323		strncpy(es->s_first_error_func, func,
324			sizeof(es->s_first_error_func));
325		es->s_first_error_line = cpu_to_le32(line);
326		es->s_first_error_ino = es->s_last_error_ino;
327		es->s_first_error_block = es->s_last_error_block;
328	}
329	/*
330	 * Start the daily error reporting function if it hasn't been
331	 * started already
332	 */
333	if (!es->s_error_count)
334		mod_timer(&EXT4_SB(sb)->s_err_report, jiffies + 24*60*60*HZ);
335	es->s_error_count = cpu_to_le32(le32_to_cpu(es->s_error_count) + 1);
336}
337
338static void save_error_info(struct super_block *sb, const char *func,
339			    unsigned int line)
340{
341	__save_error_info(sb, func, line);
342	ext4_commit_super(sb, 1);
343}
344
345
346/* Deal with the reporting of failure conditions on a filesystem such as
347 * inconsistencies detected or read IO failures.
348 *
349 * On ext2, we can store the error state of the filesystem in the
350 * superblock.  That is not possible on ext4, because we may have other
351 * write ordering constraints on the superblock which prevent us from
352 * writing it out straight away; and given that the journal is about to
353 * be aborted, we can't rely on the current, or future, transactions to
354 * write out the superblock safely.
355 *
356 * We'll just use the jbd2_journal_abort() error code to record an error in
357 * the journal instead.  On recovery, the journal will complain about
358 * that error until we've noted it down and cleared it.
359 */
360
361static void ext4_handle_error(struct super_block *sb)
362{
363	if (sb->s_flags & MS_RDONLY)
364		return;
365
366	if (!test_opt(sb, ERRORS_CONT)) {
367		journal_t *journal = EXT4_SB(sb)->s_journal;
368
369		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
370		if (journal)
371			jbd2_journal_abort(journal, -EIO);
372	}
373	if (test_opt(sb, ERRORS_RO)) {
374		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
375		sb->s_flags |= MS_RDONLY;
376	}
377	if (test_opt(sb, ERRORS_PANIC))
378		panic("EXT4-fs (device %s): panic forced after error\n",
379			sb->s_id);
380}
381
382void __ext4_error(struct super_block *sb, const char *function,
383		  unsigned int line, const char *fmt, ...)
384{
385	va_list args;
386
387	va_start(args, fmt);
388	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: ",
389	       sb->s_id, function, line, current->comm);
390	vprintk(fmt, args);
391	printk("\n");
392	va_end(args);
393
394	ext4_handle_error(sb);
395}
396
397void ext4_error_inode(struct inode *inode, const char *function,
398		      unsigned int line, ext4_fsblk_t block,
399		      const char *fmt, ...)
400{
401	va_list args;
402	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
403
404	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
405	es->s_last_error_block = cpu_to_le64(block);
406	save_error_info(inode->i_sb, function, line);
407	va_start(args, fmt);
408	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: inode #%lu: ",
409	       inode->i_sb->s_id, function, line, inode->i_ino);
410	if (block)
411		printk("block %llu: ", block);
412	printk("comm %s: ", current->comm);
413	vprintk(fmt, args);
414	printk("\n");
415	va_end(args);
416
417	ext4_handle_error(inode->i_sb);
418}
419
420void ext4_error_file(struct file *file, const char *function,
421		     unsigned int line, const char *fmt, ...)
422{
423	va_list args;
424	struct ext4_super_block *es;
425	struct inode *inode = file->f_dentry->d_inode;
426	char pathname[80], *path;
427
428	es = EXT4_SB(inode->i_sb)->s_es;
429	es->s_last_error_ino = cpu_to_le32(inode->i_ino);
430	save_error_info(inode->i_sb, function, line);
431	va_start(args, fmt);
432	path = d_path(&(file->f_path), pathname, sizeof(pathname));
433	if (!path)
434		path = "(unknown)";
435	printk(KERN_CRIT
436	       "EXT4-fs error (device %s): %s:%d: inode #%lu "
437	       "(comm %s path %s): ",
438	       inode->i_sb->s_id, function, line, inode->i_ino,
439	       current->comm, path);
440	vprintk(fmt, args);
441	printk("\n");
442	va_end(args);
443
444	ext4_handle_error(inode->i_sb);
445}
446
447static const char *ext4_decode_error(struct super_block *sb, int errno,
448				     char nbuf[16])
449{
450	char *errstr = NULL;
451
452	switch (errno) {
453	case -EIO:
454		errstr = "IO failure";
455		break;
456	case -ENOMEM:
457		errstr = "Out of memory";
458		break;
459	case -EROFS:
460		if (!sb || (EXT4_SB(sb)->s_journal &&
461			    EXT4_SB(sb)->s_journal->j_flags & JBD2_ABORT))
462			errstr = "Journal has aborted";
463		else
464			errstr = "Readonly filesystem";
465		break;
466	default:
467		/* If the caller passed in an extra buffer for unknown
468		 * errors, textualise them now.  Else we just return
469		 * NULL. */
470		if (nbuf) {
471			/* Check for truncated error codes... */
472			if (snprintf(nbuf, 16, "error %d", -errno) >= 0)
473				errstr = nbuf;
474		}
475		break;
476	}
477
478	return errstr;
479}
480
481/* __ext4_std_error decodes expected errors from journaling functions
482 * automatically and invokes the appropriate error response.  */
483
484void __ext4_std_error(struct super_block *sb, const char *function,
485		      unsigned int line, int errno)
486{
487	char nbuf[16];
488	const char *errstr;
489
490	/* Special case: if the error is EROFS, and we're not already
491	 * inside a transaction, then there's really no point in logging
492	 * an error. */
493	if (errno == -EROFS && journal_current_handle() == NULL &&
494	    (sb->s_flags & MS_RDONLY))
495		return;
496
497	errstr = ext4_decode_error(sb, errno, nbuf);
498	printk(KERN_CRIT "EXT4-fs error (device %s) in %s:%d: %s\n",
499	       sb->s_id, function, line, errstr);
500	save_error_info(sb, function, line);
501
502	ext4_handle_error(sb);
503}
504
505/*
506 * ext4_abort is a much stronger failure handler than ext4_error.  The
507 * abort function may be used to deal with unrecoverable failures such
508 * as journal IO errors or ENOMEM at a critical moment in log management.
509 *
510 * We unconditionally force the filesystem into an ABORT|READONLY state,
511 * unless the error response on the fs has been set to panic in which
512 * case we take the easy way out and panic immediately.
513 */
514
515void __ext4_abort(struct super_block *sb, const char *function,
516		unsigned int line, const char *fmt, ...)
517{
518	va_list args;
519
520	save_error_info(sb, function, line);
521	va_start(args, fmt);
522	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: ", sb->s_id,
523	       function, line);
524	vprintk(fmt, args);
525	printk("\n");
526	va_end(args);
527
528	if ((sb->s_flags & MS_RDONLY) == 0) {
529		ext4_msg(sb, KERN_CRIT, "Remounting filesystem read-only");
530		sb->s_flags |= MS_RDONLY;
531		EXT4_SB(sb)->s_mount_flags |= EXT4_MF_FS_ABORTED;
532		if (EXT4_SB(sb)->s_journal)
533			jbd2_journal_abort(EXT4_SB(sb)->s_journal, -EIO);
534		save_error_info(sb, function, line);
535	}
536	if (test_opt(sb, ERRORS_PANIC))
537		panic("EXT4-fs panic from previous error\n");
538}
539
540void ext4_msg (struct super_block * sb, const char *prefix,
541		   const char *fmt, ...)
542{
543	va_list args;
544
545	va_start(args, fmt);
546	printk("%sEXT4-fs (%s): ", prefix, sb->s_id);
547	vprintk(fmt, args);
548	printk("\n");
549	va_end(args);
550}
551
552void __ext4_warning(struct super_block *sb, const char *function,
553		    unsigned int line, const char *fmt, ...)
554{
555	va_list args;
556
557	va_start(args, fmt);
558	printk(KERN_WARNING "EXT4-fs warning (device %s): %s:%d: ",
559	       sb->s_id, function, line);
560	vprintk(fmt, args);
561	printk("\n");
562	va_end(args);
563}
564
565void __ext4_grp_locked_error(const char *function, unsigned int line,
566			     struct super_block *sb, ext4_group_t grp,
567			     unsigned long ino, ext4_fsblk_t block,
568			     const char *fmt, ...)
569__releases(bitlock)
570__acquires(bitlock)
571{
572	va_list args;
573	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
574
575	es->s_last_error_ino = cpu_to_le32(ino);
576	es->s_last_error_block = cpu_to_le64(block);
577	__save_error_info(sb, function, line);
578	va_start(args, fmt);
579	printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: group %u",
580	       sb->s_id, function, line, grp);
581	if (ino)
582		printk("inode %lu: ", ino);
583	if (block)
584		printk("block %llu:", (unsigned long long) block);
585	vprintk(fmt, args);
586	printk("\n");
587	va_end(args);
588
589	if (test_opt(sb, ERRORS_CONT)) {
590		ext4_commit_super(sb, 0);
591		return;
592	}
593
594	ext4_unlock_group(sb, grp);
595	ext4_handle_error(sb);
596	/*
597	 * We only get here in the ERRORS_RO case; relocking the group
598	 * may be dangerous, but nothing bad will happen since the
599	 * filesystem will have already been marked read/only and the
600	 * journal has been aborted.  We return 1 as a hint to callers
601	 * who might what to use the return value from
602	 * ext4_grp_locked_error() to distinguish beween the
603	 * ERRORS_CONT and ERRORS_RO case, and perhaps return more
604	 * aggressively from the ext4 function in question, with a
605	 * more appropriate error code.
606	 */
607	ext4_lock_group(sb, grp);
608	return;
609}
610
611void ext4_update_dynamic_rev(struct super_block *sb)
612{
613	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
614
615	if (le32_to_cpu(es->s_rev_level) > EXT4_GOOD_OLD_REV)
616		return;
617
618	ext4_warning(sb,
619		     "updating to rev %d because of new feature flag, "
620		     "running e2fsck is recommended",
621		     EXT4_DYNAMIC_REV);
622
623	es->s_first_ino = cpu_to_le32(EXT4_GOOD_OLD_FIRST_INO);
624	es->s_inode_size = cpu_to_le16(EXT4_GOOD_OLD_INODE_SIZE);
625	es->s_rev_level = cpu_to_le32(EXT4_DYNAMIC_REV);
626	/* leave es->s_feature_*compat flags alone */
627	/* es->s_uuid will be set by e2fsck if empty */
628
629	/*
630	 * The rest of the superblock fields should be zero, and if not it
631	 * means they are likely already in use, so leave them alone.  We
632	 * can leave it up to e2fsck to clean up any inconsistencies there.
633	 */
634}
635
636/*
637 * Open the external journal device
638 */
639static struct block_device *ext4_blkdev_get(dev_t dev, struct super_block *sb)
640{
641	struct block_device *bdev;
642	char b[BDEVNAME_SIZE];
643
644	bdev = open_by_devnum(dev, FMODE_READ|FMODE_WRITE);
645	if (IS_ERR(bdev))
646		goto fail;
647	return bdev;
648
649fail:
650	ext4_msg(sb, KERN_ERR, "failed to open journal device %s: %ld",
651			__bdevname(dev, b), PTR_ERR(bdev));
652	return NULL;
653}
654
655/*
656 * Release the journal device
657 */
658static int ext4_blkdev_put(struct block_device *bdev)
659{
660	bd_release(bdev);
661	return blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
662}
663
664static int ext4_blkdev_remove(struct ext4_sb_info *sbi)
665{
666	struct block_device *bdev;
667	int ret = -ENODEV;
668
669	bdev = sbi->journal_bdev;
670	if (bdev) {
671		ret = ext4_blkdev_put(bdev);
672		sbi->journal_bdev = NULL;
673	}
674	return ret;
675}
676
677static inline struct inode *orphan_list_entry(struct list_head *l)
678{
679	return &list_entry(l, struct ext4_inode_info, i_orphan)->vfs_inode;
680}
681
682static void dump_orphan_list(struct super_block *sb, struct ext4_sb_info *sbi)
683{
684	struct list_head *l;
685
686	ext4_msg(sb, KERN_ERR, "sb orphan head is %d",
687		 le32_to_cpu(sbi->s_es->s_last_orphan));
688
689	printk(KERN_ERR "sb_info orphan list:\n");
690	list_for_each(l, &sbi->s_orphan) {
691		struct inode *inode = orphan_list_entry(l);
692		printk(KERN_ERR "  "
693		       "inode %s:%lu at %p: mode %o, nlink %d, next %d\n",
694		       inode->i_sb->s_id, inode->i_ino, inode,
695		       inode->i_mode, inode->i_nlink,
696		       NEXT_ORPHAN(inode));
697	}
698}
699
700static void ext4_put_super(struct super_block *sb)
701{
702	struct ext4_sb_info *sbi = EXT4_SB(sb);
703	struct ext4_super_block *es = sbi->s_es;
704	int i, err;
705
706	dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
707
708	flush_workqueue(sbi->dio_unwritten_wq);
709	destroy_workqueue(sbi->dio_unwritten_wq);
710
711	lock_super(sb);
712	lock_kernel();
713	if (sb->s_dirt)
714		ext4_commit_super(sb, 1);
715
716	if (sbi->s_journal) {
717		err = jbd2_journal_destroy(sbi->s_journal);
718		sbi->s_journal = NULL;
719		if (err < 0)
720			ext4_abort(sb, "Couldn't clean up the journal");
721	}
722
723	del_timer(&sbi->s_err_report);
724	ext4_release_system_zone(sb);
725	ext4_mb_release(sb);
726	ext4_ext_release(sb);
727	ext4_xattr_put_super(sb);
728
729	if (!(sb->s_flags & MS_RDONLY)) {
730		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
731		es->s_state = cpu_to_le16(sbi->s_mount_state);
732		ext4_commit_super(sb, 1);
733	}
734	if (sbi->s_proc) {
735		remove_proc_entry(sb->s_id, ext4_proc_root);
736	}
737	kobject_del(&sbi->s_kobj);
738
739	for (i = 0; i < sbi->s_gdb_count; i++)
740		brelse(sbi->s_group_desc[i]);
741	kfree(sbi->s_group_desc);
742	if (is_vmalloc_addr(sbi->s_flex_groups))
743		vfree(sbi->s_flex_groups);
744	else
745		kfree(sbi->s_flex_groups);
746	percpu_counter_destroy(&sbi->s_freeblocks_counter);
747	percpu_counter_destroy(&sbi->s_freeinodes_counter);
748	percpu_counter_destroy(&sbi->s_dirs_counter);
749	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
750	brelse(sbi->s_sbh);
751#ifdef CONFIG_QUOTA
752	for (i = 0; i < MAXQUOTAS; i++)
753		kfree(sbi->s_qf_names[i]);
754#endif
755
756	/* Debugging code just in case the in-memory inode orphan list
757	 * isn't empty.  The on-disk one can be non-empty if we've
758	 * detected an error and taken the fs readonly, but the
759	 * in-memory list had better be clean by this point. */
760	if (!list_empty(&sbi->s_orphan))
761		dump_orphan_list(sb, sbi);
762	J_ASSERT(list_empty(&sbi->s_orphan));
763
764	invalidate_bdev(sb->s_bdev);
765	if (sbi->journal_bdev && sbi->journal_bdev != sb->s_bdev) {
766		/*
767		 * Invalidate the journal device's buffers.  We don't want them
768		 * floating about in memory - the physical journal device may
769		 * hotswapped, and it breaks the `ro-after' testing code.
770		 */
771		sync_blockdev(sbi->journal_bdev);
772		invalidate_bdev(sbi->journal_bdev);
773		ext4_blkdev_remove(sbi);
774	}
775	sb->s_fs_info = NULL;
776	/*
777	 * Now that we are completely done shutting down the
778	 * superblock, we need to actually destroy the kobject.
779	 */
780	unlock_kernel();
781	unlock_super(sb);
782	kobject_put(&sbi->s_kobj);
783	wait_for_completion(&sbi->s_kobj_unregister);
784	kfree(sbi->s_blockgroup_lock);
785	kfree(sbi);
786}
787
788static struct kmem_cache *ext4_inode_cachep;
789
790/*
791 * Called inside transaction, so use GFP_NOFS
792 */
793static struct inode *ext4_alloc_inode(struct super_block *sb)
794{
795	struct ext4_inode_info *ei;
796
797	ei = kmem_cache_alloc(ext4_inode_cachep, GFP_NOFS);
798	if (!ei)
799		return NULL;
800
801	ei->vfs_inode.i_version = 1;
802	ei->vfs_inode.i_data.writeback_index = 0;
803	memset(&ei->i_cached_extent, 0, sizeof(struct ext4_ext_cache));
804	INIT_LIST_HEAD(&ei->i_prealloc_list);
805	spin_lock_init(&ei->i_prealloc_lock);
806	/*
807	 * Note:  We can be called before EXT4_SB(sb)->s_journal is set,
808	 * therefore it can be null here.  Don't check it, just initialize
809	 * jinode.
810	 */
811	jbd2_journal_init_jbd_inode(&ei->jinode, &ei->vfs_inode);
812	ei->i_reserved_data_blocks = 0;
813	ei->i_reserved_meta_blocks = 0;
814	ei->i_allocated_meta_blocks = 0;
815	ei->i_da_metadata_calc_len = 0;
816	ei->i_delalloc_reserved_flag = 0;
817	spin_lock_init(&(ei->i_block_reservation_lock));
818#ifdef CONFIG_QUOTA
819	ei->i_reserved_quota = 0;
820#endif
821	INIT_LIST_HEAD(&ei->i_completed_io_list);
822	spin_lock_init(&ei->i_completed_io_lock);
823	ei->cur_aio_dio = NULL;
824	ei->i_sync_tid = 0;
825	ei->i_datasync_tid = 0;
826
827	return &ei->vfs_inode;
828}
829
830static void ext4_destroy_inode(struct inode *inode)
831{
832	if (!list_empty(&(EXT4_I(inode)->i_orphan))) {
833		ext4_msg(inode->i_sb, KERN_ERR,
834			 "Inode %lu (%p): orphan list check failed!",
835			 inode->i_ino, EXT4_I(inode));
836		print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 16, 4,
837				EXT4_I(inode), sizeof(struct ext4_inode_info),
838				true);
839		dump_stack();
840	}
841	kmem_cache_free(ext4_inode_cachep, EXT4_I(inode));
842}
843
844static void init_once(void *foo)
845{
846	struct ext4_inode_info *ei = (struct ext4_inode_info *) foo;
847
848	INIT_LIST_HEAD(&ei->i_orphan);
849#ifdef CONFIG_EXT4_FS_XATTR
850	init_rwsem(&ei->xattr_sem);
851#endif
852	init_rwsem(&ei->i_data_sem);
853	inode_init_once(&ei->vfs_inode);
854}
855
856static int init_inodecache(void)
857{
858	ext4_inode_cachep = kmem_cache_create("ext4_inode_cache",
859					     sizeof(struct ext4_inode_info),
860					     0, (SLAB_RECLAIM_ACCOUNT|
861						SLAB_MEM_SPREAD),
862					     init_once);
863	if (ext4_inode_cachep == NULL)
864		return -ENOMEM;
865	return 0;
866}
867
868static void destroy_inodecache(void)
869{
870	kmem_cache_destroy(ext4_inode_cachep);
871}
872
873void ext4_clear_inode(struct inode *inode)
874{
875	invalidate_inode_buffers(inode);
876	end_writeback(inode);
877	dquot_drop(inode);
878	ext4_discard_preallocations(inode);
879	if (EXT4_JOURNAL(inode))
880		jbd2_journal_release_jbd_inode(EXT4_SB(inode->i_sb)->s_journal,
881				       &EXT4_I(inode)->jinode);
882}
883
884static inline void ext4_show_quota_options(struct seq_file *seq,
885					   struct super_block *sb)
886{
887#if defined(CONFIG_QUOTA)
888	struct ext4_sb_info *sbi = EXT4_SB(sb);
889
890	if (sbi->s_jquota_fmt) {
891		char *fmtname = "";
892
893		switch (sbi->s_jquota_fmt) {
894		case QFMT_VFS_OLD:
895			fmtname = "vfsold";
896			break;
897		case QFMT_VFS_V0:
898			fmtname = "vfsv0";
899			break;
900		case QFMT_VFS_V1:
901			fmtname = "vfsv1";
902			break;
903		}
904		seq_printf(seq, ",jqfmt=%s", fmtname);
905	}
906
907	if (sbi->s_qf_names[USRQUOTA])
908		seq_printf(seq, ",usrjquota=%s", sbi->s_qf_names[USRQUOTA]);
909
910	if (sbi->s_qf_names[GRPQUOTA])
911		seq_printf(seq, ",grpjquota=%s", sbi->s_qf_names[GRPQUOTA]);
912
913	if (test_opt(sb, USRQUOTA))
914		seq_puts(seq, ",usrquota");
915
916	if (test_opt(sb, GRPQUOTA))
917		seq_puts(seq, ",grpquota");
918#endif
919}
920
921/*
922 * Show an option if
923 *  - it's set to a non-default value OR
924 *  - if the per-sb default is different from the global default
925 */
926static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
927{
928	int def_errors;
929	unsigned long def_mount_opts;
930	struct super_block *sb = vfs->mnt_sb;
931	struct ext4_sb_info *sbi = EXT4_SB(sb);
932	struct ext4_super_block *es = sbi->s_es;
933
934	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
935	def_errors     = le16_to_cpu(es->s_errors);
936
937	if (sbi->s_sb_block != 1)
938		seq_printf(seq, ",sb=%llu", sbi->s_sb_block);
939	if (test_opt(sb, MINIX_DF))
940		seq_puts(seq, ",minixdf");
941	if (test_opt(sb, GRPID) && !(def_mount_opts & EXT4_DEFM_BSDGROUPS))
942		seq_puts(seq, ",grpid");
943	if (!test_opt(sb, GRPID) && (def_mount_opts & EXT4_DEFM_BSDGROUPS))
944		seq_puts(seq, ",nogrpid");
945	if (sbi->s_resuid != EXT4_DEF_RESUID ||
946	    le16_to_cpu(es->s_def_resuid) != EXT4_DEF_RESUID) {
947		seq_printf(seq, ",resuid=%u", sbi->s_resuid);
948	}
949	if (sbi->s_resgid != EXT4_DEF_RESGID ||
950	    le16_to_cpu(es->s_def_resgid) != EXT4_DEF_RESGID) {
951		seq_printf(seq, ",resgid=%u", sbi->s_resgid);
952	}
953	if (test_opt(sb, ERRORS_RO)) {
954		if (def_errors == EXT4_ERRORS_PANIC ||
955		    def_errors == EXT4_ERRORS_CONTINUE) {
956			seq_puts(seq, ",errors=remount-ro");
957		}
958	}
959	if (test_opt(sb, ERRORS_CONT) && def_errors != EXT4_ERRORS_CONTINUE)
960		seq_puts(seq, ",errors=continue");
961	if (test_opt(sb, ERRORS_PANIC) && def_errors != EXT4_ERRORS_PANIC)
962		seq_puts(seq, ",errors=panic");
963	if (test_opt(sb, NO_UID32) && !(def_mount_opts & EXT4_DEFM_UID16))
964		seq_puts(seq, ",nouid32");
965	if (test_opt(sb, DEBUG) && !(def_mount_opts & EXT4_DEFM_DEBUG))
966		seq_puts(seq, ",debug");
967	if (test_opt(sb, OLDALLOC))
968		seq_puts(seq, ",oldalloc");
969#ifdef CONFIG_EXT4_FS_XATTR
970	if (test_opt(sb, XATTR_USER) &&
971		!(def_mount_opts & EXT4_DEFM_XATTR_USER))
972		seq_puts(seq, ",user_xattr");
973	if (!test_opt(sb, XATTR_USER) &&
974	    (def_mount_opts & EXT4_DEFM_XATTR_USER)) {
975		seq_puts(seq, ",nouser_xattr");
976	}
977#endif
978#ifdef CONFIG_EXT4_FS_POSIX_ACL
979	if (test_opt(sb, POSIX_ACL) && !(def_mount_opts & EXT4_DEFM_ACL))
980		seq_puts(seq, ",acl");
981	if (!test_opt(sb, POSIX_ACL) && (def_mount_opts & EXT4_DEFM_ACL))
982		seq_puts(seq, ",noacl");
983#endif
984	if (sbi->s_commit_interval != JBD2_DEFAULT_MAX_COMMIT_AGE*HZ) {
985		seq_printf(seq, ",commit=%u",
986			   (unsigned) (sbi->s_commit_interval / HZ));
987	}
988	if (sbi->s_min_batch_time != EXT4_DEF_MIN_BATCH_TIME) {
989		seq_printf(seq, ",min_batch_time=%u",
990			   (unsigned) sbi->s_min_batch_time);
991	}
992	if (sbi->s_max_batch_time != EXT4_DEF_MAX_BATCH_TIME) {
993		seq_printf(seq, ",max_batch_time=%u",
994			   (unsigned) sbi->s_min_batch_time);
995	}
996
997	/*
998	 * We're changing the default of barrier mount option, so
999	 * let's always display its mount state so it's clear what its
1000	 * status is.
1001	 */
1002	seq_puts(seq, ",barrier=");
1003	seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
1004	if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
1005		seq_puts(seq, ",journal_async_commit");
1006	else if (test_opt(sb, JOURNAL_CHECKSUM))
1007		seq_puts(seq, ",journal_checksum");
1008	if (test_opt(sb, I_VERSION))
1009		seq_puts(seq, ",i_version");
1010	if (!test_opt(sb, DELALLOC) &&
1011	    !(def_mount_opts & EXT4_DEFM_NODELALLOC))
1012		seq_puts(seq, ",nodelalloc");
1013
1014	if (sbi->s_stripe)
1015		seq_printf(seq, ",stripe=%lu", sbi->s_stripe);
1016	/*
1017	 * journal mode get enabled in different ways
1018	 * So just print the value even if we didn't specify it
1019	 */
1020	if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
1021		seq_puts(seq, ",data=journal");
1022	else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
1023		seq_puts(seq, ",data=ordered");
1024	else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_WRITEBACK_DATA)
1025		seq_puts(seq, ",data=writeback");
1026
1027	if (sbi->s_inode_readahead_blks != EXT4_DEF_INODE_READAHEAD_BLKS)
1028		seq_printf(seq, ",inode_readahead_blks=%u",
1029			   sbi->s_inode_readahead_blks);
1030
1031	if (test_opt(sb, DATA_ERR_ABORT))
1032		seq_puts(seq, ",data_err=abort");
1033
1034	if (test_opt(sb, NO_AUTO_DA_ALLOC))
1035		seq_puts(seq, ",noauto_da_alloc");
1036
1037	if (test_opt(sb, DISCARD) && !(def_mount_opts & EXT4_DEFM_DISCARD))
1038		seq_puts(seq, ",discard");
1039
1040	if (test_opt(sb, NOLOAD))
1041		seq_puts(seq, ",norecovery");
1042
1043	if (test_opt(sb, DIOREAD_NOLOCK))
1044		seq_puts(seq, ",dioread_nolock");
1045
1046	if (test_opt(sb, BLOCK_VALIDITY) &&
1047	    !(def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY))
1048		seq_puts(seq, ",block_validity");
1049
1050	ext4_show_quota_options(seq, sb);
1051
1052	return 0;
1053}
1054
1055static struct inode *ext4_nfs_get_inode(struct super_block *sb,
1056					u64 ino, u32 generation)
1057{
1058	struct inode *inode;
1059
1060	if (ino < EXT4_FIRST_INO(sb) && ino != EXT4_ROOT_INO)
1061		return ERR_PTR(-ESTALE);
1062	if (ino > le32_to_cpu(EXT4_SB(sb)->s_es->s_inodes_count))
1063		return ERR_PTR(-ESTALE);
1064
1065	/* iget isn't really right if the inode is currently unallocated!!
1066	 *
1067	 * ext4_read_inode will return a bad_inode if the inode had been
1068	 * deleted, so we should be safe.
1069	 *
1070	 * Currently we don't know the generation for parent directory, so
1071	 * a generation of 0 means "accept any"
1072	 */
1073	inode = ext4_iget(sb, ino);
1074	if (IS_ERR(inode))
1075		return ERR_CAST(inode);
1076	if (generation && inode->i_generation != generation) {
1077		iput(inode);
1078		return ERR_PTR(-ESTALE);
1079	}
1080
1081	return inode;
1082}
1083
1084static struct dentry *ext4_fh_to_dentry(struct super_block *sb, struct fid *fid,
1085					int fh_len, int fh_type)
1086{
1087	return generic_fh_to_dentry(sb, fid, fh_len, fh_type,
1088				    ext4_nfs_get_inode);
1089}
1090
1091static struct dentry *ext4_fh_to_parent(struct super_block *sb, struct fid *fid,
1092					int fh_len, int fh_type)
1093{
1094	return generic_fh_to_parent(sb, fid, fh_len, fh_type,
1095				    ext4_nfs_get_inode);
1096}
1097
1098/*
1099 * Try to release metadata pages (indirect blocks, directories) which are
1100 * mapped via the block device.  Since these pages could have journal heads
1101 * which would prevent try_to_free_buffers() from freeing them, we must use
1102 * jbd2 layer's try_to_free_buffers() function to release them.
1103 */
1104static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
1105				 gfp_t wait)
1106{
1107	journal_t *journal = EXT4_SB(sb)->s_journal;
1108
1109	WARN_ON(PageChecked(page));
1110	if (!page_has_buffers(page))
1111		return 0;
1112	if (journal)
1113		return jbd2_journal_try_to_free_buffers(journal, page,
1114							wait & ~__GFP_WAIT);
1115	return try_to_free_buffers(page);
1116}
1117
1118#ifdef CONFIG_QUOTA
1119#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
1120#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
1121
1122static int ext4_write_dquot(struct dquot *dquot);
1123static int ext4_acquire_dquot(struct dquot *dquot);
1124static int ext4_release_dquot(struct dquot *dquot);
1125static int ext4_mark_dquot_dirty(struct dquot *dquot);
1126static int ext4_write_info(struct super_block *sb, int type);
1127static int ext4_quota_on(struct super_block *sb, int type, int format_id,
1128				char *path);
1129static int ext4_quota_off(struct super_block *sb, int type);
1130static int ext4_quota_on_mount(struct super_block *sb, int type);
1131static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
1132			       size_t len, loff_t off);
1133static ssize_t ext4_quota_write(struct super_block *sb, int type,
1134				const char *data, size_t len, loff_t off);
1135
1136static const struct dquot_operations ext4_quota_operations = {
1137#ifdef CONFIG_QUOTA
1138	.get_reserved_space = ext4_get_reserved_space,
1139#endif
1140	.write_dquot	= ext4_write_dquot,
1141	.acquire_dquot	= ext4_acquire_dquot,
1142	.release_dquot	= ext4_release_dquot,
1143	.mark_dirty	= ext4_mark_dquot_dirty,
1144	.write_info	= ext4_write_info,
1145	.alloc_dquot	= dquot_alloc,
1146	.destroy_dquot	= dquot_destroy,
1147};
1148
1149static const struct quotactl_ops ext4_qctl_operations = {
1150	.quota_on	= ext4_quota_on,
1151	.quota_off	= ext4_quota_off,
1152	.quota_sync	= dquot_quota_sync,
1153	.get_info	= dquot_get_dqinfo,
1154	.set_info	= dquot_set_dqinfo,
1155	.get_dqblk	= dquot_get_dqblk,
1156	.set_dqblk	= dquot_set_dqblk
1157};
1158#endif
1159
1160static const struct super_operations ext4_sops = {
1161	.alloc_inode	= ext4_alloc_inode,
1162	.destroy_inode	= ext4_destroy_inode,
1163	.write_inode	= ext4_write_inode,
1164	.dirty_inode	= ext4_dirty_inode,
1165	.evict_inode	= ext4_evict_inode,
1166	.put_super	= ext4_put_super,
1167	.sync_fs	= ext4_sync_fs,
1168	.freeze_fs	= ext4_freeze,
1169	.unfreeze_fs	= ext4_unfreeze,
1170	.statfs		= ext4_statfs,
1171	.remount_fs	= ext4_remount,
1172	.show_options	= ext4_show_options,
1173#ifdef CONFIG_QUOTA
1174	.quota_read	= ext4_quota_read,
1175	.quota_write	= ext4_quota_write,
1176#endif
1177	.bdev_try_to_free_page = bdev_try_to_free_page,
1178};
1179
1180static const struct super_operations ext4_nojournal_sops = {
1181	.alloc_inode	= ext4_alloc_inode,
1182	.destroy_inode	= ext4_destroy_inode,
1183	.write_inode	= ext4_write_inode,
1184	.dirty_inode	= ext4_dirty_inode,
1185	.evict_inode	= ext4_evict_inode,
1186	.write_super	= ext4_write_super,
1187	.put_super	= ext4_put_super,
1188	.statfs		= ext4_statfs,
1189	.remount_fs	= ext4_remount,
1190	.show_options	= ext4_show_options,
1191#ifdef CONFIG_QUOTA
1192	.quota_read	= ext4_quota_read,
1193	.quota_write	= ext4_quota_write,
1194#endif
1195	.bdev_try_to_free_page = bdev_try_to_free_page,
1196};
1197
1198static const struct export_operations ext4_export_ops = {
1199	.fh_to_dentry = ext4_fh_to_dentry,
1200	.fh_to_parent = ext4_fh_to_parent,
1201	.get_parent = ext4_get_parent,
1202};
1203
1204enum {
1205	Opt_bsd_df, Opt_minix_df, Opt_grpid, Opt_nogrpid,
1206	Opt_resgid, Opt_resuid, Opt_sb, Opt_err_cont, Opt_err_panic, Opt_err_ro,
1207	Opt_nouid32, Opt_debug, Opt_oldalloc, Opt_orlov,
1208	Opt_user_xattr, Opt_nouser_xattr, Opt_acl, Opt_noacl,
1209	Opt_auto_da_alloc, Opt_noauto_da_alloc, Opt_noload, Opt_nobh, Opt_bh,
1210	Opt_commit, Opt_min_batch_time, Opt_max_batch_time,
1211	Opt_journal_update, Opt_journal_dev,
1212	Opt_journal_checksum, Opt_journal_async_commit,
1213	Opt_abort, Opt_data_journal, Opt_data_ordered, Opt_data_writeback,
1214	Opt_data_err_abort, Opt_data_err_ignore,
1215	Opt_usrjquota, Opt_grpjquota, Opt_offusrjquota, Opt_offgrpjquota,
1216	Opt_jqfmt_vfsold, Opt_jqfmt_vfsv0, Opt_jqfmt_vfsv1, Opt_quota,
1217	Opt_noquota, Opt_ignore, Opt_barrier, Opt_nobarrier, Opt_err,
1218	Opt_resize, Opt_usrquota, Opt_grpquota, Opt_i_version,
1219	Opt_stripe, Opt_delalloc, Opt_nodelalloc,
1220	Opt_block_validity, Opt_noblock_validity,
1221	Opt_inode_readahead_blks, Opt_journal_ioprio,
1222	Opt_dioread_nolock, Opt_dioread_lock,
1223	Opt_discard, Opt_nodiscard,
1224};
1225
1226static const match_table_t tokens = {
1227	{Opt_bsd_df, "bsddf"},
1228	{Opt_minix_df, "minixdf"},
1229	{Opt_grpid, "grpid"},
1230	{Opt_grpid, "bsdgroups"},
1231	{Opt_nogrpid, "nogrpid"},
1232	{Opt_nogrpid, "sysvgroups"},
1233	{Opt_resgid, "resgid=%u"},
1234	{Opt_resuid, "resuid=%u"},
1235	{Opt_sb, "sb=%u"},
1236	{Opt_err_cont, "errors=continue"},
1237	{Opt_err_panic, "errors=panic"},
1238	{Opt_err_ro, "errors=remount-ro"},
1239	{Opt_nouid32, "nouid32"},
1240	{Opt_debug, "debug"},
1241	{Opt_oldalloc, "oldalloc"},
1242	{Opt_orlov, "orlov"},
1243	{Opt_user_xattr, "user_xattr"},
1244	{Opt_nouser_xattr, "nouser_xattr"},
1245	{Opt_acl, "acl"},
1246	{Opt_noacl, "noacl"},
1247	{Opt_noload, "noload"},
1248	{Opt_noload, "norecovery"},
1249	{Opt_nobh, "nobh"},
1250	{Opt_bh, "bh"},
1251	{Opt_commit, "commit=%u"},
1252	{Opt_min_batch_time, "min_batch_time=%u"},
1253	{Opt_max_batch_time, "max_batch_time=%u"},
1254	{Opt_journal_update, "journal=update"},
1255	{Opt_journal_dev, "journal_dev=%u"},
1256	{Opt_journal_checksum, "journal_checksum"},
1257	{Opt_journal_async_commit, "journal_async_commit"},
1258	{Opt_abort, "abort"},
1259	{Opt_data_journal, "data=journal"},
1260	{Opt_data_ordered, "data=ordered"},
1261	{Opt_data_writeback, "data=writeback"},
1262	{Opt_data_err_abort, "data_err=abort"},
1263	{Opt_data_err_ignore, "data_err=ignore"},
1264	{Opt_offusrjquota, "usrjquota="},
1265	{Opt_usrjquota, "usrjquota=%s"},
1266	{Opt_offgrpjquota, "grpjquota="},
1267	{Opt_grpjquota, "grpjquota=%s"},
1268	{Opt_jqfmt_vfsold, "jqfmt=vfsold"},
1269	{Opt_jqfmt_vfsv0, "jqfmt=vfsv0"},
1270	{Opt_jqfmt_vfsv1, "jqfmt=vfsv1"},
1271	{Opt_grpquota, "grpquota"},
1272	{Opt_noquota, "noquota"},
1273	{Opt_quota, "quota"},
1274	{Opt_usrquota, "usrquota"},
1275	{Opt_barrier, "barrier=%u"},
1276	{Opt_barrier, "barrier"},
1277	{Opt_nobarrier, "nobarrier"},
1278	{Opt_i_version, "i_version"},
1279	{Opt_stripe, "stripe=%u"},
1280	{Opt_resize, "resize"},
1281	{Opt_delalloc, "delalloc"},
1282	{Opt_nodelalloc, "nodelalloc"},
1283	{Opt_block_validity, "block_validity"},
1284	{Opt_noblock_validity, "noblock_validity"},
1285	{Opt_inode_readahead_blks, "inode_readahead_blks=%u"},
1286	{Opt_journal_ioprio, "journal_ioprio=%u"},
1287	{Opt_auto_da_alloc, "auto_da_alloc=%u"},
1288	{Opt_auto_da_alloc, "auto_da_alloc"},
1289	{Opt_noauto_da_alloc, "noauto_da_alloc"},
1290	{Opt_dioread_nolock, "dioread_nolock"},
1291	{Opt_dioread_lock, "dioread_lock"},
1292	{Opt_discard, "discard"},
1293	{Opt_nodiscard, "nodiscard"},
1294	{Opt_err, NULL},
1295};
1296
1297static ext4_fsblk_t get_sb_block(void **data)
1298{
1299	ext4_fsblk_t	sb_block;
1300	char		*options = (char *) *data;
1301
1302	if (!options || strncmp(options, "sb=", 3) != 0)
1303		return 1;	/* Default location */
1304
1305	options += 3;
1306	/* TODO: use simple_strtoll with >32bit ext4 */
1307	sb_block = simple_strtoul(options, &options, 0);
1308	if (*options && *options != ',') {
1309		printk(KERN_ERR "EXT4-fs: Invalid sb specification: %s\n",
1310		       (char *) *data);
1311		return 1;
1312	}
1313	if (*options == ',')
1314		options++;
1315	*data = (void *) options;
1316
1317	return sb_block;
1318}
1319
1320#define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
1321static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
1322	"Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
1323
1324#ifdef CONFIG_QUOTA
1325static int set_qf_name(struct super_block *sb, int qtype, substring_t *args)
1326{
1327	struct ext4_sb_info *sbi = EXT4_SB(sb);
1328	char *qname;
1329
1330	if (sb_any_quota_loaded(sb) &&
1331		!sbi->s_qf_names[qtype]) {
1332		ext4_msg(sb, KERN_ERR,
1333			"Cannot change journaled "
1334			"quota options when quota turned on");
1335		return 0;
1336	}
1337	qname = match_strdup(args);
1338	if (!qname) {
1339		ext4_msg(sb, KERN_ERR,
1340			"Not enough memory for storing quotafile name");
1341		return 0;
1342	}
1343	if (sbi->s_qf_names[qtype] &&
1344		strcmp(sbi->s_qf_names[qtype], qname)) {
1345		ext4_msg(sb, KERN_ERR,
1346			"%s quota file already specified", QTYPE2NAME(qtype));
1347		kfree(qname);
1348		return 0;
1349	}
1350	sbi->s_qf_names[qtype] = qname;
1351	if (strchr(sbi->s_qf_names[qtype], '/')) {
1352		ext4_msg(sb, KERN_ERR,
1353			"quotafile must be on filesystem root");
1354		kfree(sbi->s_qf_names[qtype]);
1355		sbi->s_qf_names[qtype] = NULL;
1356		return 0;
1357	}
1358	set_opt(sbi->s_mount_opt, QUOTA);
1359	return 1;
1360}
1361
1362static int clear_qf_name(struct super_block *sb, int qtype)
1363{
1364
1365	struct ext4_sb_info *sbi = EXT4_SB(sb);
1366
1367	if (sb_any_quota_loaded(sb) &&
1368		sbi->s_qf_names[qtype]) {
1369		ext4_msg(sb, KERN_ERR, "Cannot change journaled quota options"
1370			" when quota turned on");
1371		return 0;
1372	}
1373	/*
1374	 * The space will be released later when all options are confirmed
1375	 * to be correct
1376	 */
1377	sbi->s_qf_names[qtype] = NULL;
1378	return 1;
1379}
1380#endif
1381
1382static int parse_options(char *options, struct super_block *sb,
1383			 unsigned long *journal_devnum,
1384			 unsigned int *journal_ioprio,
1385			 ext4_fsblk_t *n_blocks_count, int is_remount)
1386{
1387	struct ext4_sb_info *sbi = EXT4_SB(sb);
1388	char *p;
1389	substring_t args[MAX_OPT_ARGS];
1390	int data_opt = 0;
1391	int option;
1392#ifdef CONFIG_QUOTA
1393	int qfmt;
1394#endif
1395
1396	if (!options)
1397		return 1;
1398
1399	while ((p = strsep(&options, ",")) != NULL) {
1400		int token;
1401		if (!*p)
1402			continue;
1403
1404		/*
1405		 * Initialize args struct so we know whether arg was
1406		 * found; some options take optional arguments.
1407		 */
1408		args[0].to = args[0].from = 0;
1409		token = match_token(p, tokens, args);
1410		switch (token) {
1411		case Opt_bsd_df:
1412			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1413			clear_opt(sbi->s_mount_opt, MINIX_DF);
1414			break;
1415		case Opt_minix_df:
1416			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1417			set_opt(sbi->s_mount_opt, MINIX_DF);
1418
1419			break;
1420		case Opt_grpid:
1421			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1422			set_opt(sbi->s_mount_opt, GRPID);
1423
1424			break;
1425		case Opt_nogrpid:
1426			ext4_msg(sb, KERN_WARNING, deprecated_msg, p, "2.6.38");
1427			clear_opt(sbi->s_mount_opt, GRPID);
1428
1429			break;
1430		case Opt_resuid:
1431			if (match_int(&args[0], &option))
1432				return 0;
1433			sbi->s_resuid = option;
1434			break;
1435		case Opt_resgid:
1436			if (match_int(&args[0], &option))
1437				return 0;
1438			sbi->s_resgid = option;
1439			break;
1440		case Opt_sb:
1441			/* handled by get_sb_block() instead of here */
1442			/* *sb_block = match_int(&args[0]); */
1443			break;
1444		case Opt_err_panic:
1445			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
1446			clear_opt(sbi->s_mount_opt, ERRORS_RO);
1447			set_opt(sbi->s_mount_opt, ERRORS_PANIC);
1448			break;
1449		case Opt_err_ro:
1450			clear_opt(sbi->s_mount_opt, ERRORS_CONT);
1451			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
1452			set_opt(sbi->s_mount_opt, ERRORS_RO);
1453			break;
1454		case Opt_err_cont:
1455			clear_opt(sbi->s_mount_opt, ERRORS_RO);
1456			clear_opt(sbi->s_mount_opt, ERRORS_PANIC);
1457			set_opt(sbi->s_mount_opt, ERRORS_CONT);
1458			break;
1459		case Opt_nouid32:
1460			set_opt(sbi->s_mount_opt, NO_UID32);
1461			break;
1462		case Opt_debug:
1463			set_opt(sbi->s_mount_opt, DEBUG);
1464			break;
1465		case Opt_oldalloc:
1466			set_opt(sbi->s_mount_opt, OLDALLOC);
1467			break;
1468		case Opt_orlov:
1469			clear_opt(sbi->s_mount_opt, OLDALLOC);
1470			break;
1471#ifdef CONFIG_EXT4_FS_XATTR
1472		case Opt_user_xattr:
1473			set_opt(sbi->s_mount_opt, XATTR_USER);
1474			break;
1475		case Opt_nouser_xattr:
1476			clear_opt(sbi->s_mount_opt, XATTR_USER);
1477			break;
1478#else
1479		case Opt_user_xattr:
1480		case Opt_nouser_xattr:
1481			ext4_msg(sb, KERN_ERR, "(no)user_xattr options not supported");
1482			break;
1483#endif
1484#ifdef CONFIG_EXT4_FS_POSIX_ACL
1485		case Opt_acl:
1486			set_opt(sbi->s_mount_opt, POSIX_ACL);
1487			break;
1488		case Opt_noacl:
1489			clear_opt(sbi->s_mount_opt, POSIX_ACL);
1490			break;
1491#else
1492		case Opt_acl:
1493		case Opt_noacl:
1494			ext4_msg(sb, KERN_ERR, "(no)acl options not supported");
1495			break;
1496#endif
1497		case Opt_journal_update:
1498			/* Eventually we will want to be able to create
1499			   a journal file here.  For now, only allow the
1500			   user to specify an existing inode to be the
1501			   journal file. */
1502			if (is_remount) {
1503				ext4_msg(sb, KERN_ERR,
1504					 "Cannot specify journal on remount");
1505				return 0;
1506			}
1507			set_opt(sbi->s_mount_opt, UPDATE_JOURNAL);
1508			break;
1509		case Opt_journal_dev:
1510			if (is_remount) {
1511				ext4_msg(sb, KERN_ERR,
1512					"Cannot specify journal on remount");
1513				return 0;
1514			}
1515			if (match_int(&args[0], &option))
1516				return 0;
1517			*journal_devnum = option;
1518			break;
1519		case Opt_journal_checksum:
1520			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1521			break;
1522		case Opt_journal_async_commit:
1523			set_opt(sbi->s_mount_opt, JOURNAL_ASYNC_COMMIT);
1524			set_opt(sbi->s_mount_opt, JOURNAL_CHECKSUM);
1525			break;
1526		case Opt_noload:
1527			set_opt(sbi->s_mount_opt, NOLOAD);
1528			break;
1529		case Opt_commit:
1530			if (match_int(&args[0], &option))
1531				return 0;
1532			if (option < 0)
1533				return 0;
1534			if (option == 0)
1535				option = JBD2_DEFAULT_MAX_COMMIT_AGE;
1536			sbi->s_commit_interval = HZ * option;
1537			break;
1538		case Opt_max_batch_time:
1539			if (match_int(&args[0], &option))
1540				return 0;
1541			if (option < 0)
1542				return 0;
1543			if (option == 0)
1544				option = EXT4_DEF_MAX_BATCH_TIME;
1545			sbi->s_max_batch_time = option;
1546			break;
1547		case Opt_min_batch_time:
1548			if (match_int(&args[0], &option))
1549				return 0;
1550			if (option < 0)
1551				return 0;
1552			sbi->s_min_batch_time = option;
1553			break;
1554		case Opt_data_journal:
1555			data_opt = EXT4_MOUNT_JOURNAL_DATA;
1556			goto datacheck;
1557		case Opt_data_ordered:
1558			data_opt = EXT4_MOUNT_ORDERED_DATA;
1559			goto datacheck;
1560		case Opt_data_writeback:
1561			data_opt = EXT4_MOUNT_WRITEBACK_DATA;
1562		datacheck:
1563			if (is_remount) {
1564				if (test_opt(sb, DATA_FLAGS) != data_opt) {
1565					ext4_msg(sb, KERN_ERR,
1566						"Cannot change data mode on remount");
1567					return 0;
1568				}
1569			} else {
1570				clear_opt(sbi->s_mount_opt, DATA_FLAGS);
1571				sbi->s_mount_opt |= data_opt;
1572			}
1573			break;
1574		case Opt_data_err_abort:
1575			set_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
1576			break;
1577		case Opt_data_err_ignore:
1578			clear_opt(sbi->s_mount_opt, DATA_ERR_ABORT);
1579			break;
1580#ifdef CONFIG_QUOTA
1581		case Opt_usrjquota:
1582			if (!set_qf_name(sb, USRQUOTA, &args[0]))
1583				return 0;
1584			break;
1585		case Opt_grpjquota:
1586			if (!set_qf_name(sb, GRPQUOTA, &args[0]))
1587				return 0;
1588			break;
1589		case Opt_offusrjquota:
1590			if (!clear_qf_name(sb, USRQUOTA))
1591				return 0;
1592			break;
1593		case Opt_offgrpjquota:
1594			if (!clear_qf_name(sb, GRPQUOTA))
1595				return 0;
1596			break;
1597
1598		case Opt_jqfmt_vfsold:
1599			qfmt = QFMT_VFS_OLD;
1600			goto set_qf_format;
1601		case Opt_jqfmt_vfsv0:
1602			qfmt = QFMT_VFS_V0;
1603			goto set_qf_format;
1604		case Opt_jqfmt_vfsv1:
1605			qfmt = QFMT_VFS_V1;
1606set_qf_format:
1607			if (sb_any_quota_loaded(sb) &&
1608			    sbi->s_jquota_fmt != qfmt) {
1609				ext4_msg(sb, KERN_ERR, "Cannot change "
1610					"journaled quota options when "
1611					"quota turned on");
1612				return 0;
1613			}
1614			sbi->s_jquota_fmt = qfmt;
1615			break;
1616		case Opt_quota:
1617		case Opt_usrquota:
1618			set_opt(sbi->s_mount_opt, QUOTA);
1619			set_opt(sbi->s_mount_opt, USRQUOTA);
1620			break;
1621		case Opt_grpquota:
1622			set_opt(sbi->s_mount_opt, QUOTA);
1623			set_opt(sbi->s_mount_opt, GRPQUOTA);
1624			break;
1625		case Opt_noquota:
1626			if (sb_any_quota_loaded(sb)) {
1627				ext4_msg(sb, KERN_ERR, "Cannot change quota "
1628					"options when quota turned on");
1629				return 0;
1630			}
1631			clear_opt(sbi->s_mount_opt, QUOTA);
1632			clear_opt(sbi->s_mount_opt, USRQUOTA);
1633			clear_opt(sbi->s_mount_opt, GRPQUOTA);
1634			break;
1635#else
1636		case Opt_quota:
1637		case Opt_usrquota:
1638		case Opt_grpquota:
1639			ext4_msg(sb, KERN_ERR,
1640				"quota options not supported");
1641			break;
1642		case Opt_usrjquota:
1643		case Opt_grpjquota:
1644		case Opt_offusrjquota:
1645		case Opt_offgrpjquota:
1646		case Opt_jqfmt_vfsold:
1647		case Opt_jqfmt_vfsv0:
1648		case Opt_jqfmt_vfsv1:
1649			ext4_msg(sb, KERN_ERR,
1650				"journaled quota options not supported");
1651			break;
1652		case Opt_noquota:
1653			break;
1654#endif
1655		case Opt_abort:
1656			sbi->s_mount_flags |= EXT4_MF_FS_ABORTED;
1657			break;
1658		case Opt_nobarrier:
1659			clear_opt(sbi->s_mount_opt, BARRIER);
1660			break;
1661		case Opt_barrier:
1662			if (args[0].from) {
1663				if (match_int(&args[0], &option))
1664					return 0;
1665			} else
1666				option = 1;	/* No argument, default to 1 */
1667			if (option)
1668				set_opt(sbi->s_mount_opt, BARRIER);
1669			else
1670				clear_opt(sbi->s_mount_opt, BARRIER);
1671			break;
1672		case Opt_ignore:
1673			break;
1674		case Opt_resize:
1675			if (!is_remount) {
1676				ext4_msg(sb, KERN_ERR,
1677					"resize option only available "
1678					"for remount");
1679				return 0;
1680			}
1681			if (match_int(&args[0], &option) != 0)
1682				return 0;
1683			*n_blocks_count = option;
1684			break;
1685		case Opt_nobh:
1686			ext4_msg(sb, KERN_WARNING,
1687				 "Ignoring deprecated nobh option");
1688			break;
1689		case Opt_bh:
1690			ext4_msg(sb, KERN_WARNING,
1691				 "Ignoring deprecated bh option");
1692			break;
1693		case Opt_i_version:
1694			set_opt(sbi->s_mount_opt, I_VERSION);
1695			sb->s_flags |= MS_I_VERSION;
1696			break;
1697		case Opt_nodelalloc:
1698			clear_opt(sbi->s_mount_opt, DELALLOC);
1699			break;
1700		case Opt_stripe:
1701			if (match_int(&args[0], &option))
1702				return 0;
1703			if (option < 0)
1704				return 0;
1705			sbi->s_stripe = option;
1706			break;
1707		case Opt_delalloc:
1708			set_opt(sbi->s_mount_opt, DELALLOC);
1709			break;
1710		case Opt_block_validity:
1711			set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
1712			break;
1713		case Opt_noblock_validity:
1714			clear_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
1715			break;
1716		case Opt_inode_readahead_blks:
1717			if (match_int(&args[0], &option))
1718				return 0;
1719			if (option < 0 || option > (1 << 30))
1720				return 0;
1721			if (!is_power_of_2(option)) {
1722				ext4_msg(sb, KERN_ERR,
1723					 "EXT4-fs: inode_readahead_blks"
1724					 " must be a power of 2");
1725				return 0;
1726			}
1727			sbi->s_inode_readahead_blks = option;
1728			break;
1729		case Opt_journal_ioprio:
1730			if (match_int(&args[0], &option))
1731				return 0;
1732			if (option < 0 || option > 7)
1733				break;
1734			*journal_ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE,
1735							    option);
1736			break;
1737		case Opt_noauto_da_alloc:
1738			set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
1739			break;
1740		case Opt_auto_da_alloc:
1741			if (args[0].from) {
1742				if (match_int(&args[0], &option))
1743					return 0;
1744			} else
1745				option = 1;	/* No argument, default to 1 */
1746			if (option)
1747				clear_opt(sbi->s_mount_opt, NO_AUTO_DA_ALLOC);
1748			else
1749				set_opt(sbi->s_mount_opt,NO_AUTO_DA_ALLOC);
1750			break;
1751		case Opt_discard:
1752			set_opt(sbi->s_mount_opt, DISCARD);
1753			break;
1754		case Opt_nodiscard:
1755			clear_opt(sbi->s_mount_opt, DISCARD);
1756			break;
1757		case Opt_dioread_nolock:
1758			set_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1759			break;
1760		case Opt_dioread_lock:
1761			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
1762			break;
1763		default:
1764			ext4_msg(sb, KERN_ERR,
1765			       "Unrecognized mount option \"%s\" "
1766			       "or missing value", p);
1767			return 0;
1768		}
1769	}
1770#ifdef CONFIG_QUOTA
1771	if (sbi->s_qf_names[USRQUOTA] || sbi->s_qf_names[GRPQUOTA]) {
1772		if (test_opt(sb, USRQUOTA) && sbi->s_qf_names[USRQUOTA])
1773			clear_opt(sbi->s_mount_opt, USRQUOTA);
1774
1775		if (test_opt(sb, GRPQUOTA) && sbi->s_qf_names[GRPQUOTA])
1776			clear_opt(sbi->s_mount_opt, GRPQUOTA);
1777
1778		if (test_opt(sb, GRPQUOTA) || test_opt(sb, USRQUOTA)) {
1779			ext4_msg(sb, KERN_ERR, "old and new quota "
1780					"format mixing");
1781			return 0;
1782		}
1783
1784		if (!sbi->s_jquota_fmt) {
1785			ext4_msg(sb, KERN_ERR, "journaled quota format "
1786					"not specified");
1787			return 0;
1788		}
1789	} else {
1790		if (sbi->s_jquota_fmt) {
1791			ext4_msg(sb, KERN_ERR, "journaled quota format "
1792					"specified with no journaling "
1793					"enabled");
1794			return 0;
1795		}
1796	}
1797#endif
1798	return 1;
1799}
1800
1801static int ext4_setup_super(struct super_block *sb, struct ext4_super_block *es,
1802			    int read_only)
1803{
1804	struct ext4_sb_info *sbi = EXT4_SB(sb);
1805	int res = 0;
1806
1807	if (le32_to_cpu(es->s_rev_level) > EXT4_MAX_SUPP_REV) {
1808		ext4_msg(sb, KERN_ERR, "revision level too high, "
1809			 "forcing read-only mode");
1810		res = MS_RDONLY;
1811	}
1812	if (read_only)
1813		return res;
1814	if (!(sbi->s_mount_state & EXT4_VALID_FS))
1815		ext4_msg(sb, KERN_WARNING, "warning: mounting unchecked fs, "
1816			 "running e2fsck is recommended");
1817	else if ((sbi->s_mount_state & EXT4_ERROR_FS))
1818		ext4_msg(sb, KERN_WARNING,
1819			 "warning: mounting fs with errors, "
1820			 "running e2fsck is recommended");
1821	else if ((__s16) le16_to_cpu(es->s_max_mnt_count) >= 0 &&
1822		 le16_to_cpu(es->s_mnt_count) >=
1823		 (unsigned short) (__s16) le16_to_cpu(es->s_max_mnt_count))
1824		ext4_msg(sb, KERN_WARNING,
1825			 "warning: maximal mount count reached, "
1826			 "running e2fsck is recommended");
1827	else if (le32_to_cpu(es->s_checkinterval) &&
1828		(le32_to_cpu(es->s_lastcheck) +
1829			le32_to_cpu(es->s_checkinterval) <= get_seconds()))
1830		ext4_msg(sb, KERN_WARNING,
1831			 "warning: checktime reached, "
1832			 "running e2fsck is recommended");
1833	if (!sbi->s_journal)
1834		es->s_state &= cpu_to_le16(~EXT4_VALID_FS);
1835	if (!(__s16) le16_to_cpu(es->s_max_mnt_count))
1836		es->s_max_mnt_count = cpu_to_le16(EXT4_DFL_MAX_MNT_COUNT);
1837	le16_add_cpu(&es->s_mnt_count, 1);
1838	es->s_mtime = cpu_to_le32(get_seconds());
1839	ext4_update_dynamic_rev(sb);
1840	if (sbi->s_journal)
1841		EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
1842
1843	ext4_commit_super(sb, 1);
1844	if (test_opt(sb, DEBUG))
1845		printk(KERN_INFO "[EXT4 FS bs=%lu, gc=%u, "
1846				"bpg=%lu, ipg=%lu, mo=%04x]\n",
1847			sb->s_blocksize,
1848			sbi->s_groups_count,
1849			EXT4_BLOCKS_PER_GROUP(sb),
1850			EXT4_INODES_PER_GROUP(sb),
1851			sbi->s_mount_opt);
1852
1853	return res;
1854}
1855
1856static int ext4_fill_flex_info(struct super_block *sb)
1857{
1858	struct ext4_sb_info *sbi = EXT4_SB(sb);
1859	struct ext4_group_desc *gdp = NULL;
1860	ext4_group_t flex_group_count;
1861	ext4_group_t flex_group;
1862	int groups_per_flex = 0;
1863	size_t size;
1864	int i;
1865
1866	sbi->s_log_groups_per_flex = sbi->s_es->s_log_groups_per_flex;
1867	groups_per_flex = 1 << sbi->s_log_groups_per_flex;
1868
1869	if (groups_per_flex < 2) {
1870		sbi->s_log_groups_per_flex = 0;
1871		return 1;
1872	}
1873
1874	/* We allocate both existing and potentially added groups */
1875	flex_group_count = ((sbi->s_groups_count + groups_per_flex - 1) +
1876			((le16_to_cpu(sbi->s_es->s_reserved_gdt_blocks) + 1) <<
1877			      EXT4_DESC_PER_BLOCK_BITS(sb))) / groups_per_flex;
1878	size = flex_group_count * sizeof(struct flex_groups);
1879	sbi->s_flex_groups = kzalloc(size, GFP_KERNEL);
1880	if (sbi->s_flex_groups == NULL) {
1881		sbi->s_flex_groups = vmalloc(size);
1882		if (sbi->s_flex_groups)
1883			memset(sbi->s_flex_groups, 0, size);
1884	}
1885	if (sbi->s_flex_groups == NULL) {
1886		ext4_msg(sb, KERN_ERR, "not enough memory for "
1887				"%u flex groups", flex_group_count);
1888		goto failed;
1889	}
1890
1891	for (i = 0; i < sbi->s_groups_count; i++) {
1892		gdp = ext4_get_group_desc(sb, i, NULL);
1893
1894		flex_group = ext4_flex_group(sbi, i);
1895		atomic_add(ext4_free_inodes_count(sb, gdp),
1896			   &sbi->s_flex_groups[flex_group].free_inodes);
1897		atomic_add(ext4_free_blks_count(sb, gdp),
1898			   &sbi->s_flex_groups[flex_group].free_blocks);
1899		atomic_add(ext4_used_dirs_count(sb, gdp),
1900			   &sbi->s_flex_groups[flex_group].used_dirs);
1901	}
1902
1903	return 1;
1904failed:
1905	return 0;
1906}
1907
1908__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
1909			    struct ext4_group_desc *gdp)
1910{
1911	__u16 crc = 0;
1912
1913	if (sbi->s_es->s_feature_ro_compat &
1914	    cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) {
1915		int offset = offsetof(struct ext4_group_desc, bg_checksum);
1916		__le32 le_group = cpu_to_le32(block_group);
1917
1918		crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
1919		crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
1920		crc = crc16(crc, (__u8 *)gdp, offset);
1921		offset += sizeof(gdp->bg_checksum); /* skip checksum */
1922		/* for checksum of struct ext4_group_desc do the rest...*/
1923		if ((sbi->s_es->s_feature_incompat &
1924		     cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
1925		    offset < le16_to_cpu(sbi->s_es->s_desc_size))
1926			crc = crc16(crc, (__u8 *)gdp + offset,
1927				    le16_to_cpu(sbi->s_es->s_desc_size) -
1928					offset);
1929	}
1930
1931	return cpu_to_le16(crc);
1932}
1933
1934int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group,
1935				struct ext4_group_desc *gdp)
1936{
1937	if ((sbi->s_es->s_feature_ro_compat &
1938	     cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) &&
1939	    (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp)))
1940		return 0;
1941
1942	return 1;
1943}
1944
1945/* Called at mount-time, super-block is locked */
1946static int ext4_check_descriptors(struct super_block *sb)
1947{
1948	struct ext4_sb_info *sbi = EXT4_SB(sb);
1949	ext4_fsblk_t first_block = le32_to_cpu(sbi->s_es->s_first_data_block);
1950	ext4_fsblk_t last_block;
1951	ext4_fsblk_t block_bitmap;
1952	ext4_fsblk_t inode_bitmap;
1953	ext4_fsblk_t inode_table;
1954	int flexbg_flag = 0;
1955	ext4_group_t i;
1956
1957	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
1958		flexbg_flag = 1;
1959
1960	ext4_debug("Checking group descriptors");
1961
1962	for (i = 0; i < sbi->s_groups_count; i++) {
1963		struct ext4_group_desc *gdp = ext4_get_group_desc(sb, i, NULL);
1964
1965		if (i == sbi->s_groups_count - 1 || flexbg_flag)
1966			last_block = ext4_blocks_count(sbi->s_es) - 1;
1967		else
1968			last_block = first_block +
1969				(EXT4_BLOCKS_PER_GROUP(sb) - 1);
1970
1971		block_bitmap = ext4_block_bitmap(sb, gdp);
1972		if (block_bitmap < first_block || block_bitmap > last_block) {
1973			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1974			       "Block bitmap for group %u not in group "
1975			       "(block %llu)!", i, block_bitmap);
1976			return 0;
1977		}
1978		inode_bitmap = ext4_inode_bitmap(sb, gdp);
1979		if (inode_bitmap < first_block || inode_bitmap > last_block) {
1980			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1981			       "Inode bitmap for group %u not in group "
1982			       "(block %llu)!", i, inode_bitmap);
1983			return 0;
1984		}
1985		inode_table = ext4_inode_table(sb, gdp);
1986		if (inode_table < first_block ||
1987		    inode_table + sbi->s_itb_per_group - 1 > last_block) {
1988			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1989			       "Inode table for group %u not in group "
1990			       "(block %llu)!", i, inode_table);
1991			return 0;
1992		}
1993		ext4_lock_group(sb, i);
1994		if (!ext4_group_desc_csum_verify(sbi, i, gdp)) {
1995			ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
1996				 "Checksum for group %u failed (%u!=%u)",
1997				 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
1998				     gdp)), le16_to_cpu(gdp->bg_checksum));
1999			if (!(sb->s_flags & MS_RDONLY)) {
2000				ext4_unlock_group(sb, i);
2001				return 0;
2002			}
2003		}
2004		ext4_unlock_group(sb, i);
2005		if (!flexbg_flag)
2006			first_block += EXT4_BLOCKS_PER_GROUP(sb);
2007	}
2008
2009	ext4_free_blocks_count_set(sbi->s_es, ext4_count_free_blocks(sb));
2010	sbi->s_es->s_free_inodes_count =cpu_to_le32(ext4_count_free_inodes(sb));
2011	return 1;
2012}
2013
2014/* ext4_orphan_cleanup() walks a singly-linked list of inodes (starting at
2015 * the superblock) which were deleted from all directories, but held open by
2016 * a process at the time of a crash.  We walk the list and try to delete these
2017 * inodes at recovery time (only with a read-write filesystem).
2018 *
2019 * In order to keep the orphan inode chain consistent during traversal (in
2020 * case of crash during recovery), we link each inode into the superblock
2021 * orphan list_head and handle it the same way as an inode deletion during
2022 * normal operation (which journals the operations for us).
2023 *
2024 * We only do an iget() and an iput() on each inode, which is very safe if we
2025 * accidentally point at an in-use or already deleted inode.  The worst that
2026 * can happen in this case is that we get a "bit already cleared" message from
2027 * ext4_free_inode().  The only reason we would point at a wrong inode is if
2028 * e2fsck was run on this filesystem, and it must have already done the orphan
2029 * inode cleanup for us, so we can safely abort without any further action.
2030 */
2031static void ext4_orphan_cleanup(struct super_block *sb,
2032				struct ext4_super_block *es)
2033{
2034	unsigned int s_flags = sb->s_flags;
2035	int nr_orphans = 0, nr_truncates = 0;
2036#ifdef CONFIG_QUOTA
2037	int i;
2038#endif
2039	if (!es->s_last_orphan) {
2040		jbd_debug(4, "no orphan inodes to clean up\n");
2041		return;
2042	}
2043
2044	if (bdev_read_only(sb->s_bdev)) {
2045		ext4_msg(sb, KERN_ERR, "write access "
2046			"unavailable, skipping orphan cleanup");
2047		return;
2048	}
2049
2050	if (EXT4_SB(sb)->s_mount_state & EXT4_ERROR_FS) {
2051		if (es->s_last_orphan)
2052			jbd_debug(1, "Errors on filesystem, "
2053				  "clearing orphan list.\n");
2054		es->s_last_orphan = 0;
2055		jbd_debug(1, "Skipping orphan recovery on fs with errors.\n");
2056		return;
2057	}
2058
2059	if (s_flags & MS_RDONLY) {
2060		ext4_msg(sb, KERN_INFO, "orphan cleanup on readonly fs");
2061		sb->s_flags &= ~MS_RDONLY;
2062	}
2063#ifdef CONFIG_QUOTA
2064	/* Needed for iput() to work correctly and not trash data */
2065	sb->s_flags |= MS_ACTIVE;
2066	/* Turn on quotas so that they are updated correctly */
2067	for (i = 0; i < MAXQUOTAS; i++) {
2068		if (EXT4_SB(sb)->s_qf_names[i]) {
2069			int ret = ext4_quota_on_mount(sb, i);
2070			if (ret < 0)
2071				ext4_msg(sb, KERN_ERR,
2072					"Cannot turn on journaled "
2073					"quota: error %d", ret);
2074		}
2075	}
2076#endif
2077
2078	while (es->s_last_orphan) {
2079		struct inode *inode;
2080
2081		inode = ext4_orphan_get(sb, le32_to_cpu(es->s_last_orphan));
2082		if (IS_ERR(inode)) {
2083			es->s_last_orphan = 0;
2084			break;
2085		}
2086
2087		list_add(&EXT4_I(inode)->i_orphan, &EXT4_SB(sb)->s_orphan);
2088		dquot_initialize(inode);
2089		if (inode->i_nlink) {
2090			ext4_msg(sb, KERN_DEBUG,
2091				"%s: truncating inode %lu to %lld bytes",
2092				__func__, inode->i_ino, inode->i_size);
2093			jbd_debug(2, "truncating inode %lu to %lld bytes\n",
2094				  inode->i_ino, inode->i_size);
2095			ext4_truncate(inode);
2096			nr_truncates++;
2097		} else {
2098			ext4_msg(sb, KERN_DEBUG,
2099				"%s: deleting unreferenced inode %lu",
2100				__func__, inode->i_ino);
2101			jbd_debug(2, "deleting unreferenced inode %lu\n",
2102				  inode->i_ino);
2103			nr_orphans++;
2104		}
2105		iput(inode);  /* The delete magic happens here! */
2106	}
2107
2108#define PLURAL(x) (x), ((x) == 1) ? "" : "s"
2109
2110	if (nr_orphans)
2111		ext4_msg(sb, KERN_INFO, "%d orphan inode%s deleted",
2112		       PLURAL(nr_orphans));
2113	if (nr_truncates)
2114		ext4_msg(sb, KERN_INFO, "%d truncate%s cleaned up",
2115		       PLURAL(nr_truncates));
2116#ifdef CONFIG_QUOTA
2117	/* Turn quotas off */
2118	for (i = 0; i < MAXQUOTAS; i++) {
2119		if (sb_dqopt(sb)->files[i])
2120			dquot_quota_off(sb, i);
2121	}
2122#endif
2123	sb->s_flags = s_flags; /* Restore MS_RDONLY status */
2124}
2125
2126/*
2127 * Maximal extent format file size.
2128 * Resulting logical blkno at s_maxbytes must fit in our on-disk
2129 * extent format containers, within a sector_t, and within i_blocks
2130 * in the vfs.  ext4 inode has 48 bits of i_block in fsblock units,
2131 * so that won't be a limiting factor.
2132 *
2133 * Note, this does *not* consider any metadata overhead for vfs i_blocks.
2134 */
2135static loff_t ext4_max_size(int blkbits, int has_huge_files)
2136{
2137	loff_t res;
2138	loff_t upper_limit = MAX_LFS_FILESIZE;
2139
2140	/* small i_blocks in vfs inode? */
2141	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2142		/*
2143		 * CONFIG_LBDAF is not enabled implies the inode
2144		 * i_block represent total blocks in 512 bytes
2145		 * 32 == size of vfs inode i_blocks * 8
2146		 */
2147		upper_limit = (1LL << 32) - 1;
2148
2149		/* total blocks in file system block size */
2150		upper_limit >>= (blkbits - 9);
2151		upper_limit <<= blkbits;
2152	}
2153
2154	/* 32-bit extent-start container, ee_block */
2155	res = 1LL << 32;
2156	res <<= blkbits;
2157	res -= 1;
2158
2159	/* Sanity check against vm- & vfs- imposed limits */
2160	if (res > upper_limit)
2161		res = upper_limit;
2162
2163	return res;
2164}
2165
2166/*
2167 * Maximal bitmap file size.  There is a direct, and {,double-,triple-}indirect
2168 * block limit, and also a limit of (2^48 - 1) 512-byte sectors in i_blocks.
2169 * We need to be 1 filesystem block less than the 2^48 sector limit.
2170 */
2171static loff_t ext4_max_bitmap_size(int bits, int has_huge_files)
2172{
2173	loff_t res = EXT4_NDIR_BLOCKS;
2174	int meta_blocks;
2175	loff_t upper_limit;
2176	/* This is calculated to be the largest file size for a dense, block
2177	 * mapped file such that the file's total number of 512-byte sectors,
2178	 * including data and all indirect blocks, does not exceed (2^48 - 1).
2179	 *
2180	 * __u32 i_blocks_lo and _u16 i_blocks_high represent the total
2181	 * number of 512-byte sectors of the file.
2182	 */
2183
2184	if (!has_huge_files || sizeof(blkcnt_t) < sizeof(u64)) {
2185		/*
2186		 * !has_huge_files or CONFIG_LBDAF not enabled implies that
2187		 * the inode i_block field represents total file blocks in
2188		 * 2^32 512-byte sectors == size of vfs inode i_blocks * 8
2189		 */
2190		upper_limit = (1LL << 32) - 1;
2191
2192		/* total blocks in file system block size */
2193		upper_limit >>= (bits - 9);
2194
2195	} else {
2196		/*
2197		 * We use 48 bit ext4_inode i_blocks
2198		 * With EXT4_HUGE_FILE_FL set the i_blocks
2199		 * represent total number of blocks in
2200		 * file system block size
2201		 */
2202		upper_limit = (1LL << 48) - 1;
2203
2204	}
2205
2206	/* indirect blocks */
2207	meta_blocks = 1;
2208	/* double indirect blocks */
2209	meta_blocks += 1 + (1LL << (bits-2));
2210	/* tripple indirect blocks */
2211	meta_blocks += 1 + (1LL << (bits-2)) + (1LL << (2*(bits-2)));
2212
2213	upper_limit -= meta_blocks;
2214	upper_limit <<= bits;
2215
2216	res += 1LL << (bits-2);
2217	res += 1LL << (2*(bits-2));
2218	res += 1LL << (3*(bits-2));
2219	res <<= bits;
2220	if (res > upper_limit)
2221		res = upper_limit;
2222
2223	if (res > MAX_LFS_FILESIZE)
2224		res = MAX_LFS_FILESIZE;
2225
2226	return res;
2227}
2228
2229static ext4_fsblk_t descriptor_loc(struct super_block *sb,
2230				   ext4_fsblk_t logical_sb_block, int nr)
2231{
2232	struct ext4_sb_info *sbi = EXT4_SB(sb);
2233	ext4_group_t bg, first_meta_bg;
2234	int has_super = 0;
2235
2236	first_meta_bg = le32_to_cpu(sbi->s_es->s_first_meta_bg);
2237
2238	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_META_BG) ||
2239	    nr < first_meta_bg)
2240		return logical_sb_block + nr + 1;
2241	bg = sbi->s_desc_per_block * nr;
2242	if (ext4_bg_has_super(sb, bg))
2243		has_super = 1;
2244
2245	return (has_super + ext4_group_first_block_no(sb, bg));
2246}
2247
2248/**
2249 * ext4_get_stripe_size: Get the stripe size.
2250 * @sbi: In memory super block info
2251 *
2252 * If we have specified it via mount option, then
2253 * use the mount option value. If the value specified at mount time is
2254 * greater than the blocks per group use the super block value.
2255 * If the super block value is greater than blocks per group return 0.
2256 * Allocator needs it be less than blocks per group.
2257 *
2258 */
2259static unsigned long ext4_get_stripe_size(struct ext4_sb_info *sbi)
2260{
2261	unsigned long stride = le16_to_cpu(sbi->s_es->s_raid_stride);
2262	unsigned long stripe_width =
2263			le32_to_cpu(sbi->s_es->s_raid_stripe_width);
2264
2265	if (sbi->s_stripe && sbi->s_stripe <= sbi->s_blocks_per_group)
2266		return sbi->s_stripe;
2267
2268	if (stripe_width <= sbi->s_blocks_per_group)
2269		return stripe_width;
2270
2271	if (stride <= sbi->s_blocks_per_group)
2272		return stride;
2273
2274	return 0;
2275}
2276
2277/* sysfs supprt */
2278
2279struct ext4_attr {
2280	struct attribute attr;
2281	ssize_t (*show)(struct ext4_attr *, struct ext4_sb_info *, char *);
2282	ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
2283			 const char *, size_t);
2284	int offset;
2285};
2286
2287static int parse_strtoul(const char *buf,
2288		unsigned long max, unsigned long *value)
2289{
2290	char *endp;
2291
2292	*value = simple_strtoul(skip_spaces(buf), &endp, 0);
2293	endp = skip_spaces(endp);
2294	if (*endp || *value > max)
2295		return -EINVAL;
2296
2297	return 0;
2298}
2299
2300static ssize_t delayed_allocation_blocks_show(struct ext4_attr *a,
2301					      struct ext4_sb_info *sbi,
2302					      char *buf)
2303{
2304	return snprintf(buf, PAGE_SIZE, "%llu\n",
2305			(s64) percpu_counter_sum(&sbi->s_dirtyblocks_counter));
2306}
2307
2308static ssize_t session_write_kbytes_show(struct ext4_attr *a,
2309					 struct ext4_sb_info *sbi, char *buf)
2310{
2311	struct super_block *sb = sbi->s_buddy_cache->i_sb;
2312
2313	if (!sb->s_bdev->bd_part)
2314		return snprintf(buf, PAGE_SIZE, "0\n");
2315	return snprintf(buf, PAGE_SIZE, "%lu\n",
2316			(part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
2317			 sbi->s_sectors_written_start) >> 1);
2318}
2319
2320static ssize_t lifetime_write_kbytes_show(struct ext4_attr *a,
2321					  struct ext4_sb_info *sbi, char *buf)
2322{
2323	struct super_block *sb = sbi->s_buddy_cache->i_sb;
2324
2325	if (!sb->s_bdev->bd_part)
2326		return snprintf(buf, PAGE_SIZE, "0\n");
2327	return snprintf(buf, PAGE_SIZE, "%llu\n",
2328			(unsigned long long)(sbi->s_kbytes_written +
2329			((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
2330			  EXT4_SB(sb)->s_sectors_written_start) >> 1)));
2331}
2332
2333static ssize_t inode_readahead_blks_store(struct ext4_attr *a,
2334					  struct ext4_sb_info *sbi,
2335					  const char *buf, size_t count)
2336{
2337	unsigned long t;
2338
2339	if (parse_strtoul(buf, 0x40000000, &t))
2340		return -EINVAL;
2341
2342	if (!is_power_of_2(t))
2343		return -EINVAL;
2344
2345	sbi->s_inode_readahead_blks = t;
2346	return count;
2347}
2348
2349static ssize_t sbi_ui_show(struct ext4_attr *a,
2350			   struct ext4_sb_info *sbi, char *buf)
2351{
2352	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
2353
2354	return snprintf(buf, PAGE_SIZE, "%u\n", *ui);
2355}
2356
2357static ssize_t sbi_ui_store(struct ext4_attr *a,
2358			    struct ext4_sb_info *sbi,
2359			    const char *buf, size_t count)
2360{
2361	unsigned int *ui = (unsigned int *) (((char *) sbi) + a->offset);
2362	unsigned long t;
2363
2364	if (parse_strtoul(buf, 0xffffffff, &t))
2365		return -EINVAL;
2366	*ui = t;
2367	return count;
2368}
2369
2370#define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
2371static struct ext4_attr ext4_attr_##_name = {			\
2372	.attr = {.name = __stringify(_name), .mode = _mode },	\
2373	.show	= _show,					\
2374	.store	= _store,					\
2375	.offset = offsetof(struct ext4_sb_info, _elname),	\
2376}
2377#define EXT4_ATTR(name, mode, show, store) \
2378static struct ext4_attr ext4_attr_##name = __ATTR(name, mode, show, store)
2379
2380#define EXT4_RO_ATTR(name) EXT4_ATTR(name, 0444, name##_show, NULL)
2381#define EXT4_RW_ATTR(name) EXT4_ATTR(name, 0644, name##_show, name##_store)
2382#define EXT4_RW_ATTR_SBI_UI(name, elname)	\
2383	EXT4_ATTR_OFFSET(name, 0644, sbi_ui_show, sbi_ui_store, elname)
2384#define ATTR_LIST(name) &ext4_attr_##name.attr
2385
2386EXT4_RO_ATTR(delayed_allocation_blocks);
2387EXT4_RO_ATTR(session_write_kbytes);
2388EXT4_RO_ATTR(lifetime_write_kbytes);
2389EXT4_ATTR_OFFSET(inode_readahead_blks, 0644, sbi_ui_show,
2390		 inode_readahead_blks_store, s_inode_readahead_blks);
2391EXT4_RW_ATTR_SBI_UI(inode_goal, s_inode_goal);
2392EXT4_RW_ATTR_SBI_UI(mb_stats, s_mb_stats);
2393EXT4_RW_ATTR_SBI_UI(mb_max_to_scan, s_mb_max_to_scan);
2394EXT4_RW_ATTR_SBI_UI(mb_min_to_scan, s_mb_min_to_scan);
2395EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
2396EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
2397EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
2398EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
2399
2400static struct attribute *ext4_attrs[] = {
2401	ATTR_LIST(delayed_allocation_blocks),
2402	ATTR_LIST(session_write_kbytes),
2403	ATTR_LIST(lifetime_write_kbytes),
2404	ATTR_LIST(inode_readahead_blks),
2405	ATTR_LIST(inode_goal),
2406	ATTR_LIST(mb_stats),
2407	ATTR_LIST(mb_max_to_scan),
2408	ATTR_LIST(mb_min_to_scan),
2409	ATTR_LIST(mb_order2_req),
2410	ATTR_LIST(mb_stream_req),
2411	ATTR_LIST(mb_group_prealloc),
2412	ATTR_LIST(max_writeback_mb_bump),
2413	NULL,
2414};
2415
2416static ssize_t ext4_attr_show(struct kobject *kobj,
2417			      struct attribute *attr, char *buf)
2418{
2419	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2420						s_kobj);
2421	struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
2422
2423	return a->show ? a->show(a, sbi, buf) : 0;
2424}
2425
2426static ssize_t ext4_attr_store(struct kobject *kobj,
2427			       struct attribute *attr,
2428			       const char *buf, size_t len)
2429{
2430	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2431						s_kobj);
2432	struct ext4_attr *a = container_of(attr, struct ext4_attr, attr);
2433
2434	return a->store ? a->store(a, sbi, buf, len) : 0;
2435}
2436
2437static void ext4_sb_release(struct kobject *kobj)
2438{
2439	struct ext4_sb_info *sbi = container_of(kobj, struct ext4_sb_info,
2440						s_kobj);
2441	complete(&sbi->s_kobj_unregister);
2442}
2443
2444
2445static const struct sysfs_ops ext4_attr_ops = {
2446	.show	= ext4_attr_show,
2447	.store	= ext4_attr_store,
2448};
2449
2450static struct kobj_type ext4_ktype = {
2451	.default_attrs	= ext4_attrs,
2452	.sysfs_ops	= &ext4_attr_ops,
2453	.release	= ext4_sb_release,
2454};
2455
2456/*
2457 * Check whether this filesystem can be mounted based on
2458 * the features present and the RDONLY/RDWR mount requested.
2459 * Returns 1 if this filesystem can be mounted as requested,
2460 * 0 if it cannot be.
2461 */
2462static int ext4_feature_set_ok(struct super_block *sb, int readonly)
2463{
2464	if (EXT4_HAS_INCOMPAT_FEATURE(sb, ~EXT4_FEATURE_INCOMPAT_SUPP)) {
2465		ext4_msg(sb, KERN_ERR,
2466			"Couldn't mount because of "
2467			"unsupported optional features (%x)",
2468			(le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_incompat) &
2469			~EXT4_FEATURE_INCOMPAT_SUPP));
2470		return 0;
2471	}
2472
2473	if (readonly)
2474		return 1;
2475
2476	/* Check that feature set is OK for a read-write mount */
2477	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, ~EXT4_FEATURE_RO_COMPAT_SUPP)) {
2478		ext4_msg(sb, KERN_ERR, "couldn't mount RDWR because of "
2479			 "unsupported optional features (%x)",
2480			 (le32_to_cpu(EXT4_SB(sb)->s_es->s_feature_ro_compat) &
2481				~EXT4_FEATURE_RO_COMPAT_SUPP));
2482		return 0;
2483	}
2484	/*
2485	 * Large file size enabled file system can only be mounted
2486	 * read-write on 32-bit systems if kernel is built with CONFIG_LBDAF
2487	 */
2488	if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_HUGE_FILE)) {
2489		if (sizeof(blkcnt_t) < sizeof(u64)) {
2490			ext4_msg(sb, KERN_ERR, "Filesystem with huge files "
2491				 "cannot be mounted RDWR without "
2492				 "CONFIG_LBDAF");
2493			return 0;
2494		}
2495	}
2496	return 1;
2497}
2498
2499/*
2500 * This function is called once a day if we have errors logged
2501 * on the file system
2502 */
2503static void print_daily_error_info(unsigned long arg)
2504{
2505	struct super_block *sb = (struct super_block *) arg;
2506	struct ext4_sb_info *sbi;
2507	struct ext4_super_block *es;
2508
2509	sbi = EXT4_SB(sb);
2510	es = sbi->s_es;
2511
2512	if (es->s_error_count)
2513		ext4_msg(sb, KERN_NOTICE, "error count: %u",
2514			 le32_to_cpu(es->s_error_count));
2515	if (es->s_first_error_time) {
2516		printk(KERN_NOTICE "EXT4-fs (%s): initial error at %u: %.*s:%d",
2517		       sb->s_id, le32_to_cpu(es->s_first_error_time),
2518		       (int) sizeof(es->s_first_error_func),
2519		       es->s_first_error_func,
2520		       le32_to_cpu(es->s_first_error_line));
2521		if (es->s_first_error_ino)
2522			printk(": inode %u",
2523			       le32_to_cpu(es->s_first_error_ino));
2524		if (es->s_first_error_block)
2525			printk(": block %llu", (unsigned long long)
2526			       le64_to_cpu(es->s_first_error_block));
2527		printk("\n");
2528	}
2529	if (es->s_last_error_time) {
2530		printk(KERN_NOTICE "EXT4-fs (%s): last error at %u: %.*s:%d",
2531		       sb->s_id, le32_to_cpu(es->s_last_error_time),
2532		       (int) sizeof(es->s_last_error_func),
2533		       es->s_last_error_func,
2534		       le32_to_cpu(es->s_last_error_line));
2535		if (es->s_last_error_ino)
2536			printk(": inode %u",
2537			       le32_to_cpu(es->s_last_error_ino));
2538		if (es->s_last_error_block)
2539			printk(": block %llu", (unsigned long long)
2540			       le64_to_cpu(es->s_last_error_block));
2541		printk("\n");
2542	}
2543	mod_timer(&sbi->s_err_report, jiffies + 24*60*60*HZ);  /* Once a day */
2544}
2545
2546static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2547				__releases(kernel_lock)
2548				__acquires(kernel_lock)
2549{
2550	char *orig_data = kstrdup(data, GFP_KERNEL);
2551	struct buffer_head *bh;
2552	struct ext4_super_block *es = NULL;
2553	struct ext4_sb_info *sbi;
2554	ext4_fsblk_t block;
2555	ext4_fsblk_t sb_block = get_sb_block(&data);
2556	ext4_fsblk_t logical_sb_block;
2557	unsigned long offset = 0;
2558	unsigned long journal_devnum = 0;
2559	unsigned long def_mount_opts;
2560	struct inode *root;
2561	char *cp;
2562	const char *descr;
2563	int ret = -ENOMEM;
2564	int blocksize;
2565	unsigned int db_count;
2566	unsigned int i;
2567	int needs_recovery, has_huge_files;
2568	__u64 blocks_count;
2569	int err;
2570	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
2571
2572	sbi = kzalloc(sizeof(*sbi), GFP_KERNEL);
2573	if (!sbi)
2574		goto out_free_orig;
2575
2576	sbi->s_blockgroup_lock =
2577		kzalloc(sizeof(struct blockgroup_lock), GFP_KERNEL);
2578	if (!sbi->s_blockgroup_lock) {
2579		kfree(sbi);
2580		goto out_free_orig;
2581	}
2582	sb->s_fs_info = sbi;
2583	sbi->s_mount_opt = 0;
2584	sbi->s_resuid = EXT4_DEF_RESUID;
2585	sbi->s_resgid = EXT4_DEF_RESGID;
2586	sbi->s_inode_readahead_blks = EXT4_DEF_INODE_READAHEAD_BLKS;
2587	sbi->s_sb_block = sb_block;
2588	if (sb->s_bdev->bd_part)
2589		sbi->s_sectors_written_start =
2590			part_stat_read(sb->s_bdev->bd_part, sectors[1]);
2591
2592	unlock_kernel();
2593
2594	/* Cleanup superblock name */
2595	for (cp = sb->s_id; (cp = strchr(cp, '/'));)
2596		*cp = '!';
2597
2598	ret = -EINVAL;
2599	blocksize = sb_min_blocksize(sb, EXT4_MIN_BLOCK_SIZE);
2600	if (!blocksize) {
2601		ext4_msg(sb, KERN_ERR, "unable to set blocksize");
2602		goto out_fail;
2603	}
2604
2605	/*
2606	 * The ext4 superblock will not be buffer aligned for other than 1kB
2607	 * block sizes.  We need to calculate the offset from buffer start.
2608	 */
2609	if (blocksize != EXT4_MIN_BLOCK_SIZE) {
2610		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
2611		offset = do_div(logical_sb_block, blocksize);
2612	} else {
2613		logical_sb_block = sb_block;
2614	}
2615
2616	if (!(bh = sb_bread(sb, logical_sb_block))) {
2617		ext4_msg(sb, KERN_ERR, "unable to read superblock");
2618		goto out_fail;
2619	}
2620	/*
2621	 * Note: s_es must be initialized as soon as possible because
2622	 *       some ext4 macro-instructions depend on its value
2623	 */
2624	es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
2625	sbi->s_es = es;
2626	sb->s_magic = le16_to_cpu(es->s_magic);
2627	if (sb->s_magic != EXT4_SUPER_MAGIC)
2628		goto cantfind_ext4;
2629	sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
2630
2631	/* Set defaults before we parse the mount options */
2632	def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
2633	if (def_mount_opts & EXT4_DEFM_DEBUG)
2634		set_opt(sbi->s_mount_opt, DEBUG);
2635	if (def_mount_opts & EXT4_DEFM_BSDGROUPS) {
2636		ext4_msg(sb, KERN_WARNING, deprecated_msg, "bsdgroups",
2637			"2.6.38");
2638		set_opt(sbi->s_mount_opt, GRPID);
2639	}
2640	if (def_mount_opts & EXT4_DEFM_UID16)
2641		set_opt(sbi->s_mount_opt, NO_UID32);
2642#ifdef CONFIG_EXT4_FS_XATTR
2643	if (def_mount_opts & EXT4_DEFM_XATTR_USER)
2644		set_opt(sbi->s_mount_opt, XATTR_USER);
2645#endif
2646#ifdef CONFIG_EXT4_FS_POSIX_ACL
2647	if (def_mount_opts & EXT4_DEFM_ACL)
2648		set_opt(sbi->s_mount_opt, POSIX_ACL);
2649#endif
2650	if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_DATA)
2651		set_opt(sbi->s_mount_opt, JOURNAL_DATA);
2652	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_ORDERED)
2653		set_opt(sbi->s_mount_opt, ORDERED_DATA);
2654	else if ((def_mount_opts & EXT4_DEFM_JMODE) == EXT4_DEFM_JMODE_WBACK)
2655		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2656
2657	if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_PANIC)
2658		set_opt(sbi->s_mount_opt, ERRORS_PANIC);
2659	else if (le16_to_cpu(sbi->s_es->s_errors) == EXT4_ERRORS_CONTINUE)
2660		set_opt(sbi->s_mount_opt, ERRORS_CONT);
2661	else
2662		set_opt(sbi->s_mount_opt, ERRORS_RO);
2663	if (def_mount_opts & EXT4_DEFM_BLOCK_VALIDITY)
2664		set_opt(sbi->s_mount_opt, BLOCK_VALIDITY);
2665	if (def_mount_opts & EXT4_DEFM_DISCARD)
2666		set_opt(sbi->s_mount_opt, DISCARD);
2667
2668	sbi->s_resuid = le16_to_cpu(es->s_def_resuid);
2669	sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
2670	sbi->s_commit_interval = JBD2_DEFAULT_MAX_COMMIT_AGE * HZ;
2671	sbi->s_min_batch_time = EXT4_DEF_MIN_BATCH_TIME;
2672	sbi->s_max_batch_time = EXT4_DEF_MAX_BATCH_TIME;
2673
2674	if ((def_mount_opts & EXT4_DEFM_NOBARRIER) == 0)
2675		set_opt(sbi->s_mount_opt, BARRIER);
2676
2677	/*
2678	 * enable delayed allocation by default
2679	 * Use -o nodelalloc to turn it off
2680	 */
2681	if (!IS_EXT3_SB(sb) &&
2682	    ((def_mount_opts & EXT4_DEFM_NODELALLOC) == 0))
2683		set_opt(sbi->s_mount_opt, DELALLOC);
2684
2685	if (!parse_options((char *) sbi->s_es->s_mount_opts, sb,
2686			   &journal_devnum, &journal_ioprio, NULL, 0)) {
2687		ext4_msg(sb, KERN_WARNING,
2688			 "failed to parse options in superblock: %s",
2689			 sbi->s_es->s_mount_opts);
2690	}
2691	if (!parse_options((char *) data, sb, &journal_devnum,
2692			   &journal_ioprio, NULL, 0))
2693		goto failed_mount;
2694
2695	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
2696		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
2697
2698	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV &&
2699	    (EXT4_HAS_COMPAT_FEATURE(sb, ~0U) ||
2700	     EXT4_HAS_RO_COMPAT_FEATURE(sb, ~0U) ||
2701	     EXT4_HAS_INCOMPAT_FEATURE(sb, ~0U)))
2702		ext4_msg(sb, KERN_WARNING,
2703		       "feature flags set on rev 0 fs, "
2704		       "running e2fsck is recommended");
2705
2706	/*
2707	 * Check feature flags regardless of the revision level, since we
2708	 * previously didn't change the revision level when setting the flags,
2709	 * so there is a chance incompat flags are set on a rev 0 filesystem.
2710	 */
2711	if (!ext4_feature_set_ok(sb, (sb->s_flags & MS_RDONLY)))
2712		goto failed_mount;
2713
2714	blocksize = BLOCK_SIZE << le32_to_cpu(es->s_log_block_size);
2715
2716	if (blocksize < EXT4_MIN_BLOCK_SIZE ||
2717	    blocksize > EXT4_MAX_BLOCK_SIZE) {
2718		ext4_msg(sb, KERN_ERR,
2719		       "Unsupported filesystem blocksize %d", blocksize);
2720		goto failed_mount;
2721	}
2722
2723	if (sb->s_blocksize != blocksize) {
2724		/* Validate the filesystem blocksize */
2725		if (!sb_set_blocksize(sb, blocksize)) {
2726			ext4_msg(sb, KERN_ERR, "bad block size %d",
2727					blocksize);
2728			goto failed_mount;
2729		}
2730
2731		brelse(bh);
2732		logical_sb_block = sb_block * EXT4_MIN_BLOCK_SIZE;
2733		offset = do_div(logical_sb_block, blocksize);
2734		bh = sb_bread(sb, logical_sb_block);
2735		if (!bh) {
2736			ext4_msg(sb, KERN_ERR,
2737			       "Can't read superblock on 2nd try");
2738			goto failed_mount;
2739		}
2740		es = (struct ext4_super_block *)(((char *)bh->b_data) + offset);
2741		sbi->s_es = es;
2742		if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
2743			ext4_msg(sb, KERN_ERR,
2744			       "Magic mismatch, very weird!");
2745			goto failed_mount;
2746		}
2747	}
2748
2749	has_huge_files = EXT4_HAS_RO_COMPAT_FEATURE(sb,
2750				EXT4_FEATURE_RO_COMPAT_HUGE_FILE);
2751	sbi->s_bitmap_maxbytes = ext4_max_bitmap_size(sb->s_blocksize_bits,
2752						      has_huge_files);
2753	sb->s_maxbytes = ext4_max_size(sb->s_blocksize_bits, has_huge_files);
2754
2755	if (le32_to_cpu(es->s_rev_level) == EXT4_GOOD_OLD_REV) {
2756		sbi->s_inode_size = EXT4_GOOD_OLD_INODE_SIZE;
2757		sbi->s_first_ino = EXT4_GOOD_OLD_FIRST_INO;
2758	} else {
2759		sbi->s_inode_size = le16_to_cpu(es->s_inode_size);
2760		sbi->s_first_ino = le32_to_cpu(es->s_first_ino);
2761		if ((sbi->s_inode_size < EXT4_GOOD_OLD_INODE_SIZE) ||
2762		    (!is_power_of_2(sbi->s_inode_size)) ||
2763		    (sbi->s_inode_size > blocksize)) {
2764			ext4_msg(sb, KERN_ERR,
2765			       "unsupported inode size: %d",
2766			       sbi->s_inode_size);
2767			goto failed_mount;
2768		}
2769		if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE)
2770			sb->s_time_gran = 1 << (EXT4_EPOCH_BITS - 2);
2771	}
2772
2773	sbi->s_desc_size = le16_to_cpu(es->s_desc_size);
2774	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT)) {
2775		if (sbi->s_desc_size < EXT4_MIN_DESC_SIZE_64BIT ||
2776		    sbi->s_desc_size > EXT4_MAX_DESC_SIZE ||
2777		    !is_power_of_2(sbi->s_desc_size)) {
2778			ext4_msg(sb, KERN_ERR,
2779			       "unsupported descriptor size %lu",
2780			       sbi->s_desc_size);
2781			goto failed_mount;
2782		}
2783	} else
2784		sbi->s_desc_size = EXT4_MIN_DESC_SIZE;
2785
2786	sbi->s_blocks_per_group = le32_to_cpu(es->s_blocks_per_group);
2787	sbi->s_inodes_per_group = le32_to_cpu(es->s_inodes_per_group);
2788	if (EXT4_INODE_SIZE(sb) == 0 || EXT4_INODES_PER_GROUP(sb) == 0)
2789		goto cantfind_ext4;
2790
2791	sbi->s_inodes_per_block = blocksize / EXT4_INODE_SIZE(sb);
2792	if (sbi->s_inodes_per_block == 0)
2793		goto cantfind_ext4;
2794	sbi->s_itb_per_group = sbi->s_inodes_per_group /
2795					sbi->s_inodes_per_block;
2796	sbi->s_desc_per_block = blocksize / EXT4_DESC_SIZE(sb);
2797	sbi->s_sbh = bh;
2798	sbi->s_mount_state = le16_to_cpu(es->s_state);
2799	sbi->s_addr_per_block_bits = ilog2(EXT4_ADDR_PER_BLOCK(sb));
2800	sbi->s_desc_per_block_bits = ilog2(EXT4_DESC_PER_BLOCK(sb));
2801
2802	for (i = 0; i < 4; i++)
2803		sbi->s_hash_seed[i] = le32_to_cpu(es->s_hash_seed[i]);
2804	sbi->s_def_hash_version = es->s_def_hash_version;
2805	i = le32_to_cpu(es->s_flags);
2806	if (i & EXT2_FLAGS_UNSIGNED_HASH)
2807		sbi->s_hash_unsigned = 3;
2808	else if ((i & EXT2_FLAGS_SIGNED_HASH) == 0) {
2809#ifdef __CHAR_UNSIGNED__
2810		es->s_flags |= cpu_to_le32(EXT2_FLAGS_UNSIGNED_HASH);
2811		sbi->s_hash_unsigned = 3;
2812#else
2813		es->s_flags |= cpu_to_le32(EXT2_FLAGS_SIGNED_HASH);
2814#endif
2815		sb->s_dirt = 1;
2816	}
2817
2818	if (sbi->s_blocks_per_group > blocksize * 8) {
2819		ext4_msg(sb, KERN_ERR,
2820		       "#blocks per group too big: %lu",
2821		       sbi->s_blocks_per_group);
2822		goto failed_mount;
2823	}
2824	if (sbi->s_inodes_per_group > blocksize * 8) {
2825		ext4_msg(sb, KERN_ERR,
2826		       "#inodes per group too big: %lu",
2827		       sbi->s_inodes_per_group);
2828		goto failed_mount;
2829	}
2830
2831	/*
2832	 * Test whether we have more sectors than will fit in sector_t,
2833	 * and whether the max offset is addressable by the page cache.
2834	 */
2835	if ((ext4_blocks_count(es) >
2836	     (sector_t)(~0ULL) >> (sb->s_blocksize_bits - 9)) ||
2837	    (ext4_blocks_count(es) >
2838	     (pgoff_t)(~0ULL) >> (PAGE_CACHE_SHIFT - sb->s_blocksize_bits))) {
2839		ext4_msg(sb, KERN_ERR, "filesystem"
2840			 " too large to mount safely on this system");
2841		if (sizeof(sector_t) < 8)
2842			ext4_msg(sb, KERN_WARNING, "CONFIG_LBDAF not enabled");
2843		ret = -EFBIG;
2844		goto failed_mount;
2845	}
2846
2847	if (EXT4_BLOCKS_PER_GROUP(sb) == 0)
2848		goto cantfind_ext4;
2849
2850	/* check blocks count against device size */
2851	blocks_count = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;
2852	if (blocks_count && ext4_blocks_count(es) > blocks_count) {
2853		ext4_msg(sb, KERN_WARNING, "bad geometry: block count %llu "
2854		       "exceeds size of device (%llu blocks)",
2855		       ext4_blocks_count(es), blocks_count);
2856		goto failed_mount;
2857	}
2858
2859	/*
2860	 * It makes no sense for the first data block to be beyond the end
2861	 * of the filesystem.
2862	 */
2863	if (le32_to_cpu(es->s_first_data_block) >= ext4_blocks_count(es)) {
2864                ext4_msg(sb, KERN_WARNING, "bad geometry: first data"
2865			 "block %u is beyond end of filesystem (%llu)",
2866			 le32_to_cpu(es->s_first_data_block),
2867			 ext4_blocks_count(es));
2868		goto failed_mount;
2869	}
2870	blocks_count = (ext4_blocks_count(es) -
2871			le32_to_cpu(es->s_first_data_block) +
2872			EXT4_BLOCKS_PER_GROUP(sb) - 1);
2873	do_div(blocks_count, EXT4_BLOCKS_PER_GROUP(sb));
2874	if (blocks_count > ((uint64_t)1<<32) - EXT4_DESC_PER_BLOCK(sb)) {
2875		ext4_msg(sb, KERN_WARNING, "groups count too large: %u "
2876		       "(block count %llu, first data block %u, "
2877		       "blocks per group %lu)", sbi->s_groups_count,
2878		       ext4_blocks_count(es),
2879		       le32_to_cpu(es->s_first_data_block),
2880		       EXT4_BLOCKS_PER_GROUP(sb));
2881		goto failed_mount;
2882	}
2883	sbi->s_groups_count = blocks_count;
2884	sbi->s_blockfile_groups = min_t(ext4_group_t, sbi->s_groups_count,
2885			(EXT4_MAX_BLOCK_FILE_PHYS / EXT4_BLOCKS_PER_GROUP(sb)));
2886	db_count = (sbi->s_groups_count + EXT4_DESC_PER_BLOCK(sb) - 1) /
2887		   EXT4_DESC_PER_BLOCK(sb);
2888	sbi->s_group_desc = kmalloc(db_count * sizeof(struct buffer_head *),
2889				    GFP_KERNEL);
2890	if (sbi->s_group_desc == NULL) {
2891		ext4_msg(sb, KERN_ERR, "not enough memory");
2892		goto failed_mount;
2893	}
2894
2895#ifdef CONFIG_PROC_FS
2896	if (ext4_proc_root)
2897		sbi->s_proc = proc_mkdir(sb->s_id, ext4_proc_root);
2898#endif
2899
2900	bgl_lock_init(sbi->s_blockgroup_lock);
2901
2902	for (i = 0; i < db_count; i++) {
2903		block = descriptor_loc(sb, logical_sb_block, i);
2904		sbi->s_group_desc[i] = sb_bread(sb, block);
2905		if (!sbi->s_group_desc[i]) {
2906			ext4_msg(sb, KERN_ERR,
2907			       "can't read group descriptor %d", i);
2908			db_count = i;
2909			goto failed_mount2;
2910		}
2911	}
2912	if (!ext4_check_descriptors(sb)) {
2913		ext4_msg(sb, KERN_ERR, "group descriptors corrupted!");
2914		goto failed_mount2;
2915	}
2916	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG))
2917		if (!ext4_fill_flex_info(sb)) {
2918			ext4_msg(sb, KERN_ERR,
2919			       "unable to initialize "
2920			       "flex_bg meta info!");
2921			goto failed_mount2;
2922		}
2923
2924	sbi->s_gdb_count = db_count;
2925	get_random_bytes(&sbi->s_next_generation, sizeof(u32));
2926	spin_lock_init(&sbi->s_next_gen_lock);
2927
2928	sbi->s_stripe = ext4_get_stripe_size(sbi);
2929	sbi->s_max_writeback_mb_bump = 128;
2930
2931	/*
2932	 * set up enough so that it can read an inode
2933	 */
2934	if (!test_opt(sb, NOLOAD) &&
2935	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL))
2936		sb->s_op = &ext4_sops;
2937	else
2938		sb->s_op = &ext4_nojournal_sops;
2939	sb->s_export_op = &ext4_export_ops;
2940	sb->s_xattr = ext4_xattr_handlers;
2941#ifdef CONFIG_QUOTA
2942	sb->s_qcop = &ext4_qctl_operations;
2943	sb->dq_op = &ext4_quota_operations;
2944#endif
2945	INIT_LIST_HEAD(&sbi->s_orphan); /* unlinked but open files */
2946	mutex_init(&sbi->s_orphan_lock);
2947	mutex_init(&sbi->s_resize_lock);
2948
2949	sb->s_root = NULL;
2950
2951	needs_recovery = (es->s_last_orphan != 0 ||
2952			  EXT4_HAS_INCOMPAT_FEATURE(sb,
2953				    EXT4_FEATURE_INCOMPAT_RECOVER));
2954
2955	/*
2956	 * The first inode we look at is the journal inode.  Don't try
2957	 * root first: it may be modified in the journal!
2958	 */
2959	if (!test_opt(sb, NOLOAD) &&
2960	    EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
2961		if (ext4_load_journal(sb, es, journal_devnum))
2962			goto failed_mount3;
2963	} else if (test_opt(sb, NOLOAD) && !(sb->s_flags & MS_RDONLY) &&
2964	      EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
2965		ext4_msg(sb, KERN_ERR, "required journal recovery "
2966		       "suppressed and not mounted read-only");
2967		goto failed_mount_wq;
2968	} else {
2969		clear_opt(sbi->s_mount_opt, DATA_FLAGS);
2970		set_opt(sbi->s_mount_opt, WRITEBACK_DATA);
2971		sbi->s_journal = NULL;
2972		needs_recovery = 0;
2973		goto no_journal;
2974	}
2975
2976	if (ext4_blocks_count(es) > 0xffffffffULL &&
2977	    !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
2978				       JBD2_FEATURE_INCOMPAT_64BIT)) {
2979		ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
2980		goto failed_mount_wq;
2981	}
2982
2983	if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
2984		jbd2_journal_set_features(sbi->s_journal,
2985				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2986				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2987	} else if (test_opt(sb, JOURNAL_CHECKSUM)) {
2988		jbd2_journal_set_features(sbi->s_journal,
2989				JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
2990		jbd2_journal_clear_features(sbi->s_journal, 0, 0,
2991				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2992	} else {
2993		jbd2_journal_clear_features(sbi->s_journal,
2994				JBD2_FEATURE_COMPAT_CHECKSUM, 0,
2995				JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
2996	}
2997
2998	/* We have now updated the journal if required, so we can
2999	 * validate the data journaling mode. */
3000	switch (test_opt(sb, DATA_FLAGS)) {
3001	case 0:
3002		/* No mode set, assume a default based on the journal
3003		 * capabilities: ORDERED_DATA if the journal can
3004		 * cope, else JOURNAL_DATA
3005		 */
3006		if (jbd2_journal_check_available_features
3007		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE))
3008			set_opt(sbi->s_mount_opt, ORDERED_DATA);
3009		else
3010			set_opt(sbi->s_mount_opt, JOURNAL_DATA);
3011		break;
3012
3013	case EXT4_MOUNT_ORDERED_DATA:
3014	case EXT4_MOUNT_WRITEBACK_DATA:
3015		if (!jbd2_journal_check_available_features
3016		    (sbi->s_journal, 0, 0, JBD2_FEATURE_INCOMPAT_REVOKE)) {
3017			ext4_msg(sb, KERN_ERR, "Journal does not support "
3018			       "requested data journaling mode");
3019			goto failed_mount_wq;
3020		}
3021	default:
3022		break;
3023	}
3024	set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3025
3026no_journal:
3027	err = percpu_counter_init(&sbi->s_freeblocks_counter,
3028				  ext4_count_free_blocks(sb));
3029	if (!err)
3030		err = percpu_counter_init(&sbi->s_freeinodes_counter,
3031					  ext4_count_free_inodes(sb));
3032	if (!err)
3033		err = percpu_counter_init(&sbi->s_dirs_counter,
3034					  ext4_count_dirs(sb));
3035	if (!err)
3036		err = percpu_counter_init(&sbi->s_dirtyblocks_counter, 0);
3037	if (err) {
3038		ext4_msg(sb, KERN_ERR, "insufficient memory");
3039		goto failed_mount_wq;
3040	}
3041
3042	EXT4_SB(sb)->dio_unwritten_wq = create_workqueue("ext4-dio-unwritten");
3043	if (!EXT4_SB(sb)->dio_unwritten_wq) {
3044		printk(KERN_ERR "EXT4-fs: failed to create DIO workqueue\n");
3045		goto failed_mount_wq;
3046	}
3047
3048	/*
3049	 * The jbd2_journal_load will have done any necessary log recovery,
3050	 * so we can safely mount the rest of the filesystem now.
3051	 */
3052
3053	root = ext4_iget(sb, EXT4_ROOT_INO);
3054	if (IS_ERR(root)) {
3055		ext4_msg(sb, KERN_ERR, "get root inode failed");
3056		ret = PTR_ERR(root);
3057		goto failed_mount4;
3058	}
3059	if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
3060		iput(root);
3061		ext4_msg(sb, KERN_ERR, "corrupt root inode, run e2fsck");
3062		goto failed_mount4;
3063	}
3064	sb->s_root = d_alloc_root(root);
3065	if (!sb->s_root) {
3066		ext4_msg(sb, KERN_ERR, "get root dentry failed");
3067		iput(root);
3068		ret = -ENOMEM;
3069		goto failed_mount4;
3070	}
3071
3072	ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY);
3073
3074	/* determine the minimum size of new large inodes, if present */
3075	if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
3076		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
3077						     EXT4_GOOD_OLD_INODE_SIZE;
3078		if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3079				       EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE)) {
3080			if (sbi->s_want_extra_isize <
3081			    le16_to_cpu(es->s_want_extra_isize))
3082				sbi->s_want_extra_isize =
3083					le16_to_cpu(es->s_want_extra_isize);
3084			if (sbi->s_want_extra_isize <
3085			    le16_to_cpu(es->s_min_extra_isize))
3086				sbi->s_want_extra_isize =
3087					le16_to_cpu(es->s_min_extra_isize);
3088		}
3089	}
3090	/* Check if enough inode space is available */
3091	if (EXT4_GOOD_OLD_INODE_SIZE + sbi->s_want_extra_isize >
3092							sbi->s_inode_size) {
3093		sbi->s_want_extra_isize = sizeof(struct ext4_inode) -
3094						       EXT4_GOOD_OLD_INODE_SIZE;
3095		ext4_msg(sb, KERN_INFO, "required extra inode space not"
3096			 "available");
3097	}
3098
3099	if (test_opt(sb, DELALLOC) &&
3100	    (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)) {
3101		ext4_msg(sb, KERN_WARNING, "Ignoring delalloc option - "
3102			 "requested data journaling mode");
3103		clear_opt(sbi->s_mount_opt, DELALLOC);
3104	}
3105	if (test_opt(sb, DIOREAD_NOLOCK)) {
3106		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA) {
3107			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3108				"option - requested data journaling mode");
3109			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
3110		}
3111		if (sb->s_blocksize < PAGE_SIZE) {
3112			ext4_msg(sb, KERN_WARNING, "Ignoring dioread_nolock "
3113				"option - block size is too small");
3114			clear_opt(sbi->s_mount_opt, DIOREAD_NOLOCK);
3115		}
3116	}
3117
3118	err = ext4_setup_system_zone(sb);
3119	if (err) {
3120		ext4_msg(sb, KERN_ERR, "failed to initialize system "
3121			 "zone (%d)", err);
3122		goto failed_mount4;
3123	}
3124
3125	ext4_ext_init(sb);
3126	err = ext4_mb_init(sb, needs_recovery);
3127	if (err) {
3128		ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
3129			 err);
3130		goto failed_mount4;
3131	}
3132
3133	sbi->s_kobj.kset = ext4_kset;
3134	init_completion(&sbi->s_kobj_unregister);
3135	err = kobject_init_and_add(&sbi->s_kobj, &ext4_ktype, NULL,
3136				   "%s", sb->s_id);
3137	if (err) {
3138		ext4_mb_release(sb);
3139		ext4_ext_release(sb);
3140		goto failed_mount4;
3141	};
3142
3143	EXT4_SB(sb)->s_mount_state |= EXT4_ORPHAN_FS;
3144	ext4_orphan_cleanup(sb, es);
3145	EXT4_SB(sb)->s_mount_state &= ~EXT4_ORPHAN_FS;
3146	if (needs_recovery) {
3147		ext4_msg(sb, KERN_INFO, "recovery complete");
3148		ext4_mark_recovery_complete(sb, es);
3149	}
3150	if (EXT4_SB(sb)->s_journal) {
3151		if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_JOURNAL_DATA)
3152			descr = " journalled data mode";
3153		else if (test_opt(sb, DATA_FLAGS) == EXT4_MOUNT_ORDERED_DATA)
3154			descr = " ordered data mode";
3155		else
3156			descr = " writeback data mode";
3157	} else
3158		descr = "out journal";
3159
3160	ext4_msg(sb, KERN_INFO, "mounted filesystem with%s. "
3161		 "Opts: %s%s%s", descr, sbi->s_es->s_mount_opts,
3162		 *sbi->s_es->s_mount_opts ? "; " : "", orig_data);
3163
3164	init_timer(&sbi->s_err_report);
3165	sbi->s_err_report.function = print_daily_error_info;
3166	sbi->s_err_report.data = (unsigned long) sb;
3167	if (es->s_error_count)
3168		mod_timer(&sbi->s_err_report, jiffies + 300*HZ); /* 5 minutes */
3169
3170	lock_kernel();
3171	kfree(orig_data);
3172	return 0;
3173
3174cantfind_ext4:
3175	if (!silent)
3176		ext4_msg(sb, KERN_ERR, "VFS: Can't find ext4 filesystem");
3177	goto failed_mount;
3178
3179failed_mount4:
3180	ext4_msg(sb, KERN_ERR, "mount failed");
3181	destroy_workqueue(EXT4_SB(sb)->dio_unwritten_wq);
3182failed_mount_wq:
3183	ext4_release_system_zone(sb);
3184	if (sbi->s_journal) {
3185		jbd2_journal_destroy(sbi->s_journal);
3186		sbi->s_journal = NULL;
3187	}
3188	percpu_counter_destroy(&sbi->s_freeblocks_counter);
3189	percpu_counter_destroy(&sbi->s_freeinodes_counter);
3190	percpu_counter_destroy(&sbi->s_dirs_counter);
3191	percpu_counter_destroy(&sbi->s_dirtyblocks_counter);
3192failed_mount3:
3193	if (sbi->s_flex_groups) {
3194		if (is_vmalloc_addr(sbi->s_flex_groups))
3195			vfree(sbi->s_flex_groups);
3196		else
3197			kfree(sbi->s_flex_groups);
3198	}
3199failed_mount2:
3200	for (i = 0; i < db_count; i++)
3201		brelse(sbi->s_group_desc[i]);
3202	kfree(sbi->s_group_desc);
3203failed_mount:
3204	if (sbi->s_proc) {
3205		remove_proc_entry(sb->s_id, ext4_proc_root);
3206	}
3207#ifdef CONFIG_QUOTA
3208	for (i = 0; i < MAXQUOTAS; i++)
3209		kfree(sbi->s_qf_names[i]);
3210#endif
3211	ext4_blkdev_remove(sbi);
3212	brelse(bh);
3213out_fail:
3214	sb->s_fs_info = NULL;
3215	kfree(sbi->s_blockgroup_lock);
3216	kfree(sbi);
3217	lock_kernel();
3218out_free_orig:
3219	kfree(orig_data);
3220	return ret;
3221}
3222
3223/*
3224 * Setup any per-fs journal parameters now.  We'll do this both on
3225 * initial mount, once the journal has been initialised but before we've
3226 * done any recovery; and again on any subsequent remount.
3227 */
3228static void ext4_init_journal_params(struct super_block *sb, journal_t *journal)
3229{
3230	struct ext4_sb_info *sbi = EXT4_SB(sb);
3231
3232	journal->j_commit_interval = sbi->s_commit_interval;
3233	journal->j_min_batch_time = sbi->s_min_batch_time;
3234	journal->j_max_batch_time = sbi->s_max_batch_time;
3235
3236	write_lock(&journal->j_state_lock);
3237	if (test_opt(sb, BARRIER))
3238		journal->j_flags |= JBD2_BARRIER;
3239	else
3240		journal->j_flags &= ~JBD2_BARRIER;
3241	if (test_opt(sb, DATA_ERR_ABORT))
3242		journal->j_flags |= JBD2_ABORT_ON_SYNCDATA_ERR;
3243	else
3244		journal->j_flags &= ~JBD2_ABORT_ON_SYNCDATA_ERR;
3245	write_unlock(&journal->j_state_lock);
3246}
3247
3248static journal_t *ext4_get_journal(struct super_block *sb,
3249				   unsigned int journal_inum)
3250{
3251	struct inode *journal_inode;
3252	journal_t *journal;
3253
3254	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3255
3256	/* First, test for the existence of a valid inode on disk.  Bad
3257	 * things happen if we iget() an unused inode, as the subsequent
3258	 * iput() will try to delete it. */
3259
3260	journal_inode = ext4_iget(sb, journal_inum);
3261	if (IS_ERR(journal_inode)) {
3262		ext4_msg(sb, KERN_ERR, "no journal found");
3263		return NULL;
3264	}
3265	if (!journal_inode->i_nlink) {
3266		make_bad_inode(journal_inode);
3267		iput(journal_inode);
3268		ext4_msg(sb, KERN_ERR, "journal inode is deleted");
3269		return NULL;
3270	}
3271
3272	jbd_debug(2, "Journal inode found at %p: %lld bytes\n",
3273		  journal_inode, journal_inode->i_size);
3274	if (!S_ISREG(journal_inode->i_mode)) {
3275		ext4_msg(sb, KERN_ERR, "invalid journal inode");
3276		iput(journal_inode);
3277		return NULL;
3278	}
3279
3280	journal = jbd2_journal_init_inode(journal_inode);
3281	if (!journal) {
3282		ext4_msg(sb, KERN_ERR, "Could not load journal inode");
3283		iput(journal_inode);
3284		return NULL;
3285	}
3286	journal->j_private = sb;
3287	ext4_init_journal_params(sb, journal);
3288	return journal;
3289}
3290
3291static journal_t *ext4_get_dev_journal(struct super_block *sb,
3292				       dev_t j_dev)
3293{
3294	struct buffer_head *bh;
3295	journal_t *journal;
3296	ext4_fsblk_t start;
3297	ext4_fsblk_t len;
3298	int hblock, blocksize;
3299	ext4_fsblk_t sb_block;
3300	unsigned long offset;
3301	struct ext4_super_block *es;
3302	struct block_device *bdev;
3303
3304	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3305
3306	bdev = ext4_blkdev_get(j_dev, sb);
3307	if (bdev == NULL)
3308		return NULL;
3309
3310	if (bd_claim(bdev, sb)) {
3311		ext4_msg(sb, KERN_ERR,
3312			"failed to claim external journal device");
3313		blkdev_put(bdev, FMODE_READ|FMODE_WRITE);
3314		return NULL;
3315	}
3316
3317	blocksize = sb->s_blocksize;
3318	hblock = bdev_logical_block_size(bdev);
3319	if (blocksize < hblock) {
3320		ext4_msg(sb, KERN_ERR,
3321			"blocksize too small for journal device");
3322		goto out_bdev;
3323	}
3324
3325	sb_block = EXT4_MIN_BLOCK_SIZE / blocksize;
3326	offset = EXT4_MIN_BLOCK_SIZE % blocksize;
3327	set_blocksize(bdev, blocksize);
3328	if (!(bh = __bread(bdev, sb_block, blocksize))) {
3329		ext4_msg(sb, KERN_ERR, "couldn't read superblock of "
3330		       "external journal");
3331		goto out_bdev;
3332	}
3333
3334	es = (struct ext4_super_block *) (((char *)bh->b_data) + offset);
3335	if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
3336	    !(le32_to_cpu(es->s_feature_incompat) &
3337	      EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
3338		ext4_msg(sb, KERN_ERR, "external journal has "
3339					"bad superblock");
3340		brelse(bh);
3341		goto out_bdev;
3342	}
3343
3344	if (memcmp(EXT4_SB(sb)->s_es->s_journal_uuid, es->s_uuid, 16)) {
3345		ext4_msg(sb, KERN_ERR, "journal UUID does not match");
3346		brelse(bh);
3347		goto out_bdev;
3348	}
3349
3350	len = ext4_blocks_count(es);
3351	start = sb_block + 1;
3352	brelse(bh);	/* we're done with the superblock */
3353
3354	journal = jbd2_journal_init_dev(bdev, sb->s_bdev,
3355					start, len, blocksize);
3356	if (!journal) {
3357		ext4_msg(sb, KERN_ERR, "failed to create device journal");
3358		goto out_bdev;
3359	}
3360	journal->j_private = sb;
3361	ll_rw_block(READ, 1, &journal->j_sb_buffer);
3362	wait_on_buffer(journal->j_sb_buffer);
3363	if (!buffer_uptodate(journal->j_sb_buffer)) {
3364		ext4_msg(sb, KERN_ERR, "I/O error on journal device");
3365		goto out_journal;
3366	}
3367	if (be32_to_cpu(journal->j_superblock->s_nr_users) != 1) {
3368		ext4_msg(sb, KERN_ERR, "External journal has more than one "
3369					"user (unsupported) - %d",
3370			be32_to_cpu(journal->j_superblock->s_nr_users));
3371		goto out_journal;
3372	}
3373	EXT4_SB(sb)->journal_bdev = bdev;
3374	ext4_init_journal_params(sb, journal);
3375	return journal;
3376
3377out_journal:
3378	jbd2_journal_destroy(journal);
3379out_bdev:
3380	ext4_blkdev_put(bdev);
3381	return NULL;
3382}
3383
3384static int ext4_load_journal(struct super_block *sb,
3385			     struct ext4_super_block *es,
3386			     unsigned long journal_devnum)
3387{
3388	journal_t *journal;
3389	unsigned int journal_inum = le32_to_cpu(es->s_journal_inum);
3390	dev_t journal_dev;
3391	int err = 0;
3392	int really_read_only;
3393
3394	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3395
3396	if (journal_devnum &&
3397	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
3398		ext4_msg(sb, KERN_INFO, "external journal device major/minor "
3399			"numbers have changed");
3400		journal_dev = new_decode_dev(journal_devnum);
3401	} else
3402		journal_dev = new_decode_dev(le32_to_cpu(es->s_journal_dev));
3403
3404	really_read_only = bdev_read_only(sb->s_bdev);
3405
3406	/*
3407	 * Are we loading a blank journal or performing recovery after a
3408	 * crash?  For recovery, we need to check in advance whether we
3409	 * can get read-write access to the device.
3410	 */
3411	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER)) {
3412		if (sb->s_flags & MS_RDONLY) {
3413			ext4_msg(sb, KERN_INFO, "INFO: recovery "
3414					"required on readonly filesystem");
3415			if (really_read_only) {
3416				ext4_msg(sb, KERN_ERR, "write access "
3417					"unavailable, cannot proceed");
3418				return -EROFS;
3419			}
3420			ext4_msg(sb, KERN_INFO, "write access will "
3421			       "be enabled during recovery");
3422		}
3423	}
3424
3425	if (journal_inum && journal_dev) {
3426		ext4_msg(sb, KERN_ERR, "filesystem has both journal "
3427		       "and inode journals!");
3428		return -EINVAL;
3429	}
3430
3431	if (journal_inum) {
3432		if (!(journal = ext4_get_journal(sb, journal_inum)))
3433			return -EINVAL;
3434	} else {
3435		if (!(journal = ext4_get_dev_journal(sb, journal_dev)))
3436			return -EINVAL;
3437	}
3438
3439	if (!(journal->j_flags & JBD2_BARRIER))
3440		ext4_msg(sb, KERN_INFO, "barriers disabled");
3441
3442	if (!really_read_only && test_opt(sb, UPDATE_JOURNAL)) {
3443		err = jbd2_journal_update_format(journal);
3444		if (err)  {
3445			ext4_msg(sb, KERN_ERR, "error updating journal");
3446			jbd2_journal_destroy(journal);
3447			return err;
3448		}
3449	}
3450
3451	if (!EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER))
3452		err = jbd2_journal_wipe(journal, !really_read_only);
3453	if (!err) {
3454		char *save = kmalloc(EXT4_S_ERR_LEN, GFP_KERNEL);
3455		if (save)
3456			memcpy(save, ((char *) es) +
3457			       EXT4_S_ERR_START, EXT4_S_ERR_LEN);
3458		err = jbd2_journal_load(journal);
3459		if (save)
3460			memcpy(((char *) es) + EXT4_S_ERR_START,
3461			       save, EXT4_S_ERR_LEN);
3462		kfree(save);
3463	}
3464
3465	if (err) {
3466		ext4_msg(sb, KERN_ERR, "error loading journal");
3467		jbd2_journal_destroy(journal);
3468		return err;
3469	}
3470
3471	EXT4_SB(sb)->s_journal = journal;
3472	ext4_clear_journal_err(sb, es);
3473
3474	if (journal_devnum &&
3475	    journal_devnum != le32_to_cpu(es->s_journal_dev)) {
3476		es->s_journal_dev = cpu_to_le32(journal_devnum);
3477
3478		/* Make sure we flush the recovery flag to disk. */
3479		ext4_commit_super(sb, 1);
3480	}
3481
3482	return 0;
3483}
3484
3485static int ext4_commit_super(struct super_block *sb, int sync)
3486{
3487	struct ext4_super_block *es = EXT4_SB(sb)->s_es;
3488	struct buffer_head *sbh = EXT4_SB(sb)->s_sbh;
3489	int error = 0;
3490
3491	if (!sbh)
3492		return error;
3493	if (buffer_write_io_error(sbh)) {
3494		/*
3495		 * Oh, dear.  A previous attempt to write the
3496		 * superblock failed.  This could happen because the
3497		 * USB device was yanked out.  Or it could happen to
3498		 * be a transient write error and maybe the block will
3499		 * be remapped.  Nothing we can do but to retry the
3500		 * write and hope for the best.
3501		 */
3502		ext4_msg(sb, KERN_ERR, "previous I/O error to "
3503		       "superblock detected");
3504		clear_buffer_write_io_error(sbh);
3505		set_buffer_uptodate(sbh);
3506	}
3507	/*
3508	 * If the file system is mounted read-only, don't update the
3509	 * superblock write time.  This avoids updating the superblock
3510	 * write time when we are mounting the root file system
3511	 * read/only but we need to replay the journal; at that point,
3512	 * for people who are east of GMT and who make their clock
3513	 * tick in localtime for Windows bug-for-bug compatibility,
3514	 * the clock is set in the future, and this will cause e2fsck
3515	 * to complain and force a full file system check.
3516	 */
3517	if (!(sb->s_flags & MS_RDONLY))
3518		es->s_wtime = cpu_to_le32(get_seconds());
3519	if (sb->s_bdev->bd_part)
3520		es->s_kbytes_written =
3521			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written +
3522			    ((part_stat_read(sb->s_bdev->bd_part, sectors[1]) -
3523			      EXT4_SB(sb)->s_sectors_written_start) >> 1));
3524	else
3525		es->s_kbytes_written =
3526			cpu_to_le64(EXT4_SB(sb)->s_kbytes_written);
3527	ext4_free_blocks_count_set(es, percpu_counter_sum_positive(
3528					&EXT4_SB(sb)->s_freeblocks_counter));
3529	es->s_free_inodes_count = cpu_to_le32(percpu_counter_sum_positive(
3530					&EXT4_SB(sb)->s_freeinodes_counter));
3531	sb->s_dirt = 0;
3532	BUFFER_TRACE(sbh, "marking dirty");
3533	mark_buffer_dirty(sbh);
3534	if (sync) {
3535		error = sync_dirty_buffer(sbh);
3536		if (error)
3537			return error;
3538
3539		error = buffer_write_io_error(sbh);
3540		if (error) {
3541			ext4_msg(sb, KERN_ERR, "I/O error while writing "
3542			       "superblock");
3543			clear_buffer_write_io_error(sbh);
3544			set_buffer_uptodate(sbh);
3545		}
3546	}
3547	return error;
3548}
3549
3550/*
3551 * Have we just finished recovery?  If so, and if we are mounting (or
3552 * remounting) the filesystem readonly, then we will end up with a
3553 * consistent fs on disk.  Record that fact.
3554 */
3555static void ext4_mark_recovery_complete(struct super_block *sb,
3556					struct ext4_super_block *es)
3557{
3558	journal_t *journal = EXT4_SB(sb)->s_journal;
3559
3560	if (!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
3561		BUG_ON(journal != NULL);
3562		return;
3563	}
3564	jbd2_journal_lock_updates(journal);
3565	if (jbd2_journal_flush(journal) < 0)
3566		goto out;
3567
3568	if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER) &&
3569	    sb->s_flags & MS_RDONLY) {
3570		EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3571		ext4_commit_super(sb, 1);
3572	}
3573
3574out:
3575	jbd2_journal_unlock_updates(journal);
3576}
3577
3578/*
3579 * If we are mounting (or read-write remounting) a filesystem whose journal
3580 * has recorded an error from a previous lifetime, move that error to the
3581 * main filesystem now.
3582 */
3583static void ext4_clear_journal_err(struct super_block *sb,
3584				   struct ext4_super_block *es)
3585{
3586	journal_t *journal;
3587	int j_errno;
3588	const char *errstr;
3589
3590	BUG_ON(!EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL));
3591
3592	journal = EXT4_SB(sb)->s_journal;
3593
3594	/*
3595	 * Now check for any error status which may have been recorded in the
3596	 * journal by a prior ext4_error() or ext4_abort()
3597	 */
3598
3599	j_errno = jbd2_journal_errno(journal);
3600	if (j_errno) {
3601		char nbuf[16];
3602
3603		errstr = ext4_decode_error(sb, j_errno, nbuf);
3604		ext4_warning(sb, "Filesystem error recorded "
3605			     "from previous mount: %s", errstr);
3606		ext4_warning(sb, "Marking fs in need of filesystem check.");
3607
3608		EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
3609		es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
3610		ext4_commit_super(sb, 1);
3611
3612		jbd2_journal_clear_err(journal);
3613	}
3614}
3615
3616/*
3617 * Force the running and committing transactions to commit,
3618 * and wait on the commit.
3619 */
3620int ext4_force_commit(struct super_block *sb)
3621{
3622	journal_t *journal;
3623	int ret = 0;
3624
3625	if (sb->s_flags & MS_RDONLY)
3626		return 0;
3627
3628	journal = EXT4_SB(sb)->s_journal;
3629	if (journal) {
3630		vfs_check_frozen(sb, SB_FREEZE_TRANS);
3631		ret = ext4_journal_force_commit(journal);
3632	}
3633
3634	return ret;
3635}
3636
3637static void ext4_write_super(struct super_block *sb)
3638{
3639	lock_super(sb);
3640	ext4_commit_super(sb, 1);
3641	unlock_super(sb);
3642}
3643
3644static int ext4_sync_fs(struct super_block *sb, int wait)
3645{
3646	int ret = 0;
3647	tid_t target;
3648	struct ext4_sb_info *sbi = EXT4_SB(sb);
3649
3650	trace_ext4_sync_fs(sb, wait);
3651	flush_workqueue(sbi->dio_unwritten_wq);
3652	if (jbd2_journal_start_commit(sbi->s_journal, &target)) {
3653		if (wait)
3654			jbd2_log_wait_commit(sbi->s_journal, target);
3655	}
3656	return ret;
3657}
3658
3659/*
3660 * LVM calls this function before a (read-only) snapshot is created.  This
3661 * gives us a chance to flush the journal completely and mark the fs clean.
3662 */
3663static int ext4_freeze(struct super_block *sb)
3664{
3665	int error = 0;
3666	journal_t *journal;
3667
3668	if (sb->s_flags & MS_RDONLY)
3669		return 0;
3670
3671	journal = EXT4_SB(sb)->s_journal;
3672
3673	/* Now we set up the journal barrier. */
3674	jbd2_journal_lock_updates(journal);
3675
3676	/*
3677	 * Don't clear the needs_recovery flag if we failed to flush
3678	 * the journal.
3679	 */
3680	error = jbd2_journal_flush(journal);
3681	if (error < 0)
3682		goto out;
3683
3684	/* Journal blocked and flushed, clear needs_recovery flag. */
3685	EXT4_CLEAR_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3686	error = ext4_commit_super(sb, 1);
3687out:
3688	/* we rely on s_frozen to stop further updates */
3689	jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
3690	return error;
3691}
3692
3693/*
3694 * Called by LVM after the snapshot is done.  We need to reset the RECOVER
3695 * flag here, even though the filesystem is not technically dirty yet.
3696 */
3697static int ext4_unfreeze(struct super_block *sb)
3698{
3699	if (sb->s_flags & MS_RDONLY)
3700		return 0;
3701
3702	lock_super(sb);
3703	/* Reset the needs_recovery flag before the fs is unlocked. */
3704	EXT4_SET_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_RECOVER);
3705	ext4_commit_super(sb, 1);
3706	unlock_super(sb);
3707	return 0;
3708}
3709
3710static int ext4_remount(struct super_block *sb, int *flags, char *data)
3711{
3712	struct ext4_super_block *es;
3713	struct ext4_sb_info *sbi = EXT4_SB(sb);
3714	ext4_fsblk_t n_blocks_count = 0;
3715	unsigned long old_sb_flags;
3716	struct ext4_mount_options old_opts;
3717	int enable_quota = 0;
3718	ext4_group_t g;
3719	unsigned int journal_ioprio = DEFAULT_JOURNAL_IOPRIO;
3720	int err;
3721#ifdef CONFIG_QUOTA
3722	int i;
3723#endif
3724	char *orig_data = kstrdup(data, GFP_KERNEL);
3725
3726	lock_kernel();
3727
3728	/* Store the original options */
3729	lock_super(sb);
3730	old_sb_flags = sb->s_flags;
3731	old_opts.s_mount_opt = sbi->s_mount_opt;
3732	old_opts.s_resuid = sbi->s_resuid;
3733	old_opts.s_resgid = sbi->s_resgid;
3734	old_opts.s_commit_interval = sbi->s_commit_interval;
3735	old_opts.s_min_batch_time = sbi->s_min_batch_time;
3736	old_opts.s_max_batch_time = sbi->s_max_batch_time;
3737#ifdef CONFIG_QUOTA
3738	old_opts.s_jquota_fmt = sbi->s_jquota_fmt;
3739	for (i = 0; i < MAXQUOTAS; i++)
3740		old_opts.s_qf_names[i] = sbi->s_qf_names[i];
3741#endif
3742	if (sbi->s_journal && sbi->s_journal->j_task->io_context)
3743		journal_ioprio = sbi->s_journal->j_task->io_context->ioprio;
3744
3745	/*
3746	 * Allow the "check" option to be passed as a remount option.
3747	 */
3748	if (!parse_options(data, sb, NULL, &journal_ioprio,
3749			   &n_blocks_count, 1)) {
3750		err = -EINVAL;
3751		goto restore_opts;
3752	}
3753
3754	if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED)
3755		ext4_abort(sb, "Abort forced by user");
3756
3757	sb->s_flags = (sb->s_flags & ~MS_POSIXACL) |
3758		(test_opt(sb, POSIX_ACL) ? MS_POSIXACL : 0);
3759
3760	es = sbi->s_es;
3761
3762	if (sbi->s_journal) {
3763		ext4_init_journal_params(sb, sbi->s_journal);
3764		set_task_ioprio(sbi->s_journal->j_task, journal_ioprio);
3765	}
3766
3767	if ((*flags & MS_RDONLY) != (sb->s_flags & MS_RDONLY) ||
3768		n_blocks_count > ext4_blocks_count(es)) {
3769		if (sbi->s_mount_flags & EXT4_MF_FS_ABORTED) {
3770			err = -EROFS;
3771			goto restore_opts;
3772		}
3773
3774		if (*flags & MS_RDONLY) {
3775			err = dquot_suspend(sb, -1);
3776			if (err < 0)
3777				goto restore_opts;
3778
3779			/*
3780			 * First of all, the unconditional stuff we have to do
3781			 * to disable replay of the journal when we next remount
3782			 */
3783			sb->s_flags |= MS_RDONLY;
3784
3785			/*
3786			 * OK, test if we are remounting a valid rw partition
3787			 * readonly, and if so set the rdonly flag and then
3788			 * mark the partition as valid again.
3789			 */
3790			if (!(es->s_state & cpu_to_le16(EXT4_VALID_FS)) &&
3791			    (sbi->s_mount_state & EXT4_VALID_FS))
3792				es->s_state = cpu_to_le16(sbi->s_mount_state);
3793
3794			if (sbi->s_journal)
3795				ext4_mark_recovery_complete(sb, es);
3796		} else {
3797			/* Make sure we can mount this feature set readwrite */
3798			if (!ext4_feature_set_ok(sb, 0)) {
3799				err = -EROFS;
3800				goto restore_opts;
3801			}
3802			/*
3803			 * Make sure the group descriptor checksums
3804			 * are sane.  If they aren't, refuse to remount r/w.
3805			 */
3806			for (g = 0; g < sbi->s_groups_count; g++) {
3807				struct ext4_group_desc *gdp =
3808					ext4_get_group_desc(sb, g, NULL);
3809
3810				if (!ext4_group_desc_csum_verify(sbi, g, gdp)) {
3811					ext4_msg(sb, KERN_ERR,
3812	       "ext4_remount: Checksum for group %u failed (%u!=%u)",
3813		g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
3814					       le16_to_cpu(gdp->bg_checksum));
3815					err = -EINVAL;
3816					goto restore_opts;
3817				}
3818			}
3819
3820			/*
3821			 * If we have an unprocessed orphan list hanging
3822			 * around from a previously readonly bdev mount,
3823			 * require a full umount/remount for now.
3824			 */
3825			if (es->s_last_orphan) {
3826				ext4_msg(sb, KERN_WARNING, "Couldn't "
3827				       "remount RDWR because of unprocessed "
3828				       "orphan inode list.  Please "
3829				       "umount/remount instead");
3830				err = -EINVAL;
3831				goto restore_opts;
3832			}
3833
3834			/*
3835			 * Mounting a RDONLY partition read-write, so reread
3836			 * and store the current valid flag.  (It may have
3837			 * been changed by e2fsck since we originally mounted
3838			 * the partition.)
3839			 */
3840			if (sbi->s_journal)
3841				ext4_clear_journal_err(sb, es);
3842			sbi->s_mount_state = le16_to_cpu(es->s_state);
3843			if ((err = ext4_group_extend(sb, es, n_blocks_count)))
3844				goto restore_opts;
3845			if (!ext4_setup_super(sb, es, 0))
3846				sb->s_flags &= ~MS_RDONLY;
3847			enable_quota = 1;
3848		}
3849	}
3850	ext4_setup_system_zone(sb);
3851	if (sbi->s_journal == NULL)
3852		ext4_commit_super(sb, 1);
3853
3854#ifdef CONFIG_QUOTA
3855	/* Release old quota file names */
3856	for (i = 0; i < MAXQUOTAS; i++)
3857		if (old_opts.s_qf_names[i] &&
3858		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
3859			kfree(old_opts.s_qf_names[i]);
3860#endif
3861	unlock_super(sb);
3862	unlock_kernel();
3863	if (enable_quota)
3864		dquot_resume(sb, -1);
3865
3866	ext4_msg(sb, KERN_INFO, "re-mounted. Opts: %s", orig_data);
3867	kfree(orig_data);
3868	return 0;
3869
3870restore_opts:
3871	sb->s_flags = old_sb_flags;
3872	sbi->s_mount_opt = old_opts.s_mount_opt;
3873	sbi->s_resuid = old_opts.s_resuid;
3874	sbi->s_resgid = old_opts.s_resgid;
3875	sbi->s_commit_interval = old_opts.s_commit_interval;
3876	sbi->s_min_batch_time = old_opts.s_min_batch_time;
3877	sbi->s_max_batch_time = old_opts.s_max_batch_time;
3878#ifdef CONFIG_QUOTA
3879	sbi->s_jquota_fmt = old_opts.s_jquota_fmt;
3880	for (i = 0; i < MAXQUOTAS; i++) {
3881		if (sbi->s_qf_names[i] &&
3882		    old_opts.s_qf_names[i] != sbi->s_qf_names[i])
3883			kfree(sbi->s_qf_names[i]);
3884		sbi->s_qf_names[i] = old_opts.s_qf_names[i];
3885	}
3886#endif
3887	unlock_super(sb);
3888	unlock_kernel();
3889	kfree(orig_data);
3890	return err;
3891}
3892
3893static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
3894{
3895	struct super_block *sb = dentry->d_sb;
3896	struct ext4_sb_info *sbi = EXT4_SB(sb);
3897	struct ext4_super_block *es = sbi->s_es;
3898	u64 fsid;
3899
3900	if (test_opt(sb, MINIX_DF)) {
3901		sbi->s_overhead_last = 0;
3902	} else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
3903		ext4_group_t i, ngroups = ext4_get_groups_count(sb);
3904		ext4_fsblk_t overhead = 0;
3905
3906		/*
3907		 * Compute the overhead (FS structures).  This is constant
3908		 * for a given filesystem unless the number of block groups
3909		 * changes so we cache the previous value until it does.
3910		 */
3911
3912		/*
3913		 * All of the blocks before first_data_block are
3914		 * overhead
3915		 */
3916		overhead = le32_to_cpu(es->s_first_data_block);
3917
3918		/*
3919		 * Add the overhead attributed to the superblock and
3920		 * block group descriptors.  If the sparse superblocks
3921		 * feature is turned on, then not all groups have this.
3922		 */
3923		for (i = 0; i < ngroups; i++) {
3924			overhead += ext4_bg_has_super(sb, i) +
3925				ext4_bg_num_gdb(sb, i);
3926			cond_resched();
3927		}
3928
3929		/*
3930		 * Every block group has an inode bitmap, a block
3931		 * bitmap, and an inode table.
3932		 */
3933		overhead += ngroups * (2 + sbi->s_itb_per_group);
3934		sbi->s_overhead_last = overhead;
3935		smp_wmb();
3936		sbi->s_blocks_last = ext4_blocks_count(es);
3937	}
3938
3939	buf->f_type = EXT4_SUPER_MAGIC;
3940	buf->f_bsize = sb->s_blocksize;
3941	buf->f_blocks = ext4_blocks_count(es) - sbi->s_overhead_last;
3942	buf->f_bfree = percpu_counter_sum_positive(&sbi->s_freeblocks_counter) -
3943		       percpu_counter_sum_positive(&sbi->s_dirtyblocks_counter);
3944	buf->f_bavail = buf->f_bfree - ext4_r_blocks_count(es);
3945	if (buf->f_bfree < ext4_r_blocks_count(es))
3946		buf->f_bavail = 0;
3947	buf->f_files = le32_to_cpu(es->s_inodes_count);
3948	buf->f_ffree = percpu_counter_sum_positive(&sbi->s_freeinodes_counter);
3949	buf->f_namelen = EXT4_NAME_LEN;
3950	fsid = le64_to_cpup((void *)es->s_uuid) ^
3951	       le64_to_cpup((void *)es->s_uuid + sizeof(u64));
3952	buf->f_fsid.val[0] = fsid & 0xFFFFFFFFUL;
3953	buf->f_fsid.val[1] = (fsid >> 32) & 0xFFFFFFFFUL;
3954
3955	return 0;
3956}
3957
3958/* Helper function for writing quotas on sync - we need to start transaction
3959 * before quota file is locked for write. Otherwise the are possible deadlocks:
3960 * Process 1                         Process 2
3961 * ext4_create()                     quota_sync()
3962 *   jbd2_journal_start()                  write_dquot()
3963 *   dquot_initialize()                         down(dqio_mutex)
3964 *     down(dqio_mutex)                    jbd2_journal_start()
3965 *
3966 */
3967
3968#ifdef CONFIG_QUOTA
3969
3970static inline struct inode *dquot_to_inode(struct dquot *dquot)
3971{
3972	return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
3973}
3974
3975static int ext4_write_dquot(struct dquot *dquot)
3976{
3977	int ret, err;
3978	handle_t *handle;
3979	struct inode *inode;
3980
3981	inode = dquot_to_inode(dquot);
3982	handle = ext4_journal_start(inode,
3983				    EXT4_QUOTA_TRANS_BLOCKS(dquot->dq_sb));
3984	if (IS_ERR(handle))
3985		return PTR_ERR(handle);
3986	ret = dquot_commit(dquot);
3987	err = ext4_journal_stop(handle);
3988	if (!ret)
3989		ret = err;
3990	return ret;
3991}
3992
3993static int ext4_acquire_dquot(struct dquot *dquot)
3994{
3995	int ret, err;
3996	handle_t *handle;
3997
3998	handle = ext4_journal_start(dquot_to_inode(dquot),
3999				    EXT4_QUOTA_INIT_BLOCKS(dquot->dq_sb));
4000	if (IS_ERR(handle))
4001		return PTR_ERR(handle);
4002	ret = dquot_acquire(dquot);
4003	err = ext4_journal_stop(handle);
4004	if (!ret)
4005		ret = err;
4006	return ret;
4007}
4008
4009static int ext4_release_dquot(struct dquot *dquot)
4010{
4011	int ret, err;
4012	handle_t *handle;
4013
4014	handle = ext4_journal_start(dquot_to_inode(dquot),
4015				    EXT4_QUOTA_DEL_BLOCKS(dquot->dq_sb));
4016	if (IS_ERR(handle)) {
4017		/* Release dquot anyway to avoid endless cycle in dqput() */
4018		dquot_release(dquot);
4019		return PTR_ERR(handle);
4020	}
4021	ret = dquot_release(dquot);
4022	err = ext4_journal_stop(handle);
4023	if (!ret)
4024		ret = err;
4025	return ret;
4026}
4027
4028static int ext4_mark_dquot_dirty(struct dquot *dquot)
4029{
4030	/* Are we journaling quotas? */
4031	if (EXT4_SB(dquot->dq_sb)->s_qf_names[USRQUOTA] ||
4032	    EXT4_SB(dquot->dq_sb)->s_qf_names[GRPQUOTA]) {
4033		dquot_mark_dquot_dirty(dquot);
4034		return ext4_write_dquot(dquot);
4035	} else {
4036		return dquot_mark_dquot_dirty(dquot);
4037	}
4038}
4039
4040static int ext4_write_info(struct super_block *sb, int type)
4041{
4042	int ret, err;
4043	handle_t *handle;
4044
4045	/* Data block + inode block */
4046	handle = ext4_journal_start(sb->s_root->d_inode, 2);
4047	if (IS_ERR(handle))
4048		return PTR_ERR(handle);
4049	ret = dquot_commit_info(sb, type);
4050	err = ext4_journal_stop(handle);
4051	if (!ret)
4052		ret = err;
4053	return ret;
4054}
4055
4056/*
4057 * Turn on quotas during mount time - we need to find
4058 * the quota file and such...
4059 */
4060static int ext4_quota_on_mount(struct super_block *sb, int type)
4061{
4062	return dquot_quota_on_mount(sb, EXT4_SB(sb)->s_qf_names[type],
4063					EXT4_SB(sb)->s_jquota_fmt, type);
4064}
4065
4066/*
4067 * Standard function to be called on quota_on
4068 */
4069static int ext4_quota_on(struct super_block *sb, int type, int format_id,
4070			 char *name)
4071{
4072	int err;
4073	struct path path;
4074
4075	if (!test_opt(sb, QUOTA))
4076		return -EINVAL;
4077
4078	err = kern_path(name, LOOKUP_FOLLOW, &path);
4079	if (err)
4080		return err;
4081
4082	/* Quotafile not on the same filesystem? */
4083	if (path.mnt->mnt_sb != sb) {
4084		path_put(&path);
4085		return -EXDEV;
4086	}
4087	/* Journaling quota? */
4088	if (EXT4_SB(sb)->s_qf_names[type]) {
4089		/* Quotafile not in fs root? */
4090		if (path.dentry->d_parent != sb->s_root)
4091			ext4_msg(sb, KERN_WARNING,
4092				"Quota file not on filesystem root. "
4093				"Journaled quota will not work");
4094	}
4095
4096	/*
4097	 * When we journal data on quota file, we have to flush journal to see
4098	 * all updates to the file when we bypass pagecache...
4099	 */
4100	if (EXT4_SB(sb)->s_journal &&
4101	    ext4_should_journal_data(path.dentry->d_inode)) {
4102		/*
4103		 * We don't need to lock updates but journal_flush() could
4104		 * otherwise be livelocked...
4105		 */
4106		jbd2_journal_lock_updates(EXT4_SB(sb)->s_journal);
4107		err = jbd2_journal_flush(EXT4_SB(sb)->s_journal);
4108		jbd2_journal_unlock_updates(EXT4_SB(sb)->s_journal);
4109		if (err) {
4110			path_put(&path);
4111			return err;
4112		}
4113	}
4114
4115	err = dquot_quota_on_path(sb, type, format_id, &path);
4116	path_put(&path);
4117	return err;
4118}
4119
4120static int ext4_quota_off(struct super_block *sb, int type)
4121{
4122	/* Force all delayed allocation blocks to be allocated */
4123	if (test_opt(sb, DELALLOC)) {
4124		down_read(&sb->s_umount);
4125		sync_filesystem(sb);
4126		up_read(&sb->s_umount);
4127	}
4128
4129	return dquot_quota_off(sb, type);
4130}
4131
4132/* Read data from quotafile - avoid pagecache and such because we cannot afford
4133 * acquiring the locks... As quota files are never truncated and quota code
4134 * itself serializes the operations (and noone else should touch the files)
4135 * we don't have to be afraid of races */
4136static ssize_t ext4_quota_read(struct super_block *sb, int type, char *data,
4137			       size_t len, loff_t off)
4138{
4139	struct inode *inode = sb_dqopt(sb)->files[type];
4140	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
4141	int err = 0;
4142	int offset = off & (sb->s_blocksize - 1);
4143	int tocopy;
4144	size_t toread;
4145	struct buffer_head *bh;
4146	loff_t i_size = i_size_read(inode);
4147
4148	if (off > i_size)
4149		return 0;
4150	if (off+len > i_size)
4151		len = i_size-off;
4152	toread = len;
4153	while (toread > 0) {
4154		tocopy = sb->s_blocksize - offset < toread ?
4155				sb->s_blocksize - offset : toread;
4156		bh = ext4_bread(NULL, inode, blk, 0, &err);
4157		if (err)
4158			return err;
4159		if (!bh)	/* A hole? */
4160			memset(data, 0, tocopy);
4161		else
4162			memcpy(data, bh->b_data+offset, tocopy);
4163		brelse(bh);
4164		offset = 0;
4165		toread -= tocopy;
4166		data += tocopy;
4167		blk++;
4168	}
4169	return len;
4170}
4171
4172/* Write to quotafile (we know the transaction is already started and has
4173 * enough credits) */
4174static ssize_t ext4_quota_write(struct super_block *sb, int type,
4175				const char *data, size_t len, loff_t off)
4176{
4177	struct inode *inode = sb_dqopt(sb)->files[type];
4178	ext4_lblk_t blk = off >> EXT4_BLOCK_SIZE_BITS(sb);
4179	int err = 0;
4180	int offset = off & (sb->s_blocksize - 1);
4181	struct buffer_head *bh;
4182	handle_t *handle = journal_current_handle();
4183
4184	if (EXT4_SB(sb)->s_journal && !handle) {
4185		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
4186			" cancelled because transaction is not started",
4187			(unsigned long long)off, (unsigned long long)len);
4188		return -EIO;
4189	}
4190	/*
4191	 * Since we account only one data block in transaction credits,
4192	 * then it is impossible to cross a block boundary.
4193	 */
4194	if (sb->s_blocksize - offset < len) {
4195		ext4_msg(sb, KERN_WARNING, "Quota write (off=%llu, len=%llu)"
4196			" cancelled because not block aligned",
4197			(unsigned long long)off, (unsigned long long)len);
4198		return -EIO;
4199	}
4200
4201	mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
4202	bh = ext4_bread(handle, inode, blk, 1, &err);
4203	if (!bh)
4204		goto out;
4205	err = ext4_journal_get_write_access(handle, bh);
4206	if (err) {
4207		brelse(bh);
4208		goto out;
4209	}
4210	lock_buffer(bh);
4211	memcpy(bh->b_data+offset, data, len);
4212	flush_dcache_page(bh->b_page);
4213	unlock_buffer(bh);
4214	err = ext4_handle_dirty_metadata(handle, NULL, bh);
4215	brelse(bh);
4216out:
4217	if (err) {
4218		mutex_unlock(&inode->i_mutex);
4219		return err;
4220	}
4221	if (inode->i_size < off + len) {
4222		i_size_write(inode, off + len);
4223		EXT4_I(inode)->i_disksize = inode->i_size;
4224	}
4225	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
4226	ext4_mark_inode_dirty(handle, inode);
4227	mutex_unlock(&inode->i_mutex);
4228	return len;
4229}
4230
4231#endif
4232
4233static int ext4_get_sb(struct file_system_type *fs_type, int flags,
4234		       const char *dev_name, void *data, struct vfsmount *mnt)
4235{
4236	return get_sb_bdev(fs_type, flags, dev_name, data, ext4_fill_super,mnt);
4237}
4238
4239#if !defined(CONFIG_EXT2_FS) && !defined(CONFIG_EXT2_FS_MODULE) && \
4240	defined(CONFIG_EXT4_USE_FOR_EXT23)
4241static struct file_system_type ext2_fs_type = {
4242	.owner		= THIS_MODULE,
4243	.name		= "ext2",
4244	.get_sb		= ext4_get_sb,
4245	.kill_sb	= kill_block_super,
4246	.fs_flags	= FS_REQUIRES_DEV,
4247};
4248
4249static inline void register_as_ext2(void)
4250{
4251	int err = register_filesystem(&ext2_fs_type);
4252	if (err)
4253		printk(KERN_WARNING
4254		       "EXT4-fs: Unable to register as ext2 (%d)\n", err);
4255}
4256
4257static inline void unregister_as_ext2(void)
4258{
4259	unregister_filesystem(&ext2_fs_type);
4260}
4261MODULE_ALIAS("ext2");
4262#else
4263static inline void register_as_ext2(void) { }
4264static inline void unregister_as_ext2(void) { }
4265#endif
4266
4267#if !defined(CONFIG_EXT3_FS) && !defined(CONFIG_EXT3_FS_MODULE) && \
4268	defined(CONFIG_EXT4_USE_FOR_EXT23)
4269static inline void register_as_ext3(void)
4270{
4271	int err = register_filesystem(&ext3_fs_type);
4272	if (err)
4273		printk(KERN_WARNING
4274		       "EXT4-fs: Unable to register as ext3 (%d)\n", err);
4275}
4276
4277static inline void unregister_as_ext3(void)
4278{
4279	unregister_filesystem(&ext3_fs_type);
4280}
4281MODULE_ALIAS("ext3");
4282#else
4283static inline void register_as_ext3(void) { }
4284static inline void unregister_as_ext3(void) { }
4285#endif
4286
4287static struct file_system_type ext4_fs_type = {
4288	.owner		= THIS_MODULE,
4289	.name		= "ext4",
4290	.get_sb		= ext4_get_sb,
4291	.kill_sb	= kill_block_super,
4292	.fs_flags	= FS_REQUIRES_DEV,
4293};
4294
4295static int __init init_ext4_fs(void)
4296{
4297	int err;
4298
4299	ext4_check_flag_values();
4300	err = init_ext4_system_zone();
4301	if (err)
4302		return err;
4303	ext4_kset = kset_create_and_add("ext4", NULL, fs_kobj);
4304	if (!ext4_kset)
4305		goto out4;
4306	ext4_proc_root = proc_mkdir("fs/ext4", NULL);
4307	err = init_ext4_mballoc();
4308	if (err)
4309		goto out3;
4310
4311	err = init_ext4_xattr();
4312	if (err)
4313		goto out2;
4314	err = init_inodecache();
4315	if (err)
4316		goto out1;
4317	register_as_ext2();
4318	register_as_ext3();
4319	err = register_filesystem(&ext4_fs_type);
4320	if (err)
4321		goto out;
4322	return 0;
4323out:
4324	unregister_as_ext2();
4325	unregister_as_ext3();
4326	destroy_inodecache();
4327out1:
4328	exit_ext4_xattr();
4329out2:
4330	exit_ext4_mballoc();
4331out3:
4332	remove_proc_entry("fs/ext4", NULL);
4333	kset_unregister(ext4_kset);
4334out4:
4335	exit_ext4_system_zone();
4336	return err;
4337}
4338
4339static void __exit exit_ext4_fs(void)
4340{
4341	unregister_as_ext2();
4342	unregister_as_ext3();
4343	unregister_filesystem(&ext4_fs_type);
4344	destroy_inodecache();
4345	exit_ext4_xattr();
4346	exit_ext4_mballoc();
4347	remove_proc_entry("fs/ext4", NULL);
4348	kset_unregister(ext4_kset);
4349	exit_ext4_system_zone();
4350}
4351
4352MODULE_AUTHOR("Remy Card, Stephen Tweedie, Andrew Morton, Andreas Dilger, Theodore Ts'o and others");
4353MODULE_DESCRIPTION("Fourth Extended Filesystem");
4354MODULE_LICENSE("GPL");
4355module_init(init_ext4_fs)
4356module_exit(exit_ext4_fs)
4357