/*
 *   Copyright (C) International Business Machines Corp., 2000-2004
 *   Portions Copyright (C) Christoph Hellwig, 2001-2002
 *
 *   This program is free software;  you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY;  without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See
 *   the GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program;  if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/fs.h>
#include <linux/module.h>
#include <linux/parser.h>
#include <linux/completion.h>
#include <linux/vfs.h>
#include <linux/mount.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>

#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_inode.h"
#include "jfs_metapage.h"
#include "jfs_superblock.h"
#include "jfs_dmap.h"
#include "jfs_imap.h"
#include "jfs_acl.h"
#include "jfs_debug.h"

MODULE_DESCRIPTION("The Journaled Filesystem (JFS)");
MODULE_AUTHOR("Steve Best/Dave Kleikamp/Barry Arndt, IBM");
MODULE_LICENSE("GPL");

static struct kmem_cache * jfs_inode_cachep;

static const struct super_operations jfs_super_operations;
static struct export_operations jfs_export_operations;
static struct file_system_type jfs_fs_type;

#define MAX_COMMIT_THREADS 64
static int commit_threads = 0;
module_param(commit_threads, int, 0);
MODULE_PARM_DESC(commit_threads, "Number of commit threads");

static struct task_struct *jfsCommitThread[MAX_COMMIT_THREADS];
struct task_struct *jfsIOthread;
struct task_struct *jfsSyncThread;

#ifdef CONFIG_JFS_DEBUG
int jfsloglevel = JFS_LOGLEVEL_WARN;
module_param(jfsloglevel, int, 0644);
MODULE_PARM_DESC(jfsloglevel, "Specify JFS loglevel (0, 1 or 2)");
#endif

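/*
 * jfs_handle_error - apply the mount-time error policy after a
 * filesystem error: mark the superblock dirty on disk, then panic,
 * remount read-only, or simply continue, depending on sbi->flag.
 * Nothing is done if the filesystem is already read-only.
 */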
static void jfs_handle_error(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	if (sb->s_flags & MS_RDONLY)
		return;

	updateSuper(sb, FM_DIRTY);

	if (sbi->flag & JFS_ERR_PANIC)
		panic("JFS (device %s): panic forced after error\n",
			sb->s_id);
	else if (sbi->flag & JFS_ERR_REMOUNT_RO) {
		jfs_err("ERROR: (device %s): remounting filesystem "
			"as read-only\n",
			sb->s_id);
		sb->s_flags |= MS_RDONLY;
	}

	/* nothing is done for continue beyond marking the superblock dirty */
}

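/*
 * jfs_error - report a filesystem error and invoke the error policy.
 * The second argument is used as a printf-style format string (callers
 * conventionally pass the name of the failing function); the message is
 * formatted into a static buffer, logged at KERN_ERR, and then
 * jfs_handle_error() decides whether to panic, remount read-only or
 * continue.
 */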
void jfs_error(struct super_block *sb, const char * function, ...)
{
	static char error_buf[256];
	va_list args;

	va_start(args, function);
	vsnprintf(error_buf, sizeof(error_buf), function, args);
	va_end(args);

	printk(KERN_ERR "ERROR: (device %s): %s\n", sb->s_id, error_buf);

	jfs_handle_error(sb);
}

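/*
 * Inode allocation and teardown: jfs_alloc_inode() hands the VFS the
 * inode embedded in a jfs_inode_info object taken from the slab cache;
 * jfs_destroy_inode() drops the active allocation-group reference and
 * any cached POSIX ACLs before returning the object to the cache.
 */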
static struct inode *jfs_alloc_inode(struct super_block *sb)
{
	struct jfs_inode_info *jfs_inode;

	jfs_inode = kmem_cache_alloc(jfs_inode_cachep, GFP_NOFS);
	if (!jfs_inode)
		return NULL;
	return &jfs_inode->vfs_inode;
}

static void jfs_destroy_inode(struct inode *inode)
{
	struct jfs_inode_info *ji = JFS_IP(inode);

	BUG_ON(!list_empty(&ji->anon_inode_list));

	spin_lock_irq(&ji->ag_lock);
	if (ji->active_ag != -1) {
		struct bmap *bmap = JFS_SBI(inode->i_sb)->bmap;
		atomic_dec(&bmap->db_active[ji->active_ag]);
		ji->active_ag = -1;
	}
	spin_unlock_irq(&ji->ag_lock);

#ifdef CONFIG_JFS_POSIX_ACL
	if (ji->i_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_acl);
		ji->i_acl = JFS_ACL_NOT_CACHED;
	}
	if (ji->i_default_acl != JFS_ACL_NOT_CACHED) {
		posix_acl_release(ji->i_default_acl);
		ji->i_default_acl = JFS_ACL_NOT_CACHED;
	}
#endif

	kmem_cache_free(jfs_inode_cachep, ji);
}

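/*
 * jfs_statfs - fill in a kstatfs structure.  Block counts come straight
 * from the block allocation map; because JFS allocates inodes
 * dynamically, the inode totals are an estimate rather than the number
 * currently allocated (see the comment in the body).
 */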
static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
{
	struct jfs_sb_info *sbi = JFS_SBI(dentry->d_sb);
	s64 maxinodes;
	struct inomap *imap = JFS_IP(sbi->ipimap)->i_imap;

	jfs_info("In jfs_statfs");
	buf->f_type = JFS_SUPER_MAGIC;
	buf->f_bsize = sbi->bsize;
	buf->f_blocks = sbi->bmap->db_mapsize;
	buf->f_bfree = sbi->bmap->db_nfree;
	buf->f_bavail = sbi->bmap->db_nfree;
	/*
	 * If we really return the number of allocated & free inodes, some
	 * applications will fail because they won't see enough free inodes.
	 * We'll try to calculate some guess as to how many inodes we can
	 * really allocate
	 *
	 * buf->f_files = atomic_read(&imap->im_numinos);
	 * buf->f_ffree = atomic_read(&imap->im_numfree);
	 */
	maxinodes = min((s64) atomic_read(&imap->im_numinos) +
			((sbi->bmap->db_nfree >> imap->im_l2nbperiext)
			 << L2INOSPEREXT), (s64) 0xffffffffLL);
	buf->f_files = maxinodes;
	buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
				    atomic_read(&imap->im_numfree));

	buf->f_namelen = JFS_NAME_MAX;
	return 0;
}

static void jfs_put_super(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	int rc;

	jfs_info("In jfs_put_super");
	rc = jfs_umount(sb);
	if (rc)
		jfs_err("jfs_umount failed with return code %d", rc);
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	sbi->nls_tab = NULL;

	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;

	kfree(sbi);
}

enum {
	Opt_integrity, Opt_nointegrity, Opt_iocharset, Opt_resize,
	Opt_resize_nosize, Opt_errors, Opt_ignore, Opt_err, Opt_quota,
	Opt_usrquota, Opt_grpquota, Opt_uid, Opt_gid, Opt_umask
};

static match_table_t tokens = {
	{Opt_integrity, "integrity"},
	{Opt_nointegrity, "nointegrity"},
	{Opt_iocharset, "iocharset=%s"},
	{Opt_resize, "resize=%u"},
	{Opt_resize_nosize, "resize"},
	{Opt_errors, "errors=%s"},
	{Opt_ignore, "noquota"},
	{Opt_ignore, "quota"},
	{Opt_usrquota, "usrquota"},
	{Opt_grpquota, "grpquota"},
	{Opt_uid, "uid=%u"},
	{Opt_gid, "gid=%u"},
	{Opt_umask, "umask=%u"},
	{Opt_err, NULL}
};

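/*
 * parse_options - parse the comma-separated mount option string.
 * Updates *flag and the uid/gid/umask/nls fields of the superblock
 * info, and returns any requested resize target in *newLVSize.
 * Returns 1 on success, 0 on failure; an NLS table loaded for
 * iocharset is released again on the failure path.
 */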
static int parse_options(char *options, struct super_block *sb, s64 *newLVSize,
			 int *flag)
{
	void *nls_map = (void *)-1;	/* -1: no change;  NULL: none */
	char *p;
	struct jfs_sb_info *sbi = JFS_SBI(sb);

	*newLVSize = 0;

	if (!options)
		return 1;

	while ((p = strsep(&options, ",")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		if (!*p)
			continue;

		token = match_token(p, tokens, args);
		switch (token) {
		case Opt_integrity:
			*flag &= ~JFS_NOINTEGRITY;
			break;
		case Opt_nointegrity:
			*flag |= JFS_NOINTEGRITY;
			break;
		case Opt_ignore:
			/* Silently ignore the quota options */
			break;
		case Opt_iocharset:
			if (nls_map && nls_map != (void *) -1)
				unload_nls(nls_map);
			if (!strcmp(args[0].from, "none"))
				nls_map = NULL;
			else {
				nls_map = load_nls(args[0].from);
				if (!nls_map) {
					printk(KERN_ERR
					       "JFS: charset not found\n");
					goto cleanup;
				}
			}
			break;
		case Opt_resize:
		{
			char *resize = args[0].from;
			*newLVSize = simple_strtoull(resize, &resize, 0);
			break;
		}
		case Opt_resize_nosize:
		{
			*newLVSize = sb->s_bdev->bd_inode->i_size >>
				sb->s_blocksize_bits;
			if (*newLVSize == 0)
				printk(KERN_ERR
				       "JFS: Cannot determine volume size\n");
			break;
		}
		case Opt_errors:
		{
			char *errors = args[0].from;
			if (!errors || !*errors)
				goto cleanup;
			if (!strcmp(errors, "continue")) {
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_CONTINUE;
			} else if (!strcmp(errors, "remount-ro")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_PANIC;
				*flag |= JFS_ERR_REMOUNT_RO;
			} else if (!strcmp(errors, "panic")) {
				*flag &= ~JFS_ERR_CONTINUE;
				*flag &= ~JFS_ERR_REMOUNT_RO;
				*flag |= JFS_ERR_PANIC;
			} else {
				printk(KERN_ERR
				       "JFS: %s is an invalid error handler\n",
				       errors);
				goto cleanup;
			}
			break;
		}

#ifdef CONFIG_QUOTA
		case Opt_quota:
		case Opt_usrquota:
			*flag |= JFS_USRQUOTA;
			break;
		case Opt_grpquota:
			*flag |= JFS_GRPQUOTA;
			break;
#else
		case Opt_usrquota:
		case Opt_grpquota:
		case Opt_quota:
			printk(KERN_ERR
			       "JFS: quota operations not supported\n");
			break;
#endif
		case Opt_uid:
		{
			char *uid = args[0].from;
			sbi->uid = simple_strtoul(uid, &uid, 0);
			break;
		}
		case Opt_gid:
		{
			char *gid = args[0].from;
			sbi->gid = simple_strtoul(gid, &gid, 0);
			break;
		}
		case Opt_umask:
		{
			char *umask = args[0].from;
			sbi->umask = simple_strtoul(umask, &umask, 8);
			if (sbi->umask & ~0777) {
				printk(KERN_ERR
				       "JFS: Invalid value of umask\n");
				goto cleanup;
			}
			break;
		}
		default:
			printk(KERN_ERR
			       "JFS: Unrecognized mount option \"%s\" or missing value\n",
			       p);
			goto cleanup;
		}
	}

	if (nls_map != (void *) -1) {
		/* Discard old (if remount) */
		if (sbi->nls_tab)
			unload_nls(sbi->nls_tab);
		sbi->nls_tab = nls_map;
	}
	return 1;

cleanup:
	if (nls_map && nls_map != (void *) -1)
		unload_nls(nls_map);
	return 0;
}

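/*
 * jfs_remount - handle mount -o remount.  Re-parses the options, grows
 * the volume if a resize was requested, and switches between read-only
 * and read-write (or re-opens the log when the integrity mode changes).
 */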
static int jfs_remount(struct super_block *sb, int *flags, char *data)
{
	s64 newLVSize = 0;
	int rc = 0;
	int flag = JFS_SBI(sb)->flag;

	if (!parse_options(data, sb, &newLVSize, &flag)) {
		return -EINVAL;
	}
	if (newLVSize) {
		if (sb->s_flags & MS_RDONLY) {
			printk(KERN_ERR
		  "JFS: resize requires volume to be mounted read-write\n");
			return -EROFS;
		}
		rc = jfs_extendfs(sb, newLVSize, 0);
		if (rc)
			return rc;
	}

	if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
		/*
		 * Invalidate any previously read metadata.  fsck may have
		 * changed the on-disk data since we mounted r/o
		 */
		truncate_inode_pages(JFS_SBI(sb)->direct_inode->i_mapping, 0);

		JFS_SBI(sb)->flag = flag;
		return jfs_mount_rw(sb, 1);
	}
	if ((!(sb->s_flags & MS_RDONLY)) && (*flags & MS_RDONLY)) {
		rc = jfs_umount_rw(sb);
		JFS_SBI(sb)->flag = flag;
		return rc;
	}
	if ((JFS_SBI(sb)->flag & JFS_NOINTEGRITY) != (flag & JFS_NOINTEGRITY))
		if (!(sb->s_flags & MS_RDONLY)) {
			rc = jfs_umount_rw(sb);
			if (rc)
				return rc;
			JFS_SBI(sb)->flag = flag;
			return jfs_mount_rw(sb, 1);
		}
	JFS_SBI(sb)->flag = flag;

	return 0;
}

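/*
 * jfs_fill_super - read an on-disk JFS volume and set up the in-core
 * superblock: allocate the per-mount jfs_sb_info, create the special
 * "direct" inode used to map metadata pages, mount the aggregate (and
 * its log unless the mount is read-only), and instantiate the root
 * dentry.
 */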
static int jfs_fill_super(struct super_block *sb, void *data, int silent)
{
	struct jfs_sb_info *sbi;
	struct inode *inode;
	int rc;
	s64 newLVSize = 0;
	int flag;

	jfs_info("In jfs_read_super: s_flags=0x%lx", sb->s_flags);

	if (!new_valid_dev(sb->s_bdev->bd_dev))
		return -EOVERFLOW;

	sbi = kzalloc(sizeof (struct jfs_sb_info), GFP_KERNEL);
	if (!sbi)
		return -ENOMEM;
	sb->s_fs_info = sbi;
	sbi->sb = sb;
	sbi->uid = sbi->gid = sbi->umask = -1;

	/* initialize the mount flag and determine the default error handler */
	flag = JFS_ERR_REMOUNT_RO;

	if (!parse_options((char *) data, sb, &newLVSize, &flag)) {
		kfree(sbi);
		return -EINVAL;
	}
	sbi->flag = flag;

#ifdef CONFIG_JFS_POSIX_ACL
	sb->s_flags |= MS_POSIXACL;
#endif

	if (newLVSize) {
		printk(KERN_ERR "resize option for remount only\n");
		goto out_kfree;	/* don't leak sbi (or a loaded nls table) */
	}

	/*
	 * Initialize blocksize to 4K.
	 */
	sb_set_blocksize(sb, PSIZE);

	/*
	 * Set method vectors.
	 */
	sb->s_op = &jfs_super_operations;
	sb->s_export_op = &jfs_export_operations;

	/*
	 * Initialize direct-mapping inode/address-space
	 */
	inode = new_inode(sb);
	if (inode == NULL)
		goto out_kfree;
	inode->i_ino = 0;
	inode->i_nlink = 1;
	inode->i_size = sb->s_bdev->bd_inode->i_size;
	inode->i_mapping->a_ops = &jfs_metapage_aops;
	insert_inode_hash(inode);
	mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);

	sbi->direct_inode = inode;

	rc = jfs_mount(sb);
	if (rc) {
		if (!silent) {
			jfs_err("jfs_mount failed w/return code = %d", rc);
		}
		goto out_mount_failed;
	}
	if (sb->s_flags & MS_RDONLY)
		sbi->log = NULL;
	else {
		rc = jfs_mount_rw(sb, 0);
		if (rc) {
			if (!silent) {
				jfs_err("jfs_mount_rw failed, return code = %d",
					rc);
			}
			goto out_no_rw;
		}
	}

	sb->s_magic = JFS_SUPER_MAGIC;

	inode = iget(sb, ROOT_I);
	if (!inode || is_bad_inode(inode))
		goto out_no_root;
	sb->s_root = d_alloc_root(inode);
	if (!sb->s_root)
		goto out_no_root;

	if (sbi->mntflag & JFS_OS2)
		sb->s_root->d_op = &jfs_ci_dentry_operations;

	/* logical blocks are represented by 40 bits in pxd_t, etc. */
	sb->s_maxbytes = ((u64) sb->s_blocksize) << 40;
#if BITS_PER_LONG == 32
	/*
	 * Page cache is indexed by long.
	 * I would use MAX_LFS_FILESIZE, but it's only half as big
	 */
	sb->s_maxbytes = min(((u64) PAGE_CACHE_SIZE << 32) - 1, sb->s_maxbytes);
#endif
	sb->s_time_gran = 1;
	return 0;

out_no_root:
	jfs_err("jfs_read_super: get root inode failed");
	if (inode)
		iput(inode);

out_no_rw:
	rc = jfs_umount(sb);
	if (rc) {
		jfs_err("jfs_umount failed with return code %d", rc);
	}
out_mount_failed:
	filemap_write_and_wait(sbi->direct_inode->i_mapping);
	truncate_inode_pages(sbi->direct_inode->i_mapping, 0);
	make_bad_inode(sbi->direct_inode);
	iput(sbi->direct_inode);
	sbi->direct_inode = NULL;
out_kfree:
	if (sbi->nls_tab)
		unload_nls(sbi->nls_tab);
	kfree(sbi);
	return -EINVAL;
}

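/*
 * Filesystem freeze/thaw: jfs_write_super_lockfs() quiesces the
 * transaction manager, shuts down the log and marks the superblock
 * clean so the frozen image is consistent; jfs_unlockfs() re-marks the
 * superblock as mounted, restarts the log and resumes transactions.
 */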
static void jfs_write_super_lockfs(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;

	if (!(sb->s_flags & MS_RDONLY)) {
		txQuiesce(sb);
		lmLogShutdown(log);
		updateSuper(sb, FM_CLEAN);
	}
}

static void jfs_unlockfs(struct super_block *sb)
{
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct jfs_log *log = sbi->log;
	int rc = 0;

	if (!(sb->s_flags & MS_RDONLY)) {
		updateSuper(sb, FM_MOUNT);
		if ((rc = lmLogInit(log)))
			jfs_err("jfs_unlock failed with return code %d", rc);
		else
			txResume(sb);
	}
}

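/*
 * jfs_get_sb() is the standard block-device mount entry point; it
 * simply delegates to get_sb_bdev() with jfs_fill_super().
 * jfs_sync_fs() flushes the journal and writes a sync point; a NULL
 * log pointer means the filesystem is mounted read-only.
 */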
static int jfs_get_sb(struct file_system_type *fs_type,
	int flags, const char *dev_name, void *data, struct vfsmount *mnt)
{
	return get_sb_bdev(fs_type, flags, dev_name, data, jfs_fill_super,
			   mnt);
}

static int jfs_sync_fs(struct super_block *sb, int wait)
{
	struct jfs_log *log = JFS_SBI(sb)->log;

	/* log == NULL indicates read-only mount */
	if (log) {
		jfs_flush_journal(log, wait);
		jfs_syncpt(log, 0);
	}

	return 0;
}

static int jfs_show_options(struct seq_file *seq, struct vfsmount *vfs)
{
	struct jfs_sb_info *sbi = JFS_SBI(vfs->mnt_sb);

	if (sbi->uid != -1)
		seq_printf(seq, ",uid=%d", sbi->uid);
	if (sbi->gid != -1)
		seq_printf(seq, ",gid=%d", sbi->gid);
	if (sbi->umask != -1)
		seq_printf(seq, ",umask=%03o", sbi->umask);
	if (sbi->flag & JFS_NOINTEGRITY)
		seq_puts(seq, ",nointegrity");

#ifdef CONFIG_QUOTA
	if (sbi->flag & JFS_USRQUOTA)
		seq_puts(seq, ",usrquota");

	if (sbi->flag & JFS_GRPQUOTA)
		seq_puts(seq, ",grpquota");
#endif

	return 0;
}

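/*
 * Raw quota file I/O used by the generic quota code, built only when
 * CONFIG_QUOTA is enabled.  Both helpers go through jfs_get_block() and
 * buffer heads directly instead of the page cache.
 */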
#ifdef CONFIG_QUOTA

/* Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races */
static ssize_t jfs_quota_read(struct super_block *sb, int type, char *data,
			      size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t toread;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;
	loff_t i_size = i_size_read(inode);

	if (off > i_size)
		return 0;
	if (off+len > i_size)
		len = i_size-off;
	toread = len;
	while (toread > 0) {
		tocopy = sb->s_blocksize - offset < toread ?
				sb->s_blocksize - offset : toread;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 0);
		if (err)
			return err;
		if (!buffer_mapped(&tmp_bh))	/* A hole? */
			memset(data, 0, tocopy);
		else {
			bh = sb_bread(sb, tmp_bh.b_blocknr);
			if (!bh)
				return -EIO;
			memcpy(data, bh->b_data+offset, tocopy);
			brelse(bh);
		}
		offset = 0;
		toread -= tocopy;
		data += tocopy;
		blk++;
	}
	return len;
}

/* Write to quotafile - block by block through buffer heads, with i_mutex
 * held so the inode size update stays consistent */
static ssize_t jfs_quota_write(struct super_block *sb, int type,
			       const char *data, size_t len, loff_t off)
{
	struct inode *inode = sb_dqopt(sb)->files[type];
	sector_t blk = off >> sb->s_blocksize_bits;
	int err = 0;
	int offset = off & (sb->s_blocksize - 1);
	int tocopy;
	size_t towrite = len;
	struct buffer_head tmp_bh;
	struct buffer_head *bh;

	mutex_lock(&inode->i_mutex);
	while (towrite > 0) {
		tocopy = sb->s_blocksize - offset < towrite ?
				sb->s_blocksize - offset : towrite;

		tmp_bh.b_state = 0;
		tmp_bh.b_size = 1 << inode->i_blkbits;
		err = jfs_get_block(inode, blk, &tmp_bh, 1);
		if (err)
			goto out;
		if (offset || tocopy != sb->s_blocksize)
			bh = sb_bread(sb, tmp_bh.b_blocknr);
		else
			bh = sb_getblk(sb, tmp_bh.b_blocknr);
		if (!bh) {
			err = -EIO;
			goto out;
		}
		lock_buffer(bh);
		memcpy(bh->b_data+offset, data, tocopy);
		flush_dcache_page(bh->b_page);
		set_buffer_uptodate(bh);
		mark_buffer_dirty(bh);
		unlock_buffer(bh);
		brelse(bh);
		offset = 0;
		towrite -= tocopy;
		data += tocopy;
		blk++;
	}
out:
	if (len == towrite) {
		/* nothing was written; don't return with i_mutex held */
		mutex_unlock(&inode->i_mutex);
		return err;
	}
	if (inode->i_size < off+len-towrite)
		i_size_write(inode, off+len-towrite);
	inode->i_version++;
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;
	mark_inode_dirty(inode);
	mutex_unlock(&inode->i_mutex);
	return len - towrite;
}

#endif

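/*
 * VFS method tables and filesystem type registration.
 */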
static const struct super_operations jfs_super_operations = {
	.alloc_inode	= jfs_alloc_inode,
	.destroy_inode	= jfs_destroy_inode,
	.read_inode	= jfs_read_inode,
	.dirty_inode	= jfs_dirty_inode,
	.write_inode	= jfs_write_inode,
	.delete_inode	= jfs_delete_inode,
	.put_super	= jfs_put_super,
	.sync_fs	= jfs_sync_fs,
	.write_super_lockfs = jfs_write_super_lockfs,
	.unlockfs       = jfs_unlockfs,
	.statfs		= jfs_statfs,
	.remount_fs	= jfs_remount,
	.show_options	= jfs_show_options,
#ifdef CONFIG_QUOTA
	.quota_read	= jfs_quota_read,
	.quota_write	= jfs_quota_write,
#endif
};

static struct export_operations jfs_export_operations = {
	.get_parent	= jfs_get_parent,
};

static struct file_system_type jfs_fs_type = {
	.owner		= THIS_MODULE,
	.name		= "jfs",
	.get_sb		= jfs_get_sb,
	.kill_sb	= kill_block_super,
	.fs_flags	= FS_REQUIRES_DEV,
};

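/*
 * init_once - slab constructor for jfs_inode_info objects.  Initializes
 * the lists, locks and the embedded VFS inode when an object is first
 * set up in the cache.
 */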
static void init_once(void *foo, struct kmem_cache * cachep, unsigned long flags)
{
	struct jfs_inode_info *jfs_ip = (struct jfs_inode_info *) foo;

	memset(jfs_ip, 0, sizeof(struct jfs_inode_info));
	INIT_LIST_HEAD(&jfs_ip->anon_inode_list);
	init_rwsem(&jfs_ip->rdwrlock);
	mutex_init(&jfs_ip->commit_mutex);
	init_rwsem(&jfs_ip->xattr_sem);
	spin_lock_init(&jfs_ip->ag_lock);
	jfs_ip->active_ag = -1;
#ifdef CONFIG_JFS_POSIX_ACL
	jfs_ip->i_acl = JFS_ACL_NOT_CACHED;
	jfs_ip->i_default_acl = JFS_ACL_NOT_CACHED;
#endif
	inode_init_once(&jfs_ip->vfs_inode);
}

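/*
 * init_jfs_fs - module initialization: create the inode slab cache,
 * initialize the metapage and transaction managers, start the I/O,
 * commit and sync kernel threads, then register the filesystem.
 * Everything is unwound in reverse order if any step fails.
 */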
static int __init init_jfs_fs(void)
{
	int i;
	int rc;

	jfs_inode_cachep =
	    kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
			    SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
			    init_once, NULL);
	if (jfs_inode_cachep == NULL)
		return -ENOMEM;

	/*
	 * Metapage initialization
	 */
	rc = metapage_init();
	if (rc) {
		jfs_err("metapage_init failed w/rc = %d", rc);
		goto free_slab;
	}

	/*
	 * Transaction Manager initialization
	 */
	rc = txInit();
	if (rc) {
		jfs_err("txInit failed w/rc = %d", rc);
		goto free_metapage;
	}

	/*
	 * I/O completion thread (endio)
	 */
	jfsIOthread = kthread_run(jfsIOWait, NULL, "jfsIO");
	if (IS_ERR(jfsIOthread)) {
		rc = PTR_ERR(jfsIOthread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto end_txmngr;
	}

	if (commit_threads < 1)
		commit_threads = num_online_cpus();
	if (commit_threads > MAX_COMMIT_THREADS)
		commit_threads = MAX_COMMIT_THREADS;

	for (i = 0; i < commit_threads; i++) {
		jfsCommitThread[i] = kthread_run(jfs_lazycommit, NULL, "jfsCommit");
		if (IS_ERR(jfsCommitThread[i])) {
			rc = PTR_ERR(jfsCommitThread[i]);
			jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
			commit_threads = i;
			goto kill_committask;
		}
	}

	jfsSyncThread = kthread_run(jfs_sync, NULL, "jfsSync");
	if (IS_ERR(jfsSyncThread)) {
		rc = PTR_ERR(jfsSyncThread);
		jfs_err("init_jfs_fs: fork failed w/rc = %d", rc);
		goto kill_committask;
	}

#ifdef PROC_FS_JFS
	jfs_proc_init();
#endif

	return register_filesystem(&jfs_fs_type);

kill_committask:
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsIOthread);
end_txmngr:
	txExit();
free_metapage:
	metapage_exit();
free_slab:
	kmem_cache_destroy(jfs_inode_cachep);
	return rc;
}

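/*
 * exit_jfs_fs - module unload: shut down the transaction and metapage
 * managers, stop the worker threads, unregister the filesystem and
 * destroy the inode slab cache.
 */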
static void __exit exit_jfs_fs(void)
{
	int i;

	jfs_info("exit_jfs_fs called");

	txExit();
	metapage_exit();

	kthread_stop(jfsIOthread);
	for (i = 0; i < commit_threads; i++)
		kthread_stop(jfsCommitThread[i]);
	kthread_stop(jfsSyncThread);
#ifdef PROC_FS_JFS
	jfs_proc_clean();
#endif
	unregister_filesystem(&jfs_fs_type);
	kmem_cache_destroy(jfs_inode_cachep);
}

module_init(init_jfs_fs)
module_exit(exit_jfs_fs)