Lines matching refs: ii

78 struct nilfs_inode_info *ii = NILFS_I(inode);
85 ret = nilfs_bmap_lookup_contig(ii->i_bmap, blkoff, &blknum, maxblocks);
101 err = nilfs_bmap_insert(ii->i_bmap, blkoff,
330 struct nilfs_inode_info *ii;
344 ii = NILFS_I(inode);
345 ii->i_state = BIT(NILFS_I_NEW);
346 ii->i_root = root;
365 ii->i_bh = bh;
373 err = nilfs_bmap_read(ii->i_bmap, NULL);
377 set_bit(NILFS_I_BMAP, &ii->i_state);
381 ii->i_flags = nilfs_mask_flags(
384 /* ii->i_file_acl = 0; */
385 /* ii->i_dir_acl = 0; */
386 ii->i_dir_start_lookup = 0;
445 struct nilfs_inode_info *ii = NILFS_I(inode);
465 ii->i_flags = le32_to_cpu(raw_inode->i_flags);
467 ii->i_file_acl = le32_to_cpu(raw_inode->i_file_acl);
468 ii->i_dir_acl = S_ISREG(inode->i_mode) ?
471 ii->i_dir_start_lookup = 0;
476 err = nilfs_bmap_read(ii->i_bmap, raw_inode);
479 set_bit(NILFS_I_BMAP, &ii->i_state);
543 struct nilfs_inode_info *ii;
548 ii = NILFS_I(inode);
549 if (test_bit(NILFS_I_BTNC, &ii->i_state)) {
555 if (test_bit(NILFS_I_SHADOW, &ii->i_state)) {
562 if (!test_bit(NILFS_I_GCINODE, &ii->i_state))
565 return args->for_gc && args->cno == ii->i_cno;
670 struct nilfs_inode_info *ii = NILFS_I(inode);
674 if (ii->i_assoc_inode)
678 args.root = ii->i_root;
679 args.cno = ii->i_cno;
680 args.for_gc = test_bit(NILFS_I_GCINODE, &ii->i_state) != 0;
682 args.for_shadow = test_bit(NILFS_I_SHADOW, &ii->i_state) != 0;
693 NILFS_I(btnc_inode)->i_bmap = ii->i_bmap;
694 ii->i_assoc_inode = btnc_inode;
708 struct nilfs_inode_info *ii = NILFS_I(inode);
709 struct inode *btnc_inode = ii->i_assoc_inode;
713 ii->i_assoc_inode = NULL;
775 struct nilfs_inode_info *ii = NILFS_I(inode);
788 raw_inode->i_flags = cpu_to_le32(ii->i_flags);
800 struct nilfs_inode_info *ii = NILFS_I(inode);
801 struct inode *ifile = ii->i_root->ifile;
806 if (test_and_clear_bit(NILFS_I_NEW, &ii->i_state))
809 set_bit(NILFS_I_INODE_SYNC, &ii->i_state);
822 static void nilfs_truncate_bmap(struct nilfs_inode_info *ii,
828 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
831 ret = nilfs_bmap_last_key(ii->i_bmap, &b);
841 ret = nilfs_bmap_truncate(ii->i_bmap, b);
842 nilfs_relax_pressure_in_lock(ii->vfs_inode.i_sb);
844 nilfs_bmap_truncate(ii->i_bmap, b) == 0))
848 nilfs_warn(ii->vfs_inode.i_sb, "error %d truncating bmap (ino=%lu)",
849 ret, ii->vfs_inode.i_ino);
858 struct nilfs_inode_info *ii = NILFS_I(inode);
860 if (!test_bit(NILFS_I_BMAP, &ii->i_state))
871 nilfs_truncate_bmap(ii, blkoff);
888 struct nilfs_inode_info *ii = NILFS_I(inode);
893 BUG_ON(!list_empty(&ii->i_dirty));
894 brelse(ii->i_bh);
895 ii->i_bh = NULL;
900 if (test_bit(NILFS_I_BMAP, &ii->i_state))
901 nilfs_bmap_clear(ii->i_bmap);
903 if (!test_bit(NILFS_I_BTNC, &ii->i_state))
906 if (ii->i_root && inode->i_ino == NILFS_ROOT_INO)
907 nilfs_put_root(ii->i_root);
914 struct nilfs_inode_info *ii = NILFS_I(inode);
918 if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
946 nilfs_truncate_bmap(ii, 0);
950 ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
952 atomic64_dec(&ii->i_root->inodes_count);
1019 struct nilfs_inode_info *ii = NILFS_I(inode);
1023 if (ii->i_bh == NULL || unlikely(!buffer_uptodate(ii->i_bh))) {
1025 err = nilfs_ifile_get_inode_block(ii->i_root->ifile,
1030 if (ii->i_bh == NULL)
1031 ii->i_bh = *pbh;
1032 else if (unlikely(!buffer_uptodate(ii->i_bh))) {
1033 __brelse(ii->i_bh);
1034 ii->i_bh = *pbh;
1037 *pbh = ii->i_bh;
1040 *pbh = ii->i_bh;
1049 struct nilfs_inode_info *ii = NILFS_I(inode);
1053 if (!list_empty(&ii->i_dirty)) {
1055 ret = test_bit(NILFS_I_DIRTY, &ii->i_state) ||
1056 test_bit(NILFS_I_BUSY, &ii->i_state);
1064 struct nilfs_inode_info *ii = NILFS_I(inode);
1069 if (test_and_set_bit(NILFS_I_DIRTY, &ii->i_state))
1073 if (!test_bit(NILFS_I_QUEUED, &ii->i_state) &&
1074 !test_bit(NILFS_I_BUSY, &ii->i_state)) {
1079 if (list_empty(&ii->i_dirty) && igrab(inode) == NULL) {
1093 list_move_tail(&ii->i_dirty, &nilfs->ns_dirty_files);
1094 set_bit(NILFS_I_QUEUED, &ii->i_state);
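
All of the references above go through one per-inode structure and its NILFS_I() accessor. As orientation for reading the listing, the following is a minimal sketch of the fields involved, assuming the declarations in fs/nilfs2/nilfs.h; field order and types are approximate, config-dependent members are omitted, and the comments only summarize how the lines above use each field, so treat it as a reading aid rather than the authoritative definition.

/*
 * Sketch (not the verbatim kernel declaration) of the state behind "ii".
 */
struct nilfs_inode_info {
	__u32 i_flags;			/* on-disk flag word (lines 381, 465, 788) */
	unsigned long i_state;		/* NILFS_I_* bits: NEW, BMAP, DIRTY, QUEUED,
					 * BUSY, GCINODE, BTNC, SHADOW, INODE_SYNC */
	struct nilfs_bmap *i_bmap;	/* block mapping: lookup_contig/insert/truncate/clear */
	__u32 i_dir_start_lookup;	/* directory lookup cursor, zeroed on init (lines 386, 471) */
	__u64 i_cno;			/* checkpoint number, matched in the iget test (line 565) */
	struct inode *i_assoc_inode;	/* attached btree-node-cache inode (lines 674-713) */
	struct buffer_head *i_bh;	/* buffer of the on-disk inode block (lines 365, 1019-1040) */
	struct nilfs_root *i_root;	/* checkpoint root; i_root->ifile backs the inode */
	struct list_head i_dirty;	/* link on nilfs->ns_dirty_files (lines 893, 1053, 1093) */
	struct inode vfs_inode;		/* embedded VFS inode */
};

/* NILFS_I() maps a VFS inode back to its containing nilfs_inode_info. */
static inline struct nilfs_inode_info *NILFS_I(struct inode *inode)
{
	return container_of(inode, struct nilfs_inode_info, vfs_inode);
}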