Searched refs: i_inode (Results 1 - 21 of 21), sorted by relevance

/netgear-R7000-V1.0.7.12_1.2.5/components/opensource/linux/linux-2.6.36/fs/gfs2/
inode.c
344 ip->i_inode.i_mode = be32_to_cpu(str->di_mode);
345 ip->i_inode.i_rdev = 0;
346 switch (ip->i_inode.i_mode & S_IFMT) {
349 ip->i_inode.i_rdev = MKDEV(be32_to_cpu(str->di_major),
354 ip->i_inode.i_uid = be32_to_cpu(str->di_uid);
355 ip->i_inode.i_gid = be32_to_cpu(str->di_gid);
361 ip->i_inode.i_nlink = be32_to_cpu(str->di_nlink);
363 i_size_write(&ip->i_inode, ip->i_disksize);
364 gfs2_set_inode_blocks(&ip->i_inode, be64_to_cpu(str->di_blocks));
367 if (timespec_compare(&ip->i_inode
[all...]
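The inode.c hits above are the on-disk to in-core decode path: each big-endian dinode field is converted with be32_to_cpu()/be64_to_cpu() and stored into the VFS inode embedded in struct gfs2_inode. A minimal sketch of that pattern follows; demo_dinode and demo_dinode_in are hypothetical stand-ins, not the real struct gfs2_dinode or gfs2_dinode_in().

        #include <linux/fs.h>
        #include <asm/byteorder.h>

        struct demo_dinode {            /* hypothetical stand-in for struct gfs2_dinode */
                __be32 di_mode;
                __be32 di_uid;
                __be32 di_gid;
                __be32 di_nlink;
                __be64 di_blocks;
        };

        static void demo_dinode_in(struct inode *inode, const struct demo_dinode *str)
        {
                /* on-disk fields are big-endian; convert before storing */
                inode->i_mode  = be32_to_cpu(str->di_mode);
                inode->i_uid   = be32_to_cpu(str->di_uid);
                inode->i_gid   = be32_to_cpu(str->di_gid);
                inode->i_nlink = be32_to_cpu(str->di_nlink);    /* direct assignment is 2.6.36-era style */
                inode->i_blocks = be64_to_cpu(str->di_blocks);  /* the hit at inode.c:364 goes through
                                                                   gfs2_set_inode_blocks() instead */
        }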
quota.h
39 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
48 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid);
bmap.c
61 struct inode *inode = &ip->i_inode;
144 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1);
166 gfs2_add_inode_blocks(&ip->i_inode, 1);
167 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
548 gfs2_add_inode_blocks(&ip->i_inode, alloced);
692 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
758 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
861 gfs2_add_inode_blocks(&ip->i_inode, -1);
870 ip->i_inode.i_mtime = ip->i_inode
[all...]
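bmap.c keeps the block count in two places: the in-core count on the embedded VFS inode and the big-endian di_blocks field of the on-disk dinode. The hits at lines 166-167 show the sync pattern. A sketch, assuming the GFS2 private headers are available and a dinode buffer (di) is already mapped and journaled; demo_account_blocks is a hypothetical name:

        /* adjust the in-core count first, then re-derive the on-disk field;
         * change is negative when freeing (compare the hit at bmap.c:861) */
        static void demo_account_blocks(struct gfs2_inode *ip,
                                        struct gfs2_dinode *di, s64 change)
        {
                gfs2_add_inode_blocks(&ip->i_inode, change);
                di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode));
        }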
xattr.c
84 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA))
130 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) {
136 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs;
229 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
287 gfs2_add_inode_blocks(&ip->i_inode, -1);
307 ip->i_inode.i_ctime = CURRENT_TIME;
335 error = gfs2_rindex_hold(GFS2_SB(&ip->i_inode), &al->al_ri_gh);
468 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
613 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
634 gfs2_add_inode_blocks(&ip->i_inode,
[all...]
bmap.h
34 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
glops.c
139 if (ip && !S_ISREG(ip->i_inode.i_mode))
142 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0);
151 struct address_space *mapping = ip->i_inode.i_mapping;
189 forget_all_cached_acls(&ip->i_inode);
195 if (ip && S_ISREG(ip->i_inode.i_mode))
196 truncate_inode_pages(ip->i_inode.i_mapping, 0);
268 IF2DT(ip->i_inode.i_mode), ip->i_flags,
270 (unsigned long long)ip->i_inode.i_size,
inode.h
36 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
42 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
48 return S_ISDIR(ip->i_inode.i_mode);
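Nearly every helper in this list opens with GFS2_SB(&ip->i_inode) to reach the filesystem-private superblock data. A sketch of how such accessors are typically built; the GFS2_SB body below is the standard s_fs_info lookup reconstructed from context rather than copied from this tree, and the gfs2_is_dir body matches the hit at inode.h:48:

        static inline struct gfs2_sbd *GFS2_SB(const struct inode *inode)
        {
                /* s_fs_info holds the per-mount private data by VFS convention */
                return inode->i_sb->s_fs_info;
        }

        static inline int gfs2_is_dir(const struct gfs2_inode *ip)
        {
                return S_ISDIR(ip->i_inode.i_mode);
        }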
ops_inode.c
182 if (!dip->i_inode.i_nlink)
191 if (!ip->i_inode.i_nlink)
194 if (ip->i_inode.i_nlink == (u32)-1)
279 if (IS_IMMUTABLE(&ip->i_inode) || IS_APPEND(&ip->i_inode))
282 if ((dip->i_inode.i_mode & S_ISVTX) &&
283 dip->i_inode.i_uid != current_fsuid() &&
284 ip->i_inode.i_uid != current_fsuid() && !capable(CAP_FOWNER))
287 if (IS_APPEND(&dip->i_inode))
290 error = gfs2_permission(&dip->i_inode, MAY_WRIT
[all...]
dir.c
110 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) {
132 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
154 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
193 error = gfs2_extent_map(&ip->i_inode, lblock, &new,
230 ip->i_inode.i_mtime = ip->i_inode.i_ctime = CURRENT_TIME;
272 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
308 error = gfs2_extent_map(&ip->i_inode, lblock, &new,
668 if (!error && gfs2_metatype_check(GFS2_SB(&dip->i_inode), *bh
[all...]
rgrp.c
536 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
588 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
589 struct inode *inode = &ip->i_inode;
622 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
623 struct inode *inode = &ip->i_inode;
1074 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1193 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1223 gfs2_process_unlinked_inode(ip->i_inode.i_sb, unlinked);
1248 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
1496 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
[all...]
quota.c
317 bh_map.b_size = 1 << ip->i_inode.i_blkbits;
318 error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0);
494 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
506 error = qdsb_get(sdp, QUOTA_USER, ip->i_inode.i_uid, qd);
512 error = qdsb_get(sdp, QUOTA_GROUP, ip->i_inode.i_gid, qd);
518 if (uid != NO_QUOTA_CHANGE && uid != ip->i_inode.i_uid) {
526 if (gid != NO_QUOTA_CHANGE && gid != ip->i_inode.i_gid) {
542 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
633 struct inode *inode = &ip->i_inode;
777 mutex_lock_nested(&ip->i_inode
[all...]
file.c
393 last_index = ip->i_inode.i_size >> PAGE_CACHE_SHIFT;
397 if (!PageUptodate(page) || page->mapping != ip->i_inode.i_mapping)
487 if (S_ISREG(ip->i_inode.i_mode)) {
648 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
690 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
704 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
acl.c
51 acl = get_cached_acl(&ip->i_inode, type);
139 struct gfs2_sbd *sdp = GFS2_SB(&dip->i_inode);
218 set_cached_acl(&ip->i_inode, ACL_TYPE_ACCESS, acl);
aops.c
418 u64 dsize = i_size_read(&ip->i_inode);
530 struct address_space *mapping = ip->i_inode.i_mapping;
628 if (&ip->i_inode == sdp->sd_rindex) {
664 if (&ip->i_inode == sdp->sd_rindex)
699 if (pos + len > ip->i_inode.i_size)
700 truncate_setsize(&ip->i_inode, ip->i_inode.i_size);
712 if (&ip->i_inode == sdp->sd_rindex) {
1007 if (offset >= i_size_read(&ip->i_inode))
main.c
37 inode_init_once(&ip->i_inode);
meta_io.c
351 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
384 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
recovery.c
42 error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen);
394 bh_map.b_size = 1 << ip->i_inode.i_blkbits;
395 error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0);
incore.h
265 struct inode i_inode; /* member in struct gfs2_inode */
286 * Since i_inode is the first element of struct gfs2_inode,
291 return container_of(inode, struct gfs2_inode, i_inode);
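The incore.h hits explain why every file in this list can move freely between struct inode * and struct gfs2_inode *: the VFS inode is embedded as the first member of struct gfs2_inode, so container_of() degenerates to a pointer cast. Reconstructed directly from the lines above:

        static inline struct gfs2_inode *GFS2_I(struct inode *inode)
        {
                /* i_inode is the first element of struct gfs2_inode, so this
                 * container_of() is effectively a no-op cast */
                return container_of(inode, struct gfs2_inode, i_inode);
        }

The reverse direction needs no helper at all: it is just &ip->i_inode, as in the super.c hit at line 1402 below.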
util.c
152 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
ops_fstype.c
592 bh.b_size = 1 << ip->i_inode.i_blkbits;
super.c
1402 return &ip->i_inode;

Completed in 213 milliseconds