Home
last modified time | relevance | path

Searched refs: i_inode (Results 1 – 20 of 20) sorted by relevance

/fs/gfs2/
Dglops.c212 if (ip && !S_ISREG(ip->i_inode.i_mode)) in inode_go_sync()
216 unmap_shared_mapping_range(ip->i_inode.i_mapping, 0, 0); in inode_go_sync()
217 inode_dio_wait(&ip->i_inode); in inode_go_sync()
227 struct address_space *mapping = ip->i_inode.i_mapping; in inode_go_sync()
265 forget_all_cached_acls(&ip->i_inode); in inode_go_inval()
266 security_inode_invalidate_secctx(&ip->i_inode); in inode_go_inval()
275 if (ip && S_ISREG(ip->i_inode.i_mode)) in inode_go_inval()
276 truncate_inode_pages(ip->i_inode.i_mapping, 0); in inode_go_inval()
331 ip->i_inode.i_mode = be32_to_cpu(str->di_mode); in gfs2_dinode_in()
332 ip->i_inode.i_rdev = 0; in gfs2_dinode_in()
[all …]
Dinode.c336 error = gfs2_permission(&dip->i_inode, MAY_WRITE | MAY_EXEC); in create_ok()
341 if (!dip->i_inode.i_nlink) in create_ok()
346 if (S_ISDIR(mode) && dip->i_inode.i_nlink == (u32)-1) in create_ok()
355 if (GFS2_SB(&dip->i_inode)->sd_args.ar_suiddir && in munge_mode_uid_gid()
356 (dip->i_inode.i_mode & S_ISUID) && in munge_mode_uid_gid()
357 !uid_eq(dip->i_inode.i_uid, GLOBAL_ROOT_UID)) { in munge_mode_uid_gid()
360 else if (!uid_eq(dip->i_inode.i_uid, current_fsuid())) in munge_mode_uid_gid()
362 inode->i_uid = dip->i_inode.i_uid; in munge_mode_uid_gid()
366 if (dip->i_inode.i_mode & S_ISGID) { in munge_mode_uid_gid()
369 inode->i_gid = dip->i_inode.i_gid; in munge_mode_uid_gid()
[all …]
Dxattr.c85 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_EA)) in ea_foreach_i()
131 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_IN)) { in ea_foreach()
137 end = eablk + GFS2_SB(&ip->i_inode)->sd_inptrs; in ea_foreach()
230 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in ea_dealloc_unstuffed()
292 gfs2_add_inode_blocks(&ip->i_inode, -1); in ea_dealloc_unstuffed()
312 ip->i_inode.i_ctime = current_time(&ip->i_inode); in ea_dealloc_unstuffed()
331 error = gfs2_rindex_update(GFS2_SB(&ip->i_inode)); in ea_remove_unstuffed()
465 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_iter_unstuffed()
645 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in ea_alloc_blk()
666 gfs2_add_inode_blocks(&ip->i_inode, 1); in ea_alloc_blk()
[all …]
Dbmap.c59 struct inode *inode = &ip->i_inode; in gfs2_unstuffer_page()
132 if (i_size_read(&ip->i_inode)) { in gfs2_unstuff_dinode()
141 gfs2_trans_add_unrevoke(GFS2_SB(&ip->i_inode), block, 1); in gfs2_unstuff_dinode()
161 if (i_size_read(&ip->i_inode)) { in gfs2_unstuff_dinode()
163 gfs2_add_inode_blocks(&ip->i_inode, 1); in gfs2_unstuff_dinode()
164 di->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); in gfs2_unstuff_dinode()
582 gfs2_add_inode_blocks(&ip->i_inode, alloced); in gfs2_bmap_alloc()
721 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in do_strip()
828 gfs2_add_inode_blocks(&ip->i_inode, -1); in do_strip()
836 gfs2_quota_change(ip, -(s64)btotal, ip->i_inode.i_uid, in do_strip()
[all …]
Ddir.c116 if (gfs2_metatype_check(GFS2_SB(&ip->i_inode), bh, GFS2_METATYPE_JD)) { in gfs2_dir_get_existing_buffer()
136 if (ip->i_inode.i_size < offset + size) in gfs2_dir_write_stuffed()
137 i_size_write(&ip->i_inode, offset + size); in gfs2_dir_write_stuffed()
138 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); in gfs2_dir_write_stuffed()
160 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_dir_write_data()
199 error = gfs2_extent_map(&ip->i_inode, lblock, &new, in gfs2_dir_write_data()
234 if (ip->i_inode.i_size < offset + copied) in gfs2_dir_write_data()
235 i_size_write(&ip->i_inode, offset + copied); in gfs2_dir_write_data()
236 ip->i_inode.i_mtime = ip->i_inode.i_ctime = current_time(&ip->i_inode); in gfs2_dir_write_data()
276 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_dir_read_data()
[all …]
Dquota.h46 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_lock_check()
55 ret = gfs2_quota_check(ip, ip->i_inode.i_uid, ip->i_inode.i_gid, ap); in gfs2_quota_lock_check()
Dquota.c387 bh_map.b_size = BIT(ip->i_inode.i_blkbits); in bh_get()
388 error = gfs2_block_map(&ip->i_inode, block, &bh_map, 0); in bh_get()
538 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_qa_alloc()
565 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_hold()
584 error = qdsb_get(sdp, make_kqid_uid(ip->i_inode.i_uid), qd); in gfs2_quota_hold()
590 error = qdsb_get(sdp, make_kqid_gid(ip->i_inode.i_gid), qd); in gfs2_quota_hold()
597 !uid_eq(uid, ip->i_inode.i_uid)) { in gfs2_quota_hold()
606 !gid_eq(gid, ip->i_inode.i_gid)) { in gfs2_quota_hold()
622 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_quota_unhold()
693 struct inode *inode = &ip->i_inode; in gfs2_write_buf_to_page()
[all …]
Dsuper.c714 str->di_mode = cpu_to_be32(ip->i_inode.i_mode); in gfs2_dinode_out()
715 str->di_uid = cpu_to_be32(i_uid_read(&ip->i_inode)); in gfs2_dinode_out()
716 str->di_gid = cpu_to_be32(i_gid_read(&ip->i_inode)); in gfs2_dinode_out()
717 str->di_nlink = cpu_to_be32(ip->i_inode.i_nlink); in gfs2_dinode_out()
718 str->di_size = cpu_to_be64(i_size_read(&ip->i_inode)); in gfs2_dinode_out()
719 str->di_blocks = cpu_to_be64(gfs2_get_inode_blocks(&ip->i_inode)); in gfs2_dinode_out()
720 str->di_atime = cpu_to_be64(ip->i_inode.i_atime.tv_sec); in gfs2_dinode_out()
721 str->di_mtime = cpu_to_be64(ip->i_inode.i_mtime.tv_sec); in gfs2_dinode_out()
722 str->di_ctime = cpu_to_be64(ip->i_inode.i_ctime.tv_sec); in gfs2_dinode_out()
730 str->di_payload_format = cpu_to_be32(S_ISDIR(ip->i_inode.i_mode) && in gfs2_dinode_out()
[all …]
Dinode.h35 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_is_writeback()
41 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_is_ordered()
47 return S_ISDIR(ip->i_inode.i_mode); in gfs2_is_dir()
Daops.c470 u64 dsize = i_size_read(&ip->i_inode); in stuffed_readpage()
581 struct address_space *mapping = ip->i_inode.i_mapping; in gfs2_internal_read()
681 if (&ip->i_inode == sdp->sd_rindex) { in gfs2_write_begin()
713 if (&ip->i_inode == sdp->sd_rindex) in gfs2_write_begin()
752 if (pos + len > ip->i_inode.i_size) in gfs2_write_begin()
753 gfs2_trim_blocks(&ip->i_inode); in gfs2_write_begin()
765 if (&ip->i_inode == sdp->sd_rindex) { in gfs2_write_begin()
1057 if (offset >= i_size_read(&ip->i_inode)) in gfs2_ok_for_dio()
1112 unmap_shared_mapping_range(ip->i_inode.i_mapping, offset, len); in gfs2_direct_IO()
Drgrp.c581 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in check_and_update_goal()
881 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in read_rindex_entry()
888 if (pos >= i_size_read(&ip->i_inode)) in read_rindex_entry()
983 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_ri_update()
1496 struct inode *inode = &ip->i_inode; in rg_mblk_search()
1911 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_orlov_skip()
1969 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_inplace_reserve()
1989 if (S_ISDIR(ip->i_inode.i_mode) && (ap->aflags & GFS2_AF_ORLOV)) in gfs2_inplace_reserve()
2324 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_alloc_blocks()
2390 gfs2_quota_change(ip, *nblocks, ip->i_inode.i_uid, ip->i_inode.i_gid); in gfs2_alloc_blocks()
[all …]
Dfile.c516 !IS_NOATIME(&ip->i_inode)) { in gfs2_mmap()
589 if (S_ISREG(ip->i_inode.i_mode)) { in gfs2_open()
780 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in calc_max_reserv()
994 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) in gfs2_lock()
1043 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, in do_flock()
1064 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); in do_flock()
Dlog.c540 if (ip->i_inode.i_mapping->nrpages == 0) in gfs2_ordered_write()
543 filemap_fdatawrite(ip->i_inode.i_mapping); in gfs2_ordered_write()
559 if (ip->i_inode.i_mapping->nrpages == 0) in gfs2_ordered_wait()
562 filemap_fdatawait(ip->i_inode.i_mapping); in gfs2_ordered_wait()
570 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_ordered_del_inode()
Drecovery.c42 error = gfs2_extent_map(&ip->i_inode, blk, &new, &dblock, &extlen); in gfs2_replay_read_block()
394 bh_map.b_size = 1 << ip->i_inode.i_blkbits; in clean_journal()
395 error = gfs2_block_map(&ip->i_inode, lblock, &bh_map, 0); in clean_journal()
Dbmap.h34 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_write_calc_reserv()
Dlog.h53 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_ordered_add_inode()
Dmeta_io.c377 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_meta_wipe()
409 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_meta_indirect_buffer()
Dmain.c41 inode_init_once(&ip->i_inode); in gfs2_init_inode_once()
Dincore.h389 struct inode i_inode; member
419 return container_of(inode, struct gfs2_inode, i_inode); in GFS2_I()
Dutil.c159 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_consist_inode_i()