Lines Matching +full:ip +full:-blocks (excerpts from fs/gfs2/file.c)

1 // SPDX-License-Identifier: GPL-2.0-only
3 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
4 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
27 #include <linux/backing-dev.h>
45 * gfs2_llseek - seek to a location in a file
58 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); in gfs2_llseek() local
64 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, in gfs2_llseek()
83 * These don't reference inode->i_size and don't depend on the in gfs2_llseek()
89 error = -EINVAL; in gfs2_llseek()
96 * gfs2_readdir - Iterator for a directory
105 struct inode *dir = file->f_mapping->host; in gfs2_readdir()
110 error = gfs2_glock_nq_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh); in gfs2_readdir()
114 error = gfs2_dir_read(dir, ctx, &file->f_ra); in gfs2_readdir()
125 * and to GFS2_DIF_JDATA for non-directories.
145 if (S_ISDIR(inode->i_mode)) in gfs2_gfsflags_to_fsflags()
159 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_get_flags() local
164 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_get_flags()
169 fsflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags); in gfs2_get_flags()
172 error = -EFAULT; in gfs2_get_flags()
182 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_set_inode_flags() local
183 unsigned int flags = inode->i_flags; in gfs2_set_inode_flags()
186 if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode)) in gfs2_set_inode_flags()
188 if (ip->i_diskflags & GFS2_DIF_IMMUTABLE) in gfs2_set_inode_flags()
190 if (ip->i_diskflags & GFS2_DIF_APPENDONLY) in gfs2_set_inode_flags()
192 if (ip->i_diskflags & GFS2_DIF_NOATIME) in gfs2_set_inode_flags()
194 if (ip->i_diskflags & GFS2_DIF_SYNC) in gfs2_set_inode_flags()
196 inode->i_flags = flags; in gfs2_set_inode_flags()
209 * do_gfs2_set_flags - set flags on an inode
220 struct gfs2_inode *ip = GFS2_I(inode); in do_gfs2_set_flags() local
231 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in do_gfs2_set_flags()
235 oldflags = gfs2_gfsflags_to_fsflags(inode, ip->i_diskflags); in do_gfs2_set_flags()
240 error = -EACCES; in do_gfs2_set_flags()
245 flags = ip->i_diskflags; in do_gfs2_set_flags()
250 error = -EPERM; in do_gfs2_set_flags()
265 gfs2_log_flush(sdp, ip->i_gl, in do_gfs2_set_flags()
268 error = filemap_fdatawrite(inode->i_mapping); in do_gfs2_set_flags()
271 error = filemap_fdatawait(inode->i_mapping); in do_gfs2_set_flags()
275 gfs2_ordered_del_inode(ip); in do_gfs2_set_flags()
280 error = gfs2_meta_inode_buffer(ip, &bh); in do_gfs2_set_flags()
283 inode->i_ctime = current_time(inode); in do_gfs2_set_flags()
284 gfs2_trans_add_meta(ip->i_gl, bh); in do_gfs2_set_flags()
285 ip->i_diskflags = new_flags; in do_gfs2_set_flags()
286 gfs2_dinode_out(ip, bh->b_data); in do_gfs2_set_flags()
307 return -EFAULT; in gfs2_set_flags()
316 return -EINVAL; in gfs2_set_flags()
319 if (S_ISDIR(inode->i_mode)) { in gfs2_set_flags()
324 return -EINVAL; in gfs2_set_flags()
336 if (copy_to_user(label, sdp->sd_sb.sb_locktable, GFS2_LOCKNAME_LEN)) in gfs2_getlabel()
337 return -EFAULT; in gfs2_getlabel()
355 return -ENOTTY; in gfs2_ioctl()
374 return -ENOIOCTLCMD; in gfs2_compat_ioctl()
384 * gfs2_size_hint - Give a hint to the size of a write request
391 * about how many blocks will be required.
399 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_size_hint() local
400 size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift; in gfs2_size_hint()
403 if (hint > atomic_read(&ip->i_sizehint)) in gfs2_size_hint()
404 atomic_set(&ip->i_sizehint, hint); in gfs2_size_hint()
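The matched lines show the whole calculation: the pending write is rounded up to whole filesystem blocks and stored in ip->i_sizehint, but only if it raises the existing hint. A minimal userspace sketch of that arithmetic, assuming a power-of-two block size (names are illustrative, and a plain integer stands in for the kernel's atomic_t):

#include <stdint.h>
#include <stdio.h>

/* Round a write of `size` bytes up to whole blocks, as in gfs2_size_hint();
 * bsize must be a power of two and bsize_shift its log2. */
static uint64_t write_size_to_blocks(uint64_t size, unsigned bsize,
                                     unsigned bsize_shift)
{
    return (size + bsize - 1) >> bsize_shift;
}

/* Raise the cached hint only if the new estimate is larger, mirroring the
 * atomic_read()/atomic_set() pair in the kernel (non-atomic here). */
static void update_size_hint(uint64_t *sizehint, uint64_t blks)
{
    if (blks > *sizehint)
        *sizehint = blks;
}

int main(void)
{
    uint64_t hint = 0;

    update_size_hint(&hint, write_size_to_blocks(5000, 4096, 12));
    printf("hint = %llu blocks\n", (unsigned long long)hint); /* 2 */
    return 0;
}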
408 * gfs2_allocate_page_backing - Allocate blocks for a write fault
412 * We try to allocate all the blocks required for the page in one go. This
413 * might fail for various reasons, so we keep trying until all the blocks to
414 * back this page are allocated. If some of the blocks are already allocated,
424 if (gfs2_iomap_get_alloc(page->mapping->host, pos, length, &iomap)) in gfs2_allocate_page_backing()
425 return -EIO; in gfs2_allocate_page_backing()
429 length -= iomap.length; in gfs2_allocate_page_backing()
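The comment above describes the retry pattern: each call to gfs2_iomap_get_alloc() may map or allocate only part of the range backing the page, so the function loops, advancing the position and shrinking the remaining length until everything is covered. A hedged userspace sketch of that loop, with a stub standing in for gfs2_iomap_get_alloc() (the names and the fixed 4 KiB stub extent are illustrative, not the kernel's API):

#include <errno.h>
#include <stdint.h>

struct extent {
    uint64_t addr;
    uint64_t length;
};

/* Stub for gfs2_iomap_get_alloc(): pretend each call can map or allocate at
 * most 4096 bytes starting at pos; nonzero would mean failure. */
static int get_alloc(uint64_t pos, uint64_t length, struct extent *ext)
{
    ext->addr = pos;
    ext->length = length < 4096 ? length : 4096;
    return 0;
}

/* Keep allocating until the whole byte range is backed, mirroring the loop
 * structure of gfs2_allocate_page_backing(). */
static int allocate_backing(uint64_t pos, uint64_t length)
{
    do {
        struct extent ext = { 0 };

        if (get_alloc(pos, length, &ext))
            return -EIO;
        if (ext.length > length)
            ext.length = length;    /* clamp to what is still needed */
        pos += ext.length;
        length -= ext.length;
    } while (length > 0);
    return 0;
}

int main(void)
{
    return allocate_backing(0, 16384);    /* four stub extents cover the range */
}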
437 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
442 * blocks allocated on disk to back that page.
447 struct page *page = vmf->page; in gfs2_page_mkwrite()
448 struct inode *inode = file_inode(vmf->vma->vm_file); in gfs2_page_mkwrite()
449 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_page_mkwrite() local
459 sb_start_pagefault(inode->i_sb); in gfs2_page_mkwrite()
461 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_page_mkwrite()
469 ret = -EINVAL; in gfs2_page_mkwrite()
474 file_update_time(vmf->vma->vm_file); in gfs2_page_mkwrite()
477 if (size - offset < PAGE_SIZE) in gfs2_page_mkwrite()
478 length = size - offset; in gfs2_page_mkwrite()
482 gfs2_size_hint(vmf->vma->vm_file, offset, length); in gfs2_page_mkwrite()
484 set_bit(GLF_DIRTY, &ip->i_gl->gl_flags); in gfs2_page_mkwrite()
485 set_bit(GIF_SW_PAGED, &ip->i_flags); in gfs2_page_mkwrite()
492 if (!gfs2_is_stuffed(ip) && in gfs2_page_mkwrite()
493 !gfs2_write_alloc_required(ip, offset, length)) { in gfs2_page_mkwrite()
495 if (!PageUptodate(page) || page->mapping != inode->i_mapping) { in gfs2_page_mkwrite()
496 ret = -EAGAIN; in gfs2_page_mkwrite()
506 gfs2_write_calc_reserv(ip, length, &data_blocks, &ind_blocks); in gfs2_page_mkwrite()
508 ret = gfs2_quota_lock_check(ip, &ap); in gfs2_page_mkwrite()
511 ret = gfs2_inplace_reserve(ip, &ap); in gfs2_page_mkwrite()
516 if (gfs2_is_jdata(ip)) in gfs2_page_mkwrite()
520 rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks); in gfs2_page_mkwrite()
527 ret = -EAGAIN; in gfs2_page_mkwrite()
531 if (!PageUptodate(page) || page->mapping != inode->i_mapping) in gfs2_page_mkwrite()
534 /* Unstuff, if required, and allocate backing blocks for page */ in gfs2_page_mkwrite()
536 if (gfs2_is_stuffed(ip)) in gfs2_page_mkwrite()
537 ret = gfs2_unstuff_dinode(ip, page); in gfs2_page_mkwrite()
546 gfs2_inplace_release(ip); in gfs2_page_mkwrite()
548 gfs2_quota_unlock(ip); in gfs2_page_mkwrite()
557 sb_end_pagefault(inode->i_sb); in gfs2_page_mkwrite()
563 struct inode *inode = file_inode(vmf->vma->vm_file); in gfs2_fault()
564 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_fault() local
569 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_fault()
589 * gfs2_mmap -
602 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); in gfs2_mmap() local
604 if (!(file->f_flags & O_NOATIME) && in gfs2_mmap()
605 !IS_NOATIME(&ip->i_inode)) { in gfs2_mmap()
609 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, in gfs2_mmap()
617 vma->vm_ops = &gfs2_vm_ops; in gfs2_mmap()
623 * gfs2_open_common - This is common to open and atomic_open
640 if (S_ISREG(inode->i_mode)) { in gfs2_open_common()
648 return -ENOMEM; in gfs2_open_common()
650 mutex_init(&fp->f_fl_mutex); in gfs2_open_common()
652 gfs2_assert_warn(GFS2_SB(inode), !file->private_data); in gfs2_open_common()
653 file->private_data = fp; in gfs2_open_common()
654 if (file->f_mode & FMODE_WRITE) { in gfs2_open_common()
662 kfree(file->private_data); in gfs2_open_common()
663 file->private_data = NULL; in gfs2_open_common()
668 * gfs2_open - open a file
683 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_open() local
688 if (S_ISREG(ip->i_inode.i_mode)) { in gfs2_open()
689 error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, in gfs2_open()
705 * gfs2_release - called to close a struct file
714 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_release() local
716 kfree(file->private_data); in gfs2_release()
717 file->private_data = NULL; in gfs2_release()
719 if (file->f_mode & FMODE_WRITE) { in gfs2_release()
720 gfs2_rs_delete(ip, &inode->i_writecount); in gfs2_release()
721 gfs2_qa_put(ip); in gfs2_release()
727 * gfs2_fsync - sync the dirty data for a file (across the cluster)
739 * If the fdatawrite fails due to any reason except -EIO, we will
750 struct address_space *mapping = file->f_mapping; in gfs2_fsync()
751 struct inode *inode = mapping->host; in gfs2_fsync()
752 int sync_state = inode->i_state & I_DIRTY_ALL; in gfs2_fsync()
753 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_fsync() local
756 if (mapping->nrpages) { in gfs2_fsync()
758 if (ret1 == -EIO) in gfs2_fsync()
762 if (!gfs2_is_jdata(ip)) in gfs2_fsync()
771 if (gfs2_is_jdata(ip)) in gfs2_fsync()
775 gfs2_ail_flush(ip->i_gl, 1); in gfs2_fsync()
778 if (mapping->nrpages) in gfs2_fsync()
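Read together, the matched lines suggest the ordering gfs2_fsync() follows: start writeback of the dirty pages (only -EIO aborts immediately), sync the inode metadata (journaled-data files wait on the data itself, others flush the glock's AIL list), and finally wait for the data writeback started first. A simplified outline under those assumptions, with stubs in place of the kernel helpers (names illustrative):

#include <errno.h>
#include <stdbool.h>

/* Stubs standing in for filemap_fdatawrite_range(), sync_inode_metadata(),
 * gfs2_ail_flush() and filemap_fdatawait_range(); 0 means success. */
static int start_data_writeback(void)     { return 0; }
static int sync_metadata(void)            { return 0; }
static int flush_ail_metadata(void)       { return 0; }
static int wait_for_data_writeback(void)  { return 0; }

/* Simplified outline of the gfs2_fsync() ordering. */
static int fsync_outline(bool has_pages, bool is_jdata, bool metadata_dirty)
{
    int ret = 0, ret1 = 0;

    if (has_pages) {
        ret1 = start_data_writeback();
        if (ret1 == -EIO)              /* only -EIO aborts immediately */
            return ret1;
    }

    if (metadata_dirty) {
        ret = sync_metadata();
        if (ret)
            return ret;
        if (is_jdata)
            ret = wait_for_data_writeback();
        else
            ret = flush_ail_metadata();
    }

    if (has_pages)
        ret = wait_for_data_writeback();

    return ret ? ret : ret1;
}

int main(void)
{
    return fsync_outline(true, false, true);
}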
787 struct file *file = iocb->ki_filp; in gfs2_file_direct_read()
788 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); in gfs2_file_direct_read() local
795 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); in gfs2_file_direct_read()
812 struct file *file = iocb->ki_filp; in gfs2_file_direct_write()
813 struct inode *inode = file->f_mapping->host; in gfs2_file_direct_write()
814 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_file_direct_write() local
816 loff_t offset = iocb->ki_pos; in gfs2_file_direct_write()
827 gfs2_holder_init(ip->i_gl, LM_ST_DEFERRED, 0, gh); in gfs2_file_direct_write()
833 if (offset + len > i_size_read(&ip->i_inode)) in gfs2_file_direct_write()
838 if (ret == -ENOTBLK) in gfs2_file_direct_write()
849 struct gfs2_inode *ip; in gfs2_file_read_iter() local
854 if (iocb->ki_flags & IOCB_DIRECT) { in gfs2_file_read_iter()
856 if (likely(ret != -ENOTBLK)) in gfs2_file_read_iter()
858 iocb->ki_flags &= ~IOCB_DIRECT; in gfs2_file_read_iter()
860 iocb->ki_flags |= IOCB_NOIO; in gfs2_file_read_iter()
862 iocb->ki_flags &= ~IOCB_NOIO; in gfs2_file_read_iter()
868 if (ret != -EAGAIN) in gfs2_file_read_iter()
870 if (iocb->ki_flags & IOCB_NOWAIT) in gfs2_file_read_iter()
873 ip = GFS2_I(iocb->ki_filp->f_mapping->host); in gfs2_file_read_iter()
874 gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_file_read_iter()
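The matched lines sketch gfs2_file_read_iter()'s fallback chain: try direct I/O when IOCB_DIRECT is set, then attempt a page-cache-only read with IOCB_NOIO, and only if that misses with -EAGAIN (and the caller did not ask for IOCB_NOWAIT) take the shared glock and retry the read. A hedged userspace outline of that try-unlocked-then-lock-and-retry pattern, with stub functions in place of the kernel calls (names illustrative):

#include <errno.h>
#include <stdbool.h>
#include <stddef.h>

/* Stubs: a page-cache-only read that reports -EAGAIN on a cache miss, and a
 * read performed while holding the shared glock, which may go to disk. */
static long read_cached_only(size_t len)  { (void)len; return -EAGAIN; }
static long read_under_glock(size_t len)  { return (long)len; }
static void glock_shared_lock(void)       { }
static void glock_shared_unlock(void)     { }

/* Mirrors the tail of gfs2_file_read_iter(): serve the read from the page
 * cache without any cluster lock if possible; on a miss, take the shared
 * glock and retry, unless the caller asked for a non-blocking read. */
static long read_outline(size_t len, bool nowait)
{
    long ret = read_cached_only(len);

    if (ret != -EAGAIN)        /* success, or a real error: return as is */
        return ret;
    if (nowait)
        return ret;            /* IOCB_NOWAIT: do not block on the glock */

    glock_shared_lock();
    ret = read_under_glock(len);
    glock_shared_unlock();
    return ret;
}

int main(void)
{
    return read_outline(4096, false) > 0 ? 0 : 1;
}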
888 * gfs2_file_write_iter - Perform a write to a file
901 struct file *file = iocb->ki_filp; in gfs2_file_write_iter()
903 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_file_write_iter() local
907 gfs2_size_hint(file, iocb->ki_pos, iov_iter_count(from)); in gfs2_file_write_iter()
909 if (iocb->ki_flags & IOCB_APPEND) { in gfs2_file_write_iter()
910 ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh); in gfs2_file_write_iter()
929 if (iocb->ki_flags & IOCB_DIRECT) { in gfs2_file_write_iter()
930 struct address_space *mapping = file->f_mapping; in gfs2_file_write_iter()
937 iocb->ki_flags |= IOCB_DSYNC; in gfs2_file_write_iter()
938 current->backing_dev_info = inode_to_bdi(inode); in gfs2_file_write_iter()
940 current->backing_dev_info = NULL; in gfs2_file_write_iter()
954 iocb->ki_pos += buffered; in gfs2_file_write_iter()
957 (iocb->ki_pos - buffered) >> PAGE_SHIFT, in gfs2_file_write_iter()
958 (iocb->ki_pos - 1) >> PAGE_SHIFT); in gfs2_file_write_iter()
962 current->backing_dev_info = inode_to_bdi(inode); in gfs2_file_write_iter()
964 current->backing_dev_info = NULL; in gfs2_file_write_iter()
966 iocb->ki_pos += ret; in gfs2_file_write_iter()
979 struct super_block *sb = inode->i_sb; in fallocate_chunk()
980 struct gfs2_inode *ip = GFS2_I(inode); in fallocate_chunk() local
985 error = gfs2_meta_inode_buffer(ip, &dibh); in fallocate_chunk()
989 gfs2_trans_add_meta(ip->i_gl, dibh); in fallocate_chunk()
991 if (gfs2_is_stuffed(ip)) { in fallocate_chunk()
992 error = gfs2_unstuff_dinode(ip, NULL); in fallocate_chunk()
1000 error = gfs2_iomap_get_alloc(inode, offset, end - offset, in fallocate_chunk()
1007 error = sb_issue_zeroout(sb, iomap.addr >> inode->i_blkbits, in fallocate_chunk()
1008 iomap.length >> inode->i_blkbits, in fallocate_chunk()
1021 * calc_max_reserv() - Reverse of write_calc_reserv. Given a number of
1022 * blocks, determine how many bytes can be written.
1023 * @ip: The inode in question.
1025 * @data_blocks: Compute and return the number of data blocks needed
1026 * @ind_blocks: Compute and return the number of indirect blocks needed
1027 * @max_blocks: The total blocks available to work with.
1031 static void calc_max_reserv(struct gfs2_inode *ip, loff_t *len, in calc_max_reserv() argument
1036 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in calc_max_reserv()
1037 unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1); in calc_max_reserv()
1039 for (tmp = max_data; tmp > sdp->sd_diptrs;) { in calc_max_reserv()
1040 tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs); in calc_max_reserv()
1041 max_data -= tmp; in calc_max_reserv()
1045 *ind_blocks = max_blocks - max_data; in calc_max_reserv()
1046 *len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift; in calc_max_reserv()
1049 gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks); in calc_max_reserv()
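The kernel-doc above describes calc_max_reserv() as the reverse of gfs2_write_calc_reserv(): given a budget of max_blocks, work out how many of them can hold file data once the indirect blocks needed to address that data are subtracted. A userspace sketch of the same iteration, with the dinode and indirect pointer counts passed as plain parameters (the example geometry is illustrative):

#include <stdint.h>
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

/* Split max_blocks into data blocks and indirect blocks, mirroring the loop
 * in calc_max_reserv(). diptrs is the number of pointers in the dinode,
 * inptrs the number in an indirect block, max_height the metadata tree
 * height. */
static void split_blocks(unsigned max_blocks, unsigned diptrs, unsigned inptrs,
                         unsigned max_height,
                         unsigned *data_blocks, unsigned *ind_blocks)
{
    unsigned tmp, max_data = max_blocks - 3 * (max_height - 1);

    /* Each level of indirection costs roughly max_data / inptrs blocks. */
    for (tmp = max_data; tmp > diptrs;) {
        tmp = DIV_ROUND_UP(tmp, inptrs);
        max_data -= tmp;
    }

    *data_blocks = max_data;
    *ind_blocks = max_blocks - max_data;
}

int main(void)
{
    unsigned data, ind;

    split_blocks(1000, 483, 509, 10, &data, &ind);  /* illustrative geometry */
    printf("data=%u ind=%u\n", data, ind);          /* data=971 ind=29 */
    return 0;
}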
1057 struct gfs2_inode *ip = GFS2_I(inode); in __gfs2_fallocate() local
1064 loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1); in __gfs2_fallocate()
1065 loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift; in __gfs2_fallocate()
1068 next = (next + 1) << sdp->sd_sb.sb_bsize_shift; in __gfs2_fallocate()
1072 len = next - offset; in __gfs2_fallocate()
1073 bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2; in __gfs2_fallocate()
1078 bytes = sdp->sd_sb.sb_bsize; in __gfs2_fallocate()
1082 gfs2_write_calc_reserv(ip, PAGE_SIZE, &data_blocks, &ind_blocks); in __gfs2_fallocate()
1088 if (!gfs2_write_alloc_required(ip, offset, bytes)) { in __gfs2_fallocate()
1089 len -= bytes; in __gfs2_fallocate()
1104 gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks); in __gfs2_fallocate()
1107 error = gfs2_quota_lock_check(ip, &ap); in __gfs2_fallocate()
1110 /* ap.allowed tells us how many blocks quota will allow in __gfs2_fallocate()
1116 error = gfs2_inplace_reserve(ip, &ap); in __gfs2_fallocate()
1127 calc_max_reserv(ip, &max_bytes, &data_blocks, in __gfs2_fallocate()
1131 RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks); in __gfs2_fallocate()
1132 if (gfs2_is_jdata(ip)) in __gfs2_fallocate()
1136 PAGE_SIZE >> inode->i_blkbits); in __gfs2_fallocate()
1146 len -= max_bytes; in __gfs2_fallocate()
1148 gfs2_inplace_release(ip); in __gfs2_fallocate()
1149 gfs2_quota_unlock(ip); in __gfs2_fallocate()
1152 if (!(mode & FALLOC_FL_KEEP_SIZE) && (pos + count) > inode->i_size) in __gfs2_fallocate()
1157 if ((file->f_flags & O_DSYNC) || IS_SYNC(file->f_mapping->host)) in __gfs2_fallocate()
1158 return vfs_fsync_range(file, pos, pos + count - 1, in __gfs2_fallocate()
1159 (file->f_flags & __O_SYNC) ? 0 : 1); in __gfs2_fallocate()
1163 gfs2_inplace_release(ip); in __gfs2_fallocate()
1165 gfs2_quota_unlock(ip); in __gfs2_fallocate()
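Near the top of __gfs2_fallocate() the matched lines widen the requested range to filesystem block boundaries: the end is rounded up to the next block and, per the bsize_mask computed there, the start is presumably masked down, so the allocator always works in whole blocks. A small sketch of that rounding, assuming a power-of-two block size (names illustrative):

#include <stdint.h>
#include <stdio.h>

/* Widen [offset, offset + len) to whole filesystem blocks, mirroring the
 * rounding at the top of __gfs2_fallocate(); bsize must be a power of two
 * and bsize_shift its log2. */
static void round_to_blocks(uint64_t *offset, uint64_t *len,
                            uint64_t bsize, unsigned bsize_shift)
{
    uint64_t bsize_mask = ~(bsize - 1);
    uint64_t next = (*offset + *len - 1) >> bsize_shift;

    next = (next + 1) << bsize_shift;   /* first block past the range */
    *offset &= bsize_mask;              /* round the start down */
    *len = next - *offset;              /* whole blocks covering the request */
}

int main(void)
{
    uint64_t offset = 100, len = 5000;

    round_to_blocks(&offset, &len, 4096, 12);
    printf("offset=%llu len=%llu\n",
           (unsigned long long)offset, (unsigned long long)len); /* 0, 8192 */
    return 0;
}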
1173 struct gfs2_inode *ip = GFS2_I(inode); in gfs2_fallocate() local
1178 return -EOPNOTSUPP; in gfs2_fallocate()
1180 if (gfs2_is_jdata(ip) && inode != sdp->sd_rindex) in gfs2_fallocate()
1181 return -EOPNOTSUPP; in gfs2_fallocate()
1185 gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh); in gfs2_fallocate()
1191 (offset + len) > inode->i_size) { in gfs2_fallocate()
1206 gfs2_rs_deltree(&ip->i_res); in gfs2_fallocate()
1233 * gfs2_lock - acquire/release a posix lock on a file
1243 struct gfs2_inode *ip = GFS2_I(file->f_mapping->host); in gfs2_lock() local
1244 struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host); in gfs2_lock()
1245 struct lm_lockstruct *ls = &sdp->sd_lockstruct; in gfs2_lock()
1247 if (!(fl->fl_flags & FL_POSIX)) in gfs2_lock()
1248 return -ENOLCK; in gfs2_lock()
1249 if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK) in gfs2_lock()
1250 return -ENOLCK; in gfs2_lock()
1255 fl->fl_type = F_UNLCK; in gfs2_lock()
1258 if (fl->fl_type == F_UNLCK) in gfs2_lock()
1260 return -EIO; in gfs2_lock()
1263 return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl); in gfs2_lock()
1264 else if (fl->fl_type == F_UNLCK) in gfs2_lock()
1265 return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl); in gfs2_lock()
1267 return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl); in gfs2_lock()
1272 struct gfs2_file *fp = file->private_data; in do_flock()
1273 struct gfs2_holder *fl_gh = &fp->f_fl_gh; in do_flock()
1274 struct gfs2_inode *ip = GFS2_I(file_inode(file)); in do_flock() local
1281 state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED; in do_flock()
1284 mutex_lock(&fp->f_fl_mutex); in do_flock()
1288 if (fl_gh->gh_state == state) in do_flock()
1297 error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr, in do_flock()
1308 fl_gh->gh_flags = LM_FLAG_TRY | GL_EXACT; in do_flock()
1309 fl_gh->gh_error = 0; in do_flock()
1315 error = -EAGAIN; in do_flock()
1318 gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error); in do_flock()
1322 mutex_unlock(&fp->f_fl_mutex); in do_flock()
1328 struct gfs2_file *fp = file->private_data; in do_unflock()
1329 struct gfs2_holder *fl_gh = &fp->f_fl_gh; in do_unflock()
1331 mutex_lock(&fp->f_fl_mutex); in do_unflock()
1337 mutex_unlock(&fp->f_fl_mutex); in do_unflock()
1341 * gfs2_flock - acquire/release a flock lock on a file
1351 if (!(fl->fl_flags & FL_FLOCK)) in gfs2_flock()
1352 return -ENOLCK; in gfs2_flock()
1353 if (fl->fl_type & LOCK_MAND) in gfs2_flock()
1354 return -EOPNOTSUPP; in gfs2_flock()
1356 if (fl->fl_type == F_UNLCK) { in gfs2_flock()