/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This copyrighted material is made available to anyone wishing to use,
 * modify, copy, or redistribute it subject to the terms and conditions
 * of the GNU General Public License version 2.
 */

#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/mount.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/crc32.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
#include <linux/dlm.h>
#include <linux/dlm_plock.h>
#include <linux/aio.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "dir.h"
#include "glock.h"
#include "glops.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"

/**
 * gfs2_llseek - seek to a location in a file
 * @file: the file
 * @offset: the offset
 * @whence: Where to seek from (SEEK_SET, SEEK_CUR, or SEEK_END)
 *
 * SEEK_END requires the glock for the file because it references the
 * file's size.
 *
 * Returns: The new offset, or errno
 */

static loff_t gfs2_llseek(struct file *file, loff_t offset, int whence)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_holder i_gh;
	loff_t error;

	switch (whence) {
	case SEEK_END: /* These reference inode->i_size */
	case SEEK_DATA:
	case SEEK_HOLE:
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (!error) {
			error = generic_file_llseek(file, offset, whence);
			gfs2_glock_dq_uninit(&i_gh);
		}
		break;
	case SEEK_CUR:
	case SEEK_SET:
		error = generic_file_llseek(file, offset, whence);
		break;
	default:
		error = -EINVAL;
	}

	return error;
}

/**
 * gfs2_readdir - Read directory entries from a directory
 * @file: The directory to read from
 * @dirent: Buffer for dirents
 * @filldir: Function used to do the copying
 *
 * Returns: errno
 */

static int gfs2_readdir(struct file *file, void *dirent, filldir_t filldir)
{
	struct inode *dir = file->f_mapping->host;
	struct gfs2_inode *dip = GFS2_I(dir);
	struct gfs2_holder d_gh;
	u64 offset = file->f_pos;
	int error;

	gfs2_holder_init(dip->i_gl, LM_ST_SHARED, 0, &d_gh);
	error = gfs2_glock_nq(&d_gh);
	if (error) {
		gfs2_holder_uninit(&d_gh);
		return error;
	}

	error = gfs2_dir_read(dir, &offset, dirent, filldir, &file->f_ra);

	gfs2_glock_dq_uninit(&d_gh);

	file->f_pos = offset;

	return error;
}

/**
 * fsflags_cvt - convert flag bits from one flag space to another
 * @table: A table of 32 u32 flags
 * @val: a 32 bit value to convert
 *
 * This function can be used to convert between fsflags values and
 * GFS2's own flags values.
 *
 * Returns: the converted flags
 */
static u32 fsflags_cvt(const u32 *table, u32 val)
{
	u32 res = 0;
	while (val) {
		if (val & 1)
			res |= *table;
		table++;
		val >>= 1;
	}
	return res;
}

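/*
 * Conversion tables for fsflags_cvt() above. The array index is the bit
 * number in the source flag word and the value stored there is the
 * corresponding flag in the destination flag word. For example, bit 3
 * of the FS_IOC_* fsflags word is FS_SYNC_FL, which maps to
 * GFS2_DIF_SYNC, and vice versa in the second table.
 */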
static const u32 fsflags_to_gfs2[32] = {
	[3] = GFS2_DIF_SYNC,
	[4] = GFS2_DIF_IMMUTABLE,
	[5] = GFS2_DIF_APPENDONLY,
	[7] = GFS2_DIF_NOATIME,
	[12] = GFS2_DIF_EXHASH,
	[14] = GFS2_DIF_INHERIT_JDATA,
	[17] = GFS2_DIF_TOPDIR,
};

static const u32 gfs2_to_fsflags[32] = {
	[gfs2fl_Sync] = FS_SYNC_FL,
	[gfs2fl_Immutable] = FS_IMMUTABLE_FL,
	[gfs2fl_AppendOnly] = FS_APPEND_FL,
	[gfs2fl_NoAtime] = FS_NOATIME_FL,
	[gfs2fl_ExHash] = FS_INDEX_FL,
	[gfs2fl_TopLevel] = FS_TOPDIR_FL,
	[gfs2fl_InheritJdata] = FS_JOURNAL_DATA_FL,
};

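/**
 * gfs2_get_flags - read the fsflags for an inode (FS_IOC_GETFLAGS)
 * @filp: file pointer
 * @ptr: user space buffer to receive the flags
 *
 * Takes a shared glock so that ip->i_diskflags is up to date before
 * converting it to the generic FS_*_FL representation.
 *
 * Returns: errno
 */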
static int gfs2_get_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder gh;
	int error;
	u32 fsflags;

	gfs2_holder_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
	error = gfs2_glock_nq(&gh);
	if (error)
		goto out_uninit;

	fsflags = fsflags_cvt(gfs2_to_fsflags, ip->i_diskflags);
	if (!S_ISDIR(inode->i_mode) && ip->i_diskflags & GFS2_DIF_JDATA)
		fsflags |= FS_JOURNAL_DATA_FL;
	if (put_user(fsflags, ptr))
		error = -EFAULT;

	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	return error;
}

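/**
 * gfs2_set_inode_flags - propagate GFS2 on-disk flags to the VFS inode
 * @inode: The inode
 *
 * Rebuilds the S_IMMUTABLE/S_APPEND/S_NOATIME/S_SYNC bits in
 * inode->i_flags from ip->i_diskflags, and sets S_NOSEC when the inode
 * has no extended attributes and no setuid/setgid bits.
 */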
void gfs2_set_inode_flags(struct inode *inode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int flags = inode->i_flags;

	flags &= ~(S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_NOSEC);
	if ((ip->i_eattr == 0) && !is_sxid(inode->i_mode))
		flags |= S_NOSEC;
	if (ip->i_diskflags & GFS2_DIF_IMMUTABLE)
		flags |= S_IMMUTABLE;
	if (ip->i_diskflags & GFS2_DIF_APPENDONLY)
		flags |= S_APPEND;
	if (ip->i_diskflags & GFS2_DIF_NOATIME)
		flags |= S_NOATIME;
	if (ip->i_diskflags & GFS2_DIF_SYNC)
		flags |= S_SYNC;
	inode->i_flags = flags;
}

/* Flags that can be set by user space */
#define GFS2_FLAGS_USER_SET (GFS2_DIF_JDATA|			\
			     GFS2_DIF_IMMUTABLE|		\
			     GFS2_DIF_APPENDONLY|		\
			     GFS2_DIF_NOATIME|			\
			     GFS2_DIF_SYNC|			\
			     GFS2_DIF_SYSTEM|			\
			     GFS2_DIF_TOPDIR|			\
			     GFS2_DIF_INHERIT_JDATA)

/**
 * do_gfs2_set_flags - set flags on an inode
 * @filp: file pointer
 * @reqflags: The flags to set
 * @mask: Indicates which flags are valid
 *
 */
static int do_gfs2_set_flags(struct file *filp, u32 reqflags, u32 mask)
{
	struct inode *inode = file_inode(filp);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct buffer_head *bh;
	struct gfs2_holder gh;
	int error;
	u32 new_flags, flags;

	error = mnt_want_write_file(filp);
	if (error)
		return error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	if (error)
		goto out_drop_write;

	error = -EACCES;
	if (!inode_owner_or_capable(inode))
		goto out;

	error = 0;
	flags = ip->i_diskflags;
	new_flags = (flags & ~mask) | (reqflags & mask);
	if ((new_flags ^ flags) == 0)
		goto out;

	error = -EINVAL;
	if ((new_flags ^ flags) & ~GFS2_FLAGS_USER_SET)
		goto out;

	error = -EPERM;
	if (IS_IMMUTABLE(inode) && (new_flags & GFS2_DIF_IMMUTABLE))
		goto out;
	if (IS_APPEND(inode) && (new_flags & GFS2_DIF_APPENDONLY))
		goto out;
	if (((new_flags ^ flags) & GFS2_DIF_IMMUTABLE) &&
	    !capable(CAP_LINUX_IMMUTABLE))
		goto out;
	if (!IS_IMMUTABLE(inode)) {
		error = gfs2_permission(inode, MAY_WRITE);
		if (error)
			goto out;
	}
	if ((flags ^ new_flags) & GFS2_DIF_JDATA) {
		if (flags & GFS2_DIF_JDATA)
			gfs2_log_flush(sdp, ip->i_gl);
		error = filemap_fdatawrite(inode->i_mapping);
		if (error)
			goto out;
		error = filemap_fdatawait(inode->i_mapping);
		if (error)
			goto out;
	}
	error = gfs2_trans_begin(sdp, RES_DINODE, 0);
	if (error)
		goto out;
	error = gfs2_meta_inode_buffer(ip, &bh);
	if (error)
		goto out_trans_end;
	gfs2_trans_add_meta(ip->i_gl, bh);
	ip->i_diskflags = new_flags;
	gfs2_dinode_out(ip, bh->b_data);
	brelse(bh);
	gfs2_set_inode_flags(inode);
	gfs2_set_aops(inode);
out_trans_end:
	gfs2_trans_end(sdp);
out:
	gfs2_glock_dq_uninit(&gh);
out_drop_write:
	mnt_drop_write_file(filp);
	return error;
}

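/**
 * gfs2_set_flags - set the fsflags for an inode (FS_IOC_SETFLAGS)
 * @filp: file pointer
 * @ptr: user space buffer containing the new flags
 *
 * For non-directories, a requested inherit-jdata flag is translated
 * into the jdata flag itself and the topdir flag is ignored.
 * Directories may change every user-settable flag except the jdata
 * flag, which on directories is controlled via inherit-jdata.
 *
 * Returns: errno
 */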
static int gfs2_set_flags(struct file *filp, u32 __user *ptr)
{
	struct inode *inode = file_inode(filp);
	u32 fsflags, gfsflags;

	if (get_user(fsflags, ptr))
		return -EFAULT;

	gfsflags = fsflags_cvt(fsflags_to_gfs2, fsflags);
	if (!S_ISDIR(inode->i_mode)) {
		gfsflags &= ~GFS2_DIF_TOPDIR;
		if (gfsflags & GFS2_DIF_INHERIT_JDATA)
			gfsflags ^= (GFS2_DIF_JDATA | GFS2_DIF_INHERIT_JDATA);
		return do_gfs2_set_flags(filp, gfsflags, ~0);
	}
	return do_gfs2_set_flags(filp, gfsflags, ~GFS2_DIF_JDATA);
}

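/**
 * gfs2_ioctl - dispatch the filesystem specific ioctls
 * @filp: file pointer
 * @cmd: the ioctl command (FS_IOC_GETFLAGS, FS_IOC_SETFLAGS or FITRIM)
 * @arg: user space argument for the command
 *
 * Returns: errno, or -ENOTTY for unknown commands
 */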
static long gfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	switch (cmd) {
	case FS_IOC_GETFLAGS:
		return gfs2_get_flags(filp, (u32 __user *)arg);
	case FS_IOC_SETFLAGS:
		return gfs2_set_flags(filp, (u32 __user *)arg);
	case FITRIM:
		return gfs2_fitrim(filp, (void __user *)arg);
	}
	return -ENOTTY;
}

/**
 * gfs2_size_hint - Give a hint to the size of a write request
 * @filep: The struct file
 * @offset: The file offset of the write
 * @size: The length of the write
 *
 * When we are about to do a write, this function records the total
 * write size in order to provide a suitable hint to the lower layers
 * about how many blocks will be required.
 *
 */

static void gfs2_size_hint(struct file *filep, loff_t offset, size_t size)
{
	struct inode *inode = file_inode(filep);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	size_t blks = (size + sdp->sd_sb.sb_bsize - 1) >> sdp->sd_sb.sb_bsize_shift;
	int hint = min_t(size_t, INT_MAX, blks);

	atomic_set(&ip->i_res->rs_sizehint, hint);
}

/**
 * gfs2_allocate_page_backing - Use bmap to allocate blocks
 * @page: The (locked) page to allocate backing for
 *
 * We try to allocate all the blocks required for the page in
 * one go. This might fail for various reasons, so we keep
 * trying until all the blocks to back this page are allocated.
 * If some of the blocks are already allocated, that's OK too.
 */

static int gfs2_allocate_page_backing(struct page *page)
{
	struct inode *inode = page->mapping->host;
	struct buffer_head bh;
	unsigned long size = PAGE_CACHE_SIZE;
	u64 lblock = page->index << (PAGE_CACHE_SHIFT - inode->i_blkbits);

	do {
		bh.b_state = 0;
		bh.b_size = size;
		gfs2_block_map(inode, lblock, &bh, 1);
		if (!buffer_mapped(&bh))
			return -EIO;
		size -= bh.b_size;
		lblock += (bh.b_size >> inode->i_blkbits);
	} while (size > 0);
	return 0;
}

/**
 * gfs2_page_mkwrite - Make a shared, mmap()ed, page writable
 * @vma: The virtual memory area
 * @vmf: The virtual memory fault containing the page to become writable
 *
 * When the page becomes writable, we need to ensure that we have
 * blocks allocated on disk to back that page.
 */

static int gfs2_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct page *page = vmf->page;
	struct inode *inode = file_inode(vma->vm_file);
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned long last_index;
	u64 pos = page->index << PAGE_CACHE_SHIFT;
	unsigned int data_blocks, ind_blocks, rblocks;
	struct gfs2_holder gh;
	loff_t size;
	int ret;

	sb_start_pagefault(inode->i_sb);

	/* Update file times before taking page lock */
	file_update_time(vma->vm_file);

	ret = get_write_access(inode);
	if (ret)
		goto out;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		goto out_write_access;

	gfs2_size_hint(vma->vm_file, pos, PAGE_CACHE_SIZE);

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
	ret = gfs2_glock_nq(&gh);
	if (ret)
		goto out_uninit;

	set_bit(GLF_DIRTY, &ip->i_gl->gl_flags);
	set_bit(GIF_SW_PAGED, &ip->i_flags);

	if (!gfs2_write_alloc_required(ip, pos, PAGE_CACHE_SIZE)) {
		lock_page(page);
		if (!PageUptodate(page) || page->mapping != inode->i_mapping) {
			ret = -EAGAIN;
			unlock_page(page);
		}
		goto out_unlock;
	}

	ret = gfs2_rindex_update(sdp);
	if (ret)
		goto out_unlock;

	ret = gfs2_quota_lock_check(ip);
	if (ret)
		goto out_unlock;
	gfs2_write_calc_reserv(ip, PAGE_CACHE_SIZE, &data_blocks, &ind_blocks);
	ret = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
	if (ret)
		goto out_quota_unlock;

	rblocks = RES_DINODE + ind_blocks;
	if (gfs2_is_jdata(ip))
		rblocks += data_blocks ? data_blocks : 1;
	if (ind_blocks || data_blocks) {
		rblocks += RES_STATFS + RES_QUOTA;
		rblocks += gfs2_rg_blocks(ip, data_blocks + ind_blocks);
	}
	ret = gfs2_trans_begin(sdp, rblocks, 0);
	if (ret)
		goto out_trans_fail;

	lock_page(page);
	ret = -EINVAL;
	size = i_size_read(inode);
	last_index = (size - 1) >> PAGE_CACHE_SHIFT;
	/* Check page index against inode size */
	if (size == 0 || (page->index > last_index))
		goto out_trans_end;

	ret = -EAGAIN;
	/* If truncated, we must retry the operation; we may have raced
	 * with the glock demotion code.
	 */
	if (!PageUptodate(page) || page->mapping != inode->i_mapping)
		goto out_trans_end;

	/* Unstuff, if required, and allocate backing blocks for page */
	ret = 0;
	if (gfs2_is_stuffed(ip))
		ret = gfs2_unstuff_dinode(ip, page);
	if (ret == 0)
		ret = gfs2_allocate_page_backing(page);

out_trans_end:
	if (ret)
		unlock_page(page);
	gfs2_trans_end(sdp);
out_trans_fail:
	gfs2_inplace_release(ip);
out_quota_unlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&gh);
out_uninit:
	gfs2_holder_uninit(&gh);
	if (ret == 0) {
		set_page_dirty(page);
		wait_for_stable_page(page);
	}
out_write_access:
	put_write_access(inode);
out:
	sb_end_pagefault(inode->i_sb);
	return block_page_mkwrite_return(ret);
}

static const struct vm_operations_struct gfs2_vm_ops = {
	.fault = filemap_fault,
	.page_mkwrite = gfs2_page_mkwrite,
	.remap_pages = generic_file_remap_pages,
};

/**
 * gfs2_mmap - set up a memory mapping of a file
 * @file: The file to map
 * @vma: The VMA which describes the mapping
 *
 * There is no need to get a lock here unless we should be updating
 * atime. If taking that lock fails, the error is returned and the
 * mapping is not set up, since the only work done under the lock is
 * the atime update via file_accessed().
 *
 * Returns: 0 or errno
 */

static int gfs2_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);

	if (!(file->f_flags & O_NOATIME) &&
	    !IS_NOATIME(&ip->i_inode)) {
		struct gfs2_holder i_gh;
		int error;

		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			return error;
		/* grab lock to update inode */
		gfs2_glock_dq_uninit(&i_gh);
		file_accessed(file);
	}
	vma->vm_ops = &gfs2_vm_ops;

	return 0;
}

/**
 * gfs2_open - open a file
 * @inode: the inode to open
 * @file: the struct file for this opening
 *
 * Returns: errno
 */

static int gfs2_open(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_holder i_gh;
	struct gfs2_file *fp;
	int error;

	fp = kzalloc(sizeof(struct gfs2_file), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	mutex_init(&fp->f_fl_mutex);

	gfs2_assert_warn(GFS2_SB(inode), !file->private_data);
	file->private_data = fp;

	if (S_ISREG(ip->i_inode.i_mode)) {
		error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY,
					   &i_gh);
		if (error)
			goto fail;

		if (!(file->f_flags & O_LARGEFILE) &&
		    i_size_read(inode) > MAX_NON_LFS) {
			error = -EOVERFLOW;
			goto fail_gunlock;
		}

		gfs2_glock_dq_uninit(&i_gh);
	}

	return 0;

fail_gunlock:
	gfs2_glock_dq_uninit(&i_gh);
fail:
	file->private_data = NULL;
	kfree(fp);
	return error;
}

/**
 * gfs2_release - called to close a struct file
 * @inode: the inode the struct file belongs to
 * @file: the struct file being closed
 *
 * Returns: errno
 */

static int gfs2_release(struct inode *inode, struct file *file)
{
	struct gfs2_inode *ip = GFS2_I(inode);

	kfree(file->private_data);
	file->private_data = NULL;

	if (!(file->f_mode & FMODE_WRITE))
		return 0;

	gfs2_rs_delete(ip);
	return 0;
}

/**
 * gfs2_fsync - sync the dirty data for a file (across the cluster)
 * @file: the file that points to the dentry
 * @start: the start position in the file to sync
 * @end: the end position in the file to sync
 * @datasync: set if we can ignore timestamp changes
 *
 * We split the data flushing here so that we don't wait for the data
 * until after we've also sent the metadata to disk. Note that for
 * data=ordered, we will write & wait for the data at the log flush
 * stage anyway, so this is unlikely to make much of a difference
 * except in the data=writeback case.
 *
 * If the fdatawrite fails due to any reason except -EIO, we will
 * continue the remainder of the fsync, although we'll still report
 * the error at the end. This is to match filemap_write_and_wait_range()
 * behaviour.
 *
 * Returns: errno
 */

static int gfs2_fsync(struct file *file, loff_t start, loff_t end,
		      int datasync)
{
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	int sync_state = inode->i_state & (I_DIRTY_SYNC|I_DIRTY_DATASYNC);
	struct gfs2_inode *ip = GFS2_I(inode);
	int ret = 0, ret1 = 0;

	if (mapping->nrpages) {
		ret1 = filemap_fdatawrite_range(mapping, start, end);
		if (ret1 == -EIO)
			return ret1;
	}

	if (datasync)
		sync_state &= ~I_DIRTY_SYNC;

	if (sync_state) {
		ret = sync_inode_metadata(inode, 1);
		if (ret)
			return ret;
		if (gfs2_is_jdata(ip))
			filemap_write_and_wait(mapping);
		gfs2_ail_flush(ip->i_gl, 1);
	}

	if (mapping->nrpages)
		ret = filemap_fdatawait_range(mapping, start, end);

	return ret ? ret : ret1;
}

/**
 * gfs2_file_aio_write - Perform a write to a file
 * @iocb: The io context
 * @iov: The data to write
 * @nr_segs: Number of @iov segments
 * @pos: The file position
 *
 * We have to do a lock/unlock here to refresh the inode size for
 * O_APPEND writes, otherwise we can end up writing at the wrong
 * offset. There is still a race, but provided the app is using its
 * own file locking, this will make O_APPEND work as expected.
 *
 */

static ssize_t gfs2_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	size_t writesize = iov_length(iov, nr_segs);
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	int ret;

	ret = gfs2_rs_alloc(ip);
	if (ret)
		return ret;

	gfs2_size_hint(file, pos, writesize);

	if (file->f_flags & O_APPEND) {
		struct gfs2_holder gh;

		ret = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, 0, &gh);
		if (ret)
			return ret;
		gfs2_glock_dq_uninit(&gh);
	}

	return generic_file_aio_write(iocb, iov, nr_segs, pos);
}

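/**
 * fallocate_chunk - preallocate one contiguous chunk of a fallocate request
 * @inode: The inode
 * @offset: Starting file offset of the chunk
 * @len: Length of the chunk in bytes
 * @mode: The FALLOC_FL_* mode flags
 *
 * Maps (and thereby allocates) the blocks backing the chunk with
 * gfs2_block_map() in create mode, unstuffing the dinode first if
 * required. Unless FALLOC_FL_KEEP_SIZE is set, the inode size is
 * extended to cover the newly allocated range.
 *
 * Returns: errno
 */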
static int fallocate_chunk(struct inode *inode, loff_t offset, loff_t len,
			   int mode)
{
	struct gfs2_inode *ip = GFS2_I(inode);
	struct buffer_head *dibh;
	int error;
	loff_t size = len;
	unsigned int nr_blks;
	sector_t lblock = offset >> inode->i_blkbits;

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (unlikely(error))
		return error;

	gfs2_trans_add_meta(ip->i_gl, dibh);

	if (gfs2_is_stuffed(ip)) {
		error = gfs2_unstuff_dinode(ip, NULL);
		if (unlikely(error))
			goto out;
	}

	while (len) {
		struct buffer_head bh_map = { .b_state = 0, .b_blocknr = 0 };
		bh_map.b_size = len;
		set_buffer_zeronew(&bh_map);

		error = gfs2_block_map(inode, lblock, &bh_map, 1);
		if (unlikely(error))
			goto out;
		len -= bh_map.b_size;
		nr_blks = bh_map.b_size >> inode->i_blkbits;
		lblock += nr_blks;
		if (!buffer_new(&bh_map))
			continue;
		if (unlikely(!buffer_zeronew(&bh_map))) {
			error = -EIO;
			goto out;
		}
	}
	if (offset + size > inode->i_size && !(mode & FALLOC_FL_KEEP_SIZE))
		i_size_write(inode, offset + size);

	mark_inode_dirty(inode);

out:
	brelse(dibh);
	return error;
}

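/**
 * calc_max_reserv - estimate the largest allocation the current rgrp allows
 * @ip: The inode
 * @max: Upper bound on the length, in bytes
 * @len: The proposed maximum length, in bytes; updated on return
 * @data_blocks: The proposed number of data blocks; updated on return
 * @ind_blocks: The proposed number of indirect blocks; updated on return
 *
 * Works backwards from the number of free clone blocks in the resource
 * group, repeatedly subtracting the indirect blocks needed to map the
 * remaining data blocks at each level of the metadata tree.
 */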
static void calc_max_reserv(struct gfs2_inode *ip, loff_t max, loff_t *len,
			    unsigned int *data_blocks, unsigned int *ind_blocks)
{
	const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode);
	unsigned int max_blocks = ip->i_rgd->rd_free_clone;
	unsigned int tmp, max_data = max_blocks - 3 * (sdp->sd_max_height - 1);

	for (tmp = max_data; tmp > sdp->sd_diptrs;) {
		tmp = DIV_ROUND_UP(tmp, sdp->sd_inptrs);
		max_data -= tmp;
	}
	/* This calculation isn't the exact reverse of gfs2_write_calc_reserv,
	   so it might end up with fewer data blocks */
	if (max_data <= *data_blocks)
		return;
	*data_blocks = max_data;
	*ind_blocks = max_blocks - max_data;
	*len = ((loff_t)max_data - 3) << sdp->sd_sb.sb_bsize_shift;
	if (*len > max) {
		*len = max;
		gfs2_write_calc_reserv(ip, max, data_blocks, ind_blocks);
	}
}

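/**
 * gfs2_fallocate - preallocate blocks for a file
 * @file: The file
 * @mode: The FALLOC_FL_* mode flags (only FALLOC_FL_KEEP_SIZE supported)
 * @offset: Starting offset of the range, in bytes
 * @len: Length of the range, in bytes
 *
 * The range is rounded out to block boundaries and then allocated in
 * chunks, each under its own transaction, halving the chunk size on
 * -ENOSPC until a reservation succeeds.
 *
 * Returns: errno
 */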
static long gfs2_fallocate(struct file *file, int mode, loff_t offset,
			   loff_t len)
{
	struct inode *inode = file_inode(file);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *ip = GFS2_I(inode);
	unsigned int data_blocks = 0, ind_blocks = 0, rblocks;
	loff_t bytes, max_bytes;
	int error;
	const loff_t pos = offset;
	const loff_t count = len;
	loff_t bsize_mask = ~((loff_t)sdp->sd_sb.sb_bsize - 1);
	loff_t next = (offset + len - 1) >> sdp->sd_sb.sb_bsize_shift;
	loff_t max_chunk_size = UINT_MAX & bsize_mask;
	next = (next + 1) << sdp->sd_sb.sb_bsize_shift;

	/* We only support the FALLOC_FL_KEEP_SIZE mode */
	if (mode & ~FALLOC_FL_KEEP_SIZE)
		return -EOPNOTSUPP;

	offset &= bsize_mask;

	len = next - offset;
	bytes = sdp->sd_max_rg_data * sdp->sd_sb.sb_bsize / 2;
	if (!bytes)
		bytes = UINT_MAX;
	bytes &= bsize_mask;
	if (bytes == 0)
		bytes = sdp->sd_sb.sb_bsize;

	error = gfs2_rs_alloc(ip);
	if (error)
		return error;

	gfs2_holder_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &ip->i_gh);
	error = gfs2_glock_nq(&ip->i_gh);
	if (unlikely(error))
		goto out_uninit;

	gfs2_size_hint(file, offset, len);

	while (len > 0) {
		if (len < bytes)
			bytes = len;
		if (!gfs2_write_alloc_required(ip, offset, bytes)) {
			len -= bytes;
			offset += bytes;
			continue;
		}
		error = gfs2_quota_lock_check(ip);
		if (error)
			goto out_unlock;

retry:
		gfs2_write_calc_reserv(ip, bytes, &data_blocks, &ind_blocks);

		error = gfs2_inplace_reserve(ip, data_blocks + ind_blocks, 0);
		if (error) {
			if (error == -ENOSPC && bytes > sdp->sd_sb.sb_bsize) {
				bytes >>= 1;
				bytes &= bsize_mask;
				if (bytes == 0)
					bytes = sdp->sd_sb.sb_bsize;
				goto retry;
			}
			goto out_qunlock;
		}
		max_bytes = bytes;
		calc_max_reserv(ip, (len > max_chunk_size) ? max_chunk_size : len,
				&max_bytes, &data_blocks, &ind_blocks);

		rblocks = RES_DINODE + ind_blocks + RES_STATFS + RES_QUOTA +
			  RES_RG_HDR + gfs2_rg_blocks(ip, data_blocks + ind_blocks);
		if (gfs2_is_jdata(ip))
			rblocks += data_blocks ? data_blocks : 1;

		error = gfs2_trans_begin(sdp, rblocks,
					 PAGE_CACHE_SIZE / sdp->sd_sb.sb_bsize);
		if (error)
			goto out_trans_fail;

		error = fallocate_chunk(inode, offset, max_bytes, mode);
		gfs2_trans_end(sdp);

		if (error)
			goto out_trans_fail;

		len -= max_bytes;
		offset += max_bytes;
		gfs2_inplace_release(ip);
		gfs2_quota_unlock(ip);
	}

	if (error == 0)
		error = generic_write_sync(file, pos, count);
	goto out_unlock;

out_trans_fail:
	gfs2_inplace_release(ip);
out_qunlock:
	gfs2_quota_unlock(ip);
out_unlock:
	gfs2_glock_dq(&ip->i_gh);
out_uninit:
	gfs2_holder_uninit(&ip->i_gh);
	return error;
}

#ifdef CONFIG_GFS2_FS_LOCKING_DLM

/**
 * gfs2_setlease - acquire/release a file lease
 * @file: the file pointer
 * @arg: lease type
 * @fl: file lock
 *
 * We don't currently have a way to enforce a lease across the whole
 * cluster; until we do, disable leases (by just returning -EINVAL),
 * unless the administrator has requested purely local locking.
 *
 * Locking: called under lock_flocks
 *
 * Returns: errno
 */

static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
	return -EINVAL;
}

/**
 * gfs2_lock - acquire/release a posix lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
	struct lm_lockstruct *ls = &sdp->sd_lockstruct;

	if (!(fl->fl_flags & FL_POSIX))
		return -ENOLCK;
	if (__mandatory_lock(&ip->i_inode) && fl->fl_type != F_UNLCK)
		return -ENOLCK;

	if (cmd == F_CANCELLK) {
		/* Hack: */
		cmd = F_SETLK;
		fl->fl_type = F_UNLCK;
	}
	if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
		if (fl->fl_type == F_UNLCK)
			posix_lock_file_wait(file, fl);
		return -EIO;
	}
	if (IS_GETLK(cmd))
		return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
	else if (fl->fl_type == F_UNLCK)
		return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
	else
		return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}

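/**
 * do_flock - acquire an flock lock (or convert an existing one)
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * An flock is represented by a dedicated flock glock, held in shared
 * mode for F_RDLCK and exclusive mode for F_WRLCK. Converting between
 * the two drops the old lock before requesting the new one, so a
 * conversion is not atomic. For non-blocking requests, LM_FLAG_TRY is
 * used and GLR_TRYFAILED is mapped to -EAGAIN.
 *
 * Returns: errno
 */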
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;
	struct gfs2_inode *ip = GFS2_I(file_inode(file));
	struct gfs2_glock *gl;
	unsigned int state;
	int flags;
	int error = 0;

	state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
	flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;

	mutex_lock(&fp->f_fl_mutex);

	gl = fl_gh->gh_gl;
	if (gl) {
		if (fl_gh->gh_state == state)
			goto out;
		flock_lock_file_wait(file,
				     &(struct file_lock){.fl_type = F_UNLCK});
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_reinit(state, flags, fl_gh);
	} else {
		error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
				       &gfs2_flock_glops, CREATE, &gl);
		if (error)
			goto out;
		gfs2_holder_init(gl, state, flags, fl_gh);
		gfs2_glock_put(gl);
	}
	error = gfs2_glock_nq(fl_gh);
	if (error) {
		gfs2_holder_uninit(fl_gh);
		if (error == GLR_TRYFAILED)
			error = -EAGAIN;
	} else {
		error = flock_lock_file_wait(file, fl);
		gfs2_assert_warn(GFS2_SB(&ip->i_inode), !error);
	}

out:
	mutex_unlock(&fp->f_fl_mutex);
	return error;
}

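/**
 * do_unflock - release an flock lock
 * @file: the file pointer
 * @fl: type and range of lock
 *
 * Drops the VFS level flock and then releases the flock glock,
 * if one is held.
 */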
static void do_unflock(struct file *file, struct file_lock *fl)
{
	struct gfs2_file *fp = file->private_data;
	struct gfs2_holder *fl_gh = &fp->f_fl_gh;

	mutex_lock(&fp->f_fl_mutex);
	flock_lock_file_wait(file, fl);
	if (fl_gh->gh_gl) {
		gfs2_glock_dq_wait(fl_gh);
		gfs2_holder_uninit(fl_gh);
	}
	mutex_unlock(&fp->f_fl_mutex);
}

/**
 * gfs2_flock - acquire/release a flock lock on a file
 * @file: the file pointer
 * @cmd: either modify or retrieve lock state, possibly wait
 * @fl: type and range of lock
 *
 * Returns: errno
 */

static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
{
	if (!(fl->fl_flags & FL_FLOCK))
		return -ENOLCK;
	if (fl->fl_type & LOCK_MAND)
		return -EOPNOTSUPP;

	if (fl->fl_type == F_UNLCK) {
		do_unflock(file, fl);
		return 0;
	} else {
		return do_flock(file, cmd, fl);
	}
}

const struct file_operations gfs2_file_fops = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= gfs2_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.lock		= gfs2_lock,
	.flock		= gfs2_flock,
	.llseek		= default_llseek,
};

#endif /* CONFIG_GFS2_FS_LOCKING_DLM */

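/*
 * The "nolock" variants below are used when file locking is handled
 * locally rather than across the cluster, e.g. with the localflocks
 * mount option, or when GFS2 is built without DLM locking support (in
 * which case the cluster aware ->lock and ->flock operations above are
 * unavailable).
 */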
const struct file_operations gfs2_file_fops_nolock = {
	.llseek		= gfs2_llseek,
	.read		= do_sync_read,
	.aio_read	= generic_file_aio_read,
	.write		= do_sync_write,
	.aio_write	= gfs2_file_aio_write,
	.unlocked_ioctl	= gfs2_ioctl,
	.mmap		= gfs2_mmap,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.splice_read	= generic_file_splice_read,
	.splice_write	= generic_file_splice_write,
	.setlease	= generic_setlease,
	.fallocate	= gfs2_fallocate,
};

const struct file_operations gfs2_dir_fops_nolock = {
	.readdir	= gfs2_readdir,
	.unlocked_ioctl	= gfs2_ioctl,
	.open		= gfs2_open,
	.release	= gfs2_release,
	.fsync		= gfs2_fsync,
	.llseek		= default_llseek,
};