// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * file.c
 *
 * File open, close, extend, truncate
 *
 * Copyright (C) 2002, 2004 Oracle. All rights reserved.
 */

#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/uio.h>
#include <linux/sched.h>
#include <linux/splice.h>
#include <linux/mount.h>
#include <linux/writeback.h>
#include <linux/falloc.h>
#include <linux/quotaops.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>

#include <cluster/masklog.h>

#include "ocfs2.h"

#include "alloc.h"
#include "aops.h"
#include "dir.h"
#include "dlmglue.h"
#include "extent_map.h"
#include "file.h"
#include "sysfile.h"
#include "inode.h"
#include "ioctl.h"
#include "journal.h"
#include "locks.h"
#include "mmap.h"
#include "suballoc.h"
#include "super.h"
#include "xattr.h"
#include "acl.h"
#include "quota.h"
#include "refcounttree.h"
#include "ocfs2_trace.h"

#include "buffer_head_io.h"

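/*
 * Allocate the per-descriptor private data and initialize the cluster
 * lock resource (fp_flock) that backs file lock requests made through
 * this descriptor.
 */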
static int ocfs2_init_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp;

	fp = kzalloc(sizeof(struct ocfs2_file_private), GFP_KERNEL);
	if (!fp)
		return -ENOMEM;

	fp->fp_file = file;
	mutex_init(&fp->fp_mutex);
	ocfs2_file_lock_res_init(&fp->fp_flock, fp);
	file->private_data = fp;

	return 0;
}

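/*
 * Drop the file-lock cluster lock resource and free the private data
 * attached by ocfs2_init_file_private().
 */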
static void ocfs2_free_file_private(struct inode *inode, struct file *file)
{
	struct ocfs2_file_private *fp = file->private_data;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (fp) {
		ocfs2_simple_drop_lockres(osb, &fp->fp_flock);
		ocfs2_lock_res_free(&fp->fp_flock);
		kfree(fp);
		file->private_data = NULL;
	}
}

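/*
 * Open a regular file: check that the inode hasn't been deleted by
 * another node, bump ip_open_count under ip_lock, and set up the
 * per-descriptor private data.
 */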
static int ocfs2_file_open(struct inode *inode, struct file *file)
{
	int status;
	int mode = file->f_flags;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	trace_ocfs2_file_open(inode, file, file->f_path.dentry,
			      (unsigned long long)oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name, mode);

	if (file->f_mode & FMODE_WRITE) {
		status = dquot_initialize(inode);
		if (status)
			goto leave;
	}

	spin_lock(&oi->ip_lock);

	/* Check that the inode hasn't been wiped from disk by another
	 * node. If it hasn't then we're safe as long as we hold the
	 * spin lock until our increment of open count. */
	if (oi->ip_flags & OCFS2_INODE_DELETED) {
		spin_unlock(&oi->ip_lock);

		status = -ENOENT;
		goto leave;
	}

	if (mode & O_DIRECT)
		oi->ip_flags |= OCFS2_INODE_OPEN_DIRECT;

	oi->ip_open_count++;
	spin_unlock(&oi->ip_lock);

	status = ocfs2_init_file_private(inode, file);
	if (status) {
		/*
		 * We want to set open count back if we're failing the
		 * open.
		 */
		spin_lock(&oi->ip_lock);
		oi->ip_open_count--;
		spin_unlock(&oi->ip_lock);
	}

	file->f_mode |= FMODE_NOWAIT;

leave:
	return status;
}

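/*
 * Release an open file: drop ip_open_count and, on the last close,
 * clear the O_DIRECT hint on the inode.
 */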
static int ocfs2_file_release(struct inode *inode, struct file *file)
{
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	spin_lock(&oi->ip_lock);
	if (!--oi->ip_open_count)
		oi->ip_flags &= ~OCFS2_INODE_OPEN_DIRECT;

	trace_ocfs2_file_release(inode, file, file->f_path.dentry,
				 oi->ip_blkno,
				 file->f_path.dentry->d_name.len,
				 file->f_path.dentry->d_name.name,
				 oi->ip_open_count);
	spin_unlock(&oi->ip_lock);

	ocfs2_free_file_private(inode, file);

	return 0;
}

static int ocfs2_dir_open(struct inode *inode, struct file *file)
{
	return ocfs2_init_file_private(inode, file);
}

static int ocfs2_dir_release(struct inode *inode, struct file *file)
{
	ocfs2_free_file_private(inode, file);
	return 0;
}

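/*
 * fsync/fdatasync: flush dirty pages, then make sure the relevant
 * journal transaction has committed. A cache flush is issued by hand
 * when jbd2 won't be sending one for the transaction we waited on.
 */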
static int ocfs2_sync_file(struct file *file, loff_t start, loff_t end,
			   int datasync)
{
	int err = 0;
	struct inode *inode = file->f_mapping->host;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_inode_info *oi = OCFS2_I(inode);
	journal_t *journal = osb->journal->j_journal;
	int ret;
	tid_t commit_tid;
	bool needs_barrier = false;

	trace_ocfs2_sync_file(inode, file, file->f_path.dentry,
			      oi->ip_blkno,
			      file->f_path.dentry->d_name.len,
			      file->f_path.dentry->d_name.name,
			      (unsigned long long)datasync);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	err = file_write_and_wait_range(file, start, end);
	if (err)
		return err;

	commit_tid = datasync ? oi->i_datasync_tid : oi->i_sync_tid;
	if (journal->j_flags & JBD2_BARRIER &&
	    !jbd2_trans_will_send_data_barrier(journal, commit_tid))
		needs_barrier = true;
	err = jbd2_complete_transaction(journal, commit_tid);
	if (needs_barrier) {
		ret = blkdev_issue_flush(inode->i_sb->s_bdev);
		if (!err)
			err = ret;
	}

	if (err)
		mlog_errno(err);

	return (err < 0) ? -EIO : 0;
}

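/*
 * Decide whether an access-time update is warranted, honouring the
 * usual noatime/nodiratime/relatime mount flags plus the ocfs2-specific
 * atime quantum (s_atime_quantum), which batches atime updates.
 */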
int ocfs2_should_update_atime(struct inode *inode,
			      struct vfsmount *vfsmnt)
{
	struct timespec64 now;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return 0;

	if ((inode->i_flags & S_NOATIME) ||
	    ((inode->i_sb->s_flags & SB_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	/*
	 * We can be called with no vfsmnt structure - NFSD will
	 * sometimes do this.
	 *
	 * Note that our action here is different than touch_atime() -
	 * if we can't tell whether this is a noatime mount, then we
	 * don't know whether to trust the value of s_atime_quantum.
	 */
	if (vfsmnt == NULL)
		return 0;

	if ((vfsmnt->mnt_flags & MNT_NOATIME) ||
	    ((vfsmnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode)))
		return 0;

	if (vfsmnt->mnt_flags & MNT_RELATIME) {
		if ((timespec64_compare(&inode->i_atime, &inode->i_mtime) <= 0) ||
		    (timespec64_compare(&inode->i_atime, &inode->i_ctime) <= 0))
			return 1;

		return 0;
	}

	now = current_time(inode);
	if (now.tv_sec - inode->i_atime.tv_sec <= osb->s_atime_quantum)
		return 0;
	else
		return 1;
}

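/*
 * Write a fresh atime directly into the dinode under a journal handle.
 * This deliberately avoids ocfs2_mark_inode_dirty(); see the comment in
 * the body about not holding i_mutex here.
 */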
int ocfs2_update_inode_atime(struct inode *inode,
			     struct buffer_head *bh)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *) bh->b_data;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret) {
		mlog_errno(ret);
		goto out_commit;
	}

	/*
	 * Don't use ocfs2_mark_inode_dirty() here as we don't always
	 * have i_mutex to guard against concurrent changes to other
	 * inode fields.
	 */
	inode->i_atime = current_time(inode);
	di->i_atime = cpu_to_le64(inode->i_atime.tv_sec);
	di->i_atime_nsec = cpu_to_le32(inode->i_atime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_journal_dirty(handle, bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

int ocfs2_set_inode_size(handle_t *handle,
			 struct inode *inode,
			 struct buffer_head *fe_bh,
			 u64 new_i_size)
{
	int status;

	i_size_write(inode, new_i_size);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, fe_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail;
	}

bail:
	return status;
}

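/*
 * Update i_size in its own small transaction - a convenience wrapper
 * around ocfs2_set_inode_size() for callers that aren't already inside
 * a journal handle.
 */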
int ocfs2_simple_size_update(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_set_inode_size(handle, inode, di_bh,
				   new_i_size);
	if (ret < 0)
		mlog_errno(ret);

	ocfs2_update_inode_fsync_trans(handle, inode, 0);
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_cow_file_pos(struct inode *inode,
			      struct buffer_head *fe_bh,
			      u64 offset)
{
	int status;
	u32 phys, cpos = offset >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	/*
	 * If the new offset is cluster-aligned, there is no space for
	 * ocfs2_zero_range_for_truncate to fill, so no need to CoW
	 * either.
	 */
	if ((offset & (OCFS2_SB(inode->i_sb)->s_clustersize - 1)) == 0)
		return 0;

	status = ocfs2_get_clusters(inode, cpos, &phys,
				    &num_clusters, &ext_flags);
	if (status) {
		mlog_errno(status);
		goto out;
	}

	if (!(ext_flags & OCFS2_EXT_REFCOUNTED))
		goto out;

	return ocfs2_refcount_cow(inode, fe_bh, cpos, 1, cpos + 1);

out:
	return status;
}

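/*
 * Shrink i_size on disk: CoW the partial tail cluster if it is
 * refcounted, zero the tail of the new last cluster, then write the new
 * size and timestamps into the dinode within one transaction.
 */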
static int ocfs2_orphan_for_truncate(struct ocfs2_super *osb,
				     struct inode *inode,
				     struct buffer_head *fe_bh,
				     u64 new_i_size)
{
	int status;
	handle_t *handle;
	struct ocfs2_dinode *di;
	u64 cluster_bytes;

	/*
	 * We need to CoW the cluster that contains the offset if it is
	 * reflinked since we will call ocfs2_zero_range_for_truncate later
	 * which will write "0" from offset to the end of the cluster.
	 */
	status = ocfs2_cow_file_pos(inode, fe_bh, new_i_size);
	if (status) {
		mlog_errno(status);
		return status;
	}

	/* TODO: This needs to actually orphan the inode in this
	 * transaction. */

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		mlog_errno(status);
		goto out;
	}

	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), fe_bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto out_commit;
	}

	/*
	 * Do this before setting i_size.
	 */
	cluster_bytes = ocfs2_align_bytes_to_clusters(inode->i_sb, new_i_size);
	status = ocfs2_zero_range_for_truncate(inode, handle, new_i_size,
					       cluster_bytes);
	if (status) {
		mlog_errno(status);
		goto out_commit;
	}

	i_size_write(inode, new_i_size);
	inode->i_ctime = inode->i_mtime = current_time(inode);

	di = (struct ocfs2_dinode *) fe_bh->b_data;
	di->i_size = cpu_to_le64(new_i_size);
	di->i_ctime = di->i_mtime = cpu_to_le64(inode->i_ctime.tv_sec);
	di->i_ctime_nsec = di->i_mtime_nsec = cpu_to_le32(inode->i_ctime.tv_nsec);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, fe_bh);

out_commit:
	ocfs2_commit_trans(osb, handle);
out:
	return status;
}

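/*
 * Truncate the file to new_i_size. Expects di_bh to come from
 * ocfs2_inode_lock() and takes ip_alloc_sem around the allocation
 * change itself.
 */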
int ocfs2_truncate_file(struct inode *inode,
			struct buffer_head *di_bh,
			u64 new_i_size)
{
	int status = 0;
	struct ocfs2_dinode *fe = NULL;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);

	/* We trust di_bh because it comes from ocfs2_inode_lock(), which
	 * already validated it */
	fe = (struct ocfs2_dinode *) di_bh->b_data;

	trace_ocfs2_truncate_file((unsigned long long)OCFS2_I(inode)->ip_blkno,
				  (unsigned long long)le64_to_cpu(fe->i_size),
				  (unsigned long long)new_i_size);

	mlog_bug_on_msg(le64_to_cpu(fe->i_size) != i_size_read(inode),
			"Inode %llu, inode i_size = %lld != di "
			"i_size = %llu, i_flags = 0x%x\n",
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			i_size_read(inode),
			(unsigned long long)le64_to_cpu(fe->i_size),
			le32_to_cpu(fe->i_flags));

	if (new_i_size > le64_to_cpu(fe->i_size)) {
		trace_ocfs2_truncate_file_error(
			(unsigned long long)le64_to_cpu(fe->i_size),
			(unsigned long long)new_i_size);
		status = -EINVAL;
		mlog_errno(status);
		goto bail;
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);

	ocfs2_resv_discard(&osb->osb_la_resmap,
			   &OCFS2_I(inode)->ip_la_data_resv);

	/*
	 * The inode lock forced other nodes to sync and drop their
	 * pages, which (correctly) happens even if we have a truncate
	 * without allocation change - ocfs2 cluster sizes can be much
	 * greater than page size, so we have to truncate them
	 * anyway.
	 */

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		unmap_mapping_range(inode->i_mapping,
				    new_i_size + PAGE_SIZE - 1, 0, 1);
		truncate_inode_pages(inode->i_mapping, new_i_size);
		status = ocfs2_truncate_inline(inode, di_bh, new_i_size,
					       i_size_read(inode), 1);
		if (status)
			mlog_errno(status);

		goto bail_unlock_sem;
	}

	/* alright, we're going to need to do a full blown alloc size
	 * change. Orphan the inode so that recovery can complete the
	 * truncate if necessary. This does the task of marking
	 * i_size. */
	status = ocfs2_orphan_for_truncate(osb, inode, di_bh, new_i_size);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	unmap_mapping_range(inode->i_mapping, new_i_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(inode->i_mapping, new_i_size);

	status = ocfs2_commit_truncate(osb, inode, di_bh);
	if (status < 0) {
		mlog_errno(status);
		goto bail_unlock_sem;
	}

	/* TODO: orphan dir cleanup here. */
bail_unlock_sem:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);

bail:
	if (!status && OCFS2_I(inode)->ip_clusters == 0)
		status = ocfs2_try_remove_refcount_tree(inode, di_bh);

	return status;
}

/*
 * extend file allocation only here.
 * we'll update all the disk stuff, and oip->alloc_size
 *
 * expect stuff to be locked, a transaction started and enough data /
 * metadata reservations in the contexts.
 *
 * Will return -EAGAIN, and a reason if a restart is needed.
 * If passed in, *reason will always be set, even in error.
 */
int ocfs2_add_inode_data(struct ocfs2_super *osb,
			 struct inode *inode,
			 u32 *logical_offset,
			 u32 clusters_to_add,
			 int mark_unwritten,
			 struct buffer_head *fe_bh,
			 handle_t *handle,
			 struct ocfs2_alloc_context *data_ac,
			 struct ocfs2_alloc_context *meta_ac,
			 enum ocfs2_alloc_restarted *reason_ret)
{
	int ret;
	struct ocfs2_extent_tree et;

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), fe_bh);
	ret = ocfs2_add_clusters_in_btree(handle, &et, logical_offset,
					  clusters_to_add, mark_unwritten,
					  data_ac, meta_ac, reason_ret);

	return ret;
}

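/*
 * Allocate clusters_to_add new clusters starting at logical_start,
 * managing the allocator locks, quota reservation and journal
 * transaction itself. The allocation may be restarted (for more
 * metadata, or with a fresh transaction) based on the reason code
 * returned through ocfs2_add_inode_data().
 */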
static int ocfs2_extend_allocation(struct inode *inode, u32 logical_start,
				   u32 clusters_to_add, int mark_unwritten)
{
	int status = 0;
	int restart_func = 0;
	int credits;
	u32 prev_clusters;
	struct buffer_head *bh = NULL;
	struct ocfs2_dinode *fe = NULL;
	handle_t *handle = NULL;
	struct ocfs2_alloc_context *data_ac = NULL;
	struct ocfs2_alloc_context *meta_ac = NULL;
	enum ocfs2_alloc_restarted why = RESTART_NONE;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_extent_tree et;
	int did_quota = 0;

	/*
	 * Unwritten extents only exist for file systems which
	 * support holes.
	 */
	BUG_ON(mark_unwritten && !ocfs2_sparse_alloc(osb));

	status = ocfs2_read_inode_block(inode, &bh);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}
	fe = (struct ocfs2_dinode *) bh->b_data;

restart_all:
	BUG_ON(le32_to_cpu(fe->i_clusters) != OCFS2_I(inode)->ip_clusters);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), bh);
	status = ocfs2_lock_allocators(inode, &et, clusters_to_add, 0,
				       &data_ac, &meta_ac);
	if (status) {
		mlog_errno(status);
		goto leave;
	}

	credits = ocfs2_calc_extend_credits(osb->sb, &fe->id2.i_list);
	handle = ocfs2_start_trans(osb, credits);
	if (IS_ERR(handle)) {
		status = PTR_ERR(handle);
		handle = NULL;
		mlog_errno(status);
		goto leave;
	}

restarted_transaction:
	trace_ocfs2_extend_allocation(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)i_size_read(inode),
		le32_to_cpu(fe->i_clusters), clusters_to_add,
		why, restart_func);

	status = dquot_alloc_space_nodirty(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (status)
		goto leave;
	did_quota = 1;

	/* reserve a write to the file entry early on - that way if we
	 * run out of credits in the allocation path, we can still
	 * update i_size. */
	status = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
					 OCFS2_JOURNAL_ACCESS_WRITE);
	if (status < 0) {
		mlog_errno(status);
		goto leave;
	}

	prev_clusters = OCFS2_I(inode)->ip_clusters;

	status = ocfs2_add_inode_data(osb,
				      inode,
				      &logical_start,
				      clusters_to_add,
				      mark_unwritten,
				      bh,
				      handle,
				      data_ac,
				      meta_ac,
				      &why);
	if ((status < 0) && (status != -EAGAIN)) {
		if (status != -ENOSPC)
			mlog_errno(status);
		goto leave;
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);
	ocfs2_journal_dirty(handle, bh);

	spin_lock(&OCFS2_I(inode)->ip_lock);
	clusters_to_add -= (OCFS2_I(inode)->ip_clusters - prev_clusters);
	spin_unlock(&OCFS2_I(inode)->ip_lock);
	/* Release unused quota reservation */
	dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	did_quota = 0;

	if (why != RESTART_NONE && clusters_to_add) {
		if (why == RESTART_META) {
			restart_func = 1;
			status = 0;
		} else {
			BUG_ON(why != RESTART_TRANS);

			status = ocfs2_allocate_extend_trans(handle, 1);
			if (status < 0) {
				/* handle still has to be committed at
				 * this point. */
				status = -ENOMEM;
				mlog_errno(status);
				goto leave;
			}
			goto restarted_transaction;
		}
	}

	trace_ocfs2_extend_allocation_end(OCFS2_I(inode)->ip_blkno,
	     le32_to_cpu(fe->i_clusters),
	     (unsigned long long)le64_to_cpu(fe->i_size),
	     OCFS2_I(inode)->ip_clusters,
	     (unsigned long long)i_size_read(inode));

leave:
	if (status < 0 && did_quota)
		dquot_free_space(inode,
			ocfs2_clusters_to_bytes(osb->sb, clusters_to_add));
	if (handle) {
		ocfs2_commit_trans(osb, handle);
		handle = NULL;
	}
	if (data_ac) {
		ocfs2_free_alloc_context(data_ac);
		data_ac = NULL;
	}
	if (meta_ac) {
		ocfs2_free_alloc_context(meta_ac);
		meta_ac = NULL;
	}
	if ((!status) && restart_func) {
		restart_func = 0;
		goto restart_all;
	}
	brelse(bh);
	bh = NULL;

	return status;
}

/*
 * While a write will already be ordering the data, a truncate will not.
 * Thus, we need to explicitly order the zeroed pages.
 */
static handle_t *ocfs2_zero_start_ordered_transaction(struct inode *inode,
						      struct buffer_head *di_bh,
						      loff_t start_byte,
						      loff_t length)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	handle_t *handle = NULL;
	int ret = 0;

	if (!ocfs2_should_order_data(inode))
		goto out;

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_jbd2_inode_add_write(handle, inode, start_byte, length);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), di_bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret)
		mlog_errno(ret);
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

out:
	if (ret) {
		if (!IS_ERR(handle))
			ocfs2_commit_trans(osb, handle);
		handle = ERR_PTR(ret);
	}
	return handle;
}


/* Some parts of this taken from generic_cont_expand, which turned out
 * to be too fragile to do exactly what we need without us having to
 * worry about recursive locking in ->write_begin() and ->write_end(). */
static int ocfs2_write_zero_page(struct inode *inode, u64 abs_from,
				 u64 abs_to, struct buffer_head *di_bh)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	unsigned long index = abs_from >> PAGE_SHIFT;
	handle_t *handle;
	int ret = 0;
	unsigned zero_from, zero_to, block_start, block_end;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;

	BUG_ON(abs_from >= abs_to);
	BUG_ON(abs_to > (((u64)index + 1) << PAGE_SHIFT));
	BUG_ON(abs_from & (inode->i_blkbits - 1));

	handle = ocfs2_zero_start_ordered_transaction(inode, di_bh,
						      abs_from,
						      abs_to - abs_from);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		goto out;
	}

	page = find_or_create_page(mapping, index, GFP_NOFS);
	if (!page) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out_commit_trans;
	}

	/* Get the offsets within the page that we want to zero */
	zero_from = abs_from & (PAGE_SIZE - 1);
	zero_to = abs_to & (PAGE_SIZE - 1);
	if (!zero_to)
		zero_to = PAGE_SIZE;

	trace_ocfs2_write_zero_page(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)abs_from,
			(unsigned long long)abs_to,
			index, zero_from, zero_to);

	/* We know that zero_from is block aligned */
	for (block_start = zero_from; block_start < zero_to;
	     block_start = block_end) {
		block_end = block_start + i_blocksize(inode);

		/*
		 * block_start is block-aligned. Bump it by one to force
		 * __block_write_begin and block_commit_write to zero the
		 * whole block.
		 */
		ret = __block_write_begin(page, block_start + 1, 0,
					  ocfs2_get_block);
		if (ret < 0) {
			mlog_errno(ret);
			goto out_unlock;
		}

		/* must not update i_size! */
		ret = block_commit_write(page, block_start + 1,
					 block_start + 1);
		if (ret < 0)
			mlog_errno(ret);
		else
			ret = 0;
	}

	/*
	 * fs-writeback will release, without taking the page lock, dirty
	 * pages whose offset is beyond the inode size; the release happens
	 * in block_write_full_page().
	 */
	i_size_write(inode, abs_to);
	inode->i_blocks = ocfs2_inode_sector_count(inode);
	di->i_size = cpu_to_le64((u64)i_size_read(inode));
	inode->i_mtime = inode->i_ctime = current_time(inode);
	di->i_mtime = di->i_ctime = cpu_to_le64(inode->i_mtime.tv_sec);
	di->i_ctime_nsec = cpu_to_le32(inode->i_mtime.tv_nsec);
	di->i_mtime_nsec = di->i_ctime_nsec;
	if (handle) {
		ocfs2_journal_dirty(handle, di_bh);
		ocfs2_update_inode_fsync_trans(handle, inode, 1);
	}

out_unlock:
	unlock_page(page);
	put_page(page);
out_commit_trans:
	if (handle)
		ocfs2_commit_trans(OCFS2_SB(inode->i_sb), handle);
out:
	return ret;
}


/*
 * Find the next range to zero. We do this in terms of bytes because
 * that's what ocfs2_zero_extend() wants, and it is dealing with the
 * pagecache. We may return multiple extents.
 *
 * zero_start and zero_end are ocfs2_zero_extend()'s current idea of what
 * needs to be zeroed. range_start and range_end return the next zeroing
 * range. A subsequent call should pass the previous range_end as its
 * zero_start. If range_end is 0, there's nothing to do.
 *
 * Unwritten extents are skipped over. Refcounted extents are CoWed.
 */
static int ocfs2_zero_extend_get_range(struct inode *inode,
				       struct buffer_head *di_bh,
				       u64 zero_start, u64 zero_end,
				       u64 *range_start, u64 *range_end)
{
	int rc = 0, needs_cow = 0;
	u32 p_cpos, zero_clusters = 0;
	u32 zero_cpos =
		zero_start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	u32 last_cpos = ocfs2_clusters_for_bytes(inode->i_sb, zero_end);
	unsigned int num_clusters = 0;
	unsigned int ext_flags = 0;

	while (zero_cpos < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos, &p_cpos,
					&num_clusters, &ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (p_cpos && !(ext_flags & OCFS2_EXT_UNWRITTEN)) {
			zero_clusters = num_clusters;
			if (ext_flags & OCFS2_EXT_REFCOUNTED)
				needs_cow = 1;
			break;
		}

		zero_cpos += num_clusters;
	}
	if (!zero_clusters) {
		*range_end = 0;
		goto out;
	}

	while ((zero_cpos + zero_clusters) < last_cpos) {
		rc = ocfs2_get_clusters(inode, zero_cpos + zero_clusters,
					&p_cpos, &num_clusters,
					&ext_flags);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}

		if (!p_cpos || (ext_flags & OCFS2_EXT_UNWRITTEN))
			break;
		if (ext_flags & OCFS2_EXT_REFCOUNTED)
			needs_cow = 1;
		zero_clusters += num_clusters;
	}
	if ((zero_cpos + zero_clusters) > last_cpos)
		zero_clusters = last_cpos - zero_cpos;

	if (needs_cow) {
		rc = ocfs2_refcount_cow(inode, di_bh, zero_cpos,
					zero_clusters, UINT_MAX);
		if (rc) {
			mlog_errno(rc);
			goto out;
		}
	}

	*range_start = ocfs2_clusters_to_bytes(inode->i_sb, zero_cpos);
	*range_end = ocfs2_clusters_to_bytes(inode->i_sb,
					     zero_cpos + zero_clusters);

out:
	return rc;
}


/*
 * Zero one range returned from ocfs2_zero_extend_get_range(). The caller
 * has made sure that the entire range needs zeroing.
 */
static int ocfs2_zero_extend_range(struct inode *inode, u64 range_start,
				   u64 range_end, struct buffer_head *di_bh)
{
	int rc = 0;
	u64 next_pos;
	u64 zero_pos = range_start;

	trace_ocfs2_zero_extend_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)range_start,
			(unsigned long long)range_end);
	BUG_ON(range_start >= range_end);

	while (zero_pos < range_end) {
		next_pos = (zero_pos & PAGE_MASK) + PAGE_SIZE;
		if (next_pos > range_end)
			next_pos = range_end;
		rc = ocfs2_write_zero_page(inode, zero_pos, next_pos, di_bh);
		if (rc < 0) {
			mlog_errno(rc);
			break;
		}
		zero_pos = next_pos;

		/*
		 * Very large extends have the potential to lock up
		 * the cpu for extended periods of time.
		 */
		cond_resched();
	}

	return rc;
}

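/*
 * Zero the region between the current i_size and zero_to_size, walking
 * it range by range via ocfs2_zero_extend_get_range() so that holes and
 * unwritten extents are skipped.
 */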
int ocfs2_zero_extend(struct inode *inode, struct buffer_head *di_bh,
		      loff_t zero_to_size)
{
	int ret = 0;
	u64 zero_start, range_start = 0, range_end = 0;
	struct super_block *sb = inode->i_sb;

	zero_start = ocfs2_align_bytes_to_blocks(sb, i_size_read(inode));
	trace_ocfs2_zero_extend((unsigned long long)OCFS2_I(inode)->ip_blkno,
				(unsigned long long)zero_start,
				(unsigned long long)i_size_read(inode));
	while (zero_start < zero_to_size) {
		ret = ocfs2_zero_extend_get_range(inode, di_bh, zero_start,
						  zero_to_size,
						  &range_start,
						  &range_end);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		if (!range_end)
			break;
		/* Trim the ends */
		if (range_start < zero_start)
			range_start = zero_start;
		if (range_end > zero_to_size)
			range_end = zero_to_size;

		ret = ocfs2_zero_extend_range(inode, range_start,
					      range_end, di_bh);
		if (ret) {
			mlog_errno(ret);
			break;
		}
		zero_start = range_end;
	}

	return ret;
}

int ocfs2_extend_no_holes(struct inode *inode, struct buffer_head *di_bh,
			  u64 new_i_size, u64 zero_to)
{
	int ret;
	u32 clusters_to_add;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	/*
	 * Only quota files call this without a bh, and they can't be
	 * refcounted.
	 */
	BUG_ON(!di_bh && ocfs2_is_refcount_inode(inode));
	BUG_ON(!di_bh && !(oi->ip_flags & OCFS2_INODE_SYSTEM_FILE));

	clusters_to_add = ocfs2_clusters_for_bytes(inode->i_sb, new_i_size);
	if (clusters_to_add < oi->ip_clusters)
		clusters_to_add = 0;
	else
		clusters_to_add -= oi->ip_clusters;

	if (clusters_to_add) {
		ret = ocfs2_extend_allocation(inode, oi->ip_clusters,
					      clusters_to_add, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * Call this even if we don't add any clusters to the tree. We
	 * still need to zero the area between the old i_size and the
	 * new i_size.
	 */
	ret = ocfs2_zero_extend(inode, di_bh, zero_to);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

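/*
 * Grow the file to new_i_size. Inline data is kept inline while it
 * still fits and converted to extents otherwise; sparse file systems
 * only need tail zeroing, while non-sparse ones must allocate the
 * whole region up front.
 */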
static int ocfs2_extend_file(struct inode *inode,
			     struct buffer_head *di_bh,
			     u64 new_i_size)
{
	int ret = 0;
	struct ocfs2_inode_info *oi = OCFS2_I(inode);

	BUG_ON(!di_bh);

	/* setattr sometimes calls us like this. */
	if (new_i_size == 0)
		goto out;

	if (i_size_read(inode) == new_i_size)
		goto out;
	BUG_ON(new_i_size < i_size_read(inode));

	/*
	 * The alloc sem blocks people in read/write from reading our
	 * allocation until we're done changing it. We depend on
	 * i_mutex to block other extend/truncate calls while we're
	 * here. We even have to hold it for sparse files because there
	 * might be some tail zeroing.
	 */
	down_write(&oi->ip_alloc_sem);

	if (oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		/*
		 * We can optimize small extends by keeping the inode's
		 * inline data.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, new_i_size)) {
			up_write(&oi->ip_alloc_sem);
			goto out_update_size;
		}

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			up_write(&oi->ip_alloc_sem);
			mlog_errno(ret);
			goto out;
		}
	}

	if (ocfs2_sparse_alloc(OCFS2_SB(inode->i_sb)))
		ret = ocfs2_zero_extend(inode, di_bh, new_i_size);
	else
		ret = ocfs2_extend_no_holes(inode, di_bh, new_i_size,
					    new_i_size);

	up_write(&oi->ip_alloc_sem);

	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

out_update_size:
	ret = ocfs2_simple_size_update(inode, di_bh, new_i_size);
	if (ret < 0)
		mlog_errno(ret);

out:
	return ret;
}

int ocfs2_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
		  struct iattr *attr)
{
	int status = 0, size_change;
	int inode_locked = 0;
	struct inode *inode = d_inode(dentry);
	struct super_block *sb = inode->i_sb;
	struct ocfs2_super *osb = OCFS2_SB(sb);
	struct buffer_head *bh = NULL;
	handle_t *handle = NULL;
	struct dquot *transfer_to[MAXQUOTAS] = { };
	int qtype;
	int had_lock;
	struct ocfs2_lock_holder oh;

	trace_ocfs2_setattr(inode, dentry,
			    (unsigned long long)OCFS2_I(inode)->ip_blkno,
			    dentry->d_name.len, dentry->d_name.name,
			    attr->ia_valid, attr->ia_mode,
			    from_kuid(&init_user_ns, attr->ia_uid),
			    from_kgid(&init_user_ns, attr->ia_gid));

	/* ensuring we don't even attempt to truncate a symlink */
	if (S_ISLNK(inode->i_mode))
		attr->ia_valid &= ~ATTR_SIZE;

#define OCFS2_VALID_ATTRS (ATTR_ATIME | ATTR_MTIME | ATTR_CTIME | ATTR_SIZE \
			   | ATTR_GID | ATTR_UID | ATTR_MODE)
	if (!(attr->ia_valid & OCFS2_VALID_ATTRS))
		return 0;

	status = setattr_prepare(&init_user_ns, dentry, attr);
	if (status)
		return status;

	if (is_quota_modification(inode, attr)) {
		status = dquot_initialize(inode);
		if (status)
			return status;
	}
	size_change = S_ISREG(inode->i_mode) && attr->ia_valid & ATTR_SIZE;
	if (size_change) {
		/*
		 * Here we should wait for dio to finish before taking the
		 * inode lock, to avoid a deadlock between ocfs2_setattr()
		 * and ocfs2_dio_end_io_write()
		 */
		inode_dio_wait(inode);

		status = ocfs2_rw_lock(inode, 1);
		if (status < 0) {
			mlog_errno(status);
			goto bail;
		}
	}

	had_lock = ocfs2_inode_lock_tracker(inode, &bh, 1, &oh);
	if (had_lock < 0) {
		status = had_lock;
		goto bail_unlock_rw;
	} else if (had_lock) {
		/*
		 * As far as we know, ocfs2_setattr() could only be the first
		 * VFS entry point in the call chain of recursive cluster
		 * locking issue.
		 *
		 * For instance:
		 * chmod_common()
		 *  notify_change()
		 *   ocfs2_setattr()
		 *    posix_acl_chmod()
		 *     ocfs2_iop_get_acl()
		 *
		 * But, we're not 100% sure if it's always true, because the
		 * ordering of the VFS entry points in the call chain is out
		 * of our control. So, we'd better dump the stack here to
		 * catch the other cases of recursive locking.
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}
	inode_locked = 1;

	if (size_change) {
		status = inode_newsize_ok(inode, attr->ia_size);
		if (status)
			goto bail_unlock;

		if (i_size_read(inode) >= attr->ia_size) {
			if (ocfs2_should_order_data(inode)) {
				status = ocfs2_begin_ordered_truncate(inode,
								      attr->ia_size);
				if (status)
					goto bail_unlock;
			}
			status = ocfs2_truncate_file(inode, bh, attr->ia_size);
		} else
			status = ocfs2_extend_file(inode, bh, attr->ia_size);
		if (status < 0) {
			if (status != -ENOSPC)
				mlog_errno(status);
			status = -ENOSPC;
			goto bail_unlock;
		}
	}

	if ((attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)) ||
	    (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid))) {
		/*
		 * Gather pointers to quota structures so that allocation /
		 * freeing of quota structures happens here and not inside
		 * dquot_transfer() where we have problems with lock ordering
		 */
		if (attr->ia_valid & ATTR_UID && !uid_eq(attr->ia_uid, inode->i_uid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_USRQUOTA)) {
			transfer_to[USRQUOTA] = dqget(sb, make_kqid_uid(attr->ia_uid));
			if (IS_ERR(transfer_to[USRQUOTA])) {
				status = PTR_ERR(transfer_to[USRQUOTA]);
				transfer_to[USRQUOTA] = NULL;
				goto bail_unlock;
			}
		}
		if (attr->ia_valid & ATTR_GID && !gid_eq(attr->ia_gid, inode->i_gid)
		    && OCFS2_HAS_RO_COMPAT_FEATURE(sb,
		    OCFS2_FEATURE_RO_COMPAT_GRPQUOTA)) {
			transfer_to[GRPQUOTA] = dqget(sb, make_kqid_gid(attr->ia_gid));
			if (IS_ERR(transfer_to[GRPQUOTA])) {
				status = PTR_ERR(transfer_to[GRPQUOTA]);
				transfer_to[GRPQUOTA] = NULL;
				goto bail_unlock;
			}
		}
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS +
					   2 * ocfs2_quota_trans_credits(sb));
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
		status = __dquot_transfer(inode, transfer_to);
		if (status < 0)
			goto bail_commit;
	} else {
		down_write(&OCFS2_I(inode)->ip_alloc_sem);
		handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
		if (IS_ERR(handle)) {
			status = PTR_ERR(handle);
			mlog_errno(status);
			goto bail_unlock_alloc;
		}
	}

	setattr_copy(&init_user_ns, inode, attr);
	mark_inode_dirty(inode);

	status = ocfs2_mark_inode_dirty(handle, inode, bh);
	if (status < 0)
		mlog_errno(status);

bail_commit:
	ocfs2_commit_trans(osb, handle);
bail_unlock_alloc:
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
bail_unlock:
	if (status && inode_locked) {
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);
		inode_locked = 0;
	}
bail_unlock_rw:
	if (size_change)
		ocfs2_rw_unlock(inode, 1);
bail:

	/* Release quota pointers in case we acquired them */
	for (qtype = 0; qtype < OCFS2_MAXQUOTAS; qtype++)
		dqput(transfer_to[qtype]);

	if (!status && attr->ia_valid & ATTR_MODE) {
		status = ocfs2_acl_chmod(inode, bh);
		if (status < 0)
			mlog_errno(status);
	}
	if (inode_locked)
		ocfs2_inode_unlock_tracker(inode, 1, &oh, had_lock);

	brelse(bh);
	return status;
}

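/*
 * stat() an inode, revalidating it against the cluster first. blksize
 * is reported as the cluster size, and inline-data files get a nonzero
 * block count so tools don't mistake them for completely sparse files.
 */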
int ocfs2_getattr(struct user_namespace *mnt_userns, const struct path *path,
		  struct kstat *stat, u32 request_mask, unsigned int flags)
{
	struct inode *inode = d_inode(path->dentry);
	struct super_block *sb = path->dentry->d_sb;
	struct ocfs2_super *osb = sb->s_fs_info;
	int err;

	err = ocfs2_inode_revalidate(path->dentry);
	if (err) {
		if (err != -ENOENT)
			mlog_errno(err);
		goto bail;
	}

	generic_fillattr(&init_user_ns, inode, stat);
	/*
	 * If there is inline data in the inode, the inode will normally not
	 * have data blocks allocated (it may have an external xattr block).
	 * Report at least one sector for such files, so tools like tar, rsync,
	 * others don't incorrectly think the file is completely sparse.
	 */
	if (unlikely(OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL))
		stat->blocks += (stat->size + 511)>>9;

	/* We set the blksize from the cluster size for performance */
	stat->blksize = osb->s_clustersize;

bail:
	return err;
}

int ocfs2_permission(struct user_namespace *mnt_userns, struct inode *inode,
		     int mask)
{
	int ret, had_lock;
	struct ocfs2_lock_holder oh;

	if (mask & MAY_NOT_BLOCK)
		return -ECHILD;

	had_lock = ocfs2_inode_lock_tracker(inode, NULL, 0, &oh);
	if (had_lock < 0) {
		ret = had_lock;
		goto out;
	} else if (had_lock) {
		/* See comments in ocfs2_setattr() for details.
		 * The call chain of this case could be:
		 * do_sys_open()
		 *  may_open()
		 *   inode_permission()
		 *    ocfs2_permission()
		 *     ocfs2_iop_get_acl()
		 */
		mlog(ML_ERROR, "Another case of recursive locking:\n");
		dump_stack();
	}

	ret = generic_permission(&init_user_ns, inode, mask);

	ocfs2_inode_unlock_tracker(inode, 0, &oh, had_lock);
out:
	return ret;
}

static int __ocfs2_write_remove_suid(struct inode *inode,
				     struct buffer_head *bh)
{
	int ret;
	handle_t *handle;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_dinode *di;

	trace_ocfs2_write_remove_suid(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			inode->i_mode);

	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_journal_access_di(handle, INODE_CACHE(inode), bh,
				      OCFS2_JOURNAL_ACCESS_WRITE);
	if (ret < 0) {
		mlog_errno(ret);
		goto out_trans;
	}

	inode->i_mode &= ~S_ISUID;
	if ((inode->i_mode & S_ISGID) && (inode->i_mode & S_IXGRP))
		inode->i_mode &= ~S_ISGID;

	di = (struct ocfs2_dinode *) bh->b_data;
	di->i_mode = cpu_to_le16(inode->i_mode);
	ocfs2_update_inode_fsync_trans(handle, inode, 0);

	ocfs2_journal_dirty(handle, bh);

out_trans:
	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}

static int ocfs2_write_remove_suid(struct inode *inode)
{
	int ret;
	struct buffer_head *bh = NULL;

	ret = ocfs2_read_inode_block(inode, &bh);
	if (ret < 0) {
		mlog_errno(ret);
		goto out;
	}

	ret = __ocfs2_write_remove_suid(inode, bh);
out:
	brelse(bh);
	return ret;
}

/*
 * Allocate enough extents to cover the region starting at byte offset
 * start for len bytes. Existing extents are skipped, any extents
 * added are marked as "unwritten".
 */
static int ocfs2_allocate_unwritten_extents(struct inode *inode,
					    u64 start, u64 len)
{
	int ret;
	u32 cpos, phys_cpos, clusters, alloc_size;
	u64 end = start + len;
	struct buffer_head *di_bh = NULL;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_read_inode_block(inode, &di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Nothing to do if the requested reservation range
		 * fits within the inode.
		 */
		if (ocfs2_size_fits_inline_data(di_bh, end))
			goto out;

		ret = ocfs2_convert_inline_data_to_extents(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	/*
	 * We consider both start and len to be inclusive.
	 */
	cpos = start >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(inode->i_sb, start + len);
	clusters -= cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos,
					 &alloc_size, NULL);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		/*
		 * Hole or existing extent len can be arbitrary, so
		 * cap it to our own allocation request.
		 */
		if (alloc_size > clusters)
			alloc_size = clusters;

		if (phys_cpos) {
			/*
			 * We already have an allocation at this
			 * region so we can safely skip it.
			 */
			goto next;
		}

		ret = ocfs2_extend_allocation(inode, cpos, alloc_size, 1);
		if (ret) {
			if (ret != -ENOSPC)
				mlog_errno(ret);
			goto out;
		}

next:
		cpos += alloc_size;
		clusters -= alloc_size;
	}

	ret = 0;
out:

	brelse(di_bh);
	return ret;
}


/*
 * Truncate a byte range, avoiding pages within partial clusters. This
 * preserves those pages for the zeroing code to write to.
 */
static void ocfs2_truncate_cluster_pages(struct inode *inode, u64 byte_start,
					 u64 byte_len)
{
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	loff_t start, end;
	struct address_space *mapping = inode->i_mapping;

	start = (loff_t)ocfs2_align_bytes_to_clusters(inode->i_sb, byte_start);
	end = byte_start + byte_len;
	end = end & ~(osb->s_clustersize - 1);

	if (start < end) {
		unmap_mapping_range(mapping, start, end - start, 0);
		truncate_inode_pages_range(mapping, start, end - 1);
	}
}


/*
 * Zero out partial blocks of one cluster.
 *
 * start: file offset where the zeroing starts; it will be rounded up to
 *	  block alignment.
 * len: trimmed so that "start + len" does not extend past the end of
 *	the current cluster.
 */
static int ocfs2_zeroout_partial_cluster(struct inode *inode,
					 u64 start, u64 len)
{
	int ret;
	u64 start_block, end_block, nr_blocks;
	u64 p_block, offset;
	u32 cluster, p_cluster, nr_clusters;
	struct super_block *sb = inode->i_sb;
	u64 end = ocfs2_align_bytes_to_clusters(sb, start);

	if (start + len < end)
		end = start + len;

	start_block = ocfs2_blocks_for_bytes(sb, start);
	end_block = ocfs2_blocks_for_bytes(sb, end);
	nr_blocks = end_block - start_block;
	if (!nr_blocks)
		return 0;

	cluster = ocfs2_bytes_to_clusters(sb, start);
	ret = ocfs2_get_clusters(inode, cluster, &p_cluster,
				 &nr_clusters, NULL);
	if (ret)
		return ret;
	if (!p_cluster)
		return 0;

	offset = start_block - ocfs2_clusters_to_blocks(sb, cluster);
	p_block = ocfs2_clusters_to_blocks(sb, p_cluster) + offset;
	return sb_issue_zeroout(sb, p_block, nr_blocks, GFP_NOFS);
}

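/*
 * Zero the partial clusters at either edge of a range being punched
 * out; whole clusters in the middle are left to the extent-removal
 * code.
 */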
static int ocfs2_zero_partial_clusters(struct inode *inode,
				       u64 start, u64 len)
{
	int ret = 0;
	u64 tmpend = 0;
	u64 end = start + len;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	unsigned int csize = osb->s_clustersize;
	handle_t *handle;
	loff_t isize = i_size_read(inode);

	/*
	 * The "start" and "end" values are NOT necessarily part of
	 * the range whose allocation is being deleted. Rather, this
	 * is what the user passed in with the request. We must zero
	 * partial clusters here. There's no need to worry about
	 * physical allocation - the zeroing code knows to skip holes.
	 */
	trace_ocfs2_zero_partial_clusters(
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		(unsigned long long)start, (unsigned long long)end);

	/*
	 * If both edges are on a cluster boundary then there's no
	 * zeroing required as the region is part of the allocation to
	 * be truncated.
	 */
	if ((start & (csize - 1)) == 0 && (end & (csize - 1)) == 0)
		goto out;

	/* No page cache for EOF blocks, issue zero out to disk. */
	if (end > isize) {
		/*
		 * Zero out the EOF blocks in the last cluster starting
		 * from "isize", even if "start" > "isize", because it is
		 * complicated to zero out just at "start": "start" may
		 * not be block aligned, which would require a buffer
		 * write, and buffer writes beyond EOF are not supported.
		 */
		ret = ocfs2_zeroout_partial_cluster(inode, isize,
						    end - isize);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		if (start >= isize)
			goto out;
		end = isize;
	}
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out;
	}

	/*
	 * If start is on a cluster boundary and end is somewhere in another
	 * cluster, we have not COWed the cluster starting at start, unless
	 * end is also within the same cluster. So, in this case, we skip this
	 * first call to ocfs2_zero_range_for_truncate() and move on
	 * to the next one.
	 */
	if ((start & (csize - 1)) != 0) {
		/*
		 * We want to get the byte offset of the end of the 1st
		 * cluster.
		 */
		tmpend = (u64)osb->s_clustersize +
			(start & ~(osb->s_clustersize - 1));
		if (tmpend > end)
			tmpend = end;

		trace_ocfs2_zero_partial_clusters_range1(
			(unsigned long long)start,
			(unsigned long long)tmpend);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start,
						    tmpend);
		if (ret)
			mlog_errno(ret);
	}

	if (tmpend < end) {
		/*
		 * This may make start and end equal, but the zeroing
		 * code will skip any work in that case so there's no
		 * need to catch it up here.
		 */
		start = end & ~(osb->s_clustersize - 1);

		trace_ocfs2_zero_partial_clusters_range2(
			(unsigned long long)start, (unsigned long long)end);

		ret = ocfs2_zero_range_for_truncate(inode, handle, start, end);
		if (ret)
			mlog_errno(ret);
	}
	ocfs2_update_inode_fsync_trans(handle, inode, 1);

	ocfs2_commit_trans(osb, handle);
out:
	return ret;
}


static int ocfs2_find_rec(struct ocfs2_extent_list *el, u32 pos)
{
	int i;
	struct ocfs2_extent_rec *rec = NULL;

	for (i = le16_to_cpu(el->l_next_free_rec) - 1; i >= 0; i--) {

		rec = &el->l_recs[i];

		if (le32_to_cpu(rec->e_cpos) < pos)
			break;
	}

	return i;
}


/*
 * Helper to calculate the punching pos and length in one run, we handle the
 * following three cases in order:
 *
 * - remove the entire record
 * - remove a partial record
 * - no record needs to be removed (hole-punching completed)
 */
static void ocfs2_calc_trunc_pos(struct inode *inode,
				 struct ocfs2_extent_list *el,
				 struct ocfs2_extent_rec *rec,
				 u32 trunc_start, u32 *trunc_cpos,
				 u32 *trunc_len, u32 *trunc_end,
				 u64 *blkno, int *done)
{
	int ret = 0;
	u32 coff, range;

	range = le32_to_cpu(rec->e_cpos) + ocfs2_rec_clusters(el, rec);

	if (le32_to_cpu(rec->e_cpos) >= trunc_start) {
		/*
		 * remove an entire extent record.
		 */
		*trunc_cpos = le32_to_cpu(rec->e_cpos);
		/*
		 * Skip holes if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno);
		*trunc_end = le32_to_cpu(rec->e_cpos);
	} else if (range > trunc_start) {
		/*
		 * remove a partial extent record, which means we're
		 * removing the last extent record.
		 */
		*trunc_cpos = trunc_start;
		/*
		 * skip hole if any.
		 */
		if (range < *trunc_end)
			*trunc_end = range;
		*trunc_len = *trunc_end - trunc_start;
		coff = trunc_start - le32_to_cpu(rec->e_cpos);
		*blkno = le64_to_cpu(rec->e_blkno) +
				ocfs2_clusters_to_blocks(inode->i_sb, coff);
		*trunc_end = trunc_start;
	} else {
		/*
		 * There are two possibilities here:
		 *
		 * - the last record has been removed
		 * - trunc_start was within a hole
		 *
		 * Either case means the hole punch is complete.
		 */
		ret = 1;
	}

	*done = ret;
}

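/*
 * Punch a hole of byte_len bytes at byte_start: zero the partial edge
 * clusters, then walk the extent tree right to left removing the whole
 * clusters in between, and finally drop the affected page cache pages.
 */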
int ocfs2_remove_inode_range(struct inode *inode,
			     struct buffer_head *di_bh, u64 byte_start,
			     u64 byte_len)
{
	int ret = 0, flags = 0, done = 0, i;
	u32 trunc_start, trunc_len, trunc_end, trunc_cpos, phys_cpos;
	u32 cluster_in_el;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_cached_dealloc_ctxt dealloc;
	struct address_space *mapping = inode->i_mapping;
	struct ocfs2_extent_tree et;
	struct ocfs2_path *path = NULL;
	struct ocfs2_extent_list *el = NULL;
	struct ocfs2_extent_rec *rec = NULL;
	struct ocfs2_dinode *di = (struct ocfs2_dinode *)di_bh->b_data;
	u64 blkno, refcount_loc = le64_to_cpu(di->i_refcount_loc);

	ocfs2_init_dinode_extent_tree(&et, INODE_CACHE(inode), di_bh);
	ocfs2_init_dealloc_ctxt(&dealloc);

	trace_ocfs2_remove_inode_range(
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			(unsigned long long)byte_start,
			(unsigned long long)byte_len);

	if (byte_len == 0)
		return 0;

	if (OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL) {
		ret = ocfs2_truncate_inline(inode, di_bh, byte_start,
					    byte_start + byte_len, 0);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
		/*
		 * There's no need to get fancy with the page cache
		 * truncate of an inline-data inode. We're talking
		 * about less than a page here, which will be cached
		 * in the dinode buffer anyway.
		 */
		unmap_mapping_range(mapping, 0, 0, 0);
		truncate_inode_pages(mapping, 0);
		goto out;
	}

	/*
	 * For reflinks, we may need to CoW two clusters which may later be
	 * partially zeroed, if the hole's start and end offsets fall within
	 * one cluster (i.e. they are not exactly aligned to the cluster
	 * size).
	 */

	if (ocfs2_is_refcount_inode(inode)) {
		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		ret = ocfs2_cow_file_pos(inode, di_bh, byte_start + byte_len);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}
	}

	trunc_start = ocfs2_clusters_for_bytes(osb->sb, byte_start);
	trunc_end = (byte_start + byte_len) >> osb->s_clustersize_bits;
	cluster_in_el = trunc_end;

	ret = ocfs2_zero_partial_clusters(inode, byte_start, byte_len);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	path = ocfs2_new_path_from_et(&et);
	if (!path) {
		ret = -ENOMEM;
		mlog_errno(ret);
		goto out;
	}

	while (trunc_end > trunc_start) {

		ret = ocfs2_find_path(INODE_CACHE(inode), path,
				      cluster_in_el);
		if (ret) {
			mlog_errno(ret);
			goto out;
		}

		el = path_leaf_el(path);

		i = ocfs2_find_rec(el, trunc_end);
		/*
		 * Need to go to previous extent block.
		 */
		if (i < 0) {
			if (path->p_tree_depth == 0)
				break;

			ret = ocfs2_find_cpos_for_left_leaf(inode->i_sb,
							    path,
							    &cluster_in_el);
			if (ret) {
				mlog_errno(ret);
				goto out;
			}

			/*
			 * We've reached the leftmost extent block,
			 * it's safe to leave.
			 */
			if (cluster_in_el == 0)
				break;

			/*
			 * The 'pos' searched for previous extent block is
			 * always one cluster less than actual trunc_end.
			 */
			trunc_end = cluster_in_el + 1;

			ocfs2_reinit_path(path, 1);

			continue;

		} else
			rec = &el->l_recs[i];

		ocfs2_calc_trunc_pos(inode, el, rec, trunc_start, &trunc_cpos,
				     &trunc_len, &trunc_end, &blkno, &done);
		if (done)
			break;

		flags = rec->e_flags;
		phys_cpos = ocfs2_blocks_to_clusters(inode->i_sb, blkno);

		ret = ocfs2_remove_btree_range(inode, &et, trunc_cpos,
					       phys_cpos, trunc_len, flags,
					       &dealloc, refcount_loc, false);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		cluster_in_el = trunc_end;

		ocfs2_reinit_path(path, 1);
	}

	ocfs2_truncate_cluster_pages(inode, byte_start, byte_len);

out:
	ocfs2_free_path(path);
	ocfs2_schedule_truncate_log_flush(osb, 1);
	ocfs2_run_deallocs(osb, &dealloc);

	return ret;
}

1921
1922 /*
1923 * Parts of this function taken from xfs_change_file_space()
1924 */
__ocfs2_change_file_space(struct file * file,struct inode * inode,loff_t f_pos,unsigned int cmd,struct ocfs2_space_resv * sr,int change_size)1925 static int __ocfs2_change_file_space(struct file *file, struct inode *inode,
				     loff_t f_pos, unsigned int cmd,
				     struct ocfs2_space_resv *sr,
				     int change_size)
{
	int ret;
	s64 llen;
	loff_t size, orig_isize;
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct buffer_head *di_bh = NULL;
	handle_t *handle;
	unsigned long long max_off = inode->i_sb->s_maxbytes;

	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	inode_lock(inode);

	/*
	 * This prevents concurrent writes on other nodes
	 */
	ret = ocfs2_rw_lock(inode, 1);
	if (ret) {
		mlog_errno(ret);
		goto out;
	}

	ret = ocfs2_inode_lock(inode, &di_bh, 1);
	if (ret) {
		mlog_errno(ret);
		goto out_rw_unlock;
	}

	if (inode->i_flags & (S_IMMUTABLE|S_APPEND)) {
		ret = -EPERM;
		goto out_inode_unlock;
	}

	switch (sr->l_whence) {
	case 0: /*SEEK_SET*/
		break;
	case 1: /*SEEK_CUR*/
		sr->l_start += f_pos;
		break;
	case 2: /*SEEK_END*/
		sr->l_start += i_size_read(inode);
		break;
	default:
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	sr->l_whence = 0;

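	/*
	 * llen is the offset of the last byte in the range (or the
	 * length unchanged when l_len <= 0), so the bounds checks below
	 * test the final byte of the range rather than one past it.
	 */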
	llen = sr->l_len > 0 ? sr->l_len - 1 : sr->l_len;

	if (sr->l_start < 0
	    || sr->l_start > max_off
	    || (sr->l_start + llen) < 0
	    || (sr->l_start + llen) > max_off) {
		ret = -EINVAL;
		goto out_inode_unlock;
	}
	size = sr->l_start + sr->l_len;

	if (cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64 ||
	    cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) {
		if (sr->l_len <= 0) {
			ret = -EINVAL;
			goto out_inode_unlock;
		}
	}

	if (file && setattr_should_drop_suidgid(&init_user_ns, file_inode(file))) {
		ret = __ocfs2_write_remove_suid(inode, di_bh);
		if (ret) {
			mlog_errno(ret);
			goto out_inode_unlock;
		}
	}

	down_write(&OCFS2_I(inode)->ip_alloc_sem);
	switch (cmd) {
	case OCFS2_IOC_RESVSP:
	case OCFS2_IOC_RESVSP64:
		/*
		 * This takes unsigned offsets, but the signed ones we
		 * pass have been checked against overflow above.
		 */
		ret = ocfs2_allocate_unwritten_extents(inode, sr->l_start,
						       sr->l_len);
		break;
	case OCFS2_IOC_UNRESVSP:
	case OCFS2_IOC_UNRESVSP64:
		ret = ocfs2_remove_inode_range(inode, di_bh, sr->l_start,
					       sr->l_len);
		break;
	default:
		ret = -EINVAL;
	}

	orig_isize = i_size_read(inode);
	/* Zero out blocks past the old EOF within its cluster when extending. */
	if (!ret && change_size && orig_isize < size) {
		ret = ocfs2_zeroout_partial_cluster(inode, orig_isize,
						    size - orig_isize);
		if (!ret)
			i_size_write(inode, size);
	}
	up_write(&OCFS2_I(inode)->ip_alloc_sem);
	if (ret) {
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	/*
	 * We update c/mtime for these changes
	 */
	handle = ocfs2_start_trans(osb, OCFS2_INODE_UPDATE_CREDITS);
	if (IS_ERR(handle)) {
		ret = PTR_ERR(handle);
		mlog_errno(ret);
		goto out_inode_unlock;
	}

	inode->i_ctime = inode->i_mtime = current_time(inode);
	ret = ocfs2_mark_inode_dirty(handle, inode, di_bh);
	if (ret < 0)
		mlog_errno(ret);

	if (file && (file->f_flags & O_SYNC))
		handle->h_sync = 1;

	ocfs2_commit_trans(osb, handle);

out_inode_unlock:
	brelse(di_bh);
	ocfs2_inode_unlock(inode, 1);
out_rw_unlock:
	ocfs2_rw_unlock(inode, 1);

out:
	inode_unlock(inode);
	return ret;
}

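/*
 * Entry point for the OCFS2_IOC_RESVSP{,64} / OCFS2_IOC_UNRESVSP{,64}
 * space reservation ioctls.
 *
 * Illustrative (hypothetical) userspace usage, reserving 1MB at the
 * start of a file without changing i_size:
 *
 *	struct ocfs2_space_resv sr = {
 *		.l_whence = 0,		   (SEEK_SET)
 *		.l_start  = 0,
 *		.l_len    = 1024 * 1024,
 *	};
 *	ioctl(fd, OCFS2_IOC_RESVSP64, &sr);
 */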
int ocfs2_change_file_space(struct file *file, unsigned int cmd,
			    struct ocfs2_space_resv *sr)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int ret;

	if ((cmd == OCFS2_IOC_RESVSP || cmd == OCFS2_IOC_RESVSP64) &&
	    !ocfs2_writes_unwritten_extents(osb))
		return -ENOTTY;
	else if ((cmd == OCFS2_IOC_UNRESVSP || cmd == OCFS2_IOC_UNRESVSP64) &&
		 !ocfs2_sparse_alloc(osb))
		return -ENOTTY;

	if (!S_ISREG(inode->i_mode))
		return -EINVAL;

	if (!(file->f_mode & FMODE_WRITE))
		return -EBADF;

	ret = mnt_want_write_file(file);
	if (ret)
		return ret;
	ret = __ocfs2_change_file_space(file, inode, file->f_pos, cmd, sr, 0);
	mnt_drop_write_file(file);
	return ret;
}

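/*
 * fallocate() is implemented on top of the space reservation helpers
 * above: plain preallocation maps to OCFS2_IOC_RESVSP64 and
 * FALLOC_FL_PUNCH_HOLE maps to OCFS2_IOC_UNRESVSP64, with
 * FALLOC_FL_KEEP_SIZE deciding whether i_size may be extended.
 *
 * Illustrative (hypothetical) userspace usage, punching a hole without
 * changing the file size:
 *
 *	fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 *		  offset, len);
 */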
static long ocfs2_fallocate(struct file *file, int mode, loff_t offset,
			    loff_t len)
{
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	struct ocfs2_space_resv sr;
	int change_size = 1;
	int cmd = OCFS2_IOC_RESVSP64;
	int ret = 0;

	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;
	if (!ocfs2_writes_unwritten_extents(osb))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_KEEP_SIZE) {
		change_size = 0;
	} else {
		ret = inode_newsize_ok(inode, offset + len);
		if (ret)
			return ret;
	}

	if (mode & FALLOC_FL_PUNCH_HOLE)
		cmd = OCFS2_IOC_UNRESVSP64;

	sr.l_whence = 0;
	sr.l_start = (s64)offset;
	sr.l_len = (s64)len;

	return __ocfs2_change_file_space(NULL, inode, offset, cmd, &sr,
					 change_size);
}

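/*
 * Returns 1 if any part of the byte range [pos, pos + count) is backed
 * by a refcounted (shared) extent, 0 if none is, or a negative error
 * code. Inline-data inodes never share extents.
 */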
int ocfs2_check_range_for_refcount(struct inode *inode, loff_t pos,
				   size_t count)
{
	int ret = 0;
	unsigned int extent_flags;
	u32 cpos, clusters, extent_len, phys_cpos;
	struct super_block *sb = inode->i_sb;

	if (!ocfs2_refcount_tree(OCFS2_SB(inode->i_sb)) ||
	    !ocfs2_is_refcount_inode(inode) ||
	    OCFS2_I(inode)->ip_dyn_features & OCFS2_INLINE_DATA_FL)
		return 0;

	cpos = pos >> OCFS2_SB(sb)->s_clustersize_bits;
	clusters = ocfs2_clusters_for_bytes(sb, pos + count) - cpos;

	while (clusters) {
		ret = ocfs2_get_clusters(inode, cpos, &phys_cpos, &extent_len,
					 &extent_flags);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}

		if (phys_cpos && (extent_flags & OCFS2_EXT_REFCOUNTED)) {
			ret = 1;
			break;
		}

		if (extent_len > clusters)
			extent_len = clusters;

		clusters -= extent_len;
		cpos += extent_len;
	}
out:
	return ret;
}

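/*
 * An I/O is unaligned if it starts or ends in the middle of a
 * filesystem block.
 */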
static int ocfs2_is_io_unaligned(struct inode *inode, size_t count, loff_t pos)
{
	int blockmask = inode->i_sb->s_blocksize - 1;
	loff_t final_size = pos + count;

	if ((pos & blockmask) || (final_size & blockmask))
		return 1;
	return 0;
}

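/*
 * Take the cluster inode lock at the given meta_level and then
 * ip_alloc_sem (shared or exclusive depending on write_sem). When wait
 * is zero, both locks are taken with trylock variants and -EAGAIN is
 * returned if either would block.
 */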
static int ocfs2_inode_lock_for_extent_tree(struct inode *inode,
					    struct buffer_head **di_bh,
					    int meta_level,
					    int write_sem,
					    int wait)
{
	int ret = 0;

	if (wait)
		ret = ocfs2_inode_lock(inode, di_bh, meta_level);
	else
		ret = ocfs2_try_inode_lock(inode, di_bh, meta_level);
	if (ret < 0)
		goto out;

	if (wait) {
		if (write_sem)
			down_write(&OCFS2_I(inode)->ip_alloc_sem);
		else
			down_read(&OCFS2_I(inode)->ip_alloc_sem);
	} else {
		if (write_sem)
			ret = down_write_trylock(&OCFS2_I(inode)->ip_alloc_sem);
		else
			ret = down_read_trylock(&OCFS2_I(inode)->ip_alloc_sem);

		if (!ret) {
			ret = -EAGAIN;
			goto out_unlock;
		}
	}

	return ret;

out_unlock:
	brelse(*di_bh);
	*di_bh = NULL;
	ocfs2_inode_unlock(inode, meta_level);
out:
	return ret;
}

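/*
 * Counterpart to ocfs2_inode_lock_for_extent_tree(): drops
 * ip_alloc_sem, releases the buffer head, and, when meta_level is
 * non-negative, drops the cluster inode lock as well.
 */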
static void ocfs2_inode_unlock_for_extent_tree(struct inode *inode,
					       struct buffer_head **di_bh,
					       int meta_level,
					       int write_sem)
{
	if (write_sem)
		up_write(&OCFS2_I(inode)->ip_alloc_sem);
	else
		up_read(&OCFS2_I(inode)->ip_alloc_sem);

	brelse(*di_bh);
	*di_bh = NULL;

	if (meta_level >= 0)
		ocfs2_inode_unlock(inode, meta_level);
}

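/*
 * Prepare the inode for an incoming write: drop suid/sgid bits if
 * needed and CoW any refcounted extents in the target range. With
 * wait == 0 (IOCB_NOWAIT) every lock is only tried, and -EAGAIN is
 * returned rather than blocking.
 */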
static int ocfs2_prepare_inode_for_write(struct file *file,
					 loff_t pos, size_t count, int wait)
{
	int ret = 0, meta_level = 0, overwrite_io = 0;
	int write_sem = 0;
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = d_inode(dentry);
	struct buffer_head *di_bh = NULL;
	u32 cpos;
	u32 clusters;

	/*
	 * We start with a read-level meta lock and only jump to EX if
	 * we need to make modifications here.
	 */
	for (;;) {
		ret = ocfs2_inode_lock_for_extent_tree(inode,
						       &di_bh,
						       meta_level,
						       write_sem,
						       wait);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		/*
		 * Check if IO will overwrite allocated blocks in case
		 * IOCB_NOWAIT flag is set.
		 */
		if (!wait && !overwrite_io) {
			overwrite_io = 1;

			ret = ocfs2_overwrite_io(inode, di_bh, pos, count);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out_unlock;
			}
		}

		/*
		 * Clear suid / sgid if necessary. We do this here instead
		 * of later in the write path because remove_suid() calls
		 * ->setattr without any hint that we may have already done
		 * our cluster locking. Since ocfs2_setattr() *must* take
		 * cluster locks to proceed, this will lead us to
		 * recursively lock the inode. There's also the dinode
		 * i_size state, which can be lost via setattr during
		 * extending writes (we set inode->i_size at the end of
		 * a write).
		 */
		if (setattr_should_drop_suidgid(&init_user_ns, inode)) {
			if (meta_level == 0) {
				ocfs2_inode_unlock_for_extent_tree(inode,
								   &di_bh,
								   meta_level,
								   write_sem);
				meta_level = 1;
				continue;
			}

			ret = ocfs2_write_remove_suid(inode);
			if (ret < 0) {
				mlog_errno(ret);
				goto out_unlock;
			}
		}

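		/*
		 * If any part of the write range is shared via reflink,
		 * retake the locks with ip_alloc_sem held for write and
		 * CoW the affected clusters before the write proceeds.
		 */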
		ret = ocfs2_check_range_for_refcount(inode, pos, count);
		if (ret == 1) {
			ocfs2_inode_unlock_for_extent_tree(inode,
							   &di_bh,
							   meta_level,
							   write_sem);
			meta_level = 1;
			write_sem = 1;
			ret = ocfs2_inode_lock_for_extent_tree(inode,
							       &di_bh,
							       meta_level,
							       write_sem,
							       wait);
			if (ret < 0) {
				if (ret != -EAGAIN)
					mlog_errno(ret);
				goto out;
			}

			cpos = pos >> OCFS2_SB(inode->i_sb)->s_clustersize_bits;
			clusters =
				ocfs2_clusters_for_bytes(inode->i_sb, pos + count) - cpos;
			ret = ocfs2_refcount_cow(inode, di_bh, cpos, clusters, UINT_MAX);
		}

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out_unlock;
		}

		break;
	}

out_unlock:
	trace_ocfs2_prepare_inode_for_write(OCFS2_I(inode)->ip_blkno,
					    pos, count, wait);

	ocfs2_inode_unlock_for_extent_tree(inode,
					   &di_bh,
					   meta_level,
					   write_sem);

out:
	return ret;
}

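/*
 * Write path. Lock ordering is i_rwsem -> rw cluster lock -> (for
 * O_DIRECT with full coherency) the cluster inode lock, which is taken
 * and dropped only to force other nodes to drop their page caches.
 */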
static ssize_t ocfs2_file_write_iter(struct kiocb *iocb,
				     struct iov_iter *from)
{
	int rw_level;
	ssize_t written = 0;
	ssize_t ret;
	size_t count = iov_iter_count(from);
	struct file *file = iocb->ki_filp;
	struct inode *inode = file_inode(file);
	struct ocfs2_super *osb = OCFS2_SB(inode->i_sb);
	int full_coherency = !(osb->s_mount_opt &
			       OCFS2_MOUNT_COHERENCY_BUFFERED);
	void *saved_ki_complete = NULL;
	int append_write = ((iocb->ki_pos + count) >=
			    i_size_read(inode) ? 1 : 0);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_write_iter(inode, file, file->f_path.dentry,
		(unsigned long long)OCFS2_I(inode)->ip_blkno,
		file->f_path.dentry->d_name.len,
		file->f_path.dentry->d_name.name,
		(unsigned int)from->nr_segs);	/* GRRRRR */

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	if (count == 0)
		return 0;

	if (nowait) {
		if (!inode_trylock(inode))
			return -EAGAIN;
	} else
		inode_lock(inode);

	/*
	 * Concurrent O_DIRECT writes are allowed with the mount option
	 * "coherency=buffered". For append writes, we must take rw EX.
	 */
	rw_level = (!direct_io || full_coherency || append_write);

	if (nowait)
		ret = ocfs2_try_rw_lock(inode, rw_level);
	else
		ret = ocfs2_rw_lock(inode, rw_level);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out_mutex;
	}

	/*
	 * O_DIRECT writes with "coherency=full" need to take EX cluster
	 * inode_lock to guarantee coherency.
	 */
	if (direct_io && full_coherency) {
		/*
		 * We need to take and drop the inode lock to force
		 * other nodes to drop their caches.  Buffered I/O
		 * already does this in write_begin().
		 */
		if (nowait)
			ret = ocfs2_try_inode_lock(inode, NULL, 1);
		else
			ret = ocfs2_inode_lock(inode, NULL, 1);
		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto out;
		}

		ocfs2_inode_unlock(inode, 1);
	}

	ret = generic_write_checks(iocb, from);
	if (ret <= 0) {
		if (ret)
			mlog_errno(ret);
		goto out;
	}
	count = ret;

	ret = ocfs2_prepare_inode_for_write(file, iocb->ki_pos, count, !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto out;
	}

	if (direct_io && !is_sync_kiocb(iocb) &&
	    ocfs2_is_io_unaligned(inode, count, iocb->ki_pos)) {
		/*
		 * Make it a sync I/O if it's an unaligned AIO.
		 */
		saved_ki_complete = xchg(&iocb->ki_complete, NULL);
	}

	/* communicate with ocfs2_dio_end_io */
	ocfs2_iocb_set_rw_locked(iocb, rw_level);

	written = __generic_file_write_iter(iocb, from);
	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(written == -EIOCBQUEUED && !direct_io);

	/*
	 * Deep in g_f_a_w_n()->ocfs2_direct_IO we pass in an
	 * ocfs2_dio_end_io function pointer, which is called when
	 * O_DIRECT io completes so that it can unlock our rw lock.
	 * Unfortunately there are error cases which call end_io and
	 * others that don't, so we don't have to unlock the rw_lock
	 * if either an async dio is going to do it in the future or an
	 * end_io after an error has already done it.
	 */
	if ((written == -EIOCBQUEUED) || (!ocfs2_iocb_is_rw_locked(iocb))) {
		rw_level = -1;
	}

	if (unlikely(written <= 0))
		goto out;

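	/*
	 * For buffered writes with O_DSYNC, or on a sync inode, flush
	 * the written range and force a journal commit so both the data
	 * and the allocation metadata are on disk before we return.
	 */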
	if (((file->f_flags & O_DSYNC) && !direct_io) ||
	    IS_SYNC(inode)) {
		ret = filemap_fdatawrite_range(file->f_mapping,
					       iocb->ki_pos - written,
					       iocb->ki_pos - 1);
		if (ret < 0)
			written = ret;

		if (!ret) {
			ret = jbd2_journal_force_commit(osb->journal->j_journal);
			if (ret < 0)
				written = ret;
		}

		if (!ret)
			ret = filemap_fdatawait_range(file->f_mapping,
						      iocb->ki_pos - written,
						      iocb->ki_pos - 1);
	}

out:
	if (saved_ki_complete)
		xchg(&iocb->ki_complete, saved_ki_complete);

	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

out_mutex:
	inode_unlock(inode);

	if (written)
		ret = written;
	return ret;
}

static ssize_t ocfs2_file_read_iter(struct kiocb *iocb,
				    struct iov_iter *to)
{
	int ret = 0, rw_level = -1, lock_level = 0;
	struct file *filp = iocb->ki_filp;
	struct inode *inode = file_inode(filp);
	int direct_io = iocb->ki_flags & IOCB_DIRECT ? 1 : 0;
	int nowait = iocb->ki_flags & IOCB_NOWAIT ? 1 : 0;

	trace_ocfs2_file_read_iter(inode, filp, filp->f_path.dentry,
			(unsigned long long)OCFS2_I(inode)->ip_blkno,
			filp->f_path.dentry->d_name.len,
			filp->f_path.dentry->d_name.name,
			to->nr_segs);	/* GRRRRR */

	if (!inode) {
		ret = -EINVAL;
		mlog_errno(ret);
		goto bail;
	}

	if (!direct_io && nowait)
		return -EOPNOTSUPP;

	/*
	 * buffered reads protect themselves in ->readpage().  O_DIRECT reads
	 * need locks to protect pending reads from racing with truncate.
	 */
	if (direct_io) {
		if (nowait)
			ret = ocfs2_try_rw_lock(inode, 0);
		else
			ret = ocfs2_rw_lock(inode, 0);

		if (ret < 0) {
			if (ret != -EAGAIN)
				mlog_errno(ret);
			goto bail;
		}
		rw_level = 0;
		/* communicate with ocfs2_dio_end_io */
		ocfs2_iocb_set_rw_locked(iocb, rw_level);
	}

	/*
	 * We're fine letting folks race truncates and extending
	 * writes with read across the cluster, just like they can
	 * locally. Hence no rw_lock during read.
	 *
	 * Take and drop the meta data lock to update inode fields
	 * like i_size. This gives the checks down in
	 * generic_file_read_iter() a chance of actually working.
	 */
	ret = ocfs2_inode_lock_atime(inode, filp->f_path.mnt, &lock_level,
				     !nowait);
	if (ret < 0) {
		if (ret != -EAGAIN)
			mlog_errno(ret);
		goto bail;
	}
	ocfs2_inode_unlock(inode, lock_level);

	ret = generic_file_read_iter(iocb, to);
	trace_generic_file_read_iter_ret(ret);

	/* buffered aio wouldn't have proper lock coverage today */
	BUG_ON(ret == -EIOCBQUEUED && !direct_io);

	/* see ocfs2_file_write_iter */
	if (ret == -EIOCBQUEUED || !ocfs2_iocb_is_rw_locked(iocb)) {
		rw_level = -1;
	}

bail:
	if (rw_level != -1)
		ocfs2_rw_unlock(inode, rw_level);

	return ret;
}

/* Refer to generic_file_llseek_unlocked(). */
static loff_t ocfs2_file_llseek(struct file *file, loff_t offset, int whence)
{
	struct inode *inode = file->f_mapping->host;
	int ret = 0;

	inode_lock(inode);

	switch (whence) {
	case SEEK_SET:
		break;
	case SEEK_END:
		/* SEEK_END requires the OCFS2 inode lock for the file
		 * because it references the file's size.
		 */
		ret = ocfs2_inode_lock(inode, NULL, 0);
		if (ret < 0) {
			mlog_errno(ret);
			goto out;
		}
		offset += i_size_read(inode);
		ocfs2_inode_unlock(inode, 0);
		break;
	case SEEK_CUR:
		if (offset == 0) {
			offset = file->f_pos;
			goto out;
		}
		offset += file->f_pos;
		break;
	case SEEK_DATA:
	case SEEK_HOLE:
		ret = ocfs2_seek_data_hole_offset(file, &offset, whence);
		if (ret)
			goto out;
		break;
	default:
		ret = -EINVAL;
		goto out;
	}

	offset = vfs_setpos(file, offset, inode->i_sb->s_maxbytes);

out:
	inode_unlock(inode);
	if (ret)
		return ret;
	return offset;
}

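/*
 * Back end for FICLONE/FICLONERANGE and FIDEDUPERANGE: share extents
 * between the two files by reflinking, after locking both inodes and
 * dropping any page cache for the destination range.
 */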
static loff_t ocfs2_remap_file_range(struct file *file_in, loff_t pos_in,
				     struct file *file_out, loff_t pos_out,
				     loff_t len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	struct ocfs2_super *osb = OCFS2_SB(inode_in->i_sb);
	struct buffer_head *in_bh = NULL, *out_bh = NULL;
	bool same_inode = (inode_in == inode_out);
	loff_t remapped = 0;
	ssize_t ret;

	if (remap_flags & ~(REMAP_FILE_DEDUP | REMAP_FILE_ADVISORY))
		return -EINVAL;
	if (!ocfs2_refcount_tree(osb))
		return -EOPNOTSUPP;
	if (ocfs2_is_hard_readonly(osb) || ocfs2_is_soft_readonly(osb))
		return -EROFS;

	/* Lock both files against IO */
	ret = ocfs2_reflink_inodes_lock(inode_in, &in_bh, inode_out, &out_bh);
	if (ret)
		return ret;

	/* Check file eligibility and prepare for block sharing. */
	ret = -EINVAL;
	if ((OCFS2_I(inode_in)->ip_flags & OCFS2_INODE_SYSTEM_FILE) ||
	    (OCFS2_I(inode_out)->ip_flags & OCFS2_INODE_SYSTEM_FILE))
		goto out_unlock;

	ret = generic_remap_file_range_prep(file_in, pos_in, file_out, pos_out,
					    &len, remap_flags);
	if (ret < 0 || len == 0)
		goto out_unlock;

	/* Lock out changes to the allocation maps and remap. */
	down_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		down_write_nested(&OCFS2_I(inode_out)->ip_alloc_sem,
				  SINGLE_DEPTH_NESTING);

	/* Zap any page cache for the destination file's range. */
	truncate_inode_pages_range(&inode_out->i_data,
				   round_down(pos_out, PAGE_SIZE),
				   round_up(pos_out + len, PAGE_SIZE) - 1);

	remapped = ocfs2_reflink_remap_blocks(inode_in, in_bh, pos_in,
					      inode_out, out_bh, pos_out, len);
	up_write(&OCFS2_I(inode_in)->ip_alloc_sem);
	if (!same_inode)
		up_write(&OCFS2_I(inode_out)->ip_alloc_sem);
	if (remapped < 0) {
		ret = remapped;
		mlog_errno(ret);
		goto out_unlock;
	}

	/*
	 * Empty the extent map so that we may get the right extent
	 * record from the disk.
	 */
	ocfs2_extent_map_trunc(inode_in, 0);
	ocfs2_extent_map_trunc(inode_out, 0);

	ret = ocfs2_reflink_update_dest(inode_out, out_bh, pos_out + len);
	if (ret) {
		mlog_errno(ret);
		goto out_unlock;
	}

out_unlock:
	ocfs2_reflink_inodes_unlock(inode_in, in_bh, inode_out, out_bh);
	return remapped > 0 ? remapped : ret;
}

const struct inode_operations ocfs2_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.listxattr	= ocfs2_listxattr,
	.fiemap		= ocfs2_fiemap,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
	.fileattr_get	= ocfs2_fileattr_get,
	.fileattr_set	= ocfs2_fileattr_set,
};

const struct inode_operations ocfs2_special_file_iops = {
	.setattr	= ocfs2_setattr,
	.getattr	= ocfs2_getattr,
	.permission	= ocfs2_permission,
	.get_acl	= ocfs2_iop_get_acl,
	.set_acl	= ocfs2_iop_set_acl,
};

/*
 * Other than ->lock, keep ocfs2_fops and ocfs2_dops in sync with
 * ocfs2_fops_no_plocks and ocfs2_dops_no_plocks!
 */
const struct file_operations ocfs2_fops = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.lock		= ocfs2_lock,
	.flock		= ocfs2_flock,
};

/*
 * POSIX-lockless variants of our file_operations.
 *
 * These will be used if the underlying cluster stack does not support
 * posix file locking, if the user passes the "localflocks" mount
 * option, or if we have a local-only fs.
 *
 * ocfs2_flock is in here because all stacks handle UNIX file locks,
 * so we still want it in the case of no stack support for
 * plocks. Internally, it will do the right thing when asked to ignore
 * the cluster.
 */
const struct file_operations ocfs2_fops_no_plocks = {
	.llseek		= ocfs2_file_llseek,
	.mmap		= ocfs2_mmap,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_file_release,
	.open		= ocfs2_file_open,
	.read_iter	= ocfs2_file_read_iter,
	.write_iter	= ocfs2_file_write_iter,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
	.splice_read	= generic_file_splice_read,
	.splice_write	= iter_file_splice_write,
	.fallocate	= ocfs2_fallocate,
	.remap_file_range = ocfs2_remap_file_range,
};

const struct file_operations ocfs2_dops_no_plocks = {
	.llseek		= generic_file_llseek,
	.read		= generic_read_dir,
	.iterate	= ocfs2_readdir,
	.fsync		= ocfs2_sync_file,
	.release	= ocfs2_dir_release,
	.open		= ocfs2_dir_open,
	.unlocked_ioctl	= ocfs2_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= ocfs2_compat_ioctl,
#endif
	.flock		= ocfs2_flock,
};