// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"

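/**
 * gfs2_page_add_databufs - Add a page's buffers to the current transaction
 * @ip: The inode
 * @page: The (locked) page whose buffers are to be added
 * @from: Offset of the start of the byte range within the page
 * @len: Length of the byte range
 *
 * Walks the buffers of @page that overlap the byte range, marks each one
 * uptodate and adds it to the current transaction as journaled data.
 */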
void gfs2_page_add_databufs(struct gfs2_inode *ip, struct page *page,
			    unsigned int from, unsigned int len)
{
	struct buffer_head *head = page_buffers(page);
	unsigned int bsize = head->b_size;
	struct buffer_head *bh;
	unsigned int to = from + len;
	unsigned int start, end;

	for (bh = head, start = 0; bh != head || !start;
	     bh = bh->b_this_page, start = end) {
		end = start + bsize;
		if (end <= from)
			continue;
		if (start >= to)
			break;
		set_buffer_uptodate(bh);
		gfs2_trans_add_data(ip->i_gl, bh);
	}
}

/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_page - gfs2 jdata-specific version of block_write_full_page
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_page, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_page(struct page *page,
				 struct writeback_control *wbc)
{
	struct inode * const inode = page->mapping->host;
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = i_size >> PAGE_SHIFT;
	unsigned offset;

	/*
	 * The page straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	offset = i_size & (PAGE_SIZE - 1);
	if (page->index == end_index && offset)
		zero_user_segment(page, offset, PAGE_SIZE);

	return __block_write_full_page(inode, page, gfs2_get_block_noalloc, wbc,
				       end_buffer_async_write);
}

/**
 * __gfs2_jdata_writepage - The core of jdata writepage
 * @page: The page to write
 * @wbc: The writeback control
 *
 * This is shared between writepage and writepages and implements the
 * core of the writepage operation. If a transaction is required then
 * PageChecked will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (PageChecked(page)) {
		ClearPageChecked(page);
		if (!page_has_buffers(page)) {
			create_empty_buffers(page, inode->i_sb->s_blocksize,
					     BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_page_add_databufs(ip, page, 0, PAGE_SIZE);
	}
	return gfs2_write_jdata_page(page, wbc);
}

/**
 * gfs2_jdata_writepage - Write complete page
 * @page: Page to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepage(struct page *page, struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);

	if (gfs2_assert_withdraw(sdp, gfs2_glock_is_held_excl(ip->i_gl)))
		goto out;
	if (PageChecked(page) || current->journal_info)
		goto out_ignore;
	return __gfs2_jdata_writepage(page, wbc);

out_ignore:
	redirty_page_for_writepage(wbc, page);
out:
	unlock_page(page);
	return 0;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_pagevec - Write back a pagevec's worth of pages
 * @mapping: The mapping
 * @wbc: The writeback control
 * @pvec: The vector of pages
 * @nr_pages: The number of pages to write
 * @done_index: Updated with the index of the last page processed
 *
 * Returns: non-zero if the loop should terminate (negative on error),
 *          zero otherwise
 */

static int gfs2_write_jdata_pagevec(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct pagevec *pvec,
				    int nr_pages,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks = nr_pages * (PAGE_SIZE >> inode->i_blkbits);
	int i;
	int ret;

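	/*
	 * Start a transaction large enough to journal every block covered by
	 * this pagevec, with a matching number of revokes.
	 */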
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = pvec->pages[i];

		*done_index = page->index;

		lock_page(page);

		if (unlikely(page->mapping != mapping)) {
continue_unlock:
			unlock_page(page);
			continue;
		}

		if (!PageDirty(page)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (PageWriteback(page)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				wait_on_page_writeback(page);
			else
				goto continue_unlock;
		}

		BUG_ON(PageWriteback(page));
		if (!clear_page_dirty_for_io(page))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_writepage(page, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				unlock_page(page);
				ret = 0;
			} else {
				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = page->index + 1;
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}
	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct pagevec pvec;
	int nr_pages;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	pagevec_init(&pvec);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_pages = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
				tag);
		if (nr_pages == 0)
			break;

		ret = gfs2_write_jdata_pagevec(mapping, wbc, &pvec, nr_pages, &done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		pagevec_release(&pvec);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}

/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_readpage - Fill in a Linux page with stuffed file data
 * @ip: the inode
 * @page: the page
 *
 * Returns: errno
 */
static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
{
	struct buffer_head *dibh;
	u64 dsize = i_size_read(&ip->i_inode);
	void *kaddr;
	int error;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero page in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(page->index)) {
		zero_user(page, 0, PAGE_SIZE);
		SetPageUptodate(page);
		return 0;
	}

	error = gfs2_meta_inode_buffer(ip, &dibh);
	if (error)
		return error;

	kaddr = kmap_atomic(page);
	memcpy(kaddr, dibh->b_data + sizeof(struct gfs2_dinode), dsize);
	memset(kaddr + dsize, 0, PAGE_SIZE - dsize);
	kunmap_atomic(kaddr);
	flush_dcache_page(page);
	brelse(dibh);
	SetPageUptodate(page);

	return 0;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 *
 * Returns: errno
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_readpage(ip, &folio->page);
		folio_unlock(folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (unlikely(gfs2_withdrawn(sdp)))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 * Returns: The number of bytes read or errno
 */

int gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
		       unsigned size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	unsigned offset = *pos & (PAGE_SIZE - 1);
	unsigned copied = 0;
	unsigned amt;
	struct page *page;
	void *p;

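	/*
	 * Copy the requested range one page at a time: read each page
	 * through the page cache, copy out the part we need, and move on.
	 */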
	do {
		amt = size - copied;
		/* Clamp the copy so it doesn't run past the end of the page. */
		if (offset + amt > PAGE_SIZE)
			amt = PAGE_SIZE - offset;
		page = read_cache_page(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(page))
			return PTR_ERR(page);
		p = kmap_atomic(page);
		memcpy(buf + copied, p + offset, amt);
		kunmap_atomic(p);
		put_page(page);
		copied += amt;
		index++;
		offset = 0;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore any things
 *    which are slightly inconvenient (such as locking conflicts between
 *    the page lock and the glock) and return having done no I/O. It's
 *    obviously not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

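/**
 * jdata_dirty_folio - Mark a jdata folio dirty
 * @mapping: The mapping that owns the folio
 * @folio: The folio being dirtied
 *
 * When the folio is dirtied inside a transaction, set the checked flag so
 * that writepage knows the folio's buffers still need to be added to the
 * journal.
 *
 * Returns: true if the folio was newly dirtied
 */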
static bool jdata_dirty_folio(struct address_space *mapping,
			      struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

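/**
 * gfs2_discard - Forget about a buffer and remove it from the journal
 * @sdp: The superblock
 * @bh: The buffer head to discard
 *
 * Clears the buffer's dirty and mapping state and takes it off the
 * journal's lists so that the block can be invalidated safely.
 */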
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

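/**
 * gfs2_invalidate_folio - Invalidate part or all of a folio
 * @folio: The folio being invalidated
 * @offset: Offset of the start of the range to invalidate
 * @length: Length of the range to invalidate
 *
 * Discards the buffers that fall entirely within the range, and releases
 * the folio's private data when the whole folio is being invalidated.
 */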
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				  size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

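	/*
	 * None of the buffers are busy: detach each buffer's bufdata before
	 * handing the folio to try_to_free_buffers().
	 */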
	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

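/*
 * Address space operations for regular (non-jdata) files: reads and
 * writeback go through the iomap infrastructure.
 */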
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = filemap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.direct_IO = noop_direct_IO,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

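/*
 * Address space operations for journaled-data files: data is written
 * through the journal, so the buffer-head based paths are used instead.
 */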
static const struct address_space_operations gfs2_jdata_aops = {
	.writepage = gfs2_jdata_writepage,
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_page = generic_error_remove_page,
};

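/**
 * gfs2_set_aops - Select the address space operations for an inode
 * @inode: The inode
 *
 * Chooses between the jdata and the regular (iomap based) operations,
 * depending on whether the inode's data is journaled.
 */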
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}