// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc.  1997-2003 All rights reserved.
 * Copyright (C) 2004-2008 Red Hat, Inc.  All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/mpage.h>
#include <linux/fs.h>
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
#include <linux/backing-dev.h>
#include <linux/uio.h>
#include <trace/events/writeback.h>
#include <linux/sched/signal.h>

#include "gfs2.h"
#include "incore.h"
#include "bmap.h"
#include "glock.h"
#include "inode.h"
#include "log.h"
#include "meta_io.h"
#include "quota.h"
#include "trans.h"
#include "rgrp.h"
#include "super.h"
#include "util.h"
#include "glops.h"
#include "aops.h"


/**
 * gfs2_get_block_noalloc - Fills in a buffer head with details about a block
 * @inode: The inode
 * @lblock: The block number to look up
 * @bh_result: The buffer head to return the result in
 * @create: Non-zero if we may add block to the file
 *
 * Returns: errno
 */

static int gfs2_get_block_noalloc(struct inode *inode, sector_t lblock,
				  struct buffer_head *bh_result, int create)
{
	int error;

	error = gfs2_block_map(inode, lblock, bh_result, 0);
	if (error)
		return error;
	if (!buffer_mapped(bh_result))
		return -ENODATA;
	return 0;
}

/**
 * gfs2_write_jdata_folio - gfs2 jdata-specific version of block_write_full_folio
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * This is the same as calling block_write_full_folio, but it also
 * writes pages outside of i_size
 */
static int gfs2_write_jdata_folio(struct folio *folio,
				 struct writeback_control *wbc)
{
	struct inode * const inode = folio->mapping->host;
	loff_t i_size = i_size_read(inode);

	/*
	 * The folio straddles i_size.  It must be zeroed out on each and every
	 * writepage invocation because it may be mmapped.  "A file is mapped
	 * in multiples of the page size.  For a file that is not a multiple of
	 * the page size, the remaining memory is zeroed when mapped, and
	 * writes to that region are not written out to the file."
	 */
	if (folio_pos(folio) < i_size &&
	    i_size < folio_pos(folio) + folio_size(folio))
		folio_zero_segment(folio, offset_in_folio(folio, i_size),
				folio_size(folio));

	return __block_write_full_folio(inode, folio, gfs2_get_block_noalloc,
			wbc);
}

/**
 * __gfs2_jdata_write_folio - The core of jdata writepage
 * @folio: The folio to write
 * @wbc: The writeback control
 *
 * Implements the core of write back. If a transaction is required then
 * the checked flag will have been set and the transaction will have
 * already been started before this is called.
 */
static int __gfs2_jdata_write_folio(struct folio *folio,
		struct writeback_control *wbc)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (folio_test_checked(folio)) {
		folio_clear_checked(folio);
		if (!folio_buffers(folio)) {
			create_empty_buffers(folio,
					inode->i_sb->s_blocksize,
					BIT(BH_Dirty)|BIT(BH_Uptodate));
		}
		gfs2_trans_add_databufs(ip->i_gl, folio, 0, folio_size(folio));
	}
	return gfs2_write_jdata_folio(folio, wbc);
}

/**
 * gfs2_jdata_writeback - Write jdata folios to the log
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * Returns: errno
 */
int gfs2_jdata_writeback(struct address_space *mapping, struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	struct folio *folio = NULL;
	int error;

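	/* We must not be called with a transaction already open */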
	BUG_ON(current->journal_info);
	if (gfs2_assert_withdraw(sdp, ip->i_gl->gl_state == LM_ST_EXCLUSIVE))
		return 0;

	while ((folio = writeback_iter(mapping, wbc, folio, &error))) {
		if (folio_test_checked(folio)) {
			folio_redirty_for_writepage(wbc, folio);
			folio_unlock(folio);
			continue;
		}
		error = __gfs2_jdata_write_folio(folio, wbc);
	}

	return error;
}

/**
 * gfs2_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: Write-back control
 *
 * Used for both ordered and writeback modes.
 */
static int gfs2_writepages(struct address_space *mapping,
			   struct writeback_control *wbc)
{
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct iomap_writepage_ctx wpc = { };
	int ret;

	/*
	 * Even if we didn't write enough pages here, we might still be holding
	 * dirty pages in the ail. We forcibly flush the ail because we don't
	 * want balance_dirty_pages() to loop indefinitely trying to write out
	 * pages held in the ail that it can't find.
	 */
	ret = iomap_writepages(mapping, wbc, &wpc, &gfs2_writeback_ops);
	if (ret == 0 && wbc->nr_to_write > 0)
		set_bit(SDF_FORCE_AIL_FLUSH, &sdp->sd_flags);
	return ret;
}

/**
 * gfs2_write_jdata_batch - Write back a folio batch's worth of folios
 * @mapping: The mapping
 * @wbc: The writeback control
 * @fbatch: The batch of folios
 * @done_index: Updated to point past the last folio processed
 *
 * Returns: non-zero if loop should terminate, zero otherwise
 */

static int gfs2_write_jdata_batch(struct address_space *mapping,
				    struct writeback_control *wbc,
				    struct folio_batch *fbatch,
				    pgoff_t *done_index)
{
	struct inode *inode = mapping->host;
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	unsigned nrblocks;
	int i;
	int ret;
	size_t size = 0;
	int nr_folios = folio_batch_count(fbatch);

	for (i = 0; i < nr_folios; i++)
		size += folio_size(fbatch->folios[i]);
	nrblocks = size >> inode->i_blkbits;

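	/*
	 * One transaction covers every folio in the batch; it is started
	 * here, before any folio locks are taken, to get the lock ordering
	 * right (see gfs2_write_cache_jdata()).
	 */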
	ret = gfs2_trans_begin(sdp, nrblocks, nrblocks);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_folios; i++) {
		struct folio *folio = fbatch->folios[i];

		*done_index = folio->index;

		folio_lock(folio);

		if (unlikely(folio->mapping != mapping)) {
continue_unlock:
			folio_unlock(folio);
			continue;
		}

		if (!folio_test_dirty(folio)) {
			/* someone wrote it for us */
			goto continue_unlock;
		}

		if (folio_test_writeback(folio)) {
			if (wbc->sync_mode != WB_SYNC_NONE)
				folio_wait_writeback(folio);
			else
				goto continue_unlock;
		}

		BUG_ON(folio_test_writeback(folio));
		if (!folio_clear_dirty_for_io(folio))
			goto continue_unlock;

		trace_wbc_writepage(wbc, inode_to_bdi(inode));

		ret = __gfs2_jdata_write_folio(folio, wbc);
		if (unlikely(ret)) {
			if (ret == AOP_WRITEPAGE_ACTIVATE) {
				folio_unlock(folio);
				ret = 0;
			} else {

				/*
				 * done_index is set past this page,
				 * so media errors will not choke
				 * background writeout for the entire
				 * file. This has consequences for
				 * range_cyclic semantics (ie. it may
				 * not be suitable for data integrity
				 * writeout).
				 */
				*done_index = folio_next_index(folio);
				ret = 1;
				break;
			}
		}

		/*
		 * We stop writing back only if we are not doing
		 * integrity sync. In case of integrity sync we have to
		 * keep going until we have written all the pages
		 * we tagged for writeback prior to entering this loop.
		 */
		if (--wbc->nr_to_write <= 0 && wbc->sync_mode == WB_SYNC_NONE) {
			ret = 1;
			break;
		}

	}
	gfs2_trans_end(sdp);
	return ret;
}

/**
 * gfs2_write_cache_jdata - Like write_cache_pages but different
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 * The reason that we use our own function here is that we need to
 * start transactions before we grab page locks. This allows us
 * to get the ordering right.
 */

static int gfs2_write_cache_jdata(struct address_space *mapping,
				  struct writeback_control *wbc)
{
	int ret = 0;
	int done = 0;
	struct folio_batch fbatch;
	int nr_folios;
	pgoff_t writeback_index;
	pgoff_t index;
	pgoff_t end;
	pgoff_t done_index;
	int cycled;
	int range_whole = 0;
	xa_mark_t tag;

	folio_batch_init(&fbatch);
	if (wbc->range_cyclic) {
		writeback_index = mapping->writeback_index; /* prev offset */
		index = writeback_index;
		if (index == 0)
			cycled = 1;
		else
			cycled = 0;
		end = -1;
	} else {
		index = wbc->range_start >> PAGE_SHIFT;
		end = wbc->range_end >> PAGE_SHIFT;
		if (wbc->range_start == 0 && wbc->range_end == LLONG_MAX)
			range_whole = 1;
		cycled = 1; /* ignore range_cyclic tests */
	}
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag = PAGECACHE_TAG_TOWRITE;
	else
		tag = PAGECACHE_TAG_DIRTY;

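	/*
	 * For integrity sync (and tagged writepages), mark all currently
	 * dirty pages up front so that pages dirtied after this point do
	 * not keep the loop below running forever.
	 */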
retry:
	if (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages)
		tag_pages_for_writeback(mapping, index, end);
	done_index = index;
	while (!done && (index <= end)) {
		nr_folios = filemap_get_folios_tag(mapping, &index, end,
				tag, &fbatch);
		if (nr_folios == 0)
			break;

		ret = gfs2_write_jdata_batch(mapping, wbc, &fbatch,
				&done_index);
		if (ret)
			done = 1;
		if (ret > 0)
			ret = 0;
		folio_batch_release(&fbatch);
		cond_resched();
	}

	if (!cycled && !done) {
		/*
		 * range_cyclic:
		 * We hit the last page and there is more work to be done: wrap
		 * back to the start of the file
		 */
		cycled = 1;
		index = 0;
		end = writeback_index - 1;
		goto retry;
	}

	if (wbc->range_cyclic || (range_whole && wbc->nr_to_write > 0))
		mapping->writeback_index = done_index;

	return ret;
}


/**
 * gfs2_jdata_writepages - Write a bunch of dirty pages back to disk
 * @mapping: The mapping to write
 * @wbc: The writeback control
 *
 */

static int gfs2_jdata_writepages(struct address_space *mapping,
				 struct writeback_control *wbc)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_sbd *sdp = GFS2_SB(mapping->host);
	int ret;

	ret = gfs2_write_cache_jdata(mapping, wbc);
	if (ret == 0 && wbc->sync_mode == WB_SYNC_ALL) {
		gfs2_log_flush(sdp, ip->i_gl, GFS2_LOG_HEAD_FLUSH_NORMAL |
			       GFS2_LFC_JDATA_WPAGES);
		ret = gfs2_write_cache_jdata(mapping, wbc);
	}
	return ret;
}

/**
 * stuffed_read_folio - Fill in a Linux folio with stuffed file data
 * @ip: the inode
 * @folio: the folio
 *
 * Returns: errno
 */
static int stuffed_read_folio(struct gfs2_inode *ip, struct folio *folio)
{
	struct buffer_head *dibh = NULL;
	size_t dsize = i_size_read(&ip->i_inode);
	void *from = NULL;
	int error = 0;

	/*
	 * Due to the order of unstuffing files and ->fault(), we can be
	 * asked for a zero folio in the case of a stuffed file being extended,
	 * so we need to supply one here. It doesn't happen often.
	 */
	if (unlikely(folio->index)) {
		dsize = 0;
	} else {
		error = gfs2_meta_inode_buffer(ip, &dibh);
		if (error)
			goto out;
		from = dibh->b_data + sizeof(struct gfs2_dinode);
	}

	folio_fill_tail(folio, 0, from, dsize);
	brelse(dibh);
out:
	folio_end_read(folio, error == 0);

	return error;
}

/**
 * gfs2_read_folio - read a folio from a file
 * @file: The file to read
 * @folio: The folio in the file
 */
static int gfs2_read_folio(struct file *file, struct folio *folio)
{
	struct inode *inode = folio->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	int error;

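	/*
	 * Only jdata folios that may carry journaled buffers need the
	 * buffer-head based read paths; everything else can go through
	 * iomap.
	 */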
	if (!gfs2_is_jdata(ip) ||
	    (i_blocksize(inode) == PAGE_SIZE && !folio_buffers(folio))) {
		error = iomap_read_folio(folio, &gfs2_iomap_ops);
	} else if (gfs2_is_stuffed(ip)) {
		error = stuffed_read_folio(ip, folio);
	} else {
		error = mpage_read_folio(folio, gfs2_block_map);
	}

	if (gfs2_withdrawing_or_withdrawn(sdp))
		return -EIO;

	return error;
}

/**
 * gfs2_internal_read - read an internal file
 * @ip: The gfs2 inode
 * @buf: The buffer to fill
 * @pos: The file position
 * @size: The amount to read
 *
 */

ssize_t gfs2_internal_read(struct gfs2_inode *ip, char *buf, loff_t *pos,
			   size_t size)
{
	struct address_space *mapping = ip->i_inode.i_mapping;
	unsigned long index = *pos >> PAGE_SHIFT;
	size_t copied = 0;

	do {
		size_t offset, chunk;
		struct folio *folio;

		folio = read_cache_folio(mapping, index, gfs2_read_folio, NULL);
		if (IS_ERR(folio)) {
			if (PTR_ERR(folio) == -EINTR)
				continue;
			return PTR_ERR(folio);
		}
		offset = *pos + copied - folio_pos(folio);
		chunk = min(size - copied, folio_size(folio) - offset);
		memcpy_from_folio(buf + copied, folio, offset, chunk);
		index = folio_next_index(folio);
		folio_put(folio);
		copied += chunk;
	} while (copied < size);
	(*pos) += size;
	return size;
}

/**
 * gfs2_readahead - Read a bunch of pages at once
 * @rac: Read-ahead control structure
 *
 * Some notes:
 * 1. This is only for readahead, so we can simply ignore anything that is
 *    slightly inconvenient (such as locking conflicts between the page
 *    lock and the glock) and return having done no I/O. It's obviously
 *    not something we'd want to do on too regular a basis.
 *    Any I/O we ignore at this time will be done via readpage later.
 * 2. We don't handle stuffed files here; we let readpage do the honours.
 * 3. mpage_readahead() does most of the heavy lifting in the common case.
 * 4. gfs2_block_map() is relied upon to set BH_Boundary in the right places.
 */

static void gfs2_readahead(struct readahead_control *rac)
{
	struct inode *inode = rac->mapping->host;
	struct gfs2_inode *ip = GFS2_I(inode);

	if (gfs2_is_stuffed(ip))
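		/* stuffed inodes are left to ->read_folio (see note 2 above) */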
		;
	else if (gfs2_is_jdata(ip))
		mpage_readahead(rac, gfs2_block_map);
	else
		iomap_readahead(rac, &gfs2_iomap_ops);
}

/**
 * adjust_fs_space - Adjusts the free space available due to gfs2_grow
 * @inode: the rindex inode
 */
void adjust_fs_space(struct inode *inode)
{
	struct gfs2_sbd *sdp = GFS2_SB(inode);
	struct gfs2_inode *m_ip = GFS2_I(sdp->sd_statfs_inode);
	struct gfs2_statfs_change_host *m_sc = &sdp->sd_statfs_master;
	struct gfs2_statfs_change_host *l_sc = &sdp->sd_statfs_local;
	struct buffer_head *m_bh;
	u64 fs_total, new_free;

	if (gfs2_trans_begin(sdp, 2 * RES_STATFS, 0) != 0)
		return;

	/* Total up the file system space, according to the latest rindex. */
	fs_total = gfs2_ri_total(sdp);
	if (gfs2_meta_inode_buffer(m_ip, &m_bh) != 0)
		goto out;

	spin_lock(&sdp->sd_statfs_spin);
	gfs2_statfs_change_in(m_sc, m_bh->b_data +
			      sizeof(struct gfs2_dinode));
	if (fs_total > (m_sc->sc_total + l_sc->sc_total))
		new_free = fs_total - (m_sc->sc_total + l_sc->sc_total);
	else
		new_free = 0;
	spin_unlock(&sdp->sd_statfs_spin);
	fs_warn(sdp, "File system extended by %llu blocks.\n",
		(unsigned long long)new_free);
	gfs2_statfs_change(sdp, new_free, new_free, 0);

	update_statfs(sdp, m_bh);
	brelse(m_bh);
out:
	sdp->sd_rindex_uptodate = 0;
	gfs2_trans_end(sdp);
}

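/*
 * Folios dirtied while a transaction is open are marked "checked" so that
 * __gfs2_jdata_write_folio() knows to journal their buffers at writeback
 * time.
 */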
static bool jdata_dirty_folio(struct address_space *mapping,
		struct folio *folio)
{
	if (current->journal_info)
		folio_set_checked(folio);
	return block_dirty_folio(mapping, folio);
}

/**
 * gfs2_bmap - Block map function
 * @mapping: Address space info
 * @lblock: The block to map
 *
 * Returns: The disk address for the block or 0 on hole or error
 */

static sector_t gfs2_bmap(struct address_space *mapping, sector_t lblock)
{
	struct gfs2_inode *ip = GFS2_I(mapping->host);
	struct gfs2_holder i_gh;
	sector_t dblock = 0;
	int error;

	error = gfs2_glock_nq_init(ip->i_gl, LM_ST_SHARED, LM_FLAG_ANY, &i_gh);
	if (error)
		return 0;

	if (!gfs2_is_stuffed(ip))
		dblock = iomap_bmap(mapping, lblock, &gfs2_iomap_ops);

	gfs2_glock_dq_uninit(&i_gh);

	return dblock;
}

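/*
 * Detach a buffer from the journal when its folio range is being
 * invalidated: unqueue it from the log lists, or pull it out of the AIL
 * if it has already been written to the journal.
 */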
static void gfs2_discard(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	lock_buffer(bh);
	gfs2_log_lock(sdp);
	clear_buffer_dirty(bh);
	bd = bh->b_private;
	if (bd) {
		if (!list_empty(&bd->bd_list) && !buffer_pinned(bh))
			list_del_init(&bd->bd_list);
		else {
			spin_lock(&sdp->sd_ail_lock);
			gfs2_remove_from_journal(bh, REMOVE_JDATA);
			spin_unlock(&sdp->sd_ail_lock);
		}
	}
	bh->b_bdev = NULL;
	clear_buffer_mapped(bh);
	clear_buffer_req(bh);
	clear_buffer_new(bh);
	gfs2_log_unlock(sdp);
	unlock_buffer(bh);
}

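/*
 * Discard the journal state of all buffers that lie entirely within the
 * invalidated byte range, and try to drop the buffers altogether when the
 * whole folio is being invalidated.
 */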
static void gfs2_invalidate_folio(struct folio *folio, size_t offset,
				size_t length)
{
	struct gfs2_sbd *sdp = GFS2_SB(folio->mapping->host);
	size_t stop = offset + length;
	int partial_page = (offset || length < folio_size(folio));
	struct buffer_head *bh, *head;
	unsigned long pos = 0;

	BUG_ON(!folio_test_locked(folio));
	if (!partial_page)
		folio_clear_checked(folio);
	head = folio_buffers(folio);
	if (!head)
		goto out;

	bh = head;
	do {
		if (pos + bh->b_size > stop)
			return;

		if (offset <= pos)
			gfs2_discard(sdp, bh);
		pos += bh->b_size;
		bh = bh->b_this_page;
	} while (bh != head);
out:
	if (!partial_page)
		filemap_release_folio(folio, 0);
}

/**
 * gfs2_release_folio - free the metadata associated with a folio
 * @folio: the folio that's being released
 * @gfp_mask: passed from Linux VFS, ignored by us
 *
 * Calls try_to_free_buffers() to free the buffers and put the folio if the
 * buffers can be released.
 *
 * Returns: true if the folio was put or else false
 */

bool gfs2_release_folio(struct folio *folio, gfp_t gfp_mask)
{
	struct address_space *mapping = folio->mapping;
	struct gfs2_sbd *sdp = gfs2_mapping2sbd(mapping);
	struct buffer_head *bh, *head;
	struct gfs2_bufdata *bd;

	head = folio_buffers(folio);
	if (!head)
		return false;

	/*
	 * mm accommodates an old ext3 case where clean folios might
	 * not have had the dirty bit cleared.  Thus, it can send actual
	 * dirty folios to ->release_folio() via shrink_active_list().
	 *
	 * As a workaround, we skip folios that contain dirty buffers
	 * below.  Once ->release_folio isn't called on dirty folios
	 * anymore, we can warn on dirty buffers like we used to here
	 * again.
	 */

	gfs2_log_lock(sdp);
	bh = head;
	do {
		if (atomic_read(&bh->b_count))
			goto cannot_release;
		bd = bh->b_private;
		if (bd && bd->bd_tr)
			goto cannot_release;
		if (buffer_dirty(bh) || WARN_ON(buffer_pinned(bh)))
			goto cannot_release;
		bh = bh->b_this_page;
	} while (bh != head);

	bh = head;
	do {
		bd = bh->b_private;
		if (bd) {
			gfs2_assert_warn(sdp, bd->bd_bh == bh);
			bd->bd_bh = NULL;
			bh->b_private = NULL;
			/*
			 * The bd may still be queued as a revoke, in which
			 * case we must not dequeue nor free it.
			 */
			if (!bd->bd_blkno && !list_empty(&bd->bd_list))
				list_del_init(&bd->bd_list);
			if (list_empty(&bd->bd_list))
				kmem_cache_free(gfs2_bufdata_cachep, bd);
		}

		bh = bh->b_this_page;
	} while (bh != head);
	gfs2_log_unlock(sdp);

	return try_to_free_buffers(folio);

cannot_release:
	gfs2_log_unlock(sdp);
	return false;
}

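/*
 * Address space operations for ordered and writeback mode files; these
 * lean on iomap for dirtying, invalidation, and release.
 */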
static const struct address_space_operations gfs2_aops = {
	.writepages = gfs2_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = iomap_dirty_folio,
	.release_folio = iomap_release_folio,
	.invalidate_folio = iomap_invalidate_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = filemap_migrate_folio,
	.is_partially_uptodate = iomap_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

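/*
 * Journaled data files keep buffer heads that pass through the log, so
 * they need the buffer-head based operations instead of the iomap ones.
 */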
static const struct address_space_operations gfs2_jdata_aops = {
	.writepages = gfs2_jdata_writepages,
	.read_folio = gfs2_read_folio,
	.readahead = gfs2_readahead,
	.dirty_folio = jdata_dirty_folio,
	.bmap = gfs2_bmap,
	.migrate_folio = buffer_migrate_folio,
	.invalidate_folio = gfs2_invalidate_folio,
	.release_folio = gfs2_release_folio,
	.is_partially_uptodate = block_is_partially_uptodate,
	.error_remove_folio = generic_error_remove_folio,
};

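/* Pick the address space operations matching the inode's journaling mode */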
void gfs2_set_aops(struct inode *inode)
{
	if (gfs2_is_jdata(GFS2_I(inode)))
		inode->i_mapping->a_ops = &gfs2_jdata_aops;
	else
		inode->i_mapping->a_ops = &gfs2_aops;
}