// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
#include <linux/fs.h>
#include <linux/list_sort.h>
#include <linux/blkdev.h>

#include "bmap.h"
#include "dir.h"
#include "gfs2.h"
#include "incore.h"
#include "inode.h"
#include "glock.h"
#include "log.h"
#include "lops.h"
#include "meta_io.h"
#include "recovery.h"
#include "rgrp.h"
#include "trans.h"
#include "util.h"
#include "trace_gfs2.h"

/**
 * gfs2_pin - Pin a buffer in memory
 * @sdp: The superblock
 * @bh: The buffer to be pinned
 *
 * The log lock must be held when calling this function
 */
void gfs2_pin(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	struct gfs2_bufdata *bd;

	BUG_ON(!current->journal_info);

	clear_buffer_dirty(bh);
	if (test_set_buffer_pinned(bh))
		gfs2_assert_withdraw(sdp, 0);
	if (!buffer_uptodate(bh))
		gfs2_io_error_bh_wd(sdp, bh);
	bd = bh->b_private;
	/* If this buffer is in the AIL and it has already been written
	 * to its in-place disk block, remove it from the AIL.
	 */
	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr)
		list_move(&bd->bd_ail_st_list, &bd->bd_tr->tr_ail2_list);
	spin_unlock(&sdp->sd_ail_lock);
	get_bh(bh);
	atomic_inc(&sdp->sd_log_pinned);
	trace_gfs2_pin(bd, 1);
}

static bool buffer_is_rgrp(const struct gfs2_bufdata *bd)
{
	return bd->bd_gl->gl_name.ln_type == LM_TYPE_RGRP;
}

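/*
 * An rgrp bitmap buffer is being unpinned.  If the bitmap carries a clone
 * (used to keep freed-but-not-yet-flushed blocks out of the allocator's
 * sight), resync the clone with the real bitmap, reset the free-space
 * counters, and optionally issue discards for the freed blocks first.
 */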
static void maybe_release_space(struct gfs2_bufdata *bd)
{
	struct gfs2_glock *gl = bd->bd_gl;
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
	unsigned int index = bd->bd_bh->b_blocknr - gl->gl_name.ln_number;
	struct gfs2_bitmap *bi = rgd->rd_bits + index;

	if (bi->bi_clone == NULL)
		return;
	if (sdp->sd_args.ar_discard)
		gfs2_rgrp_send_discards(sdp, rgd->rd_data0, bd->bd_bh, bi, 1, NULL);
	memcpy(bi->bi_clone + bi->bi_offset,
	       bd->bd_bh->b_data + bi->bi_offset, bi->bi_bytes);
	clear_bit(GBF_FULL, &bi->bi_flags);
	rgd->rd_free_clone = rgd->rd_free;
	rgd->rd_extfail_pt = rgd->rd_free;
}

/**
 * gfs2_unpin - Unpin a buffer
 * @sdp: the filesystem the buffer belongs to
 * @bh: The buffer to unpin
 * @tr: The system transaction being flushed
 */

static void gfs2_unpin(struct gfs2_sbd *sdp, struct buffer_head *bh,
		       struct gfs2_trans *tr)
{
	struct gfs2_bufdata *bd = bh->b_private;

	BUG_ON(!buffer_uptodate(bh));
	BUG_ON(!buffer_pinned(bh));

	lock_buffer(bh);
	mark_buffer_dirty(bh);
	clear_buffer_pinned(bh);

	if (buffer_is_rgrp(bd))
		maybe_release_space(bd);

	spin_lock(&sdp->sd_ail_lock);
	if (bd->bd_tr) {
		list_del(&bd->bd_ail_st_list);
		brelse(bh);
	} else {
		struct gfs2_glock *gl = bd->bd_gl;
		list_add(&bd->bd_ail_gl_list, &gl->gl_ail_list);
		atomic_inc(&gl->gl_ail_count);
	}
	bd->bd_tr = tr;
	list_add(&bd->bd_ail_st_list, &tr->tr_ail1_list);
	spin_unlock(&sdp->sd_ail_lock);

	clear_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags);
	trace_gfs2_pin(bd, 0);
	unlock_buffer(bh);
	atomic_dec(&sdp->sd_log_pinned);
}

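/*
 * Advance the log flush head by one block, wrapping at the end of the
 * journal.  The BUG_ON catches the flush head running into the log tail.
 */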
static void gfs2_log_incr_head(struct gfs2_sbd *sdp)
{
	BUG_ON((sdp->sd_log_flush_head == sdp->sd_log_tail) &&
	       (sdp->sd_log_flush_head != sdp->sd_log_head));

	if (++sdp->sd_log_flush_head == sdp->sd_jdesc->jd_blocks)
		sdp->sd_log_flush_head = 0;
}

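/*
 * Translate the current log flush head (a journal-relative block) into a
 * physical disk block using the journal's extent map, then advance the
 * flush head.  Returns -1 (as a u64) if the block is not covered by any
 * extent.
 */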
u64 gfs2_log_bmap(struct gfs2_sbd *sdp)
{
	unsigned int lbn = sdp->sd_log_flush_head;
	struct gfs2_journal_extent *je;
	u64 block;

	list_for_each_entry(je, &sdp->sd_jdesc->extent_list, list) {
		if ((lbn >= je->lblock) && (lbn < (je->lblock + je->blocks))) {
			block = je->dblock + lbn - je->lblock;
			gfs2_log_incr_head(sdp);
			return block;
		}
	}

	return -1;
}

/**
 * gfs2_end_log_write_bh - end log write of pagecache data with buffers
 * @sdp: The superblock
 * @bvec: The bio_vec
 * @error: The i/o status
 *
 * This finds the relevant buffers and unlocks them and sets the
 * error flag according to the status of the i/o request. This is
 * used when the log is writing data which has an in-place version
 * that is pinned in the pagecache.
 */

static void gfs2_end_log_write_bh(struct gfs2_sbd *sdp,
				  struct bio_vec *bvec,
				  blk_status_t error)
{
	struct buffer_head *bh, *next;
	struct page *page = bvec->bv_page;
	unsigned size;

	bh = page_buffers(page);
	size = bvec->bv_len;
	while (bh_offset(bh) < bvec->bv_offset)
		bh = bh->b_this_page;
	do {
		if (error)
			mark_buffer_write_io_error(bh);
		unlock_buffer(bh);
		next = bh->b_this_page;
		size -= bh->b_size;
		brelse(bh);
		bh = next;
	} while(bh && size);
}

/**
 * gfs2_end_log_write - end of i/o to the log
 * @bio: The bio
 *
 * Each bio_vec contains either data from the pagecache or data
 * relating to the log itself. Here we iterate over the bio_vec
 * array, processing both kinds of data.
 *
 */

static void gfs2_end_log_write(struct bio *bio)
{
	struct gfs2_sbd *sdp = bio->bi_private;
	struct bio_vec *bvec;
	struct page *page;
	struct bvec_iter_all iter_all;

	if (bio->bi_status) {
		fs_err(sdp, "Error %d writing to journal, jid=%u\n",
		       bio->bi_status, sdp->sd_jdesc->jd_jid);
		wake_up(&sdp->sd_logd_waitq);
	}

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (page_has_buffers(page))
			gfs2_end_log_write_bh(sdp, bvec, bio->bi_status);
		else
			mempool_free(page, gfs2_page_pool);
	}

	bio_put(bio);
	if (atomic_dec_and_test(&sdp->sd_log_in_flight))
		wake_up(&sdp->sd_log_flush_wait);
}

/**
 * gfs2_log_submit_bio - Submit any pending log bio
 * @biop: Address of the bio pointer
 * @opf: REQ_OP | op_flags
 *
 * Submit any pending part-built or full bio to the block device. If
 * there is no pending bio, then this is a no-op.
 */

void gfs2_log_submit_bio(struct bio **biop, int opf)
{
	struct bio *bio = *biop;
	if (bio) {
		struct gfs2_sbd *sdp = bio->bi_private;
		atomic_inc(&sdp->sd_log_in_flight);
		bio->bi_opf = opf;
		submit_bio(bio);
		*biop = NULL;
	}
}

/**
 * gfs2_log_alloc_bio - Allocate a bio
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @end_io: The bi_end_io callback
 *
 * Allocate a new bio, initialize it with the given parameters and return it.
 *
 * Returns: The newly allocated bio
 */

static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno,
				      bio_end_io_t *end_io)
{
	struct super_block *sb = sdp->sd_vfs;
	struct bio *bio = bio_alloc(GFP_NOIO, BIO_MAX_PAGES);

	bio->bi_iter.bi_sector = blkno << sdp->sd_fsb2bb_shift;
	bio_set_dev(bio, sb->s_bdev);
	bio->bi_end_io = end_io;
	bio->bi_private = sdp;

	return bio;
}

/**
 * gfs2_log_get_bio - Get cached log bio, or allocate a new one
 * @sdp: The super block
 * @blkno: The device block number we want to write to
 * @biop: Address of the cached bio pointer
 * @op: REQ_OP
 * @end_io: The bi_end_io callback
 * @flush: Always flush the current bio and allocate a new one?
 *
 * If there is a cached bio, then if the next block number is sequential
 * with the previous one, return it, otherwise flush the bio to the
 * device. If there is no cached bio, or we just flushed it, then
 * allocate a new one.
 *
 * Returns: The bio to use for log writes
 */

static struct bio *gfs2_log_get_bio(struct gfs2_sbd *sdp, u64 blkno,
				    struct bio **biop, int op,
				    bio_end_io_t *end_io, bool flush)
{
	struct bio *bio = *biop;

	if (bio) {
		u64 nblk;

		nblk = bio_end_sector(bio);
		nblk >>= sdp->sd_fsb2bb_shift;
		if (blkno == nblk && !flush)
			return bio;
		gfs2_log_submit_bio(biop, op);
	}

	*biop = gfs2_log_alloc_bio(sdp, blkno, end_io);
	return *biop;
}

/**
 * gfs2_log_write - write to log
 * @sdp: the filesystem
 * @page: the page to write
 * @size: the size of the data to write
 * @offset: the offset within the page
 * @blkno: block number of the log entry
 *
 * Try and add the page segment to the current bio. If that fails,
 * submit the current bio to the device and create a new one, and
 * then add the page segment to that.
 */

void gfs2_log_write(struct gfs2_sbd *sdp, struct page *page,
		    unsigned size, unsigned offset, u64 blkno)
{
	struct bio *bio;
	int ret;

	bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio, REQ_OP_WRITE,
			       gfs2_end_log_write, false);
	ret = bio_add_page(bio, page, size, offset);
	if (ret == 0) {
		bio = gfs2_log_get_bio(sdp, blkno, &sdp->sd_log_bio,
				       REQ_OP_WRITE, gfs2_end_log_write, true);
		ret = bio_add_page(bio, page, size, offset);
		WARN_ON(ret == 0);
	}
}

/**
 * gfs2_log_write_bh - write a buffer's content to the log
 * @sdp: The super block
 * @bh: The buffer pointing to the in-place location
 *
 * This writes the content of the buffer to the next available location
 * in the log. The buffer will be unlocked once the i/o to the log has
 * completed.
 */

static void gfs2_log_write_bh(struct gfs2_sbd *sdp, struct buffer_head *bh)
{
	gfs2_log_write(sdp, bh->b_page, bh->b_size, bh_offset(bh),
		       gfs2_log_bmap(sdp));
}

/**
 * gfs2_log_write_page - write one block stored in a page, into the log
 * @sdp: The superblock
 * @page: The struct page
 *
 * This writes the first block-sized part of the page into the log. Note
 * that the page must have been allocated from the gfs2_page_pool mempool
 * and that after this has been called, ownership has been transferred and
 * the page may be freed at any time.
 */

void gfs2_log_write_page(struct gfs2_sbd *sdp, struct page *page)
{
	struct super_block *sb = sdp->sd_vfs;
	gfs2_log_write(sdp, page, sb->s_blocksize, 0,
		       gfs2_log_bmap(sdp));
}

/**
 * gfs2_end_log_read - end I/O callback for reads from the log
 * @bio: The bio
 *
 * Simply unlock the pages in the bio. The main thread will wait on them and
 * process them in order as necessary.
 */

static void gfs2_end_log_read(struct bio *bio)
{
	struct page *page;
	struct bio_vec *bvec;
	struct bvec_iter_all iter_all;

	bio_for_each_segment_all(bvec, bio, iter_all) {
		page = bvec->bv_page;
		if (bio->bi_status) {
			int err = blk_status_to_errno(bio->bi_status);

			SetPageError(page);
			mapping_set_error(page->mapping, err);
		}
		unlock_page(page);
	}

	bio_put(bio);
}

/**
 * gfs2_jhead_pg_srch - Look for the journal head in a given page.
 * @jd: The journal descriptor
 * @head: The journal head found so far; updated as newer headers are seen
 * @page: The page to look in
 *
 * Returns: true if the journal head has been found, false otherwise.
 */

static bool gfs2_jhead_pg_srch(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head,
			       struct page *page)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_log_header_host lh;
	void *kaddr = kmap_atomic(page);
	unsigned int offset;
	bool ret = false;

	for (offset = 0; offset < PAGE_SIZE; offset += sdp->sd_sb.sb_bsize) {
		if (!__get_log_header(sdp, kaddr + offset, 0, &lh)) {
			if (lh.lh_sequence >= head->lh_sequence)
				*head = lh;
			else {
				ret = true;
				break;
			}
		}
	}
	kunmap_atomic(kaddr);
	return ret;
}

/**
 * gfs2_jhead_process_page - Search/cleanup a page
 * @jd: The journal descriptor
 * @index: Index of the page to look into
 * @head: The journal head found so far
 * @done: If set, perform only cleanup, else search and set if found.
 *
 * Find the page with 'index' in the journal's mapping. Search the page for
 * the journal head if requested (*done == false). Release refs on the
 * page so the page cache can reclaim it (put_page() twice). We grabbed a
 * reference on this page two times, first when we did a find_or_create_page()
 * to obtain the page to add it to the bio and second when we do a
 * find_get_page() here to get the page to wait on while I/O on it is being
 * completed.
 * This function is also used to free up a page we might've grabbed but not
 * used. Maybe we added it to a bio, but not submitted it for I/O. Or we
 * submitted the I/O, but we already found the jhead so we only need to drop
 * our references to the page.
 */

static void gfs2_jhead_process_page(struct gfs2_jdesc *jd, unsigned long index,
				    struct gfs2_log_header_host *head,
				    bool *done)
{
	struct page *page;

	page = find_get_page(jd->jd_inode->i_mapping, index);
	wait_on_page_locked(page);

	if (PageError(page))
		*done = true;

	if (!*done)
		*done = gfs2_jhead_pg_srch(jd, head, page);

	put_page(page); /* Once for find_get_page */
	put_page(page); /* Once more for find_or_create_page */
}

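/*
 * Allocate a new bio that picks up at the sector where @prev ends, chain
 * it to @prev and submit @prev.  Used by gfs2_find_jhead() to keep
 * building read requests while earlier ones are already in flight.
 */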
static struct bio *gfs2_chain_bio(struct bio *prev, unsigned int nr_iovecs)
{
	struct bio *new;

	new = bio_alloc(GFP_NOIO, nr_iovecs);
	bio_copy_dev(new, prev);
	new->bi_iter.bi_sector = bio_end_sector(prev);
	new->bi_opf = prev->bi_opf;
	new->bi_write_hint = prev->bi_write_hint;
	bio_chain(new, prev);
	submit_bio(prev);
	return new;
}

/**
 * gfs2_find_jhead - find the head of a log
 * @jd: The journal descriptor
 * @head: The log descriptor for the head of the log is returned here
 * @keep_cache: If set, don't truncate the journal's page cache on exit
 *
 * Do a search of a journal by reading it in large chunks using bios and find
 * the valid log entry with the highest sequence number. (i.e. the log head)
 *
 * Returns: 0 on success, errno otherwise
 */
int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,
		    bool keep_cache)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct address_space *mapping = jd->jd_inode->i_mapping;
	unsigned int block = 0, blocks_submitted = 0, blocks_read = 0;
	unsigned int bsize = sdp->sd_sb.sb_bsize, off;
	unsigned int bsize_shift = sdp->sd_sb.sb_bsize_shift;
	unsigned int shift = PAGE_SHIFT - bsize_shift;
	unsigned int max_blocks = 2 * 1024 * 1024 >> bsize_shift;
	struct gfs2_journal_extent *je;
	int sz, ret = 0;
	struct bio *bio = NULL;
	struct page *page = NULL;
	bool done = false;
	errseq_t since;

	memset(head, 0, sizeof(*head));
	if (list_empty(&jd->extent_list))
		gfs2_map_journal_extents(sdp, jd);

	since = filemap_sample_wb_err(mapping);
	list_for_each_entry(je, &jd->extent_list, list) {
		u64 dblock = je->dblock;

		for (; block < je->lblock + je->blocks; block++, dblock++) {
			if (!page) {
				page = find_or_create_page(mapping,
						block >> shift, GFP_NOFS);
				if (!page) {
					ret = -ENOMEM;
					done = true;
					goto out;
				}
				off = 0;
			}

			if (bio && (off || block < blocks_submitted + max_blocks)) {
				sector_t sector = dblock << sdp->sd_fsb2bb_shift;

				if (bio_end_sector(bio) == sector) {
					sz = bio_add_page(bio, page, bsize, off);
					if (sz == bsize)
						goto block_added;
				}
				if (off) {
					unsigned int blocks =
						(PAGE_SIZE - off) >> bsize_shift;

					bio = gfs2_chain_bio(bio, blocks);
					goto add_block_to_new_bio;
				}
			}

			if (bio) {
				blocks_submitted = block;
				submit_bio(bio);
			}

			bio = gfs2_log_alloc_bio(sdp, dblock, gfs2_end_log_read);
			bio->bi_opf = REQ_OP_READ;
add_block_to_new_bio:
			sz = bio_add_page(bio, page, bsize, off);
			BUG_ON(sz != bsize);
block_added:
			off += bsize;
			if (off == PAGE_SIZE)
				page = NULL;
			if (blocks_submitted <= blocks_read + max_blocks) {
				/* Keep at least one bio in flight */
				continue;
			}

			gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
			blocks_read += PAGE_SIZE >> bsize_shift;
			if (done)
				goto out; /* found */
		}
	}

out:
	if (bio)
		submit_bio(bio);
	while (blocks_read < block) {
		gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);
		blocks_read += PAGE_SIZE >> bsize_shift;
	}

	if (!ret)
		ret = filemap_check_wb_err(mapping, since);

	if (!keep_cache)
		truncate_inode_pages(mapping, 0);

	return ret;
}

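/*
 * Allocate a page from the gfs2 page pool and initialise it as a log
 * descriptor block of the given type, length and data1 value.
 */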
static struct page *gfs2_get_log_desc(struct gfs2_sbd *sdp, u32 ld_type,
				      u32 ld_length, u32 ld_data1)
{
	struct page *page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
	struct gfs2_log_descriptor *ld = page_address(page);
	clear_page(ld);
	ld->ld_header.mh_magic = cpu_to_be32(GFS2_MAGIC);
	ld->ld_header.mh_type = cpu_to_be32(GFS2_METATYPE_LD);
	ld->ld_header.mh_format = cpu_to_be32(GFS2_FORMAT_LD);
	ld->ld_type = cpu_to_be32(ld_type);
	ld->ld_length = cpu_to_be32(ld_length);
	ld->ld_data1 = cpu_to_be32(ld_data1);
	ld->ld_data2 = 0;
	return page;
}

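/*
 * Journaled data blocks that happen to start with the GFS2 magic number
 * could be mistaken for metadata when the log is scanned, so such blocks
 * are written to the log "escaped": the buffer is flagged here, the log
 * copy gets its leading magic number zeroed out (see gfs2_before_commit()),
 * and replay restores it (see databuf_lo_scan_elements()).
 */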
static void gfs2_check_magic(struct buffer_head *bh)
{
	void *kaddr;
	__be32 *ptr;

	clear_buffer_escaped(bh);
	kaddr = kmap_atomic(bh->b_page);
	ptr = kaddr + bh_offset(bh);
	if (*ptr == cpu_to_be32(GFS2_MAGIC))
		set_buffer_escaped(bh);
	kunmap_atomic(kaddr);
}

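/*
 * list_sort() comparison callback: order bufdata entries by the disk
 * block number of their buffers.
 */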
static int blocknr_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct gfs2_bufdata *bda, *bdb;

	bda = list_entry(a, struct gfs2_bufdata, bd_list);
	bdb = list_entry(b, struct gfs2_bufdata, bd_list);

	if (bda->bd_bh->b_blocknr < bdb->bd_bh->b_blocknr)
		return -1;
	if (bda->bd_bh->b_blocknr > bdb->bd_bh->b_blocknr)
		return 1;
	return 0;
}

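/*
 * Write a list of pinned buffers (metadata or journaled data) into the
 * log in chunks of at most @limit blocks.  Each chunk is preceded by a
 * log descriptor page listing the in-place block numbers; for journaled
 * data the descriptor also records, per block, whether it was escaped.
 * The log lock is dropped around page allocation and the actual writes.
 */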
static void gfs2_before_commit(struct gfs2_sbd *sdp, unsigned int limit,
			       unsigned int total, struct list_head *blist,
			       bool is_databuf)
{
	struct gfs2_log_descriptor *ld;
	struct gfs2_bufdata *bd1 = NULL, *bd2;
	struct page *page;
	unsigned int num;
	unsigned n;
	__be64 *ptr;

	gfs2_log_lock(sdp);
	list_sort(NULL, blist, blocknr_cmp);
	bd1 = bd2 = list_prepare_entry(bd1, blist, bd_list);
	while(total) {
		num = total;
		if (total > limit)
			num = limit;
		gfs2_log_unlock(sdp);
		page = gfs2_get_log_desc(sdp,
					 is_databuf ? GFS2_LOG_DESC_JDATA :
					 GFS2_LOG_DESC_METADATA, num + 1, num);
		ld = page_address(page);
		gfs2_log_lock(sdp);
		ptr = (__be64 *)(ld + 1);

		n = 0;
		list_for_each_entry_continue(bd1, blist, bd_list) {
			*ptr++ = cpu_to_be64(bd1->bd_bh->b_blocknr);
			if (is_databuf) {
				gfs2_check_magic(bd1->bd_bh);
				*ptr++ = cpu_to_be64(buffer_escaped(bd1->bd_bh) ? 1 : 0);
			}
			if (++n >= num)
				break;
		}

		gfs2_log_unlock(sdp);
		gfs2_log_write_page(sdp, page);
		gfs2_log_lock(sdp);

		n = 0;
		list_for_each_entry_continue(bd2, blist, bd_list) {
			get_bh(bd2->bd_bh);
			gfs2_log_unlock(sdp);
			lock_buffer(bd2->bd_bh);

			if (buffer_escaped(bd2->bd_bh)) {
				void *kaddr;
				page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
				ptr = page_address(page);
				kaddr = kmap_atomic(bd2->bd_bh->b_page);
				memcpy(ptr, kaddr + bh_offset(bd2->bd_bh),
				       bd2->bd_bh->b_size);
				kunmap_atomic(kaddr);
				*(__be32 *)ptr = 0;
				clear_buffer_escaped(bd2->bd_bh);
				unlock_buffer(bd2->bd_bh);
				brelse(bd2->bd_bh);
				gfs2_log_write_page(sdp, page);
			} else {
				gfs2_log_write_bh(sdp, bd2->bd_bh);
			}
			gfs2_log_lock(sdp);
			if (++n >= num)
				break;
		}

		BUG_ON(total < num);
		total -= num;
	}
	gfs2_log_unlock(sdp);
}

static void buf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = buf_limit(sdp); /* 503 for 4k blocks */
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_buf_new - tr->tr_num_buf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_buf, 0);
}

static void buf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_buf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}

static void buf_lo_before_scan(struct gfs2_jdesc *jd,
			       struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_blocks = 0;
	jd->jd_replayed_blocks = 0;
}

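/*
 * Recovery pass 1 for metadata: for each block number in the descriptor,
 * read the corresponding log block and, unless the block has been
 * revoked, copy its contents over the in-place buffer and mark it dirty.
 */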
static int buf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				struct gfs2_log_descriptor *ld, __be64 *ptr,
				int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_METADATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		if (gfs2_meta_check(sdp, bh_ip))
			error = -EIO;
		else {
			struct gfs2_meta_header *mh =
				(struct gfs2_meta_header *)bh_ip->b_data;

			if (mh->mh_type == cpu_to_be32(GFS2_METATYPE_RG)) {
				struct gfs2_rgrpd *rgd;

				rgd = gfs2_blk2rgrpd(sdp, blkno, false);
				if (rgd && rgd->rd_addr == blkno &&
				    rgd->rd_bits && rgd->rd_bits->bi_bh) {
					fs_info(sdp, "Replaying 0x%llx but we "
						"already have a bh!\n",
						(unsigned long long)blkno);
					fs_info(sdp, "busy:%d, pinned:%d\n",
						buffer_busy(rgd->rd_bits->bi_bh) ? 1 : 0,
						buffer_pinned(rgd->rd_bits->bi_bh));
					gfs2_dump_glock(NULL, rgd->rd_gl, true);
				}
			}
			mark_buffer_dirty(bh_ip);
		}
		brelse(bh_log);
		brelse(bh_ip);

		if (error)
			break;

		jd->jd_replayed_blocks++;
	}

	return error;
}

/**
 * gfs2_meta_sync - Sync all buffers associated with a glock
 * @gl: The glock
 *
 */

static void gfs2_meta_sync(struct gfs2_glock *gl)
{
	struct address_space *mapping = gfs2_glock2aspace(gl);
	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
	int error;

	if (mapping == NULL)
		mapping = &sdp->sd_aspace;

	filemap_fdatawrite(mapping);
	error = filemap_fdatawait(mapping);

	if (error)
		gfs2_io_error(gl->gl_name.ln_sbd);
}

static void buf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

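/*
 * Write the pending revokes into the log.  The first block is a revoke
 * log descriptor; if the revokes overflow it, they continue into further
 * log blocks carrying a plain GFS2_METATYPE_LB header.
 */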
static void revoke_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct gfs2_meta_header *mh;
	unsigned int offset;
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct page *page;
	unsigned int length;

	gfs2_write_revokes(sdp);
	if (!sdp->sd_log_num_revoke)
		return;

	length = gfs2_struct2blk(sdp, sdp->sd_log_num_revoke, sizeof(u64));
	page = gfs2_get_log_desc(sdp, GFS2_LOG_DESC_REVOKE, length, sdp->sd_log_num_revoke);
	offset = sizeof(struct gfs2_log_descriptor);

	list_for_each_entry(bd, head, bd_list) {
		sdp->sd_log_num_revoke--;

		if (offset + sizeof(u64) > sdp->sd_sb.sb_bsize) {

			gfs2_log_write_page(sdp, page);
			page = mempool_alloc(gfs2_page_pool, GFP_NOIO);
			mh = page_address(page);
			clear_page(mh);
			mh->mh_magic = cpu_to_be32(GFS2_MAGIC);
			mh->mh_type = cpu_to_be32(GFS2_METATYPE_LB);
			mh->mh_format = cpu_to_be32(GFS2_FORMAT_LB);
			offset = sizeof(struct gfs2_meta_header);
		}

		*(__be64 *)(page_address(page) + offset) = cpu_to_be64(bd->bd_blkno);
		offset += sizeof(u64);
	}
	gfs2_assert_withdraw(sdp, !sdp->sd_log_num_revoke);

	gfs2_log_write_page(sdp, page);
}

static void revoke_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head = &sdp->sd_log_revokes;
	struct gfs2_bufdata *bd;
	struct gfs2_glock *gl;

	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gl = bd->bd_gl;
		gfs2_glock_remove_revoke(gl);
		kmem_cache_free(gfs2_bufdata_cachep, bd);
	}
}

static void revoke_lo_before_scan(struct gfs2_jdesc *jd,
				  struct gfs2_log_header_host *head, int pass)
{
	if (pass != 0)
		return;

	jd->jd_found_revokes = 0;
	jd->jd_replay_tail = head->lh_tail;
}

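/*
 * Recovery pass 0 for revokes: collect the revoke tags from a revoke
 * descriptor so that later passes know which blocks must not be replayed.
 */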
static int revoke_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				   struct gfs2_log_descriptor *ld, __be64 *ptr,
				   int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);
	unsigned int blks = be32_to_cpu(ld->ld_length);
	unsigned int revokes = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh;
	unsigned int offset;
	u64 blkno;
	int first = 1;
	int error;

	if (pass != 0 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_REVOKE)
		return 0;

	offset = sizeof(struct gfs2_log_descriptor);

	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		error = gfs2_replay_read_block(jd, start, &bh);
		if (error)
			return error;

		if (!first)
			gfs2_metatype_check(sdp, bh, GFS2_METATYPE_LB);

		while (offset + sizeof(u64) <= sdp->sd_sb.sb_bsize) {
			blkno = be64_to_cpu(*(__be64 *)(bh->b_data + offset));

			error = gfs2_revoke_add(jd, blkno, start);
			if (error < 0) {
				brelse(bh);
				return error;
			}
			else if (error)
				jd->jd_found_revokes++;

			if (!--revokes)
				break;
			offset += sizeof(u64);
		}

		brelse(bh);
		offset = sizeof(struct gfs2_meta_header);
		first = 0;
	}

	return 0;
}

static void revoke_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_revoke_clean(jd);
		return;
	}
	if (pass != 1)
		return;

	fs_info(sdp, "jid=%u: Found %u revoke tags\n",
		jd->jd_jid, jd->jd_found_revokes);

	gfs2_revoke_clean(jd);
}

/**
 * databuf_lo_before_commit - Scan the data buffers, writing as we go
 * @sdp: The filesystem
 * @tr: The system transaction being flushed
 */

static void databuf_lo_before_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	unsigned int limit = databuf_limit(sdp);
	unsigned int nbuf;
	if (tr == NULL)
		return;
	nbuf = tr->tr_num_databuf_new - tr->tr_num_databuf_rm;
	gfs2_before_commit(sdp, limit, nbuf, &tr->tr_databuf, 1);
}

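/*
 * Recovery pass 1 for journaled data: descriptor entries come in pairs of
 * (block number, escape flag); escaped blocks get their leading
 * GFS2_MAGIC restored before the in-place buffer is marked dirty.
 */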
static int databuf_lo_scan_elements(struct gfs2_jdesc *jd, u32 start,
				    struct gfs2_log_descriptor *ld,
				    __be64 *ptr, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_glock *gl = ip->i_gl;
	unsigned int blks = be32_to_cpu(ld->ld_data1);
	struct buffer_head *bh_log, *bh_ip;
	u64 blkno;
	u64 esc;
	int error = 0;

	if (pass != 1 || be32_to_cpu(ld->ld_type) != GFS2_LOG_DESC_JDATA)
		return 0;

	gfs2_replay_incr_blk(jd, &start);
	for (; blks; gfs2_replay_incr_blk(jd, &start), blks--) {
		blkno = be64_to_cpu(*ptr++);
		esc = be64_to_cpu(*ptr++);

		jd->jd_found_blocks++;

		if (gfs2_revoke_check(jd, blkno, start))
			continue;

		error = gfs2_replay_read_block(jd, start, &bh_log);
		if (error)
			return error;

		bh_ip = gfs2_meta_new(gl, blkno);
		memcpy(bh_ip->b_data, bh_log->b_data, bh_log->b_size);

		/* Unescape */
		if (esc) {
			__be32 *eptr = (__be32 *)bh_ip->b_data;
			*eptr = cpu_to_be32(GFS2_MAGIC);
		}
		mark_buffer_dirty(bh_ip);

		brelse(bh_log);
		brelse(bh_ip);

		jd->jd_replayed_blocks++;
	}

	return error;
}

/* FIXME: sort out accounting for log blocks etc. */

static void databuf_lo_after_scan(struct gfs2_jdesc *jd, int error, int pass)
{
	struct gfs2_inode *ip = GFS2_I(jd->jd_inode);
	struct gfs2_sbd *sdp = GFS2_SB(jd->jd_inode);

	if (error) {
		gfs2_meta_sync(ip->i_gl);
		return;
	}
	if (pass != 1)
		return;

	/* data sync? */
	gfs2_meta_sync(ip->i_gl);

	fs_info(sdp, "jid=%u: Replayed %u of %u data blocks\n",
		jd->jd_jid, jd->jd_replayed_blocks, jd->jd_found_blocks);
}

static void databuf_lo_after_commit(struct gfs2_sbd *sdp, struct gfs2_trans *tr)
{
	struct list_head *head;
	struct gfs2_bufdata *bd;

	if (tr == NULL)
		return;

	head = &tr->tr_databuf;
	while (!list_empty(head)) {
		bd = list_entry(head->next, struct gfs2_bufdata, bd_list);
		list_del_init(&bd->bd_list);
		gfs2_unpin(sdp, bd->bd_bh, tr);
	}
}


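/*
 * The log operation vectors below tie the pieces together: the
 * *_before_commit hooks write pinned buffers and revokes into the log at
 * flush time, the *_after_commit hooks unpin them, and the *_scan hooks
 * replay (or revoke) blocks during journal recovery.
 */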
static const struct gfs2_log_operations gfs2_buf_lops = {
	.lo_before_commit = buf_lo_before_commit,
	.lo_after_commit = buf_lo_after_commit,
	.lo_before_scan = buf_lo_before_scan,
	.lo_scan_elements = buf_lo_scan_elements,
	.lo_after_scan = buf_lo_after_scan,
	.lo_name = "buf",
};

static const struct gfs2_log_operations gfs2_revoke_lops = {
	.lo_before_commit = revoke_lo_before_commit,
	.lo_after_commit = revoke_lo_after_commit,
	.lo_before_scan = revoke_lo_before_scan,
	.lo_scan_elements = revoke_lo_scan_elements,
	.lo_after_scan = revoke_lo_after_scan,
	.lo_name = "revoke",
};

static const struct gfs2_log_operations gfs2_databuf_lops = {
	.lo_before_commit = databuf_lo_before_commit,
	.lo_after_commit = databuf_lo_after_commit,
	.lo_scan_elements = databuf_lo_scan_elements,
	.lo_after_scan = databuf_lo_after_scan,
	.lo_name = "databuf",
};

const struct gfs2_log_operations *gfs2_log_ops[] = {
	&gfs2_databuf_lops,
	&gfs2_buf_lops,
	&gfs2_revoke_lops,
	NULL,
};