Lines Matching refs:sdp

91 		struct gfs2_sbd *sdp = rbm->rgd->rd_sbd;  in gfs2_setbit()  local
93 fs_warn(sdp, "buf_blk = 0x%x old_state=%d, new_state=%d\n", in gfs2_setbit()
95 fs_warn(sdp, "rgrp=0x%llx bi_start=0x%x biblk: 0x%llx\n", in gfs2_setbit()
98 fs_warn(sdp, "bi_offset=0x%x bi_bytes=0x%x block=0x%llx\n", in gfs2_setbit()
441 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrp_verify() local
460 gfs2_lm(sdp, "free data mismatch: %u != %u\n", in gfs2_rgrp_verify()
468 gfs2_lm(sdp, "used data mismatch: %u != %u\n", in gfs2_rgrp_verify()
475 gfs2_lm(sdp, "used metadata mismatch: %u != %u\n", in gfs2_rgrp_verify()
498 struct gfs2_rgrpd *gfs2_blk2rgrpd(struct gfs2_sbd *sdp, u64 blk, bool exact) in gfs2_blk2rgrpd() argument
503 spin_lock(&sdp->sd_rindex_spin); in gfs2_blk2rgrpd()
504 n = sdp->sd_rindex_tree.rb_node; in gfs2_blk2rgrpd()
513 spin_unlock(&sdp->sd_rindex_spin); in gfs2_blk2rgrpd()
524 spin_unlock(&sdp->sd_rindex_spin); in gfs2_blk2rgrpd()
536 struct gfs2_rgrpd *gfs2_rgrpd_get_first(struct gfs2_sbd *sdp) in gfs2_rgrpd_get_first() argument
541 spin_lock(&sdp->sd_rindex_spin); in gfs2_rgrpd_get_first()
542 n = rb_first(&sdp->sd_rindex_tree); in gfs2_rgrpd_get_first()
544 spin_unlock(&sdp->sd_rindex_spin); in gfs2_rgrpd_get_first()
558 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrpd_get_next() local
561 spin_lock(&sdp->sd_rindex_spin); in gfs2_rgrpd_get_next()
564 n = rb_first(&sdp->sd_rindex_tree); in gfs2_rgrpd_get_next()
567 spin_unlock(&sdp->sd_rindex_spin); in gfs2_rgrpd_get_next()
571 spin_unlock(&sdp->sd_rindex_spin); in gfs2_rgrpd_get_next()
577 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in check_and_update_goal() local
578 if (!ip->i_goal || gfs2_blk2rgrpd(sdp, ip->i_goal, 1) == NULL) in check_and_update_goal()
700 void gfs2_clear_rgrpd(struct gfs2_sbd *sdp) in gfs2_clear_rgrpd() argument
706 while ((n = rb_first(&sdp->sd_rindex_tree))) { in gfs2_clear_rgrpd()
710 rb_erase(n, &sdp->sd_rindex_tree); in gfs2_clear_rgrpd()
741 struct gfs2_sbd *sdp = rgd->rd_sbd; in compute_bitstructs() local
769 bytes = sdp->sd_sb.sb_bsize - sizeof(struct gfs2_rgrp); in compute_bitstructs()
783 bytes = sdp->sd_sb.sb_bsize - in compute_bitstructs()
800 gfs2_lm(sdp, in compute_bitstructs()
825 u64 gfs2_ri_total(struct gfs2_sbd *sdp) in gfs2_ri_total() argument
828 struct inode *inode = sdp->sd_rindex; in gfs2_ri_total()
849 struct gfs2_sbd *sdp = rgd->rd_sbd; in rgd_insert() local
850 struct rb_node **newn = &sdp->sd_rindex_tree.rb_node, *parent = NULL; in rgd_insert()
867 rb_insert_color(&rgd->rd_node, &sdp->sd_rindex_tree); in rgd_insert()
868 sdp->sd_rgrps++; in rgd_insert()
881 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in read_rindex_entry() local
882 loff_t pos = sdp->sd_rgrps * sizeof(struct gfs2_rindex); in read_rindex_entry()
901 rgd->rd_sbd = sdp; in read_rindex_entry()
909 error = gfs2_glock_get(sdp, rgd->rd_addr, in read_rindex_entry()
920 if (rgd->rd_data > sdp->sd_max_rg_data) in read_rindex_entry()
921 sdp->sd_max_rg_data = rgd->rd_data; in read_rindex_entry()
922 spin_lock(&sdp->sd_rindex_spin); in read_rindex_entry()
924 spin_unlock(&sdp->sd_rindex_spin); in read_rindex_entry()
949 static void set_rgrp_preferences(struct gfs2_sbd *sdp) in set_rgrp_preferences() argument
956 rgd = gfs2_rgrpd_get_first(sdp); in set_rgrp_preferences()
957 for (i = 0; i < sdp->sd_lockstruct.ls_jid; i++) in set_rgrp_preferences()
963 for (i = 0; i < sdp->sd_journals; i++) { in set_rgrp_preferences()
980 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_ri_update() local
990 if (RB_EMPTY_ROOT(&sdp->sd_rindex_tree)) { in gfs2_ri_update()
991 fs_err(sdp, "no resource groups found in the file system.\n"); in gfs2_ri_update()
994 set_rgrp_preferences(sdp); in gfs2_ri_update()
996 sdp->sd_rindex_uptodate = 1; in gfs2_ri_update()
1017 int gfs2_rindex_update(struct gfs2_sbd *sdp) in gfs2_rindex_update() argument
1019 struct gfs2_inode *ip = GFS2_I(sdp->sd_rindex); in gfs2_rindex_update()
1026 if (!sdp->sd_rindex_uptodate) { in gfs2_rindex_update()
1033 if (!sdp->sd_rindex_uptodate) in gfs2_rindex_update()
1098 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrp_lvb_valid() local
1102 fs_warn(sdp, "GFS2: rgd: %llu lvb flag mismatch %u/%u", in gfs2_rgrp_lvb_valid()
1108 fs_warn(sdp, "GFS2: rgd: %llu lvb free mismatch %u/%u", in gfs2_rgrp_lvb_valid()
1114 fs_warn(sdp, "GFS2: rgd: %llu lvb dinode mismatch %u/%u", in gfs2_rgrp_lvb_valid()
1121 fs_warn(sdp, "GFS2: rgd: %llu lvb igen mismatch %llu/%llu", in gfs2_rgrp_lvb_valid()
1167 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrp_bh_get() local
1186 error = gfs2_meta_wait(sdp, bi->bi_bh); in gfs2_rgrp_bh_get()
1189 if (gfs2_metatype_check(sdp, bi->bi_bh, y ? GFS2_METATYPE_RB : in gfs2_rgrp_bh_get()
1210 else if (sdp->sd_args.ar_rgrplvb) { in gfs2_rgrp_bh_get()
1226 gfs2_assert_warn(sdp, !bi->bi_clone); in gfs2_rgrp_bh_get()
1258 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrp_go_lock() local
1260 if (gh->gh_flags & GL_SKIP && sdp->sd_args.ar_rgrplvb) in gfs2_rgrp_go_lock()
1284 int gfs2_rgrp_send_discards(struct gfs2_sbd *sdp, u64 offset, in gfs2_rgrp_send_discards() argument
1288 struct super_block *sb = sdp->sd_vfs; in gfs2_rgrp_send_discards()
1345 if (sdp->sd_args.ar_discard) in gfs2_rgrp_send_discards()
1346 fs_warn(sdp, "error %d on discard request, turning discards off for this filesystem\n", rv); in gfs2_rgrp_send_discards()
1347 sdp->sd_args.ar_discard = 0; in gfs2_rgrp_send_discards()
1362 struct gfs2_sbd *sdp = GFS2_SB(inode); in gfs2_fitrim() local
1363 struct request_queue *q = bdev_get_queue(sdp->sd_vfs->s_bdev); in gfs2_fitrim()
1374 unsigned bs_shift = sdp->sd_sb.sb_bsize_shift; in gfs2_fitrim()
1379 if (!test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) in gfs2_fitrim()
1388 ret = gfs2_rindex_update(sdp); in gfs2_fitrim()
1394 minlen = max_t(u64, r.minlen, sdp->sd_sb.sb_bsize); in gfs2_fitrim()
1398 if (end <= start || minlen > sdp->sd_max_rg_data) in gfs2_fitrim()
1401 rgd = gfs2_blk2rgrpd(sdp, start, 0); in gfs2_fitrim()
1402 rgd_end = gfs2_blk2rgrpd(sdp, end, 0); in gfs2_fitrim()
1404 if ((gfs2_rgrpd_get_first(sdp) == gfs2_rgrpd_get_next(rgd_end)) in gfs2_fitrim()
1418 ret = gfs2_rgrp_send_discards(sdp, in gfs2_fitrim()
1429 ret = gfs2_trans_begin(sdp, RES_RG_HDR, 0); in gfs2_fitrim()
1435 gfs2_trans_end(sdp); in gfs2_fitrim()
1807 struct gfs2_sbd *sdp = rgd->rd_sbd; in try_rgrp_unlink() local
1831 error = gfs2_glock_get(sdp, block, &gfs2_iopen_glops, CREATE, &gl); in try_rgrp_unlink()
1888 const struct gfs2_sbd *sdp = gl->gl_name.ln_sbd; in gfs2_rgrp_congested() local
1899 st = &per_cpu_ptr(sdp->sd_lkstats, cpu)->lkstats[LM_TYPE_RGRP]; in gfs2_rgrp_congested()
1905 st = &this_cpu_ptr(sdp->sd_lkstats)->lkstats[LM_TYPE_RGRP]; in gfs2_rgrp_congested()
1951 const struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_orlov_skip() local
1955 return skip % sdp->sd_rgrps; in gfs2_orlov_skip()
1961 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_select_rgrp() local
1965 rgd = gfs2_rgrpd_get_first(sdp); in gfs2_select_rgrp()
2009 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_inplace_reserve() local
2017 if (sdp->sd_args.ar_rgrplvb) in gfs2_inplace_reserve()
2019 if (gfs2_assert_warn(sdp, ap->target)) in gfs2_inplace_reserve()
2028 rs->rs_rbm.rgd = begin = gfs2_blk2rgrpd(sdp, ip->i_goal, 1); in gfs2_inplace_reserve()
2059 if (sdp->sd_args.ar_rgrplvb) { in gfs2_inplace_reserve()
2074 if (sdp->sd_args.ar_rgrplvb) in gfs2_inplace_reserve()
2119 if (ip == GFS2_I(sdp->sd_rindex) && !sdp->sd_rindex_uptodate) { in gfs2_inplace_reserve()
2126 gfs2_log_flush(sdp, NULL, GFS2_LOG_HEAD_FLUSH_NORMAL | in gfs2_inplace_reserve()
2188 static void rgblk_free(struct gfs2_sbd *sdp, struct gfs2_rgrpd *rgd, in rgblk_free() argument
2252 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_rgrp_error() local
2253 char fs_id_buf[sizeof(sdp->sd_fsname) + 7]; in gfs2_rgrp_error()
2255 fs_warn(sdp, "rgrp %llu has an error, marking it readonly until umount\n", in gfs2_rgrp_error()
2257 fs_warn(sdp, "umount on all nodes and run fsck.gfs2 to fix the error\n"); in gfs2_rgrp_error()
2258 sprintf(fs_id_buf, "fsid=%s: ", sdp->sd_fsname); in gfs2_rgrp_error()
2350 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_alloc_blocks() local
2367 fs_warn(sdp, "inum=%llu error=%d, nblocks=%u, full=%d fail_pt=%d\n", in gfs2_alloc_blocks()
2396 fs_warn(sdp, "nblocks=%u\n", *nblocks); in gfs2_alloc_blocks()
2411 gfs2_statfs_change(sdp, 0, -(s64)*nblocks, dinode ? 1 : 0); in gfs2_alloc_blocks()
2413 gfs2_trans_remove_revoke(sdp, block, *nblocks); in gfs2_alloc_blocks()
2441 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in __gfs2_free_blocks() local
2443 rgblk_free(sdp, rgd, bstart, blen, GFS2_BLKST_FREE); in __gfs2_free_blocks()
2467 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_free_meta() local
2470 gfs2_statfs_change(sdp, 0, +blen, 0); in gfs2_free_meta()
2477 struct gfs2_sbd *sdp = GFS2_SB(inode); in gfs2_unlink_di() local
2481 rgd = gfs2_blk2rgrpd(sdp, blkno, true); in gfs2_unlink_di()
2484 rgblk_free(sdp, rgd, blkno, 1, GFS2_BLKST_UNLINKED); in gfs2_unlink_di()
2493 struct gfs2_sbd *sdp = rgd->rd_sbd; in gfs2_free_di() local
2495 rgblk_free(sdp, rgd, ip->i_no_addr, 1, GFS2_BLKST_FREE); in gfs2_free_di()
2505 gfs2_statfs_change(sdp, 0, +1, -1); in gfs2_free_di()
2522 int gfs2_check_blk_type(struct gfs2_sbd *sdp, u64 no_addr, unsigned int type) in gfs2_check_blk_type() argument
2529 rgd = gfs2_blk2rgrpd(sdp, no_addr, 1); in gfs2_check_blk_type()
2565 struct gfs2_sbd *sdp = GFS2_SB(&ip->i_inode); in gfs2_rlist_add() local
2571 if (gfs2_assert_warn(sdp, !rlist->rl_ghs)) in gfs2_rlist_add()
2582 rgd = gfs2_blk2rgrpd(sdp, block, 1); in gfs2_rlist_add()
2586 rgd = gfs2_blk2rgrpd(sdp, block, 1); in gfs2_rlist_add()
2590 fs_err(sdp, "rlist_add: no rgrp for block %llu\n", in gfs2_rlist_add()
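
The fragments from gfs2_blk2rgrpd() above (listing lines 498-524) show the core lookup pattern: a block number is resolved to a resource group by walking the rindex rbtree (sdp->sd_rindex_tree) while holding sdp->sd_rindex_spin. As a rough illustration only, the standalone sketch below reproduces the same idea in userspace with a sorted array of non-overlapping block ranges and a binary search that returns the range containing a given block. The struct name rgrp_range, the array-plus-binary-search substitution for the kernel's rbtree, and the sample extents are invented for this sketch; the interval comparison in the real kernel function is not part of the listing and is assumed here.

/*
 * Userspace sketch of the block -> resource-group lookup idea seen in
 * gfs2_blk2rgrpd().  The kernel version walks an rbtree under a spinlock;
 * here a sorted array of ranges plus binary search stands in for it.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct rgrp_range {
	uint64_t start;		/* first data block (rd_data0 in GFS2) */
	uint64_t len;		/* number of data blocks (rd_data in GFS2) */
};

/* Return the range containing blk, or NULL if blk falls in a gap. */
static const struct rgrp_range *blk2rgrp(const struct rgrp_range *r,
					 size_t n, uint64_t blk)
{
	size_t lo = 0, hi = n;

	while (lo < hi) {
		size_t mid = lo + (hi - lo) / 2;

		if (blk < r[mid].start)
			hi = mid;			/* go left  */
		else if (blk >= r[mid].start + r[mid].len)
			lo = mid + 1;			/* go right */
		else
			return &r[mid];			/* blk is inside this range */
	}
	return NULL;
}

int main(void)
{
	/* Three invented, non-contiguous resource-group extents. */
	static const struct rgrp_range rgrps[] = {
		{ .start = 16,    .len = 1000 },
		{ .start = 2048,  .len = 1000 },
		{ .start = 65536, .len = 1000 },
	};
	const struct rgrp_range *rg = blk2rgrp(rgrps, 3, 2100);

	if (rg)
		printf("block 2100 -> rgrp starting at %llu\n",
		       (unsigned long long)rg->start);
	return 0;
}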