/fs/dlm/ |
D | member.c |
      43  struct rcom_slot *ro;    in dlm_slots_copy_out() local
      46  ro = (struct rcom_slot *)(rc->rc_buf + sizeof(struct rcom_config));    in dlm_slots_copy_out()
      54  ro->ro_nodeid = cpu_to_le32(slot->nodeid);    in dlm_slots_copy_out()
      55  ro->ro_slot = cpu_to_le16(slot->slot);    in dlm_slots_copy_out()
      56  ro++;    in dlm_slots_copy_out()
     102  struct rcom_slot *ro0, *ro;    in dlm_slots_copy_in() local
     126  for (i = 0, ro = ro0; i < num_slots; i++, ro++) {    in dlm_slots_copy_in()
     127  if (le32_to_cpu(ro->ro_nodeid) != memb->nodeid)    in dlm_slots_copy_in()
     129  memb->slot = le16_to_cpu(ro->ro_slot);    in dlm_slots_copy_in()
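The member.c hits above are a copy-out/copy-in pair for an on-wire, little-endian slot array: `ro` walks the rcom_slot entries, converting fields with cpu_to_le32()/cpu_to_le16() on the way out and le32_to_cpu()/le16_to_cpu() on the way in. Below is a minimal userspace sketch of the same pattern, using the glibc <endian.h> helpers in place of the kernel conversion macros; the struct and function names are illustrative, not DLM's own.

    #include <endian.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative on-wire slot record: both fields are little-endian on the wire. */
    struct wire_slot {
            uint32_t nodeid;
            uint16_t slot;
    } __attribute__((packed));

    /* Host-order view of the same information. */
    struct host_slot {
            uint32_t nodeid;
            uint16_t slot;
    };

    /* Copy-out: host order -> little-endian wire buffer (cf. dlm_slots_copy_out()). */
    static void slots_copy_out(struct wire_slot *ro, const struct host_slot *s, int n)
    {
            for (int i = 0; i < n; i++, ro++, s++) {
                    ro->nodeid = htole32(s->nodeid);
                    ro->slot = htole16(s->slot);
            }
    }

    /* Copy-in: little-endian wire buffer -> host order (cf. dlm_slots_copy_in()). */
    static void slots_copy_in(struct host_slot *s, const struct wire_slot *ro, int n)
    {
            for (int i = 0; i < n; i++, ro++, s++) {
                    s->nodeid = le32toh(ro->nodeid);
                    s->slot = le16toh(ro->slot);
            }
    }

    int main(void)
    {
            struct host_slot in[2] = { { 1, 10 }, { 2, 11 } }, out[2];
            struct wire_slot wire[2];

            slots_copy_out(wire, in, 2);
            slots_copy_in(out, wire, 2);
            printf("node %u slot %u\n", (unsigned)out[1].nodeid, (unsigned)out[1].slot);
            return 0;
    }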
|
/fs/gfs2/ |
D | recovery.c |
     409  int ro = 0;    in gfs2_recover_func() local
     478  ro = 1;    in gfs2_recover_func()
     481  ro = 1;    in gfs2_recover_func()
     485  ro = bdev_read_only(sdp->sd_vfs->s_bdev);    in gfs2_recover_func()
     486  if (!ro) {    in gfs2_recover_func()
     495  if (ro) {    in gfs2_recover_func()
|
D | sys.c |
     718  char ro[20];    in gfs2_sys_fs_add() local
     720  char *envp[] = { ro, spectator, NULL };    in gfs2_sys_fs_add()
     722  sprintf(ro, "RDONLY=%d", sb_rdonly(sb));    in gfs2_sys_fs_add()
|
D | ops_fstype.c |
    1097  char ro[20];    in gfs2_online_uevent() local
    1099  char *envp[] = { ro, spectator, NULL };    in gfs2_online_uevent()
    1100  sprintf(ro, "RDONLY=%d", sb_rdonly(sb));    in gfs2_online_uevent()
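Both gfs2 hits (sys.c and ops_fstype.c) build a uevent environment the same way: format "RDONLY=%d" into a small stack buffer and hand it over in a NULL-terminated envp array. A userspace sketch of that pattern follows; the kernel's kobject_uevent_env() and sb_rdonly() are replaced by a hypothetical emit_env() and a plain bool, and the SPECTATOR line is an assumption since only the RDONLY one appears in the listing above.

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for kobject_uevent_env(): just print the environment. */
    static void emit_env(char *envp[])
    {
            for (int i = 0; envp[i]; i++)
                    printf("%s\n", envp[i]);
    }

    /* Same shape as the listing: small stack buffers, NULL-terminated envp array. */
    static void online_uevent(bool rdonly, bool spectator_mount)
    {
            char ro[20];
            char spectator[20];
            char *envp[] = { ro, spectator, NULL };

            snprintf(ro, sizeof(ro), "RDONLY=%d", rdonly);
            /* "SPECTATOR=%d" is assumed; the listing only shows the RDONLY line. */
            snprintf(spectator, sizeof(spectator), "SPECTATOR=%d", spectator_mount);
            emit_env(envp);
    }

    int main(void)
    {
            online_uevent(true, false);
            return 0;
    }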
|
/fs/jbd2/ |
D | journal.c |
    2222  unsigned long ro, unsigned long incompat)    in jbd2_journal_check_used_features() argument
    2226  if (!compat && !ro && !incompat)    in jbd2_journal_check_used_features()
    2238  ((be32_to_cpu(sb->s_feature_ro_compat) & ro) == ro) &&    in jbd2_journal_check_used_features()
    2257  unsigned long ro, unsigned long incompat)    in jbd2_journal_check_available_features() argument
    2259  if (!compat && !ro && !incompat)    in jbd2_journal_check_available_features()
    2270  (ro & JBD2_KNOWN_ROCOMPAT_FEATURES) == ro &&    in jbd2_journal_check_available_features()
    2319  unsigned long ro, unsigned long incompat)    in jbd2_journal_set_features() argument
    2327  if (jbd2_journal_check_used_features(journal, compat, ro, incompat))    in jbd2_journal_set_features()
    2330  if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))    in jbd2_journal_set_features()
    2345  compat, ro, incompat);    in jbd2_journal_set_features()
    [all …]
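Throughout the jbd2 hits, `ro` is a bitmask of requested read-only-compatible features, and each helper applies the usual subset test, (x & mask) == mask, either against the on-disk superblock fields or against the JBD2_KNOWN_*_FEATURES constants. Below is a compact sketch of that subset test with made-up feature constants; it is not the jbd2 implementation itself.

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Hypothetical feature masks understood by this code (not jbd2's real values). */
    #define KNOWN_COMPAT_FEATURES    0x0001u
    #define KNOWN_ROCOMPAT_FEATURES  0x0003u
    #define KNOWN_INCOMPAT_FEATURES  0x0007u

    /* True if every bit requested in 'want' is present in 'have'. */
    static bool mask_subset(uint32_t have, uint32_t want)
    {
            return (have & want) == want;
    }

    /* Can this code handle a journal using the requested feature bits? */
    static bool check_available_features(uint32_t compat, uint32_t ro, uint32_t incompat)
    {
            if (!compat && !ro && !incompat)
                    return true;            /* nothing requested, trivially fine */

            return mask_subset(KNOWN_COMPAT_FEATURES, compat) &&
                   mask_subset(KNOWN_ROCOMPAT_FEATURES, ro) &&      /* (ro & KNOWN) == ro */
                   mask_subset(KNOWN_INCOMPAT_FEATURES, incompat);
    }

    int main(void)
    {
            printf("ro=0x2 supported: %d\n", check_available_features(0, 0x2, 0));
            printf("ro=0x8 supported: %d\n", check_available_features(0, 0x8, 0));
            return 0;
    }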
|
/fs/btrfs/ |
D | block-group.c |
     320  if (bg->ro)    in btrfs_inc_nocow_writers()
     378  ASSERT(bg->ro);    in btrfs_wait_block_group_reservations()
     912  BUG_ON(!block_group->ro);    in btrfs_remove_block_group()
    1212  if (cache->ro) {    in inc_block_group_ro()
    1213  cache->ro++;    in inc_block_group_ro()
    1256  cache->ro++;    in inc_block_group_ro()
    1381  if (btrfs_is_block_group_used(block_group) || block_group->ro ||    in btrfs_delete_unused_bgs()
    1667  if (bg->reserved || bg->pinned || bg->ro) {    in btrfs_reclaim_bgs_work()
    2790  BUG_ON(!cache->ro);    in btrfs_dec_block_group_ro()
    2794  if (!--cache->ro) {    in btrfs_dec_block_group_ro()
    [all …]
|
D | block-group.h |
     136  unsigned int ro;    member
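Here `ro` is not a flag but a counter in struct btrfs_block_group: inc_block_group_ro() raises it, btrfs_dec_block_group_ro() lowers it (note the `if (!--cache->ro)` hit above), and allocators skip the group while it is non-zero. A stripped-down sketch of that counting pattern under a lock follows; the struct and helpers are illustrative stand-ins, not the btrfs API.

    #include <assert.h>
    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative block-group stand-in: ro counts outstanding read-only holds. */
    struct block_group {
            pthread_mutex_t lock;
            unsigned int ro;        /* >0 means no new allocations from this group */
    };

    static void inc_block_group_ro(struct block_group *bg)
    {
            pthread_mutex_lock(&bg->lock);
            bg->ro++;
            pthread_mutex_unlock(&bg->lock);
    }

    /* Returns true when the last hold is dropped and the group is writable again. */
    static bool dec_block_group_ro(struct block_group *bg)
    {
            bool writable;

            pthread_mutex_lock(&bg->lock);
            assert(bg->ro);                 /* mirrors the BUG_ON(!cache->ro) hits */
            writable = (--bg->ro == 0);
            pthread_mutex_unlock(&bg->lock);
            return writable;
    }

    /* Allocators skip a group while any hold is outstanding (unlocked read is
     * fine for this single-threaded demo). */
    static bool can_allocate_from(const struct block_group *bg)
    {
            return bg->ro == 0;
    }

    int main(void)
    {
            struct block_group bg = { .lock = PTHREAD_MUTEX_INITIALIZER, .ro = 0 };

            inc_block_group_ro(&bg);        /* e.g. a scrub or balance pass */
            inc_block_group_ro(&bg);        /* a second holder */
            printf("writable: %d\n", can_allocate_from(&bg));            /* 0 */
            printf("after first dec: %d\n", dec_block_group_ro(&bg));    /* 0 */
            printf("after last dec:  %d\n", dec_block_group_ro(&bg));    /* 1 */
            return 0;
    }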
|
D | extent-tree.c |
    2729  if (cache->ro) {    in unpin_extent_range()
    3551  if (cluster_bg != bg && (cluster_bg->ro ||    in find_free_extent_clustered()
    3786  if (block_group->ro || btrfs_zoned_bg_is_full(block_group)) {    in do_allocation_zoned()
    3818  if (block_group->ro ||    in do_allocation_zoned()
    4355  block_group->ro) {    in find_free_extent()
    4386  if (unlikely(block_group->ro)) {    in find_free_extent()
    6033  if (!block_group->ro) {    in btrfs_account_ro_block_groups_free_space()
|
D | free-space-cache.c |
    2705  if (!block_group->ro)    in __btrfs_add_free_space_zoned()
    2912  if (info->bytes >= bytes && !block_group->ro)    in btrfs_dump_free_space()
    3644  if (!block_group->ro) {    in do_trimming()
    3673  if (block_group->ro)    in do_trimming()
|
D | space-info.c |
     542  cache->ro ? "[readonly]" : "");    in btrfs_dump_space_info()
|
D | scrub.c |
    4127  !cache->ro && cache->reserved == 0 && cache->used == 0) {    in scrub_enumerate_chunks()
|
D | inode.c |
    7261  if (!block_group || block_group->ro)    in btrfs_extent_readonly()
|
/fs/ntfs3/ |
D | fslog.c |
     433  u16 ro;    in is_rst_page_hdr_valid() local
     451  ro = le16_to_cpu(rhdr->ra_off);    in is_rst_page_hdr_valid()
     452  if (!IS_ALIGNED(ro, 8) || ro > sys_page)    in is_rst_page_hdr_valid()
     458  if (ro < end_usa)    in is_rst_page_hdr_valid()
     469  u16 ro = le16_to_cpu(rhdr->ra_off);    in is_rst_area_valid() local
     472  if (ro + offsetof(struct RESTART_AREA, l_size) >    in is_rst_area_valid()
     476  ra = Add2Ptr(rhdr, ro);    in is_rst_area_valid()
     484  if (!IS_ALIGNED(off, 8) || ro + off > SECTOR_SIZE - sizeof(short))    in is_rst_area_valid()
     534  u16 ro = le16_to_cpu(rhdr->ra_off);    in is_client_area_valid() local
     535  const struct RESTART_AREA *ra = Add2Ptr(rhdr, ro);    in is_client_area_valid()
     [all …]
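The fslog.c hits show the defensive handling of an untrusted on-disk offset: read ra_off with le16_to_cpu(), reject it unless it is 8-byte aligned and within bounds, and only then turn it into a pointer with Add2Ptr(). Below is a freestanding sketch of that validation, with le16toh() standing in for le16_to_cpu() and a local add2ptr() for the kernel's Add2Ptr(); the structures are simplified and illustrative.

    #include <endian.h>
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    #define SECTOR_SIZE 512u

    /* Illustrative on-disk header carrying a little-endian offset to a sub-structure. */
    struct disk_hdr {
            uint8_t  magic[4];
            uint16_t ra_off;        /* little-endian offset of the restart area */
    };

    struct restart_area {
            uint32_t l_size;
            /* ... */
    };

    static bool is_aligned(unsigned int v, unsigned int a)
    {
            return (v & (a - 1)) == 0;
    }

    /* Local stand-in for the kernel's Add2Ptr(). */
    static void *add2ptr(void *base, size_t off)
    {
            return (uint8_t *)base + off;
    }

    /* Validate ra_off before using it; refuse rather than trust bad on-disk data. */
    static struct restart_area *get_restart_area(struct disk_hdr *hdr, size_t page_size)
    {
            unsigned int ro = le16toh(hdr->ra_off);

            if (!is_aligned(ro, 8) || ro > page_size)
                    return NULL;
            if (ro + sizeof(struct restart_area) > SECTOR_SIZE)
                    return NULL;

            return add2ptr(hdr, ro);
    }

    int main(void)
    {
            uint8_t page[SECTOR_SIZE] = { 0 };
            struct disk_hdr *hdr = (struct disk_hdr *)page;

            hdr->ra_off = htole16(16);
            printf("offset 16:  %s\n", get_restart_area(hdr, sizeof(page)) ? "accepted" : "rejected");

            hdr->ra_off = htole16(512);     /* aligned, but the area would overrun the sector */
            printf("offset 512: %s\n", get_restart_area(hdr, sizeof(page)) ? "accepted" : "rejected");
            return 0;
    }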
|
D | attrib.c |
    1222  u16 ro;    in attr_load_runs_vcn() local
    1243  ro = le16_to_cpu(attr->nres.run_off);    in attr_load_runs_vcn()
    1245  if (ro > le32_to_cpu(attr->size))    in attr_load_runs_vcn()
    1249  Add2Ptr(attr, ro), le32_to_cpu(attr->size) - ro);    in attr_load_runs_vcn()
|
/fs/affs/ |
D | Changes |
     224  can be trapped. Previously, ro remounts didn't
|