/fs/proc/
D | proc_sysctl.c |
      35  static bool is_empty_dir(struct ctl_table_header *head)  in is_empty_dir() argument
      37          return head->ctl_table[0].child == sysctl_mount_point;  in is_empty_dir()
      83  static int insert_links(struct ctl_table_header *head);
     112          struct ctl_table_header *head;  in find_entry() local
     123                  head = ctl_node->header;  in find_entry()
     124                  entry = &head->ctl_table[ctl_node - head->node];  in find_entry()
     133          *phead = head;  in find_entry()
     140  static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)  in insert_entry() argument
     142          struct rb_node *node = &head->node[entry - head->ctl_table].node;  in insert_entry()
     143          struct rb_node **p = &head->parent->root.rb_node;  in insert_entry()
    [all …]

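In find_entry() and insert_entry() above, a ctl_table entry and its rb-tree node sit in two parallel arrays hanging off the same header, and each is recovered from the other by plain pointer subtraction (ctl_node - head->node, entry - head->ctl_table). A minimal user-space sketch of that indexing idiom, with hypothetical stand-in types rather than the kernel's:

    #include <stdio.h>

    struct node  { int rb_stub; };        /* stands in for struct ctl_node  */
    struct entry { const char *name; };   /* stands in for struct ctl_table */

    struct header {                       /* stands in for ctl_table_header */
            struct entry *table;          /* array of entries               */
            struct node  *node;           /* parallel array, same length    */
    };

    /* Given a pointer into hdr->node[], find the matching hdr->table[] slot. */
    static struct entry *entry_of(struct header *hdr, struct node *n)
    {
            return &hdr->table[n - hdr->node];   /* index = pointer difference */
    }

    int main(void)
    {
            struct entry table[] = { {"one"}, {"two"}, {"three"} };
            struct node  nodes[3];
            struct header hdr = { table, nodes };

            printf("%s\n", entry_of(&hdr, &nodes[1])->name);   /* prints "two" */
            return 0;
    }

This only works because both arrays are allocated together with the same length and ordering, so the pointer difference is a valid index into either one.
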
/fs/
D | mbcache.c |
      79          struct hlist_bl_head *head;  in mb_cache_entry_create() local
      99          head = mb_cache_entry_head(cache, key);  in mb_cache_entry_create()
     100          hlist_bl_lock(head);  in mb_cache_entry_create()
     101          hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {  in mb_cache_entry_create()
     103                          hlist_bl_unlock(head);  in mb_cache_entry_create()
     108          hlist_bl_add_head(&entry->e_hash_list, head);  in mb_cache_entry_create()
     109          hlist_bl_unlock(head);  in mb_cache_entry_create()
     134          struct hlist_bl_head *head;  in __entry_find() local
     136          head = mb_cache_entry_head(cache, key);  in __entry_find()
     137          hlist_bl_lock(head);  in __entry_find()
    [all …]

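mb_cache_entry_create() locks the hash bucket, scans it for an existing entry with the same key, and only links the new entry at the head if no duplicate is found. A user-space sketch of that insert-unless-duplicate shape, using a pthread mutex where the kernel packs a bit lock into the list head itself (the types here are invented, not mbcache's):

    #include <pthread.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry {
            unsigned key;
            struct entry *next;
    };

    struct bucket {
            pthread_mutex_t lock;      /* kernel: hlist_bl_lock() bit lock */
            struct entry *first;
    };

    /* Insert key unless it is already present; -1 mirrors mbcache's -EBUSY. */
    static int bucket_insert(struct bucket *b, unsigned key)
    {
            struct entry *dup, *e;

            pthread_mutex_lock(&b->lock);
            for (dup = b->first; dup; dup = dup->next) {
                    if (dup->key == key) {
                            pthread_mutex_unlock(&b->lock);
                            return -1;                /* duplicate */
                    }
            }
            e = malloc(sizeof(*e));
            if (!e) {
                    pthread_mutex_unlock(&b->lock);
                    return -1;
            }
            e->key = key;
            e->next = b->first;        /* kernel: hlist_bl_add_head() */
            b->first = e;
            pthread_mutex_unlock(&b->lock);
            return 0;
    }

    int main(void)
    {
            struct bucket b = { PTHREAD_MUTEX_INITIALIZER, NULL };

            printf("%d\n", bucket_insert(&b, 42));   /* 0: inserted   */
            printf("%d\n", bucket_insert(&b, 42));   /* -1: duplicate */
            return 0;
    }
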
D | buffer.c |
      87          struct buffer_head *head, *bh;  in buffer_check_dirty_writeback() local
      99          head = page_buffers(page);  in buffer_check_dirty_writeback()
     100          bh = head;  in buffer_check_dirty_writeback()
     109          } while (bh != head);  in buffer_check_dirty_writeback()
     202          struct buffer_head *head;  in __find_get_block_slow() local
     215          head = page_buffers(page);  in __find_get_block_slow()
     216          bh = head;  in __find_get_block_slow()
     226          } while (bh != head);  in __find_get_block_slow()
     627          struct buffer_head *head = page_buffers(page);  in __set_page_dirty_buffers() local
     628          struct buffer_head *bh = head;  in __set_page_dirty_buffers()
    [all …]

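All three functions above walk the page's buffer ring: the buffer_heads of a page form a circular singly linked list through b_this_page, so traversal starts at head and do/while-loops until it comes back around. A self-contained sketch with a hypothetical struct standing in for buffer_head:

    #include <stdio.h>

    /* Hypothetical stand-in for struct buffer_head's b_this_page ring. */
    struct buf {
            int dirty;
            struct buf *this_page;   /* next buffer on the page, circular */
    };

    /* Visit every buffer on the page exactly once. */
    static int count_dirty(struct buf *head)
    {
            struct buf *bh = head;
            int n = 0;

            do {
                    if (bh->dirty)
                            n++;
                    bh = bh->this_page;
            } while (bh != head);    /* stop when we come back around */
            return n;
    }

    int main(void)
    {
            struct buf a = { 1 }, b = { 0 }, c = { 1 };

            a.this_page = &b; b.this_page = &c; c.this_page = &a;
            printf("%d\n", count_dirty(&a));   /* 2 */
            return 0;
    }
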
D | seq_file.c |
     896  struct list_head *seq_list_start(struct list_head *head, loff_t pos)  in seq_list_start() argument
     900          list_for_each(lh, head)  in seq_list_start()
     908  struct list_head *seq_list_start_head(struct list_head *head, loff_t pos)  in seq_list_start_head() argument
     911                  return head;  in seq_list_start_head()
     913          return seq_list_start(head, pos - 1);  in seq_list_start_head()
     917  struct list_head *seq_list_next(void *v, struct list_head *head, loff_t *ppos)  in seq_list_next() argument
     923          return lh == head ? NULL : lh;  in seq_list_next()
     934  struct hlist_node *seq_hlist_start(struct hlist_head *head, loff_t pos)  in seq_hlist_start() argument
     938          hlist_for_each(node, head)  in seq_hlist_start()
     953  struct hlist_node *seq_hlist_start_head(struct hlist_head *head, loff_t pos)  in seq_hlist_start_head() argument
    [all …]

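seq_list_start() walks pos links past the list head and returns NULL if the list is shorter; seq_list_next() advances the cursor and signals the end by returning NULL when it would step back onto the head. A user-space sketch of the same cursor shape over a hand-rolled circular list:

    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* Return the pos'th element after head, or NULL if the list is
     * shorter - the shape of seq_list_start(). */
    static struct list_head *list_start(struct list_head *head, long pos)
    {
            for (struct list_head *lh = head->next; lh != head; lh = lh->next)
                    if (pos-- == 0)
                            return lh;
            return NULL;
    }

    /* Advance the cursor; NULL signals the end - seq_list_next()'s shape. */
    static struct list_head *list_next(struct list_head *cur,
                                       struct list_head *head, long *ppos)
    {
            ++*ppos;
            return cur->next == head ? NULL : cur->next;
    }

    int main(void)
    {
            struct list_head head, a, b;           /* circular: head <-> a <-> b */

            head.next = &a; a.next = &b; b.next = &head;
            head.prev = &b; b.prev = &a; a.prev = &head;

            long pos = 1;
            struct list_head *lh = list_start(&head, pos);
            printf("%s\n", lh == &b ? "b" : "?");                     /* b   */
            printf("%s\n", list_next(lh, &head, &pos) ? "x" : "end"); /* end */
            return 0;
    }
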
D | aio.c |
      61          unsigned        head;   /* Written to by userland or under ring_lock  member
     183          struct wait_queue_head *head;  member
     546          ring->head = ring->tail = 0;  in aio_setup_ring()
     956  static void refill_reqs_available(struct kioctx *ctx, unsigned head,  in refill_reqs_available() argument
     962          head %= ctx->nr_events;  in refill_reqs_available()
     963          if (head <= tail)  in refill_reqs_available()
     964                  events_in_ring = tail - head;  in refill_reqs_available()
     966                  events_in_ring = ctx->nr_events - (head - tail);  in refill_reqs_available()
     990          unsigned head;  in user_refill_reqs_available() local
    1002          head = ring->head;  in user_refill_reqs_available()
    [all …]

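refill_reqs_available() computes how many completion events sit between the consumer head and the producer tail of a circular ring, handling the wrapped case separately. The same arithmetic, extracted into a standalone helper:

    #include <stdio.h>

    /* Events currently in a circular ring of nr slots, given the consumer
     * (head) and producer (tail) indices - the arithmetic used by
     * refill_reqs_available() above; tail is assumed already in range. */
    static unsigned ring_used(unsigned head, unsigned tail, unsigned nr)
    {
            head %= nr;                   /* userland may leave head un-wrapped */
            if (head <= tail)
                    return tail - head;   /* no wrap-around */
            return nr - (head - tail);    /* tail has wrapped past the end */
    }

    int main(void)
    {
            printf("%u\n", ring_used(2, 5, 8));   /* 3 */
            printf("%u\n", ring_used(6, 1, 8));   /* 3: slots 6, 7, 0 */
            return 0;
    }
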
/fs/gfs2/
D | recovery.c |
      57          struct list_head *head = &jd->jd_revoke_list;  in gfs2_revoke_add() local
      61          list_for_each_entry(rr, head, rr_list) {  in gfs2_revoke_add()
      79          list_add(&rr->rr_list, head);  in gfs2_revoke_add()
     110          struct list_head *head = &jd->jd_revoke_list;  in gfs2_revoke_clean() local
     113          while (!list_empty(head)) {  in gfs2_revoke_clean()
     114                  rr = list_entry(head->next, struct gfs2_revoke_replay, rr_list);  in gfs2_revoke_clean()
     121                             unsigned int blkno, struct gfs2_log_header_host *head)  in __get_log_header() argument
     142          head->lh_sequence = be64_to_cpu(lh->lh_sequence);  in __get_log_header()
     143          head->lh_flags = be32_to_cpu(lh->lh_flags);  in __get_log_header()
     144          head->lh_tail = be32_to_cpu(lh->lh_tail);  in __get_log_header()
    [all …]

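gfs2_revoke_clean() drains its list by repeatedly taking the first entry while the list is non-empty and freeing it. The same drain loop over a plain singly linked list (the kernel version uses list_head/list_entry() instead):

    #include <stdio.h>
    #include <stdlib.h>

    struct revoke {
            unsigned long long blkno;
            struct revoke *next;
    };

    /* Pop and free entries until the list is empty - the shape of
     * gfs2_revoke_clean() above, minus list_head/list_entry(). */
    static void revoke_clean(struct revoke **head)
    {
            while (*head) {                    /* kernel: !list_empty(head) */
                    struct revoke *rr = *head; /* kernel: list_entry(head->next, ...) */
                    *head = rr->next;
                    free(rr);
            }
    }

    int main(void)
    {
            struct revoke *head = NULL;

            for (int i = 0; i < 3; i++) {
                    struct revoke *rr = malloc(sizeof(*rr));
                    if (!rr)
                            break;
                    rr->blkno = (unsigned long long)i;
                    rr->next = head;
                    head = rr;
            }
            revoke_clean(&head);
            printf("%s\n", head ? "leaked" : "empty");   /* empty */
            return 0;
    }
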
D | lops.c |
     413                                    struct gfs2_log_header_host *head,  in gfs2_jhead_pg_srch() argument
     424                          if (lh.lh_sequence > head->lh_sequence)  in gfs2_jhead_pg_srch()
     425                                  *head = lh;  in gfs2_jhead_pg_srch()
     456                                   struct gfs2_log_header_host *head,  in gfs2_jhead_process_page() argument
     468                  *done = gfs2_jhead_pg_srch(jd, head, page);  in gfs2_jhead_process_page()
     484  int gfs2_find_jhead(struct gfs2_jdesc *jd, struct gfs2_log_header_host *head,  in gfs2_find_jhead() argument
     501          memset(head, 0, sizeof(*head));  in gfs2_find_jhead()
     551                          gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);  in gfs2_find_jhead()
     562                  gfs2_jhead_process_page(jd, blocks_read >> shift, head, &done);  in gfs2_find_jhead()
     704          struct list_head *head;  in buf_lo_after_commit() local
    [all …]

/fs/hfsplus/
D | btree.c |
     136          struct hfs_btree_header_rec *head;  in hfs_btree_open() local
     166          head = (struct hfs_btree_header_rec *)(kmap(page) +  in hfs_btree_open()
     168          tree->root = be32_to_cpu(head->root);  in hfs_btree_open()
     169          tree->leaf_count = be32_to_cpu(head->leaf_count);  in hfs_btree_open()
     170          tree->leaf_head = be32_to_cpu(head->leaf_head);  in hfs_btree_open()
     171          tree->leaf_tail = be32_to_cpu(head->leaf_tail);  in hfs_btree_open()
     172          tree->node_count = be32_to_cpu(head->node_count);  in hfs_btree_open()
     173          tree->free_nodes = be32_to_cpu(head->free_nodes);  in hfs_btree_open()
     174          tree->attributes = be32_to_cpu(head->attributes);  in hfs_btree_open()
     175          tree->node_size = be16_to_cpu(head->node_size);  in hfs_btree_open()
    [all …]

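hfs_btree_open() copies each big-endian on-disk header field into host byte order with be32_to_cpu()/be16_to_cpu(). A portable user-space equivalent that decodes from raw bytes, so it works regardless of host endianness:

    #include <stdint.h>
    #include <stdio.h>

    /* Decode big-endian on-disk fields from raw bytes, independent of
     * host byte order - the job be32_to_cpu()/be16_to_cpu() do above. */
    static uint32_t be32_to_host(const uint8_t *p)
    {
            return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
                   ((uint32_t)p[2] <<  8) |  (uint32_t)p[3];
    }

    static uint16_t be16_to_host(const uint8_t *p)
    {
            return (uint16_t)((p[0] << 8) | p[1]);
    }

    int main(void)
    {
            /* fake 6-byte header: root = 258, node_size = 4096 */
            const uint8_t disk[6] = { 0x00, 0x00, 0x01, 0x02, 0x10, 0x00 };

            printf("root=%u node_size=%u\n",
                   (unsigned)be32_to_host(disk),
                   (unsigned)be16_to_host(disk + 4));
            return 0;
    }
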
/fs/nilfs2/
D | segbuf.h |
      85  #define NILFS_LIST_SEGBUF(head)  \  argument
      86          list_entry((head), struct nilfs_segment_buffer, sb_list)
      89  #define NILFS_LAST_SEGBUF(head)   NILFS_LIST_SEGBUF((head)->prev)  argument
      90  #define NILFS_FIRST_SEGBUF(head)  NILFS_LIST_SEGBUF((head)->next)  argument
      91  #define NILFS_SEGBUF_IS_LAST(segbuf, head)  ((segbuf)->sb_list.next == (head))  argument
      97  #define NILFS_SEGBUF_FIRST_BH(head)  \  argument
      98          (list_entry((head)->next, struct buffer_head, b_assoc_buffers))
     102  #define NILFS_SEGBUF_BH_IS_LAST(bh, head)  ((bh)->b_assoc_buffers.next == head)  argument

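These macros are thin wrappers around list_entry(), i.e. container_of(): given a pointer to the embedded sb_list link, they recover the enclosing segment buffer, and first/last fall out of the circular head's ->next/->prev. A user-space sketch of the same construction (simplified types, not nilfs's):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    /* A user-space list_entry(): map an embedded link back to its
     * enclosing structure, exactly what the NILFS macros wrap. */
    #define list_entry(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct segbuf {
            int sum;
            struct list_head sb_list;          /* embedded link */
    };

    #define FIRST_SEGBUF(head) list_entry((head)->next, struct segbuf, sb_list)
    #define LAST_SEGBUF(head)  list_entry((head)->prev, struct segbuf, sb_list)

    int main(void)
    {
            struct list_head head;
            struct segbuf a = { 1 }, b = { 2 };

            /* circular: head <-> a <-> b */
            head.next = &a.sb_list; a.sb_list.next = &b.sb_list; b.sb_list.next = &head;
            head.prev = &b.sb_list; b.sb_list.prev = &a.sb_list; a.sb_list.prev = &head;

            printf("%d %d\n", FIRST_SEGBUF(&head)->sum,
                              LAST_SEGBUF(&head)->sum);   /* 1 2 */
            return 0;
    }
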
D | page.c |
     143          struct buffer_head *bh, *head;  in nilfs_page_buffers_clean() local
     145          bh = head = page_buffers(page);  in nilfs_page_buffers_clean()
     150          } while (bh != head);  in nilfs_page_buffers_clean()
     173          struct buffer_head *bh, *head;  in nilfs_page_bug() local
     176                  bh = head = page_buffers(page);  in nilfs_page_bug()
     183                  } while (bh != head);  in nilfs_page_bug()
     402          struct buffer_head *bh, *head;  in nilfs_clear_dirty_page() local
     408          bh = head = page_buffers(page);  in nilfs_clear_dirty_page()
     418          } while (bh = bh->b_this_page, bh != head);  in nilfs_clear_dirty_page()
     428          struct buffer_head *bh, *head;  in nilfs_page_count_clean_buffers() local
    [all …]

D | recovery.c |
     306                                 struct list_head *head)  in nilfs_scan_dsync_log() argument
     359                  list_add_tail(&rb->list, head);  in nilfs_scan_dsync_log()
     375  static void dispose_recovery_list(struct list_head *head)  in dispose_recovery_list() argument
     377          while (!list_empty(head)) {  in dispose_recovery_list()
     380                  rb = list_first_entry(head, struct nilfs_recovery_block, list);  in dispose_recovery_list()
     391  static int nilfs_segment_list_add(struct list_head *head, __u64 segnum)  in nilfs_segment_list_add() argument
     400          list_add_tail(&ent->list, head);  in nilfs_segment_list_add()
     404  void nilfs_dispose_segment_list(struct list_head *head)  in nilfs_dispose_segment_list() argument
     406          while (!list_empty(head)) {  in nilfs_dispose_segment_list()
     409                  ent = list_first_entry(head, struct nilfs_segment_entry, list);  in nilfs_dispose_segment_list()
    [all …]

/fs/9p/
D | vfs_dir.c |
      40          int head;  member
     108                  if (rdir->tail == rdir->head) {  in v9fs_dir_readdir()
     119                          rdir->head = 0;  in v9fs_dir_readdir()
     122                  while (rdir->head < rdir->tail) {  in v9fs_dir_readdir()
     123                          err = p9stat_read(fid->clnt, rdir->buf + rdir->head,  in v9fs_dir_readdir()
     124                                            rdir->tail - rdir->head, &st);  in v9fs_dir_readdir()
     136                          rdir->head += err;  in v9fs_dir_readdir()
     166                  if (rdir->tail == rdir->head) {  in v9fs_dir_readdir_dotl()
     172                          rdir->head = 0;  in v9fs_dir_readdir_dotl()
     176                  while (rdir->head < rdir->tail) {  in v9fs_dir_readdir_dotl()
    [all …]

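Both readdir paths consume a byte buffer through a head/tail pair: refill when head catches up to tail, then parse records and advance head by each record's size. A simplified sketch of that shape; real 9p stat records are variable-length, here they are a fixed 8 bytes for brevity:

    #include <stdio.h>
    #include <string.h>

    #define REC 8   /* pretend records are fixed-size; real 9p stats vary */

    struct rdir {
            char buf[64];
            int head;   /* next unconsumed byte */
            int tail;   /* end of valid data    */
    };

    static void consume(struct rdir *r)
    {
            if (r->tail == r->head) {          /* empty: refill the buffer */
                    memset(r->buf, 'x', sizeof(r->buf));
                    r->head = 0;
                    r->tail = 3 * REC;         /* as if the server sent 3 records */
            }
            while (r->head < r->tail) {
                    printf("record at %d\n", r->head);
                    r->head += REC;            /* advance past the parsed record */
            }
    }

    int main(void)
    {
            struct rdir r = { .head = 0, .tail = 0 };

            consume(&r);                       /* offsets 0, 8, 16 */
            return 0;
    }
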
/fs/hfs/
D | btree.c |
      22          struct hfs_btree_header_rec *head;  in hfs_btree_open() local
      83          head = (struct hfs_btree_header_rec *)(kmap(page) + sizeof(struct hfs_bnode_desc));  in hfs_btree_open()
      84          tree->root = be32_to_cpu(head->root);  in hfs_btree_open()
      85          tree->leaf_count = be32_to_cpu(head->leaf_count);  in hfs_btree_open()
      86          tree->leaf_head = be32_to_cpu(head->leaf_head);  in hfs_btree_open()
      87          tree->leaf_tail = be32_to_cpu(head->leaf_tail);  in hfs_btree_open()
      88          tree->node_count = be32_to_cpu(head->node_count);  in hfs_btree_open()
      89          tree->free_nodes = be32_to_cpu(head->free_nodes);  in hfs_btree_open()
      90          tree->attributes = be32_to_cpu(head->attributes);  in hfs_btree_open()
      91          tree->node_size = be16_to_cpu(head->node_size);  in hfs_btree_open()
    [all …]

/fs/nfs/
D | write.c |
     269  nfs_page_group_search_locked(struct nfs_page *head, unsigned int page_offset)  in nfs_page_group_search_locked() argument
     273          req = head;  in nfs_page_group_search_locked()
     280          } while (req != head);  in nfs_page_group_search_locked()
     386  nfs_unroll_locks(struct inode *inode, struct nfs_page *head,  in nfs_unroll_locks() argument
     392          for (tmp = head->wb_this_page; tmp != req; tmp = tmp->wb_this_page) {  in nfs_unroll_locks()
     470          struct nfs_page *head, *subreq;  in nfs_lock_and_join_requests() local
     481          head = nfs_page_find_head_request(page);  in nfs_lock_and_join_requests()
     482          if (!head)  in nfs_lock_and_join_requests()
     486          if (!nfs_lock_request(head)) {  in nfs_lock_and_join_requests()
     487                  ret = nfs_wait_on_request(head);  in nfs_lock_and_join_requests()
    [all …]

D | pagelist.c |
     147          struct nfs_page *head = req->wb_head;  in nfs_page_group_lock() local
     149          WARN_ON_ONCE(head != head->wb_head);  in nfs_page_group_lock()
     151          if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))  in nfs_page_group_lock()
     154          set_bit(PG_CONTENDED1, &head->wb_flags);  in nfs_page_group_lock()
     156          return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,  in nfs_page_group_lock()
     167          struct nfs_page *head = req->wb_head;  in nfs_page_group_unlock() local
     169          WARN_ON_ONCE(head != head->wb_head);  in nfs_page_group_unlock()
     172          clear_bit(PG_HEADLOCK, &head->wb_flags);  in nfs_page_group_unlock()
     174          if (!test_bit(PG_CONTENDED1, &head->wb_flags))  in nfs_page_group_unlock()
     176          wake_up_bit(&head->wb_flags, PG_HEADLOCK);  in nfs_page_group_unlock()
    [all …]

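nfs_page_group_lock()/nfs_page_group_unlock() implement a bit lock with a cheap fast path: try to grab PG_HEADLOCK with test_and_set_bit(); on contention, set PG_CONTENDED1 so the unlocker knows a wakeup is worth issuing at all. A user-space approximation with C11 atomics; where the kernel sleeps on a wait queue (wait_on_bit_lock()/wake_up_bit()), this sketch just spins, and the contended flag stays set once raised:

    #include <stdatomic.h>
    #include <stdio.h>

    #define HEADLOCK  (1u << 0)
    #define CONTENDED (1u << 1)

    static void group_lock(atomic_uint *flags)
    {
            /* fast path: uncontended acquire */
            if (!(atomic_fetch_or(flags, HEADLOCK) & HEADLOCK))
                    return;

            atomic_fetch_or(flags, CONTENDED);   /* record that someone waits */
            while (atomic_fetch_or(flags, HEADLOCK) & HEADLOCK)
                    ;                            /* kernel: wait_on_bit_lock() */
    }

    static void group_unlock(atomic_uint *flags)
    {
            atomic_fetch_and(flags, ~HEADLOCK);
            /* only pay for a wakeup if contention was ever flagged */
            if (atomic_load(flags) & CONTENDED)
                    puts("wake waiters");        /* kernel: wake_up_bit() */
    }

    int main(void)
    {
            atomic_uint flags = 0;

            group_lock(&flags);
            group_unlock(&flags);
            puts("ok");
            return 0;
    }

The second flag exists because wake_up_bit() hashes into a shared wait-queue table and is not free; skipping it saves that work on the common uncontended unlock.
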
/fs/befs/
D | btree.c |
      82          befs_host_btree_nodehead head;  /* head of node converted to cpu byteorder */  member
     214          node->head.left = fs64_to_cpu(sb, node->od_node->left);  in befs_bt_read_node()
     215          node->head.right = fs64_to_cpu(sb, node->od_node->right);  in befs_bt_read_node()
     216          node->head.overflow = fs64_to_cpu(sb, node->od_node->overflow);  in befs_bt_read_node()
     217          node->head.all_key_count =  in befs_bt_read_node()
     219          node->head.all_key_length =  in befs_bt_read_node()
     283                  node_off = this_node->head.overflow;  in befs_btree_find()
     345          last = node->head.all_key_count - 1;  in befs_find_key()
     459          while (key_sum + this_node->head.all_key_count <= key_no) {  in befs_btree_read()
     462                  if (this_node->head.right == BEFS_BT_INVAL) {  in befs_btree_read()
    [all …]

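The befs lookups are plain binary searches whose midpoint is computed as head + (back - head) / 2, which cannot overflow the way (head + back) / 2 can; fs/erofs/namei.c below uses the identical idiom. A standalone version:

    #include <stdio.h>

    /* Binary search with the overflow-safe midpoint used above. */
    static int bsearch_int(const int *a, int n, int key)
    {
            int head = 0, back = n - 1;

            while (head <= back) {
                    const int mid = head + (back - head) / 2;  /* never overflows */

                    if (a[mid] == key)
                            return mid;
                    if (a[mid] < key)
                            head = mid + 1;
                    else
                            back = mid - 1;
            }
            return -1;   /* not found */
    }

    int main(void)
    {
            const int a[] = { 2, 3, 5, 7, 11, 13 };

            printf("%d\n", bsearch_int(a, 6, 7));   /* 3 */
            return 0;
    }
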
/fs/btrfs/
D | delayed-ref.c |
     395                                struct btrfs_delayed_ref_head *head)  in btrfs_delayed_ref_lock() argument
     398          if (mutex_trylock(&head->mutex))  in btrfs_delayed_ref_lock()
     401          refcount_inc(&head->refs);  in btrfs_delayed_ref_lock()
     404          mutex_lock(&head->mutex);  in btrfs_delayed_ref_lock()
     406          if (RB_EMPTY_NODE(&head->href_node)) {  in btrfs_delayed_ref_lock()
     407                  mutex_unlock(&head->mutex);  in btrfs_delayed_ref_lock()
     408                  btrfs_put_delayed_ref_head(head);  in btrfs_delayed_ref_lock()
     411          btrfs_put_delayed_ref_head(head);  in btrfs_delayed_ref_lock()
     417                              struct btrfs_delayed_ref_head *head,  in drop_delayed_ref() argument
     420          lockdep_assert_held(&head->lock);  in drop_delayed_ref()
    [all …]

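btrfs_delayed_ref_lock() handles an object that can be unlinked (and later freed) while we sleep on its mutex: try the lock; on contention pin the head with an extra reference, take the mutex, then recheck that the head is still in the tree before proceeding. A user-space sketch of that pattern with hypothetical types and a bool return in place of the kernel's error codes:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    /* Hypothetical stand-in for a delayed-ref head: a refcounted object
     * that can be unlinked from its tree while we sleep on its mutex. */
    struct head {
            pthread_mutex_t mutex;
            atomic_int refs;
            bool in_tree;     /* kernel: !RB_EMPTY_NODE(&head->href_node) */
    };

    static void put_head(struct head *h)
    {
            if (atomic_fetch_sub(&h->refs, 1) == 1) {
                    /* last reference: the kernel would free the object here */
            }
    }

    /* Try the lock; on contention pin the object, sleep, then recheck
     * that it is still live - the shape of btrfs_delayed_ref_lock(). */
    static bool lock_head(struct head *h)
    {
            if (pthread_mutex_trylock(&h->mutex) == 0)
                    return true;                 /* fast path */

            atomic_fetch_add(&h->refs, 1);       /* keep h alive while blocked */
            pthread_mutex_lock(&h->mutex);
            if (!h->in_tree) {                   /* removed while we slept */
                    pthread_mutex_unlock(&h->mutex);
                    put_head(h);
                    return false;                /* caller must retry lookup */
            }
            put_head(h);
            return true;                         /* locked and still live */
    }

    int main(void)
    {
            struct head h = { PTHREAD_MUTEX_INITIALIZER, 1, true };

            return lock_head(&h) ? 0 : 1;
    }
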
D | extent-tree.c |
     154          struct btrfs_delayed_ref_head *head;  in btrfs_lookup_extent_info() local
     238          head = btrfs_find_delayed_ref_head(delayed_refs, bytenr);  in btrfs_lookup_extent_info()
     239          if (head) {  in btrfs_lookup_extent_info()
     240                  if (!mutex_trylock(&head->mutex)) {  in btrfs_lookup_extent_info()
     241                          refcount_inc(&head->refs);  in btrfs_lookup_extent_info()
     250                          mutex_lock(&head->mutex);  in btrfs_lookup_extent_info()
     251                          mutex_unlock(&head->mutex);  in btrfs_lookup_extent_info()
     252                          btrfs_put_delayed_ref_head(head);  in btrfs_lookup_extent_info()
     255                  spin_lock(&head->lock);  in btrfs_lookup_extent_info()
     256                  if (head->extent_op && head->extent_op->update_flags)  in btrfs_lookup_extent_info()
    [all …]

D | delayed-ref.h |
     329  static inline void btrfs_put_delayed_ref_head(struct btrfs_delayed_ref_head *head)  in btrfs_put_delayed_ref_head() argument
     331          if (refcount_dec_and_test(&head->refs))  in btrfs_put_delayed_ref_head()
     332                  kmem_cache_free(btrfs_delayed_ref_head_cachep, head);  in btrfs_put_delayed_ref_head()
     348                             struct btrfs_delayed_ref_head *head);
     354                             struct btrfs_delayed_ref_head *head);
     355  static inline void btrfs_delayed_ref_unlock(struct btrfs_delayed_ref_head *head)  in btrfs_delayed_ref_unlock() argument
     357          mutex_unlock(&head->mutex);  in btrfs_delayed_ref_unlock()
     360                             struct btrfs_delayed_ref_head *head);

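btrfs_put_delayed_ref_head() is the classic drop-a-reference helper: only the caller that brings the count to zero frees the object. The same shape with C11 atomics, with free() standing in for kmem_cache_free():

    #include <stdatomic.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            atomic_int refs;
            /* ... payload ... */
    };

    /* Only the caller that drops the count to zero frees the object. */
    static void obj_put(struct obj *o)
    {
            /* fetch_sub returns the old value; old == 1 means we hit zero */
            if (atomic_fetch_sub_explicit(&o->refs, 1, memory_order_acq_rel) == 1)
                    free(o);            /* kernel: kmem_cache_free() */
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (!o)
                    return 1;
            atomic_init(&o->refs, 2);
            obj_put(o);                 /* 2 -> 1: still alive */
            obj_put(o);                 /* 1 -> 0: freed       */
            puts("done");
            return 0;
    }
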
/fs/erofs/
D | namei.c |
      51          int head, back;  in find_target_dirent() local
      56          head = 1;  in find_target_dirent()
      60          while (head <= back) {  in find_target_dirent()
      61                  const int mid = head + (back - head) / 2;  in find_target_dirent()
      79                          head = mid + 1;  in find_target_dirent()
      95          int head, back;  in find_target_block_classic() local
     100          head = 0;  in find_target_block_classic()
     103          while (head <= back) {  in find_target_block_classic()
     104                  const int mid = head + (back - head) / 2;  in find_target_block_classic()
     145                          head = mid + 1;  in find_target_block_classic()

/fs/ext4/
D | page-io.c |
      70          struct buffer_head *bh, *head;  in ext4_finish_bio() local
      88          bh = head = page_buffers(page);  in ext4_finish_bio()
      94          bit_spin_lock(BH_Uptodate_Lock, &head->b_state);  in ext4_finish_bio()
     105          } while ((bh = bh->b_this_page) != head);  in ext4_finish_bio()
     106          bit_spin_unlock(BH_Uptodate_Lock, &head->b_state);  in ext4_finish_bio()
     165  static void dump_completed_IO(struct inode *inode, struct list_head *head)  in dump_completed_IO() argument
     171          if (list_empty(head))  in dump_completed_IO()
     175          list_for_each_entry(io, head, list) {  in dump_completed_IO()
     208                                     struct list_head *head)  in ext4_do_flush_completed_IO() argument
     217          dump_completed_IO(inode, head);  in ext4_do_flush_completed_IO()
    [all …]

D | fsmap.c |
     624  int ext4_getfsmap(struct super_block *sb, struct ext4_fsmap_head *head,  in ext4_getfsmap() argument
     633          if (head->fmh_iflags & ~FMH_IF_VALID)  in ext4_getfsmap()
     635          if (!ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[0]) ||  in ext4_getfsmap()
     636              !ext4_getfsmap_is_valid_device(sb, &head->fmh_keys[1]))  in ext4_getfsmap()
     639          head->fmh_entries = 0;  in ext4_getfsmap()
     665          dkeys[0] = head->fmh_keys[0];  in ext4_getfsmap()
     671          if (!ext4_getfsmap_check_keys(dkeys, &head->fmh_keys[1]))  in ext4_getfsmap()
     674          info.gfi_next_fsblk = head->fmh_keys[0].fmr_physical +  in ext4_getfsmap()
     675                                head->fmh_keys[0].fmr_length;  in ext4_getfsmap()
     678          info.gfi_head = head;  in ext4_getfsmap()
    [all …]

/fs/xfs/
D | xfs_fsmap.c |
     156          struct xfs_fsmap_head *head;  member
     257          if (info->head->fmh_count == 0) {  in xfs_getfsmap_helper()
     259                  info->head->fmh_entries++;  in xfs_getfsmap_helper()
     264                  info->head->fmh_entries++;  in xfs_getfsmap_helper()
     278          if (info->head->fmh_entries >= info->head->fmh_count)  in xfs_getfsmap_helper()
     290          info->head->fmh_entries++;  in xfs_getfsmap_helper()
     297          if (info->head->fmh_entries >= info->head->fmh_count)  in xfs_getfsmap_helper()
     325          info->head->fmh_entries++;  in xfs_getfsmap_helper()
     817          struct xfs_fsmap_head *head,  in xfs_getfsmap() argument
     829          if (head->fmh_iflags & ~FMH_IF_VALID)  in xfs_getfsmap()
    [all …]

D | xfs_log.c |
      43                  atomic64_t *head);
     119          atomic64_t      *head,  in xlog_grant_sub_space() argument
     122          int64_t head_val = atomic64_read(head);  in xlog_grant_sub_space()
     138                  head_val = atomic64_cmpxchg(head, old, new);  in xlog_grant_sub_space()
     145          atomic64_t      *head,  in xlog_grant_add_space() argument
     148          int64_t head_val = atomic64_read(head);  in xlog_grant_add_space()
     167                  head_val = atomic64_cmpxchg(head, old, new);  in xlog_grant_add_space()
     173          struct xlog_grant_head  *head)  in xlog_grant_head_init() argument
     175          xlog_assign_grant_head(&head->grant, 1, 0);  in xlog_grant_head_init()
     176          INIT_LIST_HEAD(&head->waiters);  in xlog_grant_head_init()
    [all …]

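xlog_grant_add_space() packs a cycle number (high 32 bits) and a byte offset (low 32 bits) into one 64-bit grant head and advances it lock-free with a cmpxchg retry loop. A user-space sketch of that update with C11 atomics; LOG_SIZE is a made-up constant (the kernel derives the size from the log, and the _sub_space variant runs the same loop in reverse):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    #define LOG_SIZE (1u << 20)   /* hypothetical log size in bytes */

    static void grant_add(_Atomic int64_t *head, int bytes)
    {
            int64_t old = atomic_load(head);
            int64_t newv;

            do {
                    int32_t cycle  = (int32_t)(old >> 32);
                    int32_t offset = (int32_t)old + bytes;

                    if (offset >= (int32_t)LOG_SIZE) {   /* wrapped: bump cycle */
                            offset -= (int32_t)LOG_SIZE;
                            cycle++;
                    }
                    newv = ((int64_t)cycle << 32) | (uint32_t)offset;
                    /* retry if another thread moved the head under us */
            } while (!atomic_compare_exchange_weak(head, &old, newv));
    }

    int main(void)
    {
            _Atomic int64_t head = (int64_t)1 << 32;   /* cycle 1, offset 0 */

            grant_add(&head, (int)LOG_SIZE - 8);
            grant_add(&head, 16);                      /* wraps into cycle 2 */
            printf("cycle=%d offset=%d\n",
                   (int)(atomic_load(&head) >> 32),
                   (int)(int32_t)atomic_load(&head));  /* cycle=2 offset=8 */
            return 0;
    }

Packing both fields into one atomic64 lets readers snapshot cycle and offset together without a lock, which is the point of the cmpxchg loop.
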
/fs/nfs/flexfilelayout/
D | flexfilelayoutdev.c |
     227          struct list_head *head = &flo->error_list;  in ff_layout_add_ds_error_locked() local
     237                          head = &err->list;  in ff_layout_add_ds_error_locked()
     247          list_add_tail(&dserr->list, head);  in ff_layout_add_ds_error_locked()
     459  void ff_layout_free_ds_ioerr(struct list_head *head)  in ff_layout_free_ds_ioerr() argument
     463          while (!list_empty(head)) {  in ff_layout_free_ds_ioerr()
     464                  err = list_first_entry(head,  in ff_layout_free_ds_ioerr()
     473  int ff_layout_encode_ds_ioerr(struct xdr_stream *xdr, const struct list_head *head)  in ff_layout_encode_ds_ioerr() argument
     478          list_for_each_entry(err, head, list) {  in ff_layout_encode_ds_ioerr()
     508                                  struct list_head *head,  in do_layout_fetch_ds_ioerr() argument
     525                          list_move(&err->list, head);  in do_layout_fetch_ds_ioerr()
    [all …]