Home
last modified time | relevance | path

Searched refs:rb_entry (Results 1 – 25 of 263) sorted by relevance

1 2 3 4 5 6 7 8 9 10 11

/kernel/linux/linux-5.10/tools/perf/tests/
Dhists_output.c109 he = rb_entry(node, struct hist_entry, rb_node); in del_hist_entries()
167 he = rb_entry(node, struct hist_entry, rb_node); in test1()
173 he = rb_entry(node, struct hist_entry, rb_node); in test1()
179 he = rb_entry(node, struct hist_entry, rb_node); in test1()
185 he = rb_entry(node, struct hist_entry, rb_node); in test1()
191 he = rb_entry(node, struct hist_entry, rb_node); in test1()
197 he = rb_entry(node, struct hist_entry, rb_node); in test1()
203 he = rb_entry(node, struct hist_entry, rb_node); in test1()
209 he = rb_entry(node, struct hist_entry, rb_node); in test1()
215 he = rb_entry(node, struct hist_entry, rb_node); in test1()
[all …]
/kernel/linux/linux-5.10/fs/f2fs/
Dextent_cache.c18 static struct rb_entry *__lookup_rb_tree_fast(struct rb_entry *cached_re, in __lookup_rb_tree_fast()
30 static struct rb_entry *__lookup_rb_tree_slow(struct rb_root_cached *root, in __lookup_rb_tree_slow()
34 struct rb_entry *re; in __lookup_rb_tree_slow()
37 re = rb_entry(node, struct rb_entry, rb_node); in __lookup_rb_tree_slow()
49 struct rb_entry *f2fs_lookup_rb_tree(struct rb_root_cached *root, in f2fs_lookup_rb_tree()
50 struct rb_entry *cached_re, unsigned int ofs) in f2fs_lookup_rb_tree()
52 struct rb_entry *re; in f2fs_lookup_rb_tree()
67 struct rb_entry *re; in f2fs_lookup_rb_tree_ext()
71 re = rb_entry(*parent, struct rb_entry, rb_node); in f2fs_lookup_rb_tree_ext()
90 struct rb_entry *re; in f2fs_lookup_rb_tree_for_insert()
[all …]
/kernel/linux/linux-5.10/include/linux/
Dinterval_tree_generic.h48 parent = rb_entry(rb_parent, ITSTRUCT, ITRB); \
89 ITSTRUCT *left = rb_entry(node->ITRB.rb_left, \
108 node = rb_entry(node->ITRB.rb_right, \
140 node = rb_entry(root->rb_root.rb_node, ITSTRUCT, ITRB); \
144 leftmost = rb_entry(root->rb_leftmost, ITSTRUCT, ITRB); \
165 ITSTRUCT *right = rb_entry(rb, ITSTRUCT, ITRB); \
177 node = rb_entry(rb, ITSTRUCT, ITRB); \
Drbtree_augmented.h80 RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
89 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
90 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
96 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
97 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
127 child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
132 child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
/kernel/linux/linux-5.10/fs/jffs2/
Dnodelist.h334 return rb_entry(node, struct jffs2_node_frag, rb); in frag_first()
344 return rb_entry(node, struct jffs2_node_frag, rb); in frag_last()
347 #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
348 #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
349 #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
350 #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
351 #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
354 #define tn_next(tn) rb_entry(rb_next(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
355 #define tn_prev(tn) rb_entry(rb_prev(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
356 #define tn_parent(tn) rb_entry(rb_parent(&(tn)->rb), struct jffs2_tmp_dnode_info, rb)
[all …]
/kernel/linux/linux-5.10/drivers/block/drbd/
Ddrbd_interval.c12 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); in interval_end()
34 rb_entry(*new, struct drbd_interval, rb); in drbd_insert_interval()
75 rb_entry(node, struct drbd_interval, rb); in drbd_contains_interval()
122 rb_entry(node, struct drbd_interval, rb); in drbd_find_overlap()
151 i = rb_entry(node, struct drbd_interval, rb); in drbd_next_overlap()
/kernel/linux/linux-5.10/tools/include/linux/
Drbtree_augmented.h82 RBSTRUCT *node = rb_entry(rb, RBSTRUCT, RBFIELD); \
91 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
92 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
98 RBSTRUCT *old = rb_entry(rb_old, RBSTRUCT, RBFIELD); \
99 RBSTRUCT *new = rb_entry(rb_new, RBSTRUCT, RBFIELD); \
129 child = rb_entry(node->RBFIELD.rb_left, RBSTRUCT, RBFIELD); \
134 child = rb_entry(node->RBFIELD.rb_right, RBSTRUCT, RBFIELD); \
/kernel/linux/linux-5.10/net/ceph/
Ddebugfs.c71 rb_entry(n, struct ceph_pg_pool_info, node); in osdmap_show()
94 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
105 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
112 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
123 rb_entry(n, struct ceph_pg_mapping, node); in osdmap_show()
162 req = rb_entry(rp, struct ceph_mon_generic_request, node); in monc_show()
241 rb_entry(n, struct ceph_osd_request, r_node); in dump_requests()
267 rb_entry(n, struct ceph_osd_linger_request, node); in dump_linger_requests()
328 rb_entry(n, struct ceph_osd_backoff, id_node); in dump_backoffs()
353 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node); in osdc_show()
[all …]
/kernel/linux/linux-5.10/security/keys/
Dproc.c70 struct key *key = rb_entry(n, struct key, serial_node); in key_serial_next()
85 struct key *key = rb_entry(n, struct key, serial_node); in find_ge_key()
108 minkey = rb_entry(n, struct key, serial_node); in find_ge_key()
131 struct key *key = rb_entry(n, struct key, serial_node); in key_node_serial()
156 struct key *key = rb_entry(_p, struct key, serial_node); in proc_keys_show()
255 struct key_user *user = rb_entry(n, struct key_user, node); in __key_user_next()
306 struct key_user *user = rb_entry(_p, struct key_user, node); in proc_key_users_show()
/kernel/linux/linux-5.10/arch/powerpc/kernel/
Deeh_cache.c60 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_get_device()
106 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in eeh_addr_cache_print()
128 piar = rb_entry(parent, struct pci_io_addr_range, rb_node); in eeh_addr_cache_insert()
221 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in __eeh_addr_cache_rmv_dev()
271 piar = rb_entry(n, struct pci_io_addr_range, rb_node); in eeh_addr_cache_show()
/kernel/linux/linux-5.10/fs/btrfs/
Dextent_map.c105 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
120 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
127 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
130 entry = rb_entry(parent, struct extent_map, rb_node); in tree_insert()
156 entry = rb_entry(n, struct extent_map, rb_node); in __tree_search()
172 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
179 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
182 prev_entry = rb_entry(prev, struct extent_map, rb_node); in __tree_search()
254 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
273 merge = rb_entry(rb, struct extent_map, rb_node); in try_merge_map()
[all …]
Dordered-data.c41 entry = rb_entry(parent, struct btrfs_ordered_extent, rb_node); in tree_insert()
70 entry = rb_entry(n, struct btrfs_ordered_extent, rb_node); in __tree_search()
88 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
96 prev_entry = rb_entry(prev, struct btrfs_ordered_extent, in __tree_search()
102 prev_entry = rb_entry(test, struct btrfs_ordered_extent, in __tree_search()
143 entry = rb_entry(tree->last, struct btrfs_ordered_extent, in tree_search()
332 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_first_ordered_pending()
403 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_dec_test_ordered_pending()
755 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_lookup_ordered_extent()
785 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); in btrfs_lookup_ordered_range()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/
Ddrm_vma_manager.c152 node = rb_entry(iter, struct drm_mm_node, rb); in drm_vma_offset_lookup_locked()
282 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_allow()
335 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_revoke()
376 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_is_allowed()
/kernel/linux/linux-5.10/lib/
Drbtree_test.c39 if (key < rb_entry(parent, struct test_node, rb)->key) in insert()
57 if (key < rb_entry(parent, struct test_node, rb)->key) in insert_cached()
95 parent = rb_entry(rb_parent, struct test_node, rb); in RB_DECLARE_CALLBACKS_MAX()
120 parent = rb_entry(rb_parent, struct test_node, rb); in insert_augmented_cached()
198 struct test_node *node = rb_entry(rb, struct test_node, rb); in check()
224 struct test_node *node = rb_entry(rb, struct test_node, rb); in check_augmented()
227 subtree = rb_entry(node->rb.rb_left, struct test_node, in check_augmented()
233 subtree = rb_entry(node->rb.rb_right, struct test_node, in check_augmented()
/kernel/linux/linux-5.10/fs/ext4/
Dextents_status.c188 es = rb_entry(node, struct extent_status, rb_node); in ext4_es_print_tree()
217 es = rb_entry(node, struct extent_status, rb_node); in __es_tree_search()
231 return node ? rb_entry(node, struct extent_status, rb_node) : in __es_tree_search()
287 es1 = rb_entry(node, struct extent_status, rb_node); in __es_find_extent_range()
548 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_left()
572 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_try_to_merge_right()
766 es = rb_entry(parent, struct extent_status, rb_node); in __es_insert_extent()
949 es1 = rb_entry(node, struct extent_status, rb_node); in ext4_es_lookup_extent()
973 es1 = rb_entry(node, struct extent_status, in ext4_es_lookup_extent()
1030 rc->left_es = node ? rb_entry(node, in init_rsvd()
[all …]
Dblock_validity.c79 entry = rb_entry(parent, struct ext4_system_zone, node); in add_system_zone()
103 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
115 entry = rb_entry(node, struct ext4_system_zone, node); in add_system_zone()
137 entry = rb_entry(node, struct ext4_system_zone, node); in debug_print_tree()
326 entry = rb_entry(n, struct ext4_system_zone, node); in ext4_inode_block_valid()
/kernel/linux/linux-5.10/tools/perf/util/
Dhist.c248 n = rb_entry(next, struct hist_entry, rb_node); in hists__output_recalc_col_len()
338 child = rb_entry(node, struct hist_entry, rb_node); in hists__decay_entry()
381 n = rb_entry(next, struct hist_entry, rb_node); in hists__decay_entries()
397 n = rb_entry(next, struct hist_entry, rb_node); in hists__delete_entries()
411 n = rb_entry(next, struct hist_entry, rb_node); in hists__get_entry()
596 he = rb_entry(parent, struct hist_entry, rb_node_in); in hists__findnew_entry()
1456 iter = rb_entry(parent, struct hist_entry, rb_node_in); in hierarchy_insert_entry()
1575 iter = rb_entry(parent, struct hist_entry, rb_node_in); in hists__collapse_insert_entry()
1653 n = rb_entry(next, struct hist_entry, rb_node_in); in hists__collapse_resort()
1738 he = rb_entry(node, struct hist_entry, rb_node); in hierarchy_recalc_total_periods()
[all …]
Drb_resort.h66 a = rb_entry(nda, struct __name##_sorted_entry, rb_node); \
67 b = rb_entry(ndb, struct __name##_sorted_entry, rb_node); \
129 __name##_entry = rb_entry(__nd, struct __name##_sorted_entry, \
Denv.c31 node = rb_entry(parent, struct bpf_prog_info_node, rb_node); in perf_env__insert_bpf_prog_info()
59 node = rb_entry(n, struct bpf_prog_info_node, rb_node); in perf_env__find_bpf_prog_info()
87 node = rb_entry(parent, struct btf_node, rb_node); in perf_env__insert_btf()
116 node = rb_entry(n, struct btf_node, rb_node); in perf_env__find_btf()
145 node = rb_entry(next, struct bpf_prog_info_node, rb_node); in perf_env__purge_bpf()
160 node = rb_entry(next, struct btf_node, rb_node); in perf_env__purge_bpf()
Dblock-range.c23 struct block_range *entry = rb_entry(rb, struct block_range, node); in block_range__debug()
41 entry = rb_entry(parent, struct block_range, node); in block_range__find()
90 entry = rb_entry(parent, struct block_range, node); in block_range__create()
117 next = rb_entry(n, struct block_range, node); in block_range__create()
Dintlist.h49 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__first()
57 return rn ? rb_entry(rn, struct int_node, rb_node) : NULL; in intlist__next()
Dstrlist.h61 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__first()
69 return rn ? rb_entry(rn, struct str_node, rb_node) : NULL; in strlist__next()
/kernel/linux/linux-5.10/fs/ocfs2/
Dreservations.c87 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_dump_resv()
142 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_check_resmap()
277 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_resmap_clear_all_resv()
318 tmp = rb_entry(parent, struct ocfs2_alloc_reservation, r_node); in ocfs2_resv_insert()
369 resv = rb_entry(node, struct ocfs2_alloc_reservation, r_node); in ocfs2_find_resv_lhs()
527 next_resv = rb_entry(next, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
564 next_resv = rb_entry(next, in __ocfs2_resv_find_window()
607 prev_resv = rb_entry(prev, struct ocfs2_alloc_reservation, in __ocfs2_resv_find_window()
/kernel/linux/linux-5.10/drivers/base/regmap/
Dregcache-rbtree.c80 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_lookup()
107 rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node); in regcache_rbtree_insert()
150 n = rb_entry(node, struct regcache_rbtree_node, node); in rbtree_show()
227 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); in regcache_rbtree_exit()
404 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, in regcache_rbtree_write()
476 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_sync()
516 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_drop()
/kernel/linux/linux-5.10/Documentation/core-api/
Drbtree.rst71 individual members may be accessed directly via rb_entry(node, type, member).
188 rb_entry(node, type, member).
194 printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);
309 node = rb_entry(root->rb_node, struct interval_tree_node, rb);
314 rb_entry(node->rb.rb_left,
333 node = rb_entry(node->rb.rb_right,
350 subtree_last = rb_entry(node->rb.rb_left,
356 subtree_last = rb_entry(node->rb.rb_right,
368 rb_entry(rb, struct interval_tree_node, rb);
380 rb_entry(rb_old, struct interval_tree_node, rb);
[all …]

1 2 3 4 5 6 7 8 9 10 11