
Searched refs:rb_entry (Results 1 – 25 of 52) sorted by relevance
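Every hit below follows the same pattern: rb_entry() converts a pointer to an embedded struct rb_node back into the structure that contains it; in include/linux/rbtree.h it is defined as container_of(). A minimal userspace sketch of that mechanic follows; struct my_item is hypothetical, struct rb_node and container_of() are simplified stand-ins for the kernel definitions, and only the rb_entry() macro itself mirrors the kernel verbatim.

/*
 * Sketch of what rb_entry() does at every call site below: recover the
 * structure that embeds a struct rb_node.  Simplified, userspace-only.
 */
#include <stddef.h>
#include <stdio.h>

struct rb_node {                        /* stand-in for the kernel's struct rb_node */
	struct rb_node *rb_left, *rb_right;
};

/* Simplified container_of(); the kernel adds type checking on top of this. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Verbatim from include/linux/rbtree.h. */
#define rb_entry(ptr, type, member) container_of(ptr, type, member)

struct my_item {                        /* hypothetical payload with an embedded node */
	unsigned long key;
	struct rb_node rb;
};

int main(void)
{
	struct my_item item = { .key = 42 };
	struct rb_node *node = &item.rb;    /* what a tree walk hands back */

	/* Map the node pointer back to its containing object. */
	struct my_item *it = rb_entry(node, struct my_item, rb);
	printf("key = %lu\n", it->key);     /* prints: key = 42 */
	return 0;
}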


/drivers/block/drbd/
drbd_interval.c
12 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); in interval_end()
34 rb_entry(*new, struct drbd_interval, rb); in drbd_insert_interval()
75 rb_entry(node, struct drbd_interval, rb); in drbd_contains_interval()
122 rb_entry(node, struct drbd_interval, rb); in drbd_find_overlap()
151 i = rb_entry(node, struct drbd_interval, rb); in drbd_next_overlap()
/drivers/gpu/drm/
drm_vma_manager.c
152 node = rb_entry(iter, struct drm_mm_node, rb); in drm_vma_offset_lookup_locked()
282 entry = rb_entry(*iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_allow()
335 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_revoke()
376 entry = rb_entry(iter, struct drm_vma_offset_file, vm_rb); in drm_vma_node_is_allowed()
drm_mm.c
180 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
199 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
220 if (x < expr(rb_entry(rb, struct drm_mm_node, member))) \
234 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; in rb_to_hole_size()
296 return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size; in rb_hole_size()
306 rb_entry(rb, struct drm_mm_node, rb_hole_size); in best_hole()
drm_prime.c
118 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); in drm_prime_add_buf_handle()
133 pos = rb_entry(rb, struct drm_prime_member, handle_rb); in drm_prime_add_buf_handle()
154 member = rb_entry(rb, struct drm_prime_member, handle_rb); in drm_prime_lookup_buf_by_handle()
176 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); in drm_prime_lookup_buf_handle()
199 member = rb_entry(rb, struct drm_prime_member, dmabuf_rb); in drm_prime_remove_buf_handle_locked()
/drivers/base/regmap/
regcache-rbtree.c
80 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_lookup()
107 rbnode_tmp = rb_entry(*new, struct regcache_rbtree_node, node); in regcache_rbtree_insert()
150 n = rb_entry(node, struct regcache_rbtree_node, node); in rbtree_show()
227 rbtree_node = rb_entry(next, struct regcache_rbtree_node, node); in regcache_rbtree_exit()
405 rbnode_tmp = rb_entry(node, struct regcache_rbtree_node, in regcache_rbtree_write()
477 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_sync()
517 rbnode = rb_entry(node, struct regcache_rbtree_node, node); in regcache_rbtree_drop()
/drivers/iommu/
iova.c
139 cached_iova = rb_entry(iovad->cached32_node, struct iova, node); in __cached_rbnode_delete_update()
147 cached_iova = rb_entry(iovad->cached_node, struct iova, node); in __cached_rbnode_delete_update()
162 struct iova *this = rb_entry(*new, struct iova, node); in iova_insert_rbtree()
200 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
206 curr_iova = rb_entry(curr, struct iova, node); in __alloc_and_insert_iova_range()
325 struct iova *iova = rb_entry(node, struct iova, node); in private_find_iova()
608 struct iova *iova = rb_entry(node, struct iova, node); in __is_range_overlap()
676 iova = rb_entry(node, struct iova, node); in reserve_iova()
713 struct iova *iova = rb_entry(node, struct iova, node); in copy_reserved_iova()
/drivers/android/
binder_alloc.c
87 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_free_buffer()
112 buffer = rb_entry(parent, struct binder_buffer, rb_node); in binder_insert_allocated_buffer_locked()
137 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_prepare_to_free_locked()
393 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
417 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
426 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
444 buffer = rb_entry(best_fit, struct binder_buffer, rb_node); in binder_alloc_new_buf_locked()
751 buffer = rb_entry(n, struct binder_buffer, rb_node); in binder_alloc_deferred_release()
828 rb_entry(n, struct binder_buffer, rb_node)); in binder_alloc_print_allocated()
binder.c
979 thread = rb_entry(n, struct binder_thread, rb_node); in binder_wakeup_poll_threads_ilocked()
1221 node = rb_entry(n, struct binder_node, rb_node); in binder_get_node_ilocked()
1269 node = rb_entry(parent, struct binder_node, rb_node); in binder_init_node_ilocked()
1554 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref_olocked()
1601 ref = rb_entry(parent, struct binder_ref, rb_node_node); in binder_get_ref_for_node_olocked()
1622 ref = rb_entry(n, struct binder_ref, rb_node_desc); in binder_get_ref_for_node_olocked()
1631 ref = rb_entry(parent, struct binder_ref, rb_node_desc); in binder_get_ref_for_node_olocked()
4843 thread = rb_entry(parent, struct binder_thread, rb_node); in binder_get_thread_ilocked()
5191 struct binder_node *node = rb_entry(n, struct binder_node, in binder_ioctl_get_node_debug_info()
5525 struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); in binder_deferred_flush()
[all …]
/drivers/mtd/ubi/
wl.c
148 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); in wl_tree_add()
244 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in in_wl_tree()
325 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_wl_entry()
332 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in find_wl_entry()
366 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
367 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
370 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
731 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
752 e1 = rb_entry(rb_first(&ubi->scrub), struct ubi_wl_entry, u.rb);
1036 e1 = rb_entry(rb_first(&ubi->used), struct ubi_wl_entry, u.rb);
[all …]
attach.c
113 av = rb_entry(parent, struct ubi_ainf_volume, rb); in find_or_add_av()
595 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in ubi_add_to_av()
1289 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); in destroy_av()
1346 av = rb_entry(rb, struct ubi_ainf_volume, rb); in destroy_ai()
/drivers/infiniband/sw/rdmavt/
mcast.c
161 mcast = rb_entry(n, struct rvt_mcast, rb_node); in rvt_mcast_find()
207 tmcast = rb_entry(pn, struct rvt_mcast, rb_node); in rvt_mcast_add()
367 mcast = rb_entry(n, struct rvt_mcast, rb_node); in rvt_detach_mcast()
/drivers/gpu/drm/i915/
i915_active.c
204 node = rb_entry(parent, struct active_node, node); in active_instance()
517 rb_entry(p, struct active_node, node); in reuse_idle_barrier()
537 rb_entry(p, struct active_node, node); in reuse_idle_barrier()
573 return rb_entry(p, struct active_node, node); in reuse_idle_barrier()
673 it = rb_entry(parent, struct active_node, node); in i915_active_acquire_barrier()
/drivers/infiniband/hw/mlx4/
cm.c
151 rb_entry(node, struct id_map_entry, node); in id_map_find_by_sl_id()
226 ent = rb_entry(parent, struct id_map_entry, node); in sl_id_map_add()
433 rb_entry(rb_first(sl_id_map), in mlx4_ib_cm_paravirt_clean()
445 rb_entry(nd, struct id_map_entry, node); in mlx4_ib_cm_paravirt_clean()
mcg.c
170 group = rb_entry(node, struct mcast_group, node); in mcast_find()
193 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
1103 group = rb_entry(p, struct mcast_group, node); in _mlx4_ib_mcg_port_cleanup()
1232 group = rb_entry(p, struct mcast_group, node); in clean_vf_mcast()
/drivers/infiniband/sw/rxe/
rxe_pool.c
291 elem = rb_entry(parent, struct rxe_pool_entry, node); in insert_index()
319 elem = rb_entry(parent, struct rxe_pool_entry, node); in insert_key()
491 elem = rb_entry(node, struct rxe_pool_entry, node); in rxe_pool_get_index()
523 elem = rb_entry(node, struct rxe_pool_entry, node); in rxe_pool_get_key()
/drivers/net/ethernet/mellanox/mlx5/core/
pagealloc.c
87 tfp = rb_entry(parent, struct fw_page, rb_node); in insert_page()
122 tfp = rb_entry(tmp, struct fw_page, rb_node); in find_fw_page()
360 fwp = rb_entry(p, struct fw_page, rb_node); in reclaim_pages_cmd()
531 fwp = rb_entry(p, struct fw_page, rb_node); in mlx5_reclaim_startup_pages()
/drivers/dma-buf/
sw_sync.c
257 other = rb_entry(parent, typeof(*pt), node); in sync_pt_create()
278 parent ? &rb_entry(parent, typeof(*pt), node)->link : &obj->pt_list); in sync_pt_create()
/drivers/vfio/
vfio_iommu_type1.c
140 struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node); in vfio_find_dma()
160 dma = rb_entry(parent, struct vfio_dma, node); in vfio_link_dma()
186 vpfn = rb_entry(node, struct vfio_pfn, node); in vfio_find_vpfn()
207 vpfn = rb_entry(parent, struct vfio_pfn, node); in vfio_link_pfn()
1203 dma = rb_entry(n, struct vfio_dma, node); in vfio_iommu_replay()
1859 vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node)); in vfio_iommu_unmap_unpin_all()
1871 dma = rb_entry(n, struct vfio_dma, node); in vfio_iommu_unmap_unpin_reaccount()
1875 struct vfio_pfn *vpfn = rb_entry(p, struct vfio_pfn, in vfio_iommu_unmap_unpin_reaccount()
1893 dma = rb_entry(n, struct vfio_dma, node); in vfio_sanity_check_pfn_list()
/drivers/xen/
evtchn.c
116 this = rb_entry(*new, struct user_evtchn, node); in add_evtchn()
148 evtchn = rb_entry(node, struct user_evtchn, node); in find_evtchn()
678 evtchn = rb_entry(node, struct user_evtchn, node); in evtchn_release()
/drivers/mtd/
mtdswap.c
79 #define MTDSWAP_ECNT_MIN(rbroot) (rb_entry(rb_first(rbroot), struct swap_eb, \
81 #define MTDSWAP_ECNT_MAX(rbroot) (rb_entry(rb_last(rbroot), struct swap_eb, \
211 cur = rb_entry(parent, struct swap_eb, rb); in __mtdswap_rb_add()
438 median = rb_entry(medrb, struct swap_eb, rb)->erase_count; in mtdswap_check_counts()
576 eb = rb_entry(rb_first(clean_root), struct swap_eb, rb); in mtdswap_map_free_block()
861 eb = rb_entry(rb_first(rp), struct swap_eb, rb); in mtdswap_pick_gc_eblk()
/drivers/gpu/drm/nouveau/nvkm/core/
object.c
37 object = rb_entry(node, typeof(*object), node); in nvkm_object_search()
71 struct nvkm_object *this = rb_entry(*ptr, typeof(*this), node); in nvkm_object_insert()
/drivers/md/
dm-dust.c
40 struct badblock *bblk = rb_entry(node, struct badblock, node); in dust_rb_search()
61 bblk = rb_entry(parent, struct badblock, node); in dust_rb_insert()
/drivers/infiniband/core/
multicast.c
137 group = rb_entry(node, struct mcast_group, node); in mcast_find()
161 cur_group = rb_entry(parent, struct mcast_group, node); in mcast_insert()
779 group = rb_entry(node, struct mcast_group, node); in mcast_groups_event()
/drivers/infiniband/ulp/ipoib/
ipoib_multicast.c
168 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in __ipoib_mcast_find()
193 tmcast = rb_entry(pn, struct ipoib_mcast, rb_node); in __ipoib_mcast_add()
1028 mcast = rb_entry(n, struct ipoib_mcast, rb_node); in ipoib_mcast_iter_next()
/drivers/gpu/drm/i915/gt/
intel_lrc.c
266 return rb_entry(rb, struct i915_priolist, node); in to_priolist()
356 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in need_preempt()
1052 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_dequeue()
1168 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_dequeue()
2614 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in execlists_cancel_requests()
3602 other = rb_entry(rb, typeof(*other), rb); in virtual_submission_tasklet()
3951 struct i915_priolist *p = rb_entry(rb, typeof(*p), node); in intel_execlists_show_requests()
3974 rb_entry(rb, typeof(*ve), nodes[engine->id].rb); in intel_execlists_show_requests()
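Many of the hits above sit inside insert functions (drbd_insert_interval(), iova_insert_rbtree(), add_evtchn(), mcast_insert(), ...), which all share the kernel's canonical rbtree insertion descent: walk down via rb_node child pointers, use rb_entry() to compare keys against each resident element, then link the new node into the empty slot. The sketch below reduces that loop to a self-contained userspace program; struct my_item and my_item_insert() are hypothetical, rb_link_node() is a simplified stand-in, and the kernel's rebalancing step (rb_insert_color()) is deliberately omitted, so this behaves as a plain unbalanced BST.

/*
 * Reduced model of the rbtree insertion descent used by the *_insert()
 * call sites above.  Userspace-only; no rebalancing (the kernel would
 * call rb_insert_color() after rb_link_node()).
 */
#include <stddef.h>
#include <stdio.h>

struct rb_node { struct rb_node *rb_left, *rb_right; };
struct rb_root { struct rb_node *rb_node; };

#define rb_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* Simplified stand-in: the kernel version also records parent and colour. */
static void rb_link_node(struct rb_node *node, struct rb_node *parent,
			 struct rb_node **rb_link)
{
	(void)parent;
	node->rb_left = node->rb_right = NULL;
	*rb_link = node;
}

struct my_item {                        /* hypothetical keyed element */
	unsigned long key;
	struct rb_node rb;
};

static void my_item_insert(struct rb_root *root, struct my_item *item)
{
	struct rb_node **new = &root->rb_node, *parent = NULL;

	/* Descend, using rb_entry() to compare against each resident item. */
	while (*new) {
		struct my_item *this = rb_entry(*new, struct my_item, rb);

		parent = *new;
		if (item->key < this->key)
			new = &(*new)->rb_left;
		else
			new = &(*new)->rb_right;
	}

	rb_link_node(&item->rb, parent, new);
	/* Kernel code would follow up with rb_insert_color(&item->rb, root). */
}

int main(void)
{
	struct rb_root root = { NULL };
	struct my_item a = { .key = 2 }, b = { .key = 1 }, c = { .key = 3 };

	my_item_insert(&root, &a);
	my_item_insert(&root, &b);
	my_item_insert(&root, &c);

	printf("root key = %lu\n",
	       rb_entry(root.rb_node, struct my_item, rb)->key);   /* prints 2 */
	return 0;
}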
