/kernel/linux/linux-5.10/kernel/events/ |
D | ring_buffer.c |
      22  atomic_set(&handle->rb->poll, EPOLLIN);  in perf_output_wakeup()
      38  struct perf_buffer *rb = handle->rb;  in perf_output_get_handle() local
      46  (*(volatile unsigned int *)&rb->nest)++;  in perf_output_get_handle()
      47  handle->wakeup = local_read(&rb->wakeup);  in perf_output_get_handle()
      52  struct perf_buffer *rb = handle->rb;  in perf_output_put_handle() local
      60  nest = READ_ONCE(rb->nest);  in perf_output_put_handle()
      62  WRITE_ONCE(rb->nest, nest - 1);  in perf_output_put_handle()
      76  head = local_read(&rb->head);  in perf_output_put_handle()
     110  WRITE_ONCE(rb->user_page->data_head, head);  in perf_output_put_handle()
     118  WRITE_ONCE(rb->nest, 0);  in perf_output_put_handle()
    [all …]
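Note: the perf matches above show the writer side publishing data_head with WRITE_ONCE() only after the record data is in place. A minimal userspace sketch of the matching reader side, assuming a perf mmap region (struct perf_event_mmap_page is the real UAPI type; the helper names are ours):

    #include <linux/perf_event.h>
    #include <stdint.h>

    /* Pairs with the kernel's ordering before WRITE_ONCE(data_head, ...). */
    static inline uint64_t ring_read_head(struct perf_event_mmap_page *pg)
    {
            return __atomic_load_n(&pg->data_head, __ATOMIC_ACQUIRE);
    }

    /* Publish how far we have consumed; the kernel may reuse that space. */
    static inline void ring_write_tail(struct perf_event_mmap_page *pg,
                                       uint64_t tail)
    {
            __atomic_store_n(&pg->data_tail, tail, __ATOMIC_RELEASE);
    }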
|
D | internal.h |
      61  extern void rb_free(struct perf_buffer *rb);
      65  struct perf_buffer *rb;  in rb_free_rcu() local
      67  rb = container_of(rcu_head, struct perf_buffer, rcu_head);  in rb_free_rcu()
      68  rb_free(rb);  in rb_free_rcu()
      71  static inline void rb_toggle_paused(struct perf_buffer *rb, bool pause)  in rb_toggle_paused() argument
      73  if (!pause && rb->nr_pages)  in rb_toggle_paused()
      74  rb->paused = 0;  in rb_toggle_paused()
      76  rb->paused = 1;  in rb_toggle_paused()
      82  extern int rb_alloc_aux(struct perf_buffer *rb, struct perf_event *event,
      84  extern void rb_free_aux(struct perf_buffer *rb);
    [all …]
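Note: rb_free_rcu() above is the standard RCU-deferred-free idiom. A hedged sketch of the same shape with a hypothetical struct my_buf (container_of, call_rcu, and kfree are the real kernel APIs):

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_buf {
            int data;
            struct rcu_head rcu_head;       /* embedded callback handle */
    };

    static void my_buf_free_rcu(struct rcu_head *rcu_head)
    {
            /* recover the enclosing object from the embedded rcu_head */
            struct my_buf *b = container_of(rcu_head, struct my_buf, rcu_head);

            kfree(b);
    }

    /* caller, after unpublishing b: call_rcu(&b->rcu_head, my_buf_free_rcu); */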
|
/kernel/linux/linux-5.10/drivers/lightnvm/ |
D | pblk-rb.c |
      26  static void pblk_rb_data_free(struct pblk_rb *rb)  in pblk_rb_data_free() argument
      31  list_for_each_entry_safe(p, t, &rb->pages, list) {  in pblk_rb_data_free()
      39  void pblk_rb_free(struct pblk_rb *rb)  in pblk_rb_free() argument
      41  pblk_rb_data_free(rb);  in pblk_rb_free()
      42  vfree(rb->entries);  in pblk_rb_free()
      72  int pblk_rb_init(struct pblk_rb *rb, unsigned int size, unsigned int threshold,  in pblk_rb_init() argument
      75  struct pblk *pblk = container_of(rb, struct pblk, rwb);  in pblk_rb_init()
      92  rb->entries = entries;  in pblk_rb_init()
      93  rb->seg_size = (1 << power_seg_sz);  in pblk_rb_init()
      94  rb->nr_entries = (1 << power_size);  in pblk_rb_init()
    [all …]
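Note: pblk_rb_init() storing sizes as 1 << power_size is the usual power-of-two ring trick: wrap-around becomes a mask instead of a modulo. A one-function sketch (ours, not pblk code):

    /* Valid only when nr_entries is a power of two. */
    static inline unsigned int rb_wrap(unsigned int pos, unsigned int nr_entries)
    {
            return pos & (nr_entries - 1);
    }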
|
/kernel/linux/linux-5.10/drivers/scsi/bfa/ |
D | bfa_ioc_ct.c |
     185  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
     188  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
     190  ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;  in bfa_ioc_ct_reg_init()
     191  ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;  in bfa_ioc_ct_reg_init()
     192  ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;  in bfa_ioc_ct_reg_init()
     195  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;  in bfa_ioc_ct_reg_init()
     196  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;  in bfa_ioc_ct_reg_init()
     197  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;  in bfa_ioc_ct_reg_init()
     198  ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn;  in bfa_ioc_ct_reg_init()
     199  ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu;  in bfa_ioc_ct_reg_init()
    [all …]
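Note: here (and in bfa_ioc_cb.c and the bna copy below) rb is the ioremapped BAR0 base, and per-PCI-function register addresses are formed by adding offsets from a lookup table. A generic sketch of that pattern with invented offsets:

    #include <linux/io.h>

    struct fn_offsets { unsigned long mbox, pgn; };

    /* made-up offsets, for illustration only */
    static const struct fn_offsets fn_off[] = {
            { 0x0100, 0x0200 },     /* PCI function 0 */
            { 0x0140, 0x0240 },     /* PCI function 1 */
    };

    static void regs_init(void __iomem *rb, unsigned int pcifn,
                          void __iomem **mbox, void __iomem **pgn)
    {
            *mbox = rb + fn_off[pcifn].mbox;
            *pgn  = rb + fn_off[pcifn].pgn;
    }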
|
D | bfa_ioc_cb.c |
     138  void __iomem *rb;  in bfa_ioc_cb_reg_init() local
     141  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_cb_reg_init()
     143  ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox;  in bfa_ioc_cb_reg_init()
     144  ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox;  in bfa_ioc_cb_reg_init()
     145  ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn;  in bfa_ioc_cb_reg_init()
     148  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;  in bfa_ioc_cb_reg_init()
     149  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;  in bfa_ioc_cb_reg_init()
     150  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;  in bfa_ioc_cb_reg_init()
     152  ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG);  in bfa_ioc_cb_reg_init()
     153  ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG);  in bfa_ioc_cb_reg_init()
    [all …]
|
/kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/ |
D | bfa_ioc_ct.c |
      49  static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
      51  static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
     251  void __iomem *rb;  in bfa_ioc_ct_reg_init() local
     254  rb = bfa_ioc_bar0(ioc);  in bfa_ioc_ct_reg_init()
     256  ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox;  in bfa_ioc_ct_reg_init()
     257  ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox;  in bfa_ioc_ct_reg_init()
     258  ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn;  in bfa_ioc_ct_reg_init()
     261  ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG;  in bfa_ioc_ct_reg_init()
     262  ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG;  in bfa_ioc_ct_reg_init()
     263  ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG;  in bfa_ioc_ct_reg_init()
    [all …]
|
/kernel/linux/linux-5.10/tools/lib/bpf/ |
D | ringbuf.c |
      42  static void ringbuf_unmap_ring(struct ring_buffer *rb, struct ring *r)  in ringbuf_unmap_ring() argument
      45  munmap(r->consumer_pos, rb->page_size);  in ringbuf_unmap_ring()
      49  munmap(r->producer_pos, rb->page_size + 2 * (r->mask + 1));  in ringbuf_unmap_ring()
      55  int ring_buffer__add(struct ring_buffer *rb, int map_fd,  in ring_buffer__add() argument
      82  tmp = libbpf_reallocarray(rb->rings, rb->ring_cnt + 1, sizeof(*rb->rings));  in ring_buffer__add()
      85  rb->rings = tmp;  in ring_buffer__add()
      87  tmp = libbpf_reallocarray(rb->events, rb->ring_cnt + 1, sizeof(*rb->events));  in ring_buffer__add()
      90  rb->events = tmp;  in ring_buffer__add()
      92  r = &rb->rings[rb->ring_cnt];  in ring_buffer__add()
     101  tmp = mmap(NULL, rb->page_size, PROT_READ | PROT_WRITE, MAP_SHARED, map_fd, 0);  in ring_buffer__add()
    [all …]
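Note: the munmap() of rb->page_size + 2 * (r->mask + 1) bytes at line 49 reflects libbpf mapping the ring's data pages twice back-to-back, so a record that wraps the ring stays virtually contiguous. From the consumer's side the public API is small; a hedged usage sketch (map_fd is assumed to be a BPF_MAP_TYPE_RINGBUF map):

    #include <bpf/libbpf.h>
    #include <stdio.h>

    static int handle_sample(void *ctx, void *data, size_t len)
    {
            printf("sample of %zu bytes\n", len);
            return 0;       /* nonzero would abort consumption */
    }

    int consume_ring(int map_fd)
    {
            struct ring_buffer *rb;
            int err;

            rb = ring_buffer__new(map_fd, handle_sample, NULL, NULL);
            if (!rb)
                    return -1;

            while ((err = ring_buffer__poll(rb, 100 /* ms */)) >= 0)
                    ;       /* handle_sample() runs once per record */

            ring_buffer__free(rb);
            return err;
    }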
|
/kernel/linux/linux-5.10/kernel/bpf/ |
D | ringbuf.c |
      53  struct bpf_ringbuf *rb;  member
      70  struct bpf_ringbuf *rb;  in bpf_ringbuf_area_alloc() local
     110  rb = vmap(pages, nr_meta_pages + 2 * nr_data_pages,  in bpf_ringbuf_area_alloc()
     112  if (rb) {  in bpf_ringbuf_area_alloc()
     114  rb->pages = pages;  in bpf_ringbuf_area_alloc()
     115  rb->nr_pages = nr_pages;  in bpf_ringbuf_area_alloc()
     116  return rb;  in bpf_ringbuf_area_alloc()
     128  struct bpf_ringbuf *rb = container_of(work, struct bpf_ringbuf, work);  in bpf_ringbuf_notify() local
     130  wake_up_all(&rb->waitq);  in bpf_ringbuf_notify()
     135  struct bpf_ringbuf *rb;  in bpf_ringbuf_alloc() local
    [all …]
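Note: the vmap() of nr_meta_pages + 2 * nr_data_pages pages at line 110 is the kernel side of the same double-mapping trick seen in the libbpf entry above. A hedged sketch of the BPF-program side that feeds such a ring (bpf_ringbuf_reserve/submit are the real helpers; struct event and the section name are ours):

    #include <linux/bpf.h>
    #include <bpf/bpf_helpers.h>

    struct event { int pid; };

    struct {
            __uint(type, BPF_MAP_TYPE_RINGBUF);
            __uint(max_entries, 256 * 1024);        /* page-aligned power of two */
    } events SEC(".maps");

    SEC("tracepoint/syscalls/sys_enter_execve")
    int trace_execve(void *ctx)
    {
            struct event *e;

            e = bpf_ringbuf_reserve(&events, sizeof(*e), 0);
            if (!e)
                    return 0;       /* ring full: drop the sample */
            e->pid = bpf_get_current_pid_tgid() >> 32;
            bpf_ringbuf_submit(e, 0);
            return 0;
    }

    char LICENSE[] SEC("license") = "GPL";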
|
/kernel/linux/linux-5.10/drivers/hid/intel-ish-hid/ishtp/ |
D | client-buffers.c |
      23  struct ishtp_cl_rb *rb;  in ishtp_cl_alloc_rx_ring() local
      28  rb = ishtp_io_rb_init(cl);  in ishtp_cl_alloc_rx_ring()
      29  if (!rb) {  in ishtp_cl_alloc_rx_ring()
      33  ret = ishtp_io_rb_alloc_buf(rb, len);  in ishtp_cl_alloc_rx_ring()
      37  list_add_tail(&rb->list, &cl->free_rb_list.list);  in ishtp_cl_alloc_rx_ring()
      99  struct ishtp_cl_rb *rb;  in ishtp_cl_free_rx_ring() local
     105  rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb,  in ishtp_cl_free_rx_ring()
     107  list_del(&rb->list);  in ishtp_cl_free_rx_ring()
     108  kfree(rb->buffer.data);  in ishtp_cl_free_rx_ring()
     109  kfree(rb);  in ishtp_cl_free_rx_ring()
    [all …]
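Note: ishtp_cl_alloc_rx_ring() above pre-allocates a fixed pool of rx buffers and parks them on a free list. A hedged sketch of that shape with a hypothetical struct my_rb (list_add_tail, kzalloc, etc. are the real APIs):

    #include <linux/list.h>
    #include <linux/slab.h>

    struct my_rb {
            struct list_head list;
            void *data;
    };

    static int alloc_rx_pool(struct list_head *free_list, int count, size_t len)
    {
            while (count--) {
                    struct my_rb *rb = kzalloc(sizeof(*rb), GFP_KERNEL);

                    if (!rb)
                            return -ENOMEM;
                    rb->data = kmalloc(len, GFP_KERNEL);
                    if (!rb->data) {
                            kfree(rb);
                            return -ENOMEM;
                    }
                    list_add_tail(&rb->list, free_list);
            }
            return 0;
    }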
|
D | client.c |
      43  struct ishtp_cl_rb *rb;  in ishtp_read_list_flush() local
      48  list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list)  in ishtp_read_list_flush()
      49  if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) {  in ishtp_read_list_flush()
      50  list_del(&rb->list);  in ishtp_read_list_flush()
      51  ishtp_io_rb_free(rb);  in ishtp_read_list_flush()
     447  struct ishtp_cl_rb *rb;  in ishtp_cl_read_start() local
     477  rb = NULL;  in ishtp_cl_read_start()
     481  rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list);  in ishtp_cl_read_start()
     482  list_del_init(&rb->list);  in ishtp_cl_read_start()
     485  rb->cl = cl;  in ishtp_cl_read_start()
    [all …]
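Note: ishtp_read_list_flush() above uses list_for_each_entry_safe() because entries are deleted mid-walk; the _safe variant caches the next node before the current one is freed. Continuing the hypothetical my_rb type from the previous sketch:

    static void flush_rb_list(struct list_head *head)
    {
            struct my_rb *rb, *next;

            list_for_each_entry_safe(rb, next, head, list) {
                    list_del(&rb->list);
                    kfree(rb->data);
                    kfree(rb);
            }
    }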
|
/kernel/linux/linux-5.10/lib/ |
D | rbtree_test.c |
      20  struct rb_node rb;  member
      39  if (key < rb_entry(parent, struct test_node, rb)->key)  in insert()
      45  rb_link_node(&node->rb, parent, new);  in insert()
      46  rb_insert_color(&node->rb, &root->rb_root);  in insert()
      57  if (key < rb_entry(parent, struct test_node, rb)->key)  in insert_cached()
      65  rb_link_node(&node->rb, parent, new);  in insert_cached()
      66  rb_insert_color_cached(&node->rb, root, leftmost);  in insert_cached()
      71  rb_erase(&node->rb, &root->rb_root);  in erase()
      76  rb_erase_cached(&node->rb, root);  in erase_cached()
      83  struct test_node, rb, u32, augmented, NODE_VAL)  in RB_DECLARE_CALLBACKS_MAX() argument
    [all …]
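Note: the insert() walk above is one half of the canonical Linux rbtree pattern (descend to a leaf, rb_link_node(), rb_insert_color()); the matching lookup walk, sketched against the same test_node layout, looks like this:

    #include <linux/rbtree.h>

    struct test_node {
            u32 key;
            struct rb_node rb;
    };

    static struct test_node *search(struct rb_root *root, u32 key)
    {
            struct rb_node *node = root->rb_node;

            while (node) {
                    struct test_node *t = rb_entry(node, struct test_node, rb);

                    if (key < t->key)
                            node = node->rb_left;
                    else if (key > t->key)
                            node = node->rb_right;
                    else
                            return t;
            }
            return NULL;
    }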
|
/kernel/linux/linux-5.10/drivers/gpu/drm/amd/display/dmub/inc/ |
D | dmub_cmd.h |
     812  static inline bool dmub_rb_empty(struct dmub_rb *rb)  in dmub_rb_empty() argument
     814  return (rb->wrpt == rb->rptr);  in dmub_rb_empty()
     817  static inline bool dmub_rb_full(struct dmub_rb *rb)  in dmub_rb_full() argument
     821  if (rb->wrpt >= rb->rptr)  in dmub_rb_full()
     822  data_count = rb->wrpt - rb->rptr;  in dmub_rb_full()
     824  data_count = rb->capacity - (rb->rptr - rb->wrpt);  in dmub_rb_full()
     826  return (data_count == (rb->capacity - DMUB_RB_CMD_SIZE));  in dmub_rb_full()
     829  static inline bool dmub_rb_push_front(struct dmub_rb *rb,  in dmub_rb_push_front() argument
     832  uint64_t volatile *dst = (uint64_t volatile *)(rb->base_address) + rb->wrpt / sizeof(uint64_t);  in dmub_rb_push_front()
     836  if (dmub_rb_full(rb))  in dmub_rb_push_front()
    [all …]
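Note: dmub_rb_full() above is the classic one-slot-open ring test: with byte pointers wrpt/rptr into a ring of capacity bytes, occupancy is the wrapped difference, and "full" is declared one DMUB_RB_CMD_SIZE short of capacity so that full and empty stay distinguishable. The occupancy math, restated as a standalone helper (ours):

    static unsigned int ring_used(unsigned int wrpt, unsigned int rptr,
                                  unsigned int capacity)
    {
            return (wrpt >= rptr) ? wrpt - rptr : capacity - (rptr - wrpt);
    }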
|
/kernel/linux/linux-5.10/kernel/printk/ |
D | printk_ringbuffer.c |
     561  static bool data_make_reusable(struct printk_ringbuffer *rb,  in data_make_reusable() argument
     567  struct prb_desc_ring *desc_ring = &rb->desc_ring;  in data_make_reusable()
     628  static bool data_push_tail(struct printk_ringbuffer *rb,  in data_push_tail() argument
     672  if (!data_make_reusable(rb, data_ring, tail_lpos, lpos,  in data_push_tail()
     765  static bool desc_push_tail(struct printk_ringbuffer *rb,  in desc_push_tail() argument
     768  struct prb_desc_ring *desc_ring = &rb->desc_ring;  in desc_push_tail()
     810  if (!data_push_tail(rb, &rb->text_data_ring, desc.text_blk_lpos.next))  in desc_push_tail()
     873  static bool desc_reserve(struct printk_ringbuffer *rb, unsigned long *id_out)  in desc_reserve() argument
     875  struct prb_desc_ring *desc_ring = &rb->desc_ring;  in desc_reserve()
     918  if (!desc_push_tail(rb, id_prev_wrap))  in desc_reserve()
    [all …]
|
D | printk_ringbuffer.h |
     105  struct printk_ringbuffer *rb;  member
     303  bool prb_reserve(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
     305  bool prb_reserve_in_last(struct prb_reserved_entry *e, struct printk_ringbuffer *rb,
     310  void prb_init(struct printk_ringbuffer *rb,
     353  #define prb_for_each_record(from, rb, s, r) \  argument
     354  for ((s) = from; prb_read_valid(rb, s, r); (s) = (r)->info->seq + 1)
     371  #define prb_for_each_info(from, rb, s, i, lc) \  argument
     372  for ((s) = from; prb_read_valid_info(rb, s, i, lc); (s) = (i)->seq + 1)
     374  bool prb_read_valid(struct printk_ringbuffer *rb, u64 seq,
     376  bool prb_read_valid_info(struct printk_ringbuffer *rb, u64 seq,
    [all …]
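Note: the prb_for_each_record() macro above drives reads by sequence number: it keeps advancing seq while prb_read_valid() succeeds. A hedged reader sketch built against this header (the record r is assumed to have been read-initialized elsewhere, e.g. via prb_rec_init_rd()):

    static u64 count_records(struct printk_ringbuffer *rb,
                             struct printk_record *r)
    {
            u64 seq;
            u64 count = 0;

            prb_for_each_record(0, rb, seq, r)
                    count++;

            return count;
    }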
|
/kernel/linux/linux-5.10/drivers/gpu/drm/ |
D | drm_mm.c |
     155  INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,  in INTERVAL_TREE_DEFINE() argument
     171  struct rb_node **link, *rb;  in drm_mm_interval_tree_add_node() local
     178  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
     179  while (rb) {  in drm_mm_interval_tree_add_node()
     180  parent = rb_entry(rb, struct drm_mm_node, rb);  in drm_mm_interval_tree_add_node()
     185  rb = rb_parent(rb);  in drm_mm_interval_tree_add_node()
     188  rb = &hole_node->rb;  in drm_mm_interval_tree_add_node()
     189  link = &hole_node->rb.rb_right;  in drm_mm_interval_tree_add_node()
     192  rb = NULL;  in drm_mm_interval_tree_add_node()
     198  rb = *link;  in drm_mm_interval_tree_add_node()
    [all …]
|
D | drm_prime.c |
     102  struct rb_node **p, *rb;  in drm_prime_add_buf_handle() local
     112  rb = NULL;  in drm_prime_add_buf_handle()
     117  rb = *p;  in drm_prime_add_buf_handle()
     118  pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb);  in drm_prime_add_buf_handle()
     120  p = &rb->rb_right;  in drm_prime_add_buf_handle()
     122  p = &rb->rb_left;  in drm_prime_add_buf_handle()
     124  rb_link_node(&member->dmabuf_rb, rb, p);  in drm_prime_add_buf_handle()
     127  rb = NULL;  in drm_prime_add_buf_handle()
     132  rb = *p;  in drm_prime_add_buf_handle()
     133  pos = rb_entry(rb, struct drm_prime_member, handle_rb);  in drm_prime_add_buf_handle()
    [all …]
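Note: drm_prime_add_buf_handle() above inserts the same member into two rbtrees, keyed by dma_buf pointer and by handle. The matching pointer-keyed lookup (cf. the lookup helpers in the same file; member layout assumed from the insert walk) would be:

    static struct drm_prime_member *lookup_buf(struct rb_root *dmabufs,
                                               struct dma_buf *dma_buf)
    {
            struct rb_node *rb = dmabufs->rb_node;

            while (rb) {
                    struct drm_prime_member *member =
                            rb_entry(rb, struct drm_prime_member, dmabuf_rb);

                    if (member->dma_buf == dma_buf)
                            return member;
                    else if (dma_buf > member->dma_buf)
                            rb = rb->rb_right;
                    else
                            rb = rb->rb_left;
            }
            return NULL;
    }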
|
/kernel/linux/linux-5.10/mm/ |
D | interval_tree.c |
      23  INTERVAL_TREE_DEFINE(struct vm_area_struct, shared.rb,
      38  if (!prev->shared.rb.rb_right) {  in vma_interval_tree_insert_after()
      40  link = &prev->shared.rb.rb_right;  in vma_interval_tree_insert_after()
      42  parent = rb_entry(prev->shared.rb.rb_right,  in vma_interval_tree_insert_after()
      43  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
      46  while (parent->shared.rb.rb_left) {  in vma_interval_tree_insert_after()
      47  parent = rb_entry(parent->shared.rb.rb_left,  in vma_interval_tree_insert_after()
      48  struct vm_area_struct, shared.rb);  in vma_interval_tree_insert_after()
      52  link = &parent->shared.rb.rb_left;  in vma_interval_tree_insert_after()
      56  rb_link_node(&node->shared.rb, &parent->shared.rb, link);  in vma_interval_tree_insert_after()
    [all …]
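Note: the INTERVAL_TREE_DEFINE() at line 23 generates the iterator pair vma_interval_tree_iter_first()/_iter_next(); an overlap query over [start, last] is the usual two-call loop (this is the shape behind the vma_interval_tree_foreach() macro in include/linux/mm.h):

    static unsigned int count_overlaps(struct rb_root_cached *root,
                                       unsigned long start, unsigned long last)
    {
            struct vm_area_struct *vma;
            unsigned int n = 0;

            for (vma = vma_interval_tree_iter_first(root, start, last); vma;
                 vma = vma_interval_tree_iter_next(vma, start, last))
                    n++;    /* vma's shared.rb interval overlaps [start, last] */

            return n;
    }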
|
/kernel/linux/linux-5.10/drivers/target/iscsi/ |
D | iscsi_target_configfs.c |
      44  ssize_t rb;  in lio_target_np_driver_show() local
      48  rb = sprintf(page, "1\n");  in lio_target_np_driver_show()
      50  rb = sprintf(page, "0\n");  in lio_target_np_driver_show()
      52  return rb;  in lio_target_np_driver_show()
     449  ssize_t rb; \
     454  rb = snprintf(page, PAGE_SIZE, \
     458  rb = snprintf(page, PAGE_SIZE, "%u\n", \
     463  return rb; \
     505  ssize_t rb = 0;  in lio_target_nacl_info_show() local
     511  rb += sprintf(page+rb, "No active iSCSI Session for Initiator"  in lio_target_nacl_info_show()
    [all …]
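Note: all of the show routines above follow the configfs attribute convention: format into the provided one-page buffer and return the byte count. A minimal hedged example (my_flag_show and my_flag_enabled are hypothetical; the signature is the standard configfs one):

    static ssize_t my_flag_show(struct config_item *item, char *page)
    {
            return snprintf(page, PAGE_SIZE, "%u\n", my_flag_enabled(item));
    }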
|
/kernel/linux/linux-5.10/fs/jffs2/ |
D | nodelist.h |
     230  struct rb_node rb;  member
     271  struct rb_node rb;  member
     334  return rb_entry(node, struct jffs2_node_frag, rb);  in frag_first()
     344  return rb_entry(node, struct jffs2_node_frag, rb);  in frag_last()
     347  #define frag_next(frag) rb_entry(rb_next(&(frag)->rb), struct jffs2_node_frag, rb)
     348  #define frag_prev(frag) rb_entry(rb_prev(&(frag)->rb), struct jffs2_node_frag, rb)
     349  #define frag_parent(frag) rb_entry(rb_parent(&(frag)->rb), struct jffs2_node_frag, rb)
     350  #define frag_left(frag) rb_entry((frag)->rb.rb_left, struct jffs2_node_frag, rb)
     351  #define frag_right(frag) rb_entry((frag)->rb.rb_right, struct jffs2_node_frag, rb)
     352  #define frag_erase(frag, list) rb_erase(&frag->rb, list);
    [all …]
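Note: the frag_first()/frag_next() helpers above are thin rb_entry() wrappers over the generic rbtree iterators; a full in-order walk of a fragtree can use those iterators directly:

    #include <linux/rbtree.h>

    static u32 count_frags(struct rb_root *fragtree)
    {
            struct rb_node *node;
            u32 n = 0;

            for (node = rb_first(fragtree); node; node = rb_next(node))
                    n++;

            return n;
    }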
|
/kernel/linux/linux-5.10/tools/testing/selftests/bpf/benchs/ |
D | run_bench_ringbufs.sh |
      34  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      39  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      44  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
      67  for b in rb-libbpf rb-custom pb-libbpf pb-custom; do
|
/kernel/linux/linux-5.10/arch/arm/lib/ |
D | getuser.S |
      50  rb .req ip  label
      52  3: ldrbt rb, [r0], #0
      54  rb .req r0  label
      56  3: ldrb rb, [r0, #1]
      59  orr r2, r2, rb, lsl #8
      61  orr r2, rb, r2, lsl #8
     118  rb .req ip  label
     120  10: ldrbt rb, [r0], #0
     122  rb .req r0  label
     124  10: ldrb rb, [r0, #1]
    [all …]
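Note: the two orr forms above (lines 59 and 61) are the little- and big-endian merges of a 2-byte get_user() performed as two single-byte loads. The same logic in C, as an illustration only:

    static unsigned int load16_bytewise(const unsigned char *p, int big_endian)
    {
            unsigned int b0 = p[0], b1 = p[1];

            return big_endian ? (b0 << 8) | b1 : (b1 << 8) | b0;
    }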
|
/kernel/linux/linux-5.10/net/sunrpc/xprtrdma/ |
D | verbs.c |
      92  static void rpcrdma_regbuf_dma_unmap(struct rpcrdma_regbuf *rb);
      93  static void rpcrdma_regbuf_free(struct rpcrdma_regbuf *rb);
     885  struct rpcrdma_regbuf *rb;  in rpcrdma_req_setup() local
     892  rb = rpcrdma_regbuf_alloc(__roundup_pow_of_two(maxhdrsize),  in rpcrdma_req_setup()
     894  if (!rb)  in rpcrdma_req_setup()
     897  if (!__rpcrdma_regbuf_dma_map(r_xprt, rb))  in rpcrdma_req_setup()
     900  req->rl_rdmabuf = rb;  in rpcrdma_req_setup()
     901  xdr_buf_init(&req->rl_hdrbuf, rdmab_data(rb), rdmab_length(rb));  in rpcrdma_req_setup()
     905  rpcrdma_regbuf_free(rb);  in rpcrdma_req_setup()
    1273  struct rpcrdma_regbuf *rb;  in rpcrdma_regbuf_alloc() local
    [all …]
|
D | xprt_rdma.h |
     120  static inline u64 rdmab_addr(struct rpcrdma_regbuf *rb)  in rdmab_addr() argument
     122  return rb->rg_iov.addr;  in rdmab_addr()
     125  static inline u32 rdmab_length(struct rpcrdma_regbuf *rb)  in rdmab_length() argument
     127  return rb->rg_iov.length;  in rdmab_length()
     130  static inline u32 rdmab_lkey(struct rpcrdma_regbuf *rb)  in rdmab_lkey() argument
     132  return rb->rg_iov.lkey;  in rdmab_lkey()
     135  static inline struct ib_device *rdmab_device(struct rpcrdma_regbuf *rb)  in rdmab_device() argument
     137  return rb->rg_device;  in rdmab_device()
     140  static inline void *rdmab_data(const struct rpcrdma_regbuf *rb)  in rdmab_data() argument
     142  return rb->rg_data;  in rdmab_data()
    [all …]
|
/kernel/linux/linux-5.10/drivers/xen/xenbus/ |
D | xenbus_dev_frontend.c |
     130  struct read_buffer *rb;  in xenbus_file_read() local
     148  rb = list_entry(u->read_buffers.next, struct read_buffer, list);  in xenbus_file_read()
     151  size_t sz = min_t(size_t, len - i, rb->len - rb->cons);  in xenbus_file_read()
     153  ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz);  in xenbus_file_read()
     156  rb->cons += sz - ret;  in xenbus_file_read()
     165  if (rb->cons == rb->len) {  in xenbus_file_read()
     166  list_del(&rb->list);  in xenbus_file_read()
     167  kfree(rb);  in xenbus_file_read()
     170  rb = list_entry(u->read_buffers.next,  in xenbus_file_read()
     191  struct read_buffer *rb;  in queue_reply() local
    [all …]
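Note: xenbus_file_read() above relies on copy_to_user() returning the number of bytes it could NOT copy, so the consumer index advances by sz - ret and a short copy ends the loop. The same step as a standalone helper (a sketch, not the driver's code):

    #include <linux/uaccess.h>

    /* Copy up to len bytes from msg starting at *cons; advance *cons. */
    static ssize_t drain_one(char __user *ubuf, size_t len,
                             const char *msg, size_t msg_len, size_t *cons)
    {
            size_t sz = min_t(size_t, len, msg_len - *cons);
            unsigned long not_copied = copy_to_user(ubuf, msg + *cons, sz);

            *cons += sz - not_copied;
            return not_copied ? -EFAULT : sz;
    }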
|
/kernel/linux/linux-5.10/drivers/block/drbd/ |
D | drbd_interval.c |
      12  struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb);  in interval_end()
      19  struct drbd_interval, rb, sector_t, end, NODE_END);
      34  rb_entry(*new, struct drbd_interval, rb);  in drbd_insert_interval()
      52  rb_link_node(&this->rb, parent, new);  in drbd_insert_interval()
      53  rb_insert_augmented(&this->rb, root, &augment_callbacks);  in drbd_insert_interval()
      75  rb_entry(node, struct drbd_interval, rb);  in drbd_contains_interval()
      97  rb_erase_augmented(&this->rb, root, &augment_callbacks);  in drbd_remove_interval()
     122  rb_entry(node, struct drbd_interval, rb);  in drbd_find_overlap()
     148  node = rb_next(&i->rb);  in drbd_next_overlap()
     151  i = rb_entry(node, struct drbd_interval, rb);  in drbd_next_overlap()
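Note: drbd_interval keeps a start sector (sector_t) plus a size in bytes, so interval ends are computed as sector + (size >> 9). The overlap predicate behind drbd_find_overlap(), restated as a sketch under that assumption:

    #include <linux/types.h>

    static bool ranges_overlap(sector_t s1, unsigned int size1,
                               sector_t s2, unsigned int size2)
    {
            /* two half-open ranges overlap iff each starts before the other ends */
            return s1 < s2 + (size2 >> 9) && s2 < s1 + (size1 >> 9);
    }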
|