Searched refs:rb (Results 1 – 25 of 113) sorted by relevance

/drivers/lightnvm/
pblk-rb.c
25 void pblk_rb_data_free(struct pblk_rb *rb) in pblk_rb_data_free() argument
30 list_for_each_entry_safe(p, t, &rb->pages, list) { in pblk_rb_data_free()
43 int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base, in pblk_rb_init() argument
46 struct pblk *pblk = container_of(rb, struct pblk, rwb); in pblk_rb_init()
53 rb->entries = rb_entry_base; in pblk_rb_init()
54 rb->seg_size = (1 << power_seg_sz); in pblk_rb_init()
55 rb->nr_entries = (1 << power_size); in pblk_rb_init()
56 rb->mem = rb->subm = rb->sync = rb->l2p_update = 0; in pblk_rb_init()
57 rb->sync_point = EMPTY_ENTRY; in pblk_rb_init()
59 spin_lock_init(&rb->w_lock); in pblk_rb_init()
[all …]
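
The init lines above size the ring as a power of two (rb->nr_entries = 1 << power_size), which lets every position wrap with a cheap mask instead of a modulo. A minimal userspace sketch of that sizing pattern, with illustrative names rather than pblk's:

    #include <stdio.h>

    struct ring {
            unsigned int nr_entries;        /* always 1 << power_size */
            unsigned int mem, subm, sync;   /* producer/submit/sync positions */
    };

    static void ring_init(struct ring *r, unsigned int power_size)
    {
            r->nr_entries = 1u << power_size;
            r->mem = r->subm = r->sync = 0;
    }

    /* wrap a position into the ring; valid only for power-of-two sizes */
    static unsigned int ring_wrap(const struct ring *r, unsigned int pos)
    {
            return pos & (r->nr_entries - 1);
    }

    int main(void)
    {
            struct ring r;

            ring_init(&r, 4);                       /* 16 entries */
            printf("%u\n", ring_wrap(&r, 17));      /* prints 1 */
            return 0;
    }
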
pblk.h
649 int pblk_rb_init(struct pblk_rb *rb, struct pblk_rb_entry *rb_entry_base,
652 void *pblk_rb_entries_ref(struct pblk_rb *rb);
653 int pblk_rb_may_write_user(struct pblk_rb *rb, struct bio *bio,
655 int pblk_rb_may_write_gc(struct pblk_rb *rb, unsigned int nr_entries,
657 void pblk_rb_write_entry_user(struct pblk_rb *rb, void *data,
659 void pblk_rb_write_entry_gc(struct pblk_rb *rb, void *data,
662 struct pblk_w_ctx *pblk_rb_w_ctx(struct pblk_rb *rb, unsigned int pos);
663 void pblk_rb_flush(struct pblk_rb *rb);
665 void pblk_rb_sync_l2p(struct pblk_rb *rb);
666 unsigned int pblk_rb_read_to_bio(struct pblk_rb *rb, struct nvm_rq *rqd,
[all …]
/drivers/misc/mic/scif/
scif_rb.c
36 void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr, in scif_rb_init() argument
39 rb->rb_base = rb_base; in scif_rb_init()
40 rb->size = (1 << size); in scif_rb_init()
41 rb->read_ptr = read_ptr; in scif_rb_init()
42 rb->write_ptr = write_ptr; in scif_rb_init()
43 rb->current_read_offset = *read_ptr; in scif_rb_init()
44 rb->current_write_offset = *write_ptr; in scif_rb_init()
48 static void memcpy_torb(struct scif_rb *rb, void *header, in memcpy_torb() argument
53 if (header + size >= rb->rb_base + rb->size) { in memcpy_torb()
55 size1 = (u32)(rb->rb_base + rb->size - header); in memcpy_torb()
[all …]
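
memcpy_torb() above handles a message that crosses the end of the ring by splitting the copy: first the bytes that fit before rb_base + size, then the remainder starting back at the base. A standalone sketch of that split (not the SCIF code itself):

    #include <stdio.h>
    #include <string.h>

    static void ring_copy_in(char *rb_base, unsigned int rb_size,
                             unsigned int offset, const void *msg,
                             unsigned int size)
    {
            if (offset + size > rb_size) {
                    /* bytes that still fit before the end of the buffer */
                    unsigned int size1 = rb_size - offset;

                    memcpy(rb_base + offset, msg, size1);
                    memcpy(rb_base, (const char *)msg + size1, size - size1);
            } else {
                    memcpy(rb_base + offset, msg, size);
            }
    }

    int main(void)
    {
            char ring[9] = "........";

            ring_copy_in(ring, 8, 6, "abcd", 4);    /* wraps around the end */
            printf("%s\n", ring);                   /* prints cd....ab */
            return 0;
    }
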
scif_rb.h
83 void scif_rb_init(struct scif_rb *rb, u32 *read_ptr, u32 *write_ptr,
87 int scif_rb_write(struct scif_rb *rb, void *msg, u32 size);
89 void scif_rb_commit(struct scif_rb *rb);
91 u32 scif_rb_space(struct scif_rb *rb);
95 u32 scif_rb_get_next(struct scif_rb *rb, void *msg, u32 size);
97 void scif_rb_update_read_ptr(struct scif_rb *rb);
99 u32 scif_rb_count(struct scif_rb *rb, u32 size);
/drivers/scsi/bfa/
bfa_ioc_ct.c
193 void __iomem *rb; in bfa_ioc_ct_reg_init() local
196 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
198 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
199 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
200 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
203 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
204 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
205 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
206 ioc->ioc_regs.hfn_mbox_cmd = rb + ct_p0reg[pcifn].hfn; in bfa_ioc_ct_reg_init()
207 ioc->ioc_regs.lpu_mbox_cmd = rb + ct_p0reg[pcifn].lpu; in bfa_ioc_ct_reg_init()
[all …]
bfa_ioc_cb.c
146 void __iomem *rb; in bfa_ioc_cb_reg_init() local
149 rb = bfa_ioc_bar0(ioc); in bfa_ioc_cb_reg_init()
151 ioc->ioc_regs.hfn_mbox = rb + iocreg_fnreg[pcifn].hfn_mbox; in bfa_ioc_cb_reg_init()
152 ioc->ioc_regs.lpu_mbox = rb + iocreg_fnreg[pcifn].lpu_mbox; in bfa_ioc_cb_reg_init()
153 ioc->ioc_regs.host_page_num_fn = rb + iocreg_fnreg[pcifn].hfn_pgn; in bfa_ioc_cb_reg_init()
156 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_cb_reg_init()
157 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_cb_reg_init()
158 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_cb_reg_init()
160 ioc->ioc_regs.heartbeat = (rb + BFA_IOC1_HBEAT_REG); in bfa_ioc_cb_reg_init()
161 ioc->ioc_regs.ioc_fwstate = (rb + BFA_IOC1_STATE_REG); in bfa_ioc_cb_reg_init()
[all …]
/drivers/net/ethernet/brocade/bna/
bfa_ioc_ct.c
57 static enum bfa_status bfa_ioc_ct_pll_init(void __iomem *rb,
59 static enum bfa_status bfa_ioc_ct2_pll_init(void __iomem *rb,
259 void __iomem *rb; in bfa_ioc_ct_reg_init() local
262 rb = bfa_ioc_bar0(ioc); in bfa_ioc_ct_reg_init()
264 ioc->ioc_regs.hfn_mbox = rb + ct_fnreg[pcifn].hfn_mbox; in bfa_ioc_ct_reg_init()
265 ioc->ioc_regs.lpu_mbox = rb + ct_fnreg[pcifn].lpu_mbox; in bfa_ioc_ct_reg_init()
266 ioc->ioc_regs.host_page_num_fn = rb + ct_fnreg[pcifn].hfn_pgn; in bfa_ioc_ct_reg_init()
269 ioc->ioc_regs.heartbeat = rb + BFA_IOC0_HBEAT_REG; in bfa_ioc_ct_reg_init()
270 ioc->ioc_regs.ioc_fwstate = rb + BFA_IOC0_STATE_REG; in bfa_ioc_ct_reg_init()
271 ioc->ioc_regs.alt_ioc_fwstate = rb + BFA_IOC1_STATE_REG; in bfa_ioc_ct_reg_init()
[all …]
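
All three bfa_ioc_*_reg_init() variants above share one shape: take BAR0 as an __iomem base (rb) and derive each register address by adding a per-PCI-function offset out of a lookup table. A hedged kernel-style sketch of that shape; the table contents and field names below are hypothetical stand-ins for ct_fnreg[]/iocreg_fnreg[]:

    #include <linux/io.h>
    #include <linux/types.h>

    /* hypothetical per-function offset table, in the role of ct_fnreg[] */
    struct fn_offsets {
            u32 hfn_mbox;
            u32 lpu_mbox;
            u32 hfn_pgn;
    };

    static const struct fn_offsets fnreg[] = {
            { 0x0000, 0x0100, 0x0200 },     /* PCI fn 0 (made-up offsets) */
            { 0x0010, 0x0110, 0x0210 },     /* PCI fn 1 */
    };

    static void reg_init(void __iomem *rb, int pcifn,
                         void __iomem **mbox, void __iomem **pgn)
    {
            /* every register is just base + table offset */
            *mbox = rb + fnreg[pcifn].hfn_mbox;
            *pgn  = rb + fnreg[pcifn].hfn_pgn;
    }
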
/drivers/hid/intel-ish-hid/ishtp/
client-buffers.c
32 struct ishtp_cl_rb *rb; in ishtp_cl_alloc_rx_ring() local
37 rb = ishtp_io_rb_init(cl); in ishtp_cl_alloc_rx_ring()
38 if (!rb) { in ishtp_cl_alloc_rx_ring()
42 ret = ishtp_io_rb_alloc_buf(rb, len); in ishtp_cl_alloc_rx_ring()
46 list_add_tail(&rb->list, &cl->free_rb_list.list); in ishtp_cl_alloc_rx_ring()
105 struct ishtp_cl_rb *rb; in ishtp_cl_free_rx_ring() local
111 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, in ishtp_cl_free_rx_ring()
113 list_del(&rb->list); in ishtp_cl_free_rx_ring()
114 kfree(rb->buffer.data); in ishtp_cl_free_rx_ring()
115 kfree(rb); in ishtp_cl_free_rx_ring()
[all …]
client.c
33 struct ishtp_cl_rb *rb; in ishtp_read_list_flush() local
38 list_for_each_entry_safe(rb, next, &cl->dev->read_list.list, list) in ishtp_read_list_flush()
39 if (rb->cl && ishtp_cl_cmp_id(cl, rb->cl)) { in ishtp_read_list_flush()
40 list_del(&rb->list); in ishtp_read_list_flush()
41 ishtp_io_rb_free(rb); in ishtp_read_list_flush()
442 struct ishtp_cl_rb *rb; in ishtp_cl_read_start() local
472 rb = NULL; in ishtp_cl_read_start()
476 rb = list_entry(cl->free_rb_list.list.next, struct ishtp_cl_rb, list); in ishtp_cl_read_start()
477 list_del_init(&rb->list); in ishtp_cl_read_start()
480 rb->cl = cl; in ishtp_cl_read_start()
[all …]
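
ishtp_read_list_flush() above deletes entries while walking the list, which is why it uses list_for_each_entry_safe(): the iterator caches the next element before the current one is unlinked and freed. Kernel-style sketch with a hypothetical struct:

    #include <linux/list.h>
    #include <linux/slab.h>

    struct buf {
            struct list_head list;
            /* payload elided */
    };

    static void flush_all(struct list_head *head)
    {
            struct buf *rb, *next;

            list_for_each_entry_safe(rb, next, head, list) {
                    list_del(&rb->list);    /* unlink first */
                    kfree(rb);              /* safe: 'next' was saved already */
            }
    }
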
/drivers/gpu/drm/
drm_prime.c
95 struct rb_node **p, *rb; in drm_prime_add_buf_handle() local
105 rb = NULL; in drm_prime_add_buf_handle()
110 rb = *p; in drm_prime_add_buf_handle()
111 pos = rb_entry(rb, struct drm_prime_member, dmabuf_rb); in drm_prime_add_buf_handle()
113 p = &rb->rb_right; in drm_prime_add_buf_handle()
115 p = &rb->rb_left; in drm_prime_add_buf_handle()
117 rb_link_node(&member->dmabuf_rb, rb, p); in drm_prime_add_buf_handle()
120 rb = NULL; in drm_prime_add_buf_handle()
125 rb = *p; in drm_prime_add_buf_handle()
126 pos = rb_entry(rb, struct drm_prime_member, handle_rb); in drm_prime_add_buf_handle()
[all …]
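
drm_prime_add_buf_handle() above (and vma_create() in i915_vma.c further down) follows the standard <linux/rbtree.h> insert idiom: descend through a struct rb_node ** cursor, remember the parent, then rb_link_node() and rb_insert_color(). A sketch with a hypothetical keyed struct:

    #include <linux/rbtree.h>

    struct item {
            struct rb_node node;
            unsigned long key;
    };

    static void item_insert(struct rb_root *root, struct item *new)
    {
            struct rb_node **p = &root->rb_node, *rb = NULL;

            while (*p) {
                    struct item *pos;

                    rb = *p;
                    pos = rb_entry(rb, struct item, node);
                    if (new->key > pos->key)
                            p = &rb->rb_right;
                    else
                            p = &rb->rb_left;
            }
            rb_link_node(&new->node, rb, p);        /* attach at the empty slot */
            rb_insert_color(&new->node, root);      /* recolor/rebalance */
    }
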
drm_mm.c
165 INTERVAL_TREE_DEFINE(struct drm_mm_node, rb, in INTERVAL_TREE_DEFINE() argument
181 struct rb_node **link, *rb; in drm_mm_interval_tree_add_node() local
188 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
189 while (rb) { in drm_mm_interval_tree_add_node()
190 parent = rb_entry(rb, struct drm_mm_node, rb); in drm_mm_interval_tree_add_node()
195 rb = rb_parent(rb); in drm_mm_interval_tree_add_node()
198 rb = &hole_node->rb; in drm_mm_interval_tree_add_node()
199 link = &hole_node->rb.rb_right; in drm_mm_interval_tree_add_node()
202 rb = NULL; in drm_mm_interval_tree_add_node()
207 rb = *link; in drm_mm_interval_tree_add_node()
[all …]
/drivers/target/iscsi/
iscsi_target_configfs.c
52 ssize_t rb; in lio_target_np_driver_show() local
56 rb = sprintf(page, "1\n"); in lio_target_np_driver_show()
58 rb = sprintf(page, "0\n"); in lio_target_np_driver_show()
60 return rb; in lio_target_np_driver_show()
457 ssize_t rb; \
462 rb = snprintf(page, PAGE_SIZE, \
466 rb = snprintf(page, PAGE_SIZE, "%u\n", \
471 return rb; \
513 ssize_t rb = 0; in lio_target_nacl_info_show() local
519 rb += sprintf(page+rb, "No active iSCSI Session for Initiator" in lio_target_nacl_info_show()
[all …]
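
lio_target_nacl_info_show() above accumulates output with rb += sprintf(page + rb, ...): each sprintf() returns the bytes it wrote, so rb is both the next write offset and the total eventually returned to configfs. Standalone sketch with made-up content:

    #include <stdio.h>

    static long show_info(char *page)
    {
            long rb = 0;

            rb += sprintf(page + rb, "InitiatorName: %s\n", "iqn.example");
            rb += sprintf(page + rb, "LoginAttempts: %u\n", 3u);
            return rb;      /* total bytes placed in 'page' */
    }

    int main(void)
    {
            char page[256];
            long n = show_info(page);

            fwrite(page, 1, n, stdout);
            return 0;
    }
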
/drivers/block/drbd/
drbd_interval.c
12 struct drbd_interval *this = rb_entry(node, struct drbd_interval, rb); in interval_end()
28 if (node->rb.rb_left) { in compute_subtree_last()
29 sector_t left = interval_end(node->rb.rb_left); in compute_subtree_last()
33 if (node->rb.rb_right) { in compute_subtree_last()
34 sector_t right = interval_end(node->rb.rb_right); in compute_subtree_last()
41 RB_DECLARE_CALLBACKS(static, augment_callbacks, struct drbd_interval, rb,
57 rb_entry(*new, struct drbd_interval, rb); in drbd_insert_interval()
75 rb_link_node(&this->rb, parent, new); in drbd_insert_interval()
76 rb_insert_augmented(&this->rb, root, &augment_callbacks); in drbd_insert_interval()
98 rb_entry(node, struct drbd_interval, rb); in drbd_contains_interval()
[all …]
drbd_interval.h
9 struct rb_node rb; member
21 RB_CLEAR_NODE(&i->rb); in drbd_clear_interval()
26 return RB_EMPTY_NODE(&i->rb); in drbd_interval_empty()
/drivers/xen/xenbus/
xenbus_dev_frontend.c
127 struct read_buffer *rb; in xenbus_file_read() local
145 rb = list_entry(u->read_buffers.next, struct read_buffer, list); in xenbus_file_read()
148 unsigned sz = min((unsigned)len - i, rb->len - rb->cons); in xenbus_file_read()
150 ret = copy_to_user(ubuf + i, &rb->msg[rb->cons], sz); in xenbus_file_read()
153 rb->cons += sz - ret; in xenbus_file_read()
162 if (rb->cons == rb->len) { in xenbus_file_read()
163 list_del(&rb->list); in xenbus_file_read()
164 kfree(rb); in xenbus_file_read()
167 rb = list_entry(u->read_buffers.next, in xenbus_file_read()
188 struct read_buffer *rb; in queue_reply() local
[all …]
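
One detail worth calling out in xenbus_file_read() above: copy_to_user() returns the number of bytes it could not copy, so the consumer offset advances by sz - ret rather than sz. A hedged kernel-style sketch of that accounting:

    #include <linux/uaccess.h>

    /* returns the bytes actually delivered to userspace */
    static unsigned int drain(char __user *ubuf, const char *msg,
                              unsigned int *cons, unsigned int sz)
    {
            unsigned long ret;

            ret = copy_to_user(ubuf, msg + *cons, sz);
            *cons += sz - ret;      /* advance only past what was copied */
            return sz - ret;
    }
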
/drivers/tty/hvc/
hvc_iucv.c
222 struct iucv_tty_buffer *rb; in hvc_iucv_write() local
240 rb = list_first_entry(&priv->tty_inqueue, struct iucv_tty_buffer, list); in hvc_iucv_write()
243 if (!rb->mbuf) { /* message not yet received ... */ in hvc_iucv_write()
246 rb->mbuf = kmalloc(rb->msg.length, GFP_ATOMIC | GFP_DMA); in hvc_iucv_write()
247 if (!rb->mbuf) in hvc_iucv_write()
250 rc = __iucv_message_receive(priv->path, &rb->msg, 0, in hvc_iucv_write()
251 rb->mbuf, rb->msg.length, NULL); in hvc_iucv_write()
263 if (rc || (rb->mbuf->version != MSG_VERSION) || in hvc_iucv_write()
264 (rb->msg.length != MSG_SIZE(rb->mbuf->datalen))) in hvc_iucv_write()
268 switch (rb->mbuf->type) { in hvc_iucv_write()
[all …]
/drivers/mtd/ubi/
wl.c
161 e1 = rb_entry(parent, struct ubi_wl_entry, u.rb); in wl_tree_add()
176 rb_link_node(&e->u.rb, parent, p); in wl_tree_add()
177 rb_insert_color(&e->u.rb, root); in wl_tree_add()
257 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in in_wl_tree()
317 e = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_wl_entry()
324 e1 = rb_entry(p, struct ubi_wl_entry, u.rb); in find_wl_entry()
358 first = rb_entry(rb_first(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
359 last = rb_entry(rb_last(root), struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
362 e = rb_entry(root->rb_node, struct ubi_wl_entry, u.rb); in find_mean_wl_entry()
398 rb_erase(&e->u.rb, &ubi->free); in wl_get_wle()
[all …]
attach.c
126 av = rb_entry(parent, struct ubi_ainf_volume, rb); in find_or_add_av()
156 rb_link_node(&av->rb, parent, p); in find_or_add_av()
157 rb_insert_color(&av->rb, &ai->volumes); in find_or_add_av()
608 aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb); in ubi_add_to_av()
717 rb_link_node(&aeb->u.rb, parent, p); in ubi_add_to_av()
718 rb_insert_color(&aeb->u.rb, &av->root); in ubi_add_to_av()
766 rb_erase(&av->rb, &ai->volumes); in ubi_remove_av()
1301 aeb = rb_entry(this, struct ubi_ainf_peb, u.rb); in destroy_av()
1304 if (this->rb_left == &aeb->u.rb) in destroy_av()
1327 struct rb_node *rb; in destroy_ai() local
[all …]
ubi.h
193 struct rb_node rb; member
215 struct rb_node rb; member
691 struct rb_node rb; member
726 struct rb_node rb; member
1009 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->free, u.rb)
1018 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->used, u.rb)
1027 ubi_rb_for_each_entry((tmp_rb), (e), &(ubi)->scrub, u.rb)
1046 #define ubi_rb_for_each_entry(rb, pos, root, member) \ argument
1047 for (rb = rb_first(root), \
1048 pos = (rb ? container_of(rb, typeof(*pos), member) : NULL); \
[all …]
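
The ubi_rb_for_each_entry() macro at the bottom of this group pairs rb_first()/rb_next() in-order traversal with container_of() to recover the enclosing entry, which is how the free/used/scrub wrappers above iterate their trees. A hedged usage sketch; the printed fields are an assumption about struct ubi_wl_entry:

    struct rb_node *tmp_rb;
    struct ubi_wl_entry *e;

    /* walk the free tree in sorted order, lowest erase count first */
    ubi_rb_for_each_entry(tmp_rb, e, &ubi->free, u.rb)
            pr_info("PEB %d, erase count %d\n", e->pnum, e->ec);
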
/drivers/gpu/drm/msm/adreno/
adreno_gpu.c
74 ret = msm_gem_get_iova(gpu->rb->bo, gpu->aspace, &gpu->rb_iova); in adreno_hw_init()
82 gpu->rb->cur = gpu->rb->start; in adreno_hw_init()
91 AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) | in adreno_hw_init()
152 struct msm_ringbuffer *ring = gpu->rb; in adreno_submit()
231 wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1); in adreno_flush()
242 uint32_t wptr = get_wptr(gpu->rb); in adreno_idle()
267 seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb)); in adreno_show()
302 printk("rb wptr: %d\n", get_wptr(gpu->rb)); in adreno_dump_info()
328 uint32_t size = gpu->rb->size / 4; in ring_freewords()
329 uint32_t wptr = get_wptr(gpu->rb); in ring_freewords()
/drivers/i2c/
i2c-stub.c
102 struct smbus_block_data *b, *rb = NULL; in stub_find_block() local
106 rb = b; in stub_find_block()
110 if (rb == NULL && create) { in stub_find_block()
111 rb = devm_kzalloc(dev, sizeof(*rb), GFP_KERNEL); in stub_find_block()
112 if (rb == NULL) in stub_find_block()
113 return rb; in stub_find_block()
114 rb->command = command; in stub_find_block()
115 list_add(&rb->node, &chip->smbus_blocks); in stub_find_block()
117 return rb; in stub_find_block()
/drivers/gpu/drm/i915/
i915_vma.c
78 struct rb_node *rb, **p; in vma_create() local
143 rb = NULL; in vma_create()
148 rb = *p; in vma_create()
149 pos = rb_entry(rb, struct i915_vma, obj_node); in vma_create()
151 p = &rb->rb_right; in vma_create()
153 p = &rb->rb_left; in vma_create()
155 rb_link_node(&vma->obj_node, rb, p); in vma_create()
171 struct rb_node *rb; in vma_lookup() local
173 rb = obj->vma_tree.rb_node; in vma_lookup()
174 while (rb) { in vma_lookup()
[all …]
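
vma_lookup() above is the read-side counterpart of the insert walk sketched after the drm_prime.c results: descend from rb_node without tracking a parent and return on a key match. Continuing with the same hypothetical struct item:

    static struct item *item_lookup(struct rb_root *root, unsigned long key)
    {
            struct rb_node *rb = root->rb_node;

            while (rb) {
                    struct item *pos = rb_entry(rb, struct item, node);

                    if (pos->key == key)
                            return pos;
                    rb = key > pos->key ? rb->rb_right : rb->rb_left;
            }
            return NULL;    /* not in the tree */
    }
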
intel_breadcrumbs.c
451 static inline bool chain_wakeup(struct rb_node *rb, int priority) in chain_wakeup() argument
453 return rb && to_wait(rb)->tsk->prio <= priority; in chain_wakeup()
556 static struct drm_i915_gem_request *to_signaler(struct rb_node *rb) in to_signaler() argument
558 return rb_entry(rb, struct drm_i915_gem_request, signaling.node); in to_signaler()
619 struct rb_node *rb = in intel_breadcrumbs_signaler() local
622 rb ? to_signaler(rb) : NULL); in intel_breadcrumbs_signaler()
756 struct rb_node *rb = in intel_engine_cancel_signaling() local
759 rb ? to_signaler(rb) : NULL); in intel_engine_cancel_signaling()
/drivers/staging/wilc1000/
wilc_spi.c
204 static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen) in wilc_spi_rx() argument
212 .rx_buf = rb, in wilc_spi_rx()
244 static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) in wilc_spi_tx_rx() argument
252 .rx_buf = rb, in wilc_spi_tx_rx()
283 u8 wb[32], rb[32]; in spi_cmd_complete() local
419 if (wilc_spi_tx_rx(wilc, wb, rb, len2)) { in spi_cmd_complete()
434 rsp = rb[rix++]; in spi_cmd_complete()
448 rsp = rb[rix++]; in spi_cmd_complete()
467 rsp = rb[rix++]; in spi_cmd_complete()
487 b[0] = rb[rix++]; in spi_cmd_complete()
[all …]
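
wilc_spi_tx_rx() above is a full-duplex exchange: a single struct spi_transfer carries both tx_buf and rx_buf of the same length, so bytes are clocked out and in simultaneously. A sketch against the standard <linux/spi/spi.h> API (the function name is made up):

    #include <linux/spi/spi.h>

    static int spi_xfer_duplex(struct spi_device *spi, u8 *wb, u8 *rb, u32 len)
    {
            struct spi_transfer tr = {
                    .tx_buf = wb,
                    .rx_buf = rb,
                    .len    = len,
            };
            struct spi_message msg;

            spi_message_init(&msg);
            spi_message_add_tail(&tr, &msg);
            return spi_sync(spi, &msg);     /* blocking; 0 on success */
    }
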
/drivers/staging/lustre/lnet/lnet/
router.c
1309 lnet_destroy_rtrbuf(struct lnet_rtrbuf *rb, int npages) in lnet_destroy_rtrbuf() argument
1314 __free_page(rb->rb_kiov[npages].bv_page); in lnet_destroy_rtrbuf()
1316 LIBCFS_FREE(rb, sz); in lnet_destroy_rtrbuf()
1325 struct lnet_rtrbuf *rb; in lnet_new_rtrbuf() local
1328 LIBCFS_CPT_ALLOC(rb, lnet_cpt_table(), cpt, sz); in lnet_new_rtrbuf()
1329 if (!rb) in lnet_new_rtrbuf()
1332 rb->rb_pool = rbp; in lnet_new_rtrbuf()
1340 __free_page(rb->rb_kiov[i].bv_page); in lnet_new_rtrbuf()
1342 LIBCFS_FREE(rb, sz); in lnet_new_rtrbuf()
1346 rb->rb_kiov[i].bv_len = PAGE_SIZE; in lnet_new_rtrbuf()
[all …]
