/drivers/staging/rdma/amso1100/
c2_rnic.c
     81  static int c2_adapter_init(struct c2_dev *c2dev)    in c2_adapter_init() argument
     89  wr.hint_count = cpu_to_be64(c2dev->hint_count_dma);    in c2_adapter_init()
     90  wr.q0_host_shared = cpu_to_be64(c2dev->req_vq.shared_dma);    in c2_adapter_init()
     91  wr.q1_host_shared = cpu_to_be64(c2dev->rep_vq.shared_dma);    in c2_adapter_init()
     92  wr.q1_host_msg_pool = cpu_to_be64(c2dev->rep_vq.host_dma);    in c2_adapter_init()
     93  wr.q2_host_shared = cpu_to_be64(c2dev->aeq.shared_dma);    in c2_adapter_init()
     94  wr.q2_host_msg_pool = cpu_to_be64(c2dev->aeq.host_dma);    in c2_adapter_init()
     97  err = vq_send_wr(c2dev, (union c2wr *) & wr);    in c2_adapter_init()
    105  static void c2_adapter_term(struct c2_dev *c2dev)    in c2_adapter_term() argument
    114  vq_send_wr(c2dev, (union c2wr *) & wr);    in c2_adapter_term()
    [all …]
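The c2_adapter_init() lines above show a common bring-up pattern for a DMA-capable adapter: host-side DMA addresses (the hint counter and the request, reply, and async queues) are converted with cpu_to_be64() into a work request that the device reads, then posted with vq_send_wr(). A minimal sketch of that conversion step, with a hypothetical init_req layout standing in for the real c2wr structure:

#include <linux/types.h>
#include <asm/byteorder.h>

/* Hypothetical device-visible request: fields are big-endian on the wire. */
struct init_req {
	__be64 hint_count;	/* DMA address of the hint counter */
	__be64 q0_host_shared;	/* DMA address of the request-queue shared page */
};

static void fill_init_req(struct init_req *wr, dma_addr_t hint_dma,
			  dma_addr_t q0_dma)
{
	/* The adapter expects big-endian 64-bit addresses, so convert
	 * explicitly regardless of host endianness. */
	wr->hint_count = cpu_to_be64(hint_dma);
	wr->q0_host_shared = cpu_to_be64(q0_dma);
}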
c2_pd.c
     43  int c2_pd_alloc(struct c2_dev *c2dev, int privileged, struct c2_pd *pd)    in c2_pd_alloc() argument
     48  spin_lock(&c2dev->pd_table.lock);    in c2_pd_alloc()
     49  obj = find_next_zero_bit(c2dev->pd_table.table, c2dev->pd_table.max,    in c2_pd_alloc()
     50  c2dev->pd_table.last);    in c2_pd_alloc()
     51  if (obj >= c2dev->pd_table.max)    in c2_pd_alloc()
     52  obj = find_first_zero_bit(c2dev->pd_table.table,    in c2_pd_alloc()
     53  c2dev->pd_table.max);    in c2_pd_alloc()
     54  if (obj < c2dev->pd_table.max) {    in c2_pd_alloc()
     56  __set_bit(obj, c2dev->pd_table.table);    in c2_pd_alloc()
     57  c2dev->pd_table.last = obj+1;    in c2_pd_alloc()
    [all …]
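The c2_pd_alloc() hits show the driver's protection-domain allocator: a round-robin bitmap scan under a spinlock that starts at the last handed-out index, wraps to the beginning if nothing is free past it, and marks the winning slot used. A minimal self-contained sketch of that pattern; the id_table struct is an assumption modeled on the pd_table fields quoted above:

#include <linux/bitops.h>
#include <linux/spinlock.h>
#include <linux/errno.h>

struct id_table {
	unsigned long *table;	/* one bit per object */
	unsigned int max;	/* number of valid bits */
	unsigned int last;	/* where the next scan starts */
	spinlock_t lock;
};

/* Returns an unused index, or -ENOMEM if every bit is already set. */
static int id_alloc(struct id_table *t)
{
	unsigned int obj;

	spin_lock(&t->lock);
	obj = find_next_zero_bit(t->table, t->max, t->last);
	if (obj >= t->max)
		obj = find_first_zero_bit(t->table, t->max);
	if (obj >= t->max) {
		spin_unlock(&t->lock);
		return -ENOMEM;
	}
	__set_bit(obj, t->table);	/* non-atomic is fine under the lock */
	t->last = obj + 1;		/* spread allocations round-robin */
	spin_unlock(&t->lock);
	return obj;
}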
c2_vq.c
     82  int vq_init(struct c2_dev *c2dev)    in vq_init() argument
     84  sprintf(c2dev->vq_cache_name, "c2-vq:dev%c",    in vq_init()
     85  (char) ('0' + c2dev->devnum));    in vq_init()
     86  c2dev->host_msg_cache =    in vq_init()
     87  kmem_cache_create(c2dev->vq_cache_name, c2dev->rep_vq.msg_size, 0,    in vq_init()
     89  if (c2dev->host_msg_cache == NULL) {    in vq_init()
     95  void vq_term(struct c2_dev *c2dev)    in vq_term() argument
     97  kmem_cache_destroy(c2dev->host_msg_cache);    in vq_term()
    103  struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev)    in vq_req_alloc() argument
    124  void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *r)    in vq_req_free() argument
    [all …]
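vq_init() builds a per-device slab cache whose name embeds the device number and whose object size matches the reply-queue message size; vq_term() destroys it. A sketch of that setup/teardown pairing, assuming a vq_dev container with fields analogous to host_msg_cache and rep_vq.msg_size above (the flags argument is my choice, not necessarily the driver's):

#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/errno.h>

struct vq_dev {
	char cache_name[32];
	unsigned int rep_msg_size;	/* size of one reply message */
	struct kmem_cache *host_msg_cache;
};

static int my_vq_init(struct vq_dev *dev, int devnum)
{
	/* One cache per device so /proc/slabinfo shows them separately. */
	snprintf(dev->cache_name, sizeof(dev->cache_name), "c2-vq:dev%d", devnum);
	dev->host_msg_cache = kmem_cache_create(dev->cache_name,
						dev->rep_msg_size, 0,
						SLAB_HWCACHE_ALIGN, NULL);
	if (!dev->host_msg_cache)
		return -ENOMEM;
	return 0;
}

static void my_vq_term(struct vq_dev *dev)
{
	/* All reply buffers must have been returned to the cache by now. */
	kmem_cache_destroy(dev->host_msg_cache);
}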
c2_cm.c
     43  struct c2_dev *c2dev = to_c2dev(cm_id->device);    in c2_llp_connect() local
     74  err = c2_qp_set_read_limits(c2dev, qp, iw_param->ord, iw_param->ird);    in c2_llp_connect()
     81  wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);    in c2_llp_connect()
     87  vq_req = vq_req_alloc(c2dev);    in c2_llp_connect()
     95  wr->rnic_handle = c2dev->adapter_handle;    in c2_llp_connect()
    117  err = vq_send_wr(c2dev, (union c2wr *) wr);    in c2_llp_connect()
    118  vq_req_free(c2dev, vq_req);    in c2_llp_connect()
    137  struct c2_dev *c2dev;    in c2_llp_service_create() local
    147  c2dev = to_c2dev(cm_id->device);    in c2_llp_service_create()
    148  if (c2dev == NULL)    in c2_llp_service_create()
    [all …]
c2_cq.c
     46  static struct c2_cq *c2_cq_get(struct c2_dev *c2dev, int cqn)    in c2_cq_get() argument
     51  spin_lock_irqsave(&c2dev->lock, flags);    in c2_cq_get()
     52  cq = c2dev->qptr_array[cqn];    in c2_cq_get()
     54  spin_unlock_irqrestore(&c2dev->lock, flags);    in c2_cq_get()
     58  spin_unlock_irqrestore(&c2dev->lock, flags);    in c2_cq_get()
     68  void c2_cq_event(struct c2_dev *c2dev, u32 mq_index)    in c2_cq_event() argument
     72  cq = c2_cq_get(c2dev, mq_index);    in c2_cq_event()
     82  void c2_cq_clean(struct c2_dev *c2dev, struct c2_qp *qp, u32 mq_index)    in c2_cq_clean() argument
     87  cq = c2_cq_get(c2dev, mq_index);    in c2_cq_clean()
    131  static inline int c2_poll_one(struct c2_dev *c2dev,    in c2_poll_one() argument
    [all …]
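c2_cq_get() looks a completion queue up in the device's qptr_array under spin_lock_irqsave(), because the same array is also consulted from interrupt context (see the c2_intr.c entry below). A short sketch of that lookup-under-irqsave pattern; the my_dev/my_cq names and the array size are placeholders, and the reference the real function presumably takes before unlocking is only noted in a comment:

#include <linux/spinlock.h>

struct my_cq;

struct my_dev {
	spinlock_t lock;
	void *qptr_array[64];	/* indexed by message-queue number */
};

static struct my_cq *my_cq_get(struct my_dev *dev, int cqn)
{
	struct my_cq *cq;
	unsigned long flags;

	/* irqsave: the array is also read from the IRQ handler. */
	spin_lock_irqsave(&dev->lock, flags);
	cq = dev->qptr_array[cqn];
	/* A real implementation takes a reference on cq here, before
	 * dropping the lock, so the object cannot vanish underneath us. */
	spin_unlock_irqrestore(&dev->lock, flags);
	return cq;
}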
c2.c
    211  struct c2_dev *c2dev = c2_port->c2dev;    in c2_rx_alloc() local
    232  pci_map_single(c2dev->pcidev, skb->data, maplen,    in c2_rx_alloc()
    279  struct c2_dev *c2dev = c2_port->c2dev;    in c2_rx_clean() local
    298  pci_unmap_single(c2dev->pcidev, elem->mapaddr,    in c2_rx_clean()
    306  static inline int c2_tx_free(struct c2_dev *c2dev, struct c2_element *elem)    in c2_tx_free() argument
    312  pci_unmap_single(c2dev->pcidev, elem->mapaddr, elem->maplen,    in c2_tx_free()
    361  c2_tx_free(c2_port->c2dev, elem);    in c2_tx_clean()
    367  c2_port->c2dev->cur_tx = tx_ring->to_use - tx_ring->start;    in c2_tx_clean()
    382  struct c2_dev *c2dev = c2_port->c2dev;    in c2_tx_interrupt() local
    407  c2_tx_free(c2dev, elem);    in c2_tx_interrupt()
    [all …]
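The c2.c hits are the Ethernet data path: receive buffers are mapped for DMA with pci_map_single() when posted, and unmapped again in the cleanup and completion paths. A sketch of that map/unmap pairing using the legacy PCI DMA API this driver is built on; the rx_elem struct and the maplen handling are assumptions based on the fields quoted above, and mapping-error checking is omitted for brevity:

#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/errno.h>

struct rx_elem {
	struct sk_buff *skb;
	dma_addr_t mapaddr;
	unsigned int maplen;
};

static int rx_post(struct pci_dev *pdev, struct rx_elem *elem,
		   unsigned int maplen)
{
	struct sk_buff *skb = dev_alloc_skb(maplen);

	if (!skb)
		return -ENOMEM;
	elem->skb = skb;
	elem->maplen = maplen;
	/* Map the packet buffer so the adapter can DMA received data into it. */
	elem->mapaddr = pci_map_single(pdev, skb->data, maplen,
				       PCI_DMA_FROMDEVICE);
	return 0;
}

static void rx_unpost(struct pci_dev *pdev, struct rx_elem *elem)
{
	/* Undo the mapping before the skb is freed or handed to the stack. */
	pci_unmap_single(pdev, elem->mapaddr, elem->maplen, PCI_DMA_FROMDEVICE);
	dev_kfree_skb(elem->skb);
}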
c2_mm.c
     50  send_pbl_messages(struct c2_dev *c2dev, __be32 stag_index,    in send_pbl_messages() argument
     72  pbe_count = (c2dev->req_vq.msg_size -    in send_pbl_messages()
     74  wr = kmalloc(c2dev->req_vq.msg_size, GFP_KERNEL);    in send_pbl_messages()
     86  wr->rnic_handle = c2dev->adapter_handle;    in send_pbl_messages()
    104  vq_req_get(c2dev, vq_req);    in send_pbl_messages()
    136  err = vq_send_wr(c2dev, (union c2wr *) wr);    in send_pbl_messages()
    139  vq_req_put(c2dev, vq_req);    in send_pbl_messages()
    150  err = vq_wait_for_reply(c2dev, vq_req);    in send_pbl_messages()
    166  vq_repbuf_free(c2dev, reply);    in send_pbl_messages()
    174  c2_nsmr_register_phys_kern(struct c2_dev *c2dev, u64 *addr_list,    in c2_nsmr_register_phys_kern() argument
    [all …]
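send_pbl_messages() registers a physical buffer list that may not fit in a single verbs-queue message, so it first works out how many page-buffer entries fit per message from req_vq.msg_size and then loops, one message at a time. A small sketch of that sizing arithmetic; the header size and entry type are placeholders, not the real c2wr layout:

#include <linux/kernel.h>
#include <linux/types.h>

/* Hypothetical sizes; the real driver derives them from its c2wr structs. */
#define PBL_MSG_HDR_SIZE	32	/* fixed header before the entries */
#define PBL_ENTRY_SIZE		sizeof(__be64)

static unsigned int pbl_msgs_needed(unsigned int msg_size,
				    unsigned int pbl_depth)
{
	unsigned int per_msg = (msg_size - PBL_MSG_HDR_SIZE) / PBL_ENTRY_SIZE;

	/* Round up: the last message may carry fewer entries. */
	return DIV_ROUND_UP(pbl_depth, per_msg);
}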
c2_qp.c
    134  int c2_qp_modify(struct c2_dev *c2dev, struct c2_qp *qp,    in c2_qp_modify() argument
    150  vq_req = vq_req_alloc(c2dev);    in c2_qp_modify()
    156  wr.rnic_handle = c2dev->adapter_handle;    in c2_qp_modify()
    205  vq_req_get(c2dev, vq_req);    in c2_qp_modify()
    207  err = vq_send_wr(c2dev, (union c2wr *) & wr);    in c2_qp_modify()
    209  vq_req_put(c2dev, vq_req);    in c2_qp_modify()
    213  err = vq_wait_for_reply(c2dev, vq_req);    in c2_qp_modify()
    242  vq_repbuf_free(c2dev, reply);    in c2_qp_modify()
    244  vq_req_free(c2dev, vq_req);    in c2_qp_modify()
    253  int c2_qp_set_read_limits(struct c2_dev *c2dev, struct c2_qp *qp,    in c2_qp_set_read_limits() argument
    [all …]
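c2_qp_modify() shows the full verbs-queue round trip that c2_cm.c, c2_mm.c and c2_rnic.c all repeat: allocate a vq_req, fill a work request with the adapter handle, take a reference with vq_req_get() before posting (dropping it with vq_req_put() if the post fails), wait for the adapter's reply, then free the reply buffer and the request. Note that c2_qp_modify() builds its wr on the stack while c2_cm.c and c2_mm.c kmalloc() one sized to req_vq.msg_size. A rough skeleton built from the prototypes listed under c2_vq.h below; the wr contents, the reply_msg field and the error handling are simplifications:

#include "c2.h"
#include "c2_vq.h"

static int send_and_wait(struct c2_dev *c2dev, union c2wr *wr)
{
	struct c2_vq_req *vq_req;
	void *reply;
	int err;

	vq_req = vq_req_alloc(c2dev);
	if (!vq_req)
		return -ENOMEM;

	/* ... fill *wr: opcode, adapter handle, vq_req context ... */

	vq_req_get(c2dev, vq_req);		/* hold it across the post */
	err = vq_send_wr(c2dev, wr);
	if (err) {
		vq_req_put(c2dev, vq_req);	/* post failed: drop the ref */
		goto out;
	}

	err = vq_wait_for_reply(c2dev, vq_req);	/* sleeps until the adapter answers */
	if (err)
		goto out;

	reply = vq_req->reply_msg;		/* assumption: reply stashed in the req */
	/* ... check the reply's status field ... */
	vq_repbuf_free(c2dev, reply);		/* return the reply buffer to its cache */
out:
	vq_req_free(c2dev, vq_req);
	return err;
}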
c2_intr.c
     37  static void handle_mq(struct c2_dev *c2dev, u32 index);
     38  static void handle_vq(struct c2_dev *c2dev, u32 mq_index);
     43  void c2_rnic_interrupt(struct c2_dev *c2dev)    in c2_rnic_interrupt() argument
     47  while (c2dev->hints_read != be16_to_cpu(*c2dev->hint_count)) {    in c2_rnic_interrupt()
     48  mq_index = readl(c2dev->regs + PCI_BAR0_HOST_HINT);    in c2_rnic_interrupt()
     53  c2dev->hints_read++;    in c2_rnic_interrupt()
     54  handle_mq(c2dev, mq_index);    in c2_rnic_interrupt()
     62  static void handle_mq(struct c2_dev *c2dev, u32 mq_index)    in handle_mq() argument
     64  if (c2dev->qptr_array[mq_index] == NULL) {    in handle_mq()
     80  wake_up(&c2dev->req_vq_wo);    in handle_mq()
    [all …]
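c2_rnic_interrupt() drains adapter notifications by comparing a host-side count of hints already processed against a DMA-updated, big-endian hint counter, and for each outstanding hint it reads the next message-queue index from a BAR0 register. A sketch of that producer/consumer loop; the struct, the register offset macro and the stubbed-out dispatch are placeholders for the real c2_dev fields quoted above:

#include <linux/io.h>
#include <linux/types.h>
#include <asm/byteorder.h>

struct hint_dev {
	void __iomem *regs;	/* BAR0 mapping */
	__be16 *hint_count;	/* DMA-coherent counter written by the adapter */
	u16 hints_read;		/* how many hints the host has consumed */
};

#define HOST_HINT_REG	0x0	/* placeholder for PCI_BAR0_HOST_HINT */

static void hint_irq(struct hint_dev *dev)
{
	u32 mq_index;

	/* The adapter bumps *hint_count for every new message-queue event;
	 * keep reading hints until the host count has caught up with it. */
	while (dev->hints_read != be16_to_cpu(*dev->hint_count)) {
		mq_index = readl(dev->regs + HOST_HINT_REG);
		dev->hints_read++;
		/* handle_mq(dev, mq_index): dispatch by queue index */
		(void)mq_index;
	}
}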
c2.h
    358  struct c2_dev *c2dev;    member
    423  #define C2_SET_CUR_RX(c2dev, cur_rx) \    argument
    424  __raw_writel((__force u32) cpu_to_be32(cur_rx), c2dev->mmio_txp_ring + 4092)
    426  #define C2_GET_CUR_RX(c2dev) \    argument
    427  be32_to_cpu((__force __be32) readl(c2dev->mmio_txp_ring + 4092))
    479  extern int c2_register_device(struct c2_dev *c2dev);
    480  extern void c2_unregister_device(struct c2_dev *c2dev);
    481  extern int c2_rnic_init(struct c2_dev *c2dev);
    482  extern void c2_rnic_term(struct c2_dev *c2dev);
    483  extern void c2_rnic_interrupt(struct c2_dev *c2dev);
    [all …]
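The C2_SET_CUR_RX()/C2_GET_CUR_RX() macros in c2.h show how the driver talks to a big-endian device register through readl()/__raw_writel(): values are converted with cpu_to_be32()/be32_to_cpu(), and the __force casts keep sparse quiet because the MMIO accessors are typed for CPU-endian data. A static-inline equivalent of the same idea; the offset macro and parameter names are placeholders:

#include <linux/io.h>
#include <asm/byteorder.h>

#define CUR_RX_OFFSET	4092	/* offset of the big-endian index register */

/* The register holds a big-endian 32-bit value; convert at the boundary. */
static inline void set_cur_rx(void __iomem *ring, u32 cur_rx)
{
	__raw_writel((__force u32)cpu_to_be32(cur_rx), ring + CUR_RX_OFFSET);
}

static inline u32 get_cur_rx(void __iomem *ring)
{
	return be32_to_cpu((__force __be32)readl(ring + CUR_RX_OFFSET));
}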
c2_vq.h
     50  extern int vq_init(struct c2_dev *c2dev);
     51  extern void vq_term(struct c2_dev *c2dev);
     53  extern struct c2_vq_req *vq_req_alloc(struct c2_dev *c2dev);
     54  extern void vq_req_free(struct c2_dev *c2dev, struct c2_vq_req *req);
     55  extern void vq_req_get(struct c2_dev *c2dev, struct c2_vq_req *req);
     56  extern void vq_req_put(struct c2_dev *c2dev, struct c2_vq_req *req);
     57  extern int vq_send_wr(struct c2_dev *c2dev, union c2wr * wr);
     59  extern void *vq_repbuf_alloc(struct c2_dev *c2dev);
     60  extern void vq_repbuf_free(struct c2_dev *c2dev, void *reply);
     62  extern int vq_wait_for_reply(struct c2_dev *c2dev, struct c2_vq_req *req);
c2_alloc.c
     39  static int c2_alloc_mqsp_chunk(struct c2_dev *c2dev, gfp_t gfp_mask,    in c2_alloc_mqsp_chunk() argument
     46  new_head = dma_alloc_coherent(&c2dev->pcidev->dev, PAGE_SIZE,    in c2_alloc_mqsp_chunk()
     71  int c2_init_mqsp_pool(struct c2_dev *c2dev, gfp_t gfp_mask,    in c2_init_mqsp_pool() argument
     74  return c2_alloc_mqsp_chunk(c2dev, gfp_mask, root);    in c2_init_mqsp_pool()
     77  void c2_free_mqsp_pool(struct c2_dev *c2dev, struct sp_chunk *root)    in c2_free_mqsp_pool() argument
     83  dma_free_coherent(&c2dev->pcidev->dev, PAGE_SIZE, root,    in c2_free_mqsp_pool()
     89  __be16 *c2_alloc_mqsp(struct c2_dev *c2dev, struct sp_chunk *head,    in c2_alloc_mqsp() argument
    100  if (c2_alloc_mqsp_chunk(c2dev, gfp_mask, &head->next) ==    in c2_alloc_mqsp()
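c2_alloc.c manages a pool of message-queue shared pointers built from PAGE_SIZE chunks of coherent DMA memory: each chunk comes from dma_alloc_coherent(), is chained through a next pointer, and goes back with dma_free_coherent() when the pool is torn down. A sketch of chunk allocation and pool teardown; this sp_chunk layout is an assumption modeled on the calls quoted above:

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

struct sp_chunk {
	struct sp_chunk *next;
	dma_addr_t dma_addr;	/* bus address of this chunk */
	/* ... free-list bookkeeping and the __be16 slots live here ... */
};

static int alloc_chunk(struct device *dev, gfp_t gfp_mask,
		       struct sp_chunk **head)
{
	dma_addr_t dma_addr;
	struct sp_chunk *chunk;

	/* Coherent memory: CPU and adapter see each other's writes without
	 * explicit sync operations. */
	chunk = dma_alloc_coherent(dev, PAGE_SIZE, &dma_addr, gfp_mask);
	if (!chunk)
		return -ENOMEM;
	chunk->next = NULL;
	chunk->dma_addr = dma_addr;
	*head = chunk;
	return 0;
}

static void free_pool(struct device *dev, struct sp_chunk *root)
{
	while (root) {
		struct sp_chunk *next = root->next;

		dma_free_coherent(dev, PAGE_SIZE, root, root->dma_addr);
		root = next;
	}
}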
c2_provider.c
     69  struct c2_dev *c2dev = to_c2dev(ibdev);    in c2_query_device() local
     76  *props = c2dev->props;    in c2_query_device()
    116  struct c2_dev *c2dev = to_c2dev(ibdev);    in c2_query_gid() local
    120  memcpy(&(gid->raw[0]), c2dev->pseudo_netdev->dev_addr, 6);    in c2_query_gid()
    224  struct c2_dev* c2dev = to_c2dev(device);    in c2_get_qp() local
    227  qp = c2_find_qpn(c2dev, qpn);    in c2_get_qp()
    524  struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);    in show_rev() local
    526  return sprintf(buf, "%x\n", c2dev->props.hw_ver);    in show_rev()
    532  struct c2_dev *c2dev = container_of(dev, struct c2_dev, ibdev.dev);    in show_fw_ver() local
    535  (int) (c2dev->props.fw_ver >> 32),    in show_fw_ver()
    [all …]
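The show_rev()/show_fw_ver() hits illustrate the standard sysfs attribute idiom: the struct device embedded in the driver's ibdev is what the show routine receives, and container_of() walks back out to the enclosing struct c2_dev. A generic sketch of that idiom; my_dev and hw_ver are stand-ins for the real types:

#include <linux/device.h>
#include <linux/kernel.h>

struct my_dev {
	struct device dev;	/* embedded device registered with the core */
	u32 hw_ver;
};

static ssize_t hw_rev_show(struct device *dev, struct device_attribute *attr,
			   char *buf)
{
	/* sysfs hands us the embedded struct device; recover the container. */
	struct my_dev *mydev = container_of(dev, struct my_dev, dev);

	return sprintf(buf, "%x\n", mydev->hw_ver);
}
static DEVICE_ATTR_RO(hw_rev);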
c2_ae.c
    147  void c2_ae_event(struct c2_dev *c2dev, u32 mq_index)    in c2_ae_event() argument
    149  struct c2_mq *mq = c2dev->qptr_array[mq_index];    in c2_ae_event()
    180  c2dev, event_id, resource_indicator, resource_user_context,    in c2_ae_event()
    234  ib_event.device = &c2dev->ibdev;    in c2_ae_event()
    309  ib_event.device = &c2dev->ibdev;    in c2_ae_event()
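c2_ae_event() translates adapter asynchronous events into ib_event notifications on &c2dev->ibdev. A minimal sketch of how such an event is handed to the RDMA core; the device-fatal event type chosen here is arbitrary, and qp- or cq-scoped events would fill ib_event.element instead:

#include <rdma/ib_verbs.h>

static void report_device_fatal(struct ib_device *ibdev)
{
	struct ib_event ib_event;

	ib_event.device = ibdev;
	ib_event.event = IB_EVENT_DEVICE_FATAL;
	ib_event.element.port_num = 0;	/* not used for device-scope events */
	ib_dispatch_event(&ib_event);	/* fans out to registered handlers */
}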
/drivers/misc/c2port/
core.c
    310  struct c2port_device *c2dev = dev_get_drvdata(dev);    in c2port_show_name() local
    312  return sprintf(buf, "%s\n", c2dev->name);    in c2port_show_name()
    319  struct c2port_device *c2dev = dev_get_drvdata(dev);    in c2port_show_flash_blocks_num() local
    320  struct c2port_ops *ops = c2dev->ops;    in c2port_show_flash_blocks_num()
    329  struct c2port_device *c2dev = dev_get_drvdata(dev);    in c2port_show_flash_block_size() local
    330  struct c2port_ops *ops = c2dev->ops;    in c2port_show_flash_block_size()
    339  struct c2port_device *c2dev = dev_get_drvdata(dev);    in c2port_show_flash_size() local
    340  struct c2port_ops *ops = c2dev->ops;    in c2port_show_flash_size()
    349  struct c2port_device *c2dev = dev_get_drvdata(dev);    in access_show() local
    351  return sprintf(buf, "%d\n", c2dev->access);    in access_show()
    [all …]
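The c2port hits are the same sysfs show pattern, but here the c2port_device is attached to the class device as driver data, so each routine recovers it with dev_get_drvdata() and then reaches through the board-specific c2port_ops. A brief sketch of that shape; the my_port types, field names and attribute are illustrative, not the real c2port definitions:

#include <linux/device.h>
#include <linux/kernel.h>

struct my_port_ops {
	unsigned short block_size;	/* flash block size, in bytes */
	unsigned short blocks_num;	/* number of flash blocks */
};

struct my_port_device {
	const char *name;
	struct my_port_ops *ops;
};

static ssize_t flash_size_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	/* The port device was stored with dev_set_drvdata() at creation. */
	struct my_port_device *port = dev_get_drvdata(dev);
	struct my_port_ops *ops = port->ops;

	return sprintf(buf, "%d\n", ops->block_size * ops->blocks_num);
}
static DEVICE_ATTR_RO(flash_size);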