/drivers/infiniband/hw/cxgb4/

device.c
     60  struct cxgb4_lld_info lldi;   (member)
    527  rdev->qpshift = PAGE_SHIFT - ilog2(rdev->lldi.udb_density);   in c4iw_rdev_open()
    528  rdev->qpmask = rdev->lldi.udb_density - 1;   in c4iw_rdev_open()
    529  rdev->cqshift = PAGE_SHIFT - ilog2(rdev->lldi.ucq_density);   in c4iw_rdev_open()
    530  rdev->cqmask = rdev->lldi.ucq_density - 1;   in c4iw_rdev_open()
    534  __func__, pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,   in c4iw_rdev_open()
    535  rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),   in c4iw_rdev_open()
    536  rdev->lldi.vr->pbl.start,   in c4iw_rdev_open()
    537  rdev->lldi.vr->pbl.size, rdev->lldi.vr->rq.start,   in c4iw_rdev_open()
    538  rdev->lldi.vr->rq.size,   in c4iw_rdev_open()
    [all …]
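
The qpshift/qpmask and cqshift/cqmask pairs computed at lines 527-530 only work because the LLD reports doorbell densities that are powers of two. Below is a minimal stand-alone sketch of that arithmetic; PAGE_SHIFT and udb_density are assumed values, and ilog2_u32() stands in for the kernel's ilog2():

    /*
     * Minimal stand-alone sketch of the density math in c4iw_rdev_open().
     * PAGE_SHIFT and udb_density are assumptions for illustration; in the
     * driver the density arrives from the LLD in struct cxgb4_lld_info.
     */
    #include <stdio.h>

    #define PAGE_SHIFT 12                    /* assume 4 KiB pages */

    static unsigned int ilog2_u32(unsigned int v)
    {
            unsigned int r = 0;

            while (v >>= 1)                  /* floor(log2(v)), v > 0 */
                    r++;
            return r;
    }

    int main(void)
    {
            unsigned int udb_density = 8;    /* assumed: 8 doorbells per page */
            unsigned int qpshift = PAGE_SHIFT - ilog2_u32(udb_density);
            unsigned int qpmask = udb_density - 1;

            /* qid << qpshift locates a QP's doorbell page within BAR2;
             * qid & qpmask selects the slot inside that page. */
            printf("qpshift=%u qpmask=0x%x\n", qpshift, qpmask);
            return 0;
    }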

provider.c
    145  if ((addr >= pci_resource_start(rdev->lldi.pdev, 0)) &&   in c4iw_mmap()
    146  (addr < (pci_resource_start(rdev->lldi.pdev, 0) +   in c4iw_mmap()
    147  pci_resource_len(rdev->lldi.pdev, 0)))) {   in c4iw_mmap()
    156  } else if ((addr >= pci_resource_start(rdev->lldi.pdev, 2)) &&   in c4iw_mmap()
    157  (addr < (pci_resource_start(rdev->lldi.pdev, 2) +   in c4iw_mmap()
    158  pci_resource_len(rdev->lldi.pdev, 2)))) {   in c4iw_mmap()
    166  if (is_t5(rdev->lldi.adapter_type))   in c4iw_mmap()
    258  memcpy(&(gid->raw[0]), dev->rdev.lldi.ports[port-1]->dev_addr, 6);   in c4iw_query_gid()
    271  memcpy(&props->sys_image_guid, dev->rdev.lldi.ports[0]->dev_addr, 6);   in c4iw_query_device()
    272  props->hw_ver = CHELSIO_CHIP_RELEASE(dev->rdev.lldi.adapter_type);   in c4iw_query_device()
    [all …]
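
The chained comparisons in c4iw_mmap() (lines 145-158) are interval tests deciding whether a requested physical address falls in BAR0 (adapter registers) or BAR2 (user doorbells). A hedged user-space sketch of the same test follows; the two ranges are invented stand-ins for pci_resource_start()/pci_resource_len():

    /* User-space sketch of the BAR classification in c4iw_mmap(); the
     * ranges are hypothetical stand-ins for pci_resource_start()/len(). */
    #include <stdint.h>
    #include <stdio.h>

    struct resource_range {
            uint64_t start;
            uint64_t len;
    };

    static int in_range(uint64_t addr, const struct resource_range *r)
    {
            return addr >= r->start && addr < r->start + r->len;
    }

    int main(void)
    {
            struct resource_range bar0 = { 0xfb000000, 0x100000 };  /* invented */
            struct resource_range bar2 = { 0xfa000000, 0x800000 };  /* invented */
            uint64_t addr = 0xfa000400;

            if (in_range(addr, &bar0))
                    puts("BAR0: adapter register space");
            else if (in_range(addr, &bar2))
                    puts("BAR2: user doorbell space");
            else
                    puts("not a mappable range");
            return 0;
    }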

resource.c
     43  rdev->lldi.vr->qp.start,   in c4iw_init_qid_table()
     44  rdev->lldi.vr->qp.size,   in c4iw_init_qid_table()
     45  rdev->lldi.vr->qp.size, 0))   in c4iw_init_qid_table()
     48  for (i = rdev->lldi.vr->qp.start;   in c4iw_init_qid_table()
     49  i < rdev->lldi.vr->qp.start + rdev->lldi.vr->qp.size; i++)   in c4iw_init_qid_table()
    282  pbl_start = rdev->lldi.vr->pbl.start;   in c4iw_pblpool_create()
    283  pbl_chunk = rdev->lldi.vr->pbl.size;   in c4iw_pblpool_create()
    326  pci_name(rdev->lldi.pdev));   in c4iw_rqtpool_alloc()
    355  rqt_start = rdev->lldi.vr->rq.start;   in c4iw_rqtpool_create()
    356  rqt_chunk = rdev->lldi.vr->rq.size;   in c4iw_rqtpool_create()
    [all …]
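
Lines 43-49 show the pattern used for every adapter resource region: the LLD describes it as a (start, size) pair and the driver seeds an allocator with every id in [start, start + size). A small runnable sketch, with a plain array standing in for the driver's c4iw_id_table and invented region bounds:

    /* Runnable sketch: seed an id pool from a (start, size) region, as
     * c4iw_init_qid_table() does with rdev->lldi.vr->qp. The array is a
     * stand-in for c4iw_id_table; start/size are invented. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void)
    {
            unsigned int start = 1024, size = 64;   /* assumed qp region */
            unsigned int *free_qids = malloc(size * sizeof(*free_qids));

            if (!free_qids)
                    return 1;
            for (unsigned int i = 0; i < size; i++)
                    free_qids[i] = start + i;       /* every qid in range */
            printf("qids %u..%u ready\n", free_qids[0], free_qids[size - 1]);
            free(free_qids);
            return 0;
    }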

cm.c
    192  error = cxgb4_l2t_send(rdev->lldi.ports[0], skb, l2e);   in c4iw_l2t_send()
    207  error = cxgb4_ofld_send(rdev->lldi.ports[0], skb);   in c4iw_ofld_send()
    230  ep->emss = ep->com.dev->rdev.lldi.mtus[GET_TCPOPT_MSS(opt)] - 40;   in set_emss()
    288  cxgb4_remove_tid(ep->com.dev->rdev.lldi.tids, 0, ep->hwtid);   in _c4iw_free_ep()
    395  flowc->mnemval[0].val = cpu_to_be32(PCI_FUNC(ep->com.dev->rdev.lldi.pdev->devfn) << 8);   in send_flowc()
    482  switch (dev->rdev.lldi.filt_mode) {   in select_ntuple()
    520  int size = is_t4(ep->com.dev->rdev.lldi.adapter_type) ?   in send_connect()
    535  cxgb4_best_mtu(ep->com.dev->rdev.lldi.mtus, ep->mtu, &mtu_idx);   in send_connect()
    559  if (is_t4(ep->com.dev->rdev.lldi.adapter_type)) {   in send_connect()
    854  struct tid_info *t = dev->rdev.lldi.tids;   in act_establish()
    [all …]
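
Lines 230 and 535 are two halves of one mechanism: cxgb4_best_mtu() picks the largest entry of the adapter's MTU table that does not exceed the path MTU, and set_emss() derives the effective MSS by subtracting 40 bytes of IPv4 plus TCP headers. A runnable sketch with an invented table:

    /* Runnable sketch of MTU-table handling: best-fit index selection as
     * in cxgb4_best_mtu(), then the MSS derivation from set_emss(). The
     * table contents are invented. */
    #include <stdio.h>

    int main(void)
    {
            const unsigned short mtus[] = { 576, 1280, 1500, 4096, 9000 };
            unsigned int path_mtu = 1500, mtu_idx = 0;

            for (unsigned int i = 0; i < sizeof(mtus) / sizeof(mtus[0]); i++)
                    if (mtus[i] <= path_mtu)
                            mtu_idx = i;            /* largest entry that fits */

            unsigned int emss = mtus[mtu_idx] - 40; /* 20 B IPv4 + 20 B TCP */
            printf("mtu_idx=%u emss=%u\n", mtu_idx, emss);
            return 0;
    }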

qp.c
     76  dma_free_coherent(&(rdev->lldi.pdev->dev), sq->memsize, sq->queue,   in dealloc_host_sq()
     90  if (!ocqp_support || !ocqp_supported(&rdev->lldi))   in alloc_oc_sq()
     96  rdev->lldi.vr->ocq.start;   in alloc_oc_sq()
     98  rdev->lldi.vr->ocq.start);   in alloc_oc_sq()
    105  sq->queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev), sq->memsize,   in alloc_host_sq()
    131  dma_free_coherent(&(rdev->lldi.pdev->dev),   in destroy_qp()
    198  wq->rq.queue = dma_alloc_coherent(&(rdev->lldi.pdev->dev),   in create_qp()
    213  wq->db = rdev->lldi.db_reg;   in create_qp()
    214  wq->gts = rdev->lldi.gts_reg;   in create_qp()
    216  wq->sq.udb = (u64)pci_resource_start(rdev->lldi.pdev, 2) +   in create_qp()
    [all …]
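
alloc_host_sq() and create_qp() (lines 105 and 198) allocate each work queue as a single coherent DMA buffer on the PCI device behind lldi.pdev, and dealloc_host_sq()/destroy_qp() must free it with the same size and handles. A trimmed kernel-context sketch of that pairing (not buildable stand-alone; the t4_sq field names mirror the driver):

    /* Kernel-context sketch (not buildable stand-alone); assumes the
     * driver's struct c4iw_rdev and struct t4_sq definitions. */
    static int alloc_host_sq_sketch(struct c4iw_rdev *rdev, struct t4_sq *sq)
    {
            sq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, sq->memsize,
                                           &sq->dma_addr, GFP_KERNEL);
            return sq->queue ? 0 : -ENOMEM;
    }

    static void dealloc_host_sq_sketch(struct c4iw_rdev *rdev, struct t4_sq *sq)
    {
            /* Size, CPU address, and DMA handle must match the allocation. */
            dma_free_coherent(&rdev->lldi.pdev->dev, sq->memsize,
                              sq->queue, sq->dma_addr);
    }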

mem.c
    112  if (is_t4(rdev->lldi.adapter_type))   in _c4iw_write_mem_inline()
    184  daddr = dma_map_single(&rdev->lldi.pdev->dev, data, len, DMA_TO_DEVICE);   in _c4iw_write_mem_dma()
    185  if (dma_mapping_error(&rdev->lldi.pdev->dev, daddr))   in _c4iw_write_mem_dma()
    209  dma_unmap_single(&rdev->lldi.pdev->dev, save, len, DMA_TO_DEVICE);   in _c4iw_write_mem_dma()
    220  if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {   in write_adapter_mem()
    226  pci_name(rdev->lldi.pdev));   in write_adapter_mem()
    296  (rdev->lldi.vr->stag.start >> 5),   in write_tpt_entry()
    314  __func__, pbl_addr, rdev->lldi.vr->pbl.start,   in write_pbl()
    896  c4pl->ibpl.page_list = dma_alloc_coherent(&dev->rdev.lldi.pdev->dev,   in c4iw_alloc_fastreg_pbl()
    915  dma_free_coherent(&c4pl->dev->rdev.lldi.pdev->dev,   in c4iw_free_fastreg_pbl()
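
mem.c picks between two ways of writing TPT/PBL data into adapter memory: inline, with the payload embedded in the work request, or via DMA (DSGL) on T5 parts (line 220), falling back to inline if the mapping fails. A trimmed kernel-context sketch of that dispatch; the length-threshold check of the real function is elided:

    /* Kernel-context sketch (not buildable stand-alone). */
    static int write_adapter_mem_sketch(struct c4iw_rdev *rdev, u32 addr,
                                        u32 len, void *data)
    {
            if (is_t5(rdev->lldi.adapter_type) && use_dsgl) {
                    /* T5: let the adapter pull the payload via DMA. */
                    if (!_c4iw_write_mem_dma(rdev, addr, len, data))
                            return 0;
                    pr_warn("%s: dma map failure, using inline write\n",
                            pci_name(rdev->lldi.pdev));
            }
            /* Pre-T5 parts, small payloads, or DMA failure: inline write. */
            return _c4iw_write_mem_inline(rdev, addr, len, data);
    }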

iw_cxgb4.h
     73  #define PBL_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->pbl.start)
     74  #define RQT_OFF(rdev_p, a) ((a) - (rdev_p)->lldi.vr->rq.start)
    149  struct cxgb4_lld_info lldi;   (member)
    162  return min((int)T4_MAX_NUM_STAG, (int)(rdev->lldi.vr->stag.size >> 5));   in c4iw_num_stags()
    197  pci_name(rdev->lldi.pdev), hwtid, qpid);   in c4iw_wait_for_reply()
    207  pci_name(rdev->lldi.pdev), wr_waitp->ret, hwtid, qpid);   in c4iw_wait_for_reply()
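
PBL_OFF()/RQT_OFF() (lines 73-74) and c4iw_num_stags() (line 162) both reinterpret the (start, size) ranges the LLD hands over: the macros map an absolute adapter address back to a region-relative offset, and the STag count is the region size divided by the 32-byte TPT entry size, capped at T4_MAX_NUM_STAG. A runnable sketch with invented bounds and an assumed cap:

    /* Runnable sketch; bounds and the T4_MAX_NUM_STAG cap are assumed. */
    #include <stdio.h>

    struct region { unsigned long start, size; };

    #define REGION_OFF(r, a)  ((a) - (r)->start)   /* like PBL_OFF()/RQT_OFF() */
    #define T4_MAX_NUM_STAG   (1 << 15)            /* assumed cap */

    int main(void)
    {
            struct region stag = { 0x8000, 1UL << 20 };  /* invented bounds */
            unsigned long addr = 0x8140;
            unsigned long nstags = stag.size >> 5;       /* 32 B per TPT entry */

            if (nstags > T4_MAX_NUM_STAG)
                    nstags = T4_MAX_NUM_STAG;
            printf("offset=0x%lx stags=%lu\n", REGION_OFF(&stag, addr), nstags);
            return 0;
    }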

cq.c
     71  dma_free_coherent(&(rdev->lldi.pdev->dev),   in destroy_cq()
    102  cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,   in create_cq()
    137  V_FW_RI_RES_WR_IQANDSTINDEX(*rdev->lldi.rxq_ids));   in create_cq()
    158  cq->gts = rdev->lldi.gts_reg;   in create_cq()
    161  cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +   in create_cq()
    167  dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize, cq->queue,   in create_cq()
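
create_cq() follows the same coherent-buffer discipline as the QP code, with the unwind visible at line 167: if handing the queue to firmware fails, the buffer is freed with exactly the arguments used to allocate it. A trimmed kernel-context sketch of that shape; issue_fw_res_wr() is a hypothetical placeholder for the FW_RI_RES_WR submission:

    /* Kernel-context sketch (not buildable stand-alone); issue_fw_res_wr()
     * is hypothetical shorthand for building and sending the FW_RI_RES_WR. */
    static int create_cq_sketch(struct c4iw_rdev *rdev, struct t4_cq *cq)
    {
            int ret;

            cq->queue = dma_alloc_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                           &cq->dma_addr, GFP_KERNEL);
            if (!cq->queue)
                    return -ENOMEM;

            ret = issue_fw_res_wr(rdev, cq);    /* hypothetical helper */
            if (ret)                            /* unwind with matching args */
                    dma_free_coherent(&rdev->lldi.pdev->dev, cq->memsize,
                                      cq->queue, cq->dma_addr);
            return ret;
    }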

/drivers/scsi/cxgbi/cxgb4i/

cxgb4i.c
    527  struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(csk->cdev);   in free_atid() (local)
    530  cxgb4_free_atid(lldi->tids, csk->atid);   in free_atid()
    543  struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);   in do_act_establish() (local)
    544  struct tid_info *t = lldi->tids;   in do_act_establish()
    565  cxgb4_insert_tid(lldi->tids, csk, tid);   in do_act_establish()
    588  csk->advmss = lldi->mtus[GET_TCPOPT_MSS(tcp_opt)] - 40;   in do_act_establish()
    663  struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);   in do_act_open_rpl() (local)
    664  struct tid_info *t = lldi->tids;   in do_act_open_rpl()
    683  cxgb4_remove_tid(lldi->tids, csk->port_id, GET_TID(rpl));   in do_act_open_rpl()
    707  struct cxgb4_lld_info *lldi = cxgbi_cdev_priv(cdev);   in do_peer_close() (local)
    [all …]
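
The cxgb4i listing shows the connection-id lifecycle: a connect runs under a temporary atid, do_act_establish() files the socket under the hardware-assigned tid via cxgb4_insert_tid() and frees the atid, and the do_act_open_rpl()/do_peer_close() paths undo it with cxgb4_remove_tid(); line 588 repeats the MSS-minus-40 derivation seen in cm.c. A loose user-space sketch of the atid-to-tid handoff, with a plain array standing in for struct tid_info and invented id values:

    /* User-space sketch of the atid -> tid handoff; the array stands in
     * for struct tid_info and all id values are invented. */
    #include <stdio.h>

    #define NTIDS 64

    struct conn { int id; };                    /* pretend cxgbi_sock */

    int main(void)
    {
            struct conn *tid2ctx[NTIDS] = { 0 };
            struct conn csk = { .id = 1 };
            int atid = 3;                       /* assumed: chosen at connect */
            int tid = 17;                       /* assumed: assigned by hw */

            tid2ctx[tid] = &csk;                /* like cxgb4_insert_tid() */
            printf("atid %d released, csk %d now under tid %d\n",
                   atid, csk.id, tid);
            tid2ctx[tid] = NULL;                /* like cxgb4_remove_tid() */
            return 0;
    }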