/drivers/infiniband/hw/cxgb4/ |
D | mem.c |
    271  u32 *stag, u8 stag_state, u32 pdid,  in write_tpt_entry() (argument)
    290  stag_idx = (*stag) >> 8;  in write_tpt_entry()
    292  if ((!reset_tpt_entry) && (*stag == T4_STAG_UNSET)) {  in write_tpt_entry()
    296  rdev->stats.stag.fail++;  in write_tpt_entry()
    302  rdev->stats.stag.cur += 32;  in write_tpt_entry()
    303  if (rdev->stats.stag.cur > rdev->stats.stag.max)  in write_tpt_entry()
    304  rdev->stats.stag.max = rdev->stats.stag.cur;  in write_tpt_entry()
    306  *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);  in write_tpt_entry()
    316  FW_RI_TPTE_STAGKEY_V((*stag & FW_RI_TPTE_STAGKEY_M)) |  in write_tpt_entry()
    333  (rdev->lldi.vr->stag.start >> 5),  in write_tpt_entry()
    [all …]
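Taken together, the write_tpt_entry() lines above (and the idx/key split in restrack.c below) show how cxgb4 packs an STag: the TPT index occupies the upper 24 bits and an 8-bit rolling key the low byte. A minimal userspace sketch of that packing; the helper names are illustrative, not part of the driver:

#include <stdio.h>
#include <stdint.h>

/* Illustrative helpers: cxgb4 keeps the TPT index in stag >> 8
 * and the 8-bit key in stag & 0xff (see write_tpt_entry() above). */
static uint32_t stag_compose(uint32_t stag_idx, uint8_t key)
{
        return (stag_idx << 8) | key;
}

static uint32_t stag_index(uint32_t stag) { return stag >> 8; }
static uint8_t  stag_key(uint32_t stag)   { return stag & 0xff; }

int main(void)
{
        uint32_t stag = stag_compose(0x1234, 0xab);

        printf("stag=0x%08x idx=0x%x key=0x%02x\n",
               stag, stag_index(stag), stag_key(stag));
        return 0;
}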
|
D | restrack.c |
    435  u32 stag = mhp->attr.stag;  in c4iw_fill_res_mr_entry() (local)
    440  if (!stag)  in c4iw_fill_res_mr_entry()
    447  ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);  in c4iw_fill_res_mr_entry()
    454  if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))  in c4iw_fill_res_mr_entry()
    459  if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))  in c4iw_fill_res_mr_entry()
|
D | ev.c |
    38   static void print_tpte(struct c4iw_dev *dev, u32 stag)  in print_tpte() (argument)
    43   ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag,  in print_tpte()
    51   stag & 0xffffff00,  in print_tpte()
|
D | device.c |
    485  dev->rdev.stats.stag.total, dev->rdev.stats.stag.cur,  in stats_show()
    486  dev->rdev.stats.stag.max, dev->rdev.stats.stag.fail);  in stats_show()
    528  dev->rdev.stats.stag.max = 0;  in stats_clear()
    529  dev->rdev.stats.stag.fail = 0;  in stats_clear()
    824  pci_name(rdev->lldi.pdev), rdev->lldi.vr->stag.start,  in c4iw_rdev_open()
    825  rdev->lldi.vr->stag.size, c4iw_num_stags(rdev),  in c4iw_rdev_open()
    843  rdev->stats.stag.total = rdev->lldi.vr->stag.size;  in c4iw_rdev_open()
    963  return infop->vr->stag.size > 0 && infop->vr->pbl.size > 0 &&  in rdma_supported()
|
D | t4.h |
    179  __be32 stag;  (member)
    183  __be32 stag;  (member)
    192  __be32 stag;  (member)
    270  #define CQE_WRID_STAG(x) (be32_to_cpu((x)->u.rcqe.stag))
    278  #define CQE_WRID_FR_STAG(x) (be32_to_cpu((x)->u.scqe.stag))
|
D | t4fw_ri_api.h |
    131  __be32 stag;  (member)
    703  __be32 stag;  (member)
    737  __be32 stag;  (member)
|
D | iw_cxgb4.h |
    131  struct c4iw_stat stag;  (member)
    205  return (int)(rdev->lldi.vr->stag.size >> 5);  in c4iw_num_stags()
    381  u32 stag;  (member)
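c4iw_num_stags() divides the STag region size by 32 (>> 5), which lines up with the stats.stag.cur += 32 accounting in mem.c and the * 32 offset math in cxgb4_main.c, i.e. one 32-byte TPT entry per STag. A hedged sketch of the same arithmetic; the struct and function names are made up for illustration:

#include <stdio.h>
#include <stdint.h>

#define TPT_ENTRY_SIZE 32   /* bytes per TPT entry, inferred from the >> 5 and * 32 above */

/* Illustrative stand-in for rdev->lldi.vr->stag */
struct stag_range {
        uint32_t start;
        uint32_t size;      /* bytes */
};

static int num_stags(const struct stag_range *r)
{
        return (int)(r->size / TPT_ENTRY_SIZE);   /* same as size >> 5 */
}

int main(void)
{
        struct stag_range r = { .start = 0x10000, .size = 1 << 20 };

        printf("stag region %u bytes -> %d stags\n", r.size, num_stags(&r));
        return 0;
}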
|
D | qp.c |
    795  fr->stag = cpu_to_be32(mhp->ibmr.rkey);  in build_tpte_memreg()
    839  wqe->fr.stag = cpu_to_be32(wr->key);  in build_memreg()
|
/drivers/infiniband/sw/siw/ |
D | siw_mem.c |
    34   m->stag = id << 8;  in siw_mem_add()
    120  mem->stag = id << 8;  in siw_mr_add_mem()
    121  mr->base_mr.lkey = mr->base_mr.rkey = mem->stag;  in siw_mr_add_mem()
    135  found = xa_erase(&mem->sdev->mem_xa, mem->stag >> 8);  in siw_mr_drop_mem()
    172  siw_dbg_pd(pd, "STag 0x%08x invalid\n", mem->stag);  in siw_check_mem()
    176  siw_dbg_pd(pd, "STag 0x%08x: PD mismatch\n", mem->stag);  in siw_check_mem()
    198  mem->stag);  in siw_check_mem()
    243  if (unlikely((*mem)->stag != sge->lkey)) {  in siw_check_sge()
    292  int siw_invalidate_stag(struct ib_pd *pd, u32 stag)  in siw_invalidate_stag() (argument)
    295  struct siw_mem *mem = siw_mem_id2obj(sdev, stag >> 8);  in siw_invalidate_stag()
    [all …]
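In the siw listing, the id allocated from the device's mem_xa becomes the upper 24 bits of the STag (id << 8), siw_reg_user_mr() later ORs a consumer-supplied key into the low byte, and lookups shift the STag back down. A small userspace sketch, with a fixed array standing in for the kernel xarray; all names here are illustrative:

#include <stdio.h>
#include <stdint.h>
#include <stddef.h>

struct mem_obj { uint32_t stag; };

/* Toy stand-in for the driver's mem_xa xarray: index == id == stag >> 8. */
static struct mem_obj *mem_table[256];

static uint32_t mem_add(struct mem_obj *m, uint32_t id, uint8_t stag_key)
{
        m->stag = (id << 8) | stag_key;   /* id in bits 31..8, key in bits 7..0 */
        mem_table[id] = m;
        return m->stag;
}

static struct mem_obj *mem_lookup(uint32_t stag)
{
        return mem_table[stag >> 8];      /* key byte is ignored for the lookup */
}

int main(void)
{
        struct mem_obj m;
        uint32_t stag = mem_add(&m, 42, 0x5a);

        printf("stag=0x%08x lookup=%p obj=%p\n",
               stag, (void *)mem_lookup(stag), (void *)&m);
        return 0;
}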
|
D | siw_mem.h |
    15   int siw_invalidate_stag(struct ib_pd *pd, u32 stag);
|
D | siw_verbs.c |
    1377  mem->stag |= ureq.stag_key;  in siw_reg_user_mr()
    1378  uresp.stag = mem->stag;  in siw_reg_user_mr()
    1446  siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);  in siw_alloc_mr()
    1561  siw_dbg_pd(pd, "[MEM %u]: success\n", mr->mem->stag);  in siw_get_dma_mr()
|
D | siw.h |
    151  u32 stag; /* iWarp memory access steering tag */  (member)
    715  "MEM[0x%08x] %s: " fmt, mem->stag, __func__, ##__VA_ARGS__)
|
D | siw_qp_tx.c |
    965  mem->stag = sqe->rkey;  in siw_fastreg_mr()
|
D | siw_qp_rx.c |
    583  if (unlikely(mem->stag != srx->ddp_stag)) {  in siw_proc_write()
|
/drivers/infiniband/hw/i40iw/ |
D | i40iw_verbs.c |
    1230  static void i40iw_free_stag(struct i40iw_device *iwdev, u32 stag)  in i40iw_free_stag() (argument)
    1234  stag_idx = (stag & iwdev->mr_stagmask) >> I40IW_CQPSQ_STAG_IDX_SHIFT;  in i40iw_free_stag()
    1245  u32 stag = 0;  in i40iw_create_stag() (local)
    1264  stag = stag_index << I40IW_CQPSQ_STAG_IDX_SHIFT;  in i40iw_create_stag()
    1265  stag |= driver_key;  in i40iw_create_stag()
    1266  stag += (u32)consumer_key;  in i40iw_create_stag()
    1269  return stag;  in i40iw_create_stag()
    1512  info->stag_idx = iwmr->stag >> I40IW_CQPSQ_STAG_IDX_SHIFT;  in i40iw_hw_alloc_stag()
    1545  u32 stag;  in i40iw_alloc_mr() (local)
    1552  stag = i40iw_create_stag(iwdev);  in i40iw_alloc_mr()
    [all …]
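i40iw_create_stag() assembles an STag from an allocated index shifted into place, a driver key OR'd in, and a consumer key added on top; i40iw_free_stag() recovers the index by masking with mr_stagmask and shifting back. A sketch of that composition, with placeholder shift/mask values since the real I40IW_CQPSQ_STAG_IDX_SHIFT and mask are not shown above:

#include <stdio.h>
#include <stdint.h>

/* Placeholder values; the real I40IW_CQPSQ_STAG_IDX_SHIFT and mr_stagmask
 * come from the driver headers and are not shown in the listing above. */
#define STAG_IDX_SHIFT 8
#define STAG_IDX_MASK  0xffffff00u

static uint32_t create_stag(uint32_t stag_index, uint32_t driver_key,
                            uint8_t consumer_key)
{
        uint32_t stag = stag_index << STAG_IDX_SHIFT;

        stag |= driver_key;             /* driver-owned low bits */
        stag += (uint32_t)consumer_key; /* consumer key added on top */
        return stag;
}

static uint32_t stag_to_index(uint32_t stag)
{
        return (stag & STAG_IDX_MASK) >> STAG_IDX_SHIFT;
}

int main(void)
{
        uint32_t stag = create_stag(0x777, 0, 0x11);

        printf("stag=0x%08x index=0x%x\n", stag, stag_to_index(stag));
        return 0;
}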
|
D | i40iw_uk.c |
    211  LS_64(sge->stag, I40IWQPSQ_FRAG_STAG)));  in i40iw_set_fragment()
    279  if (!op_info->rem_addr.stag)  in i40iw_rdma_write()
    282  header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |  in i40iw_rdma_write()
    337  header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |  in i40iw_rdma_read()
    452  header = LS_64(op_info->rem_addr.stag, I40IWQPSQ_REMSTAG) |  in i40iw_inline_rdma_write()
|
D | i40iw_verbs.h |
    98   u32 stag;  (member)
|
D | i40iw_user.h |
    112  i40iw_stag stag;  (member)
|
D | i40iw_ctrl.c |
    3415  i40iw_stag stag)  in i40iw_sc_send_lsmm() (argument)
    3426  set_64bit_val(wqe, 8, (size | LS_64(stag, I40IWQPSQ_FRAG_STAG)));  in i40iw_sc_send_lsmm()
|
/drivers/net/ethernet/intel/i40e/ |
D | i40e_virtchnl_pf.h |
    77   u16 stag;  (member)
|
/drivers/net/ethernet/chelsio/cxgb4/ |
D | cxgb4_uld.h |
    359  struct cxgb4_range stag;  (member)
    533  int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte);
|
D | cxgb4_main.c |
    2263  int cxgb4_read_tpte(struct net_device *dev, u32 stag, __be32 *tpte)  in cxgb4_read_tpte() (argument)
    2274  offset = ((stag >> 8) * 32) + adap->vres.stag.start;  in cxgb4_read_tpte()
    2334  stag, offset);  in cxgb4_read_tpte()
    5250  adap->vres.stag.start = val[0];  in adap_init0()
    5251  adap->vres.stag.size = val[1] - val[0] + 1;  in adap_init0()
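cxgb4_read_tpte() converts an STag into an adapter-memory offset: the upper 24 bits index a 32-byte TPT entry, offset from the vres.stag.start value discovered in adap_init0(). A minimal sketch of that address calculation; the struct and helper names are illustrative:

#include <stdio.h>
#include <stdint.h>

#define TPT_ENTRY_BYTES 32   /* matches the "* 32" in cxgb4_read_tpte() above */

struct stag_region {
        uint32_t start;      /* vres.stag.start from adap_init0() */
        uint32_t size;       /* vres.stag.size */
};

/* Compute the adapter-memory offset of the TPT entry backing @stag. */
static uint32_t tpte_offset(const struct stag_region *r, uint32_t stag)
{
        return (stag >> 8) * TPT_ENTRY_BYTES + r->start;
}

int main(void)
{
        struct stag_region r = { .start = 0x20000, .size = 0x10000 };

        printf("stag 0x%08x -> offset 0x%x\n", 0x00001200u,
               tpte_offset(&r, 0x00001200u));
        return 0;
}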
|