/drivers/net/mlx4/
D | mr.c |
    201  struct mlx4_mtt *mtt)  in mlx4_mtt_init() argument
    206  mtt->order = -1;  in mlx4_mtt_init()
    207  mtt->page_shift = MLX4_ICM_PAGE_SHIFT;  in mlx4_mtt_init()
    210  mtt->page_shift = page_shift;  in mlx4_mtt_init()
    212  for (mtt->order = 0, i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)  in mlx4_mtt_init()
    213  ++mtt->order;  in mlx4_mtt_init()
    215  mtt->first_seg = mlx4_alloc_mtt_range(dev, mtt->order);  in mlx4_mtt_init()
    216  if (mtt->first_seg == -1)  in mlx4_mtt_init()
    223  void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt)  in mlx4_mtt_cleanup() argument
    227  if (mtt->order < 0)  in mlx4_mtt_cleanup()
    [all …]
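Note: the loop at mr.c:212 rounds npages up to a power-of-two buddy order before mlx4_alloc_mtt_range() is called; lines 206-207 are the npages == 0 path, which sets order = -1 so cleanup can recognize an empty MTT. A minimal userspace sketch of the order computation, assuming the driver's MLX4_MTT_ENTRY_PER_SEG value of 8:

    #include <stdio.h>

    /* Assumed from the driver headers: MTT entries per segment. */
    #define MLX4_MTT_ENTRY_PER_SEG 8

    /* Mirrors the loop in mlx4_mtt_init(): find the smallest buddy order
     * such that MLX4_MTT_ENTRY_PER_SEG << order covers npages entries. */
    static int mtt_order(int npages)
    {
        int order = 0;
        int i;

        for (i = MLX4_MTT_ENTRY_PER_SEG; i < npages; i <<= 1)
            ++order;
        return order;
    }

    int main(void)
    {
        int npages;

        for (npages = 1; npages <= 128; npages <<= 1)
            printf("npages=%3d -> order=%d (%d entries)\n", npages,
                   mtt_order(npages),
                   MLX4_MTT_ENTRY_PER_SEG << mtt_order(npages));
        return 0;
    }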
D | cq.c |
    163  int entries, struct mlx4_mtt *mtt)  in mlx4_cq_resize() argument
    178  cq_context->log_page_size = mtt->page_shift - 12;  in mlx4_cq_resize()
    179  mtt_addr = mlx4_mtt_addr(dev, mtt);  in mlx4_cq_resize()
    190  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, struct mlx4_mtt *mtt,  in mlx4_cq_alloc() argument
    236  cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_cq_alloc()
    238  mtt_addr = mlx4_mtt_addr(dev, mtt);  in mlx4_cq_alloc()
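Note: the hard-coded "- 12" in mlx4_cq_resize() (cq.c:178) is the same value mlx4_cq_alloc() spells out as MLX4_ICM_PAGE_SHIFT (cq.c:236): log_page_size encodes the MTT page size as a power of two relative to the 4 KiB ICM page. A small sketch of the encoding:

    #include <stdio.h>

    /* ICM pages are 4 KiB in mlx4, so this shift is 12. */
    #define MLX4_ICM_PAGE_SHIFT 12

    int main(void)
    {
        int page_shift;

        /* log_page_size is the MTT page size expressed relative to
         * the 4 KiB ICM page, as written into the CQ context. */
        for (page_shift = 12; page_shift <= 16; page_shift++)
            printf("page_shift=%d (pages of %6u B) -> log_page_size=%d\n",
                   page_shift, 1u << page_shift,
                   page_shift - MLX4_ICM_PAGE_SHIFT);
        return 0;
    }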
D | qp.c |
    68   int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt,  in mlx4_qp_modify() argument
    129  u64 mtt_addr = mlx4_mtt_addr(dev, mtt);  in mlx4_qp_modify()
    132  context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_qp_modify()
    351  int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt,  in mlx4_qp_to_ready() argument
    367  err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1],  in mlx4_qp_to_ready()
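Note: mlx4_qp_to_ready() (qp.c:351) brings a QP up by walking consecutive pairs of a state ladder, issuing one mlx4_qp_modify() per transition (qp.c:367). A standalone sketch of the pattern; the state names and the qp_modify() stand-in are illustrative, not the driver's API:

    #include <stdio.h>

    /* QP states used during bring-up, modeled on the mlx4 enum. */
    enum qp_state { QP_STATE_RST, QP_STATE_INIT, QP_STATE_RTR, QP_STATE_RTS };

    static const char *const state_name[] = { "RST", "INIT", "RTR", "RTS" };

    /* Stand-in for mlx4_qp_modify(): one firmware command per transition. */
    static int qp_modify(enum qp_state cur, enum qp_state next)
    {
        printf("modify %s -> %s\n", state_name[cur], state_name[next]);
        return 0;
    }

    int main(void)
    {
        /* Mirrors mlx4_qp_to_ready(): walk consecutive pairs of the
         * ladder until RTS, stopping at the first failed transition. */
        static const enum qp_state states[] = {
            QP_STATE_RST, QP_STATE_INIT, QP_STATE_RTR, QP_STATE_RTS
        };
        int i, err;

        for (i = 0; i < (int)(sizeof(states) / sizeof(states[0])) - 1; i++) {
            err = qp_modify(states[i], states[i + 1]);
            if (err)
                return 1;
        }
        return 0;
    }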
D | srq.c |
    113  int mlx4_srq_alloc(struct mlx4_dev *dev, u32 pdn, struct mlx4_mtt *mtt,  in mlx4_srq_alloc() argument
    152  srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;  in mlx4_srq_alloc()
    154  mtt_addr = mlx4_mtt_addr(dev, mtt);  in mlx4_srq_alloc()
D | alloc.c |
    408  &wqres->mtt);  in mlx4_alloc_hwq_res()
    412  err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf);  in mlx4_alloc_hwq_res()
    419  mlx4_mtt_cleanup(dev, &wqres->mtt);  in mlx4_alloc_hwq_res()
    432  mlx4_mtt_cleanup(dev, &wqres->mtt);  in mlx4_free_hwq_res()
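Note: mlx4_alloc_hwq_res() chains buffer allocation, mlx4_mtt_init() and mlx4_buf_write_mtt(), unwinding with mlx4_mtt_cleanup() when a later step fails (alloc.c:419). A sketch of the goto-ladder unwind pattern, with stand-in step functions:

    #include <stdio.h>

    /* Stand-ins for the three steps mlx4_alloc_hwq_res() chains together. */
    static int buf_alloc(void)     { return 0; }
    static int mtt_init(void)      { return 0; }
    static int buf_write_mtt(void) { return -1; /* pretend this step fails */ }
    static void mtt_cleanup(void)  { puts("mtt_cleanup"); }
    static void buf_free(void)     { puts("buf_free"); }

    /* Each failure label undoes exactly the steps that already
     * succeeded, in reverse order. */
    int main(void)
    {
        int err;

        err = buf_alloc();
        if (err)
            goto out;
        err = mtt_init();
        if (err)
            goto err_buf;
        err = buf_write_mtt();
        if (err)
            goto err_mtt;
        return 0;

    err_mtt:
        mtt_cleanup();
    err_buf:
        buf_free();
    out:
        return err ? 1 : 0;
    }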
D | eq.c |
    392  err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt);  in mlx4_create_eq()
    396  err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list);  in mlx4_create_eq()
    407  mtt_addr = mlx4_mtt_addr(dev, &eq->mtt);  in mlx4_create_eq()
    425  mlx4_mtt_cleanup(dev, &eq->mtt);  in mlx4_create_eq()
    475  mlx4_mtt_cleanup(dev, &eq->mtt);  in mlx4_free_eq()
D | en_cq.c |
    92   err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, &mdev->priv_uar,  in mlx4_en_activate_cq()
D | en_rx.c |
    466  err = mlx4_srq_alloc(mdev->dev, mdev->priv_pdn, &ring->wqres.mtt,  in mlx4_en_activate_rx_rings()
    960  err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, context, qp, state);  in mlx4_en_config_rss_qp()
    1030 err = mlx4_qp_to_ready(mdev->dev, &priv->res.mtt, &context,  in mlx4_en_config_rss_steer()
D | mlx4.h |
    140  struct mlx4_mtt mtt;  member
/drivers/infiniband/hw/mthca/
D | mthca_mr.c |
    211  struct mthca_mtt *mtt;  in __mthca_alloc_mtt() local
    217  mtt = kmalloc(sizeof *mtt, GFP_KERNEL);  in __mthca_alloc_mtt()
    218  if (!mtt)  in __mthca_alloc_mtt()
    221  mtt->buddy = buddy;  in __mthca_alloc_mtt()
    222  mtt->order = 0;  in __mthca_alloc_mtt()
    224  ++mtt->order;  in __mthca_alloc_mtt()
    226  mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);  in __mthca_alloc_mtt()
    227  if (mtt->first_seg == -1) {  in __mthca_alloc_mtt()
    228  kfree(mtt);  in __mthca_alloc_mtt()
    232  return mtt;  in __mthca_alloc_mtt()
    [all …]
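Note: __mthca_alloc_mtt() allocates the descriptor first, computes a buddy order, then grabs an MTT range, freeing the descriptor again if the range allocation fails (mthca_mr.c:227-228). A userspace sketch of the same shape; the real function returns ERR_PTR(-ENOMEM) rather than NULL, and the loop bound comes from the device limits, both simplified here:

    #include <stdio.h>
    #include <stdlib.h>

    struct mtt {
        int order;
        int first_seg;
    };

    /* Stand-in for mthca_alloc_mtt_range(); returns -1 when the buddy
     * allocator has no free block of the requested order. */
    static int alloc_mtt_range(int order)
    {
        return order <= 4 ? 0 : -1;
    }

    /* Allocate the descriptor, then the buddy range; undo the first
     * allocation if the second one fails. */
    static struct mtt *alloc_mtt(int size, int seg_size)
    {
        struct mtt *mtt;
        int i;

        mtt = malloc(sizeof *mtt);
        if (!mtt)
            return NULL;

        mtt->order = 0;
        for (i = seg_size; i < size; i <<= 1)
            ++mtt->order;

        mtt->first_seg = alloc_mtt_range(mtt->order);
        if (mtt->first_seg == -1) {
            free(mtt);
            return NULL;
        }
        return mtt;
    }

    int main(void)
    {
        struct mtt *mtt = alloc_mtt(64, 8);

        if (mtt) {
            printf("order=%d first_seg=%d\n", mtt->order, mtt->first_seg);
            free(mtt);
        }
        return 0;
    }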
D | mthca_provider.h |
    76   struct mthca_mtt *mtt;  member
    82   struct mthca_mtt *mtt;  member
D | mthca_provider.c |
    1047 mr->mtt = mthca_alloc_mtt(dev, n);  in mthca_reg_user_mr()
    1048 if (IS_ERR(mr->mtt)) {  in mthca_reg_user_mr()
    1049 err = PTR_ERR(mr->mtt);  in mthca_reg_user_mr()
    1074 err = mthca_write_mtt(dev, mr->mtt, n, pages, i);  in mthca_reg_user_mr()
    1084 err = mthca_write_mtt(dev, mr->mtt, n, pages, i);  in mthca_reg_user_mr()
    1099 mthca_free_mtt(dev, mr->mtt);  in mthca_reg_user_mr()
D | mthca_dev.h |
    466  void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
    467  int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
/drivers/infiniband/hw/mlx4/
D | cq.c |
    108  &buf->mtt);  in mlx4_ib_alloc_cq_buf()
    112  err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf);  in mlx4_ib_alloc_cq_buf()
    119  mlx4_mtt_cleanup(dev->dev, &buf->mtt);  in mlx4_ib_alloc_cq_buf()
    146  ilog2((*umem)->page_size), &buf->mtt);  in mlx4_ib_get_cq_umem()
    150  err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem);  in mlx4_ib_get_cq_umem()
    157  mlx4_mtt_cleanup(dev->dev, &buf->mtt);  in mlx4_ib_get_cq_umem()
    224  err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar,  in mlx4_ib_create_cq()
    245  mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt);  in mlx4_ib_create_cq()
    348  struct mlx4_mtt mtt;  in mlx4_ib_resize_cq() local
    382  mtt = cq->buf.mtt;  in mlx4_ib_resize_cq()
    [all …]
D | srq.c |
    120  ilog2(srq->umem->page_size), &srq->mtt);  in mlx4_ib_create_srq()
    124  err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem);  in mlx4_ib_create_srq()
    155  &srq->mtt);  in mlx4_ib_create_srq()
    159  err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf);  in mlx4_ib_create_srq()
    170  err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, &srq->mtt,  in mlx4_ib_create_srq()
    194  mlx4_mtt_cleanup(dev->dev, &srq->mtt);  in mlx4_ib_create_srq()
    262  mlx4_mtt_cleanup(dev->dev, &msrq->mtt);  in mlx4_ib_destroy_srq()
D | mr.c |
    77   int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,  in mlx4_ib_umem_write_mtt() argument
    95   len = sg_dma_len(&chunk->page_list[j]) >> mtt->page_shift;  in mlx4_ib_umem_write_mtt()
    104  err = mlx4_write_mtt(dev->dev, mtt, n,  in mlx4_ib_umem_write_mtt()
    115  err = mlx4_write_mtt(dev->dev, mtt, n, i, pages);  in mlx4_ib_umem_write_mtt()
    151  err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem);  in mlx4_ib_reg_user_mr()
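Note: mlx4_ib_umem_write_mtt() expands each DMA segment of the umem into page-sized entries in a temporary array, flushing to mlx4_write_mtt() whenever the array fills (mr.c:104) and once more for the final partial batch (mr.c:115). A standalone sketch of that batching; the segment list, batch size and write_mtt() stub are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define BATCH 8 /* flush threshold; the driver sizes this from PAGE_SIZE */

    /* Stand-in for mlx4_write_mtt(): writes n entries starting at start. */
    static void write_mtt(int start, int n, const uint64_t *pages)
    {
        printf("write_mtt: %d entries at index %d\n", n, start);
    }

    int main(void)
    {
        /* Pretend DMA segments (addr, len) with 4 KiB pages. */
        const struct { uint64_t addr; int len; } seg[] = {
            { 0x100000, 3 << 12 }, { 0x800000, 7 << 12 },
        };
        uint64_t pages[BATCH];
        int shift = 12, n = 0, i = 0, s, k;

        /* Expand each segment into page entries; flush when the batch
         * buffer fills, then flush the remainder at the end. */
        for (s = 0; s < 2; s++)
            for (k = 0; k < seg[s].len >> shift; k++) {
                pages[i++] = seg[s].addr + ((uint64_t)k << shift);
                if (i == BATCH) {
                    write_mtt(n, i, pages);
                    n += i;
                    i = 0;
                }
            }
        if (i)
            write_mtt(n, i, pages); /* final partial batch */
        return 0;
    }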
D | mlx4_ib.h |
    61   struct mlx4_mtt mtt;  member
    130  struct mlx4_mtt mtt;  member
    153  struct mlx4_mtt mtt;  member
    248  int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
D | qp.c |
    491  ilog2(qp->umem->page_size), &qp->mtt);  in create_qp_common()
    495  err = mlx4_ib_umem_write_mtt(dev, &qp->mtt, qp->umem);  in create_qp_common()
    532  &qp->mtt);  in create_qp_common()
    536  err = mlx4_buf_write_mtt(dev->dev, &qp->mtt, &qp->buf);  in create_qp_common()
    587  mlx4_mtt_cleanup(dev->dev, &qp->mtt);  in create_qp_common()
    675  mlx4_mtt_cleanup(dev->dev, &qp->mtt);  in destroy_qp_common()
    1101 err = mlx4_qp_modify(dev->dev, &qp->mtt, to_mlx4_state(cur_state),  in __mlx4_ib_modify_qp()
/drivers/net/irda/
D | w83977af_ir.c |
    496  int mtt;  in w83977af_hard_xmit() local
    530  mtt = irda_get_mtt(skb);  in w83977af_hard_xmit()
    532  if (mtt > 50) {  in w83977af_hard_xmit()
    534  mtt /= 1000+1;  in w83977af_hard_xmit()
    538  outb(mtt & 0xff, iobase+TMRL);  in w83977af_hard_xmit()
    539  outb((mtt >> 8) & 0x0f, iobase+TMRH);  in w83977af_hard_xmit()
    550  IRDA_DEBUG(4, "%s(%ld), mtt=%d\n", __func__ , jiffies, mtt);  in w83977af_hard_xmit()
    551  if (mtt)  in w83977af_hard_xmit()
    552  udelay(mtt);  in w83977af_hard_xmit()
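Note: in the IrDA drivers below, mtt is the minimum turnaround time in microseconds that must elapse between the last received frame and the next transmission. w83977af_ir.c programs a 12-bit hardware timer for long delays and busy-waits with udelay() otherwise; the expression "mtt /= 1000+1" at line 534 parses as a division by 1001, apparently intended as a microseconds-to-milliseconds conversion. A sketch of the scaling and the timer register split:

    #include <stdio.h>

    int main(void)
    {
        int mtt = 10000; /* required turnaround delay in microseconds */

        if (mtt > 50) {
            /* Quoted scaling from the driver: "1000+1" parses as a
             * division by 1001, not as mtt/1000 + 1. */
            int ticks = mtt / (1000 + 1);

            /* Program the 12-bit timer: low byte to TMRL, high
             * nibble to TMRH, as the outb() calls above do. */
            printf("TMRL=0x%02x TMRH=0x%02x\n",
                   ticks & 0xff, (ticks >> 8) & 0x0f);
        } else {
            printf("busy-wait %d us instead\n", mtt);
        }
        return 0;
    }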
D | ali-ircc.c |
    1433 int mtt, diff;  in ali_ircc_fir_hard_xmit() local
    1479 mtt = irda_get_mtt(skb);  in ali_ircc_fir_hard_xmit()
    1481 if (mtt)  in ali_ircc_fir_hard_xmit()
    1497 if (mtt > diff)  in ali_ircc_fir_hard_xmit()
    1499 mtt -= diff;  in ali_ircc_fir_hard_xmit()
    1506 if (mtt > 500)  in ali_ircc_fir_hard_xmit()
    1509 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */  in ali_ircc_fir_hard_xmit()
    1511 IRDA_DEBUG(1, "%s(), ************** mtt = %d ***********\n", __func__ , mtt);  in ali_ircc_fir_hard_xmit()
    1514 if (mtt == 1) /* 500 us */  in ali_ircc_fir_hard_xmit()
    1519 else if (mtt == 2) /* 1 ms */  in ali_ircc_fir_hard_xmit()
    [all …]
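Note: ali-ircc.c first subtracts the time already elapsed since the last reception (lines 1497-1499), then rounds the remainder to the nearest 500 us timer unit at line 1509. A small demonstration of that rounding, matching the in-line comment:

    #include <stdio.h>

    int main(void)
    {
        int us;

        /* (mtt + 250) / 500 rounds to the nearest 500 us unit:
         * remainders of 0..249 round down, 250..499 round up. */
        for (us = 0; us <= 2000; us += 250)
            printf("mtt=%4d us -> %d half-ms units\n",
                   us, (us + 250) / 500);
        return 0;
    }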
D | donauboe.c |
    631  toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt)  in toshoboe_makemttpacket() argument
    635  xbofs = ((int) (mtt/100)) * (int) (self->speed);  in toshoboe_makemttpacket()
    641  , xbofs,mtt,self->speed);  in toshoboe_makemttpacket()
    978  int mtt, len, ctl;  in toshoboe_hard_xmit() local
    1044 if ((mtt = irda_get_mtt(skb)))  in toshoboe_hard_xmit()
    1059 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt);  in toshoboe_hard_xmit()
    1061 ,skb->len,mtt,self->txpending);  in toshoboe_hard_xmit()
    1062 if (mtt)  in toshoboe_hard_xmit()
    1064 self->ring->tx[self->txs].len = mtt & 0xfff;  in toshoboe_hard_xmit()
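Note: donauboe.c enforces the turnaround by prepending pad bytes (XBOFs) instead of delaying: line 631 builds a pad packet, and line 635 starts computing how many bytes cover mtt microseconds at the current speed. A sketch of the arithmetic, assuming the /80000 scaling and final increment that follow in the full function body (8 bits per byte, with the 100 us factor pulled out first to keep the intermediate product within int range):

    #include <stdio.h>

    /* How many pad bytes cover a turnaround of mtt microseconds at
     * `speed` bits per second, following the quoted computation. */
    static int xbofs_for(int mtt, int speed)
    {
        int xbofs = (mtt / 100) * speed;

        xbofs /= 80000; /* 8 bits per byte, and mtt was divided by 100 */
        return xbofs + 1; /* always emit at least one XBOF */
    }

    int main(void)
    {
        printf("mtt=10000 us @  115200 bps -> %d XBOFs\n",
               xbofs_for(10000, 115200));
        printf("mtt= 1000 us @ 4000000 bps -> %d XBOFs\n",
               xbofs_for(1000, 4000000));
        return 0;
    }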
D | irda-usb.c |
    391  int res, mtt;  in irda_usb_hard_xmit() local
    495  mtt = irda_get_mtt(skb);  in irda_usb_hard_xmit()
    496  if (mtt) {  in irda_usb_hard_xmit()
    514  if (mtt > diff) {  in irda_usb_hard_xmit()
    515  mtt -= diff;  in irda_usb_hard_xmit()
    516  if (mtt > 1000)  in irda_usb_hard_xmit()
    517  mdelay(mtt/1000);  in irda_usb_hard_xmit()
    519  udelay(mtt);  in irda_usb_hard_xmit()
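Note: irda-usb.c subtracts the already-elapsed time and sleeps only for the remainder, using mdelay() for waits above a millisecond and udelay() below (lines 514-519), since udelay() is only meant for short busy-waits. A standalone sketch with stubbed delay primitives:

    #include <stdio.h>

    /* Userspace stand-ins for the kernel delay primitives. */
    static void mdelay(unsigned long ms) { printf("mdelay(%lu)\n", ms); }
    static void udelay(unsigned long us) { printf("udelay(%lu)\n", us); }

    int main(void)
    {
        int mtt = 10000; /* required turnaround, microseconds */
        int diff = 1200; /* microseconds already elapsed since last RX */

        /* Only wait for the part of the turnaround not yet elapsed. */
        if (mtt > diff) {
            mtt -= diff;
            if (mtt > 1000)
                mdelay(mtt / 1000);
            else
                udelay(mtt);
        }
        return 0;
    }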
D | nsc-ircc.c |
    1426 int mtt, diff;  in nsc_ircc_hard_xmit_fir() local
    1484 mtt = irda_get_mtt(skb);  in nsc_ircc_hard_xmit_fir()
    1485 if (mtt) {  in nsc_ircc_hard_xmit_fir()
    1495 if (mtt > diff) {  in nsc_ircc_hard_xmit_fir()
    1496 mtt -= diff;  in nsc_ircc_hard_xmit_fir()
    1503 if (mtt > 125) {  in nsc_ircc_hard_xmit_fir()
    1505 mtt = mtt / 125;  in nsc_ircc_hard_xmit_fir()
    1509 outb(mtt & 0xff, iobase+TMRL);  in nsc_ircc_hard_xmit_fir()
    1510 outb((mtt >> 8) & 0x0f, iobase+TMRH);  in nsc_ircc_hard_xmit_fir()
    1523 udelay(mtt);  in nsc_ircc_hard_xmit_fir()
D | sa1100_ir.c |
    689  int mtt = irda_get_mtt(skb);  in sa1100_irda_hard_xmit() local
    709  if (mtt)  in sa1100_irda_hard_xmit()
    710  udelay(mtt);  in sa1100_irda_hard_xmit()
D | pxaficp_ir.c |
    523  unsigned long mtt = irda_get_mtt(skb);  in pxa_irda_hard_xmit() local
    528  if (mtt)  in pxa_irda_hard_xmit()
    529  while ((unsigned)(OSCR - si->last_oscr)/4 < mtt)  in pxa_irda_hard_xmit()
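Note: pxaficp_ir.c busy-waits on the PXA OS timer counter (OSCR) instead of calling udelay(); OSCR ticks at 3.6864 MHz, so dividing the tick delta by 4 approximates microseconds, and the unsigned subtraction stays correct across counter wraparound. A userspace sketch with a faked counter:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the free-running 32-bit OSCR register; each read
     * pretends 40 ticks (~10 us) have passed. */
    static uint32_t oscr;
    static uint32_t read_oscr(void) { return oscr += 40; }

    int main(void)
    {
        uint32_t last_oscr = read_oscr(); /* timestamp of last RX */
        unsigned long mtt = 100;          /* microseconds to wait */
        int spins = 0;

        /* Spin until mtt microseconds have elapsed; the unsigned
         * delta is wrap-safe for a free-running 32-bit counter. */
        while ((uint32_t)(read_oscr() - last_oscr) / 4 < mtt)
            spins++;

        printf("spun %d times\n", spins);
        return 0;
    }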