
Searched refs:mtt (Results 1 – 25 of 32) sorted by relevance

/drivers/infiniband/hw/mthca/
mthca_mr.c
211 struct mthca_mtt *mtt; in __mthca_alloc_mtt() local
217 mtt = kmalloc(sizeof *mtt, GFP_KERNEL); in __mthca_alloc_mtt()
218 if (!mtt) in __mthca_alloc_mtt()
221 mtt->buddy = buddy; in __mthca_alloc_mtt()
222 mtt->order = 0; in __mthca_alloc_mtt()
224 ++mtt->order; in __mthca_alloc_mtt()
226 mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy); in __mthca_alloc_mtt()
227 if (mtt->first_seg == -1) { in __mthca_alloc_mtt()
228 kfree(mtt); in __mthca_alloc_mtt()
232 return mtt; in __mthca_alloc_mtt()
[all …]
mthca_provider.h
76 struct mthca_mtt *mtt; member
82 struct mthca_mtt *mtt; member
mthca_provider.c
1023 mr->mtt = mthca_alloc_mtt(dev, n); in mthca_reg_user_mr()
1024 if (IS_ERR(mr->mtt)) { in mthca_reg_user_mr()
1025 err = PTR_ERR(mr->mtt); in mthca_reg_user_mr()
1049 err = mthca_write_mtt(dev, mr->mtt, n, pages, i); in mthca_reg_user_mr()
1059 err = mthca_write_mtt(dev, mr->mtt, n, pages, i); in mthca_reg_user_mr()
1074 mthca_free_mtt(dev, mr->mtt); in mthca_reg_user_mr()
mthca_dev.h
467 void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt);
468 int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
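Taken together, the mthca hits above trace the MTT lifecycle used by memory registration: mthca_alloc_mtt() reserves a translation table for n pages from the buddy allocator (rounding up to a power of two via mtt->order), mthca_write_mtt() fills it with page DMA addresses, and mthca_free_mtt() releases it on error or teardown. A minimal sketch of that sequence, loosely following mthca_reg_user_mr() and the prototypes in mthca_dev.h; the example_* wrapper is hypothetical and error handling is trimmed:

#include "mthca_dev.h"

/* Hypothetical helper: map npages page DMA addresses through an MTT. */
static int example_map_pages(struct mthca_dev *dev, struct mthca_mr *mr,
                             u64 *pages, int npages)
{
        int err;

        /* Reserve an MTT large enough for npages entries. */
        mr->mtt = mthca_alloc_mtt(dev, npages);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        /* Write all page addresses starting at MTT index 0; the real
         * code writes in chunks and passes a running start index. */
        err = mthca_write_mtt(dev, mr->mtt, 0, pages, npages);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}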
/drivers/net/ethernet/mellanox/mlx4/
mr.c
198 struct mlx4_mtt *mtt) in mlx4_mtt_init() argument
203 mtt->order = -1; in mlx4_mtt_init()
204 mtt->page_shift = MLX4_ICM_PAGE_SHIFT; in mlx4_mtt_init()
207 mtt->page_shift = page_shift; in mlx4_mtt_init()
209 for (mtt->order = 0, i = 1; i < npages; i <<= 1) in mlx4_mtt_init()
210 ++mtt->order; in mlx4_mtt_init()
212 mtt->offset = mlx4_alloc_mtt_range(dev, mtt->order); in mlx4_mtt_init()
213 if (mtt->offset == -1) in mlx4_mtt_init()
254 void mlx4_mtt_cleanup(struct mlx4_dev *dev, struct mlx4_mtt *mtt) in mlx4_mtt_cleanup() argument
256 if (mtt->order < 0) in mlx4_mtt_cleanup()
[all …]
resource_tracker.c
109 struct res_mtt *mtt; member
157 struct res_mtt *mtt; member
169 struct res_mtt *mtt; member
180 struct res_mtt *mtt; member
192 struct res_mtt *mtt; member
448 dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts; in mlx4_init_quotas()
460 dev->quotas.mtt = in mlx4_init_quotas()
2679 int size, struct res_mtt *mtt) in check_mtt_range() argument
2681 int res_start = mtt->com.res_id; in check_mtt_range()
2682 int res_size = (1 << mtt->order); in check_mtt_range()
[all …]
cq.c
188 int entries, struct mlx4_mtt *mtt) in mlx4_cq_resize() argument
201 cq_context->log_page_size = mtt->page_shift - 12; in mlx4_cq_resize()
202 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_cq_resize()
286 struct mlx4_mtt *mtt, struct mlx4_uar *uar, u64 db_rec, in mlx4_cq_alloc() argument
325 cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in mlx4_cq_alloc()
327 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_cq_alloc()
qp.c
87 static int __mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in __mlx4_qp_modify() argument
164 u64 mtt_addr = mlx4_mtt_addr(dev, mtt); in __mlx4_qp_modify()
167 context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in __mlx4_qp_modify()
202 int mlx4_qp_modify(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_modify() argument
208 return __mlx4_qp_modify(dev, mtt, cur_state, new_state, context, in mlx4_qp_modify()
914 int mlx4_qp_to_ready(struct mlx4_dev *dev, struct mlx4_mtt *mtt, in mlx4_qp_to_ready() argument
932 err = mlx4_qp_modify(dev, mtt, states[i], states[i + 1], in mlx4_qp_to_ready()
srq.c
166 struct mlx4_mtt *mtt, u64 db_rec, struct mlx4_srq *srq) in mlx4_srq_alloc() argument
196 srq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT; in mlx4_srq_alloc()
198 mtt_addr = mlx4_mtt_addr(dev, mtt); in mlx4_srq_alloc()
alloc.c
807 &wqres->mtt); in mlx4_alloc_hwq_res()
811 err = mlx4_buf_write_mtt(dev, &wqres->mtt, &wqres->buf, GFP_KERNEL); in mlx4_alloc_hwq_res()
818 mlx4_mtt_cleanup(dev, &wqres->mtt); in mlx4_alloc_hwq_res()
831 mlx4_mtt_cleanup(dev, &wqres->mtt); in mlx4_free_hwq_res()
eq.c
1019 err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, &eq->mtt); in mlx4_create_eq()
1023 err = mlx4_write_mtt(dev, &eq->mtt, 0, npages, dma_list); in mlx4_create_eq()
1033 mtt_addr = mlx4_mtt_addr(dev, &eq->mtt); in mlx4_create_eq()
1057 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_create_eq()
1097 mlx4_mtt_cleanup(dev, &eq->mtt); in mlx4_free_eq()
en_cq.c
149 err = mlx4_cq_alloc(mdev->dev, cq->size, &cq->wqres.mtt, in mlx4_en_activate_cq()
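The mlx4 core hits (eq.c, cq.c, srq.c, alloc.c) all follow the same four-step MTT lifecycle: mlx4_mtt_init() sizes the table and reserves a range, mlx4_write_mtt() fills it with page DMA addresses, mlx4_mtt_addr() returns the base address programmed into the hardware context (note the log_page_size fields above are written as page_shift - MLX4_ICM_PAGE_SHIFT), and mlx4_mtt_cleanup() releases the range. A minimal sketch modeled on the mlx4_create_eq() call sites; the example_* wrapper and dma_list parameter are assumptions, and error paths are trimmed:

#include <linux/mlx4/device.h>

/* Hypothetical helper: build an MTT for npages pages and return its
 * base address for use in an EQ/CQ/QP/SRQ hardware context. */
static int example_setup_mtt(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                             u64 *dma_list, int npages, u64 *mtt_addr)
{
        int err;

        /* Reserve MTT entries for npages pages of PAGE_SHIFT bytes each. */
        err = mlx4_mtt_init(dev, npages, PAGE_SHIFT, mtt);
        if (err)
                return err;

        /* Write the page DMA addresses starting at MTT index 0. */
        err = mlx4_write_mtt(dev, mtt, 0, npages, dma_list);
        if (err) {
                mlx4_mtt_cleanup(dev, mtt);
                return err;
        }

        /* This is the value the callers split into the context's MTT
         * base-address high/low fields. */
        *mtt_addr = mlx4_mtt_addr(dev, mtt);
        return 0;
}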
/drivers/infiniband/hw/mlx4/
cq.c
112 &buf->mtt); in mlx4_ib_alloc_cq_buf()
116 err = mlx4_buf_write_mtt(dev->dev, &buf->mtt, &buf->buf, GFP_KERNEL); in mlx4_ib_alloc_cq_buf()
123 mlx4_mtt_cleanup(dev->dev, &buf->mtt); in mlx4_ib_alloc_cq_buf()
150 ilog2((*umem)->page_size), &buf->mtt); in mlx4_ib_get_cq_umem()
154 err = mlx4_ib_umem_write_mtt(dev, &buf->mtt, *umem); in mlx4_ib_get_cq_umem()
161 mlx4_mtt_cleanup(dev->dev, &buf->mtt); in mlx4_ib_get_cq_umem()
241 err = mlx4_cq_alloc(dev->dev, entries, &cq->buf.mtt, uar, in mlx4_ib_create_cq()
269 mlx4_mtt_cleanup(dev->dev, &cq->buf.mtt); in mlx4_ib_create_cq()
379 struct mlx4_mtt mtt; in mlx4_ib_resize_cq() local
417 mtt = cq->buf.mtt; in mlx4_ib_resize_cq()
[all …]
srq.c
125 ilog2(srq->umem->page_size), &srq->mtt); in mlx4_ib_create_srq()
129 err = mlx4_ib_umem_write_mtt(dev, &srq->mtt, srq->umem); in mlx4_ib_create_srq()
166 &srq->mtt); in mlx4_ib_create_srq()
170 err = mlx4_buf_write_mtt(dev->dev, &srq->mtt, &srq->buf, GFP_KERNEL); in mlx4_ib_create_srq()
190 err = mlx4_srq_alloc(dev->dev, to_mpd(pd)->pdn, cqn, xrcdn, &srq->mtt, in mlx4_ib_create_srq()
215 mlx4_mtt_cleanup(dev->dev, &srq->mtt); in mlx4_ib_create_srq()
283 mlx4_mtt_cleanup(dev->dev, &msrq->mtt); in mlx4_ib_destroy_srq()
mr.c
89 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt, in mlx4_ib_umem_write_mtt() argument
106 len = sg_dma_len(sg) >> mtt->page_shift; in mlx4_ib_umem_write_mtt()
115 err = mlx4_write_mtt(dev->dev, mtt, n, in mlx4_ib_umem_write_mtt()
126 err = mlx4_write_mtt(dev->dev, mtt, n, i, pages); in mlx4_ib_umem_write_mtt()
196 err = mlx4_ib_umem_write_mtt(dev, &mr->mmr.mtt, mr->umem); in mlx4_ib_reg_user_mr()
290 err = mlx4_ib_umem_write_mtt(dev, &mmr->mmr.mtt, mmr->umem); in mlx4_ib_rereg_user_mr()
mlx4_ib.h
107 struct mlx4_mtt mtt; member
303 struct mlx4_mtt mtt; member
338 struct mlx4_mtt mtt; member
700 int mlx4_ib_umem_write_mtt(struct mlx4_ib_dev *dev, struct mlx4_mtt *mtt,
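mr.c shows how the IB layer turns a user memory region into MTT entries: it walks the umem scatterlist, converts each segment length to a page count with sg_dma_len(sg) >> mtt->page_shift, stages page addresses in a local array, and flushes them with mlx4_write_mtt() whenever the array fills, plus once more for the remainder. A rough sketch of just that batching idea; the example_* helper, the flat page_dma input, and the staging-buffer size are assumptions, not the driver's actual structure:

#include <linux/kernel.h>
#include <linux/mlx4/device.h>

/* Hypothetical helper: write npages page addresses into an MTT in batches. */
static int example_write_pages(struct mlx4_dev *dev, struct mlx4_mtt *mtt,
                               u64 *page_dma, int npages)
{
        u64 pages[64];          /* staging buffer; size is an assumption */
        int i = 0;              /* pages currently staged */
        int n = 0;              /* MTT index already written */
        int j, err;

        for (j = 0; j < npages; ++j) {
                pages[i++] = page_dma[j];
                if (i == ARRAY_SIZE(pages)) {
                        err = mlx4_write_mtt(dev, mtt, n, i, pages);
                        if (err)
                                return err;
                        n += i;
                        i = 0;
                }
        }

        /* Flush the final partial batch, as in the last mr.c hit above. */
        return i ? mlx4_write_mtt(dev, mtt, n, i, pages) : 0;
}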
/drivers/net/irda/
bfin_sir.c
25 static void turnaround_delay(unsigned long last_jif, int mtt) in turnaround_delay() argument
29 mtt = mtt < 10000 ? 10000 : mtt; in turnaround_delay()
30 ticks = 1 + mtt / (USEC_PER_SEC / HZ); in turnaround_delay()
513 turnaround_delay(dev->last_rx, self->mtt); in bfin_sir_send_work()
544 self->mtt = irda_get_mtt(skb); in bfin_sir_hard_xmit()
ali-ircc.c
1409 int mtt, diff; in ali_ircc_fir_hard_xmit() local
1454 mtt = irda_get_mtt(skb); in ali_ircc_fir_hard_xmit()
1456 if (mtt) in ali_ircc_fir_hard_xmit()
1468 if (mtt > diff) in ali_ircc_fir_hard_xmit()
1470 mtt -= diff; in ali_ircc_fir_hard_xmit()
1477 if (mtt > 500) in ali_ircc_fir_hard_xmit()
1480 mtt = (mtt+250) / 500; /* 4 discard, 5 get advanced, Let's round off */ in ali_ircc_fir_hard_xmit()
1483 __func__, mtt); in ali_ircc_fir_hard_xmit()
1486 if (mtt == 1) /* 500 us */ in ali_ircc_fir_hard_xmit()
1491 else if (mtt == 2) /* 1 ms */ in ali_ircc_fir_hard_xmit()
[all …]
donauboe.c
620 toshoboe_makemttpacket (struct toshoboe_cb *self, void *buf, int mtt) in toshoboe_makemttpacket() argument
624 xbofs = ((int) (mtt/100)) * (int) (self->speed); in toshoboe_makemttpacket()
629 xbofs, mtt, self->speed); in toshoboe_makemttpacket()
964 int mtt, len, ctl; in toshoboe_hard_xmit() local
1028 if ((mtt = irda_get_mtt(skb))) in toshoboe_hard_xmit()
1043 mtt = toshoboe_makemttpacket (self, self->tx_bufs[self->txs], mtt); in toshoboe_hard_xmit()
1044 pr_debug("%s.mtt:%x(%x)%d\n", __func__, skb->len, mtt, self->txpending); in toshoboe_hard_xmit()
1045 if (mtt) in toshoboe_hard_xmit()
1047 self->ring->tx[self->txs].len = mtt & 0xfff; in toshoboe_hard_xmit()
w83977af_ir.c
486 int mtt; in w83977af_hard_xmit() local
519 mtt = irda_get_mtt(skb); in w83977af_hard_xmit()
520 pr_debug("%s(%ld), mtt=%d\n", __func__ , jiffies, mtt); in w83977af_hard_xmit()
521 if (mtt > 1000) in w83977af_hard_xmit()
522 mdelay(mtt/1000); in w83977af_hard_xmit()
523 else if (mtt) in w83977af_hard_xmit()
524 udelay(mtt); in w83977af_hard_xmit()
irda-usb.c
393 int res, mtt; in irda_usb_hard_xmit() local
495 mtt = irda_get_mtt(skb); in irda_usb_hard_xmit()
496 if (mtt) { in irda_usb_hard_xmit()
508 if (mtt > diff) { in irda_usb_hard_xmit()
509 mtt -= diff; in irda_usb_hard_xmit()
510 if (mtt > 1000) in irda_usb_hard_xmit()
511 mdelay(mtt/1000); in irda_usb_hard_xmit()
513 udelay(mtt); in irda_usb_hard_xmit()
nsc-ircc.c
1443 int mtt, diff; in nsc_ircc_hard_xmit_fir() local
1501 mtt = irda_get_mtt(skb); in nsc_ircc_hard_xmit_fir()
1502 if (mtt) { in nsc_ircc_hard_xmit_fir()
1509 if (mtt > diff) { in nsc_ircc_hard_xmit_fir()
1510 mtt -= diff; in nsc_ircc_hard_xmit_fir()
1517 if (mtt > 125) { in nsc_ircc_hard_xmit_fir()
1519 mtt = mtt / 125; in nsc_ircc_hard_xmit_fir()
1523 outb(mtt & 0xff, iobase+TMRL); in nsc_ircc_hard_xmit_fir()
1524 outb((mtt >> 8) & 0x0f, iobase+TMRH); in nsc_ircc_hard_xmit_fir()
1537 udelay(mtt); in nsc_ircc_hard_xmit_fir()
bfin_sir.h
80 int mtt; member
sa1100_ir.c
396 int mtt = irda_get_mtt(skb); in sa1100_irda_fir_tx_start() local
415 if (mtt) in sa1100_irda_fir_tx_start()
416 udelay(mtt); in sa1100_irda_fir_tx_start()
pxaficp_ir.c
617 unsigned long mtt = irda_get_mtt(skb); in pxa_irda_hard_xmit() local
622 if (mtt) in pxa_irda_hard_xmit()
623 while ((sched_clock() - si->last_clk) * 1000 < mtt) in pxa_irda_hard_xmit()
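In the IrDA drivers, mtt is the minimum turnaround time in microseconds that must separate the last received frame from the next transmission. Every hard_xmit path above reads it with irda_get_mtt(skb), subtracts however much time has already elapsed since the last reception, and waits out the remainder, either by busy-waiting (udelay()/mdelay(), as in irda-usb.c and w83977af_ir.c) or by programming a hardware timer (nsc-ircc.c). A simplified sketch of the busy-wait variant; the example_* helper and the way the elapsed time is obtained are assumptions:

#include <linux/delay.h>

/* Hypothetical helper: enforce the minimum turnaround time before TX.
 * mtt and elapsed_us are both in microseconds. */
static void example_wait_min_turnaround(int mtt, int elapsed_us)
{
        if (!mtt || elapsed_us >= mtt)
                return;                 /* turnaround already satisfied */

        mtt -= elapsed_us;              /* wait only for the remainder */
        if (mtt > 1000)
                mdelay(mtt / 1000);     /* long waits in milliseconds */
        else
                udelay(mtt);
}

Callers would pass mtt = irda_get_mtt(skb) and an elapsed time derived from the timestamp of the last received frame (e.g. dev->last_rx jiffies or si->last_clk above).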
