Home
last modified time | relevance | path

Searched refs:mtu (Results 1 – 25 of 576) sorted by relevance

12345678910>>...24

/drivers/usb/mtu3/
Dmtu3_core.c45 dev_dbg(mep->mtu->dev, "%s fifo:%#x/%#x, start_bit: %d\n", in ep_fifo_alloc()
66 dev_dbg(mep->mtu->dev, "%s size:%#x/%#x, start_bit: %d\n", in ep_fifo_free()
71 static inline void mtu3_ss_func_set(struct mtu3 *mtu, bool enable) in mtu3_ss_func_set() argument
75 mtu3_setbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); in mtu3_ss_func_set()
77 mtu3_clrbits(mtu->mac_base, U3D_USB3_CONFIG, USB3_EN); in mtu3_ss_func_set()
79 dev_dbg(mtu->dev, "USB3_EN = %d\n", !!enable); in mtu3_ss_func_set()
83 static inline void mtu3_hs_softconn_set(struct mtu3 *mtu, bool enable) in mtu3_hs_softconn_set() argument
86 mtu3_setbits(mtu->mac_base, U3D_POWER_MANAGEMENT, in mtu3_hs_softconn_set()
89 mtu3_clrbits(mtu->mac_base, U3D_POWER_MANAGEMENT, in mtu3_hs_softconn_set()
92 dev_dbg(mtu->dev, "SOFTCONN = %d\n", !!enable); in mtu3_hs_softconn_set()
[all …]
Dmtu3_gadget.c15 __releases(mep->mtu->lock) in mtu3_req_complete()
16 __acquires(mep->mtu->lock) in mtu3_req_complete()
19 struct mtu3 *mtu = mreq->mtu; in mtu3_req_complete() local
26 spin_unlock(&mtu->lock); in mtu3_req_complete()
30 usb_gadget_unmap_request(&mtu->g, req, mep->is_in); in mtu3_req_complete()
32 dev_dbg(mtu->dev, "%s complete req: %p, sts %d, %d/%d\n", in mtu3_req_complete()
36 spin_lock(&mtu->lock); in mtu3_req_complete()
46 dev_dbg(mep->mtu->dev, "abort %s's req: sts %d\n", mep->name, status); in nuke()
63 struct mtu3 *mtu = mep->mtu; in mtu3_ep_enable() local
74 switch (mtu->g.speed) { in mtu3_ep_enable()
[all …]
Dmtu3_gadget_ep0.c18 #define next_ep0_request(mtu) next_request((mtu)->ep0) argument
39 static char *decode_ep0_state(struct mtu3 *mtu) in decode_ep0_state() argument
41 switch (mtu->ep0_state) { in decode_ep0_state()
57 static void ep0_req_giveback(struct mtu3 *mtu, struct usb_request *req) in ep0_req_giveback() argument
59 mtu3_req_complete(mtu->ep0, req, 0); in ep0_req_giveback()
63 forward_to_driver(struct mtu3 *mtu, const struct usb_ctrlrequest *setup) in forward_to_driver() argument
64 __releases(mtu->lock) in forward_to_driver()
65 __acquires(mtu->lock) in forward_to_driver()
69 if (!mtu->gadget_driver) in forward_to_driver()
72 spin_unlock(&mtu->lock); in forward_to_driver()
[all …]
Dmtu3_qmu.c38 #define GPD_RX_BUF_LEN(mtu, x) \ argument
41 ((mtu)->gen2cp) ? GPD_RX_BUF_LEN_EL(x_) : GPD_RX_BUF_LEN_OG(x_); \
46 #define GPD_DATA_LEN(mtu, x) \ argument
49 ((mtu)->gen2cp) ? GPD_DATA_LEN_EL(x_) : GPD_DATA_LEN_OG(x_); \
57 #define GPD_EXT_NGP(mtu, x) \ argument
60 ((mtu)->gen2cp) ? GPD_EXT_NGP_EL(x_) : GPD_EXT_NGP_OG(x_); \
63 #define GPD_EXT_BUF(mtu, x) \ argument
66 ((mtu)->gen2cp) ? GPD_EXT_BUF_EL(x_) : GPD_EXT_BUF_OG(x_); \
171 gpd = dma_pool_zalloc(mep->mtu->qmu_gpd_pool, GFP_ATOMIC, &ring->dma); in mtu3_gpd_ring_alloc()
184 dma_pool_free(mep->mtu->qmu_gpd_pool, in mtu3_gpd_ring_free()
[all …]
Dmtu3_debugfs.c81 struct mtu3 *mtu = sf->private; in mtu3_link_state_show() local
82 void __iomem *mbase = mtu->mac_base; in mtu3_link_state_show()
93 struct mtu3 *mtu = sf->private; in mtu3_ep_used_show() local
99 spin_lock_irqsave(&mtu->lock, flags); in mtu3_ep_used_show()
101 for (i = 0; i < mtu->num_eps; i++) { in mtu3_ep_used_show()
102 mep = mtu->in_eps + i; in mtu3_ep_used_show()
108 mep = mtu->out_eps + i; in mtu3_ep_used_show()
116 spin_unlock_irqrestore(&mtu->lock, flags); in mtu3_ep_used_show()
124 static void mtu3_debugfs_regset(struct mtu3 *mtu, void __iomem *base, in mtu3_debugfs_regset() argument
131 mregs = devm_kzalloc(mtu->dev, sizeof(*mregs), GFP_KERNEL); in mtu3_debugfs_regset()
[all …]
Dmtu3.h276 struct mtu3 *mtu; member
299 struct mtu3 *mtu; member
419 int mtu3_config_ep(struct mtu3 *mtu, struct mtu3_ep *mep,
421 void mtu3_deconfig_ep(struct mtu3 *mtu, struct mtu3_ep *mep);
423 void mtu3_start(struct mtu3 *mtu);
424 void mtu3_stop(struct mtu3 *mtu);
425 void mtu3_dev_on_off(struct mtu3 *mtu, int is_on);
427 int mtu3_gadget_setup(struct mtu3 *mtu);
428 void mtu3_gadget_cleanup(struct mtu3 *mtu);
429 void mtu3_gadget_reset(struct mtu3 *mtu);
[all …]
Dmtu3_qmu.h31 irqreturn_t mtu3_qmu_isr(struct mtu3 *mtu);
32 int mtu3_qmu_init(struct mtu3 *mtu);
33 void mtu3_qmu_exit(struct mtu3 *mtu);
/drivers/clocksource/
Dsh_mtu2.c33 struct sh_mtu2_device *mtu; member
161 return ioread8(ch->mtu->mapbase + 0x280); in sh_mtu2_read()
177 return iowrite8(value, ch->mtu->mapbase + 0x280); in sh_mtu2_write()
192 raw_spin_lock_irqsave(&ch->mtu->lock, flags); in sh_mtu2_start_stop_ch()
201 raw_spin_unlock_irqrestore(&ch->mtu->lock, flags); in sh_mtu2_start_stop_ch()
210 pm_runtime_get_sync(&ch->mtu->pdev->dev); in sh_mtu2_enable()
211 dev_pm_syscore_device(&ch->mtu->pdev->dev, true); in sh_mtu2_enable()
214 ret = clk_enable(ch->mtu->clk); in sh_mtu2_enable()
216 dev_err(&ch->mtu->pdev->dev, "ch%u: cannot enable clock\n", in sh_mtu2_enable()
224 rate = clk_get_rate(ch->mtu->clk) / 64; in sh_mtu2_enable()
[all …]
/drivers/infiniband/sw/rxe/
Drxe_param.h12 static inline enum ib_mtu rxe_mtu_int_to_enum(int mtu) in rxe_mtu_int_to_enum() argument
14 if (mtu < 256) in rxe_mtu_int_to_enum()
16 else if (mtu < 512) in rxe_mtu_int_to_enum()
18 else if (mtu < 1024) in rxe_mtu_int_to_enum()
20 else if (mtu < 2048) in rxe_mtu_int_to_enum()
22 else if (mtu < 4096) in rxe_mtu_int_to_enum()
29 static inline enum ib_mtu eth_mtu_int_to_enum(int mtu) in eth_mtu_int_to_enum() argument
31 mtu -= RXE_MAX_HDR_LENGTH; in eth_mtu_int_to_enum()
33 return rxe_mtu_int_to_enum(mtu); in eth_mtu_int_to_enum()
Drxe.c224 enum ib_mtu mtu; in rxe_set_mtu() local
226 mtu = eth_mtu_int_to_enum(ndev_mtu); in rxe_set_mtu()
229 mtu = mtu ? min_t(enum ib_mtu, mtu, IB_MTU_4096) : IB_MTU_256; in rxe_set_mtu()
231 port->attr.active_mtu = mtu; in rxe_set_mtu()
232 port->mtu_cap = ib_mtu_enum_to_int(mtu); in rxe_set_mtu()
238 int rxe_add(struct rxe_dev *rxe, unsigned int mtu, const char *ibdev_name) in rxe_add() argument
246 rxe_set_mtu(rxe, mtu); in rxe_add()
Drxe_req.c24 int to_send = (wqe->dma.resid > qp->mtu) ? in retry_first_write_send()
25 qp->mtu : wqe->dma.resid; in retry_first_write_send()
37 wqe->iova += qp->mtu; in retry_first_write_send()
93 qp->mtu; in req_retry()
94 wqe->iova += npsn * qp->mtu; in req_retry()
305 int fits = (wqe->dma.resid <= qp->mtu); in next_opcode()
358 return qp->mtu; in get_mtu()
507 int num_pkt = (wqe->dma.resid + payload + qp->mtu - 1) / qp->mtu; in update_wqe_psn()
622 int mtu; in rxe_requester() local
688 mtu = get_mtu(qp); in rxe_requester()
[all …]
/drivers/infiniband/hw/irdma/
Dmain.c55 static void irdma_log_invalid_mtu(u16 mtu, struct irdma_sc_dev *dev) in irdma_log_invalid_mtu() argument
57 if (mtu < IRDMA_MIN_MTU_IPV4) in irdma_log_invalid_mtu()
58 …rn(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 576 for IPv4\n", mtu); in irdma_log_invalid_mtu()
59 else if (mtu < IRDMA_MIN_MTU_IPV6) in irdma_log_invalid_mtu()
60 …(to_ibdev(dev), "MTU setting [%d] too low for RDMA traffic. Minimum MTU is 1280 for IPv6\n", mtu); in irdma_log_invalid_mtu()
90 ibdev_dbg(&iwdev->ibdev, "CLNT: new MTU = %d\n", iwdev->netdev->mtu); in irdma_iidc_event_handler()
91 if (iwdev->vsi.mtu != iwdev->netdev->mtu) { in irdma_iidc_event_handler()
92 l2params.mtu = iwdev->netdev->mtu; in irdma_iidc_event_handler()
94 irdma_log_invalid_mtu(l2params.mtu, &iwdev->rf->sc_dev); in irdma_iidc_event_handler()
283 l2params.mtu = iwdev->netdev->mtu; in irdma_probe()
Di40iw_if.c29 if (iwdev->vsi.mtu != params->mtu) { in i40iw_l2param_change()
31 l2params.mtu = params->mtu; in i40iw_l2param_change()
134 l2params.mtu = (cdev_info->params.mtu) ? cdev_info->params.mtu : IRDMA_DEFAULT_MTU; in i40iw_open()
/drivers/net/ethernet/qualcomm/rmnet/
Drmnet_vnd.c85 new_mtu > (priv->real_dev->mtu - headroom)) in rmnet_vnd_change_mtu()
88 rmnet_dev->mtu = new_mtu; in rmnet_vnd_change_mtu()
225 rmnet_dev->mtu = RMNET_DFLT_PACKET_SIZE; in rmnet_vnd_setup()
272 if (rmnet_vnd_change_mtu(rmnet_dev, real_dev->mtu - headroom)) { in rmnet_vnd_newlink()
332 if (ep->egress_dev->mtu > (real_dev->mtu - headroom)) in rmnet_vnd_validate_real_dev_mtu()
350 if (ep->egress_dev->mtu <= (real_dev->mtu - headroom)) in rmnet_vnd_update_dev_mtu()
354 real_dev->mtu - headroom)) in rmnet_vnd_update_dev_mtu()
/drivers/net/hamradio/
Dmkiss.c55 int mtu; /* Our mtu (to spot changes!) */ member
364 len = dev->mtu * 2; in ax_changedmtu()
381 dev->mtu = ax->mtu; in ax_changedmtu()
415 ax->mtu = dev->mtu + 73; in ax_changedmtu()
431 if (ax->mtu != ax->dev->mtu + 73) /* Someone has been ifconfigging */ in ax_encaps()
434 if (len > ax->mtu) { /* Sigh, shouldn't occur BUT ... */ in ax_encaps()
584 len = dev->mtu * 2; in ax_open()
600 ax->mtu = dev->mtu + 73; in ax_open()
642 dev->mtu = AX_MTU; in ax_setup()
889 if (ax->mtu != ax->dev->mtu + 73) in mkiss_receive_buf()
/drivers/net/ethernet/ibm/emac/
Dcore.h58 static inline int emac_rx_size(int mtu) in emac_rx_size() argument
60 if (mtu > ETH_DATA_LEN) in emac_rx_size()
67 static inline int emac_rx_skb_size(int mtu) in emac_rx_skb_size() argument
69 int size = max(mtu + EMAC_MTU_OVERHEAD, emac_rx_size(mtu)); in emac_rx_skb_size()
75 static inline int emac_rx_sync_size(int mtu) in emac_rx_sync_size() argument
77 return SKB_DATA_ALIGN(emac_rx_size(mtu) + NET_IP_ALIGN); in emac_rx_sync_size()
/drivers/net/ethernet/chelsio/cxgb3/
Dxgmac.c337 static int rx_fifo_hwm(int mtu) in rx_fifo_hwm() argument
341 hwm = max(MAC_RXFIFO_SIZE - 3 * mtu, (MAC_RXFIFO_SIZE * 38) / 100); in rx_fifo_hwm()
345 int t3_mac_set_mtu(struct cmac *mac, unsigned int mtu) in t3_mac_set_mtu() argument
356 mtu += 14; in t3_mac_set_mtu()
357 if (mtu > 1536) in t3_mac_set_mtu()
358 mtu += 4; in t3_mac_set_mtu()
360 if (mtu > MAX_FRAME_SIZE - 4) in t3_mac_set_mtu()
362 t3_write_reg(adap, A_XGM_RX_MAX_PKT_SIZE + mac->offset, mtu); in t3_mac_set_mtu()
383 V_RXMAXPKTSIZE(mtu)); in t3_mac_set_mtu()
389 V_RXMAXPKTSIZE(mtu)); in t3_mac_set_mtu()
[all …]
/drivers/infiniband/hw/hfi1/
Dqp.c152 static inline int verbs_mtu_enum_to_int(struct ib_device *dev, enum ib_mtu mtu) in verbs_mtu_enum_to_int() argument
155 if (mtu == (enum ib_mtu)OPA_MTU_10240) in verbs_mtu_enum_to_int()
156 mtu = (enum ib_mtu)OPA_MTU_8192; in verbs_mtu_enum_to_int()
157 return opa_mtu_enum_to_int((enum opa_mtu)mtu); in verbs_mtu_enum_to_int()
290 if (wqe->length > dd->vld[15].mtu) in hfi1_setup_wqe()
794 int mtu_to_path_mtu(u32 mtu) in mtu_to_path_mtu() argument
796 return mtu_to_enum(mtu, OPA_MTU_8192); in mtu_to_path_mtu()
801 u32 mtu; in mtu_from_qp() local
815 mtu = verbs_mtu_enum_to_int(qp->ibqp.device, pmtu); in mtu_from_qp()
817 mtu = min_t(u32, mtu, dd->vld[vl].mtu); in mtu_from_qp()
[all …]
/drivers/net/ethernet/netronome/nfp/flower/
Dcmsg.c76 unsigned int mtu, bool mtu_only) in nfp_flower_cmsg_portmod() argument
94 msg->mtu = cpu_to_be16(mtu); in nfp_flower_cmsg_portmod()
136 be16_to_cpu(msg->mtu) != app_priv->mtu_conf.requested_val) { in nfp_flower_process_mtu_ack()
172 u16 mtu = be16_to_cpu(msg->mtu); in nfp_flower_cmsg_portmod_rx() local
177 if (mtu) in nfp_flower_cmsg_portmod_rx()
178 dev_set_mtu(netdev, mtu); in nfp_flower_cmsg_portmod_rx()
/drivers/net/caif/
Dcaif_virtio.c117 u32 mtu; member
386 if (cfv->allocsz <= (num_possible_cpus() + 1) * cfv->ndev->mtu) in cfv_create_genpool()
390 if (cfv->allocsz <= num_possible_cpus() * cfv->ndev->mtu) { in cfv_create_genpool()
422 cfv->reserved_size = num_possible_cpus() * cfv->ndev->mtu; in cfv_create_genpool()
490 if (unlikely(cfv->tx_hr + skb->len + cfv->tx_tr > cfv->mtu)) { in cfv_alloc_and_copy_to_shm()
492 cfv->tx_hr + skb->len + cfv->tx_tr, cfv->mtu); in cfv_alloc_and_copy_to_shm()
619 netdev->mtu = CFV_DEF_MTU_SIZE; in cfv_netdev_setup()
692 virtio_cread(vdev, struct virtio_caif_transf_config, mtu, in cfv_probe()
693 &cfv->mtu); in cfv_probe()
694 virtio_cread(vdev, struct virtio_caif_transf_config, mtu, in cfv_probe()
[all …]
/drivers/net/
Dntb_netdev.c130 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); in ntb_netdev_rx_handler()
138 rc = ntb_transport_rx_enqueue(qp, skb, skb->data, ndev->mtu + ETH_HLEN); in ntb_netdev_rx_handler()
255 skb = netdev_alloc_skb(ndev, ndev->mtu + ETH_HLEN); in ntb_netdev_open()
262 ndev->mtu + ETH_HLEN); in ntb_netdev_open()
309 ndev->mtu = new_mtu; in ntb_netdev_change_mtu()
316 if (ndev->mtu < new_mtu) { in ntb_netdev_change_mtu()
338 ndev->mtu = new_mtu; in ntb_netdev_change_mtu()
446 ndev->mtu = ntb_transport_max_size(dev->qp) - ETH_HLEN; in ntb_netdev_probe()
/drivers/staging/wlan-ng/
Dp80211conv.c367 if (payload_length > (netdev->mtu + ETH_HLEN)) { in skb_p80211_to_ether()
371 payload_length, netdev->mtu + ETH_HLEN); in skb_p80211_to_ether()
396 if (payload_length > netdev->mtu) { in skb_p80211_to_ether()
400 payload_length, netdev->mtu); in skb_p80211_to_ether()
429 > netdev->mtu) { in skb_p80211_to_ether()
435 sizeof(struct wlan_snap)), netdev->mtu); in skb_p80211_to_ether()
464 if (payload_length > netdev->mtu) { in skb_p80211_to_ether()
468 payload_length, netdev->mtu); in skb_p80211_to_ether()
/drivers/infiniband/hw/qib/
Dqib_qp.c265 static int mtu_to_enum(u32 mtu) in mtu_to_enum() argument
269 switch (mtu) { in mtu_to_enum()
294 int mtu, pmtu, pidx = qp->port_num - 1; in qib_get_pmtu_from_attr() local
298 mtu = ib_mtu_enum_to_int(attr->path_mtu); in qib_get_pmtu_from_attr()
299 if (mtu == -1) in qib_get_pmtu_from_attr()
302 if (mtu > dd->pport[pidx].ibmtu) in qib_get_pmtu_from_attr()
309 int qib_mtu_to_path_mtu(u32 mtu) in qib_mtu_to_path_mtu() argument
311 return mtu_to_enum(mtu); in qib_mtu_to_path_mtu()
/drivers/net/ethernet/marvell/octeontx2/nic/
Dcn10k.h13 static inline int mtu_to_dwrr_weight(struct otx2_nic *pfvf, int mtu) in mtu_to_dwrr_weight() argument
20 weight = mtu / pfvf->hw.dwrr_mtu; in mtu_to_dwrr_weight()
21 if (mtu % pfvf->hw.dwrr_mtu) in mtu_to_dwrr_weight()
/drivers/net/wan/
Dz85230.c730 c->mtu = dev->mtu + 64; in z8530_sync_open()
791 c->mtu = dev->mtu + 64; in z8530_sync_dma_open()
806 if (c->mtu > PAGE_SIZE / 2) in z8530_sync_dma_open()
864 set_dma_count(c->rxdma, c->mtu); in z8530_sync_dma_open()
971 c->mtu = dev->mtu + 64; in z8530_sync_txdma_open()
981 if (c->mtu > PAGE_SIZE / 2) in z8530_sync_txdma_open()
1287 c->mtu = 1500; in z8530_channel_load()
1457 ct = c->mtu - get_dma_residue(c->rxdma); in z8530_rx_done()
1470 set_dma_count(c->rxdma, c->mtu); in z8530_rx_done()
1523 c->max = c->mtu; in z8530_rx_done()
[all …]

12345678910>>...24