Home
last modified time | relevance | path

Searched refs:num_tx_queues (Results 1 – 14 of 14) sorted by relevance

/net/sched/
sch_mq.c:62 for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) in mq_destroy()
83 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), in mq_init()
88 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_init()
113 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_attach()
167 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_dump()
202 if (ntx >= dev->num_tx_queues) in mq_queue_get()
288 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { in mq_walk()
sch_mqprio.c:38 ntx < dev->num_tx_queues && priv->qdiscs[ntx]; in mqprio_destroy()
245 if (dev->num_tx_queues >= TC_H_MIN_PRIORITY) in mqprio_init()
263 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), in mqprio_init()
268 for (i = 0; i < dev->num_tx_queues; i++) { in mqprio_init()
339 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mqprio_attach()
379 if (ntx >= dev->num_tx_queues) in mqprio_queue_get()
465 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mqprio_dump()
543 return (ntx <= dev->num_tx_queues) ? ntx : 0; in mqprio_find()
664 for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) { in mqprio_walk()
sch_taprio.c:511 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_peek_soft()
580 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_dequeue_soft()
968 if (qopt->num_tc > dev->num_tx_queues) { in taprio_parse_mqprio_opt()
987 if (qopt->offset[i] >= dev->num_tx_queues || in taprio_parse_mqprio_opt()
1648 for (i = 0; i < dev->num_tx_queues; i++) in taprio_reset()
1673 for (i = 0; i < dev->num_tx_queues; i++) in taprio_destroy()
1723 q->qdiscs = kcalloc(dev->num_tx_queues, in taprio_init()
1733 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_init()
1762 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in taprio_attach()
1790 if (ntx >= dev->num_tx_queues) in taprio_queue_get()
[all …]
sch_generic.c:438 for (i = 1; i < dev->num_tx_queues; i++) { in dev_trans_start()
461 for (i = 0; i < dev->num_tx_queues; i++) { in dev_watchdog()
1261 for (i = 0; i < dev->num_tx_queues; i++) { in some_qdisc_is_busy()
1370 for (i = 0; i < dev->num_tx_queues; i++) { in dev_qdisc_change_tx_queue_len()
sch_multiq.c:250 q->max_bands = qdisc_dev(sch)->num_tx_queues; in multiq_init()
sch_htb.c:1185 for (ntx = q->num_direct_qdiscs; ntx < dev->num_tx_queues; ntx++) { in htb_attach_offload()
1202 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in htb_attach_software()
1394 if (err || offload_opt.qid >= dev->num_tx_queues) in htb_select_queue()
sch_api.c:1050 num_q = dev->num_tx_queues; in qdisc_graft()
/net/caif/
caif_usb.c:178 if (dev->num_tx_queues > 1) in cfusbl_device_notify()
/net/core/
rtnetlink.c:1736 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || in rtnl_fill_ifinfo()
3176 unsigned int num_tx_queues = 1; in rtnl_create_link() local
3181 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); in rtnl_create_link()
3183 num_tx_queues = ops->get_num_tx_queues(); in rtnl_create_link()
3190 if (num_tx_queues < 1 || num_tx_queues > 4096) { in rtnl_create_link()
3202 num_tx_queues, num_rx_queues); in rtnl_create_link()
3208 num_tx_queues, num_rx_queues); in rtnl_create_link()
netpoll.c:144 for (i = 0; i < dev->num_tx_queues; i++) { in netif_local_xmit_active()
dev.c:2530 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); in netif_reset_xps_queues_gt()
2604 WARN_ON_ONCE(index >= dev->num_tx_queues); in __netif_set_xps_queue()
2813 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_all_sb_channels()
2868 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_sb_channel()
2940 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
3022 if (txq < 1 || txq > dev->num_tx_queues || in netif_set_real_num_queues()
10209 unsigned int count = dev->num_tx_queues; in netif_alloc_netdev_queues()
10232 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_stop_all_queues()
10900 dev->num_tx_queues = txqs; in alloc_netdev_mqs()
net-sysfs.c:1220 BUG_ON(i >= dev->num_tx_queues); in get_netdev_queue_index()
/net/dsa/
slave.c:1946 if (!ds->num_tx_queues) in dsa_slave_create()
1947 ds->num_tx_queues = 1; in dsa_slave_create()
1951 ds->num_tx_queues, 1); in dsa_slave_create()
/net/xdp/
xsk.c:113 if (queue_id < dev->num_tx_queues) in xsk_clear_pool_at_qid()