
Searched refs: num_tx_queues (Results 1 – 13 of 13) sorted by relevance

/net/sched/
sch_mq.c
62 for (ntx = 0; ntx < dev->num_tx_queues && priv->qdiscs[ntx]; ntx++) in mq_destroy()
83 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), in mq_init()
88 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_init()
113 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_attach()
144 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mq_dump()
179 if (ntx >= dev->num_tx_queues) in mq_queue_get()
265 for (ntx = arg->skip; ntx < dev->num_tx_queues; ntx++) { in mq_walk()
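
Taken together, the sch_mq.c hits trace one pattern end to end: mq_init() allocates one child-qdisc slot per hardware TX queue, and every later operation (attach, dump, walk, destroy) bounds its loop by the same dev->num_tx_queues. Below is a minimal userspace C sketch of that allocate-then-iterate shape; fake_dev and fake_qdisc are hypothetical stand-ins, not kernel types.

#include <stdio.h>
#include <stdlib.h>

struct fake_qdisc { int handle; };

struct fake_dev {
	unsigned int num_tx_queues;
	struct fake_qdisc **qdiscs;	/* one slot per TX queue */
};

static int fake_mq_init(struct fake_dev *dev)
{
	unsigned int ntx;

	/* mirrors kcalloc(dev->num_tx_queues, ...) at hit 83 above */
	dev->qdiscs = calloc(dev->num_tx_queues, sizeof(dev->qdiscs[0]));
	if (!dev->qdiscs)
		return -1;
	for (ntx = 0; ntx < dev->num_tx_queues; ntx++) {
		dev->qdiscs[ntx] = malloc(sizeof(*dev->qdiscs[ntx]));
		if (!dev->qdiscs[ntx])
			return -1;
		dev->qdiscs[ntx]->handle = (int)ntx + 1;
	}
	return 0;
}

static void fake_mq_destroy(struct fake_dev *dev)
{
	unsigned int ntx;

	if (!dev->qdiscs)
		return;
	/* mirrors the bounded loop at hit 62: stop at the first NULL
	 * slot, since init may have failed part-way through */
	for (ntx = 0; ntx < dev->num_tx_queues && dev->qdiscs[ntx]; ntx++)
		free(dev->qdiscs[ntx]);
	free(dev->qdiscs);
}

int main(void)
{
	struct fake_dev dev = { .num_tx_queues = 4 };

	if (fake_mq_init(&dev) == 0)
		printf("allocated %u per-queue qdiscs\n", dev.num_tx_queues);
	fake_mq_destroy(&dev);
	return 0;
}
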
sch_mqprio.c
38 ntx < dev->num_tx_queues && priv->qdiscs[ntx]; in mqprio_destroy()
245 if (dev->num_tx_queues >= TC_H_MIN_PRIORITY) in mqprio_init()
263 priv->qdiscs = kcalloc(dev->num_tx_queues, sizeof(priv->qdiscs[0]), in mqprio_init()
268 for (i = 0; i < dev->num_tx_queues; i++) { in mqprio_init()
339 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mqprio_attach()
357 if (ntx >= dev->num_tx_queues) in mqprio_queue_get()
443 for (ntx = 0; ntx < dev->num_tx_queues; ntx++) { in mqprio_dump()
521 return (ntx <= dev->num_tx_queues) ? ntx : 0; in mqprio_find()
642 for (ntx -= TC_MAX_QUEUE; ntx < dev->num_tx_queues; ntx++) { in mqprio_walk()
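
The hit at 521 is a compact bounds clamp: mqprio addresses its per-queue children by 1-based class number, so a number is kept only while it does not exceed the device's queue count, and anything out of range collapses to 0 ("not found"). The same check lifted into a standalone helper; the name classid_to_queue is mine, not the kernel's.

/* Hypothetical standalone version of the clamp at hit 521 above.
 * ntx is a 1-based class/queue number; values past num_tx_queues
 * map to 0, the conventional "no such class" return. */
unsigned long classid_to_queue(unsigned long ntx, unsigned int num_tx_queues)
{
	return (ntx <= num_tx_queues) ? ntx : 0;
}
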
sch_taprio.c
464 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_peek_soft()
498 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_peek_offload()
549 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_dequeue_soft()
623 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_dequeue_offload()
946 if (qopt->num_tc > dev->num_tx_queues) { in taprio_parse_mqprio_opt()
965 if (qopt->offset[i] >= dev->num_tx_queues || in taprio_parse_mqprio_opt()
1626 for (i = 0; i < dev->num_tx_queues; i++) in taprio_reset()
1651 for (i = 0; i < dev->num_tx_queues; i++) in taprio_destroy()
1701 q->qdiscs = kcalloc(dev->num_tx_queues, in taprio_init()
1711 for (i = 0; i < dev->num_tx_queues; i++) { in taprio_init()
[all …]
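
Before accepting a user-supplied mqprio option, taprio checks it against the device: the traffic-class count may not exceed dev->num_tx_queues (hit 946), and each class's queue range must stay inside the device's queues (hit 965). A sketch of that validation follows, using a hypothetical struct that mirrors only the fields being checked (the real one is struct tc_mqprio_qopt in the UAPI headers).

#include <stdbool.h>

/* Hypothetical mirror of the fields taprio_parse_mqprio_opt() checks. */
struct mqprio_opt {
	unsigned char num_tc;
	unsigned short count[16];
	unsigned short offset[16];
};

bool mqprio_opt_fits_device(const struct mqprio_opt *qopt,
			    unsigned int num_tx_queues)
{
	int i;

	/* cannot have more traffic classes than TX queues (cf. hit 946) */
	if (qopt->num_tc > num_tx_queues)
		return false;

	/* each class's [offset, offset + count) must be non-empty and
	 * stay inside the queue range (cf. the offset check at hit 965) */
	for (i = 0; i < qopt->num_tc; i++) {
		if (qopt->offset[i] >= num_tx_queues ||
		    qopt->count[i] == 0 ||
		    qopt->offset[i] + qopt->count[i] > num_tx_queues)
			return false;
	}
	return true;
}
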
sch_generic.c
428 for (i = 1; i < dev->num_tx_queues; i++) { in dev_trans_start()
451 for (i = 0; i < dev->num_tx_queues; i++) { in dev_watchdog()
1234 for (i = 0; i < dev->num_tx_queues; i++) { in some_qdisc_is_busy()
1334 for (i = 0; i < dev->num_tx_queues; i++) { in dev_qdisc_change_tx_queue_len()
sch_multiq.c
250 q->max_bands = qdisc_dev(sch)->num_tx_queues; in multiq_init()
sch_api.c
1050 num_q = dev->num_tx_queues; in qdisc_graft()
/net/caif/
caif_usb.c
178 if (dev->num_tx_queues > 1) in cfusbl_device_notify()
/net/dsa/
slave.c
1806 if (!ds->num_tx_queues) in dsa_slave_create()
1807 ds->num_tx_queues = 1; in dsa_slave_create()
1811 ds->num_tx_queues, 1); in dsa_slave_create()
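
The dsa hits show a defaulting step in dsa_slave_create(): a switch driver that left ds->num_tx_queues at 0 gets one TX queue, and the (possibly defaulted) count is then passed to the multiqueue netdev allocator. A sketch of just the fallback; dsa_switch_stub is a hypothetical stand-in for struct dsa_switch.

/* Sketch of the fallback at hits 1806-1811: a driver-supplied queue
 * count of 0 means "unspecified", so default it to 1 before sizing
 * the slave device. */
struct dsa_switch_stub { unsigned int num_tx_queues; };

unsigned int dsa_effective_txq(struct dsa_switch_stub *ds)
{
	if (!ds->num_tx_queues)
		ds->num_tx_queues = 1;	/* every netdev needs >= 1 TX queue */
	return ds->num_tx_queues;
}
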
/net/core/
rtnetlink.c
1743 nla_put_u32(skb, IFLA_NUM_TX_QUEUES, dev->num_tx_queues) || in rtnl_fill_ifinfo()
3169 unsigned int num_tx_queues = 1; in rtnl_create_link() local
3173 num_tx_queues = nla_get_u32(tb[IFLA_NUM_TX_QUEUES]); in rtnl_create_link()
3175 num_tx_queues = ops->get_num_tx_queues(); in rtnl_create_link()
3182 if (num_tx_queues < 1 || num_tx_queues > 4096) { in rtnl_create_link()
3193 ops->setup, num_tx_queues, num_rx_queues); in rtnl_create_link()
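
rtnl_create_link() resolves a new link's queue count with clear precedence: an explicit IFLA_NUM_TX_QUEUES attribute wins, otherwise the link type's get_num_tx_queues() callback is used, otherwise 1, and any result outside 1..4096 is rejected (hit 3182). A hedged sketch of that resolution order; the function name and parameter shapes here are illustrative, not the kernel signature.

#include <errno.h>

/* Sketch of the precedence in rtnl_create_link(): explicit attribute,
 * then per-type default callback, then 1; out-of-range counts fail
 * with -EINVAL as at hit 3182. */
int resolve_num_tx_queues(const unsigned int *attr_val,      /* IFLA_NUM_TX_QUEUES, or NULL */
			  unsigned int (*get_default)(void), /* per-type callback, may be NULL */
			  unsigned int *out)
{
	unsigned int n = 1;

	if (attr_val)
		n = *attr_val;
	else if (get_default)
		n = get_default();

	if (n < 1 || n > 4096)
		return -EINVAL;

	*out = n;
	return 0;
}
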
netpoll.c
144 for (i = 0; i < dev->num_tx_queues; i++) { in netif_local_xmit_active()
dev.c
2583 netif_reset_xps_queues(dev, index, dev->num_tx_queues - index); in netif_reset_xps_queues_gt()
2638 WARN_ON_ONCE(index >= dev->num_tx_queues); in __netif_set_xps_queue()
2843 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_all_sb_channels()
2898 struct netdev_queue *txq = &dev->_tx[dev->num_tx_queues]; in netdev_unbind_sb_channel()
2970 if (txq < 1 || txq > dev->num_tx_queues) in netif_set_real_num_tx_queues()
9882 unsigned int count = dev->num_tx_queues; in netif_alloc_netdev_queues()
9905 for (i = 0; i < dev->num_tx_queues; i++) { in netif_tx_stop_all_queues()
10552 dev->num_tx_queues = txqs; in alloc_netdev_mqs()
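
The dev.c hits separate two counters: dev->num_tx_queues is fixed when alloc_netdev_mqs() sizes the queue array (hit 10552), while the count a driver activates at runtime through netif_set_real_num_tx_queues() must stay within that allocation (hit 2970). A small sketch of the allocated-versus-active invariant, with a hypothetical mini_netdev in place of struct net_device.

#include <errno.h>

/* Hypothetical mini-netdev modelling the two counters: num_tx_queues
 * is sized at allocation and never grows; real_num_tx_queues is the
 * active subset and may change while the device is up. */
struct mini_netdev {
	unsigned int num_tx_queues;	 /* allocated, fixed at creation */
	unsigned int real_num_tx_queues; /* active, always <= num_tx_queues */
};

int mini_set_real_num_tx_queues(struct mini_netdev *dev, unsigned int txq)
{
	/* mirrors the range check at hit 2970 */
	if (txq < 1 || txq > dev->num_tx_queues)
		return -EINVAL;
	dev->real_num_tx_queues = txq;
	return 0;
}
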
net-sysfs.c
1179 BUG_ON(i >= dev->num_tx_queues); in get_netdev_queue_index()
/net/xdp/
xsk.c
112 if (queue_id < dev->num_tx_queues) in xsk_clear_pool_at_qid()