Searched refs:queues (Results 1 – 25 of 339) sorted by relevance


/kernel/linux/linux-5.10/net/sched/
sch_multiq.c
25 struct Qdisc **queues; member
54 return q->queues[0]; in multiq_classify()
56 return q->queues[band]; in multiq_classify()
105 qdisc = q->queues[q->curband]; in multiq_dequeue()
137 qdisc = q->queues[curband]; in multiq_peek()
154 qdisc_reset(q->queues[band]); in multiq_reset()
167 qdisc_put(q->queues[band]); in multiq_destroy()
169 kfree(q->queues); in multiq_destroy()
197 if (q->queues[i] != &noop_qdisc) { in multiq_tune()
198 struct Qdisc *child = q->queues[i]; in multiq_tune()
[all …]
sch_prio.c
26 struct Qdisc *queues[TCQ_PRIO_BANDS]; member
57 return q->queues[q->prio2band[band & TC_PRIO_MAX]]; in prio_classify()
63 return q->queues[q->prio2band[0]]; in prio_classify()
65 return q->queues[band]; in prio_classify()
103 struct Qdisc *qdisc = q->queues[prio]; in prio_peek()
117 struct Qdisc *qdisc = q->queues[prio]; in prio_dequeue()
137 qdisc_reset(q->queues[prio]); in prio_reset()
175 qdisc_put(q->queues[prio]); in prio_destroy()
182 struct Qdisc *queues[TCQ_PRIO_BANDS]; in prio_tune() local
200 queues[i] = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, in prio_tune()
[all …]
/kernel/linux/linux-5.10/drivers/staging/wfx/
queue.c
234 struct wfx_queue *queues[IEEE80211_NUM_ACS * ARRAY_SIZE(wdev->vif)]; in wfx_tx_queues_get_skb() local
244 WARN_ON(num_queues >= ARRAY_SIZE(queues)); in wfx_tx_queues_get_skb()
245 queues[num_queues] = &wvif->tx_queue[i]; in wfx_tx_queues_get_skb()
247 if (wfx_tx_queue_get_weight(queues[j]) < in wfx_tx_queues_get_skb()
248 wfx_tx_queue_get_weight(queues[j - 1])) in wfx_tx_queues_get_skb()
249 swap(queues[j - 1], queues[j]); in wfx_tx_queues_get_skb()
259 skb = skb_dequeue(&queues[i]->cab); in wfx_tx_queues_get_skb()
267 WARN_ON(queues[i] != in wfx_tx_queues_get_skb()
269 atomic_inc(&queues[i]->pending_frames); in wfx_tx_queues_get_skb()
270 trace_queues_stats(wdev, queues[i]); in wfx_tx_queues_get_skb()
[all …]
/kernel/linux/linux-5.10/drivers/scsi/aacraid/
comminit.c
373 struct aac_entry * queues; in aac_comm_init() local
375 struct aac_queue_block * comm = dev->queues; in aac_comm_init()
394 queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
397 comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
399 queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
403 comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
406 queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
410 comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
413 queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
417 comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
[all …]
/kernel/linux/linux-5.10/Documentation/ABI/testing/
sysfs-class-net-queues
1 What: /sys/class/<iface>/queues/rx-<queue>/rps_cpus
11 What: /sys/class/<iface>/queues/rx-<queue>/rps_flow_cnt
19 What: /sys/class/<iface>/queues/tx-<queue>/tx_timeout
27 What: /sys/class/<iface>/queues/tx-<queue>/tx_maxrate
35 What: /sys/class/<iface>/queues/tx-<queue>/xps_cpus
45 What: /sys/class/<iface>/queues/tx-<queue>/xps_rxqs
56 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/hold_time
65 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/inflight
73 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit
82 What: /sys/class/<iface>/queues/tx-<queue>/byte_queue_limits/limit_max
[all …]
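
The rps_cpus and tx_maxrate attributes listed above are ordinary text files under sysfs, so they can be driven from userspace with plain file I/O. A minimal sketch, assuming an interface named eth0 and a CPU mask of "f" (CPUs 0-3); both the interface name and the mask are illustrative only:

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Enable RPS on CPUs 0-3 for RX queue 0 of eth0 (hypothetical interface). */
	int fd = open("/sys/class/net/eth0/queues/rx-0/rps_cpus", O_WRONLY);

	if (fd < 0) {
		perror("open rps_cpus");
		return 1;
	}
	if (write(fd, "f\n", 2) != 2)
		perror("write rps_cpus");
	close(fd);
	return 0;
}
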
/kernel/linux/linux-5.10/drivers/nvme/target/
loop.c
30 struct nvme_loop_queue *queues; member
71 return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
176 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
198 iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
220 struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
240 struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
264 if (!test_and_clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags)) in nvme_loop_destroy_admin_queue()
266 nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
287 kfree(ctrl->queues); in nvme_loop_free_ctrl()
298 clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
[all …]
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/soc/ti/
keystone-navigator-qmss.txt
9 management of the packet queues. Packets are queued/de-queued by writing or
32 -- managed-queues : the actual queues managed by each queue manager
33 instance, specified as <"base queue #" "# of queues">.
51 - qpend : pool of qpend(interruptible) queues
52 - general-purpose : pool of general queues, primarily used
53 as free descriptor queues or the
54 transmit DMA queues.
55 - accumulator : pool of queues on PDSP accumulator channel
57 -- qrange : number of queues to use per queue range, specified as
58 <"base queue #" "# of queues">.
[all …]
/kernel/linux/linux-5.10/Documentation/networking/device_drivers/ethernet/ti/
cpsw.rst
26 - TX queues must be rated starting from txq0 that has highest priority
28 - CBS shapers should be used with rated queues
30 potential incoming rate, thus, rate of all incoming tx queues has
150 // Add 4 tx queues, for interface Eth0, and 1 tx queue for Eth1
156 // Check if num of queues is set correctly:
172 // TX queues must be rated starting from 0, so set bws for tx0 and tx1
175 // Leave last 2 tx queues not rated.
176 $ echo 40 > /sys/class/net/eth0/queues/tx-0/tx_maxrate
177 $ echo 20 > /sys/class/net/eth0/queues/tx-1/tx_maxrate
181 // Check maximum rate of tx (cpdma) queues:
[all …]
/kernel/linux/linux-5.10/tools/perf/util/
intel-bts.c
46 struct auxtrace_queues queues; member
211 for (i = 0; i < bts->queues.nr_queues; i++) { in intel_bts_setup_queues()
212 ret = intel_bts_setup_queue(bts, &bts->queues.queue_array[i], in intel_bts_setup_queues()
222 if (bts->queues.new_data) { in intel_bts_update_queues()
223 bts->queues.new_data = false; in intel_bts_update_queues()
465 queue = &btsq->bts->queues.queue_array[btsq->queue_nr]; in intel_bts_process_queue()
539 struct auxtrace_queues *queues = &bts->queues; in intel_bts_process_tid_exit() local
542 for (i = 0; i < queues->nr_queues; i++) { in intel_bts_process_tid_exit()
543 struct auxtrace_queue *queue = &bts->queues.queue_array[i]; in intel_bts_process_tid_exit()
568 queue = &bts->queues.queue_array[queue_nr]; in intel_bts_process_queues()
[all …]
arm-spe.c
40 struct auxtrace_queues queues; member
142 queue = &speq->spe->queues.queue_array[speq->queue_nr]; in arm_spe_get_trace()
450 for (i = 0; i < spe->queues.nr_queues; i++) { in arm_spe__setup_queues()
451 ret = arm_spe__setup_queue(spe, &spe->queues.queue_array[i], i); in arm_spe__setup_queues()
461 if (spe->queues.new_data) { in arm_spe__update_queues()
462 spe->queues.new_data = false; in arm_spe__update_queues()
529 queue = &spe->queues.queue_array[queue_nr]; in arm_spe_process_queues()
565 struct auxtrace_queues *queues = &spe->queues; in arm_spe_process_timeless_queues() local
569 for (i = 0; i < queues->nr_queues; i++) { in arm_spe_process_timeless_queues()
570 struct auxtrace_queue *queue = &spe->queues.queue_array[i]; in arm_spe_process_timeless_queues()
[all …]
auxtrace.c
218 int auxtrace_queues__init(struct auxtrace_queues *queues) in auxtrace_queues__init() argument
220 queues->nr_queues = AUXTRACE_INIT_NR_QUEUES; in auxtrace_queues__init()
221 queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues); in auxtrace_queues__init()
222 if (!queues->queue_array) in auxtrace_queues__init()
227 static int auxtrace_queues__grow(struct auxtrace_queues *queues, in auxtrace_queues__grow() argument
230 unsigned int nr_queues = queues->nr_queues; in auxtrace_queues__grow()
240 if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) in auxtrace_queues__grow()
247 for (i = 0; i < queues->nr_queues; i++) { in auxtrace_queues__grow()
248 list_splice_tail(&queues->queue_array[i].head, in auxtrace_queues__grow()
250 queue_array[i].tid = queues->queue_array[i].tid; in auxtrace_queues__grow()
[all …]
s390-cpumsf.c
169 struct auxtrace_queues queues; member
202 if (!sf->use_logfile || sf->queues.nr_queues <= sample->cpu) in s390_cpumcf_dumpctr()
205 q = &sf->queues.queue_array[sample->cpu]; in s390_cpumcf_dumpctr()
700 queue = &sfq->sf->queues.queue_array[sfq->queue_nr]; in s390_cpumsf_run_decoder()
824 for (i = 0; i < sf->queues.nr_queues; i++) { in s390_cpumsf_setup_queues()
825 ret = s390_cpumsf_setup_queue(sf, &sf->queues.queue_array[i], in s390_cpumsf_setup_queues()
835 if (!sf->queues.new_data) in s390_cpumsf_update_queues()
838 sf->queues.new_data = false; in s390_cpumsf_update_queues()
859 queue = &sf->queues.queue_array[queue_nr]; in s390_cpumsf_process_queues()
984 err = auxtrace_queues__add_event(&sf->queues, session, event, in s390_cpumsf_process_auxtrace_event()
[all …]
/kernel/linux/linux-5.10/Documentation/arm/keystone/
knav-qmss.rst
15 management of the packet queues. Packets are queued/de-queued by writing or
24 knav qmss driver provides a set of APIs to drivers to open/close qmss queues,
25 allocate descriptor pools, map the descriptors, push/pop to queues etc. For
31 Accumulator QMSS queues using PDSP firmware
34 queue or multiple contiguous queues. drivers/soc/ti/knav_qmss_acc.c is the
37 1 or 32 queues per channel. More description on the firmware is available in
56 Use of accumulated queues requires the firmware image to be present in the
57 file system. The driver doesn't acc queues to the supported queue range if
/kernel/linux/linux-5.10/include/linux/
ptr_ring.h
625 void ***queues; in ptr_ring_resize_multiple() local
628 queues = kmalloc_array(nrings, sizeof(*queues), gfp); in ptr_ring_resize_multiple()
629 if (!queues) in ptr_ring_resize_multiple()
633 queues[i] = __ptr_ring_init_queue_alloc(size, gfp); in ptr_ring_resize_multiple()
634 if (!queues[i]) in ptr_ring_resize_multiple()
641 queues[i] = __ptr_ring_swap_queue(rings[i], queues[i], in ptr_ring_resize_multiple()
648 kvfree(queues[i]); in ptr_ring_resize_multiple()
650 kfree(queues); in ptr_ring_resize_multiple()
656 kvfree(queues[i]); in ptr_ring_resize_multiple()
658 kfree(queues); in ptr_ring_resize_multiple()
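
The ptr_ring.h excerpt above is the body of ptr_ring_resize_multiple(): it allocates a fresh queue array per ring, swaps each ring's array in under the ring locks, and frees whatever it replaced. A caller-side sketch, assuming the 5.10 signature takes the ring array, the ring count, the new size, GFP flags, and a destructor for entries dropped while shrinking (my_entry_free and my_dev_resize_rings are hypothetical names; check the header for the exact prototype):

#include <linux/ptr_ring.h>
#include <linux/slab.h>

/* Hypothetical destructor for entries that no longer fit after a shrink. */
static void my_entry_free(void *ptr)
{
	kfree(ptr);
}

/* Resize every per-queue ring of a hypothetical device to new_size slots. */
static int my_dev_resize_rings(struct ptr_ring **rings, unsigned int nrings,
			       int new_size)
{
	return ptr_ring_resize_multiple(rings, nrings, new_size,
					GFP_KERNEL, my_entry_free);
}
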
/kernel/linux/linux-5.10/Documentation/block/
blk-mq.rst
37 spawns multiple queues with individual entry points local to the CPU, removing
49 blk-mq has two group of queues: software staging queues and hardware dispatch
50 queues. When the request arrives at the block layer, it will try the shortest
56 Then, after the requests are processed by software queues, they will be placed
62 Software staging queues
65 The block IO subsystem adds requests in the software staging queues
71 the number of queues is defined by a per-CPU or per-node basis.
93 requests from different queues, otherwise there would be cache trashing and a
99 queue (a.k.a. run the hardware queue), the software queues mapped to that
102 Hardware dispatch queues
[all …]
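
The blk-mq.rst lines above contrast per-CPU software staging queues with the hardware dispatch queues a driver exposes. A hedged sketch of the driver side under 5.10, where nr_hw_queues in the tag set is what requests the number of hardware dispatch queues; my_mq_ops, my_queue_rq, my_cmd and my_create_queue are hypothetical stand-ins, not blk-mq names:

#include <linux/blk-mq.h>
#include <linux/err.h>
#include <linux/numa.h>
#include <linux/string.h>

struct my_cmd {
	int dummy;	/* hypothetical per-request driver data */
};

static blk_status_t my_queue_rq(struct blk_mq_hw_ctx *hctx,
				const struct blk_mq_queue_data *bd)
{
	/* A real driver would issue bd->rq to hardware; this stub just
	 * completes the request immediately, like a null device. */
	blk_mq_start_request(bd->rq);
	blk_mq_end_request(bd->rq, BLK_STS_OK);
	return BLK_STS_OK;
}

static const struct blk_mq_ops my_mq_ops = {
	.queue_rq = my_queue_rq,
};

static struct request_queue *my_create_queue(struct blk_mq_tag_set *set)
{
	int ret;

	memset(set, 0, sizeof(*set));
	set->ops = &my_mq_ops;
	set->nr_hw_queues = 4;			/* hardware dispatch queues */
	set->queue_depth = 128;			/* requests per hardware queue */
	set->numa_node = NUMA_NO_NODE;
	set->cmd_size = sizeof(struct my_cmd);
	set->flags = BLK_MQ_F_SHOULD_MERGE;

	ret = blk_mq_alloc_tag_set(set);
	if (ret)
		return ERR_PTR(ret);

	return blk_mq_init_queue(set);
}
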
/kernel/linux/linux-5.10/Documentation/networking/
multiqueue.rst
18 the subqueue memory, as well as netdev configuration of where the queues
21 The base driver will also need to manage the queues as it does the global
33 A new round-robin qdisc, sch_multiq also supports multiple hardware queues. The
35 bands and queues based on the value in skb->queue_mapping. Use this field in
42 On qdisc load, the number of bands is based on the number of queues on the
56 The qdisc will allocate the number of bands to equal the number of queues that
58 queues, the band mapping would look like::
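
The multiqueue.rst lines above describe the sch_multiq behaviour that also shows up in the sch_multiq.c hits at the top of this page: the qdisc reads skb->queue_mapping and uses it to pick a band, one band per hardware queue. A minimal sketch of that mapping, with my_multiq_priv and my_multiq_classify as hypothetical stand-ins for the real qdisc private data and classify hook:

#include <linux/skbuff.h>
#include <net/sch_generic.h>

struct my_multiq_priv {
	u16 bands;		/* number of bands == number of hardware TX queues */
	struct Qdisc **queues;	/* one child qdisc per band */
};

static struct Qdisc *my_multiq_classify(struct sk_buff *skb, struct Qdisc *sch)
{
	struct my_multiq_priv *q = qdisc_priv(sch);
	u16 band = skb_get_queue_mapping(skb);

	/* Out-of-range mappings fall back to band 0, as in multiq_classify(). */
	if (band >= q->bands)
		return q->queues[0];
	return q->queues[band];
}
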
scaling.rst
27 Contemporary NICs support multiple receive and transmit descriptor queues
29 queues to distribute processing among CPUs. The NIC distributes packets by
47 Some advanced NICs allow steering packets to queues based on
57 module parameter for specifying the number of hardware queues to
60 for each CPU if the device supports enough queues, or otherwise at least
66 default mapping is to distribute the queues evenly in the table, but the
69 indirection table could be done to give different queues different
80 of queues to IRQs can be determined from /proc/interrupts. By default,
95 is to allocate as many queues as there are CPUs in the system (or the
97 is likely the one with the smallest number of receive queues where no
[all …]
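
The scaling.rst lines above describe RSS: the NIC hashes each flow, uses the low-order bits of the hash to index an indirection table, and the table entry names the receive queue; the default table spreads the queues evenly. A sketch of that lookup and of the even default fill (table size and queue count are illustrative, and real tables are programmed through ethtool, not this code):

#include <stdint.h>

/* Hash-to-queue lookup as described above. */
static uint16_t rss_pick_queue(uint32_t flow_hash,
			       const uint16_t *indir, uint32_t indir_size)
{
	return indir[flow_hash % indir_size];
}

/* Default even spread: entry i points at queue i modulo the queue count. */
static void rss_fill_default(uint16_t *indir, uint32_t indir_size,
			     uint16_t num_queues)
{
	uint32_t i;

	for (i = 0; i < indir_size; i++)
		indir[i] = i % num_queues;
}
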
/kernel/linux/linux-5.10/drivers/staging/qlge/
TODO
13 * rename "rx" queues to "completion" queues. Calling tx completion queues "rx
14 queues" is confusing.
24 frames, resets the link, device and driver buffer queues become
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/dma/
fsl-qdma.txt
22 - fsl,dma-queues: Should contain number of queues supported.
28 based on queues
52 fsl,dma-queues = <2>;
/kernel/linux/linux-5.10/Documentation/networking/device_drivers/ethernet/freescale/dpaa2/
ethernet-driver.rst
25 - queues, channels
32 hardware resources, like queues, do not have a corresponding MC object and
99 queues ---------------------- | | Buffer pool |
109 Frames are transmitted and received through hardware frame queues, which can be
111 enqueues TX frames on egress queues and after transmission is complete a TX
114 When frames are available on ingress queues, a data availability notification
116 queues in the same channel have available frames, only one notification is sent.
119 Each network interface can have multiple Rx, Tx and confirmation queues affined
/kernel/linux/linux-5.10/drivers/net/ethernet/netronome/nfp/
nfp_net_debugfs.c
150 struct dentry *queues, *tx, *rx, *xdp; in nfp_net_debugfs_vnic_add() local
164 queues = debugfs_create_dir("queue", nn->debugfs_dir); in nfp_net_debugfs_vnic_add()
166 rx = debugfs_create_dir("rx", queues); in nfp_net_debugfs_vnic_add()
167 tx = debugfs_create_dir("tx", queues); in nfp_net_debugfs_vnic_add()
168 xdp = debugfs_create_dir("xdp", queues); in nfp_net_debugfs_vnic_add()
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/mfd/
fsl-imx25-tsadc.txt
3 This device combines two general purpose conversion queues one used for general
15 conversion queues.
20 This device includes two conversion queues which can be added as subnodes.
/kernel/linux/linux-5.10/Documentation/devicetree/bindings/net/
fsl-fec.txt
14 - fsl,num-tx-queues : The property is valid for enet-avb IP, which supports
15 hw multi queues. Should specify the tx queue number, otherwise set tx queue
17 - fsl,num-rx-queues : The property is valid for enet-avb IP, which supports
18 hw multi queues. Should specify the rx queue number, otherwise set rx queue
39 tx/rx queues 1 and 2. "int0" will be used for queue 0 and ENET_MII interrupts.
40 For imx6sx, "int0" handles all 3 queues and ENET_MII. "pps" is for the pulse
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/adreno/
a6xx_hfi.c
99 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE]; in a6xx_hfi_wait_for_ack()
168 struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE]; in a6xx_hfi_send_msg()
527 for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) { in a6xx_hfi_stop()
528 struct a6xx_hfi_queue *queue = &gmu->queues[i]; in a6xx_hfi_stop()
578 table_size += (ARRAY_SIZE(gmu->queues) * in a6xx_hfi_init()
586 table->num_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
587 table->active_queues = ARRAY_SIZE(gmu->queues); in a6xx_hfi_init()
591 a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset, in a6xx_hfi_init()
596 a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset, in a6xx_hfi_init()
/kernel/linux/linux-5.10/Documentation/networking/device_drivers/ethernet/freescale/
dpaa.rst
86 Tx FQs transmission frame queues
143 confirmation frame queues. The driver is then responsible for freeing the
164 strict priority levels. Each traffic class contains NR_CPU TX queues. By
165 default, only one traffic class is enabled and the lowest priority Tx queues
184 Traffic coming on the DPAA Rx queues or on the DPAA Tx confirmation
185 queues is seen by the CPU as ingress traffic on a certain portal.
191 hardware frame queues using a hash on IP v4/v6 source and destination
195 queues are configured to put the received traffic into a pool channel
197 The default frame queues have the HOLDACTIVE option set, ensuring that
204 128 Rx frame queues that are configured to dedicated channels, in a
[all …]
