
Searched refs:queue (Results 1 – 25 of 44) sorted by relevance


/arch/mips/cavium-octeon/executive/
cvmx-pko.c
70 int queue; in __cvmx_pko_iport_config() local
76 for (queue = 0; queue < num_queues; queue++) { in __cvmx_pko_iport_config()
82 config.s.index = queue; in __cvmx_pko_iport_config()
83 config.s.qid = base_queue + queue; in __cvmx_pko_iport_config()
85 config.s.tail = (queue == (num_queues - 1)); in __cvmx_pko_iport_config()
86 config.s.s_tail = (queue == static_priority_end); in __cvmx_pko_iport_config()
88 config.s.static_q = (queue <= static_priority_end); in __cvmx_pko_iport_config()
92 CVMX_CMD_QUEUE_PKO(base_queue + queue), in __cvmx_pko_iport_config()
101 num_queues, queue); in __cvmx_pko_iport_config()
104 CVMX_CMD_QUEUE_PKO(base_queue + queue)); in __cvmx_pko_iport_config()
[all …]
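
The __cvmx_pko_iport_config() hits above show the per-queue flag logic: each queue in a port's range gets an index and a global qid, only the last queue is marked as the tail, and queues up to static_priority_end keep static priority. Below is a minimal standalone sketch of that flag computation; pko_queue_cfg and config_queues() are hypothetical stand-ins for the real CSR union and setup routine.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for the real PKO queue-config CSR union. */
struct pko_queue_cfg {
	unsigned index;     /* position of the queue within the port's range */
	unsigned qid;       /* global queue id = base_queue + queue */
	unsigned tail;      /* set only on the last queue of the range */
	unsigned s_tail;    /* set on the last static-priority queue */
	unsigned static_q;  /* set on every static-priority queue */
};

/* Reproduces the flag computation visible in __cvmx_pko_iport_config(). */
static void config_queues(int base_queue, int num_queues, int static_priority_end)
{
	for (int queue = 0; queue < num_queues; queue++) {
		struct pko_queue_cfg cfg = {
			.index    = queue,
			.qid      = base_queue + queue,
			.tail     = (queue == num_queues - 1),
			.s_tail   = (queue == static_priority_end),
			.static_q = (queue <= static_priority_end),
		};
		printf("qid %u: tail=%u s_tail=%u static_q=%u\n",
		       cfg.qid, cfg.tail, cfg.s_tail, cfg.static_q);
	}
}

int main(void)
{
	config_queues(16 /* base_queue */, 4 /* num_queues */, 1 /* static_priority_end */);
	return 0;
}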
cvmx-helper-util.c
95 static int cvmx_helper_setup_red_queue(int queue, int pass_thresh, in cvmx_helper_setup_red_queue() argument
107 cvmx_write_csr(CVMX_IPD_QOSX_RED_MARKS(queue), red_marks.u64); in cvmx_helper_setup_red_queue()
116 cvmx_write_csr(CVMX_IPD_RED_QUEX_PARAM(queue), red_param.u64); in cvmx_helper_setup_red_queue()
136 int queue; in cvmx_helper_setup_red() local
151 for (queue = 0; queue < 8; queue++) in cvmx_helper_setup_red()
152 cvmx_helper_setup_red_queue(queue, pass_thresh, drop_thresh); in cvmx_helper_setup_red()
cvmx-helper-rgmii.c
325 int queue = cvmx_pko_get_base_queue(ipd_port) + i; in __cvmx_helper_rgmii_link_set() local
326 cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue); in __cvmx_helper_rgmii_link_set()
329 pko_mem_queue_qos.s.qid = queue; in __cvmx_helper_rgmii_link_set()
437 int queue = cvmx_pko_get_base_queue(ipd_port) + i; in __cvmx_helper_rgmii_link_set() local
438 cvmx_write_csr(CVMX_PKO_REG_READ_IDX, queue); in __cvmx_helper_rgmii_link_set()
Makefile
13 obj-y += cvmx-pko.o cvmx-spi.o cvmx-cmd-queue.o \
/arch/mips/include/asm/octeon/
cvmx-pko.h
151 uint64_t queue:9; member
156 uint64_t queue:9;
325 static inline void cvmx_pko_doorbell(uint64_t port, uint64_t queue, in cvmx_pko_doorbell() argument
335 ptr.s.queue = queue; in cvmx_pko_doorbell()
377 static inline void cvmx_pko_send_packet_prepare(uint64_t port, uint64_t queue, in cvmx_pko_send_packet_prepare() argument
395 (CVMX_TAG_SUBGROUP_MASK & queue); in cvmx_pko_send_packet_prepare()
420 uint64_t queue, in cvmx_pko_send_packet_finish() argument
428 result = cvmx_cmd_queue_write2(CVMX_CMD_QUEUE_PKO(queue), in cvmx_pko_send_packet_finish()
432 cvmx_pko_doorbell(port, queue, 2); in cvmx_pko_send_packet_finish()
463 uint64_t queue, in cvmx_pko_send_packet_finish3() argument
[all …]
cvmx-cmd-queue.h
100 #define CVMX_CMD_QUEUE_PKO(queue) \ argument
101 ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_PKO_BASE + (0xffff&(queue))))
108 #define CVMX_CMD_QUEUE_DMA(queue) \ argument
109 ((cvmx_cmd_queue_id_t)(CVMX_CMD_QUEUE_DMA_BASE + (0xffff&(queue))))
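
The cvmx-cmd-queue.h hits show how a PKO or DMA queue number is turned into a command-queue id: the number is masked to 16 bits and added to a per-subsystem base. Here is a standalone sketch of the same mapping; the CMD_QUEUE_*_BASE values are placeholders, not the real CVMX_CMD_QUEUE_PKO_BASE/CVMX_CMD_QUEUE_DMA_BASE constants.

#include <stdio.h>

/* Placeholder bases; the real CVMX_CMD_QUEUE_*_BASE constants differ. */
#define CMD_QUEUE_PKO_BASE 0x00000
#define CMD_QUEUE_DMA_BASE 0x10000

/* Same shape as CVMX_CMD_QUEUE_PKO()/CVMX_CMD_QUEUE_DMA(): mask the queue
 * number to 16 bits, then offset it into the subsystem's id range. */
#define CMD_QUEUE_PKO(queue) (CMD_QUEUE_PKO_BASE + (0xffff & (queue)))
#define CMD_QUEUE_DMA(queue) (CMD_QUEUE_DMA_BASE + (0xffff & (queue)))

int main(void)
{
	printf("PKO queue 5 -> command-queue id 0x%x\n", CMD_QUEUE_PKO(5));
	printf("DMA queue 2 -> command-queue id 0x%x\n", CMD_QUEUE_DMA(2));
	return 0;
}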
/arch/m68k/emu/
nfblock.c
58 struct request_queue *queue; member
122 dev->queue = blk_alloc_queue(NUMA_NO_NODE); in nfhd_init_one()
123 if (dev->queue == NULL) in nfhd_init_one()
126 blk_queue_logical_block_size(dev->queue, bsize); in nfhd_init_one()
138 dev->disk->queue = dev->queue; in nfhd_init_one()
147 blk_cleanup_queue(dev->queue); in nfhd_init_one()
190 blk_cleanup_queue(dev->queue); in nfhd_exit()
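
The nfblock.c hits trace the request-queue lifecycle used by simple block drivers in this tree: allocate the queue with blk_alloc_queue(), set the logical block size, point the gendisk at it, and release it with blk_cleanup_queue() on error or exit. A condensed kernel-style sketch of that pattern follows, assuming the block-layer API of the kernel version shown in the listing (it has since changed); struct mydev and the mydev_* helpers are hypothetical.

/* Kernel-style sketch only; mirrors the calls visible in nfhd_init_one()/nfhd_exit(). */
#include <linux/blkdev.h>
#include <linux/genhd.h>

struct mydev {
	struct request_queue *queue;
	struct gendisk *disk;
};

static int mydev_init_queue(struct mydev *dev, unsigned int bsize)
{
	dev->queue = blk_alloc_queue(NUMA_NO_NODE);
	if (!dev->queue)
		return -ENOMEM;

	/* Advertise the device's logical block size to the block layer. */
	blk_queue_logical_block_size(dev->queue, bsize);
	return 0;
}

/* Call once dev->disk has been allocated; the disk serves requests from this queue. */
static void mydev_attach_queue(struct mydev *dev)
{
	dev->disk->queue = dev->queue;
}

static void mydev_exit_queue(struct mydev *dev)
{
	blk_cleanup_queue(dev->queue);
}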
/arch/arm/boot/dts/
keystone-k2l-netcp.dtsi
15 queue-range = <0 0x2000>;
35 queue-pools {
77 multi-queue;
155 tx-queue = <896>;
205 rx-queue-depth = <128 128 0 0>;
207 rx-queue = <528>;
208 tx-completion-queue = <530>;
217 rx-queue-depth = <128 128 0 0>;
219 rx-queue = <529>;
220 tx-completion-queue = <531>;
keystone-k2e-netcp.dtsi
15 queue-range = <0 0x2000>;
35 queue-pools {
77 multi-queue;
156 tx-queue = <896>;
223 rx-queue-depth = <128 128 0 0>;
225 rx-queue = <528>;
226 tx-completion-queue = <530>;
235 rx-queue-depth = <128 128 0 0>;
237 rx-queue = <529>;
238 tx-completion-queue = <531>;
keystone-k2hk-netcp.dtsi
15 queue-range = <0 0x4000>;
48 queue-pools {
94 multi-queue;
175 tx-queue = <648>;
225 rx-queue-depth = <128 128 0 0>;
227 rx-queue = <8704>;
228 tx-completion-queue = <8706>;
237 rx-queue-depth = <128 128 0 0>;
239 rx-queue = <8705>;
240 tx-completion-queue = <8707>;
keystone-k2g-netcp.dtsi
17 queue-range = <0 0x80>;
36 queue-pools {
120 tx-queue = <5>;
139 rx-queue-depth = <128 128 0 0>;
141 rx-queue = <77>;
142 tx-completion-queue = <78>;
intel-ixp4xx.dtsi
17 qmgr: queue-manager@60000000 {
18 compatible = "intel,ixp4xx-ahb-queue-manager";
ls1021a.dtsi
759 queue-group@2d10000 {
768 queue-group@2d14000 {
788 queue-group@2d50000 {
797 queue-group@2d54000 {
817 queue-group@2d90000 {
826 queue-group@2d94000 {
969 queue-sizes = <64 64>;
/arch/xtensa/platforms/iss/
simdisk.c
30 struct request_queue *queue; member
275 dev->queue = blk_alloc_queue(NUMA_NO_NODE); in simdisk_setup()
276 if (dev->queue == NULL) { in simdisk_setup()
289 dev->gd->queue = dev->queue; in simdisk_setup()
299 blk_cleanup_queue(dev->queue); in simdisk_setup()
300 dev->queue = NULL; in simdisk_setup()
354 if (dev->queue) in simdisk_teardown()
355 blk_cleanup_queue(dev->queue); in simdisk_teardown()
/arch/powerpc/boot/dts/fsl/
p1020rdb-pc_camp_core1.dts
107 35 36 40 /* enet1-queue-group0 */
108 51 52 67 /* enet1-queue-group1 */
109 31 32 33 /* enet2-queue-group0 */
110 25 26 27 /* enet2-queue-group1 */
p1020rdb-pc_camp_core0.dts
54 42 29 30 34 /* serial1, enet0-queue-group0 */
55 17 18 24 45 /* enet0-queue-group1, crypto */
qoriq-raid1.0-0.dtsi
43 compatible = "fsl,raideng-v1.0-job-queue";
65 compatible = "fsl,raideng-v1.0-job-queue";
p1010si-post.dtsi
184 queue-group@b0000 {
192 queue-group@b1000 {
200 queue-group@b2000 {
bsc9131si-post.dtsi
172 queue-group@b0000 {
181 queue-group@b1000 {
c293si-post.dtsi
172 queue-group@b0000 {
181 queue-group@b1000 {
bsc9132si-post.dtsi
192 queue-group@b0000 {
201 queue-group@b1000 {
/arch/um/drivers/
ubd_kern.c
168 struct request_queue *queue; member
825 blk_queue_max_hw_sectors(ubd_dev->queue, 8 * sizeof(long)); in ubd_open_dev()
849 ubd_dev->queue->limits.discard_granularity = SECTOR_SIZE; in ubd_open_dev()
850 ubd_dev->queue->limits.discard_alignment = SECTOR_SIZE; in ubd_open_dev()
851 blk_queue_max_discard_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
852 blk_queue_max_write_zeroes_sectors(ubd_dev->queue, UBD_MAX_REQUEST); in ubd_open_dev()
853 blk_queue_flag_set(QUEUE_FLAG_DISCARD, ubd_dev->queue); in ubd_open_dev()
855 blk_queue_flag_set(QUEUE_FLAG_NONROT, ubd_dev->queue); in ubd_open_dev()
866 blk_cleanup_queue(ubd_dev->queue); in ubd_device_release()
901 disk->queue = ubd_devs[unit].queue; in ubd_disk_register()
[all …]
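
The ubd_kern.c hits configure the request-queue limits once the backing file is open: cap the hardware sector count, set discard granularity and alignment, raise the discard/write-zeroes limits, and flag the queue as discard-capable and non-rotational. Below is a condensed sketch of that setup, again assuming the kernel version reflected in the listing (QUEUE_FLAG_DISCARD and blk_cleanup_queue() were removed later); mydev_setup_limits() and its single max_sectors parameter are hypothetical simplifications.

/* Kernel-style sketch of the queue-limit setup visible in ubd_open_dev(). */
#include <linux/blkdev.h>

static void mydev_setup_limits(struct request_queue *q, unsigned int max_sectors)
{
	/* Largest request, in 512-byte sectors, the driver will accept. */
	blk_queue_max_hw_sectors(q, max_sectors);

	/* Discard/write-zeroes support: granularity and alignment in bytes. */
	q->limits.discard_granularity = SECTOR_SIZE;
	q->limits.discard_alignment = SECTOR_SIZE;
	blk_queue_max_discard_sectors(q, max_sectors);
	blk_queue_max_write_zeroes_sectors(q, max_sectors);
	blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);

	/* Backed by a host file, so report the device as non-rotational. */
	blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
}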
/arch/powerpc/sysdev/xive/
common.c
143 irq = xive_read_eq(&xc->queue[prio], just_peek); in xive_scan_interrupts()
168 q = &xc->queue[prio]; in xive_scan_interrupts()
259 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); in xmon_xive_do_dump()
480 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_try_pick_target()
504 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_dec_target_count()
1331 if (xc->queue[xive_irq_priority].qpage) in xive_cleanup_cpu_queues()
1340 if (!xc->queue[xive_irq_priority].qpage) in xive_setup_cpu_queues()
1593 struct xive_q *q = &xc->queue[xive_irq_priority]; in xive_debug_show_cpu()
xive-internal.h
27 struct xive_q queue[XIVE_MAX_QUEUES]; member
/arch/powerpc/include/asm/
fsl_hcalls.h
492 static inline unsigned int fh_err_get_info(int queue, uint32_t *bufsize, in fh_err_get_info() argument
503 r3 = queue; in fh_err_get_info()
