
Searched refs:iq (Results 1 – 25 of 42) sorted by relevance


/kernel/linux/linux-5.10/drivers/crypto/cavium/zip/
zip_device.c
59 return ((zip_dev->iq[queue].sw_head - zip_dev->iq[queue].sw_tail) * in zip_cmd_queue_consumed()
98 spin_lock(&zip_dev->iq[queue].lock); in zip_load_instr()
109 zip_dbg("sw_head : %lx", zip_dev->iq[queue].sw_head); in zip_load_instr()
110 zip_dbg("sw_tail : %lx", zip_dev->iq[queue].sw_tail); in zip_load_instr()
117 memcpy((u8 *)zip_dev->iq[queue].sw_head, (u8 *)instr, in zip_load_instr()
119 zip_dev->iq[queue].sw_head += 16; /* 16 64_bit words = 128B */ in zip_load_instr()
122 ncb_ptr = zip_dev->iq[queue].sw_head; in zip_load_instr()
125 ncb_ptr, zip_dev->iq[queue].sw_head - 16); in zip_load_instr()
128 zip_dev->iq[queue].sw_head = zip_dev->iq[queue].sw_tail; in zip_load_instr()
130 zip_dev->iq[queue].free_flag = 1; in zip_load_instr()
[all …]
zip_mem.c
59 zip->iq[q].sw_head = (u64 *)__get_free_pages((GFP_KERNEL | GFP_DMA), in zip_cmd_qbuf_alloc()
62 if (!zip->iq[q].sw_head) in zip_cmd_qbuf_alloc()
65 memset(zip->iq[q].sw_head, 0, ZIP_CMD_QBUF_SIZE); in zip_cmd_qbuf_alloc()
67 zip_dbg("cmd_qbuf_alloc[%d] Success : %p\n", q, zip->iq[q].sw_head); in zip_cmd_qbuf_alloc()
78 zip_dbg("Freeing cmd_qbuf 0x%lx\n", zip->iq[q].sw_tail); in zip_cmd_qbuf_free()
80 free_pages((u64)zip->iq[q].sw_tail, get_order(ZIP_CMD_QBUF_SIZE)); in zip_cmd_qbuf_free()
zip_main.c
167 memset(&zip->iq[q], 0x0, sizeof(struct zip_iq)); in zip_init_hw()
169 spin_lock_init(&zip->iq[q].lock); in zip_init_hw()
180 zip->iq[q].sw_tail = zip->iq[q].sw_head; in zip_init_hw()
181 zip->iq[q].hw_tail = zip->iq[q].sw_head; in zip_init_hw()
185 que_sbuf_addr.s.ptr = (__pa(zip->iq[q].sw_head) >> in zip_init_hw()
198 zip->iq[q].sw_head, zip->iq[q].sw_tail, in zip_init_hw()
199 zip->iq[q].hw_tail); in zip_init_hw()
zip_main.h
108 struct zip_iq iq[ZIP_MAX_NUM_QUEUES]; member
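
The zip files above share one mechanism: each instruction queue (struct zip_iq) is a page-backed command buffer walked with sw_head/sw_tail pointers under a per-queue spinlock, with sw_head advancing 16 64-bit words (128 B) per command and snapping back to sw_tail at the end of the buffer. Below is a minimal userspace sketch of that head/tail arithmetic; the toy_* names and the 64-word buffer size are invented, and the lock and the real driver's next-chunk-buffer (ncb_ptr) handling are omitted.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define TOY_QBUF_WORDS 64   /* toy buffer: 64 u64 words; the driver uses whole pages */
#define TOY_CMD_WORDS  16   /* one command = 16 64-bit words = 128 B, as in zip_load_instr() */

struct toy_iq {
	uint64_t buf[TOY_QBUF_WORDS];
	uint64_t *sw_head;      /* producer: next free slot */
	uint64_t *sw_tail;      /* start of the buffer in this simplified model */
};

/* Mirrors zip_cmd_queue_consumed(): words between tail and head, in bytes. */
static size_t toy_consumed(const struct toy_iq *q)
{
	return (size_t)(q->sw_head - q->sw_tail) * sizeof(uint64_t);
}

static void toy_load_instr(struct toy_iq *q, const uint64_t *instr)
{
	/* Wrap to the tail when fewer than TOY_CMD_WORDS slots remain
	 * (the real driver threads a next-chunk buffer in instead). */
	if (q->sw_head + TOY_CMD_WORDS > q->buf + TOY_QBUF_WORDS)
		q->sw_head = q->sw_tail;

	memcpy(q->sw_head, instr, TOY_CMD_WORDS * sizeof(uint64_t));
	q->sw_head += TOY_CMD_WORDS;   /* 16 64-bit words = 128 B */
}

int main(void)
{
	struct toy_iq q;
	uint64_t cmd[TOY_CMD_WORDS] = { 0xdeadbeefULL };

	q.sw_head = q.buf;
	q.sw_tail = q.buf;
	for (int i = 0; i < 5; i++)
		toy_load_instr(&q, cmd);
	printf("consumed: %zu bytes\n", toy_consumed(&q));   /* 128 after the wrap */
	return 0;
}
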
/kernel/linux/linux-5.10/drivers/net/ethernet/cavium/liquidio/
request_manager.c
45 struct octeon_instr_queue *iq = in IQ_INSTR_MODE_64B() local
47 return iq->iqcmd_64B; in IQ_INSTR_MODE_64B()
60 struct octeon_instr_queue *iq; in octeon_init_instr_queue() local
82 iq = oct->instr_queue[iq_no]; in octeon_init_instr_queue()
84 iq->oct_dev = oct; in octeon_init_instr_queue()
86 iq->base_addr = lio_dma_alloc(oct, q_size, &iq->base_addr_dma); in octeon_init_instr_queue()
87 if (!iq->base_addr) { in octeon_init_instr_queue()
93 iq->max_count = num_descs; in octeon_init_instr_queue()
98 iq->request_list = vzalloc_node(array_size(num_descs, sizeof(*iq->request_list)), in octeon_init_instr_queue()
100 if (!iq->request_list) in octeon_init_instr_queue()
[all …]
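
octeon_init_instr_queue() pairs two allocations: a DMA-coherent descriptor ring from lio_dma_alloc() and a parallel per-descriptor bookkeeping array from vzalloc_node(), and it must unwind the first if the second fails. A compilable sketch of that pattern, with hypothetical toy_* names and plain malloc/calloc standing in for the kernel allocators:

#include <stdio.h>
#include <stdlib.h>

struct toy_request { void *buf; int reqtype; };

struct toy_iq {
	void *base_addr;                  /* stands in for the DMA-coherent ring */
	struct toy_request *request_list; /* one bookkeeping entry per descriptor */
	unsigned int max_count;
};

/* Pairs the allocations the way octeon_init_instr_queue() does:
 * if the second fails, undo the first before reporting failure. */
static int toy_init_iq(struct toy_iq *iq, unsigned int num_descs, size_t desc_size)
{
	iq->base_addr = malloc((size_t)num_descs * desc_size);
	if (!iq->base_addr)
		return -1;

	iq->request_list = calloc(num_descs, sizeof(*iq->request_list));
	if (!iq->request_list) {
		free(iq->base_addr);
		iq->base_addr = NULL;
		return -1;
	}
	iq->max_count = num_descs;
	return 0;
}

int main(void)
{
	struct toy_iq iq = { 0 };

	if (toy_init_iq(&iq, 512, 64) == 0)
		printf("ring of %u descriptors ready\n", iq.max_count);
	free(iq.request_list);
	free(iq.base_addr);
	return 0;
}
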
cn23xx_vf_regs.h
70 #define CN23XX_VF_SLI_IQ_PKT_CONTROL64(iq) \ argument
71 (CN23XX_VF_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
73 #define CN23XX_VF_SLI_IQ_BASE_ADDR64(iq) \ argument
74 (CN23XX_VF_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
76 #define CN23XX_VF_SLI_IQ_SIZE(iq) \ argument
77 (CN23XX_VF_SLI_IQ_SIZE_START + ((iq) * CN23XX_VF_IQ_OFFSET))
79 #define CN23XX_VF_SLI_IQ_DOORBELL(iq) \ argument
80 (CN23XX_VF_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_VF_IQ_OFFSET))
82 #define CN23XX_VF_SLI_IQ_INSTR_COUNT64(iq) \ argument
83 (CN23XX_VF_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_VF_IQ_OFFSET))
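
Every macro in this header (and in the cn6xxx/cn23xx PF headers below) computes a queue's CSR address the same way: a fixed start address plus the queue index times a per-queue stride. A standalone sketch of that shape; the base and stride values here are invented, not the real cn23xx ones:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical base/stride; the real values are hardware-specific. */
#define TOY_IQ_DOORBELL_START 0x10010ULL
#define TOY_IQ_OFFSET         0x20000ULL

/* Same shape as CN23XX_VF_SLI_IQ_DOORBELL(iq): start + queue * stride.
 * (iq) is parenthesized so expressions like q + 1 expand safely. */
#define TOY_IQ_DOORBELL(iq) \
	(TOY_IQ_DOORBELL_START + ((iq) * TOY_IQ_OFFSET))

int main(void)
{
	for (uint64_t q = 0; q < 4; q++)
		printf("queue %llu doorbell @ 0x%llx\n",
		       (unsigned long long)q,
		       (unsigned long long)TOY_IQ_DOORBELL(q));
	return 0;
}
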
cn66xx_regs.h
143 #define CN6XXX_SLI_IQ_BASE_ADDR64(iq) \ argument
144 (CN6XXX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
146 #define CN6XXX_SLI_IQ_SIZE(iq) \ argument
147 (CN6XXX_SLI_IQ_SIZE_START + ((iq) * CN6XXX_IQ_OFFSET))
149 #define CN6XXX_SLI_IQ_PKT_INSTR_HDR64(iq) \ argument
150 (CN6XXX_SLI_IQ_PKT_INSTR_HDR_START64 + ((iq) * CN6XXX_IQ_OFFSET))
152 #define CN6XXX_SLI_IQ_DOORBELL(iq) \ argument
153 (CN6XXX_SLI_IQ_DOORBELL_START + ((iq) * CN6XXX_IQ_OFFSET))
155 #define CN6XXX_SLI_IQ_INSTR_COUNT(iq) \ argument
156 (CN6XXX_SLI_IQ_INSTR_COUNT_START + ((iq) * CN6XXX_IQ_OFFSET))
[all …]
cn23xx_vf_device.c
104 struct octeon_instr_queue *iq; in cn23xx_vf_setup_global_input_regs() local
116 iq = oct->instr_queue[q_no]; in cn23xx_vf_setup_global_input_regs()
118 if (iq) in cn23xx_vf_setup_global_input_regs()
119 inst_cnt_reg = iq->inst_cnt_reg; in cn23xx_vf_setup_global_input_regs()
214 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in cn23xx_setup_vf_iq_regs() local
219 iq->base_addr_dma); in cn23xx_setup_vf_iq_regs()
220 octeon_write_csr(oct, CN23XX_VF_SLI_IQ_SIZE(iq_no), iq->max_count); in cn23xx_setup_vf_iq_regs()
225 iq->doorbell_reg = in cn23xx_setup_vf_iq_regs()
227 iq->inst_cnt_reg = in cn23xx_setup_vf_iq_regs()
230 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in cn23xx_setup_vf_iq_regs()
[all …]
cn23xx_pf_regs.h
170 #define CN23XX_SLI_IQ_PKT_CONTROL64(iq) \ argument
171 (CN23XX_SLI_IQ_PKT_CONTROL_START64 + ((iq) * CN23XX_IQ_OFFSET))
173 #define CN23XX_SLI_IQ_BASE_ADDR64(iq) \ argument
174 (CN23XX_SLI_IQ_BASE_ADDR_START64 + ((iq) * CN23XX_IQ_OFFSET))
176 #define CN23XX_SLI_IQ_SIZE(iq) \ argument
177 (CN23XX_SLI_IQ_SIZE_START + ((iq) * CN23XX_IQ_OFFSET))
179 #define CN23XX_SLI_IQ_DOORBELL(iq) \ argument
180 (CN23XX_SLI_IQ_DOORBELL_START + ((iq) * CN23XX_IQ_OFFSET))
182 #define CN23XX_SLI_IQ_INSTR_COUNT64(iq) \ argument
183 (CN23XX_SLI_IQ_INSTR_COUNT_START64 + ((iq) * CN23XX_IQ_OFFSET))
octeon_config.h
121 #define CFG_GET_IQ_CFG(cfg) ((cfg)->iq)
122 #define CFG_GET_IQ_MAX_Q(cfg) ((cfg)->iq.max_iqs)
123 #define CFG_GET_IQ_PENDING_LIST_SIZE(cfg) ((cfg)->iq.pending_list_size)
124 #define CFG_GET_IQ_INSTR_TYPE(cfg) ((cfg)->iq.instr_type)
125 #define CFG_GET_IQ_DB_MIN(cfg) ((cfg)->iq.db_min)
126 #define CFG_GET_IQ_DB_TIMEOUT(cfg) ((cfg)->iq.db_timeout)
128 #define CFG_GET_IQ_INTR_PKT(cfg) ((cfg)->iq.iq_intr_pkt)
129 #define CFG_SET_IQ_INTR_PKT(cfg, val) (cfg)->iq.iq_intr_pkt = val
410 struct octeon_iq_config iq; member
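
octeon_config.h hides the layout of the embedded struct octeon_iq_config iq member behind CFG_GET/CFG_SET accessor macros, so callers never dereference the struct fields directly. A minimal sketch of the same pattern with a hypothetical two-field config:

#include <stdio.h>

struct toy_iq_config {
	int max_iqs;
	int db_timeout;
};

struct toy_config {
	struct toy_iq_config iq;   /* embedded member, like struct octeon_config's */
};

/* Accessors in the CFG_GET / CFG_SET style of octeon_config.h. */
#define TOY_GET_IQ_MAX_Q(cfg)           ((cfg)->iq.max_iqs)
#define TOY_GET_IQ_DB_TIMEOUT(cfg)      ((cfg)->iq.db_timeout)
#define TOY_SET_IQ_DB_TIMEOUT(cfg, val) ((cfg)->iq.db_timeout = (val))

int main(void)
{
	struct toy_config cfg = { .iq = { .max_iqs = 64, .db_timeout = 100 } };

	TOY_SET_IQ_DB_TIMEOUT(&cfg, 50);
	printf("max_iqs=%d db_timeout=%d\n",
	       TOY_GET_IQ_MAX_Q(&cfg), TOY_GET_IQ_DB_TIMEOUT(&cfg));
	return 0;
}
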
cn66xx_device.c
266 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in lio_cn6xxx_setup_iq_regs() local
272 iq->base_addr_dma); in lio_cn6xxx_setup_iq_regs()
273 octeon_write_csr(oct, CN6XXX_SLI_IQ_SIZE(iq_no), iq->max_count); in lio_cn6xxx_setup_iq_regs()
278 iq->doorbell_reg = oct->mmio[0].hw_addr + CN6XXX_SLI_IQ_DOORBELL(iq_no); in lio_cn6xxx_setup_iq_regs()
279 iq->inst_cnt_reg = oct->mmio[0].hw_addr in lio_cn6xxx_setup_iq_regs()
282 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in lio_cn6xxx_setup_iq_regs()
287 iq->reset_instr_cnt = readl(iq->inst_cnt_reg); in lio_cn6xxx_setup_iq_regs()
339 mask |= oct->io_qmask.iq; in lio_cn6xxx_enable_io_queues()
357 mask ^= oct->io_qmask.iq; in lio_cn6xxx_disable_io_queues()
361 mask = (u32)oct->io_qmask.iq; in lio_cn6xxx_disable_io_queues()
[all …]
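
lio_cn6xxx_enable_io_queues() and lio_cn6xxx_disable_io_queues() drive the queue-enable CSR from the 64-bit bitmask oct->io_qmask.iq: OR the mask in to enable, XOR it back out to disable. A short model with the register replaced by a plain variable (toy_* names are invented):

#include <stdint.h>
#include <stdio.h>

static uint64_t toy_instr_enb;   /* stands in for the IQ enable CSR */

static void toy_enable_io_queues(uint64_t io_qmask_iq)
{
	toy_instr_enb |= io_qmask_iq;    /* mask |= oct->io_qmask.iq */
}

static void toy_disable_io_queues(uint64_t io_qmask_iq)
{
	/* XOR clears the bits; this assumes they are currently set,
	 * which holds because enable set them from the same mask. */
	toy_instr_enb ^= io_qmask_iq;
}

int main(void)
{
	uint64_t qmask = (1ULL << 0) | (1ULL << 2);   /* queues 0 and 2 */

	toy_enable_io_queues(qmask);
	printf("enabled:  0x%llx\n", (unsigned long long)toy_instr_enb);
	toy_disable_io_queues(qmask);
	printf("disabled: 0x%llx\n", (unsigned long long)toy_instr_enb);
	return 0;
}
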
cn68xx_regs.h
32 #define CN68XX_SLI_IQ_PORT_PKIND(iq) \ argument
33 (CN68XX_SLI_IQ_PORT0_PKIND + ((iq) * CN6XXX_IQ_OFFSET))
octeon_device.c
41 .iq = {
150 .iq = {
316 .iq = {
419 .iq = {
655 if (oct->io_qmask.iq & BIT_ULL(i)) in octeon_free_device_mem()
1271 (oct->io_qmask.iq & BIT_ULL(q_no))) in octeon_get_tx_qsize()
1429 void lio_enable_irq(struct octeon_droq *droq, struct octeon_instr_queue *iq) in lio_enable_irq() argument
1442 if (iq) { in lio_enable_irq()
1443 spin_lock_bh(&iq->lock); in lio_enable_irq()
1444 writel(iq->pkts_processed, iq->inst_cnt_reg); in lio_enable_irq()
[all …]
lio_vf_main.c
122 struct octeon_instr_queue *iq; in pcierror_quiesce_device() local
124 if (!(oct->io_qmask.iq & BIT_ULL(i))) in pcierror_quiesce_device()
126 iq = oct->instr_queue[i]; in pcierror_quiesce_device()
128 if (atomic_read(&iq->instr_pending)) { in pcierror_quiesce_device()
129 spin_lock_bh(&iq->lock); in pcierror_quiesce_device()
130 iq->fill_cnt = 0; in pcierror_quiesce_device()
131 iq->octeon_read_index = iq->host_write_index; in pcierror_quiesce_device()
132 iq->stats.instr_processed += in pcierror_quiesce_device()
133 atomic_read(&iq->instr_pending); in pcierror_quiesce_device()
134 lio_process_iq_request_list(oct, iq, 0); in pcierror_quiesce_device()
[all …]
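
pcierror_quiesce_device() (this VF copy, and the PF copy in lio_main.c below) force-drains a queue after a PCI error: if instructions are pending, take the queue lock, zero fill_cnt, snap the read index to the host write index, credit the pending count to the stats, and complete everything on the request list. A compilable model of that drain step; the lio_process_iq_request_list() walk is collapsed into clearing the counter, and the toy_* names are invented:

#include <stdio.h>

struct toy_iq {
	int instr_pending;            /* atomic_t in the driver */
	unsigned int fill_cnt;
	unsigned int octeon_read_index;
	unsigned int host_write_index;
	unsigned long instr_processed;
};

/* Force-drain a queue the way pcierror_quiesce_device() does:
 * pretend the hardware consumed everything up to the host write index. */
static void toy_quiesce(struct toy_iq *iq)
{
	if (iq->instr_pending) {
		/* the driver holds spin_lock_bh(&iq->lock) here */
		iq->fill_cnt = 0;
		iq->octeon_read_index = iq->host_write_index;
		iq->instr_processed += (unsigned long)iq->instr_pending;
		iq->instr_pending = 0;   /* request list walked and completed */
	}
}

int main(void)
{
	struct toy_iq iq = {
		.instr_pending = 7,
		.fill_cnt = 3,
		.octeon_read_index = 10,
		.host_write_index = 17,
	};

	toy_quiesce(&iq);
	printf("read=%u processed=%lu pending=%d\n",
	       iq.octeon_read_index, iq.instr_processed, iq.instr_pending);
	return 0;
}
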
cn23xx_pf_device.c
403 struct octeon_instr_queue *iq; in cn23xx_pf_setup_global_input_regs() local
446 iq = oct->instr_queue[q_no]; in cn23xx_pf_setup_global_input_regs()
447 if (iq) in cn23xx_pf_setup_global_input_regs()
448 inst_cnt_reg = iq->inst_cnt_reg; in cn23xx_pf_setup_global_input_regs()
589 struct octeon_instr_queue *iq = oct->instr_queue[iq_no]; in cn23xx_setup_iq_regs() local
596 iq->base_addr_dma); in cn23xx_setup_iq_regs()
597 octeon_write_csr(oct, CN23XX_SLI_IQ_SIZE(iq_no), iq->max_count); in cn23xx_setup_iq_regs()
602 iq->doorbell_reg = in cn23xx_setup_iq_regs()
604 iq->inst_cnt_reg = in cn23xx_setup_iq_regs()
607 iq_no, iq->doorbell_reg, iq->inst_cnt_reg); in cn23xx_setup_iq_regs()
[all …]
lio_main.c
262 struct octeon_instr_queue *iq; in pcierror_quiesce_device() local
264 if (!(oct->io_qmask.iq & BIT_ULL(i))) in pcierror_quiesce_device()
266 iq = oct->instr_queue[i]; in pcierror_quiesce_device()
268 if (atomic_read(&iq->instr_pending)) { in pcierror_quiesce_device()
269 spin_lock_bh(&iq->lock); in pcierror_quiesce_device()
270 iq->fill_cnt = 0; in pcierror_quiesce_device()
271 iq->octeon_read_index = iq->host_write_index; in pcierror_quiesce_device()
272 iq->stats.instr_processed += in pcierror_quiesce_device()
273 atomic_read(&iq->instr_pending); in pcierror_quiesce_device()
274 lio_process_iq_request_list(oct, iq, 0); in pcierror_quiesce_device()
[all …]
octeon_iq.h
375 struct octeon_instr_queue *iq, u32 napi_budget);
397 octeon_flush_iq(struct octeon_device *oct, struct octeon_instr_queue *iq,
lio_core.c
503 struct octeon_instr_queue *iq = oct->instr_queue[iq_num]; in lio_update_txq_status() local
507 netdev = oct->props[iq->ifidx].netdev; in lio_update_txq_status()
516 if (__netif_subqueue_stopped(netdev, iq->q_index) && in lio_update_txq_status()
519 netif_wake_subqueue(netdev, iq->q_index); in lio_update_txq_status()
749 struct octeon_instr_queue *iq; in liquidio_napi_poll() local
763 iq = oct->instr_queue[iq_no]; in liquidio_napi_poll()
764 if (iq) { in liquidio_napi_poll()
768 if (atomic_read(&iq->instr_pending)) in liquidio_napi_poll()
770 tx_done = octeon_flush_iq(oct, iq, budget); in liquidio_napi_poll()
786 (iq && iq->pkt_in_done >= MAX_REG_CNT) || in liquidio_napi_poll()
/kernel/linux/linux-5.10/drivers/scsi/csiostor/
csio_isr.c
212 csio_scsi_isr_handler(struct csio_q *iq) in csio_scsi_isr_handler() argument
214 struct csio_hw *hw = (struct csio_hw *)iq->owner; in csio_scsi_isr_handler()
223 if (unlikely(csio_wr_process_iq(hw, iq, csio_process_scsi_cmpl, in csio_scsi_isr_handler()
258 struct csio_q *iq = (struct csio_q *) dev_id; in csio_scsi_isr() local
261 if (unlikely(!iq)) in csio_scsi_isr()
264 hw = (struct csio_hw *)iq->owner; in csio_scsi_isr()
271 csio_scsi_isr_handler(iq); in csio_scsi_isr()
288 struct csio_q *iq = priv; in csio_scsi_intx_handler() local
290 csio_scsi_isr_handler(iq); in csio_scsi_intx_handler()
csio_wr.h
410 struct csio_iq iq; member
463 #define csio_q_iqid(__hw, __idx) ((__hw)->wrm.q_arr[(__idx)]->un.iq.iqid)
465 ((__hw)->wrm.q_arr[(__idx)]->un.iq.physiqid)
467 ((__hw)->wrm.q_arr[(__idx)]->un.iq.flq_idx)
473 #define csio_iq_has_fl(__iq) ((__iq)->un.iq.flq_idx != -1)
476 csio_q_flid((__hw), (__hw)->wrm.q_arr[(__iq_qidx)]->un.iq.flq_idx)
csio_wr.c
255 q->un.iq.genbit = 1; in csio_wr_alloc_q()
278 q->un.iq.flq_idx = flq_idx; in csio_wr_alloc_q()
280 flq = wrm->q_arr[q->un.iq.flq_idx]; in csio_wr_alloc_q()
306 q->un.iq.flq_idx = -1; in csio_wr_alloc_q()
310 q->un.iq.iq_intx_handler = iq_intx_handler; in csio_wr_alloc_q()
771 q->un.iq.genbit = 1; in csio_wr_cleanup_iq_ftr()
1054 struct csio_q *flq = hw->wrm.q_arr[q->un.iq.flq_idx]; in csio_wr_process_fl()
1113 return (q->un.iq.genbit == (ftr->u.type_gen >> IQWRF_GEN_SHIFT)); in csio_is_new_iqwr()
1140 wrm->q_arr[q->un.iq.flq_idx] : NULL; in csio_wr_process_iq()
1184 q_completed->un.iq.iq_intx_handler); in csio_wr_process_iq()
[all …]
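
csio_is_new_iqwr() is a generation-bit ownership test for a hardware-written ring: the queue's genbit (initialized to 1 in csio_wr_alloc_q()) flips each time the consumer wraps, and an entry is new only while its embedded generation bit matches. A self-contained sketch of the test and the wrap-time flip; the field layout and TOY_GEN_SHIFT are invented stand-ins for ftr->u.type_gen and IQWRF_GEN_SHIFT:

#include <stdint.h>
#include <stdio.h>

#define TOY_RING_SIZE 4
#define TOY_GEN_SHIFT 7          /* invented; csiostor uses IQWRF_GEN_SHIFT */

struct toy_wr { uint8_t type_gen; };   /* gen bit in the top of type_gen */

struct toy_iq {
	struct toy_wr ring[TOY_RING_SIZE];
	unsigned int cidx;           /* consumer index */
	uint8_t genbit;              /* starts at 1, as in csio_wr_alloc_q() */
};

/* Same predicate as csio_is_new_iqwr(): an entry is valid while its
 * generation bit matches the queue's current generation. */
static int toy_is_new_wr(const struct toy_iq *q, const struct toy_wr *wr)
{
	return q->genbit == ((wr->type_gen >> TOY_GEN_SHIFT) & 1);
}

static void toy_consume(struct toy_iq *q)
{
	while (toy_is_new_wr(q, &q->ring[q->cidx])) {
		printf("consumed slot %u (gen %u)\n", q->cidx, q->genbit);
		if (++q->cidx == TOY_RING_SIZE) {  /* wrap: flip our genbit */
			q->cidx = 0;
			q->genbit ^= 1;
		}
	}
}

int main(void)
{
	struct toy_iq q = { .genbit = 1 };
	/* "Hardware" posts three entries tagged with the current generation. */
	for (int i = 0; i < 3; i++)
		q.ring[i].type_gen = (uint8_t)(1 << TOY_GEN_SHIFT);

	toy_consume(&q);   /* stops at slot 3, whose gen bit is still 0 */
	return 0;
}
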
/kernel/linux/linux-5.10/drivers/media/tuners/
r820t.c
1598 static void r820t_compre_cor(struct r820t_sect_type iq[3]) in r820t_compre_cor()
1603 if (iq[0].value > iq[i - 1].value) in r820t_compre_cor()
1604 swap(iq[0], iq[i - 1]); in r820t_compre_cor()
1609 struct r820t_sect_type iq[3], u8 reg) in r820t_compre_step()
1622 tmp.phase_y = iq[0].phase_y; in r820t_compre_step()
1623 tmp.gain_x = iq[0].gain_x; in r820t_compre_step()
1645 if (tmp.value <= iq[0].value) { in r820t_compre_step()
1646 iq[0].gain_x = tmp.gain_x; in r820t_compre_step()
1647 iq[0].phase_y = tmp.phase_y; in r820t_compre_step()
1648 iq[0].value = tmp.value; in r820t_compre_step()
[all …]
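
r820t_compre_cor() is a "keep the best in slot 0" pass over three IQ-calibration candidates: scanning from the back, it swaps any lower-valued candidate into iq[0], so r820t_compre_step() only ever compares new measurements against iq[0]. A standalone rendering with the struct trimmed to the fields the comparison touches (toy_* names are invented):

#include <stdio.h>

struct toy_sect { int gain_x, phase_y, value; };

#define TOY_SWAP(a, b) do { struct toy_sect t = (a); (a) = (b); (b) = t; } while (0)

/* Same idea as r820t_compre_cor(): after this pass, iq[0] holds the
 * candidate with the smallest .value of the three. */
static void toy_compre_cor(struct toy_sect iq[3])
{
	for (int i = 3; i > 0; i--)
		if (iq[0].value > iq[i - 1].value)
			TOY_SWAP(iq[0], iq[i - 1]);
}

int main(void)
{
	struct toy_sect iq[3] = {
		{ .gain_x = 1, .phase_y = 1, .value = 42 },
		{ .gain_x = 2, .phase_y = 0, .value = 17 },
		{ .gain_x = 0, .phase_y = 2, .value = 29 },
	};

	toy_compre_cor(iq);
	printf("best: gain_x=%d phase_y=%d value=%d\n",
	       iq[0].gain_x, iq[0].phase_y, iq[0].value);
	return 0;
}
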
/kernel/linux/linux-5.10/drivers/net/ethernet/chelsio/cxgb4/
cxgb4_filter.c
330 int iq; in get_filter_steerq() local
338 if (fs->iq) in get_filter_steerq()
340 iq = 0; in get_filter_steerq()
347 if (fs->iq < pi->nqsets) in get_filter_steerq()
348 iq = adapter->sge.ethrxq[pi->first_qset + in get_filter_steerq()
349 fs->iq].rspq.abs_id; in get_filter_steerq()
351 iq = fs->iq; in get_filter_steerq()
354 return iq; in get_filter_steerq()
856 FW_FILTER_WR_IQ_V(f->fs.iq)); in set_filter_wr()
1324 RSS_QUEUE_V(f->fs.iq) | in mk_act_open_req6()
[all …]
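
As far as the visible lines show, get_filter_steerq() maps the user-supplied fs->iq onto an absolute response-queue ID in three cases: 0 means no steering, a value below the port's nqsets indexes the port's own RX queue sets (ethrxq[first_qset + fs->iq].rspq.abs_id), and anything else is passed through as an absolute ID. A hedged sketch of that resolution, with the structures flattened to an array of made-up abs_ids:

#include <stdio.h>

#define TOY_NQSETS 4

/* Absolute rspq IDs of this port's own RX queue sets (made-up values). */
static const int toy_rspq_abs_id[TOY_NQSETS] = { 32, 33, 34, 35 };

/* Three-way decision in the style of get_filter_steerq():
 *   0             -> no steering (queue 0)
 *   < nqsets      -> index into the port's own queue sets
 *   anything else -> already an absolute queue ID, use as-is */
static int toy_get_filter_steerq(int fs_iq)
{
	if (fs_iq == 0)
		return 0;
	if (fs_iq < TOY_NQSETS)
		return toy_rspq_abs_id[fs_iq];
	return fs_iq;
}

int main(void)
{
	int cases[] = { 0, 2, 77 };

	for (int i = 0; i < 3; i++)
		printf("fs->iq=%d -> steerq %d\n",
		       cases[i], toy_get_filter_steerq(cases[i]));
	return 0;
}
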
sge.c
4348 int t4_sge_alloc_rxq(struct adapter *adap, struct sge_rspq *iq, bool fwevtq, in t4_sge_alloc_rxq() argument
4360 iq->size = roundup(iq->size, 16); in t4_sge_alloc_rxq()
4362 iq->desc = alloc_ring(adap->pdev_dev, iq->size, iq->iqe_len, 0, in t4_sge_alloc_rxq()
4363 &iq->phys_addr, NULL, 0, in t4_sge_alloc_rxq()
4365 if (!iq->desc) in t4_sge_alloc_rxq()
4382 FW_IQ_CMD_IQINTCNTTHRESH_V(iq->pktcnt_idx) | in t4_sge_alloc_rxq()
4383 FW_IQ_CMD_IQESIZE_V(ilog2(iq->iqe_len) - 4)); in t4_sge_alloc_rxq()
4384 c.iqsize = htons(iq->size); in t4_sge_alloc_rxq()
4385 c.iqaddr = cpu_to_be64(iq->phys_addr); in t4_sge_alloc_rxq()
4445 netif_napi_add(dev, &iq->napi, napi_rx_handler, 64); in t4_sge_alloc_rxq()
[all …]
/kernel/linux/linux-5.10/scripts/
tags.sh
284 if $1 --version 2>&1 | grep -iq exuberant; then
286 elif $1 --version 2>&1 | grep -iq emacs; then
