Searched refs:cmdq (Results 1 – 25 of 43) sorted by relevance

/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/falcon/
cmdq.c
26 nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind) in nvkm_falcon_cmdq_has_room() argument
28 u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg); in nvkm_falcon_cmdq_has_room()
29 u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg); in nvkm_falcon_cmdq_has_room()
35 free = cmdq->offset + cmdq->size - head; in nvkm_falcon_cmdq_has_room()
40 head = cmdq->offset; in nvkm_falcon_cmdq_has_room()
51 nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size) in nvkm_falcon_cmdq_push() argument
53 struct nvkm_falcon *falcon = cmdq->qmgr->falcon; in nvkm_falcon_cmdq_push()
54 nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0); in nvkm_falcon_cmdq_push()
55 cmdq->position += ALIGN(size, QUEUE_ALIGNMENT); in nvkm_falcon_cmdq_push()
59 nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq) in nvkm_falcon_cmdq_rewind() argument
[all …]
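
The cmdq.c hits above show the usual single-producer ring-buffer bookkeeping: free space depends on whether the head has wrapped past the tail, and a write that would run off the end forces a rewind to the queue start. A minimal standalone sketch of that check (struct toy_cmdq and toy_cmdq_has_room are hypothetical stand-ins; the real driver reads head and tail from falcon registers and aligns size first):

#include <stdbool.h>
#include <stdint.h>

/* Stand-in for the falcon queue: it occupies [offset, offset + size)
 * in DMEM, with head (producer) and tail (consumer) offsets that the
 * real code reads from hardware registers. */
struct toy_cmdq {
	uint32_t offset;
	uint32_t size;
	uint32_t head;
	uint32_t tail;
};

/* Same shape as nvkm_falcon_cmdq_has_room(): when head >= tail the
 * free run goes from head to the end of the queue; if that run is too
 * short, the writer rewinds to the start, where only the gap below
 * the tail is usable. */
static bool toy_cmdq_has_room(struct toy_cmdq *q, uint32_t size, bool *rewind)
{
	uint32_t head = q->head;
	uint32_t tail = q->tail;
	uint32_t free;

	*rewind = false;
	if (head >= tail) {
		free = q->offset + q->size - head;
		if (size > free) {
			*rewind = true;		/* wrap before writing */
			head = q->offset;
		}
	}
	if (head < tail)
		free = tail - head - 1;		/* keep one slot open */

	return size <= free;
}

The matching push path (lines 51-55 above) then copies the payload into DMEM at cmdq->position and advances it by ALIGN(size, QUEUE_ALIGNMENT), so every entry stays aligned.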
Kbuild
3 nvkm-y += nvkm/falcon/cmdq.o
/kernel/linux/linux-5.10/drivers/crypto/cavium/nitrox/
nitrox_lib.c
25 static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes) in nitrox_cmdq_init() argument
27 struct nitrox_device *ndev = cmdq->ndev; in nitrox_cmdq_init()
29 cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes; in nitrox_cmdq_init()
30 cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize, in nitrox_cmdq_init()
31 &cmdq->unalign_dma, in nitrox_cmdq_init()
33 if (!cmdq->unalign_base) in nitrox_cmdq_init()
36 cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes); in nitrox_cmdq_init()
37 cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma); in nitrox_cmdq_init()
38 cmdq->write_idx = 0; in nitrox_cmdq_init()
40 spin_lock_init(&cmdq->cmd_qlock); in nitrox_cmdq_init()
[all …]
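
nitrox_cmdq_init() (lines 25-38 above) is a textbook way to honor an alignment the allocator does not guarantee: over-allocate by align_bytes, round the DMA handle up to the boundary, and shift the CPU pointer by the same delta so both views name the same byte. A userspace sketch of the arithmetic, with malloc standing in for dma_alloc_coherent() and align_bytes assumed to be a power of two (toy_* names are hypothetical):

#include <stdint.h>
#include <stdlib.h>

struct toy_cmdq {
	void  *unalign_base;	/* raw pointer from the allocator */
	char  *base;		/* aligned pointer actually used  */
	size_t qsize;
};

static int toy_cmdq_init(struct toy_cmdq *q, size_t qlen, size_t instr_size,
			 size_t align_bytes)	/* must be a power of two */
{
	uintptr_t raw, aligned;

	/* Over-allocate so an aligned region of the needed size fits. */
	q->qsize = qlen * instr_size + align_bytes;
	q->unalign_base = malloc(q->qsize);
	if (!q->unalign_base)
		return -1;

	/* PTR_ALIGN(): round up to the next align_bytes boundary... */
	raw = (uintptr_t)q->unalign_base;
	aligned = (raw + align_bytes - 1) & ~(uintptr_t)(align_bytes - 1);

	/* ...and offset the CPU pointer identically, mirroring
	 * cmdq->base = unalign_base + (dma - unalign_dma). */
	q->base = (char *)q->unalign_base + (aligned - raw);
	return 0;
}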
nitrox_reqmgr.c
228 struct nitrox_cmdq *cmdq) in backlog_list_add() argument
232 spin_lock_bh(&cmdq->backlog_qlock); in backlog_list_add()
233 list_add_tail(&sr->backlog, &cmdq->backlog_head); in backlog_list_add()
234 atomic_inc(&cmdq->backlog_count); in backlog_list_add()
236 spin_unlock_bh(&cmdq->backlog_qlock); in backlog_list_add()
240 struct nitrox_cmdq *cmdq) in response_list_add() argument
244 spin_lock_bh(&cmdq->resp_qlock); in response_list_add()
245 list_add_tail(&sr->response, &cmdq->response_head); in response_list_add()
246 spin_unlock_bh(&cmdq->resp_qlock); in response_list_add()
250 struct nitrox_cmdq *cmdq) in response_list_del() argument
[all …]
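
backlog_list_add() and response_list_add() are the same idiom twice: a tail insert under a dedicated lock, plus a separate counter the completion path can test cheaply. The driver takes the _bh spinlock variants because the lists are also touched from softirq context. A userspace analog with a pthread mutex standing in for the spinlock (toy_* names are hypothetical; initialize tailp to &head before first use):

#include <pthread.h>
#include <stddef.h>

struct toy_req {
	struct toy_req *next;
};

struct toy_backlog {
	pthread_mutex_t lock;
	struct toy_req *head;
	struct toy_req **tailp;		/* last next pointer; &head when empty */
	unsigned int count;
};

static void toy_backlog_add(struct toy_backlog *b, struct toy_req *r)
{
	pthread_mutex_lock(&b->lock);	/* spin_lock_bh(&cmdq->backlog_qlock) */
	r->next = NULL;
	*b->tailp = r;			/* list_add_tail() */
	b->tailp = &r->next;
	b->count++;			/* atomic_inc(&cmdq->backlog_count) */
	pthread_mutex_unlock(&b->lock);	/* spin_unlock_bh() */
}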
nitrox_isr.c
31 struct nitrox_cmdq *cmdq = qvec->cmdq; in nps_pkt_slc_isr() local
33 slc_cnts.value = readq(cmdq->compl_cnt_csr_addr); in nps_pkt_slc_isr()
336 qvec->cmdq = &ndev->pkt_inq[qvec->ring]; in nitrox_register_interrupts()
nitrox_hal.c
124 struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i]; in nitrox_config_pkt_input_rings() local
137 nitrox_write_csr(ndev, offset, cmdq->dma); in nitrox_config_pkt_input_rings()
356 struct nitrox_cmdq *cmdq = ndev->aqmq[ring]; in nitrox_config_aqm_rings() local
379 nitrox_write_csr(ndev, offset, cmdq->dma); in nitrox_config_aqm_rings()
nitrox_dev.h
107 struct nitrox_cmdq *cmdq; member
/kernel/linux/linux-5.10/drivers/mailbox/
mtk-cmdq-mailbox.c
62 struct cmdq *cmdq; member
69 struct cmdq { struct
88 struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox); in cmdq_get_shift_pa() local
90 return cmdq->shift_pa; in cmdq_get_shift_pa()
94 static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread) in cmdq_thread_suspend() argument
106 dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n", in cmdq_thread_suspend()
107 (u32)(thread->base - cmdq->base)); in cmdq_thread_suspend()
119 static void cmdq_init(struct cmdq *cmdq) in cmdq_init() argument
123 WARN_ON(clk_enable(cmdq->clock) < 0); in cmdq_init()
124 writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES); in cmdq_init()
[all …]
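
cmdq_get_shift_pa() (lines 88-90 above) leans on container_of(): the mailbox core only hands the driver the embedded struct mbox_controller, and the driver maps it back to its enclosing struct cmdq. A self-contained illustration of the pattern (toy struct names; the macro is a simplified version of the kernel one, which adds type checking):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mbox_controller { int dummy; };

struct cmdq_like {
	int shift_pa;
	struct mbox_controller mbox;	/* embedded generic object */
};

int main(void)
{
	struct cmdq_like gce = { .shift_pa = 3 };
	struct mbox_controller *m = &gce.mbox;	/* what the core passes back */
	struct cmdq_like *c = container_of(m, struct cmdq_like, mbox);

	printf("shift_pa = %d\n", c->shift_pa);	/* prints 3 */
	return 0;
}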
Makefile
48 obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
/kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/
bfa_msgq.c
31 static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
32 static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
43 bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
44 bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
45 bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
46 bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
50 cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq) in cmdq_sm_stopped_entry() argument
54 cmdq->producer_index = 0; in cmdq_sm_stopped_entry()
55 cmdq->consumer_index = 0; in cmdq_sm_stopped_entry()
56 cmdq->flags = 0; in cmdq_sm_stopped_entry()
[all …]
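
The bfa_fsm_state_decl() lines declare one handler per cmdq state (stopped, init_wait, ready, dbell_wait); the state machine is just a function pointer swapped on each transition, and a per-state entry action, such as cmdq_sm_stopped_entry() resetting the indices, runs on arrival. A hypothetical sketch of that style with two states (all toy_* names and the event values are invented):

struct toy_cmdq;
typedef void (*toy_fsm_t)(struct toy_cmdq *q, int event);

enum toy_event { TOY_START = 1, TOY_STOP = 2 };

struct toy_cmdq {
	toy_fsm_t fsm;			/* current state */
	int producer_index;
	int consumer_index;
	int flags;
};

static void toy_sm_stopped(struct toy_cmdq *q, int event);
static void toy_sm_ready(struct toy_cmdq *q, int event);

/* Entry action, cf. cmdq_sm_stopped_entry(): reset the queue state. */
static void toy_sm_stopped_entry(struct toy_cmdq *q)
{
	q->producer_index = 0;
	q->consumer_index = 0;
	q->flags = 0;
}

static void toy_sm_stopped(struct toy_cmdq *q, int event)
{
	if (event == TOY_START)
		q->fsm = toy_sm_ready;	/* cf. bfa_fsm_set_state() */
}

static void toy_sm_ready(struct toy_cmdq *q, int event)
{
	if (event == TOY_STOP) {
		q->fsm = toy_sm_stopped;
		toy_sm_stopped_entry(q);
	}
}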
bfa_msgq.h
104 struct bfa_msgq_cmdq cmdq; member
/kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/
hinic_hw_cmdq.c
78 #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \ argument
79 struct hinic_cmdqs, cmdq[0])
325 static void cmdq_set_db(struct hinic_cmdq *cmdq, in cmdq_set_db() argument
337 writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx)); in cmdq_set_db()
340 static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq, in cmdq_sync_cmd_direct_resp() argument
348 struct hinic_wq *wq = cmdq->wq; in cmdq_sync_cmd_direct_resp()
353 spin_lock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
358 spin_unlock_bh(&cmdq->cmdq_lock); in cmdq_sync_cmd_direct_resp()
364 wrapped = cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
369 cmdq->wrapped = !cmdq->wrapped; in cmdq_sync_cmd_direct_resp()
[all …]
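
The cmdq_to_cmdqs() macro at lines 78-79 above is a neat twist on container_of(): each queue stores its own array index (cmdq_type), so subtracting that index from a pointer to the element yields &cmdq[0], which then maps back to the parent struct hinic_cmdqs. Standalone demonstration (toy types; container_of simplified from the kernel version):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct toy_cmdq { int type; };	/* element knows its array index */

struct toy_cmdqs {
	int magic;
	struct toy_cmdq cmdq[2];
};

/* Same shape as hinic's cmdq_to_cmdqs(). */
#define toy_cmdq_to_cmdqs(c) \
	container_of((c) - (c)->type, struct toy_cmdqs, cmdq[0])

int main(void)
{
	struct toy_cmdqs qs = {
		.magic = 42,
		.cmdq  = { { .type = 0 }, { .type = 1 } },
	};
	struct toy_cmdq *c = &qs.cmdq[1];

	printf("magic = %d\n", toy_cmdq_to_cmdqs(c)->magic);	/* 42 */
	return 0;
}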
hinic_hw_io.c
534 enum hinic_cmdq_type cmdq, type; in hinic_io_init() local
566 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) { in hinic_io_init()
574 func_to_io->cmdq_db_area[cmdq] = db_area; in hinic_io_init()
601 for (type = HINIC_CMDQ_SYNC; type < cmdq; type++) in hinic_io_init()
620 enum hinic_cmdq_type cmdq; in hinic_io_free() local
629 for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) in hinic_io_free()
630 return_db_area(func_to_io, func_to_io->cmdq_db_area[cmdq]); in hinic_io_free()
hinic_hw_cmdq.h
167 struct hinic_cmdq cmdq[HINIC_MAX_CMDQ_TYPES]; member
/kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/
qplib_rcfw.c
58 struct bnxt_qplib_cmdq_ctx *cmdq; in __wait_for_resp() local
62 cmdq = &rcfw->cmdq; in __wait_for_resp()
64 rc = wait_event_timeout(cmdq->waitq, in __wait_for_resp()
65 !test_bit(cbit, cmdq->cmdq_bitmap), in __wait_for_resp()
73 struct bnxt_qplib_cmdq_ctx *cmdq; in __block_for_resp() local
76 cmdq = &rcfw->cmdq; in __block_for_resp()
78 if (!test_bit(cbit, cmdq->cmdq_bitmap)) in __block_for_resp()
83 } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count); in __block_for_resp()
91 struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq; in __send_message() local
92 struct bnxt_qplib_hwq *hwq = &cmdq->hwq; in __send_message()
[all …]
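
qplib_rcfw tracks each outstanding command as a bit in cmdq_bitmap: __wait_for_resp() sleeps on it with wait_event_timeout(), while __block_for_resp() busy-polls the same bit with a bounded retry count for contexts that cannot sleep. A userspace analog of the polling variant (the toy_ name and the millisecond budget are assumptions):

#include <stdatomic.h>
#include <stdbool.h>
#include <unistd.h>

static bool toy_block_for_resp(atomic_ulong *bitmap, unsigned int cbit,
			       unsigned int budget_ms)
{
	unsigned int count = budget_ms;

	do {
		/* Completion path clears the bit when the response lands. */
		if (!(atomic_load(bitmap) & (1UL << cbit)))
			return true;
		usleep(1000);		/* mdelay(1)-style pacing */
	} while (--count);

	return false;			/* timed out */
}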
/kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/
arm-smmu-v3.c
341 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_build_sync_cmd()
369 struct arm_smmu_queue *q = &smmu->cmdq.q; in arm_smmu_cmdq_skip_err()
427 static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_lock() argument
437 if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0) in arm_smmu_cmdq_shared_lock()
441 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0); in arm_smmu_cmdq_shared_lock()
442 } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val); in arm_smmu_cmdq_shared_lock()
445 static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_unlock() argument
447 (void)atomic_dec_return_release(&cmdq->lock); in arm_smmu_cmdq_shared_unlock()
450 static bool arm_smmu_cmdq_shared_tryunlock(struct arm_smmu_cmdq *cmdq) in arm_smmu_cmdq_shared_tryunlock() argument
452 if (atomic_read(&cmdq->lock) == 1) in arm_smmu_cmdq_shared_tryunlock()
[all …]
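
arm_smmu_cmdq_shared_lock() packs shared and exclusive ownership into one atomic int: values >= 0 count shared holders, a negative value marks an exclusive owner. Shared lockers increment optimistically and, on losing, spin until the value turns non-negative, then join via compare-and-swap; in the driver the exclusive side resets the counter to zero on unlock, which absorbs the stray increment left by a failed fast path. A C11-atomics sketch of the shared side (the toy_ function names are mine):

#include <stdatomic.h>

static void toy_shared_lock(atomic_int *lock)
{
	int val;

	/* Fast path, cf. atomic_fetch_inc_relaxed(): if the old value
	 * was >= 0 there was no exclusive owner, and our increment has
	 * already registered us as a shared holder. */
	if (atomic_fetch_add_explicit(lock, 1, memory_order_relaxed) >= 0)
		return;

	/* Slow path: wait for the exclusive owner to go away, then join
	 * the shared holders with a CAS, mirroring the driver's
	 * atomic_cond_read_relaxed()/atomic_cmpxchg_relaxed() loop. */
	do {
		do {
			val = atomic_load_explicit(lock, memory_order_relaxed);
		} while (val < 0);
	} while (!atomic_compare_exchange_weak_explicit(lock, &val, val + 1,
							memory_order_relaxed,
							memory_order_relaxed));
}

static void toy_shared_unlock(atomic_int *lock)
{
	/* cf. atomic_dec_return_release() */
	atomic_fetch_sub_explicit(lock, 1, memory_order_release);
}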
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/engine/sec2/
base.c
61 nvkm_falcon_cmdq_fini(sec2->cmdq); in nvkm_sec2_fini()
73 nvkm_falcon_cmdq_del(&sec2->cmdq); in nvkm_sec2_dtor()
112 (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) || in nvkm_sec2_new_()
gp102.c
71 return nvkm_falcon_cmdq_send(sec2->cmdq, &cmd.cmd.hdr, in gp102_sec2_acr_bootstrap_falcon()
153 nvkm_falcon_cmdq_init(sec2->cmdq, in gp102_sec2_initmsg()
246 .cmdq = { 0xa00, 0xa04, 8 },
tu102.c
40 .cmdq = { 0xc00, 0xc04, 8 },
/kernel/linux/linux-5.10/drivers/atm/
fore200e.c
558 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_pca_prom_read() local
559 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_pca_prom_read()
564 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_pca_prom_read()
1227 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_activate_vcin() local
1228 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_activate_vcin()
1235 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_activate_vcin()
1671 struct host_cmdq* cmdq = &fore200e->host_cmdq; in fore200e_getstats() local
1672 struct host_cmdq_entry* entry = &cmdq->host_entry[ cmdq->head ]; in fore200e_getstats()
1688 FORE200E_NEXT_ENTRY(cmdq->head, QUEUE_SIZE_CMD); in fore200e_getstats()
1717 struct host_cmdq* cmdq = &fore200e->host_cmdq;
[all …]
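
All three fore200e paths repeat the same two steps: take the entry at cmdq->head, then advance the index with FORE200E_NEXT_ENTRY(), a wrap-around increment modulo QUEUE_SIZE_CMD. Minimal sketch of the idiom (toy_* names invented, queue depth assumed):

#define TOY_QUEUE_SIZE_CMD 8

/* Same shape as FORE200E_NEXT_ENTRY(index, modulo). */
#define TOY_NEXT_ENTRY(idx, modulo) ((idx) = ((idx) + 1) % (modulo))

struct toy_host_cmdq {
	int head;			/* next entry to hand out */
	int entry[TOY_QUEUE_SIZE_CMD];
};

static int *toy_next_cmd_entry(struct toy_host_cmdq *q)
{
	int *e = &q->entry[q->head];

	TOY_NEXT_ENTRY(q->head, TOY_QUEUE_SIZE_CMD);	/* advance with wrap */
	return e;
}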
/kernel/linux/linux-5.10/drivers/soc/mediatek/
Makefile
2 obj-$(CONFIG_MTK_CMDQ) += mtk-cmdq-helper.o
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/include/nvkm/engine/
sec2.h
14 struct nvkm_falcon_cmdq *cmdq; member
falcon.h
101 } cmdq, msgq; member
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/pmu/
gm200.c
51 .cmdq = { 0x4a0, 0x4b0, 4 },
/kernel/linux/linux-5.10/drivers/scsi/aacraid/
dpcsup.c
200 list_add_tail(&fib->fiblink, &q->cmdq); in aac_command_normal()
310 list_add_tail(&fib->fiblink, &q->cmdq); in aac_intr_normal()
