| /kernel/linux/linux-6.6/drivers/infiniband/hw/erdma/ |
| D | erdma_cmdq.c |
      9: static void arm_cmdq_cq(struct erdma_cmdq *cmdq)
     11:         struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
     12:         u64 db_data = FIELD_PREP(ERDMA_CQDB_CI_MASK, cmdq->cq.ci) |
     14:                       FIELD_PREP(ERDMA_CQDB_CMDSN_MASK, cmdq->cq.cmdsn) |
     15:                       FIELD_PREP(ERDMA_CQDB_IDX_MASK, cmdq->cq.cmdsn);
     17:         *cmdq->cq.db_record = db_data;
     20:         atomic64_inc(&cmdq->cq.armed_num);
     23: static void kick_cmdq_db(struct erdma_cmdq *cmdq)
     25:         struct erdma_dev *dev = container_of(cmdq, struct erdma_dev, cmdq);
     26:         u64 db_data = FIELD_PREP(ERDMA_CMD_HDR_WQEBB_INDEX_MASK, cmdq->sq.pi);
     [all …]
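arm_cmdq_cq() assembles its doorbell word by OR-ing FIELD_PREP()-packed fields: consumer index, command sequence number, and a queue index. A minimal userspace sketch of that mask-based packing; the three masks here are invented for illustration, not erdma's real register layout:

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical field layout for a 64-bit doorbell word. */
#define DB_CI_MASK     0x0000000000FFFFFFULL  /* bits  0..23: consumer index */
#define DB_IDX_MASK    0x0000000001000000ULL  /* bit      24: queue index    */
#define DB_CMDSN_MASK  0x0000000006000000ULL  /* bits 25..26: command seq nr */

/* FIELD_PREP-alike: shift a value into the position selected by mask. */
static uint64_t field_prep(uint64_t mask, uint64_t val)
{
	return (val << __builtin_ctzll(mask)) & mask;
}

int main(void)
{
	uint64_t db = field_prep(DB_CI_MASK, 0x1234) |
		      field_prep(DB_IDX_MASK, 1) |
		      field_prep(DB_CMDSN_MASK, 2);

	printf("doorbell = 0x%016llx\n", (unsigned long long)db);
	return 0;
}
```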
|
| /kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/falcon/ |
| D | cmdq.c |
     26: nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
     28:         u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
     29:         u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
     35:                 free = cmdq->offset + cmdq->size - head;
     40:                         head = cmdq->offset;
     51: nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
     53:         struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
     54:         nvkm_falcon_load_dmem(falcon, data, cmdq->position, size, 0);
     55:         cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
     59: nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
     [all …]
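nvkm_falcon_cmdq_has_room() reads the hardware head and tail pointers and checks whether `size` bytes fit contiguously, asking the caller to rewind the head to the queue start when the stretch at the end is too short (the 6.6 copy below differs only in the DMEM write helper). A hedged userspace approximation of the free-space logic, with simplified accounting:

```c
#include <stdbool.h>
#include <stdint.h>

struct cmdq_sketch {
	uint32_t offset;  /* queue start within DMEM       */
	uint32_t size;    /* total queue size in bytes     */
	uint32_t head;    /* producer offset (hw register) */
	uint32_t tail;    /* consumer offset (hw register) */
};

/*
 * Room check for a contiguous write of `size` bytes. When the producer
 * is ahead of the consumer, only the span up to the end of the buffer
 * is contiguous; if that is too small, the caller must rewind the head
 * back to `offset` and retry against the space before the tail.
 */
static bool cmdq_has_room(struct cmdq_sketch *q, uint32_t size, bool *rewind)
{
	uint32_t head = q->head, tail = q->tail, free;

	*rewind = false;

	if (head >= tail) {
		free = q->offset + q->size - head;
		if (free < size) {
			/* not enough contiguous space at the end */
			*rewind = true;
			head = q->offset;
			free = tail - head;
		}
	} else {
		free = tail - head;
	}

	/* keep one byte unused so head == tail always means "empty" */
	return size < free;
}

int main(void)
{
	struct cmdq_sketch q = { .offset = 0, .size = 64, .head = 56, .tail = 16 };
	bool rewind;

	/* 8 bytes left at the end: a 12-byte write fits only after a rewind */
	return cmdq_has_room(&q, 12, &rewind) && rewind ? 0 : 1;
}
```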
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/falcon/ |
| D | cmdq.c |
     26: nvkm_falcon_cmdq_has_room(struct nvkm_falcon_cmdq *cmdq, u32 size, bool *rewind)
     28:         u32 head = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->head_reg);
     29:         u32 tail = nvkm_falcon_rd32(cmdq->qmgr->falcon, cmdq->tail_reg);
     35:                 free = cmdq->offset + cmdq->size - head;
     40:                         head = cmdq->offset;
     51: nvkm_falcon_cmdq_push(struct nvkm_falcon_cmdq *cmdq, void *data, u32 size)
     53:         struct nvkm_falcon *falcon = cmdq->qmgr->falcon;
     54:         nvkm_falcon_pio_wr(falcon, data, 0, 0, DMEM, cmdq->position, size, 0, false);
     55:         cmdq->position += ALIGN(size, QUEUE_ALIGNMENT);
     59: nvkm_falcon_cmdq_rewind(struct nvkm_falcon_cmdq *cmdq)
     [all …]
|
| /kernel/linux/linux-6.6/drivers/mailbox/ |
| D | mtk-cmdq-mailbox.c |
     17: #include <linux/mailbox/mtk-cmdq-mailbox.h>
     67:         struct cmdq *cmdq;
     74: struct cmdq {
     93: static void cmdq_sw_ddr_enable(struct cmdq *cmdq, bool enable)
     95:         WARN_ON(clk_bulk_enable(cmdq->pdata->gce_num, cmdq->clocks));
     98:                 writel(GCE_DDR_EN | GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
    100:                 writel(GCE_CTRL_BY_SW, cmdq->base + GCE_GCTL_VALUE);
    102:         clk_bulk_disable(cmdq->pdata->gce_num, cmdq->clocks);
    107:         struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);  /* in cmdq_get_shift_pa() */
    109:         return cmdq->pdata->shift;
    [all …]
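Both kernel versions recover the driver-private `struct cmdq` from the embedded mailbox controller with container_of(chan->mbox, struct cmdq, mbox). A self-contained sketch of the container_of() idiom itself, with a userspace re-definition and illustrative types:

```c
#include <stddef.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's container_of(). */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct mbox { int nchans; };

struct cmdq {
	void *base;
	struct mbox mbox;  /* embedded member, not a pointer */
};

int main(void)
{
	struct cmdq q = { .base = (void *)0x1000, .mbox = { .nchans = 8 } };
	struct mbox *chan_mbox = &q.mbox;  /* what a mailbox callback receives */

	/* Walk back from the member address to the enclosing structure. */
	struct cmdq *back = container_of(chan_mbox, struct cmdq, mbox);

	printf("recovered cmdq: %p (nchans=%d)\n", (void *)back, back->mbox.nchans);
	return back != &q;  /* exits 0 when the round-trip worked */
}
```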
|
| /kernel/linux/linux-5.10/drivers/mailbox/ |
| D | mtk-cmdq-mailbox.c |
     17: #include <linux/mailbox/mtk-cmdq-mailbox.h>
     62:         struct cmdq *cmdq;
     69: struct cmdq {
     88:         struct cmdq *cmdq = container_of(chan->mbox, struct cmdq, mbox);  /* in cmdq_get_shift_pa() */
     90:         return cmdq->shift_pa;
     94: static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
    106:         dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
    107:                 (u32)(thread->base - cmdq->base));
    119: static void cmdq_init(struct cmdq *cmdq)
    123:         WARN_ON(clk_enable(cmdq->clock) < 0);
    [all …]
|
| /kernel/linux/linux-5.10/drivers/crypto/cavium/nitrox/ |
| D | nitrox_lib.c |
     25: static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
     27:         struct nitrox_device *ndev = cmdq->ndev;
     29:         cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
     30:         cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
     31:                                                 &cmdq->unalign_dma,
     33:         if (!cmdq->unalign_base)
     36:         cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
     37:         cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
     38:         cmdq->write_idx = 0;
     40:         spin_lock_init(&cmdq->cmd_qlock);
     [all …]
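nitrox_cmdq_init() over-allocates the DMA ring by `align_bytes`, rounds the bus address up with PTR_ALIGN(), and advances the CPU pointer by the same delta so both views address the same aligned ring. The arithmetic in isolation, with plain malloc() standing in for dma_alloc_coherent():

```c
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* PTR_ALIGN-alike: round an address up to a power-of-two boundary. */
static uintptr_t align_up(uintptr_t addr, uintptr_t align)
{
	return (addr + align - 1) & ~(align - 1);
}

int main(void)
{
	const size_t qlen = 64, instr_size = 64, align_bytes = 64;
	size_t qsize = qlen * instr_size + align_bytes;  /* over-allocate */

	void *unalign_base = malloc(qsize);  /* stands in for dma_alloc_coherent() */
	if (!unalign_base)
		return 1;

	uintptr_t unalign_dma = (uintptr_t)unalign_base;  /* pretend bus address */
	uintptr_t dma = align_up(unalign_dma, align_bytes);

	/* Shift the CPU pointer by the same delta as the bus address. */
	void *base = (char *)unalign_base + (dma - unalign_dma);

	printf("raw=%p aligned cpu=%p delta=%zu\n",
	       unalign_base, base, (size_t)(dma - unalign_dma));
	free(unalign_base);
	return 0;
}
```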
|
| D | nitrox_reqmgr.c |
    228:                              struct nitrox_cmdq *cmdq)  /* in backlog_list_add() */
    232:         spin_lock_bh(&cmdq->backlog_qlock);
    233:         list_add_tail(&sr->backlog, &cmdq->backlog_head);
    234:         atomic_inc(&cmdq->backlog_count);
    236:         spin_unlock_bh(&cmdq->backlog_qlock);
    240:                               struct nitrox_cmdq *cmdq)  /* in response_list_add() */
    244:         spin_lock_bh(&cmdq->resp_qlock);
    245:         list_add_tail(&sr->response, &cmdq->response_head);
    246:         spin_unlock_bh(&cmdq->resp_qlock);
    250:                               struct nitrox_cmdq *cmdq)  /* in response_list_del() */
    [all …]
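backlog_list_add() and response_list_add() share one shape: take a bottom-half-safe spinlock, append the software request to an intrusive list, bump a counter if there is one, unlock. A pthread sketch of that pattern, with a minimal list_add_tail() reimplementation and a mutex standing in for spin_lock_bh():

```c
#include <pthread.h>
#include <stdatomic.h>

struct list_head { struct list_head *prev, *next; };

static void list_init(struct list_head *h) { h->prev = h->next = h; }

/* Append entry just before head, i.e. at the tail of the circular list. */
static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

struct cmdq_sketch {
	pthread_mutex_t backlog_qlock;  /* stands in for spin_lock_bh() */
	struct list_head backlog_head;
	atomic_int backlog_count;
};

struct softreq { struct list_head backlog; };

static void backlog_list_add(struct softreq *sr, struct cmdq_sketch *cmdq)
{
	pthread_mutex_lock(&cmdq->backlog_qlock);
	list_add_tail(&sr->backlog, &cmdq->backlog_head);
	atomic_fetch_add(&cmdq->backlog_count, 1);
	pthread_mutex_unlock(&cmdq->backlog_qlock);
}

int main(void)
{
	struct cmdq_sketch q = { .backlog_qlock = PTHREAD_MUTEX_INITIALIZER };
	struct softreq sr;

	list_init(&q.backlog_head);
	atomic_init(&q.backlog_count, 0);
	backlog_list_add(&sr, &q);
	return atomic_load(&q.backlog_count) == 1 ? 0 : 1;
}
```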
|
| /kernel/linux/linux-6.6/drivers/crypto/cavium/nitrox/ |
| D | nitrox_lib.c |
     25: static int nitrox_cmdq_init(struct nitrox_cmdq *cmdq, int align_bytes)
     27:         struct nitrox_device *ndev = cmdq->ndev;
     29:         cmdq->qsize = (ndev->qlen * cmdq->instr_size) + align_bytes;
     30:         cmdq->unalign_base = dma_alloc_coherent(DEV(ndev), cmdq->qsize,
     31:                                                 &cmdq->unalign_dma,
     33:         if (!cmdq->unalign_base)
     36:         cmdq->dma = PTR_ALIGN(cmdq->unalign_dma, align_bytes);
     37:         cmdq->base = cmdq->unalign_base + (cmdq->dma - cmdq->unalign_dma);
     38:         cmdq->write_idx = 0;
     40:         spin_lock_init(&cmdq->cmd_qlock);
     [all …]
|
| D | nitrox_reqmgr.c |
    230:                              struct nitrox_cmdq *cmdq)  /* in backlog_list_add() */
    234:         spin_lock_bh(&cmdq->backlog_qlock);
    235:         list_add_tail(&sr->backlog, &cmdq->backlog_head);
    236:         atomic_inc(&cmdq->backlog_count);
    238:         spin_unlock_bh(&cmdq->backlog_qlock);
    242:                               struct nitrox_cmdq *cmdq)  /* in response_list_add() */
    246:         spin_lock_bh(&cmdq->resp_qlock);
    247:         list_add_tail(&sr->response, &cmdq->response_head);
    248:         spin_unlock_bh(&cmdq->resp_qlock);
    252:                               struct nitrox_cmdq *cmdq)  /* in response_list_del() */
    [all …]
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/brocade/bna/ |
| D | bfa_msgq.c |
     31: static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
     32: static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
     43: bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
     44: bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
     45: bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
     46: bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
     50: cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
     54:         cmdq->producer_index = 0;
     55:         cmdq->consumer_index = 0;
     56:         cmdq->flags = 0;
     [all …]
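bfa_fsm_state_decl() appears to declare, per state, an entry action and an event handler; the current state is then just a function pointer that each handler reassigns. A loose sketch of that table-less FSM style; the macro, event values, and state names here are invented, not Brocade's actual definitions:

```c
#include <stdio.h>

struct cmdq_sm;
typedef void (*sm_fn)(struct cmdq_sm *sm, int event);

struct cmdq_sm {
	sm_fn state;  /* current state == current event handler */
	int producer_index;
	int consumer_index;
};

/* Declare handler + entry function per state, like bfa_fsm_state_decl(). */
#define FSM_STATE_DECL(name)                               \
	static void cmdq_sm_##name(struct cmdq_sm *, int); \
	static void cmdq_sm_##name##_entry(struct cmdq_sm *)

FSM_STATE_DECL(stopped);
FSM_STATE_DECL(ready);

/* Switch state and run its entry action. */
#define FSM_SET_STATE(sm, name) \
	do { (sm)->state = cmdq_sm_##name; cmdq_sm_##name##_entry(sm); } while (0)

static void cmdq_sm_stopped_entry(struct cmdq_sm *sm)
{
	sm->producer_index = 0;
	sm->consumer_index = 0;
}

static void cmdq_sm_stopped(struct cmdq_sm *sm, int event)
{
	if (event == 1 /* START */)
		FSM_SET_STATE(sm, ready);
}

static void cmdq_sm_ready_entry(struct cmdq_sm *sm) { (void)sm; puts("ready"); }

static void cmdq_sm_ready(struct cmdq_sm *sm, int event)
{
	if (event == 0 /* STOP */)
		FSM_SET_STATE(sm, stopped);
}

int main(void)
{
	struct cmdq_sm sm;

	FSM_SET_STATE(&sm, stopped);
	sm.state(&sm, 1);  /* deliver START: stopped -> ready */
	return 0;
}
```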
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/brocade/bna/ |
| D | bfa_msgq.c |
     31: static void bfa_msgq_cmdq_dbell(struct bfa_msgq_cmdq *cmdq);
     32: static void bfa_msgq_cmdq_copy_rsp(struct bfa_msgq_cmdq *cmdq);
     43: bfa_fsm_state_decl(cmdq, stopped, struct bfa_msgq_cmdq, enum cmdq_event);
     44: bfa_fsm_state_decl(cmdq, init_wait, struct bfa_msgq_cmdq, enum cmdq_event);
     45: bfa_fsm_state_decl(cmdq, ready, struct bfa_msgq_cmdq, enum cmdq_event);
     46: bfa_fsm_state_decl(cmdq, dbell_wait, struct bfa_msgq_cmdq,
     50: cmdq_sm_stopped_entry(struct bfa_msgq_cmdq *cmdq)
     54:         cmdq->producer_index = 0;
     55:         cmdq->consumer_index = 0;
     56:         cmdq->flags = 0;
     [all …]
|
| /kernel/linux/linux-5.10/include/linux/soc/mediatek/ |
| D | mtk-cmdq.h |
     11: #include <linux/mailbox/mtk-cmdq-mailbox.h>
     36:  * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
     37:  *                             node of CMDQ client
     38:  * @dev:        device of CMDQ mailbox client
     39:  * @client_reg: CMDQ client reg pointer
     44:  * Help CMDQ client parsing the cmdq client reg
     45:  * from the device node of CMDQ client.
     51:  * cmdq_mbox_create() - create CMDQ mailbox client and channel
     52:  * @dev:        device of CMDQ mailbox client
     53:  * @index:      index of CMDQ mailbox channel
     [all …]
|
| /kernel/linux/linux-6.6/include/linux/soc/mediatek/ |
| D | mtk-cmdq.h |
     11: #include <linux/mailbox/mtk-cmdq-mailbox.h>
     33:  * cmdq_dev_get_client_reg() - parse cmdq client reg from the device
     34:  *                             node of CMDQ client
     35:  * @dev:        device of CMDQ mailbox client
     36:  * @client_reg: CMDQ client reg pointer
     41:  * Help CMDQ client parsing the cmdq client reg
     42:  * from the device node of CMDQ client.
     48:  * cmdq_mbox_create() - create CMDQ mailbox client and channel
     49:  * @dev:        device of CMDQ mailbox client
     50:  * @index:      index of CMDQ mailbox channel
     [all …]
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/huawei/hinic/ |
| D | hinic_hw_cmdq.c |
     78: #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
     79:                                 struct hinic_cmdqs, cmdq[0])
    320: static void cmdq_set_db(struct hinic_cmdq *cmdq,
    332:         writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
    335: static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
    343:         struct hinic_wq *wq = cmdq->wq;
    348:         spin_lock_bh(&cmdq->cmdq_lock);
    353:         spin_unlock_bh(&cmdq->cmdq_lock);
    359:         wrapped = cmdq->wrapped;
    364:         cmdq->wrapped = !cmdq->wrapped;
    [all …]
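cmdq_sync_cmd_direct_resp() tracks a `wrapped` epoch bit that flips each time the producer index wraps past the end of the work queue; stamping entries with the current epoch is a common NIC command-queue convention for telling fresh entries from stale ones. A toy model of that scheme; the depth, field names, and ownership convention are illustrative, not HiNIC's exact ABI:

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define Q_DEPTH 4  /* illustrative depth */

struct wqe { uint32_t payload; bool owner; };

struct cmdq_sketch {
	struct wqe ring[Q_DEPTH];
	uint32_t prod_idx;
	bool wrapped;  /* flips on every wrap of prod_idx */
};

/*
 * Post one entry stamped with the current epoch bit; the consumer
 * compares the bit against its own epoch to decide whether an entry
 * belongs to the current pass over the ring.
 */
static void cmdq_post(struct cmdq_sketch *q, uint32_t payload)
{
	struct wqe *w = &q->ring[q->prod_idx];

	w->payload = payload;
	w->owner   = q->wrapped;  /* real hardware writes this last */

	if (++q->prod_idx == Q_DEPTH) {  /* wrap: toggle the epoch bit */
		q->prod_idx = 0;
		q->wrapped = !q->wrapped;
	}
}

int main(void)
{
	struct cmdq_sketch q = { .wrapped = true };

	for (uint32_t i = 0; i < 6; i++)
		cmdq_post(&q, i);
	printf("prod_idx=%u wrapped=%d\n", q.prod_idx, q.wrapped);
	return 0;
}
```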
|
| D | hinic_hw_io.c |
    119:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in write_sq_ctxts() */
    163:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in write_rq_ctxts() */
    220:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in hinic_clean_queue_offload_ctxt() */
    533:         enum hinic_cmdq_type cmdq, type;  /* in hinic_io_init() */
    565:         for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
    568:                         dev_err(&pdev->dev, "Failed to get cmdq db area\n");
    573:                 func_to_io->cmdq_db_area[cmdq] = db_area;
    600:         for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
    619:         enum hinic_cmdq_type cmdq;  /* in hinic_io_free() */
    628:         for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
    [all …]
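hinic_io_init() acquires a doorbell area per command-queue type and, when one acquisition fails, releases only the areas obtained so far by looping `type < cmdq`, i.e. up to the failure point. A compact sketch of that unwind-what-you-did error pattern; the helper names are hypothetical:

```c
#include <stdio.h>

enum cmdq_type { CMDQ_SYNC, CMDQ_ASYNC, CMDQ_MAX_TYPES };

/* Pretend acquisition that fails for the ASYNC slot. */
static void *get_db_area(enum cmdq_type t) { return t == CMDQ_ASYNC ? NULL : (void *)1; }
static void put_db_area(enum cmdq_type t) { printf("released db area %d\n", t); }

static int io_init(void *db_area[CMDQ_MAX_TYPES])
{
	enum cmdq_type cmdq, type;

	for (cmdq = CMDQ_SYNC; cmdq < CMDQ_MAX_TYPES; cmdq++) {
		db_area[cmdq] = get_db_area(cmdq);
		if (!db_area[cmdq])
			goto err;  /* cmdq marks the first unacquired slot */
	}
	return 0;

err:
	/* Release only the areas acquired before the failure point. */
	for (type = CMDQ_SYNC; type < cmdq; type++)
		put_db_area(type);
	return -1;
}

int main(void)
{
	void *db_area[CMDQ_MAX_TYPES];

	return io_init(db_area) ? 0 : 1;  /* the failure path is expected here */
}
```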
|
| /kernel/linux/linux-5.10/drivers/net/ethernet/huawei/hinic/ |
| D | hinic_hw_cmdq.c |
     78: #define cmdq_to_cmdqs(cmdq) container_of((cmdq) - (cmdq)->cmdq_type, \
     79:                                 struct hinic_cmdqs, cmdq[0])
    325: static void cmdq_set_db(struct hinic_cmdq *cmdq,
    337:         writel(db_info, CMDQ_DB_ADDR(cmdq->db_base, prod_idx));
    340: static int cmdq_sync_cmd_direct_resp(struct hinic_cmdq *cmdq,
    348:         struct hinic_wq *wq = cmdq->wq;
    353:         spin_lock_bh(&cmdq->cmdq_lock);
    358:         spin_unlock_bh(&cmdq->cmdq_lock);
    364:         wrapped = cmdq->wrapped;
    369:         cmdq->wrapped = !cmdq->wrapped;
    [all …]
|
| D | hinic_hw_io.c |
    119:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in write_sq_ctxts() */
    163:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in write_rq_ctxts() */
    220:                 dev_err(&pdev->dev, "Failed to allocate cmdq buf\n");  /* in hinic_clean_queue_offload_ctxt() */
    534:         enum hinic_cmdq_type cmdq, type;  /* in hinic_io_init() */
    566:         for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++) {
    569:                         dev_err(&pdev->dev, "Failed to get cmdq db area\n");
    574:                 func_to_io->cmdq_db_area[cmdq] = db_area;
    601:         for (type = HINIC_CMDQ_SYNC; type < cmdq; type++)
    620:         enum hinic_cmdq_type cmdq;  /* in hinic_io_free() */
    629:         for (cmdq = HINIC_CMDQ_SYNC; cmdq < HINIC_MAX_CMDQ_TYPES; cmdq++)
    [all …]
|
| /kernel/linux/linux-6.6/drivers/accel/ivpu/ |
| D | ivpu_job.c |
     31: static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
     33:         ivpu_hw_reg_db_set(vdev, cmdq->db_id);
     40:         struct ivpu_cmdq *cmdq;  /* in ivpu_cmdq_alloc() */
     42:         cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
     43:         if (!cmdq)
     46:         cmdq->mem = ivpu_bo_alloc_internal(vdev, 0, SZ_4K, DRM_IVPU_BO_WC);
     47:         if (!cmdq->mem)
     50:         cmdq->db_id = file_priv->ctx.id + engine * ivpu_get_context_count(vdev);
     51:         cmdq->entry_count = (u32)((cmdq->mem->base.size - sizeof(struct vpu_job_queue_header)) /
     54:         cmdq->jobq = (struct vpu_job_queue *)cmdq->mem->kvaddr;
     [all …]
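ivpu_cmdq_alloc() derives the ring capacity by carving a fixed job-queue header out of the 4 KiB buffer and dividing the remainder by the entry size. The same arithmetic in isolation; both struct layouts below are invented stand-ins, not the real VPU ABI:

```c
#include <stdint.h>
#include <stdio.h>

#define SZ_4K 4096u

/* Hypothetical layouts, for the size arithmetic only. */
struct vpu_job_queue_header { uint32_t engine_idx, head, tail; };
struct vpu_job_queue_entry  { uint64_t batch_buf_addr; uint32_t job_id, flags; };

int main(void)
{
	/* Capacity = (buffer - header) / entry size, truncated. */
	uint32_t entry_count =
		(uint32_t)((SZ_4K - sizeof(struct vpu_job_queue_header)) /
			   sizeof(struct vpu_job_queue_entry));

	printf("entries per 4K queue: %u\n", entry_count);
	return 0;
}
```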
|
| D | ivpu_mmu.c |
    189: #define IVPU_MMU_GERROR_ERR_MASK ((REG_FLD(VPU_37XX_HOST_MMU_GERROR, CMDQ)) | \
    239:                 return "Unknown CMDQ command";  /* in ivpu_mmu_event_to_str() */
    315:         struct ivpu_mmu_queue *q = &mmu->cmdq;  /* in ivpu_mmu_cmdq_alloc() */
    325:         ivpu_dbg(vdev, MMU, "CMDQ alloc: dma=%pad dma_q=%pad size=%u\n",
    368:                 ivpu_err(vdev, "Failed to allocate cmdq: %d\n", ret);  /* in ivpu_mmu_structs_alloc() */
    408:         struct ivpu_mmu_queue *cmdq = &vdev->mmu->cmdq;  /* in ivpu_mmu_cmdq_wait_for_cons() */
    410:         return REGV_POLL(VPU_37XX_HOST_MMU_CMDQ_CONS, cmdq->cons, (cmdq->prod == cmdq->cons),
    416:         struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;  /* in ivpu_mmu_cmdq_cmd_write() */
    436:         struct ivpu_mmu_queue *q = &vdev->mmu->cmdq;  /* in ivpu_mmu_cmdq_sync() */
    488:         memset(mmu->cmdq.base, 0, IVPU_MMU_CMDQ_SIZE);  /* in ivpu_mmu_reset() */
    [all …]
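ivpu_mmu_cmdq_wait_for_cons() polls the consumer register until it matches the producer index, i.e. until the MMU has drained the command queue. A userspace stand-in for REGV_POLL, with a simulated register and a 1 ms poll interval:

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

/* Simulated hardware consumer register, advanced elsewhere (IRQ/thread). */
static _Atomic uint32_t hw_cons;

static uint32_t read_cons_reg(void) { return atomic_load(&hw_cons); }

/* Poll until cons == prod or the timeout expires; true means drained. */
static bool cmdq_wait_for_cons(uint32_t prod, unsigned timeout_ms)
{
	struct timespec ts = { .tv_nsec = 1000 * 1000 };  /* 1 ms per poll */

	for (unsigned i = 0; i < timeout_ms; i++) {
		if (read_cons_reg() == prod)
			return true;
		nanosleep(&ts, NULL);
	}
	return false;
}

int main(void)
{
	atomic_store(&hw_cons, 42);               /* queue already drained */
	return cmdq_wait_for_cons(42, 10) ? 0 : 1;
}
```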
|
| /kernel/linux/linux-5.10/drivers/infiniband/hw/bnxt_re/ |
| D | qplib_rcfw.c |
     58:         struct bnxt_qplib_cmdq_ctx *cmdq;  /* in __wait_for_resp() */
     62:         cmdq = &rcfw->cmdq;
     64:         rc = wait_event_timeout(cmdq->waitq,
     65:                                 !test_bit(cbit, cmdq->cmdq_bitmap),
     73:         struct bnxt_qplib_cmdq_ctx *cmdq;  /* in __block_for_resp() */
     76:         cmdq = &rcfw->cmdq;
     78:         if (!test_bit(cbit, cmdq->cmdq_bitmap))
     83:         } while (test_bit(cbit, cmdq->cmdq_bitmap) && --count);
     91:         struct bnxt_qplib_cmdq_ctx *cmdq = &rcfw->cmdq;  /* in __send_message() */
     92:         struct bnxt_qplib_hwq *hwq = &cmdq->hwq;
     [all …]
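__wait_for_resp() parks the caller on a waitqueue until the completion path clears the command's bit in cmdq_bitmap; __block_for_resp() is the busy-poll twin for contexts that cannot sleep. A pthread sketch of the sleeping variant, with the bitmap shrunk to one word and timeout handling omitted:

```c
#include <pthread.h>

struct cmdq_ctx {
	pthread_mutex_t lock;
	pthread_cond_t waitq;
	unsigned long pending;  /* one bit per in-flight command */
};

/* Completion path (normally the IRQ handler): clear the bit, wake waiters. */
static void cmdq_complete(struct cmdq_ctx *c, int cbit)
{
	pthread_mutex_lock(&c->lock);
	c->pending &= ~(1UL << cbit);
	pthread_cond_broadcast(&c->waitq);
	pthread_mutex_unlock(&c->lock);
}

/* Submission path: block until our command's bit is cleared. */
static void wait_for_resp(struct cmdq_ctx *c, int cbit)
{
	pthread_mutex_lock(&c->lock);
	while (c->pending & (1UL << cbit))
		pthread_cond_wait(&c->waitq, &c->lock);
	pthread_mutex_unlock(&c->lock);
}

int main(void)
{
	struct cmdq_ctx c = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.waitq = PTHREAD_COND_INITIALIZER,
		.pending = 1UL << 5,
	};

	cmdq_complete(&c, 5);  /* normally runs from the completion IRQ */
	wait_for_resp(&c, 5);  /* returns immediately: bit already clear */
	return 0;
}
```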
|
| /kernel/linux/linux-6.6/drivers/infiniband/hw/bnxt_re/ |
| D | qplib_rcfw.c |
    114:         struct bnxt_qplib_cmdq_ctx *cmdq;  /* in bnxt_re_is_fw_stalled() */
    118:         cmdq = &rcfw->cmdq;
    120:         if (time_after(jiffies, cmdq->last_seen +
    123:                         "%s: FW STALL Detected. cmdq[%#x]=%#x waited (%d > %d) msec active %d ",
    125:                         jiffies_to_msecs(jiffies - cmdq->last_seen),
    147:         struct bnxt_qplib_cmdq_ctx *cmdq;  /* in __wait_for_resp() */
    151:         cmdq = &rcfw->cmdq;
    155:                 if (test_bit(ERR_DEVICE_DETACHED, &cmdq->flags))
    157:                 if (test_bit(FIRMWARE_STALL_DETECTED, &cmdq->flags))
    160:                 wait_event_timeout(cmdq->waitq,
    [all …]
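The 6.6 rework adds bnxt_re_is_fw_stalled(): if jiffies runs past cmdq->last_seen plus a threshold while a command is outstanding, the driver declares a firmware stall instead of waiting forever. A minimal monotonic-clock version of that watchdog; the threshold value is illustrative:

```c
#include <stdbool.h>
#include <stdint.h>
#include <time.h>

#define STALL_TIMEOUT_MS 10000  /* illustrative threshold */

static uint64_t now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (uint64_t)ts.tv_sec * 1000 + ts.tv_nsec / 1000000;
}

struct cmdq_ctx {
	uint64_t last_seen_ms;  /* updated whenever the fw makes progress */
};

/* Called on fw progress (e.g. from the completion handler). */
static void cmdq_touch(struct cmdq_ctx *c)
{
	c->last_seen_ms = now_ms();
}

/* True when the fw has not consumed anything for too long. */
static bool cmdq_is_fw_stalled(const struct cmdq_ctx *c)
{
	return now_ms() - c->last_seen_ms > STALL_TIMEOUT_MS;
}

int main(void)
{
	struct cmdq_ctx c;

	cmdq_touch(&c);
	return cmdq_is_fw_stalled(&c);  /* 0: we just saw progress */
}
```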
|
| /kernel/linux/linux-6.6/drivers/net/ethernet/hisilicon/hns3/hns3_common/ |
| D | hclge_comm_cmd.c |
    484:         /* If CMDQ ring is full, SW HEAD and HW HEAD may be different,  (in hclge_comm_cmd_send())
    529:         struct hclge_comm_cmq *cmdq = &hw->cmq;  /* in hclge_comm_cmd_uninit() */
    538:         spin_lock_bh(&cmdq->csq.lock);
    539:         spin_lock(&cmdq->crq.lock);
    541:         spin_unlock(&cmdq->crq.lock);
    542:         spin_unlock_bh(&cmdq->csq.lock);
    544:         hclge_comm_free_cmd_desc(&cmdq->csq);
    545:         hclge_comm_free_cmd_desc(&cmdq->crq);
    550:         struct hclge_comm_cmq *cmdq = &hw->cmq;  /* in hclge_comm_cmd_queue_init() */
    554:         spin_lock_init(&cmdq->csq.lock);
    [all …]
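hclge_comm_cmd_uninit() nests the receive-queue (CRQ) lock inside the bottom-half-safe send-queue (CSQ) lock and releases in reverse order; keeping that order identical on every path that takes both is what makes the nesting deadlock-free. A pthread rendering of the discipline, reduced to the locking skeleton:

```c
#include <pthread.h>

struct cmq_ring { pthread_mutex_t lock; /* + descriptors, head, tail ... */ };
struct cmq { struct cmq_ring csq, crq; };

/*
 * Lock order is always csq then crq (unlock in reverse). If one path
 * took crq first, two threads could each hold one lock and wait on the
 * other forever.
 */
static void cmq_quiesce(struct cmq *q)
{
	pthread_mutex_lock(&q->csq.lock);  /* spin_lock_bh() in the driver */
	pthread_mutex_lock(&q->crq.lock);  /* nested spin_lock()           */

	/* ... mark the queues disabled while both are held ... */

	pthread_mutex_unlock(&q->crq.lock);
	pthread_mutex_unlock(&q->csq.lock);
}

int main(void)
{
	struct cmq q = {
		.csq.lock = PTHREAD_MUTEX_INITIALIZER,
		.crq.lock = PTHREAD_MUTEX_INITIALIZER,
	};

	cmq_quiesce(&q);
	return 0;
}
```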
|
| /kernel/linux/linux-5.10/drivers/iommu/arm/arm-smmu-v3/ |
| D | arm-smmu-v3.c |
    353:         struct arm_smmu_queue *q = &smmu->cmdq.q;  /* in arm_smmu_cmdq_build_sync_cmd() */
    381:         struct arm_smmu_queue *q = &smmu->cmdq.q;  /* in arm_smmu_cmdq_skip_err() */
    388:         dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,
    411:          * not to touch any of the shadow cmdq state.
    439: static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
    449:         if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
    453:                 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
    454:         } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
    457: static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
    459:         (void)atomic_dec_return_release(&cmdq->lock);
    [all …]
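arm_smmu_cmdq_shared_lock() builds a shared/exclusive lock out of one atomic counter: readers fetch-and-increment and are in if the old value was non-negative; otherwise they wait for the exclusive holder (which parks the counter at a negative value) and CAS themselves in. A C11 approximation of both sides; the kernel's relaxed/acquire/release ordering annotations are dropped for brevity:

```c
#include <limits.h>
#include <stdatomic.h>

/*
 * count >= 0: free (0) or held shared (number of readers);
 * count <  0: held exclusive (parked at INT_MIN).
 */
static atomic_int lock_word;

static void shared_lock(void)
{
	int val;

	/* Fast path: the increment itself takes the lock if no writer. */
	if (atomic_fetch_add(&lock_word, 1) >= 0)
		return;

	/*
	 * Slow path: the stray increment above is absorbed when the
	 * writer releases (it stores 0); wait for that, then CAS in.
	 */
	do {
		while ((val = atomic_load(&lock_word)) < 0)
			;  /* spin until the exclusive holder releases */
	} while (!atomic_compare_exchange_weak(&lock_word, &val, val + 1));
}

static void shared_unlock(void)
{
	atomic_fetch_sub(&lock_word, 1);
}

static void exclusive_lock(void)
{
	int expected = 0;

	/* Succeeds only when there are no readers and no writer. */
	while (!atomic_compare_exchange_weak(&lock_word, &expected, INT_MIN))
		expected = 0;
}

static void exclusive_unlock(void)
{
	/* Store, not add: discards increments from failed reader fast paths. */
	atomic_store(&lock_word, 0);
}

int main(void)
{
	shared_lock();
	shared_unlock();
	exclusive_lock();
	exclusive_unlock();
	return 0;
}
```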
|
| /kernel/linux/linux-6.6/drivers/gpu/drm/nouveau/nvkm/engine/sec2/ |
| D | base.c |
     45:         struct nvkm_falcon_cmdq *cmdq = sec2->cmdq;  /* in nvkm_sec2_fini() */
     56:         ret = nvkm_falcon_cmdq_send(cmdq, &cmd, nvkm_sec2_finimsg, sec2,
     68:         nvkm_falcon_cmdq_fini(cmdq);
    119:         nvkm_falcon_cmdq_del(&sec2->cmdq);  /* in nvkm_sec2_dtor() */
    159:             (ret = nvkm_falcon_cmdq_new(sec2->qmgr, "cmdq", &sec2->cmdq)) ||  /* in nvkm_sec2_new_() */
    [all …]
|
| /kernel/linux/linux-6.6/drivers/iommu/arm/arm-smmu-v3/ |
| D | arm-smmu-v3.c |
    350:         return &smmu->cmdq;  /* in arm_smmu_get_cmdq() */
    390:         dev_err(smmu->dev, "CMDQ error (cons 0x%08x): %s\n", cons,  /* in __arm_smmu_cmdq_skip_err() */
    414:          * not to touch any of the shadow cmdq state.
    429:         __arm_smmu_cmdq_skip_err(smmu, &smmu->cmdq.q);  /* in arm_smmu_cmdq_skip_err() */
    444: static void arm_smmu_cmdq_shared_lock(struct arm_smmu_cmdq *cmdq)
    454:         if (atomic_fetch_inc_relaxed(&cmdq->lock) >= 0)
    458:                 val = atomic_cond_read_relaxed(&cmdq->lock, VAL >= 0);
    459:         } while (atomic_cmpxchg_relaxed(&cmdq->lock, val, val + 1) != val);
    462: static void arm_smmu_cmdq_shared_unlock(struct arm_smmu_cmdq *cmdq)
    464:         (void)atomic_dec_return_release(&cmdq->lock);
    [all …]
|