/drivers/crypto/ccp/ |
D | ccp-dev-v5.c |
    31  static u32 ccp_lsb_alloc(struct ccp_cmd_queue *cmd_q, unsigned int count)  in ccp_lsb_alloc() argument
    37  if (cmd_q->lsb >= 0) {  in ccp_lsb_alloc()
    38  start = (u32)bitmap_find_next_zero_area(cmd_q->lsbmap,  in ccp_lsb_alloc()
    42  bitmap_set(cmd_q->lsbmap, start, count);  in ccp_lsb_alloc()
    43  return start + cmd_q->lsb * LSB_SIZE;  in ccp_lsb_alloc()
    48  ccp = cmd_q->ccp;  in ccp_lsb_alloc()
    76  static void ccp_lsb_free(struct ccp_cmd_queue *cmd_q, unsigned int start,  in ccp_lsb_free() argument
    82  if (cmd_q->lsb == start) {  in ccp_lsb_free()
    84  bitmap_clear(cmd_q->lsbmap, start, count);  in ccp_lsb_free()
    87  struct ccp_device *ccp = cmd_q->ccp;  in ccp_lsb_free()
    [all …]
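The ccp-dev-v5.c hits above show the v5 CCP driver carving LSB (local storage block) slots out of a per-queue bitmap: bitmap_find_next_zero_area() locates a free run, bitmap_set() claims it, and bitmap_clear() in ccp_lsb_free() releases it. Below is a rough, self-contained user-space sketch of that contiguous-slots-from-a-bitmap idea; the slot count, names and 32-bit map are made up for illustration and are not the driver's.

#include <stdint.h>
#include <stdio.h>

#define NUM_SLOTS 32u            /* hypothetical number of slots per queue */

static uint32_t slot_map;        /* bit i set => slot i is in use */

/* Find 'count' contiguous free slots, mark them used and return the first
 * index, or -1 if no such run exists (the find/set half of the pattern). */
static int slots_alloc(unsigned int count)
{
    for (unsigned int start = 0; start + count <= NUM_SLOTS; start++) {
        uint32_t mask = (count < 32 ? (1u << count) - 1u : ~0u) << start;

        if (!(slot_map & mask)) {
            slot_map |= mask;
            return (int)start;
        }
    }
    return -1;
}

/* Release a previously allocated run (the bitmap_clear() half). */
static void slots_free(unsigned int start, unsigned int count)
{
    uint32_t mask = (count < 32 ? (1u << count) - 1u : ~0u) << start;

    slot_map &= ~mask;
}

int main(void)
{
    int a = slots_alloc(4);
    int b = slots_alloc(2);

    printf("a=%d b=%d map=0x%08x\n", a, b, slot_map);
    slots_free((unsigned int)a, 4);
    printf("after free: map=0x%08x\n", slot_map);
    return 0;
}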
|
D | ccp-dev-v3.c |
    23  static u32 ccp_alloc_ksb(struct ccp_cmd_queue *cmd_q, unsigned int count)  in ccp_alloc_ksb() argument
    26  struct ccp_device *ccp = cmd_q->ccp;  in ccp_alloc_ksb()
    54  static void ccp_free_ksb(struct ccp_cmd_queue *cmd_q, unsigned int start,  in ccp_free_ksb() argument
    57  struct ccp_device *ccp = cmd_q->ccp;  in ccp_free_ksb()
    73  static unsigned int ccp_get_free_slots(struct ccp_cmd_queue *cmd_q)  in ccp_get_free_slots() argument
    75  return CMD_Q_DEPTH(ioread32(cmd_q->reg_status));  in ccp_get_free_slots()
    80  struct ccp_cmd_queue *cmd_q = op->cmd_q;  in ccp_do_cmd() local
    81  struct ccp_device *ccp = cmd_q->ccp;  in ccp_do_cmd()
    91  cmd_q->free_slots--;  in ccp_do_cmd()
    93  cr0 = (cmd_q->id << REQ0_CMD_Q_SHIFT)  in ccp_do_cmd()
    [all …]
|
D | ccp-ops.c |
    141  struct ccp_cmd_queue *cmd_q,  in ccp_init_dm_workarea() argument
    150  wa->dev = cmd_q->ccp->dev;  in ccp_init_dm_workarea()
    154  wa->dma_pool = cmd_q->dma_pool;  in ccp_init_dm_workarea()
    251  static void ccp_free_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q)  in ccp_free_data() argument
    257  static int ccp_init_data(struct ccp_data *data, struct ccp_cmd_queue *cmd_q,  in ccp_init_data() argument
    266  ret = ccp_init_sg_workarea(&data->sg_wa, cmd_q->ccp->dev, sg, sg_len,  in ccp_init_data()
    271  ret = ccp_init_dm_workarea(&data->dm_wa, cmd_q, dm_len, dir);  in ccp_init_data()
    278  ccp_free_data(data, cmd_q);  in ccp_init_data()
    415  static int ccp_copy_to_from_sb(struct ccp_cmd_queue *cmd_q,  in ccp_copy_to_from_sb() argument
    423  op.cmd_q = cmd_q;  in ccp_copy_to_from_sb()
    [all …]
|
D | ccp-debugfs.c |
    118  struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];  in ccp5_debugfs_stats_read() local
    120  total_ops += cmd_q->total_ops;  in ccp5_debugfs_stats_read()
    121  total_aes_ops += cmd_q->total_aes_ops;  in ccp5_debugfs_stats_read()
    122  total_xts_aes_ops += cmd_q->total_xts_aes_ops;  in ccp5_debugfs_stats_read()
    123  total_3des_ops += cmd_q->total_3des_ops;  in ccp5_debugfs_stats_read()
    124  total_sha_ops += cmd_q->total_sha_ops;  in ccp5_debugfs_stats_read()
    125  total_rsa_ops += cmd_q->total_rsa_ops;  in ccp5_debugfs_stats_read()
    126  total_pt_ops += cmd_q->total_pt_ops;  in ccp5_debugfs_stats_read()
    127  total_ecc_ops += cmd_q->total_ecc_ops;  in ccp5_debugfs_stats_read()
    161  static void ccp5_debugfs_reset_queue_stats(struct ccp_cmd_queue *cmd_q)  in ccp5_debugfs_reset_queue_stats() argument
    [all …]
|
D | ccp-dev.c |
    312  if (ccp->cmd_q[i].active)  in ccp_enqueue_cmd()
    324  wake_up_process(ccp->cmd_q[i].kthread);  in ccp_enqueue_cmd()
    346  if (ccp->cmd_q[i].active)  in ccp_do_cmd_backlog()
    356  wake_up_process(ccp->cmd_q[i].kthread);  in ccp_do_cmd_backlog()
    359  static struct ccp_cmd *ccp_dequeue_cmd(struct ccp_cmd_queue *cmd_q)  in ccp_dequeue_cmd() argument
    361  struct ccp_device *ccp = cmd_q->ccp;  in ccp_dequeue_cmd()
    368  cmd_q->active = 0;  in ccp_dequeue_cmd()
    371  cmd_q->suspended = 1;  in ccp_dequeue_cmd()
    380  cmd_q->active = 1;  in ccp_dequeue_cmd()
    421  struct ccp_cmd_queue *cmd_q = (struct ccp_cmd_queue *)data;  in ccp_cmd_queue_thread() local
    [all …]
|
D | ccp-dev.h |
    383  struct ccp_cmd_queue cmd_q[MAX_HW_QUEUES];  member
    533  struct ccp_cmd_queue *cmd_q;  member
    642  int ccp_run_cmd(struct ccp_cmd_queue *cmd_q, struct ccp_cmd *cmd);
|
/drivers/scsi/ibmvscsi_tgt/ |
D | ibmvscsi_tgt.c |
    286  bytes = vscsi->cmd_q.size * PAGE_SIZE;  in ibmvscsis_free_command_q()
    287  memset(vscsi->cmd_q.base_addr, 0, bytes);  in ibmvscsis_free_command_q()
    288  vscsi->cmd_q.index = 0;  in ibmvscsis_free_command_q()
    371  crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask, &vscsi->cmd_q.index,  in ibmvscsis_check_init_msg()
    372  vscsi->cmd_q.base_addr);  in ibmvscsis_check_init_msg()
    385  crq = ibmvscsis_cmd_q_dequeue(vscsi->cmd_q.mask,  in ibmvscsis_check_init_msg()
    386  &vscsi->cmd_q.index,  in ibmvscsis_check_init_msg()
    387  vscsi->cmd_q.base_addr);  in ibmvscsis_check_init_msg()
    901  bytes = vscsi->cmd_q.size * PAGE_SIZE;  in ibmvscsis_reset_queue()
    903  vscsi->cmd_q.crq_token, bytes);  in ibmvscsis_reset_queue()
    [all …]
|
D | ibmvscsi_tgt.h | 277 struct cmd_queue cmd_q; member
|
/drivers/platform/olpc/ |
D | olpc-ec.c |
    40  struct list_head cmd_q;  member
    76  if (!list_empty(&ec->cmd_q)) {  in olpc_ec_worker()
    77  desc = list_first_entry(&ec->cmd_q, struct ec_cmd_desc, node);  in olpc_ec_worker()
    111  list_add_tail(&desc->node, &ec->cmd_q);  in queue_ec_descriptor()
    274  INIT_LIST_HEAD(&ec->cmd_q);  in olpc_ec_probe()
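The olpc-ec.c hits show the usual kernel pattern of an intrusive list used as a command queue: producers append descriptors with list_add_tail() and the worker drains the queue from the front with list_first_entry(). The following is a minimal user-space stand-in for that pattern only; the real API lives in <linux/list.h>, struct ec_cmd_desc is simplified here, and the spinlock that protects ec->cmd_q in the driver is omitted.

#include <stddef.h>
#include <stdio.h>

/* Minimal stand-in for the kernel's intrusive list_head. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }
static int  list_empty(const struct list_head *h) { return h->next == h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
    entry->prev = head->prev;
    entry->next = head;
    head->prev->next = entry;
    head->prev = entry;
}

static void list_del(struct list_head *e)
{
    e->prev->next = e->next;
    e->next->prev = e->prev;
    e->next = e->prev = NULL;
}

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_first_entry(head, type, member) \
    container_of((head)->next, type, member)

/* Simplified command descriptor, queued by a producer and drained by a
 * worker, as in the olpc-ec matches above. */
struct ec_cmd_desc { int cmd; struct list_head node; };

int main(void)
{
    struct list_head cmd_q;
    struct ec_cmd_desc a = { .cmd = 1 }, b = { .cmd = 2 };

    INIT_LIST_HEAD(&cmd_q);
    list_add_tail(&a.node, &cmd_q);      /* queue_ec_descriptor() side */
    list_add_tail(&b.node, &cmd_q);

    while (!list_empty(&cmd_q)) {        /* olpc_ec_worker() side */
        struct ec_cmd_desc *d =
            list_first_entry(&cmd_q, struct ec_cmd_desc, node);
        list_del(&d->node);
        printf("processing cmd %d\n", d->cmd);
    }
    return 0;
}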
|
/drivers/staging/unisys/include/ |
D | iochannel.h | 563 struct signal_queue_header cmd_q; member
|
/drivers/scsi/ |
D | sg.c |
    154  char cmd_q; /* 1 -> allow command queuing, 0 -> don't */  member
    736  sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */  in sg_new_write()
    1055  sfp->cmd_q = val ? 1 : 0;  in sg_ioctl()
    1058  return put_user((int) sfp->cmd_q, ip);  in sg_ioctl()
    1209  else if (!sfp->cmd_q) {  in sg_poll()
    2124  if (!sfp->cmd_q)  in sg_add_request()
    2183  sfp->cmd_q = SG_DEF_COMMAND_Q;  in sg_add_sfp()
    2649  (int) fp->cmd_q, (int) fp->force_packid,  in sg_proc_debug_helper()
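In sg.c, cmd_q is the per-file-descriptor flag that says whether the SCSI generic layer may queue more than one command at a time; sg_ioctl() sets and reports it, and sg_add_request() consults it when a new request arrives. From user space the flag is driven through the SG_SET_COMMAND_Q / SG_GET_COMMAND_Q ioctls; a small example follows (the /dev/sg0 path is only an assumption about which node exists on the machine).

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <scsi/sg.h>

int main(void)
{
    int fd = open("/dev/sg0", O_RDWR | O_NONBLOCK);  /* device node is an assumption */
    int on = 1, cur = 0;

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, SG_SET_COMMAND_Q, &on) < 0)    /* sets sfp->cmd_q in sg_ioctl() */
        perror("SG_SET_COMMAND_Q");
    if (ioctl(fd, SG_GET_COMMAND_Q, &cur) == 0)  /* read back via put_user() */
        printf("command queuing: %d\n", cur);

    close(fd);
    return 0;
}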
|
/drivers/net/ethernet/brocade/bna/ |
D | bfa_ioc.c |
    2132  INIT_LIST_HEAD(&mod->cmd_q);  in bfa_ioc_mbox_attach()
    2152  if (list_empty(&mod->cmd_q))  in bfa_ioc_mbox_poll()
    2165  cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);  in bfa_ioc_mbox_poll()
    2187  while (!list_empty(&mod->cmd_q)) {  in bfa_ioc_mbox_flush()
    2188  cmd = list_first_entry(&mod->cmd_q, struct bfa_mbox_cmd, qe);  in bfa_ioc_mbox_flush()
    2668  if (!list_empty(&mod->cmd_q)) {  in bfa_nw_ioc_mbox_queue()
    2669  list_add_tail(&cmd->qe, &mod->cmd_q);  in bfa_nw_ioc_mbox_queue()
    2678  list_add_tail(&cmd->qe, &mod->cmd_q);  in bfa_nw_ioc_mbox_queue()
|
D | bfa_ioc.h | 117 struct list_head cmd_q; /*!< pending mbox queue */ member
|
/drivers/net/wireless/intel/iwlwifi/mvm/ |
D | utils.c |
    1152  bool tdls, bool cmd_q)  in iwl_mvm_get_wd_timeout() argument
    1157  cmd_q ? IWL_DEF_WD_TIMEOUT : mvm->cfg->base_params->wd_timeout;  in iwl_mvm_get_wd_timeout()
    1178  if (cmd_q)  in iwl_mvm_get_wd_timeout()
|
D | mvm.h | 1833 bool tdls, bool cmd_q);
|
/drivers/scsi/bfa/ |
D | bfa_ioc.c |
    90  (!list_empty(&((__ioc)->mbox_mod.cmd_q)) || \
    1996  INIT_LIST_HEAD(&mod->cmd_q);  in bfa_ioc_mbox_attach()
    2016  if (list_empty(&mod->cmd_q))  in bfa_ioc_mbox_poll()
    2029  bfa_q_deq(&mod->cmd_q, &cmd);  in bfa_ioc_mbox_poll()
    2042  while (!list_empty(&mod->cmd_q))  in bfa_ioc_mbox_flush()
    2043  bfa_q_deq(&mod->cmd_q, &cmd);  in bfa_ioc_mbox_flush()
    2583  if (!list_empty(&mod->cmd_q)) {  in bfa_ioc_mbox_queue()
    2584  list_add_tail(&cmd->qe, &mod->cmd_q);  in bfa_ioc_mbox_queue()
    2593  list_add_tail(&cmd->qe, &mod->cmd_q);  in bfa_ioc_mbox_queue()
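Judging by these hits, both bfa_ioc.c copies (the bna one above and this one) use cmd_q for the same mailbox discipline: the queue routine appends the command to the pending list when entries are already waiting (or the mailbox is busy) and only writes it straight to the hardware otherwise, while the poll routine later drains the deferred entries. Below is a loose user-space sketch of that enqueue-or-submit decision; the FIFO, the hw_busy flag, and mbox_write() are made-up stand-ins for the driver's list and register accesses.

#include <stdbool.h>
#include <stdio.h>

#define MBOX_Q_DEPTH 8

struct mbox_cmd { int payload; };

/* Hypothetical deferred-command FIFO playing the role of mod->cmd_q. */
static struct mbox_cmd pending[MBOX_Q_DEPTH];
static unsigned int q_head, q_tail, q_count;
static bool hw_busy;     /* crude model: one command "in flight" at a time */

/* Stand-in for the register writes done when the mailbox is free. */
static void mbox_write(const struct mbox_cmd *cmd)
{
    printf("-> hw mailbox: %d\n", cmd->payload);
    hw_busy = true;
}

/* Enqueue-or-submit: defer the command if the mailbox is busy or others
 * are already pending, otherwise hand it to the hardware immediately. */
static bool mbox_queue(const struct mbox_cmd *cmd)
{
    if (hw_busy || q_count > 0) {
        if (q_count == MBOX_Q_DEPTH)
            return false;                /* queue full */
        pending[q_tail] = *cmd;
        q_tail = (q_tail + 1) % MBOX_Q_DEPTH;
        q_count++;
        return true;
    }
    mbox_write(cmd);
    return true;
}

/* Poll path: pretend the last write was acked, then submit the next one. */
static void mbox_poll(void)
{
    hw_busy = false;
    if (q_count == 0)
        return;
    mbox_write(&pending[q_head]);
    q_head = (q_head + 1) % MBOX_Q_DEPTH;
    q_count--;
}

int main(void)
{
    struct mbox_cmd a = { 1 }, b = { 2 }, c = { 3 };

    mbox_queue(&a);   /* mailbox idle: written immediately           */
    mbox_queue(&b);   /* mailbox busy: deferred on the pending queue */
    mbox_queue(&c);   /* still busy: queued behind b                 */
    mbox_poll();      /* ack + submit b                              */
    mbox_poll();      /* ack + submit c                              */
    return 0;
}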
|
D | bfa_ioc.h | 246 struct list_head cmd_q; /* pending mbox queue */ member
|
/drivers/net/ethernet/qlogic/qlcnic/ |
D | qlcnic_83xx_hw.c |
    3892  struct list_head *head = &mbx->cmd_q;  in qlcnic_83xx_flush_mbx_queue()
    4014  list_add_tail(&cmd->list, &mbx->cmd_q);  in qlcnic_83xx_enqueue_mbx_cmd()
    4099  struct list_head *head = &mbx->cmd_q;  in qlcnic_83xx_mailbox_worker()
    4172  INIT_LIST_HEAD(&mbx->cmd_q);  in qlcnic_83xx_init_mailbox_work()
|
D | qlcnic.h | 1097 struct list_head cmd_q; member
|