/drivers/scsi/aacraid/

D | comminit.c |
    349  struct aac_entry * queues; in aac_comm_init() local
    351  struct aac_queue_block * comm = dev->queues; in aac_comm_init()
    370  queues = (struct aac_entry *)(((ulong)headers) + hdrsize); in aac_comm_init()
    373  comm->queue[HostNormCmdQueue].base = queues; in aac_comm_init()
    375  queues += HOST_NORM_CMD_ENTRIES; in aac_comm_init()
    379  comm->queue[HostHighCmdQueue].base = queues; in aac_comm_init()
    382  queues += HOST_HIGH_CMD_ENTRIES; in aac_comm_init()
    386  comm->queue[AdapNormCmdQueue].base = queues; in aac_comm_init()
    389  queues += ADAP_NORM_CMD_ENTRIES; in aac_comm_init()
    393  comm->queue[AdapHighCmdQueue].base = queues; in aac_comm_init()
    [all …]
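The aac_comm_init() hits above all belong to one layout trick: a single contiguous allocation holds the queue headers followed by every queue's entry array, and a cursor is advanced past each queue's entry count to compute the next base pointer. A minimal user-space sketch of that carving follows; the entry counts, hdrsize, and type names are illustrative stand-ins, not the driver's.

    #include <stdio.h>
    #include <stdlib.h>

    struct entry { unsigned int data[4]; };

    enum { HOST_NORM, HOST_HIGH, ADAP_NORM, ADAP_HIGH, NR_QUEUES };

    static const size_t nr_entries[NR_QUEUES] = { 8, 4, 8, 4 };

    int main(void)
    {
        size_t hdrsize = 64, total = 0, i;
        struct entry *base[NR_QUEUES], *cursor;
        char *block;

        for (i = 0; i < NR_QUEUES; i++)
            total += nr_entries[i] * sizeof(struct entry);

        block = calloc(1, hdrsize + total);  /* one allocation for everything */
        if (!block)
            return 1;

        /* queue headers sit at the front; entries start right past them */
        cursor = (struct entry *)(block + hdrsize);
        for (i = 0; i < NR_QUEUES; i++) {
            base[i] = cursor;          /* comm->queue[i].base = queues; */
            cursor += nr_entries[i];   /* queues += ..._ENTRIES;        */
        }

        for (i = 0; i < NR_QUEUES; i++)
            printf("queue %zu starts at offset %td\n", i,
                   (char *)base[i] - block);
        free(block);
        return 0;
    }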

D | commsup.c |
    383  q = &dev->queues->queue[qid]; in aac_get_entry()
    621  if (!dev->queues) in aac_fib_send()
    693  struct aac_queue * q = &dev->queues->queue[AdapNormCmdQueue]; in aac_fib_send()
    961  q = &dev->queues->queue[AdapNormRespQueue]; in aac_fib_adapter_complete()
    1594  kfree(aac->queues); in _aac_reset_adapter()
    1595  aac->queues = NULL; in _aac_reset_adapter()
    2191  t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
    2194  while (!list_empty(&(dev->queues->queue[HostNormCmdQueue].cmdq))) { in aac_process_events()
    2203  entry = dev->queues->queue[HostNormCmdQueue].cmdq.next; in aac_process_events()
    2206  t_lock = dev->queues->queue[HostNormCmdQueue].lock; in aac_process_events()
    [all …]

D | dpcsup.c |
    88  atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_response_normal()
    294  struct aac_queue *q = &dev->queues->queue[HostNormCmdQueue]; in aac_intr_normal()
    360  atomic_dec(&dev->queues->queue[AdapNormCmdQueue].numpending); in aac_intr_normal()

D | rx.c |
    69  aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_rx_intr_producer()
    73  aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_rx_intr_producer()
    405  struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_producer()
    428  struct aac_queue *q = &dev->queues->queue[AdapNormCmdQueue]; in aac_rx_deliver_message()

D | sa.c |
    69  aac_command_normal(&dev->queues->queue[HostNormCmdQueue]); in aac_sa_intr()
    72  aac_response_normal(&dev->queues->queue[HostNormRespQueue]); in aac_sa_intr()

/drivers/nvme/target/

D | loop.c |
    46  struct nvme_loop_queue *queues; member
    87  return queue - queue->ctrl->queues; in nvme_loop_queue_idx()
    211  struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_submit_async_event()
    233  iod->queue = &ctrl->queues[queue_idx]; in nvme_loop_init_iod()
    252  struct nvme_loop_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_loop_init_hctx()
    264  struct nvme_loop_queue *queue = &ctrl->queues[0]; in nvme_loop_init_admin_hctx()
    290  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[0].flags); in nvme_loop_destroy_admin_queue()
    291  nvmet_sq_destroy(&ctrl->queues[0].nvme_sq); in nvme_loop_destroy_admin_queue()
    311  kfree(ctrl->queues); in nvme_loop_free_ctrl()
    322  clear_bit(NVME_LOOP_Q_LIVE, &ctrl->queues[i].flags); in nvme_loop_destroy_io_queues()
    [all …]
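nvme_loop_queue_idx() (line 87 above) recovers a queue's index purely by pointer subtraction from the base of the ctrl->queues array. A self-contained sketch of the idiom, with toy types standing in for the nvme structures:

    #include <stdio.h>

    struct queue { int flags; };
    struct ctrl { struct queue queues[4]; };

    static int queue_idx(struct ctrl *ctrl, struct queue *queue)
    {
        /* valid because queue points into ctrl->queues[] */
        return queue - ctrl->queues;
    }

    int main(void)
    {
        struct ctrl c = { 0 };

        printf("%d\n", queue_idx(&c, &c.queues[2])); /* prints 2 */
        return 0;
    }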

/drivers/gpu/drm/amd/amdkfd/

D | kfd_process_queue_manager.c |
    35  list_for_each_entry(pqn, &pqm->queues, process_queue_list) { in get_queue_by_qid()
    68  INIT_LIST_HEAD(&pqm->queues); in pqm_init()
    84  list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) { in pqm_uninit()
    174  if (list_empty(&pqm->queues)) { in pqm_create_queue()
    249  list_add(&pqn->process_queue_list, &pqm->queues); in pqm_create_queue()
    264  if (list_empty(&pqm->queues)) in pqm_create_queue()
    321  if (list_empty(&pqm->queues)) in pqm_destroy_queue()
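The pqm_* hits show per-process queues being tracked on an intrusive linked list via the kernel's <linux/list.h> primitives (INIT_LIST_HEAD, list_add, list_for_each_entry). Below is a reduced user-space rendition of the same pattern, assuming nothing beyond standard C; queue_node and its fields are illustrative:

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

    /* insert new right after head, as the kernel's list_add() does */
    static void list_add(struct list_head *new, struct list_head *head)
    {
        new->next = head->next;
        new->prev = head;
        head->next->prev = new;
        head->next = new;
    }

    struct queue_node {
        unsigned int qid;
        struct list_head process_queue_list;
    };

    int main(void)
    {
        struct list_head queues;
        struct queue_node a = { .qid = 1 }, b = { .qid = 2 };
        struct list_head *pos;

        INIT_LIST_HEAD(&queues);                   /* cf. pqm_init()        */
        list_add(&a.process_queue_list, &queues);  /* cf. pqm_create_queue()*/
        list_add(&b.process_queue_list, &queues);

        /* hand-rolled list_for_each_entry(): walk nodes via container_of */
        for (pos = queues.next; pos != &queues; pos = pos->next) {
            struct queue_node *n =
                container_of(pos, struct queue_node, process_queue_list);
            printf("qid %u\n", n->qid);            /* cf. get_queue_by_qid()*/
        }
        return 0;
    }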

D | kfd_process.c |
    193  kfree(p->queues); in kfd_process_wq_release()
    284  process->queues = kmalloc_array(INITIAL_QUEUE_ARRAY_SIZE, in create_process()
    285  sizeof(process->queues[0]), GFP_KERNEL); in create_process()
    286  if (!process->queues) in create_process()
    336  kfree(process->queues); in create_process()

D | kfd_device_queue_manager.c |
    423  list_add(&n->list, &dqm->queues); in register_process_nocpsch()
    446  list_for_each_entry_safe(cur, next, &dqm->queues, list) { in unregister_process_nocpsch()
    497  INIT_LIST_HEAD(&dqm->queues); in initialize_nocpsch()
    660  INIT_LIST_HEAD(&dqm->queues); in initialize_cpsch()
    700  list_for_each_entry(node, &dqm->queues, list) in start_cpsch()
    722  list_for_each_entry(node, &dqm->queues, list) { in stop_cpsch()
    949  retval = pm_send_runlist(&dqm->packets, &dqm->queues); in execute_queues_cpsch()

/drivers/net/ethernet/netronome/nfp/

D | nfp_net_debugfs.c |
    206  struct dentry *queues, *tx, *rx, *xdp; in nfp_net_debugfs_vnic_add() local
    222  queues = debugfs_create_dir("queue", nn->debugfs_dir); in nfp_net_debugfs_vnic_add()
    223  if (IS_ERR_OR_NULL(queues)) in nfp_net_debugfs_vnic_add()
    226  rx = debugfs_create_dir("rx", queues); in nfp_net_debugfs_vnic_add()
    227  tx = debugfs_create_dir("tx", queues); in nfp_net_debugfs_vnic_add()
    228  xdp = debugfs_create_dir("xdp", queues); in nfp_net_debugfs_vnic_add()

/drivers/staging/fsl-dpaa2/ethernet/

D | README |
    27  - queues, channels
    34  hardware resources, like queues, do not have a corresponding MC object and
    101  queues ---------------------- | | Buffer pool |
    111  Frames are transmitted and received through hardware frame queues, which can be
    113  enqueues TX frames on egress queues and after transmission is complete a TX
    116  When frames are available on ingress queues, a data availability notification
    118  queues in the same channel have available frames, only one notification is sent.
    121  Each network interface can have multiple Rx, Tx and confirmation queues affined
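Lines 116-118 of the README describe notification coalescing: a channel raises a single data-availability notification even when several of its ingress queues hold frames. A toy model of that behavior, assuming an explicit per-channel "armed" flag; this is not the DPAA2 API:

    #include <stdbool.h>
    #include <stdio.h>

    #define QUEUES_PER_CHANNEL 4

    struct channel {
        unsigned int pending[QUEUES_PER_CHANNEL]; /* frames per ingress queue */
        bool armed;                               /* one notification per arming */
    };

    static void enqueue_rx(struct channel *ch, unsigned int q)
    {
        ch->pending[q]++;
        if (ch->armed) {
            ch->armed = false;
            printf("notify: channel has work\n"); /* sent exactly once */
        }
    }

    int main(void)
    {
        struct channel ch = { .armed = true };

        enqueue_rx(&ch, 0); /* triggers the single notification */
        enqueue_rx(&ch, 1); /* coalesced: no further notification */
        enqueue_rx(&ch, 2);
        return 0;
    }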

/drivers/net/xen-netback/

D | interface.c |
    201  queue = &vif->queues[index]; in xenvif_start_xmit()
    253  queue = &vif->queues[index]; in xenvif_get_stats()
    277  queue = &vif->queues[queue_index]; in xenvif_up()
    293  queue = &vif->queues[queue_index]; in xenvif_down()
    406  void *vif_stats = &vif->queues[queue_index].stats; in xenvif_get_ethtool_stats()
    482  vif->queues = NULL; in xenvif_alloc()
    747  queue = &vif->queues[queue_index]; in xenvif_disconnect_data()
    804  struct xenvif_queue *queues = vif->queues; in xenvif_free() local
    812  xenvif_deinit_queue(&queues[queue_index]); in xenvif_free()
    813  vfree(queues); in xenvif_free()

D | xenbus.c |
    229  &vif->queues[i], in xenvif_debugfs_addif()
    514  xenvif_deinit_queue(&vif->queues[queue_index]); in backend_disconnect()
    516  vfree(vif->queues); in backend_disconnect()
    517  vif->queues = NULL; in backend_disconnect()
    754  struct xenvif_queue *queue = &vif->queues[queue_index]; in xen_net_rate_changed()
    980  be->vif->queues = vzalloc(requested_num_queues * in connect()
    982  if (!be->vif->queues) { in connect()
    992  queue = &be->vif->queues[queue_index]; in connect()
    1056  xenvif_deinit_queue(&be->vif->queues[queue_index]); in connect()
    1057  vfree(be->vif->queues); in connect()
    [all …]
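The connect()/backend_disconnect() hits trace a queue-array lifecycle: vzalloc a zeroed array of per-queue state, initialize each element, then deinit each element and vfree the array on teardown. A user-space sketch with calloc()/free() standing in for vzalloc()/vfree(); the types and helpers are illustrative:

    #include <stdlib.h>

    struct xq { int ring; };

    static int q_init(struct xq *q)    { q->ring = 1; return 0; }
    static void q_deinit(struct xq *q) { q->ring = 0; }

    int main(void)
    {
        unsigned int i, num_queues = 8;
        struct xq *queues = calloc(num_queues, sizeof(*queues));

        if (!queues)
            return 1;
        for (i = 0; i < num_queues; i++)
            q_init(&queues[i]);
        /* ... queues in service ... */
        for (i = 0; i < num_queues; i++)
            q_deinit(&queues[i]);  /* cf. xenvif_deinit_queue() */
        free(queues);              /* cf. vfree(vif->queues)    */
        return 0;
    }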

/drivers/nvme/host/

D | rdma.c |
    106  struct nvme_rdma_queue *queues; member
    168  return queue - queue->ctrl->queues; in nvme_rdma_queue_idx()
    301  struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_exit_request()
    318  struct nvme_rdma_queue *queue = &ctrl->queues[queue_idx]; in nvme_rdma_init_request()
    349  struct nvme_rdma_queue *queue = &ctrl->queues[hctx_idx + 1]; in nvme_rdma_init_hctx()
    361  struct nvme_rdma_queue *queue = &ctrl->queues[0]; in nvme_rdma_init_admin_hctx()
    513  queue = &ctrl->queues[idx]; in nvme_rdma_alloc_queue()
    590  nvme_rdma_free_queue(&ctrl->queues[i]); in nvme_rdma_free_io_queues()
    598  nvme_rdma_stop_queue(&ctrl->queues[i]); in nvme_rdma_stop_io_queues()
    611  set_bit(NVME_RDMA_Q_LIVE, &ctrl->queues[idx].flags); in nvme_rdma_start_queue()
    [all …]

D | pci.c |
    80  struct nvme_queue *queues; member
    359  struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_admin_init_hctx()
    381  struct nvme_queue *nvmeq = &dev->queues[hctx_idx + 1]; in nvme_init_hctx()
    397  struct nvme_queue *nvmeq = &dev->queues[queue_idx]; in nvme_init_request()
    911  struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_pci_submit_async_event()
    1165  nvme_free_queue(&dev->queues[i]); in nvme_free_queues()
    1197  struct nvme_queue *nvmeq = &dev->queues[0]; in nvme_disable_admin_queue()
    1255  struct nvme_queue *nvmeq = &dev->queues[qid]; in nvme_alloc_queue()
    1477  nvmeq = &dev->queues[0]; in nvme_pci_configure_admin_queue()
    1516  ret = nvme_create_queue(&dev->queues[i], i); in nvme_create_io_queues()
    [all …]
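The pci.c hits follow the NVMe host convention that queues[0] is the admin queue while blk-mq hardware context i maps to I/O queue queues[i + 1] (nvme_admin_init_hctx and nvme_init_hctx above). A toy illustration of that mapping; the type is a stand-in:

    #include <stdio.h>

    struct nq { int qid; };

    int main(void)
    {
        struct nq queues[5];
        int hctx_idx;

        queues[0].qid = 0;  /* admin queue lives at index 0 */
        for (hctx_idx = 0; hctx_idx < 4; hctx_idx++)
            queues[hctx_idx + 1].qid = hctx_idx + 1; /* I/O queues */

        printf("hctx 2 -> qid %d\n", queues[2 + 1].qid);
        return 0;
    }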

D | fc.c |
    148  struct nvme_fc_queue *queues; member
    1521  struct nvme_fc_queue *queue = &ctrl->queues[queue_idx]; in nvme_fc_init_request()
    1544  ret = __nvme_fc_init_request(ctrl, &ctrl->queues[0], in nvme_fc_init_aen_ops()
    1586  struct nvme_fc_queue *queue = &ctrl->queues[qidx]; in __nvme_fc_init_hctx()
    1619  queue = &ctrl->queues[idx]; in nvme_fc_init_queue()
    1685  nvme_fc_free_queue(&ctrl->queues[i]); in nvme_fc_free_io_queues()
    1705  struct nvme_fc_queue *queue = &ctrl->queues[ctrl->ctrl.queue_count - 1]; in nvme_fc_delete_hw_io_queues()
    1715  struct nvme_fc_queue *queue = &ctrl->queues[1]; in nvme_fc_create_hw_io_queues()
    1728  __nvme_fc_delete_hw_queue(ctrl, &ctrl->queues[i], i); in nvme_fc_create_hw_io_queues()
    1738  ret = nvme_fc_connect_queue(ctrl, &ctrl->queues[i], qsize, in nvme_fc_connect_io_queues()
    [all …]

/drivers/net/

D | xen-netfront.c |
    156  struct netfront_queue *queues; member
    354  if (!np->queues) in xennet_open()
    358  queue = &np->queues[i]; in xennet_open()
    589  queue = &np->queues[queue_index]; in xennet_start_xmit()
    720  queue = &np->queues[i]; in xennet_close()
    1262  xennet_interrupt(0, &info->queues[i]); in xennet_poll_controller()
    1304  np->queues = NULL; in xennet_create_dev()
    1394  for (i = 0; i < num_queues && info->queues; ++i) { in xennet_disconnect_backend()
    1395  struct netfront_queue *queue = &info->queues[i]; in xennet_disconnect_backend()
    1749  struct netfront_queue *queue = &info->queues[i]; in xennet_destroy_queues()
    [all …]

/drivers/scsi/arm/

D | fas216.c |
    209  info->stats.queues, info->stats.removes, info->stats.fins, in fas216_dumpinfo()
    997  queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_reselected_intr()
    1003  info->SCpnt = queue_remove_tgtluntag(&info->queues.disconnected, in fas216_reselected_intr()
    1928  SCpnt = queue_remove_exclude(&info->queues.issue, in fas216_kick()
    1952  queue_add_cmd_tail(&info->queues.disconnected, info->SCpnt); in fas216_kick()
    2218  info->stats.queues += 1; in fas216_queue_command_lck()
    2227  result = !queue_add_cmd_ordered(&info->queues.issue, SCpnt); in fas216_queue_command_lck()
    2355  if (queue_remove_cmd(&info->queues.issue, SCpnt)) { in fas216_find_command()
    2365  } else if (queue_remove_cmd(&info->queues.disconnected, SCpnt)) { in fas216_find_command()
    2499  queue_remove_all_target(&info->queues.issue, target); in fas216_eh_device_reset()
    [all …]

D | acornscsi.c |
    709  SCpnt = queue_remove_exclude(&host->queues.issue, host->busyluns); in acornscsi_kick()
    717  queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_kick()
    1798  if (!ok && queue_probetgtlun(&host->queues.disconnected, target, lun)) in acornscsi_reconnect()
    1813  queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect()
    1840  queue_add_cmd_tail(&host->queues.disconnected, host->SCpnt); in acornscsi_reconnect_finish()
    1850  host->SCpnt = queue_remove_tgtluntag(&host->queues.disconnected, in acornscsi_reconnect_finish()
    2502  host->stats.queues += 1; in acornscsi_queuecmd_lck()
    2507  if (!queue_add_cmd_ordered(&host->queues.issue, SCpnt)) { in acornscsi_queuecmd_lck()
    2559  if (queue_remove_cmd(&host->queues.issue, SCpnt)) { in acornscsi_do_abort()
    2570  } else if (queue_remove_cmd(&host->queues.disconnected, SCpnt)) { in acornscsi_do_abort()
    [all …]

D | acornscsi.h |
    305  unsigned int queues; member
    320  } queues; member

D | fas216.h |
    258  unsigned int queues; member
    286  } queues; member

/drivers/media/platform/qcom/venus/

D | hfi_venus.c |
    141  struct iface_queue queues[IFACEQ_NUM]; member
    403  queue = &hdev->queues[IFACEQ_CMD_IDX]; in venus_iface_cmdq_write_nolock()
    609  queue = &hdev->queues[IFACEQ_MSG_IDX]; in venus_iface_msgq_read_nolock()
    643  queue = &hdev->queues[IFACEQ_DBG_IDX]; in venus_iface_dbgq_read_nolock()
    692  memset(hdev->queues, 0, sizeof(hdev->queues)); in venus_interface_queues_release()
    717  queue = &hdev->queues[i]; in venus_interface_queues_init()
    749  queue = &hdev->queues[IFACEQ_DBG_IDX]; in venus_interface_queues_init()
    848  qhdr = hdev->queues[index].qhdr; in venus_get_queue_size()
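hfi_venus.c keeps a fixed table of interface queues indexed by purpose (IFACEQ_CMD_IDX, IFACEQ_MSG_IDX, IFACEQ_DBG_IDX). A small sketch of an enum-sized, purpose-indexed queue array; the payload type is illustrative:

    #include <stdio.h>

    enum { IFACEQ_CMD_IDX, IFACEQ_MSG_IDX, IFACEQ_DBG_IDX, IFACEQ_NUM };

    struct iface_queue { const char *name; };

    struct hdev {
        struct iface_queue queues[IFACEQ_NUM]; /* one slot per purpose */
    };

    int main(void)
    {
        struct hdev hdev = { .queues = {
            [IFACEQ_CMD_IDX] = { "cmd" },
            [IFACEQ_MSG_IDX] = { "msg" },
            [IFACEQ_DBG_IDX] = { "dbg" },
        } };

        printf("%s\n", hdev.queues[IFACEQ_MSG_IDX].name); /* prints "msg" */
        return 0;
    }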

/drivers/s390/crypto/

D | ap_bus.h |
    158  struct list_head queues; /* List of assoc. AP queues */ member
    225  list_for_each_entry(_aq, &(_ac)->queues, list)

/drivers/net/ethernet/cadence/

D | macb_main.c |
    719  (unsigned int)(queue - bp->queues), in macb_tx_error_task()
    821  u16 queue_index = queue - bp->queues; in macb_tx_interrupt()
    1302  (unsigned int)(queue - bp->queues), in macb_interrupt()
    1399  for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) in macb_poll_controller()
    1632  struct macb_queue *queue = &bp->queues[queue_index]; in macb_start_xmit()
    1790  for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_free_consistent()
    1838  for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in macb_alloc_consistent()
    1882  for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) { in gem_init_rings()
    1907  desc = macb_tx_desc(&bp->queues[0], i); in macb_init_rings()
    1911  bp->queues[0].tx_head = 0; in macb_init_rings()
    [all …]
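macb_main.c repeatedly iterates with a twin-cursor loop, advancing the index q and the element pointer queue together so the body can use whichever is handier. A compilable sketch of the idiom with toy types:

    #include <stdio.h>

    struct macb_queue { int tx_head; };

    struct bp {
        unsigned int num_queues;
        struct macb_queue queues[4];
    };

    int main(void)
    {
        struct bp bp = { .num_queues = 4 };
        struct macb_queue *queue;
        unsigned int q;

        /* index and pointer advance in lockstep */
        for (q = 0, queue = bp.queues; q < bp.num_queues; ++q, ++queue)
            queue->tx_head = 0;

        printf("queue 3 tx_head %d\n", bp.queues[3].tx_head);
        return 0;
    }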

/drivers/staging/fsl-mc/

D | README.txt |
    33  The MC uses DPAA2 hardware resources such as queues, buffer pools, and
    64  | -queues -DPRC |
    184  -DPNI (Datapath Network Interface): contains TX/RX queues,
    186  mechanisms. The TX/RX queues are in memory and are identified by
    195  architecture separates the mechanism to access queues (the DPIO object)
    196  from the queues themselves. The DPIO provides an MMIO interface to
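Lines 195-196 describe the architectural split between the object used to access queues (DPIO, a portal) and the queues themselves (owned by a DPNI and identified by queue IDs). The sketch below models only that separation; it is not the MC or DPIO programming interface:

    #include <stdio.h>

    struct queue { unsigned int fqid; int frames; };  /* lives with the DPNI  */

    struct portal { const char *name; };              /* access object (DPIO) */

    static int portal_dequeue(struct portal *p, struct queue *q)
    {
        if (q->frames == 0)
            return -1;
        q->frames--;  /* any portal can drive any queue by its ID */
        return 0;
    }

    int main(void)
    {
        struct queue rxq = { .fqid = 0x40, .frames = 1 };
        struct portal dpio = { .name = "dpio.0" };

        if (portal_dequeue(&dpio, &rxq) == 0)
            printf("%s dequeued from fqid 0x%x\n", dpio.name, rxq.fqid);
        return 0;
    }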