/drivers/gpu/drm/amd/amdkfd/ |
D | kfd_kernel_queue.c |
    41  enum kfd_queue_type type, unsigned int queue_size)  in kq_initialize() argument
    51  queue_size);  in kq_initialize()
    84  retval = kfd_gtt_sa_allocate(dev, queue_size, &kq->pq);  in kq_initialize()
    86  pr_err("Failed to init pq queues size %d\n", queue_size);  in kq_initialize()
    123  memset(kq->pq_kernel_addr, 0, queue_size);  in kq_initialize()
    127  prop.queue_size = queue_size;  in kq_initialize()
    246  queue_size_dwords = kq->queue->properties.queue_size / 4;  in kq_acquire_packet_buffer()
    317  (kq->queue->properties.queue_size / 4);  in kq_rollback_packet()
|
D | kfd_queue.c |
    34  pr_debug("Queue Size: %llu\n", q->queue_size);  in print_queue_properties()
    51  pr_debug("Queue Size: %llu\n", q->properties.queue_size);  in print_queue()
|
D | kfd_mqd_manager_cik.c |
    171  uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);  in load_mqd()
    205  m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;  in __update_mqd()
    248  m->sdma_rlc_rb_cntl = order_base_2(q->queue_size / 4)  in update_mqd_sdma()
    335  m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;  in update_mqd_hiq()
|
D | kfd_mqd_manager_vi.c |
    162  uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1);  in load_mqd()
    180  m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;  in __update_mqd()
    357  m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)  in update_mqd_sdma()
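Note: both MQD managers size the ring the same way. queue_size is in bytes, dividing by 4 gives the dword count, and because that count is a power of two the control register takes order_base_2(dwords) - 1 while the write pointer wraps with a dwords - 1 mask. Below is a minimal userspace sketch of that arithmetic only; order_base_2() is reimplemented here for illustration and the 4 KiB ring size is made up.

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's order_base_2(): ceil(log2(n)). */
static unsigned int order_base_2(uint32_t n)
{
        unsigned int order = 0;

        while ((1u << order) < n)
                order++;
        return order;
}

int main(void)
{
        uint32_t queue_size = 4096;                     /* ring size in bytes (power of two) */
        uint32_t dwords     = queue_size / 4;           /* 1024 dword-sized entries */
        uint32_t wptr_mask  = dwords - 1;               /* 0x3ff: wraps the write pointer */
        uint32_t pq_control = order_base_2(dwords) - 1; /* 9: ring order encoded for the HQD */

        printf("dwords=%u wptr_mask=0x%x pq_control=%u\n", dwords, wptr_mask, pq_control);
        return 0;
}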
|
/drivers/net/ethernet/microsoft/mana/ |
D | gdma_main.c |
    169  req.queue_size = queue->queue_size;  in mana_gd_create_hw_eq()
    278  u32 num_cqe = cq->queue_size / GDMA_CQE_SIZE;  in mana_gd_ring_cq()
    288  u32 head = eq->head % (eq->queue_size / GDMA_EQE_SIZE);  in mana_gd_process_eqe()
    351  num_eqe = eq->queue_size / GDMA_EQE_SIZE;  in mana_gd_process_eq_events()
    537  log2_num_entries = ilog2(queue->queue_size / GDMA_EQE_SIZE);  in mana_gd_create_eq()
    576  u32 log2_num_entries = ilog2(spec->queue_size / GDMA_CQE_SIZE);  in mana_gd_create_cq()
    612  err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);  in mana_gd_create_hwc_queue()
    619  queue->queue_size = spec->queue_size;  in mana_gd_create_hwc_queue()
    733  err = mana_gd_alloc_memory(gc, spec->queue_size, gmi);  in mana_gd_create_mana_eq()
    744  queue->queue_size = spec->queue_size;  in mana_gd_create_mana_eq()
    [all …]
|
D | hw_channel.c |
    231  enum gdma_queue_type type, u64 queue_size,  in mana_hwc_create_gdma_wq() argument
    241  spec.queue_size = queue_size;  in mana_hwc_create_gdma_wq()
    247  u64 queue_size,  in mana_hwc_create_gdma_cq() argument
    256  spec.queue_size = queue_size;  in mana_hwc_create_gdma_cq()
    265  u64 queue_size,  in mana_hwc_create_gdma_eq() argument
    273  spec.queue_size = queue_size;  in mana_hwc_create_gdma_eq()
    468  u32 queue_size;  in mana_hwc_create_wq() local
    474  queue_size = roundup_pow_of_two(GDMA_MAX_RQE_SIZE * q_depth);  in mana_hwc_create_wq()
    476  queue_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);  in mana_hwc_create_wq()
    478  if (queue_size < MINIMUM_SUPPORTED_PAGE_SIZE)  in mana_hwc_create_wq()
    [all …]
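Note: together with the gdma_main.c entries above, this shows how MANA keeps queue_size in bytes. The requested depth times the maximum entry size is rounded up to a power of two, entry counts come back from dividing by the entry size, and the hardware is handed ilog2() of that count. A self-contained sketch of the same derivation follows; the helpers are stand-ins for the kernel's roundup_pow_of_two()/ilog2(), and the GDMA_* entry sizes are illustrative values, not taken from gdma.h.

#include <stdint.h>
#include <stdio.h>

/* Illustrative entry sizes; not taken from gdma.h. */
#define GDMA_MAX_SQE_SIZE 512u
#define GDMA_CQE_SIZE      64u

/* Stand-in for the kernel's roundup_pow_of_two(). */
static uint32_t roundup_pow_of_two(uint32_t n)
{
        uint32_t p = 1;

        while (p < n)
                p <<= 1;
        return p;
}

/* Stand-in for the kernel's ilog2(). */
static uint32_t ilog2(uint32_t n)
{
        uint32_t log = 0;

        while (n >>= 1)
                log++;
        return log;
}

int main(void)
{
        uint32_t q_depth = 100; /* requested number of entries */

        /* Work queue: depth * max entry size, rounded up to a power of two. */
        uint32_t wq_size = roundup_pow_of_two(GDMA_MAX_SQE_SIZE * q_depth);

        /* Completion queue sized the same way from the CQE size. */
        uint32_t cq_size = roundup_pow_of_two(GDMA_CQE_SIZE * q_depth);
        uint32_t num_cqe = cq_size / GDMA_CQE_SIZE;  /* entries back from bytes */
        uint32_t log2_num_entries = ilog2(num_cqe);  /* what the hardware is given */

        printf("wq=%u bytes, cq=%u bytes, %u CQEs, log2=%u\n",
               wq_size, cq_size, num_cqe, log2_num_entries);
        return 0;
}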
|
D | mana_en.c |
    635  req.wq_size = wq_spec->queue_size;  in mana_create_wq_obj()
    636  req.cq_size = cq_spec->queue_size;  in mana_create_wq_obj()
    735  spec.queue_size = EQ_SIZE;  in mana_create_eq()
    1215  spec.queue_size = txq_size;  in mana_create_txq()
    1229  spec.queue_size = cq_size;  in mana_create_txq()
    1241  wq_spec.queue_size = txq->gdma_sq->queue_size;  in mana_create_txq()
    1244  cq_spec.queue_size = cq->gdma_cq->queue_size;  in mana_create_txq()
    1440  spec.queue_size = rq_size;  in mana_create_rxq()
    1453  spec.queue_size = cq_size;  in mana_create_rxq()
    1464  wq_spec.queue_size = rxq->gdma_rq->queue_size;  in mana_create_rxq()
    [all …]
|
D | gdma.h |
    274  u32 queue_size;  member
    306  unsigned int queue_size;  member
    586  u32 queue_size;  member
|
/drivers/firmware/tegra/ |
D | ivc.c |
    549  unsigned tegra_ivc_total_queue_size(unsigned queue_size)  in tegra_ivc_total_queue_size() argument
    551  if (!IS_ALIGNED(queue_size, TEGRA_IVC_ALIGN)) {  in tegra_ivc_total_queue_size()
    553  __func__, queue_size, TEGRA_IVC_ALIGN);  in tegra_ivc_total_queue_size()
    557  return queue_size + sizeof(struct tegra_ivc_header);  in tegra_ivc_total_queue_size()
    618  size_t queue_size;  in tegra_ivc_init() local
    636  queue_size = tegra_ivc_total_queue_size(num_frames * frame_size);  in tegra_ivc_init()
    639  ivc->rx.phys = dma_map_single(peer, rx, queue_size,  in tegra_ivc_init()
    644  ivc->tx.phys = dma_map_single(peer, tx, queue_size,  in tegra_ivc_init()
    647  dma_unmap_single(peer, ivc->rx.phys, queue_size,  in tegra_ivc_init()
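Note: tegra_ivc_total_queue_size() only checks alignment and adds the per-queue header to the payload size, and tegra_ivc_init() feeds it num_frames * frame_size for each direction before DMA-mapping the result. A standalone sketch of that helper follows; the 64-byte alignment and the header size used below are assumptions for illustration, not the driver's actual values.

#include <stdio.h>

#define TEGRA_IVC_ALIGN 64u /* alignment assumed for illustration */

/* Simplified stand-in for struct tegra_ivc_header. */
struct tegra_ivc_header {
        unsigned char reserved[128];
};

/* Mirrors tegra_ivc_total_queue_size(): aligned payload plus one header. */
static unsigned int total_queue_size(unsigned int queue_size)
{
        if (queue_size % TEGRA_IVC_ALIGN) {
                fprintf(stderr, "queue_size %u not aligned to %u\n",
                        queue_size, TEGRA_IVC_ALIGN);
                return 0;
        }

        return queue_size + sizeof(struct tegra_ivc_header);
}

int main(void)
{
        unsigned int num_frames = 8, frame_size = 128;
        unsigned int total = total_queue_size(num_frames * frame_size);

        printf("total queue size: %u bytes\n", total); /* 8*128 + 128 = 1152 */
        return 0;
}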
|
D | bpmp-tegra186.c |
    111  size_t message_size, queue_size;  in tegra186_bpmp_channel_init() local
    121  queue_size = tegra_ivc_total_queue_size(message_size);  in tegra186_bpmp_channel_init()
    122  offset = queue_size * index;  in tegra186_bpmp_channel_init()
|
/drivers/net/wireless/intel/iwlwifi/pcie/ |
D | rx.c |
    122  WARN_ON(rxq->queue_size & (rxq->queue_size - 1));  in iwl_rxq_space()
    130  return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);  in iwl_rxq_space()
    269  rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);  in iwl_pcie_rxmq_restock()
    682  free_size * rxq->queue_size,  in iwl_pcie_free_rxq_dma()
    693  rxq->queue_size,  in iwl_pcie_free_rxq_dma()
    713  rxq->queue_size = trans->cfg->num_rbds;  in iwl_pcie_alloc_rxq_dma()
    715  rxq->queue_size = RX_QUEUE_SIZE;  in iwl_pcie_alloc_rxq_dma()
    723  rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,  in iwl_pcie_alloc_rxq_dma()
    731  rxq->queue_size,  in iwl_pcie_alloc_rxq_dma()
    1074  int i, err, queue_size, allocator_pool_size, num_alloc;  in _iwl_pcie_rx_init() local
    [all …]
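Note: iwl_rxq_space() depends on queue_size being a power of two. Free space and the write-pointer advance both use a (queue_size - 1) mask instead of a modulo, and the WARN_ON at line 122 fires if that assumption is ever broken. The same arithmetic as a standalone sketch with made-up indices:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct rxq {
        uint32_t read;
        uint32_t write;
        uint32_t queue_size; /* must be a power of two */
};

/* Free slots, keeping one slot unused so read == write means "empty". */
static uint32_t rxq_space(const struct rxq *q)
{
        assert((q->queue_size & (q->queue_size - 1)) == 0); /* the WARN_ON at line 122 */
        return (q->read - q->write - 1) & (q->queue_size - 1);
}

/* Advance the write index with the same mask instead of a modulo. */
static void rxq_advance_write(struct rxq *q)
{
        q->write = (q->write + 1) & (q->queue_size - 1);
}

int main(void)
{
        struct rxq q = { .read = 2, .write = 510, .queue_size = 512 };

        printf("free slots: %u\n", rxq_space(&q)); /* (2 - 510 - 1) & 511 = 3 */
        rxq_advance_write(&q);
        printf("write index: %u\n", q.write);      /* stays within 0..511 */
        return 0;
}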
|
D | trans-gen2.c |
    250  int queue_size = max_t(u32, IWL_CMD_QUEUE_SIZE,  in iwl_pcie_gen2_nic_init() local
    265  if (iwl_txq_gen2_init(trans, trans->txqs.cmd.q_id, queue_size))  in iwl_pcie_gen2_nic_init()
|
/drivers/usb/host/ |
D | u132-hcd.c |
    162  u16 queue_size;  member
    514  if (ENDP_QUEUE_SIZE > --endp->queue_size) {  in u132_hcd_giveback_urb()
    553  if (ENDP_QUEUE_SIZE > --endp->queue_size) {  in u132_hcd_abandon_urb()
    1912  endp->queue_size = 1;  in create_endpoint_and_queue_int()
    1929  if (endp->queue_size++ < ENDP_QUEUE_SIZE) {  in queue_int_on_old_endpoint()
    1935  endp->queue_size -= 1;  in queue_int_on_old_endpoint()
    2011  endp->queue_size = 1;  in create_endpoint_and_queue_bulk()
    2026  if (endp->queue_size++ < ENDP_QUEUE_SIZE) {  in queue_bulk_on_old_endpoint()
    2032  endp->queue_size -= 1;  in queue_bulk_on_old_endpoint()
    2100  endp->queue_size = 1;  in create_endpoint_and_queue_control()
    [all …]
|
/drivers/nvme/host/ |
D | rdma.c |
    87  int queue_size;  member
    269  init_attr.cap.max_send_wr = factor * queue->queue_size + 1;  in nvme_rdma_create_qp()
    271  init_attr.cap.max_recv_wr = queue->queue_size + 1;  in nvme_rdma_create_qp()
    447  nvme_rdma_free_ring(ibdev, queue->rsp_ring, queue->queue_size,  in nvme_rdma_destroy_queue_ib()
    512  queue->cq_size = cq_factor * queue->queue_size + 1;  in nvme_rdma_create_queue_ib()
    522  queue->rsp_ring = nvme_rdma_alloc_ring(ibdev, queue->queue_size,  in nvme_rdma_create_queue_ib()
    536  queue->queue_size,  in nvme_rdma_create_queue_ib()
    542  queue->queue_size, nvme_rdma_queue_idx(queue));  in nvme_rdma_create_queue_ib()
    548  queue->queue_size, IB_MR_TYPE_INTEGRITY,  in nvme_rdma_create_queue_ib()
    553  queue->queue_size, nvme_rdma_queue_idx(queue));  in nvme_rdma_create_queue_ib()
    [all …]
|
D | fabrics.h | 115 size_t queue_size; member
|
/drivers/misc/genwqe/ |
D | card_ddcb.c |
    1026  unsigned int queue_size;  in setup_ddcb_queue() local
    1032  queue_size = roundup(GENWQE_DDCB_MAX * sizeof(struct ddcb), PAGE_SIZE);  in setup_ddcb_queue()
    1042  queue->ddcb_vaddr = __genwqe_alloc_consistent(cd, queue_size,  in setup_ddcb_queue()
    1090  __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,  in setup_ddcb_queue()
    1105  unsigned int queue_size;  in free_ddcb_queue() local
    1107  queue_size = roundup(queue->ddcb_max * sizeof(struct ddcb), PAGE_SIZE);  in free_ddcb_queue()
    1113  __genwqe_free_consistent(cd, queue_size, queue->ddcb_vaddr,  in free_ddcb_queue()
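Note: setup_ddcb_queue() sizes the DMA-consistent buffer by rounding the descriptor array up to whole pages, and free_ddcb_queue() recomputes that rounded-up size from the stored descriptor count instead of caching it. A minimal sketch of the roundup follows; the descriptor size and count below are placeholders, not the genwqe values.

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE       4096u
#define GENWQE_DDCB_MAX   32u /* placeholder descriptor count */

/* Placeholder: real DDCBs are fixed-size hardware descriptors. */
struct ddcb {
        unsigned char bytes[256];
};

/* Same idea as the kernel's roundup(): next multiple of 'align'. */
static size_t roundup_to(size_t n, size_t align)
{
        return ((n + align - 1) / align) * align;
}

int main(void)
{
        size_t queue_size = roundup_to(GENWQE_DDCB_MAX * sizeof(struct ddcb),
                                       PAGE_SIZE);

        /* 32 * 256 = 8192 bytes, i.e. exactly two pages here. */
        printf("DDCB queue allocation: %zu bytes\n", queue_size);
        return 0;
}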
|
/drivers/gpu/drm/amd/amdgpu/ |
D | amdgpu_amdkfd_gfx_v10.c |
    267  uint32_t queue_size =  in kgd_hqd_load() local
    270  uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);  in kgd_hqd_load()
    272  if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)  in kgd_hqd_load()
    273  guessed_wptr += queue_size;  in kgd_hqd_load()
    274  guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);  in kgd_hqd_load()
|
D | amdgpu_amdkfd_gfx_v9.c |
    281  uint32_t queue_size =  in kgd_gfx_v9_hqd_load() local
    284  uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);  in kgd_gfx_v9_hqd_load()
    286  if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)  in kgd_gfx_v9_hqd_load()
    287  guessed_wptr += queue_size;  in kgd_gfx_v9_hqd_load()
    288  guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);  in kgd_gfx_v9_hqd_load()
|
D | amdgpu_amdkfd_gfx_v10_3.c |
    252  uint32_t queue_size =  in hqd_load_v10_3() local
    255  uint64_t guessed_wptr = m->cp_hqd_pq_rptr & (queue_size - 1);  in hqd_load_v10_3()
    257  if ((m->cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)  in hqd_load_v10_3()
    258  guessed_wptr += queue_size;  in hqd_load_v10_3()
    259  guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1);  in hqd_load_v10_3()
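Note: all three GFX backends rebuild a 64-bit write pointer from saved MQD state the same way. cp_hqd_pq_rptr only carries as many valid bits as the ring is long, so its in-ring offset is combined with the upper bits of the saved cp_hqd_pq_wptr_lo, plus one extra ring length when the saved write pointer appears to have wrapped past the read pointer (the queue_size initializer itself is cut off in the listing). A standalone sketch of that reconstruction with made-up register values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint32_t queue_size        = 1024;   /* ring length, a power of two (made up) */
        uint32_t cp_hqd_pq_rptr    = 0x005;  /* read pointer: in-ring offset only */
        uint32_t cp_hqd_pq_wptr_lo = 0x1403; /* saved write pointer: free-running count */

        /* Start from the read pointer's in-ring offset. */
        uint64_t guessed_wptr = cp_hqd_pq_rptr & (queue_size - 1);

        /* Saved wptr offset (3) is behind the rptr offset (5): it wrapped,
         * so account for one extra pass of the ring. */
        if ((cp_hqd_pq_wptr_lo & (queue_size - 1)) < guessed_wptr)
                guessed_wptr += queue_size;

        /* Take the "which pass of the ring" bits from the saved wptr. */
        guessed_wptr += cp_hqd_pq_wptr_lo & ~(uint64_t)(queue_size - 1);

        /* 0x005 + 0x400 + 0x1400 = 0x1805 */
        printf("guessed wptr = 0x%llx\n", (unsigned long long)guessed_wptr);
        return 0;
}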
|
/drivers/platform/chrome/wilco_ec/ |
D | event.c |
    103  static int queue_size = 64;  variable
    104  module_param(queue_size, int, 0644);
    470  dev_data->events = event_queue_new(queue_size);  in event_device_add()
|
/drivers/nvme/target/ |
D | loop.c |
    529  ctrl->tag_set.queue_depth = ctrl->ctrl.opts->queue_size;  in nvme_loop_create_io_queues()
    608  ctrl->ctrl.sqsize = opts->queue_size - 1;  in nvme_loop_create_ctrl()
    621  if (opts->queue_size > ctrl->ctrl.maxcmd) {  in nvme_loop_create_ctrl()
    625  opts->queue_size, ctrl->ctrl.maxcmd);  in nvme_loop_create_ctrl()
    626  opts->queue_size = ctrl->ctrl.maxcmd;  in nvme_loop_create_ctrl()
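Note: the loop target passes the fabrics queue_size option straight through: the block-layer tag set depth is queue_size, sqsize is the same value minus one (NVMe submission-queue sizes are zero-based), and the option is clamped down to the controller's MAXCMD with a warning. A small sketch of that flow; the struct and field names below are stand-ins, not the nvme_ctrl/nvmf_ctrl_options layouts.

#include <stdint.h>
#include <stdio.h>

/* Stand-ins for nvmf_ctrl_options and nvme_ctrl; field names are illustrative. */
struct opts { uint32_t queue_size; };
struct ctrl { uint32_t sqsize; uint32_t maxcmd; uint32_t tagset_depth; };

static void apply_queue_size(struct ctrl *c, struct opts *o)
{
        /* NVMe submission-queue sizes are zero-based, hence the -1. */
        c->sqsize = o->queue_size - 1;

        /* Never advertise more outstanding commands than the controller allows. */
        if (o->queue_size > c->maxcmd) {
                fprintf(stderr, "queue_size %u > maxcmd %u, clamping\n",
                        o->queue_size, c->maxcmd);
                o->queue_size = c->maxcmd;
        }

        /* The block-layer tag set gets the (possibly clamped) queue_size. */
        c->tagset_depth = o->queue_size;
}

int main(void)
{
        struct ctrl c = { .maxcmd = 96 };
        struct opts o = { .queue_size = 128 };

        apply_queue_size(&c, &o);
        printf("sqsize=%u tagset_depth=%u\n", c.sqsize, c.tagset_depth); /* 127, 96 */
        return 0;
}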
|
/drivers/dma/ |
D | fsl-qdma.c |
    487  unsigned int queue_size[FSL_QDMA_QUEUE_MAX];  in fsl_qdma_alloc_queue_resources() local
    501  queue_size, queue_num);  in fsl_qdma_alloc_queue_resources()
    508  if (queue_size[i] > FSL_QDMA_CIRCULAR_DESC_SIZE_MAX ||  in fsl_qdma_alloc_queue_resources()
    509  queue_size[i] < FSL_QDMA_CIRCULAR_DESC_SIZE_MIN) {  in fsl_qdma_alloc_queue_resources()
    519  queue_size[i],  in fsl_qdma_alloc_queue_resources()
    526  queue_temp->n_cq = queue_size[i];  in fsl_qdma_alloc_queue_resources()
|
/drivers/i2c/busses/ |
D | i2c-qcom-cci.c |
    122  u16 queue_size[NUM_QUEUES];  member
    323  if (val == cci->data->queue_size[queue])  in cci_validate_queue()
    699  .queue_size = { 64, 16 },
    733  .queue_size = { 64, 16 },
|
/drivers/virtio/ |
D | virtio_pci_modern_dev.c |
    186  offsetof(struct virtio_pci_common_cfg, queue_size));  in check_offsets()
    579  vp_iowrite16(size, &mdev->common->queue_size);  in vp_modern_set_queue_size()
    596  return vp_ioread16(&mdev->common->queue_size);  in vp_modern_get_queue_size()
|
/drivers/misc/vmw_vmci/ |
D | vmci_queue_pair.c |
    277  size_t queue_size = sizeof(*queue) + sizeof(*queue->kernel_if);  in qp_alloc_queue() local
    284  (SIZE_MAX - queue_size) /  in qp_alloc_queue()
    291  queue_size += pas_size + vas_size;  in qp_alloc_queue()
    293  queue = vmalloc(queue_size);  in qp_alloc_queue()
    531  const size_t queue_size = sizeof(*queue) + sizeof(*(queue->kernel_if));  in qp_host_alloc_queue() local
    536  if (num_pages > (SIZE_MAX - queue_size) /  in qp_host_alloc_queue()
    542  if (queue_size + queue_page_size > KMALLOC_MAX_SIZE)  in qp_host_alloc_queue()
    545  queue = kzalloc(queue_size + queue_page_size, GFP_KERNEL);  in qp_host_alloc_queue()
    554  (struct page **)((u8 *)queue + queue_size);  in qp_host_alloc_queue()
    566  static void qp_host_free_queue(struct vmci_queue *queue, u64 queue_size)  in qp_host_free_queue() argument
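Note: qp_host_alloc_queue() shows two patterns worth calling out. The division against SIZE_MAX guards the later multiply-and-add against overflow before anything is allocated, and the page-pointer array is carved out of the same allocation by offsetting past the struct. A userspace sketch of that sizing; the struct layouts are stand-ins and sizeof(void *) stands in for sizeof(struct page *).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Stand-ins: only their sizes matter for the sizing logic below. */
struct vmci_queue_kernel_if { void *placeholder; };
struct vmci_queue { struct vmci_queue_kernel_if *kernel_if; };

int main(void)
{
        size_t num_pages  = 16;
        size_t queue_size = sizeof(struct vmci_queue) +
                            sizeof(struct vmci_queue_kernel_if);

        /* Overflow guard: refuse before num_pages * pointer size could
         * push the total past SIZE_MAX. */
        if (num_pages > (SIZE_MAX - queue_size) / sizeof(void *)) {
                fprintf(stderr, "queue too large\n");
                return 1;
        }

        size_t queue_page_size = num_pages * sizeof(void *);
        struct vmci_queue *queue = calloc(1, queue_size + queue_page_size);

        if (!queue)
                return 1;

        /* The page-pointer array lives in the same allocation, right after the struct. */
        void **pages = (void **)((uint8_t *)queue + queue_size);
        (void)pages;

        printf("allocated %zu bytes (%zu header + %zu page pointers)\n",
               queue_size + queue_page_size, queue_size, queue_page_size);
        free(queue);
        return 0;
}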
|