
Searched for refs:nent (results 1 – 25 of 34), sorted by relevance.

/drivers/gpu/drm/nouveau/nvkm/engine/gr/
gk20a.c
42 int nent; in gk20a_gr_av_to_init() local
50 nent = (fuc.size / sizeof(struct gk20a_fw_av)); in gk20a_gr_av_to_init()
51 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); in gk20a_gr_av_to_init()
60 for (i = 0; i < nent; i++) { in gk20a_gr_av_to_init()
91 int nent; in gk20a_gr_aiv_to_init() local
99 nent = (fuc.size / sizeof(struct gk20a_fw_aiv)); in gk20a_gr_aiv_to_init()
100 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); in gk20a_gr_aiv_to_init()
109 for (i = 0; i < nent; i++) { in gk20a_gr_aiv_to_init()
136 int nent; in gk20a_gr_av_to_method() local
144 nent = (fuc.size / sizeof(struct gk20a_fw_av)); in gk20a_gr_av_to_method()
[all …]
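
The gk20a hits show a common firmware-parsing idiom: the entry count is derived by dividing the blob size by the record size, and the allocation reserves nent + 1 zeroed records so the array ends in an all-zero sentinel. A minimal userspace sketch of that idiom; struct fw_av and blob_to_records are illustrative stand-ins, not the driver's types:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdint.h>

/* Illustrative stand-in for struct gk20a_fw_av: one register write. */
struct fw_av {
    uint32_t addr;
    uint32_t value;
};

/* Derive the record count from the blob size, then allocate nent + 1
 * zeroed records so the array ends in a sentinel (the role of the
 * "+ 1" in the vzalloc() calls above). */
static struct fw_av *blob_to_records(const void *blob, size_t size, int *nent_out)
{
    int nent = size / sizeof(struct fw_av);
    struct fw_av *recs = calloc(nent + 1, sizeof(*recs)); /* zeroed, like vzalloc() */

    if (!recs)
        return NULL;
    memcpy(recs, blob, (size_t)nent * sizeof(*recs));
    *nent_out = nent;
    return recs;
}

int main(void)
{
    uint32_t blob[] = { 0x100, 1, 0x104, 2, 0x108, 3 };
    int nent, i;
    struct fw_av *recs = blob_to_records(blob, sizeof(blob), &nent);

    if (!recs)
        return 1;
    for (i = 0; i < nent; i++)
        printf("reg 0x%x <- %u\n", (unsigned)recs[i].addr, (unsigned)recs[i].value);
    free(recs);
    return 0;
}
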
/drivers/net/ethernet/mellanox/mlx5/core/
eq.c
104 struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1)); in next_eqe_sw()
106 return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe; in next_eqe_sw()
334 for (i = 0; i < eq->nent; i++) { in init_eq_buf()
341 int nent, u64 mask, const char *name, struct mlx5_uar *uar) in mlx5_create_map_eq() argument
351 eq->nent = roundup_pow_of_two(nent + MLX5_NUM_SPARE_EQE); in mlx5_create_map_eq()
353 err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf); in mlx5_create_map_eq()
375 MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent)); in mlx5_create_map_eq()
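
The mlx5 event-queue code (and the mlx4 code further down, which keeps the phase in the high bit, owner & 0x80) is built on the "owner bit" convention for a ring shared with hardware: nent is a power of two, the low bits of cons_index select a slot, and an entry is valid only when its owner bit matches the parity of the consumer's wrap count, so no consumer index ever has to be read across the bus. The MLX5_NUM_SPARE_EQE slack added before the roundup keeps the producer from catching the consumer. A self-contained model of the check; produce() and its flow control are illustrative, not the hardware protocol:

#include <stdio.h>
#include <stdint.h>

#define NENT 4   /* ring size; the index math below needs a power of two */

struct eqe {
    uint8_t  owner;  /* phase bit written by the producer */
    uint32_t data;
};

static struct eqe ring[NENT];
static uint32_t prod_index, cons_index;

/* Producer side: stamp each entry with the parity of the producer's
 * wrap count, then advance. */
static int produce(uint32_t data)
{
    struct eqe *e = &ring[prod_index & (NENT - 1)];

    if (prod_index - cons_index >= NENT)
        return -1;                        /* ring full */
    e->data = data;
    e->owner = !!(prod_index & NENT);
    prod_index++;
    return 0;
}

/* Consumer side: the slot at cons_index is valid only if its owner bit
 * matches the parity of the consumer's wrap count -- the same test as
 * next_eqe_sw() in the listing above. */
static struct eqe *next_eqe_sw(void)
{
    struct eqe *e = &ring[cons_index & (NENT - 1)];

    return ((e->owner & 1) ^ !!(cons_index & NENT)) ? NULL : e;
}

int main(void)
{
    struct eqe *e;
    int i;

    /* Like init_eq_buf(): mark every slot with the "wrong" phase so the
     * consumer sees an empty ring at start. */
    for (i = 0; i < NENT; i++)
        ring[i].owner = 1;

    for (i = 0; i < 6; i++) {             /* wraps the ring once */
        produce(100 + i);
        while ((e = next_eqe_sw())) {
            printf("consumed %u\n", (unsigned)e->data);
            cons_index++;
        }
    }
    return 0;
}
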
/drivers/infiniband/hw/mthca/
mthca_eq.c
184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), in tavor_set_eq_ci()
230 unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; in get_eqe()
466 int nent, in mthca_create_eq() argument
479 eq->nent = roundup_pow_of_two(max(nent, 2)); in mthca_create_eq()
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; in mthca_create_eq()
511 for (i = 0; i < eq->nent; ++i) in mthca_create_eq()
535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24); in mthca_create_eq()
560 eq->eqn, eq->nent); in mthca_create_eq()
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / in mthca_free_eq()
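
mthca sizes its event queue the same way: round the requested count up to a power of two (minimum 2), compute how many pages the ring needs, and encode the size as a log2 in the context, which is what ffs(eq->nent) - 1 yields for a power of two. A small demo of the arithmetic; roundup_pow_of_two here is a local stand-in for the kernel helper:

#include <stdio.h>
#include <strings.h>   /* ffs() */

#define ENTRY_SIZE 32   /* stand-in for MTHCA_EQ_ENTRY_SIZE */
#define PAGE_SIZE  4096

/* Local stand-in for the kernel's roundup_pow_of_two(). */
static int roundup_pow_of_two(int v)
{
    int p = 1;

    while (p < v)
        p <<= 1;
    return p;
}

int main(void)
{
    int requested = 100;
    int nent = roundup_pow_of_two(requested > 2 ? requested : 2);
    /* ALIGN(nent * ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE */
    int npages = (nent * ENTRY_SIZE + PAGE_SIZE - 1) / PAGE_SIZE;

    /* For a power of two, ffs(nent) - 1 == log2(nent); this is the value
     * mthca packs into the logsize_usrpage field. */
    printf("nent=%d npages=%d log2=%d\n", nent, npages, ffs(nent) - 1);
    return 0;
}
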
mthca_allocator.c
160 int mthca_array_init(struct mthca_array *array, int nent) in mthca_array_init() argument
162 int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_array_init()
177 void mthca_array_cleanup(struct mthca_array *array, int nent) in mthca_array_cleanup() argument
181 for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i) in mthca_array_cleanup()
mthca_cq.c
353 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent) in mthca_alloc_cq_buf() argument
358 ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE, in mthca_alloc_cq_buf()
365 for (i = 0; i < nent; ++i) in mthca_alloc_cq_buf()
773 int mthca_init_cq(struct mthca_dev *dev, int nent, in mthca_init_cq() argument
781 cq->ibcq.cqe = nent - 1; in mthca_init_cq()
817 err = mthca_alloc_cq_buf(dev, &cq->buf, nent); in mthca_init_cq()
831 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); in mthca_init_cq()
mthca_dev.h
422 int mthca_array_init(struct mthca_array *array, int nent);
423 void mthca_array_cleanup(struct mthca_array *array, int nent);
498 int mthca_init_cq(struct mthca_dev *dev, int nent,
509 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
mthca_cmd.c
662 int nent = 0; in mthca_map_cmd() local
692 pages[nent * 2] = cpu_to_be64(virt); in mthca_map_cmd()
696 pages[nent * 2 + 1] = in mthca_map_cmd()
702 if (++nent == MTHCA_MAILBOX_SIZE / 16) { in mthca_map_cmd()
703 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, in mthca_map_cmd()
707 nent = 0; in mthca_map_cmd()
712 if (nent) in mthca_map_cmd()
713 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, in mthca_map_cmd()
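
In mthca_map_cmd(), nent is a fill level rather than a capacity: address pairs accumulate in a mailbox until it holds MTHCA_MAILBOX_SIZE / 16 entries, the full chunk is posted as one command, and a trailing partial chunk is posted after the loop. The mlx4_map_cmd() and mlxsw_pci_fw_area_init() hits below follow the same shape. A sketch of that accumulate-and-flush control flow; flush_chunk and CHUNK are illustrative:

#include <stdio.h>

#define CHUNK 4   /* stand-in for MTHCA_MAILBOX_SIZE / 16 */

/* Illustrative stand-in for the firmware command that maps one batch. */
static int flush_chunk(const unsigned long *buf, int nent)
{
    printf("cmd: map %d entries\n", nent);
    return 0;
}

static int map_all(const unsigned long *pages, int count)
{
    unsigned long buf[CHUNK];
    int nent = 0, i, err;

    for (i = 0; i < count; i++) {
        buf[nent] = pages[i];
        if (++nent == CHUNK) {       /* mailbox full: post it */
            err = flush_chunk(buf, nent);
            if (err)
                return err;
            nent = 0;                /* start refilling */
        }
    }
    if (nent)                        /* post the partial tail */
        return flush_chunk(buf, nent);
    return 0;
}

int main(void)
{
    unsigned long pages[10] = { 0 };

    return map_all(pages, 10);       /* posts chunks of 4, 4, then 2 */
}
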
mthca_provider.h
113 int nent; member
mthca_provider.c
657 int nent; in mthca_create_cq() local
695 for (nent = 1; nent <= entries; nent <<= 1) in mthca_create_cq()
698 err = mthca_init_cq(to_mdev(ibdev), nent, in mthca_create_cq()
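
mthca_create_cq() rounds the requested CQE count up with a doubling loop; because the condition is nent <= entries, a request that is already a power of two still doubles, and mthca_init_cq() then reports nent - 1 back through ibcq.cqe, apparently keeping one slot in hand. For example:

#include <stdio.h>

int main(void)
{
    int entries = 128;   /* CQE count requested by the caller */
    int nent;

    /* Same loop as mthca_create_cq(): the smallest power of two strictly
     * greater than entries, so nent - 1 >= entries always holds. */
    for (nent = 1; nent <= entries; nent <<= 1)
        ;

    printf("requested %d, allocated %d, usable cqe %d\n",
           entries, nent, nent - 1);   /* 128 -> 256 -> 255 */
    return 0;
}
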
/drivers/infiniband/hw/hns/
hns_roce_cq.c
83 static int hns_roce_cq_alloc(struct hns_roce_dev *hr_dev, int nent, in hns_roce_cq_alloc() argument
142 nent, vector); in hns_roce_cq_alloc()
243 struct hns_roce_cq_buf *buf, u32 nent) in hns_roce_ib_alloc_cq_buf() argument
247 ret = hns_roce_buf_alloc(hr_dev, nent * hr_dev->caps.cq_entry_sz, in hns_roce_ib_alloc_cq_buf()
267 hns_roce_buf_free(hr_dev, nent * hr_dev->caps.cq_entry_sz, in hns_roce_ib_alloc_cq_buf()
/drivers/net/ethernet/mellanox/mlx4/
eq.c
110 unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; in get_eqe()
124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
780 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
783 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
805 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
807 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
817 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
820 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
974 static int mlx4_create_eq(struct mlx4_dev *dev, int nent, in mlx4_create_eq() argument
988 eq->nent = roundup_pow_of_two(max(nent, 2)); in mlx4_create_eq()
[all …]
cq.c
285 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, in mlx4_cq_alloc() argument
324 cpu_to_be32((ilog2(nent) << 24) | in mlx4_cq_alloc()
fw.c
1501 int nent = 0; in mlx4_map_cmd() local
1531 pages[nent * 2] = cpu_to_be64(virt); in mlx4_map_cmd()
1535 pages[nent * 2 + 1] = in mlx4_map_cmd()
1541 if (++nent == MLX4_MAILBOX_SIZE / 16) { in mlx4_map_cmd()
1542 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, in mlx4_map_cmd()
1547 nent = 0; in mlx4_map_cmd()
1552 if (nent) in mlx4_map_cmd()
1553 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, in mlx4_map_cmd()
/drivers/iommu/
omap-iommu.c
690 int nent = 1; in iopgtable_clear_entry_core() local
701 nent *= 16; in iopgtable_clear_entry_core()
705 bytes *= nent; in iopgtable_clear_entry_core()
706 memset(iopte, 0, nent * sizeof(*iopte)); in iopgtable_clear_entry_core()
707 flush_iopte_range(iopte, iopte + (nent - 1) * sizeof(*iopte)); in iopgtable_clear_entry_core()
718 nent = 1; /* for the next L1 entry */ in iopgtable_clear_entry_core()
722 nent *= 16; in iopgtable_clear_entry_core()
726 bytes *= nent; in iopgtable_clear_entry_core()
728 memset(iopgd, 0, nent * sizeof(*iopgd)); in iopgtable_clear_entry_core()
729 flush_iopgd_range(iopgd, iopgd + (nent - 1) * sizeof(*iopgd)); in iopgtable_clear_entry_core()
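
In the OMAP IOMMU, a supersection or large page is replicated across 16 consecutive page-table entries, so nent starts at 1 and is multiplied by 16 before the memset() that clears the mapping and the flush that covers the same range. A toy model of clearing a replicated entry; the table layout and indices are invented for illustration:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define PTES_PER_LARGE 16   /* an ARM supersection spans 16 consecutive slots */

int main(void)
{
    uint32_t table[256];          /* invented second-level table */
    int idx = 32;                 /* invented start of a large mapping */
    int nent = 1;
    int is_large = 1;             /* pretend the entry is a supersection */

    memset(table, 0xff, sizeof(table));
    if (is_large)
        nent *= PTES_PER_LARGE;   /* the "nent *= 16" in the listing */

    /* Clearing the mapping wipes all nent replicated entries, as
     * iopgtable_clear_entry_core() does before flushing that range. */
    memset(&table[idx], 0, nent * sizeof(table[0]));
    printf("cleared %d entries (%zu bytes)\n", nent, nent * sizeof(table[0]));
    return 0;
}
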
/drivers/tty/serial/
pch_uart.c
254 int nent; member
803 for (i = 0; i < priv->nent; i++, sg++) { in pch_dma_tx_complete()
809 dma_unmap_sg(port->dev, sg, priv->nent, DMA_TO_DEVICE); in pch_dma_tx_complete()
811 priv->nent = 0; in pch_dma_tx_complete()
952 int nent; in dma_handle_tx() local
1031 nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE); in dma_handle_tx()
1032 if (!nent) { in dma_handle_tx()
1036 priv->nent = nent; in dma_handle_tx()
1038 for (i = 0; i < nent; i++, sg++) { in dma_handle_tx()
1043 if (i == (nent - 1)) in dma_handle_tx()
[all …]
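
The serial hits here (and the bcm-pdc-mailbox and spi-topcliff-pch ones further down) all use nent for the value returned by dma_map_sg(): the number of segments actually mapped, which may be smaller than the count passed in if the IOMMU coalesces entries, and 0 on failure. One caveat the DMA API documents: dma_unmap_sg() must be called with the original entry count, not the returned one, so caching only the mapped count, as the pch_uart excerpt above appears to, is a known trap. A kernel-style sketch of the convention; struct tx_state, start_tx, and finish_tx are hypothetical, not code from any driver listed:

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

/* Hypothetical driver state -- a sketch of the dma_map_sg() convention. */
struct tx_state {
    struct scatterlist sg[8];
    int orig_nent;   /* count passed to dma_map_sg() */
    int nent;        /* count it returned: segments actually mapped */
};

static int start_tx(struct device *dev, struct tx_state *st, int num)
{
    st->orig_nent = num;
    st->nent = dma_map_sg(dev, st->sg, num, DMA_TO_DEVICE);
    if (!st->nent)
        return -EIO;                      /* mapping failed */

    /* ... build one descriptor per mapped segment, looping over st->nent,
     * as dma_handle_tx() does above ... */
    return 0;
}

static void finish_tx(struct device *dev, struct tx_state *st)
{
    /* Unmap with the count originally mapped, not the returned one. */
    dma_unmap_sg(dev, st->sg, st->orig_nent, DMA_TO_DEVICE);
    st->nent = 0;
}
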
atmel_serial.c
930 int ret, nent; in atmel_prepare_tx_dma() local
949 nent = dma_map_sg(port->dev, in atmel_prepare_tx_dma()
954 if (!nent) { in atmel_prepare_tx_dma()
1110 int ret, nent; in atmel_prepare_rx_dma() local
1131 nent = dma_map_sg(port->dev, in atmel_prepare_rx_dma()
1136 if (!nent) { in atmel_prepare_rx_dma()
/drivers/infiniband/hw/qib/
qib_pcie.c
269 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent, in qib_pcie_params() argument
284 if (nent && *nent && pos) { in qib_pcie_params()
285 qib_msix_setup(dd, pos, nent, entry); in qib_pcie_params()
/drivers/infiniband/hw/mlx5/
cq.c
77 static u8 sw_ownership_bit(int n, int nent) in sw_ownership_bit() argument
79 return (n & nent) ? 1 : 0; in sw_ownership_bit()
728 int nent, int cqe_size) in alloc_cq_buf() argument
732 err = mlx5_buf_alloc(dev->mdev, nent * cqe_size, &buf->buf); in alloc_cq_buf()
737 buf->nent = nent; in alloc_cq_buf()
830 for (i = 0; i < buf->nent; i++) { in init_cq_buf()
1218 (i + 1) & (cq->resize_buf->nent), in copy_resize_cqes()
1221 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
/drivers/net/ethernet/mellanox/mlxsw/
pci.c
1436 int nent = 0; in mlxsw_pci_fw_area_init() local
1458 mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); in mlxsw_pci_fw_area_init()
1459 mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ in mlxsw_pci_fw_area_init()
1460 if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) { in mlxsw_pci_fw_area_init()
1461 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
1464 nent = 0; in mlxsw_pci_fw_area_init()
1469 if (nent) { in mlxsw_pci_fw_area_init()
1470 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
/drivers/mailbox/
bcm-pdc-mailbox.c
1143 int nent; in pdc_send_data() local
1150 nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE); in pdc_send_data()
1151 if (nent == 0) in pdc_send_data()
1157 nent = dma_map_sg(dev, mssg->spu.dst, dst_nent, in pdc_send_data()
1159 if (nent == 0) { in pdc_send_data()
/drivers/infiniband/hw/hfi1/
pcie.c
401 void request_msix(struct hfi1_devdata *dd, u32 *nent, in request_msix() argument
407 if (*nent && pos) { in request_msix()
408 msix_setup(dd, pos, nent, entry); in request_msix()
411 *nent = 0; in request_msix()
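
In qib_pcie_params() above and request_msix() here, nent is an in/out parameter: the caller passes in how many MSI-X vectors it wants, and the function writes back how many were actually set up (zero on failure, so the caller can fall back). A trivial model of the convention; request_vectors is illustrative:

#include <stdio.h>

/* *nent carries the number of vectors wanted on entry and the number
 * actually granted on return (0 means the caller must fall back). */
static void request_vectors(unsigned int available, unsigned int *nent)
{
    if (*nent > available)
        *nent = available;
}

int main(void)
{
    unsigned int want = 8;

    request_vectors(4, &want);
    printf("granted %u vectors\n", want);   /* prints 4 */
    return 0;
}
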
/drivers/rapidio/
rio_cm.c
338 static void riocm_rx_fill(struct cm_dev *cm, int nent) in riocm_rx_fill() argument
345 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { in riocm_rx_fill()
352 nent--; in riocm_rx_fill()
1573 u32 nent; in cm_ep_get_list() local
1594 nent = min(info[0], cm->npeers); in cm_ep_get_list()
1595 buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); in cm_ep_get_list()
1606 if (++i == nent) in cm_ep_get_list()
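
cm_ep_get_list() clamps a user-supplied element count against what the driver really has before sizing the allocation, then adds two extra words for its header. Clamping before kcalloc() is the part worth copying; a userspace sketch:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

int main(void)
{
    uint32_t user_req = 1000000;   /* element count supplied by user space */
    uint32_t npeers = 3;           /* what the driver actually has */
    /* Clamp before sizing the allocation; the + 2 mirrors the two extra
     * header words cm_ep_get_list() reserves. */
    uint32_t nent = user_req < npeers ? user_req : npeers;
    uint32_t *buf = calloc(nent + 2, sizeof(*buf));

    if (!buf)
        return 1;
    printf("allocated %u words for %u entries\n",
           (unsigned)(nent + 2), (unsigned)nent);
    free(buf);
    return 0;
}
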
/drivers/nvme/target/
rdma.c
206 unsigned int nent; in nvmet_rdma_alloc_sgl() local
209 nent = DIV_ROUND_UP(length, PAGE_SIZE); in nvmet_rdma_alloc_sgl()
210 sg = kmalloc_array(nent, sizeof(struct scatterlist), GFP_KERNEL); in nvmet_rdma_alloc_sgl()
214 sg_init_table(sg, nent); in nvmet_rdma_alloc_sgl()
228 *nents = nent; in nvmet_rdma_alloc_sgl()
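
nvmet_rdma_alloc_sgl() derives nent from the transfer length with DIV_ROUND_UP(), allocates one scatterlist entry per page, and reports the count back through *nents. The rounding macro itself is one line:

#include <stdio.h>

#define PAGE_SIZE 4096
/* Same definition as the kernel's DIV_ROUND_UP() */
#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
    unsigned long length = 10000;   /* transfer length in bytes */
    unsigned int nent = DIV_ROUND_UP(length, PAGE_SIZE);

    /* one scatterlist entry per page of the transfer */
    printf("%lu bytes -> %u entries\n", length, nent);   /* 3 entries */
    return 0;
}
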
/drivers/spi/
spi-topcliff-pch.c
125 int nent; member
799 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, in pch_spi_start_transfer()
802 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, in pch_spi_start_transfer()
1049 dma->nent = num; in pch_spi_handle_dma()
1108 dma->nent = num; in pch_spi_handle_dma()
/drivers/infiniband/hw/mlx4/
cq.c
100 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent) in mlx4_ib_alloc_cq_buf() argument
104 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, in mlx4_ib_alloc_cq_buf()
126 mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf); in mlx4_ib_alloc_cq_buf()
