Searched refs:nent (Results 1 – 25 of 34) sorted by relevance

/drivers/gpu/drm/nouveau/nvkm/engine/gr/
gk20a.c
44 int nent; in gk20a_gr_av_to_init() local
52 nent = (blob.size / sizeof(struct gk20a_fw_av)); in gk20a_gr_av_to_init()
53 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); in gk20a_gr_av_to_init()
62 for (i = 0; i < nent; i++) { in gk20a_gr_av_to_init()
94 int nent; in gk20a_gr_aiv_to_init() local
102 nent = (blob.size / sizeof(struct gk20a_fw_aiv)); in gk20a_gr_aiv_to_init()
103 pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1))); in gk20a_gr_aiv_to_init()
112 for (i = 0; i < nent; i++) { in gk20a_gr_aiv_to_init()
140 int nent; in gk20a_gr_av_to_method() local
148 nent = (blob.size / sizeof(struct gk20a_fw_av)); in gk20a_gr_av_to_method()
[all …]
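
The gk20a hits above show a common firmware-unpacking idiom: nent is derived by dividing the blob size by the per-entry size, and the allocation reserves one extra, zeroed slot as an end-of-list terminator. A minimal userspace C sketch of that sizing pattern (struct fw_av and blob_to_entries are hypothetical stand-ins, not the driver's types):

    #include <stdlib.h>
    #include <string.h>

    struct fw_av { unsigned int addr, value; };   /* models gk20a_fw_av */

    /* Count whole entries in the blob, then allocate nent + 1 slots so the
     * final zeroed entry terminates the list, as the vzalloc() call does. */
    static struct fw_av *blob_to_entries(const void *blob, size_t size, int *nent)
    {
        struct fw_av *init;

        *nent = (int)(size / sizeof(struct fw_av));
        init = calloc(*nent + 1, sizeof(*init)); /* calloc() zeroes the sentinel */
        if (init)
            memcpy(init, blob, *nent * sizeof(*init));
        return init;
    }
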
/drivers/tty/vt/
conmakehash.c
82 int i, nuni, nent; in main() local
272 nent = 0; in main()
275 while ( nent >= unicount[fp0] ) in main()
278 nent = 0; in main()
280 printf("0x%04x", unitable[fp0][nent++]); in main()
/drivers/infiniband/hw/qib/
qib_pcie.c
204 int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent) in qib_pcie_params() argument
222 maxvec = (nent && *nent) ? *nent : 1; in qib_pcie_params()
232 if (nent) in qib_pcie_params()
233 *nent = !dd->pcidev->msix_enabled ? 0 : nvec; in qib_pcie_params()
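
qib_pcie_params() uses nent as an optional in/out parameter: on entry it carries the requested MSI-X vector count (NULL or 0 meaning "pick a default"), and on return it reports how many vectors were actually enabled, with 0 signalling that MSI-X setup failed. A self-contained sketch of that calling convention (setup_vectors() and enable_msix() are hypothetical names):

    /* Stand-in for the real MSI-X enable call: returns vectors granted, or <0. */
    static int enable_msix(unsigned int maxvec) { return (int)maxvec; }

    /* nent: in  - requested vector count (NULL/0 selects a default of 1),
     *       out - vectors actually enabled (0 if MSI-X could not be set up). */
    static int setup_vectors(unsigned int *nent)
    {
        unsigned int maxvec = (nent && *nent) ? *nent : 1;
        int nvec = enable_msix(maxvec);

        if (nent)
            *nent = (nvec < 0) ? 0 : (unsigned int)nvec;
        return (nvec < 0) ? nvec : 0;
    }
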
/drivers/net/ethernet/mellanox/mlx5/core/
eq.c
263 u8 log_eq_size = order_base_2(param->nent + MLX5_NUM_SPARE_EQE); in create_map_eq()
635 .nent = MLX5_NUM_CMD_EQE, in create_async_eqs()
647 .nent = MLX5_NUM_ASYNC_EQE, in create_async_eqs()
656 .nent = /* TODO: sriov max_vf + */ 1, in create_async_eqs()
751 u32 nent = eq_get_size(eq); in mlx5_eq_get_eqe() local
754 eqe = get_eqe(eq, ci & (nent - 1)); in mlx5_eq_get_eqe()
755 eqe = ((eqe->owner & 1) ^ !!(ci & nent)) ? NULL : eqe; in mlx5_eq_get_eqe()
801 int nent; in create_comp_eqs() local
807 nent = MLX5_COMP_EQ_SIZE; in create_comp_eqs()
826 .nent = nent, in create_comp_eqs()
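
The mlx5 EQ hits show the owner-bit scheme used by a power-of-two event ring: the low bits of the ever-incrementing consumer index pick the slot (ci & (nent - 1)), and the next bit up (ci & nent) flips once per full pass over the ring. An entry belongs to software only while its owner bit matches that parity, so producer and consumer need no shared head/tail pointer. A userspace C model of the check in mlx5_eq_get_eqe():

    #include <stdbool.h>
    #include <stdint.h>

    struct eqe { uint8_t owner; /* ... event payload ... */ };

    /* Return the next valid entry, or NULL if hardware has not written it yet.
     * nent must be a power of two; ci is never masked by the caller - the two
     * AND operations below do both the indexing and the wrap detection. */
    static struct eqe *next_eqe(struct eqe *ring, uint32_t nent, uint32_t ci)
    {
        struct eqe *eqe = &ring[ci & (nent - 1)];
        bool parity = !!(ci & nent);          /* flips each time ci wraps */

        return ((eqe->owner & 1) ^ parity) ? NULL : eqe;
    }

The mlx4 variant further down tests the top bit of the owner byte (owner & 0x80) instead of bit 0, but the parity comparison is the same.
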
/drivers/infiniband/hw/mthca/
mthca_eq.c
184 mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1), in tavor_set_eq_ci()
230 unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE; in get_eqe()
466 int nent, in mthca_create_eq() argument
479 eq->nent = roundup_pow_of_two(max(nent, 2)); in mthca_create_eq()
480 npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE; in mthca_create_eq()
511 for (i = 0; i < eq->nent; ++i) in mthca_create_eq()
535 eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24); in mthca_create_eq()
560 eq->eqn, eq->nent); in mthca_create_eq()
593 int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) / in mthca_free_eq()
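
mthca_create_eq() rounds the requested count up to a power of two with a floor of 2, converts the resulting byte size into whole pages, and recovers log2 of the count with ffs(), which works because a power of two has exactly one bit set. A self-contained model of that arithmetic (the constants are illustrative, not mthca's):

    #include <strings.h>   /* ffs() */

    #define ENTRY_SIZE 32      /* models MTHCA_EQ_ENTRY_SIZE */
    #define PAGE_SZ    4096

    static unsigned int roundup_pow2(unsigned int n)
    {
        unsigned int r = 1;

        while (r < n)
            r <<= 1;
        return r;
    }

    /* nent becomes a power of two >= 2; npages is rounded up to cover all
     * entries; log_size = log2(nent) via ffs(), since one bit is set. */
    static void eq_sizing(int nent, unsigned int *out_nent,
                          unsigned int *npages, int *log_size)
    {
        *out_nent = roundup_pow2(nent < 2 ? 2 : (unsigned int)nent);
        *npages   = (*out_nent * ENTRY_SIZE + PAGE_SZ - 1) / PAGE_SZ;
        *log_size = ffs((int)*out_nent) - 1;
    }
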
mthca_allocator.c
160 int mthca_array_init(struct mthca_array *array, int nent) in mthca_array_init() argument
162 int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; in mthca_array_init()
178 void mthca_array_cleanup(struct mthca_array *array, int nent) in mthca_array_cleanup() argument
182 for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i) in mthca_array_cleanup()
mthca_cq.c
348 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent) in mthca_alloc_cq_buf() argument
353 ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE, in mthca_alloc_cq_buf()
360 for (i = 0; i < nent; ++i) in mthca_alloc_cq_buf()
768 int mthca_init_cq(struct mthca_dev *dev, int nent, in mthca_init_cq() argument
776 cq->ibcq.cqe = nent - 1; in mthca_init_cq()
814 err = mthca_alloc_cq_buf(dev, &cq->buf, nent); in mthca_init_cq()
828 cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24); in mthca_init_cq()
mthca_dev.h
421 int mthca_array_init(struct mthca_array *array, int nent);
422 void mthca_array_cleanup(struct mthca_array *array, int nent);
487 int mthca_init_cq(struct mthca_dev *dev, int nent,
498 int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);
mthca_cmd.c
664 int nent = 0; in mthca_map_cmd() local
694 pages[nent * 2] = cpu_to_be64(virt); in mthca_map_cmd()
698 pages[nent * 2 + 1] = in mthca_map_cmd()
704 if (++nent == MTHCA_MAILBOX_SIZE / 16) { in mthca_map_cmd()
705 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, in mthca_map_cmd()
709 nent = 0; in mthca_map_cmd()
714 if (nent) in mthca_map_cmd()
715 err = mthca_cmd(dev, mailbox->dma, nent, 0, op, in mthca_map_cmd()
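
mthca_map_cmd() (and, further down, mlx4_map_cmd() and mlxsw_pci_fw_area_init()) batch page descriptors the same way: fill a fixed-size mailbox, issue the firmware command when it is full, reset the counter, then flush whatever partial batch remains after the loop. A generic sketch, with submit_batch() standing in for the hardware command:

    #include <stdio.h>

    #define BATCH_MAX 4   /* models MTHCA_MAILBOX_SIZE / 16 */

    /* Stand-in for mthca_cmd()/mlx4_cmd(): consumes nent queued descriptors. */
    static int submit_batch(const long *items, int nent)
    {
        printf("submitting %d descriptor(s)\n", nent);
        return 0;
    }

    static int map_all(const long *pages, int count)
    {
        long batch[BATCH_MAX];
        int nent = 0, err = 0, i;

        for (i = 0; i < count; i++) {
            batch[nent] = pages[i];
            if (++nent == BATCH_MAX) {    /* mailbox full: flush and restart */
                err = submit_batch(batch, nent);
                if (err)
                    return err;
                nent = 0;
            }
        }
        if (nent)                         /* don't forget the partial tail */
            err = submit_batch(batch, nent);
        return err;
    }
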
mthca_provider.c
591 int nent; in mthca_create_cq() local
627 for (nent = 1; nent <= entries; nent <<= 1) in mthca_create_cq()
630 err = mthca_init_cq(to_mdev(ibdev), nent, context, in mthca_create_cq()
mthca_provider.h
95 int nent; member
/drivers/net/ethernet/mellanox/mlx4/
eq.c
110 unsigned long offset = (entry & (eq->nent - 1)) * eqe_size; in get_eqe()
124 return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe; in next_eqe_sw()
782 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
785 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
807 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
809 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
819 eq->cons_index, eqe->owner, eq->nent, in mlx4_eq_int()
822 !!(eq->cons_index & eq->nent) ? "HW" : "SW"); in mlx4_eq_int()
969 static int mlx4_create_eq(struct mlx4_dev *dev, int nent, in mlx4_create_eq() argument
983 eq->nent = roundup_pow_of_two(max(nent, 2)); in mlx4_create_eq()
[all …]
cq.c
341 int mlx4_cq_alloc(struct mlx4_dev *dev, int nent, in mlx4_cq_alloc() argument
381 cpu_to_be32((ilog2(nent) << 24) | in mlx4_cq_alloc()
393 err = mlx4_init_user_cqes(buf_addr, nent, in mlx4_cq_alloc()
398 mlx4_init_kernel_cqes(buf_addr, nent, in mlx4_cq_alloc()
fw.c
1525 int nent = 0; in mlx4_map_cmd() local
1555 pages[nent * 2] = cpu_to_be64(virt); in mlx4_map_cmd()
1559 pages[nent * 2 + 1] = in mlx4_map_cmd()
1565 if (++nent == MLX4_MAILBOX_SIZE / 16) { in mlx4_map_cmd()
1566 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, in mlx4_map_cmd()
1571 nent = 0; in mlx4_map_cmd()
1576 if (nent) in mlx4_map_cmd()
1577 err = mlx4_cmd(dev, mailbox->dma, nent, 0, op, in mlx4_map_cmd()
/drivers/tty/serial/
pch_uart.c
232 int nent; member
762 for (i = 0; i < priv->nent; i++, sg++) { in pch_dma_tx_complete()
770 priv->nent = 0; in pch_dma_tx_complete()
912 int nent; in dma_handle_tx() local
990 nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE); in dma_handle_tx()
991 if (!nent) { in dma_handle_tx()
996 priv->nent = nent; in dma_handle_tx()
998 for (i = 0; i < nent; i++, sg++) { in dma_handle_tx()
1003 if (i == (nent - 1)) in dma_handle_tx()
1010 priv->sg_tx_p, nent, DMA_MEM_TO_DEV, in dma_handle_tx()
[all …]
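
The pch_uart, atmel_serial, spi-topcliff-pch, and bcm-pdc-mailbox hits all follow the dma_map_sg() contract: the call returns how many entries were actually mapped (possibly fewer than submitted, since adjacent entries may be coalesced), 0 means failure, and every later loop and unmap must use the returned count rather than the submitted one. A minimal model of the pattern (map_sg() is a stub standing in for dma_map_sg()):

    struct scatterlist { unsigned long addr; unsigned int len; };

    /* Stub for dma_map_sg(): returns the number of mapped entries, 0 on error. */
    static int map_sg(struct scatterlist *sg, int num) { return num; }

    static int start_tx(struct scatterlist *sg, int num, int *saved_nent)
    {
        int i, nent = map_sg(sg, num);

        if (!nent)                  /* zero return: mapping failed outright */
            return -1;
        *saved_nent = nent;         /* keep for the matching unmap/sync */
        for (i = 0; i < nent; i++)  /* iterate the mapped count, not num */
            ;                       /* program one descriptor per entry */
        return 0;
    }
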
atmel_serial.c
1022 int ret, nent; in atmel_prepare_tx_dma() local
1041 nent = dma_map_sg(port->dev, in atmel_prepare_tx_dma()
1046 if (!nent) { in atmel_prepare_tx_dma()
1197 int ret, nent; in atmel_prepare_rx_dma() local
1218 nent = dma_map_sg(port->dev, in atmel_prepare_rx_dma()
1223 if (!nent) { in atmel_prepare_rx_dma()
/drivers/iommu/
omap-iommu.c
704 int nent = 1; in iopgtable_clear_entry_core() local
718 nent *= 16; in iopgtable_clear_entry_core()
722 bytes *= nent; in iopgtable_clear_entry_core()
723 memset(iopte, 0, nent * sizeof(*iopte)); in iopgtable_clear_entry_core()
725 flush_iopte_range(obj->dev, pt_dma, pt_offset, nent); in iopgtable_clear_entry_core()
736 nent = 1; /* for the next L1 entry */ in iopgtable_clear_entry_core()
740 nent *= 16; in iopgtable_clear_entry_core()
744 bytes *= nent; in iopgtable_clear_entry_core()
746 memset(iopgd, 0, nent * sizeof(*iopgd)); in iopgtable_clear_entry_core()
747 flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent); in iopgtable_clear_entry_core()
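
In the omap-iommu hit, nent starts at 1 and is multiplied by 16 when the mapping being torn down is a large page or supersection, because those are encoded as 16 identical consecutive page-table entries; the clear must therefore zero and flush the entire run, not just one slot. A compact model of that widening (clear_entry() is a hypothetical name):

    #include <stdbool.h>
    #include <stdint.h>
    #include <string.h>

    /* Clear one mapping; large mappings occupy 16 consecutive slots, so the
     * memset is widened to the whole run. Returns the byte count to flush. */
    static size_t clear_entry(uint32_t *pte, bool is_large)
    {
        int nent = 1;

        if (is_large)
            nent *= 16;             /* 16 identical entries per large mapping */
        memset(pte, 0, nent * sizeof(*pte));
        return nent * sizeof(*pte); /* caller flushes this range to the walker */
    }
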
/drivers/infiniband/hw/mlx5/
cq.c
74 static u8 sw_ownership_bit(int n, int nent) in sw_ownership_bit() argument
76 return (n & nent) ? 1 : 0; in sw_ownership_bit()
660 int nent, in alloc_cq_frag_buf() argument
669 nent * cqe_size, in alloc_cq_frag_buf()
678 buf->nent = nent; in alloc_cq_frag_buf()
862 for (i = 0; i < buf->nent; i++) { in init_cq_frag_buf()
1228 (i + 1) & cq->resize_buf->nent); in copy_resize_cqes()
1230 sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent); in copy_resize_cqes()
/drivers/net/ethernet/mellanox/mlxsw/
pci.c
1317 int nent = 0; in mlxsw_pci_fw_area_init() local
1339 mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr); in mlxsw_pci_fw_area_init()
1340 mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */ in mlxsw_pci_fw_area_init()
1341 if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) { in mlxsw_pci_fw_area_init()
1342 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
1345 nent = 0; in mlxsw_pci_fw_area_init()
1350 if (nent) { in mlxsw_pci_fw_area_init()
1351 err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent); in mlxsw_pci_fw_area_init()
/drivers/mailbox/
bcm-pdc-mailbox.c
1202 int nent; in pdc_send_data() local
1211 nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE); in pdc_send_data()
1212 if (unlikely(nent == 0)) in pdc_send_data()
1218 nent = dma_map_sg(dev, mssg->spu.dst, dst_nent, in pdc_send_data()
1220 if (unlikely(nent == 0)) { in pdc_send_data()
/drivers/vdpa/mlx5/core/
mr.c
44 for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) { in populate_mtts()
276 mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0); in map_direct_mr()
277 if (!mr->nent) { in map_direct_mr()
mlx5_vdpa.h
22 int nent; member
/drivers/rapidio/
rio_cm.c
329 static void riocm_rx_fill(struct cm_dev *cm, int nent) in riocm_rx_fill() argument
336 for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) { in riocm_rx_fill()
343 nent--; in riocm_rx_fill()
1566 u32 nent; in cm_ep_get_list() local
1587 nent = min(info[0], cm->npeers); in cm_ep_get_list()
1588 buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL); in cm_ep_get_list()
1599 if (++i == nent) in cm_ep_get_list()
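
cm_ep_get_list() clamps the caller-supplied count against the number of peers it actually knows about before sizing the allocation, the standard defence when a count originates in userspace; the two extra u32 slots presumably carry header words alongside the entries. A sketch of the clamp-then-allocate idiom (get_list() is a hypothetical name):

    #include <stdint.h>
    #include <stdlib.h>

    /* Never size an allocation from an unvalidated count: clamp the request
     * to what is actually available first, then add fixed header slots. */
    static uint32_t *get_list(uint32_t requested, uint32_t available,
                              uint32_t *out_nent)
    {
        uint32_t nent = requested < available ? requested : available;

        *out_nent = nent;
        return calloc(nent + 2, sizeof(uint32_t)); /* +2 header words */
    }
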
/drivers/spi/
spi-topcliff-pch.c
116 int nent; member
796 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent, in pch_spi_start_transfer()
799 dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent, in pch_spi_start_transfer()
1048 dma->nent = num; in pch_spi_handle_dma()
1110 dma->nent = num; in pch_spi_handle_dma()
/drivers/infiniband/hw/mlx4/
cq.c
101 static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent) in mlx4_ib_alloc_cq_buf() argument
105 err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size, in mlx4_ib_alloc_cq_buf()
127 mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf); in mlx4_ib_alloc_cq_buf()
