/drivers/gpu/drm/nouveau/nvkm/engine/gr/
D | gk20a.c
      44  int nent;  in gk20a_gr_av_to_init() local
      52  nent = (blob.size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_init()
      53  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_av_to_init()
      62  for (i = 0; i < nent; i++) {  in gk20a_gr_av_to_init()
      94  int nent;  in gk20a_gr_aiv_to_init() local
     102  nent = (blob.size / sizeof(struct gk20a_fw_aiv));  in gk20a_gr_aiv_to_init()
     103  pack = vzalloc((sizeof(*pack) * 2) + (sizeof(*init) * (nent + 1)));  in gk20a_gr_aiv_to_init()
     112  for (i = 0; i < nent; i++) {  in gk20a_gr_aiv_to_init()
     140  int nent;  in gk20a_gr_av_to_method() local
     148  nent = (blob.size / sizeof(struct gk20a_fw_av));  in gk20a_gr_av_to_method()
     [all …]

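The gk20a matches above all follow one sizing pattern: the record count is derived by dividing the firmware blob size by the record size, and one extra zeroed slot is allocated so the unpacked table is terminated. A minimal userspace sketch of that pattern, with hypothetical stand-in types (not the nouveau structures themselves):

#include <stdlib.h>
#include <stddef.h>

struct fw_av {                 /* stand-in for struct gk20a_fw_av */
	unsigned int addr;
	unsigned int data;
};

struct init_entry {            /* stand-in for the unpacked init entry */
	unsigned int addr;
	unsigned int data;
};

static struct init_entry *unpack_blob(const void *blob, size_t size)
{
	const struct fw_av *av = blob;
	size_t nent = size / sizeof(struct fw_av);
	struct init_entry *init;
	size_t i;

	/* nent + 1: the final zeroed entry terminates the table */
	init = calloc(nent + 1, sizeof(*init));
	if (!init)
		return NULL;

	for (i = 0; i < nent; i++) {
		init[i].addr = av[i].addr;
		init[i].data = av[i].data;
	}
	return init;
}

int main(void)
{
	struct fw_av blob[3] = { {0x100, 1}, {0x104, 2}, {0x108, 3} };
	struct init_entry *init = unpack_blob(blob, sizeof(blob));

	/* init[3] is the zeroed terminator */
	free(init);
	return 0;
}
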
/drivers/tty/vt/
D | conmakehash.c
      82  int i, nuni, nent;  in main() local
     272  nent = 0;  in main()
     275  while ( nent >= unicount[fp0] )  in main()
     278  nent = 0;  in main()
     280  printf("0x%04x", unitable[fp0][nent++]);  in main()

/drivers/net/ethernet/mellanox/mlx5/core/
D | eq.c
     272  for (i = 0; i < eq->nent; i++) {  in init_eq_buf()
     298  eq->nent = roundup_pow_of_two(param->nent + MLX5_NUM_SPARE_EQE);  in create_map_eq()
     300  err = mlx5_buf_alloc(dev, eq->nent * MLX5_EQE_SIZE, &eq->buf);  in create_map_eq()
     327  MLX5_SET(eqc, eqc, log_eq_size, ilog2(eq->nent));  in create_map_eq()
     647  .nent = MLX5_NUM_CMD_EQE,  in create_async_eqs()
     660  .nent = MLX5_NUM_ASYNC_EQE,  in create_async_eqs()
     670  .nent = /* TODO: sriov max_vf + */ 1,  in create_async_eqs()
     764  eqe = get_eqe(eq, ci & (eq->nent - 1));  in mlx5_eq_get_eqe()
     765  eqe = ((eqe->owner & 1) ^ !!(ci & eq->nent)) ? NULL : eqe;  in mlx5_eq_get_eqe()
     811  int nent;  in create_comp_eqs() local
     [all …]

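In create_map_eq() (lines 298 and 327), the requested queue depth is padded with spare EQEs and rounded up to a power of two, so the consumer index can be masked rather than taken modulo, and the depth is then programmed to hardware as a log2. A self-contained sketch of that arithmetic, with illustrative names and constants:

#include <assert.h>

static unsigned int roundup_pow_of_two_u32(unsigned int n)
{
	unsigned int p = 1;

	while (p < n)
		p <<= 1;
	return p;
}

static unsigned int ilog2_u32(unsigned int n)
{
	unsigned int log = 0;

	while (n >>= 1)
		log++;
	return log;
}

int main(void)
{
	unsigned int requested = 1000, spare = 128;  /* illustrative values */
	unsigned int nent = roundup_pow_of_two_u32(requested + spare);

	assert(nent == 2048);           /* 1128 rounds up to 2048  */
	assert(ilog2_u32(nent) == 11);  /* value programmed as the log size */
	return 0;
}
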
/drivers/net/ethernet/mellanox/mlx5/core/lib/
D | eq.h
      33  int nent;  member
      57  struct mlx5_eqe *eqe = get_eqe(eq, eq->cons_index & (eq->nent - 1));  in next_eqe_sw()
      59  return ((eqe->owner & 1) ^ !!(eq->cons_index & eq->nent)) ? NULL : eqe;  in next_eqe_sw()

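next_eqe_sw() above (and mlx5_eq_get_eqe() at lines 764-765) is the classic ownership-bit test for a power-of-two event ring: (cons_index & (nent - 1)) selects the slot, (cons_index & nent) flips on every lap around the ring, and an entry belongs to software only when its owner bit matches the current lap parity. A compile-and-run userspace model of the test (types and values illustrative, not the mlx5 structures):

#include <stdio.h>

struct eqe {
	unsigned char owner;    /* low bit: which lap hardware wrote it on */
};

static struct eqe *next_sw(struct eqe *ring, unsigned int nent,
			   unsigned int cons_index)
{
	struct eqe *e = &ring[cons_index & (nent - 1)];

	/* parity mismatch: hardware still owns this entry */
	return ((e->owner & 1) ^ !!(cons_index & nent)) ? NULL : e;
}

int main(void)
{
	struct eqe ring[4] = { {0}, {0}, {1}, {1} };

	/* first lap (ci & 4 == 0): owner bit 0 means software-owned */
	printf("%s\n", next_sw(ring, 4, 0) ? "sw" : "hw"); /* sw */
	printf("%s\n", next_sw(ring, 4, 2) ? "sw" : "hw"); /* hw */
	/* second lap (ci & 4 != 0): the polarity inverts */
	printf("%s\n", next_sw(ring, 4, 4) ? "sw" : "hw"); /* hw */
	printf("%s\n", next_sw(ring, 4, 6) ? "sw" : "hw"); /* sw */
	return 0;
}
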
/drivers/infiniband/hw/qib/
D | qib_pcie.c
     211  int qib_pcie_params(struct qib_devdata *dd, u32 minw, u32 *nent)  in qib_pcie_params() argument
     229  maxvec = (nent && *nent) ? *nent : 1;  in qib_pcie_params()
     239  if (nent)  in qib_pcie_params()
     240  *nent = !dd->pcidev->msix_enabled ? 0 : nvec;  in qib_pcie_params()

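qib_pcie_params() uses *nent as an in/out parameter: the caller passes the desired MSI-X vector count in and reads back how many vectors were actually granted (0 when MSI-X ended up disabled). A simplified kernel-style sketch of that convention, built on pci_alloc_irq_vectors(); this is an illustration of the pattern, not the qib code:

#include <linux/pci.h>

static int request_msix(struct pci_dev *pdev, u32 *nent)
{
	int nvec, maxvec = (nent && *nent) ? *nent : 1;

	nvec = pci_alloc_irq_vectors(pdev, 1, maxvec, PCI_IRQ_MSIX);
	if (nent)
		/* report back the granted count, or 0 if MSI-X is off */
		*nent = (nvec < 0 || !pdev->msix_enabled) ? 0 : nvec;
	return nvec < 0 ? nvec : 0;
}
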
/drivers/infiniband/hw/mthca/
D | mthca_eq.c
     184  mthca_write64(MTHCA_EQ_DB_SET_CI | eq->eqn, ci & (eq->nent - 1),  in tavor_set_eq_ci()
     230  unsigned long off = (entry & (eq->nent - 1)) * MTHCA_EQ_ENTRY_SIZE;  in get_eqe()
     466  int nent,  in mthca_create_eq() argument
     479  eq->nent = roundup_pow_of_two(max(nent, 2));  in mthca_create_eq()
     480  npages = ALIGN(eq->nent * MTHCA_EQ_ENTRY_SIZE, PAGE_SIZE) / PAGE_SIZE;  in mthca_create_eq()
     511  for (i = 0; i < eq->nent; ++i)  in mthca_create_eq()
     535  eq_context->logsize_usrpage = cpu_to_be32((ffs(eq->nent) - 1) << 24);  in mthca_create_eq()
     560  eq->eqn, eq->nent);  in mthca_create_eq()
     593  int npages = (eq->nent * MTHCA_EQ_ENTRY_SIZE + PAGE_SIZE - 1) /  in mthca_free_eq()

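mthca sizes the EQ with roundup_pow_of_two(max(nent, 2)) and then encodes the depth for hardware as ffs(eq->nent) - 1 (line 535), which equals log2(nent) when nent is a power of two. A small check of that identity (the field layout is illustrative):

#include <strings.h>   /* ffs() */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	int nent = 256;                       /* must be a power of two */
	uint32_t logsize = (uint32_t)(ffs(nent) - 1) << 24;

	assert((ffs(nent) - 1) == 8);         /* log2(256) == 8 */
	assert((logsize >> 24) == 8);         /* top byte carries the log size */
	return 0;
}
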
D | mthca_allocator.c
     160  int mthca_array_init(struct mthca_array *array, int nent)  in mthca_array_init() argument
     162  int npage = (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE;  in mthca_array_init()
     178  void mthca_array_cleanup(struct mthca_array *array, int nent)  in mthca_array_cleanup() argument
     182  for (i = 0; i < (nent * sizeof (void *) + PAGE_SIZE - 1) / PAGE_SIZE; ++i)  in mthca_array_cleanup()

D | mthca_cq.c
     348  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent)  in mthca_alloc_cq_buf() argument
     353  ret = mthca_buf_alloc(dev, nent * MTHCA_CQ_ENTRY_SIZE,  in mthca_alloc_cq_buf()
     360  for (i = 0; i < nent; ++i)  in mthca_alloc_cq_buf()
     768  int mthca_init_cq(struct mthca_dev *dev, int nent,  in mthca_init_cq() argument
     776  cq->ibcq.cqe = nent - 1;  in mthca_init_cq()
     814  err = mthca_alloc_cq_buf(dev, &cq->buf, nent);  in mthca_init_cq()
     828  cq_context->logsize_usrpage = cpu_to_be32((ffs(nent) - 1) << 24);  in mthca_init_cq()

D | mthca_dev.h
     421  int mthca_array_init(struct mthca_array *array, int nent);
     422  void mthca_array_cleanup(struct mthca_array *array, int nent);
     487  int mthca_init_cq(struct mthca_dev *dev, int nent,
     498  int mthca_alloc_cq_buf(struct mthca_dev *dev, struct mthca_cq_buf *buf, int nent);

D | mthca_cmd.c
     664  int nent = 0;  in mthca_map_cmd() local
     694  pages[nent * 2] = cpu_to_be64(virt);  in mthca_map_cmd()
     698  pages[nent * 2 + 1] =  in mthca_map_cmd()
     704  if (++nent == MTHCA_MAILBOX_SIZE / 16) {  in mthca_map_cmd()
     705  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()
     709  nent = 0;  in mthca_map_cmd()
     714  if (nent)  in mthca_map_cmd()
     715  err = mthca_cmd(dev, mailbox->dma, nent, 0, op,  in mthca_map_cmd()

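mthca_map_cmd() shows a common firmware-mailbox batching idiom: page entries accumulate in a fixed-size mailbox, the command fires each time the mailbox fills, and one final command flushes the partial remainder. A userspace model of the control flow, with a stand-in submit() and a hypothetical batch size:

#include <stdio.h>

#define BATCH_MAX 4   /* stands in for MTHCA_MAILBOX_SIZE / 16 */

static int submit(int nent)
{
	printf("submitting %d entries\n", nent);
	return 0;
}

static int map_pages(int npages)
{
	int i, err, nent = 0;

	for (i = 0; i < npages; i++) {
		/* ... fill slot `nent` of the mailbox here ... */
		if (++nent == BATCH_MAX) {
			err = submit(nent);
			if (err)
				return err;
			nent = 0;
		}
	}
	if (nent)                      /* flush the partial final batch */
		return submit(nent);
	return 0;
}

int main(void)
{
	return map_pages(10);          /* submits 4 + 4 + 2 entries */
}
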
D | mthca_provider.c
     609  int nent;  in mthca_create_cq() local
     645  for (nent = 1; nent <= entries; nent <<= 1)  in mthca_create_cq()
     648  err = mthca_init_cq(to_mdev(ibdev), nent, context,  in mthca_create_cq()

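The loop at line 645 rounds the requested CQE count up to the smallest power of two strictly greater than `entries`; since mthca reports nent - 1 usable entries (mthca_cq.c line 776), the deliberate overshoot keeps the caller's request covered. A quick check:

#include <assert.h>

int main(void)
{
	int entries = 100, nent;

	for (nent = 1; nent <= entries; nent <<= 1)
		; /* nothing */

	assert(nent == 128);            /* smallest power of two > 100 */
	assert(nent - 1 >= entries);    /* usable count still fits the request */
	return 0;
}
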
D | mthca_provider.h
      95  int nent;  member

/drivers/net/ethernet/mellanox/mlx4/
D | eq.c
     110  unsigned long offset = (entry & (eq->nent - 1)) * eqe_size;  in get_eqe()
     124  return !!(eqe->owner & 0x80) ^ !!(eq->cons_index & eq->nent) ? NULL : eqe;  in next_eqe_sw()
     782  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     785  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     807  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     809  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     819  eq->cons_index, eqe->owner, eq->nent,  in mlx4_eq_int()
     822  !!(eq->cons_index & eq->nent) ? "HW" : "SW");  in mlx4_eq_int()
     969  static int mlx4_create_eq(struct mlx4_dev *dev, int nent,  in mlx4_create_eq() argument
     983  eq->nent = roundup_pow_of_two(max(nent, 2));  in mlx4_create_eq()
     [all …]

D | cq.c
     341  int mlx4_cq_alloc(struct mlx4_dev *dev, int nent,  in mlx4_cq_alloc() argument
     381  cpu_to_be32((ilog2(nent) << 24) |  in mlx4_cq_alloc()
     393  err = mlx4_init_user_cqes(buf_addr, nent,  in mlx4_cq_alloc()
     398  mlx4_init_kernel_cqes(buf_addr, nent,  in mlx4_cq_alloc()

/drivers/tty/serial/
D | pch_uart.c
     232  int nent;  member
     766  for (i = 0; i < priv->nent; i++, sg++) {  in pch_dma_tx_complete()
     774  priv->nent = 0;  in pch_dma_tx_complete()
     916  int nent;  in dma_handle_tx() local
     994  nent = dma_map_sg(port->dev, sg, num, DMA_TO_DEVICE);  in dma_handle_tx()
     995  if (!nent) {  in dma_handle_tx()
    1000  priv->nent = nent;  in dma_handle_tx()
    1002  for (i = 0; i < nent; i++, sg++) {  in dma_handle_tx()
    1007  if (i == (nent - 1))  in dma_handle_tx()
    1014  priv->sg_tx_p, nent, DMA_MEM_TO_DEV,  in dma_handle_tx()
    [all …]

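dma_handle_tx() illustrates the dma_map_sg() contract: the return value is the number of entries actually mapped (0 on failure, and possibly fewer than submitted after coalescing), and it is this returned count, not the original one, that must drive the transfer and the eventual unmap. A kernel-style sketch of the pattern (illustrative types, not the pch_uart code):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/errno.h>

struct tx_state {
	struct scatterlist *sg;
	int nent;                     /* mapped count, kept for the unmap path */
};

static int start_tx(struct device *dev, struct tx_state *st,
		    struct scatterlist *sg, int num)
{
	int nent = dma_map_sg(dev, sg, num, DMA_TO_DEVICE);

	if (!nent)
		return -EIO;          /* mapping failed */

	st->sg = sg;
	st->nent = nent;              /* the completion handler unmaps nent */
	return 0;
}
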
/drivers/iommu/
D | omap-iommu.c
     705  int nent = 1;  in iopgtable_clear_entry_core() local
     719  nent *= 16;  in iopgtable_clear_entry_core()
     723  bytes *= nent;  in iopgtable_clear_entry_core()
     724  memset(iopte, 0, nent * sizeof(*iopte));  in iopgtable_clear_entry_core()
     726  flush_iopte_range(obj->dev, pt_dma, pt_offset, nent);  in iopgtable_clear_entry_core()
     737  nent = 1; /* for the next L1 entry */  in iopgtable_clear_entry_core()
     741  nent *= 16;  in iopgtable_clear_entry_core()
     745  bytes *= nent;  in iopgtable_clear_entry_core()
     747  memset(iopgd, 0, nent * sizeof(*iopgd));  in iopgtable_clear_entry_core()
     748  flush_iopte_range(obj->dev, obj->pd_dma, pd_offset, nent);  in iopgtable_clear_entry_core()

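The nent *= 16 steps reflect the ARM short-descriptor page-table layout used by the OMAP IOMMU: a 64 KiB large page is represented by 16 identical consecutive L2 entries (and a 16 MiB supersection by 16 L1 entries), so tearing down one mapping can mean zeroing 16 slots at once. A userspace model of that clear (constants illustrative):

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define ENTRIES_PER_LARGE_PAGE 16

static void clear_entry(uint32_t *pte, int is_large)
{
	int nent = 1;

	if (is_large)
		nent *= ENTRIES_PER_LARGE_PAGE;

	memset(pte, 0, nent * sizeof(*pte));
	/* a real driver would now flush these nent entries to memory */
}

int main(void)
{
	uint32_t l2_table[256];

	memset(l2_table, 0xff, sizeof(l2_table));
	clear_entry(&l2_table[16], 1);   /* wipes slots 16..31 */
	printf("slot 15: %08x, slot 16: %08x\n",
	       l2_table[15], l2_table[16]);
	return 0;
}
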
/drivers/infiniband/hw/mlx5/
D | cq.c
      74  static u8 sw_ownership_bit(int n, int nent)  in sw_ownership_bit() argument
      76  return (n & nent) ? 1 : 0;  in sw_ownership_bit()
     660  int nent,  in alloc_cq_frag_buf() argument
     669  nent * cqe_size,  in alloc_cq_frag_buf()
     678  buf->nent = nent;  in alloc_cq_frag_buf()
     847  for (i = 0; i < buf->nent; i++) {  in init_cq_frag_buf()
    1217  (i + 1) & cq->resize_buf->nent);  in copy_resize_cqes()
    1219  sw_own = sw_ownership_bit(i + 1, cq->resize_buf->nent);  in copy_resize_cqes()

/drivers/net/ethernet/mellanox/mlxsw/
D | pci.c
    1233  int nent = 0;  in mlxsw_pci_fw_area_init() local
    1255  mlxsw_cmd_mbox_map_fa_pa_set(mbox, nent, mem_item->mapaddr);  in mlxsw_pci_fw_area_init()
    1256  mlxsw_cmd_mbox_map_fa_log2size_set(mbox, nent, 0); /* 1 page */  in mlxsw_pci_fw_area_init()
    1257  if (++nent == MLXSW_CMD_MAP_FA_VPM_ENTRIES_MAX) {  in mlxsw_pci_fw_area_init()
    1258  err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);  in mlxsw_pci_fw_area_init()
    1261  nent = 0;  in mlxsw_pci_fw_area_init()
    1266  if (nent) {  in mlxsw_pci_fw_area_init()
    1267  err = mlxsw_cmd_map_fa(mlxsw_pci->core, mbox, nent);  in mlxsw_pci_fw_area_init()

/drivers/vdpa/mlx5/core/
D | mr.c
      43  for_each_sg(mr->sg_head.sgl, sg, mr->nent, i) {  in populate_mtts()
     275  mr->nent = dma_map_sg_attrs(dma, mr->sg_head.sgl, mr->nsg, DMA_BIDIRECTIONAL, 0);  in map_direct_mr()
     276  if (!mr->nent) {  in map_direct_mr()

D | mlx5_vdpa.h
      22  int nent;  member

/drivers/mailbox/
D | bcm-pdc-mailbox.c
    1202  int nent;  in pdc_send_data() local
    1211  nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE);  in pdc_send_data()
    1212  if (unlikely(nent == 0))  in pdc_send_data()
    1218  nent = dma_map_sg(dev, mssg->spu.dst, dst_nent,  in pdc_send_data()
    1220  if (unlikely(nent == 0)) {  in pdc_send_data()

/drivers/rapidio/
D | rio_cm.c
     329  static void riocm_rx_fill(struct cm_dev *cm, int nent)  in riocm_rx_fill() argument
     336  for (i = 0; i < RIOCM_RX_RING_SIZE && cm->rx_slots && nent; i++) {  in riocm_rx_fill()
     343  nent--;  in riocm_rx_fill()
    1566  u32 nent;  in cm_ep_get_list() local
    1587  nent = min(info[0], cm->npeers);  in cm_ep_get_list()
    1588  buf = kcalloc(nent + 2, sizeof(u32), GFP_KERNEL);  in cm_ep_get_list()
    1599  if (++i == nent)  in cm_ep_get_list()

/drivers/spi/
D | spi-topcliff-pch.c
     116  int nent;  member
     796  dma_sync_sg_for_cpu(&data->master->dev, dma->sg_rx_p, dma->nent,  in pch_spi_start_transfer()
     799  dma_sync_sg_for_cpu(&data->master->dev, dma->sg_tx_p, dma->nent,  in pch_spi_start_transfer()
    1048  dma->nent = num;  in pch_spi_handle_dma()
    1110  dma->nent = num;  in pch_spi_handle_dma()

/drivers/infiniband/hw/mlx4/
D | cq.c
     101  static int mlx4_ib_alloc_cq_buf(struct mlx4_ib_dev *dev, struct mlx4_ib_cq_buf *buf, int nent)  in mlx4_ib_alloc_cq_buf() argument
     105  err = mlx4_buf_alloc(dev->dev, nent * dev->dev->caps.cqe_size,  in mlx4_ib_alloc_cq_buf()
     127  mlx4_buf_free(dev->dev, nent * buf->entry_size, &buf->buf);  in mlx4_ib_alloc_cq_buf()

/drivers/vdpa/mlx5/net/
D | mlx5_vnet.c
      50  int nent;  member
     261  static int cq_frag_buf_alloc(struct mlx5_vdpa_net *ndev, struct mlx5_vdpa_cq_buf *buf, int nent)  in cq_frag_buf_alloc() argument
     268  err = mlx5_frag_buf_alloc_node(ndev->mvdev.mdev, nent * MLX5_VDPA_CQE_SIZE, frag_buf,  in cq_frag_buf_alloc()
     276  buf->nent = nent;  in cq_frag_buf_alloc()
     305  for (i = 0; i < buf->nent; i++) {  in cq_frag_buf_init()