/drivers/vfio/ |
D | vfio_iommu_type1.c |
      81  int prot; /* IOMMU_CACHE */    member
      90  int prot; /* IOMMU_READ/WRITE */    member
     115  static int put_pfn(unsigned long pfn, int prot);
     250  ret = put_pfn(vpfn->pfn, dma->prot);    in vfio_iova_put_vfio_pfn()
     327  static int put_pfn(unsigned long pfn, int prot)    in put_pfn() argument
     331  if (prot & IOMMU_WRITE)    in put_pfn()
     340  int prot, unsigned long *pfn)    in vaddr_get_pfn() argument
     348  if (prot & IOMMU_WRITE)    in vaddr_get_pfn()
     408  ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, pfn_base);    in vfio_pin_pages_remote()
     421  put_pfn(*pfn_base, dma->prot);    in vfio_pin_pages_remote()
    [all …]
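The put_pfn() references above show the vfio unpin pattern: a page that was pinned for a writable mapping (IOMMU_WRITE set in prot) must be marked dirty before its reference is dropped, because the device may have written to it without the CPU noticing. A minimal sketch of that pattern, assuming the <linux/mm.h> helpers; close to but not verbatim from the driver:

    /* Sketch: release a pinned pfn, dirtying the page first when the
     * IOMMU mapping allowed device writes. */
    static int put_pfn(unsigned long pfn, int prot)
    {
        if (pfn_valid(pfn)) {
            struct page *page = pfn_to_page(pfn);

            if (prot & IOMMU_WRITE)
                set_page_dirty_lock(page);
            put_page(page);
            return 1;               /* one reference dropped */
        }
        return 0;                   /* not a struct-page pfn (e.g. MMIO) */
    }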
|
/drivers/net/wireless/mediatek/mt7601u/ |
D | mac.c |
     197  u32 prot[6];    in mt7601u_mac_set_protection() local
     201  prot[0] = MT_PROT_NAV_SHORT |    in mt7601u_mac_set_protection()
     204  prot[1] = prot[0];    in mt7601u_mac_set_protection()
     206  prot[1] |= MT_PROT_CTRL_CTS2SELF;    in mt7601u_mac_set_protection()
     208  prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20;    in mt7601u_mac_set_protection()
     209  prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL;    in mt7601u_mac_set_protection()
     212  prot[2] |= MT_PROT_RATE_CCK_11;    in mt7601u_mac_set_protection()
     213  prot[3] |= MT_PROT_RATE_CCK_11;    in mt7601u_mac_set_protection()
     214  prot[4] |= MT_PROT_RATE_CCK_11;    in mt7601u_mac_set_protection()
     215  prot[5] |= MT_PROT_RATE_CCK_11;    in mt7601u_mac_set_protection()
    [all …]
|
/drivers/net/ethernet/mellanox/mlx4/ |
D | mcg.c |
     158  u32 prot;    in new_steering_entry() local
     206  prot = be32_to_cpu(mgm->members_count) >> 30;    in new_steering_entry()
     221  mgm->members_count = cpu_to_be32(members_count | (prot << 30));    in new_steering_entry()
     428  u32 prot;    in add_promisc_qp() local
     474  prot = be32_to_cpu(mgm->members_count) >> 30;    in add_promisc_qp()
     505  (prot << 30));    in add_promisc_qp()
     695  u8 *gid, enum mlx4_protocol prot,    in find_entry() argument
     704  u8 op_mod = (prot == MLX4_PROT_ETH) ?    in find_entry()
     739  be32_to_cpu(mgm->members_count) >> 30 == prot)    in find_entry()
    1105  int block_mcast_loopback, enum mlx4_protocol prot,    in mlx4_qp_attach_common() argument
    [all …]
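All of the mcg.c hits revolve around one packing trick: the 2-bit mlx4 protocol number shares a big-endian 32-bit word with a 30-bit member count. A sketch of the encode/decode with hypothetical helper names (the mgm_* functions below are illustrations, not mlx4 API):

    /* Hypothetical helpers: prot lives in the top 2 bits of the
     * big-endian members_count word, the count in the low 30 bits. */
    static u32 mgm_unpack_prot(__be32 members_count)
    {
        return be32_to_cpu(members_count) >> 30;
    }

    static u32 mgm_unpack_count(__be32 members_count)
    {
        return be32_to_cpu(members_count) & 0x3fffffff;
    }

    static __be32 mgm_pack(u32 count, u32 prot)
    {
        return cpu_to_be32(count | (prot << 30));
    }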
|
/drivers/iommu/ |
D | io-pgtable-arm-v7s.c |
     271  static arm_v7s_iopte arm_v7s_prot_to_pte(int prot, int lvl,    in arm_v7s_prot_to_pte() argument
     277  if (!(prot & IOMMU_MMIO))    in arm_v7s_prot_to_pte()
     281  if (!(prot & IOMMU_PRIV))    in arm_v7s_prot_to_pte()
     283  if (!(prot & IOMMU_WRITE))    in arm_v7s_prot_to_pte()
     288  if ((prot & IOMMU_NOEXEC) && ap)    in arm_v7s_prot_to_pte()
     290  if (prot & IOMMU_MMIO)    in arm_v7s_prot_to_pte()
     292  else if (prot & IOMMU_CACHE)    in arm_v7s_prot_to_pte()
     307  int prot = IOMMU_READ;    in arm_v7s_pte_to_prot() local
     311  prot |= IOMMU_WRITE;    in arm_v7s_pte_to_prot()
     313  prot |= IOMMU_PRIV;    in arm_v7s_pte_to_prot()
    [all …]
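arm_v7s_prot_to_pte() and arm_v7s_pte_to_prot() translate between the generic IOMMU_READ/WRITE/PRIV/NOEXEC flags and ARMv7 short-descriptor bits, in both directions. A simplified decode sketch with hypothetical bit names (the real driver uses the v7s AP/TEX/XN encodings):

    /* PTE_* below are illustrative placeholders, not the v7s bits. */
    #define PTE_WRITEABLE   (1 << 0)
    #define PTE_UNPRIV      (1 << 1)
    #define PTE_XN          (1 << 2)

    static int pte_to_prot(unsigned int pte)
    {
        int prot = IOMMU_READ;      /* a mapped entry is readable */

        if (pte & PTE_WRITEABLE)
            prot |= IOMMU_WRITE;
        if (!(pte & PTE_UNPRIV))    /* note the inversion: permission */
            prot |= IOMMU_PRIV;     /* bits express restrictions */
        if (pte & PTE_XN)
            prot |= IOMMU_NOEXEC;
        return prot;
    }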
|
D | io-pgtable-arm.c |
     276  phys_addr_t paddr, arm_lpae_iopte prot,    in __arm_lpae_init_pte() argument
     279  arm_lpae_iopte pte = prot;    in __arm_lpae_init_pte()
     297  arm_lpae_iopte prot, int lvl,    in arm_lpae_init_pte() argument
     319  __arm_lpae_init_pte(data, paddr, prot, lvl, ptep);    in arm_lpae_init_pte()
     356  phys_addr_t paddr, size_t size, arm_lpae_iopte prot,    in __arm_lpae_map() argument
     369  return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);    in __arm_lpae_map()
     399  return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);    in __arm_lpae_map()
     403  int prot)    in arm_lpae_prot_to_pte() argument
     411  if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))    in arm_lpae_prot_to_pte()
     414  if (!(prot & IOMMU_PRIV))    in arm_lpae_prot_to_pte()
    [all …]
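The arm_lpae_prot_to_pte() lines (411, 414) show the stage-1 permission logic: the LPAE format expresses read-only and unprivileged access as restriction bits, so they are set when the corresponding IOMMU flag is absent. A condensed sketch using the ARM_LPAE_PTE_* names, which are local to io-pgtable-arm.c:

    /* Condensed from the referenced logic: readable-but-not-writable
     * gets the AP read-only bit; not-privileged gets the unprivileged
     * access bit. */
    arm_lpae_iopte pte = ARM_LPAE_PTE_nG;

    if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
        pte |= ARM_LPAE_PTE_AP_RDONLY;
    if (!(prot & IOMMU_PRIV))
        pte |= ARM_LPAE_PTE_AP_UNPRIV;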
|
D | dma-iommu.c |
     344  int prot = coherent ? IOMMU_CACHE : 0;    in dma_info_to_prot() local
     347  prot |= IOMMU_PRIV;    in dma_info_to_prot()
     351  return prot | IOMMU_READ | IOMMU_WRITE;    in dma_info_to_prot()
     353  return prot | IOMMU_READ;    in dma_info_to_prot()
     355  return prot | IOMMU_WRITE;    in dma_info_to_prot()
     527  unsigned long attrs, int prot, dma_addr_t *handle,    in iommu_dma_alloc() argument
     563  if (!(prot & IOMMU_CACHE)) {    in iommu_dma_alloc()
     575  if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)    in iommu_dma_alloc()
     618  size_t size, int prot)    in __iommu_dma_map() argument
     634  if (iommu_map(domain, iova, phys - iova_off, size, prot)) {    in __iommu_dma_map()
    [all …]
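dma_info_to_prot() (lines 344-355) is the bridge between the DMA API and the IOMMU API. Note the inversion: DMA_TO_DEVICE means the device reads system memory, hence IOMMU_READ. A sketch consistent with the references above:

    /* Derive IOMMU prot flags from the DMA direction and attributes. */
    static int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                                unsigned long attrs)
    {
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
            prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
            return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
            return prot | IOMMU_READ;   /* device reads memory */
        case DMA_FROM_DEVICE:
            return prot | IOMMU_WRITE;  /* device writes memory */
        default:
            return 0;
        }
    }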
|
D | exynos-iommu.c |
     126  #define mk_lv1ent_sect(pa, prot) ((pa >> PG_ENT_SHIFT) | LV1_PROT[prot] | 2)    argument
     128  #define mk_lv2ent_lpage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 1)    argument
     129  #define mk_lv2ent_spage(pa, prot) ((pa >> PG_ENT_SHIFT) | LV2_PROT[prot] | 2)    argument
     981  phys_addr_t paddr, int prot, short *pgcnt)    in lv1set_section() argument
    1000  update_pte(sent, mk_lv1ent_sect(paddr, prot));    in lv1set_section()
    1018  int prot, short *pgcnt)    in lv2set_page() argument
    1024  update_pte(pent, mk_lv2ent_spage(paddr, prot));    in lv2set_page()
    1040  *pent = mk_lv2ent_lpage(paddr, prot);    in lv2set_page()
    1079  int prot)    in exynos_iommu_map() argument
    1088  prot &= SYSMMU_SUPPORTED_PROT_BITS;    in exynos_iommu_map()
    [all …]
|
D | omap-iommu.c |
     540  static int iopgd_alloc_section(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)    in iopgd_alloc_section() argument
     551  *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;    in iopgd_alloc_section()
     556  static int iopgd_alloc_super(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)    in iopgd_alloc_super() argument
     569  *(iopgd + i) = (pa & IOSUPER_MASK) | prot | IOPGD_SUPER;    in iopgd_alloc_super()
     574  static int iopte_alloc_page(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)    in iopte_alloc_page() argument
     584  *iopte = (pa & IOPAGE_MASK) | prot | IOPTE_SMALL;    in iopte_alloc_page()
     593  static int iopte_alloc_large(struct omap_iommu *obj, u32 da, u32 pa, u32 prot)    in iopte_alloc_large() argument
     611  *(iopte + i) = (pa & IOLARGE_MASK) | prot | IOPTE_LARGE;    in iopte_alloc_large()
     620  u32 prot;    in iopgtable_store_entry_core() local
     647  prot = get_iopte_attr(e);    in iopgtable_store_entry_core()
    [all …]
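Every omap-iommu store above has the same shape: masked physical address, attribute bits derived from prot, and an entry-type tag OR'd in. A sketch of the section case (store_section_entry is a hypothetical helper; the real functions also flush the descriptor write and handle table allocation):

    static void store_section_entry(u32 *iopgd, u32 pa, u32 prot)
    {
        /* masked physical address | attributes | entry-type tag */
        *iopgd = (pa & IOSECTION_MASK) | prot | IOPGD_SECTION;
        /* the driver then flushes this write so the IOMMU sees it */
    }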
|
D | fsl_pamu_domain.c |
     116  sub_win_ptr[i].prot);    in map_subwins()
     142  0, wnd->prot);    in map_win()
     175  wnd->prot);    in update_liodn()
     189  0, wnd->prot);    in update_liodn()
     537  phys_addr_t paddr, u64 size, int prot)    in fsl_pamu_window_enable() argument
     546  if (prot & IOMMU_READ)    in fsl_pamu_window_enable()
     548  if (prot & IOMMU_WRITE)    in fsl_pamu_window_enable()
     590  wnd->prot = pamu_prot;    in fsl_pamu_window_enable()
|
D | iommu.c |
     230  new->prot, new->type);    in iommu_insert_resv_region()
     525  ret = iommu_map(domain, addr, addr, pg_size, entry->prot);    in iommu_group_create_direct_mappings()
    1507  phys_addr_t paddr, size_t size, int prot)    in iommu_map() argument
    1544  ret = domain->ops->map(domain, iova, paddr, pgsize, prot);    in iommu_map()
    1638  struct scatterlist *sg, unsigned int nents, int prot)    in default_iommu_map_sg() argument
    1662  ret = iommu_map(domain, iova + mapped, phys, s->length, prot);    in default_iommu_map_sg()
    1681  phys_addr_t paddr, u64 size, int prot)    in iommu_domain_window_enable() argument
    1687  prot);    in iommu_domain_window_enable()
    1834  size_t length, int prot,    in iommu_alloc_resv_region() argument
    1846  region->prot = prot;    in iommu_alloc_resv_region()
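iommu_map() (line 1507) is the generic entry point: it validates the request against the domain's supported page sizes, then maps the range chunk by chunk through domain->ops->map (line 1544). A simplified sketch of that loop; iommu_pgsize() is a helper internal to drivers/iommu/iommu.c that picks the largest page size the current alignment allows:

    unsigned long orig_iova = iova;
    size_t orig_size = size;
    int ret = 0;

    while (size) {
        size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

        ret = domain->ops->map(domain, iova, paddr, pgsize, prot);
        if (ret)
            break;

        iova += pgsize;
        paddr += pgsize;
        size -= pgsize;
    }

    /* roll back anything that did get mapped before the failure */
    if (ret)
        iommu_unmap(domain, orig_iova, orig_size - size);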
|
/drivers/gpu/drm/ttm/ |
D | ttm_bo_util.c |
     260  pgprot_t prot)    in ttm_copy_io_ttm_page() argument
     271  dst = kmap_atomic_prot(d, prot);    in ttm_copy_io_ttm_page()
     273  if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))    in ttm_copy_io_ttm_page()
     274  dst = vmap(&d, 1, 0, prot);    in ttm_copy_io_ttm_page()
     286  if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))    in ttm_copy_io_ttm_page()
     297  pgprot_t prot)    in ttm_copy_ttm_io_page() argument
     307  src = kmap_atomic_prot(s, prot);    in ttm_copy_ttm_io_page()
     309  if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))    in ttm_copy_ttm_io_page()
     310  src = vmap(&s, 1, 0, prot);    in ttm_copy_ttm_io_page()
     322  if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL))    in ttm_copy_ttm_io_page()
    [all …]
|
/drivers/gpu/drm/i915/ |
D | i915_mm.c |
      35  pgprot_t prot;    member
      44  set_pte_at(r->mm, addr, pte, pte_mkspecial(pfn_pte(r->pfn, r->prot)));    in remap_pfn()
      74  r.prot = __pgprot((pgprot_val(iomap->prot) & _PAGE_CACHE_MASK) |    in remap_io_mapping()
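remap_io_mapping() installs PTEs one page at a time via apply_to_page_range(); each callback builds a special PTE (one with no backing struct page) from a running pfn and the pgprot precomputed at line 74. A sketch assuming the pre-5.x callback signature with a pgtable token:

    struct remap_pfn {
        struct mm_struct *mm;
        unsigned long pfn;
        pgprot_t prot;
    };

    static int remap_pfn(pte_t *pte, pgtable_t token, unsigned long addr,
                         void *data)
    {
        struct remap_pfn *r = data;

        /* special PTEs are not associated with any struct page */
        set_pte_at(r->mm, addr, pte,
                   pte_mkspecial(pfn_pte(r->pfn, r->prot)));
        r->pfn++;

        return 0;
    }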
|
/drivers/isdn/hisax/ |
D | l3dss1.c |
      55  retval = p->prot.dss1.last_invoke_id + 1; /* try new id */    in new_invoke_id()
      56  while ((i) && (p->prot.dss1.invoke_used[retval >> 3] == 0xFF)) {    in new_invoke_id()
      57  p->prot.dss1.last_invoke_id = (retval & 0xF8) + 8;    in new_invoke_id()
      61  while (p->prot.dss1.invoke_used[retval >> 3] & (1 << (retval & 7)))    in new_invoke_id()
      65  p->prot.dss1.last_invoke_id = retval;    in new_invoke_id()
      66  p->prot.dss1.invoke_used[retval >> 3] |= (1 << (retval & 7));    in new_invoke_id()
      78  p->prot.dss1.invoke_used[id >> 3] &= ~(1 << (id & 7));    in free_invoke_id()
      92  proc->prot.dss1.invoke_id = 0;    in dss1_new_l3_process()
      93  proc->prot.dss1.remote_operation = 0;    in dss1_new_l3_process()
      94  proc->prot.dss1.uus1_data[0] = '\0';    in dss1_new_l3_process()
    [all …]
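new_invoke_id()/free_invoke_id() implement a small bitmap allocator: bit (id & 7) of byte (id >> 3) in invoke_used[] records whether an id is taken. The l3ni1.c entry below repeats the same allocator for the NI-1 protocol state. The core operations, extracted into hypothetical standalone helpers:

    static int invoke_id_in_use(const unsigned char *used, unsigned char id)
    {
        return used[id >> 3] & (1 << (id & 7));
    }

    static void invoke_id_reserve(unsigned char *used, unsigned char id)
    {
        used[id >> 3] |= (1 << (id & 7));
    }

    static void invoke_id_release(unsigned char *used, unsigned char id)
    {
        used[id >> 3] &= ~(1 << (id & 7));
    }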
|
D | l3ni1.c |
      54  retval = p->prot.ni1.last_invoke_id + 1; /* try new id */    in new_invoke_id()
      55  while ((i) && (p->prot.ni1.invoke_used[retval >> 3] == 0xFF)) {    in new_invoke_id()
      56  p->prot.ni1.last_invoke_id = (retval & 0xF8) + 8;    in new_invoke_id()
      60  while (p->prot.ni1.invoke_used[retval >> 3] & (1 << (retval & 7)))    in new_invoke_id()
      64  p->prot.ni1.last_invoke_id = retval;    in new_invoke_id()
      65  p->prot.ni1.invoke_used[retval >> 3] |= (1 << (retval & 7));    in new_invoke_id()
      77  p->prot.ni1.invoke_used[id >> 3] &= ~(1 << (id & 7));    in free_invoke_id()
      91  proc->prot.ni1.invoke_id = 0;    in ni1_new_l3_process()
      92  proc->prot.ni1.remote_operation = 0;    in ni1_new_l3_process()
      93  proc->prot.ni1.uus1_data[0] = '\0';    in ni1_new_l3_process()
    [all …]
|
/drivers/misc/mic/scif/ |
D | scif_rma.c |
      91  scif_create_pinned_pages(int nr_pages, int prot)    in scif_create_pinned_pages() argument
     104  pin->prot = prot;    in scif_create_pinned_pages()
     123  int writeable = pin->prot & SCIF_PROT_WRITE;    in scif_destroy_pinned_pages()
     936  remote_window->prot = window->prot;    in scif_prep_remote_window()
    1205  req.prot = 0;    in scif_recv_unreg()
    1333  int prot = *out_prot;    in __scif_pin_pages() local
    1343  if (prot & ~(SCIF_PROT_READ | SCIF_PROT_WRITE))    in __scif_pin_pages()
    1357  pinned_pages = scif_create_pinned_pages(nr_pages, prot);    in __scif_pin_pages()
    1384  if (prot == SCIF_PROT_READ)    in __scif_pin_pages()
    1386  prot |= SCIF_PROT_WRITE;    in __scif_pin_pages()
    [all …]
|
D | scif_rma_list.c |
     115  if ((window->prot & req->prot) == req->prot) {    in scif_query_tcw()
     164  if ((window->prot & req->prot) != req->prot)    in scif_query_window()
|
D | scif_mmap.c |
      42  req.prot = recv_window->prot;    in scif_recv_munmap()
     246  req.prot = 0;    in scif_get_pages()
     285  (*pages)->prot_flags = window->prot;    in scif_get_pages()
     574  req.prot = vma->vm_flags & (VM_READ | VM_WRITE);    in scif_munmap()
     646  req.prot = vma->vm_flags & (VM_READ | VM_WRITE);    in scif_mmap()
|
/drivers/infiniband/core/ |
D | rw.c |
     388  ret = rdma_rw_init_one_mr(qp, port_num, &ctx->sig->prot,    in rdma_rw_ctx_signature_init()
     394  if (ctx->sig->prot.inv_wr.next)    in rdma_rw_ctx_signature_init()
     395  prev_wr->next = &ctx->sig->prot.inv_wr;    in rdma_rw_ctx_signature_init()
     397  prev_wr->next = &ctx->sig->prot.reg_wr.wr;    in rdma_rw_ctx_signature_init()
     398  prev_wr = &ctx->sig->prot.reg_wr.wr;    in rdma_rw_ctx_signature_init()
     400  ctx->sig->prot.mr = NULL;    in rdma_rw_ctx_signature_init()
     427  ctx->sig->sig_wr.prot = &ctx->sig->prot.sge;    in rdma_rw_ctx_signature_init()
     435  ctx->sig->sig_sge.length += ctx->sig->prot.sge.length;    in rdma_rw_ctx_signature_init()
     454  ib_mr_pool_put(qp, &qp->rdma_mrs, ctx->sig->prot.mr);    in rdma_rw_ctx_signature_init()
     504  if (ctx->sig->prot.mr)    in rdma_rw_ctx_wrs()
    [all …]
|
/drivers/base/ |
D | dma-mapping.c |
     252  size_t size, unsigned long vm_flags, pgprot_t prot,    in __dma_common_pages_remap() argument
     261  if (map_vm_area(area, prot, pages)) {    in __dma_common_pages_remap()
     274  unsigned long vm_flags, pgprot_t prot,    in dma_common_pages_remap() argument
     279  area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);    in dma_common_pages_remap()
     295  pgprot_t prot, const void *caller)    in dma_common_contiguous_remap() argument
     308  area = __dma_common_pages_remap(pages, size, vm_flags, prot, caller);    in dma_common_contiguous_remap()
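__dma_common_pages_remap() follows the classic vmalloc-area pattern: reserve a kernel virtual range, then map the caller's page array into it with the requested pgprot, unwinding on failure. A sketch consistent with the references (pages_remap is a hypothetical name; map_vm_area() is the API of this era, later replaced by vmap()):

    static struct vm_struct *pages_remap(struct page **pages, size_t size,
                                         unsigned long vm_flags,
                                         pgprot_t prot, const void *caller)
    {
        struct vm_struct *area;

        area = get_vm_area_caller(size, vm_flags, caller);
        if (!area)
            return NULL;

        if (map_vm_area(area, prot, pages)) {
            vunmap(area->addr);     /* release the reserved range */
            return NULL;
        }

        area->pages = pages;
        return area;
    }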
|
/drivers/xen/ |
D | xlate_mmu.c |
      67  pgprot_t prot;    member
     101  pte_t pte = pte_mkspecial(pfn_pte(page_to_pfn(page), info->prot));    in remap_pte_fn()
     147  int *err_ptr, pgprot_t prot,    in xen_xlate_remap_gfn_array() argument
     161  data.prot = prot;    in xen_xlate_remap_gfn_array()
|
/drivers/staging/android/ |
D | ashmem.c |
     357  static inline vm_flags_t calc_vm_may_flags(unsigned long prot)    in calc_vm_may_flags() argument
     359  return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD) |    in calc_vm_may_flags()
     360  _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) |    in calc_vm_may_flags()
     361  _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC);    in calc_vm_may_flags()
     516  static int set_prot_mask(struct ashmem_area *asma, unsigned long prot)    in set_prot_mask() argument
     523  if (unlikely((asma->prot_mask & prot) != prot)) {    in set_prot_mask()
     529  if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))    in set_prot_mask()
     530  prot |= PROT_EXEC;    in set_prot_mask()
     532  asma->prot_mask = prot;    in set_prot_mask()
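calc_vm_may_flags() leans on _calc_vm_trans() from <linux/mman.h>, which translates a bit from one flag namespace (PROT_*) to another (VM_MAY*) by multiplying or dividing, so it works whichever of the two bit positions is higher. Essentially:

    /* If x has bit1 set, produce bit2; scale up or down depending on
     * which bit position is higher. (Simplified from <linux/mman.h>.) */
    #define _calc_vm_trans(x, bit1, bit2) \
        ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
                          : ((x) & (bit1)) / ((bit1) / (bit2)))

The set_prot_mask() hits show the other noteworthy detail: ashmem only ever narrows prot_mask, and it honors READ_IMPLIES_EXEC by upgrading PROT_READ requests to include PROT_EXEC.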
|
/drivers/misc/ |
D | aspeed-lpc-ctrl.c |
      45  pgprot_t prot = vma->vm_page_prot;    in aspeed_lpc_ctrl_mmap() local
      51  prot = pgprot_noncached(prot);    in aspeed_lpc_ctrl_mmap()
      55  vsize, prot))    in aspeed_lpc_ctrl_mmap()
|
/drivers/infiniband/ulp/iser/ |
D | iser_initiator.c |
      65  struct iser_data_buf *pbuf_in = &iser_task->prot[ISER_DIR_IN];    in iser_prepare_read_cmd()
     119  struct iser_data_buf *pbuf_out = &iser_task->prot[ISER_DIR_OUT];    in iser_prepare_write_cmd()
     384  prot_buf = &iser_task->prot[ISER_DIR_IN];    in iser_send_command()
     387  prot_buf = &iser_task->prot[ISER_DIR_OUT];    in iser_send_command()
     753  iser_task->prot[ISER_DIR_IN].data_len = 0;    in iser_task_rdma_init()
     754  iser_task->prot[ISER_DIR_OUT].data_len = 0;    in iser_task_rdma_init()
     773  &iser_task->prot[ISER_DIR_IN],    in iser_task_rdma_finalize()
     784  &iser_task->prot[ISER_DIR_OUT],    in iser_task_rdma_finalize()
|
/drivers/gpu/drm/msm/ |
D | msm_iommu.c |
      61  struct sg_table *sgt, unsigned len, int prot)    in msm_iommu_map() argument
      67  ret = iommu_map_sg(iommu->domain, iova, sgt->sgl, sgt->nents, prot);    in msm_iommu_map()
|
/drivers/ata/ |
D | sata_sil24.c |
      41  __le16 prot;    member
     802  u8 prot = qc->tf.protocol;    in sil24_qc_defer() local
     823  int is_excl = (ata_is_atapi(prot) ||    in sil24_qc_defer()
     858  u16 prot = 0;    in sil24_qc_prep() local
     861  prot |= PRB_PROT_NCQ;    in sil24_qc_prep()
     863  prot |= PRB_PROT_WRITE;    in sil24_qc_prep()
     865  prot |= PRB_PROT_READ;    in sil24_qc_prep()
     866  prb->prot = cpu_to_le16(prot);    in sil24_qc_prep()
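sil24_qc_prep() (lines 858-866) builds the PRB protocol word from the taskfile protocol and transfer direction, then stores it little-endian for the controller. A hedged sketch of that construction (the guard conditions are paraphrased, not verbatim from the driver):

    u16 prot = 0;

    if (ata_is_ncq(qc->tf.protocol))
        prot |= PRB_PROT_NCQ;
    if (qc->tf.flags & ATA_TFLAG_WRITE)
        prot |= PRB_PROT_WRITE;
    else
        prot |= PRB_PROT_READ;

    prb->prot = cpu_to_le16(prot);  /* hardware expects little-endian */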
|