
Searched full:mmu (Results 1 – 25 of 2069) sorted by relevance


/kernel/linux/linux-5.10/drivers/staging/media/ipu3/
ipu3-mmu.c 21 #include "ipu3-mmu.h"
73 * @mmu: MMU to perform the invalidate operation on
78 static void imgu_mmu_tlb_invalidate(struct imgu_mmu *mmu) in imgu_mmu_tlb_invalidate() argument
80 writel(TLB_INVALIDATE, mmu->base + REG_TLB_INVALIDATE); in imgu_mmu_tlb_invalidate()
83 static void call_if_imgu_is_powered(struct imgu_mmu *mmu, in call_if_imgu_is_powered() argument
84 void (*func)(struct imgu_mmu *mmu)) in call_if_imgu_is_powered() argument
86 if (!pm_runtime_get_if_in_use(mmu->dev)) in call_if_imgu_is_powered()
89 func(mmu); in call_if_imgu_is_powered()
90 pm_runtime_put(mmu->dev); in call_if_imgu_is_powered()
95 * @mmu: MMU to set the CIO gate bit in.
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvif/
mmu.c 22 #include <nvif/mmu.h>
28 nvif_mmu_fini(struct nvif_mmu *mmu) in nvif_mmu_fini() argument
30 kfree(mmu->kind); in nvif_mmu_fini()
31 kfree(mmu->type); in nvif_mmu_fini()
32 kfree(mmu->heap); in nvif_mmu_fini()
33 nvif_object_fini(&mmu->object); in nvif_mmu_fini()
37 nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu) in nvif_mmu_init() argument
49 mmu->heap = NULL; in nvif_mmu_init()
50 mmu->type = NULL; in nvif_mmu_init()
51 mmu->kind = NULL; in nvif_mmu_init()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvif/
mmu.c 22 #include <nvif/mmu.h>
28 nvif_mmu_dtor(struct nvif_mmu *mmu) in nvif_mmu_dtor() argument
30 kfree(mmu->kind); in nvif_mmu_dtor()
31 kfree(mmu->type); in nvif_mmu_dtor()
32 kfree(mmu->heap); in nvif_mmu_dtor()
33 nvif_object_dtor(&mmu->object); in nvif_mmu_dtor()
38 struct nvif_mmu *mmu) in nvif_mmu_ctor() argument
50 mmu->heap = NULL; in nvif_mmu_ctor()
51 mmu->type = NULL; in nvif_mmu_ctor()
52 mmu->kind = NULL; in nvif_mmu_ctor()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
Kbuild 2 nvkm-y += nvkm/subdev/mmu/base.o
3 nvkm-y += nvkm/subdev/mmu/nv04.o
4 nvkm-y += nvkm/subdev/mmu/nv41.o
5 nvkm-y += nvkm/subdev/mmu/nv44.o
6 nvkm-y += nvkm/subdev/mmu/nv50.o
7 nvkm-y += nvkm/subdev/mmu/g84.o
8 nvkm-y += nvkm/subdev/mmu/mcp77.o
9 nvkm-y += nvkm/subdev/mmu/gf100.o
10 nvkm-y += nvkm/subdev/mmu/gk104.o
11 nvkm-y += nvkm/subdev/mmu/gk20a.o
[all …]
base.c 42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
65 nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero) in nvkm_mmu_ptp_get() argument
74 ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head); in nvkm_mmu_ptp_get()
82 ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false); in nvkm_mmu_ptp_get()
93 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_get()
120 nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size) in nvkm_mmu_ptc_find() argument
124 list_for_each_entry(ptc, &mmu->ptc.list, head) { in nvkm_mmu_ptc_find()
134 list_add(&ptc->head, &mmu->ptc.list); in nvkm_mmu_ptc_find()
[all …]
ummu.c 35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
37 if (mmu->func->mem.user.oclass && oclass->client->super) { in nvkm_ummu_sclass()
39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
59 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_heap() local
67 if ((index = args->v0.index) >= mmu->heap_nr) in nvkm_ummu_heap()
69 args->v0.size = mmu->heap[index].size; in nvkm_ummu_heap()
79 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_type() local
87 if ((index = args->v0.index) >= mmu->type_nr) in nvkm_ummu_type()
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c 42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
65 nvkm_mmu_ptp_get(struct nvkm_mmu *mmu, u32 size, bool zero) in nvkm_mmu_ptp_get() argument
74 ptp = list_first_entry_or_null(&mmu->ptp.list, typeof(*ptp), head); in nvkm_mmu_ptp_get()
82 ptp->pt = nvkm_mmu_ptc_get(mmu, 0x1000, 0x1000, false); in nvkm_mmu_ptp_get()
93 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_get()
120 nvkm_mmu_ptc_find(struct nvkm_mmu *mmu, u32 size) in nvkm_mmu_ptc_find() argument
124 list_for_each_entry(ptc, &mmu->ptc.list, head) { in nvkm_mmu_ptc_find()
134 list_add(&ptc->head, &mmu->ptc.list); in nvkm_mmu_ptc_find()
[all …]
Kbuild 1 nvkm-y += nvkm/subdev/mmu/base.o
2 nvkm-y += nvkm/subdev/mmu/nv04.o
3 nvkm-y += nvkm/subdev/mmu/nv41.o
4 nvkm-y += nvkm/subdev/mmu/nv44.o
5 nvkm-y += nvkm/subdev/mmu/nv50.o
6 nvkm-y += nvkm/subdev/mmu/g84.o
7 nvkm-y += nvkm/subdev/mmu/mcp77.o
8 nvkm-y += nvkm/subdev/mmu/gf100.o
9 nvkm-y += nvkm/subdev/mmu/gk104.o
10 nvkm-y += nvkm/subdev/mmu/gk20a.o
[all …]
ummu.c 35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
37 if (mmu->func->mem.user.oclass && oclass->client->super) { in nvkm_ummu_sclass()
39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
59 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_heap() local
67 if ((index = args->v0.index) >= mmu->heap_nr) in nvkm_ummu_heap()
69 args->v0.size = mmu->heap[index].size; in nvkm_ummu_heap()
79 struct nvkm_mmu *mmu = ummu->mmu; in nvkm_ummu_type() local
87 if ((index = args->v0.index) >= mmu->type_nr) in nvkm_ummu_type()
[all …]
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/pci/mmu/
isp_mmu.c 21 * ISP MMU management wrap code
41 #include "mmu/isp_mmu.h"
51 * that are only 32-bit capable(e.g. the ISP MMU).
57 static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
79 static phys_addr_t isp_pte_to_pgaddr(struct isp_mmu *mmu, in isp_pte_to_pgaddr() argument
82 return mmu->driver->pte_to_phys(mmu, pte); in isp_pte_to_pgaddr()
85 static unsigned int isp_pgaddr_to_pte_valid(struct isp_mmu *mmu, in isp_pgaddr_to_pte_valid() argument
88 unsigned int pte = mmu->driver->phys_to_pte(mmu, phys); in isp_pgaddr_to_pte_valid()
90 return (unsigned int)(pte | ISP_PTE_VALID_MASK(mmu)); in isp_pgaddr_to_pte_valid()
97 static phys_addr_t alloc_page_table(struct isp_mmu *mmu) in alloc_page_table() argument
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/etnaviv/
etnaviv_mmu.c 128 static void etnaviv_iommu_remove_mapping(struct etnaviv_iommu *mmu, in etnaviv_iommu_remove_mapping() argument
133 etnaviv_iommu_unmap(mmu, mapping->vram_node.start, in etnaviv_iommu_remove_mapping()
138 static int etnaviv_iommu_find_iova(struct etnaviv_iommu *mmu, in etnaviv_iommu_find_iova() argument
145 lockdep_assert_held(&mmu->lock); in etnaviv_iommu_find_iova()
153 ret = drm_mm_insert_node_in_range(&mmu->mm, node, in etnaviv_iommu_find_iova()
159 drm_mm_scan_init(&scan, &mmu->mm, size, 0, 0, mode); in etnaviv_iommu_find_iova()
163 list_for_each_entry(free, &mmu->mappings, mmu_node) { in etnaviv_iommu_find_iova()
200 * Unmap the blocks which need to be reaped from the MMU. in etnaviv_iommu_find_iova()
201 * Clear the mmu pointer to prevent the mapping_get finding in etnaviv_iommu_find_iova()
205 etnaviv_iommu_remove_mapping(mmu, m); in etnaviv_iommu_find_iova()
[all …]
/kernel/linux/linux-5.10/drivers/iommu/
ipmmu-vmsa.c 74 struct ipmmu_vmsa_device *mmu; member
102 /* MMU "context" registers */
152 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) in ipmmu_is_root() argument
154 return mmu->root == mmu; in ipmmu_is_root()
159 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in __ipmmu_check_device() local
162 if (ipmmu_is_root(mmu)) in __ipmmu_check_device()
163 *rootp = mmu; in __ipmmu_check_device()
180 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
182 return ioread32(mmu->base + offset); in ipmmu_read()
185 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/panfrost/
panfrost_mmu.c 34 /* Wait for the MMU status to indicate there is no active command, in in wait_ready()
49 /* write AS_COMMAND when MMU is ready to accept another command */ in write_cmd()
86 /* Run the MMU operation */ in mmu_hw_do_operation_locked()
94 struct panfrost_mmu *mmu, in mmu_hw_do_operation() argument
100 ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op); in mmu_hw_do_operation()
105 static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) in panfrost_mmu_enable() argument
107 int as_nr = mmu->as; in panfrost_mmu_enable()
108 struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg; in panfrost_mmu_enable()
139 u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu) in panfrost_mmu_as_get() argument
145 as = mmu->as; in panfrost_mmu_as_get()
[all …]
/kernel/linux/linux-4.19/drivers/iommu/
ipmmu-vmsa.c 70 struct ipmmu_vmsa_device *mmu; member
224 static bool ipmmu_is_root(struct ipmmu_vmsa_device *mmu) in ipmmu_is_root() argument
226 return mmu->root == mmu; in ipmmu_is_root()
231 struct ipmmu_vmsa_device *mmu = dev_get_drvdata(dev); in __ipmmu_check_device() local
234 if (ipmmu_is_root(mmu)) in __ipmmu_check_device()
235 *rootp = mmu; in __ipmmu_check_device()
252 static u32 ipmmu_read(struct ipmmu_vmsa_device *mmu, unsigned int offset) in ipmmu_read() argument
254 return ioread32(mmu->base + offset); in ipmmu_read()
257 static void ipmmu_write(struct ipmmu_vmsa_device *mmu, unsigned int offset, in ipmmu_write() argument
260 iowrite32(data, mmu->base + offset); in ipmmu_write()
[all …]
/kernel/linux/linux-5.10/drivers/staging/media/atomisp/include/mmu/
isp_mmu.h 21 * ISP MMU driver for classic two-level page tables
88 unsigned int (*get_pd_base)(struct isp_mmu *mmu, phys_addr_t pd_base);
100 void (*tlb_flush_range)(struct isp_mmu *mmu,
102 void (*tlb_flush_all)(struct isp_mmu *mmu);
103 unsigned int (*phys_to_pte)(struct isp_mmu *mmu,
105 phys_addr_t (*pte_to_phys)(struct isp_mmu *mmu,
120 #define ISP_PTE_VALID_MASK(mmu) \ argument
121 ((mmu)->driver->pte_valid_mask)
123 #define ISP_PTE_VALID(mmu, pte) \ argument
124 ((pte) & ISP_PTE_VALID_MASK(mmu))
[all …]
/kernel/linux/linux-5.10/arch/x86/kernel/
paravirt.c 333 /* Mmu ops. */
334 .mmu.flush_tlb_user = native_flush_tlb_local,
335 .mmu.flush_tlb_kernel = native_flush_tlb_global,
336 .mmu.flush_tlb_one_user = native_flush_tlb_one_user,
337 .mmu.flush_tlb_others = native_flush_tlb_others,
338 .mmu.tlb_remove_table =
341 .mmu.exit_mmap = paravirt_nop,
344 .mmu.read_cr2 = __PV_IS_CALLEE_SAVE(native_read_cr2),
345 .mmu.write_cr2 = native_write_cr2,
346 .mmu.read_cr3 = __native_read_cr3,
[all …]
/kernel/linux/linux-5.10/arch/m68k/
Kconfig.cpu 6 default M68KCLASSIC if MMU
7 default COLDFIRE if !MMU
40 depends on !MMU
53 a paging MMU.
64 System-On-Chip parts, and does not contain a paging MMU.
68 depends on MMU
74 68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
79 depends on MMU && !MMU_SUN3
85 work, as it does not include an MMU (Memory Management Unit).
89 depends on MMU && !MMU_SUN3
[all …]
Kconfig 7 select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
14 select DMA_DIRECT_REMAP if HAS_DMA && MMU && !COLDFIRE
19 select GENERIC_STRNCPY_FROM_USER if MMU
20 select GENERIC_STRNLEN_USER if MMU
21 select HAVE_AOUT if MMU
24 select HAVE_FUTEX_CMPXCHG if MMU && FUTEX
28 select MMU_GATHER_NO_RANGE if MMU
31 select NO_DMA if !MMU && !COLDFIRE
35 select UACCESS_MEMCPY if !MMU
78 config MMU config
[all …]
/kernel/linux/linux-4.19/arch/m68k/
Kconfig.cpu 6 default M68KCLASSIC if MMU
7 default COLDFIRE if !MMU
39 depends on !MMU
51 a paging MMU.
61 System-On-Chip parts, and does not contain a paging MMU.
65 depends on MMU
71 68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
76 depends on MMU && !MMU_SUN3
82 work, as it does not include an MMU (Memory Management Unit).
86 depends on MMU && !MMU_SUN3
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/msm/
msm_iommu.c 37 static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names, in msm_iommu_attach() argument
40 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_attach()
43 pm_runtime_get_sync(mmu->dev); in msm_iommu_attach()
44 ret = iommu_attach_device(iommu->domain, mmu->dev); in msm_iommu_attach()
45 pm_runtime_put_sync(mmu->dev); in msm_iommu_attach()
50 static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names, in msm_iommu_detach() argument
53 struct msm_iommu *iommu = to_msm_iommu(mmu); in msm_iommu_detach()
55 pm_runtime_get_sync(mmu->dev); in msm_iommu_detach()
56 iommu_detach_device(iommu->domain, mmu->dev); in msm_iommu_detach()
57 pm_runtime_put_sync(mmu->dev); in msm_iommu_detach()
[all …]
/kernel/linux/linux-4.19/arch/arm/mm/
Kconfig 11 depends on !MMU
30 select CPU_COPY_V4WT if MMU
34 select CPU_TLB_V4WT if MMU
37 MMU built around an ARM7TDMI core.
45 depends on !MMU
63 depends on !MMU
82 select CPU_COPY_V4WB if MMU
86 select CPU_TLB_V4WBI if MMU
101 select CPU_COPY_V4WB if MMU
105 select CPU_TLB_V4WBI if MMU
[all …]
/kernel/linux/linux-5.10/arch/arm/mm/
Kconfig 11 depends on !MMU
30 select CPU_COPY_V4WT if MMU
34 select CPU_TLB_V4WT if MMU
37 MMU built around an ARM7TDMI core.
45 depends on !MMU
63 depends on !MMU
82 select CPU_COPY_V4WB if MMU
86 select CPU_TLB_V4WBI if MMU
101 select CPU_COPY_V4WB if MMU
105 select CPU_TLB_V4WBI if MMU
[all …]
/kernel/linux/linux-5.10/arch/riscv/
Kconfig 19 select ARCH_HAS_DEBUG_VIRTUAL if MMU
28 select ARCH_HAS_STRICT_KERNEL_RWX if MMU
31 select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
35 select CLINT_TIMER if !MMU
47 select GENERIC_PTDUMP if MMU
50 select GENERIC_STRNCPY_FROM_USER if MMU
51 select GENERIC_STRNLEN_USER if MMU
52 select GENERIC_TIME_VSYSCALL if MMU && 64BIT
57 select HAVE_ARCH_KASAN if MMU && 64BIT
60 select HAVE_ARCH_MMAP_RND_BITS if MMU
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_mmu.h 13 void (*detach)(struct msm_mmu *mmu);
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
17 void (*destroy)(struct msm_mmu *mmu);
34 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
37 mmu->dev = dev; in msm_mmu_init()
38 mmu->funcs = funcs; in msm_mmu_init()
39 mmu->type = type; in msm_mmu_init()
45 static inline void msm_mmu_set_fault_handler(struct msm_mmu *mmu, void *arg, in msm_mmu_set_fault_handler() argument
48 mmu->arg = arg; in msm_mmu_set_fault_handler()
[all …]
/kernel/linux/linux-4.19/Documentation/devicetree/bindings/iommu/
samsung,sysmmu.txt 1 Samsung Exynos IOMMU H/W, System MMU (System Memory Management Unit)
7 System MMU is an IOMMU and supports identical translation table format to
9 permissions, shareability and security protection. In addition, System MMU has
15 master), but one System MMU can handle transactions from only one peripheral
16 device. The relation between a System MMU and the peripheral device needs to be
21 * MFC has one System MMU on its left and right bus.
22 * FIMD in Exynos5420 has one System MMU for window 0 and 4, the other system MMU
24 * M2M Scalers and G2D in Exynos5420 has one System MMU on the read channel and
25 the other System MMU on the write channel.
27 For information on assigning System MMU controller to its peripheral devices,
[all …]
