
Searched +full:mmu +full:- +full:type (Results 1 – 25 of 838) sorted by relevance


/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvif/
mmu.c
22 #include <nvif/mmu.h>
28 nvif_mmu_fini(struct nvif_mmu *mmu) in nvif_mmu_fini() argument
30 kfree(mmu->kind); in nvif_mmu_fini()
31 kfree(mmu->type); in nvif_mmu_fini()
32 kfree(mmu->heap); in nvif_mmu_fini()
33 nvif_object_fini(&mmu->object); in nvif_mmu_fini()
37 nvif_mmu_init(struct nvif_object *parent, s32 oclass, struct nvif_mmu *mmu) in nvif_mmu_init() argument
40 { NVIF_CLASS_MEM_GF100, -1 }, in nvif_mmu_init()
41 { NVIF_CLASS_MEM_NV50 , -1 }, in nvif_mmu_init()
42 { NVIF_CLASS_MEM_NV04 , -1 }, in nvif_mmu_init()
[all …]
mem.c
28 nvif_mem_init_map(struct nvif_mmu *mmu, u8 type, u64 size, struct nvif_mem *mem) in nvif_mem_init_map() argument
30 int ret = nvif_mem_init(mmu, mmu->mem, NVIF_MEM_MAPPABLE | type, 0, in nvif_mem_init_map()
33 ret = nvif_object_map(&mem->object, NULL, 0); in nvif_mem_init_map()
43 nvif_object_fini(&mem->object); in nvif_mem_fini()
47 nvif_mem_init_type(struct nvif_mmu *mmu, s32 oclass, int type, u8 page, in nvif_mem_init_type() argument
54 mem->object.client = NULL; in nvif_mem_init_type()
55 if (type < 0) in nvif_mem_init_type()
56 return -EINVAL; in nvif_mem_init_type()
60 return -ENOMEM; in nvif_mem_init_type()
64 args->version = 0; in nvif_mem_init_type()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvif/
mmu.c
22 #include <nvif/mmu.h>
28 nvif_mmu_dtor(struct nvif_mmu *mmu) in nvif_mmu_dtor() argument
30 kfree(mmu->kind); in nvif_mmu_dtor()
31 kfree(mmu->type); in nvif_mmu_dtor()
32 kfree(mmu->heap); in nvif_mmu_dtor()
33 nvif_object_dtor(&mmu->object); in nvif_mmu_dtor()
38 struct nvif_mmu *mmu) in nvif_mmu_ctor() argument
41 { NVIF_CLASS_MEM_GF100, -1 }, in nvif_mmu_ctor()
42 { NVIF_CLASS_MEM_NV50 , -1 }, in nvif_mmu_ctor()
43 { NVIF_CLASS_MEM_NV04 , -1 }, in nvif_mmu_ctor()
[all …]
mem.c
28 nvif_mem_ctor_map(struct nvif_mmu *mmu, const char *name, u8 type, u64 size, in nvif_mem_ctor_map() argument
31 int ret = nvif_mem_ctor(mmu, name, mmu->mem, NVIF_MEM_MAPPABLE | type, in nvif_mem_ctor_map()
34 ret = nvif_object_map(&mem->object, NULL, 0); in nvif_mem_ctor_map()
44 nvif_object_dtor(&mem->object); in nvif_mem_dtor()
48 nvif_mem_ctor_type(struct nvif_mmu *mmu, const char *name, s32 oclass, in nvif_mem_ctor_type() argument
49 int type, u8 page, u64 size, void *argv, u32 argc, in nvif_mem_ctor_type() argument
56 mem->object.client = NULL; in nvif_mem_ctor_type()
57 if (type < 0) in nvif_mem_ctor_type()
58 return -EINVAL; in nvif_mem_ctor_type()
62 return -ENOMEM; in nvif_mem_ctor_type()
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
44 const int slot = pt->base >> pt->ptp->shift; in nvkm_mmu_ptp_put()
45 struct nvkm_mmu_ptp *ptp = pt->ptp; in nvkm_mmu_ptp_put()
50 if (!ptp->free) in nvkm_mmu_ptp_put()
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
52 ptp->free |= BIT(slot); in nvkm_mmu_ptp_put()
54 /* If there's no more sub-allocations, destroy PTP. */ in nvkm_mmu_ptp_put()
55 if (ptp->free == ptp->mask) { in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
57 list_del(&ptp->head); in nvkm_mmu_ptp_put()
[all …]
ummu.c
35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
37 if (mmu->func->mem.user.oclass && oclass->client->super) { in nvkm_ummu_sclass()
38 if (index-- == 0) { in nvkm_ummu_sclass()
39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
40 oclass->ctor = nvkm_umem_new; in nvkm_ummu_sclass()
45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
46 if (index-- == 0) { in nvkm_ummu_sclass()
47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
48 oclass->ctor = nvkm_uvmm_new; in nvkm_ummu_sclass()
53 return -EINVAL; in nvkm_ummu_sclass()
[all …]
umem.c
37 struct nvkm_client *master = client->object.client; in nvkm_umem_search()
44 if (client->super && client != master) { in nvkm_umem_search()
45 spin_lock(&master->lock); in nvkm_umem_search()
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
52 spin_unlock(&master->lock); in nvkm_umem_search()
56 if (!umem->priv || client->super) in nvkm_umem_search()
57 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
60 return memory ? memory : ERR_PTR(-ENOENT); in nvkm_umem_search()
[all …]
mem.c
33 struct nvkm_mmu *mmu; member
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
59 return mem->dma[0]; in nvkm_mem_addr()
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
75 .memory = &mem->memory, in nvkm_mem_map_dma()
77 .dma = mem->dma, in nvkm_mem_map_dma()
86 if (mem->mem) { in nvkm_mem_dtor()
87 while (mem->pages--) { in nvkm_mem_dtor()
88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
[all …]
memnv04.c
31 nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in nv04_mem_map() argument
37 struct nvkm_device *device = mmu->subdev.device; in nv04_mem_map()
39 int ret = -ENOSYS; in nv04_mem_map()
41 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_map()
44 *paddr = device->func->resource_addr(device, 1) + addr; in nv04_mem_map()
46 *pvma = ERR_PTR(-ENODEV); in nv04_mem_map()
51 nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nv04_mem_new() argument
57 int ret = -ENOSYS; in nv04_mem_new()
59 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_new()
62 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) in nv04_mem_new()
[all …]
memgf100.c
34 gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in gf100_mem_map() argument
42 struct nvkm_device *device = mmu->subdev.device; in gf100_mem_map()
44 int ret = -ENOSYS; in gf100_mem_map()
46 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in gf100_mem_map()
47 uvmm.ro = args->v0.ro; in gf100_mem_map()
48 uvmm.kind = args->v0.kind; in gf100_mem_map()
50 if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) { in gf100_mem_map()
63 *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in gf100_mem_map()
64 *psize = (*pvma)->size; in gf100_mem_map()
69 gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in gf100_mem_new() argument
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/
base.c
42 nvkm_mmu_ptp_put(struct nvkm_mmu *mmu, bool force, struct nvkm_mmu_pt *pt) in nvkm_mmu_ptp_put() argument
44 const int slot = pt->base >> pt->ptp->shift; in nvkm_mmu_ptp_put()
45 struct nvkm_mmu_ptp *ptp = pt->ptp; in nvkm_mmu_ptp_put()
50 if (!ptp->free) in nvkm_mmu_ptp_put()
51 list_add(&ptp->head, &mmu->ptp.list); in nvkm_mmu_ptp_put()
52 ptp->free |= BIT(slot); in nvkm_mmu_ptp_put()
54 /* If there's no more sub-allocations, destroy PTP. */ in nvkm_mmu_ptp_put()
55 if (ptp->free == ptp->mask) { in nvkm_mmu_ptp_put()
56 nvkm_mmu_ptc_put(mmu, force, &ptp->pt); in nvkm_mmu_ptp_put()
57 list_del(&ptp->head); in nvkm_mmu_ptp_put()
[all …]
ummu.c
35 struct nvkm_mmu *mmu = nvkm_ummu(object)->mmu; in nvkm_ummu_sclass() local
37 if (mmu->func->mem.user.oclass && oclass->client->super) { in nvkm_ummu_sclass()
38 if (index-- == 0) { in nvkm_ummu_sclass()
39 oclass->base = mmu->func->mem.user; in nvkm_ummu_sclass()
40 oclass->ctor = nvkm_umem_new; in nvkm_ummu_sclass()
45 if (mmu->func->vmm.user.oclass) { in nvkm_ummu_sclass()
46 if (index-- == 0) { in nvkm_ummu_sclass()
47 oclass->base = mmu->func->vmm.user; in nvkm_ummu_sclass()
48 oclass->ctor = nvkm_uvmm_new; in nvkm_ummu_sclass()
53 return -EINVAL; in nvkm_ummu_sclass()
[all …]
umem.c
37 struct nvkm_client *master = client->object.client; in nvkm_umem_search()
44 if (client->super && client != master) { in nvkm_umem_search()
45 spin_lock(&master->lock); in nvkm_umem_search()
46 list_for_each_entry(umem, &master->umem, head) { in nvkm_umem_search()
47 if (umem->object.object == handle) { in nvkm_umem_search()
48 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
52 spin_unlock(&master->lock); in nvkm_umem_search()
56 if (!umem->priv || client->super) in nvkm_umem_search()
57 memory = nvkm_memory_ref(umem->memory); in nvkm_umem_search()
60 return memory ? memory : ERR_PTR(-ENOENT); in nvkm_umem_search()
[all …]
mem.c
33 struct nvkm_mmu *mmu; member
45 return nvkm_mem(memory)->target; in nvkm_mem_target()
58 if (mem->pages == 1 && mem->mem) in nvkm_mem_addr()
59 return mem->dma[0]; in nvkm_mem_addr()
66 return nvkm_mem(memory)->pages << PAGE_SHIFT; in nvkm_mem_size()
75 .memory = &mem->memory, in nvkm_mem_map_dma()
77 .dma = mem->dma, in nvkm_mem_map_dma()
86 if (mem->mem) { in nvkm_mem_dtor()
87 while (mem->pages--) { in nvkm_mem_dtor()
88 dma_unmap_page(mem->mmu->subdev.device->dev, in nvkm_mem_dtor()
[all …]
memnv04.c
31 nv04_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in nv04_mem_map() argument
37 struct nvkm_device *device = mmu->subdev.device; in nv04_mem_map()
39 int ret = -ENOSYS; in nv04_mem_map()
41 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_map()
44 *paddr = device->func->resource_addr(device, 1) + addr; in nv04_mem_map()
46 *pvma = ERR_PTR(-ENODEV); in nv04_mem_map()
51 nv04_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in nv04_mem_new() argument
57 int ret = -ENOSYS; in nv04_mem_new()
59 if ((ret = nvif_unvers(ret, &argv, &argc, args->vn))) in nv04_mem_new()
62 if (mmu->type[type].type & NVKM_MEM_MAPPABLE) in nv04_mem_new()
[all …]
memgf100.c
34 gf100_mem_map(struct nvkm_mmu *mmu, struct nvkm_memory *memory, void *argv, in gf100_mem_map() argument
42 struct nvkm_device *device = mmu->subdev.device; in gf100_mem_map()
44 int ret = -ENOSYS; in gf100_mem_map()
46 if (!(ret = nvif_unpack(ret, &argv, &argc, args->v0, 0, 0, false))) { in gf100_mem_map()
47 uvmm.ro = args->v0.ro; in gf100_mem_map()
48 uvmm.kind = args->v0.kind; in gf100_mem_map()
50 if (!(ret = nvif_unvers(ret, &argv, &argc, args->vn))) { in gf100_mem_map()
63 *paddr = device->func->resource_addr(device, 1) + (*pvma)->addr; in gf100_mem_map()
64 *psize = (*pvma)->size; in gf100_mem_map()
69 gf100_mem_new(struct nvkm_mmu *mmu, int type, u8 page, u64 size, in gf100_mem_new() argument
[all …]
vmmgp100.c
37 struct device *dev = vmm->mmu->subdev.device->dev; in gp100_vmm_pfn_unmap()
40 nvkm_kmap(pt->memory); in gp100_vmm_pfn_unmap()
41 while (ptes--) { in gp100_vmm_pfn_unmap()
42 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_unmap()
43 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_unmap()
51 nvkm_done(pt->memory); in gp100_vmm_pfn_unmap()
59 nvkm_kmap(pt->memory); in gp100_vmm_pfn_clear()
60 while (ptes--) { in gp100_vmm_pfn_clear()
61 u32 datalo = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 0); in gp100_vmm_pfn_clear()
62 u32 datahi = nvkm_ro32(pt->memory, pt->base + ptei * 8 + 4); in gp100_vmm_pfn_clear()
[all …]
/kernel/linux/linux-5.10/drivers/gpu/drm/msm/
msm_mmu.h
1 /* SPDX-License-Identifier: GPL-2.0-only */
13 void (*detach)(struct msm_mmu *mmu);
14 int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
16 int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len);
17 void (*destroy)(struct msm_mmu *mmu);
31 enum msm_mmu_type type; member
34 static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, in msm_mmu_init() argument
35 const struct msm_mmu_funcs *funcs, enum msm_mmu_type type) in msm_mmu_init() argument
37 mmu->dev = dev; in msm_mmu_init()
38 mmu->funcs = funcs; in msm_mmu_init()
[all …]
/kernel/linux/linux-5.10/arch/x86/kernel/
paravirt.c
1 // SPDX-License-Identifier: GPL-2.0-or-later
6 2007 - x86_64 support added by Glauber de Oliveira Costa, Red Hat Inc
44 ".size _paravirt_nop, . - _paravirt_nop\n\t"
45 ".type _paravirt_nop, @function\n\t"
67 unsigned long delta = (unsigned long)target - (addr+call_len); in paravirt_patch_call()
75 b->opcode = 0xe8; /* call */ in paravirt_patch_call()
76 b->delta = delta; in paravirt_patch_call()
93 unsigned long delta = (unsigned long)target - (addr+5); in paravirt_patch_jmp()
102 b->opcode = 0xe9; /* jmp */ in paravirt_patch_jmp()
103 b->delta = delta; in paravirt_patch_jmp()
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
26 u8 type; member
28 } *type; member
37 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
39 const u8 invalid = mmu->kind_nr - 1; in nvif_mmu_kind_valid()
41 if (kind >= mmu->kind_nr || mmu->kind[kind] == invalid) in nvif_mmu_kind_valid()
48 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
51 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
52 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
55 return -EINVAL; in nvif_mmu_type()
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/include/nvif/
mmu.h
27 u8 type; member
29 } *type; member
39 nvif_mmu_kind_valid(struct nvif_mmu *mmu, u8 kind) in nvif_mmu_kind_valid() argument
42 if (kind >= mmu->kind_nr || mmu->kind[kind] == mmu->kind_inv) in nvif_mmu_kind_valid()
49 nvif_mmu_type(struct nvif_mmu *mmu, u8 mask) in nvif_mmu_type() argument
52 for (i = 0; i < mmu->type_nr; i++) { in nvif_mmu_type()
53 if ((mmu->type[i].type & mask) == mask) in nvif_mmu_type()
56 return -EINVAL; in nvif_mmu_type()
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/
nouveau_mem.c
47 switch (vmm->object.oclass) { in nouveau_mem_map()
54 args.nv50.kind = mem->kind; in nouveau_mem_map()
55 args.nv50.comp = mem->comp; in nouveau_mem_map()
62 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_mem_map()
68 args.gf100.kind = mem->kind; in nouveau_mem_map()
73 return -ENOSYS; in nouveau_mem_map()
76 super = vmm->object.client->super; in nouveau_mem_map()
77 vmm->object.client->super = true; in nouveau_mem_map()
78 ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, in nouveau_mem_map()
79 &mem->mem, 0); in nouveau_mem_map()
[all …]
/kernel/linux/linux-4.19/drivers/gpu/drm/nouveau/
nouveau_mem.c
47 switch (vmm->object.oclass) { in nouveau_mem_map()
54 args.nv50.kind = mem->kind; in nouveau_mem_map()
55 args.nv50.comp = mem->comp; in nouveau_mem_map()
62 if (mem->mem.type & NVIF_MEM_VRAM) in nouveau_mem_map()
68 args.gf100.kind = mem->kind; in nouveau_mem_map()
73 return -ENOSYS; in nouveau_mem_map()
76 super = vmm->object.client->super; in nouveau_mem_map()
77 vmm->object.client->super = true; in nouveau_mem_map()
78 ret = nvif_vmm_map(vmm, vma->addr, mem->mem.size, &args, argc, in nouveau_mem_map()
79 &mem->mem, 0); in nouveau_mem_map()
[all …]
/kernel/linux/linux-4.19/arch/m68k/
Kconfig.cpu
1 # SPDX-License-Identifier: GPL-2.0
2 comment "Processor Type"
6 default M68KCLASSIC if MMU
7 default COLDFIRE if !MMU
13 applications, and are all System-On-Chip (SOC) devices, as opposed
39 depends on !MMU
50 System-On-Chip devices (eg 68328, 68302, etc). It does not contain
51 a paging MMU.
61 System-On-Chip parts, and does not contain a paging MMU.
65 depends on MMU
[all …]
/kernel/linux/linux-5.10/arch/m68k/
Kconfig.cpu
1 # SPDX-License-Identifier: GPL-2.0
2 comment "Processor Type"
6 default M68KCLASSIC if MMU
7 default COLDFIRE if !MMU
13 applications, and are all System-On-Chip (SOC) devices, as opposed
40 depends on !MMU
52 System-On-Chip devices (eg 68328, 68302, etc). It does not contain
53 a paging MMU.
64 System-On-Chip parts, and does not contain a paging MMU.
68 depends on MMU
[all …]
