/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/bios/
vmap.c
   32  u32 vmap = 0;  in nvbios_vmap_table() local
   36  vmap = nvbios_rd32(bios, bit_P.offset + 0x20);  in nvbios_vmap_table()
   37  if (vmap) {  in nvbios_vmap_table()
   38  *ver = nvbios_rd08(bios, vmap + 0);  in nvbios_vmap_table()
   42  *hdr = nvbios_rd08(bios, vmap + 1);  in nvbios_vmap_table()
   43  *cnt = nvbios_rd08(bios, vmap + 3);  in nvbios_vmap_table()
   44  *len = nvbios_rd08(bios, vmap + 2);  in nvbios_vmap_table()
   45  return vmap;  in nvbios_vmap_table()
   60  u32 vmap = nvbios_vmap_table(bios, ver, hdr, cnt, len);  in nvbios_vmap_parse() local
   62  switch (!!vmap * *ver) {  in nvbios_vmap_parse()
   [all …]
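The nvbios_vmap_parse() hit at line 62 uses the `switch (!!vmap * *ver)` idiom: the version byte is multiplied by !!vmap, so a missing table (vmap == 0) and an unsupported version both fall through to the same default case. A minimal standalone sketch of the idiom, with hypothetical names rather than the driver's code:

    #include <stdint.h>
    #include <stdio.h>

    /* Dispatch on the table version, gated on the pointer being
     * non-zero: !!ptr is 0 or 1, so a missing table forces the
     * default case regardless of what the version byte holds. */
    static void parse_table(uint32_t ptr, uint8_t ver)
    {
            switch (!!ptr * ver) {
            case 0x10:
            case 0x20:
                    printf("parse v%#x table at %#x\n", ver, (unsigned int)ptr);
                    break;
            default:
                    printf("no table, or unsupported version\n");
                    break;
            }
    }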
Kbuild
   34  nvkm-y += nvkm/subdev/bios/vmap.o
/kernel/linux/linux-5.10/drivers/net/
vrf.c
  106  struct vrf_map vmap;  member
  179  return &nn_vrf->vmap;  in netns_vrf_map()
  225  static struct vrf_map_elem *vrf_map_lookup_elem(struct vrf_map *vmap,  in vrf_map_lookup_elem() argument
  232  hash_for_each_possible(vmap->ht, me, hnode, key) {  in vrf_map_lookup_elem()
  240  static void vrf_map_add_elem(struct vrf_map *vmap, struct vrf_map_elem *me)  in vrf_map_add_elem() argument
  246  hash_add(vmap->ht, &me->hnode, key);  in vrf_map_add_elem()
  254  static void vrf_map_lock(struct vrf_map *vmap) __acquires(&vmap->vmap_lock)  in vrf_map_lock() argument
  256  spin_lock(&vmap->vmap_lock);  in vrf_map_lock()
  259  static void vrf_map_unlock(struct vrf_map *vmap) __releases(&vmap->vmap_lock)  in vrf_map_unlock() argument
  261  spin_unlock(&vmap->vmap_lock);  in vrf_map_unlock()
  [all …]
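The vrf.c hits show a common kernel pattern: a fixed-size hashtable from <linux/hashtable.h> guarded by a spinlock. A minimal sketch of that pattern, with hypothetical my_* names in place of the driver's own types:

    #include <linux/hashtable.h>
    #include <linux/spinlock.h>

    struct my_map {
            DECLARE_HASHTABLE(ht, 4);       /* 2^4 buckets; init with hash_init() */
            spinlock_t lock;                /* init with spin_lock_init() */
    };

    struct my_elem {
            struct hlist_node hnode;
            u32 key;
    };

    /* Walk only the bucket that `key` hashes to; caller holds map->lock. */
    static struct my_elem *my_map_lookup(struct my_map *map, u32 key)
    {
            struct my_elem *e;

            hash_for_each_possible(map->ht, e, hnode, key)
                    if (e->key == key)
                            return e;
            return NULL;
    }

    static void my_map_add(struct my_map *map, struct my_elem *e)
    {
            spin_lock(&map->lock);
            hash_add(map->ht, &e->hnode, e->key);
            spin_unlock(&map->lock);
    }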
/kernel/linux/linux-5.10/drivers/gpu/drm/nouveau/nvkm/subdev/volt/
base.c
   87  u32 vmap;  in nvkm_volt_map_min() local
   89  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map_min()
   90  if (vmap) {  in nvkm_volt_map_min()
  109  u32 vmap;  in nvkm_volt_map() local
  111  vmap = nvbios_vmap_entry_parse(bios, id, &ver, &len, &info);  in nvkm_volt_map()
  112  if (vmap) {  in nvkm_volt_map()
  295  struct nvbios_vmap vmap;  in nvkm_volt_ctor() local
  301  if (nvbios_vmap_parse(bios, &ver, &hdr, &cnt, &len, &vmap)) {  in nvkm_volt_ctor()
  302  volt->max0_id = vmap.max0;  in nvkm_volt_ctor()
  303  volt->max1_id = vmap.max1;  in nvkm_volt_ctor()
  [all …]
/kernel/linux/linux-5.10/kernel/dma/
remap.c
   27  vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT,  in dma_common_pages_remap()
   51  vaddr = vmap(pages, count, VM_DMA_COHERENT, prot);  in dma_common_contiguous_remap()
/kernel/linux/linux-5.10/drivers/gpu/drm/tiny/
cirrus.c
  317  void *vmap;  in cirrus_fb_blit_rect() local
  325  vmap = drm_gem_shmem_vmap(fb->obj[0]);  in cirrus_fb_blit_rect()
  326  if (!vmap)  in cirrus_fb_blit_rect()
  331  vmap, fb, rect);  in cirrus_fb_blit_rect()
  336  vmap, fb, rect, false);  in cirrus_fb_blit_rect()
  341  vmap, fb, rect);  in cirrus_fb_blit_rect()
  346  drm_gem_shmem_vunmap(fb->obj[0], vmap);  in cirrus_fb_blit_rect()
/kernel/linux/linux-5.10/arch/arm/mm/
fault-armv.c
  247  p1 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
  248  p2 = vmap(&page, 1, VM_IOREMAP, prot);  in check_writebuffer_bugs()
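check_writebuffer_bugs() maps the same struct page at two different kernel virtual addresses, which vmap() permits. A condensed sketch of that double-alias probe (the real code also inserts barriers and prints a warning; hypothetical function name):

    #include <linux/vmalloc.h>
    #include <linux/mm.h>

    /* Map one page twice, write through one alias, read through the
     * other; on a broken write buffer the second alias sees stale data. */
    static bool aliases_coherent(struct page *page, pgprot_t prot)
    {
            unsigned long *p1 = vmap(&page, 1, VM_IOREMAP, prot);
            unsigned long *p2 = vmap(&page, 1, VM_IOREMAP, prot);
            bool ok = false;

            if (p1 && p2) {
                    p1[0] = 0xdeadbeef;
                    ok = (p2[0] == 0xdeadbeef);
            }
            vunmap(p2);     /* vunmap(NULL) is a no-op */
            vunmap(p1);
            return ok;
    }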
/kernel/linux/linux-5.10/Documentation/features/vm/huge-vmap/
arch-support.txt
    2  # Feature name: huge-vmap
/kernel/linux/linux-5.10/arch/x86/kernel/
irq_64.c
   47  va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);  in map_irq_stack()
/kernel/linux/linux-5.10/arch/hexagon/kernel/
vdso.c
   28  vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);  in vdso_init()
/kernel/linux/linux-5.10/arch/csky/kernel/
vdso.c
   29  vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);  in init_vdso()
/kernel/linux/linux-5.10/drivers/dma-buf/heaps/
heap-helpers.c
   46  vaddr = vmap(buffer->pages, buffer->pagecount, VM_MAP, PAGE_KERNEL);  in dma_heap_map_kernel()
  269  .vmap = dma_heap_dma_buf_vmap,
/kernel/linux/linux-5.10/drivers/gpu/drm/etnaviv/
etnaviv_gem.c
  345  etnaviv_obj->vaddr = etnaviv_obj->ops->vmap(etnaviv_obj);  in etnaviv_gem_vmap()
  361  return vmap(pages, obj->base.size >> PAGE_SHIFT,  in etnaviv_gem_vmap_impl()
  512  .vmap = etnaviv_gem_vmap_impl,
  717  .vmap = etnaviv_gem_vmap_impl,
etnaviv_gem.h
   67  void *(*vmap)(struct etnaviv_gem_object *);  member
etnaviv_gem_prime.c
  102  .vmap = etnaviv_gem_prime_vmap_impl,
/kernel/linux/linux-5.10/drivers/staging/android/ion/
ion_heap.c
   45  vaddr = vmap(pages, npages, VM_MAP, pgprot);  in ion_heap_map_kernel()
   85  void *addr = vmap(pages, num, VM_MAP, pgprot);  in ion_heap_clear_pages()
/kernel/linux/linux-5.10/Documentation/core-api/
cachetlb.rst
  383  vmap/vmalloc API. Since kernel I/O goes via physical pages, the I/O
  385  the only aliases. This isn't true for vmap aliases, so anything in
  386  the kernel trying to do I/O to vmap areas must manually manage
  387  coherency. It must do this by flushing the vmap range before doing
  393  the vmap area. This is to make sure that any data the kernel
  394  modified in the vmap range is made visible to the physical
  401  the cache for a given virtual address range in the vmap area
  405  vmap area.
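These excerpted lines are describing flush_kernel_vmap_range() and invalidate_kernel_vmap_range(), the helpers cachetlb.rst defines for keeping a vmap alias coherent around I/O. A sketch of the protocol as the document describes it (hypothetical wrapper function; the I/O submission itself is elided):

    #include <linux/highmem.h>      /* pulls in asm/cacheflush.h */

    static void io_via_vmap_alias(void *vaddr, int nbytes)
    {
            /* Before the I/O: write back any lines the kernel dirtied
             * through the vmap alias, so the physical pages are current. */
            flush_kernel_vmap_range(vaddr, nbytes);

            /* ... perform the I/O against the underlying physical pages ... */

            /* After a read (device -> memory): drop any stale lines the
             * CPU speculatively fetched through the alias during the I/O. */
            invalidate_kernel_vmap_range(vaddr, nbytes);
    }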
/kernel/linux/linux-5.10/drivers/gpu/drm/virtio/
virtgpu_prime.c
   55  .vmap = drm_gem_dmabuf_vmap,
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gem/selftests/
mock_dmabuf.c
   88  .vmap = mock_dmabuf_vmap,
/kernel/linux/linux-5.10/drivers/gpu/drm/
drm_memory.c
   97  addr = vmap(page_map, num_pages, VM_IOREMAP, PAGE_AGP);  in agp_remap()
/kernel/linux/linux-5.10/include/linux/
dma-buf.h
  268  void *(*vmap)(struct dma_buf *);  member
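In the 5.10 ABI shown here, an exporter's .vmap hook takes only the dma_buf and returns a kernel virtual address. A sketch of the callback for a page-array-backed exporter, with hypothetical my_* names (the mandatory attach/map/release ops are elided):

    #include <linux/dma-buf.h>
    #include <linux/vmalloc.h>

    struct my_buffer {
            struct page **pages;
            unsigned int pagecount;
    };

    /* Map the buffer's backing pages into one contiguous kernel range. */
    static void *my_dmabuf_vmap(struct dma_buf *dmabuf)
    {
            struct my_buffer *buf = dmabuf->priv;

            return vmap(buf->pages, buf->pagecount, VM_MAP, PAGE_KERNEL);
    }

    static const struct dma_buf_ops my_dmabuf_ops = {
            .vmap = my_dmabuf_vmap,
            /* .attach, .map_dma_buf, .release, ... elided */
    };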
vmalloc.h
  118  extern void *vmap(struct page **pages, unsigned int count,
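The prototype continues `unsigned long flags, pgprot_t prot);` in the header: vmap() takes an array of `count` pages plus mapping flags and protection bits. A minimal usage sketch (hypothetical demo function): allocate two scattered pages, view them through one virtually contiguous mapping, then unmap; vunmap() releases only the mapping, not the pages.

    #include <linux/errno.h>
    #include <linux/gfp.h>
    #include <linux/mm.h>
    #include <linux/string.h>
    #include <linux/vmalloc.h>

    static int vmap_demo(void)
    {
            struct page *pages[2];
            void *vaddr;
            int ret = -ENOMEM;

            pages[0] = alloc_page(GFP_KERNEL);
            pages[1] = alloc_page(GFP_KERNEL);
            if (!pages[0] || !pages[1])
                    goto out_free;

            /* VM_MAP: a plain kernel mapping; PAGE_KERNEL: cached RW. */
            vaddr = vmap(pages, 2, VM_MAP, PAGE_KERNEL);
            if (!vaddr)
                    goto out_free;

            memset(vaddr, 0, 2 * PAGE_SIZE);  /* contiguous view of both pages */
            vunmap(vaddr);                    /* unmap only; pages still ours */
            ret = 0;

    out_free:
            if (pages[0])
                    __free_page(pages[0]);
            if (pages[1])
                    __free_page(pages[1]);
            return ret;
    }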
/kernel/linux/linux-5.10/drivers/gpu/drm/i915/gt/
shmem_utils.c
   70  vaddr = vmap(pages, n_pages, VM_MAP_PUT_PAGES, PAGE_KERNEL);  in shmem_pin_map()
/kernel/linux/linux-5.10/include/drm/
drm_gem.h
  141  void *(*vmap)(struct drm_gem_object *obj);  member
/kernel/linux/linux-5.10/sound/core/
sgbuf.c
  124  dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);  in snd_malloc_sgbuf_pages()