/mm/
D | highmem.c |
     154  struct page *__kmap_to_page(void *vaddr)                      in __kmap_to_page() argument
     156  unsigned long base = (unsigned long) vaddr & PAGE_MASK;       in __kmap_to_page()
     158  unsigned long addr = (unsigned long)vaddr;                    in __kmap_to_page()
     181  return virt_to_page(vaddr);                                   in __kmap_to_page()
     236  unsigned long vaddr;                                          in map_new_virtual() local
     278  vaddr = PKMAP_ADDR(last_pkmap_nr);                            in map_new_virtual()
     279  set_pte_at(&init_mm, vaddr,                                   in map_new_virtual()
     283  set_page_address(page, (void *)vaddr);                        in map_new_virtual()
     285  return vaddr;                                                 in map_new_virtual()
     298  unsigned long vaddr;                                          in kmap_high() local
     [all …]

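Note: the highmem.c hits above are the kmap machinery; __kmap_to_page() is the
reverse lookup from a temporarily mapped kernel vaddr back to its struct page.
A minimal usage sketch of that round trip via the stock kmap_local_page() API
(the helper name here is hypothetical):

#include <linux/highmem.h>
#include <linux/string.h>

/* Map a possibly-highmem page, copy from it, and check that the
 * temporary vaddr resolves back to the same struct page. */
static void copy_from_page_sketch(struct page *page, void *dst, size_t len)
{
        void *vaddr = kmap_local_page(page);    /* no-op for lowmem pages */

        memcpy(dst, vaddr, len);
        WARN_ON(kmap_to_page(vaddr) != page);   /* vaddr -> page round trip */
        kunmap_local(vaddr);                    /* drop the temporary mapping */
}
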
D | debug_vm_pgtable.c |
      60  unsigned long vaddr;                                                member
     130  set_pte_at(args->mm, args->vaddr, args->ptep, pte);                 in pte_advanced_tests()
     132  ptep_set_wrprotect(args->mm, args->vaddr, args->ptep);              in pte_advanced_tests()
     135  ptep_get_and_clear(args->mm, args->vaddr, args->ptep);              in pte_advanced_tests()
     142  set_pte_at(args->mm, args->vaddr, args->ptep, pte);                 in pte_advanced_tests()
     146  ptep_set_access_flags(args->vma, args->vaddr, args->ptep, pte, 1);  in pte_advanced_tests()
     149  ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);      in pte_advanced_tests()
     155  set_pte_at(args->mm, args->vaddr, args->ptep, pte);                 in pte_advanced_tests()
     157  ptep_test_and_clear_young(args->vma, args->vaddr, args->ptep);      in pte_advanced_tests()
     161  ptep_get_and_clear_full(args->mm, args->vaddr, args->ptep, 1);      in pte_advanced_tests()
     [all …]

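Note: pte_advanced_tests() exercises the pte helpers at args->vaddr with an
install/modify/verify rhythm. A condensed sketch of one such check (the
function name is hypothetical; the helpers are the real <linux/pgtable.h> API):

#include <linux/mm.h>
#include <linux/pgtable.h>

static void pte_wrprotect_check(struct mm_struct *mm, unsigned long vaddr,
                                pte_t *ptep, pte_t pte)
{
        set_pte_at(mm, vaddr, ptep, pte);       /* install a writable pte */
        ptep_set_wrprotect(mm, vaddr, ptep);    /* helper under test */
        WARN_ON(pte_write(ptep_get(ptep)));     /* write bit must be gone */
        ptep_get_and_clear(mm, vaddr, ptep);    /* tear the mapping down */
}
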
D | ioremap.c |
      18  unsigned long offset, vaddr;                                     in generic_ioremap_prot() local
      40  vaddr = (unsigned long)area->addr;                               in generic_ioremap_prot()
      43  if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {  in generic_ioremap_prot()
      48  return (void __iomem *)(vaddr + offset);                         in generic_ioremap_prot()
      62  void *vaddr = (void *)((unsigned long)addr & PAGE_MASK);         in generic_iounmap() local
      64  if (is_ioremap_addr(vaddr))                                      in generic_iounmap()
      65  vunmap(vaddr);                                                   in generic_iounmap()

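Note: the generic_ioremap_prot() hits line up as one short pipeline. A
simplified outline, assuming only the happy path matters (the real function
also range-checks phys_addr; the sketch name is hypothetical):

#include <linux/io.h>
#include <linux/vmalloc.h>

static void __iomem *ioremap_sketch(phys_addr_t phys_addr, size_t size,
                                    pgprot_t prot)
{
        unsigned long offset = phys_addr & ~PAGE_MASK;  /* in-page offset */
        unsigned long vaddr;
        struct vm_struct *area;

        phys_addr -= offset;                    /* page-align the base */
        size = PAGE_ALIGN(size + offset);

        area = get_vm_area(size, VM_IOREMAP);   /* reserve vmalloc space */
        if (!area)
                return NULL;
        vaddr = (unsigned long)area->addr;

        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);             /* mapping failed */
                return NULL;
        }
        return (void __iomem *)(vaddr + offset);
}
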
D | dmapool.c |
      65  void *vaddr;                                                                     member
     133  static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)  in pool_block_err() argument
     141  __func__, pool->name, vaddr, &dma);                                              in pool_block_err()
     146  if (block != vaddr) {                                                            in pool_block_err()
     155  memset(vaddr, POOL_POISON_FREED, pool->size);                                    in pool_block_err()
     161  memset(page->vaddr, POOL_POISON_FREED, pool->allocation);                        in pool_init_page()
     169  static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)  in pool_block_err() argument
     172  memset(vaddr, 0, pool->size);                                                    in pool_block_err()
     313  block = page->vaddr + offset;                                                    in pool_initialise_page()
     342  page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,                    in pool_alloc_page()
     [all …]

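Note: page->vaddr above is the kernel-side address of each coherent chunk that
dmapool carves into blocks (and poisons on free when debugging is enabled). A
usage sketch of the public API those internals serve (dev is assumed to be a
valid struct device *; error handling kept minimal):

#include <linux/dmapool.h>

static int dmapool_demo(struct device *dev)
{
        struct dma_pool *pool;
        dma_addr_t dma;
        void *vaddr;

        pool = dma_pool_create("demo", dev, 64, 64, 0); /* 64-byte blocks */
        if (!pool)
                return -ENOMEM;

        vaddr = dma_pool_alloc(pool, GFP_KERNEL, &dma); /* CPU + DMA addr */
        if (vaddr)
                dma_pool_free(pool, vaddr, dma);        /* pass both back */

        dma_pool_destroy(pool);
        return 0;
}
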
D | zsmalloc.c |
     910  void *vaddr;                                                  in init_zspage() local
     914  vaddr = kmap_atomic(page);                                    in init_zspage()
     915  link = (struct link_free *)vaddr + off / sizeof(*link);      in init_zspage()
     937  kunmap_atomic(vaddr);                                        in init_zspage()
    1318  void *vaddr;                                                  in obj_malloc() local
    1332  vaddr = kmap_atomic(m_page);                                  in obj_malloc()
    1333  link = (struct link_free *)vaddr + m_offset / sizeof(*link);  in obj_malloc()
    1342  kunmap_atomic(vaddr);                                         in obj_malloc()
    1426  void *vaddr;                                                  in obj_free() local
    1432  vaddr = kmap_atomic(f_page);                                  in obj_free()
     [all …]

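Note: init_zspage(), obj_malloc() and obj_free() all follow the same
map/walk/unmap rhythm around a per-page free list. A sketch of that pattern
(struct link_free is reduced to one field here; the helper name is
hypothetical):

#include <linux/highmem.h>

struct link_free {
        unsigned long next;     /* encoded link to the next free object */
};

static unsigned long freelist_head(struct page *page, unsigned int off)
{
        void *vaddr = kmap_atomic(page);        /* short atomic mapping */
        struct link_free *link = (struct link_free *)vaddr +
                                 off / sizeof(*link);
        unsigned long next = link->next;

        kunmap_atomic(vaddr);                   /* pairs with kmap_atomic */
        return next;
}
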
D | hugetlb_vmemmap.c |
     494  unsigned long vaddr = (unsigned long)head;                 in vmemmap_should_optimize() local
     501  pmdp = pmd_off_k(vaddr);                                   in vmemmap_should_optimize()
     511  vmemmap_page = pmd_page(pmd) + pte_index(vaddr);           in vmemmap_should_optimize()
     513  vmemmap_page = pte_page(*pte_offset_kernel(pmdp, vaddr));  in vmemmap_should_optimize()

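Note: vmemmap_should_optimize() walks the kernel page tables by hand because
the vmemmap may be mapped with either a huge PMD or ordinary PTEs. A sketch of
that two-way resolve (hypothetical helper name; pmdp_get() assumed available):

#include <linux/mm.h>
#include <linux/pgtable.h>

static struct page *vmemmap_backing_page(unsigned long vaddr)
{
        pmd_t *pmdp = pmd_off_k(vaddr);         /* walk the kernel tables */
        pmd_t pmd = pmdp_get(pmdp);

        if (pmd_leaf(pmd))                      /* PMD-mapped vmemmap */
                return pmd_page(pmd) + pte_index(vaddr);
        return pte_page(*pte_offset_kernel(pmdp, vaddr));
}
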
D | vmalloc.c |
    2049  void *vaddr;                                            in new_vmap_block() local
    2067  vaddr = vmap_block_vaddr(va->va_start, 0);              in new_vmap_block()
    2101  return vaddr;                                           in new_vmap_block()
    2192  void *vaddr = NULL;                                     in vb_alloc() local
    2222  vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);  in vb_alloc()
    2238  if (!vaddr)                                             in vb_alloc()
    2239  vaddr = new_vmap_block(order, gfp_mask);                in vb_alloc()
    2241  return vaddr;                                           in vb_alloc()
    3772  char *vaddr;                                            in vread_iter() local
    3815  vaddr = (char *) va->va_start;                          in vread_iter()
     [all …]

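Note: vb_alloc() shows a classic fast-path/slow-path split: reuse a partially
filled vmap block when one has room, otherwise fall back to new_vmap_block().
An outline of that control flow, with the per-CPU block scan elided to a
comment (the outline name is hypothetical; the called helpers are the static
functions listed above):

#include <linux/vmalloc.h>

static void *vb_alloc_outline(unsigned long size, gfp_t gfp_mask)
{
        unsigned int order = get_order(size);
        void *vaddr = NULL;

        /*
         * Fast path (elided): scan the per-CPU free lists for a block
         * with 2^order free pages, then
         *      vaddr = vmap_block_vaddr(vb->va->va_start, pages_off);
         */
        if (!vaddr)
                vaddr = new_vmap_block(order, gfp_mask);  /* slow path */
        return vaddr;
}
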
/mm/kmsan/
D | kmsan.h |
     192  struct page *kmsan_vmalloc_to_page_or_null(void *vaddr);
     201  static inline bool kmsan_internal_is_module_addr(void *vaddr)        in kmsan_internal_is_module_addr() argument
     203  return ((u64)vaddr >= MODULES_VADDR) && ((u64)vaddr < MODULES_END);  in kmsan_internal_is_module_addr()

D | core.c |
     305  struct page *kmsan_vmalloc_to_page_or_null(void *vaddr)  in kmsan_vmalloc_to_page_or_null() argument
     309  if (!kmsan_internal_is_vmalloc_addr(vaddr) &&            in kmsan_vmalloc_to_page_or_null()
     310  !kmsan_internal_is_module_addr(vaddr))                   in kmsan_vmalloc_to_page_or_null()
     312  page = vmalloc_to_page(vaddr);                           in kmsan_vmalloc_to_page_or_null()

D | shadow.c |
      73  static struct page *virt_to_page_or_null(void *vaddr)  in virt_to_page_or_null() argument
      75  if (kmsan_virt_addr_valid(vaddr))                       in virt_to_page_or_null()
      76  return virt_to_page(vaddr);                             in virt_to_page_or_null()

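Note: kmsan_vmalloc_to_page_or_null() and virt_to_page_or_null() above share a
defensive "check the address class before converting" pattern, since
virt_to_page() and vmalloc_to_page() are each only valid for their own address
range. A combined sketch (hypothetical helper; module areas are treated as
vmalloc space here, which is arch-dependent):

#include <linux/mm.h>
#include <linux/vmalloc.h>

static struct page *any_virt_to_page_or_null(void *vaddr)
{
        if (virt_addr_valid(vaddr))
                return virt_to_page(vaddr);     /* linear-map address */
        if (is_vmalloc_addr(vaddr))
                return vmalloc_to_page(vaddr);  /* vmalloc-area address */
        return NULL;                            /* no backing page known */
}
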
/mm/damon/
D | Makefile |
       4  obj-$(CONFIG_DAMON_VADDR) += ops-common.o vaddr.o