/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <linux/android_vendor.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */

/*
 * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
 *
 * If IS_ENABLED(CONFIG_KASAN_VMALLOC), VM_KASAN is set on a vm_struct after
 * shadow memory has been mapped. It's used to handle allocation errors so that
 * we don't try to poison shadow on free if it was never allocated.
 *
 * Otherwise, VM_KASAN is set for kasan_module_alloc() allocations and used to
 * determine which allocations need the module shadow freed.
 */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by an arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	ANDROID_OEM_DATA(1);
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following three variables can be packed, because
	 * a vmap_area object is always in one of the three states:
	 *  1) in "free" tree (root is free_vmap_area_root)
	 *  2) in "busy" tree (root is vmap_area_root)
	 *  3) in purge list  (head is vmap_purge_list)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
		struct llist_node purge_list;	/* in purge list */
	};
};

/*
 * Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
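
/*
 * Illustrative sketch, not part of this header: a transient, virtually
 * contiguous view of caller-owned pages via vm_map_ram(). The "pages"
 * array and "nr" count are hypothetical caller state; the count passed
 * to vm_unmap_ram() must match the one passed to vm_map_ram().
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memset(va, 0, nr * PAGE_SIZE);
 *	vm_unmap_ram(va, nr);
 */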

#ifdef CONFIG_MMU
extern void __init vmalloc_init(void);
extern unsigned long vmalloc_nr_pages(void);
#else
static inline void vmalloc_init(void)
{
}
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size);
extern void *vzalloc(unsigned long size);
extern void *vmalloc_user(unsigned long size);
extern void *vmalloc_node(unsigned long size, int node);
extern void *vzalloc_node(unsigned long size, int node);
extern void *vmalloc_32(unsigned long size);
extern void *vmalloc_32_user(unsigned long size);
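
/*
 * Typical driver-side use, as an illustrative sketch: vzalloc() returns
 * zeroed, virtually contiguous memory that need not be physically
 * contiguous, and is always paired with vfree(). The size here is
 * arbitrary.
 *
 *	void *buf = vzalloc(SZ_1M);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	vfree(buf);
 */
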
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller);

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags);
extern void *vmalloc_array(size_t n, size_t size);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags);
extern void *vcalloc(size_t n, size_t size);
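
/*
 * Illustrative sketch: unlike an open-coded vmalloc(n * size), the array
 * variants check the n * size multiplication for overflow (see
 * linux/overflow.h, included above). "nr_entries" and "struct entry" are
 * hypothetical.
 *
 *	struct entry *tbl = vcalloc(nr_entries, sizeof(*tbl));
 *
 *	if (!tbl)
 *		return -ENOMEM;
 *	...
 *	vfree(tbl);
 */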

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
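
/*
 * Illustrative sketch: vmap() builds a contiguous kernel view of an
 * existing page array. With plain VM_MAP the pages remain owned by the
 * caller and must be freed separately after vunmap(); "pages" and "nr"
 * are hypothetical.
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */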

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
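
/*
 * Illustrative sketch of a hypothetical driver mmap handler: a buffer
 * allocated with vmalloc_user() (which sets VM_USERMAP, as required by
 * remap_vmalloc_range()) is exposed to userspace. "struct foo_dev" and
 * its "buf" member are invented for the example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *fd = file->private_data;
 *
 *		return remap_vmalloc_range(vma, fd->buf, vma->vm_pgoff);
 *	}
 */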

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif

/*
 * There is no default implementation for arch_sync_kernel_mappings(). It
 * simply relies on the compiler to optimize the calls out if
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);
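
/*
 * Illustrative sketch of the arch side, under the assumption of an
 * architecture whose top-level kernel page tables need syncing: it would
 * define in its own headers something like
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED
 *
 * and implement arch_sync_kernel_mappings(). With the default mask of 0,
 * the compiler optimizes the calls away entirely.
 */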

/*
 * Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}
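
/*
 * Example: unless VM_NO_GUARD was requested, the allocator appends one
 * guard page to the area, so after get_vm_area(PAGE_SIZE, VM_MAP) the
 * area's ->size is 2 * PAGE_SIZE while get_vm_area_size() returns the
 * usable PAGE_SIZE.
 */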

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);

#ifdef CONFIG_MMU
extern int map_kernel_range_noflush(unsigned long start, unsigned long size,
				    pgprot_t prot, struct page **pages);
int map_kernel_range(unsigned long start, unsigned long size, pgprot_t prot,
		struct page **pages);
extern void unmap_kernel_range_noflush(unsigned long addr, unsigned long size);
extern void unmap_kernel_range(unsigned long addr, unsigned long size);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}
#else
static inline int
map_kernel_range_noflush(unsigned long start, unsigned long size,
			pgprot_t prot, struct page **pages)
{
	return size >> PAGE_SHIFT;
}
#define map_kernel_range map_kernel_range_noflush
static inline void
unmap_kernel_range_noflush(unsigned long addr, unsigned long size)
{
}
#define unmap_kernel_range unmap_kernel_range_noflush
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
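
/*
 * Illustrative sketch of a hypothetical caller that maps memory
 * executable (e.g. a module or JIT image loader) and wants vfree() to
 * reset the direct map and flush the TLB. set_memory_ro()/set_memory_x()
 * come from <asm/set_memory.h>; "size" is assumed page-aligned.
 *
 *	void *image = __vmalloc(size, GFP_KERNEL);
 *
 *	if (!image)
 *		return NULL;
 *	set_vm_flush_reset_perms(image);
 *	set_memory_ro((unsigned long)image, size >> PAGE_SHIFT);
 *	set_memory_x((unsigned long)image, size >> PAGE_SHIFT);
 */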

/* for /dev/kmem */
extern long vread(char *buf, char *addr, unsigned long count);
extern long vwrite(char *buf, char *addr, unsigned long count);

/*
 * Internals. Don't use.
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
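
/*
 * Illustrative sketch of a hypothetical registrant: the purge notifier
 * chain is invoked when vmap address space runs short, giving subsystems
 * a chance to drop cached vmap mappings.
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long event,
 *			     void *data)
 *	{
 *		foo_drop_cached_mappings();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_purge_nb = {
 *		.notifier_call = foo_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_purge_nb);
 */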

/* Allow disabling lazy TLB flushing */
extern bool lazy_vunmap_enable;

#endif /* _LINUX_VMALLOC_H */