/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/alloc_tag.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <linux/android_vendor.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif
#define VM_SPARSE		0x00001000	/* sparse vm_area. not all pages are present. */

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	ANDROID_OEM_DATA(1);
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;		/* address sorted rbtree */
	struct list_head list;		/* address sorted list */

	/*
	 * The following two variables can be packed, because
	 * a vmap_area object can be either:
	 *    1) in "free" tree (root is free_vmap_area_root)
	 *    2) or "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size;	/* in "free" tree */
		struct vm_struct *vm;		/* in "busy" tree */
	};
	unsigned long flags;		/* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
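
/*
 * Example (illustrative sketch, not part of the upstream API): an
 * architecture that selects HAVE_ARCH_HUGE_VMAP would override the hooks
 * above in its <asm/vmalloc.h>, e.g. to allow PMD-sized mappings only
 * for normal cacheable protections:
 *
 *	#define arch_vmap_pmd_supported arch_vmap_pmd_supported
 *	static inline bool arch_vmap_pmd_supported(pgprot_t prot)
 *	{
 *		return pgprot_val(prot) == pgprot_val(PAGE_KERNEL);
 *	}
 *
 * The actual condition is architecture specific; the PAGE_KERNEL check
 * here is only a placeholder.
 */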

/*
 * Highlevel APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
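
/*
 * Example (illustrative sketch): a transient, virtually contiguous
 * mapping of an array of pages. "pages", "nr_pages", "src" and "len"
 * are hypothetical caller state.
 *
 *	void *va = vm_map_ram(pages, nr_pages, NUMA_NO_NODE);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	memcpy(va, src, len);
 *	vm_unmap_ram(va, nr_pages);
 *
 * The count passed to vm_unmap_ram() must match the count that was
 * mapped.
 */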

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc_noprof(unsigned long size) __alloc_size(1);
#define vmalloc(...)		alloc_hooks(vmalloc_noprof(__VA_ARGS__))

extern void *vzalloc_noprof(unsigned long size) __alloc_size(1);
#define vzalloc(...)		alloc_hooks(vzalloc_noprof(__VA_ARGS__))

extern void *vmalloc_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_user(...)	alloc_hooks(vmalloc_user_noprof(__VA_ARGS__))

extern void *vmalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vmalloc_node(...)	alloc_hooks(vmalloc_node_noprof(__VA_ARGS__))

extern void *vzalloc_node_noprof(unsigned long size, int node) __alloc_size(1);
#define vzalloc_node(...)	alloc_hooks(vzalloc_node_noprof(__VA_ARGS__))

extern void *vmalloc_32_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32(...)		alloc_hooks(vmalloc_32_noprof(__VA_ARGS__))

extern void *vmalloc_32_user_noprof(unsigned long size) __alloc_size(1);
#define vmalloc_32_user(...)	alloc_hooks(vmalloc_32_user_noprof(__VA_ARGS__))

extern void *__vmalloc_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define __vmalloc(...)		alloc_hooks(__vmalloc_noprof(__VA_ARGS__))

extern void *__vmalloc_node_range_noprof(unsigned long size, unsigned long align,
		unsigned long start, unsigned long end, gfp_t gfp_mask,
		pgprot_t prot, unsigned long vm_flags, int node,
		const void *caller) __alloc_size(1);
#define __vmalloc_node_range(...)	alloc_hooks(__vmalloc_node_range_noprof(__VA_ARGS__))

void *__vmalloc_node_noprof(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
#define __vmalloc_node(...)	alloc_hooks(__vmalloc_node_noprof(__VA_ARGS__))

void *vmalloc_huge_noprof(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
#define vmalloc_huge(...)	alloc_hooks(vmalloc_huge_noprof(__VA_ARGS__))

extern void *__vmalloc_array_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vmalloc_array(...)	alloc_hooks(__vmalloc_array_noprof(__VA_ARGS__))

extern void *vmalloc_array_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vmalloc_array(...)	alloc_hooks(vmalloc_array_noprof(__VA_ARGS__))

extern void *__vcalloc_noprof(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
#define __vcalloc(...)		alloc_hooks(__vcalloc_noprof(__VA_ARGS__))

extern void *vcalloc_noprof(size_t n, size_t size) __alloc_size(1, 2);
#define vcalloc(...)		alloc_hooks(vcalloc_noprof(__VA_ARGS__))

void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
		__realloc_size(2);
#define vrealloc(...)		alloc_hooks(vrealloc_noprof(__VA_ARGS__))

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
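
/*
 * Example (illustrative sketch): a large zeroed buffer plus an
 * overflow-checked array; the sizes, "nr_entries" and "struct foo" are
 * placeholders.
 *
 *	void *buf = vzalloc(SZ_1M);
 *	struct foo *tbl = vcalloc(nr_entries, sizeof(*tbl));
 *
 *	if (!buf || !tbl)
 *		goto err;
 *	...
 *	vfree(tbl);
 *	vfree(buf);
 *
 * vcalloc() returns NULL instead of wrapping when nr_entries *
 * sizeof(*tbl) overflows. vfree(NULL) is a no-op; vfree() may sleep,
 * so vfree_atomic() must be used from atomic context.
 */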

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
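
/*
 * Example (illustrative sketch): a long-lived, virtually contiguous
 * mapping of already-allocated pages; "pages" and "nr_pages" are
 * hypothetical caller state.
 *
 *	void *va = vmap(pages, nr_pages, VM_MAP, PAGE_KERNEL);
 *
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 *
 * If VM_MAP_PUT_PAGES is passed in flags, freeing goes through vfree()
 * instead, which also drops a reference on each page and frees the
 * pages array.
 */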

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
				unsigned long pgoff);
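
/*
 * Example (illustrative sketch): exposing a vmalloc_user() buffer to
 * userspace from a driver's ->mmap() handler. "buf" is a hypothetical
 * buffer previously allocated with vmalloc_user(), which sets the
 * VM_USERMAP flag that remap_vmalloc_range() requires.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, buf, vma->vm_pgoff);
 *	}
 */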

int vmap_pages_range(unsigned long addr, unsigned long end, pgprot_t prot,
		struct page **pages, unsigned int page_shift);

/*
 * Lowlevel-APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);
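
/*
 * Example (illustrative sketch): reserving vmalloc address space without
 * immediately backing it with pages. Note the guard page accounting in
 * get_vm_area_size() above: area->size includes one extra guard page
 * unless VM_NO_GUARD was set, so for a PAGE_SIZE request
 * get_vm_area_size() still reports PAGE_SIZE.
 *
 *	struct vm_struct *area = get_vm_area(PAGE_SIZE, VM_IOREMAP);
 *
 *	if (!area)
 *		return -ENOMEM;
 *	pr_debug("reserved %zu bytes at %p\n",
 *		 get_vm_area_size(area), area->addr);
 *	free_vm_area(area);
 */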

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell 100% whether the area is mapped with
	 * > PAGE_SIZE page table entries: if for some reason the
	 * architecture indicates larger sizes are available but decides
	 * not to use them, nothing prevents that. This only indicates
	 * the size of the physical pages allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}
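
/*
 * Example (illustrative sketch): requesting a huge-page backed
 * allocation and checking what was actually mapped; the size and GFP
 * mask are placeholders.
 *
 *	void *p = vmalloc_huge(SZ_4M, GFP_KERNEL);
 *
 *	if (p && is_vm_area_hugepages(p))
 *		pr_debug("mapped with huge pages\n");
 *	vfree(p);
 *
 * Whether huge pages are actually used depends on
 * CONFIG_HAVE_ARCH_HUGE_VMALLOC and the arch_vmap_*_supported() hooks
 * above.
 */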

#ifdef CONFIG_MMU
int vm_area_map_pages(struct vm_struct *area, unsigned long start,
		      unsigned long end, struct page **pages);
void vm_area_unmap_pages(struct vm_struct *area, unsigned long start,
			 unsigned long end);
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
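
/*
 * Example (illustrative sketch): the typical VM_FLUSH_RESET_PERMS
 * pattern for executable allocations (e.g. module or BPF text); "size"
 * is a placeholder.
 *
 *	void *text = __vmalloc(size, GFP_KERNEL);
 *
 *	if (!text)
 *		return NULL;
 *	set_vm_flush_reset_perms(text);
 *	... write the code, then make the mapping read-only/executable ...
 *
 * On vfree(), the flag causes vmalloc to reset the direct map
 * permissions and flush the TLB, so no stale writable alias to the
 * freed code remains. Such areas cannot be freed from atomic context.
 */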

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 * Internals. Don't use..
 */
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);
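
/*
 * Example (illustrative sketch): reacting to vmalloc purging
 * lazily-freed areas, e.g. to trim a subsystem cache under
 * address-space pressure. All "foo" names are hypothetical.
 *
 *	static int foo_purge(struct notifier_block *nb, unsigned long nr,
 *			     void *unused)
 *	{
 *		foo_shrink_cache();
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_purge_nb = {
 *		.notifier_call = foo_purge,
 *	};
 *
 *	register_vmap_purge_notifier(&foo_purge_nb);
 */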

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */