/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_VMALLOC_H
#define _LINUX_VMALLOC_H

#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h>		/* pgprot_t */
#include <linux/rbtree.h>
#include <linux/overflow.h>
#include <linux/android_vendor.h>

#include <asm/vmalloc.h>

struct vm_area_struct;		/* vma defining user mapping in mm_types.h */
struct notifier_block;		/* in notifier.h */
struct iov_iter;		/* in uio.h */

/* bits in flags of vmalloc's vm_struct below */
#define VM_IOREMAP		0x00000001	/* ioremap() and friends */
#define VM_ALLOC		0x00000002	/* vmalloc() */
#define VM_MAP			0x00000004	/* vmap()ed pages */
#define VM_USERMAP		0x00000008	/* suitable for remap_vmalloc_range */
#define VM_DMA_COHERENT		0x00000010	/* dma_alloc_coherent */
#define VM_UNINITIALIZED	0x00000020	/* vm_struct is not fully initialized */
#define VM_NO_GUARD		0x00000040	/* ***DANGEROUS*** don't add guard page */
#define VM_KASAN		0x00000080	/* has allocated kasan shadow memory */
#define VM_FLUSH_RESET_PERMS	0x00000100	/* reset direct map and flush TLB on unmap, can't be freed in atomic context */
#define VM_MAP_PUT_PAGES	0x00000200	/* put pages and free array in vfree */
#define VM_ALLOW_HUGE_VMAP	0x00000400	/* Allow for huge pages on archs with HAVE_ARCH_HUGE_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)
#define VM_DEFER_KMEMLEAK	0x00000800	/* defer kmemleak object creation */
#else
#define VM_DEFER_KMEMLEAK	0
#endif

/* bits [20..32] reserved for arch specific ioremap internals */

/*
 * Maximum alignment for ioremap() regions.
 * Can be overridden by arch-specific value.
 */
#ifndef IOREMAP_MAX_ORDER
#define IOREMAP_MAX_ORDER	(7 + PAGE_SHIFT)	/* 128 pages */
#endif
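
/*
 * Worked example (editorial note): with 4 KiB pages, PAGE_SHIFT is 12, so
 * the default order is 7 + 12 = 19, i.e. ioremap() regions are aligned to
 * at most 1 << 19 bytes = 512 KiB, which is the 128 pages noted above.
 */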

struct vm_struct {
	struct vm_struct	*next;
	void			*addr;
	unsigned long		size;
	unsigned long		flags;
	struct page		**pages;
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	unsigned int		page_order;
#endif
	unsigned int		nr_pages;
	phys_addr_t		phys_addr;
	const void		*caller;
	ANDROID_OEM_DATA(1);
};

struct vmap_area {
	unsigned long va_start;
	unsigned long va_end;

	struct rb_node rb_node;         /* address sorted rbtree */
	struct list_head list;          /* address sorted list */

	/*
	 * The following two variables can be packed, because a
	 * vmap_area object is always in exactly one of two trees:
	 *    1) the "free" tree (root is free_vmap_area_root)
	 *    2) the "busy" tree (root is vmap_area_root)
	 */
	union {
		unsigned long subtree_max_size; /* in "free" tree */
		struct vm_struct *vm;           /* in "busy" tree */
	};
	unsigned long flags; /* mark type of vm_map_ram area */
};

/* archs that select HAVE_ARCH_HUGE_VMAP should override one or more of these */
#ifndef arch_vmap_p4d_supported
static inline bool arch_vmap_p4d_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pud_supported
static inline bool arch_vmap_pud_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pmd_supported
static inline bool arch_vmap_pmd_supported(pgprot_t prot)
{
	return false;
}
#endif

#ifndef arch_vmap_pte_range_map_size
static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
							 u64 pfn, unsigned int max_page_shift)
{
	return PAGE_SIZE;
}
#endif

#ifndef arch_vmap_pte_supported_shift
static inline int arch_vmap_pte_supported_shift(unsigned long size)
{
	return PAGE_SHIFT;
}
#endif

#ifndef arch_vmap_pgprot_tagged
static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
{
	return prot;
}
#endif
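
/*
 * Sketch of an arch override (not defined in this header): an architecture
 * with memory tagging, e.g. arm64 MTE, can supply its own version in
 * <asm/vmalloc.h> so vmalloc() mappings use tagged page protections:
 *
 *	#define arch_vmap_pgprot_tagged arch_vmap_pgprot_tagged
 *	static inline pgprot_t arch_vmap_pgprot_tagged(pgprot_t prot)
 *	{
 *		return pgprot_tagged(prot);
 *	}
 */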

/*
 *	High-level APIs for driver use
 */
extern void vm_unmap_ram(const void *mem, unsigned int count);
extern void *vm_map_ram(struct page **pages, unsigned int count, int node);
extern void vm_unmap_aliases(void);
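
/*
 * Usage sketch (illustrative only): transiently map a caller-owned page
 * array; "pages" and "nr" are hypothetical driver variables, and
 * NUMA_NO_NODE lets the core choose where to allocate internal state.
 *
 *	void *va = vm_map_ram(pages, nr, NUMA_NO_NODE);
 *	if (!va)
 *		return -ENOMEM;
 *	... access nr * PAGE_SIZE bytes at va ...
 *	vm_unmap_ram(va, nr);
 */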

#ifdef CONFIG_MMU
extern unsigned long vmalloc_nr_pages(void);
#else
static inline unsigned long vmalloc_nr_pages(void) { return 0; }
#endif

extern void *vmalloc(unsigned long size) __alloc_size(1);
extern void *vzalloc(unsigned long size) __alloc_size(1);
extern void *vmalloc_user(unsigned long size) __alloc_size(1);
extern void *vmalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vzalloc_node(unsigned long size, int node) __alloc_size(1);
extern void *vmalloc_32(unsigned long size) __alloc_size(1);
extern void *vmalloc_32_user(unsigned long size) __alloc_size(1);
extern void *__vmalloc(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
			unsigned long start, unsigned long end, gfp_t gfp_mask,
			pgprot_t prot, unsigned long vm_flags, int node,
			const void *caller) __alloc_size(1);
void *__vmalloc_node(unsigned long size, unsigned long align, gfp_t gfp_mask,
		int node, const void *caller) __alloc_size(1);
void *vmalloc_huge(unsigned long size, gfp_t gfp_mask) __alloc_size(1);
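
/*
 * Usage sketch (illustrative only): the common allocate/free pairing.
 * vzalloc() returns zeroed, virtually contiguous memory and may sleep;
 * vfree() is declared further below.
 *
 *	struct foo *f = vzalloc(sizeof(*f));
 *	if (!f)
 *		return -ENOMEM;
 *	...
 *	vfree(f);
 */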

extern void *__vmalloc_array(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vmalloc_array(size_t n, size_t size) __alloc_size(1, 2);
extern void *__vcalloc(size_t n, size_t size, gfp_t flags) __alloc_size(1, 2);
extern void *vcalloc(size_t n, size_t size) __alloc_size(1, 2);
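
/*
 * The array variants check the n * size multiplication for overflow (see
 * <linux/overflow.h> included above), so prefer, e.g.:
 *
 *	tbl = vcalloc(nr_entries, sizeof(*tbl));
 *
 * over the overflow-prone open-coded vzalloc(nr_entries * sizeof(*tbl)).
 * ("tbl" and "nr_entries" are hypothetical caller variables.)
 */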

extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);

extern void *vmap(struct page **pages, unsigned int count,
			unsigned long flags, pgprot_t prot);
void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
extern void vunmap(const void *addr);
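
/*
 * Usage sketch (illustrative only): make an existing page array appear
 * virtually contiguous. VM_MAP marks the area as vmap()ed and PAGE_KERNEL
 * gives normal kernel read/write protections; "pages" and "nr" are
 * hypothetical.
 *
 *	void *va = vmap(pages, nr, VM_MAP, PAGE_KERNEL);
 *	if (!va)
 *		return -ENOMEM;
 *	...
 *	vunmap(va);
 */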

extern int remap_vmalloc_range_partial(struct vm_area_struct *vma,
				       unsigned long uaddr, void *kaddr,
				       unsigned long pgoff, unsigned long size);

extern int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
							unsigned long pgoff);
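
/*
 * Usage sketch (illustrative only): a driver ->mmap() handler exposing a
 * buffer previously allocated with vmalloc_user(), which sets VM_USERMAP
 * as required for remapping. "my_buf" is a hypothetical driver buffer.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return remap_vmalloc_range(vma, my_buf, 0);
 *	}
 */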

/*
 * Architectures can set this mask to a combination of PGTBL_P?D_MODIFIED values
 * and let generic vmalloc and ioremap code know when arch_sync_kernel_mappings()
 * needs to be called.
 */
#ifndef ARCH_PAGE_TABLE_SYNC_MASK
#define ARCH_PAGE_TABLE_SYNC_MASK 0
#endif
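
/*
 * Sketch of an arch override (hypothetical): an architecture that needs
 * its page-table roots resynchronized after top-level changes could define
 *
 *	#define ARCH_PAGE_TABLE_SYNC_MASK	PGTBL_PGD_MODIFIED
 *
 * so that arch_sync_kernel_mappings() below is called whenever vmalloc or
 * ioremap code modifies a PGD entry.
 */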

/*
 * There is no default implementation for arch_sync_kernel_mappings();
 * instead, the compiler is relied upon to optimize the calls out when
 * ARCH_PAGE_TABLE_SYNC_MASK is 0.
 */
void arch_sync_kernel_mappings(unsigned long start, unsigned long end);

/*
 *	Low-level APIs (not for driver use!)
 */

static inline size_t get_vm_area_size(const struct vm_struct *area)
{
	if (!(area->flags & VM_NO_GUARD))
		/* return actual size without guard page */
		return area->size - PAGE_SIZE;
	else
		return area->size;
}

extern struct vm_struct *get_vm_area(unsigned long size, unsigned long flags);
extern struct vm_struct *get_vm_area_caller(unsigned long size,
					unsigned long flags, const void *caller);
extern struct vm_struct *__get_vm_area_caller(unsigned long size,
					unsigned long flags,
					unsigned long start, unsigned long end,
					const void *caller);
void free_vm_area(struct vm_struct *area);
extern struct vm_struct *remove_vm_area(const void *addr);
extern struct vm_struct *find_vm_area(const void *addr);
struct vmap_area *find_vmap_area(unsigned long addr);

static inline bool is_vm_area_hugepages(const void *addr)
{
	/*
	 * This may not tell with 100% certainty whether the area is mapped
	 * with > PAGE_SIZE page table entries: if the architecture indicates
	 * larger sizes are available but decides not to use them, nothing
	 * prevents that. This only indicates the size of the physical page
	 * allocated in the vmalloc layer.
	 */
#ifdef CONFIG_HAVE_ARCH_HUGE_VMALLOC
	return find_vm_area(addr)->page_order > 0;
#else
	return false;
#endif
}

#ifdef CONFIG_MMU
void vunmap_range(unsigned long addr, unsigned long end);
static inline void set_vm_flush_reset_perms(void *addr)
{
	struct vm_struct *vm = find_vm_area(addr);

	if (vm)
		vm->flags |= VM_FLUSH_RESET_PERMS;
}

#else
static inline void set_vm_flush_reset_perms(void *addr)
{
}
#endif
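
/*
 * Usage sketch (illustrative only): code that makes vmalloc memory
 * executable sets VM_FLUSH_RESET_PERMS right after allocating, so that a
 * later vfree() resets the direct map and flushes the TLB. set_memory_ro()
 * and set_memory_x() are assumed to come from <asm/set_memory.h>, and
 * "npages" stands for size >> PAGE_SHIFT.
 *
 *	void *p = vmalloc(size);
 *	if (!p)
 *		return NULL;
 *	set_vm_flush_reset_perms(p);
 *	... write generated code into p, then ...
 *	set_memory_ro((unsigned long)p, npages);
 *	set_memory_x((unsigned long)p, npages);
 */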

/* for /proc/kcore */
extern long vread_iter(struct iov_iter *iter, const char *addr, size_t count);

/*
 *	Internals.  Don't use..
 */
extern struct list_head vmap_area_list;
extern __init void vm_area_add_early(struct vm_struct *vm);
extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);

#ifdef CONFIG_SMP
# ifdef CONFIG_MMU
struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
				     const size_t *sizes, int nr_vms,
				     size_t align);

void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms);
# else
static inline struct vm_struct **
pcpu_get_vm_areas(const unsigned long *offsets,
		const size_t *sizes, int nr_vms,
		size_t align)
{
	return NULL;
}

static inline void
pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms)
{
}
# endif
#endif

#ifdef CONFIG_MMU
#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
#else
#define VMALLOC_TOTAL 0UL
#endif

int register_vmap_purge_notifier(struct notifier_block *nb);
int unregister_vmap_purge_notifier(struct notifier_block *nb);

#if defined(CONFIG_MMU) && defined(CONFIG_PRINTK)
bool vmalloc_dump_obj(void *object);
#else
static inline bool vmalloc_dump_obj(void *object) { return false; }
#endif

#endif /* _LINUX_VMALLOC_H */