/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
#define _LINUX_PAGE_SIZE_MIGRATION_H

/*
 * Page Size Migration
 *
 * Copyright (c) 2024, Google LLC.
 * Author: Kalesh Singh <kaleshsingh@google.com>
 *
 * This file contains the APIs for mitigations to ensure
 * app compatibility during the transition from 4kB to 16kB
 * page size in Android.
 */

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>

/*
 * vm_flags representation of VMA padding pages.
 *
 * This allows the kernel to identify the portion of an ELF LOAD segment VMA
 * that is padding.
 *
 * 4 high bits of vm_flags [63,60] are used to represent ELF segment padding
 * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
 * alignment (p_align).
 *
 * The representation is illustrated below.
 *
 *                    63        62        61        60
 *                _________ _________ _________ _________
 *               |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
 *               | of  4kB | of  4kB | of  4kB | of  4kB |
 *               |  chunks |  chunks |  chunks |  chunks |
 *               |_________|_________|_________|_________|
 */

#define VM_PAD_WIDTH		4
#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH)
#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
#define VMA_PAD_START(vma)	(vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
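
/*
 * Illustrative example (a sketch, not part of the API above): assuming
 * BITS_PER_LONG == 64 and 4kB pages, VM_PAD_SHIFT is 60, VM_TOTAL_PAD_PAGES
 * is 15 (up to 60kB of padding) and VM_PAD_MASK covers vm_flags bits
 * [63:60]. Packing and unpacking a pad-page count then reduces to:
 *
 *	vm_flags = (vm_flags & ~VM_PAD_MASK) | (nr_pages << VM_PAD_SHIFT);
 *	nr_pages = (vm_flags & VM_PAD_MASK) >> VM_PAD_SHIFT;
 *
 * so a VMA with 3 padding pages (12kB) stores 0b0011 in bits [63:60].
 */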

typedef void (*show_pad_vma_fn)(struct seq_file *m, struct vm_area_struct *vma);

#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
extern void vma_set_pad_pages(struct vm_area_struct *vma,
			      unsigned long nr_pages);

extern unsigned long vma_pad_pages(struct vm_area_struct *vma);

extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);

extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);

extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);

extern void show_map_pad_vma(struct vm_area_struct *vma,
			     struct vm_area_struct *pad,
			     struct seq_file *m, show_pad_vma_fn func);

extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
			  unsigned long addr, int new_below);
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
				     unsigned long nr_pages)
{
}

static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
	return 0;
}

static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
	return vma;
}

static inline void show_map_pad_vma(struct vm_area_struct *vma,
				    struct vm_area_struct *pad,
				    struct seq_file *m, show_pad_vma_fn func)
{
}

static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
				 unsigned long addr, int new_below)
{
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */

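/* Number of non-padding (data) pages in the VMA. */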
static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
	return vma_pages(vma) - vma_pad_pages(vma);
}

/*
 * Sets the correct padding bits / flags for a VMA split.
 */
static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
						unsigned long newflags)
{
	if (newflags & VM_PAD_MASK)
		return (newflags & ~VM_PAD_MASK) | (vma->vm_flags & VM_PAD_MASK);
	else
		return newflags;
}
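
/*
 * Illustrative usage (a sketch, not a caller from this tree): when flags
 * are recomputed for a VMA split, the original VMA's padding bits can be
 * carried over with:
 *
 *	newflags = vma_pad_fixup_flags(vma, newflags);
 */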

/*
 * Merging of padding VMAs is uncommon, as padding is only allowed
 * from the linker context.
 *
 * To simplify the semantics, adjacent VMAs with padding are not
 * allowed to merge.
 */
static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
				       unsigned long vm_flags)
{
	/* Padding VMAs cannot be merged with other padding or real VMAs */
	return !((vma->vm_flags | vm_flags) & VM_PAD_MASK);
}
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */