/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_PAGE_SIZE_MIGRATION_H
#define _LINUX_PAGE_SIZE_MIGRATION_H

/*
 * Page Size Migration
 *
 * Copyright (c) 2024, Google LLC.
 * Author: Kalesh Singh <kaleshsingh@google.com>
 *
 * This file contains the APIs for mitigations to ensure
 * app compatibility during the transition from 4kB to 16kB
 * page size in Android.
 */

#include <linux/mm.h>
#include <linux/seq_file.h>
#include <linux/sizes.h>

/*
 * vm_flags representation of VMA padding pages.
 *
 * This allows the kernel to identify the portion of an ELF LOAD segment VMA
 * that is padding.
 *
 * 4 high bits of vm_flags [62,59] are used to represent ELF segment padding
 * up to 60kB, which is sufficient for ELFs of both 16kB and 64kB segment
 * alignment (p_align).
 *
 * The representation is illustrated below.
 *
 *      62        61        60        59
 *  _________ _________ _________ _________
 * |  Bit 3  |  Bit 2  |  Bit 1  |  Bit 0  |
 * |  of 4kB |  of 4kB |  of 4kB |  of 4kB |
 * |  chunks |  chunks |  chunks |  chunks |
 * |_________|_________|_________|_________|
 *
 * NOTE: Bit 63 is already used by mseal()
 */

#define VM_PAD_WIDTH		4
#define VM_PAD_SHIFT		(BITS_PER_LONG - VM_PAD_WIDTH - 1)
#define VM_TOTAL_PAD_PAGES	((1ULL << VM_PAD_WIDTH) - 1)
#define VM_PAD_MASK		(VM_TOTAL_PAD_PAGES << VM_PAD_SHIFT)
#define VMA_PAD_START(vma)	(vma->vm_end - (vma_pad_pages(vma) << PAGE_SHIFT))
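
/*
 * Illustrative sketch only (not part of the upstream API): how a pad-page
 * count round-trips through the vm_flags bits defined above. The helper
 * names here are hypothetical; the real accessors are the
 * vma_set_pad_pages()/vma_pad_pages() declarations below, whose out-of-line
 * implementations may apply additional gating (e.g. whether the mitigation
 * is enabled at all).
 */
static inline unsigned long __pad_example_encode(unsigned long nr_pages)
{
	/* e.g. 3 pad pages (12kB) -> value 3 in vm_flags bits [62,59] */
	return (nr_pages << VM_PAD_SHIFT) & VM_PAD_MASK;
}

static inline unsigned long __pad_example_decode(unsigned long flags)
{
	/* Recover the 4-bit count of 4kB chunks from the high flag bits */
	return (flags & VM_PAD_MASK) >> VM_PAD_SHIFT;
}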

#if PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT)
extern void vma_set_pad_pages(struct vm_area_struct *vma,
			      unsigned long nr_pages);

extern unsigned long vma_pad_pages(struct vm_area_struct *vma);

extern void madvise_vma_pad_pages(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);

extern struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma);

extern struct vm_area_struct *get_data_vma(struct vm_area_struct *vma);

extern void show_map_pad_vma(struct vm_area_struct *vma,
			     struct vm_area_struct *pad,
			     struct seq_file *m, void *func, bool smaps);

extern void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
			  unsigned long addr, int new_below);

extern unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
					 unsigned long newflags);

extern bool is_mergable_pad_vma(struct vm_area_struct *vma,
				unsigned long vm_flags);

extern unsigned long vma_data_pages(struct vm_area_struct *vma);
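
/*
 * Illustrative usage sketch (hypothetical caller, not from this header):
 * a /proc/<pid>/maps-style dumper could split a padded LOAD segment VMA
 * into its data portion and a synthetic pad VMA for display. This assumes
 * get_pad_vma() returns NULL when the VMA carries no padding and that
 * show_map_pad_vma() tolerates a NULL pad; 'print_fn' stands in for the
 * opaque 'func' callback in the declaration above.
 */
static inline void __example_show_map(struct seq_file *m,
				      struct vm_area_struct *vma,
				      void *print_fn)
{
	struct vm_area_struct *pad = get_pad_vma(vma);	 /* NULL if unpadded */
	struct vm_area_struct *data = get_data_vma(vma); /* vma minus padding */

	/* ... print 'data' with the normal VMA printer ... */

	/* Then emit the trailing pad region, if any, as its own entry */
	show_map_pad_vma(data, pad, m, print_fn, false);
}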
#else /* PAGE_SIZE != SZ_4K || !defined(CONFIG_64BIT) */
static inline void vma_set_pad_pages(struct vm_area_struct *vma,
				     unsigned long nr_pages)
{
}

static inline unsigned long vma_pad_pages(struct vm_area_struct *vma)
{
	return 0;
}

static inline void madvise_vma_pad_pages(struct vm_area_struct *vma,
					 unsigned long start, unsigned long end)
{
}

static inline struct vm_area_struct *get_pad_vma(struct vm_area_struct *vma)
{
	return NULL;
}

static inline struct vm_area_struct *get_data_vma(struct vm_area_struct *vma)
{
	return vma;
}

static inline void show_map_pad_vma(struct vm_area_struct *vma,
				    struct vm_area_struct *pad,
				    struct seq_file *m, void *func, bool smaps)
{
}

static inline void split_pad_vma(struct vm_area_struct *vma, struct vm_area_struct *new,
				 unsigned long addr, int new_below)
{
}

static inline unsigned long vma_pad_fixup_flags(struct vm_area_struct *vma,
						unsigned long newflags)
{
	return newflags;
}

static inline bool is_mergable_pad_vma(struct vm_area_struct *vma,
				       unsigned long vm_flags)
{
	return true;
}

static inline unsigned long vma_data_pages(struct vm_area_struct *vma)
{
	return vma_pages(vma);
}
#endif /* PAGE_SIZE == SZ_4K && defined(CONFIG_64BIT) */
#endif /* _LINUX_PAGE_SIZE_MIGRATION_H */