/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2016 ARM Ltd.
 */
#ifndef __ASM_PGTABLE_PROT_H
#define __ASM_PGTABLE_PROT_H

#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>

#include <linux/const.h>

/*
 * Software defined PTE bits definition.
 */
#define PTE_WRITE		(PTE_DBM)		 /* same as DBM (51) */
#define PTE_SWP_EXCLUSIVE	(_AT(pteval_t, 1) << 2)	 /* only for swp ptes */
#define PTE_DIRTY		(_AT(pteval_t, 1) << 55)
#define PTE_SPECIAL		(_AT(pteval_t, 1) << 56)
#define PTE_DEVMAP		(_AT(pteval_t, 1) << 57)

/*
 * PTE_PRESENT_INVALID=1 & PTE_VALID=0 indicates that the pte's fields should be
 * interpreted according to the HW layout by SW but any attempted HW access to
 * the address will result in a fault. pte_present() returns true.
 */
#define PTE_PRESENT_INVALID	(PTE_NG)		 /* only when !PTE_VALID */
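
/*
 * Illustrative sketch (not part of this header's API): <asm/pgtable.h>
 * derives pte_present() from these bits, roughly treating an entry as
 * present-but-invalid when PTE_PRESENT_INVALID is set and PTE_VALID is
 * clear:
 *
 *	present_invalid = (pte_val(pte) & (PTE_VALID | PTE_PRESENT_INVALID))
 *				== PTE_PRESENT_INVALID;
 *	present = pte_valid(pte) || present_invalid;
 */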

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define PTE_UFFD_WP		(_AT(pteval_t, 1) << 58) /* uffd-wp tracking */
#define PTE_SWP_UFFD_WP		(_AT(pteval_t, 1) << 3)	 /* only for swp ptes */
#else
#define PTE_UFFD_WP		(_AT(pteval_t, 0))
#define PTE_SWP_UFFD_WP		(_AT(pteval_t, 0))
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_WP */

#define _PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_AF | PTE_SHARED)
#define _PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_SECT_AF | PMD_SECT_S)

#define PROT_DEFAULT		(PTE_TYPE_PAGE | PTE_MAYBE_NG | PTE_MAYBE_SHARED | PTE_AF)
#define PROT_SECT_DEFAULT	(PMD_TYPE_SECT | PMD_MAYBE_NG | PMD_MAYBE_SHARED | PMD_SECT_AF)

#define PROT_DEVICE_nGnRnE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRnE))
#define PROT_DEVICE_nGnRE	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_NORMAL_NC		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_NC))
#define PROT_NORMAL		(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL))
#define PROT_NORMAL_TAGGED	(PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_WRITE | PTE_ATTRINDX(MT_NORMAL_TAGGED))

#define PROT_SECT_DEVICE_nGnRE	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE))
#define PROT_SECT_NORMAL	(PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PTE_WRITE | PMD_ATTRINDX(MT_NORMAL))
#define PROT_SECT_NORMAL_EXEC	(PROT_SECT_DEFAULT | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL))

#define _PAGE_DEFAULT		(_PROT_DEFAULT | PTE_ATTRINDX(MT_NORMAL))

#define _PAGE_KERNEL		(PROT_NORMAL)
#define _PAGE_KERNEL_RO		((PROT_NORMAL & ~PTE_WRITE) | PTE_RDONLY)
#define _PAGE_KERNEL_ROX	((PROT_NORMAL & ~(PTE_WRITE | PTE_PXN)) | PTE_RDONLY)
#define _PAGE_KERNEL_EXEC	(PROT_NORMAL & ~PTE_PXN)
#define _PAGE_KERNEL_EXEC_CONT	((PROT_NORMAL & ~PTE_PXN) | PTE_CONT)

#define _PAGE_SHARED		(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN | PTE_WRITE)
#define _PAGE_SHARED_EXEC	(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_WRITE)
#define _PAGE_READONLY		(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
#define _PAGE_READONLY_EXEC	(_PAGE_DEFAULT | PTE_USER | PTE_RDONLY | PTE_NG | PTE_PXN)
#define _PAGE_EXECONLY		(_PAGE_DEFAULT | PTE_RDONLY | PTE_NG | PTE_PXN)
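
/*
 * Illustrative sketch (assumed caller, not defined here): for a shared user
 * mapping, the generic mm picks one of the protections above from the
 * VM_WRITE/VM_EXEC flags, roughly:
 *
 *	pgprot_t prot = (vm_flags & VM_EXEC) ?
 *		((vm_flags & VM_WRITE) ? PAGE_SHARED_EXEC : PAGE_READONLY_EXEC) :
 *		((vm_flags & VM_WRITE) ? PAGE_SHARED : PAGE_READONLY);
 *
 * The real mapping lives in protection_map[] / vm_get_page_prot(); the
 * __pgprot() wrappers are defined further down in this header.
 */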

#ifndef __ASSEMBLY__

#include <asm/cpufeature.h>
#include <asm/pgtable-types.h>

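/*
 * Note (descriptive, based on how this flag is used elsewhere in arch/arm64):
 * arm64_use_ng_mappings is true when user mappings must be non-global, e.g.
 * when KPTI (unmapping the kernel at EL0) is in effect, so that user and
 * kernel translations never share global TLB entries.
 */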
extern bool arm64_use_ng_mappings;

#define PTE_MAYBE_NG		(arm64_use_ng_mappings ? PTE_NG : 0)
#define PMD_MAYBE_NG		(arm64_use_ng_mappings ? PMD_SECT_NG : 0)

#ifndef CONFIG_ARM64_LPA2
#define lpa2_is_enabled()	false
#define PTE_MAYBE_SHARED	PTE_SHARED
#define PMD_MAYBE_SHARED	PMD_SECT_S
#define PHYS_MASK_SHIFT		(CONFIG_ARM64_PA_BITS)
#else
static inline bool __pure lpa2_is_enabled(void)
{
	return read_tcr() & TCR_DS;
}

#define PTE_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PTE_SHARED)
#define PMD_MAYBE_SHARED	(lpa2_is_enabled() ? 0 : PMD_SECT_S)
#define PHYS_MASK_SHIFT		(lpa2_is_enabled() ? CONFIG_ARM64_PA_BITS : 48)
#endif

/*
 * Highest possible physical address supported.
 */
#define PHYS_MASK		((UL(1) << PHYS_MASK_SHIFT) - 1)
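
/*
 * Illustrative sketch (assumed helper, 48-bit PA case): PHYS_MASK bounds the
 * output address a descriptor can carry, so pulling the physical address out
 * of a raw leaf descriptor looks roughly like:
 *
 *	phys_addr_t pa = pte_val(pte) & PHYS_MASK & (s64)PAGE_MASK;
 *
 * The real conversion, including the 52-bit PA layouts, is __pte_to_phys()
 * in <asm/pgtable.h>.
 */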

/*
 * If we have userspace-only BTI we don't want to mark kernel pages
 * guarded even if the system does support BTI.
 */
#define PTE_MAYBE_GP		(system_supports_bti_kernel() ? PTE_GP : 0)

#define PAGE_KERNEL		__pgprot(_PAGE_KERNEL)
#define PAGE_KERNEL_RO		__pgprot(_PAGE_KERNEL_RO)
#define PAGE_KERNEL_ROX		__pgprot(_PAGE_KERNEL_ROX)
#define PAGE_KERNEL_EXEC	__pgprot(_PAGE_KERNEL_EXEC)
#define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_KERNEL_EXEC_CONT)

#define PAGE_S2_MEMATTR(attr, has_fwb)					\
	({								\
		u64 __val;						\
		if (has_fwb)						\
			__val = PTE_S2_MEMATTR(MT_S2_FWB_ ## attr);	\
		else							\
			__val = PTE_S2_MEMATTR(MT_S2_ ## attr);		\
		__val;							\
	})
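
/*
 * Illustrative sketch (assumption about the stage-2 caller, not defined
 * here): KVM selects the stage-2 MemAttr encoding for a normal-memory
 * mapping along the lines of:
 *
 *	u64 memattr = PAGE_S2_MEMATTR(NORMAL,
 *			cpus_have_final_cap(ARM64_HAS_STAGE2_FWB));
 *
 * With FWB the hardware combines stage-1 and stage-2 attributes differently,
 * hence the separate MT_S2_FWB_* indices.
 */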

#define PAGE_NONE		__pgprot(((_PAGE_DEFAULT) & ~PTE_VALID) | PTE_PRESENT_INVALID | PTE_RDONLY | PTE_NG | PTE_PXN | PTE_UXN)
/* shared+writable pages are clean by default, hence PTE_RDONLY|PTE_WRITE */
#define PAGE_SHARED		__pgprot(_PAGE_SHARED)
#define PAGE_SHARED_EXEC	__pgprot(_PAGE_SHARED_EXEC)
#define PAGE_READONLY		__pgprot(_PAGE_READONLY)
#define PAGE_READONLY_EXEC	__pgprot(_PAGE_READONLY_EXEC)
#define PAGE_EXECONLY		__pgprot(_PAGE_EXECONLY)

#endif /* __ASSEMBLY__ */

#define pte_pi_index(pte) ( \
	((pte & BIT(PTE_PI_IDX_3)) >> (PTE_PI_IDX_3 - 3)) | \
	((pte & BIT(PTE_PI_IDX_2)) >> (PTE_PI_IDX_2 - 2)) | \
	((pte & BIT(PTE_PI_IDX_1)) >> (PTE_PI_IDX_1 - 1)) | \
	((pte & BIT(PTE_PI_IDX_0)) >> (PTE_PI_IDX_0 - 0)))
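
/*
 * Worked example: pte_pi_index() collapses the four indirection bits into a
 * 4-bit value with UXN at bit 3, PXN at bit 2, DBM/WRITE at bit 1 and USER
 * at bit 0 (the PTE_PI_IDX_* positions come from <asm/pgtable-hwdef.h>).
 * So _PAGE_READONLY (USER | PXN | UXN) yields index 0xd, and _PAGE_SHARED
 * (USER | WRITE | PXN | UXN) yields 0xf, matching the table below.
 */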

/*
 * Page types used via Permission Indirection Extension (PIE). PIE uses
 * the USER, DBM, PXN and UXN bits to generate an index which is used
 * to look up the actual permission in PIR_ELx and PIRE0_EL1. We define
 * the combinations we use on non-PIE systems with the same encoding; for
 * convenience these are listed here as comments, as are the unallocated
 * encodings.
 */

/* 0: PAGE_DEFAULT                                                  */
/* 1:                                                      PTE_USER */
/* 2:                                PTE_WRITE                      */
/* 3:                                PTE_WRITE |           PTE_USER */
/* 4: PAGE_EXECONLY                            PTE_PXN              */
/* 5: PAGE_READONLY_EXEC                       PTE_PXN |   PTE_USER */
/* 6:                                PTE_WRITE | PTE_PXN            */
/* 7: PAGE_SHARED_EXEC               PTE_WRITE | PTE_PXN | PTE_USER */
/* 8: PAGE_KERNEL_ROX      PTE_UXN                                  */
/* 9:                      PTE_UXN |                       PTE_USER */
/* a: PAGE_KERNEL_EXEC     PTE_UXN | PTE_WRITE                      */
/* b:                      PTE_UXN | PTE_WRITE |           PTE_USER */
/* c: PAGE_KERNEL_RO       PTE_UXN |             PTE_PXN            */
/* d: PAGE_READONLY        PTE_UXN |             PTE_PXN | PTE_USER */
/* e: PAGE_KERNEL          PTE_UXN | PTE_WRITE | PTE_PXN            */
/* f: PAGE_SHARED          PTE_UXN | PTE_WRITE | PTE_PXN | PTE_USER */

#define PIE_E0	( \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY),      PIE_X_O)   | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_RX_O)  | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC),   PIE_RWX_O) | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY),      PIE_R_O)   | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED),        PIE_RW_O))

#define PIE_E1	( \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_EXECONLY),      PIE_NONE_O) | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY_EXEC), PIE_R)      | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED_EXEC),   PIE_RW)     | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_READONLY),      PIE_R)      | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_SHARED),        PIE_RW)     | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_ROX),    PIE_RX)     | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_EXEC),   PIE_RWX)    | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL_RO),     PIE_R)      | \
	PIRx_ELx_PERM(pte_pi_index(_PAGE_KERNEL),        PIE_RW))
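
/*
 * Illustrative sketch (assumption; the real programming happens during CPU
 * setup, outside this header): when FEAT_S1PIE is in use, the two constants
 * above are loaded into the permission indirection registers, conceptually:
 *
 *	write_sysreg_s(PIE_E0, SYS_PIRE0_EL1);
 *	write_sysreg_s(PIE_E1, SYS_PIR_EL1);
 *	isb();
 */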

#endif /* __ASM_PGTABLE_PROT_H */