// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright IBM Corp. 2019
 */
#include <linux/pgtable.h>
#include <asm/mem_detect.h>
#include <asm/cpacf.h>
#include <asm/timex.h>
#include <asm/sclp.h>
#include "compressed/decompressor.h"
#include "boot.h"

#define PRNG_MODE_TDES 1
#define PRNG_MODE_SHA512 2
#define PRNG_MODE_TRNG 3

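/*
 * Parameter block used with the CPACF PRNO SHA-512 DRNG functions below:
 * reseed counter, stream byte count and the V/C working values.
 */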
struct prno_parm {
	u32 res;
	u32 reseed_counter;
	u64 stream_bytes;
	u8 V[112];
	u8 C[112];
};

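/* state for the TDES based KMC-PRNG: parameter block plus counters */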
struct prng_parm {
	u8 parm_block[32];
	u32 reseed_counter;
	u64 byte_counter;
};

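/*
 * Query the CPACF facilities and pick the best available random number
 * source: the TRNG if present, then the SHA-512 based DRNG, and the TDES
 * based PRNG as a fallback. Returns 0 (KASLR gets disabled) when even the
 * basic KMC-PRNG function is missing.
 */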
static int check_prng(void)
{
	if (!cpacf_query_func(CPACF_KMC, CPACF_KMC_PRNG)) {
		sclp_early_printk("KASLR disabled: CPU has no PRNG\n");
		return 0;
	}
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_TRNG))
		return PRNG_MODE_TRNG;
	if (cpacf_query_func(CPACF_PRNO, CPACF_PRNO_SHA512_DRNG_GEN))
		return PRNG_MODE_SHA512;
	else
		return PRNG_MODE_TDES;
}

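/*
 * Generate a value in [0, limit) with whichever mode check_prng() selected.
 * The TOD clock serves as seed for the deterministic modes; returns -1 when
 * no random number generator is available.
 */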
static int get_random(unsigned long limit, unsigned long *value)
{
	struct prng_parm prng = {
		/* initial parameter block for tdes mode, copied from libica */
		.parm_block = {
			0x0F, 0x2B, 0x8E, 0x63, 0x8C, 0x8E, 0xD2, 0x52,
			0x64, 0xB7, 0xA0, 0x7B, 0x75, 0x28, 0xB8, 0xF4,
			0x75, 0x5F, 0xD2, 0xA6, 0x8D, 0x97, 0x11, 0xFF,
			0x49, 0xD8, 0x23, 0xF3, 0x7E, 0x21, 0xEC, 0xA0
		},
	};
	unsigned long seed, random;
	struct prno_parm prno;
	__u64 entropy[4];
	int mode, i;

	mode = check_prng();
	seed = get_tod_clock_fast();
	switch (mode) {
	case PRNG_MODE_TRNG:
		cpacf_trng(NULL, 0, (u8 *) &random, sizeof(random));
		break;
	case PRNG_MODE_SHA512:
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_SEED, &prno, NULL, 0,
			   (u8 *) &seed, sizeof(seed));
		cpacf_prno(CPACF_PRNO_SHA512_DRNG_GEN, &prno, (u8 *) &random,
			   sizeof(random), NULL, 0);
		break;
	case PRNG_MODE_TDES:
		/* add entropy */
		*(unsigned long *) prng.parm_block ^= seed;
		for (i = 0; i < 16; i++) {
			cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block,
				  (u8 *) entropy, (u8 *) entropy,
				  sizeof(entropy));
			memcpy(prng.parm_block, entropy, sizeof(entropy));
		}
		random = seed;
		cpacf_kmc(CPACF_KMC_PRNG, prng.parm_block, (u8 *) &random,
			  (u8 *) &random, sizeof(random));
		break;
	default:
		return -1;
	}
	*value = random % limit;
	return 0;
}

/*
 * To randomize the kernel base address we have to consider several facts:
 * 1. Physical online memory might not be contiguous and might have holes.
 *    The mem_detect info contains the list of online memory ranges we
 *    should consider.
 * 2. Several memory regions are already occupied and we must not overlap
 *    and destroy them. Currently safe_addr tells us the border below which
 *    all those occupied regions lie; we are safe to use anything above
 *    safe_addr.
 * 3. An upper limit might apply as well, even if memory above that limit is
 *    online. Currently those limitations are:
 *    3.1. the limit set by the "mem=" kernel command line option,
 *    3.2. memory reserved at the end for kasan initialization.
 * 4. The kernel base address must be aligned to THREAD_SIZE (the kernel
 *    stack size), which is required for CONFIG_CHECK_STACK. Currently
 *    THREAD_SIZE is 4 pages (16 pages when the kernel is built with kasan
 *    enabled).
 * Assumptions:
 * 1. The kernel size (including .bss size) and the upper memory limit are
 *    page aligned.
 * 2. A mem_detect memory region start is THREAD_SIZE aligned and its end is
 *    PAGE_SIZE aligned (in practice the memory configuration granularity on
 *    z/VM and LPAR is 1 MB).
 *
 * To guarantee a uniform distribution of the kernel base address among all
 * suitable addresses we generate the random value just once. For that we
 * need to build a contiguous range in which every value would be suitable.
 * We can build this range by simply counting all suitable addresses (let's
 * call them positions) which would be valid as kernel base address. To
 * count positions we iterate over the online memory ranges. For each range
 * which is big enough for the kernel image we count all suitable addresses
 * we can put the kernel image at, that is
 *	(end - start - kernel_size) / THREAD_SIZE + 1
 * The two functions count_valid_kernel_positions and position_to_address
 * help to count the positions in a given memory range and to convert a
 * position back to an address.
 */
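/*
 * Illustrative example (made-up numbers, not from a real configuration):
 * with kernel_size = 64 MB, THREAD_SIZE = 16 KB and a single online range
 * [0, 128 MB), that range contributes
 *	(128 MB - 0 - 64 MB) / 16 KB + 1 = 4097
 * positions; position 1 maps back to address 0 and position 4097 to 64 MB,
 * the last base address at which the image still fits below 128 MB.
 */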
static unsigned long count_valid_kernel_positions(unsigned long kernel_size,
						  unsigned long _min,
						  unsigned long _max)
{
	unsigned long start, end, pos = 0;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		pos += (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return pos;
}

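/*
 * Inverse of the counting above: walk the same online ranges and translate
 * a 1-based position back into the corresponding THREAD_SIZE aligned
 * address. Returns 0 if the position is out of range.
 */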
static unsigned long position_to_address(unsigned long pos, unsigned long kernel_size,
					 unsigned long _min, unsigned long _max)
{
	unsigned long start, end;
	int i;

	for_each_mem_detect_block(i, &start, &end) {
		if (_min >= end)
			continue;
		if (start >= _max)
			break;
		start = max(_min, start);
		end = min(_max, end);
		if (end - start < kernel_size)
			continue;
		if ((end - start - kernel_size) / THREAD_SIZE + 1 >= pos)
			return start + (pos - 1) * THREAD_SIZE;
		pos -= (end - start - kernel_size) / THREAD_SIZE + 1;
	}

	return 0;
}

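/*
 * Pick a random, THREAD_SIZE aligned kernel base address above safe_addr
 * and below the effective memory limit. Returns 0 when KASLR cannot be
 * applied (no usable random number generator, not enough memory).
 */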
unsigned long get_random_base(unsigned long safe_addr)
{
	unsigned long memory_limit = get_mem_detect_end();
	unsigned long base_pos, max_pos, kernel_size;
	unsigned long kasan_needs;
	int i;

	if (memory_end_set)
		memory_limit = min(memory_limit, memory_end);

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && INITRD_START && INITRD_SIZE) {
		if (safe_addr < INITRD_START + INITRD_SIZE)
			safe_addr = INITRD_START + INITRD_SIZE;
	}
	safe_addr = ALIGN(safe_addr, THREAD_SIZE);

	if (IS_ENABLED(CONFIG_KASAN)) {
		/*
		 * Estimate the memory kasan will reserve at the very end of
		 * available physical memory. Kasan needs 1/8 of available
		 * physical memory for its shadow memory, plus page tables
		 * for the whole memory + shadow memory region (1 + 1/8 of
		 * the memory size). To keep the page table estimate simple,
		 * take double the combined page table size.
		 */
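		/*
		 * Rough illustration (assuming 4 KB pages and 2 KB page
		 * tables with 256 entries each, as on s390): for 4 GB of
		 * online memory the shadow needs 512 MB, and the doubled
		 * page table estimate for the 4.5 GB to be mapped comes to
		 * about 18 MB, so roughly 530 MB gets subtracted from
		 * memory_limit below.
		 */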
		memory_limit = get_mem_detect_end();
		if (memory_end_set && memory_limit > memory_end)
			memory_limit = memory_end;

		/* for shadow memory */
		kasan_needs = memory_limit / 8;
		/* for paging structures */
		kasan_needs += (memory_limit + kasan_needs) / PAGE_SIZE /
			       _PAGE_ENTRIES * _PAGE_TABLE_SIZE * 2;
		memory_limit -= kasan_needs;
	}

	kernel_size = vmlinux.image_size + vmlinux.bss_size;
	if (safe_addr + kernel_size > memory_limit)
		return 0;

	max_pos = count_valid_kernel_positions(kernel_size, safe_addr, memory_limit);
	if (!max_pos) {
		sclp_early_printk("KASLR disabled: not enough memory\n");
		return 0;
	}

	/* we need a random value in the range [1, max_pos] inclusive */
	if (get_random(max_pos, &base_pos))
		return 0;
	return position_to_address(base_pos + 1, kernel_size, safe_addr, memory_limit);
}