// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
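/*
 * Verify the PKCS#7 signature appended to the kernel image. Verification is
 * only enforced when the system was IPLed in secure mode; otherwise the
 * image is accepted without checking.
 */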
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;
	int ret;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

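	/*
	 * The signed image ends with: kernel data, PKCS#7 signature,
	 * struct module_signature, MODULE_SIG_STRING marker. Peel these
	 * pieces off from the end before verifying.
	 */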
	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	ret = verify_pkcs7_signature(kernel, kernel_len,
				     kernel + kernel_len, sig_len,
				     VERIFY_USE_SECONDARY_KEYRING,
				     VERIFYING_MODULE_SIGNATURE,
				     NULL, NULL);
	if (ret == -ENOKEY && IS_ENABLED(CONFIG_INTEGRITY_PLATFORM_KEYRING))
		ret = verify_pkcs7_signature(kernel, kernel_len,
					     kernel + kernel_len, sig_len,
					     VERIFY_USE_PLATFORM_KEYRING,
					     VERIFYING_MODULE_SIGNATURE,
					     NULL, NULL);
	return ret;
}
#endif /* CONFIG_KEXEC_SIG */

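/*
 * Pass the kernel entry point and image type to the purgatory code, and for
 * kdump also the location and size of the crash memory region.
 */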
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

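/*
 * Place the purgatory behind the components loaded so far and update the
 * symbols it needs at run time.
 */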
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

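/*
 * Load the initrd behind the components loaded so far, record its location
 * in the kernel parameter area and add it to the IPL report.
 */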
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

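/*
 * Build the IPL report for the next kernel: append the platform
 * certificates, finalize the report buffer and patch its address into the
 * new kernel's lowcore so the next kernel can find it.
 */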
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;

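	/* Count the certificates in the IPL certificate list. */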
	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

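	/*
	 * Add the certificates to the report; their payload is placed
	 * directly behind the report entries.
	 */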
	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

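	/*
	 * Finalize the report and make the new kernel's lowcore point to it
	 * via the IPL parameter block pointer.
	 */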
	ret = -ENOMEM;
	buf.buffer = ipl_report_finish(data->report);
	if (!buf.buffer)
		goto out;
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;
	image->arch.ipl_buf = buf.buffer;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_add_buffer(&buf);
out:
	return ret;
}

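/*
 * Common loader path used by both the ELF and the raw image loader: add the
 * kernel via the loader specific callback, then the command line, initrd,
 * purgatory and IPL report.
 */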
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

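	/*
	 * If the kernel is loaded at address 0, write a restart PSW pointing
	 * at the entry point to the very start of the image and start the
	 * image at address 0 instead.
	 */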
	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;

		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

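/*
 * Apply RELA relocations to the purgatory. Undefined and common symbols are
 * rejected; every symbol must be fully resolvable at load time.
 */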
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;
	int ret;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);

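		/*
		 * The purgatory is self-contained and linked without a PLT,
		 * so PLT-relative relocations can be resolved like their
		 * PC-relative counterparts.
		 */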
		if (r_type == R_390_PLT32DBL)
			r_type = R_390_PC32DBL;

		ret = arch_kexec_do_relocs(r_type, loc, val, addr);
		if (ret) {
			pr_err("Unknown rela relocation: %d\n", r_type);
			return -ENOEXEC;
		}
	}
	return 0;
}

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/* A kernel must be at least large enough to contain head.S. During
	 * load, memory in head.S will be accessed, e.g. to register the next
	 * command line. If the next kernel were smaller, the current kernel
	 * would panic at load.
	 */
	if (buf_len < HEAD_END)
		return -ENOEXEC;

	return kexec_image_probe_default(image, buf, buf_len);
}

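/*
 * Free the IPL report buffer kept for this image before running the generic
 * post-load cleanup.
 */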
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.ipl_buf);
	image->arch.ipl_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}
358