// SPDX-License-Identifier: GPL-2.0
/*
 * s390 code for kexec_file_load system call
 *
 * Copyright IBM Corp. 2018
 *
 * Author(s): Philipp Rudo <prudo@linux.vnet.ibm.com>
 */

#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/kexec.h>
#include <linux/module_signature.h>
#include <linux/verification.h>
#include <linux/vmalloc.h>
#include <asm/boot_data.h>
#include <asm/ipl.h>
#include <asm/setup.h>

const struct kexec_file_ops * const kexec_file_loaders[] = {
	&s390_kexec_elf_ops,
	&s390_kexec_image_ops,
	NULL,
};

#ifdef CONFIG_KEXEC_SIG
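/*
 * Verify the module-style PKCS#7 signature appended to the kernel image
 * against the platform keyring. Verification is skipped entirely when the
 * system was not secure IPLed.
 */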
int s390_verify_sig(const char *kernel, unsigned long kernel_len)
{
	const unsigned long marker_len = sizeof(MODULE_SIG_STRING) - 1;
	struct module_signature *ms;
	unsigned long sig_len;

	/* Skip signature verification when not secure IPLed. */
	if (!ipl_secure_flag)
		return 0;

	if (marker_len > kernel_len)
		return -EKEYREJECTED;

	if (memcmp(kernel + kernel_len - marker_len, MODULE_SIG_STRING,
		   marker_len))
		return -EKEYREJECTED;
	kernel_len -= marker_len;

	ms = (void *)kernel + kernel_len - sizeof(*ms);
	kernel_len -= sizeof(*ms);

	sig_len = be32_to_cpu(ms->sig_len);
	if (sig_len >= kernel_len)
		return -EKEYREJECTED;
	kernel_len -= sig_len;

	if (ms->id_type != PKEY_ID_PKCS7)
		return -EKEYREJECTED;

	if (ms->algo != 0 ||
	    ms->hash != 0 ||
	    ms->signer_len != 0 ||
	    ms->key_id_len != 0 ||
	    ms->__pad[0] != 0 ||
	    ms->__pad[1] != 0 ||
	    ms->__pad[2] != 0) {
		return -EBADMSG;
	}

	return verify_pkcs7_signature(kernel, kernel_len,
				      kernel + kernel_len, sig_len,
				      VERIFY_USE_PLATFORM_KEYRING,
				      VERIFYING_MODULE_SIGNATURE,
				      NULL, NULL);
}
#endif /* CONFIG_KEXEC_SIG */

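/*
 * Pass the entry point and image type to the purgatory, and for crash
 * kernels also the location and size of the crash memory region.
 */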
static int kexec_file_update_purgatory(struct kimage *image,
				       struct s390_load_data *data)
{
	u64 entry, type;
	int ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		entry = STARTUP_KDUMP_OFFSET;
		type = KEXEC_TYPE_CRASH;
	} else {
		entry = STARTUP_NORMAL_OFFSET;
		type = KEXEC_TYPE_DEFAULT;
	}

	ret = kexec_purgatory_get_set_symbol(image, "kernel_entry", &entry,
					     sizeof(entry), false);
	if (ret)
		return ret;

	ret = kexec_purgatory_get_set_symbol(image, "kernel_type", &type,
					     sizeof(type), false);
	if (ret)
		return ret;

	if (image->type == KEXEC_TYPE_CRASH) {
		u64 crash_size;

		ret = kexec_purgatory_get_set_symbol(image, "crash_start",
						     &crashk_res.start,
						     sizeof(crashk_res.start),
						     false);
		if (ret)
			return ret;

		crash_size = crashk_res.end - crashk_res.start + 1;
		ret = kexec_purgatory_get_set_symbol(image, "crash_size",
						     &crash_size,
						     sizeof(crash_size),
						     false);
	}
	return ret;
}

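/*
 * Load the purgatory right after the components added so far and set the
 * symbols it needs at run time.
 */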
static int kexec_file_add_purgatory(struct kimage *image,
				    struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ret = kexec_load_purgatory(image, &buf);
	if (ret)
		return ret;
	data->memsz += buf.memsz;

	return kexec_file_update_purgatory(image, data);
}

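/*
 * Append the initrd to the image and record its location and size in the
 * kernel parameter area.
 */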
static int kexec_file_add_initrd(struct kimage *image,
				 struct s390_load_data *data)
{
	struct kexec_buf buf;
	int ret;

	buf.image = image;

	buf.buffer = image->initrd_buf;
	buf.bufsz = image->initrd_buf_len;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;
	buf.memsz = buf.bufsz;

	data->parm->initrd_start = data->memsz;
	data->parm->initrd_size = buf.memsz;
	data->memsz += buf.memsz;

	ret = kexec_add_buffer(&buf);
	if (ret)
		return ret;

	return ipl_report_add_component(data->report, &buf, 0, 0);
}

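/*
 * Build the IPL report from the IPL parameter block and the platform
 * certificate list, append it to the image, and store its load address
 * in the ipl_parmblock_ptr field of the new kernel's lowcore.
 */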
static int kexec_file_add_ipl_report(struct kimage *image,
				     struct s390_load_data *data)
{
	__u32 *lc_ipl_parmblock_ptr;
	unsigned int len, ncerts;
	struct kexec_buf buf;
	unsigned long addr;
	void *ptr, *end;
	int ret;

	buf.image = image;

	data->memsz = ALIGN(data->memsz, PAGE_SIZE);
	buf.mem = data->memsz;
	if (image->type == KEXEC_TYPE_CRASH)
		buf.mem += crashk_res.start;

	ptr = (void *)ipl_cert_list_addr;
	end = ptr + ipl_cert_list_size;
	ncerts = 0;
	while (ptr < end) {
		ncerts++;
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ptr += len;
	}

	addr = data->memsz + data->report->size;
	addr += ncerts * sizeof(struct ipl_rb_certificate_entry);
	ptr = (void *)ipl_cert_list_addr;
	while (ptr < end) {
		len = *(unsigned int *)ptr;
		ptr += sizeof(len);
		ipl_report_add_certificate(data->report, ptr, addr, len);
		addr += len;
		ptr += len;
	}

	ret = -ENOMEM;
	buf.buffer = ipl_report_finish(data->report);
	if (!buf.buffer)
		goto out;
	buf.bufsz = data->report->size;
	buf.memsz = buf.bufsz;
	image->arch.ipl_buf = buf.buffer;

	data->memsz += buf.memsz;

	lc_ipl_parmblock_ptr =
		data->kernel_buf + offsetof(struct lowcore, ipl_parmblock_ptr);
	*lc_ipl_parmblock_ptr = (__u32)buf.mem;

	ret = kexec_add_buffer(&buf);
out:
	return ret;
}

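/*
 * Common path to assemble a kexec image: add the kernel via the loader
 * specific callback, then command line, initrd, purgatory and IPL report.
 */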
void *kexec_file_add_components(struct kimage *image,
				int (*add_kernel)(struct kimage *image,
						  struct s390_load_data *data))
{
	struct s390_load_data data = {0};
	int ret;

	data.report = ipl_report_init(&ipl_block);
	if (IS_ERR(data.report))
		return data.report;

	ret = add_kernel(image, &data);
	if (ret)
		goto out;

	if (image->cmdline_buf_len >= ARCH_COMMAND_LINE_SIZE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(data.parm->command_line, image->cmdline_buf,
	       image->cmdline_buf_len);

	if (image->type == KEXEC_TYPE_CRASH) {
		data.parm->oldmem_base = crashk_res.start;
		data.parm->oldmem_size = crashk_res.end - crashk_res.start + 1;
	}

	if (image->initrd_buf) {
		ret = kexec_file_add_initrd(image, &data);
		if (ret)
			goto out;
	}

	ret = kexec_file_add_purgatory(image, &data);
	if (ret)
		goto out;

	if (data.kernel_mem == 0) {
		unsigned long restart_psw = 0x0008000080000000UL;
		restart_psw += image->start;
		memcpy(data.kernel_buf, &restart_psw, sizeof(restart_psw));
		image->start = 0;
	}

	ret = kexec_file_add_ipl_report(image, &data);
out:
	ipl_report_free(data.report);
	return ERR_PTR(ret);
}

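/*
 * Apply the RELA relocations of one purgatory section.
 */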
int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
				     Elf_Shdr *section,
				     const Elf_Shdr *relsec,
				     const Elf_Shdr *symtab)
{
	Elf_Rela *relas;
	int i, r_type;
	int ret;

	relas = (void *)pi->ehdr + relsec->sh_offset;

	for (i = 0; i < relsec->sh_size / sizeof(*relas); i++) {
		const Elf_Sym *sym;	/* symbol to relocate */
		unsigned long addr;	/* final location after relocation */
		unsigned long val;	/* relocated symbol value */
		void *loc;		/* tmp location to modify */

		sym = (void *)pi->ehdr + symtab->sh_offset;
		sym += ELF64_R_SYM(relas[i].r_info);

		if (sym->st_shndx == SHN_UNDEF)
			return -ENOEXEC;

		if (sym->st_shndx == SHN_COMMON)
			return -ENOEXEC;

		if (sym->st_shndx >= pi->ehdr->e_shnum &&
		    sym->st_shndx != SHN_ABS)
			return -ENOEXEC;

		loc = pi->purgatory_buf;
		loc += section->sh_offset;
		loc += relas[i].r_offset;

		val = sym->st_value;
		if (sym->st_shndx != SHN_ABS)
			val += pi->sechdrs[sym->st_shndx].sh_addr;
		val += relas[i].r_addend;

		addr = section->sh_addr + relas[i].r_offset;

		r_type = ELF64_R_TYPE(relas[i].r_info);
		ret = arch_kexec_do_relocs(r_type, loc, val, addr);
		if (ret) {
			pr_err("Unknown rela relocation: %d\n", r_type);
			return -ENOEXEC;
		}
	}
	return 0;
}

int arch_kexec_kernel_image_probe(struct kimage *image, void *buf,
				  unsigned long buf_len)
{
	/* A kernel must be at least large enough to contain head.S. During
	 * load, memory in head.S will be accessed, e.g. to register the next
	 * command line. If the next kernel were smaller, the current kernel
	 * would panic at load.
	 */
	if (buf_len < HEAD_END)
		return -ENOEXEC;

	return kexec_image_probe_default(image, buf, buf_len);
}

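/* Free the IPL report buffer before the generic post-load cleanup. */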
int arch_kimage_file_post_load_cleanup(struct kimage *image)
{
	vfree(image->arch.ipl_buf);
	image->arch.ipl_buf = NULL;

	return kexec_image_post_load_cleanup_default(image);
}