/*
 * AMD CPU Microcode Update Driver for Linux
 *
 * This driver allows upgrading the microcode on family 10h (F10h)
 * AMD CPUs and later.
 *
 * Copyright (C) 2008-2011 Advanced Micro Devices Inc.
 *
 * Author: Peter Oruba <peter.oruba@amd.com>
 *
 * Based on work by:
 * Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
 *
 * early loader:
 * Copyright (C) 2013 Advanced Micro Devices, Inc.
 *
 * Author: Jacob Shin <jacob.shin@amd.com>
 * Fixes: Borislav Petkov <bp@suse.de>
 *
 * Licensed under the terms of the GNU General Public
 * License version 2. See file COPYING for details.
 */
#define pr_fmt(fmt) "microcode: " fmt

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <asm/microcode_amd.h>
#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>

static struct equiv_cpu_entry *equiv_cpu_table;

struct ucode_patch {
	struct list_head plist;
	void *data;
	u32 patch_id;
	u16 equiv_cpu;
};

static LIST_HEAD(pcache);

/*
 * This points to the current valid container of microcode patches which we
 * will save from the initrd before jettisoning its contents.
 */
static u8 *container;
static size_t container_size;
static bool ucode_builtin;

static u32 ucode_new_rev;
static u8 amd_ucode_patch[PATCH_MAX_SIZE];
static u16 this_equiv_id;

static struct cpio_data ucode_cpio;

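/*
 * Scan the initrd for the microcode container file. Returns an empty
 * cpio_data when the kernel was built without initrd support.
 */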
static struct cpio_data __init find_ucode_in_initrd(void)
{
#ifdef CONFIG_BLK_DEV_INITRD
	char *path;
	void *start;
	size_t size;

	/*
	 * Microcode patch container file is prepended to the initrd in cpio
	 * format. See Documentation/x86/early-microcode.txt
	 */
	static __initdata char ucode_path[] = "kernel/x86/microcode/AuthenticAMD.bin";

#ifdef CONFIG_X86_32
	struct boot_params *p;

	/*
	 * On 32-bit, early load occurs before paging is turned on so we need
	 * to use physical addresses.
	 */
	p     = (struct boot_params *)__pa_nodebug(&boot_params);
	path  = (char *)__pa_nodebug(ucode_path);
	start = (void *)p->hdr.ramdisk_image;
	size  = p->hdr.ramdisk_size;
#else
	path  = ucode_path;
	start = (void *)(boot_params.hdr.ramdisk_image + PAGE_OFFSET);
	size  = boot_params.hdr.ramdisk_size;
#endif /* !CONFIG_X86_32 */

	return find_cpio_data(path, start, size, NULL);
#else
	return (struct cpio_data){ NULL, 0, "" };
#endif
}

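/*
 * Compute the size of a single container: the equivalence-table section
 * plus all well-formed microcode patch sections that follow it. Stops at
 * the first malformed or oversized section.
 */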
static size_t compute_container_size(u8 *data, u32 total_size)
{
	size_t size = 0;
	u32 *header = (u32 *)data;

	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return size;

	size = header[2] + CONTAINER_HDR_SZ;
	total_size -= size;
	data += size;

	while (total_size) {
		u16 patch_size;

		header = (u32 *)data;

		if (header[0] != UCODE_UCODE_TYPE)
			break;

		/*
		 * Sanity-check patch size.
		 */
		patch_size = header[1];
		if (patch_size > PATCH_MAX_SIZE)
			break;

		size	   += patch_size + SECTION_HDR_SIZE;
		data	   += patch_size + SECTION_HDR_SIZE;
		total_size -= patch_size + SECTION_HDR_SIZE;
	}

	return size;
}

static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size);

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in the initrd, traverse the equivalent-CPU table,
 * look for a matching microcode patch, and update, all in initrd memory in
 * place. When vmalloc() is available for use later -- on 64-bit during first
 * AP load, and on 32-bit during save_microcode_in_initrd_amd() -- we can
 * call load_microcode_amd() to save the equivalent-CPU table and microcode
 * patches in kernel heap memory.
 */
static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
{
	struct equiv_cpu_entry *eq;
	size_t *cont_sz;
	u32 *header;
	u8  *data, **cont;
	u8 (*patch)[PATCH_MAX_SIZE];
	u16 eq_id = 0;
	int offset, left;
	u32 rev, eax, ebx, ecx, edx;
	u32 *new_rev;

#ifdef CONFIG_X86_32
	new_rev = (u32 *)__pa_nodebug(&ucode_new_rev);
	cont_sz = (size_t *)__pa_nodebug(&container_size);
	cont	= (u8 **)__pa_nodebug(&container);
	patch	= (u8 (*)[PATCH_MAX_SIZE])__pa_nodebug(&amd_ucode_patch);
#else
	new_rev = &ucode_new_rev;
	cont_sz = &container_size;
	cont	= &container;
	patch	= &amd_ucode_patch;
#endif

	data   = ucode;
	left   = size;
	header = (u32 *)data;

	/* find equiv cpu table */
	if (header[0] != UCODE_MAGIC ||
	    header[1] != UCODE_EQUIV_CPU_TABLE_TYPE || /* type */
	    header[2] == 0)                            /* size */
		return;

	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	while (left > 0) {
		eq = (struct equiv_cpu_entry *)(data + CONTAINER_HDR_SZ);

		*cont = data;

		/* Advance past the container header */
		offset = header[2] + CONTAINER_HDR_SZ;
		data  += offset;
		left  -= offset;

		eq_id = find_equiv_id(eq, eax);
		if (eq_id) {
			this_equiv_id = eq_id;
			*cont_sz = compute_container_size(*cont, left + offset);

			/*
			 * truncate how much we need to iterate over in the
			 * ucode update loop below
			 */
			left = *cont_sz - offset;
			break;
		}

		/*
		 * support multiple container files appended together. if this
		 * one does not have a matching equivalent cpu entry, we fast
		 * forward to the next container file.
		 */
		while (left > 0) {
			header = (u32 *)data;
			if (header[0] == UCODE_MAGIC &&
			    header[1] == UCODE_EQUIV_CPU_TABLE_TYPE)
				break;

			offset = header[1] + SECTION_HDR_SIZE;
			data  += offset;
			left  -= offset;
		}

		/* mark where the next microcode container file starts */
		offset = data - (u8 *)ucode;
		ucode  = data;
	}

	if (!eq_id) {
		*cont = NULL;
		*cont_sz = 0;
		return;
	}

	if (check_current_patch_level(&rev, true))
		return;

	while (left > 0) {
		struct microcode_amd *mc;

		header = (u32 *)data;
		if (header[0] != UCODE_UCODE_TYPE || /* type */
		    header[1] == 0)                  /* size */
			break;

		mc = (struct microcode_amd *)(data + SECTION_HDR_SIZE);

		if (eq_id == mc->hdr.processor_rev_id && rev < mc->hdr.patch_id) {

			if (!__apply_microcode_amd(mc)) {
				rev = mc->hdr.patch_id;
				*new_rev = rev;

				if (save_patch)
					memcpy(patch, mc,
					       min_t(u32, header[1], PATCH_MAX_SIZE));
			}
		}

		offset = header[1] + SECTION_HDR_SIZE;
		data  += offset;
		left  -= offset;
	}
}

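/*
 * On 64-bit, the microcode container can also be built into the kernel
 * image; pick the family-specific blob for families 15h+ and fall back to
 * the legacy name otherwise.
 */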
static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
					      unsigned int family)
{
#ifdef CONFIG_X86_64
	char fw_name[36] = "amd-ucode/microcode_amd.bin";

	if (family >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", family);

	return get_builtin_firmware(cp, fw_name);
#else
	return false;
#endif
}

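/*
 * Early loading entry point for the BSP: find a container (built-in or in
 * the initrd), remember where it lives, and apply the first matching patch.
 */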
void __init load_ucode_amd_bsp(unsigned int family)
{
	struct cpio_data cp;
	bool *builtin;
	void **data;
	size_t *size;

#ifdef CONFIG_X86_32
	data =  (void **)__pa_nodebug(&ucode_cpio.data);
	size = (size_t *)__pa_nodebug(&ucode_cpio.size);
	builtin = (bool *)__pa_nodebug(&ucode_builtin);
#else
	data = &ucode_cpio.data;
	size = &ucode_cpio.size;
	builtin = &ucode_builtin;
#endif

	*builtin = load_builtin_amd_microcode(&cp, family);
	if (!*builtin)
		cp = find_ucode_in_initrd();

	if (!(cp.data && cp.size))
		return;

	*data = cp.data;
	*size = cp.size;

	apply_ucode_in_initrd(cp.data, cp.size, true);
}

#ifdef CONFIG_X86_32
/*
 * On 32-bit, since an AP's early load occurs before paging is turned on, we
 * cannot traverse cpu_equiv_table and pcache in kernel heap memory. So
 * during cold boot, an AP will apply_ucode_in_initrd() just like the BSP.
 * During save_microcode_in_initrd_amd(), the BSP's patch is copied to
 * amd_ucode_patch, which is used upon resume from suspend.
 */
void load_ucode_amd_ap(void)
{
	struct microcode_amd *mc;
	size_t *usize;
	void **ucode;

	mc = (struct microcode_amd *)__pa_nodebug(amd_ucode_patch);
	if (mc->hdr.patch_id && mc->hdr.processor_rev_id) {
		__apply_microcode_amd(mc);
		return;
	}

	ucode = (void *)__pa_nodebug(&container);
	usize = (size_t *)__pa_nodebug(&container_size);

	if (!*ucode || !*usize)
		return;

	apply_ucode_in_initrd(*ucode, *usize, false);
}

static void __init collect_cpu_sig_on_bsp(void *arg)
{
	unsigned int cpu = smp_processor_id();
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->cpu_sig.sig = cpuid_eax(0x00000001);
}

static void __init get_bsp_sig(void)
{
	unsigned int bsp = boot_cpu_data.cpu_index;
	struct ucode_cpu_info *uci = ucode_cpu_info + bsp;

	if (!uci->cpu_sig.sig)
		smp_call_function_single(bsp, collect_cpu_sig_on_bsp, NULL, 1);
}
#else
void load_ucode_amd_ap(void)
{
	unsigned int cpu = smp_processor_id();
	struct equiv_cpu_entry *eq;
	struct microcode_amd *mc;
	u8 *cont = container;
	u32 rev, eax;
	u16 eq_id;

	/* Exit if called on the BSP. */
	if (!cpu)
		return;

	if (!container)
		return;

	/*
	 * 64-bit runs with paging enabled, thus early==false.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		cont += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	eax = cpuid_eax(0x00000001);
	eq  = (struct equiv_cpu_entry *)(cont + CONTAINER_HDR_SZ);

	eq_id = find_equiv_id(eq, eax);
	if (!eq_id)
		return;

	if (eq_id == this_equiv_id) {
		mc = (struct microcode_amd *)amd_ucode_patch;

		if (mc && rev < mc->hdr.patch_id) {
			if (!__apply_microcode_amd(mc))
				ucode_new_rev = mc->hdr.patch_id;
		}

	} else {
		if (!ucode_cpio.data)
			return;

		/*
		 * AP has a different equivalence ID than BSP, looks like
		 * mixed-steppings silicon so go through the ucode blob anew.
		 */
		apply_ucode_in_initrd(ucode_cpio.data, ucode_cpio.size, false);
	}
}
#endif

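/*
 * Called once the kernel is fully up: move the patches from the (soon to be
 * freed) initrd into the kernel-heap patch cache via load_microcode_amd().
 */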
int __init save_microcode_in_initrd_amd(void)
{
	unsigned long cont;
	int retval = 0;
	enum ucode_state ret;
	u8 *cont_va;
	u32 eax;

	if (!container)
		return -EINVAL;

#ifdef CONFIG_X86_32
	get_bsp_sig();
	cont	= (unsigned long)container;
	cont_va = __va(container);
#else
	/*
	 * We need the physical address of the container for both bitness since
	 * boot_params.hdr.ramdisk_image is a physical address.
	 */
	cont	= __pa_nodebug(container);
	cont_va = container;
#endif

	/*
	 * Take into account the fact that the ramdisk might get relocated and
	 * therefore we need to recompute the container's position in virtual
	 * memory space.
	 */
	if (relocated_ramdisk)
		container = (u8 *)(__va(relocated_ramdisk) +
			     (cont - boot_params.hdr.ramdisk_image));
	else
		container = cont_va;

	/* Add CONFIG_RANDOMIZE_MEMORY offset. */
	if (!ucode_builtin)
		container += PAGE_OFFSET - __PAGE_OFFSET_BASE;

	eax = cpuid_eax(0x00000001);
	eax = ((eax >> 8) & 0xf) + ((eax >> 20) & 0xff);

	ret = load_microcode_amd(true, eax, container, container_size);
	if (ret != UCODE_OK)
		retval = -EINVAL;

	/*
	 * The initrd will be freed shortly. The patches for the current
	 * family were stashed above; from now on, the patch cache is used
	 * for CPU hotplug, resume from suspend, etc.
	 */
	container = NULL;
	container_size = 0;

	return retval;
}

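/*
 * Reapply the saved BSP patch on the syscore resume path, when the CPU has
 * lost its microcode state across suspend.
 */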
void reload_ucode_amd(void)
{
	struct microcode_amd *mc;
	u32 rev;

	/*
	 * early==false because this is a syscore ->resume path and by
	 * that time paging is long enabled.
	 */
	if (check_current_patch_level(&rev, false))
		return;

	mc = (struct microcode_amd *)amd_ucode_patch;

	if (mc && rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc)) {
			ucode_new_rev = mc->hdr.patch_id;
			pr_info("reload patch_level=0x%08x\n", ucode_new_rev);
		}
	}
}
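
/* Map a CPU's signature to its equivalence ID via the cached table. */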
static u16 __find_equiv_id(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	return find_equiv_id(equiv_cpu_table, uci->cpu_sig.sig);
}

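/*
 * Reverse lookup: given an equivalence ID, return the installed CPU's
 * CPUID(1).EAX signature from the equivalence table.
 */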
static u32 find_cpu_family_by_equiv_cpu(u16 equiv_cpu)
{
	int i = 0;

	BUG_ON(!equiv_cpu_table);

	while (equiv_cpu_table[i].equiv_cpu != 0) {
		if (equiv_cpu == equiv_cpu_table[i].equiv_cpu)
			return equiv_cpu_table[i].installed_cpu;
		i++;
	}
	return 0;
}

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist)
		if (p->equiv_cpu == equiv_cpu)
			return p;
	return NULL;
}

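/*
 * Insert a patch into the cache, replacing an existing entry for the same
 * equivalence ID only if the new patch level is higher.
 */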
static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;

	list_for_each_entry(p, &pcache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id)
				/* we already have the latest patch */
				return;

			list_replace(&p->plist, &new_patch->plist);
			kfree(p->data);
			kfree(p);
			return;
		}
	}
	/* no patch found, add it */
	list_add_tail(&new_patch->plist, &pcache);
}

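/* Drop all cached patches, e.g. after a failed container load. */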
static void free_cache(void)
{
	struct ucode_patch *p, *tmp;

	list_for_each_entry_safe(p, tmp, &pcache, plist) {
		__list_del(p->plist.prev, p->plist.next);
		kfree(p->data);
		kfree(p);
	}
}

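/* Find the cached patch matching this CPU's equivalence ID, if any. */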
static struct ucode_patch *find_patch(unsigned int cpu)
{
	u16 equiv_id;

	equiv_id = __find_equiv_id(cpu);
	if (!equiv_id)
		return NULL;

	return cache_find_patch(equiv_id);
}

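/*
 * ->collect_cpu_info() callback: report this CPU's signature and current
 * patch level to the microcode core.
 */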
static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;

	/*
	 * a patch could have been loaded early, set uci->mc so that
	 * mc_bp_resume() can call apply_microcode()
	 */
	p = find_patch(cpu);
	if (p && (p->patch_id == csig->rev))
		uci->mc = p->data;

	pr_info("CPU%d: patch_level=0x%08x\n", cpu, csig->rev);

	return 0;
}

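/*
 * Check a patch section's size against the per-family maximum and against
 * the amount of data actually left in the blob. Returns 0 on mismatch.
 */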
static unsigned int verify_patch_size(u8 family, u32 patch_size,
				      unsigned int size)
{
	u32 max_size;

#define F1XH_MPB_MAX_SIZE 2048
#define F14H_MPB_MAX_SIZE 1824
#define F15H_MPB_MAX_SIZE 4096
#define F16H_MPB_MAX_SIZE 3458
#define F17H_MPB_MAX_SIZE 3200

	switch (family) {
	case 0x14:
		max_size = F14H_MPB_MAX_SIZE;
		break;
	case 0x15:
		max_size = F15H_MPB_MAX_SIZE;
		break;
	case 0x16:
		max_size = F16H_MPB_MAX_SIZE;
		break;
	case 0x17:
		max_size = F17H_MPB_MAX_SIZE;
		break;
	default:
		max_size = F1XH_MPB_MAX_SIZE;
		break;
	}

	if (patch_size > min_t(u32, size, max_size)) {
		pr_err("patch size mismatch\n");
		return 0;
	}

	return patch_size;
}

/*
 * Those patch levels cannot be updated to newer ones and thus should be final.
 */
static u32 final_levels[] = {
	0x01000098,
	0x0100009f,
	0x010000af,
	0, /* T-101 terminator */
};

/*
 * Check the current patch level on this CPU.
 *
 * @rev: Use it to return the patch level. It is set to 0 in the case of
 * error.
 *
 * Returns:
 *  - true: if update should stop
 *  - false: otherwise
 */
bool check_current_patch_level(u32 *rev, bool early)
{
	u32 lvl, dummy, i;
	bool ret = false;
	u32 *levels;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

	if (IS_ENABLED(CONFIG_X86_32) && early)
		levels = (u32 *)__pa_nodebug(&final_levels);
	else
		levels = final_levels;

	for (i = 0; levels[i]; i++) {
		if (lvl == levels[i]) {
			lvl = 0;
			ret = true;
			break;
		}
	}

	if (rev)
		*rev = lvl;

	return ret;
}

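/*
 * Hand a patch to the CPU by writing its address to the PATCH_LOADER MSR,
 * then read PATCH_LEVEL back to verify the update actually took effect.
 */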
int __apply_microcode_amd(struct microcode_amd *mc_amd)
{
	u32 rev, dummy;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc_amd->hdr.data_code);

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc_amd->hdr.patch_id)
		return -1;

	return 0;
}

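/*
 * ->apply_microcode() callback: apply the cached patch for this CPU if it
 * is newer than the currently running patch level, and update bookkeeping.
 */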
int apply_microcode_amd(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct microcode_amd *mc_amd;
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

	uci = ucode_cpu_info + cpu;

	p = find_patch(cpu);
	if (!p)
		return 0;

	mc_amd  = p->data;
	uci->mc = p->data;

	if (check_current_patch_level(&rev, false))
		return -1;

	/* need to apply patch? */
	if (rev >= mc_amd->hdr.patch_id) {
		c->microcode = rev;
		uci->cpu_sig.rev = rev;
		return 0;
	}

	if (__apply_microcode_amd(mc_amd)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
			cpu, mc_amd->hdr.patch_id);
		return -1;
	}
	pr_info("CPU%d: new patch_level=0x%08x\n", cpu,
		mc_amd->hdr.patch_id);

	uci->cpu_sig.rev = mc_amd->hdr.patch_id;
	c->microcode = mc_amd->hdr.patch_id;

	return 0;
}

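/*
 * Copy the container's equivalence table into a vmalloc'ed buffer. Returns
 * the number of bytes consumed (table plus container header) or -errno.
 */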
static int install_equiv_cpu_table(const u8 *buf)
{
	unsigned int *ibuf = (unsigned int *)buf;
	unsigned int type = ibuf[1];
	unsigned int size = ibuf[2];

	if (type != UCODE_EQUIV_CPU_TABLE_TYPE || !size) {
		pr_err("empty section/invalid type field in container file section header\n");
		return -EINVAL;
	}

	equiv_cpu_table = vmalloc(size);
	if (!equiv_cpu_table) {
		pr_err("failed to allocate equivalent CPU table\n");
		return -ENOMEM;
	}

	memcpy(equiv_cpu_table, buf + CONTAINER_HDR_SZ, size);

	/* add header length */
	return size + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	vfree(equiv_cpu_table);
	equiv_cpu_table = NULL;
}

static void cleanup(void)
{
	free_equiv_cpu_table();
	free_cache();
}

/*
 * We return the current size even if some of the checks failed so that
 * we can skip over the next patch. If we return a negative value, we
 * signal a grave error such as a failed memory allocation, meaning the
 * driver cannot continue functioning normally. In such cases, we tear
 * down everything we've used up so far and exit.
 */
static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover)
{
	struct microcode_header_amd *mc_hdr;
	struct ucode_patch *patch;
	unsigned int patch_size, crnt_size, ret;
	u32 proc_fam;
	u16 proc_id;

	patch_size = *(u32 *)(fw + 4);
	crnt_size  = patch_size + SECTION_HDR_SIZE;
	mc_hdr	   = (struct microcode_header_amd *)(fw + SECTION_HDR_SIZE);
	proc_id	   = mc_hdr->processor_rev_id;

	proc_fam = find_cpu_family_by_equiv_cpu(proc_id);
	if (!proc_fam) {
		pr_err("No patch family for equiv ID: 0x%04x\n", proc_id);
		return crnt_size;
	}

	/* check if patch is for the current family */
	proc_fam = ((proc_fam >> 8) & 0xf) + ((proc_fam >> 20) & 0xff);
	if (proc_fam != family)
		return crnt_size;

	if (mc_hdr->nb_dev_id || mc_hdr->sb_dev_id) {
		pr_err("Patch-ID 0x%08x: chipset-specific code unsupported.\n",
			mc_hdr->patch_id);
		return crnt_size;
	}

	ret = verify_patch_size(family, patch_size, leftover);
	if (!ret) {
		pr_err("Patch-ID 0x%08x: size mismatch.\n", mc_hdr->patch_id);
		return crnt_size;
	}

	patch = kzalloc(sizeof(*patch), GFP_KERNEL);
	if (!patch) {
		pr_err("Patch allocation failure.\n");
		return -EINVAL;
	}

	patch->data = kmemdup(fw + SECTION_HDR_SIZE, patch_size, GFP_KERNEL);
	if (!patch->data) {
		pr_err("Patch data allocation failure.\n");
		kfree(patch);
		return -EINVAL;
	}

	INIT_LIST_HEAD(&patch->plist);
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
	update_cache(patch);

	return crnt_size;
}

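/*
 * Parse a whole container: install the equivalence table, then verify and
 * cache every patch section that belongs to the given family.
 */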
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
{
	enum ucode_state ret = UCODE_ERROR;
	unsigned int leftover;
	u8 *fw = (u8 *)data;
	int crnt_size = 0;
	int offset;

	offset = install_equiv_cpu_table(data);
	if (offset < 0) {
		pr_err("failed to create equivalent cpu table\n");
		return ret;
	}
	fw += offset;
	leftover = size - offset;

	if (*(u32 *)fw != UCODE_UCODE_TYPE) {
		pr_err("invalid type field in container file section header\n");
		free_equiv_cpu_table();
		return ret;
	}

	while (leftover) {
		crnt_size = verify_and_add_patch(family, fw, leftover);
		if (crnt_size < 0)
			return ret;

		fw	 += crnt_size;
		leftover -= crnt_size;
	}

	return UCODE_OK;
}

static enum ucode_state
load_microcode_amd(bool save, u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);

	if (ret != UCODE_OK)
		cleanup();

#ifdef CONFIG_X86_32
	/* save BSP's matching patch for early load */
	if (save) {
		struct ucode_patch *p = find_patch(0);
		if (p) {
			memset(amd_ucode_patch, 0, PATCH_MAX_SIZE);
			memcpy(amd_ucode_patch, p->data, min_t(u32, ksize(p->data),
							       PATCH_MAX_SIZE));
		}
	}
#endif
	return ret;
}

/*
 * AMD microcode firmware naming convention: up to family 15h, patches are in
 * the legacy file:
 *
 *    amd-ucode/microcode_amd.bin
 *
 * This legacy file is always smaller than 2K in size.
 *
 * Beginning with family 15h, they are in family-specific firmware files:
 *
 *    amd-ucode/microcode_amd_fam15h.bin
 *    amd-ucode/microcode_amd_fam16h.bin
 *    ...
 *
 * These might be larger than 2K.
 */
static enum ucode_state request_microcode_amd(int cpu, struct device *device,
					      bool refresh_fw)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	bool bsp = c->cpu_index == boot_cpu_data.cpu_index;
	enum ucode_state ret = UCODE_NFOUND;
	const struct firmware *fw;

	/* reload ucode container only on the boot cpu */
	if (!refresh_fw || !bsp)
		return UCODE_OK;

	if (c->x86 >= 0x15)
		snprintf(fw_name, sizeof(fw_name),
			 "amd-ucode/microcode_amd_fam%.2xh.bin", c->x86);

	if (request_firmware_direct(&fw, (const char *)fw_name, device)) {
		pr_debug("failed to load file %s\n", fw_name);
		goto out;
	}

	ret = UCODE_ERROR;
	if (*(u32 *)fw->data != UCODE_MAGIC) {
		pr_err("invalid magic value (0x%08x)\n", *(u32 *)fw->data);
		goto fw_release;
	}

	ret = load_microcode_amd(bsp, c->x86, fw->data, fw->size);

 fw_release:
	release_firmware(fw);

 out:
	return ret;
}

static enum ucode_state
request_microcode_user(int cpu, const void __user *buf, size_t size)
{
	return UCODE_ERROR;
}

static void microcode_fini_cpu_amd(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	uci->mc = NULL;
}

static struct microcode_ops microcode_amd_ops = {
	.request_microcode_user           = request_microcode_user,
	.request_microcode_fw             = request_microcode_amd,
	.collect_cpu_info                 = collect_cpu_info_amd,
	.apply_microcode                  = apply_microcode_amd,
	.microcode_fini_cpu               = microcode_fini_cpu_amd,
};

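/*
 * Entry point for the microcode core: reject unsupported CPUs (pre-family
 * 10h) and hand back the AMD ops table.
 */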
struct microcode_ops * __init init_amd_microcode(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;

	if (c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10) {
		pr_warn("AMD CPU family 0x%x not supported\n", c->x86);
		return NULL;
	}

	if (ucode_new_rev)
		pr_info_once("microcode updated early to new patch_level=0x%08x\n",
			     ucode_new_rev);

	return &microcode_amd_ops;
}

void __exit exit_amd_microcode(void)
{
	cleanup();
}