1 /*
2 * Intel CPU Microcode Update Driver for Linux
3 *
4 * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
5 * 2006 Shaohua Li <shaohua.li@intel.com>
6 *
7 * Intel CPU microcode early update for Linux
8 *
9 * Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
 *		      H Peter Anvin <hpa@zytor.com>
11 *
12 * This program is free software; you can redistribute it and/or
13 * modify it under the terms of the GNU General Public License
14 * as published by the Free Software Foundation; either version
15 * 2 of the License, or (at your option) any later version.
16 */
17
18 /*
19 * This needs to be before all headers so that pr_debug in printk.h doesn't turn
20 * printk calls into no_printk().
21 *
22 *#define DEBUG
23 */
24 #define pr_fmt(fmt) "microcode: " fmt
25
26 #include <linux/earlycpio.h>
27 #include <linux/firmware.h>
28 #include <linux/uaccess.h>
29 #include <linux/vmalloc.h>
30 #include <linux/initrd.h>
31 #include <linux/kernel.h>
32 #include <linux/slab.h>
33 #include <linux/cpu.h>
34 #include <linux/mm.h>
35
36 #include <asm/microcode_intel.h>
37 #include <asm/processor.h>
38 #include <asm/tlbflush.h>
39 #include <asm/setup.h>
40 #include <asm/msr.h>
41
/* last level cache size per core */
static int llc_size_per_core;

/*
 * Offsets (relative to the initrd image start) of microcode patches found
 * during early boot; turned into real pointers later by
 * save_microcode_in_initrd_intel().
 */
static unsigned long mc_saved_in_initrd[MAX_UCODE_COUNT];

/* Patches kept in kernel memory for AP bringup, hotplug and resume. */
static struct mc_saved_data {
	unsigned int mc_saved_count;		/* valid entries in mc_saved */
	struct microcode_intel **mc_saved;	/* kmalloc'ed patch copies */
} mc_saved_data;
50
51 static enum ucode_state
load_microcode_early(struct microcode_intel ** saved,unsigned int num_saved,struct ucode_cpu_info * uci)52 load_microcode_early(struct microcode_intel **saved,
53 unsigned int num_saved, struct ucode_cpu_info *uci)
54 {
55 struct microcode_intel *ucode_ptr, *new_mc = NULL;
56 struct microcode_header_intel *mc_hdr;
57 int new_rev, ret, i;
58
59 new_rev = uci->cpu_sig.rev;
60
61 for (i = 0; i < num_saved; i++) {
62 ucode_ptr = saved[i];
63 mc_hdr = (struct microcode_header_intel *)ucode_ptr;
64
65 ret = has_newer_microcode(ucode_ptr,
66 uci->cpu_sig.sig,
67 uci->cpu_sig.pf,
68 new_rev);
69 if (!ret)
70 continue;
71
72 new_rev = mc_hdr->rev;
73 new_mc = ucode_ptr;
74 }
75
76 if (!new_mc)
77 return UCODE_NFOUND;
78
79 uci->mc = (struct microcode_intel *)new_mc;
80 return UCODE_OK;
81 }
82
/* Rebase each initrd-relative offset in @initrd by @off into a pointer. */
static inline void
copy_initrd_ptrs(struct microcode_intel **mc_saved, unsigned long *initrd,
		 unsigned long off, int num_saved)
{
	int idx;

	for (idx = 0; idx < num_saved; idx++)
		mc_saved[idx] = (struct microcode_intel *)(initrd[idx] + off);
}
92
93 #ifdef CONFIG_X86_32
94 static void
microcode_phys(struct microcode_intel ** mc_saved_tmp,struct mc_saved_data * mc_saved_data)95 microcode_phys(struct microcode_intel **mc_saved_tmp,
96 struct mc_saved_data *mc_saved_data)
97 {
98 int i;
99 struct microcode_intel ***mc_saved;
100
101 mc_saved = (struct microcode_intel ***)
102 __pa_nodebug(&mc_saved_data->mc_saved);
103 for (i = 0; i < mc_saved_data->mc_saved_count; i++) {
104 struct microcode_intel *p;
105
106 p = *(struct microcode_intel **)
107 __pa_nodebug(mc_saved_data->mc_saved + i);
108 mc_saved_tmp[i] = (struct microcode_intel *)__pa_nodebug(p);
109 }
110 }
111 #endif
112
/*
 * Load the best saved patch for this CPU into uci->mc.
 *
 * If the patches have not yet been deep-copied into kernel memory
 * (mc_saved_data->mc_saved == NULL), the initrd-relative offsets in
 * @initrd are rebased onto @initrd_start first.  On 32-bit the saved
 * pointers must additionally be translated to physical addresses, since
 * this can run before paging is enabled.
 */
static enum ucode_state
load_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long initrd_start, struct ucode_cpu_info *uci)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int count = mc_saved_data->mc_saved_count;

	if (!mc_saved_data->mc_saved) {
		/* Patches still live in the initrd image: rebase offsets. */
		copy_initrd_ptrs(mc_saved_tmp, initrd, initrd_start, count);

		return load_microcode_early(mc_saved_tmp, count, uci);
	} else {
#ifdef CONFIG_X86_32
		/* Pre-paging on 32-bit: go through physical addresses. */
		microcode_phys(mc_saved_tmp, mc_saved_data);
		return load_microcode_early(mc_saved_tmp, count, uci);
#else
		return load_microcode_early(mc_saved_data->mc_saved,
					    count, uci);
#endif
	}
}
134
/*
 * Deep-copy @mc_saved_count patches from @mc_saved_src into freshly
 * allocated kernel memory and publish them through @mc_saved_data.
 *
 * Returns 0 on success or a negative errno.  On failure nothing is
 * published and all partial allocations are freed.  NOTE(review): any
 * previously published array is NOT freed here — the caller (e.g.
 * save_mc_for_early()) is responsible for releasing the old copies.
 */
static int
save_microcode(struct mc_saved_data *mc_saved_data,
	       struct microcode_intel **mc_saved_src,
	       unsigned int mc_saved_count)
{
	int i, j;
	struct microcode_intel **saved_ptr;
	int ret;

	if (!mc_saved_count)
		return -EINVAL;

	/*
	 * Copy new microcode data.
	 */
	saved_ptr = kcalloc(mc_saved_count, sizeof(struct microcode_intel *), GFP_KERNEL);
	if (!saved_ptr)
		return -ENOMEM;

	for (i = 0; i < mc_saved_count; i++) {
		struct microcode_header_intel *mc_hdr;
		struct microcode_intel *mc;
		unsigned long size;

		if (!mc_saved_src[i]) {
			ret = -EINVAL;
			goto err;
		}

		mc = mc_saved_src[i];
		mc_hdr = &mc->hdr;
		/* Total patch size (header + data + optional ext sig table). */
		size = get_totalsize(mc_hdr);

		saved_ptr[i] = kmalloc(size, GFP_KERNEL);
		if (!saved_ptr[i]) {
			ret = -ENOMEM;
			goto err;
		}

		memcpy(saved_ptr[i], mc, size);
	}

	/*
	 * Point to newly saved microcode.
	 */
	mc_saved_data->mc_saved = saved_ptr;
	mc_saved_data->mc_saved_count = mc_saved_count;

	return 0;

err:
	/*
	 * kcalloc() zeroed the array, so freeing up to and including slot i
	 * is safe: never-allocated slots are NULL and kfree(NULL) is a no-op.
	 */
	for (j = 0; j <= i; j++)
		kfree(saved_ptr[j]);
	kfree(saved_ptr);

	return ret;
}
192
193 /*
194 * A microcode patch in ucode_ptr is saved into mc_saved
195 * - if it has matching signature and newer revision compared to an existing
196 * patch mc_saved.
197 * - or if it is a newly discovered microcode patch.
198 *
199 * The microcode patch should have matching model with CPU.
200 *
201 * Returns: The updated number @num_saved of saved microcode patches.
202 */
static unsigned int _save_mc(struct microcode_intel **mc_saved,
			     u8 *ucode_ptr, unsigned int num_saved)
{
	struct microcode_header_intel *new_hdr, *old_hdr;
	int matched = 0, i;

	new_hdr = (struct microcode_header_intel *)ucode_ptr;

	/* Look for an already-saved patch with the same signature. */
	for (i = 0; i < num_saved; i++) {
		old_hdr = (struct microcode_header_intel *)mc_saved[i];

		if (!find_matching_signature(ucode_ptr, old_hdr->sig,
					     old_hdr->pf))
			continue;

		matched = 1;

		/* The saved copy is already at least as new: keep it. */
		if (new_hdr->rev <= old_hdr->rev)
			continue;

		/* Replace the older saved patch with this newer one. */
		mc_saved[i] = (struct microcode_intel *)ucode_ptr;
		break;
	}

	/* First sighting of this signature: append the patch. */
	if (i >= num_saved && !matched)
		mc_saved[num_saved++] = (struct microcode_intel *)ucode_ptr;

	return num_saved;
}
239
240 /*
241 * Get microcode matching with BSP's model. Only CPUs with the same model as
242 * BSP can stay in the platform.
243 */
244 static enum ucode_state __init
get_matching_model_microcode(int cpu,unsigned long start,void * data,size_t size,struct mc_saved_data * mc_saved_data,unsigned long * mc_saved_in_initrd,struct ucode_cpu_info * uci)245 get_matching_model_microcode(int cpu, unsigned long start,
246 void *data, size_t size,
247 struct mc_saved_data *mc_saved_data,
248 unsigned long *mc_saved_in_initrd,
249 struct ucode_cpu_info *uci)
250 {
251 u8 *ucode_ptr = data;
252 unsigned int leftover = size;
253 enum ucode_state state = UCODE_OK;
254 unsigned int mc_size;
255 struct microcode_header_intel *mc_header;
256 struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
257 unsigned int mc_saved_count = mc_saved_data->mc_saved_count;
258 int i;
259
260 while (leftover && mc_saved_count < ARRAY_SIZE(mc_saved_tmp)) {
261
262 if (leftover < sizeof(mc_header))
263 break;
264
265 mc_header = (struct microcode_header_intel *)ucode_ptr;
266
267 mc_size = get_totalsize(mc_header);
268 if (!mc_size || mc_size > leftover ||
269 microcode_sanity_check(ucode_ptr, 0) < 0)
270 break;
271
272 leftover -= mc_size;
273
274 /*
275 * Since APs with same family and model as the BSP may boot in
276 * the platform, we need to find and save microcode patches
277 * with the same family and model as the BSP.
278 */
279 if (!find_matching_signature(mc_header, uci->cpu_sig.sig,
280 uci->cpu_sig.pf)) {
281 ucode_ptr += mc_size;
282 continue;
283 }
284
285 mc_saved_count = _save_mc(mc_saved_tmp, ucode_ptr, mc_saved_count);
286
287 ucode_ptr += mc_size;
288 }
289
290 if (leftover) {
291 state = UCODE_ERROR;
292 goto out;
293 }
294
295 if (mc_saved_count == 0) {
296 state = UCODE_NFOUND;
297 goto out;
298 }
299
300 for (i = 0; i < mc_saved_count; i++)
301 mc_saved_in_initrd[i] = (unsigned long)mc_saved_tmp[i] - start;
302
303 mc_saved_data->mc_saved_count = mc_saved_count;
304 out:
305 return state;
306 }
307
/*
 * Collect signature, platform flags and microcode revision of the CPU we
 * are running on, using only native_* accessors so it works very early in
 * boot.  Fills @uci and marks it valid.  Always returns 0.
 */
static int collect_cpu_info_early(struct ucode_cpu_info *uci)
{
	unsigned int val[2];
	unsigned int family, model;
	struct cpu_signature csig;
	unsigned int eax, ebx, ecx, edx;

	csig.sig = 0;
	csig.pf = 0;
	csig.rev = 0;

	memset(uci, 0, sizeof(*uci));

	/* CPUID(1).EAX is the CPU signature (family/model/stepping). */
	eax = 0x00000001;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);
	csig.sig = eax;

	family = __x86_family(csig.sig);
	model = x86_model(csig.sig);

	if ((model >= 5) || (family > 6)) {
		/* get processor flags from MSR 0x17 */
		native_rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
		/* Platform flag is a one-hot bit selected by MSR bits 52:50. */
		csig.pf = 1 << ((val[1] >> 18) & 7);
	}

	csig.rev = intel_get_microcode_revision();

	uci->cpu_sig = csig;
	uci->valid = 1;

	return 0;
}
342
/*
 * Debug-only dump of all saved microcode patches: header fields plus any
 * extended signature tables.  Compiled to an empty function unless DEBUG
 * is defined.
 *
 * Fix: "toal size" -> "total size" typo in the debug format string.
 */
static void show_saved_mc(void)
{
#ifdef DEBUG
	int i, j;
	unsigned int sig, pf, rev, total_size, data_size, date;
	struct ucode_cpu_info uci;

	if (mc_saved_data.mc_saved_count == 0) {
		pr_debug("no microcode data saved.\n");
		return;
	}
	pr_debug("Total microcode saved: %d\n", mc_saved_data.mc_saved_count);

	collect_cpu_info_early(&uci);

	sig = uci.cpu_sig.sig;
	pf = uci.cpu_sig.pf;
	rev = uci.cpu_sig.rev;
	pr_debug("CPU: sig=0x%x, pf=0x%x, rev=0x%x\n", sig, pf, rev);

	for (i = 0; i < mc_saved_data.mc_saved_count; i++) {
		struct microcode_header_intel *mc_saved_header;
		struct extended_sigtable *ext_header;
		int ext_sigcount;
		struct extended_signature *ext_sig;

		mc_saved_header = (struct microcode_header_intel *)
				  mc_saved_data.mc_saved[i];
		sig = mc_saved_header->sig;
		pf = mc_saved_header->pf;
		rev = mc_saved_header->rev;
		total_size = get_totalsize(mc_saved_header);
		data_size = get_datasize(mc_saved_header);
		date = mc_saved_header->date;

		pr_debug("mc_saved[%d]: sig=0x%x, pf=0x%x, rev=0x%x, total size=0x%x, date = %04x-%02x-%02x\n",
			 i, sig, pf, rev, total_size,
			 date & 0xffff,
			 date >> 24,
			 (date >> 16) & 0xff);

		/* Look for ext. headers: */
		if (total_size <= data_size + MC_HEADER_SIZE)
			continue;

		ext_header = (void *) mc_saved_header + data_size + MC_HEADER_SIZE;
		ext_sigcount = ext_header->count;
		ext_sig = (void *)ext_header + EXT_HEADER_SIZE;

		for (j = 0; j < ext_sigcount; j++) {
			sig = ext_sig->sig;
			pf = ext_sig->pf;

			pr_debug("\tExtended[%d]: sig=0x%x, pf=0x%x\n",
				 j, sig, pf);

			ext_sig++;
		}

	}
#endif
}
405
406 #ifdef CONFIG_HOTPLUG_CPU
407 static DEFINE_MUTEX(x86_cpu_microcode_mutex);
408 /*
409 * Save this mc into mc_saved_data. So it will be loaded early when a CPU is
410 * hot added or resumes.
411 *
412 * Please make sure this mc should be a valid microcode patch before calling
413 * this function.
414 */
int save_mc_for_early(u8 *mc)
{
	struct microcode_intel *mc_saved_tmp[MAX_UCODE_COUNT];
	unsigned int mc_saved_count_init;
	unsigned int mc_saved_count;
	struct microcode_intel **mc_saved;
	int ret = 0;
	int i;

	/*
	 * Hold hotplug lock so mc_saved_data is not accessed by a CPU in
	 * hotplug.
	 */
	mutex_lock(&x86_cpu_microcode_mutex);

	/* Snapshot the published state so the old array can be freed later. */
	mc_saved_count_init = mc_saved_data.mc_saved_count;
	mc_saved_count = mc_saved_data.mc_saved_count;
	mc_saved = mc_saved_data.mc_saved;

	if (mc_saved && mc_saved_count)
		memcpy(mc_saved_tmp, mc_saved,
		       mc_saved_count * sizeof(struct microcode_intel *));
	/*
	 * Save the microcode patch mc in mc_save_tmp structure if it's a newer
	 * version.
	 */
	mc_saved_count = _save_mc(mc_saved_tmp, mc, mc_saved_count);

	/*
	 * Save the mc_save_tmp in global mc_saved_data.
	 * save_microcode() deep-copies the patches, so mc_saved_tmp may
	 * freely mix old pointers and the caller's @mc.
	 */
	ret = save_microcode(&mc_saved_data, mc_saved_tmp, mc_saved_count);
	if (ret) {
		pr_err("Cannot save microcode patch.\n");
		goto out;
	}

	show_saved_mc();

	/*
	 * Free old saved microcode data.
	 * Safe only after save_microcode() succeeded: mc_saved_data now
	 * points at the fresh copies, not at these buffers.
	 */
	if (mc_saved) {
		for (i = 0; i < mc_saved_count_init; i++)
			kfree(mc_saved[i]);
		kfree(mc_saved);
	}

out:
	mutex_unlock(&x86_cpu_microcode_mutex);

	return ret;
}
EXPORT_SYMBOL_GPL(save_mc_for_early);
469 #endif
470
load_builtin_intel_microcode(struct cpio_data * cp)471 static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
472 {
473 #ifdef CONFIG_X86_64
474 unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
475 unsigned int family, model, stepping;
476 char name[30];
477
478 native_cpuid(&eax, &ebx, &ecx, &edx);
479
480 family = __x86_family(eax);
481 model = x86_model(eax);
482 stepping = eax & 0xf;
483
484 sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
485
486 return get_builtin_firmware(cp, name);
487 #else
488 return false;
489 #endif
490 }
491
492 static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
/*
 * Locate the microcode container — either in the initrd cpio archive at
 * @start/@size, or among builtin firmware when there is no initrd — and
 * hand it to get_matching_model_microcode() to save matching patches.
 */
static __init enum ucode_state
scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
	       unsigned long start, unsigned long size,
	       struct ucode_cpu_info *uci)
{
	struct cpio_data cd;
	long offset = 0;
#ifdef CONFIG_X86_32
	/* Pre-paging on 32-bit: address the name string physically. */
	char *p = (char *)__pa_nodebug(ucode_name);
#else
	char *p = ucode_name;
#endif

	cd.data = NULL;
	cd.size = 0;

	/* try built-in microcode if no initrd */
	if (!size) {
		if (!load_builtin_intel_microcode(&cd))
			return UCODE_ERROR;
	} else {
		cd = find_cpio_data(p, (void *)start, size, &offset);
		if (!cd.data)
			return UCODE_ERROR;
	}

	return get_matching_model_microcode(0, start, cd.data, cd.size,
					    mc_saved_data, initrd, uci);
}
522
523 /*
524 * Print ucode update info.
525 */
526 static void
print_ucode_info(struct ucode_cpu_info * uci,unsigned int date)527 print_ucode_info(struct ucode_cpu_info *uci, unsigned int date)
528 {
529 int cpu = smp_processor_id();
530
531 pr_info("CPU%d microcode updated early to revision 0x%x, date = %04x-%02x-%02x\n",
532 cpu,
533 uci->cpu_sig.rev,
534 date & 0xffff,
535 date >> 24,
536 (date >> 16) & 0xff);
537 }
538
539 #ifdef CONFIG_X86_32
540
541 static int delay_ucode_info;
542 static int current_mc_date;
543
544 /*
545 * Print early updated ucode info after printk works. This is delayed info dump.
546 */
show_ucode_info_early(void)547 void show_ucode_info_early(void)
548 {
549 struct ucode_cpu_info uci;
550
551 if (delay_ucode_info) {
552 collect_cpu_info_early(&uci);
553 print_ucode_info(&uci, current_mc_date);
554 delay_ucode_info = 0;
555 }
556 }
557
558 /*
559 * At this point, we can not call printk() yet. Keep microcode patch number in
560 * mc_saved_data.mc_saved and delay printing microcode info in
561 * show_ucode_info_early() until printk() works.
562 */
print_ucode(struct ucode_cpu_info * uci)563 static void print_ucode(struct ucode_cpu_info *uci)
564 {
565 struct microcode_intel *mc_intel;
566 int *delay_ucode_info_p;
567 int *current_mc_date_p;
568
569 mc_intel = uci->mc;
570 if (mc_intel == NULL)
571 return;
572
573 delay_ucode_info_p = (int *)__pa_nodebug(&delay_ucode_info);
574 current_mc_date_p = (int *)__pa_nodebug(¤t_mc_date);
575
576 *delay_ucode_info_p = 1;
577 *current_mc_date_p = mc_intel->hdr.date;
578 }
579 #else
580
581 /*
582 * Flush global tlb. We only do this in x86_64 where paging has been enabled
583 * already and PGE should be enabled as well.
584 */
static inline void flush_tlb_early(void)
{
	/* 64-bit early loading runs with paging (and PGE) already enabled. */
	__native_flush_tlb_global_irq_disabled();
}
589
print_ucode(struct ucode_cpu_info * uci)590 static inline void print_ucode(struct ucode_cpu_info *uci)
591 {
592 struct microcode_intel *mc_intel;
593
594 mc_intel = uci->mc;
595 if (mc_intel == NULL)
596 return;
597
598 print_ucode_info(uci, mc_intel->hdr.date);
599 }
600 #endif
601
/*
 * Write the patch in uci->mc into the CPU via MSR 0x79 and verify the new
 * revision took effect.  @early selects the printk-less reporting path
 * used before the console is up.
 *
 * Returns 0 on success or when no update was needed, -1 if the CPU
 * rejected the patch.
 */
static int apply_microcode_early(struct ucode_cpu_info *uci, bool early)
{
	struct microcode_intel *mc_intel;
	u32 rev;

	mc_intel = uci->mc;
	if (mc_intel == NULL)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc_intel->hdr.rev) {
		uci->cpu_sig.rev = rev;
		return 0;
	}

	/* write microcode via MSR 0x79 */
	native_wrmsr(MSR_IA32_UCODE_WRITE,
		     (unsigned long) mc_intel->bits,
		     (unsigned long) mc_intel->bits >> 16 >> 16);

	/* Re-read the revision to confirm the CPU accepted the update. */
	rev = intel_get_microcode_revision();
	if (rev != mc_intel->hdr.rev)
		return -1;

#ifdef CONFIG_X86_64
	/* Flush global tlb. This is precaution. */
	flush_tlb_early();
#endif
	uci->cpu_sig.rev = rev;

	if (early)
		print_ucode(uci);
	else
		print_ucode_info(uci, mc_intel->hdr.date);

	return 0;
}
644
645 /*
646 * This function converts microcode patch offsets previously stored in
647 * mc_saved_in_initrd to pointers and stores the pointers in mc_saved_data.
648 */
save_microcode_in_initrd_intel(void)649 int __init save_microcode_in_initrd_intel(void)
650 {
651 unsigned int count = mc_saved_data.mc_saved_count;
652 struct microcode_intel *mc_saved[MAX_UCODE_COUNT];
653 int ret = 0;
654
655 if (count == 0)
656 return ret;
657
658 copy_initrd_ptrs(mc_saved, mc_saved_in_initrd, get_initrd_start(), count);
659 ret = save_microcode(&mc_saved_data, mc_saved, count);
660 if (ret)
661 pr_err("Cannot save microcode patches from initrd.\n");
662
663 show_saved_mc();
664
665 return ret;
666 }
667
/*
 * BSP early-load worker: identify the CPU, scan the initrd (or builtin
 * firmware) for matching patches, then load and apply the best one.
 * Silently returns if any stage finds nothing usable.
 */
static void __init
_load_ucode_intel_bsp(struct mc_saved_data *mc_saved_data,
		      unsigned long *initrd,
		      unsigned long start, unsigned long size)
{
	struct ucode_cpu_info uci;
	enum ucode_state ret;

	collect_cpu_info_early(&uci);

	ret = scan_microcode(mc_saved_data, initrd, start, size, &uci);
	if (ret != UCODE_OK)
		return;

	ret = load_microcode(mc_saved_data, initrd, start, &uci);
	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}
688
/*
 * Early microcode load on the boot CPU.  On 32-bit this runs before
 * paging is enabled, so boot_params and the module globals must all be
 * reached through their physical addresses (__pa_nodebug).
 */
void __init load_ucode_intel_bsp(void)
{
	u64 start, size;
#ifdef CONFIG_X86_32
	struct boot_params *p;

	p = (struct boot_params *)__pa_nodebug(&boot_params);
	size = p->hdr.ramdisk_size;

	/*
	 * Set start only if we have an initrd image. We cannot use initrd_start
	 * because it is not set that early yet.
	 */
	start = (size ? p->hdr.ramdisk_image : 0);

	_load_ucode_intel_bsp((struct mc_saved_data *)__pa_nodebug(&mc_saved_data),
			      (unsigned long *)__pa_nodebug(&mc_saved_in_initrd),
			      start, size);
#else
	size = boot_params.hdr.ramdisk_size;
	/* 64-bit: the initrd is reachable through the direct mapping. */
	start = (size ? boot_params.hdr.ramdisk_image + PAGE_OFFSET : 0);

	_load_ucode_intel_bsp(&mc_saved_data, mc_saved_in_initrd, start, size);
#endif
}
714
/*
 * Early microcode load for a secondary (AP) CPU, using the patches the
 * BSP saved earlier.  On 32-bit the module globals are accessed through
 * their physical addresses since this may run before paging is set up.
 */
void load_ucode_intel_ap(void)
{
	struct mc_saved_data *mc_saved_data_p;
	struct ucode_cpu_info uci;
	unsigned long *mc_saved_in_initrd_p;
	enum ucode_state ret;
#ifdef CONFIG_X86_32

	mc_saved_in_initrd_p = (unsigned long *)__pa_nodebug(mc_saved_in_initrd);
	mc_saved_data_p = (struct mc_saved_data *)__pa_nodebug(&mc_saved_data);
#else
	mc_saved_in_initrd_p = mc_saved_in_initrd;
	mc_saved_data_p = &mc_saved_data;
#endif

	/*
	 * If there is no valid ucode previously saved in memory, no need to
	 * update ucode on this AP.
	 */
	if (mc_saved_data_p->mc_saved_count == 0)
		return;

	collect_cpu_info_early(&uci);
	ret = load_microcode(mc_saved_data_p, mc_saved_in_initrd_p,
			     get_initrd_start_addr(), &uci);

	if (ret != UCODE_OK)
		return;

	apply_microcode_early(&uci, true);
}
746
reload_ucode_intel(void)747 void reload_ucode_intel(void)
748 {
749 struct ucode_cpu_info uci;
750 enum ucode_state ret;
751
752 if (!mc_saved_data.mc_saved_count)
753 return;
754
755 collect_cpu_info_early(&uci);
756
757 ret = load_microcode_early(mc_saved_data.mc_saved,
758 mc_saved_data.mc_saved_count, &uci);
759 if (ret != UCODE_OK)
760 return;
761
762 apply_microcode_early(&uci, false);
763 }
764
collect_cpu_info(int cpu_num,struct cpu_signature * csig)765 static int collect_cpu_info(int cpu_num, struct cpu_signature *csig)
766 {
767 struct cpuinfo_x86 *c = &cpu_data(cpu_num);
768 unsigned int val[2];
769
770 memset(csig, 0, sizeof(*csig));
771
772 csig->sig = cpuid_eax(0x00000001);
773
774 if ((c->x86_model >= 5) || (c->x86 > 6)) {
775 /* get processor flags from MSR 0x17 */
776 rdmsr(MSR_IA32_PLATFORM_ID, val[0], val[1]);
777 csig->pf = 1 << ((val[1] >> 18) & 7);
778 }
779
780 csig->rev = c->microcode;
781 pr_info("CPU%d sig=0x%x, pf=0x%x, revision=0x%x\n",
782 cpu_num, csig->sig, csig->pf, csig->rev);
783
784 return 0;
785 }
786
787 /*
788 * return 0 - no update found
789 * return 1 - found update
790 */
get_matching_mc(struct microcode_intel * mc_intel,int cpu)791 static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
792 {
793 struct cpu_signature cpu_sig;
794 unsigned int csig, cpf, crev;
795
796 collect_cpu_info(cpu, &cpu_sig);
797
798 csig = cpu_sig.sig;
799 cpf = cpu_sig.pf;
800 crev = cpu_sig.rev;
801
802 return has_newer_microcode(mc_intel, csig, cpf, crev);
803 }
804
/*
 * Late-loading apply path: write uci->mc to @cpu via MSR 0x79 if it is
 * newer than the running revision.  Must execute on @cpu itself.
 * Returns 0 on success or no-op, -1 if the CPU rejected the update.
 */
static int apply_microcode_intel(int cpu)
{
	struct microcode_intel *mc_intel;
	struct ucode_cpu_info *uci;
	u32 rev;
	int cpu_num = raw_smp_processor_id();
	struct cpuinfo_x86 *c = &cpu_data(cpu_num);

	uci = ucode_cpu_info + cpu;
	mc_intel = uci->mc;

	/* We should bind the task to the CPU */
	BUG_ON(cpu_num != cpu);

	if (mc_intel == NULL)
		return 0;

	/*
	 * Microcode on this CPU could be updated earlier. Only apply the
	 * microcode patch in mc_intel when it is newer than the one on this
	 * CPU.
	 */
	if (get_matching_mc(mc_intel, cpu) == 0)
		return 0;

	/*
	 * Save us the MSR write below - which is a particular expensive
	 * operation - when the other hyperthread has updated the microcode
	 * already.
	 */
	rev = intel_get_microcode_revision();
	if (rev >= mc_intel->hdr.rev)
		goto out;

	/* write microcode via MSR 0x79 */
	wrmsr(MSR_IA32_UCODE_WRITE,
	      (unsigned long) mc_intel->bits,
	      (unsigned long) mc_intel->bits >> 16 >> 16);

	/* Re-read the revision to confirm the CPU accepted the update. */
	rev = intel_get_microcode_revision();

	if (rev != mc_intel->hdr.rev) {
		pr_err("CPU%d update to revision 0x%x failed\n",
		       cpu_num, mc_intel->hdr.rev);
		return -1;
	}
	pr_info("CPU%d updated to revision 0x%x, date = %04x-%02x-%02x\n",
		cpu_num, rev,
		mc_intel->hdr.date & 0xffff,
		mc_intel->hdr.date >> 24,
		(mc_intel->hdr.date >> 16) & 0xff);

out:
	uci->cpu_sig.rev = rev;
	c->microcode = rev;

	/* Update boot_cpu_data's revision too, if we're on the BSP: */
	if (c->cpu_index == boot_cpu_data.cpu_index)
		boot_cpu_data.microcode = rev;

	return 0;
}
867
/*
 * Parse a buffer of concatenated microcode patches, pick the newest one
 * matching @cpu and install it in uci->mc.  @get_ucode_data abstracts the
 * copy primitive (kernel memcpy vs. copy_from_user), so this serves both
 * the firmware-loader and the char-device paths.
 *
 * Returns UCODE_OK on success, UCODE_NFOUND when nothing matched, or
 * UCODE_ERROR on a malformed buffer.
 */
static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
				int (*get_ucode_data)(void *, const void *, size_t))
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u8 *ucode_ptr = data, *new_mc = NULL, *mc = NULL;
	int new_rev = uci->cpu_sig.rev;
	unsigned int leftover = size;
	enum ucode_state state = UCODE_OK;
	unsigned int curr_mc_size = 0;
	unsigned int csig, cpf;

	while (leftover) {
		struct microcode_header_intel mc_header;
		unsigned int mc_size;

		if (leftover < sizeof(mc_header)) {
			pr_err("error! Truncated header in microcode data file\n");
			break;
		}

		/* Copy just the header first to learn the patch size. */
		if (get_ucode_data(&mc_header, ucode_ptr, sizeof(mc_header)))
			break;

		mc_size = get_totalsize(&mc_header);
		if (!mc_size || mc_size > leftover) {
			pr_err("error! Bad data in microcode data file\n");
			break;
		}

		/* For performance reasons, reuse mc area when possible */
		if (!mc || mc_size > curr_mc_size) {
			vfree(mc);
			mc = vmalloc(mc_size);
			if (!mc)
				break;
			curr_mc_size = mc_size;
		}

		if (get_ucode_data(mc, ucode_ptr, mc_size) ||
		    microcode_sanity_check(mc, 1) < 0) {
			break;
		}

		csig = uci->cpu_sig.sig;
		cpf = uci->cpu_sig.pf;
		if (has_newer_microcode(mc, csig, cpf, new_rev)) {
			/* New best candidate: free the previous, keep this. */
			vfree(new_mc);
			new_rev = mc_header.rev;
			new_mc = mc;
			mc = NULL;	/* trigger new vmalloc */
		}

		ucode_ptr += mc_size;
		leftover -= mc_size;
	}

	/* mc is either NULL or a scratch buffer that lost the comparison. */
	vfree(mc);

	if (leftover) {
		vfree(new_mc);
		state = UCODE_ERROR;
		goto out;
	}

	if (!new_mc) {
		state = UCODE_NFOUND;
		goto out;
	}

	/* Replace the per-CPU copy; ownership of new_mc moves to uci. */
	vfree(uci->mc);
	uci->mc = (struct microcode_intel *)new_mc;

	/*
	 * If early loading microcode is supported, save this mc into
	 * permanent memory. So it will be loaded early when a CPU is hot added
	 * or resumes.
	 */
	save_mc_for_early(new_mc);

	pr_debug("CPU%d found a matching microcode update with version 0x%x (current=0x%x)\n",
		 cpu, new_rev, uci->cpu_sig.rev);
out:
	return state;
}
952
/* Copy callback for the firmware-loader path: plain kernel memcpy. */
static int get_ucode_fw(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
	return 0;
}
958
is_blacklisted(unsigned int cpu)959 static bool is_blacklisted(unsigned int cpu)
960 {
961 struct cpuinfo_x86 *c = &cpu_data(cpu);
962
963 /*
964 * Late loading on model 79 with microcode revision less than 0x0b000021
965 * and LLC size per core bigger than 2.5MB may result in a system hang.
966 * This behavior is documented in item BDF90, #334165 (Intel Xeon
967 * Processor E7-8800/4800 v4 Product Family).
968 */
969 if (c->x86 == 6 &&
970 c->x86_model == 79 &&
971 c->x86_stepping == 0x01 &&
972 llc_size_per_core > 2621440 &&
973 c->microcode < 0x0b000021) {
974 pr_err_once("Erratum BDF90: late loading with revision < 0x0b000021 (0x%x) disabled.\n", c->microcode);
975 pr_err_once("Please consider either early loading through initrd/built-in or a potential BIOS update.\n");
976 return true;
977 }
978
979 return false;
980 }
981
/*
 * Request the "intel-ucode/ff-mm-ss" blob for @cpu from the firmware
 * loader and feed it through generic_load_microcode().
 *
 * Returns UCODE_NFOUND when the CPU is blacklisted for late loading or the
 * firmware file is missing; otherwise whatever the parser reports.
 *
 * Fix: use snprintf() so the name write is bounded by the buffer size.
 */
static enum ucode_state request_microcode_fw(int cpu, struct device *device,
					     bool refresh_fw)
{
	char name[30];
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	const struct firmware *firmware;
	enum ucode_state ret;

	if (is_blacklisted(cpu))
		return UCODE_NFOUND;

	snprintf(name, sizeof(name), "intel-ucode/%02x-%02x-%02x",
		 c->x86, c->x86_model, c->x86_stepping);

	if (request_firmware_direct(&firmware, name, device)) {
		pr_debug("data file %s load failed\n", name);
		return UCODE_NFOUND;
	}

	ret = generic_load_microcode(cpu, (void *)firmware->data,
				     firmware->size, &get_ucode_fw);

	release_firmware(firmware);

	return ret;
}
1008
/*
 * Copy callback for the user-space (char device) path.  copy_from_user()
 * returns the number of bytes NOT copied, so any non-zero result is
 * treated as failure by generic_load_microcode().
 */
static int get_ucode_user(void *to, const void *from, size_t n)
{
	return copy_from_user(to, from, n);
}
1013
1014 static enum ucode_state
request_microcode_user(int cpu,const void __user * buf,size_t size)1015 request_microcode_user(int cpu, const void __user *buf, size_t size)
1016 {
1017 if (is_blacklisted(cpu))
1018 return UCODE_NFOUND;
1019
1020 return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
1021 }
1022
/* Release the per-CPU microcode copy when the CPU is torn down. */
static void microcode_fini_cpu(int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;

	vfree(uci->mc);
	uci->mc = NULL;
}
1030
/* Intel implementation of the arch-independent microcode_ops interface. */
static struct microcode_ops microcode_intel_ops = {
	.request_microcode_user		  = request_microcode_user,
	.request_microcode_fw             = request_microcode_fw,
	.collect_cpu_info                 = collect_cpu_info,
	.apply_microcode                  = apply_microcode_intel,
	.microcode_fini_cpu               = microcode_fini_cpu,
};
1038
/* Last-level cache size per physical core, in bytes (used by is_blacklisted). */
static int __init calc_llc_size_per_core(struct cpuinfo_x86 *c)
{
	u64 llc_size = c->x86_cache_size * 1024ULL;	/* x86_cache_size presumably in KB — TODO confirm */

	do_div(llc_size, c->x86_max_cores);

	return (int)llc_size;
}
1047
init_intel_microcode(void)1048 struct microcode_ops * __init init_intel_microcode(void)
1049 {
1050 struct cpuinfo_x86 *c = &boot_cpu_data;
1051
1052 if (c->x86_vendor != X86_VENDOR_INTEL || c->x86 < 6 ||
1053 cpu_has(c, X86_FEATURE_IA64)) {
1054 pr_err("Intel CPU family 0x%x not supported\n", c->x86);
1055 return NULL;
1056 }
1057
1058 llc_size_per_core = calc_llc_size_per_core(c);
1059
1060 return µcode_intel_ops;
1061 }
1062
1063