Lines Matching +full:0 +full:x86

40 	sld_off = 0,
56 * on CPUs that do not support SLD can cause fireworks, even when writing '0'.
101 if (c->x86 != 6) in probe_xeon_phi_r3mwait()
136 { INTEL_FAM6_KABYLAKE, 0x0B, 0x80 },
137 { INTEL_FAM6_KABYLAKE, 0x0A, 0x80 },
138 { INTEL_FAM6_KABYLAKE, 0x09, 0x80 },
139 { INTEL_FAM6_KABYLAKE_L, 0x0A, 0x80 },
140 { INTEL_FAM6_KABYLAKE_L, 0x09, 0x80 },
141 { INTEL_FAM6_SKYLAKE_X, 0x03, 0x0100013e },
142 { INTEL_FAM6_SKYLAKE_X, 0x04, 0x0200003c },
143 { INTEL_FAM6_BROADWELL, 0x04, 0x28 },
144 { INTEL_FAM6_BROADWELL_G, 0x01, 0x1b },
145 { INTEL_FAM6_BROADWELL_D, 0x02, 0x14 },
146 { INTEL_FAM6_BROADWELL_D, 0x03, 0x07000011 },
147 { INTEL_FAM6_BROADWELL_X, 0x01, 0x0b000025 },
148 { INTEL_FAM6_HASWELL_L, 0x01, 0x21 },
149 { INTEL_FAM6_HASWELL_G, 0x01, 0x18 },
150 { INTEL_FAM6_HASWELL, 0x03, 0x23 },
151 { INTEL_FAM6_HASWELL_X, 0x02, 0x3b },
152 { INTEL_FAM6_HASWELL_X, 0x04, 0x10 },
153 { INTEL_FAM6_IVYBRIDGE_X, 0x04, 0x42a },
155 { INTEL_FAM6_SANDYBRIDGE_X, 0x06, 0x61b },
156 { INTEL_FAM6_SANDYBRIDGE_X, 0x07, 0x712 },
170 if (c->x86 != 6) in bad_spectre_microcode()
173 for (i = 0; i < ARRAY_SIZE(spectre_bad_microcodes); i++) { in bad_spectre_microcode()
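The spectre_bad_microcodes[] rows above pair a family-6 model and stepping with a microcode revision known to ship a broken Spectre (IBRS/IBPB) implementation, and bad_spectre_microcode() walks that table for the running CPU. Below is a minimal user-space sketch of that scan, with plain integers standing in for the INTEL_FAM6_* constants and the kernel's entry struct; the "at or below the listed revision" comparison is how I read the kernel check, so treat it as an approximation rather than the kernel's exact code.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct sku {                        /* stand-in for the kernel's per-entry struct */
        unsigned int model;
        unsigned int stepping;
        unsigned int microcode;     /* blacklisted revision */
};

static const struct sku bad_ucode[] = {
        { 0x9e, 0x0b, 0x80 },       /* shape of a KABYLAKE row above */
        { 0x55, 0x03, 0x0100013e }, /* shape of a SKYLAKE_X row above */
};

/* Family 6 only; flag the CPU when its model/stepping is listed and the
 * running revision is at or below the blacklisted one. */
static bool is_bad_spectre_microcode(unsigned int family, unsigned int model,
                                     unsigned int stepping, unsigned int ucode)
{
        size_t i;

        if (family != 6)
                return false;

        for (i = 0; i < sizeof(bad_ucode) / sizeof(bad_ucode[0]); i++) {
                if (bad_ucode[i].model == model &&
                    bad_ucode[i].stepping == stepping)
                        return ucode <= bad_ucode[i].microcode;
        }
        return false;
}

int main(void)
{
        printf("%d\n", is_bad_spectre_microcode(6, 0x9e, 0x0b, 0x7c)); /* 1: too old */
        printf("%d\n", is_bad_spectre_microcode(6, 0x9e, 0x0b, 0x84)); /* 0: newer */
        return 0;
}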
186 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { in early_init_intel()
188 MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT) > 0) { in early_init_intel()
189 c->cpuid_level = cpuid_eax(0); in early_init_intel()
194 if ((c->x86 == 0xf && c->x86_model >= 0x03) || in early_init_intel()
195 (c->x86 == 0x6 && c->x86_model >= 0x0e)) in early_init_intel()
198 if (c->x86 >= 6 && !cpu_has(c, X86_FEATURE_IA64)) in early_init_intel()
225 if (c->x86 == 6 && c->x86_model == 0x1c && c->x86_stepping <= 2 && in early_init_intel()
226 c->microcode < 0x20e) { in early_init_intel()
235 if (c->x86 == 15 && c->x86_cache_alignment == 64) in early_init_intel()
239 /* CPUID workaround for 0F33/0F34 CPU */ in early_init_intel()
240 if (c->x86 == 0xF && c->x86_model == 0x3 in early_init_intel()
241 && (c->x86_stepping == 0x3 || c->x86_stepping == 0x4)) in early_init_intel()
257 if (c->x86 == 6) { in early_init_intel()
280 if (c->x86 == 6 && c->x86_model < 15) in early_init_intel()
287 if (c->x86 > 6 || (c->x86 == 6 && c->x86_model >= 0xd)) { in early_init_intel()
302 * As a result, boot_cpu_has(X86_FEATURE_PGE) in arch/x86/include/asm/tlbflush.h in early_init_intel()
306 if (c->x86 == 5 && c->x86_model == 9) { in early_init_intel()
311 if (c->cpuid_level >= 0x00000001) { in early_init_intel()
314 cpuid(0x00000001, &eax, &ebx, &ecx, &edx); in early_init_intel()
321 c->x86_coreid_bits = get_count_order((ebx >> 16) & 0xff); in early_init_intel()
330 if (detect_extended_topology_early(c) < 0) in early_init_intel()
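The early_init_intel() lines above clear MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT (bit 22 of IA32_MISC_ENABLE, the BIOS "Limit CPUID Maxval" switch) and, if the bit was actually set, re-read CPUID leaf 0 so c->cpuid_level reflects the real maximum leaf. The sketch below only models that flow: user space cannot write MSRs, so fake_msr_clear_bit() is a hypothetical stand-in that mimics the return-value contract the quoted check relies on (>0 means the bit was set and has now been cleared).

#include <stdint.h>
#include <stdio.h>

/* Maximum standard CPUID leaf: EAX returned by leaf 0. */
static uint32_t cpuid_max_leaf(void)
{
        uint32_t eax, ebx, ecx, edx;

        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(0), "c"(0));
        return eax;
}

/* Hypothetical stand-in for the kernel's msr_clear_bit(). */
static int fake_msr_clear_bit(uint64_t *msr, int bit)
{
        if (!(*msr & (1ULL << bit)))
                return 0;          /* already clear, nothing changed */
        *msr &= ~(1ULL << bit);
        return 1;                  /* bit was set, caller should re-probe CPUID */
}

int main(void)
{
        uint64_t misc_enable = 1ULL << 22; /* pretend BIOS set "Limit CPUID Maxval" */
        uint32_t cpuid_level = 3;          /* the capped value a BIOS would leave */

        if (fake_msr_clear_bit(&misc_enable, 22) > 0)
                cpuid_level = cpuid_max_leaf(); /* re-read the real maximum leaf */

        printf("cpuid_level = 0x%x\n", cpuid_level);
        return 0;
}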
350 boot_cpu_data.x86 == 6 && in ppro_with_ram_bug()
356 return 0; in ppro_with_ram_bug()
368 if (c->x86 == 5 && in intel_smp_check()
392 * have the F0 0F bug, which lets nonprivileged users lock up the in intel_workarounds()
397 if (c->x86 == 5 && c->x86_model < 9) { in intel_workarounds()
402 pr_notice("Intel Pentium with F0 0F bug - workaround enabled.\n"); in intel_workarounds()
412 if ((c->x86<<8 | c->x86_model<<4 | c->x86_stepping) < 0x633) in intel_workarounds()
430 if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_stepping == 1)) { in intel_workarounds()
432 MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT) > 0) { in intel_workarounds()
444 if (boot_cpu_has(X86_FEATURE_APIC) && (c->x86<<8 | c->x86_model<<4) == 0x520 && in intel_workarounds()
445 (c->x86_stepping < 0x6 || c->x86_stepping == 0xb)) in intel_workarounds()
453 switch (c->x86) { in intel_workarounds()
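Several intel_workarounds() checks above fold family, model and stepping into a single integer, four bits per field, and compare it as a whole (the SEP erratum test against 0x633, the 0x520 local-APIC check). A standalone illustration of that packing:

#include <stdio.h>

/* Pack family/model/stepping into one comparable value, four bits per
 * field, exactly like the quoted expressions. */
static unsigned int cpu_sig(unsigned int family, unsigned int model,
                            unsigned int stepping)
{
        return family << 8 | model << 4 | stepping;
}

int main(void)
{
        /* Family 6, model 3, stepping 2 packs to 0x632, below the 0x633 cutoff. */
        unsigned int sig = cpu_sig(6, 3, 2);

        if (sig < 0x633)
                printf("signature 0x%x: would clear X86_FEATURE_SEP\n", sig);
        else
                printf("signature 0x%x: SEP is usable\n", sig);
        return 0;
}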
492 #define MSR_IA32_TME_ACTIVATE 0x982
495 #define TME_ACTIVATE_LOCKED(x) (x & 0x1)
496 #define TME_ACTIVATE_ENABLED(x) (x & 0x2)
498 #define TME_ACTIVATE_POLICY(x) ((x >> 4) & 0xf) /* Bits 7:4 */
499 #define TME_ACTIVATE_POLICY_AES_XTS_128 0
501 #define TME_ACTIVATE_KEYID_BITS(x) ((x >> 32) & 0xf) /* Bits 35:32 */
503 #define TME_ACTIVATE_CRYPTO_ALGS(x) ((x >> 48) & 0xffff) /* Bits 63:48 */
507 #define MKTME_ENABLED 0
515 int keyid_bits = 0, nr_keyids = 0; in detect_tme()
516 static u64 tme_activate_cpu0 = 0; in detect_tme()
523 pr_err_once("x86/tme: configuration is inconsistent between CPUs\n"); in detect_tme()
524 pr_err_once("x86/tme: MKTME is not usable\n"); in detect_tme()
534 pr_info_once("x86/tme: not enabled by BIOS\n"); in detect_tme()
542 pr_info("x86/tme: enabled by BIOS\n"); in detect_tme()
546 pr_warn("x86/tme: Unknown policy is active: %#llx\n", tme_policy); in detect_tme()
550 pr_err("x86/mktme: No known encryption algorithm is supported: %#llx\n", in detect_tme()
558 pr_info_once("x86/mktme: enabled by BIOS\n"); in detect_tme()
559 pr_info_once("x86/mktme: %d KeyIDs available\n", nr_keyids); in detect_tme()
561 pr_info_once("x86/mktme: disabled by BIOS\n"); in detect_tme()
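The TME_ACTIVATE_* macros carve fixed bit fields out of MSR_IA32_TME_ACTIVATE (0x982), and detect_tme() derives the number of MKTME KeyIDs from the KeyID-bits field. The user-space sketch below decodes a made-up MSR value the same way; the example value is invented, and the 2^keyid_bits - 1 KeyID count follows the detect_tme() computation as I read it.

#include <stdint.h>
#include <stdio.h>

/* Field extractors mirroring the MSR_IA32_TME_ACTIVATE layout quoted above. */
#define TME_ACTIVATE_LOCKED(x)      ((x) & 0x1)
#define TME_ACTIVATE_ENABLED(x)     ((x) & 0x2)
#define TME_ACTIVATE_POLICY(x)      (((x) >> 4) & 0xf)     /* bits 7:4 */
#define TME_ACTIVATE_KEYID_BITS(x)  (((x) >> 32) & 0xf)    /* bits 35:32 */
#define TME_ACTIVATE_CRYPTO_ALGS(x) (((x) >> 48) & 0xffff) /* bits 63:48 */

int main(void)
{
        /* Made-up value: locked + enabled, policy 0 (AES-XTS-128), 6 KeyID
         * bits, AES-XTS-128 advertised in the crypto-algs mask. */
        uint64_t tme_activate = 1ULL << 48 | 6ULL << 32 | 0x3;
        int keyid_bits = (int)TME_ACTIVATE_KEYID_BITS(tme_activate);

        printf("locked=%d enabled=%d policy=%d keyid_bits=%d keyids=%d\n",
               (int)TME_ACTIVATE_LOCKED(tme_activate),
               (int)!!TME_ACTIVATE_ENABLED(tme_activate),
               (int)TME_ACTIVATE_POLICY(tme_activate),
               keyid_bits,
               (1 << keyid_bits) - 1); /* nr_keyids, as detect_tme() derives it */
        return 0;
}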
594 this_cpu_write(msr_misc_features_shadow, 0); in init_intel_misc_features()
621 * let's use the legacy cpuid vector 0x1 and 0x4 for topology in init_intel()
635 if ((eax & 0xff) && (((eax>>8) & 0xff) > 1)) in init_intel()
652 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_CLFLUSH) && in init_intel()
656 if (c->x86 == 6 && boot_cpu_has(X86_FEATURE_MWAIT) && in init_intel()
661 if (c->x86 == 15) in init_intel()
663 if (c->x86 == 6) in init_intel()
671 if (c->x86 == 6) { in init_intel()
677 if (l2 == 0) in init_intel()
686 else if (c->x86_stepping == 0 || c->x86_stepping == 5) in init_intel()
700 if (c->x86 == 15) in init_intel()
702 if (c->x86 == 6) in init_intel()
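The "(eax & 0xff) && (((eax>>8) & 0xff) > 1)" test in the init_intel() lines reads like the architectural-perfmon probe: CPUID leaf 0xA reports the perfmon version in EAX[7:0] and the number of general-purpose counters in EAX[15:8]. A user-space sketch under that assumption (the leaf number is inferred, it is not visible in the match):

#include <stdint.h>
#include <stdio.h>

static uint32_t cpuid_eax_leaf(uint32_t leaf)
{
        uint32_t eax, ebx, ecx, edx;

        __asm__ volatile("cpuid"
                         : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                         : "a"(leaf), "c"(0));
        return eax;
}

int main(void)
{
        uint32_t eax = cpuid_eax_leaf(0xa);        /* architectural perfmon leaf */
        unsigned int version  = eax & 0xff;        /* EAX[7:0]  */
        unsigned int counters = (eax >> 8) & 0xff; /* EAX[15:8] */

        /* Same shape as the quoted test: non-zero version and more than one
         * general-purpose counter. */
        if (version && counters > 1)
                printf("arch perfmon v%u with %u GP counters\n", version, counters);
        else
                printf("no usable architectural perfmon\n");
        return 0;
}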
733 if ((c->x86 == 6) && (c->x86_model == 11) && (size == 0)) in intel_size_cache()
740 if ((c->x86 == 5) && (c->x86_model == 9)) in intel_size_cache()
746 #define TLB_INST_4K 0x01
747 #define TLB_INST_4M 0x02
748 #define TLB_INST_2M_4M 0x03
750 #define TLB_INST_ALL 0x05
751 #define TLB_INST_1G 0x06
753 #define TLB_DATA_4K 0x11
754 #define TLB_DATA_4M 0x12
755 #define TLB_DATA_2M_4M 0x13
756 #define TLB_DATA_4K_4M 0x14
758 #define TLB_DATA_1G 0x16
760 #define TLB_DATA0_4K 0x21
761 #define TLB_DATA0_4M 0x22
762 #define TLB_DATA0_2M_4M 0x23
764 #define STLB_4K 0x41
765 #define STLB_4K_2M 0x42
768 { 0x01, TLB_INST_4K, 32, " TLB_INST 4 KByte pages, 4-way set associative" },
769 { 0x02, TLB_INST_4M, 2, " TLB_INST 4 MByte pages, full associative" },
770 { 0x03, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way set associative" },
771 { 0x04, TLB_DATA_4M, 8, " TLB_DATA 4 MByte pages, 4-way set associative" },
772 { 0x05, TLB_DATA_4M, 32, " TLB_DATA 4 MByte pages, 4-way set associative" },
773 { 0x0b, TLB_INST_4M, 4, " TLB_INST 4 MByte pages, 4-way set associative" },
774 { 0x4f, TLB_INST_4K, 32, " TLB_INST 4 KByte pages" },
775 { 0x50, TLB_INST_ALL, 64, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
776 { 0x51, TLB_INST_ALL, 128, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
777 { 0x52, TLB_INST_ALL, 256, " TLB_INST 4 KByte and 2-MByte or 4-MByte pages" },
778 { 0x55, TLB_INST_2M_4M, 7, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
779 { 0x56, TLB_DATA0_4M, 16, " TLB_DATA0 4 MByte pages, 4-way set associative" },
780 { 0x57, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, 4-way associative" },
781 { 0x59, TLB_DATA0_4K, 16, " TLB_DATA0 4 KByte pages, fully associative" },
782 { 0x5a, TLB_DATA0_2M_4M, 32, " TLB_DATA0 2-MByte or 4 MByte pages, 4-way set associative" },
783 { 0x5b, TLB_DATA_4K_4M, 64, " TLB_DATA 4 KByte and 4 MByte pages" },
784 { 0x5c, TLB_DATA_4K_4M, 128, " TLB_DATA 4 KByte and 4 MByte pages" },
785 { 0x5d, TLB_DATA_4K_4M, 256, " TLB_DATA 4 KByte and 4 MByte pages" },
786 { 0x61, TLB_INST_4K, 48, " TLB_INST 4 KByte pages, full associative" },
787 { 0x63, TLB_DATA_1G, 4, " TLB_DATA 1 GByte pages, 4-way set associative" },
788 { 0x6b, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 8-way associative" },
789 { 0x6c, TLB_DATA_2M_4M, 128, " TLB_DATA 2 MByte or 4 MByte pages, 8-way associative" },
790 { 0x6d, TLB_DATA_1G, 16, " TLB_DATA 1 GByte pages, fully associative" },
791 { 0x76, TLB_INST_2M_4M, 8, " TLB_INST 2-MByte or 4-MByte pages, fully associative" },
792 { 0xb0, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 4-way set associative" },
793 { 0xb1, TLB_INST_2M_4M, 4, " TLB_INST 2M pages, 4-way, 8 entries or 4M pages, 4-way entries" },
794 { 0xb2, TLB_INST_4K, 64, " TLB_INST 4KByte pages, 4-way set associative" },
795 { 0xb3, TLB_DATA_4K, 128, " TLB_DATA 4 KByte pages, 4-way set associative" },
796 { 0xb4, TLB_DATA_4K, 256, " TLB_DATA 4 KByte pages, 4-way associative" },
797 { 0xb5, TLB_INST_4K, 64, " TLB_INST 4 KByte pages, 8-way set associative" },
798 { 0xb6, TLB_INST_4K, 128, " TLB_INST 4 KByte pages, 8-way set associative" },
799 { 0xba, TLB_DATA_4K, 64, " TLB_DATA 4 KByte pages, 4-way associative" },
800 { 0xc0, TLB_DATA_4K_4M, 8, " TLB_DATA 4 KByte and 4 MByte pages, 4-way associative" },
801 { 0xc1, STLB_4K_2M, 1024, " STLB 4 KByte and 2 MByte pages, 8-way associative" },
802 { 0xc2, TLB_DATA_2M_4M, 16, " TLB_DATA 2 MByte/4MByte pages, 4-way associative" },
803 { 0xca, STLB_4K, 512, " STLB 4 KByte pages, 4-way associative" },
804 { 0x00, 0, 0 }
810 if (desc == 0) in intel_tlb_lookup()
814 for (k = 0; intel_tlb_table[k].descriptor != desc && in intel_tlb_lookup()
815 intel_tlb_table[k].descriptor != 0; k++) in intel_tlb_lookup()
818 if (intel_tlb_table[k].tlb_type == 0) in intel_tlb_lookup()
904 n = cpuid_eax(2) & 0xFF; in intel_detect_tlb()
906 for (i = 0 ; i < n ; i++) { in intel_detect_tlb()
907 cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]); in intel_detect_tlb()
910 for (j = 0 ; j < 3 ; j++) in intel_detect_tlb()
912 regs[j] = 0; in intel_detect_tlb()
914 /* Byte 0 is level count, not a descriptor */ in intel_detect_tlb()
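intel_detect_tlb() runs CPUID leaf 2 the number of times given in AL, discards any register with bit 31 set plus byte 0 of EAX (the level count), and feeds every remaining descriptor byte to intel_tlb_lookup(), which scans intel_tlb_table[] until the 0x00 terminator. Below is a user-space sketch of the same walk against a trimmed copy of the table (three rows taken from the listing above); unlike the quoted loop, it checks bit 31 in all four registers.

#include <stdint.h>
#include <stdio.h>

struct tlb_desc {                /* trimmed shape of intel_tlb_table[] */
        unsigned char descriptor;
        const char *info;
};

static const struct tlb_desc tlb_table[] = {
        { 0x01, "TLB_INST 4 KByte pages, 4-way set associative, 32 entries" },
        { 0x63, "TLB_DATA 1 GByte pages, 4-way set associative, 4 entries" },
        { 0xc1, "STLB 4 KByte and 2 MByte pages, 8-way associative, 1024 entries" },
        { 0x00, NULL }           /* terminator, as in the kernel table */
};

static void lookup(unsigned char desc)
{
        int k;

        if (desc == 0)
                return;
        for (k = 0; tlb_table[k].descriptor != 0; k++)
                if (tlb_table[k].descriptor == desc)
                        printf("0x%02x: %s\n", desc, tlb_table[k].info);
}

static void cpuid_leaf2(uint32_t regs[4])
{
        __asm__ volatile("cpuid"
                         : "=a"(regs[0]), "=b"(regs[1]), "=c"(regs[2]), "=d"(regs[3])
                         : "a"(2), "c"(0));
}

int main(void)
{
        uint32_t regs[4];
        unsigned char *desc = (unsigned char *)regs;
        int i, j, n;

        cpuid_leaf2(regs);
        n = regs[0] & 0xff;              /* AL: number of times to run leaf 2 */

        for (i = 0; i < n; i++) {
                cpuid_leaf2(regs);
                for (j = 0; j < 4; j++)  /* bit 31 set: register has no descriptors */
                        if (regs[j] & (1u << 31))
                                regs[j] = 0;
                for (j = 1; j < 16; j++) /* byte 0 is the level count, skip it */
                        lookup(desc[j]);
        }
        return 0;
}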
927 [0] = "486 DX-25/33",
940 [0] = "Pentium 60/66 A-step",
952 [0] = "Pentium Pro A-step",
966 [0] = "Pentium 4 (Unknown)",
986 #define pr_fmt(fmt) "x86/split lock detection: " fmt
1033 if (ret >= 0) { in split_lock_setup()
1034 for (i = 0; i < ARRAY_SIZE(sld_options); i++) { in split_lock_setup()
1088 pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n", in split_lock_warn()
1107 pr_warn_once("#AC: %s/%d %s split_lock trap at address: 0x%lx\n", in handle_guest_split_lock()
1111 current->thread.error_code = 0; in handle_guest_split_lock()
1145 * - 0: CPU models that are known to have the per-core split-lock detection
1152 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, 0),
1153 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_L, 0),
1154 X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, 0),
1179 case 0: in cpu_set_core_cap_bits()
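The split-lock lines at the end cover the sld_off/sld_warn/sld_fatal states, the split_lock_detect= command-line parsing in split_lock_setup(), and the model table whose driver_data value steers cpu_set_core_cap_bits() (case 0: models known to have per-core split-lock detection). A minimal sketch of just the option-matching step; the option strings and the sld_warn default are assumptions for the sketch, since the matched lines only show the loop over sld_options[].

#include <stdio.h>
#include <string.h>

enum split_lock_detect_state { sld_off = 0, sld_warn, sld_fatal };

/* Hypothetical option table with the same shape as the kernel's sld_options[]. */
static const struct {
        const char *option;
        enum split_lock_detect_state state;
} sld_options[] = {
        { "off",   sld_off   },
        { "warn",  sld_warn  },
        { "fatal", sld_fatal },
};

static enum split_lock_detect_state parse_split_lock_detect(const char *arg)
{
        enum split_lock_detect_state state = sld_warn; /* assumed default */
        size_t i;

        for (i = 0; i < sizeof(sld_options) / sizeof(sld_options[0]); i++) {
                if (strcmp(arg, sld_options[i].option) == 0) {
                        state = sld_options[i].state;
                        break;
                }
        }
        return state;
}

int main(void)
{
        printf("split_lock_detect=fatal -> state %d\n",
               parse_split_lock_detect("fatal"));
        return 0;
}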