1 // SPDX-License-Identifier: GPL-2.0+
2 /*
3  * Hygon Processor Support for Linux
4  *
5  * Copyright (C) 2018 Chengdu Haiguang IC Design Co., Ltd.
6  *
7  * Author: Pu Wen <puwen@hygon.cn>
8  */
9 #include <linux/io.h>
10 
11 #include <asm/cpu.h>
12 #include <asm/smp.h>
13 #include <asm/numa.h>
14 #include <asm/cacheinfo.h>
15 #include <asm/spec-ctrl.h>
16 #include <asm/delay.h>
17 #ifdef CONFIG_X86_64
18 # include <asm/set_memory.h>
19 #endif
20 
21 #include "cpu.h"
22 
23 #define APICID_SOCKET_ID_BIT 6
24 
25 /*
26  * nodes_per_socket: Stores the number of nodes per socket.
27  * Refer to CPUID Fn8000_001E_ECX Node Identifiers[10:8]
28  */
29 static u32 nodes_per_socket = 1;
30 
31 #ifdef CONFIG_NUMA
32 /*
33  * To workaround broken NUMA config.  Read the comment in
34  * srat_detect_node().
35  */
nearby_node(int apicid)36 static int nearby_node(int apicid)
37 {
38 	int i, node;
39 
40 	for (i = apicid - 1; i >= 0; i--) {
41 		node = __apicid_to_node[i];
42 		if (node != NUMA_NO_NODE && node_online(node))
43 			return node;
44 	}
45 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
46 		node = __apicid_to_node[i];
47 		if (node != NUMA_NO_NODE && node_online(node))
48 			return node;
49 	}
50 	return first_node(node_online_map); /* Shouldn't happen */
51 }
52 #endif
53 
hygon_get_topology_early(struct cpuinfo_x86 * c)54 static void hygon_get_topology_early(struct cpuinfo_x86 *c)
55 {
56 	if (cpu_has(c, X86_FEATURE_TOPOEXT))
57 		smp_num_siblings = ((cpuid_ebx(0x8000001e) >> 8) & 0xff) + 1;
58 }
59 
60 /*
61  * Fixup core topology information for
62  * (1) Hygon multi-node processors
63  *     Assumption: Number of cores in each internal node is the same.
64  * (2) Hygon processors supporting compute units
65  */
/*
 * Fill in die/core/socket topology for the current CPU.
 *
 * Preferred source is CPUID leaf 0x8000001e (TOPOEXT); the fallback is
 * the NodeId MSR on parts that have it.  Runs after smp_num_siblings
 * and x86_max_cores have been established.
 */
static void hygon_get_topology(struct cpuinfo_x86 *c)
{
	int cpu = smp_processor_id();

	/* get information required for multi-node processors */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		int err;
		u32 eax, ebx, ecx, edx;

		cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);

		/* ECX[7:0]: node (die) ID of this logical CPU */
		c->cpu_die_id  = ecx & 0xff;

		/* EBX[7:0]: compute unit / core ID */
		c->cpu_core_id = ebx & 0xff;

		/* Convert the thread count into a core count. */
		if (smp_num_siblings > 1)
			c->x86_max_cores /= smp_num_siblings;

		/*
		 * In case leaf B is available, use it to derive
		 * topology information.
		 */
		err = detect_extended_topology(c);
		if (!err)
			c->x86_coreid_bits = get_count_order(c->x86_max_cores);

		/*
		 * Socket ID is ApicId[6] for the processors with model <= 0x3
		 * when running on host.
		 */
		if (!boot_cpu_has(X86_FEATURE_HYPERVISOR) && c->x86_model <= 0x3)
			c->phys_proc_id = c->apicid >> APICID_SOCKET_ID_BIT;

		cacheinfo_hygon_init_llc_id(c, cpu);
	} else if (cpu_has(c, X86_FEATURE_NODEID_MSR)) {
		u64 value;

		/* NodeId MSR [2:0]: node ID of this CPU */
		rdmsrl(MSR_FAM10H_NODE_ID, value);
		c->cpu_die_id = value & 7;

		per_cpu(cpu_llc_id, cpu) = c->cpu_die_id;
	} else
		return;

	/* Multi-node parts are flagged like AMD dual-core-module CPUs. */
	if (nodes_per_socket > 1)
		set_cpu_cap(c, X86_FEATURE_AMD_DCM);
}
113 
114 /*
115  * On Hygon setup the lower bits of the APIC id distinguish the cores.
116  * Assumes number of cores is a power of two.
117  */
hygon_detect_cmp(struct cpuinfo_x86 * c)118 static void hygon_detect_cmp(struct cpuinfo_x86 *c)
119 {
120 	unsigned int bits;
121 	int cpu = smp_processor_id();
122 
123 	bits = c->x86_coreid_bits;
124 	/* Low order bits define the core id (index of core in socket) */
125 	c->cpu_core_id = c->initial_apicid & ((1 << bits)-1);
126 	/* Convert the initial APIC ID into the socket ID */
127 	c->phys_proc_id = c->initial_apicid >> bits;
128 	/* use socket ID also for last level cache */
129 	per_cpu(cpu_llc_id, cpu) = c->cpu_die_id = c->phys_proc_id;
130 }
131 
/*
 * Bind this CPU to a NUMA node, repairing broken firmware-provided
 * SRAT/APIC mappings when necessary.  No-op without CONFIG_NUMA.
 */
static void srat_detect_node(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_NUMA
	int cpu = smp_processor_id();
	int node;
	unsigned int apicid = c->apicid;

	/* Start from the firmware mapping, fall back to the LLC ID. */
	node = numa_cpu_node(cpu);
	if (node == NUMA_NO_NODE)
		node = per_cpu(cpu_llc_id, cpu);

	/*
	 * On multi-fabric platform (e.g. Numascale NumaChip) a
	 * platform-specific handler needs to be called to fixup some
	 * IDs of the CPU.
	 */
	if (x86_cpuinit.fixup_cpu_id)
		x86_cpuinit.fixup_cpu_id(c, node);

	if (!node_online(node)) {
		/*
		 * Two possibilities here:
		 *
		 * - The CPU is missing memory and no node was created.  In
		 *   that case try picking one from a nearby CPU.
		 *
		 * - The APIC IDs differ from the HyperTransport node IDs.
		 *   Assume they are all increased by a constant offset, but
		 *   in the same order as the HT nodeids.  If that doesn't
		 *   result in a usable node fall back to the path for the
		 *   previous case.
		 *
		 * This workaround operates directly on the mapping between
		 * APIC ID and NUMA node, assuming certain relationship
		 * between APIC ID, HT node ID and NUMA topology.  As going
		 * through CPU mapping may alter the outcome, directly
		 * access __apicid_to_node[].
		 */
		int ht_nodeid = c->initial_apicid;

		if (__apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
			node = __apicid_to_node[ht_nodeid];
		/* Pick a nearby node */
		if (!node_online(node))
			node = nearby_node(apicid);
	}
	numa_set_node(cpu, node);
#endif
}
181 
/*
 * Early multi-core detection: read the core count and the APIC core ID
 * width from CPUID leaf 0x80000008.  No-op on non-SMP builds.
 */
static void early_init_hygon_mc(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_SMP
	unsigned int ecx, shift;

	/* Leaf 0x80000008 must exist for core-count information. */
	if (c->extended_cpuid_level < 0x80000008)
		return;

	ecx = cpuid_ecx(0x80000008);

	/* ECX[7:0] is the number of cores minus one. */
	c->x86_max_cores = (ecx & 0xff) + 1;

	/* ECX[15:12]: APIC ID core field width; zero means "compute it". */
	shift = (ecx >> 12) & 0xf;
	if (shift == 0)
		while ((1 << shift) < c->x86_max_cores)
			shift++;

	c->x86_coreid_bits = shift;
#endif
}
207 
/*
 * One-time setup run on the boot CPU only: TSEG mapping split, TSC
 * sanity warning, MWAITX delay, nodes_per_socket detection and SSBD
 * mitigation plumbing.
 */
static void bsp_init_hygon(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_64
	unsigned long long tseg;

	/*
	 * Split up direct mapping around the TSEG SMM area.
	 * Don't do it for gbpages because there seems very little
	 * benefit in doing so.
	 */
	if (!rdmsrl_safe(MSR_K8_TSEG_ADDR, &tseg)) {
		unsigned long pfn = tseg >> PAGE_SHIFT;

		pr_debug("tseg: %010llx\n", tseg);
		if (pfn_range_is_mapped(pfn, pfn + 1))
			set_memory_4k((unsigned long)__va(tseg), 1);
	}
#endif

	/* HWCR bit 24 must be set for a constant TSC to actually tick. */
	if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
		u64 val;

		rdmsrl(MSR_K7_HWCR, val);
		if (!(val & BIT(24)))
			pr_warn(FW_BUG "TSC doesn't count with P0 frequency!\n");
	}

	/* Use MWAITX/MONITORX-based delay loops when available. */
	if (cpu_has(c, X86_FEATURE_MWAITX))
		use_mwaitx_delay();

	/*
	 * Detect nodes_per_socket: CPUID Fn8000_001E ECX[10:8] when
	 * TOPOEXT is present, otherwise the NodeId MSR [5:3].
	 */
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		u32 ecx;

		ecx = cpuid_ecx(0x8000001e);
		nodes_per_socket = ((ecx >> 8) & 7) + 1;
	} else if (boot_cpu_has(X86_FEATURE_NODEID_MSR)) {
		u64 value;

		rdmsrl(MSR_FAM10H_NODE_ID, value);
		nodes_per_socket = ((value >> 3) & 7) + 1;
	}

	/* Fall back to LS_CFG-based SSBD when no architectural SSBD. */
	if (!boot_cpu_has(X86_FEATURE_AMD_SSBD) &&
	    !boot_cpu_has(X86_FEATURE_VIRT_SSBD)) {
		/*
		 * Try to cache the base value so further operations can
		 * avoid RMW. If that faults, do not enable SSBD.
		 */
		if (!rdmsrl_safe(MSR_AMD64_LS_CFG, &x86_amd_ls_cfg_base)) {
			setup_force_cpu_cap(X86_FEATURE_LS_CFG_SSBD);
			setup_force_cpu_cap(X86_FEATURE_SSBD);
			x86_amd_ls_cfg_ssbd_mask = 1ULL << 10;
		}
	}
}
263 
/*
 * Early per-CPU init: detect core topology width, read the microcode
 * patch level and set the feature flags that later init code and the
 * scheduler rely on.
 */
static void early_init_hygon(struct cpuinfo_x86 *c)
{
	u32 dummy;

	early_init_hygon_mc(c);

	set_cpu_cap(c, X86_FEATURE_K8);

	/* Cache the microcode patch level; ignore a faulting read. */
	rdmsr_safe(MSR_AMD64_PATCH_LEVEL, &c->microcode, &dummy);

	/*
	 * c->x86_power is 8000_0007 edx. Bit 8 is TSC runs at constant rate
	 * with P/T states and does not stop in deep C-states
	 */
	if (c->x86_power & (1 << 8)) {
		set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
		set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
	}

	/* Bit 12 of 8000_0007 edx is accumulated power mechanism. */
	if (c->x86_power & BIT(12))
		set_cpu_cap(c, X86_FEATURE_ACC_POWER);

#ifdef CONFIG_X86_64
	set_cpu_cap(c, X86_FEATURE_SYSCALL32);
#endif

#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
	/*
	 * ApicID can always be treated as an 8-bit value for Hygon APIC So, we
	 * can safely set X86_FEATURE_EXTD_APICID unconditionally.
	 */
	if (boot_cpu_has(X86_FEATURE_APIC))
		set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
#endif

	/*
	 * This is only needed to tell the kernel whether to use VMCALL
	 * and VMMCALL.  VMMCALL is never executed except under virt, so
	 * we can set it unconditionally.
	 */
	set_cpu_cap(c, X86_FEATURE_VMMCALL);

	hygon_get_topology_early(c);
}
309 
/*
 * Main per-CPU init hook.  Ordering matters: topology detection
 * (hygon_detect_cmp/hygon_get_topology) must follow cache size
 * detection and precede the NUMA node binding in srat_detect_node().
 */
static void init_hygon(struct cpuinfo_x86 *c)
{
	early_init_hygon(c);

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	set_cpu_cap(c, X86_FEATURE_REP_GOOD);

	/* get apicid instead of initial apic id from cpuid */
	c->apicid = hard_smp_processor_id();

	/*
	 * XXX someone from Hygon needs to confirm this DTRT
	 *
	init_spectral_chicken(c);
	 */

	set_cpu_cap(c, X86_FEATURE_ZEN);
	set_cpu_cap(c, X86_FEATURE_CPB);

	cpu_detect_cache_sizes(c);

	hygon_detect_cmp(c);
	hygon_get_topology(c);
	srat_detect_node(c);

	init_hygon_cacheinfo(c);

	if (cpu_has(c, X86_FEATURE_XMM2)) {
		/*
		 * Use LFENCE for execution serialization.  On families which
		 * don't have that MSR, LFENCE is already serializing.
		 * msr_set_bit() uses the safe accessors, too, even if the MSR
		 * is not present.
		 */
		msr_set_bit(MSR_AMD64_DE_CFG,
			    MSR_AMD64_DE_CFG_LFENCE_SERIALIZE_BIT);

		/* A serializing LFENCE stops RDTSC speculation */
		set_cpu_cap(c, X86_FEATURE_LFENCE_RDTSC);
	}

	/*
	 * Hygon processors have APIC timer running in deep C states.
	 */
	set_cpu_cap(c, X86_FEATURE_ARAT);

	/* Hygon CPUs don't reset SS attributes on SYSRET, Xen does. */
	if (!cpu_has(c, X86_FEATURE_XENPV))
		set_cpu_bug(c, X86_BUG_SYSRET_SS_ATTRS);

	check_null_seg_clears_base(c);
}
367 
cpu_detect_tlb_hygon(struct cpuinfo_x86 * c)368 static void cpu_detect_tlb_hygon(struct cpuinfo_x86 *c)
369 {
370 	u32 ebx, eax, ecx, edx;
371 	u16 mask = 0xfff;
372 
373 	if (c->extended_cpuid_level < 0x80000006)
374 		return;
375 
376 	cpuid(0x80000006, &eax, &ebx, &ecx, &edx);
377 
378 	tlb_lld_4k[ENTRIES] = (ebx >> 16) & mask;
379 	tlb_lli_4k[ENTRIES] = ebx & mask;
380 
381 	/* Handle DTLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
382 	if (!((eax >> 16) & mask))
383 		tlb_lld_2m[ENTRIES] = (cpuid_eax(0x80000005) >> 16) & 0xff;
384 	else
385 		tlb_lld_2m[ENTRIES] = (eax >> 16) & mask;
386 
387 	/* a 4M entry uses two 2M entries */
388 	tlb_lld_4m[ENTRIES] = tlb_lld_2m[ENTRIES] >> 1;
389 
390 	/* Handle ITLB 2M and 4M sizes, fall back to L1 if L2 is disabled */
391 	if (!(eax & mask)) {
392 		cpuid(0x80000005, &eax, &ebx, &ecx, &edx);
393 		tlb_lli_2m[ENTRIES] = eax & 0xff;
394 	} else
395 		tlb_lli_2m[ENTRIES] = eax & mask;
396 
397 	tlb_lli_4m[ENTRIES] = tlb_lli_2m[ENTRIES] >> 1;
398 }
399 
/* Hygon vendor hooks, wired into the common x86 CPU identification code. */
static const struct cpu_dev hygon_cpu_dev = {
	.c_vendor	= "Hygon",
	.c_ident	= { "HygonGenuine" },
	.c_early_init   = early_init_hygon,
	.c_detect_tlb	= cpu_detect_tlb_hygon,
	.c_bsp_init	= bsp_init_hygon,
	.c_init		= init_hygon,
	.c_x86_vendor	= X86_VENDOR_HYGON,
};

cpu_dev_register(hygon_cpu_dev);
411