• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 #include <string.h>
2 
3 #include <cpuinfo/internal-api.h>
4 #include <cpuinfo/log.h>
5 #include <linux/api.h>
6 #include <riscv/linux/api.h>
7 
/* Global ISA structure holding the extensions supported by this system;
 * populated from hwcap during cpuinfo_riscv_linux_init(). */
struct cpuinfo_riscv_isa cpuinfo_isa;
10 
/* Helper to test flag bits while keeping operator precedence explicit.
 * Returns true iff every bit set in 'mask' is also set in 'flags'. */
static inline bool bitmask_all(uint32_t flags, uint32_t mask) {
	/* Equivalent to (flags & mask) == mask: no masked bit may be missing. */
	return (~flags & mask) == 0;
}
15 
compare_riscv_linux_processors(const void * a,const void * b)16 static int compare_riscv_linux_processors(const void* a, const void* b) {
17 	/**
18 	 * For our purposes, it is only relevant that the list is sorted by
19 	 * micro-architecture, so the nature of ordering is irrelevant.
20 	 */
21 	return ((const struct cpuinfo_riscv_linux_processor*)a)->core.uarch -
22 		((const struct cpuinfo_riscv_linux_processor*)b)->core.uarch;
23 }
24 
25 /**
26  * Parses the core cpus list for each processor. This function is called once
27  * per-processor, with the IDs of all other processors in the core list.
28  *
29  * The 'processor_[start|count]' are populated in the processor's 'core'
30  * attribute, with 'start' being the smallest ID in the core list.
31  *
32  * The 'core_leader_id' of each processor is set to the smallest ID in it's
33  * cluster CPU list.
34  *
35  * Precondition: The element in the 'processors' list must be initialized with
36  * their 'core_leader_id' to their index in the list.
37 
38  * E.g. processors[0].core_leader_id = 0.
39  */
core_cpus_parser(uint32_t processor,uint32_t core_cpus_start,uint32_t core_cpus_end,struct cpuinfo_riscv_linux_processor * processors)40 static bool core_cpus_parser(
41 	uint32_t processor,
42 	uint32_t core_cpus_start,
43 	uint32_t core_cpus_end,
44 	struct cpuinfo_riscv_linux_processor* processors) {
45 	uint32_t processor_start = UINT32_MAX;
46 	uint32_t processor_count = 0;
47 
48 	/* If the processor already has a leader, use it. */
49 	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER)) {
50 		processor_start = processors[processor].core_leader_id;
51 	}
52 
53 	for (size_t core_cpu = core_cpus_start; core_cpu < core_cpus_end; core_cpu++) {
54 		if (!bitmask_all(processors[core_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
55 			continue;
56 		}
57 		/**
58 		 * The first valid processor observed is the smallest ID in the
59 		 * list that attaches to this core.
60 		 */
61 		if (processor_start == UINT32_MAX) {
62 			processor_start = core_cpu;
63 		}
64 		processors[core_cpu].core_leader_id = processor_start;
65 		processor_count++;
66 	}
67 	/**
68 	 * If the cluster flag has not been set, assign the processor start. If
69 	 * it has been set, only apply the processor start if it's less than the
70 	 * held value. This can happen if the callback is invoked twice:
71 	 *
72 	 * e.g. core_cpu_list=1,10-12
73 	 */
74 	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CORE_CLUSTER) ||
75 	    processors[processor].core.processor_start > processor_start) {
76 		processors[processor].core.processor_start = processor_start;
77 		processors[processor].core_leader_id = processor_start;
78 	}
79 	processors[processor].core.processor_count += processor_count;
80 	processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_CLUSTER;
81 	/* The parser has failed only if no processors were found. */
82 	return processor_count != 0;
83 }
84 
/**
 * Parses the cluster cpu list for each processor. This function is called once
 * per-processor, with the IDs of all other processors in the cluster.
 *
 * The 'processor_[start|count]' and 'core_count' are accumulated in the
 * processor's 'cluster' attribute, with 'start' being the smallest ID in the
 * cluster CPU list.
 *
 * The 'cluster_leader_id' of each processor is set to the smallest ID in its
 * cluster CPU list.
 *
 * Precondition: The elements in the 'processors' list must be initialized
 * with their 'cluster_leader_id' set to their index in the list.
 * E.g. processors[0].cluster_leader_id = 0.
 */
static bool cluster_cpus_parser(
	uint32_t processor,
	uint32_t cluster_cpus_start,
	uint32_t cluster_cpus_end,
	struct cpuinfo_riscv_linux_processor* processors) {
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t core_count = 0;

	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER)) {
		processor_start = processors[processor].cluster_leader_id;
	}

	for (size_t cluster_cpu = cluster_cpus_start; cluster_cpu < cluster_cpus_end; cluster_cpu++) {
		if (!bitmask_all(processors[cluster_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this cluster.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = cluster_cpu;
		}
		processors[cluster_cpu].cluster_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent its core if it is the
		 * assigned leader of that core.
		 */
		if (processors[cluster_cpu].core_leader_id == cluster_cpu) {
			core_count++;
		}
	}
	/**
	 * If the cluster flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. cluster_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER) ||
	    processors[processor].cluster.processor_start > processor_start) {
		processors[processor].cluster.processor_start = processor_start;
		processors[processor].cluster.core_start = processor_start;
		processors[processor].cluster.cluster_id = processor_start;
		processors[processor].cluster_leader_id = processor_start;
	}
	processors[processor].cluster.processor_count += processor_count;
	processors[processor].cluster.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_CLUSTER_CLUSTER;
	/* Unlike core parsing, an empty cluster list is not treated as failure. */
	return true;
}
150 
/**
 * Parses the package cpus list for each processor. This function is called once
 * per-processor, with the IDs of all other processors in the package list.
 *
 * The 'processor_[start|count]', 'cluster_count' and 'core_count' are
 * accumulated in the processor's 'package' attribute, with 'start' being the
 * smallest ID in the package list.
 *
 * The 'package_leader_id' of each processor is set to the smallest ID in its
 * package CPU list.
 *
 * Precondition: The elements in the 'processors' list must be initialized
 * with their 'package_leader_id' set to their index in the list.
 * E.g. processors[0].package_leader_id = 0.
 */
static bool package_cpus_parser(
	uint32_t processor,
	uint32_t package_cpus_start,
	uint32_t package_cpus_end,
	struct cpuinfo_riscv_linux_processor* processors) {
	uint32_t processor_start = UINT32_MAX;
	uint32_t processor_count = 0;
	uint32_t cluster_count = 0;
	uint32_t core_count = 0;

	/* If the processor already has a leader, use it. */
	if (bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER)) {
		processor_start = processors[processor].package_leader_id;
	}

	for (size_t package_cpu = package_cpus_start; package_cpu < package_cpus_end; package_cpu++) {
		if (!bitmask_all(processors[package_cpu].flags, CPUINFO_LINUX_FLAG_VALID)) {
			continue;
		}
		/**
		 * The first valid processor observed is the smallest ID in the
		 * list that attaches to this package.
		 */
		if (processor_start == UINT32_MAX) {
			processor_start = package_cpu;
		}
		processors[package_cpu].package_leader_id = processor_start;
		processor_count++;
		/**
		 * A processor should only represent its core if it is the
		 * assigned leader of that core, and similarly for its cluster.
		 */
		if (processors[package_cpu].cluster_leader_id == package_cpu) {
			cluster_count++;
		}
		if (processors[package_cpu].core_leader_id == package_cpu) {
			core_count++;
		}
	}
	/**
	 * If the package flag has not been set, assign the processor start. If
	 * it has been set, only apply the processor start if it's less than the
	 * held value. This can happen if the callback is invoked twice:
	 *
	 * e.g. package_cpus_list=1,10-12
	 */
	if (!bitmask_all(processors[processor].flags, CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER) ||
	    processors[processor].package.processor_start > processor_start) {
		processors[processor].package.processor_start = processor_start;
		processors[processor].package.cluster_start = processor_start;
		processors[processor].package.core_start = processor_start;
		processors[processor].package_leader_id = processor_start;
	}
	processors[processor].package.processor_count += processor_count;
	processors[processor].package.cluster_count += cluster_count;
	processors[processor].package.core_count += core_count;
	processors[processor].flags |= CPUINFO_LINUX_FLAG_PACKAGE_CLUSTER;
	/* Package parsing never signals failure to the caller. */
	return true;
}
224 
225 /* Initialization for the RISC-V Linux system. */
cpuinfo_riscv_linux_init(void)226 void cpuinfo_riscv_linux_init(void) {
227 	struct cpuinfo_riscv_linux_processor* riscv_linux_processors = NULL;
228 	struct cpuinfo_processor* processors = NULL;
229 	struct cpuinfo_package* packages = NULL;
230 	struct cpuinfo_cluster* clusters = NULL;
231 	struct cpuinfo_core* cores = NULL;
232 	struct cpuinfo_uarch_info* uarchs = NULL;
233 	const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
234 	const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
235 	uint32_t* linux_cpu_to_uarch_index_map = NULL;
236 
237 	/**
238 	 * The interesting set of processors are the number of 'present'
239 	 * processors on the system. There may be more 'possible' processors,
240 	 * but processor information cannot be gathered on non-present
241 	 * processors.
242 	 *
243 	 * Note: For SoCs, it is largely the case that all processors are known
244 	 * at boot and no processors are hotplugged at runtime, so the
245 	 * 'present' and 'possible' list is often the same.
246 	 *
247 	 * Note: This computes the maximum processor ID of the 'present'
248 	 * processors. It is not a count of the number of processors on the
249 	 * system.
250 	 */
251 	const uint32_t max_processor_id =
252 		1 + cpuinfo_linux_get_max_present_processor(cpuinfo_linux_get_max_processors_count());
253 	if (max_processor_id == 0) {
254 		cpuinfo_log_error("failed to discover any processors");
255 		return;
256 	}
257 
258 	/**
259 	 * Allocate space to store all processor information. This array is
260 	 * sized to the max processor ID as opposed to the number of 'present'
261 	 * processors, to leverage pointer math in the common utility functions.
262 	 */
263 	riscv_linux_processors = calloc(max_processor_id, sizeof(struct cpuinfo_riscv_linux_processor));
264 	if (riscv_linux_processors == NULL) {
265 		cpuinfo_log_error(
266 			"failed to allocate %zu bytes for %" PRIu32 " processors.",
267 			max_processor_id * sizeof(struct cpuinfo_riscv_linux_processor),
268 			max_processor_id);
269 		goto cleanup;
270 	}
271 
272 	/**
273 	 * Attempt to detect all processors and apply the corresponding flag to
274 	 * each processor struct that we find.
275 	 */
276 	if (!cpuinfo_linux_detect_present_processors(
277 		    max_processor_id,
278 		    &riscv_linux_processors->flags,
279 		    sizeof(struct cpuinfo_riscv_linux_processor),
280 		    CPUINFO_LINUX_FLAG_PRESENT | CPUINFO_LINUX_FLAG_VALID)) {
281 		cpuinfo_log_error("failed to detect present processors");
282 		goto cleanup;
283 	}
284 
285 	/* Populate processor information. */
286 	for (size_t processor = 0; processor < max_processor_id; processor++) {
287 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
288 			continue;
289 		}
290 		/* TODO: Determine if an 'smt_id' is available. */
291 		riscv_linux_processors[processor].processor.linux_id = processor;
292 	}
293 
294 	/* Populate core information. */
295 	for (size_t processor = 0; processor < max_processor_id; processor++) {
296 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
297 			continue;
298 		}
299 
300 		/* Populate processor start and count information. */
301 		if (!cpuinfo_linux_detect_core_cpus(
302 			    max_processor_id,
303 			    processor,
304 			    (cpuinfo_siblings_callback)core_cpus_parser,
305 			    riscv_linux_processors)) {
306 			cpuinfo_log_error("failed to detect core cpus for processor %zu.", processor);
307 			goto cleanup;
308 		}
309 
310 		/* Populate core ID information. */
311 		if (cpuinfo_linux_get_processor_core_id(processor, &riscv_linux_processors[processor].core.core_id)) {
312 			riscv_linux_processors[processor].flags |= CPUINFO_LINUX_FLAG_CORE_ID;
313 		}
314 
315 		/**
316 		 * Populate the vendor and uarch of this core from this
317 		 * processor. When the final 'cores' list is constructed, only
318 		 * the values from the core leader will be honored.
319 		 */
320 		cpuinfo_riscv_linux_decode_vendor_uarch_from_hwprobe(
321 			processor,
322 			&riscv_linux_processors[processor].core.vendor,
323 			&riscv_linux_processors[processor].core.uarch);
324 
325 		/* Populate frequency information of this core. */
326 		uint32_t frequency = cpuinfo_linux_get_processor_cur_frequency(processor);
327 		if (frequency != 0) {
328 			riscv_linux_processors[processor].core.frequency = frequency;
329 			riscv_linux_processors[processor].flags |= CPUINFO_LINUX_FLAG_CUR_FREQUENCY;
330 		}
331 	}
332 
333 	/* Populate cluster information. */
334 	for (size_t processor = 0; processor < max_processor_id; processor++) {
335 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
336 			continue;
337 		}
338 		if (!cpuinfo_linux_detect_cluster_cpus(
339 			    max_processor_id,
340 			    processor,
341 			    (cpuinfo_siblings_callback)cluster_cpus_parser,
342 			    riscv_linux_processors)) {
343 			cpuinfo_log_warning("failed to detect cluster cpus for processor %zu.", processor);
344 			goto cleanup;
345 		}
346 
347 		/**
348 		 * Populate the vendor, uarch and frequency of this cluster from
349 		 * this logical processor. When the 'clusters' list is
350 		 * constructed, only the values from the cluster leader will be
351 		 * honored.
352 		 */
353 		riscv_linux_processors[processor].cluster.vendor = riscv_linux_processors[processor].core.vendor;
354 		riscv_linux_processors[processor].cluster.uarch = riscv_linux_processors[processor].core.uarch;
355 		riscv_linux_processors[processor].cluster.frequency = riscv_linux_processors[processor].core.frequency;
356 	}
357 
358 	/* Populate package information. */
359 	for (size_t processor = 0; processor < max_processor_id; processor++) {
360 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
361 			continue;
362 		}
363 		if (!cpuinfo_linux_detect_package_cpus(
364 			    max_processor_id,
365 			    processor,
366 			    (cpuinfo_siblings_callback)package_cpus_parser,
367 			    riscv_linux_processors)) {
368 			cpuinfo_log_warning("failed to detect package cpus for processor %zu.", processor);
369 			goto cleanup;
370 		}
371 	}
372 
373 	/* Populate ISA structure with hwcap information. */
374 	cpuinfo_riscv_linux_decode_isa_from_hwcap(&cpuinfo_isa);
375 
376 	/**
377 	 * To efficiently compute the number of unique micro-architectures
378 	 * present on the system, sort the processor list by micro-architecture
379 	 * and then scan through the list to count the differences.
380 	 *
381 	 * Ensure this is done at the end of composing the processor list - the
382 	 * parsing functions assume that the position of the processor in the
383 	 * list matches it's Linux ID, which this sorting operation breaks.
384 	 */
385 	qsort(riscv_linux_processors,
386 	      max_processor_id,
387 	      sizeof(struct cpuinfo_riscv_linux_processor),
388 	      compare_riscv_linux_processors);
389 
390 	/**
391 	 * Determine the number of *valid* detected processors, cores,
392 	 * clusters, packages and uarchs in the list.
393 	 */
394 	size_t valid_processors_count = 0;
395 	size_t valid_cores_count = 0;
396 	size_t valid_clusters_count = 0;
397 	size_t valid_packages_count = 0;
398 	size_t valid_uarchs_count = 0;
399 	enum cpuinfo_uarch last_uarch = cpuinfo_uarch_unknown;
400 	for (size_t processor = 0; processor < max_processor_id; processor++) {
401 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
402 			continue;
403 		}
404 
405 		/**
406 		 * All comparisons to the leader id values MUST be done against
407 		 * the 'linux_id' as opposed to 'processor'. The sort function
408 		 * above no longer allows us to make the assumption that these
409 		 * two values are the same.
410 		 */
411 		uint32_t linux_id = riscv_linux_processors[processor].processor.linux_id;
412 
413 		valid_processors_count++;
414 		if (riscv_linux_processors[processor].core_leader_id == linux_id) {
415 			valid_cores_count++;
416 		}
417 		if (riscv_linux_processors[processor].cluster_leader_id == linux_id) {
418 			valid_clusters_count++;
419 		}
420 		if (riscv_linux_processors[processor].package_leader_id == linux_id) {
421 			valid_packages_count++;
422 		}
423 		/**
424 		 * As we've sorted by micro-architecture, when the uarch differs
425 		 * between two entries, a unique uarch has been observed.
426 		 */
427 		if (last_uarch != riscv_linux_processors[processor].core.uarch || valid_uarchs_count == 0) {
428 			valid_uarchs_count++;
429 			last_uarch = riscv_linux_processors[processor].core.uarch;
430 		}
431 	}
432 
433 	/* Allocate and populate final public ABI structures. */
434 	processors = calloc(valid_processors_count, sizeof(struct cpuinfo_processor));
435 	if (processors == NULL) {
436 		cpuinfo_log_error(
437 			"failed to allocate %zu bytes for %zu processors.",
438 			valid_processors_count * sizeof(struct cpuinfo_processor),
439 			valid_processors_count);
440 		goto cleanup;
441 	}
442 
443 	cores = calloc(valid_cores_count, sizeof(struct cpuinfo_core));
444 	if (cores == NULL) {
445 		cpuinfo_log_error(
446 			"failed to allocate %zu bytes for %zu cores.",
447 			valid_cores_count * sizeof(struct cpuinfo_core),
448 			valid_cores_count);
449 		goto cleanup;
450 	}
451 
452 	clusters = calloc(valid_clusters_count, sizeof(struct cpuinfo_cluster));
453 	if (clusters == NULL) {
454 		cpuinfo_log_error(
455 			"failed to allocate %zu bytes for %zu clusters.",
456 			valid_clusters_count * sizeof(struct cpuinfo_cluster),
457 			valid_clusters_count);
458 		goto cleanup;
459 	}
460 
461 	packages = calloc(valid_packages_count, sizeof(struct cpuinfo_package));
462 	if (packages == NULL) {
463 		cpuinfo_log_error(
464 			"failed to allocate %zu bytes for %zu packages.",
465 			valid_packages_count * sizeof(struct cpuinfo_package),
466 			valid_packages_count);
467 		goto cleanup;
468 	}
469 
470 	uarchs = calloc(valid_uarchs_count, sizeof(struct cpuinfo_uarch_info));
471 	if (uarchs == NULL) {
472 		cpuinfo_log_error(
473 			"failed to allocate %zu bytes for %zu packages.",
474 			valid_uarchs_count * sizeof(struct cpuinfo_uarch_info),
475 			valid_uarchs_count);
476 		goto cleanup;
477 	}
478 
479 	linux_cpu_to_processor_map = calloc(max_processor_id, sizeof(struct cpuinfo_processor*));
480 	if (linux_cpu_to_processor_map == NULL) {
481 		cpuinfo_log_error(
482 			"failed to allocate %zu bytes for %" PRIu32 " processor map.",
483 			max_processor_id * sizeof(struct cpuinfo_processor*),
484 			max_processor_id);
485 		goto cleanup;
486 	}
487 
488 	linux_cpu_to_core_map = calloc(max_processor_id, sizeof(struct cpuinfo_core*));
489 	if (linux_cpu_to_core_map == NULL) {
490 		cpuinfo_log_error(
491 			"failed to allocate %zu bytes for %" PRIu32 " core map.",
492 			max_processor_id * sizeof(struct cpuinfo_core*),
493 			max_processor_id);
494 		goto cleanup;
495 	}
496 
497 	linux_cpu_to_uarch_index_map = calloc(max_processor_id, sizeof(struct cpuinfo_uarch_info*));
498 	if (linux_cpu_to_uarch_index_map == NULL) {
499 		cpuinfo_log_error(
500 			"failed to allocate %zu bytes for %" PRIu32 " uarch map.",
501 			max_processor_id * sizeof(struct cpuinfo_uarch_info*),
502 			max_processor_id);
503 		goto cleanup;
504 	}
505 
506 	/* Transfer contents of processor list to ABI structures. */
507 	size_t valid_processors_index = 0;
508 	size_t valid_cores_index = 0;
509 	size_t valid_clusters_index = 0;
510 	size_t valid_packages_index = 0;
511 	size_t valid_uarchs_index = 0;
512 	last_uarch = cpuinfo_uarch_unknown;
513 	for (size_t processor = 0; processor < max_processor_id; processor++) {
514 		if (!bitmask_all(riscv_linux_processors[processor].flags, CPUINFO_LINUX_FLAG_VALID)) {
515 			continue;
516 		}
517 
518 		/**
519 		 * All comparisons to the leader id values MUST be done against
520 		 * the 'linux_id' as opposed to 'processor'. The sort function
521 		 * above no longer allows us to make the assumption that these
522 		 * two values are the same.
523 		 */
524 		uint32_t linux_id = riscv_linux_processors[processor].processor.linux_id;
525 
526 		/* Create uarch entry if this uarch has not been seen before. */
527 		if (last_uarch != riscv_linux_processors[processor].core.uarch || valid_uarchs_index == 0) {
528 			uarchs[valid_uarchs_index++].uarch = riscv_linux_processors[processor].core.uarch;
529 			last_uarch = riscv_linux_processors[processor].core.uarch;
530 		}
531 
532 		/* Copy cpuinfo_processor information. */
533 		memcpy(&processors[valid_processors_index++],
534 		       &riscv_linux_processors[processor].processor,
535 		       sizeof(struct cpuinfo_processor));
536 
537 		/* Update uarch processor count. */
538 		uarchs[valid_uarchs_index - 1].processor_count++;
539 
540 		/* Copy cpuinfo_core information, if this is the leader. */
541 		if (riscv_linux_processors[processor].core_leader_id == linux_id) {
542 			memcpy(&cores[valid_cores_index++],
543 			       &riscv_linux_processors[processor].core,
544 			       sizeof(struct cpuinfo_core));
545 			/* Update uarch core count. */
546 			uarchs[valid_uarchs_index - 1].core_count++;
547 		}
548 
549 		/* Copy cpuinfo_cluster information, if this is the leader. */
550 		if (riscv_linux_processors[processor].cluster_leader_id == linux_id) {
551 			memcpy(&clusters[valid_clusters_index++],
552 			       &riscv_linux_processors[processor].cluster,
553 			       sizeof(struct cpuinfo_cluster));
554 		}
555 
556 		/* Copy cpuinfo_package information, if this is the leader. */
557 		if (riscv_linux_processors[processor].package_leader_id == linux_id) {
558 			memcpy(&packages[valid_packages_index++],
559 			       &riscv_linux_processors[processor].package,
560 			       sizeof(struct cpuinfo_package));
561 		}
562 
563 		/* Commit pointers on the final structures. */
564 		processors[valid_processors_index - 1].core = &cores[valid_cores_index - 1];
565 		processors[valid_processors_index - 1].cluster = &clusters[valid_clusters_index - 1];
566 		processors[valid_processors_index - 1].package = &packages[valid_packages_index - 1];
567 
568 		cores[valid_cores_index - 1].cluster = &clusters[valid_clusters_index - 1];
569 		cores[valid_cores_index - 1].package = &packages[valid_packages_index - 1];
570 
571 		clusters[valid_clusters_index - 1].package = &packages[valid_packages_index - 1];
572 
573 		linux_cpu_to_processor_map[linux_id] = &processors[valid_processors_index - 1];
574 		linux_cpu_to_core_map[linux_id] = &cores[valid_cores_index - 1];
575 		linux_cpu_to_uarch_index_map[linux_id] = valid_uarchs_index - 1;
576 	}
577 
578 	/* Commit */
579 	cpuinfo_processors = processors;
580 	cpuinfo_processors_count = valid_processors_count;
581 	cpuinfo_cores = cores;
582 	cpuinfo_cores_count = valid_cores_count;
583 	cpuinfo_clusters = clusters;
584 	cpuinfo_clusters_count = valid_clusters_count;
585 	cpuinfo_packages = packages;
586 	cpuinfo_packages_count = valid_packages_count;
587 	cpuinfo_uarchs = uarchs;
588 	cpuinfo_uarchs_count = valid_uarchs_count;
589 
590 	cpuinfo_linux_cpu_max = max_processor_id;
591 	cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
592 	cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;
593 	cpuinfo_linux_cpu_to_uarch_index_map = linux_cpu_to_uarch_index_map;
594 
595 	__sync_synchronize();
596 
597 	cpuinfo_is_initialized = true;
598 
599 	/* Mark all public structures NULL to prevent cleanup from erasing them.
600 	 */
601 	processors = NULL;
602 	cores = NULL;
603 	clusters = NULL;
604 	packages = NULL;
605 	uarchs = NULL;
606 	linux_cpu_to_processor_map = NULL;
607 	linux_cpu_to_core_map = NULL;
608 	linux_cpu_to_uarch_index_map = NULL;
609 cleanup:
610 	free(riscv_linux_processors);
611 	free(processors);
612 	free(cores);
613 	free(clusters);
614 	free(packages);
615 	free(uarchs);
616 	free(linux_cpu_to_processor_map);
617 	free(linux_cpu_to_core_map);
618 	free(linux_cpu_to_uarch_index_map);
619 }
620