#include <stdint.h>
#include <stddef.h>
#include <stdlib.h>
#include <string.h>
#include <inttypes.h> /* PRIu32, used in the log format strings below */
#include <stdbool.h>  /* bool, used by the helper predicates below */

#include <cpuinfo.h>
#include <x86/api.h>
#include <x86/linux/api.h>
#include <linux/api.h>
#include <cpuinfo/internal-api.h>
#include <cpuinfo/log.h>

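/*
 * Helpers for decomposing x86 APIC IDs into topology fields.
 * bit_mask(n) yields the n lowest bits set, e.g. bit_mask(3) == 0x7;
 * bit_mask(0) == 0, so a zero-width field masks out nothing.
 */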
static inline uint32_t bit_mask(uint32_t bits) {
	return (UINT32_C(1) << bits) - UINT32_C(1);
}

static inline bool bitmask_all(uint32_t bitfield, uint32_t mask) {
	return (bitfield & mask) == mask;
}

static inline uint32_t min(uint32_t a, uint32_t b) {
	return a < b ? a : b;
}

static inline int cmp(uint32_t a, uint32_t b) {
	return (a > b) - (a < b);
}

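/*
 * qsort comparator: groups processors with CPUINFO_LINUX_FLAG_VALID at the
 * front of the array, then orders them by ascending APIC ID. The counting
 * and enumeration passes below rely on this ordering so that processors
 * sharing a core, cluster, package, or cache form contiguous runs.
 */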
static int cmp_x86_linux_processor(const void* ptr_a, const void* ptr_b) {
	const struct cpuinfo_x86_linux_processor* processor_a = (const struct cpuinfo_x86_linux_processor*) ptr_a;
	const struct cpuinfo_x86_linux_processor* processor_b = (const struct cpuinfo_x86_linux_processor*) ptr_b;

	/* Move usable processors towards the start of the array */
	const bool usable_a = bitmask_all(processor_a->flags, CPUINFO_LINUX_FLAG_VALID);
	const bool usable_b = bitmask_all(processor_b->flags, CPUINFO_LINUX_FLAG_VALID);
	if (usable_a != usable_b) {
		return (int) usable_b - (int) usable_a;
	}

	/* Compare based on APIC ID (i.e. processor 0 < processor 1) */
	const uint32_t id_a = processor_a->apic_id;
	const uint32_t id_b = processor_b->apic_id;
	return cmp(id_a, id_b);
}

static void cpuinfo_x86_count_objects(
	uint32_t linux_processors_count,
	const struct cpuinfo_x86_linux_processor linux_processors[restrict static linux_processors_count],
	const struct cpuinfo_x86_processor processor[restrict static 1],
	uint32_t valid_processor_mask,
	uint32_t llc_apic_bits,
	uint32_t cores_count_ptr[restrict static 1],
	uint32_t clusters_count_ptr[restrict static 1],
	uint32_t packages_count_ptr[restrict static 1],
	uint32_t l1i_count_ptr[restrict static 1],
	uint32_t l1d_count_ptr[restrict static 1],
	uint32_t l2_count_ptr[restrict static 1],
	uint32_t l3_count_ptr[restrict static 1],
	uint32_t l4_count_ptr[restrict static 1])
{
	const uint32_t core_apic_mask =
		~(bit_mask(processor->topology.thread_bits_length) << processor->topology.thread_bits_offset);
	const uint32_t package_apic_mask =
		core_apic_mask & ~(bit_mask(processor->topology.core_bits_length) << processor->topology.core_bits_offset);
	const uint32_t llc_apic_mask = ~bit_mask(llc_apic_bits);
	const uint32_t cluster_apic_mask = package_apic_mask | llc_apic_mask;

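	/*
	 * Illustration (hypothetical field layout, not read from hardware):
	 * with thread_bits_offset = 0, thread_bits_length = 1 and
	 * core_bits_offset = 1, core_bits_length = 3,
	 *   core_apic_mask    == 0xFFFFFFFE  (drops the SMT bit)
	 *   package_apic_mask == 0xFFFFFFF0  (drops SMT and core bits)
	 * and with llc_apic_bits = 4, cluster_apic_mask == 0xFFFFFFF0.
	 */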
	uint32_t cores_count = 0, clusters_count = 0, packages_count = 0;
	uint32_t l1i_count = 0, l1d_count = 0, l2_count = 0, l3_count = 0, l4_count = 0;
	uint32_t last_core_id = UINT32_MAX, last_cluster_id = UINT32_MAX, last_package_id = UINT32_MAX;
	uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX;
	uint32_t last_l2_id = UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX;
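	/*
	 * One linear pass counts distinct objects: because the caller sorts the
	 * array by APIC ID first (see cmp_x86_linux_processor), every distinct
	 * masked ID appears as a contiguous run, so comparing against the
	 * previous ID is sufficient.
	 */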
	for (uint32_t i = 0; i < linux_processors_count; i++) {
		if (bitmask_all(linux_processors[i].flags, valid_processor_mask)) {
			const uint32_t apic_id = linux_processors[i].apic_id;
			cpuinfo_log_debug("APIC ID %"PRIu32": system processor %"PRIu32, apic_id, linux_processors[i].linux_id);

			/* All bits of APIC ID except thread ID mask */
			const uint32_t core_id = apic_id & core_apic_mask;
			if (core_id != last_core_id) {
				last_core_id = core_id;
				cores_count++;
			}
			/* All bits of APIC ID except thread ID and core ID masks */
			const uint32_t package_id = apic_id & package_apic_mask;
			if (package_id != last_package_id) {
				last_package_id = package_id;
				packages_count++;
			}
			/* Bits of APIC ID which are part of either LLC or package ID mask */
			const uint32_t cluster_id = apic_id & cluster_apic_mask;
			if (cluster_id != last_cluster_id) {
				last_cluster_id = cluster_id;
				clusters_count++;
			}
			if (processor->cache.l1i.size != 0) {
				const uint32_t l1i_id = apic_id & ~bit_mask(processor->cache.l1i.apic_bits);
				if (l1i_id != last_l1i_id) {
					last_l1i_id = l1i_id;
					l1i_count++;
				}
			}
			if (processor->cache.l1d.size != 0) {
				const uint32_t l1d_id = apic_id & ~bit_mask(processor->cache.l1d.apic_bits);
				if (l1d_id != last_l1d_id) {
					last_l1d_id = l1d_id;
					l1d_count++;
				}
			}
			if (processor->cache.l2.size != 0) {
				const uint32_t l2_id = apic_id & ~bit_mask(processor->cache.l2.apic_bits);
				if (l2_id != last_l2_id) {
					last_l2_id = l2_id;
					l2_count++;
				}
			}
			if (processor->cache.l3.size != 0) {
				const uint32_t l3_id = apic_id & ~bit_mask(processor->cache.l3.apic_bits);
				if (l3_id != last_l3_id) {
					last_l3_id = l3_id;
					l3_count++;
				}
			}
			if (processor->cache.l4.size != 0) {
				const uint32_t l4_id = apic_id & ~bit_mask(processor->cache.l4.apic_bits);
				if (l4_id != last_l4_id) {
					last_l4_id = l4_id;
					l4_count++;
				}
			}
		}
	}
	*cores_count_ptr = cores_count;
	*clusters_count_ptr = clusters_count;
	*packages_count_ptr = packages_count;
	*l1i_count_ptr = l1i_count;
	*l1d_count_ptr = l1d_count;
	*l2_count_ptr  = l2_count;
	*l3_count_ptr  = l3_count;
	*l4_count_ptr  = l4_count;
}

void cpuinfo_x86_linux_init(void) {
	struct cpuinfo_x86_linux_processor* x86_linux_processors = NULL;
	struct cpuinfo_processor* processors = NULL;
	struct cpuinfo_core* cores = NULL;
	struct cpuinfo_cluster* clusters = NULL;
	struct cpuinfo_package* packages = NULL;
	const struct cpuinfo_processor** linux_cpu_to_processor_map = NULL;
	const struct cpuinfo_core** linux_cpu_to_core_map = NULL;
	struct cpuinfo_cache* l1i = NULL;
	struct cpuinfo_cache* l1d = NULL;
	struct cpuinfo_cache* l2 = NULL;
	struct cpuinfo_cache* l3 = NULL;
	struct cpuinfo_cache* l4 = NULL;

	const uint32_t max_processors_count = cpuinfo_linux_get_max_processors_count();
	cpuinfo_log_debug("system maximum processors count: %"PRIu32, max_processors_count);

	const uint32_t max_possible_processors_count = 1 +
		cpuinfo_linux_get_max_possible_processor(max_processors_count);
	cpuinfo_log_debug("maximum possible processors count: %"PRIu32, max_possible_processors_count);
	const uint32_t max_present_processors_count = 1 +
		cpuinfo_linux_get_max_present_processor(max_processors_count);
	cpuinfo_log_debug("maximum present processors count: %"PRIu32, max_present_processors_count);

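	/*
	 * Bound the processor array by the kernel-reported present/possible sets
	 * when available; otherwise fall back to requiring an entry parsed from
	 * /proc/cpuinfo.
	 */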
	uint32_t valid_processor_mask = 0;
	uint32_t x86_linux_processors_count = max_processors_count;
	if (max_present_processors_count != 0) {
		x86_linux_processors_count = min(x86_linux_processors_count, max_present_processors_count);
		valid_processor_mask = CPUINFO_LINUX_FLAG_PRESENT;
	} else {
		valid_processor_mask = CPUINFO_LINUX_FLAG_PROC_CPUINFO;
	}
	if (max_possible_processors_count != 0) {
		x86_linux_processors_count = min(x86_linux_processors_count, max_possible_processors_count);
		valid_processor_mask |= CPUINFO_LINUX_FLAG_POSSIBLE;
	}

	x86_linux_processors = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_x86_linux_processor));
	if (x86_linux_processors == NULL) {
		cpuinfo_log_error(
			"failed to allocate %zu bytes for descriptions of %"PRIu32" x86 logical processors",
			x86_linux_processors_count * sizeof(struct cpuinfo_x86_linux_processor),
			x86_linux_processors_count);
		return;
	}

	if (max_possible_processors_count != 0) {
		cpuinfo_linux_detect_possible_processors(
			x86_linux_processors_count, &x86_linux_processors->flags,
			sizeof(struct cpuinfo_x86_linux_processor),
			CPUINFO_LINUX_FLAG_POSSIBLE);
	}

	if (max_present_processors_count != 0) {
		cpuinfo_linux_detect_present_processors(
			x86_linux_processors_count, &x86_linux_processors->flags,
			sizeof(struct cpuinfo_x86_linux_processor),
			CPUINFO_LINUX_FLAG_PRESENT);
	}

	if (!cpuinfo_x86_linux_parse_proc_cpuinfo(x86_linux_processors_count, x86_linux_processors)) {
		cpuinfo_log_error("failed to parse processor information from /proc/cpuinfo");
		/* goto rather than return: release the already-allocated processor array */
		goto cleanup;
	}

	for (uint32_t i = 0; i < x86_linux_processors_count; i++) {
		if (bitmask_all(x86_linux_processors[i].flags, valid_processor_mask)) {
			x86_linux_processors[i].flags |= CPUINFO_LINUX_FLAG_VALID;
		}
	}

	struct cpuinfo_x86_processor x86_processor;
	memset(&x86_processor, 0, sizeof(x86_processor));
	cpuinfo_x86_init_processor(&x86_processor);
	char brand_string[48];
	cpuinfo_x86_normalize_brand_string(x86_processor.brand_string, brand_string);

	uint32_t processors_count = 0;
	for (uint32_t i = 0; i < x86_linux_processors_count; i++) {
		if (bitmask_all(x86_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			x86_linux_processors[i].linux_id = i;
			processors_count++;
		}
	}

	qsort(x86_linux_processors, x86_linux_processors_count, sizeof(struct cpuinfo_x86_linux_processor),
		cmp_x86_linux_processor);

	processors = calloc(processors_count, sizeof(struct cpuinfo_processor));
	if (processors == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" logical processors",
			processors_count * sizeof(struct cpuinfo_processor), processors_count);
		goto cleanup;
	}

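	/*
	 * Find the APIC ID bit width of the last-level cache actually present,
	 * walking down from L4. These bits feed the cluster mask, so processors
	 * sharing the LLC are grouped into one cluster.
	 */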
	uint32_t llc_apic_bits = 0;
	if (x86_processor.cache.l4.size != 0) {
		llc_apic_bits = x86_processor.cache.l4.apic_bits;
	} else if (x86_processor.cache.l3.size != 0) {
		llc_apic_bits = x86_processor.cache.l3.apic_bits;
	} else if (x86_processor.cache.l2.size != 0) {
		llc_apic_bits = x86_processor.cache.l2.apic_bits;
	} else if (x86_processor.cache.l1d.size != 0) {
		llc_apic_bits = x86_processor.cache.l1d.apic_bits;
	}
	uint32_t packages_count = 0, clusters_count = 0, cores_count = 0;
	uint32_t l1i_count = 0, l1d_count = 0, l2_count = 0, l3_count = 0, l4_count = 0;
	cpuinfo_x86_count_objects(
		x86_linux_processors_count, x86_linux_processors, &x86_processor, valid_processor_mask, llc_apic_bits,
		&cores_count, &clusters_count, &packages_count, &l1i_count, &l1d_count, &l2_count, &l3_count, &l4_count);

	cpuinfo_log_debug("detected %"PRIu32" cores", cores_count);
	cpuinfo_log_debug("detected %"PRIu32" clusters", clusters_count);
	cpuinfo_log_debug("detected %"PRIu32" packages", packages_count);
	cpuinfo_log_debug("detected %"PRIu32" L1I caches", l1i_count);
	cpuinfo_log_debug("detected %"PRIu32" L1D caches", l1d_count);
	cpuinfo_log_debug("detected %"PRIu32" L2 caches", l2_count);
	cpuinfo_log_debug("detected %"PRIu32" L3 caches", l3_count);
	cpuinfo_log_debug("detected %"PRIu32" L4 caches", l4_count);

	linux_cpu_to_processor_map = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_processor*));
	if (linux_cpu_to_processor_map == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for mapping entries of %"PRIu32" logical processors",
			x86_linux_processors_count * sizeof(struct cpuinfo_processor*),
			x86_linux_processors_count);
		goto cleanup;
	}

	linux_cpu_to_core_map = calloc(x86_linux_processors_count, sizeof(struct cpuinfo_core*));
	if (linux_cpu_to_core_map == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for mapping entries of %"PRIu32" cores",
			x86_linux_processors_count * sizeof(struct cpuinfo_core*),
			x86_linux_processors_count);
		goto cleanup;
	}

	cores = calloc(cores_count, sizeof(struct cpuinfo_core));
	if (cores == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" cores",
			cores_count * sizeof(struct cpuinfo_core), cores_count);
		goto cleanup;
	}

	clusters = calloc(clusters_count, sizeof(struct cpuinfo_cluster));
	if (clusters == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" core clusters",
			clusters_count * sizeof(struct cpuinfo_cluster), clusters_count);
		goto cleanup;
	}

	packages = calloc(packages_count, sizeof(struct cpuinfo_package));
	if (packages == NULL) {
		cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" physical packages",
			packages_count * sizeof(struct cpuinfo_package), packages_count);
		goto cleanup;
	}

	if (l1i_count != 0) {
		l1i = calloc(l1i_count, sizeof(struct cpuinfo_cache));
		if (l1i == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1I caches",
				l1i_count * sizeof(struct cpuinfo_cache), l1i_count);
			goto cleanup;
		}
	}
	if (l1d_count != 0) {
		l1d = calloc(l1d_count, sizeof(struct cpuinfo_cache));
		if (l1d == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L1D caches",
				l1d_count * sizeof(struct cpuinfo_cache), l1d_count);
			goto cleanup;
		}
	}
	if (l2_count != 0) {
		l2 = calloc(l2_count, sizeof(struct cpuinfo_cache));
		if (l2 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L2 caches",
				l2_count * sizeof(struct cpuinfo_cache), l2_count);
			goto cleanup;
		}
	}
	if (l3_count != 0) {
		l3 = calloc(l3_count, sizeof(struct cpuinfo_cache));
		if (l3 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L3 caches",
				l3_count * sizeof(struct cpuinfo_cache), l3_count);
			goto cleanup;
		}
	}
	if (l4_count != 0) {
		l4 = calloc(l4_count, sizeof(struct cpuinfo_cache));
		if (l4 == NULL) {
			cpuinfo_log_error("failed to allocate %zu bytes for descriptions of %"PRIu32" L4 caches",
				l4_count * sizeof(struct cpuinfo_cache), l4_count);
			goto cleanup;
		}
	}

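	/*
	 * Recompute the same masks used in cpuinfo_x86_count_objects, so the
	 * indices assigned below stay consistent with the counts allocated above.
	 */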
	const uint32_t core_apic_mask =
		~(bit_mask(x86_processor.topology.thread_bits_length) << x86_processor.topology.thread_bits_offset);
	const uint32_t package_apic_mask =
		core_apic_mask & ~(bit_mask(x86_processor.topology.core_bits_length) << x86_processor.topology.core_bits_offset);
	const uint32_t llc_apic_mask = ~bit_mask(llc_apic_bits);
	const uint32_t cluster_apic_mask = package_apic_mask | llc_apic_mask;

	uint32_t processor_index = UINT32_MAX, core_index = UINT32_MAX, cluster_index = UINT32_MAX, package_index = UINT32_MAX;
	uint32_t l1i_index = UINT32_MAX, l1d_index = UINT32_MAX, l2_index = UINT32_MAX, l3_index = UINT32_MAX, l4_index = UINT32_MAX;
	uint32_t cluster_id = 0, core_id = 0, smt_id = 0;
	uint32_t last_apic_core_id = UINT32_MAX, last_apic_cluster_id = UINT32_MAX, last_apic_package_id = UINT32_MAX;
	uint32_t last_l1i_id = UINT32_MAX, last_l1d_id = UINT32_MAX;
	uint32_t last_l2_id = UINT32_MAX, last_l3_id = UINT32_MAX, last_l4_id = UINT32_MAX;
	for (uint32_t i = 0; i < x86_linux_processors_count; i++) {
		if (bitmask_all(x86_linux_processors[i].flags, CPUINFO_LINUX_FLAG_VALID)) {
			const uint32_t apic_id = x86_linux_processors[i].apic_id;
			processor_index++;
			smt_id++;

			/* All bits of APIC ID except thread ID mask */
			const uint32_t apic_core_id = apic_id & core_apic_mask;
			if (apic_core_id != last_apic_core_id) {
				core_index++;
				core_id++;
				smt_id = 0;
			}
			/* Bits of APIC ID which are part of either LLC or package ID mask */
			const uint32_t apic_cluster_id = apic_id & cluster_apic_mask;
			if (apic_cluster_id != last_apic_cluster_id) {
				cluster_index++;
				cluster_id++;
			}
			/* All bits of APIC ID except thread ID and core ID masks */
			const uint32_t apic_package_id = apic_id & package_apic_mask;
			if (apic_package_id != last_apic_package_id) {
				package_index++;
				core_id = 0;
				cluster_id = 0;
			}

			/* Initialize logical processor object */
			processors[processor_index].smt_id   = smt_id;
			processors[processor_index].core     = cores + core_index;
			processors[processor_index].cluster  = clusters + cluster_index;
			processors[processor_index].package  = packages + package_index;
			processors[processor_index].linux_id = x86_linux_processors[i].linux_id;
			processors[processor_index].apic_id  = x86_linux_processors[i].apic_id;

			if (apic_core_id != last_apic_core_id) {
				/* new core */
				cores[core_index] = (struct cpuinfo_core) {
					.processor_start = processor_index,
					.processor_count = 1,
					.core_id = core_id,
					.cluster = clusters + cluster_index,
					.package = packages + package_index,
					.vendor = x86_processor.vendor,
					.uarch = x86_processor.uarch,
					.cpuid = x86_processor.cpuid,
				};
				clusters[cluster_index].core_count += 1;
				packages[package_index].core_count += 1;
				last_apic_core_id = apic_core_id;
			} else {
				/* another logical processor on the same core */
				cores[core_index].processor_count++;
			}

			if (apic_cluster_id != last_apic_cluster_id) {
				/* new cluster */
				clusters[cluster_index].processor_start = processor_index;
				clusters[cluster_index].processor_count = 1;
				clusters[cluster_index].core_start = core_index;
				clusters[cluster_index].cluster_id = cluster_id;
				clusters[cluster_index].package = packages + package_index;
				clusters[cluster_index].vendor = x86_processor.vendor;
				clusters[cluster_index].uarch = x86_processor.uarch;
				clusters[cluster_index].cpuid = x86_processor.cpuid;
				packages[package_index].cluster_count += 1;
				last_apic_cluster_id = apic_cluster_id;
			} else {
				/* another logical processor on the same cluster */
				clusters[cluster_index].processor_count++;
			}

			if (apic_package_id != last_apic_package_id) {
				/* new package */
				packages[package_index].processor_start = processor_index;
				packages[package_index].processor_count = 1;
				packages[package_index].core_start = core_index;
				packages[package_index].cluster_start = cluster_index;
				cpuinfo_x86_format_package_name(x86_processor.vendor, brand_string, packages[package_index].name);
				last_apic_package_id = apic_package_id;
			} else {
				/* another logical processor on the same package */
				packages[package_index].processor_count++;
			}

			linux_cpu_to_processor_map[x86_linux_processors[i].linux_id] = processors + processor_index;
			linux_cpu_to_core_map[x86_linux_processors[i].linux_id] = cores + core_index;

			if (x86_processor.cache.l1i.size != 0) {
				const uint32_t l1i_id = apic_id & ~bit_mask(x86_processor.cache.l1i.apic_bits);
				if (l1i_id != last_l1i_id) {
					/* new cache */
					last_l1i_id = l1i_id;
					l1i[++l1i_index] = (struct cpuinfo_cache) {
						.size            = x86_processor.cache.l1i.size,
						.associativity   = x86_processor.cache.l1i.associativity,
						.sets            = x86_processor.cache.l1i.sets,
						.partitions      = x86_processor.cache.l1i.partitions,
						.line_size       = x86_processor.cache.l1i.line_size,
						.flags           = x86_processor.cache.l1i.flags,
						.processor_start = processor_index,
						.processor_count = 1,
					};
				} else {
					/* another processor sharing the same cache */
					l1i[l1i_index].processor_count += 1;
				}
				processors[i].cache.l1i = &l1i[l1i_index];
			} else {
				/* reset cache id */
				last_l1i_id = UINT32_MAX;
			}
			if (x86_processor.cache.l1d.size != 0) {
				const uint32_t l1d_id = apic_id & ~bit_mask(x86_processor.cache.l1d.apic_bits);
				if (l1d_id != last_l1d_id) {
					/* new cache */
					last_l1d_id = l1d_id;
					l1d[++l1d_index] = (struct cpuinfo_cache) {
						.size            = x86_processor.cache.l1d.size,
						.associativity   = x86_processor.cache.l1d.associativity,
						.sets            = x86_processor.cache.l1d.sets,
						.partitions      = x86_processor.cache.l1d.partitions,
						.line_size       = x86_processor.cache.l1d.line_size,
						.flags           = x86_processor.cache.l1d.flags,
						.processor_start = processor_index,
						.processor_count = 1,
					};
				} else {
					/* another processor sharing the same cache */
					l1d[l1d_index].processor_count += 1;
				}
				processors[i].cache.l1d = &l1d[l1d_index];
			} else {
				/* reset cache id */
				last_l1d_id = UINT32_MAX;
			}
			if (x86_processor.cache.l2.size != 0) {
				const uint32_t l2_id = apic_id & ~bit_mask(x86_processor.cache.l2.apic_bits);
				if (l2_id != last_l2_id) {
					/* new cache */
					last_l2_id = l2_id;
					l2[++l2_index] = (struct cpuinfo_cache) {
						.size            = x86_processor.cache.l2.size,
						.associativity   = x86_processor.cache.l2.associativity,
						.sets            = x86_processor.cache.l2.sets,
						.partitions      = x86_processor.cache.l2.partitions,
						.line_size       = x86_processor.cache.l2.line_size,
						.flags           = x86_processor.cache.l2.flags,
						.processor_start = processor_index,
						.processor_count = 1,
					};
				} else {
					/* another processor sharing the same cache */
					l2[l2_index].processor_count += 1;
				}
				processors[i].cache.l2 = &l2[l2_index];
			} else {
				/* reset cache id */
				last_l2_id = UINT32_MAX;
			}
			if (x86_processor.cache.l3.size != 0) {
				const uint32_t l3_id = apic_id & ~bit_mask(x86_processor.cache.l3.apic_bits);
				if (l3_id != last_l3_id) {
					/* new cache */
					last_l3_id = l3_id;
					l3[++l3_index] = (struct cpuinfo_cache) {
						.size            = x86_processor.cache.l3.size,
						.associativity   = x86_processor.cache.l3.associativity,
						.sets            = x86_processor.cache.l3.sets,
						.partitions      = x86_processor.cache.l3.partitions,
						.line_size       = x86_processor.cache.l3.line_size,
						.flags           = x86_processor.cache.l3.flags,
						.processor_start = processor_index,
						.processor_count = 1,
					};
				} else {
					/* another processor sharing the same cache */
					l3[l3_index].processor_count += 1;
				}
				processors[i].cache.l3 = &l3[l3_index];
			} else {
				/* reset cache id */
				last_l3_id = UINT32_MAX;
			}
			if (x86_processor.cache.l4.size != 0) {
				const uint32_t l4_id = apic_id & ~bit_mask(x86_processor.cache.l4.apic_bits);
				if (l4_id != last_l4_id) {
					/* new cache */
					last_l4_id = l4_id;
					l4[++l4_index] = (struct cpuinfo_cache) {
						.size            = x86_processor.cache.l4.size,
						.associativity   = x86_processor.cache.l4.associativity,
						.sets            = x86_processor.cache.l4.sets,
						.partitions      = x86_processor.cache.l4.partitions,
						.line_size       = x86_processor.cache.l4.line_size,
						.flags           = x86_processor.cache.l4.flags,
						.processor_start = processor_index,
						.processor_count = 1,
					};
				} else {
					/* another processor sharing the same cache */
					l4[l4_index].processor_count += 1;
				}
				processors[i].cache.l4 = &l4[l4_index];
			} else {
				/* reset cache id */
				last_l4_id = UINT32_MAX;
			}
		}
	}

	/* Commit changes */
	cpuinfo_processors = processors;
	cpuinfo_cores = cores;
	cpuinfo_clusters = clusters;
	cpuinfo_packages = packages;
	cpuinfo_cache[cpuinfo_cache_level_1i] = l1i;
	cpuinfo_cache[cpuinfo_cache_level_1d] = l1d;
	cpuinfo_cache[cpuinfo_cache_level_2]  = l2;
	cpuinfo_cache[cpuinfo_cache_level_3]  = l3;
	cpuinfo_cache[cpuinfo_cache_level_4]  = l4;

	cpuinfo_processors_count = processors_count;
	cpuinfo_cores_count = cores_count;
	cpuinfo_clusters_count = clusters_count;
	cpuinfo_packages_count = packages_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1i] = l1i_count;
	cpuinfo_cache_count[cpuinfo_cache_level_1d] = l1d_count;
	cpuinfo_cache_count[cpuinfo_cache_level_2]  = l2_count;
	cpuinfo_cache_count[cpuinfo_cache_level_3]  = l3_count;
	cpuinfo_cache_count[cpuinfo_cache_level_4]  = l4_count;
	cpuinfo_max_cache_size = cpuinfo_compute_max_cache_size(&processors[0]);

	cpuinfo_global_uarch = (struct cpuinfo_uarch_info) {
		.uarch = x86_processor.uarch,
		.cpuid = x86_processor.cpuid,
		.processor_count = processors_count,
		.core_count = cores_count,
	};

	cpuinfo_linux_cpu_max = x86_linux_processors_count;
	cpuinfo_linux_cpu_to_processor_map = linux_cpu_to_processor_map;
	cpuinfo_linux_cpu_to_core_map = linux_cpu_to_core_map;

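	/* Full barrier: make the initialized structures globally visible before
	 * cpuinfo_is_initialized is published. */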
	__sync_synchronize();

	cpuinfo_is_initialized = true;

	processors = NULL;
	cores = NULL;
	clusters = NULL;
	packages = NULL;
	l1i = l1d = l2 = l3 = l4 = NULL;
	linux_cpu_to_processor_map = NULL;
	linux_cpu_to_core_map = NULL;

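	/*
	 * On success every pointer above has been reset to NULL, so the frees
	 * below are no-ops; on an error path they release whatever was allocated.
	 * x86_linux_processors is temporary and is freed unconditionally.
	 */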
cleanup:
	free(x86_linux_processors);
	free(processors);
	free(cores);
	free(clusters);
	free(packages);
	free(l1i);
	free(l1d);
	free(l2);
	free(l3);
	free(l4);
	free(linux_cpu_to_processor_map);
	free(linux_cpu_to_core_map);
}