• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-or-later */
2 
3 #include <acpi/acpigen.h>
4 #include <arch/hpet.h>
5 #include <arch/ioapic.h>
6 #include <assert.h>
7 #include <cpu/x86/lapic.h>
8 #include <commonlib/sort.h>
9 #include <device/mmio.h>
10 #include <device/pci.h>
11 #include <device/pciexp.h>
12 #include <device/pci_ids.h>
13 #include <soc/acpi.h>
14 #include <soc/chip_common.h>
15 #include <soc/hest.h>
16 #include <soc/iomap.h>
17 #include <soc/numa.h>
18 #include <soc/pci_devs.h>
19 #include <soc/soc_util.h>
20 #include <soc/util.h>
21 #include "chip.h"
22 
23 /* NUMA related ACPI table generation. SRAT, SLIT, etc */
24 
25 /* Increase if necessary. Currently all x86 CPUs only have 2 SMP threads */
26 #define MAX_THREAD 2
27 
/*
 * Emit SRAT processor affinity structures (LAPIC or x2APIC form, depending
 * on the runtime APIC mode) for every enabled CPU device.
 *
 * Entries are ordered by SMT thread_id first (all thread-0 CPUs, then
 * thread-1), and by ascending APIC ID within each thread group.
 *
 * @current: write cursor into the SRAT body
 * @return updated write cursor
 */
unsigned long acpi_create_srat_lapics(unsigned long current)
{
	struct device *cpu;
	unsigned int num_cpus = 0;
	int apic_ids[CONFIG_MAX_CPUS] = {};

	/* Pass 1: collect APIC IDs grouped by thread_id; each group is
	   sorted ascending independently (only the newly appended slice
	   [sort_start, num_cpus) is sorted per iteration). */
	unsigned int sort_start = 0;
	for (unsigned int thread_id = 0; thread_id < MAX_THREAD; thread_id++) {
		for (cpu = all_devices; cpu; cpu = cpu->next) {
			if (!is_enabled_cpu(cpu))
				continue;
			if (num_cpus >= ARRAY_SIZE(apic_ids))
				break;
			if (cpu->path.apic.thread_id != thread_id)
				continue;
			apic_ids[num_cpus++] = cpu->path.apic.apic_id;
		}
		bubblesort(&apic_ids[sort_start], num_cpus - sort_start, NUM_ASCENDING);
		sort_start = num_cpus;
	}

	/* Pass 2: emit one SRAT entry per collected APIC ID */
	for (unsigned int i = 0; i < num_cpus; i++) {
		/* Match the sorted apic_ids to a struct device */
		for (cpu = all_devices; cpu; cpu = cpu->next) {
			if (!is_enabled_cpu(cpu))
				continue;
			if (cpu->path.apic.apic_id == apic_ids[i])
				break;
		}
		if (!cpu)
			continue;

		/* x2APIC structures carry 32-bit APIC IDs; the legacy LAPIC
		   structure only holds an 8-bit APIC ID */
		if (is_x2apic_mode()) {
			printk(BIOS_DEBUG, "SRAT: x2apic cpu_index=%04x, node_id=%02x, apic_id=%08x\n",
			       i, device_to_pd(cpu), cpu->path.apic.apic_id);

			current += acpi_create_srat_x2apic((acpi_srat_x2apic_t *)current,
				device_to_pd(cpu), cpu->path.apic.apic_id);
		} else {
			printk(BIOS_DEBUG, "SRAT: lapic cpu_index=%02x, node_id=%02x, apic_id=%02x\n",
			       i, device_to_pd(cpu), cpu->path.apic.apic_id);

			current += acpi_create_srat_lapic((acpi_srat_lapic_t *)current,
				device_to_pd(cpu), cpu->path.apic.apic_id);
		}
	}
	return current;
}
76 
/*
 * Fill 'srat_mem' with SRAT memory affinity entries derived from the FSP
 * system memory map HOB.
 *
 * Skipped map elements: reserved regions, (with CXL enabled) regions that
 * are not processor-attached, and elements whose base address was already
 * emitted.
 *
 * @srat_mem: output array of at least MAX_ACPI_MEMORY_AFFINITY_COUNT entries
 * @return number of entries written
 */
static unsigned int get_srat_memory_entries(acpi_srat_mem_t *srat_mem)
{
	const struct SystemMemoryMapHob *memory_map;
	unsigned int mmap_index;

	memory_map = get_system_memory_map();
	assert(memory_map);
	printk(BIOS_DEBUG, "memory_map: %p\n", memory_map);

	mmap_index = 0;
	for (int e = 0; e < memory_map->numberEntries; ++e) {
		const struct SystemMemoryMapElement *mem_element = &memory_map->Element[e];
		/* HOB base/size fields are in units of 64 MiB */
		uint64_t addr =
			(uint64_t)((uint64_t)mem_element->BaseAddress <<
				MEM_ADDR_64MB_SHIFT_BITS);
		uint64_t size =
			(uint64_t)((uint64_t)mem_element->ElementSize <<
				MEM_ADDR_64MB_SHIFT_BITS);

		printk(BIOS_DEBUG, "memory_map %d addr: 0x%llx, BaseAddress: 0x%x, size: 0x%llx, "
			"ElementSize: 0x%x, type: %d, reserved: %d\n",
			e, addr, mem_element->BaseAddress, size,
			mem_element->ElementSize, mem_element->Type,
			is_memtype_reserved(mem_element->Type));

		/* skip reserved memory region */
		if (is_memtype_reserved(mem_element->Type))
			continue;
		/* skip all non processor attached memory regions */
		if (CONFIG(SOC_INTEL_HAS_CXL) &&
			(!is_memtype_processor_attached(mem_element->Type)))
			continue;

		/* skip if this address is already added */
		bool skip = false;
		for (unsigned int idx = 0; idx < mmap_index; ++idx) {
			uint64_t base_addr = ((uint64_t)srat_mem[idx].base_address_high << 32) +
				srat_mem[idx].base_address_low;
			if (addr == base_addr) {
				skip = true;
				break;
			}
		}
		if (skip)
			continue;

		/* Bound-check only when an entry is actually about to be
		   written; map elements that are skipped above must not be
		   able to trip this assert. */
		assert(mmap_index < MAX_ACPI_MEMORY_AFFINITY_COUNT);

		srat_mem[mmap_index].type = 1; /* Memory affinity structure */
		srat_mem[mmap_index].length = sizeof(acpi_srat_mem_t);
		srat_mem[mmap_index].base_address_low = (uint32_t)(addr & 0xffffffff);
		srat_mem[mmap_index].base_address_high = (uint32_t)(addr >> 32);
		srat_mem[mmap_index].length_low = (uint32_t)(size & 0xffffffff);
		srat_mem[mmap_index].length_high = (uint32_t)(size >> 32);
		srat_mem[mmap_index].proximity_domain = memory_to_pd(mem_element);
		srat_mem[mmap_index].flags = ACPI_SRAT_MEMORY_ENABLED;
		if (is_memtype_non_volatile(mem_element->Type))
			srat_mem[mmap_index].flags |= ACPI_SRAT_MEMORY_NONVOLATILE;
		++mmap_index;
	}

	return mmap_index;
}
140 
acpi_fill_srat(unsigned long current)141 static unsigned long acpi_fill_srat(unsigned long current)
142 {
143 	acpi_srat_mem_t srat_mem[MAX_ACPI_MEMORY_AFFINITY_COUNT];
144 	unsigned int mem_count;
145 
146 	/* create all subtables for processors */
147 	current = acpi_create_srat_lapics(current);
148 
149 	memset(srat_mem, 0, sizeof(srat_mem));
150 	mem_count = get_srat_memory_entries(srat_mem);
151 	for (int i = 0; i < mem_count; ++i) {
152 		printk(BIOS_DEBUG, "adding srat memory %d entry length: %d, addr: 0x%x%x, "
153 			"length: 0x%x%x, proximity_domain: %d, flags: %x\n",
154 			i, srat_mem[i].length,
155 			srat_mem[i].base_address_high, srat_mem[i].base_address_low,
156 			srat_mem[i].length_high, srat_mem[i].length_low,
157 			srat_mem[i].proximity_domain, srat_mem[i].flags);
158 		memcpy((acpi_srat_mem_t *)current, &srat_mem[i], sizeof(srat_mem[i]));
159 		current += srat_mem[i].length;
160 	}
161 
162 	if (CONFIG(SOC_INTEL_HAS_CXL))
163 		current = cxl_fill_srat(current);
164 
165 	return current;
166 }
167 
168 #if CONFIG(SOC_INTEL_SAPPHIRERAPIDS_SP)
169 /*
170 Because pds.num_pds comes from spr/numa.c function fill_pds().
171 pds.num_pds = soc_get_num_cpus() + get_cxl_node_count().
172 */
173 /* SPR-SP platform has Generic Initiator domain in addition to processor domain */
static unsigned long acpi_fill_slit(unsigned long current)
{
	uint8_t *p = (uint8_t *)current;
	/* According to table 5.60 of ACPI 6.4 spec, "Number of System Localities" field takes
	   up 8 bytes. Following that, each matrix entry takes up 1 byte. */
	memset(p, 0, 8 + pds.num_pds * pds.num_pds);
	/* Low byte of the 8-byte locality count; the upper 7 bytes remain
	   zero from the memset above (little-endian layout). */
	*p = (uint8_t)pds.num_pds;
	p += 8;

	/* Copy the precomputed NxN proximity-domain distance matrix row by
	   row from the global pds structure. */
	for (int i = 0; i < pds.num_pds; i++) {
		for (int j = 0; j < pds.num_pds; j++)
			p[i * pds.num_pds + j] = pds.pds[i].distances[j];
	}

	current += 8 + pds.num_pds * pds.num_pds;
	return current;
}
191 #else
/*
 * Fill the SLIT body assuming a fully connected socket topology:
 * distance 10 to self, 16 to every other node. The first 8 bytes hold
 * the locality count, followed by the N*N one-byte distance matrix.
 */
static unsigned long acpi_fill_slit(unsigned long current)
{
	const unsigned int node_count = soc_get_num_cpus();
	uint8_t *slit = (uint8_t *)current;

	memset(slit, 0, 8 + node_count * node_count);
	*slit = (uint8_t)node_count;

	/* this assumes fully connected socket topology */
	uint8_t *matrix = slit + 8;
	for (unsigned int row = 0; row < node_count; row++) {
		for (unsigned int col = 0; col < node_count; col++)
			matrix[row * node_count + col] = (row == col) ? 10 : 16;
	}

	return current + 8 + node_count * node_count;
}
214 #endif
215 
216 /*
217  * This function adds PCIe bridge device entry in DMAR table. If it is called
218  * in the context of ATSR subtable, it adds ATSR subtable when it is first called.
219  */
acpi_create_dmar_ds_pci_br_for_port(unsigned long current,const struct device * bridge_dev,uint32_t pcie_seg,bool is_atsr,bool * first)220 static unsigned long acpi_create_dmar_ds_pci_br_for_port(unsigned long current,
221 							 const struct device *bridge_dev,
222 							 uint32_t pcie_seg,
223 							 bool is_atsr, bool *first)
224 {
225 	const uint32_t bus = bridge_dev->upstream->secondary;
226 	const uint32_t dev = PCI_SLOT(bridge_dev->path.pci.devfn);
227 	const uint32_t func = PCI_FUNC(bridge_dev->path.pci.devfn);
228 
229 	if (bus == 0)
230 		return current;
231 
232 	unsigned long atsr_size = 0;
233 	unsigned long pci_br_size = 0;
234 	if (is_atsr == true && first && *first == true) {
235 		printk(BIOS_DEBUG, "[Root Port ATS Capability] Flags: 0x%x, "
236 			"PCI Segment Number: 0x%x\n", 0, pcie_seg);
237 		atsr_size = acpi_create_dmar_atsr(current, 0, pcie_seg);
238 		*first = false;
239 	}
240 
241 	printk(BIOS_DEBUG, "    [PCI Bridge Device] %s\n", dev_path(bridge_dev));
242 	pci_br_size = acpi_create_dmar_ds_pci_br(current + atsr_size, bus, dev, func);
243 
244 	return (atsr_size + pci_br_size);
245 }
246 
/*
 * Emit one DRHD (DMA Remapping Hardware Unit Definition) for the VT-d
 * engine 'iommu', followed by its device scope entries: PCH IOAPIC and
 * HPET (domain 0 only), per-stack IOAPIC/CBDMA (SKX/CPX only), PCIe root
 * ports, VMD (SKX/CPX only) and IOAT endpoints. The structure length is
 * patched afterwards via acpi_dmar_drhd_fixup().
 *
 * Returns the updated cursor; returns it unchanged when the VT-d BAR
 * resource is absent or its base is zero.
 */
static unsigned long acpi_create_drhd(unsigned long current, struct device *iommu,
	const IIO_UDS *hob)
{
	/* Remember the DRHD start for the length fixup at the end */
	unsigned long tmp = current;

	struct resource *resource;
	resource = probe_resource(iommu, VTD_BAR_CSR);
	if (!resource)
		return current;

	uint32_t reg_base = resource->base;
	if (!reg_base)
		return current;

	const uint32_t bus = iommu->upstream->secondary;
	uint32_t pcie_seg = iommu->upstream->segment_group;
	int socket = iio_pci_domain_socket_from_dev(iommu);
	int stack = iio_pci_domain_stack_from_dev(iommu);

	printk(BIOS_SPEW, "%s socket: %d, stack: %d, bus: 0x%x, pcie_seg: 0x%x, reg_base: 0x%x\n",
		__func__, socket, stack, bus, pcie_seg, reg_base);

	// Add DRHD Hardware Unit

	/* The domain-0 unit carries DRHD_INCLUDE_PCI_ALL (it covers every
	   device not claimed by another DRHD); per acpi_fill_dmar() it is
	   emitted last. */
	if (is_dev_on_domain0(iommu)) {
		printk(BIOS_DEBUG, "[Hardware Unit Definition] Flags: 0x%x, PCI Segment Number: 0x%x, "
			"Register Base Address: 0x%x\n",
			DRHD_INCLUDE_PCI_ALL, pcie_seg, reg_base);
		current += acpi_create_dmar_drhd(current, DRHD_INCLUDE_PCI_ALL,
			pcie_seg, reg_base, vtd_probe_bar_size(iommu));
	} else {
		printk(BIOS_DEBUG, "[Hardware Unit Definition] Flags: 0x%x, PCI Segment Number: 0x%x, "
			"Register Base Address: 0x%x\n", 0, pcie_seg, reg_base);
		current += acpi_create_dmar_drhd(current, 0, pcie_seg, reg_base,
			vtd_probe_bar_size(iommu));
	}

	// Add PCH IOAPIC
	if (is_dev_on_domain0(iommu)) {
		union p2sb_bdf ioapic_bdf = soc_get_ioapic_bdf();
		printk(BIOS_DEBUG, "    [IOAPIC Device] Enumeration ID: 0x%x, PCI Bus Number: 0x%x, "
		       "PCI Path: 0x%x, 0x%x\n", get_ioapic_id(IO_APIC_ADDR), ioapic_bdf.bus,
		       ioapic_bdf.dev, ioapic_bdf.fn);
		current += acpi_create_dmar_ds_ioapic_from_hw(current,
				IO_APIC_ADDR, ioapic_bdf.bus, ioapic_bdf.dev, ioapic_bdf.fn);
	}

/* SPR has no per stack IOAPIC or CBDMA devices */
#if CONFIG(SOC_INTEL_SKYLAKE_SP) || CONFIG(SOC_INTEL_COOPERLAKE_SP)
	uint32_t enum_id;
	// Add IOAPIC entry
	enum_id = soc_get_iio_ioapicid(socket, stack);
	printk(BIOS_DEBUG, "    [IOAPIC Device] Enumeration ID: 0x%x, PCI Bus Number: 0x%x, "
		"PCI Path: 0x%x, 0x%x\n", enum_id, bus, APIC_DEV_NUM, APIC_FUNC_NUM);
	current += acpi_create_dmar_ds_ioapic(current, enum_id, bus,
		APIC_DEV_NUM, APIC_FUNC_NUM);

	// Add CBDMA devices for CSTACK
	if (socket != 0 && stack == CSTACK) {
		/* CBDMA exposes functions 0-7 on the stack bus */
		for (int cbdma_func_id = 0; cbdma_func_id < 8; ++cbdma_func_id) {
			printk(BIOS_DEBUG, "    [PCI Endpoint Device] "
				"PCI Bus Number: 0x%x, PCI Path: 0x%x, 0x%x\n",
				bus, CBDMA_DEV_NUM, cbdma_func_id);
			current += acpi_create_dmar_ds_pci(current,
				bus, CBDMA_DEV_NUM, cbdma_func_id);
		}
	}
#endif

	// Add PCIe Ports
	if (!is_dev_on_domain0(iommu)) {
		const struct device *domain = dev_get_domain(iommu);
		struct device *dev = NULL;
		while ((dev = dev_bus_each_child(domain->downstream, dev)))
			if (is_pci_bridge(dev))
				current +=
				acpi_create_dmar_ds_pci_br_for_port(
				current, dev, pcie_seg, false, NULL);

#if CONFIG(SOC_INTEL_SKYLAKE_SP) || CONFIG(SOC_INTEL_COOPERLAKE_SP)
		// Add VMD
		if (hob->PlatformData.VMDStackEnable[socket][stack] &&
			stack >= PSTACK0 && stack <= PSTACK2) {
			printk(BIOS_DEBUG, "    [PCI Endpoint Device] "
				"PCI Bus Number: 0x%x, PCI Path: 0x%x, 0x%x\n",
				 bus, VMD_DEV_NUM, VMD_FUNC_NUM);
			current += acpi_create_dmar_ds_pci(current,
				bus, VMD_DEV_NUM, VMD_FUNC_NUM);
		}
#endif
	}

	// Add IOAT End Points (with memory resources. We don't report every End Point device.)
	if (CONFIG(HAVE_IOAT_DOMAINS) && is_dev_on_ioat_domain(iommu)) {
		struct device *dev = NULL;
		while ((dev = dev_find_all_devices_on_stack(socket, stack,
			XEONSP_VENDOR_MAX, XEONSP_DEVICE_MAX, dev)))
			/* This may also require a check for IORESOURCE_PREFETCH,
			 * but that would not include the FPU (4942/0) */
			if ((dev->resource_list->flags &
				(IORESOURCE_MEM | IORESOURCE_PCI64 | IORESOURCE_ASSIGNED)) ==
				(IORESOURCE_MEM | IORESOURCE_PCI64 | IORESOURCE_ASSIGNED)) {
				const uint32_t b = dev->upstream->secondary;
				const uint32_t d = PCI_SLOT(dev->path.pci.devfn);
				const uint32_t f = PCI_FUNC(dev->path.pci.devfn);
				printk(BIOS_DEBUG, "    [PCIE Endpoint Device] %s\n", dev_path(dev));
				current += acpi_create_dmar_ds_pci(current, b, d, f);
			}
	}

	// Add HPET
	if (is_dev_on_domain0(iommu)) {
		uint16_t hpet_capid = read16p(HPET_BASE_ADDRESS);
		uint16_t num_hpets = (hpet_capid >> 0x08) & 0x1F;  // Bits [8:12] has hpet count
		printk(BIOS_SPEW, "%s hpet_capid: 0x%x, num_hpets: 0x%x\n",
			__func__, hpet_capid, num_hpets);
		//BIT 15
		/* Only report the HPET when general config register bit 15 is
		   set and the timer count is valid (nonzero and not all-ones). */
		if (num_hpets && (num_hpets != 0x1f) &&
			(read32p(HPET_BASE_ADDRESS + 0x100) & (0x00008000))) {
			union p2sb_bdf hpet_bdf = soc_get_hpet_bdf();
			printk(BIOS_DEBUG, "    [Message-capable HPET Device] Enumeration ID: 0x%x, "
				"PCI Bus Number: 0x%x, PCI Path: 0x%x, 0x%x\n",
				0, hpet_bdf.bus, hpet_bdf.dev, hpet_bdf.fn);
			current += acpi_create_dmar_ds_msi_hpet(current, 0, hpet_bdf.bus,
				hpet_bdf.dev, hpet_bdf.fn);
		}
	}

	/* Patch the DRHD length now that all device scopes are emitted */
	acpi_dmar_drhd_fixup(tmp, current);

	return current;
}
379 
/*
 * Emit ATSR (Address Translation Services Reporting) structures: at most
 * one per socket, each listing the PCIe bridge ports below VT-d engines
 * that advertise device-IOTLB support. The structure length is patched at
 * the end of each socket via acpi_dmar_atsr_fixup().
 */
static unsigned long acpi_create_atsr(unsigned long current)
{
	struct device *child, *dev;
	struct resource *resource;

	/*
	 * The assumption made here is that the host bridges on a socket share the
	 * PCI segment group and thus only one ATSR header needs to be emitted for
	 * a single socket.
	 * This is easier than to sort the host bridges by PCI segment group first
	 * and then generate one ATSR header for every new segment.
	 */
	for (int socket = 0; socket < CONFIG_MAX_SOCKET; ++socket) {
		if (!soc_cpu_is_enabled(socket))
			continue;
		/* tmp marks this socket's ATSR start; first tracks whether
		   the ATSR header has been emitted yet. */
		unsigned long tmp = current;
		bool first = true;

		dev = NULL;
		while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
			/* Only add devices for the current socket */
			if (iio_pci_domain_socket_from_dev(dev) != socket)
				continue;
			/* See if there is a resource with the appropriate index. */
			resource = probe_resource(dev, VTD_BAR_CSR);
			if (!resource)
				continue;
			int stack = iio_pci_domain_stack_from_dev(dev);

			uint64_t vtd_mmio_cap = read64(res2mmio(resource, VTD_EXT_CAP_LOW, 0));
			printk(BIOS_SPEW, "%s socket: %d, stack: %d, bus: 0x%x, vtd_base: %p, "
				"vtd_mmio_cap: 0x%llx\n",
				__func__, socket, stack, dev->upstream->secondary,
				res2mmio(resource, 0, 0), vtd_mmio_cap);

			// ATSR is applicable only for platform supporting device IOTLBs
			// through the VT-d extended capability register
			assert(vtd_mmio_cap != 0xffffffffffffffff);
			if ((vtd_mmio_cap & 0x4) == 0) // BIT 2
				continue;

			/* Skip the engine on segment 0, bus 0 (domain 0) */
			if (dev->upstream->secondary == 0 && dev->upstream->segment_group == 0)
				continue;

			/* Report every PCI bridge below this engine's bus */
			for (child = dev->upstream->children; child; child = child->sibling) {
				if (!is_pci_bridge(child))
					continue;
				current +=
					acpi_create_dmar_ds_pci_br_for_port(
					current, child, child->upstream->segment_group, true, &first);
			}
		}
		/* Fix up the length only if any entry was actually emitted */
		if (tmp != current)
			acpi_dmar_atsr_fixup(tmp, current);
	}

	return current;
}
438 
/*
 * RMRR: this platform reports no Reserved Memory Region Reporting
 * structures, so the cursor is returned unchanged.
 */
static unsigned long acpi_create_rmrr(unsigned long current)
{
	/* Intentionally empty */
	return current;
}
443 
acpi_create_rhsa(unsigned long current)444 static unsigned long acpi_create_rhsa(unsigned long current)
445 {
446 	struct device *dev = NULL;
447 	struct resource *resource;
448 
449 	while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
450 		/* See if there is a resource with the appropriate index. */
451 		resource = probe_resource(dev, VTD_BAR_CSR);
452 		if (!resource)
453 			continue;
454 
455 		printk(BIOS_DEBUG, "[Remapping Hardware Static Affinity] Base Address: %p, "
456 			"Proximity Domain: 0x%x\n", res2mmio(resource, 0, 0), device_to_pd(dev));
457 		current += acpi_create_dmar_rhsa(current, (uintptr_t)res2mmio(resource, 0, 0), device_to_pd(dev));
458 	}
459 
460 	return current;
461 }
462 
xeonsp_create_satc(unsigned long current,struct device * domain)463 static unsigned long xeonsp_create_satc(unsigned long current, struct device *domain)
464 {
465 	struct device *dev = NULL;
466 	while ((dev = dev_bus_each_child(domain->downstream, dev))) {
467 		if (pciexp_find_extended_cap(dev, PCIE_EXT_CAP_ID_ATS, 0)) {
468 			const uint32_t b = domain->downstream->secondary;
469 			const uint32_t d = PCI_SLOT(dev->path.pci.devfn);
470 			const uint32_t f = PCI_FUNC(dev->path.pci.devfn);
471 			printk(BIOS_DEBUG, "    [SATC Endpoint Device] %s\n", dev_path(dev));
472 			current += acpi_create_dmar_ds_pci(current, b, d, f);
473 		}
474 	}
475 	return current;
476 }
477 
478 /* SoC Integrated Address Translation Cache */
static unsigned long acpi_create_satc(unsigned long current)
{
	/* tmp marks the start of the currently open SATC header; seg is the
	   last PCI segment group seen (~0 = none yet). */
	unsigned long tmp = current, seg = ~0;
	struct device *dev;

	/*
	 * Best case only PCI segment group count SATC headers are emitted, worst
	 * case for every SATC entry a new SATC header is being generated.
	 *
	 * The assumption made here is that the host bridges on a socket share the
	 * PCI segment group and thus only one SATC header needs to be emitted for
	 * a single socket.
	 * This is easier than to sort the host bridges by PCI segment group first
	 * and then generate one SATC header for every new segment.
	 *
	 * With this assumption the best case scenario should always be used.
	 */
	for (int socket = 0; socket < CONFIG_MAX_SOCKET; ++socket) {
		if (!soc_cpu_is_enabled(socket))
			continue;

		dev = NULL;
		while ((dev = dev_find_path(dev, DEVICE_PATH_DOMAIN))) {
			/* Only add devices for the current socket */
			if (iio_pci_domain_socket_from_dev(dev) != socket)
				continue;

			/* A new segment group starts a new SATC header */
			if (seg != dev->downstream->segment_group) {
				// Close previous header
				if (tmp != current)
					acpi_dmar_satc_fixup(tmp, current);

				seg = dev->downstream->segment_group;
				tmp = current;
				printk(BIOS_DEBUG, "[SATC Segment Header] "
				       "Flags: 0x%x, PCI segment group: %lx\n", 0, seg);
				// Add the SATC header
				current += acpi_create_dmar_satc(current, 0, seg);
			}
			/* Emit the ATS-capable endpoints below this domain */
			current = xeonsp_create_satc(current, dev);
		}
	}
	/* Close the final header, if one was opened */
	if (tmp != current)
		acpi_dmar_satc_fixup(tmp, current);

	return current;
}
526 
acpi_fill_dmar(unsigned long current)527 static unsigned long acpi_fill_dmar(unsigned long current)
528 {
529 	const IIO_UDS *hob = get_iio_uds();
530 
531 	// DRHD - iommu0 must be the last DRHD entry.
532 	struct device *dev = NULL;
533 	struct device *iommu0 = NULL;
534 	while ((dev = dev_find_device(PCI_VID_INTEL, MMAP_VTD_CFG_REG_DEVID, dev))) {
535 		if (is_domain0(dev_get_domain(dev))) {
536 			iommu0 = dev;
537 			continue;
538 		}
539 		current = acpi_create_drhd(current, dev, hob);
540 	}
541 	assert(iommu0);
542 	current = acpi_create_drhd(current, iommu0, hob);
543 
544 	// RMRR
545 	current = acpi_create_rmrr(current);
546 
547 	// Root Port ATS Capability
548 	current = acpi_create_atsr(current);
549 
550 	// RHSA
551 	current = acpi_create_rhsa(current);
552 
553 	// SATC
554 	current = acpi_create_satc(current);
555 
556 	return current;
557 }
558 
/*
 * Build and register the uncore ACPI tables for domain 0: SRAT, SLIT,
 * HMAT/CEDT (CXL only), DMAR (when VT-d is enabled in devicetree config)
 * and HEST (when SOC_ACPI_HEST is set).
 *
 * @device:  the domain device being processed; non-zero domains are skipped
 * @current: current table write cursor
 * @rsdp:    RSDP to register the new tables with
 * @return updated write cursor
 */
unsigned long northbridge_write_acpi_tables(const struct device *device, unsigned long current,
					    struct acpi_rsdp *rsdp)
{
	/* Only write uncore ACPI tables for domain0 */
	if (device->path.domain.domain != 0)
		return current;

	acpi_srat_t *srat;
	acpi_slit_t *slit;
	acpi_dmar_t *dmar;
	acpi_hmat_t *hmat;
	acpi_cedt_t *cedt;

	const config_t *const config = config_of(device);

	/* SRAT */
	current = ALIGN_UP(current, 8);
	printk(BIOS_DEBUG, "ACPI:    * SRAT at %lx\n", current);
	srat = (acpi_srat_t *)current;
	acpi_create_srat(srat, acpi_fill_srat);
	current += srat->header.length;
	acpi_add_table(rsdp, srat);

	/* SLIT */
	current = ALIGN_UP(current, 8);
	printk(BIOS_DEBUG, "ACPI:   * SLIT at %lx\n", current);
	slit = (acpi_slit_t *)current;
	acpi_create_slit(slit, acpi_fill_slit);
	current += slit->header.length;
	acpi_add_table(rsdp, slit);

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		/* HMAT*/
		current = ALIGN_UP(current, 8);
		printk(BIOS_DEBUG, "ACPI:    * HMAT at %lx\n", current);
		hmat = (acpi_hmat_t *)current;
		acpi_create_hmat(hmat, acpi_fill_hmat);
		current += hmat->header.length;
		acpi_add_table(rsdp, hmat);
	}

	/* DMAR */
	if (config->vtd_support) {
		current = ALIGN_UP(current, 8);
		dmar = (acpi_dmar_t *)current;
		enum dmar_flags flags = DMAR_INTR_REMAP;

		/* SKX FSP doesn't support X2APIC, but CPX FSP does */
		if (CONFIG(SOC_INTEL_SKYLAKE_SP))
			flags |= DMAR_X2APIC_OPT_OUT;

		printk(BIOS_DEBUG, "ACPI:    * DMAR at %lx\n", current);
		printk(BIOS_DEBUG, "[DMA Remapping table] Flags: 0x%x\n", flags);
		acpi_create_dmar(dmar, flags, acpi_fill_dmar);
		current += dmar->header.length;
		current = acpi_align_current(current);
		acpi_add_table(rsdp, dmar);
	}

	if (CONFIG(SOC_INTEL_HAS_CXL)) {
		/* CEDT: CXL Early Discovery Table */
		/* Only emitted when at least one CXL node is present */
		if (get_cxl_node_count() > 0) {
			current = ALIGN_UP(current, 8);
			printk(BIOS_DEBUG, "ACPI:    * CEDT at %lx\n", current);
			cedt = (acpi_cedt_t *)current;
			acpi_create_cedt(cedt, acpi_fill_cedt);
			current += cedt->header.length;
			acpi_add_table(rsdp, cedt);
		}
	}

	if (CONFIG(SOC_ACPI_HEST)) {
		printk(BIOS_DEBUG, "ACPI:    * HEST at %lx\n", current);
		/* hest_create() registers the table with the RSDP itself */
		current = hest_create(current, rsdp);
	}

	return current;
}
637