/* SPDX-License-Identifier: GPL-2.0-only */

#include <commonlib/helpers.h>
#include <console/console.h>
#include <acpi/acpi.h>
#include <delay.h>
#include <cpu/intel/haswell/haswell.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <device/pci_ops.h>
#include <boot/tables.h>
#include <security/intel/txt/txt_register.h>
#include <southbridge/intel/lynxpoint/pch.h>
#include <types.h>

#include "chip.h"
#include "haswell.h"

static const char *northbridge_acpi_name(const struct device *dev)
{
	if (dev->path.type == DEVICE_PATH_DOMAIN)
		return "PCI0";

	if (!is_pci_dev_on_bus(dev, 0))
		return NULL;

	switch (dev->path.pci.devfn) {
	case PCI_DEVFN(0, 0):
		return "MCHC";
	}

	return NULL;
}

struct device_operations haswell_pci_domain_ops = {
	.read_resources    = pci_domain_read_resources,
	.set_resources     = pci_domain_set_resources,
	.scan_bus          = pci_host_bridge_scan_bus,
	.acpi_name         = northbridge_acpi_name,
	.write_acpi_tables = northbridge_write_acpi_tables,
};

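/*
 * Read a BAR-style register and report its base if the enable bit is set.
 * Note that only *base is written here: the fixed region size is supplied
 * by the caller from the descriptor table, so *len is left untouched.
 */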
static int get_bar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar = pci_read_config32(dev, index);

	/* If not enabled don't report it */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit */
	*base = bar & ~1;

	return 1;
}

/*
 * There are special BARs that are actually programmed in the MCHBAR. These are Intel
 * special features, but they do consume resources that need to be accounted for.
 */
static int get_bar_in_mchbar(struct device *dev, unsigned int index, u32 *base, u32 *len)
{
	u32 bar = mchbar_read32(index);

	/* If not enabled don't report it */
	if (!(bar & 0x1))
		return 0;

	/* Knock down the enable bit */
	*base = bar & ~1;

	return 1;
}

struct fixed_mmio_descriptor {
	unsigned int index;
	u32 size;
	int (*get_resource)(struct device *dev, unsigned int index, u32 *base, u32 *size);
	const char *description;
};

struct fixed_mmio_descriptor mc_fixed_resources[] = {
	{ MCHBAR,   MCH_BASE_SIZE,   get_bar,           "MCHBAR"   },
	{ DMIBAR,   DMI_BASE_SIZE,   get_bar,           "DMIBAR"   },
	{ EPBAR,    EP_BASE_SIZE,    get_bar,           "EPBAR"    },
	{ GDXCBAR,  GDXC_BASE_SIZE,  get_bar_in_mchbar, "GDXCBAR"  },
	{ EDRAMBAR, EDRAM_BASE_SIZE, get_bar_in_mchbar, "EDRAMBAR" },
};

/* Add all known fixed MMIO ranges that hang off the host bridge/memory controller device. */
static void mc_add_fixed_mmio_resources(struct device *dev)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mc_fixed_resources); i++) {
		u32 base;
		u32 size;
		unsigned int index;

		size = mc_fixed_resources[i].size;
		index = mc_fixed_resources[i].index;
		if (!mc_fixed_resources[i].get_resource(dev, index, &base, &size))
			continue;

		mmio_range(dev, mc_fixed_resources[i].index, base, size);
		printk(BIOS_DEBUG, "%s: Adding %s @ %x 0x%08lx-0x%08lx.\n",
		       __func__, mc_fixed_resources[i].description, index,
		       (unsigned long)base, (unsigned long)(base + size - 1));
	}

	mmconf_resource(dev, PCIEXBAR);
}

/*
 * Host Memory Map:
 *
 * +--------------------------+ TOUUD
 * |                          |
 * +--------------------------+ 4GiB
 * |     PCI Address Space    |
 * +--------------------------+ TOLUD (also maps into MC address space)
 * |     iGD                  |
 * +--------------------------+ BDSM
 * |     GTT                  |
 * +--------------------------+ BGSM
 * |     TSEG                 |
 * +--------------------------+ TSEGMB
 * |     DPR                  |
 * +--------------------------+ (DPR top - DPR size)
 * |     Usable DRAM          |
 * +--------------------------+ 0
 *
 * Some of the base registers above can be equal, making the size of the regions between
 * them 0. This is because the memory controller internally subtracts the base registers
 * from each other to determine sizes of the regions. In other words, the memory map
 * regions are always in a fixed order, no matter what sizes they have.
 */
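
/*
 * For example, when the IGD is disabled, BGSM can equal TOLUD, which collapses the GTT
 * and iGD regions above it to zero size; mc_add_dram_resources() below accounts for this.
 */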

struct map_entry {
	int reg;
	int is_64_bit;
	int is_limit;
	const char *description;
};

static void read_map_entry(struct device *dev, struct map_entry *entry, uint64_t *result)
{
	uint64_t value;
	uint64_t mask;

	/* All registers have a 1MiB granularity */
	mask = ((1ULL << 20) - 1);
	mask = ~mask;

	value = 0;

	if (entry->is_64_bit) {
		value = pci_read_config32(dev, entry->reg + 4);
		value <<= 32;
	}

	value |= pci_read_config32(dev, entry->reg);
	value &= mask;

	if (entry->is_limit)
		value |= ~mask;

	*result = value;
}
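
/*
 * Worked example with hypothetical raw values: a 64-bit base entry whose high dword
 * reads 0x00000001 and whose low dword reads 0x00200fff yields 0x1_0020_0fff, which
 * masking the low 20 bits turns into a base of 0x1_0020_0000. A limit entry with the
 * same raw value instead has its low 20 bits filled with ones, giving an inclusive
 * limit of 0x1_002f_ffff.
 */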

#define MAP_ENTRY(reg_, is_64_, is_limit_, desc_) \
	{ \
		.reg = reg_,           \
		.is_64_bit = is_64_,   \
		.is_limit = is_limit_, \
		.description = desc_,  \
	}

#define MAP_ENTRY_BASE_32(reg_, desc_)	MAP_ENTRY(reg_, 0, 0, desc_)
#define MAP_ENTRY_BASE_64(reg_, desc_)	MAP_ENTRY(reg_, 1, 0, desc_)
#define MAP_ENTRY_LIMIT_64(reg_, desc_)	MAP_ENTRY(reg_, 1, 1, desc_)

enum {
	TOM_REG,
	TOUUD_REG,
	MESEG_BASE_REG,
	MESEG_LIMIT_REG,
	REMAP_BASE_REG,
	REMAP_LIMIT_REG,
	TOLUD_REG,
	BGSM_REG,
	BDSM_REG,
	TSEG_REG,
	/* Must be last */
	NUM_MAP_ENTRIES,
};

static struct map_entry memory_map[NUM_MAP_ENTRIES] = {
	[TOM_REG]         = MAP_ENTRY_BASE_64(TOM, "TOM"),
	[TOUUD_REG]       = MAP_ENTRY_BASE_64(TOUUD, "TOUUD"),
	[MESEG_BASE_REG]  = MAP_ENTRY_BASE_64(MESEG_BASE, "MESEG_BASE"),
	[MESEG_LIMIT_REG] = MAP_ENTRY_LIMIT_64(MESEG_LIMIT, "MESEG_LIMIT"),
	[REMAP_BASE_REG]  = MAP_ENTRY_BASE_64(REMAPBASE, "REMAP_BASE"),
	[REMAP_LIMIT_REG] = MAP_ENTRY_LIMIT_64(REMAPLIMIT, "REMAP_LIMIT"),
	[TOLUD_REG]       = MAP_ENTRY_BASE_32(TOLUD, "TOLUD"),
	[BDSM_REG]        = MAP_ENTRY_BASE_32(BDSM, "BDSM"),
	[BGSM_REG]        = MAP_ENTRY_BASE_32(BGSM, "BGSM"),
	[TSEG_REG]        = MAP_ENTRY_BASE_32(TSEG, "TSEGMB"),
};

static void mc_read_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		read_map_entry(dev, &memory_map[i], &values[i]);
	}
}

static void mc_report_map_entries(struct device *dev, uint64_t *values)
{
	int i;
	for (i = 0; i < NUM_MAP_ENTRIES; i++) {
		printk(BIOS_DEBUG, "MC MAP: %s: 0x%llx\n",
		       memory_map[i].description, values[i]);
	}
	/* One can validate the BDSM and BGSM against the GGC */
	printk(BIOS_DEBUG, "MC MAP: GGC: 0x%x\n", pci_read_config16(dev, GGC));
}

static void mc_add_dram_resources(struct device *dev, int *resource_cnt)
{
	int index;
	uint64_t mc_values[NUM_MAP_ENTRIES];

	/* Read in the MAP registers and report their values */
	mc_read_map_entries(dev, &mc_values[0]);
	mc_report_map_entries(dev, &mc_values[0]);

	/*
	 * DMA Protected Range can be reserved below TSEG for PCODE patch
	 * or TXT/Boot Guard related data.  Rather than report a base address,
	 * the DPR register reports the TOP of the region, which is the same
	 * as TSEG base. The region size is reported in MiB in bits 11:4.
	 */
	const union dpr_register dpr = {
		.raw = pci_read_config32(dev, DPR),
	};
	printk(BIOS_DEBUG, "MC MAP: DPR: 0x%x\n", dpr.raw);

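	/*
	 * For instance (hypothetical values), a DPR size field of 3 reserves the
	 * 3 MiB directly below TSEGMB, so usable DRAM ends at TSEGMB - 3 MiB.
	 */
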
	/*
	 * These are the host memory ranges that should be added:
	 * - 0 -> 0xa0000:    cacheable
	 * - 0xc0000 -> TSEG: cacheable
	 * - TSEG -> BGSM:    cacheable with standard MTRRs and reserved
	 * - BGSM -> TOLUD:   not cacheable with standard MTRRs and reserved
	 * - 4GiB -> TOUUD:   cacheable
	 *
	 * The default SMRAM space is reserved so that the range doesn't have to be saved
	 * during S3 Resume. Once marked reserved the OS cannot use the memory. This is a
	 * bit of an odd place to reserve the region, but the CPU devices don't have
	 * dev_ops->read_resources() called on them.
	 *
	 * The range 0xa0000 -> 0xc0000 does not have any resources associated with it to
	 * handle legacy VGA memory. If this range is not omitted the mtrr code will setup
	 * the area as cacheable, causing VGA access to not work.
	 *
	 * The TSEG region is mapped as cacheable so that one can perform SMRAM relocation
	 * faster. Once the SMRR is enabled, the SMRR takes precedence over the existing
	 * MTRRs covering this region.
	 *
	 * It should be noted that cacheable entry types need to be added in order. The reason
	 * is that the current MTRR code assumes this and falls over itself if it isn't.
	 *
	 * The resource index starts low and should not meet or exceed PCI_BASE_ADDRESS_0.
	 */
	index = *resource_cnt;

	/*
	 * 0 -> 0xa0000: RAM
	 * 0xa0000 - 0xbffff: Legacy VGA
	 * 0xc0000 - 0xfffff: RAM
	 */

	ram_range(dev, index++, 0, 0xa0000);
	mmio_from_to(dev, index++, 0xa0000, 0xc0000);
	reserved_ram_from_to(dev, index++, 0xc0000, 1 * MiB);

	/* 1MiB -> TSEG - DPR */
	ram_from_to(dev, index++, 1 * MiB, mc_values[TSEG_REG] - dpr.size * MiB);

	/* TSEG - DPR -> BGSM */
	reserved_ram_from_to(dev, index++, mc_values[TSEG_REG] - dpr.size * MiB,
			     mc_values[BGSM_REG]);

	/* BGSM -> TOLUD. If the IGD is disabled, BGSM can equal TOLUD. */
	if (mc_values[BGSM_REG] != mc_values[TOLUD_REG])
		mmio_from_to(dev, index++, mc_values[BGSM_REG], mc_values[TOLUD_REG]);

	/* 4GiB -> TOUUD */
	upper_ram_end(dev, index++, mc_values[TOUUD_REG]);

	*resource_cnt = index;
}
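
/*
 * To illustrate with hypothetical register values: TSEGMB = 0x7b000000, a DPR size of
 * 3 MiB, BGSM = 0x7c000000, TOLUD = 0x80000000 and TOUUD = 0x280000000 would produce,
 * besides the fixed low-memory entries, RAM from 1 MiB to 0x7ad00000, reserved RAM up
 * to 0x7c000000, MMIO up to 0x80000000, and upper RAM from 4 GiB to 0x280000000.
 */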

static void mc_read_resources(struct device *dev)
{
	int index = 0;
	const bool vtd_capable = !(pci_read_config32(dev, CAPID0_A) & VTD_DISABLE);

	/* Read standard PCI resources */
	pci_dev_read_resources(dev);

	/* Add all fixed MMIO resources */
	mc_add_fixed_mmio_resources(dev);

	/* Add VT-d MMIO resources, if capable */
	if (vtd_capable) {
		mmio_range(dev, index++, GFXVT_BASE_ADDRESS, GFXVT_BASE_SIZE);
		mmio_range(dev, index++, VTVC0_BASE_ADDRESS, VTVC0_BASE_SIZE);
	}

	/* Calculate and add DRAM resources */
	mc_add_dram_resources(dev, &index);
}

/*
 * The Mini-HD audio device is disabled whenever the IGD is. This is because it provides
 * audio over the integrated graphics port(s), which requires the IGD to be functional.
 */
static void disable_devices(void)
{
	static const struct {
		const unsigned int devfn;
		const u32 mask;
		const char *const name;
	} nb_devs[] = {
		{ PCI_DEVFN(1, 2), DEVEN_D1F2EN, "PEG12" },
		{ PCI_DEVFN(1, 1), DEVEN_D1F1EN, "PEG11" },
		{ PCI_DEVFN(1, 0), DEVEN_D1F0EN, "PEG10" },
		{ PCI_DEVFN(2, 0), DEVEN_D2EN | DEVEN_D3EN, "IGD" },
		{ PCI_DEVFN(3, 0), DEVEN_D3EN, "Mini-HD audio" },
		{ PCI_DEVFN(4, 0), DEVEN_D4EN, "\"device 4\"" },
		{ PCI_DEVFN(7, 0), DEVEN_D7EN, "\"device 7\"" },
	};

	struct device *host_dev = pcidev_on_root(0, 0);
	u32 deven;
	size_t i;

	if (!host_dev)
		return;

	deven = pci_read_config32(host_dev, DEVEN);

	for (i = 0; i < ARRAY_SIZE(nb_devs); i++) {
		struct device *dev = pcidev_path_on_root(nb_devs[i].devfn);
		if (!dev || !dev->enabled) {
			printk(BIOS_DEBUG, "Disabling %s.\n", nb_devs[i].name);
			deven &= ~nb_devs[i].mask;
		}
	}

	pci_write_config32(host_dev, DEVEN, deven);
}

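/*
 * Program the Egress Port virtual channels. Per the standard PCIe VC Resource Control
 * register layout, bit 31 enables the VC, bits 26:24 hold the VC ID, and the low byte
 * is the TC/VC map, which is what the constants below encode.
 */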
static void init_egress(void)
{
	/* VC0: Enable, ID0, TC0 */
	epbar_write32(EPVC0RCTL, 1 << 31 | 0 << 24 | 1 << 0);

	/* No Low Priority Extended VCs, one Extended VC */
	epbar_write32(EPPVCCAP1, 0 << 4 | 1 << 0);

	/* VC1: Enable, ID1, TC1 */
	epbar_write32(EPVC1RCTL, 1 << 31 | 1 << 24 | 1 << 1);

	/* Poll the VC1 Negotiation Pending bit */
	while ((epbar_read16(EPVC1RSTS) & (1 << 1)) != 0)
		;
}

static void northbridge_dmi_init(void)
{
	const bool is_haswell_h = !CONFIG(INTEL_LYNXPOINT_LP);

	/* Steps prior to DMI ASPM */
	if (is_haswell_h) {
		/* Configure DMI De-Emphasis */
		dmibar_setbits16(DMILCTL2, 1 << 6);	/* 0b: -6.0 dB, 1b: -3.5 dB */

		dmibar_setbits32(DMIL0SLAT, 1 << 31);
		dmibar_setbits32(DMILLTC, 1 << 29);

		dmibar_clrsetbits32(DMI_AFE_PM_TMR, 0x1f, 0x13);
	}

	/* Clear the sticky error status bits (write-1-to-clear) */
	dmibar_write32(DMIUESTS, 0xffffffff);
	dmibar_write32(DMICESTS, 0xffffffff);

	if (is_haswell_h) {
		/* Enable ASPM L0s and L1 on SA link, should happen before PCH link */
		dmibar_setbits16(DMILCTL, 1 << 1 | 1 << 0);
	}
}

static void northbridge_topology_init(void)
{
	const u32 eple_a[3] = { EPLE2A, EPLE3A, EPLE4A };
	const u32 eple_d[3] = { EPLE2D, EPLE3D, EPLE4D };

	/* Set the CID1 Egress Port 0 Root Topology */
	epbar_clrsetbits32(EPESD, 0xff << 16, 1 << 16);

	epbar_clrsetbits32(EPLE1D, 0xff << 16, 1 | 1 << 16);
	epbar_write32(EPLE1A, CONFIG_FIXED_DMIBAR_MMIO_BASE);
	epbar_write32(EPLE1A + 4, 0);

	for (unsigned int i = 0; i <= 2; i++) {
		const struct device *const dev = pcidev_on_root(1, i);

		if (!dev || !dev->enabled)
			continue;

		epbar_write32(eple_a[i], (u32)PCI_DEV(0, 1, i));
		epbar_write32(eple_a[i] + 4, 0);

		epbar_clrsetbits32(eple_d[i], 0xff << 16, 1 | 1 << 16);

		pci_update_config32(dev, PEG_ESD, ~(0xff << 16), (1 << 16));
		pci_write_config32(dev, PEG_LE1A, CONFIG_FIXED_EPBAR_MMIO_BASE);
		pci_write_config32(dev, PEG_LE1A + 4, 0);
		pci_update_config32(dev, PEG_LE1D, ~(0xff << 16), (1 << 16) | 1);

		/* Read and write to lock register */
		pci_or_config32(dev, PEG_DCAP2, 0);
	}

	/* Set the CID1 DMI Port Root Topology */
	dmibar_clrsetbits32(DMIESD, 0xff << 16, 1 << 16);

	dmibar_clrsetbits32(DMILE1D, 0xffff << 16, 1 | 2 << 16);
	dmibar_write32(DMILE1A, CONFIG_FIXED_RCBA_MMIO_BASE);
	dmibar_write32(DMILE1A + 4, 0);

	dmibar_write32(DMILE2A, CONFIG_FIXED_EPBAR_MMIO_BASE);
	dmibar_write32(DMILE2A + 4, 0);
	dmibar_clrsetbits32(DMILE2D, 0xff << 16, 1 | 1 << 16);

	/* Program RO and Write-Once registers: setting no bits performs a plain
	   read and write-back, analogous to the PEG_DCAP2 lock above */
	dmibar_setbits32(DMIPVCCAP1, 0);
	dmibar_setbits32(DMILCAP, 0);
}

static void northbridge_init(struct device *dev)
{
	init_egress();
	northbridge_dmi_init();
	northbridge_topology_init();

	/* Enable Power Aware Interrupt Routing */
	mchbar_clrsetbits8(INTRDIRCTL, 0x7, 0x4);	/* Clear 2:0, set Fixed Priority */

	disable_devices();

	/*
	 * Set bits 0 + 1 of BIOS_RESET_CPL to indicate to the CPU
	 * that BIOS has initialized memory and power management.
	 */
	mchbar_setbits8(BIOS_RESET_CPL, 3);
	printk(BIOS_DEBUG, "Set BIOS_RESET_CPL\n");

	/* Configure turbo power limits 1 ms after setting the reset-complete bit */
	mdelay(1);
	set_power_limits(28);
}

static void northbridge_final(struct device *dev)
{
	pci_or_config16(dev, GGC,         1 << 0);
	pci_or_config32(dev, DPR,         1 << 0);
	pci_or_config32(dev, MESEG_LIMIT, 1 << 10);
	pci_or_config32(dev, REMAPBASE,   1 << 0);
	pci_or_config32(dev, REMAPLIMIT,  1 << 0);
	pci_or_config32(dev, TOM,         1 << 0);
	pci_or_config32(dev, TOUUD,       1 << 0);
	pci_or_config32(dev, BDSM,        1 << 0);
	pci_or_config32(dev, BGSM,        1 << 0);
	pci_or_config32(dev, TSEG,        1 << 0);
	pci_or_config32(dev, TOLUD,       1 << 0);

	/* Memory Controller Lockdown */
	mchbar_setbits32(MC_LOCK, 0x8f);

	mchbar_setbits32(MMIO_PAVP_MSG, 1 << 0);	/* PAVP */
	mchbar_setbits32(PCU_DDR_PTM_CTL, 1 << 5);	/* DDR PTM */
	mchbar_setbits32(DMIVCLIM, 1 << 31);
	mchbar_setbits32(CRDTLCK, 1 << 0);
	mchbar_setbits32(MCARBLCK, 1 << 0);
	mchbar_setbits32(REQLIM, 1 << 31);
	mchbar_setbits32(UMAGFXCTL, 1 << 0);		/* UMA GFX */
	mchbar_setbits32(VTDTRKLCK, 1 << 0);		/* VTDTRK */

	/* Read and write back the following to lock them */
	mchbar_setbits32(VDMBDFBARKVM, 0);
	mchbar_setbits32(VDMBDFBARPAVP, 0);
	mchbar_setbits32(HDAUDRID, 0);
}

static struct device_operations mc_ops = {
	.read_resources		= mc_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.init			= northbridge_init,
	.final			= northbridge_final,
	.ops_pci		= &pci_dev_ops_pci,
};

static const unsigned short mc_pci_device_ids[] = {
	0x0c00, /* Desktop */
	0x0c04, /* Mobile */
	0x0a04, /* ULT */
	0x0c08, /* Server */
	0x0d00, /* Crystal Well Desktop */
	0x0d04, /* Crystal Well Mobile */
	0x0d08, /* Crystal Well Server (by extrapolation) */
	0
};

static const struct pci_driver mc_driver_hsw __pci_driver = {
	.ops     = &mc_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = mc_pci_device_ids,
};

struct device_operations haswell_cpu_bus_ops = {
	.read_resources   = noop_read_resources,
	.set_resources    = noop_set_resources,
	.init             = mp_cpu_bus_init,
	.acpi_fill_ssdt   = generate_cpu_entries,
};

struct chip_operations northbridge_intel_haswell_ops = {
	.name = "Intel Haswell integrated Northbridge",
};