/* SPDX-License-Identifier: GPL-2.0-only */

/*
 * This is a ramstage driver for the Intel Management Engine found in the
 * Lynx Point (8-series) PCH.  It handles the required boot-time messages over
 * the MMIO-based Management Engine Interface to tell the ME that the BIOS is
 * finished with POST.  Additional messages are defined for debug but are
 * not used unless the console loglevel is high enough.
 */

#include <acpi/acpi.h>
#include <device/mmio.h>
#include <device/pci_ops.h>
#include <console/console.h>
#include <device/device.h>
#include <device/pci.h>
#include <device/pci_ids.h>
#include <string.h>
#include <delay.h>
#include <elog.h>
#include <stdlib.h>

#include "chip.h"
#include "me.h"
#include "pch.h"

#include <vendorcode/google/chromeos/chromeos.h>

/* Path that the BIOS should take based on ME state */
static const char *const me_bios_path_values[] = {
	[ME_NORMAL_BIOS_PATH]		= "Normal",
	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
	[ME_ERROR_BIOS_PATH]		= "Error",
	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
	[ME_DISABLE_BIOS_PATH]		= "Disable",
	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
};

/* MMIO base address for MEI interface */
static u8 *mei_base_address;

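/*
 * Dump a single MEI register access at SPEW level.  CSR accesses are decoded
 * into their bitfields; circular buffer accesses are printed raw.  Does
 * nothing unless CONFIG(DEBUG_INTEL_ME) is enabled.
 */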
static void mei_dump(u32 dword, int offset, const char *type)
{
	union mei_csr csr;

	if (!CONFIG(DEBUG_INTEL_ME))
		return;

	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);

	switch (offset) {
	case MEI_H_CSR:
	case MEI_ME_CSR_HA:
		csr.raw = dword;
		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
		       "reset=%u ig=%u is=%u ie=%u\n", csr.buffer_depth,
		       csr.buffer_read_ptr, csr.buffer_write_ptr,
		       csr.ready, csr.reset, csr.interrupt_generate,
		       csr.interrupt_status, csr.interrupt_enable);
		break;
	case MEI_ME_CB_RW:
	case MEI_H_CB_WW:
		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
		break;
	default:
		printk(BIOS_SPEW, "0x%08x\n", dword);
		break;
	}
}

/*
 * ME/MEI access helpers using the MMIO read32/write32 accessors.
 */

static inline union mei_csr read_host_csr(void)
{
	union mei_csr csr = { .raw = read32(mei_base_address + MEI_H_CSR) };
	mei_dump(csr.raw, MEI_H_CSR, "READ");
	return csr;
}

static inline void write_host_csr(union mei_csr csr)
{
	write32(mei_base_address + MEI_H_CSR, csr.raw);
	mei_dump(csr.raw, MEI_H_CSR, "WRITE");
}

static inline union mei_csr read_me_csr(void)
{
	union mei_csr csr = { .raw = read32(mei_base_address + MEI_ME_CSR_HA) };
	mei_dump(csr.raw, MEI_ME_CSR_HA, "READ");
	return csr;
}

static inline void write_cb(u32 dword)
{
	write32(mei_base_address + MEI_H_CB_WW, dword);
	mei_dump(dword, MEI_H_CB_WW, "WRITE");
}

static inline u32 read_cb(void)
{
	u32 dword = read32(mei_base_address + MEI_ME_CB_RW);
	mei_dump(dword, MEI_ME_CB_RW, "READ");
	return dword;
}

/* Wait for ME ready bit to be asserted */
static int mei_wait_for_me_ready(void)
{
	union mei_csr me;
	unsigned int try = ME_RETRY;

	while (try--) {
		me = read_me_csr();
		if (me.ready)
			return 0;
		udelay(ME_DELAY);
	}

	printk(BIOS_ERR, "ME: failed to become ready\n");
	return -1;
}

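/*
 * Reset both circular buffers by toggling the host reset bit, then mark the
 * host side ready again so the next message can be sent.
 */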
static void mei_reset(void)
{
	union mei_csr host;

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Reset host and ME circular buffers for next message */
	host = read_host_csr();
	host.reset = 1;
	host.interrupt_generate = 1;
	write_host_csr(host);

	if (mei_wait_for_me_ready() < 0)
		return;

	/* Re-init and indicate host is ready */
	host = read_host_csr();
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(host);
}

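/*
 * Write one MEI header plus its payload dwords into the host circular buffer
 * and interrupt the ME.  The buffers are reset first if the packet does not
 * fit in the space that is currently available.
 */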
static int mei_send_packet(union mei_header *mei, void *req_data)
{
	union mei_csr host;
	unsigned int ndata, n;
	u32 *data;

	/* Number of dwords to write */
	ndata = mei->length >> 2;

	/* Pad non-dword aligned request message length */
	if (mei->length & 3)
		ndata++;
	if (!ndata) {
		printk(BIOS_DEBUG, "ME: request has no data\n");
		return -1;
	}
	ndata++; /* Add MEI header */

	/*
	 * Make sure there is still room left in the circular buffer.
	 * Reset the buffer pointers if the requested message will not fit.
	 */
	host = read_host_csr();
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
		mei_reset();
		host = read_host_csr();
	}

	/* Ensure the requested length will fit in the circular buffer. */
	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
		       ndata + 2, host.buffer_depth);
		return -1;
	}

	/* Write MEI header */
	write_cb(mei->raw);
	ndata--;

	/* Write message data */
	data = req_data;
	for (n = 0; n < ndata; ++n)
		write_cb(*data++);

	/* Generate interrupt to the ME */
	host = read_host_csr();
	host.interrupt_generate = 1;
	write_host_csr(host);

	/* Make sure ME is ready after sending request data */
	return mei_wait_for_me_ready();
}

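/*
 * Send an arbitrarily sized request by splitting it into as many packets as
 * the circular buffer requires; the final fragment is marked is_complete.
 */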
static int mei_send_data(u8 me_address, u8 host_address,
			 void *req_data, int req_bytes)
{
	union mei_header header = {
		.client_address = me_address,
		.host_address = host_address,
	};
	union mei_csr host;
	int current = 0;
	u8 *req_ptr = req_data;

	while (!header.is_complete) {
		int remain = req_bytes - current;
		int buf_len;

		host = read_host_csr();
		buf_len = host.buffer_depth - host.buffer_write_ptr;

		if (buf_len > remain) {
			/* Send all remaining data as final message */
			header.length = req_bytes - current;
			header.is_complete = 1;
		} else {
			/* Send as much data as the buffer can hold */
			header.length = buf_len;
		}

		mei_send_packet(&header, req_ptr);

		req_ptr += header.length;
		current += header.length;
	}

	return 0;
}

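/* Send a single client header (e.g. MKHI or ICC) as its own MEI packet. */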
static int mei_send_header(u8 me_address, u8 host_address,
			   void *header, int header_len, int complete)
{
	union mei_header mei = {
		.client_address = me_address,
		.host_address   = host_address,
		.length         = header_len,
		.is_complete    = complete,
	};
	return mei_send_packet(&mei, header);
}

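/*
 * Receive a response from the ME: wait until the expected number of dwords
 * is available in the circular buffer, validate the MEI response header,
 * then copy out the client header and payload and acknowledge the ME.
 */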
static int mei_recv_msg(void *header, int header_bytes,
			void *rsp_data, int rsp_bytes)
{
	union mei_header mei_rsp;
	union mei_csr me, host;
	unsigned int ndata, n;
	unsigned int expected;
	u32 *data;

	/* Total number of dwords to read from circular buffer */
	expected = (rsp_bytes + sizeof(mei_rsp) + header_bytes) >> 2;
	if (rsp_bytes & 3)
		expected++;

	if (mei_wait_for_me_ready() < 0)
		return -1;

	/*
	 * The interrupt status bit does not appear to indicate that the
	 * message has actually been received.  Instead we wait until the
	 * expected number of dwords are present in the circular buffer.
	 */
	for (n = ME_RETRY; n; --n) {
		me = read_me_csr();
		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
			break;
		udelay(ME_DELAY);
	}
	if (!n) {
		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
		       "%u, available %u\n", expected,
		       me.buffer_write_ptr - me.buffer_read_ptr);
		return -1;
	}

	/* Read and verify MEI response header from the ME */
	mei_rsp.raw = read_cb();
	if (!mei_rsp.is_complete) {
		printk(BIOS_ERR, "ME: response is not complete\n");
		return -1;
	}

	/* Handle non-dword responses and expect at least the header */
	ndata = mei_rsp.length >> 2;
	if (mei_rsp.length & 3)
		ndata++;
	if (ndata != (expected - 1)) {
		printk(BIOS_ERR, "ME: response is missing data %d != %d\n",
		       ndata, (expected - 1));
		return -1;
	}

	/* Read response header from the ME */
	data = header;
	for (n = 0; n < (header_bytes >> 2); ++n)
		*data++ = read_cb();
	ndata -= header_bytes >> 2;

	/* Make sure caller passed a buffer with enough space */
	if (ndata != (rsp_bytes >> 2)) {
		printk(BIOS_ERR, "ME: not enough room in response buffer: "
		       "%u != %u\n", ndata, rsp_bytes >> 2);
		return -1;
	}

	/* Read response data from the circular buffer */
	data = rsp_data;
	for (n = 0; n < ndata; ++n)
		*data++ = read_cb();

	/* Tell the ME that we have consumed the response */
	host = read_host_csr();
	host.interrupt_status = 1;
	host.interrupt_generate = 1;
	write_host_csr(host);

	return mei_wait_for_me_ready();
}

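/*
 * Perform a full MKHI transaction: send the MKHI header and optional request
 * data, then (if a response is expected) read it back and verify that the
 * group id and command match the request.
 */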
static inline int mei_sendrecv_mkhi(struct mkhi_header *mkhi,
				    void *req_data, int req_bytes,
				    void *rsp_data, int rsp_bytes)
{
	struct mkhi_header mkhi_rsp;

	/* Send header */
	if (mei_send_header(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
			    mkhi, sizeof(*mkhi), req_bytes ? 0 : 1) < 0)
		return -1;

	/* Send data if available */
	if (req_bytes && mei_send_data(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
				     req_data, req_bytes) < 0)
		return -1;

	/* Return now if no response expected */
	if (!rsp_bytes)
		return 0;

	/* Read header and data */
	if (mei_recv_msg(&mkhi_rsp, sizeof(mkhi_rsp),
			 rsp_data, rsp_bytes) < 0)
		return -1;

	if (!mkhi_rsp.is_response ||
	    mkhi->group_id != mkhi_rsp.group_id ||
	    mkhi->command != mkhi_rsp.command) {
		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u, "
		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
		       mkhi_rsp.is_response);
		return -1;
	}

	return 0;
}

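/*
 * Perform an ICC transaction: send the ICC header and optional request data,
 * then read back the response if one is expected.
 */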
static inline int mei_sendrecv_icc(struct icc_header *icc,
				   void *req_data, int req_bytes,
				   void *rsp_data, int rsp_bytes)
{
	struct icc_header icc_rsp;

	/* Send header */
	if (mei_send_header(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
			    icc, sizeof(*icc), req_bytes ? 0 : 1) < 0)
		return -1;

	/* Send data if available */
	if (req_bytes && mei_send_data(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
				       req_data, req_bytes) < 0)
		return -1;

	/* Read header and data, if needed */
	if (rsp_bytes && mei_recv_msg(&icc_rsp, sizeof(icc_rsp),
				      rsp_data, rsp_bytes) < 0)
		return -1;

	return 0;
}

/*
 * mbp give up routine. This path is taken if hfs2.mbp_rdy is 0 or the read
 * state machine on the BIOS end doesn't match the ME's state machine.
 */
static void intel_me_mbp_give_up(struct device *dev)
{
	union mei_csr csr;

	pci_write_config32(dev, PCI_ME_H_GS2, PCI_ME_MBP_GIVE_UP);

	csr = read_host_csr();
	csr.reset = 1;
	csr.interrupt_generate = 1;
	write_host_csr(csr);
}

/*
 * mbp clear routine. This will wait for the ME to indicate that
 * the MBP has been read and cleared.
 */
static void intel_me_mbp_clear(struct device *dev)
{
	int count;
	union me_hfs2 hfs2;

	/* Wait for the mbp_cleared indicator */
	for (count = ME_RETRY; count > 0; --count) {
		hfs2.raw = pci_read_config32(dev, PCI_ME_HFS2);
		if (hfs2.mbp_cleared)
			break;
		udelay(ME_DELAY);
	}

	if (count == 0) {
		printk(BIOS_WARNING, "ME: Timeout waiting for mbp_cleared\n");
		intel_me_mbp_give_up(dev);
	} else {
		printk(BIOS_INFO, "ME: MBP cleared\n");
	}
}

static void me_print_fw_version(struct mbp_fw_version_name *vers_name)
{
	if (!vers_name) {
		printk(BIOS_ERR, "ME: mbp missing version report\n");
		return;
	}

	printk(BIOS_DEBUG, "ME: found version %d.%d.%d.%d\n",
	       vers_name->major_version, vers_name->minor_version,
	       vers_name->hotfix_version, vers_name->build_version);
}

static inline void print_cap(const char *name, int state)
{
	printk(BIOS_DEBUG, "ME Capability: %-41s : %sabled\n",
	       name, state ? " en" : "dis");
}

/* Get ME Firmware Capabilities */
static int mkhi_get_fwcaps(struct mbp_mefwcaps *cap)
{
	u32 rule_id = 0;
	struct me_fwcaps cap_msg;
	struct mkhi_header mkhi = {
		.group_id       = MKHI_GROUP_ID_FWCAPS,
		.command        = MKHI_FWCAPS_GET_RULE,
	};

	/* Send request and wait for response */
	if (mei_sendrecv_mkhi(&mkhi, &rule_id, sizeof(u32),
			      &cap_msg, sizeof(cap_msg)) < 0) {
		printk(BIOS_ERR, "ME: GET FWCAPS message failed\n");
		return -1;
	}
	*cap = cap_msg.caps_sku;
	return 0;
}

/* Print ME Firmware Capabilities */
static void me_print_fwcaps(struct mbp_mefwcaps *cap)
{
	struct mbp_mefwcaps local_caps;
	if (!cap) {
		cap = &local_caps;
		printk(BIOS_ERR, "ME: mbp missing fwcaps report\n");
		if (mkhi_get_fwcaps(cap))
			return;
	}

	print_cap("Full Network manageability", cap->full_net);
	print_cap("Regular Network manageability", cap->std_net);
	print_cap("Manageability", cap->manageability);
	print_cap("IntelR Anti-Theft (AT)", cap->intel_at);
	print_cap("IntelR Capability Licensing Service (CLS)", cap->intel_cls);
	print_cap("IntelR Power Sharing Technology (MPC)", cap->intel_mpc);
	print_cap("ICC Over Clocking", cap->icc_over_clocking);
	print_cap("Protected Audio Video Path (PAVP)", cap->pavp);
	print_cap("IPV6", cap->ipv6);
	print_cap("KVM Remote Control (KVM)", cap->kvm);
	print_cap("Outbreak Containment Heuristic (OCH)", cap->och);
	print_cap("Virtual LAN (VLAN)", cap->vlan);
	print_cap("TLS", cap->tls);
	print_cap("Wireless LAN (WLAN)", cap->wlan);
}

/* Send END OF POST message to the ME */
static int mkhi_end_of_post(void)
{
	struct mkhi_header mkhi = {
		.group_id	= MKHI_GROUP_ID_GEN,
		.command	= MKHI_END_OF_POST,
	};
	u32 eop_ack;

	/* Send request and wait for response */
	printk(BIOS_NOTICE, "ME: %s\n", __func__);
	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, &eop_ack, sizeof(eop_ack)) < 0) {
		printk(BIOS_ERR, "ME: END OF POST message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: END OF POST message successful (%d)\n", eop_ack);
	return 0;
}

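/*
 * Final hook for the MEI device: wait for the MBP-cleared indicator, send the
 * END OF POST message if the ME is in normal mode, and optionally disable and
 * hide the MEI PCI device when CONFIG(DISABLE_ME_PCI) is set.
 */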
void intel_me_finalize(struct device *dev)
{
	union me_hfs hfs;
	u32 reg32;

	reg32 = pci_read_config32(dev, PCI_BASE_ADDRESS_0);
	mei_base_address = (u8 *)(uintptr_t)(reg32 & ~PCI_BASE_ADDRESS_MEM_ATTR_MASK);

	/* S3 path will have hidden this device already */
	if (!mei_base_address || mei_base_address == (u8 *)0xfffffff0)
		return;

	/* Wait for ME MBP Cleared indicator */
	intel_me_mbp_clear(dev);

	/* Make sure ME is in a mode that expects EOP */
	hfs.raw = pci_read_config32(dev, PCI_ME_HFS);

	/* Abort and leave device alone if not normal mode */
	if (hfs.fpt_bad ||
	    hfs.working_state != ME_HFS_CWS_NORMAL ||
	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
		return;

	/* Try to send EOP command so ME stops accepting other commands */
	mkhi_end_of_post();

	if (!CONFIG(DISABLE_ME_PCI))
		return;

	/* Make sure IO is disabled */
	pci_and_config16(dev, PCI_COMMAND,
			 ~(PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY | PCI_COMMAND_IO));

	/* Hide the PCI device */
	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
}

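/*
 * Ask the ME, via an ICC SET_CLOCK_ENABLES message, to turn off the clocks
 * selected by 'mask'; no response is expected from the ME.
 */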
static int me_icc_set_clock_enables(u32 mask)
{
	struct icc_clock_enables_msg clk = {
		.clock_enables	= 0, /* Turn off specified clocks */
		.clock_mask	= mask,
		.no_response	= 1, /* Do not expect response */
	};
	struct icc_header icc = {
		.api_version	= ICC_API_VERSION_LYNXPOINT,
		.icc_command	= ICC_SET_CLOCK_ENABLES,
		.length		= sizeof(clk),
	};

	/* Send request and wait for response */
	if (mei_sendrecv_icc(&icc, &clk, sizeof(clk), NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: ICC SET CLOCK ENABLES message failed\n");
		return -1;
	}
	printk(BIOS_INFO, "ME: ICC SET CLOCK ENABLES 0x%08x\n", mask);
	return 0;
}

/* Determine the path that we should take based on ME status */
static enum me_bios_path intel_me_path(struct device *dev)
{
	enum me_bios_path path = ME_DISABLE_BIOS_PATH;
	union me_hfs hfs = { .raw = pci_read_config32(dev, PCI_ME_HFS) };
	union me_hfs2 hfs2 = { .raw = pci_read_config32(dev, PCI_ME_HFS2) };

	/* Check and dump status */
	intel_me_status(hfs, hfs2);

	/* Check Current Working State */
	switch (hfs.working_state) {
	case ME_HFS_CWS_NORMAL:
		path = ME_NORMAL_BIOS_PATH;
		break;
	case ME_HFS_CWS_REC:
		path = ME_RECOVERY_BIOS_PATH;
		break;
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check Current Operation Mode */
	switch (hfs.operation_mode) {
	case ME_HFS_MODE_NORMAL:
		break;
	case ME_HFS_MODE_DEBUG:
	case ME_HFS_MODE_DIS:
	case ME_HFS_MODE_OVER_JMPR:
	case ME_HFS_MODE_OVER_MEI:
	default:
		path = ME_DISABLE_BIOS_PATH;
		break;
	}

	/* Check for any error code and valid firmware and MBP */
	if (hfs.error_code || hfs.fpt_bad)
		path = ME_ERROR_BIOS_PATH;

	/* Check if the MBP is ready */
	if (!hfs2.mbp_rdy) {
		printk(BIOS_CRIT, "%s: mbp is not ready!\n",
		       __func__);
		path = ME_ERROR_BIOS_PATH;
	}

	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
		struct elog_event_data_me_extended data = {
			.current_working_state = hfs.working_state,
			.operation_state       = hfs.operation_state,
			.operation_mode        = hfs.operation_mode,
			.error_code            = hfs.error_code,
			.progress_code         = hfs2.progress_code,
			.current_pmevent       = hfs2.current_pmevent,
			.current_state         = hfs2.current_state,
		};
		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
				   &data, sizeof(data));
	}

	return path;
}

/* Prepare ME for MEI messages */
static int intel_mei_setup(struct device *dev)
{
	struct resource *res;
	union mei_csr host;

	/* Find the MMIO base for the ME interface */
	res = probe_resource(dev, PCI_BASE_ADDRESS_0);
	if (!res || res->base == 0 || res->size == 0) {
		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
		return -1;
	}
	mei_base_address = res2mmio(res, 0, 0);

	/* Ensure Memory and Bus Master bits are set */
	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);

	/* Clean up status for next message */
	host = read_host_csr();
	host.interrupt_generate = 1;
	host.ready = 1;
	host.reset = 0;
	write_host_csr(host);

	return 0;
}

/* Read the Extend register hash of ME firmware */
static int intel_me_extend_valid(struct device *dev)
{
	union me_heres status = { .raw = pci_read_config32(dev, PCI_ME_HERES) };
	u32 extend[8] = {0};
	int i, count = 0;

	if (!status.extend_feature_present) {
		printk(BIOS_ERR, "ME: Extend Feature not present\n");
		return -1;
	}

	if (!status.extend_reg_valid) {
		printk(BIOS_ERR, "ME: Extend Register not valid\n");
		return -1;
	}

	switch (status.extend_reg_algorithm) {
	case PCI_ME_EXT_SHA1:
		count = 5;
		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
		break;
	case PCI_ME_EXT_SHA256:
		count = 8;
		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
		break;
	default:
		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
		       status.extend_reg_algorithm);
		return -1;
	}

	for (i = 0; i < count; ++i) {
		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
		printk(BIOS_DEBUG, "%08x", extend[i]);
	}
	printk(BIOS_DEBUG, "\n");

	/* Save hash in NVS for the OS to verify */
	if (CONFIG(CHROMEOS_NVS))
		chromeos_set_me_hash(extend, count);

	return 0;
}

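/* Return the number of dwords the ME has queued in the circular buffer. */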
static u32 me_to_host_words_pending(void)
{
	union mei_csr me = read_me_csr();
	if (!me.ready)
		return 0;
	return (me.buffer_write_ptr - me.buffer_read_ptr) &
		(me.buffer_depth - 1);
}

struct mbp_payload {
	union mbp_header header;
	u32 data[];
};

/*
 * Read and print ME MBP data
 *
 * Return -1 to indicate a problem (give up)
 * Return 0 to indicate success (send LOCK+EOP)
 */
static int intel_me_read_mbp(struct me_bios_payload *mbp_data, struct device *dev)
{
	union mbp_header mbp_hdr;
	u32 me2host_pending;
	union mei_csr host;
	union me_hfs2 hfs2 = { .raw = pci_read_config32(dev, PCI_ME_HFS2) };
	struct mbp_payload *mbp;
	int i;

	if (!hfs2.mbp_rdy) {
		printk(BIOS_ERR, "ME: MBP not ready\n");
		goto mbp_failure;
	}

	me2host_pending = me_to_host_words_pending();
	if (!me2host_pending) {
		printk(BIOS_ERR, "ME: no mbp data!\n");
		goto mbp_failure;
	}

	/* we know for sure that at least the header is there */
	mbp_hdr.raw = read_cb();

	if ((mbp_hdr.num_entries > (mbp_hdr.mbp_size / 2)) ||
	    (me2host_pending < mbp_hdr.mbp_size)) {
		printk(BIOS_ERR, "ME: mbp of %d entries, total size %d words"
		       " buffer contains %d words\n",
		       mbp_hdr.num_entries, mbp_hdr.mbp_size,
		       me2host_pending);
		goto mbp_failure;
	}
	mbp = malloc(mbp_hdr.mbp_size * sizeof(u32));
	if (!mbp)
		goto mbp_failure;

	mbp->header = mbp_hdr;
	me2host_pending--;

	i = 0;
	while (i != me2host_pending) {
		mbp->data[i] = read_cb();
		i++;
	}

	/* Signal to the ME that the host has finished reading the MBP. */
	host = read_host_csr();
	host.interrupt_generate = 1;
	write_host_csr(host);

	/* Dump out the MBP contents. */
	if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) {
		printk(BIOS_INFO, "ME MBP: Header: items: %d, size dw: %d\n",
		       mbp->header.num_entries, mbp->header.mbp_size);
		if (CONFIG(DEBUG_INTEL_ME)) {
			for (i = 0; i < mbp->header.mbp_size - 1; i++) {
				printk(BIOS_INFO, "ME MBP: %04x: 0x%08x\n", i, mbp->data[i]);
			}
		}
	}

	#define ASSIGN_FIELD_PTR(field_,val_) \
		{ \
		mbp_data->field_ = (typeof(mbp_data->field_))(void *)val_; \
		break; \
		}
	/* Setup the pointers in the me_bios_payload structure. */
	for (i = 0; i < mbp->header.mbp_size - 1;) {
		struct mbp_item_header *item = (void *)&mbp->data[i];

		switch (MBP_MAKE_IDENT(item->app_id, item->item_id)) {
		case MBP_IDENT(KERNEL, FW_VER):
			ASSIGN_FIELD_PTR(fw_version_name, &mbp->data[i+1]);

		case MBP_IDENT(ICC, PROFILE):
			ASSIGN_FIELD_PTR(icc_profile, &mbp->data[i+1]);

		case MBP_IDENT(INTEL_AT, STATE):
			ASSIGN_FIELD_PTR(at_state, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, FW_CAP):
			ASSIGN_FIELD_PTR(fw_capabilities, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, ROM_BIST):
			ASSIGN_FIELD_PTR(rom_bist_data, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, PLAT_KEY):
			ASSIGN_FIELD_PTR(platform_key, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, FW_TYPE):
			ASSIGN_FIELD_PTR(fw_plat_type, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, MFS_FAILURE):
			ASSIGN_FIELD_PTR(mfsintegrity, &mbp->data[i+1]);

		case MBP_IDENT(KERNEL, PLAT_TIME):
			ASSIGN_FIELD_PTR(plat_time, &mbp->data[i+1]);

		case MBP_IDENT(NFC, SUPPORT_DATA):
			ASSIGN_FIELD_PTR(nfc_data, &mbp->data[i+1]);

		default:
			printk(BIOS_ERR, "ME MBP: unknown item 0x%x @ "
			       "dw offset 0x%x\n", mbp->data[i], i);
			break;
		}
		i += item->length;
	}
	#undef ASSIGN_FIELD_PTR

	return 0;

mbp_failure:
	intel_me_mbp_give_up(dev);
	return -1;
}

/* Check whether ME is present and do basic init */
static void intel_me_init(struct device *dev)
{
	struct southbridge_intel_lynxpoint_config *config = dev->chip_info;
	enum me_bios_path path = intel_me_path(dev);
	struct me_bios_payload mbp_data;

	/* Do initial setup and determine the BIOS path */
	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);

	if (path == ME_NORMAL_BIOS_PATH) {
		/* Validate the extend register */
		intel_me_extend_valid(dev);
	}

	memset(&mbp_data, 0, sizeof(mbp_data));

	/*
	 * According to the ME9 BWG, BIOS is required to fetch MBP data in
	 * all boot flows except S3 Resume.
	 */

	/* Prepare MEI MMIO interface */
	if (intel_mei_setup(dev) < 0)
		return;

	if (intel_me_read_mbp(&mbp_data, dev))
		return;

	if (CONFIG_DEFAULT_CONSOLE_LOGLEVEL >= BIOS_DEBUG) {
		me_print_fw_version(mbp_data.fw_version_name);

		if (CONFIG(DEBUG_INTEL_ME))
			me_print_fwcaps(mbp_data.fw_capabilities);

		if (mbp_data.plat_time) {
			printk(BIOS_DEBUG, "ME: Wake Event to ME Reset:      %u ms\n",
			       mbp_data.plat_time->wake_event_mrst_time_ms);
			printk(BIOS_DEBUG, "ME: ME Reset to Platform Reset:  %u ms\n",
			       mbp_data.plat_time->mrst_pltrst_time_ms);
			printk(BIOS_DEBUG, "ME: Platform Reset to CPU Reset: %u ms\n",
			       mbp_data.plat_time->pltrst_cpurst_time_ms);
		}
	}

	/* Set clock enables according to devicetree */
	if (config && config->icc_clock_disable)
		me_icc_set_clock_enables(config->icc_clock_disable);

	/*
	 * Leave the ME unlocked. It will be locked later.
	 */
}

static void intel_me_enable(struct device *dev)
{
	/* Avoid talking to the device in S3 path */
	if (acpi_is_wakeup_s3() && CONFIG(DISABLE_ME_PCI)) {
		dev->enabled = 0;
		pch_disable_devfn(dev);
	}
}

static struct device_operations device_ops = {
	.read_resources		= pci_dev_read_resources,
	.set_resources		= pci_dev_set_resources,
	.enable_resources	= pci_dev_enable_resources,
	.enable			= intel_me_enable,
	.init			= intel_me_init,
	.final			= intel_me_finalize,
	.ops_pci		= &pci_dev_ops_pci,
};

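/* PCI device IDs of the Lynx Point MEI function this driver binds to */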
static const unsigned short pci_device_ids[] = {
	PCI_DID_INTEL_LPT_H_MEI,
	PCI_DID_INTEL_LPT_H_MEI_9,
	PCI_DID_INTEL_LPT_LP_MEI,
	0
};

static const struct pci_driver intel_me __pci_driver = {
	.ops     = &device_ops,
	.vendor  = PCI_VID_INTEL,
	.devices = pci_device_ids,
};