• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /* SPDX-License-Identifier: GPL-2.0-only */
2 
3 /*
4  * This is a ramstage driver for the Intel Management Engine found in the
5  * southbridge.  It handles the required boot-time messages over the
6  * MMIO-based Management Engine Interface to tell the ME that the BIOS is
7  * finished with POST.  Additional messages are defined for debug but are
8  * not used unless the console loglevel is high enough.
9  */
10 
11 #include <acpi/acpi.h>
12 #include <device/mmio.h>
13 #include <device/pci_ops.h>
14 #include <console/console.h>
15 #include <device/device.h>
16 #include <device/pci.h>
17 #include <device/pci_ids.h>
18 #include <stdlib.h>
19 #include <string.h>
20 #include <delay.h>
21 #include <elog.h>
22 #include <soc/me.h>
23 #include <soc/lpc.h>
24 #include <soc/pch.h>
25 #include <soc/pci_devs.h>
26 #include <soc/rcba.h>
27 #include <soc/intel/broadwell/pch/chip.h>
28 
29 #include <vendorcode/google/chromeos/chromeos.h>
30 
31 /* Path that the BIOS should take based on ME state */
32 static const char *me_bios_path_values[] = {
33 	[ME_NORMAL_BIOS_PATH]		= "Normal",
34 	[ME_S3WAKE_BIOS_PATH]		= "S3 Wake",
35 	[ME_ERROR_BIOS_PATH]		= "Error",
36 	[ME_RECOVERY_BIOS_PATH]		= "Recovery",
37 	[ME_DISABLE_BIOS_PATH]		= "Disable",
38 	[ME_FIRMWARE_UPDATE_BIOS_PATH]	= "Firmware Update",
39 };
40 
41 /* MMIO base address for MEI interface */
42 static u8 *mei_base_address;
43 
mei_dump(void * ptr,int dword,int offset,const char * type)44 static void mei_dump(void *ptr, int dword, int offset, const char *type)
45 {
46 	struct mei_csr *csr;
47 
48 	if (!CONFIG(DEBUG_INTEL_ME))
49 		return;
50 
51 	printk(BIOS_SPEW, "%-9s[%02x] : ", type, offset);
52 
53 	switch (offset) {
54 	case MEI_H_CSR:
55 	case MEI_ME_CSR_HA:
56 		csr = ptr;
57 		if (!csr) {
58 			printk(BIOS_SPEW, "ERROR: 0x%08x\n", dword);
59 			break;
60 		}
61 		printk(BIOS_SPEW, "cbd=%u cbrp=%02u cbwp=%02u ready=%u "
62 		       "reset=%u ig=%u is=%u ie=%u\n", csr->buffer_depth,
63 		       csr->buffer_read_ptr, csr->buffer_write_ptr,
64 		       csr->ready, csr->reset, csr->interrupt_generate,
65 		       csr->interrupt_status, csr->interrupt_enable);
66 		break;
67 	case MEI_ME_CB_RW:
68 	case MEI_H_CB_WW:
69 		printk(BIOS_SPEW, "CB: 0x%08x\n", dword);
70 		break;
71 	default:
72 		printk(BIOS_SPEW, "0x%08x\n", offset);
73 		break;
74 	}
75 }
76 
77 /*
78  * ME/MEI access helpers using memcpy to avoid aliasing.
79  */
80 
mei_read_dword_ptr(void * ptr,int offset)81 static inline void mei_read_dword_ptr(void *ptr, int offset)
82 {
83 	u32 dword = read32(mei_base_address + offset);
84 	memcpy(ptr, &dword, sizeof(dword));
85 	mei_dump(ptr, dword, offset, "READ");
86 }
87 
mei_write_dword_ptr(void * ptr,int offset)88 static inline void mei_write_dword_ptr(void *ptr, int offset)
89 {
90 	u32 dword = 0;
91 	memcpy(&dword, ptr, sizeof(dword));
92 	write32(mei_base_address + offset, dword);
93 	mei_dump(ptr, dword, offset, "WRITE");
94 }
95 
pci_read_dword_ptr(struct device * dev,void * ptr,int offset)96 static inline void pci_read_dword_ptr(struct device *dev, void *ptr, int offset)
97 {
98 	u32 dword = pci_read_config32(dev, offset);
99 	memcpy(ptr, &dword, sizeof(dword));
100 	mei_dump(ptr, dword, offset, "PCI READ");
101 }
102 
read_host_csr(struct mei_csr * csr)103 static inline void read_host_csr(struct mei_csr *csr)
104 {
105 	mei_read_dword_ptr(csr, MEI_H_CSR);
106 }
107 
write_host_csr(struct mei_csr * csr)108 static inline void write_host_csr(struct mei_csr *csr)
109 {
110 	mei_write_dword_ptr(csr, MEI_H_CSR);
111 }
112 
read_me_csr(struct mei_csr * csr)113 static inline void read_me_csr(struct mei_csr *csr)
114 {
115 	mei_read_dword_ptr(csr, MEI_ME_CSR_HA);
116 }
117 
/* Push one dword into the host circular buffer. */
static inline void write_cb(u32 dword)
{
	write32(mei_base_address + MEI_H_CB_WW, dword);
	mei_dump(NULL, dword, MEI_H_CB_WW, "WRITE");
}
123 
read_cb(void)124 static inline u32 read_cb(void)
125 {
126 	u32 dword = read32(mei_base_address + MEI_ME_CB_RW);
127 	mei_dump(NULL, dword, MEI_ME_CB_RW, "READ");
128 	return dword;
129 }
130 
131 /* Wait for ME ready bit to be asserted */
mei_wait_for_me_ready(void)132 static int mei_wait_for_me_ready(void)
133 {
134 	struct mei_csr me;
135 	unsigned int try = ME_RETRY;
136 
137 	while (try--) {
138 		read_me_csr(&me);
139 		if (me.ready)
140 			return 0;
141 		udelay(ME_DELAY);
142 	}
143 
144 	printk(BIOS_ERR, "ME: failed to become ready\n");
145 	return -1;
146 }
147 
mei_reset(void)148 static void mei_reset(void)
149 {
150 	struct mei_csr host;
151 
152 	if (mei_wait_for_me_ready() < 0)
153 		return;
154 
155 	/* Reset host and ME circular buffers for next message */
156 	read_host_csr(&host);
157 	host.reset = 1;
158 	host.interrupt_generate = 1;
159 	write_host_csr(&host);
160 
161 	if (mei_wait_for_me_ready() < 0)
162 		return;
163 
164 	/* Re-init and indicate host is ready */
165 	read_host_csr(&host);
166 	host.interrupt_generate = 1;
167 	host.ready = 1;
168 	host.reset = 0;
169 	write_host_csr(&host);
170 }
171 
mei_send_packet(struct mei_header * mei,void * req_data)172 static int mei_send_packet(struct mei_header *mei, void *req_data)
173 {
174 	struct mei_csr host;
175 	unsigned int ndata, n;
176 	u32 *data;
177 
178 	/* Number of dwords to write */
179 	ndata = mei->length >> 2;
180 
181 	/* Pad non-dword aligned request message length */
182 	if (mei->length & 3)
183 		ndata++;
184 	if (!ndata) {
185 		printk(BIOS_DEBUG, "ME: request has no data\n");
186 		return -1;
187 	}
188 	ndata++; /* Add MEI header */
189 
190 	/*
191 	 * Make sure there is still room left in the circular buffer.
192 	 * Reset the buffer pointers if the requested message will not fit.
193 	 */
194 	read_host_csr(&host);
195 	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
196 		printk(BIOS_ERR, "ME: circular buffer full, resetting...\n");
197 		mei_reset();
198 		read_host_csr(&host);
199 	}
200 
201 	/* Ensure the requested length will fit in the circular buffer. */
202 	if ((host.buffer_depth - host.buffer_write_ptr) < ndata) {
203 		printk(BIOS_ERR, "ME: message (%u) too large for buffer (%u)\n",
204 		       ndata + 2, host.buffer_depth);
205 		return -1;
206 	}
207 
208 	/* Write MEI header */
209 	mei_write_dword_ptr(mei, MEI_H_CB_WW);
210 	ndata--;
211 
212 	/* Write message data */
213 	data = req_data;
214 	for (n = 0; n < ndata; ++n)
215 		write_cb(*data++);
216 
217 	/* Generate interrupt to the ME */
218 	read_host_csr(&host);
219 	host.interrupt_generate = 1;
220 	write_host_csr(&host);
221 
222 	/* Make sure ME is ready after sending request data */
223 	return mei_wait_for_me_ready();
224 }
225 
/*
 * Send @req_bytes of payload to the ME, fragmenting into as many packets
 * as the circular buffer requires.  The final fragment is marked complete.
 *
 * @me_address:   ME client address for the MEI header
 * @host_address: host address for the MEI header
 * @req_data:     payload buffer
 * @req_bytes:    payload length in bytes
 *
 * Returns 0 on success, -1 if any fragment fails to transmit.
 */
static int mei_send_data(u8 me_address, u8 host_address,
			 void *req_data, int req_bytes)
{
	struct mei_header header = {
		.client_address = me_address,
		.host_address = host_address,
	};
	struct mei_csr host;
	int current = 0;
	u8 *req_ptr = req_data;

	while (!header.is_complete) {
		int remain = req_bytes - current;
		int buf_len;

		read_host_csr(&host);
		buf_len = host.buffer_depth - host.buffer_write_ptr;

		if (buf_len > remain) {
			/* Send all remaining data as final message */
			header.length = remain;
			header.is_complete = 1;
		} else {
			/* Send as much data as the buffer can hold */
			header.length = buf_len;
		}

		/* Fix: propagate transmit failures instead of ignoring them */
		if (mei_send_packet(&header, req_ptr) < 0)
			return -1;

		req_ptr += header.length;
		current += header.length;
	}

	return 0;
}
261 
/*
 * Build an MEI header for @header_len bytes and transmit it as one packet.
 * @complete marks the message as the final fragment.
 */
static int mei_send_header(u8 me_address, u8 host_address,
			   void *header, int header_len, int complete)
{
	struct mei_header mei = {
		.client_address = me_address,
		.host_address   = host_address,
		.length         = header_len,
		.is_complete    = complete,
	};

	return mei_send_packet(&mei, header);
}
273 
mei_recv_msg(void * header,int header_bytes,void * rsp_data,int rsp_bytes)274 static int mei_recv_msg(void *header, int header_bytes,
275 			void *rsp_data, int rsp_bytes)
276 {
277 	struct mei_header mei_rsp;
278 	struct mei_csr me, host;
279 	unsigned int ndata, n;
280 	unsigned int expected;
281 	u32 *data;
282 
283 	/* Total number of dwords to read from circular buffer */
284 	expected = (rsp_bytes + sizeof(mei_rsp) + header_bytes) >> 2;
285 	if (rsp_bytes & 3)
286 		expected++;
287 
288 	if (mei_wait_for_me_ready() < 0)
289 		return -1;
290 
291 	/*
292 	 * The interrupt status bit does not appear to indicate that the
293 	 * message has actually been received.  Instead we wait until the
294 	 * expected number of dwords are present in the circular buffer.
295 	 */
296 	for (n = ME_RETRY; n; --n) {
297 		read_me_csr(&me);
298 		if ((me.buffer_write_ptr - me.buffer_read_ptr) >= expected)
299 			break;
300 		udelay(ME_DELAY);
301 	}
302 	if (!n) {
303 		printk(BIOS_ERR, "ME: timeout waiting for data: expected "
304 		       "%u, available %u\n", expected,
305 		       me.buffer_write_ptr - me.buffer_read_ptr);
306 		return -1;
307 	}
308 
309 	/* Read and verify MEI response header from the ME */
310 	mei_read_dword_ptr(&mei_rsp, MEI_ME_CB_RW);
311 	if (!mei_rsp.is_complete) {
312 		printk(BIOS_ERR, "ME: response is not complete\n");
313 		return -1;
314 	}
315 
316 	/* Handle non-dword responses and expect at least the header */
317 	ndata = mei_rsp.length >> 2;
318 	if (mei_rsp.length & 3)
319 		ndata++;
320 	if (ndata != (expected - 1)) {
321 		printk(BIOS_ERR, "ME: response is missing data %d != %d\n",
322 		       ndata, (expected - 1));
323 		return -1;
324 	}
325 
326 	/* Read response header from the ME */
327 	data = header;
328 	for (n = 0; n < (header_bytes >> 2); ++n)
329 		*data++ = read_cb();
330 	ndata -= header_bytes >> 2;
331 
332 	/* Make sure caller passed a buffer with enough space */
333 	if (ndata != (rsp_bytes >> 2)) {
334 		printk(BIOS_ERR, "ME: not enough room in response buffer: "
335 		       "%u != %u\n", ndata, rsp_bytes >> 2);
336 		return -1;
337 	}
338 
339 	/* Read response data from the circular buffer */
340 	data = rsp_data;
341 	for (n = 0; n < ndata; ++n)
342 		*data++ = read_cb();
343 
344 	/* Tell the ME that we have consumed the response */
345 	read_host_csr(&host);
346 	host.interrupt_status = 1;
347 	host.interrupt_generate = 1;
348 	write_host_csr(&host);
349 
350 	return mei_wait_for_me_ready();
351 }
352 
mei_sendrecv_mkhi(struct mkhi_header * mkhi,void * req_data,int req_bytes,void * rsp_data,int rsp_bytes)353 static inline int mei_sendrecv_mkhi(struct mkhi_header *mkhi,
354 				    void *req_data, int req_bytes,
355 				    void *rsp_data, int rsp_bytes)
356 {
357 	struct mkhi_header mkhi_rsp;
358 
359 	/* Send header */
360 	if (mei_send_header(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
361 			    mkhi, sizeof(*mkhi), req_bytes ? 0 : 1) < 0)
362 		return -1;
363 
364 	/* Send data if available */
365 	if (req_bytes && mei_send_data(MEI_ADDRESS_MKHI, MEI_HOST_ADDRESS,
366 				     req_data, req_bytes) < 0)
367 		return -1;
368 
369 	/* Return now if no response expected */
370 	if (!rsp_bytes)
371 		return 0;
372 
373 	/* Read header and data */
374 	if (mei_recv_msg(&mkhi_rsp, sizeof(mkhi_rsp),
375 			 rsp_data, rsp_bytes) < 0)
376 		return -1;
377 
378 	if (!mkhi_rsp.is_response ||
379 	    mkhi->group_id != mkhi_rsp.group_id ||
380 	    mkhi->command != mkhi_rsp.command) {
381 		printk(BIOS_ERR, "ME: invalid response, group %u ?= %u,"
382 		       "command %u ?= %u, is_response %u\n", mkhi->group_id,
383 		       mkhi_rsp.group_id, mkhi->command, mkhi_rsp.command,
384 		       mkhi_rsp.is_response);
385 		return -1;
386 	}
387 
388 	return 0;
389 }
390 
mei_sendrecv_icc(struct icc_header * icc,void * req_data,int req_bytes,void * rsp_data,int rsp_bytes)391 static inline int mei_sendrecv_icc(struct icc_header *icc,
392 				   void *req_data, int req_bytes,
393 				   void *rsp_data, int rsp_bytes)
394 {
395 	struct icc_header icc_rsp;
396 
397 	/* Send header */
398 	if (mei_send_header(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
399 			    icc, sizeof(*icc), req_bytes ? 0 : 1) < 0)
400 		return -1;
401 
402 	/* Send data if available */
403 	if (req_bytes && mei_send_data(MEI_ADDRESS_ICC, MEI_HOST_ADDRESS,
404 				       req_data, req_bytes) < 0)
405 		return -1;
406 
407 	/* Read header and data, if needed */
408 	if (rsp_bytes && mei_recv_msg(&icc_rsp, sizeof(icc_rsp),
409 				      rsp_data, rsp_bytes) < 0)
410 		return -1;
411 
412 	return 0;
413 }
414 
415 /*
416  * mbp give up routine. This path is taken if hfs.mpb_rdy is 0 or the read
417  * state machine on the BIOS end doesn't match the ME's state machine.
418  */
intel_me_mbp_give_up(struct device * dev)419 static void intel_me_mbp_give_up(struct device *dev)
420 {
421 	struct mei_csr csr;
422 
423 	pci_write_config32(dev, PCI_ME_H_GS2, PCI_ME_MBP_GIVE_UP);
424 
425 	read_host_csr(&csr);
426 	csr.reset = 1;
427 	csr.interrupt_generate = 1;
428 	write_host_csr(&csr);
429 }
430 
431 /*
432  * mbp clear routine. This will wait for the ME to indicate that
433  * the MBP has been read and cleared.
434  */
intel_me_mbp_clear(struct device * dev)435 static void intel_me_mbp_clear(struct device *dev)
436 {
437 	int count;
438 	struct me_hfs2 hfs2;
439 
440 	/* Wait for the mbp_cleared indicator */
441 	for (count = ME_RETRY; count > 0; --count) {
442 		pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);
443 		if (hfs2.mbp_cleared)
444 			break;
445 		udelay(ME_DELAY);
446 	}
447 
448 	if (count == 0) {
449 		printk(BIOS_WARNING, "ME: Timeout waiting for mbp_cleared\n");
450 		intel_me_mbp_give_up(dev);
451 	} else {
452 		printk(BIOS_INFO, "ME: MBP cleared\n");
453 	}
454 }
455 
/* Log the ME firmware version from the MBP, if present. */
static void me_print_fw_version(mbp_fw_version_name *vers_name)
{
	if (!vers_name) {
		printk(BIOS_ERR, "ME: mbp missing version report\n");
		return;
	}

	printk(BIOS_DEBUG, "ME: found version %d.%d.%d.%d\n",
	       vers_name->major_version, vers_name->minor_version,
	       vers_name->hotfix_version, vers_name->build_version);
}
467 
print_cap(const char * name,int state)468 static inline void print_cap(const char *name, int state)
469 {
470 	printk(BIOS_DEBUG, "ME Capability: %-41s : %sabled\n",
471 	       name, state ? " en" : "dis");
472 }
473 
474 /* Get ME Firmware Capabilities */
mkhi_get_fwcaps(mbp_mefwcaps * cap)475 static int mkhi_get_fwcaps(mbp_mefwcaps *cap)
476 {
477 	u32 rule_id = 0;
478 	struct me_fwcaps cap_msg;
479 	struct mkhi_header mkhi = {
480 		.group_id       = MKHI_GROUP_ID_FWCAPS,
481 		.command        = MKHI_FWCAPS_GET_RULE,
482 	};
483 
484 	/* Send request and wait for response */
485 	if (mei_sendrecv_mkhi(&mkhi, &rule_id, sizeof(u32),
486 			      &cap_msg, sizeof(cap_msg)) < 0) {
487 		printk(BIOS_ERR, "ME: GET FWCAPS message failed\n");
488 		return -1;
489 	}
490 	*cap = cap_msg.caps_sku;
491 	return 0;
492 }
493 
494 /* Get ME Firmware Capabilities */
me_print_fwcaps(mbp_mefwcaps * cap)495 static void me_print_fwcaps(mbp_mefwcaps *cap)
496 {
497 	mbp_mefwcaps local_caps;
498 	if (!cap) {
499 		cap = &local_caps;
500 		printk(BIOS_ERR, "ME: mbp missing fwcaps report\n");
501 		if (mkhi_get_fwcaps(cap))
502 			return;
503 	}
504 
505 	print_cap("Full Network manageability", cap->full_net);
506 	print_cap("Regular Network manageability", cap->std_net);
507 	print_cap("Manageability", cap->manageability);
508 	print_cap("IntelR Anti-Theft (AT)", cap->intel_at);
509 	print_cap("IntelR Capability Licensing Service (CLS)", cap->intel_cls);
510 	print_cap("IntelR Power Sharing Technology (MPC)", cap->intel_mpc);
511 	print_cap("ICC Over Clocking", cap->icc_over_clocking);
512 	print_cap("Protected Audio Video Path (PAVP)", cap->pavp);
513 	print_cap("IPV6", cap->ipv6);
514 	print_cap("KVM Remote Control (KVM)", cap->kvm);
515 	print_cap("Outbreak Containment Heuristic (OCH)", cap->och);
516 	print_cap("Virtual LAN (VLAN)", cap->vlan);
517 	print_cap("TLS", cap->tls);
518 	print_cap("Wireless LAN (WLAN)", cap->wlan);
519 }
520 
521 /* Send END OF POST message to the ME */
mkhi_end_of_post(void)522 static int mkhi_end_of_post(void)
523 {
524 	struct mkhi_header mkhi = {
525 		.group_id	= MKHI_GROUP_ID_GEN,
526 		.command	= MKHI_END_OF_POST,
527 	};
528 	u32 eop_ack;
529 
530 	/* Send request and wait for response */
531 	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, &eop_ack, sizeof(eop_ack)) < 0) {
532 		printk(BIOS_ERR, "ME: END OF POST message failed\n");
533 		return -1;
534 	}
535 
536 	printk(BIOS_INFO, "ME: END OF POST message successful (%d)\n", eop_ack);
537 	return 0;
538 }
539 
540 /* Send END OF POST message to the ME */
mkhi_end_of_post_noack(void)541 static int mkhi_end_of_post_noack(void)
542 {
543 	struct mkhi_header mkhi = {
544 		.group_id	= MKHI_GROUP_ID_GEN,
545 		.command	= MKHI_END_OF_POST_NOACK,
546 	};
547 
548 	/* Send request, do not wait for response */
549 	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, NULL, 0) < 0) {
550 		printk(BIOS_ERR, "ME: END OF POST NOACK message failed\n");
551 		return -1;
552 	}
553 
554 	printk(BIOS_INFO, "ME: END OF POST NOACK message successful\n");
555 	return 0;
556 }
557 
558 /* Send HMRFPO LOCK message to the ME */
mkhi_hmrfpo_lock(void)559 static int mkhi_hmrfpo_lock(void)
560 {
561 	struct mkhi_header mkhi = {
562 		.group_id	= MKHI_GROUP_ID_HMRFPO,
563 		.command	= MKHI_HMRFPO_LOCK,
564 	};
565 	u32 ack;
566 
567 	/* Send request and wait for response */
568 	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, &ack, sizeof(ack)) < 0) {
569 		printk(BIOS_ERR, "ME: HMRFPO LOCK message failed\n");
570 		return -1;
571 	}
572 
573 	printk(BIOS_INFO, "ME: HMRFPO LOCK message successful (%d)\n", ack);
574 	return 0;
575 }
576 
577 /* Send HMRFPO LOCK message to the ME, do not wait for response */
mkhi_hmrfpo_lock_noack(void)578 static int mkhi_hmrfpo_lock_noack(void)
579 {
580 	struct mkhi_header mkhi = {
581 		.group_id	= MKHI_GROUP_ID_HMRFPO,
582 		.command	= MKHI_HMRFPO_LOCK_NOACK,
583 	};
584 
585 	/* Send request, do not wait for response */
586 	if (mei_sendrecv_mkhi(&mkhi, NULL, 0, NULL, 0) < 0) {
587 		printk(BIOS_ERR, "ME: HMRFPO LOCK NOACK message failed\n");
588 		return -1;
589 	}
590 
591 	printk(BIOS_INFO, "ME: HMRFPO LOCK NOACK message successful\n");
592 	return 0;
593 }
594 
intel_me_finalize(struct device * dev)595 static void intel_me_finalize(struct device *dev)
596 {
597 	u16 reg16;
598 
599 	/* S3 path will have hidden this device already */
600 	if (!mei_base_address || mei_base_address == (u8 *)0xfffffff0)
601 		return;
602 
603 	if (!CONFIG(DISABLE_ME_PCI))
604 		return;
605 
606 	/* Make sure IO is disabled */
607 	reg16 = pci_read_config16(dev, PCI_COMMAND);
608 	reg16 &= ~(PCI_COMMAND_MASTER |
609 		   PCI_COMMAND_MEMORY | PCI_COMMAND_IO);
610 	pci_write_config16(dev, PCI_COMMAND, reg16);
611 
612 	/* Hide the PCI device */
613 	RCBA32_OR(FD2, PCH_DISABLE_MEI1);
614 	RCBA32(FD2);
615 }
616 
/* Ask the ME to gate the clocks selected by @mask (no response expected). */
static int me_icc_set_clock_enables(u32 mask)
{
	struct icc_clock_enables_msg clk = {
		.clock_enables	= 0, /* Turn off specified clocks */
		.clock_mask	= mask,
		.no_response	= 1, /* Do not expect response */
	};
	struct icc_header icc = {
		.api_version	= ICC_API_VERSION_LYNXPOINT,
		.icc_command	= ICC_SET_CLOCK_ENABLES,
		.length		= sizeof(clk),
	};

	if (mei_sendrecv_icc(&icc, &clk, sizeof(clk), NULL, 0) < 0) {
		printk(BIOS_ERR, "ME: ICC SET CLOCK ENABLES message failed\n");
		return -1;
	}

	printk(BIOS_INFO, "ME: ICC SET CLOCK ENABLES 0x%08x\n", mask);
	return 0;
}
638 
639 /* Determine the path that we should take based on ME status */
intel_me_path(struct device * dev)640 static me_bios_path intel_me_path(struct device *dev)
641 {
642 	me_bios_path path = ME_DISABLE_BIOS_PATH;
643 	struct me_hfs hfs;
644 	struct me_hfs2 hfs2;
645 
646 	/* Check and dump status */
647 	intel_me_status();
648 
649 	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
650 	pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);
651 
652 	/* Check Current Working State */
653 	switch (hfs.working_state) {
654 	case ME_HFS_CWS_NORMAL:
655 		path = ME_NORMAL_BIOS_PATH;
656 		break;
657 	case ME_HFS_CWS_REC:
658 		path = ME_RECOVERY_BIOS_PATH;
659 		break;
660 	default:
661 		path = ME_DISABLE_BIOS_PATH;
662 		break;
663 	}
664 
665 	/* Check Current Operation Mode */
666 	switch (hfs.operation_mode) {
667 	case ME_HFS_MODE_NORMAL:
668 		break;
669 	case ME_HFS_MODE_DEBUG:
670 	case ME_HFS_MODE_DIS:
671 	case ME_HFS_MODE_OVER_JMPR:
672 	case ME_HFS_MODE_OVER_MEI:
673 	default:
674 		path = ME_DISABLE_BIOS_PATH;
675 		break;
676 	}
677 
678 	/* Check for any error code and valid firmware and MBP */
679 	if (hfs.error_code || hfs.fpt_bad)
680 		path = ME_ERROR_BIOS_PATH;
681 
682 	/* Check if the MBP is ready */
683 	if (!hfs2.mbp_rdy) {
684 		printk(BIOS_CRIT, "%s: mbp is not ready!\n",
685 		       __func__);
686 		path = ME_ERROR_BIOS_PATH;
687 	}
688 
689 	if (CONFIG(ELOG) && path != ME_NORMAL_BIOS_PATH) {
690 		struct elog_event_data_me_extended data = {
691 			.current_working_state = hfs.working_state,
692 			.operation_state       = hfs.operation_state,
693 			.operation_mode        = hfs.operation_mode,
694 			.error_code            = hfs.error_code,
695 			.progress_code         = hfs2.progress_code,
696 			.current_pmevent       = hfs2.current_pmevent,
697 			.current_state         = hfs2.current_state,
698 		};
699 		elog_add_event_byte(ELOG_TYPE_MANAGEMENT_ENGINE, path);
700 		elog_add_event_raw(ELOG_TYPE_MANAGEMENT_ENGINE_EXT,
701 				   &data, sizeof(data));
702 	}
703 
704 	return path;
705 }
706 
707 /* Prepare ME for MEI messages */
intel_mei_setup(struct device * dev)708 static int intel_mei_setup(struct device *dev)
709 {
710 	struct resource *res;
711 	struct mei_csr host;
712 
713 	/* Find the MMIO base for the ME interface */
714 	res = probe_resource(dev, PCI_BASE_ADDRESS_0);
715 	if (!res || res->base == 0 || res->size == 0) {
716 		printk(BIOS_DEBUG, "ME: MEI resource not present!\n");
717 		return -1;
718 	}
719 	mei_base_address = res2mmio(res, 0, 0);
720 
721 	/* Ensure Memory and Bus Master bits are set */
722 	pci_or_config16(dev, PCI_COMMAND, PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY);
723 
724 	/* Clean up status for next message */
725 	read_host_csr(&host);
726 	host.interrupt_generate = 1;
727 	host.ready = 1;
728 	host.reset = 0;
729 	write_host_csr(&host);
730 
731 	return 0;
732 }
733 
734 /* Read the Extend register hash of ME firmware */
intel_me_extend_valid(struct device * dev)735 static int intel_me_extend_valid(struct device *dev)
736 {
737 	struct me_heres status;
738 	u32 extend[8] = {0};
739 	int i, count = 0;
740 
741 	pci_read_dword_ptr(dev, &status, PCI_ME_HERES);
742 	if (!status.extend_feature_present) {
743 		printk(BIOS_ERR, "ME: Extend Feature not present\n");
744 		return -1;
745 	}
746 
747 	if (!status.extend_reg_valid) {
748 		printk(BIOS_ERR, "ME: Extend Register not valid\n");
749 		return -1;
750 	}
751 
752 	switch (status.extend_reg_algorithm) {
753 	case PCI_ME_EXT_SHA1:
754 		count = 5;
755 		printk(BIOS_DEBUG, "ME: Extend SHA-1: ");
756 		break;
757 	case PCI_ME_EXT_SHA256:
758 		count = 8;
759 		printk(BIOS_DEBUG, "ME: Extend SHA-256: ");
760 		break;
761 	default:
762 		printk(BIOS_ERR, "ME: Extend Algorithm %d unknown\n",
763 		       status.extend_reg_algorithm);
764 		return -1;
765 	}
766 
767 	for (i = 0; i < count; ++i) {
768 		extend[i] = pci_read_config32(dev, PCI_ME_HER(i));
769 		printk(BIOS_DEBUG, "%08x", extend[i]);
770 	}
771 	printk(BIOS_DEBUG, "\n");
772 
773 	/* Save hash in NVS for the OS to verify */
774 	if (CONFIG(CHROMEOS_NVS))
775 		chromeos_set_me_hash(extend, count);
776 
777 	return 0;
778 }
779 
/* Log the interesting parts of the parsed MBP payload. */
static void intel_me_print_mbp(me_bios_payload *mbp_data)
{
	me_print_fw_version(mbp_data->fw_version_name);

	if (CONFIG(DEBUG_INTEL_ME))
		me_print_fwcaps(mbp_data->fw_capabilities);

	/* Platform boot-time measurements, when reported */
	if (mbp_data->plat_time) {
		printk(BIOS_DEBUG, "ME: Wake Event to ME Reset:      %u ms\n",
		       mbp_data->plat_time->wake_event_mrst_time_ms);
		printk(BIOS_DEBUG, "ME: ME Reset to Platform Reset:  %u ms\n",
		       mbp_data->plat_time->mrst_pltrst_time_ms);
		printk(BIOS_DEBUG, "ME: Platform Reset to CPU Reset: %u ms\n",
		       mbp_data->plat_time->pltrst_cpurst_time_ms);
	}
}
796 
me_to_host_words_pending(void)797 static u32 me_to_host_words_pending(void)
798 {
799 	struct mei_csr me;
800 	read_me_csr(&me);
801 	if (!me.ready)
802 		return 0;
803 	return (me.buffer_write_ptr - me.buffer_read_ptr) &
804 		(me.buffer_depth - 1);
805 }
806 
807 struct mbp_payload {
808 	mbp_header header;
809 	u32 data[];
810 };
811 
812 /*
813  * Read and print ME MBP data
814  *
815  * Return -1 to indicate a problem (give up)
816  * Return 0 to indicate success (send LOCK+EOP)
817  * Return 1 to indicate success (send LOCK+EOP with NOACK)
818  */
intel_me_read_mbp(me_bios_payload * mbp_data,struct device * dev)819 static int intel_me_read_mbp(me_bios_payload *mbp_data, struct device *dev)
820 {
821 	mbp_header mbp_hdr;
822 	u32 me2host_pending;
823 	struct mei_csr host;
824 	struct me_hfs2 hfs2;
825 	struct mbp_payload *mbp;
826 	int i;
827 	int ret = 0;
828 
829 	pci_read_dword_ptr(dev, &hfs2, PCI_ME_HFS2);
830 
831 	if (!hfs2.mbp_rdy) {
832 		printk(BIOS_ERR, "ME: MBP not ready\n");
833 		intel_me_mbp_give_up(dev);
834 		return -1;
835 	}
836 
837 	me2host_pending = me_to_host_words_pending();
838 	if (!me2host_pending) {
839 		printk(BIOS_ERR, "ME: no mbp data!\n");
840 		intel_me_mbp_give_up(dev);
841 		return -1;
842 	}
843 
844 	/* we know for sure that at least the header is there */
845 	mei_read_dword_ptr(&mbp_hdr, MEI_ME_CB_RW);
846 
847 	if ((mbp_hdr.num_entries > (mbp_hdr.mbp_size / 2)) ||
848 	    (me2host_pending < mbp_hdr.mbp_size)) {
849 		printk(BIOS_ERR, "ME: mbp of %d entries, total size %d words"
850 		       " buffer contains %d words\n",
851 		       mbp_hdr.num_entries, mbp_hdr.mbp_size,
852 		       me2host_pending);
853 		intel_me_mbp_give_up(dev);
854 		return -1;
855 	}
856 	mbp = malloc(mbp_hdr.mbp_size * sizeof(u32));
857 	if (!mbp) {
858 		intel_me_mbp_give_up(dev);
859 		return -1;
860 	}
861 
862 	mbp->header = mbp_hdr;
863 	me2host_pending--;
864 
865 	i = 0;
866 	while (i != me2host_pending) {
867 		mei_read_dword_ptr(&mbp->data[i], MEI_ME_CB_RW);
868 		i++;
869 	}
870 
871 	read_host_csr(&host);
872 
873 	/* Check that read and write pointers are equal. */
874 	if (host.buffer_read_ptr != host.buffer_write_ptr) {
875 		printk(BIOS_INFO, "ME: MBP Read/Write pointer mismatch\n");
876 		printk(BIOS_INFO, "ME: MBP Waiting for MBP cleared flag\n");
877 
878 		/* Tell ME that the host has finished reading the MBP. */
879 	host.interrupt_generate = 1;
880 		host.reset = 0;
881 	write_host_csr(&host);
882 
883 	/* Wait for the mbp_cleared indicator. */
884 	intel_me_mbp_clear(dev);
885 	} else {
886 		/* Indicate NOACK messages should be used. */
887 		ret = 1;
888 	}
889 
890 	/* Dump out the MBP contents. */
891 	if (CONFIG(DEBUG_INTEL_ME)) {
892 		printk(BIOS_INFO, "ME MBP: Header: items: %d, size dw: %d\n",
893 		       mbp->header.num_entries, mbp->header.mbp_size);
894 		for (i = 0; i < mbp->header.mbp_size - 1; i++)
895 			printk(BIOS_INFO, "ME MBP: %04x: 0x%08x\n", i, mbp->data[i]);
896 	}
897 
898 #define ASSIGN_FIELD_PTR(field_, val_) \
899 	{ \
900 		mbp_data->field_ = (typeof(mbp_data->field_))(void *)val_; \
901 		break; \
902 	}
903 
904 	/* Setup the pointers in the me_bios_payload structure. */
905 	for (i = 0; i < mbp->header.mbp_size - 1;) {
906 		mbp_item_header *item = (void *)&mbp->data[i];
907 
908 		switch (MBP_MAKE_IDENT(item->app_id, item->item_id)) {
909 		case MBP_IDENT(KERNEL, FW_VER):
910 			ASSIGN_FIELD_PTR(fw_version_name, &mbp->data[i+1]);
911 
912 		case MBP_IDENT(ICC, PROFILE):
913 			ASSIGN_FIELD_PTR(icc_profile, &mbp->data[i+1]);
914 
915 		case MBP_IDENT(INTEL_AT, STATE):
916 			ASSIGN_FIELD_PTR(at_state, &mbp->data[i+1]);
917 
918 		case MBP_IDENT(KERNEL, FW_CAP):
919 			ASSIGN_FIELD_PTR(fw_capabilities, &mbp->data[i+1]);
920 
921 		case MBP_IDENT(KERNEL, ROM_BIST):
922 			ASSIGN_FIELD_PTR(rom_bist_data, &mbp->data[i+1]);
923 
924 		case MBP_IDENT(KERNEL, PLAT_KEY):
925 			ASSIGN_FIELD_PTR(platform_key, &mbp->data[i+1]);
926 
927 		case MBP_IDENT(KERNEL, FW_TYPE):
928 			ASSIGN_FIELD_PTR(fw_plat_type, &mbp->data[i+1]);
929 
930 		case MBP_IDENT(KERNEL, MFS_FAILURE):
931 			ASSIGN_FIELD_PTR(mfsintegrity, &mbp->data[i+1]);
932 
933 		case MBP_IDENT(KERNEL, PLAT_TIME):
934 			ASSIGN_FIELD_PTR(plat_time, &mbp->data[i+1]);
935 
936 		case MBP_IDENT(NFC, SUPPORT_DATA):
937 			ASSIGN_FIELD_PTR(nfc_data, &mbp->data[i+1]);
938 		}
939 		i += item->length;
940 	}
941 	#undef ASSIGN_FIELD_PTR
942 
943 	free(mbp);
944 	return ret;
945 }
946 
947 /* Check whether ME is present and do basic init */
intel_me_init(struct device * dev)948 static void intel_me_init(struct device *dev)
949 {
950 	const struct soc_intel_broadwell_pch_config *config = config_of(dev);
951 	me_bios_path path = intel_me_path(dev);
952 	me_bios_payload mbp_data;
953 	int mbp_ret;
954 	struct me_hfs hfs;
955 	struct mei_csr csr;
956 
957 	/* Do initial setup and determine the BIOS path */
958 	printk(BIOS_NOTICE, "ME: BIOS path: %s\n", me_bios_path_values[path]);
959 
960 	if (path == ME_NORMAL_BIOS_PATH) {
961 		/* Validate the extend register */
962 		intel_me_extend_valid(dev);
963 }
964 
965 	memset(&mbp_data, 0, sizeof(mbp_data));
966 
967 	/*
968 	 * According to the ME9 BWG, BIOS is required to fetch MBP data in
969 	 * all boot flows except S3 Resume.
970 	 */
971 
972 	/* Prepare MEI MMIO interface */
973 	if (intel_mei_setup(dev) < 0)
974 		return;
975 
976 	/* Read ME MBP data */
977 	mbp_ret = intel_me_read_mbp(&mbp_data, dev);
978 	if (mbp_ret < 0)
979 		return;
980 	intel_me_print_mbp(&mbp_data);
981 
982 	/* Set clock enables according to devicetree */
983 	if (config->icc_clock_disable)
984 		me_icc_set_clock_enables(config->icc_clock_disable);
985 
986 	/* Make sure ME is in a mode that expects EOP */
987 	pci_read_dword_ptr(dev, &hfs, PCI_ME_HFS);
988 
989 	/* Abort and leave device alone if not normal mode */
990 	if (hfs.fpt_bad ||
991 	    hfs.working_state != ME_HFS_CWS_NORMAL ||
992 	    hfs.operation_mode != ME_HFS_MODE_NORMAL)
993 		return;
994 
995 	if (mbp_ret) {
996 		/*
997 		 * MBP Cleared wait is skipped,
998 		 * Do not expect ACK and reset when complete.
999 		 */
1000 
1001 		/* Send HMRFPO Lock command, no response */
1002 		mkhi_hmrfpo_lock_noack();
1003 
1004 		/* Send END OF POST command, no response */
1005 		mkhi_end_of_post_noack();
1006 
1007 		/* Assert reset and interrupt */
1008 		read_host_csr(&csr);
1009 		csr.interrupt_generate = 1;
1010 		csr.reset = 1;
1011 		write_host_csr(&csr);
1012 	} else {
1013 		/*
1014 		 * MBP Cleared wait was not skipped
1015 		 */
1016 
1017 		/* Send HMRFPO LOCK command */
1018 		mkhi_hmrfpo_lock();
1019 
1020 		/* Send EOP command so ME stops accepting other commands */
1021 		mkhi_end_of_post();
1022 	}
1023 }
1024 
intel_me_enable(struct device * dev)1025 static void intel_me_enable(struct device *dev)
1026 {
1027 	/* Avoid talking to the device in S3 path */
1028 	if (acpi_is_wakeup_s3() && CONFIG(DISABLE_ME_PCI)) {
1029 		dev->enabled = 0;
1030 		pch_disable_devfn(dev);
1031 	}
1032 }
1033 
1034 static struct device_operations device_ops = {
1035 	.read_resources		= &pci_dev_read_resources,
1036 	.set_resources		= &pci_dev_set_resources,
1037 	.enable_resources	= &pci_dev_enable_resources,
1038 	.enable			= &intel_me_enable,
1039 	.init			= &intel_me_init,
1040 	.final			= &intel_me_finalize,
1041 	.ops_pci		= &pci_dev_ops_pci,
1042 };
1043 
/* PCI device IDs this driver binds to; zero-terminated */
static const unsigned short pci_device_ids[] = {
	0x9c3a, /* Low Power */
	0x9cba, /* WildcatPoint */
	0
};
1049 
1050 static const struct pci_driver intel_me __pci_driver = {
1051 	.ops	 = &device_ops,
1052 	.vendor	 = PCI_VID_INTEL,
1053 	.devices = pci_device_ids,
1054 };
1055