/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/pci.h>

#include <linux/kthread.h>
#include <linux/interrupt.h>

#include "mei_dev.h"
#include "hbm.h"

#include "hw-me.h"
#include "hw-me-regs.h"
/**
 * mei_me_reg_read - Reads 32bit data from the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset from which to read the data
 *
 * Return: register value (u32)
 */
static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
				  unsigned long offset)
{
	return ioread32(hw->mem_addr + offset);
}


/**
 * mei_me_reg_write - Writes 32bit data to the mei device
 *
 * @hw: the me hardware structure
 * @offset: offset to which to write the data
 * @value: register value to write (u32)
 */
static inline void mei_me_reg_write(const struct mei_me_hw *hw,
				    unsigned long offset, u32 value)
{
	iowrite32(value, hw->mem_addr + offset);
}

/**
 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
 *  read window register
 *
 * @dev: the device structure
 *
 * Return: ME_CB_RW register value (u32)
 */
static u32 mei_me_mecbrw_read(const struct mei_device *dev)
{
	return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
}

/**
 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
 *
 * @hw: the me hardware structure
 *
 * Return: ME_CSR_HA register value (u32)
 */
static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, ME_CSR_HA);
}

/**
 * mei_hcsr_read - Reads 32bit data from the host CSR
 *
 * @hw: the me hardware structure
 *
 * Return: H_CSR register value (u32)
 */
static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
{
	return mei_me_reg_read(hw, H_CSR);
}

/**
 * mei_hcsr_set - writes H_CSR register to the mei device,
 * and ignores the H_IS bit for it is write-one-to-zero.
 *
 * @hw: the me hardware structure
 * @hcsr: new register value
 */
static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
{
	hcsr &= ~H_IS;
	mei_me_reg_write(hw, H_CSR, hcsr);
}

/**
 * mei_me_fw_status - read fw status register from pci config space
 *
 * @dev: mei device
 * @fw_status: fw status register values
 *
 * Return: 0 on success, error otherwise
 */
static int mei_me_fw_status(struct mei_device *dev,
			    struct mei_fw_status *fw_status)
{
	struct pci_dev *pdev = to_pci_dev(dev->dev);
	struct mei_me_hw *hw = to_me_hw(dev);
	const struct mei_fw_status *fw_src = &hw->cfg->fw_status;
	int ret;
	int i;

	if (!fw_status)
		return -EINVAL;

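	/* each entry of fw_src->status[] is the PCI config space offset
	 * of one firmware status register for this HW generation */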
	fw_status->count = fw_src->count;
	for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
		ret = pci_read_config_dword(pdev,
			fw_src->status[i], &fw_status->status[i]);
		if (ret)
			return ret;
	}

	return 0;
}

/**
 * mei_me_hw_config - configure hw dependent settings
 *
 * @dev: mei device
 */
static void mei_me_hw_config(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* Doesn't change in runtime */
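	/* H_CBD (bits 31:24 of H_CSR) encodes the host circular
	 * buffer depth in slots (dwords) */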
	dev->hbuf_depth = (hcsr & H_CBD) >> 24;

	hw->pg_state = MEI_PG_OFF;
}

/**
 * mei_me_pg_state - translate internal pg state
 *  to the mei power gating state
 *
 * @dev: mei device
 *
 * Return: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
 */
static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	return hw->pg_state;
}

/**
 * mei_me_intr_clear - clear and stop interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_clear(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

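	/* H_IS is write-one-to-clear: writing the value we just read
	 * back to H_CSR clears the pending interrupt status */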
	if ((hcsr & H_IS) == H_IS)
		mei_me_reg_write(hw, H_CSR, hcsr);
}

/**
 * mei_me_intr_enable - enables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_enable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr |= H_IE;
	mei_hcsr_set(hw, hcsr);
}

/**
 * mei_me_intr_disable - disables mei device interrupts
 *
 * @dev: the device structure
 */
static void mei_me_intr_disable(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr &= ~H_IE;
	mei_hcsr_set(hw, hcsr);
}

/**
 * mei_me_hw_reset_release - release device from the reset
 *
 * @dev: the device structure
 */
static void mei_me_hw_reset_release(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	hcsr |= H_IG;
	hcsr &= ~H_RST;
	mei_hcsr_set(hw, hcsr);

	/* complete this write before we set host ready on another CPU */
	mmiowb();
}

/**
 * mei_me_hw_reset - resets fw via mei csr register.
 *
 * @dev: the device structure
 * @intr_enable: if interrupt should be enabled after reset.
 *
 * Return: always 0
 */
static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 hcsr = mei_hcsr_read(hw);

	/* H_RST may be found lit before reset is started,
	 * for example if preceding reset flow hasn't completed.
	 * In that case asserting H_RST will be ignored, therefore
	 * we need to clean H_RST bit to start a successful reset sequence.
	 */
	if ((hcsr & H_RST) == H_RST) {
		dev_warn(dev->dev, "H_RST is set = 0x%08X", hcsr);
		hcsr &= ~H_RST;
		mei_hcsr_set(hw, hcsr);
		hcsr = mei_hcsr_read(hw);
	}

	hcsr |= H_RST | H_IG | H_IS;

	if (intr_enable)
		hcsr |= H_IE;
	else
		hcsr &= ~H_IE;

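	/* write through mei_me_reg_write(), not mei_hcsr_set(): H_IS must
	 * stay set here so that any pending interrupt is cleared as well */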
	dev->recvd_hw_ready = false;
	mei_me_reg_write(hw, H_CSR, hcsr);

	/*
	 * Host reads the H_CSR once to ensure that the
	 * posted write to H_CSR completes.
	 */
	hcsr = mei_hcsr_read(hw);

	if ((hcsr & H_RST) == 0)
		dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);

	if ((hcsr & H_RDY) == H_RDY)
		dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);

	if (!intr_enable)
		mei_me_hw_reset_release(dev);

	return 0;
}

/**
 * mei_me_host_set_ready - enable device
 *
 * @dev: mei device
 */
static void mei_me_host_set_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->host_hw_state = mei_hcsr_read(hw);
	hw->host_hw_state |= H_IE | H_IG | H_RDY;
	mei_hcsr_set(hw, hw->host_hw_state);
}

/**
 * mei_me_host_is_ready - check whether the host has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_host_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->host_hw_state = mei_hcsr_read(hw);
	return (hw->host_hw_state & H_RDY) == H_RDY;
}

/**
 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
 *
 * @dev: mei device
 * Return: bool
 */
static bool mei_me_hw_is_ready(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	hw->me_hw_state = mei_me_mecsr_read(hw);
	return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
}

/**
 * mei_me_hw_ready_wait - wait until the me(hw) has turned ready
 *  or timeout is reached
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_ready_wait(struct mei_device *dev)
{
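	/* drop device_lock while sleeping so the interrupt thread can
	 * take it, set recvd_hw_ready and wake us up */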
	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_hw_ready,
			dev->recvd_hw_ready,
			mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
	mutex_lock(&dev->device_lock);
	if (!dev->recvd_hw_ready) {
		dev_err(dev->dev, "wait hw ready failed\n");
		return -ETIME;
	}

	mei_me_hw_reset_release(dev);
	dev->recvd_hw_ready = false;
	return 0;
}

/**
 * mei_me_hw_start - hw start routine
 *
 * @dev: mei device
 * Return: 0 on success, error otherwise
 */
static int mei_me_hw_start(struct mei_device *dev)
{
	int ret = mei_me_hw_ready_wait(dev);

	if (ret)
		return ret;
	dev_dbg(dev->dev, "hw is ready\n");

	mei_me_host_set_ready(dev);
	return ret;
}


/**
 * mei_hbuf_filled_slots - gets number of device filled buffer slots
 *
 * @dev: the device structure
 *
 * Return: number of filled slots
 */
static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;

	hw->host_hw_state = mei_hcsr_read(hw);

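	/* H_CBRP (bits 15:8) and H_CBWP (bits 23:16) are free-running
	 * 8-bit pointers; their difference, taken modulo 256 via the
	 * unsigned char cast, is the filled slot count even after the
	 * pointers wrap around */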
	read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
	write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);

	return (unsigned char) (write_ptr - read_ptr);
}

/**
 * mei_me_hbuf_is_empty - checks if host buffer is empty.
 *
 * @dev: the device structure
 *
 * Return: true if empty, false - otherwise.
 */
static bool mei_me_hbuf_is_empty(struct mei_device *dev)
{
	return mei_hbuf_filled_slots(dev) == 0;
}

/**
 * mei_me_hbuf_empty_slots - counts write empty slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise empty slots count
 */
static int mei_me_hbuf_empty_slots(struct mei_device *dev)
{
	unsigned char filled_slots, empty_slots;

	filled_slots = mei_hbuf_filled_slots(dev);
	empty_slots = dev->hbuf_depth - filled_slots;

	/* check for overflow */
	if (filled_slots > dev->hbuf_depth)
		return -EOVERFLOW;

	return empty_slots;
}

/**
 * mei_me_hbuf_max_len - returns size of hw buffer.
 *
 * @dev: the device structure
 *
 * Return: size of hw buffer in bytes
 */
static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
{
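	/* each slot is one dword; one slot is always consumed by the
	 * mei_msg_hdr that precedes the payload */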
	return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
}


/**
 * mei_me_write_message - writes a message to mei device.
 *
 * @dev: the device structure
 * @header: mei HECI header of message
 * @buf: message payload to be written
 *
 * Return: 0 on success, -EMSGSIZE if the message does not fit,
 *  -EIO if the write has failed
 */
static int mei_me_write_message(struct mei_device *dev,
				struct mei_msg_hdr *header,
				unsigned char *buf)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long rem;
	unsigned long length = header->length;
	u32 *reg_buf = (u32 *)buf;
	u32 hcsr;
	u32 dw_cnt;
	int i;
	int empty_slots;

	dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));

	empty_slots = mei_hbuf_empty_slots(dev);
	dev_dbg(dev->dev, "empty slots = %d.\n", empty_slots);

	dw_cnt = mei_data2slots(length);
	if (empty_slots < 0 || dw_cnt > empty_slots)
		return -EMSGSIZE;

	mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));

	for (i = 0; i < length / 4; i++)
		mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);

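	/* pad the trailing 1-3 bytes out to a full dword slot */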
	rem = length & 0x3;
	if (rem > 0) {
		u32 reg = 0;

		memcpy(&reg, &buf[length - rem], rem);
		mei_me_reg_write(hw, H_CB_WW, reg);
	}

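	/* set H_IG ("interrupt generate") to signal the ME that new
	 * data is waiting in the circular buffer */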
	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	if (!mei_me_hw_is_ready(dev))
		return -EIO;

	return 0;
}

/**
 * mei_me_count_full_read_slots - counts read full slots.
 *
 * @dev: the device structure
 *
 * Return: -EOVERFLOW if overflow, otherwise filled slots count
 */
static int mei_me_count_full_read_slots(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	char read_ptr, write_ptr;
	unsigned char buffer_depth, filled_slots;

	hw->me_hw_state = mei_me_mecsr_read(hw);
	buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
	read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
	write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
	filled_slots = (unsigned char) (write_ptr - read_ptr);

	/* check for overflow */
	if (filled_slots > buffer_depth)
		return -EOVERFLOW;

	dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
	return (int)filled_slots;
}

/**
 * mei_me_read_slots - reads a message from mei device.
 *
 * @dev: the device structure
 * @buffer: message buffer to be filled
 * @buffer_length: length in bytes of the message to read
 *
 * Return: always 0
 */
static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
			     unsigned long buffer_length)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 *reg_buf = (u32 *)buffer;
	u32 hcsr;

	for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
		*reg_buf++ = mei_me_mecbrw_read(dev);

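	/* trailing 1-3 bytes still occupy a whole dword in the read
	 * window; read it once and copy only the bytes we need */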
	if (buffer_length > 0) {
		u32 reg = mei_me_mecbrw_read(dev);

		memcpy(reg_buf, &reg, buffer_length);
	}

	hcsr = mei_hcsr_read(hw) | H_IG;
	mei_hcsr_set(hw, hcsr);
	return 0;
}

/**
 * mei_me_pg_enter - write pg enter register
 *
 * @dev: the device structure
 */
static void mei_me_pg_enter(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	reg |= H_HPG_CSR_PGI;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_exit - write pg exit register
 *
 * @dev: the device structure
 */
static void mei_me_pg_exit(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, H_HPG_CSR);

	WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");

	reg |= H_HPG_CSR_PGIHEXR;
	mei_me_reg_write(hw, H_HPG_CSR, reg);
}

/**
 * mei_me_pg_set_sync - perform pg entry procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_set_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

	dev->pg_event = MEI_PG_EVENT_WAIT;

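	/* request isolation entry from the FW over HBM; the PGI bit is
	 * written only after the FW acknowledges the request */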
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
		mei_me_pg_enter(dev);
		ret = 0;
	} else {
		ret = -ETIME;
	}

	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_ON;

	return ret;
}

/**
 * mei_me_pg_unset_sync - perform pg exit procedure
 *
 * @dev: the device structure
 *
 * Return: 0 on success, an error code otherwise
 */
int mei_me_pg_unset_sync(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
	int ret;

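	/* the FW may have requested the exit already (pg_event set from
	 * the HBM path); if so, skip straight to sending the reply */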
	if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
		goto reply;

	dev->pg_event = MEI_PG_EVENT_WAIT;

	mei_me_pg_exit(dev);

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

reply:
	if (dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		ret = -ETIME;
		goto out;
	}

	dev->pg_event = MEI_PG_EVENT_INTR_WAIT;
	ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
	if (ret)
		return ret;

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(dev->wait_pg,
		dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED, timeout);
	mutex_lock(&dev->device_lock);

	if (dev->pg_event == MEI_PG_EVENT_INTR_RECEIVED)
		ret = 0;
	else
		ret = -ETIME;

out:
	dev->pg_event = MEI_PG_EVENT_IDLE;
	hw->pg_state = MEI_PG_OFF;

	return ret;
}

/**
 * mei_me_pg_in_transition - is device now in pg transition
 *
 * @dev: the device structure
 *
 * Return: true if in pg transition, false otherwise
 */
static bool mei_me_pg_in_transition(struct mei_device *dev)
{
	return dev->pg_event >= MEI_PG_EVENT_WAIT &&
	       dev->pg_event <= MEI_PG_EVENT_INTR_WAIT;
}

/**
 * mei_me_pg_is_enabled - detect if PG is supported by HW
 *
 * @dev: the device structure
 *
 * Return: true if pg is supported, false otherwise
 */
static bool mei_me_pg_is_enabled(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 reg = mei_me_reg_read(hw, ME_CSR_HA);

	if ((reg & ME_PGIC_HRA) == 0)
		goto notsupported;

	if (!dev->hbm_f_pg_supported)
		goto notsupported;

	return true;

notsupported:
	dev_dbg(dev->dev, "pg: not supported: PGI = %d hbm version %d.%d ?= %d.%d\n",
		!!(reg & ME_PGIC_HRA),
		dev->version.major_version,
		dev->version.minor_version,
		HBM_MAJOR_VERSION_PGI,
		HBM_MINOR_VERSION_PGI);

	return false;
}

/**
 * mei_me_pg_intr - perform pg processing in interrupt thread handler
 *
 * @dev: the device structure
 */
static void mei_me_pg_intr(struct mei_device *dev)
{
	struct mei_me_hw *hw = to_me_hw(dev);

	if (dev->pg_event != MEI_PG_EVENT_INTR_WAIT)
		return;

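	/* this interrupt completes the PG exit handshake; record it and
	 * wake the waiter in mei_me_pg_unset_sync() */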
	dev->pg_event = MEI_PG_EVENT_INTR_RECEIVED;
	hw->pg_state = MEI_PG_OFF;
	if (waitqueue_active(&dev->wait_pg))
		wake_up(&dev->wait_pg);
}

/**
 * mei_me_irq_quick_handler - The ISR of the MEI device
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 */
irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_me_hw *hw = to_me_hw(dev);
	u32 csr_reg = mei_hcsr_read(hw);

	if ((csr_reg & H_IS) != H_IS)
		return IRQ_NONE;

	/* clear H_IS bit in H_CSR */
	mei_me_reg_write(hw, H_CSR, csr_reg);

	return IRQ_WAKE_THREAD;
}

/**
 * mei_me_irq_thread_handler - function called after ISR to handle the
 * interrupt processing.
 *
 * @irq: The irq number
 * @dev_id: pointer to the device structure
 *
 * Return: irqreturn_t
 *
 */
irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
{
	struct mei_device *dev = (struct mei_device *) dev_id;
	struct mei_cl_cb complete_list;
	s32 slots;
	int rets = 0;

	dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
	/* initialize our complete list */
	mutex_lock(&dev->device_lock);
	mei_io_list_init(&complete_list);

	/* Ack the interrupt here
	 * In case of MSI we don't go through the quick handler */
	if (pci_dev_msi_enabled(to_pci_dev(dev->dev)))
		mei_clear_interrupts(dev);

	/* check if ME wants a reset */
	if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
		dev_warn(dev->dev, "FW not ready: resetting.\n");
		schedule_work(&dev->reset_work);
		goto end;
	}

	mei_me_pg_intr(dev);

	/* check if we need to start the dev */
	if (!mei_host_is_ready(dev)) {
		if (mei_hw_is_ready(dev)) {
			dev_dbg(dev->dev, "we need to start the dev.\n");
			dev->recvd_hw_ready = true;
			wake_up(&dev->wait_hw_ready);
		} else {
			dev_dbg(dev->dev, "Spurious Interrupt\n");
		}
		goto end;
	}

	/* check slots available for reading */
	slots = mei_count_full_read_slots(dev);
	while (slots > 0) {
		dev_dbg(dev->dev, "slots to read = %08x\n", slots);
		rets = mei_irq_read_handler(dev, &complete_list, &slots);
		/* There is a race between ME write and interrupt delivery:
		 * Not all data is always available immediately after the
		 * interrupt, so try to read again on the next interrupt.
		 */
		if (rets == -ENODATA)
			break;

		if (rets && dev->dev_state != MEI_DEV_RESETTING) {
			dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
				rets);
			schedule_work(&dev->reset_work);
			goto end;
		}
	}

	dev->hbuf_is_ready = mei_hbuf_is_ready(dev);

	/*
	 * During the PG handshake the only write allowed is the reply to
	 * the PG exit message, so block calls into the write handler
	 * while the pg event indicates a handshake in progress.
	 */
	if (dev->pg_event != MEI_PG_EVENT_WAIT &&
	    dev->pg_event != MEI_PG_EVENT_RECEIVED) {
		rets = mei_irq_write_handler(dev, &complete_list);
		dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
	}

	mei_irq_compl_handler(dev, &complete_list);

end:
	dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return IRQ_HANDLED;
}

static const struct mei_hw_ops mei_me_hw_ops = {

	.fw_status = mei_me_fw_status,
	.pg_state = mei_me_pg_state,

	.host_is_ready = mei_me_host_is_ready,

	.hw_is_ready = mei_me_hw_is_ready,
	.hw_reset = mei_me_hw_reset,
	.hw_config = mei_me_hw_config,
	.hw_start = mei_me_hw_start,

	.pg_in_transition = mei_me_pg_in_transition,
	.pg_is_enabled = mei_me_pg_is_enabled,

	.intr_clear = mei_me_intr_clear,
	.intr_enable = mei_me_intr_enable,
	.intr_disable = mei_me_intr_disable,

	.hbuf_free_slots = mei_me_hbuf_empty_slots,
	.hbuf_is_ready = mei_me_hbuf_is_empty,
	.hbuf_max_len = mei_me_hbuf_max_len,

	.write = mei_me_write_message,

	.rdbuf_full_slots = mei_me_count_full_read_slots,
	.read_hdr = mei_me_mecbrw_read,
	.read = mei_me_read_slots
};

static bool mei_me_fw_type_nm(struct pci_dev *pdev)
{
	u32 reg;

	pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
	/* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
	return (reg & 0x600) == 0x200;
}

#define MEI_CFG_FW_NM				\
	.quirk_probe = mei_me_fw_type_nm

static bool mei_me_fw_type_sps(struct pci_dev *pdev)
{
	u32 reg;

	/* Read ME FW Status check for SPS Firmware */
	pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
	/* if bits [19:16] = 15, running SPS Firmware */
	return (reg & 0xf0000) == 0xf0000;
}

#define MEI_CFG_FW_SPS				\
	.quirk_probe = mei_me_fw_type_sps


#define MEI_CFG_LEGACY_HFS			\
	.fw_status.count = 0

#define MEI_CFG_ICH_HFS				\
	.fw_status.count = 1,			\
	.fw_status.status[0] = PCI_CFG_HFS_1

#define MEI_CFG_PCH_HFS				\
	.fw_status.count = 2,			\
	.fw_status.status[0] = PCI_CFG_HFS_1,	\
	.fw_status.status[1] = PCI_CFG_HFS_2


/* ICH Legacy devices */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};

/**
 * mei_me_dev_init - allocates and initializes the mei device structure
 *
 * @pdev: The pci device structure
 * @cfg: per device generation config
 *
 * Return: The mei_device pointer on success, NULL on failure.
 */
struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
				   const struct mei_cfg *cfg)
{
	struct mei_device *dev;
	struct mei_me_hw *hw;

	dev = kzalloc(sizeof(struct mei_device) +
		      sizeof(struct mei_me_hw), GFP_KERNEL);
	if (!dev)
		return NULL;
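	/* the hw-specific part lives in the memory allocated right after
	 * struct mei_device; to_me_hw() returns a pointer into that tail */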
	hw = to_me_hw(dev);

	mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
	hw->cfg = cfg;
	return dev;
}