/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

/**
 * eeh_pcid_name - Retrieve name of PCI device driver
 * @pdev: PCI device
 *
 * This routine retrieves the name of the driver bound to the given
 * PCI device, or an empty string if no driver is bound.
 */
static inline const char *eeh_pcid_name(struct pci_dev *pdev)
{
	if (pdev && pdev->dev.driver)
		return pdev->dev.driver->name;
	return "";
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function retrieves the PCI device driver for the indicated
 * PCI device. It also takes a reference on the driver's module so
 * that the driver cannot be unloaded while we are using it, which
 * would otherwise crash the kernel.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Dereference on the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the module reference taken by eeh_pcid_get()
 * on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

#if 0
static void print_device_node_tree(struct pci_dn *pdn, int dent)
{
	int i;
	struct device_node *pc;

	if (!pdn)
		return;
	for (i = 0; i < dent; i++)
		printk(" ");
	printk("dn=%s mode=%x \tcfg_addr=%x pe_addr=%x \tfull=%s\n",
		pdn->node->name, pdn->eeh_mode, pdn->eeh_config_addr,
		pdn->eeh_pe_config_addr, pdn->node->full_name);
	dent += 3;
	pc = pdn->node->child;
	while (pc) {
		print_device_node_tree(PCI_DN(pc), dent);
		pc = pc->sibling;
	}
}
#endif

/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called when a temporary or permanent error is
 * reported to a PCI device, to disable that device's interrupt. If the
 * device uses MSI or MSI-X, no real work is needed: EEH freezes DMA
 * for devices hitting EEH errors, which implicitly stops MSI and
 * MSI-X interrupts as well.
 */
static void eeh_disable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (dev->msi_enabled || dev->msix_enabled)
		return;

	if (!irq_has_action(dev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(dev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @dev: PCI device
 *
 * This routine must be called to re-enable the interrupt of a failed
 * device when it is about to be resumed.
 */
static void eeh_enable_irq(struct pci_dev *dev)
{
	struct eeh_dev *edev = pci_dev_to_eeh_dev(dev);

	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(dev->irq)))
			enable_irq(dev->irq);
	}
}

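/**
 * eeh_dev_removed - Check if the EEH device has been removed
 * @edev: EEH device
 *
 * Returns true if the EEH device is missing or has been marked
 * as removed from the PCI subsystem.
 */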
static bool eeh_dev_removed(struct eeh_dev *edev)
{
	/* EEH device removed ? */
	if (!edev || (edev->mode & EEH_DEV_REMOVED))
		return true;

	return false;
}

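/**
 * eeh_dev_save_state - Save the PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Traversal callback that saves the config space of the PCI device
 * associated with the EEH device. Devices whose config space access
 * is restricted are skipped; they are restored later from the config
 * space saved when the EEH device was created.
 */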
static void *eeh_dev_save_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * Config space access to some adapters is blocked and would
	 * cause a fenced PHB. For those devices we don't save the
	 * current config space here; instead, the config space saved
	 * when the EEH device was created will be restored.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return NULL;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_save_state(pdev);
	return NULL;
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @data: eeh device
 * @userdata: return value
 *
 * Report an EEH error to each device driver, collect up and
 * merge the device driver responses. Cumulative response
 * passed back in "userdata".
 */
static void *eeh_report_error(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_frozen;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->error_detected(dev, pci_channel_io_frozen);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @data: eeh device
 * @userdata: return value
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled. Collects up and merges the device driver responses.
 * Cumulative response passed back in "userdata".
 */
static void *eeh_report_mmio_enabled(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	if (!driver->err_handler ||
	    !driver->err_handler->mmio_enabled ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->mmio_enabled(dev);

	/* A driver that needs a reset trumps all others */
	if (rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;
	if (*res == PCI_ERS_RESULT_NONE)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called while EEH tries to reset a particular
 * PCI device so that the associated PCI device driver can take
 * whatever actions are needed to make the device work again once
 * it has been recovered.
 */
static void *eeh_report_reset(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	enum pci_ers_result rc, *res = userdata;
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->slot_reset ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		eeh_pcid_put(dev);
		return NULL;
	}

	rc = driver->err_handler->slot_reset(dev);
	if ((*res == PCI_ERS_RESULT_NONE) ||
	    (*res == PCI_ERS_RESULT_RECOVERED))
		*res = rc;
	if (*res == PCI_ERS_RESULT_DISCONNECT &&
	    rc == PCI_ERS_RESULT_NEED_RESET)
		*res = rc;

	eeh_pcid_put(dev);
	return NULL;
}

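/**
 * eeh_dev_restore_state - Restore the PCI config space of a device
 * @data: EEH device
 * @userdata: unused
 *
 * Traversal callback that restores the config space saved by
 * eeh_dev_save_state(). For devices with restricted config space
 * access, the BARs saved at EEH device creation are restored
 * instead, once per PE.
 */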
static void *eeh_dev_restore_state(void *data, void *userdata)
{
	struct eeh_dev *edev = data;
	struct pci_dev *pdev;

	if (!edev)
		return NULL;

	/*
	 * The content of the config space wasn't saved because config
	 * space access is blocked on some adapters. For those devices
	 * we restore the config space that was saved when the EEH
	 * device was created, once the last device of the PE has
	 * been reached.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->list, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return NULL;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return NULL;

	pci_restore_state(pdev);
	return NULL;
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @data: eeh device
 * @userdata: return value
 *
 * This routine must be called to notify the device driver that it
 * may resume operation, so that the driver can do whatever
 * initialization is needed to make the recovered device work again.
 */
static void *eeh_report_resume(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_normal;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_enable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->resume ||
	    (edev->mode & EEH_DEV_NO_HANDLER)) {
		edev->mode &= ~EEH_DEV_NO_HANDLER;
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->resume(dev);

	eeh_pcid_put(dev);
	return NULL;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @data: eeh device
 * @userdata: return value
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static void *eeh_report_failure(void *data, void *userdata)
{
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct pci_driver *driver;

	if (!dev || eeh_dev_removed(edev))
		return NULL;
	dev->error_state = pci_channel_io_perm_failure;

	driver = eeh_pcid_get(dev);
	if (!driver)
		return NULL;

	eeh_disable_irq(dev);

	if (!driver->err_handler ||
	    !driver->err_handler->error_detected) {
		eeh_pcid_put(dev);
		return NULL;
	}

	driver->err_handler->error_detected(dev, pci_channel_io_perm_failure);

	eeh_pcid_put(dev);
	return NULL;
}

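/**
 * eeh_rmv_device - Remove a device without an EEH-aware driver
 * @data: EEH device
 * @userdata: pointer to a counter of removed devices
 *
 * Traversal callback that removes the PCI device from the PCI
 * subsystem when its driver doesn't provide EEH error handlers.
 * Bridges and already removed devices are skipped.
 */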
static void *eeh_rmv_device(void *data, void *userdata)
{
	struct pci_driver *driver;
	struct eeh_dev *edev = (struct eeh_dev *)data;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	int *removed = (int *)userdata;

	/*
	 * Actually, we should remove the PCI bridges as well.
	 * However, doing so adds lots of complexity, particularly
	 * because some of the devices under a bridge might support
	 * EEH. So, for simplicity, we only care about plain PCI
	 * devices here.
	 */
	if (!dev || (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
		return NULL;

	/*
	 * We rely on the refcount-based pcibios_release_device() to
	 * detach permanently offlined PEs. Unfortunately, that's not
	 * reliable enough: we might still have permanently offlined
	 * PEs attached, but we needn't take care of them or their
	 * child devices.
	 */
	if (eeh_dev_removed(edev))
		return NULL;

	driver = eeh_pcid_get(dev);
	if (driver) {
		eeh_pcid_put(dev);
		if (driver->err_handler)
			return NULL;
	}

	/* Remove it from PCI subsystem */
	pr_debug("EEH: Removing %s without EEH sensitive driver\n",
		 pci_name(dev));
	edev->bus = dev->bus;
	edev->mode |= EEH_DEV_DISCONNECTED;
	(*removed)++;

	pci_lock_rescan_remove();
	pci_stop_and_remove_bus_device(dev);
	pci_unlock_rescan_remove();

	return NULL;
}

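/**
 * eeh_pe_detach_dev - Detach disconnected EEH devices from the PE
 * @data: EEH PE
 * @userdata: unused
 *
 * Traversal callback that removes every EEH device marked as
 * disconnected from its parent PE, so that the binding can be
 * rebuilt when the PCI devices are added back.
 */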
static void *eeh_pe_detach_dev(void *data, void *userdata)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear the frozen state of a PE. This is needed on PowerNV,
 * where a PE is kept frozen until BAR restore is completed; it's harmless
 * to clear it on pSeries. To be consistent with the PE reset (which is
 * retried up to 3 times), we also try to clear the frozen state up to
 * 3 times.
 */
static void *__eeh_clear_pe_frozen_state(void *data, void *flag)
{
	struct eeh_pe *pe = (struct eeh_pe *)data;
	bool *clear_sw_state = flag;
	int i, rc = 1;

	for (i = 0; rc && i < 3; i++)
		rc = eeh_unfreeze_pe(pe, clear_sw_state);

	/* Stop immediately on any errors */
	if (rc) {
		pr_warn("%s: Failure %d unfreezing PHB#%x-PE#%x\n",
			__func__, rc, pe->phb->global_number, pe->addr);
		return (void *)pe;
	}

	return NULL;
}

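/**
 * eeh_clear_pe_frozen_state - Clear the frozen state of a PE hierarchy
 * @pe: EEH PE
 * @clear_sw_state: also clear the software-maintained frozen state
 *
 * Walks the PE and its children, unfreezing each of them. On success
 * the isolated state of the PE is cleared as well. Returns 0 on
 * success, -EIO otherwise.
 */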
static int eeh_clear_pe_frozen_state(struct eeh_pe *pe,
				     bool clear_sw_state)
{
	void *rc;

	rc = eeh_pe_traverse(pe, __eeh_clear_pe_frozen_state, &clear_sw_state);
	if (!rc)
		eeh_pe_state_clear(pe, EEH_PE_ISOLATED);

	return rc ? -EIO : 0;
}

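/**
 * eeh_pe_reset_and_recover - Reset a PE and recover its devices
 * @pe: EEH PE
 *
 * Saves the state of the devices in the PE, resets the PE, clears
 * its frozen state, and then restores the saved state and resumes
 * the device drivers. Returns 0 on success; otherwise the error
 * from the failed reset or unfreeze step is returned.
 */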
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int result, ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
	ret = eeh_reset_pe(pe);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING | EEH_PE_CFG_BLOCKED);
		return ret;
	}
	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		return ret;
	}

	/* Notify completion of reset */
	result = PCI_ERS_RESULT_NONE;
	eeh_pe_dev_traverse(pe, eeh_report_reset, &result);

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Resume */
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 *
 * This routine must be called to do reset on the indicated PE.
 * During the reset, udev might be invoked because those affected
 * PCI devices will be removed and then added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus)
{
	struct pci_bus *frozen_bus = eeh_pe_bus_get(pe);
	struct timeval tstamp;
	int cnt, rc, removed = 0;

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pcibios_add_pci_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (bus) {
		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(bus);
		pci_unlock_rescan_remove();
	} else if (frozen_bus) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, &removed);
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	eeh_pe_state_mark(pe, EEH_PE_CFG_BLOCKED);
	rc = eeh_reset_pe(pe);
	if (rc) {
		eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);
		return rc;
	}

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);
	eeh_pe_state_clear(pe, EEH_PE_CFG_BLOCKED);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (bus) {
		pr_info("EEH: Sleep 5s ahead of complete hotplug\n");
		ssleep(5);

		/*
		 * The EEH device is still connected with its parent
		 * PE. We should disconnect it so the binding can be
		 * rebuilt when adding PCI devices.
		 */
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(bus);
	} else if (frozen_bus && removed) {
		pr_info("EEH: Sleep 5s ahead of partial hotplug\n");
		ssleep(5);

		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		pcibios_add_pci_devices(frozen_bus);
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}


/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300

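/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE
 *
 * Attempts to recover the given PE. Returns true when the PE and its
 * devices have been removed permanently and the caller must not use
 * @pe any more; returns false otherwise.
 */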
static bool eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *frozen_bus;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;

	frozen_bus = eeh_pe_bus_get(pe);
	if (!frozen_bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%d-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return false;
	}

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > EEH_MAX_ALLOWED_FREEZES)
		goto excess_failures;
	pr_warn("EEH: This PCI device has failed %d times in the last hour\n",
		pe->freeze_count);

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 */
	pr_info("EEH: Notify device drivers to shutdown\n");
	eeh_pe_dev_traverse(pe, eeh_report_error, &result);

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 3 seconds for certain systems.
	 */
	rc = eeh_ops->wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
	if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
		pr_warn("EEH: Permanent failure\n");
		goto hard_fail;
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	pr_info("EEH: Collect temporary log\n");
	eeh_slot_error_detail(pe, EEH_LOG_TEMP);

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, frozen_bus);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_dev_traverse(pe, eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0)
			goto hard_fail;
		if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the
			 * PE is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device has a hard failure, then shut off everything. */
	if (result == PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: Device driver gave up\n");
		goto hard_fail;
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, NULL);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			goto hard_fail;
		}

		pr_info("EEH: Notify device drivers "
			"the completion of reset\n");
		result = PCI_ERS_RESULT_NONE;
		eeh_pe_dev_traverse(pe, eeh_report_reset, &result);
	}

	/* All devices should claim they have recovered by now. */
	if ((result != PCI_ERS_RESULT_RECOVERED) &&
	    (result != PCI_ERS_RESULT_NONE)) {
		pr_warn("EEH: Not recovered\n");
		goto hard_fail;
	}

	/* Tell all device drivers that they can resume operations */
	pr_info("EEH: Notify device driver to resume\n");
	eeh_pe_dev_traverse(pe, eeh_report_resume, NULL);

	return false;

excess_failures:
	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: PHB#%d-PE#%x has failed %d times in the\n"
	       "last hour and has been permanently disabled.\n"
	       "Please try reseating or replacing it.\n",
	       pe->phb->global_number, pe->addr,
	       pe->freeze_count);
	goto perm_error;

hard_fail:
	pr_err("EEH: Unable to recover from failure from PHB#%d-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

perm_error:
	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_pe_dev_traverse(pe, eeh_report_failure, NULL);

	/* Mark the PE to be removed permanently */
	pe->freeze_count = EEH_MAX_ALLOWED_FREEZES + 1;

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly to avoid accessing
	 * their PCI config space any more.
	 */
	if (frozen_bus) {
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pcibios_remove_pci_devices(frozen_bus);
		pci_unlock_rescan_remove();

		/* The passed PE should no longer be used */
		return true;
	}
	return false;
}

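/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates over the reported errors: frozen PEs and fenced
 * PHBs are recovered like normal events, while dead PHBs and a dead IOC
 * have their devices removed from the PCI subsystem.
 */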
static void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;

	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_state_mark(phb_pe, EEH_PE_ISOLATED);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc == EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_ISOLATED);
			else
				eeh_pe_state_mark(pe,
					EEH_PE_ISOLATED | EEH_PE_RECOVERING);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * Fenced PHBs and frozen PEs are handled as normal
		 * events. For dead PHBs and a dead IOC, the affected
		 * PHBs have to be removed.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			/*
			 * eeh_handle_normal_event() can make the PE stale if it
			 * determines that the PE cannot possibly be recovered.
			 * Don't modify the PE state if that's the case.
			 */
			if (eeh_handle_normal_event(pe))
				continue;

			eeh_pe_state_clear(pe, EEH_PE_RECOVERING);
		} else {
			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				/* Notify all devices to be down */
				bus = eeh_pe_bus_get(phb_pe);
				eeh_pe_dev_traverse(pe,
					eeh_report_failure, NULL);
				pcibios_remove_pci_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If a dead IOC has been detected, there is no need to
		 * proceed any further since all PHBs have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}

/**
 * eeh_handle_event - Reset a PCI device after hard lockup.
 * @pe: EEH PE
 *
 * When a PHB detects address or data parity errors on a particular
 * PCI slot, the associated PE is frozen. DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI
 * adapter firmware) can also cause EEH errors, as can #SERR, #PERR
 * and other miscellaneous PCI-related errors.
 *
 * The recovery process consists of unplugging the device driver (which
 * generated hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * & devices under this slot, and then finally restarting the device
 * drivers (which cause a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_event(struct eeh_pe *pe)
{
	if (pe)
		eeh_handle_normal_event(pe);
	else
		eeh_handle_special_event();
}
