/*
 * PCI Error Recovery Driver for RPA-compliant PPC64 platform.
 * Copyright IBM Corp. 2004 2005
 * Copyright Linas Vepstas <linas@linas.org> 2004, 2005
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or (at
 * your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Send comments and feedback to Linas Vepstas <linas@austin.ibm.com>
 */
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/ppc-pci.h>
#include <asm/pci-bridge.h>
#include <asm/prom.h>
#include <asm/rtas.h>

struct eeh_rmv_data {
	struct list_head removed_vf_list;
	int removed_dev_count;
};

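/**
 * eeh_result_priority - Map a pci_ers_result to a severity ranking
 * @result: result code reported by a device driver
 *
 * Returns the priority used by pci_ers_merge_result() to pick the more
 * severe of two driver-reported results; larger values win, with
 * "need reset" ranked above everything else including "disconnect".
 */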
static int eeh_result_priority(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return 1;
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return 2;
	case PCI_ERS_RESULT_RECOVERED:
		return 3;
	case PCI_ERS_RESULT_CAN_RECOVER:
		return 4;
	case PCI_ERS_RESULT_DISCONNECT:
		return 5;
	case PCI_ERS_RESULT_NEED_RESET:
		return 6;
	default:
		WARN_ONCE(1, "Unknown pci_ers_result value: %d\n", (int)result);
		return 0;
	}
};

static const char *pci_ers_result_name(enum pci_ers_result result)
{
	switch (result) {
	case PCI_ERS_RESULT_NONE:
		return "none";
	case PCI_ERS_RESULT_CAN_RECOVER:
		return "can recover";
	case PCI_ERS_RESULT_NEED_RESET:
		return "need reset";
	case PCI_ERS_RESULT_DISCONNECT:
		return "disconnect";
	case PCI_ERS_RESULT_RECOVERED:
		return "recovered";
	case PCI_ERS_RESULT_NO_AER_DRIVER:
		return "no AER driver";
	default:
		WARN_ONCE(1, "Unknown result type: %d\n", (int)result);
		return "unknown";
	}
};

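/*
 * Merge two driver-reported results, keeping whichever one
 * eeh_result_priority() ranks as more severe. For example, merging
 * PCI_ERS_RESULT_CAN_RECOVER with PCI_ERS_RESULT_NEED_RESET yields
 * PCI_ERS_RESULT_NEED_RESET.
 */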
static enum pci_ers_result pci_ers_merge_result(enum pci_ers_result old,
						enum pci_ers_result new)
{
	if (eeh_result_priority(new) > eeh_result_priority(old))
		return new;
	return old;
}

static bool eeh_dev_removed(struct eeh_dev *edev)
{
	return !edev || (edev->mode & EEH_DEV_REMOVED);
}

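/*
 * An EEH device is "actionable" if it still has a pci_dev attached, the
 * device has not permanently failed or been removed, and its PE has not
 * been passed through to user space. Only actionable devices are
 * reported to drivers during recovery.
 */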
static bool eeh_edev_actionable(struct eeh_dev *edev)
{
	if (!edev->pdev)
		return false;
	if (edev->pdev->error_state == pci_channel_io_perm_failure)
		return false;
	if (eeh_dev_removed(edev))
		return false;
	if (eeh_pe_passed(edev->pe))
		return false;

	return true;
}

/**
 * eeh_pcid_get - Get the PCI device driver
 * @pdev: PCI device
 *
 * The function is used to retrieve the PCI device driver for
 * the indicated PCI device. It also takes a reference on the
 * driver's module to prevent the driver from being unloaded
 * while it is in use; otherwise a kernel crash could result.
 */
static inline struct pci_driver *eeh_pcid_get(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return NULL;

	if (!try_module_get(pdev->driver->driver.owner))
		return NULL;

	return pdev->driver;
}

/**
 * eeh_pcid_put - Drop the reference on the PCI device driver
 * @pdev: PCI device
 *
 * The function drops the module reference taken by eeh_pcid_get()
 * on the driver of the indicated PCI device.
 */
static inline void eeh_pcid_put(struct pci_dev *pdev)
{
	if (!pdev || !pdev->driver)
		return;

	module_put(pdev->driver->driver.owner);
}

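/*
 * Illustrative sketch of the get/put pattern used throughout this file
 * (see eeh_pe_report_edev() for the real thing):
 *
 *	driver = eeh_pcid_get(pdev);
 *	if (driver && driver->err_handler)
 *		... invoke the relevant error handler ...
 *	if (driver)
 *		eeh_pcid_put(pdev);
 */
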
/**
 * eeh_disable_irq - Disable interrupt for the recovering device
 * @edev: EEH device
 *
 * This routine is called when reporting a temporary or permanent
 * error to a PCI device in order to disable that device's interrupt.
 * If the device uses MSI or MSI-X there is nothing to do, because EEH
 * freezes DMA for devices hit by an EEH error, which effectively
 * disables MSI and MSI-X as well.
 */
static void eeh_disable_irq(struct eeh_dev *edev)
{
	/* Don't disable MSI and MSI-X interrupts. They are
	 * effectively disabled by the DMA Stopped state
	 * when an EEH error occurs.
	 */
	if (edev->pdev->msi_enabled || edev->pdev->msix_enabled)
		return;

	if (!irq_has_action(edev->pdev->irq))
		return;

	edev->mode |= EEH_DEV_IRQ_DISABLED;
	disable_irq_nosync(edev->pdev->irq);
}

/**
 * eeh_enable_irq - Enable interrupt for the recovering device
 * @edev: EEH device
 *
 * Re-enable the interrupt that eeh_disable_irq() disabled once the
 * failed device is ready to resume.
 */
static void eeh_enable_irq(struct eeh_dev *edev)
{
	if ((edev->mode) & EEH_DEV_IRQ_DISABLED) {
		edev->mode &= ~EEH_DEV_IRQ_DISABLED;
		/*
		 * FIXME !!!!!
		 *
		 * This is just ass backwards. This maze has
		 * unbalanced irq_enable/disable calls. So instead of
		 * finding the root cause it works around the warning
		 * in the irq_enable code by conditionally calling
		 * into it.
		 *
		 * That's just wrong. The warning in the core code is
		 * there to tell people to fix their asymmetries in
		 * their own code, not by abusing the core information
		 * to avoid it.
		 *
		 * I so wish that the asymmetry would be the other way
		 * round and a few more irq_disable calls render that
		 * shit unusable forever.
		 *
		 * tglx
		 */
		if (irqd_irq_disabled(irq_get_irq_data(edev->pdev->irq)))
			enable_irq(edev->pdev->irq);
	}
}

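/*
 * Save the PCI config space of a device before recovery so it can be
 * restored by eeh_dev_restore_state() once the PE has been reset.
 */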
static void eeh_dev_save_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * Accessing the config space of some adapters would cause a
	 * fenced PHB, so we don't save their current config space.
	 * Instead, it is restored from the initial config space saved
	 * when the EEH device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED))
		return;

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_save_state(pdev);
}

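/*
 * Set the PCI channel state of every actionable device in the PE tree
 * rooted at 'root', so their drivers see a consistent error state.
 */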
static void eeh_set_channel_state(struct eeh_pe *root, enum pci_channel_state s)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			if (eeh_edev_actionable(edev))
				edev->pdev->error_state = s;
}

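/*
 * Enable or disable the interrupt of every actionable device in the PE
 * tree rooted at 'root', taking a driver module reference around each
 * change.
 */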
static void eeh_set_irq_state(struct eeh_pe *root, bool enable)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	eeh_for_each_pe(root, pe) {
		eeh_pe_for_each_dev(pe, edev, tmp) {
			if (!eeh_edev_actionable(edev))
				continue;

			if (!eeh_pcid_get(edev->pdev))
				continue;

			if (enable)
				eeh_enable_irq(edev);
			else
				eeh_disable_irq(edev);

			eeh_pcid_put(edev->pdev);
		}
	}
}

typedef enum pci_ers_result (*eeh_report_fn)(struct eeh_dev *,
					     struct pci_dev *,
					     struct pci_driver *);
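/*
 * Invoke one step of the error-reporting sequence ('fn') on a single
 * device, holding the device lock and a driver module reference, and
 * fold the driver's answer into the aggregate recovery result.
 */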
static void eeh_pe_report_edev(struct eeh_dev *edev, eeh_report_fn fn,
			       enum pci_ers_result *result)
{
	struct pci_dev *pdev;
	struct pci_driver *driver;
	enum pci_ers_result new_result;

	pci_lock_rescan_remove();
	pdev = edev->pdev;
	if (pdev)
		get_device(&pdev->dev);
	pci_unlock_rescan_remove();
	if (!pdev) {
		eeh_edev_info(edev, "no device");
		return;
	}
	device_lock(&pdev->dev);
	if (eeh_edev_actionable(edev)) {
		driver = eeh_pcid_get(pdev);

		if (!driver)
			eeh_edev_info(edev, "no driver");
		else if (!driver->err_handler)
			eeh_edev_info(edev, "driver not EEH aware");
		else if (edev->mode & EEH_DEV_NO_HANDLER)
			eeh_edev_info(edev, "driver bound too late");
		else {
			new_result = fn(edev, pdev, driver);
			eeh_edev_info(edev, "%s driver reports: '%s'",
				      driver->name,
				      pci_ers_result_name(new_result));
			if (result)
				*result = pci_ers_merge_result(*result,
							       new_result);
		}
		if (driver)
			eeh_pcid_put(pdev);
	} else {
		eeh_edev_info(edev, "not actionable (%d,%d,%d)", !!pdev,
			      !eeh_dev_removed(edev), !eeh_pe_passed(edev->pe));
	}
	device_unlock(&pdev->dev);
	if (edev->pdev != pdev)
		eeh_edev_warn(edev, "Device changed during processing!\n");
	put_device(&pdev->dev);
}

static void eeh_pe_report(const char *name, struct eeh_pe *root,
			  eeh_report_fn fn, enum pci_ers_result *result)
{
	struct eeh_pe *pe;
	struct eeh_dev *edev, *tmp;

	pr_info("EEH: Beginning: '%s'\n", name);
	eeh_for_each_pe(root, pe)
		eeh_pe_for_each_dev(pe, edev, tmp)
			eeh_pe_report_edev(edev, fn, result);
	if (result)
		pr_info("EEH: Finished:'%s' with aggregate recovery state:'%s'\n",
			name, pci_ers_result_name(*result));
	else
		pr_info("EEH: Finished:'%s'", name);
}

/**
 * eeh_report_error - Report pci error to each device driver
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Report an EEH error to the device's driver by invoking its
 * error_detected() handler with the channel in the "IO frozen" state.
 */
static enum pci_ers_result eeh_report_error(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(IO frozen)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev, pci_channel_io_frozen);

	edev->in_error = true;
	pci_uevent_ers(pdev, PCI_ERS_RESULT_NONE);
	return rc;
}

/**
 * eeh_report_mmio_enabled - Tell drivers that MMIO has been enabled
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * Tells each device driver that IO ports, MMIO and config space I/O
 * are now enabled.
 */
static enum pci_ers_result eeh_report_mmio_enabled(struct eeh_dev *edev,
						   struct pci_dev *pdev,
						   struct pci_driver *driver)
{
	if (!driver->err_handler->mmio_enabled)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->mmio_enabled()", driver->name);
	return driver->err_handler->mmio_enabled(pdev);
}

/**
 * eeh_report_reset - Tell device that slot has been reset
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine is called while EEH resets a particular PCI device so
 * that the associated device driver can take whatever action it needs,
 * usually saving the data it requires to make the device work again
 * once recovery completes.
 */
static enum pci_ers_result eeh_report_reset(struct eeh_dev *edev,
					    struct pci_dev *pdev,
					    struct pci_driver *driver)
{
	if (!driver->err_handler->slot_reset || !edev->in_error)
		return PCI_ERS_RESULT_NONE;
	eeh_edev_info(edev, "Invoking %s->slot_reset()", driver->name);
	return driver->err_handler->slot_reset(pdev);
}

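/*
 * Restore the PCI config space saved by eeh_dev_save_state(). For
 * adapters whose config space cannot be touched, restore the initial
 * BARs saved when the EEH device was created instead.
 */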
static void eeh_dev_restore_state(struct eeh_dev *edev, void *userdata)
{
	struct pci_dev *pdev;

	if (!edev)
		return;

	/*
	 * The current config space wasn't saved for adapters whose
	 * config space is blocked, so for those we restore the initial
	 * config space that was saved when the EEH device was created.
	 */
	if (edev->pe && (edev->pe->state & EEH_PE_CFG_RESTRICTED)) {
		if (list_is_last(&edev->entry, &edev->pe->edevs))
			eeh_pe_restore_bars(edev->pe);

		return;
	}

	pdev = eeh_dev_to_pci_dev(edev);
	if (!pdev)
		return;

	pci_restore_state(pdev);
}

/**
 * eeh_report_resume - Tell device to resume normal operations
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This routine notifies the device driver that it can resume
 * operation, so the driver can perform whatever re-initialization is
 * needed to make the recovered device work again.
 */
static enum pci_ers_result eeh_report_resume(struct eeh_dev *edev,
					     struct pci_dev *pdev,
					     struct pci_driver *driver)
{
	if (!driver->err_handler->resume || !edev->in_error)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->resume()", driver->name);
	driver->err_handler->resume(pdev);

	pci_uevent_ers(edev->pdev, PCI_ERS_RESULT_RECOVERED);
#ifdef CONFIG_PCI_IOV
	if (eeh_ops->notify_resume && eeh_dev_to_pdn(edev))
		eeh_ops->notify_resume(eeh_dev_to_pdn(edev));
#endif
	return PCI_ERS_RESULT_NONE;
}

/**
 * eeh_report_failure - Tell device driver that device is dead.
 * @edev: eeh device
 * @pdev: PCI device
 * @driver: device's PCI driver
 *
 * This informs the device driver that the device is permanently
 * dead, and that no further recovery attempts will be made on it.
 */
static enum pci_ers_result eeh_report_failure(struct eeh_dev *edev,
					      struct pci_dev *pdev,
					      struct pci_driver *driver)
{
	enum pci_ers_result rc;

	if (!driver->err_handler->error_detected)
		return PCI_ERS_RESULT_NONE;

	eeh_edev_info(edev, "Invoking %s->error_detected(permanent failure)",
		      driver->name);
	rc = driver->err_handler->error_detected(pdev,
						 pci_channel_io_perm_failure);

	pci_uevent_ers(pdev, PCI_ERS_RESULT_DISCONNECT);
	return rc;
}

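/*
 * Re-add a VF that was removed during recovery. VFs whose drivers have
 * no error handlers are hot-removed and then added back here once the
 * PF has recovered.
 */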
static void *eeh_add_virt_device(struct eeh_dev *edev)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);

	if (!(edev->physfn)) {
		eeh_edev_warn(edev, "Not for VF\n");
		return NULL;
	}

	driver = eeh_pcid_get(dev);
	if (driver) {
		if (driver->err_handler) {
			eeh_pcid_put(dev);
			return NULL;
		}
		eeh_pcid_put(dev);
	}

#ifdef CONFIG_PCI_IOV
	pci_iov_add_virtfn(edev->physfn, eeh_dev_to_pdn(edev)->vf_index);
#endif
	return NULL;
}

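/*
 * Remove a device that has no EEH-aware driver from the PCI subsystem
 * before the PE is reset, so it can be re-probed afterwards. Removed
 * VFs are recorded in 'rmv_data' so they can be added back later.
 */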
static void eeh_rmv_device(struct eeh_dev *edev, void *userdata)
{
	struct pci_driver *driver;
	struct pci_dev *dev = eeh_dev_to_pci_dev(edev);
	struct eeh_rmv_data *rmv_data = (struct eeh_rmv_data *)userdata;

	/*
	 * Strictly speaking, we should remove the PCI bridges as well.
	 * However, that adds a lot of complexity, particularly because
	 * some of the devices under a bridge might themselves support
	 * EEH, so for simplicity we only deal with PCI devices here.
	 */
	if (!eeh_edev_actionable(edev) ||
	    (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE))
		return;

	if (rmv_data) {
		driver = eeh_pcid_get(dev);
		if (driver) {
			if (driver->err_handler &&
			    driver->err_handler->error_detected &&
			    driver->err_handler->slot_reset) {
				eeh_pcid_put(dev);
				return;
			}
			eeh_pcid_put(dev);
		}
	}

	/* Remove it from PCI subsystem */
	pr_info("EEH: Removing %s without EEH sensitive driver\n",
		pci_name(dev));
	edev->mode |= EEH_DEV_DISCONNECTED;
	if (rmv_data)
		rmv_data->removed_dev_count++;

	if (edev->physfn) {
#ifdef CONFIG_PCI_IOV
		struct pci_dn *pdn = eeh_dev_to_pdn(edev);

		pci_iov_remove_virtfn(edev->physfn, pdn->vf_index);
		edev->pdev = NULL;
#endif
		if (rmv_data)
			list_add(&edev->rmv_entry, &rmv_data->removed_vf_list);
	} else {
		pci_lock_rescan_remove();
		pci_stop_and_remove_bus_device(dev);
		pci_unlock_rescan_remove();
	}
}

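/*
 * Detach every device marked EEH_DEV_DISCONNECTED from its parent PE,
 * so the binding can be rebuilt when the devices are re-added.
 */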
static void *eeh_pe_detach_dev(struct eeh_pe *pe, void *userdata)
{
	struct eeh_dev *edev, *tmp;

	eeh_pe_for_each_dev(pe, edev, tmp) {
		if (!(edev->mode & EEH_DEV_DISCONNECTED))
			continue;

		edev->mode &= ~(EEH_DEV_DISCONNECTED | EEH_DEV_IRQ_DISABLED);
		eeh_rmv_from_parent_pe(edev);
	}

	return NULL;
}

/*
 * Explicitly clear a PE's frozen state. This is needed on PowerNV,
 * where the PE stays frozen until BAR restore is completed, and it is
 * harmless on pSeries. To be consistent with PE reset (tried 3 times),
 * we try to clear the frozen state up to 3 times as well.
 */
static int eeh_clear_pe_frozen_state(struct eeh_pe *root, bool include_passed)
{
	struct eeh_pe *pe;
	int i;

	eeh_for_each_pe(root, pe) {
		if (include_passed || !eeh_pe_passed(pe)) {
			for (i = 0; i < 3; i++)
				if (!eeh_unfreeze_pe(pe))
					break;
			if (i >= 3)
				return -EIO;
		}
	}
	eeh_pe_state_clear(root, EEH_PE_ISOLATED, include_passed);
	return 0;
}

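/**
 * eeh_pe_reset_and_recover - Reset and recover a PE without driver callbacks
 * @pe: EEH PE to reset
 *
 * Save the state of all devices in the PE, perform a full reset, clear
 * the frozen state, and restore the saved device state. Returns 0 on
 * success; bails out early if the PE is already being recovered.
 */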
int eeh_pe_reset_and_recover(struct eeh_pe *pe)
{
	int ret;

	/* Bail if the PE is being recovered */
	if (pe->state & EEH_PE_RECOVERING)
		return 0;

	/* Put the PE into recovery mode */
	eeh_pe_state_mark(pe, EEH_PE_RECOVERING);

	/* Save states */
	eeh_pe_dev_traverse(pe, eeh_dev_save_state, NULL);

	/* Issue reset */
	ret = eeh_pe_reset_full(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Unfreeze the PE */
	ret = eeh_clear_pe_frozen_state(pe, true);
	if (ret) {
		eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
		return ret;
	}

	/* Restore device state */
	eeh_pe_dev_traverse(pe, eeh_dev_restore_state, NULL);

	/* Clear recovery mode */
	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);

	return 0;
}

/**
 * eeh_reset_device - Perform actual reset of a pci slot
 * @pe: EEH PE
 * @bus: PCI bus corresponding to the isolated slot
 * @rmv_data: optional; list to record removed devices
 * @driver_eeh_aware: does the device's driver provide EEH support?
 *
 * This routine performs the reset on the indicated PE. During the
 * reset, udev may be invoked because the affected PCI devices will be
 * removed and then re-added.
 */
static int eeh_reset_device(struct eeh_pe *pe, struct pci_bus *bus,
			    struct eeh_rmv_data *rmv_data,
			    bool driver_eeh_aware)
{
	time64_t tstamp;
	int cnt, rc;
	struct eeh_dev *edev;
	struct eeh_pe *tmp_pe;
	bool any_passed = false;

	eeh_for_each_pe(pe, tmp_pe)
		any_passed |= eeh_pe_passed(tmp_pe);

	/* pcibios will clear the counter; save the value */
	cnt = pe->freeze_count;
	tstamp = pe->tstamp;

	/*
	 * We don't remove the corresponding PE instances because
	 * we need the information afterwards. The attached EEH
	 * devices are expected to be attached soon when calling
	 * into pci_hp_add_devices().
	 */
	eeh_pe_state_mark(pe, EEH_PE_KEEP);
	if (any_passed || driver_eeh_aware || (pe->type & EEH_PE_VF)) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, rmv_data);
	} else {
		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
	}

	/*
	 * Reset the pci controller. (Asserts RST#; resets config space).
	 * Reconfigure bridges and devices. Don't try to bring the system
	 * up if the reset failed for some reason.
	 *
	 * During the reset, it's very dangerous to have uncontrolled PCI
	 * config accesses. So we prefer to block them. However, controlled
	 * PCI config accesses initiated from EEH itself are allowed.
	 */
	rc = eeh_pe_reset_full(pe, false);
	if (rc)
		return rc;

	pci_lock_rescan_remove();

	/* Restore PE */
	eeh_ops->configure_bridge(pe);
	eeh_pe_restore_bars(pe);

	/* Clear frozen state */
	rc = eeh_clear_pe_frozen_state(pe, false);
	if (rc) {
		pci_unlock_rescan_remove();
		return rc;
	}

	/* Give the system 5 seconds to finish running the user-space
	 * hotplug shutdown scripts, e.g. ifdown for ethernet. Yes,
	 * this is a hack, but if we don't do this, and try to bring
	 * the device up before the scripts have taken it down,
	 * potentially weird things happen.
	 */
	if (!driver_eeh_aware || rmv_data->removed_dev_count) {
		pr_info("EEH: Sleep 5s ahead of %s hotplug\n",
			(driver_eeh_aware ? "partial" : "complete"));
		ssleep(5);

		/*
		 * The EEH devices are still connected to their parent
		 * PE. We should disconnect them so the binding can be
		 * rebuilt when the PCI devices are added back.
		 */
		edev = list_first_entry(&pe->edevs, struct eeh_dev, entry);
		eeh_pe_traverse(pe, eeh_pe_detach_dev, NULL);
		if (pe->type & EEH_PE_VF) {
			eeh_add_virt_device(edev);
		} else {
			if (!driver_eeh_aware)
				eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			pci_hp_add_devices(bus);
		}
	}
	eeh_pe_state_clear(pe, EEH_PE_KEEP, true);

	pe->tstamp = tstamp;
	pe->freeze_count = cnt;

	pci_unlock_rescan_remove();
	return 0;
}

/* The longest amount of time to wait for a pci device
 * to come back on line, in seconds.
 */
#define MAX_WAIT_FOR_RECOVERY 300


/* Walks the PE tree after processing an event to remove any stale PEs.
 *
 * NB: This needs to be recursive to ensure the leaf PEs get removed
 * before their parents do. Although this could be written iteratively,
 * the recursive form is easier to read and guarantees that the leaf
 * nodes are handled first.
 */
static void eeh_pe_cleanup(struct eeh_pe *pe)
{
	struct eeh_pe *child_pe, *tmp;

	list_for_each_entry_safe(child_pe, tmp, &pe->child_list, child)
		eeh_pe_cleanup(child_pe);

	if (pe->state & EEH_PE_KEEP)
		return;

	if (!(pe->state & EEH_PE_INVALID))
		return;

	if (list_empty(&pe->edevs) && list_empty(&pe->child_list)) {
		list_del(&pe->child);
		kfree(pe);
	}
}

/**
 * eeh_slot_presence_check - Check if a device is still present in a slot
 * @pdev: pci_dev to check
 *
 * This function may return a false positive if we can't determine the slot's
 * presence state. This might happen for PCIe slots if the PE containing
 * the upstream bridge is also frozen, or the bridge is part of the same PE
 * as the device.
 *
 * This shouldn't happen often, but you might see it if you hotplug a PCIe
 * switch.
 */
static bool eeh_slot_presence_check(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;
	u8 state;
	int rc;

	if (!pdev)
		return false;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return false;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return true;

	ops = slot->hotplug->ops;
	if (!ops || !ops->get_adapter_status)
		return true;

	/* set the attention indicator while we've got the slot ops */
	if (ops->set_attention_status)
		ops->set_attention_status(slot->hotplug, 1);

	rc = ops->get_adapter_status(slot->hotplug, &state);
	if (rc)
		return true;

	return !!state;
}

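/*
 * Turn off the slot's attention indicator, which
 * eeh_slot_presence_check() switched on at the start of recovery.
 */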
static void eeh_clear_slot_attention(struct pci_dev *pdev)
{
	const struct hotplug_slot_ops *ops;
	struct pci_slot *slot;

	if (!pdev)
		return;

	if (pdev->error_state == pci_channel_io_perm_failure)
		return;

	slot = pdev->slot;
	if (!slot || !slot->hotplug)
		return;

	ops = slot->hotplug->ops;
	if (!ops || !ops->set_attention_status)
		return;

	ops->set_attention_status(slot->hotplug, 0);
}

/**
 * eeh_handle_normal_event - Handle EEH events on a specific PE
 * @pe: EEH PE - which should not be used after we return, as it may
 * have been invalidated.
 *
 * Attempts to recover the given PE. If recovery fails or the PE has failed
 * too many times, remove the PE.
 *
 * When the PHB detects address or data parity errors on a particular PCI
 * slot, the associated PE is frozen. Besides that, DMAs to wild addresses
 * (which usually happen due to bugs in device drivers or in PCI adapter
 * firmware) can cause EEH errors, as can #SERR, #PERR and other
 * miscellaneous PCI-related errors.
 *
 * Recovery consists of unplugging the device driver (which
 * generates hotplug events to userspace), then issuing a PCI #RST to
 * the device, then reconfiguring the PCI config space for all bridges
 * and devices under this slot, and then finally restarting the device
 * drivers (which causes a second set of hotplug events to go out to
 * userspace).
 */
void eeh_handle_normal_event(struct eeh_pe *pe)
{
	struct pci_bus *bus;
	struct eeh_dev *edev, *tmp;
	struct eeh_pe *tmp_pe;
	int rc = 0;
	enum pci_ers_result result = PCI_ERS_RESULT_NONE;
	struct eeh_rmv_data rmv_data =
		{LIST_HEAD_INIT(rmv_data.removed_vf_list), 0};
	int devices = 0;

	bus = eeh_pe_bus_get(pe);
	if (!bus) {
		pr_err("%s: Cannot find PCI bus for PHB#%x-PE#%x\n",
		       __func__, pe->phb->global_number, pe->addr);
		return;
	}

	/*
	 * When devices are hot-removed we might get an EEH due to
	 * a driver attempting to touch the MMIO space of a removed
	 * device. In this case we don't have a device to recover
	 * so suppress the event if we can't find any present devices.
	 *
	 * The hotplug driver should take care of tearing down the
	 * device itself.
	 */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			if (eeh_slot_presence_check(edev->pdev))
				devices++;

	if (!devices) {
		pr_debug("EEH: Frozen PHB#%x-PE#%x is empty!\n",
			 pe->phb->global_number, pe->addr);
		goto out; /* nothing to recover */
	}

	/* Log the event */
	if (pe->type & EEH_PE_PHB) {
		pr_err("EEH: Recovering PHB#%x, location: %s\n",
		       pe->phb->global_number, eeh_pe_loc_get(pe));
	} else {
		struct eeh_pe *phb_pe = eeh_phb_pe_get(pe->phb);

		pr_err("EEH: Recovering PHB#%x-PE#%x\n",
		       pe->phb->global_number, pe->addr);
		pr_err("EEH: PE location: %s, PHB location: %s\n",
		       eeh_pe_loc_get(pe), eeh_pe_loc_get(phb_pe));
	}

#ifdef CONFIG_STACKTRACE
	/*
	 * Print the saved stack trace now that we've verified there's
	 * something to recover.
	 */
	if (pe->trace_entries) {
		void **ptrs = (void **) pe->stack_trace;
		int i;

		pr_err("EEH: Frozen PHB#%x-PE#%x detected\n",
		       pe->phb->global_number, pe->addr);

		/* FIXME: Use the same format as dump_stack() */
		pr_err("EEH: Call Trace:\n");
		for (i = 0; i < pe->trace_entries; i++)
			pr_err("EEH: [%pK] %pS\n", ptrs[i], ptrs[i]);

		pe->trace_entries = 0;
	}
#endif /* CONFIG_STACKTRACE */

	eeh_pe_update_time_stamp(pe);
	pe->freeze_count++;
	if (pe->freeze_count > eeh_max_freezes) {
		pr_err("EEH: PHB#%x-PE#%x has failed %d times in the last hour and has been permanently disabled.\n",
		       pe->phb->global_number, pe->addr,
		       pe->freeze_count);
		result = PCI_ERS_RESULT_DISCONNECT;
	}

	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			edev->mode &= ~EEH_DEV_NO_HANDLER;

	/* Walk the various device drivers attached to this slot through
	 * a reset sequence, giving each an opportunity to do what it needs
	 * to accomplish the reset. Each child gets a report of the
	 * status ... if any child can't handle the reset, then the entire
	 * slot is dlpar removed and added.
	 *
	 * When the PHB is fenced, we have to issue a reset to recover from
	 * the error. Override the result if necessary so that we only do a
	 * partial hotplug in that case.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_warn("EEH: This PCI device has failed %d times in the last hour and will be permanently disabled after %d failures.\n",
			pe->freeze_count, eeh_max_freezes);
		pr_info("EEH: Notify device drivers to shutdown\n");
		eeh_set_channel_state(pe, pci_channel_io_frozen);
		eeh_set_irq_state(pe, false);
		eeh_pe_report("error_detected(IO frozen)", pe,
			      eeh_report_error, &result);
		if ((pe->type & EEH_PE_PHB) &&
		    result != PCI_ERS_RESULT_NONE &&
		    result != PCI_ERS_RESULT_NEED_RESET)
			result = PCI_ERS_RESULT_NEED_RESET;
	}

	/* Get the current PCI slot state. This can take a long time,
	 * sometimes over 300 seconds for certain systems.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		rc = eeh_wait_state(pe, MAX_WAIT_FOR_RECOVERY*1000);
		if (rc < 0 || rc == EEH_STATE_NOT_SUPPORT) {
			pr_warn("EEH: Permanent failure\n");
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* Since rtas may enable MMIO when posting the error log,
	 * don't post the error log until after all dev drivers
	 * have been informed.
	 */
	if (result != PCI_ERS_RESULT_DISCONNECT) {
		pr_info("EEH: Collect temporary log\n");
		eeh_slot_error_detail(pe, EEH_LOG_TEMP);
	}

	/* If all device drivers were EEH-unaware, then shut
	 * down all of the device drivers, and hope they
	 * go down willingly, without panicking the system.
	 */
	if (result == PCI_ERS_RESULT_NONE) {
		pr_info("EEH: Reset with hotplug activity\n");
		rc = eeh_reset_device(pe, bus, NULL, false);
		if (rc) {
			pr_warn("%s: Unable to reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		}
	}

	/* If all devices reported they can proceed, then re-enable MMIO */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enable I/O for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_MMIO);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			pr_info("EEH: Notify device drivers to resume I/O\n");
			eeh_pe_report("mmio_enabled", pe,
				      eeh_report_mmio_enabled, &result);
		}
	}

	/* If all devices reported they can proceed, then re-enable DMA */
	if (result == PCI_ERS_RESULT_CAN_RECOVER) {
		pr_info("EEH: Enabled DMA for affected devices\n");
		rc = eeh_pci_enable(pe, EEH_OPT_THAW_DMA);

		if (rc < 0) {
			result = PCI_ERS_RESULT_DISCONNECT;
		} else if (rc) {
			result = PCI_ERS_RESULT_NEED_RESET;
		} else {
			/*
			 * We didn't do a PE reset in this case, so the
			 * PE is still in frozen state. Clear it before
			 * resuming the PE.
			 */
			eeh_pe_state_clear(pe, EEH_PE_ISOLATED, true);
			result = PCI_ERS_RESULT_RECOVERED;
		}
	}

	/* If any device called out for a reset, then reset the slot */
	if (result == PCI_ERS_RESULT_NEED_RESET) {
		pr_info("EEH: Reset without hotplug activity\n");
		rc = eeh_reset_device(pe, bus, &rmv_data, true);
		if (rc) {
			pr_warn("%s: Cannot reset, err=%d\n",
				__func__, rc);
			result = PCI_ERS_RESULT_DISCONNECT;
		} else {
			result = PCI_ERS_RESULT_NONE;
			eeh_set_channel_state(pe, pci_channel_io_normal);
			eeh_set_irq_state(pe, true);
			eeh_pe_report("slot_reset", pe, eeh_report_reset,
				      &result);
		}
	}

	if ((result == PCI_ERS_RESULT_RECOVERED) ||
	    (result == PCI_ERS_RESULT_NONE)) {
		/*
		 * Any hot-removed VFs should be added back once the PF
		 * has been recovered properly.
		 */
		list_for_each_entry_safe(edev, tmp, &rmv_data.removed_vf_list,
					 rmv_entry) {
			eeh_add_virt_device(edev);
			list_del(&edev->rmv_entry);
		}

		/* Tell all device drivers that they can resume operations */
		pr_info("EEH: Notify device driver to resume\n");
		eeh_set_channel_state(pe, pci_channel_io_normal);
		eeh_set_irq_state(pe, true);
		eeh_pe_report("resume", pe, eeh_report_resume, NULL);
		eeh_for_each_pe(pe, tmp_pe) {
			eeh_pe_for_each_dev(tmp_pe, edev, tmp) {
				edev->mode &= ~EEH_DEV_NO_HANDLER;
				edev->in_error = false;
			}
		}

		pr_info("EEH: Recovery successful.\n");
		goto out;
	}

	/*
	 * About 90% of all real-life EEH failures in the field
	 * are due to poorly seated PCI cards. Only 10% or so are
	 * due to actual, failed cards.
	 */
	pr_err("EEH: Unable to recover from failure from PHB#%x-PE#%x.\n"
	       "Please try reseating or replacing it\n",
	       pe->phb->global_number, pe->addr);

	eeh_slot_error_detail(pe, EEH_LOG_PERM);

	/* Notify all devices that they're about to go down. */
	eeh_set_irq_state(pe, false);
	eeh_pe_report("error_detected(permanent failure)", pe,
		      eeh_report_failure, NULL);
	eeh_set_channel_state(pe, pci_channel_io_perm_failure);

	/* Mark the PE to be removed permanently */
	eeh_pe_state_mark(pe, EEH_PE_REMOVED);

	/*
	 * Shut down the device drivers for good. We mark
	 * all removed devices correctly so that their PCI
	 * config space is never accessed again.
	 */
	if (pe->type & EEH_PE_VF) {
		eeh_pe_dev_traverse(pe, eeh_rmv_device, NULL);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
	} else {
		eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
		eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);

		pci_lock_rescan_remove();
		pci_hp_remove_devices(bus);
		pci_unlock_rescan_remove();
		/* The passed PE should no longer be used */
		return;
	}

out:
	/*
	 * Clean up any PEs without devices. While the PE is marked as
	 * EEH_PE_RECOVERING we don't want to modify the PE tree structure,
	 * so we do it here.
	 */
	eeh_pe_cleanup(pe);

	/* clear the slot attention LED for all recovered devices */
	eeh_for_each_pe(pe, tmp_pe)
		eeh_pe_for_each_dev(tmp_pe, edev, tmp)
			eeh_clear_slot_attention(edev->pdev);

	eeh_pe_state_clear(pe, EEH_PE_RECOVERING, true);
}

/**
 * eeh_handle_special_event - Handle EEH events without a specific failing PE
 *
 * Called when an EEH event is detected but can't be narrowed down to a
 * specific PE. Iterates through possible failures and handles them as
 * necessary.
 */
void eeh_handle_special_event(void)
{
	struct eeh_pe *pe, *phb_pe, *tmp_pe;
	struct eeh_dev *edev, *tmp_edev;
	struct pci_bus *bus;
	struct pci_controller *hose;
	unsigned long flags;
	int rc;


	do {
		rc = eeh_ops->next_error(&pe);

		switch (rc) {
		case EEH_NEXT_ERR_DEAD_IOC:
			/* Mark all PHBs in dead state */
			eeh_serialize_lock(&flags);

			/* Purge all events */
			eeh_remove_event(NULL, true);

			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe)
					continue;

				eeh_pe_mark_isolated(phb_pe);
			}

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_FROZEN_PE:
		case EEH_NEXT_ERR_FENCED_PHB:
		case EEH_NEXT_ERR_DEAD_PHB:
			/* Mark the PE in fenced state */
			eeh_serialize_lock(&flags);

			/* Purge all events of the PHB */
			eeh_remove_event(pe, true);

			if (rc != EEH_NEXT_ERR_DEAD_PHB)
				eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_pe_mark_isolated(pe);

			eeh_serialize_unlock(flags);

			break;
		case EEH_NEXT_ERR_NONE:
			return;
		default:
			pr_warn("%s: Invalid value %d from next_error()\n",
				__func__, rc);
			return;
		}

		/*
		 * A fenced PHB or frozen PE is handled as a normal event.
		 * For a dead PHB or IOC, the affected PHBs have to be
		 * removed instead.
		 */
		if (rc == EEH_NEXT_ERR_FROZEN_PE ||
		    rc == EEH_NEXT_ERR_FENCED_PHB) {
			eeh_pe_state_mark(pe, EEH_PE_RECOVERING);
			eeh_handle_normal_event(pe);
		} else {
			eeh_for_each_pe(pe, tmp_pe)
				eeh_pe_for_each_dev(tmp_pe, edev, tmp_edev)
					edev->mode &= ~EEH_DEV_NO_HANDLER;

			/* Notify all devices to be down */
			eeh_pe_state_clear(pe, EEH_PE_PRI_BUS, true);
			eeh_pe_report(
				"error_detected(permanent failure)", pe,
				eeh_report_failure, NULL);
			eeh_set_channel_state(pe, pci_channel_io_perm_failure);

			pci_lock_rescan_remove();
			list_for_each_entry(hose, &hose_list, list_node) {
				phb_pe = eeh_phb_pe_get(hose);
				if (!phb_pe ||
				    !(phb_pe->state & EEH_PE_ISOLATED) ||
				    (phb_pe->state & EEH_PE_RECOVERING))
					continue;

				bus = eeh_pe_bus_get(phb_pe);
				if (!bus) {
					pr_err("%s: Cannot find PCI bus for "
					       "PHB#%x-PE#%x\n",
					       __func__,
					       pe->phb->global_number,
					       pe->addr);
					break;
				}
				pci_hp_remove_devices(bus);
			}
			pci_unlock_rescan_remove();
		}

		/*
		 * If we have detected a dead IOC, we needn't proceed
		 * any further since all PHBs would have been removed.
		 */
		if (rc == EEH_NEXT_ERR_DEAD_IOC)
			break;
	} while (rc != EEH_NEXT_ERR_NONE);
}