1 /*
 * This file implements the platform dependent EEH operations on the
 * powernv platform. The powernv platform itself was created to provide
 * full hypervisor-level support.
5 *
6 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2013.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12 */
13
14 #include <linux/atomic.h>
15 #include <linux/delay.h>
16 #include <linux/export.h>
17 #include <linux/init.h>
18 #include <linux/list.h>
19 #include <linux/msi.h>
20 #include <linux/of.h>
21 #include <linux/pci.h>
22 #include <linux/proc_fs.h>
23 #include <linux/rbtree.h>
24 #include <linux/sched.h>
25 #include <linux/seq_file.h>
26 #include <linux/spinlock.h>
27
28 #include <asm/eeh.h>
29 #include <asm/eeh_event.h>
30 #include <asm/firmware.h>
31 #include <asm/io.h>
32 #include <asm/iommu.h>
33 #include <asm/machdep.h>
34 #include <asm/msi_bitmap.h>
35 #include <asm/opal.h>
36 #include <asm/ppc-pci.h>
37
38 #include "powernv.h"
39 #include "pci.h"
40
41 /**
42 * pnv_eeh_init - EEH platform dependent initialization
43 *
44 * EEH platform dependent initialization on powernv
45 */
pnv_eeh_init(void)46 static int pnv_eeh_init(void)
47 {
48 struct pci_controller *hose;
49 struct pnv_phb *phb;
50
51 /* We require OPALv3 */
52 if (!firmware_has_feature(FW_FEATURE_OPALv3)) {
53 pr_warn("%s: OPALv3 is required !\n",
54 __func__);
55 return -EINVAL;
56 }
57
58 /* Set probe mode */
59 eeh_add_flag(EEH_PROBE_MODE_DEV);
60
61 /*
62 * P7IOC blocks PCI config access to frozen PE, but PHB3
63 * doesn't do that. So we have to selectively enable I/O
64 * prior to collecting error log.
65 */
66 list_for_each_entry(hose, &hose_list, list_node) {
67 phb = hose->private_data;
68
69 if (phb->model == PNV_PHB_MODEL_P7IOC)
70 eeh_add_flag(EEH_ENABLE_IO_FOR_LOG);
71 break;
72 }
73
74 return 0;
75 }
76
77 /**
78 * pnv_eeh_post_init - EEH platform dependent post initialization
79 *
80 * EEH platform dependent post initialization on powernv. When
81 * the function is called, the EEH PEs and devices should have
82 * been built. If the I/O cache staff has been built, EEH is
83 * ready to supply service.
84 */
pnv_eeh_post_init(void)85 static int pnv_eeh_post_init(void)
86 {
87 struct pci_controller *hose;
88 struct pnv_phb *phb;
89 int ret = 0;
90
91 list_for_each_entry(hose, &hose_list, list_node) {
92 phb = hose->private_data;
93
94 if (phb->eeh_ops && phb->eeh_ops->post_init) {
95 ret = phb->eeh_ops->post_init(hose);
96 if (ret)
97 break;
98 }
99 }
100
101 return ret;
102 }
103
104 /**
105 * pnv_eeh_dev_probe - Do probe on PCI device
106 * @dev: PCI device
107 * @flag: unused
108 *
109 * When EEH module is installed during system boot, all PCI devices
110 * are checked one by one to see if it supports EEH. The function
111 * is introduced for the purpose. By default, EEH has been enabled
112 * on all PCI devices. That's to say, we only need do necessary
113 * initialization on the corresponding eeh device and create PE
114 * accordingly.
115 *
116 * It's notable that's unsafe to retrieve the EEH device through
117 * the corresponding PCI device. During the PCI device hotplug, which
118 * was possiblly triggered by EEH core, the binding between EEH device
119 * and the PCI device isn't built yet.
120 */
pnv_eeh_dev_probe(struct pci_dev * dev,void * flag)121 static int pnv_eeh_dev_probe(struct pci_dev *dev, void *flag)
122 {
123 struct pci_controller *hose = pci_bus_to_host(dev->bus);
124 struct pnv_phb *phb = hose->private_data;
125 struct device_node *dn = pci_device_to_OF_node(dev);
126 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
127 int ret;
128
129 /*
130 * When probing the root bridge, which doesn't have any
131 * subordinate PCI devices. We don't have OF node for
132 * the root bridge. So it's not reasonable to continue
133 * the probing.
134 */
135 if (!dn || !edev || edev->pe)
136 return 0;
137
138 /* Skip for PCI-ISA bridge */
139 if ((dev->class >> 8) == PCI_CLASS_BRIDGE_ISA)
140 return 0;
141
142 /* Initialize eeh device */
143 edev->class_code = dev->class;
144 edev->mode &= 0xFFFFFF00;
145 if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE)
146 edev->mode |= EEH_DEV_BRIDGE;
147 edev->pcix_cap = pci_find_capability(dev, PCI_CAP_ID_PCIX);
148 if (pci_is_pcie(dev)) {
149 edev->pcie_cap = pci_pcie_cap(dev);
150
151 if (pci_pcie_type(dev) == PCI_EXP_TYPE_ROOT_PORT)
152 edev->mode |= EEH_DEV_ROOT_PORT;
153 else if (pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM)
154 edev->mode |= EEH_DEV_DS_PORT;
155
156 edev->aer_cap = pci_find_ext_capability(dev,
157 PCI_EXT_CAP_ID_ERR);
158 }
159
160 edev->config_addr = ((dev->bus->number << 8) | dev->devfn);
161 edev->pe_config_addr = phb->bdfn_to_pe(phb, dev->bus, dev->devfn & 0xff);
162
163 /* Create PE */
164 ret = eeh_add_to_parent_pe(edev);
165 if (ret) {
166 pr_warn("%s: Can't add PCI dev %s to parent PE (%d)\n",
167 __func__, pci_name(dev), ret);
168 return ret;
169 }
170
171 /*
172 * If the PE contains any one of following adapters, the
173 * PCI config space can't be accessed when dumping EEH log.
174 * Otherwise, we will run into fenced PHB caused by shortage
175 * of outbound credits in the adapter. The PCI config access
176 * should be blocked until PE reset. MMIO access is dropped
177 * by hardware certainly. In order to drop PCI config requests,
178 * one more flag (EEH_PE_CFG_RESTRICTED) is introduced, which
179 * will be checked in the backend for PE state retrival. If
180 * the PE becomes frozen for the first time and the flag has
181 * been set for the PE, we will set EEH_PE_CFG_BLOCKED for
182 * that PE to block its config space.
183 *
184 * Broadcom Austin 4-ports NICs (14e4:1657)
185 * Broadcom Shiner 2-ports 10G NICs (14e4:168e)
186 */
187 if ((dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x1657) ||
188 (dev->vendor == PCI_VENDOR_ID_BROADCOM && dev->device == 0x168e))
189 edev->pe->state |= EEH_PE_CFG_RESTRICTED;
190
191 /*
192 * Cache the PE primary bus, which can't be fetched when
193 * full hotplug is in progress. In that case, all child
194 * PCI devices of the PE are expected to be removed prior
195 * to PE reset.
196 */
197 if (!edev->pe->bus)
198 edev->pe->bus = dev->bus;
199
200 /*
201 * Enable EEH explicitly so that we will do EEH check
202 * while accessing I/O stuff
203 */
204 eeh_add_flag(EEH_ENABLED);
205
206 /* Save memory bars */
207 eeh_save_bars(edev);
208
209 return 0;
210 }
211
212 /**
213 * pnv_eeh_set_option - Initialize EEH or MMIO/DMA reenable
214 * @pe: EEH PE
215 * @option: operation to be issued
216 *
217 * The function is used to control the EEH functionality globally.
218 * Currently, following options are support according to PAPR:
219 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
220 */
pnv_eeh_set_option(struct eeh_pe * pe,int option)221 static int pnv_eeh_set_option(struct eeh_pe *pe, int option)
222 {
223 struct pci_controller *hose = pe->phb;
224 struct pnv_phb *phb = hose->private_data;
225 int ret = -EEXIST;
226
227 /*
228 * What we need do is pass it down for hardware
229 * implementation to handle it.
230 */
231 if (phb->eeh_ops && phb->eeh_ops->set_option)
232 ret = phb->eeh_ops->set_option(pe, option);
233
234 return ret;
235 }
236
237 /**
238 * pnv_eeh_get_pe_addr - Retrieve PE address
239 * @pe: EEH PE
240 *
241 * Retrieve the PE address according to the given tranditional
242 * PCI BDF (Bus/Device/Function) address.
243 */
pnv_eeh_get_pe_addr(struct eeh_pe * pe)244 static int pnv_eeh_get_pe_addr(struct eeh_pe *pe)
245 {
246 return pe->addr;
247 }
248
249 /**
250 * pnv_eeh_get_state - Retrieve PE state
251 * @pe: EEH PE
252 * @delay: delay while PE state is temporarily unavailable
253 *
254 * Retrieve the state of the specified PE. For IODA-compitable
255 * platform, it should be retrieved from IODA table. Therefore,
256 * we prefer passing down to hardware implementation to handle
257 * it.
258 */
pnv_eeh_get_state(struct eeh_pe * pe,int * delay)259 static int pnv_eeh_get_state(struct eeh_pe *pe, int *delay)
260 {
261 struct pci_controller *hose = pe->phb;
262 struct pnv_phb *phb = hose->private_data;
263 int ret = EEH_STATE_NOT_SUPPORT;
264
265 if (phb->eeh_ops && phb->eeh_ops->get_state) {
266 ret = phb->eeh_ops->get_state(pe);
267
268 /*
269 * If the PE state is temporarily unavailable,
270 * to inform the EEH core delay for default
271 * period (1 second)
272 */
273 if (delay) {
274 *delay = 0;
275 if (ret & EEH_STATE_UNAVAILABLE)
276 *delay = 1000;
277 }
278 }
279
280 return ret;
281 }
282
283 /**
284 * pnv_eeh_reset - Reset the specified PE
285 * @pe: EEH PE
286 * @option: reset option
287 *
288 * Reset the specified PE
289 */
pnv_eeh_reset(struct eeh_pe * pe,int option)290 static int pnv_eeh_reset(struct eeh_pe *pe, int option)
291 {
292 struct pci_controller *hose = pe->phb;
293 struct pnv_phb *phb = hose->private_data;
294 int ret = -EEXIST;
295
296 if (phb->eeh_ops && phb->eeh_ops->reset)
297 ret = phb->eeh_ops->reset(pe, option);
298
299 return ret;
300 }
301
302 /**
303 * pnv_eeh_wait_state - Wait for PE state
304 * @pe: EEH PE
305 * @max_wait: maximal period in microsecond
306 *
307 * Wait for the state of associated PE. It might take some time
308 * to retrieve the PE's state.
309 */
pnv_eeh_wait_state(struct eeh_pe * pe,int max_wait)310 static int pnv_eeh_wait_state(struct eeh_pe *pe, int max_wait)
311 {
312 int ret;
313 int mwait;
314
315 while (1) {
316 ret = pnv_eeh_get_state(pe, &mwait);
317
318 /*
319 * If the PE's state is temporarily unavailable,
320 * we have to wait for the specified time. Otherwise,
321 * the PE's state will be returned immediately.
322 */
323 if (ret != EEH_STATE_UNAVAILABLE)
324 return ret;
325
326 max_wait -= mwait;
327 if (max_wait <= 0) {
328 pr_warn("%s: Timeout getting PE#%x's state (%d)\n",
329 __func__, pe->addr, max_wait);
330 return EEH_STATE_NOT_SUPPORT;
331 }
332
333 msleep(mwait);
334 }
335
336 return EEH_STATE_NOT_SUPPORT;
337 }
338
339 /**
340 * pnv_eeh_get_log - Retrieve error log
341 * @pe: EEH PE
342 * @severity: temporary or permanent error log
343 * @drv_log: driver log to be combined with retrieved error log
344 * @len: length of driver log
345 *
346 * Retrieve the temporary or permanent error from the PE.
347 */
pnv_eeh_get_log(struct eeh_pe * pe,int severity,char * drv_log,unsigned long len)348 static int pnv_eeh_get_log(struct eeh_pe *pe, int severity,
349 char *drv_log, unsigned long len)
350 {
351 struct pci_controller *hose = pe->phb;
352 struct pnv_phb *phb = hose->private_data;
353 int ret = -EEXIST;
354
355 if (phb->eeh_ops && phb->eeh_ops->get_log)
356 ret = phb->eeh_ops->get_log(pe, severity, drv_log, len);
357
358 return ret;
359 }
360
361 /**
362 * pnv_eeh_configure_bridge - Configure PCI bridges in the indicated PE
363 * @pe: EEH PE
364 *
365 * The function will be called to reconfigure the bridges included
366 * in the specified PE so that the mulfunctional PE would be recovered
367 * again.
368 */
pnv_eeh_configure_bridge(struct eeh_pe * pe)369 static int pnv_eeh_configure_bridge(struct eeh_pe *pe)
370 {
371 struct pci_controller *hose = pe->phb;
372 struct pnv_phb *phb = hose->private_data;
373 int ret = 0;
374
375 if (phb->eeh_ops && phb->eeh_ops->configure_bridge)
376 ret = phb->eeh_ops->configure_bridge(pe);
377
378 return ret;
379 }
380
381 /**
382 * pnv_pe_err_inject - Inject specified error to the indicated PE
383 * @pe: the indicated PE
384 * @type: error type
385 * @func: specific error type
386 * @addr: address
387 * @mask: address mask
388 *
389 * The routine is called to inject specified error, which is
390 * determined by @type and @func, to the indicated PE for
391 * testing purpose.
392 */
pnv_eeh_err_inject(struct eeh_pe * pe,int type,int func,unsigned long addr,unsigned long mask)393 static int pnv_eeh_err_inject(struct eeh_pe *pe, int type, int func,
394 unsigned long addr, unsigned long mask)
395 {
396 struct pci_controller *hose = pe->phb;
397 struct pnv_phb *phb = hose->private_data;
398 int ret = -EEXIST;
399
400 if (phb->eeh_ops && phb->eeh_ops->err_inject)
401 ret = phb->eeh_ops->err_inject(pe, type, func, addr, mask);
402
403 return ret;
404 }
405
pnv_eeh_cfg_blocked(struct device_node * dn)406 static inline bool pnv_eeh_cfg_blocked(struct device_node *dn)
407 {
408 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
409
410 if (!edev || !edev->pe)
411 return false;
412
413 if (edev->pe->state & EEH_PE_CFG_BLOCKED)
414 return true;
415
416 return false;
417 }
418
/*
 * Config space read accessor. While the PE's config access is blocked
 * (frozen PE awaiting reset), return all-ones — mimicking a master
 * abort — instead of touching the hardware.
 */
static int pnv_eeh_read_config(struct device_node *dn,
			       int where, int size, u32 *val)
{
	if (!pnv_eeh_cfg_blocked(dn))
		return pnv_pci_cfg_read(dn, where, size, val);

	*val = 0xFFFFFFFF;
	return PCIBIOS_SET_FAILED;
}
429
/*
 * Config space write accessor. Writes are silently dropped (with a
 * PCIBIOS_SET_FAILED result) while the PE's config access is blocked.
 */
static int pnv_eeh_write_config(struct device_node *dn,
				int where, int size, u32 val)
{
	if (!pnv_eeh_cfg_blocked(dn))
		return pnv_pci_cfg_write(dn, where, size, val);

	return PCIBIOS_SET_FAILED;
}
438
439 /**
440 * pnv_eeh_next_error - Retrieve next EEH error to handle
441 * @pe: Affected PE
442 *
443 * Using OPAL API, to retrieve next EEH error for EEH core to handle
444 */
pnv_eeh_next_error(struct eeh_pe ** pe)445 static int pnv_eeh_next_error(struct eeh_pe **pe)
446 {
447 struct pci_controller *hose;
448 struct pnv_phb *phb = NULL;
449
450 list_for_each_entry(hose, &hose_list, list_node) {
451 phb = hose->private_data;
452 break;
453 }
454
455 if (phb && phb->eeh_ops->next_error)
456 return phb->eeh_ops->next_error(pe);
457
458 return -EEXIST;
459 }
460
pnv_eeh_restore_config(struct device_node * dn)461 static int pnv_eeh_restore_config(struct device_node *dn)
462 {
463 struct eeh_dev *edev = of_node_to_eeh_dev(dn);
464 struct pnv_phb *phb;
465 s64 ret;
466
467 if (!edev)
468 return -EEXIST;
469
470 phb = edev->phb->private_data;
471 ret = opal_pci_reinit(phb->opal_id,
472 OPAL_REINIT_PCI_DEV, edev->config_addr);
473 if (ret) {
474 pr_warn("%s: Can't reinit PCI dev 0x%x (%lld)\n",
475 __func__, edev->config_addr, ret);
476 return -EIO;
477 }
478
479 return 0;
480 }
481
/*
 * Platform dependent EEH operations for powernv, registered with the
 * EEH core at early init. OF-based probing is unused (of_probe is
 * NULL); devices are probed individually via pnv_eeh_dev_probe.
 * Most entries are thin wrappers delegating to the per-PHB backend
 * (phb->eeh_ops).
 */
static struct eeh_ops pnv_eeh_ops = {
	.name                   = "powernv",
	.init                   = pnv_eeh_init,
	.post_init              = pnv_eeh_post_init,
	.of_probe               = NULL,
	.dev_probe              = pnv_eeh_dev_probe,
	.set_option             = pnv_eeh_set_option,
	.get_pe_addr            = pnv_eeh_get_pe_addr,
	.get_state              = pnv_eeh_get_state,
	.reset                  = pnv_eeh_reset,
	.wait_state             = pnv_eeh_wait_state,
	.get_log                = pnv_eeh_get_log,
	.configure_bridge       = pnv_eeh_configure_bridge,
	.err_inject             = pnv_eeh_err_inject,
	.read_config            = pnv_eeh_read_config,
	.write_config           = pnv_eeh_write_config,
	.next_error             = pnv_eeh_next_error,
	.restore_config         = pnv_eeh_restore_config
};
501
502 /**
503 * eeh_powernv_init - Register platform dependent EEH operations
504 *
505 * EEH initialization on powernv platform. This function should be
506 * called before any EEH related functions.
507 */
eeh_powernv_init(void)508 static int __init eeh_powernv_init(void)
509 {
510 int ret = -EINVAL;
511
512 eeh_set_pe_aux_size(PNV_PCI_DIAG_BUF_SIZE);
513 ret = eeh_ops_register(&pnv_eeh_ops);
514 if (!ret)
515 pr_info("EEH: PowerNV platform initialized\n");
516 else
517 pr_info("EEH: Failed to initialize PowerNV platform (%d)\n", ret);
518
519 return ret;
520 }
521 machine_early_initcall(powernv, eeh_powernv_init);
522