/*
 * This file implements the platform-dependent EEH operations for pseries.
 * The pseries platform is built heavily on top of RTAS, so the platform-
 * dependent EEH operations are implemented as RTAS calls. The functions
 * are derived from arch/powerpc/platforms/pseries/eeh.c and the necessary
 * cleanup has been done.
 *
 * Copyright Benjamin Herrenschmidt & Gavin Shan, IBM Corporation 2011.
 * Copyright IBM Corporation 2001, 2005, 2006
 * Copyright Dave Engebretsen & Todd Inglett 2001
 * Copyright Linas Vepstas 2005, 2006
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include <linux/atomic.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/of.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>

#include <asm/eeh.h>
#include <asm/eeh_event.h>
#include <asm/io.h>
#include <asm/machdep.h>
#include <asm/ppc-pci.h>
#include <asm/rtas.h>

/* RTAS tokens */
static int ibm_set_eeh_option;
static int ibm_set_slot_reset;
static int ibm_read_slot_reset_state;
static int ibm_read_slot_reset_state2;
static int ibm_slot_error_detail;
static int ibm_get_config_addr_info;
static int ibm_get_config_addr_info2;
static int ibm_configure_bridge;
static int ibm_configure_pe;

/*
 * Buffer for reporting slot-error-detail RTAS calls. It lives here
 * in the BSS, rather than being dynamically allocated, so that it
 * ends up in the RMO where RTAS can access it.
 */
static unsigned char slot_errbuf[RTAS_ERROR_LOG_MAX];
static DEFINE_SPINLOCK(slot_errbuf_lock);
static int eeh_error_buf_size;

/**
 * pseries_eeh_init - EEH platform dependent initialization
 *
 * EEH platform dependent initialization on pseries.
 */
static int pseries_eeh_init(void)
{
	/* figure out EEH RTAS function call tokens */
	ibm_set_eeh_option = rtas_token("ibm,set-eeh-option");
	ibm_set_slot_reset = rtas_token("ibm,set-slot-reset");
	ibm_read_slot_reset_state2 = rtas_token("ibm,read-slot-reset-state2");
	ibm_read_slot_reset_state = rtas_token("ibm,read-slot-reset-state");
	ibm_slot_error_detail = rtas_token("ibm,slot-error-detail");
	ibm_get_config_addr_info2 = rtas_token("ibm,get-config-addr-info2");
	ibm_get_config_addr_info = rtas_token("ibm,get-config-addr-info");
	ibm_configure_pe = rtas_token("ibm,configure-pe");
	ibm_configure_bridge = rtas_token("ibm,configure-bridge");

	/*
	 * Necessary sanity check. We need not check "ibm,get-config-addr-info"
	 * and its variant: old firmware that lacks them still accepts the
	 * domain/bus/slot/function address for EEH RTAS operations.
	 */
	if (ibm_set_eeh_option == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,set-eeh-option> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_set_slot_reset == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,set-slot-reset> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_read_slot_reset_state2 == RTAS_UNKNOWN_SERVICE &&
		   ibm_read_slot_reset_state == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,read-slot-reset-state2> and "
			"<ibm,read-slot-reset-state> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_slot_error_detail == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,slot-error-detail> invalid\n",
			__func__);
		return -EINVAL;
	} else if (ibm_configure_pe == RTAS_UNKNOWN_SERVICE &&
		   ibm_configure_bridge == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: RTAS service <ibm,configure-pe> and "
			"<ibm,configure-bridge> invalid\n",
			__func__);
		return -EINVAL;
	}


	/* Initialize error log lock and size */
	spin_lock_init(&slot_errbuf_lock);
	eeh_error_buf_size = rtas_token("rtas-error-log-max");
	if (eeh_error_buf_size == RTAS_UNKNOWN_SERVICE) {
		pr_warning("%s: unknown EEH error log size\n",
			__func__);
		eeh_error_buf_size = 1024;
	} else if (eeh_error_buf_size > RTAS_ERROR_LOG_MAX) {
		pr_warning("%s: EEH error log size %d exceeds the maximal %d\n",
			__func__, eeh_error_buf_size, RTAS_ERROR_LOG_MAX);
		eeh_error_buf_size = RTAS_ERROR_LOG_MAX;
	}

	return 0;
}

/**
 * pseries_eeh_set_option - Initialize EEH or MMIO/DMA reenable
 * @dn: device node
 * @option: operation to be issued
 *
 * The function is used to control the EEH functionality globally.
 * Currently, the following options are supported according to PAPR:
 * Enable EEH, Disable EEH, Enable MMIO and Enable DMA
 */
static int pseries_eeh_set_option(struct device_node *dn, int option)
{
	int ret = 0;
	struct eeh_dev *edev;
	const u32 *reg;
	int config_addr;

	edev = of_node_to_eeh_dev(dn);

	/*
	 * When we're enabling or disabling EEH functionality on
	 * the particular PE, the PE config address is possibly
	 * unavailable. Therefore, we have to figure it out from
	 * the FDT node.
	 */
	switch (option) {
	case EEH_OPT_DISABLE:
	case EEH_OPT_ENABLE:
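		/*
		 * For a PCI device node, the first cell of the "reg"
		 * property encodes the bus/device/function, which is
		 * the config address the RTAS call expects here.
		 */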
		reg = of_get_property(dn, "reg", NULL);
		config_addr = reg[0];
		break;

	case EEH_OPT_THAW_MMIO:
	case EEH_OPT_THAW_DMA:
		config_addr = edev->config_addr;
		if (edev->pe_config_addr)
			config_addr = edev->pe_config_addr;
		break;

	default:
		pr_err("%s: Invalid option %d\n",
			__func__, option);
		return -EINVAL;
	}

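	/*
	 * The PHB's 64-bit Bus Unit ID (BUID) is passed to RTAS as two
	 * 32-bit halves.
	 */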
	ret = rtas_call(ibm_set_eeh_option, 4, 1, NULL,
			config_addr, BUID_HI(edev->phb->buid),
			BUID_LO(edev->phb->buid), option);

	return ret;
}

/**
 * pseries_eeh_get_pe_addr - Retrieve PE address
 * @dn: device node
 *
 * Retrieve the associated PE address. There are two RTAS function
 * calls dedicated to this purpose; we try the new one first and fall
 * back to the old one if it isn't available. Note that the config
 * address must have been figured out from the FDT node before this
 * function is called.
 *
 * A return value of zero means the PE config address is invalid.
 */
static int pseries_eeh_get_pe_addr(struct device_node *dn)
{
	struct eeh_dev *edev;
	int ret = 0;
	int rets[3];

	edev = of_node_to_eeh_dev(dn);

	if (ibm_get_config_addr_info2 != RTAS_UNKNOWN_SERVICE) {
		/*
		 * First of all, we need to make sure there is a PE
		 * associated with the device. Otherwise, the PE address
		 * is meaningless.
		 */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				edev->config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid), 1);
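		/* A zero in rets[0] indicates the device has no associated PE */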
		if (ret || (rets[0] == 0))
			return 0;

		/* Retrieve the associated PE config address */
		ret = rtas_call(ibm_get_config_addr_info2, 4, 2, rets,
				edev->config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid), 0);
		if (ret) {
			pr_warning("%s: Failed to get PE address for %s\n",
				__func__, dn->full_name);
			return 0;
		}

		return rets[0];
	}

	if (ibm_get_config_addr_info != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_get_config_addr_info, 4, 2, rets,
				edev->config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid), 0);
		if (ret) {
			pr_warning("%s: Failed to get PE address for %s\n",
				__func__, dn->full_name);
			return 0;
		}

		return rets[0];
	}

	return ret;
}

/**
 * pseries_eeh_get_state - Retrieve PE state
 * @dn: PE associated device node
 * @state: return value
 *
 * Retrieve the state of the specified PE. On an RTAS-compliant
 * pseries platform there is a dedicated RTAS function for this
 * purpose. Note that the associated PE config address might already
 * be available when this function is called, so we use the PE config
 * address whenever possible. Furthermore, there are two RTAS calls
 * for the purpose: we try the new one first and fall back to the old
 * one if the new one isn't supported.
 */
static int pseries_eeh_get_state(struct device_node *dn, int *state)
{
	struct eeh_dev *edev;
	int config_addr;
	int ret;
	int rets[4];
	int result;

	/* Figure out PE config address if possible */
	edev = of_node_to_eeh_dev(dn);
	config_addr = edev->config_addr;
	if (edev->pe_config_addr)
		config_addr = edev->pe_config_addr;

	if (ibm_read_slot_reset_state2 != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_read_slot_reset_state2, 3, 4, rets,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else if (ibm_read_slot_reset_state != RTAS_UNKNOWN_SERVICE) {
		/* Fake PE unavailable info */
		rets[2] = 0;
		ret = rtas_call(ibm_read_slot_reset_state, 3, 3, rets,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else {
		return EEH_STATE_NOT_SUPPORT;
	}

	if (ret)
		return ret;

	/* Parse the result out */
	result = 0;
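	/*
	 * rets[1] is non-zero when the slot actually supports EEH;
	 * rets[0] then carries the PE state, and rets[2] (only provided
	 * by the newer RTAS call) is the suggested wait time in
	 * milliseconds when the state is temporarily unavailable.
	 */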
	if (rets[1]) {
		switch (rets[0]) {
		case 0:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 1:
			result |= EEH_STATE_RESET_ACTIVE;
			result |= EEH_STATE_MMIO_ACTIVE;
			result |= EEH_STATE_DMA_ACTIVE;
			break;
		case 2:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			break;
		case 4:
			result &= ~EEH_STATE_RESET_ACTIVE;
			result &= ~EEH_STATE_MMIO_ACTIVE;
			result &= ~EEH_STATE_DMA_ACTIVE;
			result |= EEH_STATE_MMIO_ENABLED;
			break;
		case 5:
			if (rets[2]) {
				if (state)
					*state = rets[2];
				result = EEH_STATE_UNAVAILABLE;
			} else {
				result = EEH_STATE_NOT_SUPPORT;
			}
			break;
		default:
			result = EEH_STATE_NOT_SUPPORT;
		}
	} else {
		result = EEH_STATE_NOT_SUPPORT;
	}

	return result;
}

/**
 * pseries_eeh_reset - Reset the specified PE
 * @dn: PE associated device node
 * @option: reset option
 *
 * Reset the specified PE
 */
static int pseries_eeh_reset(struct device_node *dn, int option)
{
	struct eeh_dev *edev;
	int config_addr;
	int ret;

	/* Figure out PE address */
	edev = of_node_to_eeh_dev(dn);
	config_addr = edev->config_addr;
	if (edev->pe_config_addr)
		config_addr = edev->pe_config_addr;

	/* Reset PE through RTAS call */
	ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
			config_addr, BUID_HI(edev->phb->buid),
			BUID_LO(edev->phb->buid), option);

	/* If fundamental-reset not supported, try hot-reset */
	if (option == EEH_RESET_FUNDAMENTAL &&
	    ret == -8) {
		ret = rtas_call(ibm_set_slot_reset, 4, 1, NULL,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid), EEH_RESET_HOT);
	}

	return ret;
}

/**
 * pseries_eeh_wait_state - Wait for PE state
 * @dn: PE associated device node
 * @max_wait: maximal period in milliseconds
 *
 * Wait for the state of the associated PE. It might take some time
 * to retrieve the PE's state.
 */
static int pseries_eeh_wait_state(struct device_node *dn, int max_wait)
{
	int ret;
	int mwait;

	/*
	 * According to PAPR, the state of a PE might be temporarily
	 * unavailable. In that case we have to wait for the amount of
	 * time indicated by firmware. The maximal wait time of 5 minutes
	 * and the minimal wait time of 1 second are taken from the
	 * original EEH implementation.
	 */
#define EEH_STATE_MIN_WAIT_TIME	(1000)
#define EEH_STATE_MAX_WAIT_TIME	(300 * 1000)

	while (1) {
		ret = pseries_eeh_get_state(dn, &mwait);

		/*
		 * If the PE's state is temporarily unavailable,
		 * we have to wait for the specified time. Otherwise,
		 * the PE's state will be returned immediately.
		 */
		if (ret != EEH_STATE_UNAVAILABLE)
			return ret;

		if (max_wait <= 0) {
			pr_warning("%s: Timeout when getting PE's state (%d)\n",
				__func__, max_wait);
			return EEH_STATE_NOT_SUPPORT;
		}

		if (mwait <= 0) {
			pr_warning("%s: Firmware returned bad wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MIN_WAIT_TIME;
		} else if (mwait > EEH_STATE_MAX_WAIT_TIME) {
			pr_warning("%s: Firmware returned too long wait value %d\n",
				__func__, mwait);
			mwait = EEH_STATE_MAX_WAIT_TIME;
		}

		max_wait -= mwait;
		msleep(mwait);
	}

	return EEH_STATE_NOT_SUPPORT;
}

/**
 * pseries_eeh_get_log - Retrieve error log
 * @dn: device node
 * @severity: temporary or permanent error log
 * @drv_log: driver log to be combined with the retrieved error log
 * @len: length of driver log
 *
 * Retrieve the temporary or permanent error log from the PE through
 * the dedicated RTAS call.
 */
static int pseries_eeh_get_log(struct device_node *dn, int severity, char *drv_log, unsigned long len)
{
	struct eeh_dev *edev;
	int config_addr;
	unsigned long flags;
	int ret;

	edev = of_node_to_eeh_dev(dn);
	spin_lock_irqsave(&slot_errbuf_lock, flags);
	memset(slot_errbuf, 0, eeh_error_buf_size);

	/* Figure out the PE address */
	config_addr = edev->config_addr;
	if (edev->pe_config_addr)
		config_addr = edev->pe_config_addr;

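	/*
	 * RTAS runs in real mode, so the driver log and the error buffer
	 * are passed by physical address.
	 */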
	ret = rtas_call(ibm_slot_error_detail, 8, 1, NULL, config_addr,
			BUID_HI(edev->phb->buid), BUID_LO(edev->phb->buid),
			virt_to_phys(drv_log), len,
			virt_to_phys(slot_errbuf), eeh_error_buf_size,
			severity);
	if (!ret)
		log_error(slot_errbuf, ERR_TYPE_RTAS_LOG, 0);
	spin_unlock_irqrestore(&slot_errbuf_lock, flags);

	return ret;
}

/**
 * pseries_eeh_configure_bridge - Configure PCI bridges in the indicated PE
 * @dn: PE associated device node
 *
 * The function will be called to reconfigure the bridges included
 * in the specified PE so that the malfunctioning PE can be recovered
 * again.
 */
static int pseries_eeh_configure_bridge(struct device_node *dn)
{
	struct eeh_dev *edev;
	int config_addr;
	int ret;

	/* Figure out the PE address */
	edev = of_node_to_eeh_dev(dn);
	config_addr = edev->config_addr;
	if (edev->pe_config_addr)
		config_addr = edev->pe_config_addr;

	/* Use new configure-pe function, if supported */
	if (ibm_configure_pe != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_pe, 3, 1, NULL,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else if (ibm_configure_bridge != RTAS_UNKNOWN_SERVICE) {
		ret = rtas_call(ibm_configure_bridge, 3, 1, NULL,
				config_addr, BUID_HI(edev->phb->buid),
				BUID_LO(edev->phb->buid));
	} else {
		return -EFAULT;
	}

	if (ret)
		pr_warning("%s: Unable to configure bridge %d for %s\n",
			__func__, ret, dn->full_name);

	return ret;
}

/**
 * pseries_eeh_read_config - Read PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to read
 * @val: return value
 *
 * Read config space from the specified device
 */
static int pseries_eeh_read_config(struct device_node *dn, int where, int size, u32 *val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_read_config(pdn, where, size, val);
}

/**
 * pseries_eeh_write_config - Write PCI config space
 * @dn: device node
 * @where: PCI address
 * @size: size to write
 * @val: value to be written
 *
 * Write config space to the specified device
 */
static int pseries_eeh_write_config(struct device_node *dn, int where, int size, u32 val)
{
	struct pci_dn *pdn;

	pdn = PCI_DN(dn);

	return rtas_write_config(pdn, where, size, val);
}

static struct eeh_ops pseries_eeh_ops = {
	.name = "pseries",
	.init = pseries_eeh_init,
	.set_option = pseries_eeh_set_option,
	.get_pe_addr = pseries_eeh_get_pe_addr,
	.get_state = pseries_eeh_get_state,
	.reset = pseries_eeh_reset,
	.wait_state = pseries_eeh_wait_state,
	.get_log = pseries_eeh_get_log,
	.configure_bridge = pseries_eeh_configure_bridge,
	.read_config = pseries_eeh_read_config,
	.write_config = pseries_eeh_write_config
};

/**
 * eeh_pseries_init - Register platform dependent EEH operations
 *
 * EEH initialization on pseries platform. This function should be
 * called before any EEH related functions.
 */
int __init eeh_pseries_init(void)
{
	return eeh_ops_register(&pseries_eeh_ops);
}