/*
 * drivers/pci/pcie/aer/aerdrv_core.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file implements the core part of PCI Express AER. When a PCI Express
 * error is delivered, an error message is collected and printed to the
 * console, and then an error recovery procedure is executed following the
 * PCI error recovery rules.
 *
 * Copyright (C) 2006 Intel Corp.
 *	Tom Long Nguyen (tom.l.nguyen@intel.com)
 *	Zhang Yanmin (yanmin.zhang@intel.com)
 *
 */

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/pm.h>
#include <linux/suspend.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/kfifo.h>
#include "aerdrv.h"

static bool forceload;
static bool nosourceid;
module_param(forceload, bool, 0);
module_param(nosourceid, bool, 0);

#define	PCI_EXP_AER_FLAGS	(PCI_EXP_DEVCTL_CERE | PCI_EXP_DEVCTL_NFERE | \
				 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE)

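/**
 * pci_enable_pcie_error_reporting - enable AER error reporting for a device
 * @dev: pointer to the pci_dev of the device
 *
 * Sets the correctable, non-fatal, fatal, and unsupported-request error
 * reporting enable bits in the Device Control register.  Returns -EIO if
 * firmware owns AER for this device or the device has no AER capability.
 */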
int pci_enable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	if (!pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR))
		return -EIO;

	return pcie_capability_set_word(dev, PCI_EXP_DEVCTL, PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_enable_pcie_error_reporting);

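/**
 * pci_disable_pcie_error_reporting - disable AER error reporting for a device
 * @dev: pointer to the pci_dev of the device
 *
 * Clears the AER reporting enable bits in the Device Control register.
 * Returns -EIO if firmware owns AER for this device.
 */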
int pci_disable_pcie_error_reporting(struct pci_dev *dev)
{
	if (pcie_aer_get_firmware_first(dev))
		return -EIO;

	return pcie_capability_clear_word(dev, PCI_EXP_DEVCTL,
					  PCI_EXP_AER_FLAGS);
}
EXPORT_SYMBOL_GPL(pci_disable_pcie_error_reporting);

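/**
 * pci_cleanup_aer_uncorrect_error_status - clear uncorrectable error status
 * @dev: pointer to the pci_dev of the device
 *
 * Reads the AER Uncorrectable Error Status register and writes back any set
 * bits to clear them.  Returns -EIO if the device has no AER capability.
 */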
int pci_cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos;
	u32 status;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return -EIO;

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	if (status)
		pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);

	return 0;
}
EXPORT_SYMBOL_GPL(pci_cleanup_aer_uncorrect_error_status);

/**
 * add_error_device - add a device to the list of devices to be handled
 * @e_info: pointer to error info
 * @dev: pointer to pci_dev to be added
 */
static int add_error_device(struct aer_err_info *e_info, struct pci_dev *dev)
{
	if (e_info->error_dev_num < AER_MAX_MULTI_ERR_DEVICES) {
		e_info->dev[e_info->error_dev_num] = dev;
		e_info->error_dev_num++;
		return 0;
	}
	return -ENOSPC;
}

/**
 * is_error_source - check whether the device is the source of the reported error
 * @dev: pointer to pci_dev to be checked
 * @e_info: pointer to reported error info
 */
static bool is_error_source(struct pci_dev *dev, struct aer_err_info *e_info)
{
	int pos;
	u32 status, mask;
	u16 reg16;

	/*
	 * When the bus ID is equal to 0, it might be a bad ID
	 * reported by the root port.
	 */
	if (!nosourceid && (PCI_BUS_NUM(e_info->id) != 0)) {
		/* Device ID match? */
		if (e_info->id == ((dev->bus->number << 8) | dev->devfn))
			return true;

		/* Continue the ID comparison only if multiple errors were reported */
		if (!e_info->multi_error_valid)
			return false;
	}

	/*
	 * When either
	 *      1) nosourceid==y;
	 *      2) the bus ID is equal to 0 (some ports might lose the bus
	 *              ID of the error source);
	 *      3) there are multiple errors and the prior ID comparison fails;
	 * we check the AER status registers to find the possible reporter.
	 */
	if (atomic_read(&dev->enable_cnt) == 0)
		return false;

	/* Check if AER is enabled */
	pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &reg16);
	if (!(reg16 & PCI_EXP_AER_FLAGS))
		return false;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
	if (!pos)
		return false;

	/* Check if an error is recorded */
	if (e_info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &mask);
	} else {
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &mask);
	}
	if (status & ~mask)
		return true;

	return false;
}

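/**
 * find_device_iter - pci_walk_bus() callback that collects error source devices
 * @dev: pointer to the pci_dev being examined
 * @data: pointer to the aer_err_info being filled in
 *
 * Adds @dev to e_info->dev[] if it is a possible source of the reported
 * error.  Returns non-zero to stop the bus walk once the device list is
 * full or a single-error source has been found.
 */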
static int find_device_iter(struct pci_dev *dev, void *data)
{
	struct aer_err_info *e_info = (struct aer_err_info *)data;

	if (is_error_source(dev, e_info)) {
		/* List this device */
		if (add_error_device(e_info, dev)) {
			/* We cannot handle more... Stop iteration */
			/* TODO: Should print error message here? */
			return 1;
		}

		/* If there is only a single error, stop iteration */
		if (!e_info->multi_error_valid)
			return 1;
	}
	return 0;
}

/**
 * find_source_device - search through device hierarchy for source device
 * @parent: pointer to Root Port pci_dev data structure
 * @e_info: detailed error information, including the error source ID
 *
 * Return true if found.
 *
 * Invoked by DPC when error is detected at the Root Port.
 * Caller of this function must set id, severity, and multi_error_valid of
 * struct aer_err_info pointed by @e_info properly.  This function must fill
 * e_info->error_dev_num and e_info->dev[], based on the given information.
 */
static bool find_source_device(struct pci_dev *parent,
		struct aer_err_info *e_info)
{
	struct pci_dev *dev = parent;
	int result;

	/* Must reset in this function */
	e_info->error_dev_num = 0;

	/* Is the Root Port itself the agent that sent the error message? */
	result = find_device_iter(dev, e_info);
	if (result)
		return true;

	pci_walk_bus(parent->subordinate, find_device_iter, e_info);

	if (!e_info->error_dev_num) {
		dev_printk(KERN_DEBUG, &parent->dev,
				"can't find device of ID%04x\n",
				e_info->id);
		return false;
	}
	return true;
}

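/**
 * report_error_detected - invoke a driver's error_detected callback
 * @dev: pointer to the pci_dev being notified
 * @data: pointer to the aer_broadcast_data carrying channel state and result
 *
 * Sets the device's error state and merges the driver's vote into the
 * broadcast result.  A device with no AER-aware driver votes
 * PCI_ERS_RESULT_NO_AER_DRIVER (a bridge votes PCI_ERS_RESULT_NONE).
 */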
static int report_error_detected(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	dev->error_state = result_data->state;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->error_detected) {
		if (result_data->state == pci_channel_io_frozen &&
			!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE)) {
			/*
			 * In case of fatal recovery, if one of the
			 * downstream devices has no driver, we might be
			 * unable to recover because a later insmod
			 * of a driver for this device is unaware of
			 * its hw state.
			 */
			dev_printk(KERN_DEBUG, &dev->dev, "device has %s\n",
				   dev->driver ?
				   "no AER-aware driver" : "no driver");
		}

		/*
		 * If there's any device in the subtree that does not
		 * have an error_detected callback, returning
		 * PCI_ERS_RESULT_NO_AER_DRIVER prevents calling of
		 * the subsequent mmio_enabled/slot_reset/resume
		 * callbacks of "any" device in the subtree. All the
		 * devices in the subtree are left in the error state
		 * without recovery.
		 */

		if (!(dev->hdr_type & PCI_HEADER_TYPE_BRIDGE))
			vote = PCI_ERS_RESULT_NO_AER_DRIVER;
		else
			vote = PCI_ERS_RESULT_NONE;
	} else {
		err_handler = dev->driver->err_handler;
		vote = err_handler->error_detected(dev, result_data->state);
	}

	result_data->result = merge_result(result_data->result, vote);
	device_unlock(&dev->dev);
	return 0;
}

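/**
 * report_mmio_enabled - invoke a driver's mmio_enabled callback
 * @dev: pointer to the pci_dev being notified
 * @data: pointer to the aer_broadcast_data carrying the merged result
 *
 * Skips devices whose driver provides no mmio_enabled callback; otherwise
 * merges the callback's vote into the broadcast result.
 */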
static int report_mmio_enabled(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->mmio_enabled)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->mmio_enabled(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

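/**
 * report_slot_reset - invoke a driver's slot_reset callback
 * @dev: pointer to the pci_dev being notified
 * @data: pointer to the aer_broadcast_data carrying the merged result
 *
 * Skips devices whose driver provides no slot_reset callback; otherwise
 * merges the callback's vote into the broadcast result.
 */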
static int report_slot_reset(struct pci_dev *dev, void *data)
{
	pci_ers_result_t vote;
	const struct pci_error_handlers *err_handler;
	struct aer_broadcast_data *result_data;
	result_data = (struct aer_broadcast_data *) data;

	device_lock(&dev->dev);
	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->slot_reset)
		goto out;

	err_handler = dev->driver->err_handler;
	vote = err_handler->slot_reset(dev);
	result_data->result = merge_result(result_data->result, vote);
out:
	device_unlock(&dev->dev);
	return 0;
}

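/**
 * report_resume - invoke a driver's resume callback
 * @dev: pointer to the pci_dev being notified
 * @data: unused broadcast data pointer
 *
 * Restores the device's error state to pci_channel_io_normal and, if the
 * driver provides a resume callback, invokes it.
 */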
static int report_resume(struct pci_dev *dev, void *data)
{
	const struct pci_error_handlers *err_handler;

	device_lock(&dev->dev);
	dev->error_state = pci_channel_io_normal;

	if (!dev->driver ||
		!dev->driver->err_handler ||
		!dev->driver->err_handler->resume)
		goto out;

	err_handler = dev->driver->err_handler;
	err_handler->resume(dev);
out:
	device_unlock(&dev->dev);
	return 0;
}

/**
 * broadcast_error_message - handle message broadcast to downstream drivers
 * @dev: pointer to the device in the hierarchy from which the message is broadcast down
 * @state: error state
 * @error_mesg: message to print
 * @cb: callback to be broadcast
 *
 * Invoked during the error recovery process. Once invoked, the error severity
 * is broadcast to all downstream drivers in the hierarchy in question.
 */
static pci_ers_result_t broadcast_error_message(struct pci_dev *dev,
	enum pci_channel_state state,
	char *error_mesg,
	int (*cb)(struct pci_dev *, void *))
{
	struct aer_broadcast_data result_data;

	dev_printk(KERN_DEBUG, &dev->dev, "broadcast %s message\n", error_mesg);
	result_data.state = state;
	if (cb == report_error_detected)
		result_data.result = PCI_ERS_RESULT_CAN_RECOVER;
	else
		result_data.result = PCI_ERS_RESULT_RECOVERED;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/*
		 * If the error is reported by a bridge, we think this error
		 * is related to the downstream link of the bridge, so we
		 * do error recovery on all subordinates of the bridge instead
		 * of the bridge and clear the error status of the bridge.
		 */
		if (cb == report_error_detected)
			dev->error_state = state;
		pci_walk_bus(dev->subordinate, cb, &result_data);
		if (cb == report_resume) {
			pci_cleanup_aer_uncorrect_error_status(dev);
			dev->error_state = pci_channel_io_normal;
		}
	} else {
		/*
		 * If the error is reported by an end point, we think this
		 * error is related to the upstream link of the end point.
		 */
		pci_walk_bus(dev->bus, cb, &result_data);
	}

	return result_data.result;
}

/**
 * aer_do_secondary_bus_reset - perform secondary bus reset
 * @dev: pointer to bridge's pci_dev data structure
 *
 * Invoked when performing link reset at Root Port or Downstream Port.
 */
void aer_do_secondary_bus_reset(struct pci_dev *dev)
{
	u16 p2p_ctrl;

	/* Assert Secondary Bus Reset */
	pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &p2p_ctrl);
	p2p_ctrl |= PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);

	/*
	 * We should send the hot reset message for 2ms to allow it time to
	 * propagate to all downstream ports.
	 */
	msleep(2);

	/* De-assert Secondary Bus Reset */
	p2p_ctrl &= ~PCI_BRIDGE_CTL_BUS_RESET;
	pci_write_config_word(dev, PCI_BRIDGE_CONTROL, p2p_ctrl);

	/*
	 * System software must wait for at least 100ms from the end
	 * of a reset of one or more devices before it is permitted
	 * to issue Configuration Requests to those devices.
	 */
	msleep(200);
}

/**
 * default_downstream_reset_link - default reset function for Downstream Port
 * @dev: pointer to downstream port's pci_dev data structure
 *
 * Invoked when performing link reset at a Downstream Port with no AER driver.
 */
static pci_ers_result_t default_downstream_reset_link(struct pci_dev *dev)
{
	aer_do_secondary_bus_reset(dev);
	dev_printk(KERN_DEBUG, &dev->dev,
		"Downstream Port link has been reset\n");
	return PCI_ERS_RESULT_RECOVERED;
}

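/**
 * find_aer_service_iter - device_for_each_child() callback to find the AER service
 * @device: pointer to the port service device being examined
 * @data: pointer used to return the matching pcie_port_service_driver
 *
 * Returns 1 to stop the iteration once a bound port service driver
 * providing PCIE_PORT_SERVICE_AER is found.
 */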
static int find_aer_service_iter(struct device *device, void *data)
{
	struct pcie_port_service_driver *service_driver, **drv;

	drv = (struct pcie_port_service_driver **) data;

	if (device->bus == &pcie_port_bus_type && device->driver) {
		service_driver = to_service_driver(device->driver);
		if (service_driver->service == PCIE_PORT_SERVICE_AER) {
			*drv = service_driver;
			return 1;
		}
	}

	return 0;
}

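/**
 * find_aer_service - find the AER port service driver bound to a port
 * @dev: pointer to the port's pci_dev data structure
 *
 * Returns the AER port service driver, or NULL if none is bound.
 */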
static struct pcie_port_service_driver *find_aer_service(struct pci_dev *dev)
{
	struct pcie_port_service_driver *drv = NULL;

	device_for_each_child(&dev->dev, &drv, find_aer_service_iter);

	return drv;
}

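/**
 * reset_link - reset the link upstream of the error-reporting device
 * @dev: pointer to pci_dev data structure of the agent reporting the error
 *
 * Picks the port whose secondary bus must be reset (the device itself if it
 * is a bridge, otherwise its upstream port) and resets it via the bound AER
 * service driver or the default Downstream Port reset.
 */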
static pci_ers_result_t reset_link(struct pci_dev *dev)
{
	struct pci_dev *udev;
	pci_ers_result_t status;
	struct pcie_port_service_driver *driver;

	if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE) {
		/* Reset this port for all subordinates */
		udev = dev;
	} else {
		/* Reset the upstream component (likely downstream port) */
		udev = dev->bus->self;
	}

	/* Try the AER driver of the component first */
	driver = find_aer_service(udev);

	if (driver && driver->reset_link) {
		status = driver->reset_link(udev);
	} else if (pci_pcie_type(udev) == PCI_EXP_TYPE_DOWNSTREAM) {
		status = default_downstream_reset_link(udev);
	} else {
		dev_printk(KERN_DEBUG, &dev->dev,
			"no link-reset support at upstream device %s\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (status != PCI_ERS_RESULT_RECOVERED) {
		dev_printk(KERN_DEBUG, &dev->dev,
			"link reset at upstream device %s failed\n",
			pci_name(udev));
		return PCI_ERS_RESULT_DISCONNECT;
	}

	return status;
}

/**
 * do_recovery - handle nonfatal/fatal error recovery process
 * @dev: pointer to a pci_dev data structure of agent detecting an error
 * @severity: error severity type
 *
 * Invoked when an error is nonfatal/fatal. Broadcasts the error_detected
 * message to all downstream drivers in the hierarchy in question and walks
 * them through the remaining recovery steps.
 */
static void do_recovery(struct pci_dev *dev, int severity)
{
	pci_ers_result_t status, result = PCI_ERS_RESULT_RECOVERED;
	enum pci_channel_state state;

	if (severity == AER_FATAL)
		state = pci_channel_io_frozen;
	else
		state = pci_channel_io_normal;

	status = broadcast_error_message(dev,
			state,
			"error_detected",
			report_error_detected);

	if (severity == AER_FATAL) {
		result = reset_link(dev);
		if (result != PCI_ERS_RESULT_RECOVERED)
			goto failed;
	}

	if (status == PCI_ERS_RESULT_CAN_RECOVER)
		status = broadcast_error_message(dev,
				state,
				"mmio_enabled",
				report_mmio_enabled);

	if (status == PCI_ERS_RESULT_NEED_RESET) {
		/*
		 * TODO: Should call platform-specific
		 * functions to reset slot before calling
		 * drivers' slot_reset callbacks?
		 */
		status = broadcast_error_message(dev,
				state,
				"slot_reset",
				report_slot_reset);
	}

	if (status != PCI_ERS_RESULT_RECOVERED)
		goto failed;

	broadcast_error_message(dev,
				state,
				"resume",
				report_resume);

	dev_info(&dev->dev, "AER: Device recovery successful\n");
	return;

failed:
	/* TODO: Should kernel panic here? */
	dev_info(&dev->dev, "AER: Device recovery failed\n");
}

/**
 * handle_error_source - handle logging error into an event log
 * @aerdev: pointer to pcie_device data structure of the root port
 * @dev: pointer to pci_dev data structure of error source device
 * @info: comprehensive error information
 *
 * Invoked when an error is detected by the Root Port.
 */
static void handle_error_source(struct pcie_device *aerdev,
	struct pci_dev *dev,
	struct aer_err_info *info)
{
	int pos;

	if (info->severity == AER_CORRECTABLE) {
		/*
		 * A correctable error does not need software intervention.
		 * No need to go through the error recovery process.
		 */
		pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
		if (pos)
			pci_write_config_dword(dev, pos + PCI_ERR_COR_STATUS,
					info->status);
	} else
		do_recovery(dev, info->severity);
}

#ifdef CONFIG_ACPI_APEI_PCIEAER
static void aer_recover_work_func(struct work_struct *work);

#define AER_RECOVER_RING_ORDER		4
#define AER_RECOVER_RING_SIZE		(1 << AER_RECOVER_RING_ORDER)

struct aer_recover_entry {
	u8	bus;
	u8	devfn;
	u16	domain;
	int	severity;
	struct aer_capability_regs *regs;
};

static DEFINE_KFIFO(aer_recover_ring, struct aer_recover_entry,
		    AER_RECOVER_RING_SIZE);
/*
 * Mutual exclusion for writers of aer_recover_ring; the reader side does
 * not need the lock, because there is only one reader and no lock is
 * needed between the reader and writers.
 */
static DEFINE_SPINLOCK(aer_recover_ring_lock);
static DECLARE_WORK(aer_recover_work, aer_recover_work_func);

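/**
 * aer_recover_queue - queue an APEI-reported AER error for recovery
 * @domain: PCI domain of the error source
 * @bus: bus number of the error source
 * @devfn: device and function number of the error source
 * @severity: AER severity of the error
 * @aer_regs: pointer to the reported AER capability registers
 *
 * Pushes the error onto aer_recover_ring and schedules aer_recover_work to
 * process it; logs an error if the ring is full.
 */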
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
		       int severity, struct aer_capability_regs *aer_regs)
{
	unsigned long flags;
	struct aer_recover_entry entry = {
		.bus		= bus,
		.devfn		= devfn,
		.domain		= domain,
		.severity	= severity,
		.regs		= aer_regs,
	};

	spin_lock_irqsave(&aer_recover_ring_lock, flags);
	if (kfifo_put(&aer_recover_ring, &entry))
		schedule_work(&aer_recover_work);
	else
		pr_err("AER recover: Buffer overflow when recovering AER for %04x:%02x:%02x:%x\n",
		       domain, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
	spin_unlock_irqrestore(&aer_recover_ring_lock, flags);
}
EXPORT_SYMBOL_GPL(aer_recover_queue);

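/**
 * aer_recover_work_func - process queued APEI-reported AER errors
 * @work: the aer_recover_work item
 *
 * Drains aer_recover_ring, looks up the pci_dev for each entry, prints the
 * CPER AER record, and runs the normal recovery procedure on it.
 */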
static void aer_recover_work_func(struct work_struct *work)
{
	struct aer_recover_entry entry;
	struct pci_dev *pdev;

	while (kfifo_get(&aer_recover_ring, &entry)) {
		pdev = pci_get_domain_bus_and_slot(entry.domain, entry.bus,
						   entry.devfn);
		if (!pdev) {
			pr_err("AER recover: Can not find pci_dev for %04x:%02x:%02x:%x\n",
			       entry.domain, entry.bus,
			       PCI_SLOT(entry.devfn), PCI_FUNC(entry.devfn));
			continue;
		}
		cper_print_aer(pdev, entry.severity, entry.regs);
		do_recovery(pdev, entry.severity);
		pci_dev_put(pdev);
	}
}
#endif

/**
 * get_device_error_info - read error status from dev and store it to info
 * @dev: pointer to the device expected to have an error record
 * @info: pointer to structure to store the error record
 *
 * Return 1 on success, 0 on error.
 *
 * Note that @info is reused among all error devices. Clear fields properly.
 */
static int get_device_error_info(struct pci_dev *dev, struct aer_err_info *info)
{
	int pos, temp;

	/* Must reset in this function */
	info->status = 0;
	info->tlp_header_valid = 0;

	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);

	/* The device might not support AER */
	if (!pos)
		return 1;

	if (info->severity == AER_CORRECTABLE) {
		pci_read_config_dword(dev, pos + PCI_ERR_COR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;
	} else if (dev->hdr_type & PCI_HEADER_TYPE_BRIDGE ||
		info->severity == AER_NONFATAL) {

		/* Link is still healthy for IO reads */
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS,
			&info->status);
		pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK,
			&info->mask);
		if (!(info->status & ~info->mask))
			return 0;

		/* Get First Error Pointer */
		pci_read_config_dword(dev, pos + PCI_ERR_CAP, &temp);
		info->first_error = PCI_ERR_CAP_FEP(temp);

		if (info->status & AER_LOG_TLP_MASKS) {
			info->tlp_header_valid = 1;
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG, &info->tlp.dw0);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 4, &info->tlp.dw1);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 8, &info->tlp.dw2);
			pci_read_config_dword(dev,
				pos + PCI_ERR_HEADER_LOG + 12, &info->tlp.dw3);
		}
	}

	return 1;
}

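/**
 * aer_process_err_devices - report and handle all collected error devices
 * @p_device: pointer to the root port's AER service device
 * @e_info: error information with the list of affected devices
 */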
static inline void aer_process_err_devices(struct pcie_device *p_device,
			struct aer_err_info *e_info)
{
	int i;

	/* Report all errors before handling them, so records are not lost to a reset etc. */
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			aer_print_error(e_info->dev[i], e_info);
	}
	for (i = 0; i < e_info->error_dev_num && e_info->dev[i]; i++) {
		if (get_device_error_info(e_info->dev[i], e_info))
			handle_error_source(p_device, e_info->dev[i], e_info);
	}
}

/**
 * aer_isr_one_error - consume an error detected by root port
 * @p_device: pointer to error root port service device
 * @e_src: pointer to an error source
 */
static void aer_isr_one_error(struct pcie_device *p_device,
		struct aer_err_source *e_src)
{
	struct aer_err_info *e_info;

	/* struct aer_err_info might be big, so we allocate it with slab */
	e_info = kmalloc(sizeof(struct aer_err_info), GFP_KERNEL);
	if (!e_info) {
		dev_printk(KERN_DEBUG, &p_device->port->dev,
			"Can't allocate mem when processing AER errors\n");
		return;
	}

	/*
	 * There is a possibility that both a correctable error and an
	 * uncorrectable error are logged. Report the correctable error first.
	 */
	if (e_src->status & PCI_ERR_ROOT_COR_RCV) {
		e_info->id = ERR_COR_ID(e_src->id);
		e_info->severity = AER_CORRECTABLE;

		if (e_src->status & PCI_ERR_ROOT_MULTI_COR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	if (e_src->status & PCI_ERR_ROOT_UNCOR_RCV) {
		e_info->id = ERR_UNCOR_ID(e_src->id);

		if (e_src->status & PCI_ERR_ROOT_FATAL_RCV)
			e_info->severity = AER_FATAL;
		else
			e_info->severity = AER_NONFATAL;

		if (e_src->status & PCI_ERR_ROOT_MULTI_UNCOR_RCV)
			e_info->multi_error_valid = 1;
		else
			e_info->multi_error_valid = 0;

		aer_print_port_info(p_device->port, e_info);

		if (find_source_device(p_device->port, e_info))
			aer_process_err_devices(p_device, e_info);
	}

	kfree(e_info);
}

/**
 * get_e_source - retrieve an error source
 * @rpc: pointer to the root port which holds an error
 * @e_src: pointer to store retrieved error source
 *
 * Return 1 if an error source is retrieved, otherwise 0.
 *
 * Invoked by DPC handler to consume an error.
 */
static int get_e_source(struct aer_rpc *rpc, struct aer_err_source *e_src)
{
	unsigned long flags;

	/* Lock access to Root error producer/consumer index */
	spin_lock_irqsave(&rpc->e_lock, flags);
	if (rpc->prod_idx == rpc->cons_idx) {
		spin_unlock_irqrestore(&rpc->e_lock, flags);
		return 0;
	}

	*e_src = rpc->e_sources[rpc->cons_idx];
	rpc->cons_idx++;
	if (rpc->cons_idx == AER_ERROR_SOURCES_MAX)
		rpc->cons_idx = 0;
	spin_unlock_irqrestore(&rpc->e_lock, flags);

	return 1;
}

/**
 * aer_isr - consume errors detected by root port
 * @work: definition of this work item
 *
 * Invoked, as DPC, when the root port records a newly detected error.
 */
void aer_isr(struct work_struct *work)
{
	struct aer_rpc *rpc = container_of(work, struct aer_rpc, dpc_handler);
	struct pcie_device *p_device = rpc->rpd;
	struct aer_err_source uninitialized_var(e_src);

	mutex_lock(&rpc->rpc_mutex);
	while (get_e_source(rpc, &e_src))
		aer_isr_one_error(p_device, &e_src);
	mutex_unlock(&rpc->rpc_mutex);

	wake_up(&rpc->wait_release);
}

/**
 * aer_init - provide AER initialization
 * @dev: pointer to AER pcie device
 *
 * Invoked when AER service driver is loaded.
 */
int aer_init(struct pcie_device *dev)
{
	if (forceload) {
		dev_printk(KERN_DEBUG, &dev->device,
			   "aerdrv forceload requested.\n");
		pcie_aer_force_firmware_first(dev->port, 0);
	}
	return 0;
}