/*
 * APEI Generic Hardware Error Source support
 *
 * Generic Hardware Error Source provides a way to report platform
 * hardware errors (such as those from the chipset). It works in the
 * so-called "Firmware First" mode, that is, hardware errors are
 * reported to firmware first, and then reported to Linux by the
 * firmware. This way, some non-standard hardware error registers or
 * non-standard hardware links can be checked by the firmware to
 * produce more hardware error information for Linux.
 *
 * For more information about Generic Hardware Error Source, please
 * refer to ACPI Specification version 4.0, section 17.3.2.6
 *
 * Copyright 2010,2011 Intel Corp.
 *   Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation;
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/timer.h>
#include <linux/cper.h>
#include <linux/kdebug.h>
#include <linux/platform_device.h>
#include <linux/mutex.h>
#include <linux/ratelimit.h>
#include <linux/vmalloc.h>
#include <linux/irq_work.h>
#include <linux/llist.h>
#include <linux/genalloc.h>
#include <linux/pci.h>
#include <linux/aer.h>
#include <linux/nmi.h>

#include <acpi/ghes.h>
#include <acpi/apei.h>
#include <asm/tlbflush.h>

#include "apei-internal.h"

#define GHES_PFX	"GHES: "

#define GHES_ESTATUS_MAX_SIZE		65536
#define GHES_ESOURCE_PREALLOC_MAX_SIZE	65536

#define GHES_ESTATUS_POOL_MIN_ALLOC_ORDER 3

/* This is just an estimation for memory pool allocation */
#define GHES_ESTATUS_CACHE_AVG_SIZE	512

#define GHES_ESTATUS_CACHES_SIZE	4

#define GHES_ESTATUS_IN_CACHE_MAX_NSEC	10000000000ULL
/* Prevent too many caches from being allocated because of RCU */
#define GHES_ESTATUS_CACHE_ALLOCED_MAX	(GHES_ESTATUS_CACHES_SIZE * 3 / 2)

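/*
 * Note: both the cache entries and the list nodes below store the raw
 * error status data immediately after their bookkeeping struct in a
 * single allocation; the *_LEN and *_FROM_* macros encode that layout.
 */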
#define GHES_ESTATUS_CACHE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_cache) + (estatus_len))
#define GHES_ESTATUS_FROM_CACHE(estatus_cache)			\
	((struct acpi_hest_generic_status *)				\
	 ((struct ghes_estatus_cache *)(estatus_cache) + 1))

#define GHES_ESTATUS_NODE_LEN(estatus_len)			\
	(sizeof(struct ghes_estatus_node) + (estatus_len))
#define GHES_ESTATUS_FROM_NODE(estatus_node)			\
	((struct acpi_hest_generic_status *)				\
	 ((struct ghes_estatus_node *)(estatus_node) + 1))

bool ghes_disable;
module_param_named(disable, ghes_disable, bool, 0);

/*
 * All error sources notified with SCI share one notifier function,
 * so they need to be linked and checked one by one.  The same applies
 * to NMI.
 *
 * RCU is used for these lists, so ghes_list_mutex is only used for
 * list changing, not for traversing.
 */
static LIST_HEAD(ghes_sci);
static DEFINE_MUTEX(ghes_list_mutex);

/*
 * The memory area used to transfer hardware error information from
 * BIOS to Linux can be determined only in an NMI, IRQ or timer
 * handler, but the generic ioremap can not be used in atomic context,
 * so a special version of atomic ioremap is implemented for that.
 */

/*
 * Two virtual pages are used, one for IRQ/PROCESS context, the other for
 * NMI context (optionally).
 */
#ifdef CONFIG_HAVE_ACPI_APEI_NMI
#define GHES_IOREMAP_PAGES           2
#else
#define GHES_IOREMAP_PAGES           1
#endif
#define GHES_IOREMAP_IRQ_PAGE(base)	(base)
#define GHES_IOREMAP_NMI_PAGE(base)	((base) + PAGE_SIZE)

/* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area;
/*
 * These two spinlocks are used to prevent the atomic ioremap virtual
 * memory areas from being mapped simultaneously.
 */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);

static struct gen_pool *ghes_estatus_pool;
static unsigned long ghes_estatus_pool_size_request;

static struct ghes_estatus_cache *ghes_estatus_caches[GHES_ESTATUS_CACHES_SIZE];
static atomic_t ghes_estatus_cache_alloced;

static int ghes_ioremap_init(void)
{
	ghes_ioremap_area = __get_vm_area(PAGE_SIZE * GHES_IOREMAP_PAGES,
		VM_IOREMAP, VMALLOC_START, VMALLOC_END);
	if (!ghes_ioremap_area) {
		pr_err(GHES_PFX "Failed to allocate virtual memory area for atomic ioremap.\n");
		return -ENOMEM;
	}

	return 0;
}

static void ghes_ioremap_exit(void)
{
	free_vm_area(ghes_ioremap_area);
}

static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{
	unsigned long vaddr;

	vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
	ioremap_page_range(vaddr, vaddr + PAGE_SIZE,
			   pfn << PAGE_SHIFT, PAGE_KERNEL);

	return (void __iomem *)vaddr;
}

static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{
	unsigned long vaddr, paddr;
	pgprot_t prot;

	vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);

	paddr = pfn << PAGE_SHIFT;
	prot = arch_apei_get_mem_attribute(paddr);

	ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot);

	return (void __iomem *)vaddr;
}

static void ghes_iounmap_nmi(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	arch_apei_flush_tlb_one(vaddr);
}

static void ghes_iounmap_irq(void __iomem *vaddr_ptr)
{
	unsigned long vaddr = (unsigned long __force)vaddr_ptr;
	void *base = ghes_ioremap_area->addr;

	BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
	unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
	arch_apei_flush_tlb_one(vaddr);
}

static int ghes_estatus_pool_init(void)
{
	ghes_estatus_pool = gen_pool_create(GHES_ESTATUS_POOL_MIN_ALLOC_ORDER, -1);
	if (!ghes_estatus_pool)
		return -ENOMEM;
	return 0;
}

static void ghes_estatus_pool_free_chunk(struct gen_pool *pool,
					      struct gen_pool_chunk *chunk,
					      void *data)
{
	vfree((void *)chunk->start_addr);
}

static void ghes_estatus_pool_exit(void)
{
	gen_pool_for_each_chunk(ghes_estatus_pool,
				ghes_estatus_pool_free_chunk, NULL);
	gen_pool_destroy(ghes_estatus_pool);
}

static int ghes_estatus_pool_expand(unsigned long len)
{
	unsigned long size, addr;

	ghes_estatus_pool_size_request += PAGE_ALIGN(len);
	size = gen_pool_size(ghes_estatus_pool);
	if (size >= ghes_estatus_pool_size_request)
		return 0;

	addr = (unsigned long)vmalloc(PAGE_ALIGN(len));
	if (!addr)
		return -ENOMEM;

	/*
	 * The new allocation must be visible in all pgds before it can be
	 * found by an NMI allocating from the pool.
	 */
	vmalloc_sync_mappings();

	return gen_pool_add(ghes_estatus_pool, addr, PAGE_ALIGN(len), -1);
}

static struct ghes *ghes_new(struct acpi_hest_generic *generic)
{
	struct ghes *ghes;
	unsigned int error_block_length;
	int rc;

	ghes = kzalloc(sizeof(*ghes), GFP_KERNEL);
	if (!ghes)
		return ERR_PTR(-ENOMEM);
	ghes->generic = generic;
	rc = apei_map_generic_address(&generic->error_status_address);
	if (rc)
		goto err_free;
	error_block_length = generic->error_block_length;
	if (error_block_length > GHES_ESTATUS_MAX_SIZE) {
		pr_warning(FW_WARN GHES_PFX
			   "Error status block length is too long: %u for "
			   "generic hardware error source: %d.\n",
			   error_block_length, generic->header.source_id);
		error_block_length = GHES_ESTATUS_MAX_SIZE;
	}
	ghes->estatus = kmalloc(error_block_length, GFP_KERNEL);
	if (!ghes->estatus) {
		rc = -ENOMEM;
		goto err_unmap;
	}

	return ghes;

err_unmap:
	apei_unmap_generic_address(&generic->error_status_address);
err_free:
	kfree(ghes);
	return ERR_PTR(rc);
}

static void ghes_fini(struct ghes *ghes)
{
	kfree(ghes->estatus);
	apei_unmap_generic_address(&ghes->generic->error_status_address);
}

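/*
 * Map a CPER error record severity to the corresponding GHES severity;
 * anything unrecognized is treated as fatal (GHES_SEV_PANIC) to stay on
 * the safe side.
 */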
static inline int ghes_severity(int severity)
{
	switch (severity) {
	case CPER_SEV_INFORMATIONAL:
		return GHES_SEV_NO;
	case CPER_SEV_CORRECTED:
		return GHES_SEV_CORRECTED;
	case CPER_SEV_RECOVERABLE:
		return GHES_SEV_RECOVERABLE;
	case CPER_SEV_FATAL:
		return GHES_SEV_PANIC;
	default:
		/* Unknown, go panic */
		return GHES_SEV_PANIC;
	}
}

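/*
 * Copy between a kernel buffer and physical memory one page at a time,
 * mapping each page through the fixed atomic-ioremap slot that matches
 * the current context (NMI vs. IRQ/process) while holding the
 * corresponding lock.
 */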
static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
				  int from_phys)
{
	void __iomem *vaddr;
	unsigned long flags = 0;
	int in_nmi = in_nmi();
	u64 offset;
	u32 trunk;

	while (len > 0) {
		offset = paddr - (paddr & PAGE_MASK);
		if (in_nmi) {
			raw_spin_lock(&ghes_ioremap_lock_nmi);
			vaddr = ghes_ioremap_pfn_nmi(paddr >> PAGE_SHIFT);
		} else {
			spin_lock_irqsave(&ghes_ioremap_lock_irq, flags);
			vaddr = ghes_ioremap_pfn_irq(paddr >> PAGE_SHIFT);
		}
		trunk = PAGE_SIZE - offset;
		trunk = min(trunk, len);
		if (from_phys)
			memcpy_fromio(buffer, vaddr + offset, trunk);
		else
			memcpy_toio(vaddr + offset, buffer, trunk);
		len -= trunk;
		paddr += trunk;
		buffer += trunk;
		if (in_nmi) {
			ghes_iounmap_nmi(vaddr);
			raw_spin_unlock(&ghes_ioremap_lock_nmi);
		} else {
			ghes_iounmap_irq(vaddr);
			spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
		}
	}
}

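/*
 * Read the error status block for this error source: fetch the header
 * first, sanity check its length and contents, then copy in the rest of
 * the record.  On success the block still has to be acknowledged with
 * ghes_clear_estatus() (GHES_TO_CLEAR is set to remember that).
 */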
static int ghes_read_estatus(struct ghes *ghes, int silent)
{
	struct acpi_hest_generic *g = ghes->generic;
	u64 buf_paddr;
	u32 len;
	int rc;

	rc = apei_read(&buf_paddr, &g->error_status_address);
	if (rc) {
		if (!silent && printk_ratelimit())
			pr_warning(FW_WARN GHES_PFX
"Failed to read error status block address for hardware error source: %d.\n",
				   g->header.source_id);
		return -EIO;
	}
	if (!buf_paddr)
		return -ENOENT;

	ghes_copy_tofrom_phys(ghes->estatus, buf_paddr,
			      sizeof(*ghes->estatus), 1);
	if (!ghes->estatus->block_status)
		return -ENOENT;

	ghes->buffer_paddr = buf_paddr;
	ghes->flags |= GHES_TO_CLEAR;

	rc = -EIO;
	len = cper_estatus_len(ghes->estatus);
	if (len < sizeof(*ghes->estatus))
		goto err_read_block;
	if (len > ghes->generic->error_block_length)
		goto err_read_block;
	if (cper_estatus_check_header(ghes->estatus))
		goto err_read_block;
	ghes_copy_tofrom_phys(ghes->estatus + 1,
			      buf_paddr + sizeof(*ghes->estatus),
			      len - sizeof(*ghes->estatus), 1);
	if (cper_estatus_check(ghes->estatus))
		goto err_read_block;
	rc = 0;

err_read_block:
	if (rc && !silent && printk_ratelimit())
		pr_warning(FW_WARN GHES_PFX
			   "Failed to read error status block!\n");
	return rc;
}

static void ghes_clear_estatus(struct ghes *ghes)
{
	ghes->estatus->block_status = 0;
	if (!(ghes->flags & GHES_TO_CLEAR))
		return;
	ghes_copy_tofrom_phys(ghes->estatus, ghes->buffer_paddr,
			      sizeof(ghes->estatus->block_status), 0);
	ghes->flags &= ~GHES_TO_CLEAR;
}

static void ghes_handle_memory_failure(struct acpi_hest_generic_data *gdata, int sev)
{
#ifdef CONFIG_ACPI_APEI_MEMORY_FAILURE
	unsigned long pfn;
	int flags = -1;
	int sec_sev = ghes_severity(gdata->error_severity);
	struct cper_sec_mem_err *mem_err;
	mem_err = (struct cper_sec_mem_err *)(gdata + 1);

	if (!(mem_err->validation_bits & CPER_MEM_VALID_PA))
		return;

	pfn = mem_err->physical_addr >> PAGE_SHIFT;
	if (!pfn_valid(pfn)) {
		pr_warn_ratelimited(FW_WARN GHES_PFX
		"Invalid address in generic error data: %#llx\n",
		mem_err->physical_addr);
		return;
	}

	/* Only the following two cases can be handled properly for now */
	if (sec_sev == GHES_SEV_CORRECTED &&
	    (gdata->flags & CPER_SEC_ERROR_THRESHOLD_EXCEEDED))
		flags = MF_SOFT_OFFLINE;
	if (sev == GHES_SEV_RECOVERABLE && sec_sev == GHES_SEV_RECOVERABLE)
		flags = 0;

	if (flags != -1)
		memory_failure_queue(pfn, 0, flags);
#endif
}

static void ghes_do_proc(struct ghes *ghes,
			 const struct acpi_hest_generic_status *estatus)
{
	int sev, sec_sev;
	struct acpi_hest_generic_data *gdata;

	sev = ghes_severity(estatus->error_severity);
	apei_estatus_for_each_section(estatus, gdata) {
		sec_sev = ghes_severity(gdata->error_severity);
		if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				 CPER_SEC_PLATFORM_MEM)) {
			struct cper_sec_mem_err *mem_err;
			mem_err = (struct cper_sec_mem_err *)(gdata+1);
			ghes_edac_report_mem_error(ghes, sev, mem_err);

			arch_apei_report_mem_error(sev, mem_err);
			ghes_handle_memory_failure(gdata, sev);
		}
#ifdef CONFIG_ACPI_APEI_PCIEAER
		else if (!uuid_le_cmp(*(uuid_le *)gdata->section_type,
				      CPER_SEC_PCIE)) {
			struct cper_sec_pcie *pcie_err;
			pcie_err = (struct cper_sec_pcie *)(gdata+1);
			if (sev == GHES_SEV_RECOVERABLE &&
			    sec_sev == GHES_SEV_RECOVERABLE &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_DEVICE_ID &&
			    pcie_err->validation_bits & CPER_PCIE_VALID_AER_INFO) {
				unsigned int devfn;
				int aer_severity;

				devfn = PCI_DEVFN(pcie_err->device_id.device,
						  pcie_err->device_id.function);
				aer_severity = cper_severity_to_aer(sev);

				/*
				 * If firmware reset the component to contain
				 * the error, we must reinitialize it before
				 * use, so treat it as a fatal AER error.
				 */
				if (gdata->flags & CPER_SEC_RESET)
					aer_severity = AER_FATAL;

				aer_recover_queue(pcie_err->device_id.segment,
						  pcie_err->device_id.bus,
						  devfn, aer_severity,
						  (struct aer_capability_regs *)
						  pcie_err->aer_info);
			}

		}
#endif
	}
}

static void __ghes_print_estatus(const char *pfx,
				 const struct acpi_hest_generic *generic,
				 const struct acpi_hest_generic_status *estatus)
{
	static atomic_t seqno;
	unsigned int curr_seqno;
	char pfx_seq[64];

	if (pfx == NULL) {
		if (ghes_severity(estatus->error_severity) <=
		    GHES_SEV_CORRECTED)
			pfx = KERN_WARNING;
		else
			pfx = KERN_ERR;
	}
	curr_seqno = atomic_inc_return(&seqno);
	snprintf(pfx_seq, sizeof(pfx_seq), "%s{%u}" HW_ERR, pfx, curr_seqno);
	printk("%s""Hardware error from APEI Generic Hardware Error Source: %d\n",
	       pfx_seq, generic->header.source_id);
	cper_estatus_print(pfx_seq, estatus);
}

static int ghes_print_estatus(const char *pfx,
			      const struct acpi_hest_generic *generic,
			      const struct acpi_hest_generic_status *estatus)
{
	/* Not more than 2 messages every 5 seconds */
	static DEFINE_RATELIMIT_STATE(ratelimit_corrected, 5*HZ, 2);
	static DEFINE_RATELIMIT_STATE(ratelimit_uncorrected, 5*HZ, 2);
	struct ratelimit_state *ratelimit;

	if (ghes_severity(estatus->error_severity) <= GHES_SEV_CORRECTED)
		ratelimit = &ratelimit_corrected;
	else
		ratelimit = &ratelimit_uncorrected;
	if (__ratelimit(ratelimit)) {
		__ghes_print_estatus(pfx, generic, estatus);
		return 1;
	}
	return 0;
}

/*
 * GHES error status reporting throttle, to report more kinds of
 * errors, instead of just the most frequently occurring errors.
 */
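/*
 * An estatus record is considered "cached" when an identical record is
 * already in ghes_estatus_caches and was inserted less than
 * GHES_ESTATUS_IN_CACHE_MAX_NSEC ago; such records are counted but not
 * printed again, so rarer errors keep a chance to reach the log.
 */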
static int ghes_estatus_cached(struct acpi_hest_generic_status *estatus)
{
	u32 len;
	int i, cached = 0;
	unsigned long long now;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	len = cper_estatus_len(estatus);
	rcu_read_lock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL)
			continue;
		if (len != cache->estatus_len)
			continue;
		cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
		if (memcmp(estatus, cache_estatus, len))
			continue;
		atomic_inc(&cache->count);
		now = sched_clock();
		if (now - cache->time_in < GHES_ESTATUS_IN_CACHE_MAX_NSEC)
			cached = 1;
		break;
	}
	rcu_read_unlock();
	return cached;
}

static struct ghes_estatus_cache *ghes_estatus_cache_alloc(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int alloced;
	u32 len, cache_len;
	struct ghes_estatus_cache *cache;
	struct acpi_hest_generic_status *cache_estatus;

	alloced = atomic_add_return(1, &ghes_estatus_cache_alloced);
	if (alloced > GHES_ESTATUS_CACHE_ALLOCED_MAX) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	len = cper_estatus_len(estatus);
	cache_len = GHES_ESTATUS_CACHE_LEN(len);
	cache = (void *)gen_pool_alloc(ghes_estatus_pool, cache_len);
	if (!cache) {
		atomic_dec(&ghes_estatus_cache_alloced);
		return NULL;
	}
	cache_estatus = GHES_ESTATUS_FROM_CACHE(cache);
	memcpy(cache_estatus, estatus, len);
	cache->estatus_len = len;
	atomic_set(&cache->count, 0);
	cache->generic = generic;
	cache->time_in = sched_clock();
	return cache;
}

static void ghes_estatus_cache_free(struct ghes_estatus_cache *cache)
{
	u32 len;

	len = cper_estatus_len(GHES_ESTATUS_FROM_CACHE(cache));
	len = GHES_ESTATUS_CACHE_LEN(len);
	gen_pool_free(ghes_estatus_pool, (unsigned long)cache, len);
	atomic_dec(&ghes_estatus_cache_alloced);
}

static void ghes_estatus_cache_rcu_free(struct rcu_head *head)
{
	struct ghes_estatus_cache *cache;

	cache = container_of(head, struct ghes_estatus_cache, rcu);
	ghes_estatus_cache_free(cache);
}

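/*
 * Insert a new record into the estatus cache.  The victim slot is an
 * empty slot if one exists, otherwise an expired entry, otherwise the
 * entry with the largest average interval between hits
 * (duration / (count + 1)).  The slot is swapped with cmpxchg() and the
 * old entry is freed after an RCU grace period.
 */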
static void ghes_estatus_cache_add(
	struct acpi_hest_generic *generic,
	struct acpi_hest_generic_status *estatus)
{
	int i, slot = -1, count;
	unsigned long long now, duration, period, max_period = 0;
	struct ghes_estatus_cache *cache, *slot_cache = NULL, *new_cache;

	new_cache = ghes_estatus_cache_alloc(generic, estatus);
	if (new_cache == NULL)
		return;
	rcu_read_lock();
	now = sched_clock();
	for (i = 0; i < GHES_ESTATUS_CACHES_SIZE; i++) {
		cache = rcu_dereference(ghes_estatus_caches[i]);
		if (cache == NULL) {
			slot = i;
			slot_cache = NULL;
			break;
		}
		duration = now - cache->time_in;
		if (duration >= GHES_ESTATUS_IN_CACHE_MAX_NSEC) {
			slot = i;
			slot_cache = cache;
			break;
		}
		count = atomic_read(&cache->count);
		period = duration;
		do_div(period, (count + 1));
		if (period > max_period) {
			max_period = period;
			slot = i;
			slot_cache = cache;
		}
	}
	/* new_cache must be put into the array after its contents are written */
	smp_wmb();
	if (slot != -1 && cmpxchg(ghes_estatus_caches + slot,
				  slot_cache, new_cache) == slot_cache) {
		if (slot_cache)
			call_rcu(&slot_cache->rcu, ghes_estatus_cache_rcu_free);
	} else
		ghes_estatus_cache_free(new_cache);
	rcu_read_unlock();
}

static int ghes_proc(struct ghes *ghes)
{
	int rc;

	rc = ghes_read_estatus(ghes, 0);
	if (rc)
		goto out;
	if (!ghes_estatus_cached(ghes->estatus)) {
		if (ghes_print_estatus(NULL, ghes->generic, ghes->estatus))
			ghes_estatus_cache_add(ghes->generic, ghes->estatus);
	}
	ghes_do_proc(ghes, ghes->estatus);
out:
	ghes_clear_estatus(ghes);
	return rc;
}

static void ghes_add_timer(struct ghes *ghes)
{
	struct acpi_hest_generic *g = ghes->generic;
	unsigned long expire;

	if (!g->notify.poll_interval) {
		pr_warning(FW_WARN GHES_PFX "Poll interval is 0 for generic hardware error source: %d, disabled.\n",
			   g->header.source_id);
		return;
	}
	expire = jiffies + msecs_to_jiffies(g->notify.poll_interval);
	ghes->timer.expires = round_jiffies_relative(expire);
	add_timer(&ghes->timer);
}

static void ghes_poll_func(unsigned long data)
{
	struct ghes *ghes = (void *)data;

	ghes_proc(ghes);
	if (!(ghes->flags & GHES_EXITING))
		ghes_add_timer(ghes);
}

static irqreturn_t ghes_irq_func(int irq, void *data)
{
	struct ghes *ghes = data;
	int rc;

	rc = ghes_proc(ghes);
	if (rc)
		return IRQ_NONE;

	return IRQ_HANDLED;
}

static int ghes_notify_sci(struct notifier_block *this,
				  unsigned long event, void *data)
{
	struct ghes *ghes;
	int ret = NOTIFY_DONE;

	rcu_read_lock();
	list_for_each_entry_rcu(ghes, &ghes_sci, list) {
		if (!ghes_proc(ghes))
			ret = NOTIFY_OK;
	}
	rcu_read_unlock();

	return ret;
}

static struct notifier_block ghes_notifier_sci = {
	.notifier_call = ghes_notify_sci,
};

#ifdef CONFIG_HAVE_ACPI_APEI_NMI
/*
 * printk is not safe in NMI context.  So in the NMI handler, we
 * allocate the required memory from the lock-less memory allocator
 * (ghes_estatus_pool), save the estatus into it, put it onto the
 * lock-less list (ghes_estatus_llist), and then delay printk into IRQ
 * context via irq_work (ghes_proc_irq_work).
 * ghes_estatus_pool_size_request records the pool size required by all
 * NMI error sources.
 */
static struct llist_head ghes_estatus_llist;
static struct irq_work ghes_proc_irq_work;

/*
 * NMI may be triggered on any CPU, so ghes_in_nmi is used to allow
 * only one concurrent reader.
 */
static atomic_t ghes_in_nmi = ATOMIC_INIT(0);

static LIST_HEAD(ghes_nmi);

static int ghes_panic_timeout	__read_mostly = 30;

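/*
 * Drain the estatus records queued from NMI context: process each one,
 * optionally print and cache it, and return the node's memory to
 * ghes_estatus_pool.
 */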
static void ghes_proc_in_irq(struct irq_work *irq_work)
{
	struct llist_node *llnode, *next;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The estatus entries on the list are in reverse time order,
	 * so put them back into the proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		next = llnode->next;
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		ghes_do_proc(estatus_node->ghes, estatus);
		if (!ghes_estatus_cached(estatus)) {
			generic = estatus_node->generic;
			if (ghes_print_estatus(NULL, generic, estatus))
				ghes_estatus_cache_add(generic, estatus);
		}
		gen_pool_free(ghes_estatus_pool, (unsigned long)estatus_node,
			      node_len);
		llnode = next;
	}
}

static void ghes_print_queued_estatus(void)
{
	struct llist_node *llnode;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic *generic;
	struct acpi_hest_generic_status *estatus;
	u32 len, node_len;

	llnode = llist_del_all(&ghes_estatus_llist);
	/*
	 * The estatus entries on the list are in reverse time order,
	 * so put them back into the proper order.
	 */
	llnode = llist_reverse_order(llnode);
	while (llnode) {
		estatus_node = llist_entry(llnode, struct ghes_estatus_node,
					   llnode);
		estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
		len = cper_estatus_len(estatus);
		node_len = GHES_ESTATUS_NODE_LEN(len);
		generic = estatus_node->generic;
		ghes_print_estatus(NULL, generic, estatus);
		llnode = llnode->next;
	}
}

/* Save estatus for further processing in IRQ context */
static void __process_error(struct ghes *ghes)
{
#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	u32 len, node_len;
	struct ghes_estatus_node *estatus_node;
	struct acpi_hest_generic_status *estatus;

	if (ghes_estatus_cached(ghes->estatus))
		return;

	len = cper_estatus_len(ghes->estatus);
	node_len = GHES_ESTATUS_NODE_LEN(len);

	estatus_node = (void *)gen_pool_alloc(ghes_estatus_pool, node_len);
	if (!estatus_node)
		return;

	estatus_node->ghes = ghes;
	estatus_node->generic = ghes->generic;
	estatus = GHES_ESTATUS_FROM_NODE(estatus_node);
	memcpy(estatus, ghes->estatus, len);
	llist_add(&estatus_node->llnode, &ghes_estatus_llist);
#endif
}

static void __ghes_panic(struct ghes *ghes)
{
	oops_begin();
	ghes_print_queued_estatus();
	__ghes_print_estatus(KERN_EMERG, ghes->generic, ghes->estatus);

	/* reboot to log the error! */
	if (panic_timeout == 0)
		panic_timeout = ghes_panic_timeout;
	panic("Fatal hardware error!");
}

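/*
 * NMI handler: only one CPU at a time is allowed in (ghes_in_nmi), all
 * NMI-notified error sources are polled, a fatal error panics directly
 * from here, and anything else is queued for ghes_proc_in_irq().
 */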
static int ghes_notify_nmi(unsigned int cmd, struct pt_regs *regs)
{
	struct ghes *ghes;
	int sev, ret = NMI_DONE;

	if (!atomic_add_unless(&ghes_in_nmi, 1, 1))
		return ret;

	list_for_each_entry_rcu(ghes, &ghes_nmi, list) {
		if (ghes_read_estatus(ghes, 1)) {
			ghes_clear_estatus(ghes);
			continue;
		} else {
			ret = NMI_HANDLED;
		}

		sev = ghes_severity(ghes->estatus->error_severity);
		if (sev >= GHES_SEV_PANIC)
			__ghes_panic(ghes);

		if (!(ghes->flags & GHES_TO_CLEAR))
			continue;

		__process_error(ghes);
		ghes_clear_estatus(ghes);
	}

#ifdef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	if (ret == NMI_HANDLED)
		irq_work_queue(&ghes_proc_irq_work);
#endif
	atomic_dec(&ghes_in_nmi);
	return ret;
}

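/*
 * Estimate how much estatus pool memory to preallocate for one
 * NMI-notified error source: the block length times the number of
 * records to preallocate, clamped by the size limits defined above.
 */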
static unsigned long ghes_esource_prealloc_size(
	const struct acpi_hest_generic *generic)
{
	unsigned long block_length, prealloc_records, prealloc_size;

	block_length = min_t(unsigned long, generic->error_block_length,
			     GHES_ESTATUS_MAX_SIZE);
	prealloc_records = max_t(unsigned long,
				 generic->records_to_preallocate, 1);
	prealloc_size = min_t(unsigned long, block_length * prealloc_records,
			      GHES_ESOURCE_PREALLOC_MAX_SIZE);

	return prealloc_size;
}

static void ghes_estatus_pool_shrink(unsigned long len)
{
	ghes_estatus_pool_size_request -= PAGE_ALIGN(len);
}

static void ghes_nmi_add(struct ghes *ghes)
{
	unsigned long len;

	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_expand(len);
	mutex_lock(&ghes_list_mutex);
	if (list_empty(&ghes_nmi))
		register_nmi_handler(NMI_LOCAL, ghes_notify_nmi, 0, "ghes");
	list_add_rcu(&ghes->list, &ghes_nmi);
	mutex_unlock(&ghes_list_mutex);
}

static void ghes_nmi_remove(struct ghes *ghes)
{
	unsigned long len;

	mutex_lock(&ghes_list_mutex);
	list_del_rcu(&ghes->list);
	if (list_empty(&ghes_nmi))
		unregister_nmi_handler(NMI_LOCAL, "ghes");
	mutex_unlock(&ghes_list_mutex);
	/*
	 * To synchronize with the NMI handler, the ghes can only be
	 * freed after the NMI handler has finished.
	 */
	synchronize_rcu();
	len = ghes_esource_prealloc_size(ghes->generic);
	ghes_estatus_pool_shrink(len);
}

static void ghes_nmi_init_cxt(void)
{
	init_irq_work(&ghes_proc_irq_work, ghes_proc_in_irq);
}
#else /* CONFIG_HAVE_ACPI_APEI_NMI */
static inline void ghes_nmi_add(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to add NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_remove(struct ghes *ghes)
{
	pr_err(GHES_PFX "ID: %d, trying to remove NMI notification which is not supported!\n",
	       ghes->generic->header.source_id);
	BUG();
}

static inline void ghes_nmi_init_cxt(void)
{
}
#endif /* CONFIG_HAVE_ACPI_APEI_NMI */

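/*
 * Platform device probe: each GHES platform device carries one HEST
 * generic error source entry as its platform data, and the notification
 * type in that entry decides whether a polling timer, an IRQ handler,
 * the SCI/HED notifier or the NMI handler is hooked up for it.
 */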
static int ghes_probe(struct platform_device *ghes_dev)
{
	struct acpi_hest_generic *generic;
	struct ghes *ghes = NULL;

	int rc = -EINVAL;

	generic = *(struct acpi_hest_generic **)ghes_dev->dev.platform_data;
	if (!generic->enabled)
		return -ENODEV;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
	case ACPI_HEST_NOTIFY_EXTERNAL:
	case ACPI_HEST_NOTIFY_SCI:
		break;
	case ACPI_HEST_NOTIFY_NMI:
		if (!IS_ENABLED(CONFIG_HAVE_ACPI_APEI_NMI)) {
			pr_warn(GHES_PFX "Generic hardware error source: %d notified via NMI interrupt is not supported!\n",
				generic->header.source_id);
			goto err;
		}
		break;
	case ACPI_HEST_NOTIFY_LOCAL:
		pr_warning(GHES_PFX "Generic hardware error source: %d notified via local interrupt is not supported!\n",
			   generic->header.source_id);
		goto err;
	default:
		pr_warning(FW_WARN GHES_PFX "Unknown notification type: %u for generic hardware error source: %d\n",
			   generic->notify.type, generic->header.source_id);
		goto err;
	}

	rc = -EIO;
	if (generic->error_block_length <
	    sizeof(struct acpi_hest_generic_status)) {
		pr_warning(FW_BUG GHES_PFX "Invalid error block length: %u for generic hardware error source: %d\n",
			   generic->error_block_length,
			   generic->header.source_id);
		goto err;
	}
	ghes = ghes_new(generic);
	if (IS_ERR(ghes)) {
		rc = PTR_ERR(ghes);
		ghes = NULL;
		goto err;
	}

	rc = ghes_edac_register(ghes, &ghes_dev->dev);
	if (rc < 0)
		goto err;

	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		ghes->timer.function = ghes_poll_func;
		ghes->timer.data = (unsigned long)ghes;
		init_timer_deferrable(&ghes->timer);
		ghes_add_timer(ghes);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		/* External interrupt vector is GSI */
		rc = acpi_gsi_to_irq(generic->notify.vector, &ghes->irq);
		if (rc) {
			pr_err(GHES_PFX "Failed to map GSI to IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		rc = request_irq(ghes->irq, ghes_irq_func, 0, "GHES IRQ", ghes);
		if (rc) {
			pr_err(GHES_PFX "Failed to register IRQ for generic hardware error source: %d\n",
			       generic->header.source_id);
			goto err_edac_unreg;
		}
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		if (list_empty(&ghes_sci))
			register_acpi_hed_notifier(&ghes_notifier_sci);
		list_add_rcu(&ghes->list, &ghes_sci);
		mutex_unlock(&ghes_list_mutex);
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_add(ghes);
		break;
	default:
		BUG();
	}
	platform_set_drvdata(ghes_dev, ghes);

	return 0;
err_edac_unreg:
	ghes_edac_unregister(ghes);
err:
	if (ghes) {
		ghes_fini(ghes);
		kfree(ghes);
	}
	return rc;
}

static int ghes_remove(struct platform_device *ghes_dev)
{
	struct ghes *ghes;
	struct acpi_hest_generic *generic;

	ghes = platform_get_drvdata(ghes_dev);
	generic = ghes->generic;

	ghes->flags |= GHES_EXITING;
	switch (generic->notify.type) {
	case ACPI_HEST_NOTIFY_POLLED:
		del_timer_sync(&ghes->timer);
		break;
	case ACPI_HEST_NOTIFY_EXTERNAL:
		free_irq(ghes->irq, ghes);
		break;
	case ACPI_HEST_NOTIFY_SCI:
		mutex_lock(&ghes_list_mutex);
		list_del_rcu(&ghes->list);
		if (list_empty(&ghes_sci))
			unregister_acpi_hed_notifier(&ghes_notifier_sci);
		mutex_unlock(&ghes_list_mutex);
		synchronize_rcu();
		break;
	case ACPI_HEST_NOTIFY_NMI:
		ghes_nmi_remove(ghes);
		break;
	default:
		BUG();
		break;
	}

	ghes_fini(ghes);

	ghes_edac_unregister(ghes);

	kfree(ghes);

	platform_set_drvdata(ghes_dev, NULL);

	return 0;
}

static struct platform_driver ghes_platform_driver = {
	.driver		= {
		.name	= "GHES",
	},
	.probe		= ghes_probe,
	.remove		= ghes_remove,
};

static int __init ghes_init(void)
{
	int rc;

	if (acpi_disabled)
		return -ENODEV;

	if (hest_disable) {
		pr_info(GHES_PFX "HEST is not enabled!\n");
		return -EINVAL;
	}

	if (ghes_disable) {
		pr_info(GHES_PFX "GHES is not enabled!\n");
		return -EINVAL;
	}

	ghes_nmi_init_cxt();

	rc = ghes_ioremap_init();
	if (rc)
		goto err;

	rc = ghes_estatus_pool_init();
	if (rc)
		goto err_ioremap_exit;

	rc = ghes_estatus_pool_expand(GHES_ESTATUS_CACHE_AVG_SIZE *
				      GHES_ESTATUS_CACHE_ALLOCED_MAX);
	if (rc)
		goto err_pool_exit;

	rc = platform_driver_register(&ghes_platform_driver);
	if (rc)
		goto err_pool_exit;

	rc = apei_osc_setup();
	if (rc == 0 && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit and WHEA _OSC.\n");
	else if (rc == 0 && !osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by WHEA _OSC.\n");
	else if (rc && osc_sb_apei_support_acked)
		pr_info(GHES_PFX "APEI firmware first mode is enabled by APEI bit.\n");
	else
		pr_info(GHES_PFX "Failed to enable APEI firmware first mode.\n");

	return 0;
err_pool_exit:
	ghes_estatus_pool_exit();
err_ioremap_exit:
	ghes_ioremap_exit();
err:
	return rc;
}

static void __exit ghes_exit(void)
{
	platform_driver_unregister(&ghes_platform_driver);
	ghes_estatus_pool_exit();
	ghes_ioremap_exit();
}

module_init(ghes_init);
module_exit(ghes_exit);

MODULE_AUTHOR("Huang Ying");
MODULE_DESCRIPTION("APEI Generic Hardware Error Source support");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:GHES");