/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
					       u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't call ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_osi_setup_late(void);

/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22,
 * Linux responded TRUE upon a BIOS OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this
 * and put OSI(Linux) in their example code, quickly exposing
 * this string as ill-conceived and opening the door to
 * an unbounded number of BIOS incompatibilities.
 *
 * For example, OSI(Linux) was used on resume to re-POST a
 * video card on one system, because Linux at that time
 * could not do a speedy restore in its native driver.
 * But then, upon gaining quick native restore capability,
 * Linux had no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.
 * On another system, the BIOS writer used OSI(Linux)
 * to infer native OS support for IPMI!  On other systems,
 * OSI(Linux) simply got in the way of Linux claiming to
 * be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of
 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * Linux will complain on the console when it sees it, and return FALSE.
 * To get Linux to return TRUE for your system will require
 * a kernel source update to add a DMI entry,
 * or booting with "acpi_osi=Linux".
 */
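/*
 * Illustrative sketch (hypothetical, compiled out, assumes <linux/dmi.h>):
 * the kind of DMI quirk that would make Linux answer TRUE to _OSI(Linux)
 * again for one machine. Real entries of this pattern live in the ACPI
 * blacklist tables; the ident and match strings below are made up.
 */
#if 0
static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
{
	acpi_dmi_osi_linux(1, d);	/* enable _OSI(Linux) for this box */
	return 0;
}

static const struct dmi_system_id osi_linux_dmi_table[] __initconst = {
	{
	 .callback = dmi_enable_osi_linux,
	 .ident = "Example Vendor Model X",	/* hypothetical */
	 .matches = {
		     DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
		     DMI_MATCH(DMI_PRODUCT_NAME, "Model X"),
		    },
	 },
	{}
};
#endif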

static struct osi_linux {
	unsigned int	enable:1;
	unsigned int	dmi:1;
	unsigned int	cmdline:1;
	u8		default_disabling;
} osi_linux = {0, 0, 0, 0};

static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
	if (!strcmp("Linux", interface)) {

		printk_once(KERN_NOTICE FW_BUG PREFIX
			    "BIOS _OSI(Linux) query %s%s\n",
			    osi_linux.enable ? "honored" : "ignored",
			    osi_linux.cmdline ? " via cmdline" :
			    osi_linux.dmi ? " via DMI" : "");
	}

	if (!strcmp("Darwin", interface)) {
		/*
		 * Apple firmware will behave poorly if it receives positive
		 * answers to "Darwin" and any other OS. Respond positively
		 * to Darwin and then disable all other vendor strings.
		 */
		acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
		supported = ACPI_UINT32_MAX;
	}

	return supported;
}

static void __init acpi_request_region(struct acpi_generic_address *gas,
				       unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;
	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		printk(KERN_CONT "%s", buffer);
	}
#else
	printk(KERN_CONT "%s", buffer);
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
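/*
 * "acpi_rsdp=<physical address>" is mainly useful for kexec/kdump: it lets
 * the second kernel reuse the RSDP address discovered by the first kernel
 * instead of searching for it again.
 */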
static int __init setup_acpi_rsdp(char *arg)
{
	if (kstrtoul(arg, 16, &acpi_rsdp))
		return -EINVAL;
	return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
#ifdef CONFIG_KEXEC
	if (acpi_rsdp)
		return acpi_rsdp;
#endif

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		else {
			printk(KERN_ERR PREFIX
			       "System description tables not found\n");
			return 0;
		}
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_physical_address pa = 0;

		acpi_find_root_pointer(&pa);
		return pa;
	}

	return 0;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list)
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_gbl_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(pg_off, pg_sz);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = virt;
	map->phys = pg_off;
	map->size = pg_sz;
	map->refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static unsigned long acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	unsigned long refcount = --map->refcount;

	if (!refcount)
		list_del_rcu(&map->list);
	return refcount;
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
	synchronize_rcu_expedited();
	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;
	unsigned long refcount;

	if (!acpi_gbl_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	refcount = acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	if (!refcount)
		acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
	if (!acpi_gbl_permanent_mmap)
		__acpi_unmap_table(virt, size);
}

int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	void __iomem *virt;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return 0;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return -EINVAL;

	virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
	if (!virt)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;
	unsigned long refcount;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	refcount = acpi_os_drop_map_ref(map);
	mutex_unlock(&acpi_ioremap_lock);

	if (!refcount)
		acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    char **new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;
	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
	u8 sum = 0;
	u8 *end = buffer + length;

	while (buffer < end)
		sum = (u8) (sum + *(buffer++));
	return sum;
}

/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
	ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
	ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
	ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
	ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
	ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
	ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
	ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
	ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
	ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)

void __init acpi_initrd_override(void *data, size_t size)
{
	int sig, no, table_nr = 0, total_offset = 0;
	long offset = 0;
	struct acpi_table_header *table;
	char cpio_path[32] = "kernel/firmware/acpi/";
	struct cpio_data file;

	if (data == NULL || size == 0)
		return;

	for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
		file = find_cpio_data(cpio_path, data, size, &offset);
		if (!file.data)
			break;

		data += offset;
		size -= offset;

		if (file.size < sizeof(struct acpi_table_header)) {
			pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		table = file.data;

		for (sig = 0; table_sigs[sig]; sig++)
			if (!memcmp(table->signature, table_sigs[sig], 4))
				break;

		if (!table_sigs[sig]) {
			pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (file.size != table->length) {
			pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
				cpio_path, file.name);
			continue;
		}
		if (acpi_table_checksum(file.data, table->length)) {
			pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
				cpio_path, file.name);
			continue;
		}

		pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
			table->signature, cpio_path, file.name, table->length);

		all_tables_size += table->length;
		acpi_initrd_files[table_nr].data = file.data;
		acpi_initrd_files[table_nr].size = file.size;
		table_nr++;
	}
	if (table_nr == 0)
		return;

	acpi_tables_addr =
		memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
				       all_tables_size, PAGE_SIZE);
	if (!acpi_tables_addr) {
		WARN_ON(1);
		return;
	}
	/*
	 * Calling e820_add_reserve alone is not enough: the tables become
	 * invalid later (the memory gets reused).  memblock_reserve works
	 * as expected and the tables won't get modified, but on x86 that
	 * alone is still not enough, because ioremap (used by
	 * acpi_os_map_memory) will later complain that the pages to be
	 * mapped are not marked "reserved".  Doing both memblock_reserve
	 * and e820_add_region (via arch_reserve_mem_area) works fine.
	 */
	memblock_reserve(acpi_tables_addr, all_tables_size);
	arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

	/*
	 * early_ioremap() can only remap 256 KB at a time.  Mapping all
	 * tables at once would hit that limit, so map and copy one chunk
	 * at a time, the same way relocate_initrd() does.
	 */
	for (no = 0; no < table_nr; no++) {
		unsigned char *src_p = acpi_initrd_files[no].data;
		phys_addr_t size = acpi_initrd_files[no].size;
		phys_addr_t dest_addr = acpi_tables_addr + total_offset;
		phys_addr_t slop, clen;
		char *dest_p;

		total_offset += size;

		while (size) {
			slop = dest_addr & ~PAGE_MASK;
			clen = size;
			if (clen > MAP_CHUNK_SIZE - slop)
				clen = MAP_CHUNK_SIZE - slop;
			dest_p = early_ioremap(dest_addr & PAGE_MASK,
					       clen + slop);
			memcpy(dest_p + slop, src_p, clen);
			early_iounmap(dest_p, clen + slop);
			src_p += clen;
			dest_addr += clen;
			size -= clen;
		}
	}
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */

static void acpi_table_taint(struct acpi_table_header *table)
{
	pr_warn(PREFIX
		"Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
		table->signature, table->oem_table_id);
	add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}

acpi_status
acpi_os_table_override(struct acpi_table_header * existing_table,
		       struct acpi_table_header ** new_table)
{
	if (!existing_table || !new_table)
		return AE_BAD_PARAMETER;

	*new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
	if (strncmp(existing_table->signature, "DSDT", 4) == 0)
		*new_table = (struct acpi_table_header *)AmlCode;
#endif
	if (*new_table != NULL)
		acpi_table_taint(existing_table);
	return AE_OK;
}

acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
				acpi_physical_address *address,
				u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
	*table_length = 0;
	*address = 0;
	return AE_OK;
#else
	int table_offset = 0;
	struct acpi_table_header *table;

	*table_length = 0;
	*address = 0;

	if (!acpi_tables_addr)
		return AE_OK;

	do {
		if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
			WARN_ON(1);
			return AE_OK;
		}

		table = acpi_os_map_memory(acpi_tables_addr + table_offset,
					   ACPI_HEADER_SIZE);

		if (table_offset + table->length > all_tables_size) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			WARN_ON(1);
			return AE_OK;
		}

		table_offset += table->length;

		if (memcmp(existing_table->signature, table->signature, 4)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			continue;
		}

		/* Only override tables with matching oem id */
		if (memcmp(table->oem_table_id, existing_table->oem_table_id,
			   ACPI_OEM_TABLE_ID_SIZE)) {
			acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
			continue;
		}

		table_offset -= table->length;
		*table_length = table->length;
		acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
		*address = acpi_tables_addr + table_offset;
		break;
	} while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

	if (*address != 0)
		acpi_table_taint(existing_table);
	return AE_OK;
#endif
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	} else {
		acpi_irq_not_handled++;
		return IRQ_NONE;
	}
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

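/*
 * The AML Stall operand must busy-wait: it may be executed with interrupts
 * disabled, so unlike acpi_os_sleep() above we cannot schedule.  Long stalls
 * are chopped into 1 ms udelay() slices, poking the NMI watchdog in between
 * so they do not trigger false hard-lockup reports.
 */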
void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
	u64 time_ns = ktime_to_ns(ktime_get());
	do_div(time_ns, 100);
	return time_ns;
}
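/*
 * Minimal usage sketch (hypothetical, compiled out): timing an interval in
 * the 100ns units returned above.  10,000 ticks of 100ns make 1 ms.
 */
#if 0
#include <linux/math64.h>

static u64 acpi_osl_example_elapsed_ms(void)
{
	u64 start = acpi_os_get_timer();	/* 100ns ticks */

	acpi_os_sleep(5);			/* sleep for roughly 5 ms */
	return div_u64(acpi_os_get_timer() - start, 10000);
}
#endif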

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type            - Type of the callback
 *              Function        - Function to be executed
 *              Context         - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;
	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	}

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);

	if (!ret) {
		printk(KERN_ERR PREFIX
		       "Call to queue_work() failed.\n");
		status = AE_ERROR;
		kfree(dpc);
	}
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
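/*
 * Hypothetical usage sketch (compiled out): deferring a callback through
 * acpi_os_execute().  A non-notify type such as OSL_GPE_HANDLER lands on
 * kacpid_wq; the dpc bookkeeping is freed by acpi_os_execute_deferred()
 * itself, so the caller hands over only its own context pointer.
 */
#if 0
static void example_deferred_fn(void *context)
{
	pr_info("deferred ACPI work ran, context=%p\n", context);
}

static acpi_status example_schedule(void)
{
	return acpi_os_execute(OSL_GPE_HANDLER, example_deferred_fn, NULL);
}
#endif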

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc.,
	 * because the hotplug code may call driver .remove() functions,
	 * which may invoke flush_scheduled_work()/acpi_os_wait_events_complete()
	 * to flush these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#endif

	return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * The ACPI spec says to treat it as a NOP unless you are
		 * debugging.  So if/when we integrate the AML debugger into
		 * the kernel debugger, its hook will go here.  Until then it
		 * is not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;
		else
			break;
	}
	*p = 0;

	return 1;

}

__setup("acpi_os_name=", acpi_os_name_setup);

#define	OSI_STRING_LENGTH_MAX 64	/* arbitrary */
#define	OSI_STRING_ENTRIES_MAX 16	/* arbitrary */

struct osi_setup_entry {
	char string[OSI_STRING_LENGTH_MAX];
	bool enable;
};

static struct osi_setup_entry
		osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
	{"Module Device", true},
	{"Processor Device", true},
	{"3.0 _SCP Extensions", true},
	{"Processor Aggregator Device", true},
};

void __init acpi_osi_setup(char *str)
{
	struct osi_setup_entry *osi;
	bool enable = true;
	int i;

	if (!acpi_gbl_create_osi_method)
		return;

	if (str == NULL || *str == '\0') {
		printk(KERN_INFO PREFIX "_OSI method disabled\n");
		acpi_gbl_create_osi_method = FALSE;
		return;
	}

	if (*str == '!') {
		str++;
		if (*str == '\0') {
			/* Do not override acpi_osi=!* */
			if (!osi_linux.default_disabling)
				osi_linux.default_disabling =
					ACPI_DISABLE_ALL_VENDOR_STRINGS;
			return;
		} else if (*str == '*') {
			osi_linux.default_disabling = ACPI_DISABLE_ALL_STRINGS;
			for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
				osi = &osi_setup_entries[i];
				osi->enable = false;
			}
			return;
		}
		enable = false;
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		if (!strcmp(osi->string, str)) {
			osi->enable = enable;
			break;
		} else if (osi->string[0] == '\0') {
			osi->enable = enable;
			strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
			break;
		}
	}
}

static void __init set_osi_linux(unsigned int enable)
{
	if (osi_linux.enable != enable)
		osi_linux.enable = enable;

	if (osi_linux.enable)
		acpi_osi_setup("Linux");
	else
		acpi_osi_setup("!Linux");

	return;
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
	osi_linux.cmdline = 1;	/* cmdline set the default and override DMI */
	osi_linux.dmi = 0;
	set_osi_linux(enable);

	return;
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
	printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

	if (enable == -1)
		return;

	osi_linux.dmi = 1;	/* DMI knows that this box asks OSI(Linux) */
	set_osi_linux(enable);

	return;
}

/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
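/*
 * Kernel command line examples (the quoted interface strings below are
 * illustrative):
 *
 *   acpi_osi=			disable the _OSI method entirely
 *   acpi_osi=!			disable all built-in OS vendor strings
 *   acpi_osi=!*		disable all strings, vendor and feature groups
 *   acpi_osi="!Windows 2012"	stop answering TRUE to _OSI("Windows 2012")
 *   acpi_osi="MyString"	additionally answer TRUE to _OSI("MyString")
 */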
static void __init acpi_osi_setup_late(void)
{
	struct osi_setup_entry *osi;
	char *str;
	int i;
	acpi_status status;

	if (osi_linux.default_disabling) {
		status = acpi_update_interfaces(osi_linux.default_disabling);

		if (ACPI_SUCCESS(status))
			printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors%s\n",
				osi_linux.default_disabling ==
				ACPI_DISABLE_ALL_STRINGS ?
				" and feature groups" : "");
	}

	for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
		osi = &osi_setup_entries[i];
		str = osi->string;

		if (*str == '\0')
			break;
		if (osi->enable) {
			status = acpi_install_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
		} else {
			status = acpi_remove_interface(str);

			if (ACPI_SUCCESS(status))
				printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
		}
	}
}

static int __init osi_setup(char *str)
{
	if (str && !strcmp("Linux", str))
		acpi_cmdline_osi_linux(1);
	else if (str && !strcmp("!Linux", str))
		acpi_cmdline_osi_linux(0);
	else
		acpi_osi_setup(str);

	return 1;
}

__setup("acpi_osi=", osi_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a further driver trying to access the resources will not load
 *   - lax (1)
 *     -> a further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
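/*
 * Hypothetical driver-side sketch (compiled out): a native driver with a
 * fixed I/O range asks ACPI whether an OperationRegion claims it before
 * grabbing the range.  The 0x295 port and the "example-hwmon" name are
 * made up for illustration.
 */
#if 0
static int example_probe_io(void)
{
	if (acpi_check_region(0x295, 8, "example-hwmon"))
		return -EBUSY;	/* conflict under acpi_enforce_resources=strict */

	if (!request_region(0x295, 8, "example-hwmon"))
		return -EBUSY;	/* someone else already owns the range */

	return 0;
}
#endif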

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

bool acpi_osi_is_win8(void)
{
	return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;
	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		int rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_install_interface_handler(acpi_osi_handler);
	acpi_osi_setup_late();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;
	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
					   u32 val_b)
{
	int rc = 0;
	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_SKIP;

	return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
				        u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}