/******************************************************************************
 *
 * Module Name: evgpe - General Purpose Event handling and dispatch
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2008, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_set_gpe_type
 *
 * PARAMETERS:  gpe_event_info          - GPE to set
 *              Type                    - New type
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Sets the new type for the GPE (wake, run, or wake/run)
 *
 ******************************************************************************/

acpi_status
acpi_ev_set_gpe_type(struct acpi_gpe_event_info *gpe_event_info, u8 type)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_set_gpe_type);

	/* Validate type and update register enable masks */

	switch (type) {
	case ACPI_GPE_TYPE_WAKE:
	case ACPI_GPE_TYPE_RUNTIME:
	case ACPI_GPE_TYPE_WAKE_RUN:
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	/* Disable the GPE if currently enabled */

	status = acpi_ev_disable_gpe(gpe_event_info);

	/* Type was validated above */

	gpe_event_info->flags &= ~ACPI_GPE_TYPE_MASK;	/* Clear type bits */
	gpe_event_info->flags |= type;	/* Insert type */
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_masks
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *              Type                    - What to do: ACPI_GPE_DISABLE or
 *                                        ACPI_GPE_ENABLE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable masks based on the GPE type
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_masks(struct acpi_gpe_event_info *gpe_event_info,
				u8 type)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u8 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_masks);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}
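	/*
	 * Each GPE register covers 8 GPEs; the bit for this GPE is its offset
	 * from the register's base GPE number (e.g. GPE 0x13 in a register
	 * based at 0x10 uses bit 3).
	 */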
	register_bit = (u8)
	    (1 <<
	     (gpe_event_info->gpe_number - gpe_register_info->base_gpe_number));

	/* 1) Disable case. Simply clear all enable bits */

	if (type == ACPI_GPE_DISABLE) {
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
			       register_bit);
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
		return_ACPI_STATUS(AE_OK);
	}

	/* 2) Enable case. Set/Clear the appropriate enable bits */

	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
	case ACPI_GPE_TYPE_WAKE:
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);
		break;

	case ACPI_GPE_TYPE_RUNTIME:
		ACPI_CLEAR_BIT(gpe_register_info->enable_for_wake,
			       register_bit);
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
		break;

	case ACPI_GPE_TYPE_WAKE_RUN:
		ACPI_SET_BIT(gpe_register_info->enable_for_wake, register_bit);
		ACPI_SET_BIT(gpe_register_info->enable_for_run, register_bit);
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *              write_to_hardware       - Enable now, or just mark data structs
 *                                        (WAKE GPEs should be deferred)
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Enable a GPE based on the GPE type
 *
 ******************************************************************************/

acpi_status
acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info,
		   u8 write_to_hardware)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/* Make sure HW enable masks are updated */

	status =
	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_ENABLE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Mark wake-enabled or HW enable, or both */

	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
	case ACPI_GPE_TYPE_WAKE:

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
		break;

	case ACPI_GPE_TYPE_WAKE_RUN:

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

		/*lint -fallthrough */

	case ACPI_GPE_TYPE_RUNTIME:

		ACPI_SET_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);

		if (write_to_hardware) {

			/* Clear the GPE (of stale events), then enable it */

			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Enable the requested runtime GPE */

			status = acpi_hw_write_gpe_enable_reg(gpe_event_info);
		}
		break;

	default:
		return_ACPI_STATUS(AE_BAD_PARAMETER);
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_disable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to disable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Disable a GPE based on the GPE type
 *
 ******************************************************************************/

acpi_status acpi_ev_disable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_disable_gpe);

	/* Make sure HW enable masks are updated */

	status =
	    acpi_ev_update_gpe_enable_masks(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Clear the appropriate enabled flags for this GPE */

	switch (gpe_event_info->flags & ACPI_GPE_TYPE_MASK) {
	case ACPI_GPE_TYPE_WAKE:
		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);
		break;

	case ACPI_GPE_TYPE_WAKE_RUN:
		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_WAKE_ENABLED);

		/* fallthrough */

	case ACPI_GPE_TYPE_RUNTIME:

		/* Disable the requested runtime GPE */

		ACPI_CLEAR_BIT(gpe_event_info->flags, ACPI_GPE_RUN_ENABLED);
		break;

	default:
		break;
	}

	/*
	 * Even if we don't know the GPE type, make sure that we always
	 * disable it. low_disable_gpe will just clear the enable bit for this
	 * GPE and write it. It will not write out the current GPE enable mask,
	 * since this may inadvertently enable GPEs too early, if a rogue GPE has
	 * come in during ACPICA initialization - possibly as a result of AML or
	 * other code that has enabled the GPE.
	 */
	status = acpi_hw_low_disable_gpe(gpe_event_info);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_block_info *gpe_block;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_block means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_block = acpi_gbl_gpe_fadt_blocks[i];
			if (gpe_block) {
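				/*
				 * Each block covers (register_count * 8) GPEs
				 * starting at block_base_number. Return the
				 * event_info entry if gpe_number falls within
				 * this block's range.
				 */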
				if ((gpe_number >= gpe_block->block_base_number)
				    && (gpe_number <
					gpe_block->block_base_number +
					(gpe_block->register_count * 8))) {
					return (&gpe_block->
						event_info[gpe_number -
							   gpe_block->
							   block_base_number]);
				}
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc = acpi_ns_get_attached_object((struct acpi_namespace_node *)
					       gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	gpe_block = obj_desc->device.gpe_block;

	if ((gpe_number >= gpe_block->block_base_number) &&
	    (gpe_number <
	     gpe_block->block_base_number + (gpe_block->register_count * 8))) {
		return (&gpe_block->
			event_info[gpe_number - gpe_block->block_base_number]);
	}

	return (NULL);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
 *
 ******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/* Read the Status Register */

			status =
			    acpi_read(&status_reg,
				      &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_read(&enable_reg,
				      &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read GPE Register at GPE%X: Status=%02X, Enable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  status_reg, enable_reg));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
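					/*
					 * event_info is a flat array: register i,
					 * bit j maps to index
					 * (i * ACPI_GPE_REGISTER_WIDTH) + j. The
					 * second argument is the absolute GPE
					 * number (register base plus bit position).
					 */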
					int_status |=
					    acpi_ev_gpe_dispatch(&gpe_block->
						event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/
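/* Forward declaration: re-enables the GPE after the method and any Notify handlers have run */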
static void acpi_ev_asynch_enable_gpe(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = (void *)context;
	acpi_status status;
	struct acpi_gpe_event_info local_gpe_event_info;
	struct acpi_evaluate_info *info;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		return_VOID;
	}

	/* Set the GPE flags for return to enabled state */

	(void)acpi_ev_enable_gpe(gpe_event_info, FALSE);

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
	 */
	ACPI_MEMCPY(&local_gpe_event_info, gpe_event_info,
		    sizeof(struct acpi_gpe_event_info));

	status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		return_VOID;
	}

	/*
	 * Must check for control method type dispatch one more time to avoid a
	 * race with ev_gpe_install_handler
	 */
	if ((local_gpe_event_info.flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_METHOD) {

		/* Allocate the evaluation information block */

		info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
		if (!info) {
			status = AE_NO_MEMORY;
		} else {
			/*
			 * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the _Lxx/_Exx
			 * control method that corresponds to this GPE
			 */
			info->prefix_node =
			    local_gpe_event_info.dispatch.method_node;
			info->flags = ACPI_IGNORE_RETURN_VALUE;

			status = acpi_ns_evaluate(info);
			ACPI_FREE(info);
		}

		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"while evaluating GPE method [%4.4s]",
					acpi_ut_get_node_name
					(local_gpe_event_info.dispatch.
					 method_node)));
		}
	}
	/* Defer enabling of GPE until all notify handlers are done */
	acpi_os_execute(OSL_NOTIFY_HANDLER, acpi_ev_asynch_enable_gpe,
				gpe_event_info);
	return_VOID;
}

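/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Clear the status bit of a level-triggered GPE and write its
 *              enable register. Queued by acpi_ev_asynch_execute_gpe_method
 *              so the GPE is re-enabled only after the method and any Notify
 *              handlers have completed.
 *
 ******************************************************************************/
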
static void acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after handling
		 * the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return_VOID;
		}
	}

	/* Enable this GPE */
	(void)acpi_hw_write_gpe_enable_reg(gpe_event_info);
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_event_info  - Info for this GPE
 *              gpe_number      - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

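	/* Count this event (OS-maintained GPE statistics) */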
	acpi_os_gpe_count(gpe_number);

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Dispatch the GPE to either an installed handler, or the control method
	 * associated with this GPE (_Lxx or _Exx). If a handler exists, we invoke
	 * it and do not attempt to run the method. If there is neither a handler
	 * nor a method, we disable this GPE to prevent further such pointless
	 * events from firing.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/*
		 * Invoke the installed handler (at interrupt level)
		 * Ignore return status for now.
		 * TBD: leave GPE disabled on error?
		 */
		(void)gpe_event_info->dispatch.handler->address(gpe_event_info->
								dispatch.
								handler->
								context);

		/* It is now safe to clear level-triggered events. */

		if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
		    ACPI_GPE_LEVEL_TRIGGERED) {
			status = acpi_hw_clear_gpe(gpe_event_info);
			if (ACPI_FAILURE(status)) {
				ACPI_EXCEPTION((AE_INFO, status,
						"Unable to clear GPE[%2X]",
						gpe_number));
				return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
			}
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:

		/*
		 * Disable the GPE, so it doesn't keep firing before the method has a
		 * chance to run (it runs asynchronously with interrupts enabled).
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}

		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE[%2X] - event disabled",
					gpe_number));
		}
		break;

	default:

		/* No handler or method to run! */

		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE[%2X], disabling event",
			    gpe_number));

		/*
		 * Disable the GPE. The GPE will remain disabled until the ACPICA
		 * Core Subsystem is restarted, or a handler is installed.
		 */
		status = acpi_ev_disable_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to disable GPE[%2X]",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}