/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 *****************************************************************************/

/*
 * Copyright (C) 2000 - 2016, Intel Corp.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    substantially similar to the "NO WARRANTY" disclaimer below
 *    ("Disclaimer") and any redistribution must be conditioned upon
 *    including a substantially similar Disclaimer requirement for further
 *    binary redistribution.
 * 3. Neither the names of the above-listed copyright holders nor the names
 *    of any contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * Alternatively, this software may be distributed under the terms of the
 * GNU General Public License ("GPL") version 2 as published by the Free
 * Software Foundation.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op);

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_auto_serialize_method
 *
 * PARAMETERS:  node                - Namespace Node of the method
 *              obj_desc            - Method object attached to node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse a control method's AML to determine whether it creates
 *              any named objects and therefore must be marked serialized.
 *
 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 * there is only a problem if the method actually blocks during execution.
 * A blocking operation is, for example, a Sleep() operation, or any access
 * to an operation region. However, it is probably not possible to easily
 * detect whether a method will block or not, so we simply mark all suspicious
 * methods as serialized.
 *
 * NOTE2: This code is essentially a generic routine for parsing a single
 * control method.
 *
 ******************************************************************************/

acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}
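
/*
 * Illustrative call path (a sketch, not code from this file): when the global
 * acpi_gbl_auto_serialize_methods option is enabled, the table-load/namespace
 * initialization code invokes the routine above once per control method it
 * finds, roughly like this:
 *
 *     if (acpi_gbl_auto_serialize_methods &&
 *         !(obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED)) {
 *             (void)acpi_ds_auto_serialize_method(node, obj_desc);
 *     }
 *
 * The exact caller and surrounding checks live elsewhere in the dispatcher
 * and may differ between ACPICA versions.
 */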

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state          - Current state of the parse tree walk
 *              out_op              - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/

static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/* We are only interested in opcodes that create a new name */

	if (!(walk_state->op_info->flags &
	      (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}
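
	/*
	 * Illustrative (non-exhaustive) examples of opcodes that carry these
	 * flags: Name and OperationRegion (AML_NAMED), CreateDWordField and
	 * the other create_*_field operators (AML_CREATE), and Field/
	 * IndexField/BankField definitions (AML_FIELD).
	 */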

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note: the ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will
	 * ignore the Sync Level mechanism for this method, even though it is
	 * now serialized. Otherwise, there can be conflicts with existing ASL
	 * code that actually uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status              - Execution status
 *              walk_state          - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invokes the global exception handler
 *              if present, and dumps the method data if the debugger is
 *              configured.
 *
 * Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	u32 aml_offset;

	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
						walk_state->parser_state.
						aml_start);

		status = acpi_gbl_exception_handler(status,
						    walk_state->method_node ?
						    walk_state->method_node->
						    name.integer : 0,
						    walk_state->opcode,
						    aml_offset, NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

	if (ACPI_FAILURE(status)) {
		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

		/* Display method locals/args if debugger is present */

#ifdef ACPI_DEBUGGER
		acpi_db_dump_method_info(status, walk_state);
#endif
	}

	return (status);
}
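
/*
 * Sketch of how acpi_gbl_exception_handler is typically populated (illustrative
 * only; see acpi_install_exception_handler() in the ACPICA external interfaces
 * for the authoritative signature):
 *
 *     static acpi_status my_aml_exception_handler(acpi_status aml_status,
 *                                                 acpi_name name, u16 opcode,
 *                                                 u32 aml_offset, void *context)
 *     {
 *             // Returning AE_OK here tells the interpreter to continue
 *             // executing the method instead of aborting it.
 *             return (aml_status);
 *     }
 *
 *     acpi_install_exception_handler(my_aml_exception_handler);
 */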

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  obj_desc            - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - Current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    !(obj_desc->method.info_flags &
		      ACPI_METHOD_IGNORE_SYNC_LEVEL) &&
		    (walk_state->thread->current_sync_level >
		     obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}
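
		/*
		 * Concrete example of the check above (illustrative numbers):
		 * if the calling thread is currently at SyncLevel 5 because it
		 * holds a Mutex declared with SyncLevel 5, it may not enter a
		 * Serialized method whose declared SyncLevel is 2; allowing
		 * that would violate the ACPI mutex-ordering rules that this
		 * SyncLevel mechanism uses to prevent deadlocks.
		 */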

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
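
/*
 * Usage note (descriptive, not code from this file): every successful call to
 * acpi_ds_begin_method_execution() is expected to be balanced by a matching
 * call to acpi_ds_terminate_control_method() below, which decrements
 * thread_count, releases the serialization mutex, and frees the owner_id once
 * the last thread has exited the method.
 */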

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}
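
/*
 * Note on control flow (descriptive, stated here as an aid rather than as code
 * from this file): acpi_ds_create_walk_state() pushes the new walk state onto
 * the current thread, so after this routine returns, the main parse/execute
 * loop finds the callee's walk state on the thread and begins executing the
 * called method; the caller's walk state is resumed later via
 * acpi_ds_restart_control_method() below. This describes the typical ACPICA
 * dispatch flow and may differ in detail between versions.
 */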

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
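
/*
 * Implicit-return example (illustrative ASL, assuming the interpreter option
 * that enables implicit returns is turned on):
 *
 *     Method (MTH1) { Store (0x1234, Local0) }   // no explicit Return
 *     Store (MTH1 (), Local1)                    // Local1 receives 0x1234
 *
 * The last value computed by MTH1 becomes its return value, matching the
 * behavior some BIOS AML expects even though ASL does not define it.
 */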

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want to make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Work around this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}
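
		/*
		 * Illustrative ASL pattern that can trigger the workaround
		 * above (a sketch of the failure mode described in the
		 * preceding comments, not code from this file):
		 *
		 *     Method (MTH2)          // NotSerialized by default
		 *     {
		 *         Name (TMP1, 0)     // creates a named object
		 *         Sleep (10)         // blocks; a second thread enters
		 *     }
		 *
		 * The second thread's Name (TMP1, ...) fails with
		 * AE_ALREADY_EXISTS, the method is flagged "serialized pending"
		 * elsewhere in the dispatcher, and the code above makes the
		 * serialization permanent once the last thread exits.
		 */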

		/* No more threads, we can free the owner_id */

		if (!(method_desc->method.
		      info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}