// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
/******************************************************************************
 *
 * Module Name: dsmethod - Parser/Interpreter interface - control method parsing
 *
 * Copyright (C) 2000 - 2020, Intel Corp.
 *
 *****************************************************************************/

#include <acpi/acpi.h>
#include "accommon.h"
#include "acdispat.h"
#include "acinterp.h"
#include "acnamesp.h"
#include "acparser.h"
#include "amlcode.h"
#include "acdebug.h"

#define _COMPONENT          ACPI_DISPATCHER
ACPI_MODULE_NAME("dsmethod")

/* Local prototypes */
static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op);

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_auto_serialize_method
 *
 * PARAMETERS:  node                - Namespace Node of the method
 *              obj_desc            - Method object attached to node
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Parse a control method AML to scan for control methods that
 *              need serialization due to the creation of named objects.
 *
 * NOTE: It is a bit of overkill to mark all such methods serialized, since
 * there is only a problem if the method actually blocks during execution.
 * A blocking operation is, for example, a Sleep() operation, or any access
 * to an operation region. However, it is probably not possible to easily
 * detect whether a method will block or not, so we simply mark all suspicious
 * methods as serialized.
 *
 * NOTE2: This code is essentially a generic routine for parsing a single
 * control method.
 *
 ******************************************************************************/

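/*
 * Illustration only (hypothetical ASL, not from any real table): a method
 * such as
 *
 *     Method (XXXX)
 *     {
 *         Name (BUF0, Buffer (16) {})     // Creates a named object
 *         Sleep (10)                      // May block, allowing reentry
 *     }
 *
 * is caught by the parse below and marked Serialized, because a second
 * thread entering while the first is blocked in Sleep() would fail the
 * Name() creation with AE_ALREADY_EXISTS.
 */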
acpi_status
acpi_ds_auto_serialize_method(struct acpi_namespace_node *node,
			      union acpi_operand_object *obj_desc)
{
	acpi_status status;
	union acpi_parse_object *op = NULL;
	struct acpi_walk_state *walk_state;

	ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node);

	ACPI_DEBUG_PRINT((ACPI_DB_PARSE,
			  "Method auto-serialization parse [%4.4s] %p\n",
			  acpi_ut_get_node_name(node), node));

	/* Create/Init a root op for the method parse tree */

	op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start);
	if (!op) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	acpi_ps_set_name(op, node->name.integer);
	op->common.node = node;

	/* Create and initialize a new walk state */

	walk_state =
	    acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL);
	if (!walk_state) {
		acpi_ps_free_op(op);
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	status = acpi_ds_init_aml_walk(walk_state, op, node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, NULL, 0);
	if (ACPI_FAILURE(status)) {
		acpi_ds_delete_walk_state(walk_state);
		acpi_ps_free_op(op);
		return_ACPI_STATUS(status);
	}

	walk_state->descending_callback = acpi_ds_detect_named_opcodes;

	/* Parse the method, scan for creation of named objects */

	status = acpi_ps_parse_aml(walk_state);

	acpi_ps_delete_parse_tree(op);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_detect_named_opcodes
 *
 * PARAMETERS:  walk_state      - Current state of the parse tree walk
 *              out_op          - Unused, required for parser interface
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Descending callback used during the loading of ACPI tables.
 *              Currently used to detect methods that must be marked serialized
 *              in order to avoid problems with the creation of named objects.
 *
 ******************************************************************************/

static acpi_status
acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state,
			     union acpi_parse_object **out_op)
{

	ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes);

	/* We are only interested in opcodes that create a new name */

	if (!
	    (walk_state->op_info->
	     flags & (AML_NAMED | AML_CREATE | AML_FIELD))) {
		return (AE_OK);
	}

	/*
	 * At this point, we know we have a Named object opcode.
	 * Mark the method as serialized. Later code will create a mutex for
	 * this method to enforce serialization.
	 *
	 * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the
	 * Sync Level mechanism for this method, even though it is now serialized.
	 * Otherwise, there can be conflicts with existing ASL code that actually
	 * uses sync levels.
	 */
	walk_state->method_desc->method.sync_level = 0;
	walk_state->method_desc->method.info_flags |=
	    (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL);

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "Method serialized [%4.4s] %p - [%s] (%4.4X)\n",
			  walk_state->method_node->name.ascii,
			  walk_state->method_node, walk_state->op_info->name,
			  walk_state->opcode));

	/* Abort the parse, no need to examine this method any further */

	return (AE_CTRL_TERMINATE);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_method_error
 *
 * PARAMETERS:  status          - Execution status
 *              walk_state      - Current state
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Called on method error. Invoke the global exception handler if
 *              present, dump the method data if the debugger is configured
 *
 *              Note: Allows the exception handler to change the status code
 *
 ******************************************************************************/

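/*
 * Illustrative sketch only (not part of this file): the global handler
 * invoked below is installed by the host via acpi_install_exception_handler().
 * A hypothetical handler could remap a selected failure to AE_OK so that
 * the executing method is not aborted, e.g.:
 *
 *     static acpi_status my_aml_exception_handler(acpi_status aml_status,
 *                                                 acpi_name name, u16 opcode,
 *                                                 u32 aml_offset, void *context)
 *     {
 *         return (aml_status == AE_AML_OPERAND_TYPE) ? AE_OK : aml_status;
 *     }
 *
 * The handler name and the remapped status above are examples only.
 */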
acpi_status
acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state)
{
	u32 aml_offset;
	acpi_name name = 0;

	ACPI_FUNCTION_ENTRY();

	/* Ignore AE_OK and control exception codes */

	if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) {
		return (status);
	}

	/* Invoke the global exception handler */

	if (acpi_gbl_exception_handler) {

		/* Exit the interpreter, allow handler to execute methods */

		acpi_ex_exit_interpreter();

		/*
		 * Handler can map the exception code to anything it wants, including
		 * AE_OK, in which case the executing method will not be aborted.
		 */
		aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml,
						walk_state->parser_state.
						aml_start);

		if (walk_state->method_node) {
			name = walk_state->method_node->name.integer;
		} else if (walk_state->deferred_node) {
			name = walk_state->deferred_node->name.integer;
		}

		status = acpi_gbl_exception_handler(status, name,
						    walk_state->opcode,
						    aml_offset, NULL);
		acpi_ex_enter_interpreter();
	}

	acpi_ds_clear_implicit_return(walk_state);

	if (ACPI_FAILURE(status)) {
		acpi_ds_dump_method_stack(status, walk_state, walk_state->op);

		/* Display method locals/args if debugger is present */

#ifdef ACPI_DEBUGGER
		acpi_db_dump_method_info(status, walk_state);
#endif
	}

	return (status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_create_method_mutex
 *
 * PARAMETERS:  obj_desc            - The method object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Create a mutex object for a serialized control method
 *
 ******************************************************************************/

static acpi_status
acpi_ds_create_method_mutex(union acpi_operand_object *method_desc)
{
	union acpi_operand_object *mutex_desc;
	acpi_status status;

	ACPI_FUNCTION_TRACE(ds_create_method_mutex);

	/* Create the new mutex object */

	mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX);
	if (!mutex_desc) {
		return_ACPI_STATUS(AE_NO_MEMORY);
	}

	/* Create the actual OS Mutex */

	status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex);
	if (ACPI_FAILURE(status)) {
		acpi_ut_delete_object_desc(mutex_desc);
		return_ACPI_STATUS(status);
	}

	mutex_desc->mutex.sync_level = method_desc->method.sync_level;
	method_desc->method.mutex = mutex_desc;
	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_begin_method_execution
 *
 * PARAMETERS:  method_node         - Node of the method
 *              obj_desc            - The method object
 *              walk_state          - current state, NULL if not yet executing
 *                                    a method.
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Prepare a method for execution. Parses the method if necessary,
 *              increments the thread count, and waits at the method semaphore
 *              for clearance to execute.
 *
 ******************************************************************************/

acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
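		 *
		 * For illustration (hypothetical values): a thread whose
		 * current_sync_level is 4 (e.g. it already holds a SyncLevel-4
		 * mutex) that invokes a serialized method whose mutex has
		 * SyncLevel 2 fails this check with AE_AML_MUTEX_ORDER.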
		 */
		if (walk_state &&
		    (!(obj_desc->method.
		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
		    && (walk_state->thread->current_sync_level >
			obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call.
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/* On error, must release the method mutex (if present) */

	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_call_control_method
 *
 * PARAMETERS:  thread              - Info for this thread
 *              this_walk_state     - Current walk state
 *              op                  - Current Op to be walked
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Transfer execution to a called control method
 *
 ******************************************************************************/

acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	next_walk_state->method_nesting_depth =
	    this_walk_state->method_nesting_depth + 1;

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	this_walk_state->method_pathname =
	    acpi_ns_get_normalized_pathname(method_node, TRUE);
	this_walk_state->method_is_nested = TRUE;

	/* Optional object evaluation log */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
			      "%-26s: %*s%s\n", " Nested method call",
			      next_walk_state->method_nesting_depth * 3, " ",
			      &this_walk_state->method_pathname[1]));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_restart_control_method
 *
 * PARAMETERS:  walk_state          - State for preempted method (caller)
 *              return_desc         - Return value from the called method
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Restart a method that was preempted by another (nested) method
 *              invocation. Handle the return value (if any) from the callee.
 *
 ******************************************************************************/

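/*
 * Illustration only (hypothetical ASL) of the "implicit return" case that
 * this function handles: a callee such as
 *
 *     Method (XXX2)
 *     {
 *         Store (0x1234, Local0)          // Last operation, no Return ()
 *     }
 *
 * has no explicit return value, yet some existing AML expects the result of
 * the last operation (0x1234 here) to be visible to the caller. When that
 * implicit value will not be used, it is simply deleted below.
 */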
acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ds_terminate_control_method
 *
 * PARAMETERS:  method_desc         - Method object
 *              walk_state          - State associated with the method
 *
 * RETURN:      None
 *
 * DESCRIPTION: Terminate a control method. Delete everything that the method
 *              created, delete all locals and arguments, and delete the parse
 *              tree if requested.
 *
 * MUTEX:       Interpreter is locked
 *
 ******************************************************************************/

void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want to make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Work around this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}