Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 1 | // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0 |
| 2 | /****************************************************************************** |
| 3 | * |
| 4 | * Module Name: dsmethod - Parser/Interpreter interface - control method parsing |
| 5 | * |
David Brazdil | 0f672f6 | 2019-12-10 10:32:29 +0000 | [diff] [blame^] | 6 | * Copyright (C) 2000 - 2019, Intel Corp. |
Andrew Scull | b4b6d4a | 2019-01-02 15:54:55 +0000 | [diff] [blame] | 7 | * |
| 8 | *****************************************************************************/ |
| 9 | |
| 10 | #include <acpi/acpi.h> |
| 11 | #include "accommon.h" |
| 12 | #include "acdispat.h" |
| 13 | #include "acinterp.h" |
| 14 | #include "acnamesp.h" |
| 15 | #include "acparser.h" |
| 16 | #include "amlcode.h" |
| 17 | #include "acdebug.h" |
| 18 | |
| 19 | #define _COMPONENT ACPI_DISPATCHER |
| 20 | ACPI_MODULE_NAME("dsmethod") |
| 21 | |
| 22 | /* Local prototypes */ |
| 23 | static acpi_status |
| 24 | acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state, |
| 25 | union acpi_parse_object **out_op); |
| 26 | |
| 27 | static acpi_status |
| 28 | acpi_ds_create_method_mutex(union acpi_operand_object *method_desc); |
| 29 | |
| 30 | /******************************************************************************* |
| 31 | * |
| 32 | * FUNCTION: acpi_ds_auto_serialize_method |
| 33 | * |
| 34 | * PARAMETERS: node - Namespace Node of the method |
| 35 | * obj_desc - Method object attached to node |
| 36 | * |
| 37 | * RETURN: Status |
| 38 | * |
| 39 | * DESCRIPTION: Parse a control method AML to scan for control methods that |
| 40 | * need serialization due to the creation of named objects. |
| 41 | * |
| 42 | * NOTE: It is a bit of overkill to mark all such methods serialized, since |
| 43 | * there is only a problem if the method actually blocks during execution. |
| 44 | * A blocking operation is, for example, a Sleep() operation, or any access |
| 45 | * to an operation region. However, it is probably not possible to easily |
| 46 | * detect whether a method will block or not, so we simply mark all suspicious |
| 47 | * methods as serialized. |
| 48 | * |
| 49 | * NOTE2: This code is essentially a generic routine for parsing a single |
| 50 | * control method. |
| 51 | * |
| 52 | ******************************************************************************/ |
| 53 | |
| 54 | acpi_status |
| 55 | acpi_ds_auto_serialize_method(struct acpi_namespace_node *node, |
| 56 | union acpi_operand_object *obj_desc) |
| 57 | { |
| 58 | acpi_status status; |
| 59 | union acpi_parse_object *op = NULL; |
| 60 | struct acpi_walk_state *walk_state; |
| 61 | |
| 62 | ACPI_FUNCTION_TRACE_PTR(ds_auto_serialize_method, node); |
| 63 | |
| 64 | ACPI_DEBUG_PRINT((ACPI_DB_PARSE, |
| 65 | "Method auto-serialization parse [%4.4s] %p\n", |
| 66 | acpi_ut_get_node_name(node), node)); |
| 67 | |
| 68 | /* Create/Init a root op for the method parse tree */ |
| 69 | |
| 70 | op = acpi_ps_alloc_op(AML_METHOD_OP, obj_desc->method.aml_start); |
| 71 | if (!op) { |
| 72 | return_ACPI_STATUS(AE_NO_MEMORY); |
| 73 | } |
| 74 | |
| 75 | acpi_ps_set_name(op, node->name.integer); |
| 76 | op->common.node = node; |
| 77 | |
| 78 | /* Create and initialize a new walk state */ |
| 79 | |
| 80 | walk_state = |
| 81 | acpi_ds_create_walk_state(node->owner_id, NULL, NULL, NULL); |
| 82 | if (!walk_state) { |
| 83 | acpi_ps_free_op(op); |
| 84 | return_ACPI_STATUS(AE_NO_MEMORY); |
| 85 | } |
| 86 | |
| 87 | status = acpi_ds_init_aml_walk(walk_state, op, node, |
| 88 | obj_desc->method.aml_start, |
| 89 | obj_desc->method.aml_length, NULL, 0); |
| 90 | if (ACPI_FAILURE(status)) { |
| 91 | acpi_ds_delete_walk_state(walk_state); |
| 92 | acpi_ps_free_op(op); |
| 93 | return_ACPI_STATUS(status); |
| 94 | } |
| 95 | |
| 96 | walk_state->descending_callback = acpi_ds_detect_named_opcodes; |
| 97 | |
| 98 | /* Parse the method, scan for creation of named objects */ |
| 99 | |
| 100 | status = acpi_ps_parse_aml(walk_state); |
| 101 | |
| 102 | acpi_ps_delete_parse_tree(op); |
| 103 | return_ACPI_STATUS(status); |
| 104 | } |
| 105 | |
| 106 | /******************************************************************************* |
| 107 | * |
| 108 | * FUNCTION: acpi_ds_detect_named_opcodes |
| 109 | * |
| 110 | * PARAMETERS: walk_state - Current state of the parse tree walk |
| 111 | * out_op - Unused, required for parser interface |
| 112 | * |
| 113 | * RETURN: Status |
| 114 | * |
| 115 | * DESCRIPTION: Descending callback used during the loading of ACPI tables. |
| 116 | * Currently used to detect methods that must be marked serialized |
| 117 | * in order to avoid problems with the creation of named objects. |
| 118 | * |
| 119 | ******************************************************************************/ |
| 120 | |
| 121 | static acpi_status |
| 122 | acpi_ds_detect_named_opcodes(struct acpi_walk_state *walk_state, |
| 123 | union acpi_parse_object **out_op) |
| 124 | { |
| 125 | |
| 126 | ACPI_FUNCTION_NAME(acpi_ds_detect_named_opcodes); |
| 127 | |
| 128 | /* We are only interested in opcodes that create a new name */ |
| 129 | |
| 130 | if (! |
| 131 | (walk_state->op_info-> |
| 132 | flags & (AML_NAMED | AML_CREATE | AML_FIELD))) { |
| 133 | return (AE_OK); |
| 134 | } |
| 135 | |
| 136 | /* |
| 137 | * At this point, we know we have a Named object opcode. |
| 138 | * Mark the method as serialized. Later code will create a mutex for |
| 139 | * this method to enforce serialization. |
| 140 | * |
| 141 | * Note, ACPI_METHOD_IGNORE_SYNC_LEVEL flag means that we will ignore the |
| 142 | * Sync Level mechanism for this method, even though it is now serialized. |
| 143 | * Otherwise, there can be conflicts with existing ASL code that actually |
| 144 | * uses sync levels. |
| 145 | */ |
| 146 | walk_state->method_desc->method.sync_level = 0; |
| 147 | walk_state->method_desc->method.info_flags |= |
| 148 | (ACPI_METHOD_SERIALIZED | ACPI_METHOD_IGNORE_SYNC_LEVEL); |
| 149 | |
| 150 | ACPI_DEBUG_PRINT((ACPI_DB_INFO, |
| 151 | "Method serialized [%4.4s] %p - [%s] (%4.4X)\n", |
| 152 | walk_state->method_node->name.ascii, |
| 153 | walk_state->method_node, walk_state->op_info->name, |
| 154 | walk_state->opcode)); |
| 155 | |
| 156 | /* Abort the parse, no need to examine this method any further */ |
| 157 | |
| 158 | return (AE_CTRL_TERMINATE); |
| 159 | } |
| 160 | |
| 161 | /******************************************************************************* |
| 162 | * |
| 163 | * FUNCTION: acpi_ds_method_error |
| 164 | * |
| 165 | * PARAMETERS: status - Execution status |
| 166 | * walk_state - Current state |
| 167 | * |
| 168 | * RETURN: Status |
| 169 | * |
| 170 | * DESCRIPTION: Called on method error. Invoke the global exception handler if |
| 171 | * present, dump the method data if the debugger is configured |
| 172 | * |
| 173 | * Note: Allows the exception handler to change the status code |
| 174 | * |
| 175 | ******************************************************************************/ |
| 176 | |
| 177 | acpi_status |
| 178 | acpi_ds_method_error(acpi_status status, struct acpi_walk_state *walk_state) |
| 179 | { |
| 180 | u32 aml_offset; |
| 181 | acpi_name name = 0; |
| 182 | |
| 183 | ACPI_FUNCTION_ENTRY(); |
| 184 | |
| 185 | /* Ignore AE_OK and control exception codes */ |
| 186 | |
| 187 | if (ACPI_SUCCESS(status) || (status & AE_CODE_CONTROL)) { |
| 188 | return (status); |
| 189 | } |
| 190 | |
| 191 | /* Invoke the global exception handler */ |
| 192 | |
| 193 | if (acpi_gbl_exception_handler) { |
| 194 | |
| 195 | /* Exit the interpreter, allow handler to execute methods */ |
| 196 | |
| 197 | acpi_ex_exit_interpreter(); |
| 198 | |
| 199 | /* |
| 200 | * Handler can map the exception code to anything it wants, including |
| 201 | * AE_OK, in which case the executing method will not be aborted. |
| 202 | */ |
| 203 | aml_offset = (u32)ACPI_PTR_DIFF(walk_state->aml, |
| 204 | walk_state->parser_state. |
| 205 | aml_start); |
| 206 | |
| 207 | if (walk_state->method_node) { |
| 208 | name = walk_state->method_node->name.integer; |
| 209 | } else if (walk_state->deferred_node) { |
| 210 | name = walk_state->deferred_node->name.integer; |
| 211 | } |
| 212 | |
| 213 | status = acpi_gbl_exception_handler(status, name, |
| 214 | walk_state->opcode, |
| 215 | aml_offset, NULL); |
| 216 | acpi_ex_enter_interpreter(); |
| 217 | } |
| 218 | |
| 219 | acpi_ds_clear_implicit_return(walk_state); |
| 220 | |
| 221 | if (ACPI_FAILURE(status)) { |
| 222 | acpi_ds_dump_method_stack(status, walk_state, walk_state->op); |
| 223 | |
| 224 | /* Display method locals/args if debugger is present */ |
| 225 | |
| 226 | #ifdef ACPI_DEBUGGER |
| 227 | acpi_db_dump_method_info(status, walk_state); |
| 228 | #endif |
| 229 | } |
| 230 | |
| 231 | return (status); |
| 232 | } |
| 233 | |
| 234 | /******************************************************************************* |
| 235 | * |
| 236 | * FUNCTION: acpi_ds_create_method_mutex |
| 237 | * |
| 238 | * PARAMETERS: obj_desc - The method object |
| 239 | * |
| 240 | * RETURN: Status |
| 241 | * |
| 242 | * DESCRIPTION: Create a mutex object for a serialized control method |
| 243 | * |
| 244 | ******************************************************************************/ |
| 245 | |
| 246 | static acpi_status |
| 247 | acpi_ds_create_method_mutex(union acpi_operand_object *method_desc) |
| 248 | { |
| 249 | union acpi_operand_object *mutex_desc; |
| 250 | acpi_status status; |
| 251 | |
| 252 | ACPI_FUNCTION_TRACE(ds_create_method_mutex); |
| 253 | |
| 254 | /* Create the new mutex object */ |
| 255 | |
| 256 | mutex_desc = acpi_ut_create_internal_object(ACPI_TYPE_MUTEX); |
| 257 | if (!mutex_desc) { |
| 258 | return_ACPI_STATUS(AE_NO_MEMORY); |
| 259 | } |
| 260 | |
| 261 | /* Create the actual OS Mutex */ |
| 262 | |
| 263 | status = acpi_os_create_mutex(&mutex_desc->mutex.os_mutex); |
| 264 | if (ACPI_FAILURE(status)) { |
| 265 | acpi_ut_delete_object_desc(mutex_desc); |
| 266 | return_ACPI_STATUS(status); |
| 267 | } |
| 268 | |
| 269 | mutex_desc->mutex.sync_level = method_desc->method.sync_level; |
| 270 | method_desc->method.mutex = mutex_desc; |
| 271 | return_ACPI_STATUS(AE_OK); |
| 272 | } |
| 273 | |
| 274 | /******************************************************************************* |
| 275 | * |
| 276 | * FUNCTION: acpi_ds_begin_method_execution |
| 277 | * |
| 278 | * PARAMETERS: method_node - Node of the method |
| 279 | * obj_desc - The method object |
| 280 | * walk_state - current state, NULL if not yet executing |
| 281 | * a method. |
| 282 | * |
| 283 | * RETURN: Status |
| 284 | * |
| 285 | * DESCRIPTION: Prepare a method for execution. Parses the method if necessary, |
| 286 | * increments the thread count, and waits at the method semaphore |
| 287 | * for clearance to execute. |
| 288 | * |
| 289 | ******************************************************************************/ |
| 290 | |
acpi_status
acpi_ds_begin_method_execution(struct acpi_namespace_node *method_node,
			       union acpi_operand_object *obj_desc,
			       struct acpi_walk_state *walk_state)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE_PTR(ds_begin_method_execution, method_node);

	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	/* Begin optional method execution tracing */

	acpi_ex_start_trace_method(method_node, obj_desc, walk_state);

	/* Prevent wraparound of thread count */

	if (obj_desc->method.thread_count == ACPI_UINT8_MAX) {
		ACPI_ERROR((AE_INFO,
			    "Method reached maximum reentrancy limit (255)"));
		return_ACPI_STATUS(AE_AML_METHOD_LIMIT);
	}

	/*
	 * If this method is serialized, we need to acquire the method mutex.
	 */
	if (obj_desc->method.info_flags & ACPI_METHOD_SERIALIZED) {
		/*
		 * Create a mutex for the method if it is defined to be Serialized
		 * and a mutex has not already been created. We defer the mutex creation
		 * until a method is actually executed, to minimize the object count
		 */
		if (!obj_desc->method.mutex) {
			status = acpi_ds_create_method_mutex(obj_desc);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}
		}

		/*
		 * The current_sync_level (per-thread) must be less than or equal to
		 * the sync level of the method. This mechanism provides some
		 * deadlock prevention.
		 *
		 * If the method was auto-serialized, we just ignore the sync level
		 * mechanism, because auto-serialization of methods can interfere
		 * with ASL code that actually uses sync levels.
		 *
		 * Top-level method invocation has no walk state at this point
		 */
		if (walk_state &&
		    (!(obj_desc->method.
		       info_flags & ACPI_METHOD_IGNORE_SYNC_LEVEL))
		    && (walk_state->thread->current_sync_level >
			obj_desc->method.mutex->mutex.sync_level)) {
			ACPI_ERROR((AE_INFO,
				    "Cannot acquire Mutex for method [%4.4s]"
				    ", current SyncLevel is too large (%u)",
				    acpi_ut_get_node_name(method_node),
				    walk_state->thread->current_sync_level));

			return_ACPI_STATUS(AE_AML_MUTEX_ORDER);
		}

		/*
		 * Obtain the method mutex if necessary. Do not acquire mutex for a
		 * recursive call (same thread already owns it; depth is bumped below).
		 */
		if (!walk_state ||
		    !obj_desc->method.mutex->mutex.thread_id ||
		    (walk_state->thread->thread_id !=
		     obj_desc->method.mutex->mutex.thread_id)) {
			/*
			 * Acquire the method mutex. This releases the interpreter if we
			 * block (and reacquires it before it returns)
			 */
			status =
			    acpi_ex_system_wait_mutex(obj_desc->method.mutex->
						      mutex.os_mutex,
						      ACPI_WAIT_FOREVER);
			if (ACPI_FAILURE(status)) {
				return_ACPI_STATUS(status);
			}

			/* Update the mutex and walk info and save the original sync_level */

			if (walk_state) {
				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    walk_state->thread->current_sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    walk_state->thread->thread_id;

				/*
				 * Update the current sync_level only if this is not an auto-
				 * serialized method. In the auto case, we have to ignore
				 * the sync level for the method mutex (created for the
				 * auto-serialization) because we have no idea of what the
				 * sync level should be. Therefore, just ignore it.
				 */
				if (!(obj_desc->method.info_flags &
				      ACPI_METHOD_IGNORE_SYNC_LEVEL)) {
					walk_state->thread->current_sync_level =
					    obj_desc->method.sync_level;
				}
			} else {
				/* No walk state: top-level invocation; use current thread id */

				obj_desc->method.mutex->mutex.
				    original_sync_level =
				    obj_desc->method.mutex->mutex.sync_level;

				obj_desc->method.mutex->mutex.thread_id =
				    acpi_os_get_thread_id();
			}
		}

		/* Always increase acquisition depth */

		obj_desc->method.mutex->mutex.acquisition_depth++;
	}

	/*
	 * Allocate an Owner ID for this method, only if this is the first thread
	 * to begin concurrent execution. We only need one owner_id, even if the
	 * method is invoked recursively.
	 */
	if (!obj_desc->method.owner_id) {
		status = acpi_ut_allocate_owner_id(&obj_desc->method.owner_id);
		if (ACPI_FAILURE(status)) {
			goto cleanup;
		}
	}

	/*
	 * Increment the method parse tree thread count since it has been
	 * reentered one more time (even if it is the same thread)
	 */
	obj_desc->method.thread_count++;
	acpi_method_count++;
	return_ACPI_STATUS(status);

cleanup:
	/*
	 * On error, must release the method mutex (if present).
	 * NOTE(review): the mutex is released here without decrementing
	 * acquisition_depth, which was incremented above — presumably safe
	 * because the method never starts executing; verify against
	 * acpi_ds_terminate_control_method's depth handling.
	 */
	if (obj_desc->method.mutex) {
		acpi_os_release_mutex(obj_desc->method.mutex->mutex.os_mutex);
	}
	return_ACPI_STATUS(status);
}
| 440 | |
| 441 | /******************************************************************************* |
| 442 | * |
| 443 | * FUNCTION: acpi_ds_call_control_method |
| 444 | * |
| 445 | * PARAMETERS: thread - Info for this thread |
| 446 | * this_walk_state - Current walk state |
| 447 | * op - Current Op to be walked |
| 448 | * |
| 449 | * RETURN: Status |
| 450 | * |
| 451 | * DESCRIPTION: Transfer execution to a called control method |
| 452 | * |
| 453 | ******************************************************************************/ |
| 454 | |
acpi_status
acpi_ds_call_control_method(struct acpi_thread_state *thread,
			    struct acpi_walk_state *this_walk_state,
			    union acpi_parse_object *op)
{
	acpi_status status;
	struct acpi_namespace_node *method_node;
	struct acpi_walk_state *next_walk_state = NULL;
	union acpi_operand_object *obj_desc;
	struct acpi_evaluate_info *info;
	u32 i;

	ACPI_FUNCTION_TRACE_PTR(ds_call_control_method, this_walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "Calling method %p, currentstate=%p\n",
			  this_walk_state->prev_op, this_walk_state));

	/*
	 * Get the namespace entry for the control method we are about to call
	 */
	method_node = this_walk_state->method_call_node;
	if (!method_node) {
		return_ACPI_STATUS(AE_NULL_ENTRY);
	}

	obj_desc = acpi_ns_get_attached_object(method_node);
	if (!obj_desc) {
		return_ACPI_STATUS(AE_NULL_OBJECT);
	}

	/* Init for new method, possibly wait on method mutex */

	status =
	    acpi_ds_begin_method_execution(method_node, obj_desc,
					   this_walk_state);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Begin method parse/execution. Create a new walk state */

	next_walk_state =
	    acpi_ds_create_walk_state(obj_desc->method.owner_id, NULL, obj_desc,
				      thread);
	if (!next_walk_state) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/*
	 * The resolved arguments were put on the previous walk state's operand
	 * stack. Operands on the previous walk state stack always
	 * start at index 0. Also, null terminate the list of arguments
	 */
	this_walk_state->operands[this_walk_state->num_operands] = NULL;

	/*
	 * Allocate and initialize the evaluation information block
	 * TBD: this is somewhat inefficient, should change interface to
	 * ds_init_aml_walk. For now, keeps this struct off the CPU stack
	 */
	info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info));
	if (!info) {
		status = AE_NO_MEMORY;
		goto cleanup;
	}

	/* Caller's operand stack is passed as the callee's parameter list */

	info->parameters = &this_walk_state->operands[0];

	status = acpi_ds_init_aml_walk(next_walk_state, NULL, method_node,
				       obj_desc->method.aml_start,
				       obj_desc->method.aml_length, info,
				       ACPI_IMODE_EXECUTE);

	/* The info block is only needed by ds_init_aml_walk; free it now */

	ACPI_FREE(info);
	if (ACPI_FAILURE(status)) {
		goto cleanup;
	}

	next_walk_state->method_nesting_depth =
	    this_walk_state->method_nesting_depth + 1;

	/*
	 * Delete the operands on the previous walkstate operand stack
	 * (they were copied to new objects)
	 */
	for (i = 0; i < obj_desc->method.param_count; i++) {
		acpi_ut_remove_reference(this_walk_state->operands[i]);
		this_walk_state->operands[i] = NULL;
	}

	/* Clear the operand stack */

	this_walk_state->num_operands = 0;

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "**** Begin nested execution of [%4.4s] **** WalkState=%p\n",
			  method_node->name.ascii, next_walk_state));

	this_walk_state->method_pathname =
	    acpi_ns_get_normalized_pathname(method_node, TRUE);
	this_walk_state->method_is_nested = TRUE;

	/* Optional object evaluation log */

	ACPI_DEBUG_PRINT_RAW((ACPI_DB_EVALUATION,
			      "%-26s: %*s%s\n", " Nested method call",
			      next_walk_state->method_nesting_depth * 3, " ",
			      &this_walk_state->method_pathname[1]));

	/* Invoke an internal method if necessary */

	if (obj_desc->method.info_flags & ACPI_METHOD_INTERNAL_ONLY) {
		status =
		    obj_desc->method.dispatch.implementation(next_walk_state);
		if (status == AE_OK) {
			/* Terminate here: internal methods have no AML to execute */

			status = AE_CTRL_TERMINATE;
		}
	}

	return_ACPI_STATUS(status);

cleanup:

	/* On error, we must terminate the method properly */

	acpi_ds_terminate_control_method(obj_desc, next_walk_state);
	acpi_ds_delete_walk_state(next_walk_state);

	return_ACPI_STATUS(status);
}
| 587 | |
| 588 | /******************************************************************************* |
| 589 | * |
| 590 | * FUNCTION: acpi_ds_restart_control_method |
| 591 | * |
| 592 | * PARAMETERS: walk_state - State for preempted method (caller) |
| 593 | * return_desc - Return value from the called method |
| 594 | * |
| 595 | * RETURN: Status |
| 596 | * |
| 597 | * DESCRIPTION: Restart a method that was preempted by another (nested) method |
| 598 | * invocation. Handle the return value (if any) from the callee. |
| 599 | * |
| 600 | ******************************************************************************/ |
| 601 | |
acpi_status
acpi_ds_restart_control_method(struct acpi_walk_state *walk_state,
			       union acpi_operand_object *return_desc)
{
	acpi_status status;
	int same_as_implicit_return;

	ACPI_FUNCTION_TRACE_PTR(ds_restart_control_method, walk_state);

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  "****Restart [%4.4s] Op %p ReturnValueFromCallee %p\n",
			  acpi_ut_get_node_name(walk_state->method_node),
			  walk_state->method_call_op, return_desc));

	ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
			  " ReturnFromThisMethodUsed?=%X ResStack %p Walk %p\n",
			  walk_state->return_used,
			  walk_state->results, walk_state));

	/* Did the called method return a value? */

	if (return_desc) {

		/* Is the implicit return object the same as the return desc? */

		same_as_implicit_return =
		    (walk_state->implicit_return_obj == return_desc);

		/* Are we actually going to use the return value? */

		if (walk_state->return_used) {

			/* Save the return value from the previous method */

			status = acpi_ds_result_push(return_desc, walk_state);
			if (ACPI_FAILURE(status)) {
				/* Push failed; drop our reference to avoid a leak */

				acpi_ut_remove_reference(return_desc);
				return_ACPI_STATUS(status);
			}

			/*
			 * Save as THIS method's return value in case it is returned
			 * immediately to yet another method
			 */
			walk_state->return_desc = return_desc;
		}

		/*
		 * The following code is the optional support for the so-called
		 * "implicit return". Some AML code assumes that the last value of the
		 * method is "implicitly" returned to the caller, in the absence of an
		 * explicit return value.
		 *
		 * Just save the last result of the method as the return value.
		 *
		 * NOTE: this is optional because the ASL language does not actually
		 * support this behavior.
		 */
		else if (!acpi_ds_do_implicit_return
			 (return_desc, walk_state, FALSE)
			 || same_as_implicit_return) {
			/*
			 * Delete the return value if it will not be used by the
			 * calling method or remove one reference if the explicit return
			 * is the same as the implicit return value.
			 */
			acpi_ut_remove_reference(return_desc);
		}
	}

	return_ACPI_STATUS(AE_OK);
}
| 674 | |
| 675 | /******************************************************************************* |
| 676 | * |
| 677 | * FUNCTION: acpi_ds_terminate_control_method |
| 678 | * |
| 679 | * PARAMETERS: method_desc - Method object |
| 680 | * walk_state - State associated with the method |
| 681 | * |
| 682 | * RETURN: None |
| 683 | * |
| 684 | * DESCRIPTION: Terminate a control method. Delete everything that the method |
| 685 | * created, delete all locals and arguments, and delete the parse |
| 686 | * tree if requested. |
| 687 | * |
| 688 | * MUTEX: Interpreter is locked |
| 689 | * |
| 690 | ******************************************************************************/ |
| 691 | |
void
acpi_ds_terminate_control_method(union acpi_operand_object *method_desc,
				 struct acpi_walk_state *walk_state)
{

	ACPI_FUNCTION_TRACE_PTR(ds_terminate_control_method, walk_state);

	/* method_desc is required, walk_state is optional */

	if (!method_desc) {
		return_VOID;
	}

	if (walk_state) {

		/* Delete all arguments and locals */

		acpi_ds_method_data_delete_all(walk_state);

		/*
		 * Delete any namespace objects created anywhere within the
		 * namespace by the execution of this method. Unless:
		 * 1) This method is a module-level executable code method, in which
		 *    case we want make the objects permanent.
		 * 2) There are other threads executing the method, in which case we
		 *    will wait until the last thread has completed.
		 */
		if (!(method_desc->method.info_flags & ACPI_METHOD_MODULE_LEVEL)
		    && (method_desc->method.thread_count == 1)) {

			/* Delete any direct children of (created by) this method */

			/* Namespace deletion must run outside the interpreter lock */

			(void)acpi_ex_exit_interpreter();
			acpi_ns_delete_namespace_subtree(walk_state->
							 method_node);
			(void)acpi_ex_enter_interpreter();

			/*
			 * Delete any objects that were created by this method
			 * elsewhere in the namespace (if any were created).
			 * Use of the ACPI_METHOD_MODIFIED_NAMESPACE optimizes the
			 * deletion such that we don't have to perform an entire
			 * namespace walk for every control method execution.
			 */
			if (method_desc->method.
			    info_flags & ACPI_METHOD_MODIFIED_NAMESPACE) {
				(void)acpi_ex_exit_interpreter();
				acpi_ns_delete_namespace_by_owner(method_desc->
								  method.
								  owner_id);
				(void)acpi_ex_enter_interpreter();
				method_desc->method.info_flags &=
				    ~ACPI_METHOD_MODIFIED_NAMESPACE;
			}
		}

		/*
		 * If method is serialized, release the mutex and restore the
		 * current sync level for this thread
		 */
		if (method_desc->method.mutex) {

			/* Acquisition Depth handles recursive calls */

			method_desc->method.mutex->mutex.acquisition_depth--;
			if (!method_desc->method.mutex->mutex.acquisition_depth) {
				/* Outermost exit: restore sync level, release OS mutex */

				walk_state->thread->current_sync_level =
				    method_desc->method.mutex->mutex.
				    original_sync_level;

				acpi_os_release_mutex(method_desc->method.
						      mutex->mutex.os_mutex);
				method_desc->method.mutex->mutex.thread_id = 0;
			}
		}
	}

	/* Decrement the thread count on the method */

	if (method_desc->method.thread_count) {
		method_desc->method.thread_count--;
	} else {
		ACPI_ERROR((AE_INFO, "Invalid zero thread count in method"));
	}

	/* Are there any other threads currently executing this method? */

	if (method_desc->method.thread_count) {
		/*
		 * Additional threads. Do not release the owner_id in this case,
		 * we immediately reuse it for the next thread executing this method
		 */
		ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH,
				  "*** Completed execution of one thread, %u threads remaining\n",
				  method_desc->method.thread_count));
	} else {
		/* This is the only executing thread for this method */

		/*
		 * Support to dynamically change a method from not_serialized to
		 * Serialized if it appears that the method is incorrectly written and
		 * does not support multiple thread execution. The best example of this
		 * is if such a method creates namespace objects and blocks. A second
		 * thread will fail with an AE_ALREADY_EXISTS exception.
		 *
		 * This code is here because we must wait until the last thread exits
		 * before marking the method as serialized.
		 */
		if (method_desc->method.
		    info_flags & ACPI_METHOD_SERIALIZED_PENDING) {
			if (walk_state) {
				ACPI_INFO(("Marking method %4.4s as Serialized "
					   "because of AE_ALREADY_EXISTS error",
					   walk_state->method_node->name.
					   ascii));
			}

			/*
			 * Method tried to create an object twice and was marked as
			 * "pending serialized". The probable cause is that the method
			 * cannot handle reentrancy.
			 *
			 * The method was created as not_serialized, but it tried to create
			 * a named object and then blocked, causing the second thread
			 * entrance to begin and then fail. Workaround this problem by
			 * marking the method permanently as Serialized when the last
			 * thread exits here.
			 */
			method_desc->method.info_flags &=
			    ~ACPI_METHOD_SERIALIZED_PENDING;

			method_desc->method.info_flags |=
			    (ACPI_METHOD_SERIALIZED |
			     ACPI_METHOD_IGNORE_SYNC_LEVEL);
			method_desc->method.sync_level = 0;
		}

		/* No more threads, we can free the owner_id */

		/* Module-level methods keep their owner_id for permanent objects */

		if (!
		    (method_desc->method.
		     info_flags & ACPI_METHOD_MODULE_LEVEL)) {
			acpi_ut_release_owner_id(&method_desc->method.owner_id);
		}
	}

	/* End optional method tracing (walk_state may be NULL here) */

	acpi_ex_stop_trace_method((struct acpi_namespace_node *)method_desc->
				  method.node, method_desc, walk_state);

	return_VOID;
}