1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * scan.c - support for transforming the ACPI namespace into individual objects
4 */
5
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/slab.h>
9#include <linux/kernel.h>
10#include <linux/acpi.h>
11#include <linux/acpi_iort.h>
12#include <linux/signal.h>
13#include <linux/kthread.h>
14#include <linux/dmi.h>
15#include <linux/nls.h>
16#include <linux/dma-mapping.h>
17#include <linux/platform_data/x86/apple.h>
18
19#include <asm/pgtable.h>
20
21#include "internal.h"
22
23#define _COMPONENT ACPI_BUS_COMPONENT
24ACPI_MODULE_NAME("scan");
25extern struct acpi_device *acpi_root;
26
27#define ACPI_BUS_CLASS "system_bus"
28#define ACPI_BUS_HID "LNXSYBUS"
29#define ACPI_BUS_DEVICE_NAME "System Bus"
30
31#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
32
33#define INVALID_ACPI_HANDLE ((acpi_handle)empty_zero_page)
34
35static const char *dummy_hid = "device";
36
37static LIST_HEAD(acpi_dep_list);
38static DEFINE_MUTEX(acpi_dep_list_lock);
39LIST_HEAD(acpi_bus_id_list);
40static DEFINE_MUTEX(acpi_scan_lock);
41static LIST_HEAD(acpi_scan_handlers_list);
42DEFINE_MUTEX(acpi_device_lock);
43LIST_HEAD(acpi_wakeup_device_list);
44static DEFINE_MUTEX(acpi_hp_context_lock);
45
46/*
47 * The UART device described by the SPCR table is the only object which needs
48 * special-casing. Everything else is covered by ACPI namespace paths in STAO
49 * table.
50 */
51static u64 spcr_uart_addr;
52
53struct acpi_dep_data {
54 struct list_head node;
55 acpi_handle master;
56 acpi_handle slave;
57};
58
59void acpi_scan_lock_acquire(void)
60{
61 mutex_lock(&acpi_scan_lock);
62}
63EXPORT_SYMBOL_GPL(acpi_scan_lock_acquire);
64
65void acpi_scan_lock_release(void)
66{
67 mutex_unlock(&acpi_scan_lock);
68}
69EXPORT_SYMBOL_GPL(acpi_scan_lock_release);
70
71void acpi_lock_hp_context(void)
72{
73 mutex_lock(&acpi_hp_context_lock);
74}
75
76void acpi_unlock_hp_context(void)
77{
78 mutex_unlock(&acpi_hp_context_lock);
79}
80
81void acpi_initialize_hp_context(struct acpi_device *adev,
82 struct acpi_hotplug_context *hp,
83 int (*notify)(struct acpi_device *, u32),
84 void (*uevent)(struct acpi_device *, u32))
85{
86 acpi_lock_hp_context();
87 hp->notify = notify;
88 hp->uevent = uevent;
89 acpi_set_hp_context(adev, hp);
90 acpi_unlock_hp_context();
91}
92EXPORT_SYMBOL_GPL(acpi_initialize_hp_context);
93
94int acpi_scan_add_handler(struct acpi_scan_handler *handler)
95{
96 if (!handler)
97 return -EINVAL;
98
99 list_add_tail(&handler->list_node, &acpi_scan_handlers_list);
100 return 0;
101}
102
103int acpi_scan_add_handler_with_hotplug(struct acpi_scan_handler *handler,
104 const char *hotplug_profile_name)
105{
106 int error;
107
108 error = acpi_scan_add_handler(handler);
109 if (error)
110 return error;
111
112 acpi_sysfs_add_hotplug_profile(&handler->hotplug, hotplug_profile_name);
113 return 0;
114}
115
116bool acpi_scan_is_offline(struct acpi_device *adev, bool uevent)
117{
118 struct acpi_device_physical_node *pn;
119 bool offline = true;
120 char *envp[] = { "EVENT=offline", NULL };
121
122 /*
123 * acpi_container_offline() calls this for all of the container's
124 * children under the container's physical_node_lock lock.
125 */
126 mutex_lock_nested(&adev->physical_node_lock, SINGLE_DEPTH_NESTING);
127
128 list_for_each_entry(pn, &adev->physical_node_list, node)
129 if (device_supports_offline(pn->dev) && !pn->dev->offline) {
130 if (uevent)
131 kobject_uevent_env(&pn->dev->kobj, KOBJ_CHANGE, envp);
132
133 offline = false;
134 break;
135 }
136
137 mutex_unlock(&adev->physical_node_lock);
138 return offline;
139}
140
141static acpi_status acpi_bus_offline(acpi_handle handle, u32 lvl, void *data,
142 void **ret_p)
143{
144 struct acpi_device *device = NULL;
145 struct acpi_device_physical_node *pn;
146 bool second_pass = (bool)data;
147 acpi_status status = AE_OK;
148
149 if (acpi_bus_get_device(handle, &device))
150 return AE_OK;
151
152 if (device->handler && !device->handler->hotplug.enabled) {
153 *ret_p = &device->dev;
154 return AE_SUPPORT;
155 }
156
157 mutex_lock(&device->physical_node_lock);
158
159 list_for_each_entry(pn, &device->physical_node_list, node) {
160 int ret;
161
162 if (second_pass) {
163 /* Skip devices offlined by the first pass. */
164 if (pn->put_online)
165 continue;
166 } else {
167 pn->put_online = false;
168 }
169 ret = device_offline(pn->dev);
170 if (ret >= 0) {
171 pn->put_online = !ret;
172 } else {
173 *ret_p = pn->dev;
174 if (second_pass) {
175 status = AE_ERROR;
176 break;
177 }
178 }
179 }
180
181 mutex_unlock(&device->physical_node_lock);
182
183 return status;
184}
185
186static acpi_status acpi_bus_online(acpi_handle handle, u32 lvl, void *data,
187 void **ret_p)
188{
189 struct acpi_device *device = NULL;
190 struct acpi_device_physical_node *pn;
191
192 if (acpi_bus_get_device(handle, &device))
193 return AE_OK;
194
195 mutex_lock(&device->physical_node_lock);
196
197 list_for_each_entry(pn, &device->physical_node_list, node)
198 if (pn->put_online) {
199 device_online(pn->dev);
200 pn->put_online = false;
201 }
202
203 mutex_unlock(&device->physical_node_lock);
204
205 return AE_OK;
206}
207
208static int acpi_scan_try_to_offline(struct acpi_device *device)
209{
210 acpi_handle handle = device->handle;
211 struct device *errdev = NULL;
212 acpi_status status;
213
214 /*
215 * Carry out two passes here and ignore errors in the first pass,
216 * because if the devices in question are memory blocks and
217 * CONFIG_MEMCG is set, one of the blocks may hold data structures
218 * that the other blocks depend on, but it is not known in advance which
219 * block holds them.
220 *
221 * If the first pass is successful, the second one isn't needed, though.
222 */
223 status = acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
224 NULL, acpi_bus_offline, (void *)false,
225 (void **)&errdev);
226 if (status == AE_SUPPORT) {
227 dev_warn(errdev, "Offline disabled.\n");
228 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
229 acpi_bus_online, NULL, NULL, NULL);
230 return -EPERM;
231 }
232 acpi_bus_offline(handle, 0, (void *)false, (void **)&errdev);
233 if (errdev) {
234 errdev = NULL;
235 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
236 NULL, acpi_bus_offline, (void *)true,
237 (void **)&errdev);
238 if (!errdev)
239 acpi_bus_offline(handle, 0, (void *)true,
240 (void **)&errdev);
241
242 if (errdev) {
243 dev_warn(errdev, "Offline failed.\n");
244 acpi_bus_online(handle, 0, NULL, NULL);
245 acpi_walk_namespace(ACPI_TYPE_ANY, handle,
246 ACPI_UINT32_MAX, acpi_bus_online,
247 NULL, NULL, NULL);
248 return -EBUSY;
249 }
250 }
251 return 0;
252}
253
254static int acpi_scan_hot_remove(struct acpi_device *device)
255{
256 acpi_handle handle = device->handle;
257 unsigned long long sta;
258 acpi_status status;
259
260 if (device->handler && device->handler->hotplug.demand_offline) {
261 if (!acpi_scan_is_offline(device, true))
262 return -EBUSY;
263 } else {
264 int error = acpi_scan_try_to_offline(device);
265 if (error)
266 return error;
267 }
268
269 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
270 "Hot-removing device %s...\n", dev_name(&device->dev)));
271
272 acpi_bus_trim(device);
273
274 acpi_evaluate_lck(handle, 0);
275 /*
276 * TBD: _EJD support.
277 */
278 status = acpi_evaluate_ej0(handle);
279 if (status == AE_NOT_FOUND)
280 return -ENODEV;
281 else if (ACPI_FAILURE(status))
282 return -EIO;
283
284 /*
285 * Verify if eject was indeed successful. If not, log an error
286 * message. No need to call _OST since _EJ0 call was made OK.
287 */
288 status = acpi_evaluate_integer(handle, "_STA", NULL, &sta);
289 if (ACPI_FAILURE(status)) {
290 acpi_handle_warn(handle,
291 "Status check after eject failed (0x%x)\n", status);
292 } else if (sta & ACPI_STA_DEVICE_ENABLED) {
293 acpi_handle_warn(handle,
294 "Eject incomplete - status 0x%llx\n", sta);
295 }
296
297 return 0;
298}
299
300static int acpi_scan_device_not_present(struct acpi_device *adev)
301{
302 if (!acpi_device_enumerated(adev)) {
303 dev_warn(&adev->dev, "Still not present\n");
304 return -EALREADY;
305 }
306 acpi_bus_trim(adev);
307 return 0;
308}
309
310static int acpi_scan_device_check(struct acpi_device *adev)
311{
312 int error;
313
314 acpi_bus_get_status(adev);
315 if (adev->status.present || adev->status.functional) {
316 /*
317 * This function is only called for device objects for which
318 * matching scan handlers exist. The only situation in which
319 * the scan handler is not attached to this device object yet
320 * is when the device has just appeared (either it wasn't
321 * present at all before or it was removed and then added
322 * again).
323 */
324 if (adev->handler) {
325 dev_warn(&adev->dev, "Already enumerated\n");
326 return -EALREADY;
327 }
328 error = acpi_bus_scan(adev->handle);
329 if (error) {
330 dev_warn(&adev->dev, "Namespace scan failure\n");
331 return error;
332 }
333 if (!adev->handler) {
334 dev_warn(&adev->dev, "Enumeration failure\n");
335 error = -ENODEV;
336 }
337 } else {
338 error = acpi_scan_device_not_present(adev);
339 }
340 return error;
341}
342
343static int acpi_scan_bus_check(struct acpi_device *adev)
344{
345 struct acpi_scan_handler *handler = adev->handler;
346 struct acpi_device *child;
347 int error;
348
349 acpi_bus_get_status(adev);
350 if (!(adev->status.present || adev->status.functional)) {
351 acpi_scan_device_not_present(adev);
352 return 0;
353 }
354 if (handler && handler->hotplug.scan_dependent)
355 return handler->hotplug.scan_dependent(adev);
356
357 error = acpi_bus_scan(adev->handle);
358 if (error) {
359 dev_warn(&adev->dev, "Namespace scan failure\n");
360 return error;
361 }
362 list_for_each_entry(child, &adev->children, node) {
363 error = acpi_scan_bus_check(child);
364 if (error)
365 return error;
366 }
367 return 0;
368}
369
370static int acpi_generic_hotplug_event(struct acpi_device *adev, u32 type)
371{
372 switch (type) {
373 case ACPI_NOTIFY_BUS_CHECK:
374 return acpi_scan_bus_check(adev);
375 case ACPI_NOTIFY_DEVICE_CHECK:
376 return acpi_scan_device_check(adev);
377 case ACPI_NOTIFY_EJECT_REQUEST:
378 case ACPI_OST_EC_OSPM_EJECT:
379 if (adev->handler && !adev->handler->hotplug.enabled) {
380 dev_info(&adev->dev, "Eject disabled\n");
381 return -EPERM;
382 }
383 acpi_evaluate_ost(adev->handle, ACPI_NOTIFY_EJECT_REQUEST,
384 ACPI_OST_SC_EJECT_IN_PROGRESS, NULL);
385 return acpi_scan_hot_remove(adev);
386 }
387 return -EINVAL;
388}
389
390void acpi_device_hotplug(struct acpi_device *adev, u32 src)
391{
392 u32 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
393 int error = -ENODEV;
394
395 lock_device_hotplug();
396 mutex_lock(&acpi_scan_lock);
397
398 /*
399 * The device object's ACPI handle cannot become invalid as long as we
400 * are holding acpi_scan_lock, but it might have become invalid before
401 * that lock was acquired.
402 */
403 if (adev->handle == INVALID_ACPI_HANDLE)
404 goto err_out;
405
406 if (adev->flags.is_dock_station) {
407 error = dock_notify(adev, src);
408 } else if (adev->flags.hotplug_notify) {
409 error = acpi_generic_hotplug_event(adev, src);
410 } else {
411 int (*notify)(struct acpi_device *, u32);
412
413 acpi_lock_hp_context();
414 notify = adev->hp ? adev->hp->notify : NULL;
415 acpi_unlock_hp_context();
416 /*
417 * There may be additional notify handlers for device objects
418 * without the .event() callback, so ignore them here.
419 */
420 if (notify)
421 error = notify(adev, src);
422 else
423 goto out;
424 }
425 switch (error) {
426 case 0:
427 ost_code = ACPI_OST_SC_SUCCESS;
428 break;
429 case -EPERM:
430 ost_code = ACPI_OST_SC_EJECT_NOT_SUPPORTED;
431 break;
432 case -EBUSY:
433 ost_code = ACPI_OST_SC_DEVICE_BUSY;
434 break;
435 default:
436 ost_code = ACPI_OST_SC_NON_SPECIFIC_FAILURE;
437 break;
438 }
439
440 err_out:
441 acpi_evaluate_ost(adev->handle, src, ost_code, NULL);
442
443 out:
444 acpi_bus_put_acpi_device(adev);
445 mutex_unlock(&acpi_scan_lock);
446 unlock_device_hotplug();
447}
448
449static void acpi_free_power_resources_lists(struct acpi_device *device)
450{
451 int i;
452
453 if (device->wakeup.flags.valid)
454 acpi_power_resources_list_free(&device->wakeup.resources);
455
456 if (!device->power.flags.power_resources)
457 return;
458
459 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
460 struct acpi_device_power_state *ps = &device->power.states[i];
461 acpi_power_resources_list_free(&ps->resources);
462 }
463}
464
465static void acpi_device_release(struct device *dev)
466{
467 struct acpi_device *acpi_dev = to_acpi_device(dev);
468
469 acpi_free_properties(acpi_dev);
470 acpi_free_pnp_ids(&acpi_dev->pnp);
471 acpi_free_power_resources_lists(acpi_dev);
472 kfree(acpi_dev);
473}
474
475static void acpi_device_del(struct acpi_device *device)
476{
477 struct acpi_device_bus_id *acpi_device_bus_id;
478
479 mutex_lock(&acpi_device_lock);
480 if (device->parent)
481 list_del(&device->node);
482
483 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node)
484 if (!strcmp(acpi_device_bus_id->bus_id,
485 acpi_device_hid(device))) {
486 ida_simple_remove(&acpi_device_bus_id->instance_ida, device->pnp.instance_no);
487 if (ida_is_empty(&acpi_device_bus_id->instance_ida)) {
488 list_del(&acpi_device_bus_id->node);
489 kfree_const(acpi_device_bus_id->bus_id);
490 kfree(acpi_device_bus_id);
491 }
492 break;
493 }
494
495 list_del(&device->wakeup_list);
496 mutex_unlock(&acpi_device_lock);
497
498 acpi_power_add_remove_device(device, false);
499 acpi_device_remove_files(device);
500 if (device->remove)
501 device->remove(device);
502
503 device_del(&device->dev);
504}
505
506static BLOCKING_NOTIFIER_HEAD(acpi_reconfig_chain);
507
508static LIST_HEAD(acpi_device_del_list);
509static DEFINE_MUTEX(acpi_device_del_lock);
510
511static void acpi_device_del_work_fn(struct work_struct *work_not_used)
512{
513 for (;;) {
514 struct acpi_device *adev;
515
516 mutex_lock(&acpi_device_del_lock);
517
518 if (list_empty(&acpi_device_del_list)) {
519 mutex_unlock(&acpi_device_del_lock);
520 break;
521 }
522 adev = list_first_entry(&acpi_device_del_list,
523 struct acpi_device, del_list);
524 list_del(&adev->del_list);
525
526 mutex_unlock(&acpi_device_del_lock);
527
528 blocking_notifier_call_chain(&acpi_reconfig_chain,
529 ACPI_RECONFIG_DEVICE_REMOVE, adev);
530
531 acpi_device_del(adev);
532 /*
533 * Drop references to all power resources that might have been
534 * used by the device.
535 */
536 acpi_power_transition(adev, ACPI_STATE_D3_COLD);
537 put_device(&adev->dev);
538 }
539}
540
541/**
542 * acpi_scan_drop_device - Drop an ACPI device object.
543 * @handle: Handle of an ACPI namespace node, not used.
544 * @context: Address of the ACPI device object to drop.
545 *
546 * This is invoked by acpi_ns_delete_node() during the removal of the ACPI
547 * namespace node the device object pointed to by @context is attached to.
548 *
549 * The unregistration is carried out asynchronously to avoid running
550 * acpi_device_del() under the ACPICA's namespace mutex and the list is used to
551 * ensure the correct ordering (the device objects must be unregistered in the
552 * same order in which the corresponding namespace nodes are deleted).
553 */
554static void acpi_scan_drop_device(acpi_handle handle, void *context)
555{
556 static DECLARE_WORK(work, acpi_device_del_work_fn);
557 struct acpi_device *adev = context;
558
559 mutex_lock(&acpi_device_del_lock);
560
561 /*
562 * Use the ACPI hotplug workqueue which is ordered, so this work item
563 * won't run after any hotplug work items submitted subsequently. That
564 * prevents attempts to register device objects identical to those being
565 * deleted from happening concurrently (such attempts result from
566 * hotplug events handled via the ACPI hotplug workqueue). It also will
567 * run after all of the work items submitted previously, which helps
568 * those work items to ensure that they are not accessing stale device
569 * objects.
570 */
571 if (list_empty(&acpi_device_del_list))
572 acpi_queue_hotplug_work(&work);
573
574 list_add_tail(&adev->del_list, &acpi_device_del_list);
575 /* Make acpi_ns_validate_handle() return NULL for this handle. */
576 adev->handle = INVALID_ACPI_HANDLE;
577
578 mutex_unlock(&acpi_device_del_lock);
579}
580
581static int acpi_get_device_data(acpi_handle handle, struct acpi_device **device,
582 void (*callback)(void *))
583{
584 acpi_status status;
585
586 if (!device)
587 return -EINVAL;
588
589 *device = NULL;
590
591 status = acpi_get_data_full(handle, acpi_scan_drop_device,
592 (void **)device, callback);
593 if (ACPI_FAILURE(status) || !*device) {
594 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No context for object [%p]\n",
595 handle));
596 return -ENODEV;
597 }
598 return 0;
599}
600
601int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
602{
603 return acpi_get_device_data(handle, device, NULL);
604}
605EXPORT_SYMBOL(acpi_bus_get_device);
606
607static void get_acpi_device(void *dev)
608{
609 if (dev)
610 get_device(&((struct acpi_device *)dev)->dev);
611}
612
613struct acpi_device *acpi_bus_get_acpi_device(acpi_handle handle)
614{
615 struct acpi_device *adev = NULL;
616
617 acpi_get_device_data(handle, &adev, get_acpi_device);
618 return adev;
619}
620
621void acpi_bus_put_acpi_device(struct acpi_device *adev)
622{
623 put_device(&adev->dev);
624}
625
626static struct acpi_device_bus_id *acpi_device_bus_id_match(const char *dev_id)
627{
628 struct acpi_device_bus_id *acpi_device_bus_id;
629
630 /* Find suitable bus_id and instance number in acpi_bus_id_list. */
631 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
632 if (!strcmp(acpi_device_bus_id->bus_id, dev_id))
633 return acpi_device_bus_id;
634 }
635 return NULL;
636}
637
638static int acpi_device_set_name(struct acpi_device *device,
639 struct acpi_device_bus_id *acpi_device_bus_id)
640{
641 struct ida *instance_ida = &acpi_device_bus_id->instance_ida;
642 int result;
643
644 result = ida_simple_get(instance_ida, 0, ACPI_MAX_DEVICE_INSTANCES, GFP_KERNEL);
645 if (result < 0)
646 return result;
647
648 device->pnp.instance_no = result;
649 dev_set_name(&device->dev, "%s:%02x", acpi_device_bus_id->bus_id, result);
650 return 0;
651}
652
653int acpi_device_add(struct acpi_device *device,
654 void (*release)(struct device *))
655{
656 struct acpi_device_bus_id *acpi_device_bus_id;
657 int result;
658
659 if (device->handle) {
660 acpi_status status;
661
662 status = acpi_attach_data(device->handle, acpi_scan_drop_device,
663 device);
664 if (ACPI_FAILURE(status)) {
665 acpi_handle_err(device->handle,
666 "Unable to attach device data\n");
667 return -ENODEV;
668 }
669 }
670
671 /*
672 * Linkage
673 * -------
674 * Link this device to its parent and siblings.
675 */
676 INIT_LIST_HEAD(&device->children);
677 INIT_LIST_HEAD(&device->node);
678 INIT_LIST_HEAD(&device->wakeup_list);
679 INIT_LIST_HEAD(&device->physical_node_list);
680 INIT_LIST_HEAD(&device->del_list);
681 mutex_init(&device->physical_node_lock);
682
683 mutex_lock(&acpi_device_lock);
684
685 acpi_device_bus_id = acpi_device_bus_id_match(acpi_device_hid(device));
686 if (acpi_device_bus_id) {
687 result = acpi_device_set_name(device, acpi_device_bus_id);
688 if (result)
689 goto err_unlock;
690 } else {
691 acpi_device_bus_id = kzalloc(sizeof(*acpi_device_bus_id),
692 GFP_KERNEL);
693 if (!acpi_device_bus_id) {
694 result = -ENOMEM;
695 goto err_unlock;
696 }
697 acpi_device_bus_id->bus_id =
698 kstrdup_const(acpi_device_hid(device), GFP_KERNEL);
699 if (!acpi_device_bus_id->bus_id) {
700 kfree(acpi_device_bus_id);
701 result = -ENOMEM;
702 goto err_unlock;
703 }
704
705 ida_init(&acpi_device_bus_id->instance_ida);
706
707 result = acpi_device_set_name(device, acpi_device_bus_id);
708 if (result) {
709 kfree_const(acpi_device_bus_id->bus_id);
710 kfree(acpi_device_bus_id);
711 goto err_unlock;
712 }
713
714 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
715 }
716
717 if (device->parent)
718 list_add_tail(&device->node, &device->parent->children);
719
720 if (device->wakeup.flags.valid)
721 list_add_tail(&device->wakeup_list, &acpi_wakeup_device_list);
722 mutex_unlock(&acpi_device_lock);
723
724 if (device->parent)
725 device->dev.parent = &device->parent->dev;
726 device->dev.bus = &acpi_bus_type;
727 device->dev.release = release;
728 result = device_add(&device->dev);
729 if (result) {
730 dev_err(&device->dev, "Error registering device\n");
731 goto err;
732 }
733
734 result = acpi_device_setup_files(device);
735 if (result)
736 printk(KERN_ERR PREFIX "Error creating sysfs interface for device %s\n",
737 dev_name(&device->dev));
738
739 return 0;
740
741 err:
742 mutex_lock(&acpi_device_lock);
743 if (device->parent)
744 list_del(&device->node);
745 list_del(&device->wakeup_list);
746
747 err_unlock:
748 mutex_unlock(&acpi_device_lock);
749
750 acpi_detach_data(device->handle, acpi_scan_drop_device);
751 return result;
752}
753
754/* --------------------------------------------------------------------------
755 Device Enumeration
756 -------------------------------------------------------------------------- */
757static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
758{
759 struct acpi_device *device = NULL;
760 acpi_status status;
761
762 /*
763 * Fixed hardware devices do not appear in the namespace and do not
764 * have handles, but we fabricate acpi_devices for them, so we have
765 * to deal with them specially.
766 */
767 if (!handle)
768 return acpi_root;
769
770 do {
771 status = acpi_get_parent(handle, &handle);
772 if (ACPI_FAILURE(status))
773 return status == AE_NULL_ENTRY ? NULL : acpi_root;
774 } while (acpi_bus_get_device(handle, &device));
775 return device;
776}
777
778acpi_status
779acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
780{
781 acpi_status status;
782 acpi_handle tmp;
783 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
784 union acpi_object *obj;
785
786 status = acpi_get_handle(handle, "_EJD", &tmp);
787 if (ACPI_FAILURE(status))
788 return status;
789
790 status = acpi_evaluate_object(handle, "_EJD", NULL, &buffer);
791 if (ACPI_SUCCESS(status)) {
792 obj = buffer.pointer;
793 status = acpi_get_handle(ACPI_ROOT_OBJECT, obj->string.pointer,
794 ejd);
795 kfree(buffer.pointer);
796 }
797 return status;
798}
799EXPORT_SYMBOL_GPL(acpi_bus_get_ejd);
800
801static int acpi_bus_extract_wakeup_device_power_package(struct acpi_device *dev)
802{
803 acpi_handle handle = dev->handle;
804 struct acpi_device_wakeup *wakeup = &dev->wakeup;
805 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
806 union acpi_object *package = NULL;
807 union acpi_object *element = NULL;
808 acpi_status status;
809 int err = -ENODATA;
810
811 INIT_LIST_HEAD(&wakeup->resources);
812
813 /* _PRW */
814 status = acpi_evaluate_object(handle, "_PRW", NULL, &buffer);
815 if (ACPI_FAILURE(status)) {
816 ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PRW"));
817 return err;
818 }
819
820 package = (union acpi_object *)buffer.pointer;
821
822 if (!package || package->package.count < 2)
823 goto out;
824
825 element = &(package->package.elements[0]);
826 if (!element)
827 goto out;
828
829 if (element->type == ACPI_TYPE_PACKAGE) {
830 if ((element->package.count < 2) ||
831 (element->package.elements[0].type !=
832 ACPI_TYPE_LOCAL_REFERENCE)
833 || (element->package.elements[1].type != ACPI_TYPE_INTEGER))
834 goto out;
835
836 wakeup->gpe_device =
837 element->package.elements[0].reference.handle;
838 wakeup->gpe_number =
839 (u32) element->package.elements[1].integer.value;
840 } else if (element->type == ACPI_TYPE_INTEGER) {
841 wakeup->gpe_device = NULL;
842 wakeup->gpe_number = element->integer.value;
843 } else {
844 goto out;
845 }
846
847 element = &(package->package.elements[1]);
848 if (element->type != ACPI_TYPE_INTEGER)
849 goto out;
850
851 wakeup->sleep_state = element->integer.value;
852
853 err = acpi_extract_power_resources(package, 2, &wakeup->resources);
854 if (err)
855 goto out;
856
857 if (!list_empty(&wakeup->resources)) {
858 int sleep_state;
859
860 err = acpi_power_wakeup_list_init(&wakeup->resources,
861 &sleep_state);
862 if (err) {
863 acpi_handle_warn(handle, "Retrieving current states "
864 "of wakeup power resources failed\n");
865 acpi_power_resources_list_free(&wakeup->resources);
866 goto out;
867 }
868 if (sleep_state < wakeup->sleep_state) {
869 acpi_handle_warn(handle, "Overriding _PRW sleep state "
870 "(S%d) by S%d from power resources\n",
871 (int)wakeup->sleep_state, sleep_state);
872 wakeup->sleep_state = sleep_state;
873 }
874 }
875
876 out:
877 kfree(buffer.pointer);
878 return err;
879}
880
881static bool acpi_wakeup_gpe_init(struct acpi_device *device)
882{
883 static const struct acpi_device_id button_device_ids[] = {
884 {"PNP0C0C", 0}, /* Power button */
885 {"PNP0C0D", 0}, /* Lid */
886 {"PNP0C0E", 0}, /* Sleep button */
887 {"", 0},
888 };
889 struct acpi_device_wakeup *wakeup = &device->wakeup;
890 acpi_status status;
891
892 wakeup->flags.notifier_present = 0;
893
894 /* Power button, Lid switch always enable wakeup */
895 if (!acpi_match_device_ids(device, button_device_ids)) {
896 if (!acpi_match_device_ids(device, &button_device_ids[1])) {
897 /* Do not use Lid/sleep button for S5 wakeup */
898 if (wakeup->sleep_state == ACPI_STATE_S5)
899 wakeup->sleep_state = ACPI_STATE_S4;
900 }
901 acpi_mark_gpe_for_wake(wakeup->gpe_device, wakeup->gpe_number);
902 device_set_wakeup_capable(&device->dev, true);
903 return true;
904 }
905
906 status = acpi_setup_gpe_for_wake(device->handle, wakeup->gpe_device,
907 wakeup->gpe_number);
908 return ACPI_SUCCESS(status);
909}
910
911static void acpi_bus_get_wakeup_device_flags(struct acpi_device *device)
912{
913 int err;
914
915 /* Presence of _PRW indicates wake capable */
916 if (!acpi_has_method(device->handle, "_PRW"))
917 return;
918
919 err = acpi_bus_extract_wakeup_device_power_package(device);
920 if (err) {
921 dev_err(&device->dev, "_PRW evaluation error: %d\n", err);
922 return;
923 }
924
925 device->wakeup.flags.valid = acpi_wakeup_gpe_init(device);
926 device->wakeup.prepare_count = 0;
927 /*
928 * Call _PSW/_DSW object to disable its ability to wake the sleeping
929 * system for the ACPI device with the _PRW object.
930 * The _PSW object is deprecated in ACPI 3.0 and is replaced by _DSW.
931 * So it is necessary to call _DSW object first. Only when it is not
932 * present will the _PSW object be used.
933 */
934 err = acpi_device_sleep_wake(device, 0, 0, 0);
935 if (err)
936 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
937 "error in _DSW or _PSW evaluation\n"));
938}
939
940static void acpi_bus_init_power_state(struct acpi_device *device, int state)
941{
942 struct acpi_device_power_state *ps = &device->power.states[state];
943 char pathname[5] = { '_', 'P', 'R', '0' + state, '\0' };
944 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
945 acpi_status status;
946
947 INIT_LIST_HEAD(&ps->resources);
948
949 /* Evaluate "_PRx" to get referenced power resources */
950 status = acpi_evaluate_object(device->handle, pathname, NULL, &buffer);
951 if (ACPI_SUCCESS(status)) {
952 union acpi_object *package = buffer.pointer;
953
954 if (buffer.length && package
955 && package->type == ACPI_TYPE_PACKAGE
956 && package->package.count)
957 acpi_extract_power_resources(package, 0, &ps->resources);
958
959 ACPI_FREE(buffer.pointer);
960 }
961
962 /* Evaluate "_PSx" to see if we can do explicit sets */
963 pathname[2] = 'S';
964 if (acpi_has_method(device->handle, pathname))
965 ps->flags.explicit_set = 1;
966
967 /* State is valid if there are means to put the device into it. */
968 if (!list_empty(&ps->resources) || ps->flags.explicit_set)
969 ps->flags.valid = 1;
970
971 ps->power = -1; /* Unknown - driver assigned */
972 ps->latency = -1; /* Unknown - driver assigned */
973}
974
975static void acpi_bus_get_power_flags(struct acpi_device *device)
976{
977 u32 i;
978
979 /* Presence of _PS0|_PR0 indicates 'power manageable' */
980 if (!acpi_has_method(device->handle, "_PS0") &&
981 !acpi_has_method(device->handle, "_PR0"))
982 return;
983
984 device->flags.power_manageable = 1;
985
986 /*
987 * Power Management Flags
988 */
989 if (acpi_has_method(device->handle, "_PSC"))
990 device->power.flags.explicit_get = 1;
991
992 if (acpi_has_method(device->handle, "_IRC"))
993 device->power.flags.inrush_current = 1;
994
995 if (acpi_has_method(device->handle, "_DSW"))
996 device->power.flags.dsw_present = 1;
997
998 /*
999 * Enumerate supported power management states
1000 */
1001 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++)
1002 acpi_bus_init_power_state(device, i);
1003
1004 INIT_LIST_HEAD(&device->power.states[ACPI_STATE_D3_COLD].resources);
1005
1006 /* Set the defaults for D0 and D3hot (always supported). */
1007 device->power.states[ACPI_STATE_D0].flags.valid = 1;
1008 device->power.states[ACPI_STATE_D0].power = 100;
1009 device->power.states[ACPI_STATE_D3_HOT].flags.valid = 1;
1010
1011 /*
1012 * Use power resources only if the D0 list of them is populated, because
1013 * some platforms may provide _PR3 only to indicate D3cold support and
1014 * in those cases the power resources list returned by it may be bogus.
1015 */
1016 if (!list_empty(&device->power.states[ACPI_STATE_D0].resources)) {
1017 device->power.flags.power_resources = 1;
1018 /*
1019 * D3cold is supported if the D3hot list of power resources is
1020 * not empty.
1021 */
1022 if (!list_empty(&device->power.states[ACPI_STATE_D3_HOT].resources))
1023 device->power.states[ACPI_STATE_D3_COLD].flags.valid = 1;
1024 }
1025
1026 if (acpi_bus_init_power(device))
1027 device->flags.power_manageable = 0;
1028}
1029
1030static void acpi_bus_get_flags(struct acpi_device *device)
1031{
1032 /* Presence of _STA indicates 'dynamic_status' */
1033 if (acpi_has_method(device->handle, "_STA"))
1034 device->flags.dynamic_status = 1;
1035
1036 /* Presence of _RMV indicates 'removable' */
1037 if (acpi_has_method(device->handle, "_RMV"))
1038 device->flags.removable = 1;
1039
1040 /* Presence of _EJD|_EJ0 indicates 'ejectable' */
1041 if (acpi_has_method(device->handle, "_EJD") ||
1042 acpi_has_method(device->handle, "_EJ0"))
1043 device->flags.ejectable = 1;
1044}
1045
1046static void acpi_device_get_busid(struct acpi_device *device)
1047{
1048 char bus_id[5] = { '?', 0 };
1049 struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
1050 int i = 0;
1051
1052 /*
1053 * Bus ID
1054 * ------
1055 * The device's Bus ID is simply the object name.
1056 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
1057 */
1058 if (ACPI_IS_ROOT_DEVICE(device)) {
1059 strcpy(device->pnp.bus_id, "ACPI");
1060 return;
1061 }
1062
1063 switch (device->device_type) {
1064 case ACPI_BUS_TYPE_POWER_BUTTON:
1065 strcpy(device->pnp.bus_id, "PWRF");
1066 break;
1067 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1068 strcpy(device->pnp.bus_id, "SLPF");
1069 break;
1070 case ACPI_BUS_TYPE_ECDT_EC:
1071 strcpy(device->pnp.bus_id, "ECDT");
1072 break;
1073 default:
1074 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
1075 /* Clean up trailing underscores (if any) */
1076 for (i = 3; i > 1; i--) {
1077 if (bus_id[i] == '_')
1078 bus_id[i] = '\0';
1079 else
1080 break;
1081 }
1082 strcpy(device->pnp.bus_id, bus_id);
1083 break;
1084 }
1085}
1086
1087/*
1088 * acpi_ata_match - see if an acpi object is an ATA device
1089 *
1090 * If an acpi object has one of the ACPI ATA methods defined,
1091 * then we can safely call it an ATA device.
1092 */
1093bool acpi_ata_match(acpi_handle handle)
1094{
1095 return acpi_has_method(handle, "_GTF") ||
1096 acpi_has_method(handle, "_GTM") ||
1097 acpi_has_method(handle, "_STM") ||
1098 acpi_has_method(handle, "_SDD");
1099}
1100
1101/*
1102 * acpi_bay_match - see if an acpi object is an ejectable driver bay
1103 *
1104 * If an acpi object is ejectable and has one of the ACPI ATA methods defined,
1105 * then we can safely call it an ejectable drive bay
1106 */
1107bool acpi_bay_match(acpi_handle handle)
1108{
1109 acpi_handle phandle;
1110
1111 if (!acpi_has_method(handle, "_EJ0"))
1112 return false;
1113 if (acpi_ata_match(handle))
1114 return true;
1115 if (ACPI_FAILURE(acpi_get_parent(handle, &phandle)))
1116 return false;
1117
1118 return acpi_ata_match(phandle);
1119}
1120
1121bool acpi_device_is_battery(struct acpi_device *adev)
1122{
1123 struct acpi_hardware_id *hwid;
1124
1125 list_for_each_entry(hwid, &adev->pnp.ids, list)
1126 if (!strcmp("PNP0C0A", hwid->id))
1127 return true;
1128
1129 return false;
1130}
1131
1132static bool is_ejectable_bay(struct acpi_device *adev)
1133{
1134 acpi_handle handle = adev->handle;
1135
1136 if (acpi_has_method(handle, "_EJ0") && acpi_device_is_battery(adev))
1137 return true;
1138
1139 return acpi_bay_match(handle);
1140}
1141
1142/*
1143 * acpi_dock_match - see if an acpi object has a _DCK method
1144 */
1145bool acpi_dock_match(acpi_handle handle)
1146{
1147 return acpi_has_method(handle, "_DCK");
1148}
1149
1150static acpi_status
1151acpi_backlight_cap_match(acpi_handle handle, u32 level, void *context,
1152 void **return_value)
1153{
1154 long *cap = context;
1155
1156 if (acpi_has_method(handle, "_BCM") &&
1157 acpi_has_method(handle, "_BCL")) {
1158 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found generic backlight "
1159 "support\n"));
1160 *cap |= ACPI_VIDEO_BACKLIGHT;
1161 /* We have backlight support, no need to scan further */
1162 return AE_CTRL_TERMINATE;
1163 }
1164 return 0;
1165}
1166
1167/* Returns true if the ACPI object is a video device which can be
1168 * handled by video.ko.
1169 * The device will get a Linux specific CID added in scan.c to
1170 * identify the device as an ACPI graphics device
1171 * Be aware that the graphics device may not be physically present
1172 * Use acpi_video_get_capabilities() to detect general ACPI video
1173 * capabilities of present cards
1174 */
1175long acpi_is_video_device(acpi_handle handle)
1176{
1177 long video_caps = 0;
1178
1179 /* Is this device able to support video switching ? */
1180 if (acpi_has_method(handle, "_DOD") || acpi_has_method(handle, "_DOS"))
1181 video_caps |= ACPI_VIDEO_OUTPUT_SWITCHING;
1182
1183 /* Is this device able to retrieve a video ROM ? */
1184 if (acpi_has_method(handle, "_ROM"))
1185 video_caps |= ACPI_VIDEO_ROM_AVAILABLE;
1186
1187 /* Is this device able to configure which video head to be POSTed ? */
1188 if (acpi_has_method(handle, "_VPO") &&
1189 acpi_has_method(handle, "_GPD") &&
1190 acpi_has_method(handle, "_SPD"))
1191 video_caps |= ACPI_VIDEO_DEVICE_POSTING;
1192
1193 /* Only check for backlight functionality if one of the above hit. */
1194 if (video_caps)
1195 acpi_walk_namespace(ACPI_TYPE_DEVICE, handle,
1196 ACPI_UINT32_MAX, acpi_backlight_cap_match, NULL,
1197 &video_caps, NULL);
1198
1199 return video_caps;
1200}
1201EXPORT_SYMBOL(acpi_is_video_device);
1202
1203const char *acpi_device_hid(struct acpi_device *device)
1204{
1205 struct acpi_hardware_id *hid;
1206
1207 if (list_empty(&device->pnp.ids))
1208 return dummy_hid;
1209
1210 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
1211 return hid->id;
1212}
1213EXPORT_SYMBOL(acpi_device_hid);
1214
1215static void acpi_add_id(struct acpi_device_pnp *pnp, const char *dev_id)
1216{
1217 struct acpi_hardware_id *id;
1218
1219 id = kmalloc(sizeof(*id), GFP_KERNEL);
1220 if (!id)
1221 return;
1222
1223 id->id = kstrdup_const(dev_id, GFP_KERNEL);
1224 if (!id->id) {
1225 kfree(id);
1226 return;
1227 }
1228
1229 list_add_tail(&id->list, &pnp->ids);
1230 pnp->type.hardware_id = 1;
1231}
1232
1233/*
1234 * Old IBM workstations have a DSDT bug wherein the SMBus object
1235 * lacks the SMBUS01 HID and the methods do not have the necessary "_"
1236 * prefix. Work around this.
1237 */
1238static bool acpi_ibm_smbus_match(acpi_handle handle)
1239{
1240 char node_name[ACPI_PATH_SEGMENT_LENGTH];
1241 struct acpi_buffer path = { sizeof(node_name), node_name };
1242
1243 if (!dmi_name_in_vendors("IBM"))
1244 return false;
1245
1246 /* Look for SMBS object */
1247 if (ACPI_FAILURE(acpi_get_name(handle, ACPI_SINGLE_NAME, &path)) ||
1248 strcmp("SMBS", path.pointer))
1249 return false;
1250
1251 /* Does it have the necessary (but misnamed) methods? */
1252 if (acpi_has_method(handle, "SBI") &&
1253 acpi_has_method(handle, "SBR") &&
1254 acpi_has_method(handle, "SBW"))
1255 return true;
1256
1257 return false;
1258}
1259
1260static bool acpi_object_is_system_bus(acpi_handle handle)
1261{
1262 acpi_handle tmp;
1263
1264 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_SB", &tmp)) &&
1265 tmp == handle)
1266 return true;
1267 if (ACPI_SUCCESS(acpi_get_handle(NULL, "\\_TZ", &tmp)) &&
1268 tmp == handle)
1269 return true;
1270
1271 return false;
1272}
1273
1274static void acpi_set_pnp_ids(acpi_handle handle, struct acpi_device_pnp *pnp,
1275 int device_type)
1276{
1277 acpi_status status;
1278 struct acpi_device_info *info;
1279 struct acpi_pnp_device_id_list *cid_list;
1280 int i;
1281
1282 switch (device_type) {
1283 case ACPI_BUS_TYPE_DEVICE:
1284 if (handle == ACPI_ROOT_OBJECT) {
1285 acpi_add_id(pnp, ACPI_SYSTEM_HID);
1286 break;
1287 }
1288
1289 status = acpi_get_object_info(handle, &info);
1290 if (ACPI_FAILURE(status)) {
1291 pr_err(PREFIX "%s: Error reading device info\n",
1292 __func__);
1293 return;
1294 }
1295
1296 if (info->valid & ACPI_VALID_HID) {
1297 acpi_add_id(pnp, info->hardware_id.string);
1298 pnp->type.platform_id = 1;
1299 }
1300 if (info->valid & ACPI_VALID_CID) {
1301 cid_list = &info->compatible_id_list;
1302 for (i = 0; i < cid_list->count; i++)
1303 acpi_add_id(pnp, cid_list->ids[i].string);
1304 }
1305 if (info->valid & ACPI_VALID_ADR) {
1306 pnp->bus_address = info->address;
1307 pnp->type.bus_address = 1;
1308 }
1309 if (info->valid & ACPI_VALID_UID)
1310 pnp->unique_id = kstrdup(info->unique_id.string,
1311 GFP_KERNEL);
1312 if (info->valid & ACPI_VALID_CLS)
1313 acpi_add_id(pnp, info->class_code.string);
1314
1315 kfree(info);
1316
1317 /*
1318 * Some devices don't reliably have _HIDs & _CIDs, so add
1319 * synthetic HIDs to make sure drivers can find them.
1320 */
1321 if (acpi_is_video_device(handle))
1322 acpi_add_id(pnp, ACPI_VIDEO_HID);
1323 else if (acpi_bay_match(handle))
1324 acpi_add_id(pnp, ACPI_BAY_HID);
1325 else if (acpi_dock_match(handle))
1326 acpi_add_id(pnp, ACPI_DOCK_HID);
1327 else if (acpi_ibm_smbus_match(handle))
1328 acpi_add_id(pnp, ACPI_SMBUS_IBM_HID);
1329 else if (list_empty(&pnp->ids) &&
1330 acpi_object_is_system_bus(handle)) {
1331 /* \_SB, \_TZ, LNXSYBUS */
1332 acpi_add_id(pnp, ACPI_BUS_HID);
1333 strcpy(pnp->device_name, ACPI_BUS_DEVICE_NAME);
1334 strcpy(pnp->device_class, ACPI_BUS_CLASS);
1335 }
1336
1337 break;
1338 case ACPI_BUS_TYPE_POWER:
1339 acpi_add_id(pnp, ACPI_POWER_HID);
1340 break;
1341 case ACPI_BUS_TYPE_PROCESSOR:
1342 acpi_add_id(pnp, ACPI_PROCESSOR_OBJECT_HID);
1343 break;
1344 case ACPI_BUS_TYPE_THERMAL:
1345 acpi_add_id(pnp, ACPI_THERMAL_HID);
1346 break;
1347 case ACPI_BUS_TYPE_POWER_BUTTON:
1348 acpi_add_id(pnp, ACPI_BUTTON_HID_POWERF);
1349 break;
1350 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1351 acpi_add_id(pnp, ACPI_BUTTON_HID_SLEEPF);
1352 break;
1353 case ACPI_BUS_TYPE_ECDT_EC:
1354 acpi_add_id(pnp, ACPI_ECDT_HID);
1355 break;
1356 }
1357}
1358
1359void acpi_free_pnp_ids(struct acpi_device_pnp *pnp)
1360{
1361 struct acpi_hardware_id *id, *tmp;
1362
1363 list_for_each_entry_safe(id, tmp, &pnp->ids, list) {
1364 kfree_const(id->id);
1365 kfree(id);
1366 }
1367 kfree(pnp->unique_id);
1368}
1369
1370/**
1371 * acpi_dma_supported - Check DMA support for the specified device.
1372 * @adev: The pointer to acpi device
1373 *
1374 * Return false if DMA is not supported. Otherwise, return true
1375 */
1376bool acpi_dma_supported(struct acpi_device *adev)
1377{
1378 if (!adev)
1379 return false;
1380
1381 if (adev->flags.cca_seen)
1382 return true;
1383
1384 /*
1385 * Per ACPI 6.0 sec 6.2.17, assume devices can do cache-coherent
1386 * DMA on "Intel platforms". Presumably that includes all x86 and
1387 * ia64, and other arches will set CONFIG_ACPI_CCA_REQUIRED=y.
1388 */
1389 if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
1390 return true;
1391
1392 return false;
1393}
1394
1395/**
1396 * acpi_get_dma_attr - Check the supported DMA attr for the specified device.
1397 * @adev: The pointer to acpi device
1398 *
1399 * Return enum dev_dma_attr.
1400 */
1401enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
1402{
1403 if (!acpi_dma_supported(adev))
1404 return DEV_DMA_NOT_SUPPORTED;
1405
1406 if (adev->flags.coherent_dma)
1407 return DEV_DMA_COHERENT;
1408 else
1409 return DEV_DMA_NON_COHERENT;
1410}
1411
1412/**
1413 * acpi_dma_get_range() - Get device DMA parameters.
1414 *
1415 * @dev: device to configure
1416 * @dma_addr: pointer device DMA address result
1417 * @offset: pointer to the DMA offset result
1418 * @size: pointer to DMA range size result
1419 *
1420 * Evaluate DMA regions and return respectively DMA region start, offset
1421 * and size in dma_addr, offset and size on parsing success; it does not
1422 * update the passed in values on failure.
1423 *
1424 * Return 0 on success, < 0 on failure.
1425 */
1426int acpi_dma_get_range(struct device *dev, u64 *dma_addr, u64 *offset,
1427 u64 *size)
1428{
1429 struct acpi_device *adev;
1430 LIST_HEAD(list);
1431 struct resource_entry *rentry;
1432 int ret;
1433 struct device *dma_dev = dev;
1434 u64 len, dma_start = U64_MAX, dma_end = 0, dma_offset = 0;
1435
1436 /*
1437 * Walk the device tree chasing an ACPI companion with a _DMA
1438 * object while we go. Stop if we find a device with an ACPI
1439 * companion containing a _DMA method.
1440 */
1441 do {
1442 adev = ACPI_COMPANION(dma_dev);
1443 if (adev && acpi_has_method(adev->handle, METHOD_NAME__DMA))
1444 break;
1445
1446 dma_dev = dma_dev->parent;
1447 } while (dma_dev);
1448
1449 if (!dma_dev)
1450 return -ENODEV;
1451
1452 if (!acpi_has_method(adev->handle, METHOD_NAME__CRS)) {
1453 acpi_handle_warn(adev->handle, "_DMA is valid only if _CRS is present\n");
1454 return -EINVAL;
1455 }
1456
1457 ret = acpi_dev_get_dma_resources(adev, &list);
1458 if (ret > 0) {
1459 list_for_each_entry(rentry, &list, node) {
1460 if (dma_offset && rentry->offset != dma_offset) {
1461 ret = -EINVAL;
1462 dev_warn(dma_dev, "Can't handle multiple windows with different offsets\n");
1463 goto out;
1464 }
1465 dma_offset = rentry->offset;
1466
1467 /* Take lower and upper limits */
1468 if (rentry->res->start < dma_start)
1469 dma_start = rentry->res->start;
1470 if (rentry->res->end > dma_end)
1471 dma_end = rentry->res->end;
1472 }
1473
1474 if (dma_start >= dma_end) {
1475 ret = -EINVAL;
1476 dev_dbg(dma_dev, "Invalid DMA regions configuration\n");
1477 goto out;
1478 }
1479
1480 *dma_addr = dma_start - dma_offset;
1481 len = dma_end - dma_start;
1482 *size = max(len, len + 1);
1483 *offset = dma_offset;
1484 }
1485 out:
1486 acpi_dev_free_resource_list(&list);
1487
1488 return ret >= 0 ? 0 : ret;
1489}
1490
1491/**
1492 * acpi_dma_configure - Set-up DMA configuration for the device.
1493 * @dev: The pointer to the device
1494 * @attr: device dma attributes
1495 */
1496int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr)
1497{
1498 const struct iommu_ops *iommu;
1499 u64 dma_addr = 0, size = 0;
1500
1501 if (attr == DEV_DMA_NOT_SUPPORTED) {
1502 set_dma_ops(dev, &dma_dummy_ops);
1503 return 0;
1504 }
1505
1506 iort_dma_setup(dev, &dma_addr, &size);
1507
1508 iommu = iort_iommu_configure(dev);
1509 if (IS_ERR(iommu) && PTR_ERR(iommu) == -EPROBE_DEFER)
1510 return -EPROBE_DEFER;
1511
1512 arch_setup_dma_ops(dev, dma_addr, size,
1513 iommu, attr == DEV_DMA_COHERENT);
1514
1515 return 0;
1516}
1517EXPORT_SYMBOL_GPL(acpi_dma_configure);
1518
1519static void acpi_init_coherency(struct acpi_device *adev)
1520{
1521 unsigned long long cca = 0;
1522 acpi_status status;
1523 struct acpi_device *parent = adev->parent;
1524
1525 if (parent && parent->flags.cca_seen) {
1526 /*
1527 * From ACPI spec, OSPM will ignore _CCA if an ancestor
1528 * already saw one.
1529 */
1530 adev->flags.cca_seen = 1;
1531 cca = parent->flags.coherent_dma;
1532 } else {
1533 status = acpi_evaluate_integer(adev->handle, "_CCA",
1534 NULL, &cca);
1535 if (ACPI_SUCCESS(status))
1536 adev->flags.cca_seen = 1;
1537 else if (!IS_ENABLED(CONFIG_ACPI_CCA_REQUIRED))
1538 /*
1539 * If architecture does not specify that _CCA is
1540 * required for DMA-able devices (e.g. x86),
1541 * we default to _CCA=1.
1542 */
1543 cca = 1;
1544 else
1545 acpi_handle_debug(adev->handle,
1546 "ACPI device is missing _CCA.\n");
1547 }
1548
1549 adev->flags.coherent_dma = cca;
1550}
1551
1552static int acpi_check_serial_bus_slave(struct acpi_resource *ares, void *data)
1553{
1554 bool *is_serial_bus_slave_p = data;
1555
1556 if (ares->type != ACPI_RESOURCE_TYPE_SERIAL_BUS)
1557 return 1;
1558
1559 *is_serial_bus_slave_p = true;
1560
1561 /* no need to do more checking */
1562 return -1;
1563}
1564
1565static bool acpi_is_indirect_io_slave(struct acpi_device *device)
1566{
1567 struct acpi_device *parent = device->parent;
1568 static const struct acpi_device_id indirect_io_hosts[] = {
1569 {"HISI0191", 0},
1570 {}
1571 };
1572
1573 return parent && !acpi_match_device_ids(parent, indirect_io_hosts);
1574}
1575
1576static bool acpi_device_enumeration_by_parent(struct acpi_device *device)
1577{
1578 struct list_head resource_list;
1579 bool is_serial_bus_slave = false;
1580 /*
1581 * These devices have multiple I2cSerialBus resources and an i2c-client
1582 * must be instantiated for each, each with its own i2c_device_id.
1583 * Normally we only instantiate an i2c-client for the first resource,
1584 * using the ACPI HID as id. These special cases are handled by the
1585 * drivers/platform/x86/i2c-multi-instantiate.c driver, which knows
1586 * which i2c_device_id to use for each resource.
1587 */
1588 static const struct acpi_device_id i2c_multi_instantiate_ids[] = {
1589 {"BSG1160", },
1590 {"BSG2150", },
1591 {"INT33FE", },
1592 {"INT3515", },
1593 {}
1594 };
1595
1596 if (acpi_is_indirect_io_slave(device))
1597 return true;
1598
1599 /* Macs use device properties in lieu of _CRS resources */
1600 if (x86_apple_machine &&
1601 (fwnode_property_present(&device->fwnode, "spiSclkPeriod") ||
1602 fwnode_property_present(&device->fwnode, "i2cAddress") ||
1603 fwnode_property_present(&device->fwnode, "baud")))
1604 return true;
1605
1606 /* Instantiate a pdev for the i2c-multi-instantiate drv to bind to */
1607 if (!acpi_match_device_ids(device, i2c_multi_instantiate_ids))
1608 return false;
1609
1610 INIT_LIST_HEAD(&resource_list);
1611 acpi_dev_get_resources(device, &resource_list,
1612 acpi_check_serial_bus_slave,
1613 &is_serial_bus_slave);
1614 acpi_dev_free_resource_list(&resource_list);
1615
1616 return is_serial_bus_slave;
1617}
1618
1619void acpi_init_device_object(struct acpi_device *device, acpi_handle handle,
1620 int type, unsigned long long sta)
1621{
1622 INIT_LIST_HEAD(&device->pnp.ids);
1623 device->device_type = type;
1624 device->handle = handle;
1625 device->parent = acpi_bus_get_parent(handle);
1626 device->fwnode.ops = &acpi_device_fwnode_ops;
1627 acpi_set_device_status(device, sta);
1628 acpi_device_get_busid(device);
1629 acpi_set_pnp_ids(handle, &device->pnp, type);
1630 acpi_init_properties(device);
1631 acpi_bus_get_flags(device);
1632 device->flags.match_driver = false;
1633 device->flags.initialized = true;
1634 device->flags.enumeration_by_parent =
1635 acpi_device_enumeration_by_parent(device);
1636 acpi_device_clear_enumerated(device);
1637 device_initialize(&device->dev);
1638 dev_set_uevent_suppress(&device->dev, true);
1639 acpi_init_coherency(device);
1640 /* Assume there are unmet deps until acpi_device_dep_initialize() runs */
1641 device->dep_unmet = 1;
1642}
1643
1644void acpi_device_add_finalize(struct acpi_device *device)
1645{
1646 dev_set_uevent_suppress(&device->dev, false);
1647 kobject_uevent(&device->dev.kobj, KOBJ_ADD);
1648}
1649
1650static int acpi_add_single_object(struct acpi_device **child,
1651 acpi_handle handle, int type,
1652 unsigned long long sta)
1653{
1654 int result;
1655 struct acpi_device *device;
1656 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1657
1658 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
1659 if (!device) {
1660 printk(KERN_ERR PREFIX "Memory allocation error\n");
1661 return -ENOMEM;
1662 }
1663
1664 acpi_init_device_object(device, handle, type, sta);
1665 /*
1666 * For ACPI_BUS_TYPE_DEVICE getting the status is delayed till here so
1667 * that we can call acpi_bus_get_status() and use its quirk handling.
1668 * Note this must be done before the get power-/wakeup_dev-flags calls.
1669 */
1670 if (type == ACPI_BUS_TYPE_DEVICE)
1671 if (acpi_bus_get_status(device) < 0)
1672 acpi_set_device_status(device, 0);
1673
1674 acpi_bus_get_power_flags(device);
1675 acpi_bus_get_wakeup_device_flags(device);
1676
1677 result = acpi_device_add(device, acpi_device_release);
1678 if (result) {
1679 acpi_device_release(&device->dev);
1680 return result;
1681 }
1682
1683 acpi_power_add_remove_device(device, true);
1684 acpi_device_add_finalize(device);
1685 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1686 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Added %s [%s] parent %s\n",
1687 dev_name(&device->dev), (char *) buffer.pointer,
1688 device->parent ? dev_name(&device->parent->dev) : "(null)"));
1689 kfree(buffer.pointer);
1690 *child = device;
1691 return 0;
1692}
1693
1694static acpi_status acpi_get_resource_memory(struct acpi_resource *ares,
1695 void *context)
1696{
1697 struct resource *res = context;
1698
1699 if (acpi_dev_resource_memory(ares, res))
1700 return AE_CTRL_TERMINATE;
1701
1702 return AE_OK;
1703}
1704
1705static bool acpi_device_should_be_hidden(acpi_handle handle)
1706{
1707 acpi_status status;
1708 struct resource res;
1709
1710 /* Check if it should ignore the UART device */
1711 if (!(spcr_uart_addr && acpi_has_method(handle, METHOD_NAME__CRS)))
1712 return false;
1713
1714 /*
1715 * The UART device described in SPCR table is assumed to have only one
1716 * memory resource present. So we only look for the first one here.
1717 */
1718 status = acpi_walk_resources(handle, METHOD_NAME__CRS,
1719 acpi_get_resource_memory, &res);
1720 if (ACPI_FAILURE(status) || res.start != spcr_uart_addr)
1721 return false;
1722
1723 acpi_handle_info(handle, "The UART device @%pa in SPCR table will be hidden\n",
1724 &res.start);
1725
1726 return true;
1727}
1728
1729static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1730 unsigned long long *sta)
1731{
1732 acpi_status status;
1733 acpi_object_type acpi_type;
1734
1735 status = acpi_get_type(handle, &acpi_type);
1736 if (ACPI_FAILURE(status))
1737 return -ENODEV;
1738
1739 switch (acpi_type) {
1740 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1741 case ACPI_TYPE_DEVICE:
1742 if (acpi_device_should_be_hidden(handle))
1743 return -ENODEV;
1744
1745 *type = ACPI_BUS_TYPE_DEVICE;
1746 /*
1747 * acpi_add_single_object updates this once we've an acpi_device
1748 * so that acpi_bus_get_status' quirk handling can be used.
1749 */
1750 *sta = ACPI_STA_DEFAULT;
1751 break;
1752 case ACPI_TYPE_PROCESSOR:
1753 *type = ACPI_BUS_TYPE_PROCESSOR;
1754 status = acpi_bus_get_status_handle(handle, sta);
1755 if (ACPI_FAILURE(status))
1756 return -ENODEV;
1757 break;
1758 case ACPI_TYPE_THERMAL:
1759 *type = ACPI_BUS_TYPE_THERMAL;
1760 *sta = ACPI_STA_DEFAULT;
1761 break;
1762 case ACPI_TYPE_POWER:
1763 *type = ACPI_BUS_TYPE_POWER;
1764 *sta = ACPI_STA_DEFAULT;
1765 break;
1766 default:
1767 return -ENODEV;
1768 }
1769
1770 return 0;
1771}
1772
1773bool acpi_device_is_present(const struct acpi_device *adev)
1774{
1775 return adev->status.present || adev->status.functional;
1776}
1777
1778static bool acpi_scan_handler_matching(struct acpi_scan_handler *handler,
1779 const char *idstr,
1780 const struct acpi_device_id **matchid)
1781{
1782 const struct acpi_device_id *devid;
1783
1784 if (handler->match)
1785 return handler->match(idstr, matchid);
1786
1787 for (devid = handler->ids; devid->id[0]; devid++)
1788 if (!strcmp((char *)devid->id, idstr)) {
1789 if (matchid)
1790 *matchid = devid;
1791
1792 return true;
1793 }
1794
1795 return false;
1796}
1797
1798static struct acpi_scan_handler *acpi_scan_match_handler(const char *idstr,
1799 const struct acpi_device_id **matchid)
1800{
1801 struct acpi_scan_handler *handler;
1802
1803 list_for_each_entry(handler, &acpi_scan_handlers_list, list_node)
1804 if (acpi_scan_handler_matching(handler, idstr, matchid))
1805 return handler;
1806
1807 return NULL;
1808}
1809
1810void acpi_scan_hotplug_enabled(struct acpi_hotplug_profile *hotplug, bool val)
1811{
1812 if (!!hotplug->enabled == !!val)
1813 return;
1814
1815 mutex_lock(&acpi_scan_lock);
1816
1817 hotplug->enabled = val;
1818
1819 mutex_unlock(&acpi_scan_lock);
1820}
1821
1822static void acpi_scan_init_hotplug(struct acpi_device *adev)
1823{
1824 struct acpi_hardware_id *hwid;
1825
1826 if (acpi_dock_match(adev->handle) || is_ejectable_bay(adev)) {
1827 acpi_dock_add(adev);
1828 return;
1829 }
1830 list_for_each_entry(hwid, &adev->pnp.ids, list) {
1831 struct acpi_scan_handler *handler;
1832
1833 handler = acpi_scan_match_handler(hwid->id, NULL);
1834 if (handler) {
1835 adev->flags.hotplug_notify = true;
1836 break;
1837 }
1838 }
1839}
1840
1841static void acpi_device_dep_initialize(struct acpi_device *adev)
1842{
1843 struct acpi_dep_data *dep;
1844 struct acpi_handle_list dep_devices;
1845 acpi_status status;
1846 int i;
1847
1848 adev->dep_unmet = 0;
1849
1850 if (!acpi_has_method(adev->handle, "_DEP"))
1851 return;
1852
1853 status = acpi_evaluate_reference(adev->handle, "_DEP", NULL,
1854 &dep_devices);
1855 if (ACPI_FAILURE(status)) {
1856 dev_dbg(&adev->dev, "Failed to evaluate _DEP.\n");
1857 return;
1858 }
1859
1860 for (i = 0; i < dep_devices.count; i++) {
1861 struct acpi_device_info *info;
1862 int skip;
1863
1864 status = acpi_get_object_info(dep_devices.handles[i], &info);
1865 if (ACPI_FAILURE(status)) {
1866 dev_dbg(&adev->dev, "Error reading _DEP device info\n");
1867 continue;
1868 }
1869
1870 /*
1871 * Skip the dependency of Windows System Power
1872 * Management Controller
1873 */
1874 skip = info->valid & ACPI_VALID_HID &&
1875 !strcmp(info->hardware_id.string, "INT3396");
1876
1877 kfree(info);
1878
1879 if (skip)
1880 continue;
1881
1882 dep = kzalloc(sizeof(struct acpi_dep_data), GFP_KERNEL);
1883 if (!dep)
1884 return;
1885
1886 dep->master = dep_devices.handles[i];
1887 dep->slave = adev->handle;
1888 adev->dep_unmet++;
1889
1890 mutex_lock(&acpi_dep_list_lock);
1891 list_add_tail(&dep->node , &acpi_dep_list);
1892 mutex_unlock(&acpi_dep_list_lock);
1893 }
1894}
1895
1896static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl_not_used,
1897 void *not_used, void **return_value)
1898{
1899 struct acpi_device *device = NULL;
1900 int type;
1901 unsigned long long sta;
1902 int result;
1903
1904 acpi_bus_get_device(handle, &device);
1905 if (device)
1906 goto out;
1907
1908 result = acpi_bus_type_and_status(handle, &type, &sta);
1909 if (result)
1910 return AE_OK;
1911
1912 if (type == ACPI_BUS_TYPE_POWER) {
1913 acpi_add_power_resource(handle);
1914 return AE_OK;
1915 }
1916
1917 acpi_add_single_object(&device, handle, type, sta);
1918 if (!device)
1919 return AE_CTRL_DEPTH;
1920
1921 acpi_scan_init_hotplug(device);
1922 acpi_device_dep_initialize(device);
1923
1924 out:
1925 if (!*return_value)
1926 *return_value = device;
1927
1928 return AE_OK;
1929}
1930
1931static void acpi_default_enumeration(struct acpi_device *device)
1932{
1933	/*
1934	 * Do not enumerate devices with the enumeration_by_parent flag set, as
1935	 * they will be enumerated by their respective parents.
1936	 */
1937 if (!device->flags.enumeration_by_parent) {
1938 acpi_create_platform_device(device, NULL);
1939 acpi_device_set_enumerated(device);
1940 } else {
1941 blocking_notifier_call_chain(&acpi_reconfig_chain,
1942 ACPI_RECONFIG_DEVICE_ADD, device);
1943 }
1944}
1945
1946static const struct acpi_device_id generic_device_ids[] = {
1947 {ACPI_DT_NAMESPACE_HID, },
1948 {"", },
1949};
1950
1951static int acpi_generic_device_attach(struct acpi_device *adev,
1952 const struct acpi_device_id *not_used)
1953{
1954 /*
1955 * Since ACPI_DT_NAMESPACE_HID is the only ID handled here, the test
1956 * below can be unconditional.
1957 */
1958 if (adev->data.of_compatible)
1959 acpi_default_enumeration(adev);
1960
1961 return 1;
1962}
1963
1964static struct acpi_scan_handler generic_device_handler = {
1965 .ids = generic_device_ids,
1966 .attach = acpi_generic_device_attach,
1967};
1968
1969static int acpi_scan_attach_handler(struct acpi_device *device)
1970{
1971 struct acpi_hardware_id *hwid;
1972 int ret = 0;
1973
1974 list_for_each_entry(hwid, &device->pnp.ids, list) {
1975 const struct acpi_device_id *devid;
1976 struct acpi_scan_handler *handler;
1977
1978 handler = acpi_scan_match_handler(hwid->id, &devid);
1979 if (handler) {
1980 if (!handler->attach) {
1981 device->pnp.type.platform_id = 0;
1982 continue;
1983 }
1984 device->handler = handler;
1985 ret = handler->attach(device, devid);
1986 if (ret > 0)
1987 break;
1988
1989 device->handler = NULL;
1990 if (ret < 0)
1991 break;
1992 }
1993 }
1994
1995 return ret;
1996}
1997
1998static void acpi_bus_attach(struct acpi_device *device)
1999{
2000 struct acpi_device *child;
2001 acpi_handle ejd;
2002 int ret;
2003
2004 if (ACPI_SUCCESS(acpi_bus_get_ejd(device->handle, &ejd)))
2005 register_dock_dependent_device(device, ejd);
2006
2007 acpi_bus_get_status(device);
2008 /* Skip devices that are not present. */
2009 if (!acpi_device_is_present(device)) {
2010 device->flags.initialized = false;
2011 acpi_device_clear_enumerated(device);
2012 device->flags.power_manageable = 0;
2013 return;
2014 }
2015 if (device->handler)
2016 goto ok;
2017
2018 if (!device->flags.initialized) {
2019 device->flags.power_manageable =
2020 device->power.states[ACPI_STATE_D0].flags.valid;
2021 if (acpi_bus_init_power(device))
2022 device->flags.power_manageable = 0;
2023
2024 device->flags.initialized = true;
2025 } else if (device->flags.visited) {
2026 goto ok;
2027 }
2028
2029 ret = acpi_scan_attach_handler(device);
2030 if (ret < 0)
2031 return;
2032
2033 device->flags.match_driver = true;
2034 if (ret > 0 && !device->flags.enumeration_by_parent) {
2035 acpi_device_set_enumerated(device);
2036 goto ok;
2037 }
2038
2039 ret = device_attach(&device->dev);
2040 if (ret < 0)
2041 return;
2042
2043 if (device->pnp.type.platform_id || device->flags.enumeration_by_parent)
2044 acpi_default_enumeration(device);
2045 else
2046 acpi_device_set_enumerated(device);
2047
2048 ok:
2049 list_for_each_entry(child, &device->children, node)
2050 acpi_bus_attach(child);
2051
2052 if (device->handler && device->handler->hotplug.notify_online)
2053 device->handler->hotplug.notify_online(device);
2054}
2055
2056void acpi_walk_dep_device_list(acpi_handle handle)
2057{
2058 struct acpi_dep_data *dep, *tmp;
2059 struct acpi_device *adev;
2060
2061 mutex_lock(&acpi_dep_list_lock);
2062 list_for_each_entry_safe(dep, tmp, &acpi_dep_list, node) {
2063 if (dep->master == handle) {
2064 acpi_bus_get_device(dep->slave, &adev);
2065 if (!adev)
2066 continue;
2067
2068 adev->dep_unmet--;
2069 if (!adev->dep_unmet)
2070 acpi_bus_attach(adev);
2071 list_del(&dep->node);
2072 kfree(dep);
2073 }
2074 }
2075 mutex_unlock(&acpi_dep_list_lock);
2076}
2077EXPORT_SYMBOL_GPL(acpi_walk_dep_device_list);
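
/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * provides a resource other devices reference via _DEP is expected to call
 * acpi_walk_dep_device_list() once that resource is usable, so that the
 * dependent devices queued on acpi_dep_list get enumerated.  For instance,
 * assuming a hypothetical foo_probe():
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct acpi_device *adev = ACPI_COMPANION(&pdev->dev);
 *
 *		// ... bring the hardware up ...
 *
 *		if (adev)
 *			acpi_walk_dep_device_list(adev->handle);
 *
 *		return 0;
 *	}
 */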
2078
2079/**
2080 * acpi_bus_scan - Add ACPI device node objects in a given namespace scope.
2081 * @handle: Root of the namespace scope to scan.
2082 *
2083 * Scan the given ACPI namespace scope (typically one that has just been
2084 * hot-added) and create and register struct acpi_device objects for the
2085 * device nodes found in it.
2086 *
2087 * If no devices are found, -ENODEV is returned, but that does not necessarily
2088 * indicate a real error.  It simply means that no suitable ACPI objects were
2089 * found from which the kernel could create a device and bind a driver to it.
2090 *
2091 * Must be called under acpi_scan_lock.
2092 */
2093int acpi_bus_scan(acpi_handle handle)
2094{
2095 void *device = NULL;
2096
2097 if (ACPI_SUCCESS(acpi_bus_check_add(handle, 0, NULL, &device)))
2098 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
2099 acpi_bus_check_add, NULL, NULL, &device);
2100
2101 if (device) {
2102 acpi_bus_attach(device);
2103 return 0;
2104 }
2105 return -ENODEV;
2106}
2107EXPORT_SYMBOL(acpi_bus_scan);
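
/*
 * Usage sketch (illustrative only): callers outside the scan core, such as
 * hotplug notify handlers, are expected to hold acpi_scan_lock around the
 * rescan, with the handle of the hot-added scope coming from the caller:
 *
 *	acpi_scan_lock_acquire();
 *	error = acpi_bus_scan(handle);
 *	acpi_scan_lock_release();
 */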
2108
2109/**
2110 * acpi_bus_trim - Detach scan handlers and drivers from ACPI device objects.
2111 * @adev: Root of the ACPI namespace scope to walk.
2112 *
2113 * Must be called under acpi_scan_lock.
2114 */
2115void acpi_bus_trim(struct acpi_device *adev)
2116{
2117 struct acpi_scan_handler *handler = adev->handler;
2118 struct acpi_device *child;
2119
2120 list_for_each_entry_reverse(child, &adev->children, node)
2121 acpi_bus_trim(child);
2122
2123 adev->flags.match_driver = false;
2124 if (handler) {
2125 if (handler->detach)
2126 handler->detach(adev);
2127
2128 adev->handler = NULL;
2129 } else {
2130 device_release_driver(&adev->dev);
2131 }
2132 /*
2133 * Most likely, the device is going away, so put it into D3cold before
2134 * that.
2135 */
2136 acpi_device_set_power(adev, ACPI_STATE_D3_COLD);
2137 adev->flags.initialized = false;
2138 acpi_device_clear_enumerated(adev);
2139}
2140EXPORT_SYMBOL_GPL(acpi_bus_trim);
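
/*
 * Usage sketch (illustrative only): hot-removal paths typically trim the
 * scope before ejecting it, again under acpi_scan_lock, with adev being the
 * device object at the root of the scope to be removed:
 *
 *	acpi_scan_lock_acquire();
 *	acpi_bus_trim(adev);
 *	acpi_scan_lock_release();
 */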
2141
2142int acpi_bus_register_early_device(int type)
2143{
2144 struct acpi_device *device = NULL;
2145 int result;
2146
2147 result = acpi_add_single_object(&device, NULL,
2148 type, ACPI_STA_DEFAULT);
2149 if (result)
2150 return result;
2151
2152 device->flags.match_driver = true;
2153 return device_attach(&device->dev);
2154}
2155EXPORT_SYMBOL_GPL(acpi_bus_register_early_device);
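
/*
 * Usage sketch (illustrative only): early platform code can use this to make
 * a device available before the namespace scan.  For example, an embedded
 * controller described by the ECDT might be registered as
 *
 *	acpi_bus_register_early_device(ACPI_BUS_TYPE_ECDT_EC);
 *
 * where the ACPI_BUS_TYPE_ECDT_EC type value is assumed to be defined
 * elsewhere.
 */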
2156
2157static int acpi_bus_scan_fixed(void)
2158{
2159 int result = 0;
2160
2161 /*
2162 * Enumerate all fixed-feature devices.
2163 */
2164 if (!(acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON)) {
2165 struct acpi_device *device = NULL;
2166
2167 result = acpi_add_single_object(&device, NULL,
2168 ACPI_BUS_TYPE_POWER_BUTTON,
2169 ACPI_STA_DEFAULT);
2170 if (result)
2171 return result;
2172
2173 device->flags.match_driver = true;
2174 result = device_attach(&device->dev);
2175 if (result < 0)
2176 return result;
2177
2178 device_init_wakeup(&device->dev, true);
2179 }
2180
2181 if (!(acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON)) {
2182 struct acpi_device *device = NULL;
2183
2184 result = acpi_add_single_object(&device, NULL,
2185 ACPI_BUS_TYPE_SLEEP_BUTTON,
2186 ACPI_STA_DEFAULT);
2187 if (result)
2188 return result;
2189
2190 device->flags.match_driver = true;
2191 result = device_attach(&device->dev);
2192 }
2193
2194 return result < 0 ? result : 0;
2195}
2196
2197static void __init acpi_get_spcr_uart_addr(void)
2198{
2199 acpi_status status;
2200 struct acpi_table_spcr *spcr_ptr;
2201
2202 status = acpi_get_table(ACPI_SIG_SPCR, 0,
2203 (struct acpi_table_header **)&spcr_ptr);
2204 if (ACPI_SUCCESS(status))
2205 spcr_uart_addr = spcr_ptr->serial_port.address;
2206 else
2207 printk(KERN_WARNING PREFIX "STAO table present, but SPCR is missing\n");
2208}
2209
2210static bool acpi_scan_initialized;
2211
2212int __init acpi_scan_init(void)
2213{
2214 int result;
2215 acpi_status status;
2216 struct acpi_table_stao *stao_ptr;
2217
2218 acpi_pci_root_init();
2219 acpi_pci_link_init();
2220 acpi_processor_init();
2221	acpi_platform_init();
2222	acpi_lpss_init();
2223 acpi_apd_init();
2224 acpi_cmos_rtc_init();
2225 acpi_container_init();
2226 acpi_memory_hotplug_init();
2227 acpi_watchdog_init();
2228 acpi_pnp_init();
2229 acpi_int340x_thermal_init();
2230 acpi_amba_init();
2231 acpi_init_lpit();
2232
2233 acpi_scan_add_handler(&generic_device_handler);
2234
2235	/*
2236	 * If there is an STAO table, check whether the UART device described
2237	 * by the SPCR table needs to be ignored.
2238	 */
2239 status = acpi_get_table(ACPI_SIG_STAO, 0,
2240 (struct acpi_table_header **)&stao_ptr);
2241 if (ACPI_SUCCESS(status)) {
2242 if (stao_ptr->header.length > sizeof(struct acpi_table_stao))
2243			printk(KERN_INFO PREFIX "STAO Name List not yet supported.\n");
2244
2245 if (stao_ptr->ignore_uart)
2246 acpi_get_spcr_uart_addr();
2247 }
2248
2249 acpi_gpe_apply_masked_gpes();
2250 acpi_update_all_gpes();
2251
2252	/*
2253	 * Although __add_memory() is documented to require the
2254	 * device_hotplug_lock, taking it is not necessary here, because this
2255	 * is early code, when userspace and other code paths cannot trigger
2256	 * any hotplug/hotunplug operations yet.
2257	 */
2258	mutex_lock(&acpi_scan_lock);
2259 /*
2260 * Enumerate devices in the ACPI namespace.
2261 */
2262 result = acpi_bus_scan(ACPI_ROOT_OBJECT);
2263 if (result)
2264 goto out;
2265
2266 result = acpi_bus_get_device(ACPI_ROOT_OBJECT, &acpi_root);
2267 if (result)
2268 goto out;
2269
2270	/* Fixed-feature devices do not exist on HW-reduced platforms. */
2271 if (!acpi_gbl_reduced_hardware) {
2272 result = acpi_bus_scan_fixed();
2273 if (result) {
2274 acpi_detach_data(acpi_root->handle,
2275 acpi_scan_drop_device);
2276 acpi_device_del(acpi_root);
2277 put_device(&acpi_root->dev);
2278 goto out;
2279 }
2280 }
2281
2282 acpi_scan_initialized = true;
2283
2284 out:
2285 mutex_unlock(&acpi_scan_lock);
2286 return result;
2287}
2288
2289static struct acpi_probe_entry *ape;
2290static int acpi_probe_count;
2291static DEFINE_MUTEX(acpi_probe_mutex);
2292
2293static int __init acpi_match_madt(union acpi_subtable_headers *header,
2294				  const unsigned long end)
2295{
2296	if (!ape->subtable_valid || ape->subtable_valid(&header->common, ape))
2297		if (!ape->probe_subtbl(header, end))
2298 acpi_probe_count++;
2299
2300 return 0;
2301}
2302
2303int __init __acpi_probe_device_table(struct acpi_probe_entry *ap_head, int nr)
2304{
2305 int count = 0;
2306
2307 if (acpi_disabled)
2308 return 0;
2309
2310 mutex_lock(&acpi_probe_mutex);
2311 for (ape = ap_head; nr; ape++, nr--) {
2312		if (ACPI_COMPARE_NAMESEG(ACPI_SIG_MADT, ape->id)) {
2313			acpi_probe_count = 0;
2314 acpi_table_parse_madt(ape->type, acpi_match_madt, 0);
2315 count += acpi_probe_count;
2316 } else {
2317 int res;
2318 res = acpi_table_parse(ape->id, ape->probe_table);
2319 if (!res)
2320 count++;
2321 }
2322 }
2323 mutex_unlock(&acpi_probe_mutex);
2324
2325 return count;
2326}
2327
2328struct acpi_table_events_work {
2329 struct work_struct work;
2330 void *table;
2331 u32 event;
2332};
2333
2334static void acpi_table_events_fn(struct work_struct *work)
2335{
2336 struct acpi_table_events_work *tew;
2337
2338 tew = container_of(work, struct acpi_table_events_work, work);
2339
2340 if (tew->event == ACPI_TABLE_EVENT_LOAD) {
2341 acpi_scan_lock_acquire();
2342 acpi_bus_scan(ACPI_ROOT_OBJECT);
2343 acpi_scan_lock_release();
2344 }
2345
2346 kfree(tew);
2347}
2348
2349void acpi_scan_table_handler(u32 event, void *table, void *context)
2350{
2351 struct acpi_table_events_work *tew;
2352
2353 if (!acpi_scan_initialized)
2354 return;
2355
2356 if (event != ACPI_TABLE_EVENT_LOAD)
2357 return;
2358
2359 tew = kmalloc(sizeof(*tew), GFP_KERNEL);
2360 if (!tew)
2361 return;
2362
2363 INIT_WORK(&tew->work, acpi_table_events_fn);
2364 tew->table = table;
2365 tew->event = event;
2366
2367 schedule_work(&tew->work);
2368}
2369
2370int acpi_reconfig_notifier_register(struct notifier_block *nb)
2371{
2372 return blocking_notifier_chain_register(&acpi_reconfig_chain, nb);
2373}
2374EXPORT_SYMBOL(acpi_reconfig_notifier_register);
2375
2376int acpi_reconfig_notifier_unregister(struct notifier_block *nb)
2377{
2378 return blocking_notifier_chain_unregister(&acpi_reconfig_chain, nb);
2379}
2380EXPORT_SYMBOL(acpi_reconfig_notifier_unregister);
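
/*
 * Usage sketch (illustrative only): a bus driver interested in devices that
 * appear when tables are loaded dynamically could register a reconfiguration
 * notifier along these lines (foo_reconfig_notify and foo_reconfig_nb are
 * hypothetical names):
 *
 *	static int foo_reconfig_notify(struct notifier_block *nb,
 *				       unsigned long event, void *data)
 *	{
 *		struct acpi_device *adev = data;
 *
 *		if (event == ACPI_RECONFIG_DEVICE_ADD)
 *			dev_info(&adev->dev, "added by table load\n");
 *
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block foo_reconfig_nb = {
 *		.notifier_call = foo_reconfig_notify,
 *	};
 *
 *	acpi_reconfig_notifier_register(&foo_reconfig_nb);
 */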