// SPDX-License-Identifier: GPL-2.0+
2/*
3 * Procedures for creating, accessing and interpreting the device tree.
4 *
5 * Paul Mackerras August 1996.
6 * Copyright (C) 1996-2005 Paul Mackerras.
7 *
8 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
9 * {engebret|bergner}@us.ibm.com
10 *
11 * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net
12 *
13 * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and
14 * Grant Likely.
15 */
16
17#define pr_fmt(fmt) "OF: " fmt
18
19#include <linux/console.h>
20#include <linux/ctype.h>
21#include <linux/cpu.h>
22#include <linux/module.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
25#include <linux/of_graph.h>
26#include <linux/spinlock.h>
27#include <linux/slab.h>
28#include <linux/string.h>
29#include <linux/proc_fs.h>
30
31#include "of_private.h"
32
/* List of alias_prop entries built from the /aliases node. */
LIST_HEAD(aliases_lookup);

/* Root of the device tree; NULL until the tree is unflattened. */
struct device_node *of_root;
EXPORT_SYMBOL(of_root);
struct device_node *of_chosen;		/* the /chosen node */
struct device_node *of_aliases;		/* the /aliases node */
struct device_node *of_stdout;		/* node named by the stdout-path alias */
static const char *of_stdout_options;	/* options suffix of stdout-path, if any */

/* kset backing /sys/firmware/devicetree */
struct kset *of_kset;

/*
 * Used to protect the of_aliases, to hold off addition of nodes to sysfs.
 * This mutex must be held whenever modifications are being made to the
 * device tree. The of_{attach,detach}_node() and
 * of_{add,remove,update}_property() helpers make sure this happens.
 */
DEFINE_MUTEX(of_mutex);

/* use when traversing tree through the child, sibling,
 * or parent members of struct device_node.
 */
DEFINE_RAW_SPINLOCK(devtree_lock);
56
57bool of_node_name_eq(const struct device_node *np, const char *name)
58{
59 const char *node_name;
60 size_t len;
61
62 if (!np)
63 return false;
64
65 node_name = kbasename(np->full_name);
66 len = strchrnul(node_name, '@') - node_name;
67
68 return (strlen(name) == len) && (strncmp(node_name, name, len) == 0);
69}
70EXPORT_SYMBOL(of_node_name_eq);
71
72bool of_node_name_prefix(const struct device_node *np, const char *prefix)
73{
74 if (!np)
75 return false;
76
77 return strncmp(kbasename(np->full_name), prefix, strlen(prefix)) == 0;
78}
79EXPORT_SYMBOL(of_node_name_prefix);
80
81int of_n_addr_cells(struct device_node *np)
82{
83 u32 cells;
84
85 do {
86 if (np->parent)
87 np = np->parent;
88 if (!of_property_read_u32(np, "#address-cells", &cells))
89 return cells;
90 } while (np->parent);
91 /* No #address-cells property for the root node */
92 return OF_ROOT_NODE_ADDR_CELLS_DEFAULT;
93}
94EXPORT_SYMBOL(of_n_addr_cells);
95
96int of_n_size_cells(struct device_node *np)
97{
98 u32 cells;
99
100 do {
101 if (np->parent)
102 np = np->parent;
103 if (!of_property_read_u32(np, "#size-cells", &cells))
104 return cells;
105 } while (np->parent);
106 /* No #size-cells property for the root node */
107 return OF_ROOT_NODE_SIZE_CELLS_DEFAULT;
108}
109EXPORT_SYMBOL(of_n_size_cells);
110
#ifdef CONFIG_NUMA
/* Default mapping of a device node to a NUMA node id; archs may override. */
int __weak of_node_to_nid(struct device_node *np)
{
	return NUMA_NO_NODE;
}
#endif
117
/* Direct-mapped phandle -> device_node cache; indexed by masked phandle. */
static struct device_node **phandle_cache;
static u32 phandle_cache_mask;	/* table size (power of two) minus one */

/*
 * Assumptions behind phandle_cache implementation:
 *   - phandle property values are in a contiguous range of 1..n
 *
 * If the assumptions do not hold, then
 *   - the phandle lookup overhead reduction provided by the cache
 *     will likely be less
 */
/*
 * (Re)build the phandle cache from the current tree: any previous table is
 * discarded, the table is sized to the next power of two above the number
 * of cached-able phandles, and collisions are resolved by last-writer-wins.
 */
void of_populate_phandle_cache(void)
{
	unsigned long flags;
	u32 cache_entries;
	struct device_node *np;
	u32 phandles = 0;

	raw_spin_lock_irqsave(&devtree_lock, flags);

	/* Drop any stale table before rebuilding. */
	kfree(phandle_cache);
	phandle_cache = NULL;

	/* First pass: count nodes with a usable phandle to size the table. */
	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandles++;

	if (!phandles)
		goto out;

	cache_entries = roundup_pow_of_two(phandles);
	phandle_cache_mask = cache_entries - 1;

	/* GFP_ATOMIC: we are under the raw devtree_lock with IRQs off. */
	phandle_cache = kcalloc(cache_entries, sizeof(*phandle_cache),
				GFP_ATOMIC);
	if (!phandle_cache)
		goto out;

	/* Second pass: fill the table; colliding entries overwrite. */
	for_each_of_allnodes(np)
		if (np->phandle && np->phandle != OF_PHANDLE_ILLEGAL)
			phandle_cache[np->phandle & phandle_cache_mask] = np;

out:
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
}
163
164int of_free_phandle_cache(void)
165{
166 unsigned long flags;
167
168 raw_spin_lock_irqsave(&devtree_lock, flags);
169
170 kfree(phandle_cache);
171 phandle_cache = NULL;
172
173 raw_spin_unlock_irqrestore(&devtree_lock, flags);
174
175 return 0;
176}
177#if !defined(CONFIG_MODULES)
178late_initcall_sync(of_free_phandle_cache);
179#endif
180
/*
 * of_core_init - Boot-time setup: build the phandle cache, expose every
 * node under /sys/firmware/devicetree, and add the legacy /proc symlink.
 */
void __init of_core_init(void)
{
	struct device_node *np;

	of_populate_phandle_cache();

	/* Create the kset, and register existing nodes */
	mutex_lock(&of_mutex);
	of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
	if (!of_kset) {
		mutex_unlock(&of_mutex);
		pr_err("failed to register existing nodes\n");
		return;
	}
	for_each_of_allnodes(np)
		__of_attach_node_sysfs(np);
	mutex_unlock(&of_mutex);

	/* Symlink in /proc as required by userspace ABI */
	if (of_root)
		proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
}
203
204static struct property *__of_find_property(const struct device_node *np,
205 const char *name, int *lenp)
206{
207 struct property *pp;
208
209 if (!np)
210 return NULL;
211
212 for (pp = np->properties; pp; pp = pp->next) {
213 if (of_prop_cmp(pp->name, name) == 0) {
214 if (lenp)
215 *lenp = pp->length;
216 break;
217 }
218 }
219
220 return pp;
221}
222
223struct property *of_find_property(const struct device_node *np,
224 const char *name,
225 int *lenp)
226{
227 struct property *pp;
228 unsigned long flags;
229
230 raw_spin_lock_irqsave(&devtree_lock, flags);
231 pp = __of_find_property(np, name, lenp);
232 raw_spin_unlock_irqrestore(&devtree_lock, flags);
233
234 return pp;
235}
236EXPORT_SYMBOL(of_find_property);
237
238struct device_node *__of_find_all_nodes(struct device_node *prev)
239{
240 struct device_node *np;
241 if (!prev) {
242 np = of_root;
243 } else if (prev->child) {
244 np = prev->child;
245 } else {
246 /* Walk back up looking for a sibling, or the end of the structure */
247 np = prev;
248 while (np->parent && !np->sibling)
249 np = np->parent;
250 np = np->sibling; /* Might be null at the end of the tree */
251 }
252 return np;
253}
254
255/**
256 * of_find_all_nodes - Get next node in global list
257 * @prev: Previous node or NULL to start iteration
258 * of_node_put() will be called on it
259 *
260 * Returns a node pointer with refcount incremented, use
261 * of_node_put() on it when done.
262 */
263struct device_node *of_find_all_nodes(struct device_node *prev)
264{
265 struct device_node *np;
266 unsigned long flags;
267
268 raw_spin_lock_irqsave(&devtree_lock, flags);
269 np = __of_find_all_nodes(prev);
270 of_node_get(np);
271 of_node_put(prev);
272 raw_spin_unlock_irqrestore(&devtree_lock, flags);
273 return np;
274}
275EXPORT_SYMBOL(of_find_all_nodes);
276
277/*
278 * Find a property with a given name for a given node
279 * and return the value.
280 */
281const void *__of_get_property(const struct device_node *np,
282 const char *name, int *lenp)
283{
284 struct property *pp = __of_find_property(np, name, lenp);
285
286 return pp ? pp->value : NULL;
287}
288
289/*
290 * Find a property with a given name for a given node
291 * and return the value.
292 */
293const void *of_get_property(const struct device_node *np, const char *name,
294 int *lenp)
295{
296 struct property *pp = of_find_property(np, name, lenp);
297
298 return pp ? pp->value : NULL;
299}
300EXPORT_SYMBOL(of_get_property);
301
302/*
303 * arch_match_cpu_phys_id - Match the given logical CPU and physical id
304 *
305 * @cpu: logical cpu index of a core/thread
306 * @phys_id: physical identifier of a core/thread
307 *
308 * CPU logical to physical index mapping is architecture specific.
309 * However this __weak function provides a default match of physical
310 * id to logical cpu index. phys_id provided here is usually values read
311 * from the device tree which must match the hardware internal registers.
312 *
313 * Returns true if the physical identifier and the logical cpu index
314 * correspond to the same core/thread, false otherwise.
315 */
316bool __weak arch_match_cpu_phys_id(int cpu, u64 phys_id)
317{
318 return (u32)phys_id == cpu;
319}
320
321/**
322 * Checks if the given "prop_name" property holds the physical id of the
323 * core/thread corresponding to the logical cpu 'cpu'. If 'thread' is not
324 * NULL, local thread number within the core is returned in it.
325 */
326static bool __of_find_n_match_cpu_property(struct device_node *cpun,
327 const char *prop_name, int cpu, unsigned int *thread)
328{
329 const __be32 *cell;
330 int ac, prop_len, tid;
331 u64 hwid;
332
333 ac = of_n_addr_cells(cpun);
334 cell = of_get_property(cpun, prop_name, &prop_len);
335 if (!cell || !ac)
336 return false;
337 prop_len /= sizeof(*cell) * ac;
338 for (tid = 0; tid < prop_len; tid++) {
339 hwid = of_read_number(cell, ac);
340 if (arch_match_cpu_phys_id(cpu, hwid)) {
341 if (thread)
342 *thread = tid;
343 return true;
344 }
345 cell += ac;
346 }
347 return false;
348}
349
350/*
351 * arch_find_n_match_cpu_physical_id - See if the given device node is
352 * for the cpu corresponding to logical cpu 'cpu'. Return true if so,
353 * else false. If 'thread' is non-NULL, the local thread number within the
354 * core is returned in it.
355 */
356bool __weak arch_find_n_match_cpu_physical_id(struct device_node *cpun,
357 int cpu, unsigned int *thread)
358{
359 /* Check for non-standard "ibm,ppc-interrupt-server#s" property
360 * for thread ids on PowerPC. If it doesn't exist fallback to
361 * standard "reg" property.
362 */
363 if (IS_ENABLED(CONFIG_PPC) &&
364 __of_find_n_match_cpu_property(cpun,
365 "ibm,ppc-interrupt-server#s",
366 cpu, thread))
367 return true;
368
369 return __of_find_n_match_cpu_property(cpun, "reg", cpu, thread);
370}
371
372/**
373 * of_get_cpu_node - Get device node associated with the given logical CPU
374 *
375 * @cpu: CPU number(logical index) for which device node is required
376 * @thread: if not NULL, local thread number within the physical core is
377 * returned
378 *
379 * The main purpose of this function is to retrieve the device node for the
380 * given logical CPU index. It should be used to initialize the of_node in
381 * cpu device. Once of_node in cpu device is populated, all the further
382 * references can use that instead.
383 *
384 * CPU logical to physical index mapping is architecture specific and is built
385 * before booting secondary cores. This function uses arch_match_cpu_phys_id
386 * which can be overridden by architecture specific implementation.
387 *
388 * Returns a node pointer for the logical cpu with refcount incremented, use
389 * of_node_put() on it when done. Returns NULL if not found.
390 */
391struct device_node *of_get_cpu_node(int cpu, unsigned int *thread)
392{
393 struct device_node *cpun;
394
395 for_each_node_by_type(cpun, "cpu") {
396 if (arch_find_n_match_cpu_physical_id(cpun, cpu, thread))
397 return cpun;
398 }
399 return NULL;
400}
401EXPORT_SYMBOL(of_get_cpu_node);
402
403/**
404 * of_cpu_node_to_id: Get the logical CPU number for a given device_node
405 *
406 * @cpu_node: Pointer to the device_node for CPU.
407 *
408 * Returns the logical CPU number of the given CPU device_node.
409 * Returns -ENODEV if the CPU is not found.
410 */
411int of_cpu_node_to_id(struct device_node *cpu_node)
412{
413 int cpu;
414 bool found = false;
415 struct device_node *np;
416
417 for_each_possible_cpu(cpu) {
418 np = of_cpu_device_node_get(cpu);
419 found = (cpu_node == np);
420 of_node_put(np);
421 if (found)
422 return cpu;
423 }
424
425 return -ENODEV;
426}
427EXPORT_SYMBOL(of_cpu_node_to_id);
428
/**
 * __of_device_is_compatible() - Check if the node matches given constraints
 * @device: pointer to node
 * @compat: required compatible string, NULL or "" for any match
 * @type: required device_type value, NULL or "" for any match
 * @name: required node name, NULL or "" for any match
 *
 * Checks if the given @compat, @type and @name strings match the
 * properties of the given @device. A constraint can be skipped by
 * passing NULL or an empty string as the constraint.
 *
 * Returns 0 for no match, and a positive integer on match. The return
 * value is a relative score with larger values indicating better
 * matches. The score is weighted for the most specific compatible value
 * to get the highest score. Matching type is next, followed by matching
 * name. Practically speaking, this results in the following priority
 * order for matches:
 *
 * 1. specific compatible && type && name
 * 2. specific compatible && type
 * 3. specific compatible && name
 * 4. specific compatible
 * 5. general compatible && type && name
 * 6. general compatible && type
 * 7. general compatible && name
 * 8. general compatible
 * 9. type && name
 * 10. type
 * 11. name
 */
static int __of_device_is_compatible(const struct device_node *device,
				     const char *compat, const char *type, const char *name)
{
	struct property *prop;
	const char *cp;
	int index = 0, score = 0;

	/* Compatible match has highest priority */
	if (compat && compat[0]) {
		prop = __of_find_property(device, "compatible", NULL);
		for (cp = of_prop_next_string(prop, NULL); cp;
		     cp = of_prop_next_string(prop, cp), index++) {
			if (of_compat_cmp(cp, compat, strlen(compat)) == 0) {
				/*
				 * Earlier (more specific) entries in the
				 * compatible list score higher; the << 2
				 * spacing leaves room for the type/name
				 * bonuses below without reordering.
				 */
				score = INT_MAX/2 - (index << 2);
				break;
			}
		}
		/* A requested compatible that did not match is fatal. */
		if (!score)
			return 0;
	}

	/* Matching type is better than matching name */
	if (type && type[0]) {
		if (!device->type || of_node_cmp(type, device->type))
			return 0;
		score += 2;
	}

	/* Matching name is a bit better than not */
	if (name && name[0]) {
		if (!device->name || of_node_cmp(name, device->name))
			return 0;
		score++;
	}

	return score;
}
496
497/** Checks if the given "compat" string matches one of the strings in
498 * the device's "compatible" property
499 */
500int of_device_is_compatible(const struct device_node *device,
501 const char *compat)
502{
503 unsigned long flags;
504 int res;
505
506 raw_spin_lock_irqsave(&devtree_lock, flags);
507 res = __of_device_is_compatible(device, compat, NULL, NULL);
508 raw_spin_unlock_irqrestore(&devtree_lock, flags);
509 return res;
510}
511EXPORT_SYMBOL(of_device_is_compatible);
512
/** Checks if the device is compatible with any of the entries in
 * a NULL terminated array of strings. Returns the best match
 * score or 0.
 */
int of_device_compatible_match(struct device_node *device,
			       const char *const *compat)
{
	unsigned int best = 0;

	if (!compat)
		return 0;

	for (; *compat; compat++) {
		unsigned int score = of_device_is_compatible(device, *compat);

		if (score > best)
			best = score;
	}

	return best;
}
534
/**
 * of_machine_is_compatible - Test root of device tree for a given compatible value
 * @compat: compatible string to look for in the root node's compatible property.
 *
 * Return: a positive match score if the root node carries the given value
 * in its compatible property, 0 otherwise.
 */
int of_machine_is_compatible(const char *compat)
{
	struct device_node *root = of_find_node_by_path("/");
	int score = 0;

	if (root) {
		score = of_device_is_compatible(root, compat);
		of_node_put(root);
	}
	return score;
}
EXPORT_SYMBOL(of_machine_is_compatible);
555
556/**
557 * __of_device_is_available - check if a device is available for use
558 *
559 * @device: Node to check for availability, with locks already held
560 *
561 * Returns true if the status property is absent or set to "okay" or "ok",
562 * false otherwise
563 */
564static bool __of_device_is_available(const struct device_node *device)
565{
566 const char *status;
567 int statlen;
568
569 if (!device)
570 return false;
571
572 status = __of_get_property(device, "status", &statlen);
573 if (status == NULL)
574 return true;
575
576 if (statlen > 0) {
577 if (!strcmp(status, "okay") || !strcmp(status, "ok"))
578 return true;
579 }
580
581 return false;
582}
583
584/**
585 * of_device_is_available - check if a device is available for use
586 *
587 * @device: Node to check for availability
588 *
589 * Returns true if the status property is absent or set to "okay" or "ok",
590 * false otherwise
591 */
592bool of_device_is_available(const struct device_node *device)
593{
594 unsigned long flags;
595 bool res;
596
597 raw_spin_lock_irqsave(&devtree_lock, flags);
598 res = __of_device_is_available(device);
599 raw_spin_unlock_irqrestore(&devtree_lock, flags);
600 return res;
601
602}
603EXPORT_SYMBOL(of_device_is_available);
604
605/**
606 * of_device_is_big_endian - check if a device has BE registers
607 *
608 * @device: Node to check for endianness
609 *
610 * Returns true if the device has a "big-endian" property, or if the kernel
611 * was compiled for BE *and* the device has a "native-endian" property.
612 * Returns false otherwise.
613 *
614 * Callers would nominally use ioread32be/iowrite32be if
615 * of_device_is_big_endian() == true, or readl/writel otherwise.
616 */
617bool of_device_is_big_endian(const struct device_node *device)
618{
619 if (of_property_read_bool(device, "big-endian"))
620 return true;
621 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN) &&
622 of_property_read_bool(device, "native-endian"))
623 return true;
624 return false;
625}
626EXPORT_SYMBOL(of_device_is_big_endian);
627
628/**
629 * of_get_parent - Get a node's parent if any
630 * @node: Node to get parent
631 *
632 * Returns a node pointer with refcount incremented, use
633 * of_node_put() on it when done.
634 */
635struct device_node *of_get_parent(const struct device_node *node)
636{
637 struct device_node *np;
638 unsigned long flags;
639
640 if (!node)
641 return NULL;
642
643 raw_spin_lock_irqsave(&devtree_lock, flags);
644 np = of_node_get(node->parent);
645 raw_spin_unlock_irqrestore(&devtree_lock, flags);
646 return np;
647}
648EXPORT_SYMBOL(of_get_parent);
649
650/**
651 * of_get_next_parent - Iterate to a node's parent
652 * @node: Node to get parent of
653 *
654 * This is like of_get_parent() except that it drops the
655 * refcount on the passed node, making it suitable for iterating
656 * through a node's parents.
657 *
658 * Returns a node pointer with refcount incremented, use
659 * of_node_put() on it when done.
660 */
661struct device_node *of_get_next_parent(struct device_node *node)
662{
663 struct device_node *parent;
664 unsigned long flags;
665
666 if (!node)
667 return NULL;
668
669 raw_spin_lock_irqsave(&devtree_lock, flags);
670 parent = of_node_get(node->parent);
671 of_node_put(node);
672 raw_spin_unlock_irqrestore(&devtree_lock, flags);
673 return parent;
674}
675EXPORT_SYMBOL(of_get_next_parent);
676
677static struct device_node *__of_get_next_child(const struct device_node *node,
678 struct device_node *prev)
679{
680 struct device_node *next;
681
682 if (!node)
683 return NULL;
684
685 next = prev ? prev->sibling : node->child;
686 for (; next; next = next->sibling)
687 if (of_node_get(next))
688 break;
689 of_node_put(prev);
690 return next;
691}
692#define __for_each_child_of_node(parent, child) \
693 for (child = __of_get_next_child(parent, NULL); child != NULL; \
694 child = __of_get_next_child(parent, child))
695
696/**
697 * of_get_next_child - Iterate a node childs
698 * @node: parent node
699 * @prev: previous child of the parent node, or NULL to get first
700 *
701 * Returns a node pointer with refcount incremented, use of_node_put() on
702 * it when done. Returns NULL when prev is the last child. Decrements the
703 * refcount of prev.
704 */
705struct device_node *of_get_next_child(const struct device_node *node,
706 struct device_node *prev)
707{
708 struct device_node *next;
709 unsigned long flags;
710
711 raw_spin_lock_irqsave(&devtree_lock, flags);
712 next = __of_get_next_child(node, prev);
713 raw_spin_unlock_irqrestore(&devtree_lock, flags);
714 return next;
715}
716EXPORT_SYMBOL(of_get_next_child);
717
718/**
719 * of_get_next_available_child - Find the next available child node
720 * @node: parent node
721 * @prev: previous child of the parent node, or NULL to get first
722 *
723 * This function is like of_get_next_child(), except that it
724 * automatically skips any disabled nodes (i.e. status = "disabled").
725 */
726struct device_node *of_get_next_available_child(const struct device_node *node,
727 struct device_node *prev)
728{
729 struct device_node *next;
730 unsigned long flags;
731
732 if (!node)
733 return NULL;
734
735 raw_spin_lock_irqsave(&devtree_lock, flags);
736 next = prev ? prev->sibling : node->child;
737 for (; next; next = next->sibling) {
738 if (!__of_device_is_available(next))
739 continue;
740 if (of_node_get(next))
741 break;
742 }
743 of_node_put(prev);
744 raw_spin_unlock_irqrestore(&devtree_lock, flags);
745 return next;
746}
747EXPORT_SYMBOL(of_get_next_available_child);
748
749/**
750 * of_get_compatible_child - Find compatible child node
751 * @parent: parent node
752 * @compatible: compatible string
753 *
754 * Lookup child node whose compatible property contains the given compatible
755 * string.
756 *
757 * Returns a node pointer with refcount incremented, use of_node_put() on it
758 * when done; or NULL if not found.
759 */
760struct device_node *of_get_compatible_child(const struct device_node *parent,
761 const char *compatible)
762{
763 struct device_node *child;
764
765 for_each_child_of_node(parent, child) {
766 if (of_device_is_compatible(child, compatible))
767 break;
768 }
769
770 return child;
771}
772EXPORT_SYMBOL(of_get_compatible_child);
773
774/**
775 * of_get_child_by_name - Find the child node by name for a given parent
776 * @node: parent node
777 * @name: child name to look for.
778 *
779 * This function looks for child node for given matching name
780 *
781 * Returns a node pointer if found, with refcount incremented, use
782 * of_node_put() on it when done.
783 * Returns NULL if node is not found.
784 */
785struct device_node *of_get_child_by_name(const struct device_node *node,
786 const char *name)
787{
788 struct device_node *child;
789
790 for_each_child_of_node(node, child)
791 if (child->name && (of_node_cmp(child->name, name) == 0))
792 break;
793 return child;
794}
795EXPORT_SYMBOL(of_get_child_by_name);
796
797struct device_node *__of_find_node_by_path(struct device_node *parent,
798 const char *path)
799{
800 struct device_node *child;
801 int len;
802
803 len = strcspn(path, "/:");
804 if (!len)
805 return NULL;
806
807 __for_each_child_of_node(parent, child) {
808 const char *name = kbasename(child->full_name);
809 if (strncmp(path, name, len) == 0 && (strlen(name) == len))
810 return child;
811 }
812 return NULL;
813}
814
/*
 * Walk @path ('/'-separated components, optionally ending with a
 * ":options" suffix) downward from @node, dropping the reference on
 * each intermediate node. Stops descending at the options separator.
 * Callers in this file hold devtree_lock while calling this.
 */
struct device_node *__of_find_node_by_full_path(struct device_node *node,
						const char *path)
{
	const char *separator = strchr(path, ':');

	while (node && *path == '/') {
		struct device_node *tmp = node;

		path++; /* Increment past '/' delimiter */
		node = __of_find_node_by_path(node, path);
		of_node_put(tmp);
		path = strchrnul(path, '/');
		/* Past the ':' means the rest is options, not components. */
		if (separator && separator < path)
			break;
	}
	return node;
}
832
/**
 * of_find_node_opts_by_path - Find a node matching a full OF path
 * @path: Either the full path to match, or if the path does not
 *        start with '/', the name of a property of the /aliases
 *        node (an alias). In the case of an alias, the node
 *        matching the alias' value will be returned.
 * @opts: Address of a pointer into which to store the start of
 *        an options string appended to the end of the path with
 *        a ':' separator.
 *
 * Valid paths:
 *	/foo/bar	Full path
 *	foo		Valid alias
 *	foo/bar		Valid alias + relative path
 *
 * Returns a node pointer with refcount incremented, use
 * of_node_put() on it when done.
 */
struct device_node *of_find_node_opts_by_path(const char *path, const char **opts)
{
	struct device_node *np = NULL;
	struct property *pp;
	unsigned long flags;
	const char *separator = strchr(path, ':');

	/* Report the options suffix (after ':') even when lookup fails. */
	if (opts)
		*opts = separator ? separator + 1 : NULL;

	if (strcmp(path, "/") == 0)
		return of_node_get(of_root);

	/* The path could begin with an alias */
	if (*path != '/') {
		int len;
		const char *p = separator;

		/* The alias name runs up to the first '/' or ':'. */
		if (!p)
			p = strchrnul(path, '/');
		len = p - path;

		/* of_aliases must not be NULL */
		if (!of_aliases)
			return NULL;

		/* Resolve the alias to its target node via /aliases. */
		for_each_property_of_node(of_aliases, pp) {
			if (strlen(pp->name) == len && !strncmp(pp->name, path, len)) {
				np = of_find_node_by_path(pp->value);
				break;
			}
		}
		if (!np)
			return NULL;
		path = p;
	}

	/* Step down the tree matching path components */
	raw_spin_lock_irqsave(&devtree_lock, flags);
	if (!np)
		np = of_node_get(of_root);
	np = __of_find_node_by_full_path(np, path);
	raw_spin_unlock_irqrestore(&devtree_lock, flags);
	return np;
}
EXPORT_SYMBOL(of_find_node_opts_by_path);
897
898/**
899 * of_find_node_by_name - Find a node by its "name" property
900 * @from: The node to start searching from or NULL; the node
901 * you pass will not be searched, only the next one
902 * will. Typically, you pass what the previous call
903 * returned. of_node_put() will be called on @from.
904 * @name: The name string to match against
905 *
906 * Returns a node pointer with refcount incremented, use
907 * of_node_put() on it when done.
908 */
909struct device_node *of_find_node_by_name(struct device_node *from,
910 const char *name)
911{
912 struct device_node *np;
913 unsigned long flags;
914
915 raw_spin_lock_irqsave(&devtree_lock, flags);
916 for_each_of_allnodes_from(from, np)
917 if (np->name && (of_node_cmp(np->name, name) == 0)
918 && of_node_get(np))
919 break;
920 of_node_put(from);
921 raw_spin_unlock_irqrestore(&devtree_lock, flags);
922 return np;
923}
924EXPORT_SYMBOL(of_find_node_by_name);
925
926/**
927 * of_find_node_by_type - Find a node by its "device_type" property
928 * @from: The node to start searching from, or NULL to start searching
929 * the entire device tree. The node you pass will not be
930 * searched, only the next one will; typically, you pass
931 * what the previous call returned. of_node_put() will be
932 * called on from for you.
933 * @type: The type string to match against
934 *
935 * Returns a node pointer with refcount incremented, use
936 * of_node_put() on it when done.
937 */
938struct device_node *of_find_node_by_type(struct device_node *from,
939 const char *type)
940{
941 struct device_node *np;
942 unsigned long flags;
943
944 raw_spin_lock_irqsave(&devtree_lock, flags);
945 for_each_of_allnodes_from(from, np)
946 if (np->type && (of_node_cmp(np->type, type) == 0)
947 && of_node_get(np))
948 break;
949 of_node_put(from);
950 raw_spin_unlock_irqrestore(&devtree_lock, flags);
951 return np;
952}
953EXPORT_SYMBOL(of_find_node_by_type);
954
955/**
956 * of_find_compatible_node - Find a node based on type and one of the
957 * tokens in its "compatible" property
958 * @from: The node to start searching from or NULL, the node
959 * you pass will not be searched, only the next one
960 * will; typically, you pass what the previous call
961 * returned. of_node_put() will be called on it
962 * @type: The type string to match "device_type" or NULL to ignore
963 * @compatible: The string to match to one of the tokens in the device
964 * "compatible" list.
965 *
966 * Returns a node pointer with refcount incremented, use
967 * of_node_put() on it when done.
968 */
969struct device_node *of_find_compatible_node(struct device_node *from,
970 const char *type, const char *compatible)
971{
972 struct device_node *np;
973 unsigned long flags;
974
975 raw_spin_lock_irqsave(&devtree_lock, flags);
976 for_each_of_allnodes_from(from, np)
977 if (__of_device_is_compatible(np, compatible, type, NULL) &&
978 of_node_get(np))
979 break;
980 of_node_put(from);
981 raw_spin_unlock_irqrestore(&devtree_lock, flags);
982 return np;
983}
984EXPORT_SYMBOL(of_find_compatible_node);
985
986/**
987 * of_find_node_with_property - Find a node which has a property with
988 * the given name.
989 * @from: The node to start searching from or NULL, the node
990 * you pass will not be searched, only the next one
991 * will; typically, you pass what the previous call
992 * returned. of_node_put() will be called on it
993 * @prop_name: The name of the property to look for.
994 *
995 * Returns a node pointer with refcount incremented, use
996 * of_node_put() on it when done.
997 */
998struct device_node *of_find_node_with_property(struct device_node *from,
999 const char *prop_name)
1000{
1001 struct device_node *np;
1002 struct property *pp;
1003 unsigned long flags;
1004
1005 raw_spin_lock_irqsave(&devtree_lock, flags);
1006 for_each_of_allnodes_from(from, np) {
1007 for (pp = np->properties; pp; pp = pp->next) {
1008 if (of_prop_cmp(pp->name, prop_name) == 0) {
1009 of_node_get(np);
1010 goto out;
1011 }
1012 }
1013 }
1014out:
1015 of_node_put(from);
1016 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1017 return np;
1018}
1019EXPORT_SYMBOL(of_find_node_with_property);
1020
1021static
1022const struct of_device_id *__of_match_node(const struct of_device_id *matches,
1023 const struct device_node *node)
1024{
1025 const struct of_device_id *best_match = NULL;
1026 int score, best_score = 0;
1027
1028 if (!matches)
1029 return NULL;
1030
1031 for (; matches->name[0] || matches->type[0] || matches->compatible[0]; matches++) {
1032 score = __of_device_is_compatible(node, matches->compatible,
1033 matches->type, matches->name);
1034 if (score > best_score) {
1035 best_match = matches;
1036 best_score = score;
1037 }
1038 }
1039
1040 return best_match;
1041}
1042
1043/**
1044 * of_match_node - Tell if a device_node has a matching of_match structure
1045 * @matches: array of of device match structures to search in
1046 * @node: the of device structure to match against
1047 *
1048 * Low level utility function used by device matching.
1049 */
1050const struct of_device_id *of_match_node(const struct of_device_id *matches,
1051 const struct device_node *node)
1052{
1053 const struct of_device_id *match;
1054 unsigned long flags;
1055
1056 raw_spin_lock_irqsave(&devtree_lock, flags);
1057 match = __of_match_node(matches, node);
1058 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1059 return match;
1060}
1061EXPORT_SYMBOL(of_match_node);
1062
1063/**
1064 * of_find_matching_node_and_match - Find a node based on an of_device_id
1065 * match table.
1066 * @from: The node to start searching from or NULL, the node
1067 * you pass will not be searched, only the next one
1068 * will; typically, you pass what the previous call
1069 * returned. of_node_put() will be called on it
1070 * @matches: array of of device match structures to search in
1071 * @match Updated to point at the matches entry which matched
1072 *
1073 * Returns a node pointer with refcount incremented, use
1074 * of_node_put() on it when done.
1075 */
1076struct device_node *of_find_matching_node_and_match(struct device_node *from,
1077 const struct of_device_id *matches,
1078 const struct of_device_id **match)
1079{
1080 struct device_node *np;
1081 const struct of_device_id *m;
1082 unsigned long flags;
1083
1084 if (match)
1085 *match = NULL;
1086
1087 raw_spin_lock_irqsave(&devtree_lock, flags);
1088 for_each_of_allnodes_from(from, np) {
1089 m = __of_match_node(matches, np);
1090 if (m && of_node_get(np)) {
1091 if (match)
1092 *match = m;
1093 break;
1094 }
1095 }
1096 of_node_put(from);
1097 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1098 return np;
1099}
1100EXPORT_SYMBOL(of_find_matching_node_and_match);
1101
1102/**
1103 * of_modalias_node - Lookup appropriate modalias for a device node
1104 * @node: pointer to a device tree node
1105 * @modalias: Pointer to buffer that modalias value will be copied into
1106 * @len: Length of modalias value
1107 *
1108 * Based on the value of the compatible property, this routine will attempt
1109 * to choose an appropriate modalias value for a particular device tree node.
1110 * It does this by stripping the manufacturer prefix (as delimited by a ',')
1111 * from the first entry in the compatible list property.
1112 *
1113 * This routine returns 0 on success, <0 on failure.
1114 */
1115int of_modalias_node(struct device_node *node, char *modalias, int len)
1116{
1117 const char *compatible, *p;
1118 int cplen;
1119
1120 compatible = of_get_property(node, "compatible", &cplen);
1121 if (!compatible || strlen(compatible) > cplen)
1122 return -ENODEV;
1123 p = strchr(compatible, ',');
1124 strlcpy(modalias, p ? p + 1 : compatible, len);
1125 return 0;
1126}
1127EXPORT_SYMBOL_GPL(of_modalias_node);
1128
1129/**
1130 * of_find_node_by_phandle - Find a node given a phandle
1131 * @handle: phandle of the node to find
1132 *
1133 * Returns a node pointer with refcount incremented, use
1134 * of_node_put() on it when done.
1135 */
1136struct device_node *of_find_node_by_phandle(phandle handle)
1137{
1138 struct device_node *np = NULL;
1139 unsigned long flags;
1140 phandle masked_handle;
1141
1142 if (!handle)
1143 return NULL;
1144
1145 raw_spin_lock_irqsave(&devtree_lock, flags);
1146
1147 masked_handle = handle & phandle_cache_mask;
1148
1149 if (phandle_cache) {
1150 if (phandle_cache[masked_handle] &&
1151 handle == phandle_cache[masked_handle]->phandle)
1152 np = phandle_cache[masked_handle];
1153 }
1154
1155 if (!np) {
1156 for_each_of_allnodes(np)
1157 if (np->phandle == handle) {
1158 if (phandle_cache)
1159 phandle_cache[masked_handle] = np;
1160 break;
1161 }
1162 }
1163
1164 of_node_get(np);
1165 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1166 return np;
1167}
1168EXPORT_SYMBOL(of_find_node_by_phandle);
1169
1170void of_print_phandle_args(const char *msg, const struct of_phandle_args *args)
1171{
1172 int i;
1173 printk("%s %pOF", msg, args->np);
1174 for (i = 0; i < args->args_count; i++) {
1175 const char delim = i ? ',' : ':';
1176
1177 pr_cont("%c%08x", delim, args->args[i]);
1178 }
1179 pr_cont("\n");
1180}
1181
1182int of_phandle_iterator_init(struct of_phandle_iterator *it,
1183 const struct device_node *np,
1184 const char *list_name,
1185 const char *cells_name,
1186 int cell_count)
1187{
1188 const __be32 *list;
1189 int size;
1190
1191 memset(it, 0, sizeof(*it));
1192
1193 list = of_get_property(np, list_name, &size);
1194 if (!list)
1195 return -ENOENT;
1196
1197 it->cells_name = cells_name;
1198 it->cell_count = cell_count;
1199 it->parent = np;
1200 it->list_end = list + size / sizeof(*list);
1201 it->phandle_end = list;
1202 it->cur = list;
1203
1204 return 0;
1205}
1206EXPORT_SYMBOL_GPL(of_phandle_iterator_init);
1207
/**
 * of_phandle_iterator_next - Advance the iterator to the next list entry
 * @it: iterator initialized by of_phandle_iterator_init()
 *
 * Decodes the phandle at the current cursor, looks up the provider node
 * (a reference is held in it->node until the next call or an error) and
 * determines how many argument cells follow the phandle.
 *
 * Returns 0 on success, -ENOENT when the list is exhausted, and -EINVAL
 * for malformed data.
 */
int of_phandle_iterator_next(struct of_phandle_iterator *it)
{
	uint32_t count = 0;

	/* Drop the reference taken on the previous entry's provider node. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	/* Uninitialized iterator, or the previous entry ended the list. */
	if (!it->cur || it->phandle_end >= it->list_end)
		return -ENOENT;

	it->cur = it->phandle_end;

	/* If phandle is 0, then it is an empty entry with no arguments. */
	it->phandle = be32_to_cpup(it->cur++);

	if (it->phandle) {

		/*
		 * Find the provider node and parse the #*-cells property to
		 * determine the argument length.
		 */
		it->node = of_find_node_by_phandle(it->phandle);

		if (it->cells_name) {
			if (!it->node) {
				pr_err("%pOF: could not find phandle\n",
				       it->parent);
				goto err;
			}

			if (of_property_read_u32(it->node, it->cells_name,
						 &count)) {
				pr_err("%pOF: could not get %s for %pOF\n",
				       it->parent,
				       it->cells_name,
				       it->node);
				goto err;
			}
		} else {
			/* No cells property requested: use the fixed count. */
			count = it->cell_count;
		}

		/*
		 * Make sure that the arguments actually fit in the remaining
		 * property data length
		 */
		if (it->cur + count > it->list_end) {
			pr_err("%pOF: arguments longer than property\n",
			       it->parent);
			goto err;
		}
	}

	/* Record where this entry ends; the next call resumes from there. */
	it->phandle_end = it->cur + count;
	it->cur_count = count;

	return 0;

err:
	/* On error, do not leak the provider node reference. */
	if (it->node) {
		of_node_put(it->node);
		it->node = NULL;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(of_phandle_iterator_next);
1277
1278int of_phandle_iterator_args(struct of_phandle_iterator *it,
1279 uint32_t *args,
1280 int size)
1281{
1282 int i, count;
1283
1284 count = it->cur_count;
1285
1286 if (WARN_ON(size < count))
1287 count = size;
1288
1289 for (i = 0; i < count; i++)
1290 args[i] = be32_to_cpup(it->cur++);
1291
1292 return count;
1293}
1294
/*
 * __of_parse_phandle_with_args - Common worker for the of_parse_phandle*()
 * family: walk the @list_name phandle list of @np and return the entry at
 * @index in @out_args. @cells_name / @cell_count select how the per-entry
 * argument count is determined (see of_phandle_iterator_init()).
 *
 * On success the reference on the provider node is transferred to
 * out_args->np (or dropped if @out_args is NULL); the caller must
 * of_node_put() it. Returns 0, -ENOENT or -EINVAL.
 */
static int __of_parse_phandle_with_args(const struct device_node *np,
					const char *list_name,
					const char *cells_name,
					int cell_count, int index,
					struct of_phandle_args *out_args)
{
	struct of_phandle_iterator it;
	int rc, cur_index = 0;

	/* Loop over the phandles until all the requested entry is found */
	of_for_each_phandle(&it, rc, np, list_name, cells_name, cell_count) {
		/*
		 * All of the error cases bail out of the loop, so at
		 * this point, the parsing is successful. If the requested
		 * index matches, then fill the out_args structure and return,
		 * or return -ENOENT for an empty entry.
		 */
		rc = -ENOENT;
		if (cur_index == index) {
			/* Empty entry (phandle == 0) at this index. */
			if (!it.phandle)
				goto err;

			if (out_args) {
				int c;

				c = of_phandle_iterator_args(&it,
							     out_args->args,
							     MAX_PHANDLE_ARGS);
				/* Hand the node reference to the caller. */
				out_args->np = it.node;
				out_args->args_count = c;
			} else {
				of_node_put(it.node);
			}

			/* Found it! return success */
			return 0;
		}

		cur_index++;
	}

	/*
	 * Unlock node before returning result; will be one of:
	 * -ENOENT : index is for empty phandle
	 * -EINVAL : parsing error on data
	 */

 err:
	of_node_put(it.node);
	return rc;
}
1346
1347/**
1348 * of_parse_phandle - Resolve a phandle property to a device_node pointer
1349 * @np: Pointer to device node holding phandle property
1350 * @phandle_name: Name of property holding a phandle value
1351 * @index: For properties holding a table of phandles, this is the index into
1352 * the table
1353 *
1354 * Returns the device_node pointer with refcount incremented. Use
1355 * of_node_put() on it when done.
1356 */
1357struct device_node *of_parse_phandle(const struct device_node *np,
1358 const char *phandle_name, int index)
1359{
1360 struct of_phandle_args args;
1361
1362 if (index < 0)
1363 return NULL;
1364
1365 if (__of_parse_phandle_with_args(np, phandle_name, NULL, 0,
1366 index, &args))
1367 return NULL;
1368
1369 return args.np;
1370}
1371EXPORT_SYMBOL(of_parse_phandle);
1372
1373/**
1374 * of_parse_phandle_with_args() - Find a node pointed by phandle in a list
1375 * @np: pointer to a device tree node containing a list
1376 * @list_name: property name that contains a list
1377 * @cells_name: property name that specifies phandles' arguments count
1378 * @index: index of a phandle to parse out
1379 * @out_args: optional pointer to output arguments structure (will be filled)
1380 *
1381 * This function is useful to parse lists of phandles and their arguments.
1382 * Returns 0 on success and fills out_args, on error returns appropriate
1383 * errno value.
1384 *
1385 * Caller is responsible to call of_node_put() on the returned out_args->np
1386 * pointer.
1387 *
1388 * Example:
1389 *
1390 * phandle1: node1 {
1391 * #list-cells = <2>;
1392 * }
1393 *
1394 * phandle2: node2 {
1395 * #list-cells = <1>;
1396 * }
1397 *
1398 * node3 {
1399 * list = <&phandle1 1 2 &phandle2 3>;
1400 * }
1401 *
1402 * To get a device_node of the `node2' node you may call this:
1403 * of_parse_phandle_with_args(node3, "list", "#list-cells", 1, &args);
1404 */
1405int of_parse_phandle_with_args(const struct device_node *np, const char *list_name,
1406 const char *cells_name, int index,
1407 struct of_phandle_args *out_args)
1408{
1409 if (index < 0)
1410 return -EINVAL;
1411 return __of_parse_phandle_with_args(np, list_name, cells_name, 0,
1412 index, out_args);
1413}
1414EXPORT_SYMBOL(of_parse_phandle_with_args);
1415
1416/**
1417 * of_parse_phandle_with_args_map() - Find a node pointed by phandle in a list and remap it
1418 * @np: pointer to a device tree node containing a list
1419 * @list_name: property name that contains a list
1420 * @stem_name: stem of property names that specify phandles' arguments count
1421 * @index: index of a phandle to parse out
1422 * @out_args: optional pointer to output arguments structure (will be filled)
1423 *
1424 * This function is useful to parse lists of phandles and their arguments.
1425 * Returns 0 on success and fills out_args, on error returns appropriate errno
1426 * value. The difference between this function and of_parse_phandle_with_args()
1427 * is that this API remaps a phandle if the node the phandle points to has
1428 * a <@stem_name>-map property.
1429 *
1430 * Caller is responsible to call of_node_put() on the returned out_args->np
1431 * pointer.
1432 *
1433 * Example:
1434 *
1435 * phandle1: node1 {
1436 * #list-cells = <2>;
1437 * }
1438 *
1439 * phandle2: node2 {
1440 * #list-cells = <1>;
1441 * }
1442 *
1443 * phandle3: node3 {
1444 * #list-cells = <1>;
1445 * list-map = <0 &phandle2 3>,
1446 * <1 &phandle2 2>,
1447 * <2 &phandle1 5 1>;
1448 * list-map-mask = <0x3>;
1449 * };
1450 *
1451 * node4 {
1452 * list = <&phandle1 1 2 &phandle3 0>;
1453 * }
1454 *
1455 * To get a device_node of the `node2' node you may call this:
1456 * of_parse_phandle_with_args(node4, "list", "list", 1, &args);
1457 */
1458int of_parse_phandle_with_args_map(const struct device_node *np,
1459 const char *list_name,
1460 const char *stem_name,
1461 int index, struct of_phandle_args *out_args)
1462{
1463 char *cells_name, *map_name = NULL, *mask_name = NULL;
1464 char *pass_name = NULL;
1465 struct device_node *cur, *new = NULL;
1466 const __be32 *map, *mask, *pass;
1467 static const __be32 dummy_mask[] = { [0 ... MAX_PHANDLE_ARGS] = ~0 };
1468 static const __be32 dummy_pass[] = { [0 ... MAX_PHANDLE_ARGS] = 0 };
1469 __be32 initial_match_array[MAX_PHANDLE_ARGS];
1470 const __be32 *match_array = initial_match_array;
1471 int i, ret, map_len, match;
1472 u32 list_size, new_size;
1473
1474 if (index < 0)
1475 return -EINVAL;
1476
1477 cells_name = kasprintf(GFP_KERNEL, "#%s-cells", stem_name);
1478 if (!cells_name)
1479 return -ENOMEM;
1480
1481 ret = -ENOMEM;
1482 map_name = kasprintf(GFP_KERNEL, "%s-map", stem_name);
1483 if (!map_name)
1484 goto free;
1485
1486 mask_name = kasprintf(GFP_KERNEL, "%s-map-mask", stem_name);
1487 if (!mask_name)
1488 goto free;
1489
1490 pass_name = kasprintf(GFP_KERNEL, "%s-map-pass-thru", stem_name);
1491 if (!pass_name)
1492 goto free;
1493
1494 ret = __of_parse_phandle_with_args(np, list_name, cells_name, 0, index,
1495 out_args);
1496 if (ret)
1497 goto free;
1498
1499 /* Get the #<list>-cells property */
1500 cur = out_args->np;
1501 ret = of_property_read_u32(cur, cells_name, &list_size);
1502 if (ret < 0)
1503 goto put;
1504
1505 /* Precalculate the match array - this simplifies match loop */
1506 for (i = 0; i < list_size; i++)
1507 initial_match_array[i] = cpu_to_be32(out_args->args[i]);
1508
1509 ret = -EINVAL;
1510 while (cur) {
1511 /* Get the <list>-map property */
1512 map = of_get_property(cur, map_name, &map_len);
1513 if (!map) {
1514 ret = 0;
1515 goto free;
1516 }
1517 map_len /= sizeof(u32);
1518
1519 /* Get the <list>-map-mask property (optional) */
1520 mask = of_get_property(cur, mask_name, NULL);
1521 if (!mask)
1522 mask = dummy_mask;
1523 /* Iterate through <list>-map property */
1524 match = 0;
1525 while (map_len > (list_size + 1) && !match) {
1526 /* Compare specifiers */
1527 match = 1;
1528 for (i = 0; i < list_size; i++, map_len--)
1529 match &= !((match_array[i] ^ *map++) & mask[i]);
1530
1531 of_node_put(new);
1532 new = of_find_node_by_phandle(be32_to_cpup(map));
1533 map++;
1534 map_len--;
1535
1536 /* Check if not found */
1537 if (!new)
1538 goto put;
1539
1540 if (!of_device_is_available(new))
1541 match = 0;
1542
1543 ret = of_property_read_u32(new, cells_name, &new_size);
1544 if (ret)
1545 goto put;
1546
1547 /* Check for malformed properties */
1548 if (WARN_ON(new_size > MAX_PHANDLE_ARGS))
1549 goto put;
1550 if (map_len < new_size)
1551 goto put;
1552
1553 /* Move forward by new node's #<list>-cells amount */
1554 map += new_size;
1555 map_len -= new_size;
1556 }
1557 if (!match)
1558 goto put;
1559
1560 /* Get the <list>-map-pass-thru property (optional) */
1561 pass = of_get_property(cur, pass_name, NULL);
1562 if (!pass)
1563 pass = dummy_pass;
1564
1565 /*
1566 * Successfully parsed a <list>-map translation; copy new
1567 * specifier into the out_args structure, keeping the
1568 * bits specified in <list>-map-pass-thru.
1569 */
1570 match_array = map - new_size;
1571 for (i = 0; i < new_size; i++) {
1572 __be32 val = *(map - new_size + i);
1573
1574 if (i < list_size) {
1575 val &= ~pass[i];
1576 val |= cpu_to_be32(out_args->args[i]) & pass[i];
1577 }
1578
1579 out_args->args[i] = be32_to_cpu(val);
1580 }
1581 out_args->args_count = list_size = new_size;
1582 /* Iterate again with new provider */
1583 out_args->np = new;
1584 of_node_put(cur);
1585 cur = new;
1586 }
1587put:
1588 of_node_put(cur);
1589 of_node_put(new);
1590free:
1591 kfree(mask_name);
1592 kfree(map_name);
1593 kfree(cells_name);
1594 kfree(pass_name);
1595
1596 return ret;
1597}
1598EXPORT_SYMBOL(of_parse_phandle_with_args_map);
1599
1600/**
1601 * of_parse_phandle_with_fixed_args() - Find a node pointed by phandle in a list
1602 * @np: pointer to a device tree node containing a list
1603 * @list_name: property name that contains a list
1604 * @cell_count: number of argument cells following the phandle
1605 * @index: index of a phandle to parse out
1606 * @out_args: optional pointer to output arguments structure (will be filled)
1607 *
1608 * This function is useful to parse lists of phandles and their arguments.
1609 * Returns 0 on success and fills out_args, on error returns appropriate
1610 * errno value.
1611 *
1612 * Caller is responsible to call of_node_put() on the returned out_args->np
1613 * pointer.
1614 *
1615 * Example:
1616 *
1617 * phandle1: node1 {
1618 * }
1619 *
1620 * phandle2: node2 {
1621 * }
1622 *
1623 * node3 {
1624 * list = <&phandle1 0 2 &phandle2 2 3>;
1625 * }
1626 *
1627 * To get a device_node of the `node2' node you may call this:
1628 * of_parse_phandle_with_fixed_args(node3, "list", 2, 1, &args);
1629 */
1630int of_parse_phandle_with_fixed_args(const struct device_node *np,
1631 const char *list_name, int cell_count,
1632 int index, struct of_phandle_args *out_args)
1633{
1634 if (index < 0)
1635 return -EINVAL;
1636 return __of_parse_phandle_with_args(np, list_name, NULL, cell_count,
1637 index, out_args);
1638}
1639EXPORT_SYMBOL(of_parse_phandle_with_fixed_args);
1640
1641/**
1642 * of_count_phandle_with_args() - Find the number of phandles references in a property
1643 * @np: pointer to a device tree node containing a list
1644 * @list_name: property name that contains a list
1645 * @cells_name: property name that specifies phandles' arguments count
1646 *
1647 * Returns the number of phandle + argument tuples within a property. It
1648 * is a typical pattern to encode a list of phandle and variable
1649 * arguments into a single property. The number of arguments is encoded
1650 * by a property in the phandle-target node. For example, a gpios
1651 * property would contain a list of GPIO specifies consisting of a
1652 * phandle and 1 or more arguments. The number of arguments are
1653 * determined by the #gpio-cells property in the node pointed to by the
1654 * phandle.
1655 */
1656int of_count_phandle_with_args(const struct device_node *np, const char *list_name,
1657 const char *cells_name)
1658{
1659 struct of_phandle_iterator it;
1660 int rc, cur_index = 0;
1661
1662 rc = of_phandle_iterator_init(&it, np, list_name, cells_name, 0);
1663 if (rc)
1664 return rc;
1665
1666 while ((rc = of_phandle_iterator_next(&it)) == 0)
1667 cur_index += 1;
1668
1669 if (rc != -ENOENT)
1670 return rc;
1671
1672 return cur_index;
1673}
1674EXPORT_SYMBOL(of_count_phandle_with_args);
1675
1676/**
1677 * __of_add_property - Add a property to a node without lock operations
1678 */
1679int __of_add_property(struct device_node *np, struct property *prop)
1680{
1681 struct property **next;
1682
1683 prop->next = NULL;
1684 next = &np->properties;
1685 while (*next) {
1686 if (strcmp(prop->name, (*next)->name) == 0)
1687 /* duplicate ! don't insert it */
1688 return -EEXIST;
1689
1690 next = &(*next)->next;
1691 }
1692 *next = prop;
1693
1694 return 0;
1695}
1696
1697/**
1698 * of_add_property - Add a property to a node
1699 */
1700int of_add_property(struct device_node *np, struct property *prop)
1701{
1702 unsigned long flags;
1703 int rc;
1704
1705 mutex_lock(&of_mutex);
1706
1707 raw_spin_lock_irqsave(&devtree_lock, flags);
1708 rc = __of_add_property(np, prop);
1709 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1710
1711 if (!rc)
1712 __of_add_property_sysfs(np, prop);
1713
1714 mutex_unlock(&of_mutex);
1715
1716 if (!rc)
1717 of_property_notify(OF_RECONFIG_ADD_PROPERTY, np, prop, NULL);
1718
1719 return rc;
1720}
1721
1722int __of_remove_property(struct device_node *np, struct property *prop)
1723{
1724 struct property **next;
1725
1726 for (next = &np->properties; *next; next = &(*next)->next) {
1727 if (*next == prop)
1728 break;
1729 }
1730 if (*next == NULL)
1731 return -ENODEV;
1732
1733 /* found the node */
1734 *next = prop->next;
1735 prop->next = np->deadprops;
1736 np->deadprops = prop;
1737
1738 return 0;
1739}
1740
1741/**
1742 * of_remove_property - Remove a property from a node.
1743 *
1744 * Note that we don't actually remove it, since we have given out
1745 * who-knows-how-many pointers to the data using get-property.
1746 * Instead we just move the property to the "dead properties"
1747 * list, so it won't be found any more.
1748 */
1749int of_remove_property(struct device_node *np, struct property *prop)
1750{
1751 unsigned long flags;
1752 int rc;
1753
1754 if (!prop)
1755 return -ENODEV;
1756
1757 mutex_lock(&of_mutex);
1758
1759 raw_spin_lock_irqsave(&devtree_lock, flags);
1760 rc = __of_remove_property(np, prop);
1761 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1762
1763 if (!rc)
1764 __of_remove_property_sysfs(np, prop);
1765
1766 mutex_unlock(&of_mutex);
1767
1768 if (!rc)
1769 of_property_notify(OF_RECONFIG_REMOVE_PROPERTY, np, prop, NULL);
1770
1771 return rc;
1772}
1773
1774int __of_update_property(struct device_node *np, struct property *newprop,
1775 struct property **oldpropp)
1776{
1777 struct property **next, *oldprop;
1778
1779 for (next = &np->properties; *next; next = &(*next)->next) {
1780 if (of_prop_cmp((*next)->name, newprop->name) == 0)
1781 break;
1782 }
1783 *oldpropp = oldprop = *next;
1784
1785 if (oldprop) {
1786 /* replace the node */
1787 newprop->next = oldprop->next;
1788 *next = newprop;
1789 oldprop->next = np->deadprops;
1790 np->deadprops = oldprop;
1791 } else {
1792 /* new node */
1793 newprop->next = NULL;
1794 *next = newprop;
1795 }
1796
1797 return 0;
1798}
1799
1800/*
1801 * of_update_property - Update a property in a node, if the property does
1802 * not exist, add it.
1803 *
1804 * Note that we don't actually remove it, since we have given out
1805 * who-knows-how-many pointers to the data using get-property.
1806 * Instead we just move the property to the "dead properties" list,
1807 * and add the new property to the property list
1808 */
1809int of_update_property(struct device_node *np, struct property *newprop)
1810{
1811 struct property *oldprop;
1812 unsigned long flags;
1813 int rc;
1814
1815 if (!newprop->name)
1816 return -EINVAL;
1817
1818 mutex_lock(&of_mutex);
1819
1820 raw_spin_lock_irqsave(&devtree_lock, flags);
1821 rc = __of_update_property(np, newprop, &oldprop);
1822 raw_spin_unlock_irqrestore(&devtree_lock, flags);
1823
1824 if (!rc)
1825 __of_update_property_sysfs(np, newprop, oldprop);
1826
1827 mutex_unlock(&of_mutex);
1828
1829 if (!rc)
1830 of_property_notify(OF_RECONFIG_UPDATE_PROPERTY, np, newprop, oldprop);
1831
1832 return rc;
1833}
1834
/*
 * of_alias_add - Record one parsed alias in the global aliases_lookup list.
 * @ap:       alias entry; its stem[] tail must hold stem_len + 1 bytes
 *            (guaranteed by the allocation in of_alias_scan())
 * @np:       node the alias resolves to
 * @id:       numeric suffix parsed off the alias name
 * @stem:     start of the alias name; only the first @stem_len chars are used
 * @stem_len: length of the stem portion
 */
static void of_alias_add(struct alias_prop *ap, struct device_node *np,
			 int id, const char *stem, int stem_len)
{
	ap->np = np;
	ap->id = id;
	/* Copy the stem and NUL-terminate it explicitly. */
	strncpy(ap->stem, stem, stem_len);
	ap->stem[stem_len] = 0;
	list_add_tail(&ap->link, &aliases_lookup);
	pr_debug("adding DT alias:%s: stem=%s id=%i node=%pOF\n",
		 ap->alias, ap->stem, ap->id, np);
}
1846
1847/**
1848 * of_alias_scan - Scan all properties of the 'aliases' node
1849 *
1850 * The function scans all the properties of the 'aliases' node and populates
1851 * the global lookup table with the properties. It returns the
1852 * number of alias properties found, or an error code in case of failure.
1853 *
1854 * @dt_alloc: An allocator that provides a virtual address to memory
1855 * for storing the resulting tree
1856 */
1857void of_alias_scan(void * (*dt_alloc)(u64 size, u64 align))
1858{
1859 struct property *pp;
1860
1861 of_aliases = of_find_node_by_path("/aliases");
1862 of_chosen = of_find_node_by_path("/chosen");
1863 if (of_chosen == NULL)
1864 of_chosen = of_find_node_by_path("/chosen@0");
1865
1866 if (of_chosen) {
1867 /* linux,stdout-path and /aliases/stdout are for legacy compatibility */
1868 const char *name = NULL;
1869
1870 if (of_property_read_string(of_chosen, "stdout-path", &name))
1871 of_property_read_string(of_chosen, "linux,stdout-path",
1872 &name);
1873 if (IS_ENABLED(CONFIG_PPC) && !name)
1874 of_property_read_string(of_aliases, "stdout", &name);
1875 if (name)
1876 of_stdout = of_find_node_opts_by_path(name, &of_stdout_options);
1877 }
1878
1879 if (!of_aliases)
1880 return;
1881
1882 for_each_property_of_node(of_aliases, pp) {
1883 const char *start = pp->name;
1884 const char *end = start + strlen(start);
1885 struct device_node *np;
1886 struct alias_prop *ap;
1887 int id, len;
1888
1889 /* Skip those we do not want to proceed */
1890 if (!strcmp(pp->name, "name") ||
1891 !strcmp(pp->name, "phandle") ||
1892 !strcmp(pp->name, "linux,phandle"))
1893 continue;
1894
1895 np = of_find_node_by_path(pp->value);
1896 if (!np)
1897 continue;
1898
1899 /* walk the alias backwards to extract the id and work out
1900 * the 'stem' string */
1901 while (isdigit(*(end-1)) && end > start)
1902 end--;
1903 len = end - start;
1904
1905 if (kstrtoint(end, 10, &id) < 0)
1906 continue;
1907
1908 /* Allocate an alias_prop with enough space for the stem */
1909 ap = dt_alloc(sizeof(*ap) + len + 1, __alignof__(*ap));
1910 if (!ap)
1911 continue;
1912 memset(ap, 0, sizeof(*ap) + len + 1);
1913 ap->alias = start;
1914 of_alias_add(ap, np, id, start, len);
1915 }
1916}
1917
1918/**
1919 * of_alias_get_id - Get alias id for the given device_node
1920 * @np: Pointer to the given device_node
1921 * @stem: Alias stem of the given device_node
1922 *
1923 * The function travels the lookup table to get the alias id for the given
1924 * device_node and alias stem. It returns the alias id if found.
1925 */
1926int of_alias_get_id(struct device_node *np, const char *stem)
1927{
1928 struct alias_prop *app;
1929 int id = -ENODEV;
1930
1931 mutex_lock(&of_mutex);
1932 list_for_each_entry(app, &aliases_lookup, link) {
1933 if (strcmp(app->stem, stem) != 0)
1934 continue;
1935
1936 if (np == app->np) {
1937 id = app->id;
1938 break;
1939 }
1940 }
1941 mutex_unlock(&of_mutex);
1942
1943 return id;
1944}
1945EXPORT_SYMBOL_GPL(of_alias_get_id);
1946
1947/**
1948 * of_alias_get_highest_id - Get highest alias id for the given stem
1949 * @stem: Alias stem to be examined
1950 *
1951 * The function travels the lookup table to get the highest alias id for the
1952 * given alias stem. It returns the alias id if found.
1953 */
1954int of_alias_get_highest_id(const char *stem)
1955{
1956 struct alias_prop *app;
1957 int id = -ENODEV;
1958
1959 mutex_lock(&of_mutex);
1960 list_for_each_entry(app, &aliases_lookup, link) {
1961 if (strcmp(app->stem, stem) != 0)
1962 continue;
1963
1964 if (app->id > id)
1965 id = app->id;
1966 }
1967 mutex_unlock(&of_mutex);
1968
1969 return id;
1970}
1971EXPORT_SYMBOL_GPL(of_alias_get_highest_id);
1972
1973/**
1974 * of_console_check() - Test and setup console for DT setup
1975 * @dn - Pointer to device node
1976 * @name - Name to use for preferred console without index. ex. "ttyS"
1977 * @index - Index to use for preferred console.
1978 *
1979 * Check if the given device node matches the stdout-path property in the
1980 * /chosen node. If it does then register it as the preferred console and return
1981 * TRUE. Otherwise return FALSE.
1982 */
1983bool of_console_check(struct device_node *dn, char *name, int index)
1984{
1985 if (!dn || dn != of_stdout || console_set_on_cmdline)
1986 return false;
1987
1988 /*
1989 * XXX: cast `options' to char pointer to suppress complication
1990 * warnings: printk, UART and console drivers expect char pointer.
1991 */
1992 return !add_preferred_console(name, index, (char *)of_stdout_options);
1993}
1994EXPORT_SYMBOL_GPL(of_console_check);
1995
1996/**
1997 * of_find_next_cache_node - Find a node's subsidiary cache
1998 * @np: node of type "cpu" or "cache"
1999 *
2000 * Returns a node pointer with refcount incremented, use
2001 * of_node_put() on it when done. Caller should hold a reference
2002 * to np.
2003 */
2004struct device_node *of_find_next_cache_node(const struct device_node *np)
2005{
2006 struct device_node *child, *cache_node;
2007
2008 cache_node = of_parse_phandle(np, "l2-cache", 0);
2009 if (!cache_node)
2010 cache_node = of_parse_phandle(np, "next-level-cache", 0);
2011
2012 if (cache_node)
2013 return cache_node;
2014
2015 /* OF on pmac has nodes instead of properties named "l2-cache"
2016 * beneath CPU nodes.
2017 */
2018 if (!strcmp(np->type, "cpu"))
2019 for_each_child_of_node(np, child)
2020 if (!strcmp(child->type, "cache"))
2021 return child;
2022
2023 return NULL;
2024}
2025
2026/**
2027 * of_find_last_cache_level - Find the level at which the last cache is
2028 * present for the given logical cpu
2029 *
2030 * @cpu: cpu number(logical index) for which the last cache level is needed
2031 *
2032 * Returns the the level at which the last cache is present. It is exactly
2033 * same as the total number of cache levels for the given logical cpu.
2034 */
2035int of_find_last_cache_level(unsigned int cpu)
2036{
2037 u32 cache_level = 0;
2038 struct device_node *prev = NULL, *np = of_cpu_device_node_get(cpu);
2039
2040 while (np) {
2041 prev = np;
2042 of_node_put(np);
2043 np = of_find_next_cache_node(np);
2044 }
2045
2046 of_property_read_u32(prev, "cache-level", &cache_level);
2047
2048 return cache_level;
2049}