/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
  Red Black Trees
  (C) 1999  Andrea Arcangeli <andrea@suse.de>

  linux/include/linux/rbtree.h

  To use rbtrees you'll have to implement your own insert and search cores.
  This avoids the overhead of callbacks and the dramatic performance loss
  they would cause. It is not the cleanest way, but it is how you get both
  performance and genericity in C (as opposed to C++).

  See Documentation/rbtree.txt for documentation and samples; an
  illustrative insert-core sketch also appears near rb_link_node() below.
*/

#ifndef	_LINUX_RBTREE_H
#define	_LINUX_RBTREE_H

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <linux/rcupdate.h>

struct rb_node {
	unsigned long  __rb_parent_color;
	struct rb_node *rb_right;
	struct rb_node *rb_left;
} __attribute__((aligned(sizeof(long))));
    /* The alignment might seem pointless, but allegedly CRIS needs it */

struct rb_root {
	struct rb_node *rb_node;
};

#define rb_parent(r)   ((struct rb_node *)((r)->__rb_parent_color & ~3))

#define RB_ROOT	(struct rb_root) { NULL, }
#define	rb_entry(ptr, type, member) container_of(ptr, type, member)
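
/*
 * Illustrative sketch, not part of this header: a user structure embeds a
 * struct rb_node and recovers the containing object with rb_entry(). The
 * type 'struct my_item' and its 'key' field are hypothetical.
 */
#if 0
struct my_item {
	struct rb_node node;	/* links this object into a tree */
	int key;		/* ordering key, chosen by the user */
};

/* Map an rb_node back to the my_item that embeds it. */
static inline struct my_item *my_item_of(struct rb_node *n)
{
	return rb_entry(n, struct my_item, node);
}
#endif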

#define RB_EMPTY_ROOT(root)  (READ_ONCE((root)->rb_node) == NULL)

/* 'empty' nodes are nodes that are known not to be inserted in an rbtree */
#define RB_EMPTY_NODE(node)  \
	((node)->__rb_parent_color == (unsigned long)(node))
#define RB_CLEAR_NODE(node)  \
	((node)->__rb_parent_color = (unsigned long)(node))
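
/*
 * Illustrative sketch, not part of this header: RB_CLEAR_NODE() marks a
 * node as unlinked so RB_EMPTY_NODE() can later tell whether it is in a
 * tree. rb_erase() does not clear the erased node itself, so callers that
 * want this test must clear it by hand. 'my_item' is the hypothetical type
 * from the sketch above.
 */
#if 0
static void my_remove(struct my_item *item, struct rb_root *root)
{
	/* Tolerate double removal: skip nodes already marked empty. */
	if (RB_EMPTY_NODE(&item->node))
		return;
	rb_erase(&item->node, root);
	RB_CLEAR_NODE(&item->node);	/* mark as no longer in a tree */
}
#endif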


extern void rb_insert_color(struct rb_node *, struct rb_root *);
extern void rb_erase(struct rb_node *, struct rb_root *);


/* Find logical next and previous nodes in a tree */
extern struct rb_node *rb_next(const struct rb_node *);
extern struct rb_node *rb_prev(const struct rb_node *);
extern struct rb_node *rb_first(const struct rb_root *);
extern struct rb_node *rb_last(const struct rb_root *);
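
/*
 * Illustrative sketch, not part of this header: in-order (sorted)
 * traversal with rb_first()/rb_next(). 'my_item' and 'my_visit()' are
 * hypothetical.
 */
#if 0
static void my_walk(struct rb_root *root)
{
	struct rb_node *n;

	/*
	 * rb_first() is the leftmost (smallest) node; a full walk costs
	 * O(n) overall even though a single rb_next() is O(log n).
	 */
	for (n = rb_first(root); n; n = rb_next(n))
		my_visit(rb_entry(n, struct my_item, node));
}
#endif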

/* Postorder iteration - always visit the parent after its children */
extern struct rb_node *rb_first_postorder(const struct rb_root *);
extern struct rb_node *rb_next_postorder(const struct rb_node *);

/* Fast replacement of a single node without remove/rebalance/add/rebalance */
extern void rb_replace_node(struct rb_node *victim, struct rb_node *new,
			    struct rb_root *root);
extern void rb_replace_node_rcu(struct rb_node *victim, struct rb_node *new,
				struct rb_root *root);
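
/*
 * Illustrative sketch, not part of this header: rb_replace_node() splices
 * @new into @victim's position without rebalancing, so it is only safe
 * when @new sorts identically to @victim (e.g. same key). 'my_item' is
 * hypothetical.
 */
#if 0
static void my_swap_in(struct my_item *old, struct my_item *new,
		       struct rb_root *root)
{
	/* Copy the key so the replacement keeps the tree ordering intact. */
	new->key = old->key;
	rb_replace_node(&old->node, &new->node, root);
}
#endif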

static inline void rb_link_node(struct rb_node *node, struct rb_node *parent,
				struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	*rb_link = node;
}
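
/*
 * Illustrative sketch, not part of this header: the insert core that the
 * top-of-file comment asks users to write. Walk down from the root to find
 * the insertion point, link the node with rb_link_node(), then rebalance
 * with rb_insert_color(). 'my_item' and its 'key' field are hypothetical.
 */
#if 0
static bool my_insert(struct my_item *item, struct rb_root *root)
{
	struct rb_node **link = &root->rb_node, *parent = NULL;

	/* Descend to the leaf position where the new key belongs. */
	while (*link) {
		struct my_item *this = rb_entry(*link, struct my_item, node);

		parent = *link;
		if (item->key < this->key)
			link = &(*link)->rb_left;
		else if (item->key > this->key)
			link = &(*link)->rb_right;
		else
			return false;	/* duplicate key: reject */
	}

	/* Attach the new node, then restore the red-black invariants. */
	rb_link_node(&item->node, parent, link);
	rb_insert_color(&item->node, root);
	return true;
}
#endif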

static inline void rb_link_node_rcu(struct rb_node *node, struct rb_node *parent,
				    struct rb_node **rb_link)
{
	node->__rb_parent_color = (unsigned long)parent;
	node->rb_left = node->rb_right = NULL;

	rcu_assign_pointer(*rb_link, node);
}
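
/*
 * rb_link_node_rcu() publishes the new node with rcu_assign_pointer() so
 * that concurrent lockless readers, traversing under rcu_read_lock(),
 * never observe a half-initialised node. Writers must still serialise
 * against each other (e.g. with a lock); RCU only protects the readers.
 */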

#define rb_entry_safe(ptr, type, member) \
	({ typeof(ptr) ____ptr = (ptr); \
	   ____ptr ? rb_entry(____ptr, type, member) : NULL; \
	})

/**
 * rbtree_postorder_for_each_entry_safe - iterate in post-order over rb_root of
 * given type allowing the backing memory of @pos to be invalidated
 *
 * @pos:	the 'type *' to use as a loop cursor.
 * @n:		another 'type *' to use as temporary storage
 * @root:	'rb_root *' of the rbtree.
 * @field:	the name of the rb_node field within 'type'.
 *
 * rbtree_postorder_for_each_entry_safe() provides a similar guarantee as
 * list_for_each_entry_safe() and allows the iteration to continue independent
 * of changes to @pos by the body of the loop.
 *
 * Note, however, that it cannot handle other modifications that re-order the
 * rbtree it is iterating over. This includes calling rb_erase() on @pos, as
 * rb_erase() may rebalance the tree, causing us to miss some nodes.
 */
#define rbtree_postorder_for_each_entry_safe(pos, n, root, field) \
	for (pos = rb_entry_safe(rb_first_postorder(root), typeof(*pos), field); \
	     pos && ({ n = rb_entry_safe(rb_next_postorder(&pos->field), \
			typeof(*pos), field); 1; }); \
	     pos = n)
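
/*
 * Illustrative sketch, not part of this header: the classic use of the
 * post-order iterator is tearing down a whole tree, since each node is
 * visited after its children and can be freed immediately. 'my_item' is
 * hypothetical; kfree() assumes the entries came from kmalloc() and
 * requires linux/slab.h.
 */
#if 0
static void my_destroy(struct rb_root *root)
{
	struct my_item *pos, *n;

	/* Safe against freeing 'pos': 'n' is fetched before the body runs. */
	rbtree_postorder_for_each_entry_safe(pos, n, root, node)
		kfree(pos);

	*root = RB_ROOT;	/* leave behind a valid empty tree */
}
#endif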

/*
 * Leftmost-cached rbtrees.
 *
 * We do not cache the rightmost node: weighing the extra footprint against
 * the number of potential users that would benefit from an O(1) rb_last(),
 * it is just not worth it, and users that want the feature can always
 * implement the logic explicitly. Users that want to cache both pointers
 * may find the result a bit asymmetric, but that's ok.
 */
struct rb_root_cached {
	struct rb_root rb_root;
	struct rb_node *rb_leftmost;
};

#define RB_ROOT_CACHED (struct rb_root_cached) { {NULL, }, NULL }

/* Same as rb_first(), but O(1) */
#define rb_first_cached(root) (root)->rb_leftmost

static inline void rb_insert_color_cached(struct rb_node *node,
					  struct rb_root_cached *root,
					  bool leftmost)
{
	if (leftmost)
		root->rb_leftmost = node;
	rb_insert_color(node, &root->rb_root);
}
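
/*
 * Illustrative sketch, not part of this header: callers of
 * rb_insert_color_cached() must report whether the new node became the
 * leftmost one. The natural way is to start with leftmost = true and clear
 * it the first time the descent goes right. 'my_item' is hypothetical.
 */
#if 0
static void my_insert_cached(struct my_item *item, struct rb_root_cached *root)
{
	struct rb_node **link = &root->rb_root.rb_node, *parent = NULL;
	bool leftmost = true;

	while (*link) {
		struct my_item *this = rb_entry(*link, struct my_item, node);

		parent = *link;
		if (item->key < this->key) {
			link = &(*link)->rb_left;
		} else {
			/* Going right even once means not leftmost. */
			link = &(*link)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&item->node, parent, link);
	rb_insert_color_cached(&item->node, root, leftmost);
}
#endif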

static inline void rb_erase_cached(struct rb_node *node,
				   struct rb_root_cached *root)
{
	if (root->rb_leftmost == node)
		root->rb_leftmost = rb_next(node);
	rb_erase(node, &root->rb_root);
}

static inline void rb_replace_node_cached(struct rb_node *victim,
					  struct rb_node *new,
					  struct rb_root_cached *root)
{
	if (root->rb_leftmost == victim)
		root->rb_leftmost = new;
	rb_replace_node(victim, new, &root->rb_root);
}

#endif	/* _LINUX_RBTREE_H */