Line data Source code
1 0 : /*
2 : * Copyright (c) 2018 Intel Corporation
3 : *
4 : * SPDX-License-Identifier: Apache-2.0
5 : */
6 :
7 : /**
8 : * @file
9 : * @defgroup rbtree_apis Balanced Red/Black Tree
10 : * @ingroup datastructure_apis
11 : *
12 : * @brief Balanced Red/Black Tree implementation
13 : *
14 : * This implements an intrusive balanced tree that guarantees
15 : * O(log2(N)) runtime for all operations and amortized O(1) behavior
16 : * for creation and destruction of whole trees. The algorithms and
17 : * naming are conventional per existing academic and didactic
18 : * implementations, c.f.:
19 : *
20 : * https://en.wikipedia.org/wiki/Red%E2%80%93black_tree
21 : *
22 : * The implementation is size-optimized to prioritize runtime memory
23 : * usage. The data structure is intrusive, which is to say the @ref
24 : * rbnode handle is intended to be placed in a separate struct, in the
25 : * same way as with other such structures (e.g. Zephyr's @ref
26 : * doubly-linked-list_apis), and requires no data pointer to be stored
27 : * in the node. The color bit is unioned with a pointer (fairly common
28 : * for such libraries). Most notably, there is no "parent" pointer
29 : * stored in the node, the upper structure of the tree being generated
30 : * dynamically via a stack as the tree is recursed. So the overall
31 : * memory overhead of a node is just two pointers, identical with a
32 : * doubly-linked list.
33 : *
34 : * @{
35 : */
36 :
37 : #ifndef ZEPHYR_INCLUDE_SYS_RB_H_
38 : #define ZEPHYR_INCLUDE_SYS_RB_H_
39 :
40 : #include <stdbool.h>
41 : #include <stdint.h>
42 :
43 : /* Our SDK/toolchains integration seems to be inconsistent about
44 : * whether they expose alloca.h or not. On gcc it's a moot point as
45 : * it's always builtin.
46 : */
47 : #ifdef __GNUC__
48 : #ifndef alloca
49 : #define alloca __builtin_alloca
50 : #endif
51 : #else
52 : #include <alloca.h>
53 : #endif
54 :
55 : /**
56 : * @brief Balanced red/black tree node structure
57 : */
58 1 : struct rbnode {
59 : /** @cond INTERNAL_HIDDEN */
60 : struct rbnode *children[2];
61 : /** @endcond */
62 : };
63 :
/* Theoretical maximum depth of tree based on pointer size. If memory
 * is filled with 2-pointer nodes, and the tree can be twice as deep as
 * a perfectly packed binary tree, plus root... Works out to 59 entries
 * for 32 bit pointers and 121 at 64 bits.
 */

/* log2 of the pointer size in bytes: 2 for 32-bit, 3 for 64-bit. */
#define Z_TBITS(t) ((sizeof(t)) < sizeof(uint64_t) ? 2 : 3)

/* Number of bits in a pointer.  NOTE(review): BITS_PER_BYTE is not
 * defined in this header; presumably it comes from Zephyr's util
 * headers — confirm users include those first.
 */
#define Z_PBITS(t) (BITS_PER_BYTE * sizeof(t))

/* Worst-case red/black depth is 2x the depth of a packed binary tree
 * whose node count is bounded by addressable memory, plus the root.
 */
#define Z_MAX_RBTREE_DEPTH (2 * (Z_PBITS(int *) - Z_TBITS(int *) - 1) + 1)
72 :
73 : /**
74 : * @typedef rb_lessthan_t
75 : * @brief Red/black tree comparison predicate
76 : *
77 : * Compares the two nodes and returns true if node A is strictly less
78 : * than B according to the tree's sorting criteria, false otherwise.
79 : *
80 : * Note that during insert, the new node being inserted will always be
81 : * "A", where "B" is the existing node within the tree against which
82 : * it is being compared. This trait can be used (with care!) to
83 : * implement "most/least recently added" semantics between nodes which
84 : * would otherwise compare as equal.
85 : */
86 1 : typedef bool (*rb_lessthan_t)(struct rbnode *a, struct rbnode *b);
87 :
88 : /**
89 : * @brief Balanced red/black tree structure
90 : */
91 1 : struct rbtree {
92 : /** Root node of the tree */
93 1 : struct rbnode *root;
94 : /** Comparison function for nodes in the tree */
95 1 : rb_lessthan_t lessthan_fn;
96 : /** @cond INTERNAL_HIDDEN */
97 : int max_depth;
98 : #ifdef CONFIG_MISRA_SANE
99 : struct rbnode *iter_stack[Z_MAX_RBTREE_DEPTH];
100 : unsigned char iter_left[Z_MAX_RBTREE_DEPTH];
101 : #endif
102 : /** @endcond */
103 : };
104 :
105 : /**
106 : * @brief Prototype for node visitor callback.
107 : * @param node Node being visited
108 : * @param cookie User-specified data
109 : */
110 1 : typedef void (*rb_visit_t)(struct rbnode *node, void *cookie);
111 :
112 : struct rbnode *z_rb_child(struct rbnode *node, uint8_t side);
113 : int z_rb_is_black(struct rbnode *node);
114 : #ifndef CONFIG_MISRA_SANE
115 : void z_rb_walk(struct rbnode *node, rb_visit_t visit_fn, void *cookie);
116 : #endif
117 : struct rbnode *z_rb_get_minmax(struct rbtree *tree, uint8_t side);
118 :
119 : /**
120 : * @brief Insert node into tree
121 : */
122 1 : void rb_insert(struct rbtree *tree, struct rbnode *node);
123 :
124 : /**
125 : * @brief Remove node from tree
126 : */
127 1 : void rb_remove(struct rbtree *tree, struct rbnode *node);
128 :
129 : /**
130 : * @brief Returns the lowest-sorted member of the tree
131 : */
132 1 : static inline struct rbnode *rb_get_min(struct rbtree *tree)
133 : {
134 : return z_rb_get_minmax(tree, 0U);
135 : }
136 :
137 : /**
138 : * @brief Returns the highest-sorted member of the tree
139 : */
140 1 : static inline struct rbnode *rb_get_max(struct rbtree *tree)
141 : {
142 : return z_rb_get_minmax(tree, 1U);
143 : }
144 :
145 : /**
146 : * @brief Returns true if the given node is part of the tree
147 : *
148 : * Note that this does not internally dereference the node pointer
149 : * (though the tree's lessthan callback might!), it just tests it for
150 : * equality with items in the tree. So it's feasible to use this to
151 : * implement a "set" construct by simply testing the pointer value
152 : * itself.
153 : */
154 1 : bool rb_contains(struct rbtree *tree, struct rbnode *node);
155 :
156 : #ifndef CONFIG_MISRA_SANE
157 : /**
158 : * @brief Walk/enumerate a rbtree
159 : *
160 : * Very simple recursive enumeration. Low code size, but requiring a
161 : * separate function can be clumsy for the user and there is no way to
162 : * break out of the loop early. See RB_FOR_EACH for an iterative
163 : * implementation.
164 : */
165 1 : static inline void rb_walk(struct rbtree *tree, rb_visit_t visit_fn,
166 : void *cookie)
167 : {
168 : z_rb_walk(tree->root, visit_fn, cookie);
169 : }
170 : #endif
171 :
/* Iteration state for RB_FOR_EACH: an explicit stack standing in for
 * the parent pointers the nodes don't store.
 */
struct _rb_foreach {
	/* Stack of ancestor nodes from the root down to the current node. */
	struct rbnode **stack;
	/* Per-level flag: nonzero if we descended to the left child. */
	uint8_t *is_left;
	/* Index of the top of the stack; -1 when empty/not started. */
	int32_t top;
};
177 :
#ifdef CONFIG_MISRA_SANE
/* MISRA builds: point the iterator at the stacks stored statically
 * inside the tree (alloca() is forbidden).  The "node" argument is
 * unused but kept so both variants share a signature.
 */
#define _RB_FOREACH_INIT(tree, node) { \
	.stack = &(tree)->iter_stack[0], \
	.is_left = &(tree)->iter_left[0], \
	.top = -1 \
}
#else
/* Default: carve the iteration stacks out of the caller's stack frame,
 * sized by the tree's tracked max_depth.
 */
#define _RB_FOREACH_INIT(tree, node) { \
	.stack = (struct rbnode **) \
		alloca((tree)->max_depth * sizeof(struct rbnode *)), \
	.is_left = (uint8_t *)alloca((tree)->max_depth * sizeof(uint8_t)),\
	.top = -1 \
}
#endif

/* Advances the iterator and returns the next node in sort order, or
 * NULL when the walk is complete (internal; used by RB_FOR_EACH).
 */
struct rbnode *z_rb_foreach_next(struct rbtree *tree, struct _rb_foreach *f);
194 :
195 : /**
196 : * @brief Walk a tree in-order without recursing
197 : *
198 : * While @ref rb_walk() is very simple, recursing on the C stack can
199 : * be clumsy for some purposes and on some architectures wastes
200 : * significant memory in stack frames. This macro implements a
201 : * non-recursive "foreach" loop that can iterate directly on the tree,
202 : * at a moderate cost in code size.
203 : *
204 : * Note that the resulting loop is not safe against modifications to
205 : * the tree. Changes to the tree structure during the loop will
206 : * produce incorrect results, as nodes may be skipped or duplicated.
207 : * Unlike linked lists, no _SAFE variant exists.
208 : *
209 : * Note also that the macro expands its arguments multiple times, so
210 : * they should not be expressions with side effects.
211 : *
212 : * @param tree A pointer to a struct rbtree to walk
213 : * @param node The symbol name of a local struct rbnode* variable to
214 : * use as the iterator
215 : */
216 1 : #define RB_FOR_EACH(tree, node) \
217 : for (struct _rb_foreach __f = _RB_FOREACH_INIT(tree, node); \
218 : ((node) = z_rb_foreach_next((tree), &__f)); \
219 : /**/)
220 :
221 : /**
222 : * @brief Loop over rbtree with implicit container field logic
223 : *
224 : * As for RB_FOR_EACH(), but "node" can have an arbitrary type
225 : * containing a struct rbnode.
226 : *
227 : * @param tree A pointer to a struct rbtree to walk
228 : * @param node The symbol name of a local iterator
229 : * @param field The field name of a struct rbnode inside node
230 : */
231 1 : #define RB_FOR_EACH_CONTAINER(tree, node, field) \
232 : for (struct _rb_foreach __f = _RB_FOREACH_INIT(tree, node); \
233 : ({struct rbnode *n = z_rb_foreach_next(tree, &__f); \
234 : (node) = n ? CONTAINER_OF(n, __typeof__(*(node)), \
235 : field) : NULL; (node); }) != NULL; \
236 : /**/)
237 :
238 : /** @} */
239 :
240 : #endif /* ZEPHYR_INCLUDE_SYS_RB_H_ */
|