=================================
Red-black Trees (rbtree) in Linux
=================================


:Date: January 18, 2007
:Author: Rob Landley <rob@landley.net>

What are red-black trees, and what are they for?
------------------------------------------------

Red-black trees are a type of self-balancing binary search tree, used for
storing sortable key/value data pairs.  This differs from radix trees (which
are used to efficiently store sparse arrays and thus use long integer indexes
to insert/access/delete nodes) and hash tables (which are not kept sorted to
be easily traversed in order, and must be tuned for a specific size and
hash function where rbtrees scale gracefully storing arbitrary keys).

Red-black trees are similar to AVL trees, but provide faster real-time bounded
worst case performance for insertion and deletion (at most two rotations and
three rotations, respectively, to balance the tree), with slightly slower
(but still O(log n)) lookup time.

To quote Linux Weekly News:

    There are a number of red-black trees in use in the kernel.
    The deadline and CFQ I/O schedulers employ rbtrees to
    track requests; the packet CD/DVD driver does the same.
    The high-resolution timer code uses an rbtree to organize outstanding
    timer requests.  The ext3 filesystem tracks directory entries in a
    red-black tree.
    Virtual memory areas (VMAs) are tracked with red-black
    trees, as are epoll file descriptors, cryptographic keys, and network
    packets in the "hierarchical token bucket" scheduler.

This document covers use of the Linux rbtree implementation.  For more
information on the nature and implementation of Red Black Trees, see:

  Linux Weekly News article on red-black trees
    https://lwn.net/Articles/184495/

  Wikipedia entry on red-black trees
    https://en.wikipedia.org/wiki/Red-black_tree

Linux implementation of red-black trees
---------------------------------------

Linux's rbtree implementation lives in the file "lib/rbtree.c".  To use it,
"#include <linux/rbtree.h>".

The Linux rbtree implementation is optimized for speed, and thus has one
less layer of indirection (and better cache locality) than more traditional
tree implementations.  Instead of using pointers to separate rb_node and data
structures, each instance of struct rb_node is embedded in the data structure
it organizes.  And instead of using a comparison callback function pointer,
users are expected to write their own tree search and insert functions
which call the provided rbtree functions.  Locking is also left up to the
user of the rbtree code.

Creating a new rbtree
---------------------

Data nodes in an rbtree tree are structures containing a struct rb_node member::

  struct mytype {
      struct rb_node node;
      char *keystring;
  };

When dealing with a pointer to the embedded struct rb_node, the containing data
structure may be accessed with the standard container_of() macro.  In addition,
individual members may be accessed directly via rb_entry(node, type, member).

At the root of each rbtree is an rb_root structure, which is initialized to be
empty via:

  struct rb_root mytree = RB_ROOT;

Searching for a value in an rbtree
----------------------------------

Writing a search function for your tree is fairly straightforward: start at the
root, compare each value, and follow the left or right branch as necessary.

Example::

  struct mytype *my_search(struct rb_root *root, char *string)
  {
      struct rb_node *node = root->rb_node;

      while (node) {
          struct mytype *data = container_of(node, struct mytype, node);
          int result;

          result = strcmp(string, data->keystring);

          if (result < 0)
              node = node->rb_left;
          else if (result > 0)
              node = node->rb_right;
          else
              return data;
      }
      return NULL;
  }

Inserting data into an rbtree
-----------------------------

Inserting data in the tree involves first searching for the place to insert the
new node, then inserting the node and rebalancing ("recoloring") the tree.

The search for insertion differs from the previous search by finding the
location of the pointer on which to graft the new node.  The new node also
needs a link to its parent node for rebalancing purposes.

Example::

  int my_insert(struct rb_root *root, struct mytype *data)
  {
      struct rb_node **new = &(root->rb_node), *parent = NULL;

      /* Figure out where to put new node */
      while (*new) {
          struct mytype *this = container_of(*new, struct mytype, node);
          int result = strcmp(data->keystring, this->keystring);

          parent = *new;
          if (result < 0)
              new = &((*new)->rb_left);
          else if (result > 0)
              new = &((*new)->rb_right);
          else
              return FALSE;
      }

      /* Add new node and rebalance tree. */
      rb_link_node(&data->node, parent, new);
      rb_insert_color(&data->node, root);

      return TRUE;
  }

Removing or replacing existing data in an rbtree
------------------------------------------------

To remove an existing node from a tree, call::

  void rb_erase(struct rb_node *victim, struct rb_root *tree);

Example::

  struct mytype *data = my_search(&mytree, "walrus");

  if (data) {
      rb_erase(&data->node, &mytree);
      myfree(data);
  }

To replace an existing node in a tree with a new one with the same key, call::

  void
  rb_replace_node(struct rb_node *old, struct rb_node *new,
                  struct rb_root *tree);

Replacing a node this way does not re-sort the tree: If the new node doesn't
have the same key as the old node, the rbtree will probably become corrupted.

Iterating through the elements stored in an rbtree (in sort order)
------------------------------------------------------------------

Four functions are provided for iterating through an rbtree's contents in
sorted order.  These work on arbitrary trees, and should not need to be
modified or wrapped (except for locking purposes)::

  struct rb_node *rb_first(struct rb_root *tree);
  struct rb_node *rb_last(struct rb_root *tree);
  struct rb_node *rb_next(struct rb_node *node);
  struct rb_node *rb_prev(struct rb_node *node);

To start iterating, call rb_first() or rb_last() with a pointer to the root
of the tree, which will return a pointer to the node structure contained in
the first or last element in the tree.  To continue, fetch the next or previous
node by calling rb_next() or rb_prev() on the current node.  This will return
NULL when there are no more nodes left.

The iterator functions return a pointer to the embedded struct rb_node, from
which the containing data structure may be accessed with the container_of()
macro, and individual members may be accessed directly via
rb_entry(node, type, member).

Example::

  struct rb_node *node;
  for (node = rb_first(&mytree); node; node = rb_next(node))
      printk("key=%s\n", rb_entry(node, struct mytype, node)->keystring);

Cached rbtrees
--------------

Computing the leftmost (smallest) node is quite a common task for binary
search trees, such as for traversals or users relying on the particular
order for their own logic.  To this end, users can use 'struct rb_root_cached'
to optimize O(logN) rb_first() calls to a simple pointer fetch avoiding
potentially expensive tree iterations.  This is done at negligible runtime
overhead for maintenance; albeit larger memory footprint.

Similar to the rb_root structure, cached rbtrees are initialized to be
empty via::

  struct rb_root_cached mytree = RB_ROOT_CACHED;

Cached rbtree is simply a regular rb_root with an extra pointer to cache the
leftmost node.
This allows rb_root_cached to exist wherever rb_root does,
which permits augmented trees to be supported as well as only a few extra
interfaces::

  struct rb_node *rb_first_cached(struct rb_root_cached *tree);
  void rb_insert_color_cached(struct rb_node *, struct rb_root_cached *, bool);
  void rb_erase_cached(struct rb_node *node, struct rb_root_cached *);

Both insert and erase calls have their respective counterpart of augmented
trees::

  void rb_insert_augmented_cached(struct rb_node *node, struct rb_root_cached *,
                                  bool, struct rb_augment_callbacks *);
  void rb_erase_augmented_cached(struct rb_node *, struct rb_root_cached *,
                                 struct rb_augment_callbacks *);


Support for Augmented rbtrees
-----------------------------

Augmented rbtree is an rbtree with "some" additional data stored in
each node, where the additional data for node N must be a function of
the contents of all nodes in the subtree rooted at N.  This data can
be used to augment some new functionality to rbtree.  Augmented rbtree
is an optional feature built on top of basic rbtree infrastructure.
An rbtree user who wants this feature will have to call the augmentation
functions with the user provided augmentation callback when inserting
and erasing nodes.

C files implementing augmented rbtree manipulation must include
<linux/rbtree_augmented.h> instead of <linux/rbtree.h>.
Note that
linux/rbtree_augmented.h exposes some rbtree implementation details
you are not expected to rely on; please stick to the documented APIs
there and do not include <linux/rbtree_augmented.h> from header files
either so as to minimize chances of your users accidentally relying on
such implementation details.

On insertion, the user must update the augmented information on the path
leading to the inserted node, then call rb_link_node() as usual and
rb_augment_inserted() instead of the usual rb_insert_color() call.
If rb_augment_inserted() rebalances the rbtree, it will callback into
a user provided function to update the augmented information on the
affected subtrees.

When erasing a node, the user must call rb_erase_augmented() instead of
rb_erase().  rb_erase_augmented() calls back into user provided functions
to update the augmented information on affected subtrees.

In both cases, the callbacks are provided through struct rb_augment_callbacks.
3 callbacks must be defined:

- A propagation callback, which updates the augmented value for a given
  node and its ancestors, up to a given stop point (or NULL to update
  all the way to the root).

- A copy callback, which copies the augmented value for a given subtree
  to a newly assigned subtree root.

- A tree rotation callback, which copies the augmented value for a given
  subtree to a newly assigned subtree root AND recomputes the augmented
  information for the former subtree root.

The compiled code for rb_erase_augmented() may inline the propagation and
copy callbacks, which results in a large function, so each augmented rbtree
user should have a single rb_erase_augmented() call site in order to limit
compiled code size.


Sample usage
^^^^^^^^^^^^

Interval tree is an example of augmented rb tree.  Reference -
"Introduction to Algorithms" by Cormen, Leiserson, Rivest and Stein.
More details about interval trees:

Classical rbtree has a single key and it cannot be directly used to store
interval ranges like [lo:hi] and do a quick lookup for any overlap with a new
lo:hi or to find whether there is an exact match for a new lo:hi.

However, rbtree can be augmented to store such interval ranges in a structured
way making it possible to do efficient lookup and exact match.

This "extra information" stored in each node is the maximum hi
(max_hi) value among all the nodes that are its descendants.  This
information can be maintained at each node just by looking at the node
and its immediate children.
And this will be used in O(log n) lookup
for lowest match (lowest start address among all possible matches)
with something like::

  struct interval_tree_node *
  interval_tree_first_match(struct rb_root *root,
                            unsigned long start, unsigned long last)
  {
      struct interval_tree_node *node;

      if (!root->rb_node)
          return NULL;
      node = rb_entry(root->rb_node, struct interval_tree_node, rb);

      while (true) {
          if (node->rb.rb_left) {
              struct interval_tree_node *left =
                  rb_entry(node->rb.rb_left,
                           struct interval_tree_node, rb);
              if (left->__subtree_last >= start) {
                  /*
                   * Some nodes in left subtree satisfy Cond2.
                   * Iterate to find the leftmost such node N.
                   * If it also satisfies Cond1, that's the match
                   * we are looking for.  Otherwise, there is no
                   * matching interval as nodes to the right of N
                   * can't satisfy Cond1 either.
                   */
                  node = left;
                  continue;
              }
          }
          if (node->start <= last) {        /* Cond1 */
              if (node->last >= start)      /* Cond2 */
                  return node;              /* node is leftmost match */
              if (node->rb.rb_right) {
                  node = rb_entry(node->rb.rb_right,
                                  struct interval_tree_node, rb);
                  if (node->__subtree_last >= start)
                      continue;
              }
          }
          return NULL;                      /* No match */
      }
  }

Insertion/removal are defined using the following augmented callbacks::

  static inline unsigned long
  compute_subtree_last(struct interval_tree_node *node)
  {
      unsigned long max = node->last, subtree_last;
      if (node->rb.rb_left) {
          subtree_last = rb_entry(node->rb.rb_left,
                                  struct interval_tree_node, rb)->__subtree_last;
          if (max < subtree_last)
              max = subtree_last;
      }
      if (node->rb.rb_right) {
          subtree_last = rb_entry(node->rb.rb_right,
                                  struct interval_tree_node, rb)->__subtree_last;
          if (max < subtree_last)
              max = subtree_last;
      }
      return max;
  }

  static void augment_propagate(struct rb_node *rb, struct rb_node *stop)
  {
      while (rb != stop) {
          struct interval_tree_node *node =
              rb_entry(rb, struct
                       interval_tree_node, rb);
          unsigned long subtree_last = compute_subtree_last(node);
          if (node->__subtree_last == subtree_last)
              break;
          node->__subtree_last = subtree_last;
          rb = rb_parent(&node->rb);
      }
  }

  static void augment_copy(struct rb_node *rb_old, struct rb_node *rb_new)
  {
      struct interval_tree_node *old =
          rb_entry(rb_old, struct interval_tree_node, rb);
      struct interval_tree_node *new =
          rb_entry(rb_new, struct interval_tree_node, rb);

      new->__subtree_last = old->__subtree_last;
  }

  static void augment_rotate(struct rb_node *rb_old, struct rb_node *rb_new)
  {
      struct interval_tree_node *old =
          rb_entry(rb_old, struct interval_tree_node, rb);
      struct interval_tree_node *new =
          rb_entry(rb_new, struct interval_tree_node, rb);

      new->__subtree_last = old->__subtree_last;
      old->__subtree_last = compute_subtree_last(old);
  }

  static const struct rb_augment_callbacks augment_callbacks = {
      augment_propagate, augment_copy, augment_rotate
  };

  void interval_tree_insert(struct interval_tree_node *node,
                            struct rb_root *root)
  {
      struct rb_node **link = &root->rb_node, *rb_parent = NULL;
      unsigned long start = node->start, last = node->last;
      struct
             interval_tree_node *parent;

      while (*link) {
          rb_parent = *link;
          parent = rb_entry(rb_parent, struct interval_tree_node, rb);
          if (parent->__subtree_last < last)
              parent->__subtree_last = last;
          if (start < parent->start)
              link = &parent->rb.rb_left;
          else
              link = &parent->rb.rb_right;
      }

      node->__subtree_last = last;
      rb_link_node(&node->rb, rb_parent, link);
      rb_insert_augmented(&node->rb, root, &augment_callbacks);
  }

  void interval_tree_remove(struct interval_tree_node *node,
                            struct rb_root *root)
  {
      rb_erase_augmented(&node->rb, root, &augment_callbacks);
  }