Lines Matching full:upper

2567 	INIT_LIST_HEAD(&node->upper);
2590 * upper edges and any uncached nodes in the path.
2598 struct btrfs_backref_node *upper;
2605 while (!list_empty(&node->upper)) {
2606 edge = list_entry(node->upper.next, struct btrfs_backref_edge,
2608 upper = edge->node[UPPER];
2610 list_del(&edge->list[UPPER]);
2617 if (list_empty(&upper->lower)) {
2618 list_add_tail(&upper->lower, &cache->leaves);
2619 upper->lowest = 1;
2676 struct btrfs_backref_node *upper;
2709 upper = btrfs_backref_alloc_node(cache, ref_key->offset,
2711 if (!upper) {
2717 * Backrefs for the upper level block isn't cached, add the
2720 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
2723 upper = rb_entry(rb_node, struct btrfs_backref_node, rb_node);
2724 ASSERT(upper->checked);
2725 INIT_LIST_HEAD(&edge->list[UPPER]);
2727 btrfs_backref_link_edge(edge, cur, upper, LINK_LOWER);
2750 struct btrfs_backref_node *upper;
2841 upper = btrfs_backref_alloc_node(cache, eb->start,
2843 if (!upper) {
2849 upper->owner = btrfs_header_owner(eb);
2851 upper->cowonly = 1;
2858 upper->checked = 0;
2860 upper->checked = 1;
2867 if (!upper->checked && need_check) {
2869 list_add_tail(&edge->list[UPPER],
2872 if (upper->checked)
2874 INIT_LIST_HEAD(&edge->list[UPPER]);
2877 upper = rb_entry(rb_node, struct btrfs_backref_node,
2879 ASSERT(upper->checked);
2880 INIT_LIST_HEAD(&edge->list[UPPER]);
2881 if (!upper->owner)
2882 upper->owner = btrfs_header_owner(eb);
2884 btrfs_backref_link_edge(edge, lower, upper, LINK_LOWER);
2890 lower = upper;
2891 upper = NULL;
2901 * NOTE: Even if the function returned 0, @cur is not yet cached as its upper
2938 if (!list_empty(&cur->upper)) {
2943 ASSERT(list_is_singular(&cur->upper));
2944 edge = list_entry(cur->upper.next, struct btrfs_backref_edge,
2946 ASSERT(list_empty(&edge->list[UPPER]));
2947 exist = edge->node[UPPER];
2949 * Add the upper level block to pending list if we need check
2953 list_add_tail(&edge->list[UPPER], &cache->pending_edge);
3060 list_for_each_entry(edge, &start->upper, list[LOWER])
3061 list_add_tail(&edge->list[UPPER], &pending_edge);
3064 struct btrfs_backref_node *upper;
3068 struct btrfs_backref_edge, list[UPPER]);
3069 list_del_init(&edge->list[UPPER]);
3070 upper = edge->node[UPPER];
3074 if (upper->detached) {
3079 if (list_empty(&lower->upper))
3087 * So if we have upper->rb_node populated, this means a cache
3088 * hit. We only need to link the edge, as @upper and all its
3091 if (!RB_EMPTY_NODE(&upper->rb_node)) {
3092 if (upper->lowest) {
3093 list_del_init(&upper->lower);
3094 upper->lowest = 0;
3097 list_add_tail(&edge->list[UPPER], &upper->lower);
3102 if (!upper->checked) {
3108 if (start->cowonly != upper->cowonly) {
3114 if (!upper->cowonly) {
3115 rb_node = rb_simple_insert(&cache->rb_root, upper->bytenr,
3116 &upper->rb_node);
3119 upper->bytenr, -EEXIST);
3124 list_add_tail(&edge->list[UPPER], &upper->lower);
3128 * to finish the upper linkage
3130 list_for_each_entry(edge, &upper->upper, list[LOWER])
3131 list_add_tail(&edge->list[UPPER], &pending_edge);
3140 struct btrfs_backref_node *upper;
3150 struct btrfs_backref_edge, list[UPPER]);
3151 list_del(&edge->list[UPPER]);
3154 upper = edge->node[UPPER];
3158 * Lower is no longer linked to any upper backref nodes and
3161 if (list_empty(&lower->upper) &&
3165 if (!RB_EMPTY_NODE(&upper->rb_node))
3168 /* Add this guy's upper edges to the list to process */
3169 list_for_each_entry(edge, &upper->upper, list[LOWER])
3170 list_add_tail(&edge->list[UPPER],
3172 if (list_empty(&upper->upper))
3173 list_add(&upper->list, &cache->useless_node);
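
For context, every match above manipulates the same two structures: backref nodes carry an "upper" and a "lower" list head, and each backref edge sits on both lists at once through list[LOWER]/list[UPPER] while recording its endpoints in node[LOWER]/node[UPPER]. The following is a minimal userspace sketch of that linkage pattern, modelled loosely on fs/btrfs/backref.h; the list helpers, struct layouts, link_edge() and main() here are simplified stand-ins for illustration, not the kernel API.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Tiny circular doubly-linked list, standing in for <linux/list.h>. */
struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add_tail(struct list_head *entry, struct list_head *head)
{
	entry->prev = head->prev;
	entry->next = head;
	head->prev->next = entry;
	head->prev = entry;
}

static int list_empty(const struct list_head *h) { return h->next == h; }

#define list_entry(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

enum { LOWER = 0, UPPER = 1 };
#define LINK_LOWER (1 << 0)
#define LINK_UPPER (1 << 1)

struct backref_node {
	unsigned long long bytenr;
	int level;
	struct list_head upper;	/* edges to parents (one level up) */
	struct list_head lower;	/* edges to children (one level down) */
};

/* One parent<->child relationship; it can be linked on both sides. */
struct backref_edge {
	struct list_head list[2];	/* list[LOWER] lives on child->upper,
					   list[UPPER] lives on parent->lower */
	struct backref_node *node[2];	/* node[LOWER] = child, node[UPPER] = parent */
};

static void link_edge(struct backref_edge *edge, struct backref_node *lower,
		      struct backref_node *upper, int link_which)
{
	assert(upper->level == lower->level + 1);
	edge->node[LOWER] = lower;
	edge->node[UPPER] = upper;
	if (link_which & LINK_LOWER)
		list_add_tail(&edge->list[LOWER], &lower->upper);
	if (link_which & LINK_UPPER)
		list_add_tail(&edge->list[UPPER], &upper->lower);
}

int main(void)
{
	struct backref_node leaf = { .bytenr = 1024, .level = 0 };
	struct backref_node parent = { .bytenr = 2048, .level = 1 };
	struct backref_edge edge;
	struct backref_edge *e;

	INIT_LIST_HEAD(&leaf.upper);
	INIT_LIST_HEAD(&leaf.lower);
	INIT_LIST_HEAD(&parent.upper);
	INIT_LIST_HEAD(&parent.lower);

	/* Link only the LOWER side first, the way the backref handlers above
	 * do; the UPPER side is completed in a later pass. */
	link_edge(&edge, &leaf, &parent, LINK_LOWER);
	printf("leaf has an upper edge: %s\n", list_empty(&leaf.upper) ? "no" : "yes");
	printf("parent has a lower edge: %s\n", list_empty(&parent.lower) ? "no" : "yes");

	/* Second pass: hook the UPPER side onto the parent's lower list. */
	list_add_tail(&edge.list[UPPER], &parent.lower);

	/* Walk from the child back through the edge to its parent. */
	e = list_entry(leaf.upper.next, struct backref_edge, list[LOWER]);
	printf("edge links %llu -> %llu\n", e->node[LOWER]->bytenr, e->node[UPPER]->bytenr);
	return 0;
}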