1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Hierarchical Budget Worst-case Fair Weighted Fair Queueing
4 * (B-WF2Q+): hierarchical scheduling algorithm by which the BFQ I/O
9 #include "bfq-iosched.h"
12 * bfq_gt - compare two timestamps.
20 return (s64)(a - b) > 0; in bfq_gt()
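/*
 * Editor's illustrative sketch (plain userspace C, not the kernel code):
 * bfq_gt() declares "a after b" by looking at the sign of the unsigned
 * difference cast to s64, so the ordering stays correct even after the
 * virtual clock wraps around. Names prefixed with demo_ are assumptions.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static bool demo_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;	/* true iff a is "after" b */
}

int main(void)
{
	uint64_t before_wrap = UINT64_MAX - 5;	/* just before wraparound */
	uint64_t after_wrap = 10;		/* shortly after wraparound */

	/*
	 * A plain '>' gets the order wrong across the wrap;
	 * the signed difference does not.
	 */
	printf("plain >     : %d\n", after_wrap > before_wrap);	     /* 0 */
	printf("signed diff : %d\n", demo_gt(after_wrap, before_wrap)); /* 1 */
	return 0;
}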
25 struct rb_node *node = tree->rb_node; in bfq_root_active_entity()
34 return bfqq ? bfqq->ioprio_class - 1 : in bfq_class_idx()
35 BFQ_DEFAULT_GRP_CLASS - 1; in bfq_class_idx()
40 return bfqd->busy_queues[0] + bfqd->busy_queues[1] + in bfq_tot_busy_queues()
41 bfqd->busy_queues[2]; in bfq_tot_busy_queues()
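/*
 * Minimal sketch, not kernel code: bfq keeps one busy-queue counter per
 * I/O priority class. bfq_class_idx() turns the class (RT=1, BE=2, IDLE=3,
 * as in the ioprio ABI) into an index 0..2, and the total number of busy
 * queues is the sum of the three counters. demo_* names are assumptions.
 */
#include <stdio.h>

enum { CLASS_RT = 1, CLASS_BE = 2, CLASS_IDLE = 3 };

struct demo_bfqd {
	unsigned int busy_queues[3];	/* indexed by class - 1 */
};

static int demo_class_idx(int ioprio_class)
{
	return ioprio_class - 1;
}

static unsigned int demo_tot_busy(const struct demo_bfqd *d)
{
	return d->busy_queues[0] + d->busy_queues[1] + d->busy_queues[2];
}

int main(void)
{
	struct demo_bfqd d = { .busy_queues = { 1, 4, 0 } };

	printf("BE index = %d\n", demo_class_idx(CLASS_BE));	/* 1 */
	printf("total busy = %u\n", demo_tot_busy(&d));		/* 5 */
	return 0;
}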
44 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd,
50 * bfq_update_next_in_service - update sd->next_in_service
51 * @sd: sched_data for which to perform the update.
56 * expiration of the in-service entity
58 * This function is called to update sd->next_in_service, which, in
61 * sd. These insertions/extractions occur as a consequence of
67 * both the last two activation sub-cases, new_entity points to the
70 * Returns true if sd->next_in_service changes in such a way that
71 * entity->parent may become the next_in_service for its parent
74 static bool bfq_update_next_in_service(struct bfq_sched_data *sd, in bfq_update_next_in_service() argument
78 struct bfq_entity *next_in_service = sd->next_in_service; in bfq_update_next_in_service()
85 * sd->next_in_service, then a full lookup in the active tree in bfq_update_next_in_service()
87 * just-modified entity has the same priority as in bfq_update_next_in_service()
88 * sd->next_in_service, is eligible and has a lower virtual in bfq_update_next_in_service()
89 * finish time than sd->next_in_service. If this compound in bfq_update_next_in_service()
91 * next_in_service. Otherwise no change is needed. in bfq_update_next_in_service()
93 if (new_entity && new_entity != sd->next_in_service) { in bfq_update_next_in_service()
96 * sd->next_in_service with new_entity. Tentatively in bfq_update_next_in_service()
98 * sd->next_in_service is NULL. in bfq_update_next_in_service()
105 * to replace sd->service_tree with new_entity. in bfq_update_next_in_service()
111 sd->service_tree + new_entity_class_idx; in bfq_update_next_in_service()
117 !bfq_gt(new_entity->start, st->vtime) in bfq_update_next_in_service()
119 bfq_gt(next_in_service->finish, in bfq_update_next_in_service()
120 new_entity->finish)); in bfq_update_next_in_service()
128 next_in_service = bfq_lookup_next_entity(sd, expiration); in bfq_update_next_in_service()
134 parent_sched_may_change = !sd->next_in_service || in bfq_update_next_in_service()
138 sd->next_in_service = next_in_service; in bfq_update_next_in_service()
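/*
 * Simplified sketch (illustrative names and structures, not the kernel
 * code) of the shortcut described above: a just-activated entity may
 * replace the cached next_in_service without a full tree lookup only if
 * it lives in the same service class, is already eligible (its virtual
 * start is not after the tree's vtime) and finishes earlier in virtual
 * time. Otherwise a full lookup in the active tree is required.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_entity {
	int class_idx;		/* 0 = RT, 1 = BE, 2 = IDLE */
	uint64_t start;		/* virtual start time */
	uint64_t finish;	/* virtual finish time */
};

/* wraparound-safe "a after b" comparison, as in bfq_gt() */
static bool vt_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static bool may_replace_next(const struct demo_entity *new_entity,
			     const struct demo_entity *cached_next,
			     uint64_t st_vtime)
{
	if (!cached_next)
		return true;	/* nothing cached: take the new entity */

	return new_entity->class_idx == cached_next->class_idx &&
	       !vt_gt(new_entity->start, st_vtime) &&		/* eligible */
	       vt_gt(cached_next->finish, new_entity->finish);	/* earlier finish */
}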
150 struct bfq_entity *group_entity = bfqq->entity.parent; in bfq_bfqq_to_bfqg()
153 group_entity = &bfqq->bfqd->root_group->entity; in bfq_bfqq_to_bfqg()
159 * Returns true if this budget change may let next_in_service->parent
169 group_sd = next_in_service->sched_data; in bfq_update_parent_budget()
175 * as it must never become an in-service entity. in bfq_update_parent_budget()
177 bfqg_entity = bfqg->my_entity; in bfq_update_parent_budget()
179 if (bfqg_entity->budget > next_in_service->budget) in bfq_update_parent_budget()
181 bfqg_entity->budget = next_in_service->budget; in bfq_update_parent_budget()
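/*
 * Illustrative sketch (not the kernel code): when a group's next_in_service
 * child changes, the group entity's own budget is aligned to the child's
 * budget. The caller is told whether the budget shrank, because that can in
 * turn make the group a better next_in_service candidate one level up. The
 * parent pointer stands in for bfqg->my_entity and is NULL for the root
 * group, which must never be touched.
 */
#include <stdbool.h>
#include <stddef.h>

struct demo_entity {
	int budget;
	struct demo_entity *parent;	/* NULL for the root group entity */
};

static bool demo_update_parent_budget(struct demo_entity *next_in_service)
{
	struct demo_entity *parent = next_in_service->parent;
	bool shrank = false;

	if (parent) {			/* never touch the root entity */
		if (parent->budget > next_in_service->budget)
			shrank = true;
		parent->budget = next_in_service->budget;
	}
	return shrank;
}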
193 * If entity is a queue, then the entity is no longer a candidate for
195 * about to become the in-service queue. This function then returns
203 * non-queue entity is not a candidate for next-service only if it has
205 * function returns true for a non-queue entity.
219 * not account for the in-service entity in case the latter is in bfq_no_longer_next_in_service()
228 if (bfqg->active_entities == 1) in bfq_no_longer_next_in_service()
238 return bfqq->bfqd->root_group; in bfq_bfqq_to_bfqg()
266 if (!entity->my_sched_data) in bfq_entity_to_bfqq()
274 * bfq_delta - map service into the virtual time domain.
284 * bfq_calc_finish - assign the finish time to an entity.
292 entity->finish = entity->start + in bfq_calc_finish()
293 bfq_delta(service, entity->weight); in bfq_calc_finish()
296 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_calc_finish()
298 service, entity->weight); in bfq_calc_finish()
299 bfq_log_bfqq(bfqq->bfqd, bfqq, in bfq_calc_finish()
301 entity->start, entity->finish, in bfq_calc_finish()
302 bfq_delta(service, entity->weight)); in bfq_calc_finish()
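/*
 * Userspace sketch of the timestamp arithmetic above (not the kernel code).
 * bfq_delta() maps an amount of service into the virtual time domain by
 * dividing it by the entity weight, in fixed point; bfq_calc_finish() then
 * sets finish = start + delta. The 22-bit shift mirrors WFQ_SERVICE_SHIFT
 * in bfq-iosched.h, but treat the exact constant as an assumption here.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SERVICE_SHIFT 22

static uint64_t demo_delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << DEMO_SERVICE_SHIFT) / weight;
}

int main(void)
{
	uint64_t start = 1000;
	unsigned long service = 4096;	/* sectors of service received */

	/* Doubling the weight halves the virtual-time cost of the same service. */
	printf("finish (weight 10): %llu\n",
	       (unsigned long long)(start + demo_delta(service, 10)));
	printf("finish (weight 20): %llu\n",
	       (unsigned long long)(start + demo_delta(service, 20)));
	return 0;
}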
307 * bfq_entity_of - get an entity from a node.
326 * bfq_extract - remove an entity from a tree.
332 entity->tree = NULL; in bfq_extract()
333 rb_erase(&entity->rb_node, root); in bfq_extract()
337 * bfq_idle_extract - extract an entity from the idle tree.
347 if (entity == st->first_idle) { in bfq_idle_extract()
348 next = rb_next(&entity->rb_node); in bfq_idle_extract()
349 st->first_idle = bfq_entity_of(next); in bfq_idle_extract()
352 if (entity == st->last_idle) { in bfq_idle_extract()
353 next = rb_prev(&entity->rb_node); in bfq_idle_extract()
354 st->last_idle = bfq_entity_of(next); in bfq_idle_extract()
357 bfq_extract(&st->idle, entity); in bfq_idle_extract()
360 list_del(&bfqq->bfqq_list); in bfq_idle_extract()
364 * bfq_insert - generic tree insertion.
374 struct rb_node **node = &root->rb_node; in bfq_insert()
381 if (bfq_gt(entry->finish, entity->finish)) in bfq_insert()
382 node = &parent->rb_left; in bfq_insert()
384 node = &parent->rb_right; in bfq_insert()
387 rb_link_node(&entity->rb_node, parent, node); in bfq_insert()
388 rb_insert_color(&entity->rb_node, root); in bfq_insert()
390 entity->tree = root; in bfq_insert()
394 * bfq_update_min - update the min_start field of an entity. in bfq_update_min()
409 if (bfq_gt(entity->min_start, child->min_start)) in bfq_update_min()
410 entity->min_start = child->min_start; in bfq_update_min()
415 * bfq_update_active_node - recalculate min_start.
426 entity->min_start = entity->start; in bfq_update_active_node()
427 bfq_update_min(entity, node->rb_right); in bfq_update_active_node()
428 bfq_update_min(entity, node->rb_left); in bfq_update_active_node()
432 * bfq_update_active_tree - update min_start for the whole active tree.
452 if (node == parent->rb_left && parent->rb_right) in bfq_update_active_tree()
453 bfq_update_active_node(parent->rb_right); in bfq_update_active_tree()
454 else if (parent->rb_left) in bfq_update_active_tree()
455 bfq_update_active_node(parent->rb_left); in bfq_update_active_tree()
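/*
 * Simplified sketch of the min_start bookkeeping above, over a plain binary
 * tree instead of the kernel rbtree (illustrative structure, not bfq code).
 * Each node caches the minimum virtual start time of its whole subtree, so
 * eligibility checks can prune entire subtrees; after a node changes, the
 * cached value is recomputed and propagated toward the root.
 */
#include <stdbool.h>
#include <stdint.h>

struct demo_node {
	uint64_t start;		/* virtual start time of this entity */
	uint64_t min_start;	/* min start over this subtree */
	struct demo_node *left, *right, *parent;
};

static bool vt_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

/* Recompute one node's min_start from itself and its children. */
static void demo_update_node(struct demo_node *n)
{
	n->min_start = n->start;
	if (n->left && vt_gt(n->min_start, n->left->min_start))
		n->min_start = n->left->min_start;
	if (n->right && vt_gt(n->min_start, n->right->min_start))
		n->min_start = n->right->min_start;
}

/* Propagate the update from a modified node up to the root. */
static void demo_update_tree(struct demo_node *n)
{
	for (; n; n = n->parent)
		demo_update_node(n);
}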
462 * bfq_active_insert - insert an entity in the active tree of its
476 struct rb_node *node = &entity->rb_node; in bfq_active_insert()
478 struct bfq_sched_data *sd = NULL; in bfq_active_insert() local
483 bfq_insert(&st->active, entity); in bfq_active_insert()
485 if (node->rb_left) in bfq_active_insert()
486 node = node->rb_left; in bfq_active_insert()
487 else if (node->rb_right) in bfq_active_insert()
488 node = node->rb_right; in bfq_active_insert()
493 sd = entity->sched_data; in bfq_active_insert()
494 bfqg = container_of(sd, struct bfq_group, sched_data); in bfq_active_insert()
495 bfqd = (struct bfq_data *)bfqg->bfqd; in bfq_active_insert()
498 list_add(&bfqq->bfqq_list, &bfqq->bfqd->active_list); in bfq_active_insert()
500 if (bfqg != bfqd->root_group) in bfq_active_insert()
501 bfqg->active_entities++; in bfq_active_insert()
506 * bfq_ioprio_to_weight - calc a weight from an ioprio.
511 return (IOPRIO_BE_NR - ioprio) * BFQ_WEIGHT_CONVERSION_COEFF; in bfq_ioprio_to_weight()
515 * bfq_weight_to_ioprio - calc an ioprio from a weight.
518 * To preserve the old ioprio-only user interface as much as possible,
525 IOPRIO_BE_NR * BFQ_WEIGHT_CONVERSION_COEFF - weight); in bfq_weight_to_ioprio()
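/*
 * Worked example for the conversion above (standalone sketch). With
 * IOPRIO_BE_NR == 8 and BFQ_WEIGHT_CONVERSION_COEFF == 10 (values taken
 * from the ioprio ABI and the bfq headers; treat them as assumptions here),
 * ioprio 0 (highest) maps to weight 80 and ioprio 7 (lowest) to weight 10.
 */
#include <stdio.h>

#define DEMO_IOPRIO_BE_NR	8
#define DEMO_WEIGHT_COEFF	10

static int demo_ioprio_to_weight(int ioprio)
{
	return (DEMO_IOPRIO_BE_NR - ioprio) * DEMO_WEIGHT_COEFF;
}

int main(void)
{
	for (int ioprio = 0; ioprio < DEMO_IOPRIO_BE_NR; ioprio++)
		printf("ioprio %d -> weight %d\n",
		       ioprio, demo_ioprio_to_weight(ioprio));
	return 0;
}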
533 bfqq->ref++; in bfq_get_entity()
534 bfq_log_bfqq(bfqq->bfqd, bfqq, "get_entity: %p %d", in bfq_get_entity()
535 bfqq, bfqq->ref); in bfq_get_entity()
540 * bfq_find_deepest - find the deepest node that an extraction can modify.
552 if (!node->rb_right && !node->rb_left) in bfq_find_deepest()
554 else if (!node->rb_right) in bfq_find_deepest()
555 deepest = node->rb_left; in bfq_find_deepest()
556 else if (!node->rb_left) in bfq_find_deepest()
557 deepest = node->rb_right; in bfq_find_deepest()
560 if (deepest->rb_right) in bfq_find_deepest()
561 deepest = deepest->rb_right; in bfq_find_deepest()
570 * bfq_active_extract - remove an entity from the active tree.
580 struct bfq_sched_data *sd = NULL; in bfq_active_extract() local
585 node = bfq_find_deepest(&entity->rb_node); in bfq_active_extract()
586 bfq_extract(&st->active, entity); in bfq_active_extract()
592 sd = entity->sched_data; in bfq_active_extract()
593 bfqg = container_of(sd, struct bfq_group, sched_data); in bfq_active_extract()
594 bfqd = (struct bfq_data *)bfqg->bfqd; in bfq_active_extract()
597 list_del(&bfqq->bfqq_list); in bfq_active_extract()
599 if (bfqg != bfqd->root_group) in bfq_active_extract()
600 bfqg->active_entities--; in bfq_active_extract()
605 * bfq_idle_insert - insert an entity into the idle tree.
613 struct bfq_entity *first_idle = st->first_idle; in bfq_idle_insert()
614 struct bfq_entity *last_idle = st->last_idle; in bfq_idle_insert()
616 if (!first_idle || bfq_gt(first_idle->finish, entity->finish)) in bfq_idle_insert()
617 st->first_idle = entity; in bfq_idle_insert()
618 if (!last_idle || bfq_gt(entity->finish, last_idle->finish)) in bfq_idle_insert()
619 st->last_idle = entity; in bfq_idle_insert()
621 bfq_insert(&st->idle, entity); in bfq_idle_insert()
624 list_add(&bfqq->bfqq_list, &bfqq->bfqd->idle_list); in bfq_idle_insert()
628 * bfq_forget_entity - do not consider entity any longer for scheduling
631 * @is_in_service: true if entity is currently the in-service entity.
636 * fact, in this case, there is really no more service reference to
648 entity->on_st_or_in_serv = false; in bfq_forget_entity()
649 st->wsum -= entity->weight; in bfq_forget_entity()
655 * bfq_put_idle_entity - release the idle tree ref of an entity.
663 entity == entity->sched_data->in_service_entity); in bfq_put_idle_entity()
667 * bfq_forget_idle - update the idle tree if necessary.
675 struct bfq_entity *first_idle = st->first_idle; in bfq_forget_idle()
676 struct bfq_entity *last_idle = st->last_idle; in bfq_forget_idle()
678 if (RB_EMPTY_ROOT(&st->active) && last_idle && in bfq_forget_idle()
679 !bfq_gt(last_idle->finish, st->vtime)) { in bfq_forget_idle()
684 st->vtime = last_idle->finish; in bfq_forget_idle()
687 if (first_idle && !bfq_gt(first_idle->finish, st->vtime)) in bfq_forget_idle()
693 struct bfq_sched_data *sched_data = entity->sched_data; in bfq_entity_service_tree()
696 return sched_data->service_tree + idx; in bfq_entity_service_tree()
724 if (entity->prio_changed) { in __bfq_entity_update_weight_prio()
730 struct bfq_sched_data *sd; in __bfq_entity_update_weight_prio() local
735 bfqd = bfqq->bfqd; in __bfq_entity_update_weight_prio()
738 sd = entity->my_sched_data; in __bfq_entity_update_weight_prio()
739 bfqg = container_of(sd, struct bfq_group, sched_data); in __bfq_entity_update_weight_prio()
740 bfqd = (struct bfq_data *)bfqg->bfqd; in __bfq_entity_update_weight_prio()
746 old_st->wsum -= entity->weight; in __bfq_entity_update_weight_prio()
748 if (entity->new_weight != entity->orig_weight) { in __bfq_entity_update_weight_prio()
749 if (entity->new_weight < BFQ_MIN_WEIGHT || in __bfq_entity_update_weight_prio()
750 entity->new_weight > BFQ_MAX_WEIGHT) { in __bfq_entity_update_weight_prio()
752 entity->new_weight); in __bfq_entity_update_weight_prio()
753 if (entity->new_weight < BFQ_MIN_WEIGHT) in __bfq_entity_update_weight_prio()
754 entity->new_weight = BFQ_MIN_WEIGHT; in __bfq_entity_update_weight_prio()
756 entity->new_weight = BFQ_MAX_WEIGHT; in __bfq_entity_update_weight_prio()
758 entity->orig_weight = entity->new_weight; in __bfq_entity_update_weight_prio()
760 bfqq->ioprio = in __bfq_entity_update_weight_prio()
761 bfq_weight_to_ioprio(entity->orig_weight); in __bfq_entity_update_weight_prio()
765 bfqq->ioprio_class = bfqq->new_ioprio_class; in __bfq_entity_update_weight_prio()
771 if (!bfqq || bfqq->ioprio_class == bfqq->new_ioprio_class) in __bfq_entity_update_weight_prio()
772 entity->prio_changed = 0; in __bfq_entity_update_weight_prio()
779 * when entity->finish <= old_st->vtime). in __bfq_entity_update_weight_prio()
783 prev_weight = entity->weight; in __bfq_entity_update_weight_prio()
784 new_weight = entity->orig_weight * in __bfq_entity_update_weight_prio()
785 (bfqq ? bfqq->wr_coeff : 1); in __bfq_entity_update_weight_prio()
792 root = &bfqd->queue_weights_tree; in __bfq_entity_update_weight_prio()
795 entity->weight = new_weight; in __bfq_entity_update_weight_prio()
797 * Add the entity, if it is not a weight-raised queue, in __bfq_entity_update_weight_prio()
800 if (prev_weight != new_weight && bfqq && bfqq->wr_coeff == 1) { in __bfq_entity_update_weight_prio()
805 new_st->wsum += entity->weight; in __bfq_entity_update_weight_prio()
808 entity->start = new_st->vtime; in __bfq_entity_update_weight_prio()
815 * bfq_bfqq_served - update the scheduler status after selection for
826 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_served()
829 if (!bfqq->service_from_backlogged) in bfq_bfqq_served()
830 bfqq->first_IO_time = jiffies; in bfq_bfqq_served()
832 if (bfqq->wr_coeff > 1) in bfq_bfqq_served()
833 bfqq->service_from_wr += served; in bfq_bfqq_served()
835 bfqq->service_from_backlogged += served; in bfq_bfqq_served()
839 entity->service += served; in bfq_bfqq_served()
841 st->vtime += bfq_delta(served, st->wsum); in bfq_bfqq_served()
844 bfq_log_bfqq(bfqq->bfqd, bfqq, "bfqq_served %d secs", served); in bfq_bfqq_served()
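/*
 * Sketch of the accounting above (not kernel code): when a queue receives
 * 'served' sectors, its own service counter grows by the full amount, while
 * the tree's virtual time only advances by served / wsum, i.e. by the fair
 * share of each backlogged weight unit. Constants and values are illustrative.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_SERVICE_SHIFT 22	/* fixed-point shift, as in the sketch above */

static uint64_t demo_delta(unsigned long service, unsigned long weight)
{
	return ((uint64_t)service << DEMO_SERVICE_SHIFT) / weight;
}

int main(void)
{
	uint64_t vtime = 0;
	unsigned long wsum = 30;	/* sum of the weights of active entities */
	unsigned long served = 3000;	/* service just received by one queue */

	vtime += demo_delta(served, wsum);
	printf("vtime advanced by %llu fixed-point units\n",
	       (unsigned long long)demo_delta(served, wsum));
	printf("vtime = %llu\n", (unsigned long long)vtime);
	return 0;
}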
848 * bfq_bfqq_charge_time - charge an amount of service equivalent to the length
877 struct bfq_entity *entity = &bfqq->entity; in bfq_bfqq_charge_time()
881 (bfqd->bfq_max_budget * bounded_time_ms) / timeout_ms; in bfq_bfqq_charge_time()
882 int tot_serv_to_charge = max(serv_to_charge_for_time, entity->service); in bfq_bfqq_charge_time()
885 if (tot_serv_to_charge > entity->budget) in bfq_bfqq_charge_time()
886 entity->budget = tot_serv_to_charge; in bfq_bfqq_charge_time()
889 max_t(int, 0, tot_serv_to_charge - entity->service)); in bfq_bfqq_charge_time()
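/*
 * Worked example for the time-to-service conversion above (illustrative
 * numbers, assumed parameters). A queue that occupied the device for
 * 'time_ms' is charged the fraction of the maximum budget that this time
 * represents with respect to the budget timeout, so time-consuming but
 * low-throughput queues cannot gain an unfair advantage.
 */
#include <stdio.h>

int main(void)
{
	int max_budget = 16384;		/* assumed bfq_max_budget, in sectors */
	int timeout_ms = 125;		/* assumed budget timeout */
	int time_ms = 50;		/* device time actually consumed */
	int service_received = 1200;	/* sectors actually transferred */

	int serv_for_time = (max_budget * time_ms) / timeout_ms;
	int tot_to_charge = serv_for_time > service_received ?
				serv_for_time : service_received;

	printf("charge for time : %d sectors\n", serv_for_time);	/* 6553 */
	printf("total to charge : %d sectors\n", tot_to_charge);
	printf("extra to charge : %d sectors\n",
	       tot_to_charge - service_received);
	return 0;
}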
904 bfq_calc_finish(entity, entity->budget); in bfq_update_fin_time_enqueue()
916 * idle, the system virtual time may be pushed-up to much in bfq_update_fin_time_enqueue()
931 * worst-case fairness guarantees. in bfq_update_fin_time_enqueue()
933 * As a special case, if bfqq is weight-raised, push up in bfq_update_fin_time_enqueue()
936 * weight-raised queues to become higher than the backshifted in bfq_update_fin_time_enqueue()
937 * finish timestamps of non weight-raised queues. in bfq_update_fin_time_enqueue()
939 if (backshifted && bfq_gt(st->vtime, entity->finish)) { in bfq_update_fin_time_enqueue()
940 unsigned long delta = st->vtime - entity->finish; in bfq_update_fin_time_enqueue()
943 delta /= bfqq->wr_coeff; in bfq_update_fin_time_enqueue()
945 entity->start += delta; in bfq_update_fin_time_enqueue()
946 entity->finish += delta; in bfq_update_fin_time_enqueue()
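/*
 * Sketch of the backshift correction above (illustrative values, not the
 * kernel code). If an entity was backshifted so far that its finish time is
 * behind the tree's vtime, both timestamps are pushed forward by the gap;
 * for a weight-raised queue the gap is first divided by wr_coeff, so part
 * of its advantage over non weight-raised queues is preserved.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t vtime = 10000, start = 4000, finish = 7000;
	unsigned int wr_coeff = 30;	/* > 1 means the queue is weight-raised */

	uint64_t delta = vtime - finish;	/* 3000 */
	if (wr_coeff > 1)
		delta /= wr_coeff;		/* 100 */

	start += delta;
	finish += delta;
	printf("start %llu, finish %llu (delta %llu)\n",
	       (unsigned long long)start, (unsigned long long)finish,
	       (unsigned long long)delta);
	return 0;
}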
953 * __bfq_activate_entity - handle activation of entity.
972 if (non_blocking_wait_rq && bfq_gt(st->vtime, entity->finish)) { in __bfq_activate_entity()
974 min_vstart = entity->finish; in __bfq_activate_entity()
976 min_vstart = st->vtime; in __bfq_activate_entity()
978 if (entity->tree == &st->idle) { in __bfq_activate_entity()
984 entity->start = bfq_gt(min_vstart, entity->finish) ? in __bfq_activate_entity()
985 min_vstart : entity->finish; in __bfq_activate_entity()
992 entity->start = min_vstart; in __bfq_activate_entity()
993 st->wsum += entity->weight; in __bfq_activate_entity()
997 * sure entity does not disappear until it is no in __bfq_activate_entity()
1002 entity->on_st_or_in_serv = true; in __bfq_activate_entity()
1009 struct bfq_data *bfqd = bfqg->bfqd; in __bfq_activate_entity()
1011 if (!entity->in_groups_with_pending_reqs) { in __bfq_activate_entity()
1012 entity->in_groups_with_pending_reqs = true; in __bfq_activate_entity()
1013 bfqd->num_groups_with_pending_reqs++; in __bfq_activate_entity()
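/*
 * Simplified sketch of the start-timestamp selection above (illustrative
 * names, not the kernel code). A queue that was only waiting for a new
 * request keeps its old finish time as the lower bound for its new start;
 * otherwise the current vtime is used. An entity coming back from the idle
 * tree must not start before its own previous finish time, so it cannot
 * reclaim service it has already consumed.
 */
#include <stdbool.h>
#include <stdint.h>

static bool vt_gt(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b) > 0;
}

static uint64_t demo_new_start(uint64_t vtime, uint64_t old_finish,
			       bool non_blocking_wait_rq, bool was_idle)
{
	uint64_t min_vstart;

	if (non_blocking_wait_rq && vt_gt(vtime, old_finish))
		min_vstart = old_finish;	/* backshift the timestamps */
	else
		min_vstart = vtime;

	if (was_idle)				/* coming from the idle tree */
		return vt_gt(min_vstart, old_finish) ? min_vstart : old_finish;

	return min_vstart;			/* fresh activation */
}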
1022 * __bfq_requeue_entity - handle requeueing or repositioning of an entity.
1038 struct bfq_sched_data *sd = entity->sched_data; in __bfq_requeue_entity() local
1041 if (entity == sd->in_service_entity) { in __bfq_requeue_entity()
1043 * We are requeueing the current in-service entity, in __bfq_requeue_entity()
1046 * - entity represents the in-service queue, and the in __bfq_requeue_entity()
1047 * in-service queue is being requeued after an in __bfq_requeue_entity()
1049 * - entity represents a group, and its budget has in __bfq_requeue_entity()
1064 bfq_calc_finish(entity, entity->service); in __bfq_requeue_entity()
1065 entity->start = entity->finish; in __bfq_requeue_entity()
1079 if (entity->tree) in __bfq_requeue_entity()
1094 * non-extracted-entity sub-case above. in __bfq_requeue_entity()
1103 struct bfq_sched_data *sd, in __bfq_activate_requeue_entity() argument
1108 if (sd->in_service_entity == entity || entity->tree == &st->active) in __bfq_activate_requeue_entity()
1124 * bfq_activate_requeue_entity - activate or requeue an entity representing a
1134 * of the in-service queue
1140 struct bfq_sched_data *sd; in bfq_activate_requeue_entity() local
1143 sd = entity->sched_data; in bfq_activate_requeue_entity()
1144 __bfq_activate_requeue_entity(entity, sd, non_blocking_wait_rq); in bfq_activate_requeue_entity()
1146 if (!bfq_update_next_in_service(sd, entity, expiration) && in bfq_activate_requeue_entity()
1153 * __bfq_deactivate_entity - update sched_data and service trees for
1160 * entity may be on no tree if in service.
1164 struct bfq_sched_data *sd = entity->sched_data; in __bfq_deactivate_entity() local
1168 if (!entity->on_st_or_in_serv) /* in __bfq_deactivate_entity()
1178 * entity->sched_data has been set, and we can safely use it. in __bfq_deactivate_entity()
1181 is_in_service = entity == sd->in_service_entity; in __bfq_deactivate_entity()
1183 bfq_calc_finish(entity, entity->service); in __bfq_deactivate_entity()
1186 sd->in_service_entity = NULL; in __bfq_deactivate_entity()
1189 * Non in-service entity: nobody will take care of in __bfq_deactivate_entity()
1193 entity->service = 0; in __bfq_deactivate_entity()
1195 if (entity->tree == &st->active) in __bfq_deactivate_entity()
1197 else if (!is_in_service && entity->tree == &st->idle) in __bfq_deactivate_entity()
1200 if (!ins_into_idle_tree || !bfq_gt(entity->finish, st->vtime)) in __bfq_deactivate_entity()
1209 * bfq_deactivate_entity - deactivate an entity representing a bfq_queue.
1213 * of the in-service queue
1219 struct bfq_sched_data *sd; in bfq_deactivate_entity() local
1223 sd = entity->sched_data; in bfq_deactivate_entity()
1228 * this deactivation is a no-op, and there is in bfq_deactivate_entity()
1229 * nothing to change for upper-level entities in bfq_deactivate_entity()
1236 if (sd->next_in_service == entity) in bfq_deactivate_entity()
1242 bfq_update_next_in_service(sd, NULL, expiration); in bfq_deactivate_entity()
1244 if (sd->next_in_service || sd->in_service_entity) { in bfq_deactivate_entity()
1248 * is not NULL. So, no further upwards in bfq_deactivate_entity()
1269 * If we get here, then the parent is no more in bfq_deactivate_entity()
1285 * no more entities to touch and next loop is not executed at in bfq_deactivate_entity()
1295 * active tree (because sd->next_in_service has in bfq_deactivate_entity()
1300 sd = entity->sched_data; in bfq_deactivate_entity()
1301 if (!bfq_update_next_in_service(sd, entity, expiration) && in bfq_deactivate_entity()
1305 * any change in entity->parent->sd, and no in bfq_deactivate_entity()
1314 * bfq_calc_vtime_jump - compute the value to which the vtime should jump,
1322 struct bfq_entity *root_entity = bfq_root_active_entity(&st->active); in bfq_calc_vtime_jump()
1324 if (bfq_gt(root_entity->min_start, st->vtime)) in bfq_calc_vtime_jump()
1325 return root_entity->min_start; in bfq_calc_vtime_jump()
1327 return st->vtime; in bfq_calc_vtime_jump()
1332 if (new_value > st->vtime) { in bfq_update_vtime()
1333 st->vtime = new_value; in bfq_update_vtime()
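/*
 * Sketch of the vtime jump above (illustrative, not the kernel code). If
 * even the smallest virtual start time in the active tree lies ahead of the
 * current vtime, the tree contains no eligible entity; vtime is then allowed
 * to jump forward to that minimum so that service can resume immediately.
 */
#include <stdint.h>
#include <stdio.h>

static int64_t signed_diff(uint64_t a, uint64_t b)
{
	return (int64_t)(a - b);
}

static uint64_t demo_vtime_jump(uint64_t vtime, uint64_t root_min_start)
{
	return signed_diff(root_min_start, vtime) > 0 ? root_min_start : vtime;
}

int main(void)
{
	printf("%llu\n", (unsigned long long)demo_vtime_jump(100, 250)); /* 250 */
	printf("%llu\n", (unsigned long long)demo_vtime_jump(300, 250)); /* 300 */
	return 0;
}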
1339 * bfq_first_active_entity - find the eligible entity with
1347 * the right is followed only if a) the left subtree contains no eligible
1348 * entities and b) no eligible entity has been found yet.
1354 struct rb_node *node = st->active.rb_node; in bfq_first_active_entity()
1359 if (!bfq_gt(entry->start, vtime)) in bfq_first_active_entity()
1362 if (node->rb_left) { in bfq_first_active_entity()
1363 entry = rb_entry(node->rb_left, in bfq_first_active_entity()
1365 if (!bfq_gt(entry->min_start, vtime)) { in bfq_first_active_entity()
1366 node = node->rb_left; in bfq_first_active_entity()
1372 node = node->rb_right; in bfq_first_active_entity()
1379 * __bfq_lookup_next_entity - return the first eligible entity in @st.
1382 * If there is no in-service entity for the sched_data st belongs to,
1385 * 2) no entity belonging to such parent entity undergoes a state change
1392 * In contrast, if there is an in-service entity, then return the
1395 * in-service entity, on expiration,
1406 if (RB_EMPTY_ROOT(&st->active)) in __bfq_lookup_next_entity()
1416 * If there is no in-service entity for the sched_data this in __bfq_lookup_next_entity()
1419 * eligible. If, instead, there is an in-service entity, then in __bfq_lookup_next_entity()
1421 * eligible entity, namely the in-service one (even if the in __bfq_lookup_next_entity()
1434 * bfq_lookup_next_entity - return the first eligible entity in @sd.
1435 * @sd: the sched_data.
1436 * @expiration: true if we are on the expiration path of the in-service queue
1439 * for sd, and we need to know what is the new next entity to serve
1442 static struct bfq_entity *bfq_lookup_next_entity(struct bfq_sched_data *sd, in bfq_lookup_next_entity() argument
1445 struct bfq_service_tree *st = sd->service_tree; in bfq_lookup_next_entity()
1446 struct bfq_service_tree *idle_class_st = st + (BFQ_IOPRIO_CLASSES - 1); in bfq_lookup_next_entity()
1454 * priority-inversion problems in case a low priority task is in bfq_lookup_next_entity()
1457 if (time_is_before_jiffies(sd->bfq_class_idle_last_service + in bfq_lookup_next_entity()
1459 if (!RB_EMPTY_ROOT(&idle_class_st->active)) in bfq_lookup_next_entity()
1460 class_idx = BFQ_IOPRIO_CLASSES - 1; in bfq_lookup_next_entity()
1462 sd->bfq_class_idle_last_service = jiffies; in bfq_lookup_next_entity()
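/*
 * Simplified sketch of the class-selection policy above (not the kernel
 * code; BFQ_CL_IDLE_TIMEOUT and the jiffies helpers are replaced by plain
 * milliseconds, and the threshold value is an assumption). Classes are
 * normally scanned from RT down to IDLE, but if the idle class has been
 * starved for too long it is picked first, which bounds priority-inversion
 * effects when a low-priority task holds a shared resource.
 */
#include <stdbool.h>

enum { DEMO_RT, DEMO_BE, DEMO_IDLE, DEMO_NR_CLASSES };

struct demo_sched_data {
	bool class_has_work[DEMO_NR_CLASSES];
	unsigned long idle_last_service_ms;
};

#define DEMO_CL_IDLE_TIMEOUT_MS 10	/* assumed aging threshold */

static int demo_pick_class(struct demo_sched_data *sd, unsigned long now_ms)
{
	/* Anti-starvation: serve CLASS_IDLE if it has aged past the timeout. */
	if (now_ms - sd->idle_last_service_ms > DEMO_CL_IDLE_TIMEOUT_MS &&
	    sd->class_has_work[DEMO_IDLE]) {
		sd->idle_last_service_ms = now_ms;
		return DEMO_IDLE;
	}

	for (int c = DEMO_RT; c < DEMO_NR_CLASSES; c++)
		if (sd->class_has_work[c])
			return c;

	return -1;	/* nothing to serve */
}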
1466 * Find the next entity to serve for the highest-priority in bfq_lookup_next_entity()
1473 * of the in-service queue. In this case, even if in bfq_lookup_next_entity()
1474 * sd->in_service_entity is not NULL, in bfq_lookup_next_entity()
1475 * sd->in_service_entity at this point is actually not in bfq_lookup_next_entity()
1478 * tree. The reason why sd->in_service_entity is still in bfq_lookup_next_entity()
1480 * sd->in_service_entity is reset as a last step in the in bfq_lookup_next_entity()
1482 * __bfq_lookup_next_entity that there is no in bfq_lookup_next_entity()
1483 * sd->in_service_entity. in bfq_lookup_next_entity()
1486 sd->in_service_entity && in bfq_lookup_next_entity()
1501 struct bfq_sched_data *sd = &bfqd->root_group->sched_data; in next_queue_may_preempt() local
1503 return sd->next_in_service != sd->in_service_entity; in next_queue_may_preempt()
1512 struct bfq_sched_data *sd; in bfq_get_next_queue() local
1523 sd = &bfqd->root_group->sched_data; in bfq_get_next_queue()
1524 for (; sd ; sd = entity->my_sched_data) { in bfq_get_next_queue()
1526 * WARNING. We are about to set the in-service entity in bfq_get_next_queue()
1527 * to sd->next_in_service, i.e., to the (cached) value in bfq_get_next_queue()
1528 * returned by bfq_lookup_next_entity(sd) the last in bfq_get_next_queue()
1530 * service order in sd changed as a consequence of the in bfq_get_next_queue()
1532 * respect, if we execute bfq_lookup_next_entity(sd) in bfq_get_next_queue()
1535 * pointed to by sd->next_in_service. This rare event in bfq_get_next_queue()
1536 * happens in case there was no CLASS_IDLE entity to in bfq_get_next_queue()
1537 * serve for sd when bfq_lookup_next_entity(sd) was in bfq_get_next_queue()
1543 * service of the sd->next_in_service entity in bfq_get_next_queue()
1545 * bfq_lookup_next_entity(sd) gets called again, in bfq_get_next_queue()
1546 * exactly to update sd->next_in_service. in bfq_get_next_queue()
1550 entity = sd->next_in_service; in bfq_get_next_queue()
1551 sd->in_service_entity = entity; in bfq_get_next_queue()
1554 * If entity is no longer a candidate for next in bfq_get_next_queue()
1576 * the correct next-to-serve candidate entity for each in bfq_get_next_queue()
1579 * the next-to-serve leaf entity, we can discover in bfq_get_next_queue()
1581 * becomes the next-to-serve, and so on. in bfq_get_next_queue()
1588 * We can finally update all next-to-serve entities along the in bfq_get_next_queue()
1592 struct bfq_sched_data *sd = entity->sched_data; in bfq_get_next_queue() local
1594 if (!bfq_update_next_in_service(sd, NULL, false)) in bfq_get_next_queue()
1601 /* returns true if the in-service queue gets freed */
1604 struct bfq_queue *in_serv_bfqq = bfqd->in_service_queue; in __bfq_bfqd_reset_in_service()
1605 struct bfq_entity *in_serv_entity = &in_serv_bfqq->entity; in __bfq_bfqd_reset_in_service()
1609 hrtimer_try_to_cancel(&bfqd->idle_slice_timer); in __bfq_bfqd_reset_in_service()
1610 bfqd->in_service_queue = NULL; in __bfq_bfqd_reset_in_service()
1613 * When this function is called, all in-service entities have in __bfq_bfqd_reset_in_service()
1619 entity->sched_data->in_service_entity = NULL; in __bfq_bfqd_reset_in_service()
1622 * in_serv_entity is no longer in service, so, if it is in no in __bfq_bfqd_reset_in_service()
1626 if (!in_serv_entity->on_st_or_in_serv) { in __bfq_bfqd_reset_in_service()
1628 * If no process is referencing in_serv_bfqq any in __bfq_bfqd_reset_in_service()
1633 int ref = in_serv_bfqq->ref; in __bfq_bfqd_reset_in_service()
1645 struct bfq_entity *entity = &bfqq->entity; in bfq_deactivate_bfqq()
1652 struct bfq_entity *entity = &bfqq->entity; in bfq_activate_bfqq()
1662 struct bfq_entity *entity = &bfqq->entity; in bfq_requeue_bfqq()
1665 bfqq == bfqd->in_service_queue, expiration); in bfq_requeue_bfqq()
1669 * Called when the bfqq no longer has requests pending; remove it from
1680 bfqd->busy_queues[bfqq->ioprio_class - 1]--; in bfq_del_bfqq_busy()
1682 if (bfqq->wr_coeff > 1) in bfq_del_bfqq_busy()
1683 bfqd->wr_busy_queues--; in bfq_del_bfqq_busy()
1689 if (!bfqq->dispatched) in bfq_del_bfqq_busy()
1703 bfqd->busy_queues[bfqq->ioprio_class - 1]++; in bfq_add_bfqq_busy()
1705 if (!bfqq->dispatched) in bfq_add_bfqq_busy()
1706 if (bfqq->wr_coeff == 1) in bfq_add_bfqq_busy()
1708 &bfqd->queue_weights_tree); in bfq_add_bfqq_busy()
1710 if (bfqq->wr_coeff > 1) in bfq_add_bfqq_busy()
1711 bfqd->wr_busy_queues++; in bfq_add_bfqq_busy()
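/*
 * Sketch of the busy-queue bookkeeping above (illustrative structure, not
 * the kernel code). When a queue gains or loses backlog, the per-class busy
 * counter for its I/O priority class is adjusted, and queues that are
 * currently weight-raised are additionally tracked in wr_busy_queues.
 */
struct demo_bfqd {
	unsigned int busy_queues[3];	/* per ioprio class: RT, BE, IDLE */
	unsigned int wr_busy_queues;	/* busy queues currently weight-raised */
};

struct demo_queue {
	int ioprio_class;		/* 1 = RT, 2 = BE, 3 = IDLE */
	unsigned int wr_coeff;		/* > 1 while weight-raised */
};

static void demo_add_busy(struct demo_bfqd *d, const struct demo_queue *q)
{
	d->busy_queues[q->ioprio_class - 1]++;
	if (q->wr_coeff > 1)
		d->wr_busy_queues++;
}

static void demo_del_busy(struct demo_bfqd *d, const struct demo_queue *q)
{
	d->busy_queues[q->ioprio_class - 1]--;
	if (q->wr_coeff > 1)
		d->wr_busy_queues--;
}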