Lines matching refs: iocg (identifier cross-references in block/blk-iocost.c)
195 #define TRACE_IOCG_PATH(type, iocg, ...) \ argument
200 cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup, \
202 trace_iocost_##type(iocg, trace_iocg_path, \
209 #define TRACE_IOCG_PATH(type, iocg, ...) do { } while (0) argument
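The two definitions above are the usual compile-time trace toggle: with tracepoints available, TRACE_IOCG_PATH (195) resolves the iocg's cgroup path and fires trace_iocost_##type; otherwise (209) it collapses to an empty statement. A minimal userspace sketch of the same pattern, with illustrative names:

    #include <stdio.h>

    #ifdef ENABLE_TRACE
    #define TRACE_PATH(fmt, ...) fprintf(stderr, "trace: " fmt "\n", ##__VA_ARGS__)
    #else
    /* do { } while (0) keeps the disabled form a single statement, so it
     * stays legal as e.g. the body of an un-braced if/else */
    #define TRACE_PATH(fmt, ...) do { } while (0)
    #endif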
573 struct ioc_gq *iocg; member
693 static struct blkcg_gq *iocg_to_blkg(struct ioc_gq *iocg) in iocg_to_blkg() argument
695 return pd_to_blkg(&iocg->pd); in iocg_to_blkg()
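iocg_to_blkg() recovers the blkcg_gq owning an iocg via the policy data embedded in it; pd_to_blkg() follows the pd's back-pointer. A hedged model with simplified stand-in types:

    struct blkcg_gq;                                    /* cgroup x queue pair */
    struct blkg_policy_data { struct blkcg_gq *blkg; };
    struct iocg_model { struct blkg_policy_data pd; /* ... */ };

    static struct blkcg_gq *model_iocg_to_blkg(struct iocg_model *iocg)
    {
            return iocg->pd.blkg;   /* what pd_to_blkg(&iocg->pd) resolves to */
    }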
721 static void iocg_commit_bio(struct ioc_gq *iocg, struct bio *bio, in iocg_commit_bio() argument
727 atomic64_add(cost, &iocg->vtime); in iocg_commit_bio()
729 gcs = get_cpu_ptr(iocg->pcpu_stat); in iocg_commit_bio()
734 static void iocg_lock(struct ioc_gq *iocg, bool lock_ioc, unsigned long *flags) in iocg_lock() argument
737 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
738 spin_lock(&iocg->waitq.lock); in iocg_lock()
740 spin_lock_irqsave(&iocg->waitq.lock, *flags); in iocg_lock()
744 static void iocg_unlock(struct ioc_gq *iocg, bool unlock_ioc, unsigned long *flags) in iocg_unlock() argument
747 spin_unlock(&iocg->waitq.lock); in iocg_unlock()
748 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
750 spin_unlock_irqrestore(&iocg->waitq.lock, *flags); in iocg_unlock()
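iocg_lock()/iocg_unlock() encode the lock-ordering invariant used throughout the file: when both locks are needed, ioc->lock is taken first (737) and iocg->waitq.lock nests inside it (738); otherwise waitq.lock alone is taken with the irq flags (740). A pthread-based sketch of that ordering (irq flag handling elided, names illustrative):

    #include <pthread.h>
    #include <stdbool.h>

    struct ioc_m  { pthread_mutex_t lock; };
    struct iocg_m { struct ioc_m *ioc; pthread_mutex_t waitq_lock; };

    static void iocg_lock_m(struct iocg_m *iocg, bool lock_ioc)
    {
            if (lock_ioc)
                    pthread_mutex_lock(&iocg->ioc->lock);   /* outer lock first */
            pthread_mutex_lock(&iocg->waitq_lock);          /* inner lock second */
    }

    static void iocg_unlock_m(struct iocg_m *iocg, bool unlock_ioc)
    {
            pthread_mutex_unlock(&iocg->waitq_lock);        /* release in reverse */
            if (unlock_ioc)
                    pthread_mutex_unlock(&iocg->ioc->lock);
    }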
1018 static void __propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, in __propagate_weights() argument
1021 struct ioc *ioc = iocg->ioc; in __propagate_weights()
1031 if (list_empty(&iocg->active_list) && iocg->child_active_sum) { in __propagate_weights()
1032 inuse = DIV64_U64_ROUND_UP(active * iocg->child_inuse_sum, in __propagate_weights()
1033 iocg->child_active_sum); in __propagate_weights()
1038 iocg->last_inuse = iocg->inuse; in __propagate_weights()
1040 iocg->saved_margin = now->vnow - atomic64_read(&iocg->vtime); in __propagate_weights()
1042 if (active == iocg->active && inuse == iocg->inuse) in __propagate_weights()
1045 for (lvl = iocg->level - 1; lvl >= 0; lvl--) { in __propagate_weights()
1046 struct ioc_gq *parent = iocg->ancestors[lvl]; in __propagate_weights()
1047 struct ioc_gq *child = iocg->ancestors[lvl + 1]; in __propagate_weights()
1093 static void propagate_weights(struct ioc_gq *iocg, u32 active, u32 inuse, in propagate_weights() argument
1096 __propagate_weights(iocg, active, inuse, save, now); in propagate_weights()
1097 commit_weights(iocg->ioc); in propagate_weights()
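__propagate_weights() walks ancestors[] from the iocg's parent (level - 1) up to the root (level 0), folding the new active/inuse into each parent's child sums; 1031-1033 show the special case of an inner node with no active list of its own, whose inuse is derived from its child sums, rounding up. A hedged userspace reconstruction of the walk, with simplified fields and div64_round_up standing in for DIV64_U64_ROUND_UP:

    #include <stdint.h>

    struct iocg_pw {
            int level;
            uint32_t weight, active, inuse;
            uint64_t child_active_sum, child_inuse_sum;
            struct iocg_pw *ancestors[];          /* [0] = root ... [level] = self */
    };

    static uint64_t div64_round_up(uint64_t a, uint64_t b)
    {
            return (a + b - 1) / b;
    }

    static void propagate_weights_m(struct iocg_pw *iocg, uint32_t active, uint32_t inuse)
    {
            for (int lvl = iocg->level - 1; lvl >= 0; lvl--) {
                    struct iocg_pw *parent = iocg->ancestors[lvl];
                    struct iocg_pw *child  = iocg->ancestors[lvl + 1];
                    uint32_t p_active = 0, p_inuse = 0;

                    /* swap the child's old contribution for the new one */
                    parent->child_active_sum += (int32_t)(active - child->active);
                    parent->child_inuse_sum  += (int32_t)(inuse - child->inuse);
                    child->active = active;
                    child->inuse  = inuse;

                    /* the parent's inuse mirrors the ratio of its child sums */
                    if (parent->child_active_sum) {
                            p_active = parent->weight;
                            p_inuse = div64_round_up((uint64_t)p_active *
                                                     parent->child_inuse_sum,
                                                     parent->child_active_sum);
                    }
                    if (p_active == parent->active && p_inuse == parent->inuse)
                            break;                /* nothing changes further up */
                    active = p_active;
                    inuse  = p_inuse;
            }
    }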
1100 static void current_hweight(struct ioc_gq *iocg, u32 *hw_activep, u32 *hw_inusep) in current_hweight() argument
1102 struct ioc *ioc = iocg->ioc; in current_hweight()
1109 if (ioc_gen == iocg->hweight_gen) in current_hweight()
1125 for (lvl = 0; lvl <= iocg->level - 1; lvl++) { in current_hweight()
1126 struct ioc_gq *parent = iocg->ancestors[lvl]; in current_hweight()
1127 struct ioc_gq *child = iocg->ancestors[lvl + 1]; in current_hweight()
1144 iocg->hweight_active = max_t(u32, hwa, 1); in current_hweight()
1145 iocg->hweight_inuse = max_t(u32, hwi, 1); in current_hweight()
1146 iocg->hweight_gen = ioc_gen; in current_hweight()
1149 *hw_activep = iocg->hweight_active; in current_hweight()
1151 *hw_inusep = iocg->hweight_inuse; in current_hweight()
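current_hweight() computes the iocg's hierarchical share as a product of per-level ratios, walking root to leaf (1125-1127), and caches the result behind a generation counter (1109, 1146) so repeat calls are cheap until the hierarchy changes. A standalone userspace model, assuming WEIGHT_ONE is the 16-bit fixed-point unit used throughout the file:

    #include <stdint.h>

    #define WEIGHT_ONE (1U << 16)    /* assumed fixed-point "100%" */

    struct ioc_hw { unsigned hweight_gen; };

    struct iocg_hw {
            struct ioc_hw *ioc;
            int level;
            unsigned hweight_gen;                 /* generation of cached result */
            uint32_t active, inuse;
            uint32_t hweight_active, hweight_inuse;
            uint64_t child_active_sum, child_inuse_sum;
            struct iocg_hw *ancestors[];          /* [0] = root ... [level] = self */
    };

    static void current_hweight_m(struct iocg_hw *iocg, uint32_t *hwa_p, uint32_t *hwi_p)
    {
            if (iocg->ioc->hweight_gen != iocg->hweight_gen) {
                    uint64_t hwa = WEIGHT_ONE, hwi = WEIGHT_ONE;

                    for (int lvl = 0; lvl <= iocg->level - 1; lvl++) {
                            struct iocg_hw *parent = iocg->ancestors[lvl];
                            struct iocg_hw *child  = iocg->ancestors[lvl + 1];

                            if (!parent->child_active_sum || !parent->child_inuse_sum)
                                    continue;     /* sums may race to zero; skip */
                            hwa = hwa * child->active / parent->child_active_sum;
                            hwi = hwi * child->inuse  / parent->child_inuse_sum;
                    }
                    iocg->hweight_active = hwa > 1 ? hwa : 1;   /* the max_t at 1144 */
                    iocg->hweight_inuse  = hwi > 1 ? hwi : 1;
                    iocg->hweight_gen = iocg->ioc->hweight_gen; /* cache fresh (1146) */
            }
            if (hwa_p) *hwa_p = iocg->hweight_active;
            if (hwi_p) *hwi_p = iocg->hweight_inuse;
    }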
1158 static u32 current_hweight_max(struct ioc_gq *iocg) in current_hweight_max() argument
1161 u32 inuse = iocg->active; in current_hweight_max()
1165 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
1167 for (lvl = iocg->level - 1; lvl >= 0; lvl--) { in current_hweight_max()
1168 struct ioc_gq *parent = iocg->ancestors[lvl]; in current_hweight_max()
1169 struct ioc_gq *child = iocg->ancestors[lvl + 1]; in current_hweight_max()
1180 static void weight_updated(struct ioc_gq *iocg, struct ioc_now *now) in weight_updated() argument
1182 struct ioc *ioc = iocg->ioc; in weight_updated()
1183 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in weight_updated()
1189 weight = iocg->cfg_weight ?: iocc->dfl_weight; in weight_updated()
1190 if (weight != iocg->weight && iocg->active) in weight_updated()
1191 propagate_weights(iocg, weight, iocg->inuse, true, now); in weight_updated()
1192 iocg->weight = weight; in weight_updated()
1195 static bool iocg_activate(struct ioc_gq *iocg, struct ioc_now *now) in iocg_activate() argument
1197 struct ioc *ioc = iocg->ioc; in iocg_activate()
1206 if (!list_empty(&iocg->active_list)) { in iocg_activate()
1209 if (atomic64_read(&iocg->active_period) != cur_period) in iocg_activate()
1210 atomic64_set(&iocg->active_period, cur_period); in iocg_activate()
1215 if (iocg->child_active_sum) in iocg_activate()
1224 last_period = atomic64_read(&iocg->active_period); in iocg_activate()
1225 atomic64_set(&iocg->active_period, cur_period); in iocg_activate()
1228 if (!list_empty(&iocg->active_list)) in iocg_activate()
1230 for (i = iocg->level - 1; i > 0; i--) in iocg_activate()
1231 if (!list_empty(&iocg->ancestors[i]->active_list)) in iocg_activate()
1234 if (iocg->child_active_sum) in iocg_activate()
1242 vtime = atomic64_read(&iocg->vtime); in iocg_activate()
1244 atomic64_add(vtarget - vtime, &iocg->vtime); in iocg_activate()
1245 atomic64_add(vtarget - vtime, &iocg->done_vtime); in iocg_activate()
1253 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1254 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1256 propagate_weights(iocg, iocg->weight, in iocg_activate()
1257 iocg->last_inuse ?: iocg->weight, true, now); in iocg_activate()
1259 TRACE_IOCG_PATH(iocg_activate, iocg, now, in iocg_activate()
1262 iocg->activated_at = now->now; in iocg_activate()
1280 static bool iocg_kick_delay(struct ioc_gq *iocg, struct ioc_now *now) in iocg_kick_delay() argument
1282 struct ioc *ioc = iocg->ioc; in iocg_kick_delay()
1283 struct blkcg_gq *blkg = iocg_to_blkg(iocg); in iocg_kick_delay()
1288 lockdep_assert_held(&iocg->waitq.lock); in iocg_kick_delay()
1291 tdelta = now->now - iocg->delay_at; in iocg_kick_delay()
1292 if (iocg->delay) in iocg_kick_delay()
1293 delay = iocg->delay >> div64_u64(tdelta, USEC_PER_SEC); in iocg_kick_delay()
1298 current_hweight(iocg, &hwa, NULL); in iocg_kick_delay()
1299 vover = atomic64_read(&iocg->vtime) + in iocg_kick_delay()
1300 abs_cost_to_cost(iocg->abs_vdebt, hwa) - now->vnow; in iocg_kick_delay()
1316 iocg->delay = new_delay; in iocg_kick_delay()
1317 iocg->delay_at = now->now; in iocg_kick_delay()
1322 if (!iocg->indelay_since) in iocg_kick_delay()
1323 iocg->indelay_since = now->now; in iocg_kick_delay()
1327 if (iocg->indelay_since) { in iocg_kick_delay()
1328 iocg->local_stat.indelay_us += now->now - iocg->indelay_since; in iocg_kick_delay()
1329 iocg->indelay_since = 0; in iocg_kick_delay()
1331 iocg->delay = 0; in iocg_kick_delay()
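Line 1293 is the decay step of iocg_kick_delay(): the stored delay halves for every full second since delay_at, since tdelta in microseconds divided by USEC_PER_SEC becomes a shift count. A standalone model of just that arithmetic:

    #include <stdint.h>

    #define USEC_PER_SEC 1000000ULL

    /* halve the delay once per elapsed second; guard the shift since
     * shifting a 64-bit value by >= 64 is undefined in C */
    static uint64_t decayed_delay(uint64_t delay, uint64_t now_us, uint64_t delay_at_us)
    {
            uint64_t secs = (now_us - delay_at_us) / USEC_PER_SEC;

            return secs >= 64 ? 0 : delay >> secs;
    }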
1337 static void iocg_incur_debt(struct ioc_gq *iocg, u64 abs_cost, in iocg_incur_debt() argument
1342 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1343 lockdep_assert_held(&iocg->waitq.lock); in iocg_incur_debt()
1344 WARN_ON_ONCE(list_empty(&iocg->active_list)); in iocg_incur_debt()
1350 if (!iocg->abs_vdebt && abs_cost) { in iocg_incur_debt()
1351 iocg->indebt_since = now->now; in iocg_incur_debt()
1352 propagate_weights(iocg, iocg->active, 0, false, now); in iocg_incur_debt()
1355 iocg->abs_vdebt += abs_cost; in iocg_incur_debt()
1357 gcs = get_cpu_ptr(iocg->pcpu_stat); in iocg_incur_debt()
1362 static void iocg_pay_debt(struct ioc_gq *iocg, u64 abs_vpay, in iocg_pay_debt() argument
1365 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1366 lockdep_assert_held(&iocg->waitq.lock); in iocg_pay_debt()
1369 WARN_ON_ONCE(list_empty(&iocg->active_list)); in iocg_pay_debt()
1370 WARN_ON_ONCE(iocg->inuse > 1); in iocg_pay_debt()
1372 iocg->abs_vdebt -= min(abs_vpay, iocg->abs_vdebt); in iocg_pay_debt()
1375 if (!iocg->abs_vdebt) { in iocg_pay_debt()
1376 iocg->local_stat.indebt_us += now->now - iocg->indebt_since; in iocg_pay_debt()
1377 iocg->indebt_since = 0; in iocg_pay_debt()
1379 propagate_weights(iocg, iocg->active, iocg->last_inuse, in iocg_pay_debt()
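The debt pair above works as bookends: iocg_incur_debt() opens the indebt window and parks the iocg's inuse at the minimum (1351-1352), while iocg_pay_debt() clamps the payment to what is actually owed (1372) and, on reaching zero, closes the accounting window (1375-1377) and restores the saved inuse (1379); the WARN at 1370 asserts inuse stayed parked in between. A standalone sketch of the clamp-and-close step:

    #include <stdint.h>

    struct debt_state {
            uint64_t abs_vdebt;     /* absolute (device-wide) vtime owed */
            uint64_t indebt_since;  /* 0 when not in debt */
            uint64_t indebt_us;     /* accumulated time spent in debt */
    };

    static void pay_debt(struct debt_state *d, uint64_t abs_vpay, uint64_t now_us)
    {
            uint64_t paid = abs_vpay < d->abs_vdebt ? abs_vpay : d->abs_vdebt;

            d->abs_vdebt -= paid;                   /* never goes negative */
            if (!d->abs_vdebt && d->indebt_since) {
                    d->indebt_us += now_us - d->indebt_since;
                    d->indebt_since = 0;            /* debt window closed */
            }
    }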
1396 iocg_commit_bio(ctx->iocg, wait->bio, wait->abs_cost, cost); in iocg_wake_fn()
1416 static void iocg_kick_waitq(struct ioc_gq *iocg, bool pay_debt, in iocg_kick_waitq() argument
1419 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq()
1420 struct iocg_wake_ctx ctx = { .iocg = iocg }; in iocg_kick_waitq()
1425 lockdep_assert_held(&iocg->waitq.lock); in iocg_kick_waitq()
1427 current_hweight(iocg, &hwa, NULL); in iocg_kick_waitq()
1428 vbudget = now->vnow - atomic64_read(&iocg->vtime); in iocg_kick_waitq()
1431 if (pay_debt && iocg->abs_vdebt && vbudget > 0) { in iocg_kick_waitq()
1433 u64 abs_vpay = min_t(u64, abs_vbudget, iocg->abs_vdebt); in iocg_kick_waitq()
1438 atomic64_add(vpay, &iocg->vtime); in iocg_kick_waitq()
1439 atomic64_add(vpay, &iocg->done_vtime); in iocg_kick_waitq()
1440 iocg_pay_debt(iocg, abs_vpay, now); in iocg_kick_waitq()
1444 if (iocg->abs_vdebt || iocg->delay) in iocg_kick_waitq()
1445 iocg_kick_delay(iocg, now); in iocg_kick_waitq()
1453 if (iocg->abs_vdebt) { in iocg_kick_waitq()
1454 s64 vdebt = abs_cost_to_cost(iocg->abs_vdebt, hwa); in iocg_kick_waitq()
1464 current_hweight(iocg, NULL, &ctx.hw_inuse); in iocg_kick_waitq()
1466 __wake_up_locked_key(&iocg->waitq, TASK_NORMAL, &ctx); in iocg_kick_waitq()
1468 if (!waitqueue_active(&iocg->waitq)) { in iocg_kick_waitq()
1469 if (iocg->wait_since) { in iocg_kick_waitq()
1470 iocg->local_stat.wait_us += now->now - iocg->wait_since; in iocg_kick_waitq()
1471 iocg->wait_since = 0; in iocg_kick_waitq()
1476 if (!iocg->wait_since) in iocg_kick_waitq()
1477 iocg->wait_since = now->now; in iocg_kick_waitq()
1490 oexpires = ktime_to_ns(hrtimer_get_softexpires(&iocg->waitq_timer)); in iocg_kick_waitq()
1491 if (hrtimer_is_queued(&iocg->waitq_timer) && in iocg_kick_waitq()
1495 hrtimer_start_range_ns(&iocg->waitq_timer, ns_to_ktime(expires), in iocg_kick_waitq()
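iocg_kick_waitq() first turns the distance between vnow and the iocg's vtime into a budget (1428), spends that budget on outstanding debt before anything else (1431-1440), and only then wakes waiters, whose bios iocg_wake_fn() commits until the budget runs out; 1490-1495 arm the hrtimer for the time at which the next waiter will fit. A standalone model of the pay-debt-first ordering, with assumed shapes for the file's converters between absolute cost and per-iocg vtime:

    #include <stdint.h>
    #include <stdbool.h>

    #define WEIGHT_ONE (1U << 16)

    /* assumed converter shapes: a smaller hierarchical weight hw makes
     * the same absolute cost advance this iocg's vtime further */
    static uint64_t abs_to_vtime(uint64_t abs, uint32_t hw)
    { return (abs * WEIGHT_ONE + hw - 1) / hw; }
    static uint64_t vtime_to_abs(uint64_t v, uint32_t hw)
    { return (v * hw + WEIGHT_ONE - 1) / WEIGHT_ONE; }

    struct kick_state { uint64_t vtime, done_vtime, abs_vdebt; };

    static void kick_waitq_model(struct kick_state *s, bool pay_debt,
                                 uint64_t vnow, uint32_t hwa)
    {
            int64_t vbudget = (int64_t)(vnow - s->vtime);

            if (pay_debt && s->abs_vdebt && vbudget > 0) {
                    uint64_t abs_vbudget = vtime_to_abs((uint64_t)vbudget, hwa);
                    uint64_t abs_vpay = abs_vbudget < s->abs_vdebt ?
                                        abs_vbudget : s->abs_vdebt;
                    uint64_t vpay = abs_to_vtime(abs_vpay, hwa);

                    s->vtime += vpay;       /* payment consumes the budget */
                    s->done_vtime += vpay;  /* keep done_vtime in step (1439) */
                    s->abs_vdebt -= abs_vpay;
            }
            /* whatever budget remains (vnow - vtime) is then offered
             * to the waiters on iocg->waitq */
    }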
1501 struct ioc_gq *iocg = container_of(timer, struct ioc_gq, waitq_timer); in iocg_waitq_timer_fn() local
1502 bool pay_debt = READ_ONCE(iocg->abs_vdebt); in iocg_waitq_timer_fn()
1506 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1508 iocg_lock(iocg, pay_debt, &flags); in iocg_waitq_timer_fn()
1509 iocg_kick_waitq(iocg, pay_debt, &now); in iocg_waitq_timer_fn()
1510 iocg_unlock(iocg, pay_debt, &flags); in iocg_waitq_timer_fn()
1555 static bool iocg_is_idle(struct ioc_gq *iocg) in iocg_is_idle() argument
1557 struct ioc *ioc = iocg->ioc; in iocg_is_idle()
1560 if (atomic64_read(&iocg->active_period) == in iocg_is_idle()
1565 if (atomic64_read(&iocg->done_vtime) != atomic64_read(&iocg->vtime)) in iocg_is_idle()
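The two checks at 1560-1565 define idleness: an iocg only counts as idle if it saw no request during the current period and everything it issued has completed. A standalone predicate modeling that:

    #include <stdbool.h>
    #include <stdint.h>

    static bool iocg_is_idle_m(uint64_t active_period, uint64_t cur_period,
                               uint64_t vtime, uint64_t done_vtime)
    {
            if (active_period == cur_period)
                    return false;           /* issued something this period */
            if (done_vtime != vtime)
                    return false;           /* IOs still in flight */
            return true;
    }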
1576 static void iocg_build_inner_walk(struct ioc_gq *iocg, in iocg_build_inner_walk() argument
1581 WARN_ON_ONCE(!list_empty(&iocg->walk_list)); in iocg_build_inner_walk()
1584 for (lvl = iocg->level - 1; lvl >= 0; lvl--) { in iocg_build_inner_walk()
1585 if (!list_empty(&iocg->ancestors[lvl]->walk_list)) in iocg_build_inner_walk()
1590 while (++lvl <= iocg->level - 1) { in iocg_build_inner_walk()
1591 struct ioc_gq *inner = iocg->ancestors[lvl]; in iocg_build_inner_walk()
1599 static void iocg_flush_stat_one(struct ioc_gq *iocg, struct ioc_now *now) in iocg_flush_stat_one() argument
1601 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_one()
1607 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_one()
1612 per_cpu_ptr(&iocg->pcpu_stat->abs_vusage, cpu)); in iocg_flush_stat_one()
1614 vusage_delta = abs_vusage - iocg->last_stat_abs_vusage; in iocg_flush_stat_one()
1615 iocg->last_stat_abs_vusage = abs_vusage; in iocg_flush_stat_one()
1617 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_one()
1618 iocg->local_stat.usage_us += iocg->usage_delta_us; in iocg_flush_stat_one()
1622 iocg->local_stat.usage_us + iocg->desc_stat.usage_us; in iocg_flush_stat_one()
1624 iocg->local_stat.wait_us + iocg->desc_stat.wait_us; in iocg_flush_stat_one()
1626 iocg->local_stat.indebt_us + iocg->desc_stat.indebt_us; in iocg_flush_stat_one()
1628 iocg->local_stat.indelay_us + iocg->desc_stat.indelay_us; in iocg_flush_stat_one()
1631 if (iocg->level > 0) { in iocg_flush_stat_one()
1633 &iocg->ancestors[iocg->level - 1]->desc_stat; in iocg_flush_stat_one()
1636 new_stat.usage_us - iocg->last_stat.usage_us; in iocg_flush_stat_one()
1638 new_stat.wait_us - iocg->last_stat.wait_us; in iocg_flush_stat_one()
1640 new_stat.indebt_us - iocg->last_stat.indebt_us; in iocg_flush_stat_one()
1642 new_stat.indelay_us - iocg->last_stat.indelay_us; in iocg_flush_stat_one()
1645 iocg->last_stat = new_stat; in iocg_flush_stat_one()
1652 struct ioc_gq *iocg, *tiocg; in iocg_flush_stat() local
1655 list_for_each_entry(iocg, target_iocgs, active_list) { in iocg_flush_stat()
1656 iocg_flush_stat_one(iocg, now); in iocg_flush_stat()
1657 iocg_build_inner_walk(iocg, &inner_walk); in iocg_flush_stat()
1661 list_for_each_entry_safe_reverse(iocg, tiocg, &inner_walk, walk_list) { in iocg_flush_stat()
1662 iocg_flush_stat_one(iocg, now); in iocg_flush_stat()
1663 list_del_init(&iocg->walk_list); in iocg_flush_stat()
1672 static u32 hweight_after_donation(struct ioc_gq *iocg, u32 old_hwi, u32 hwm, in hweight_after_donation() argument
1675 struct ioc *ioc = iocg->ioc; in hweight_after_donation()
1676 u64 vtime = atomic64_read(&iocg->vtime); in hweight_after_donation()
1680 if (iocg->abs_vdebt) in hweight_after_donation()
1684 if (waitqueue_active(&iocg->waitq) || in hweight_after_donation()
1691 atomic64_add(excess, &iocg->vtime); in hweight_after_donation()
1692 atomic64_add(excess, &iocg->done_vtime); in hweight_after_donation()
1782 struct ioc_gq *iocg, *tiocg, *root_iocg; in transfer_surpluses() local
1794 list_for_each_entry(iocg, surpluses, surplus_list) { in transfer_surpluses()
1797 current_hweight(iocg, &hwa, NULL); in transfer_surpluses()
1798 after_sum += iocg->hweight_after_donation; in transfer_surpluses()
1800 if (iocg->hweight_after_donation > hwa) { in transfer_surpluses()
1801 over_sum += iocg->hweight_after_donation; in transfer_surpluses()
1802 list_add(&iocg->walk_list, &over_hwa); in transfer_surpluses()
1818 list_for_each_entry_safe(iocg, tiocg, &over_hwa, walk_list) { in transfer_surpluses()
1820 iocg->hweight_after_donation = in transfer_surpluses()
1821 div_u64((u64)iocg->hweight_after_donation * in transfer_surpluses()
1823 list_del_init(&iocg->walk_list); in transfer_surpluses()
1830 list_for_each_entry(iocg, surpluses, surplus_list) { in transfer_surpluses()
1831 iocg_build_inner_walk(iocg, &inner_walk); in transfer_surpluses()
1837 list_for_each_entry(iocg, &inner_walk, walk_list) { in transfer_surpluses()
1838 iocg->child_adjusted_sum = 0; in transfer_surpluses()
1839 iocg->hweight_donating = 0; in transfer_surpluses()
1840 iocg->hweight_after_donation = 0; in transfer_surpluses()
1847 list_for_each_entry(iocg, surpluses, surplus_list) { in transfer_surpluses()
1848 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; in transfer_surpluses()
1850 parent->hweight_donating += iocg->hweight_donating; in transfer_surpluses()
1851 parent->hweight_after_donation += iocg->hweight_after_donation; in transfer_surpluses()
1854 list_for_each_entry_reverse(iocg, &inner_walk, walk_list) { in transfer_surpluses()
1855 if (iocg->level > 0) { in transfer_surpluses()
1856 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; in transfer_surpluses()
1858 parent->hweight_donating += iocg->hweight_donating; in transfer_surpluses()
1859 parent->hweight_after_donation += iocg->hweight_after_donation; in transfer_surpluses()
1868 list_for_each_entry(iocg, &inner_walk, walk_list) { in transfer_surpluses()
1869 if (iocg->level) { in transfer_surpluses()
1870 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; in transfer_surpluses()
1872 iocg->hweight_active = DIV64_U64_ROUND_UP( in transfer_surpluses()
1873 (u64)parent->hweight_active * iocg->active, in transfer_surpluses()
1878 iocg->hweight_donating = min(iocg->hweight_donating, in transfer_surpluses()
1879 iocg->hweight_active); in transfer_surpluses()
1880 iocg->hweight_after_donation = min(iocg->hweight_after_donation, in transfer_surpluses()
1881 iocg->hweight_donating - 1); in transfer_surpluses()
1882 if (WARN_ON_ONCE(iocg->hweight_active <= 1 || in transfer_surpluses()
1883 iocg->hweight_donating <= 1 || in transfer_surpluses()
1884 iocg->hweight_after_donation == 0)) { in transfer_surpluses()
1886 pr_cont_cgroup_path(iocg_to_blkg(iocg)->blkcg->css.cgroup); in transfer_surpluses()
1888 iocg->hweight_active, iocg->hweight_donating, in transfer_surpluses()
1889 iocg->hweight_after_donation); in transfer_surpluses()
1915 list_for_each_entry(iocg, &inner_walk, walk_list) { in transfer_surpluses()
1920 if (iocg->level == 0) { in transfer_surpluses()
1922 iocg->child_adjusted_sum = DIV64_U64_ROUND_UP( in transfer_surpluses()
1923 iocg->child_active_sum * (WEIGHT_ONE - iocg->hweight_donating), in transfer_surpluses()
1924 WEIGHT_ONE - iocg->hweight_after_donation); in transfer_surpluses()
1928 parent = iocg->ancestors[iocg->level - 1]; in transfer_surpluses()
1931 iocg->hweight_inuse = DIV64_U64_ROUND_UP( in transfer_surpluses()
1932 (u64)gamma * (iocg->hweight_active - iocg->hweight_donating), in transfer_surpluses()
1933 WEIGHT_ONE) + iocg->hweight_after_donation; in transfer_surpluses()
1937 (u64)parent->child_adjusted_sum * iocg->hweight_inuse, in transfer_surpluses()
1942 iocg->child_active_sum * iocg->hweight_donating, in transfer_surpluses()
1943 iocg->hweight_active); in transfer_surpluses()
1944 sf = iocg->child_active_sum - st; in transfer_surpluses()
1946 (u64)iocg->active * iocg->hweight_donating, in transfer_surpluses()
1947 iocg->hweight_active); in transfer_surpluses()
1949 (u64)inuse * iocg->hweight_after_donation, in transfer_surpluses()
1950 iocg->hweight_inuse); in transfer_surpluses()
1952 iocg->child_adjusted_sum = sf + DIV64_U64_ROUND_UP(st * wptp, wpt); in transfer_surpluses()
1959 list_for_each_entry(iocg, surpluses, surplus_list) { in transfer_surpluses()
1960 struct ioc_gq *parent = iocg->ancestors[iocg->level - 1]; in transfer_surpluses()
1970 if (iocg->abs_vdebt) { in transfer_surpluses()
1971 WARN_ON_ONCE(iocg->inuse > 1); in transfer_surpluses()
1977 parent->child_adjusted_sum * iocg->hweight_after_donation, in transfer_surpluses()
1980 TRACE_IOCG_PATH(inuse_transfer, iocg, now, in transfer_surpluses()
1981 iocg->inuse, inuse, in transfer_surpluses()
1982 iocg->hweight_inuse, in transfer_surpluses()
1983 iocg->hweight_after_donation); in transfer_surpluses()
1985 __propagate_weights(iocg, iocg->active, inuse, true, now); in transfer_surpluses()
1989 list_for_each_entry_safe(iocg, tiocg, &inner_walk, walk_list) in transfer_surpluses()
1990 list_del_init(&iocg->walk_list); in transfer_surpluses()
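transfer_surpluses() is the donation pass, and its phases are visible above: if donors collectively want to keep more than can be absorbed, every hweight_after_donation is scaled down proportionally (1818-1823); the inner nodes connecting donors to the root are collected (1830-1831) and their accumulators zeroed (1837-1840); donating/after sums are rolled up bottom-up from leaves through inner nodes (1847-1859); then, top-down, each node gets a fresh hweight_active, clamped donation amounts that always leave at least 1 (1878-1884), and a child_adjusted_sum redistributing the donated share (1915-1952); finally each surplus iocg's target hweight is converted back into an inuse weight against its parent's adjusted sum (1959-1985). The root-level adjustment at 1922-1924, as a standalone sketch:

    #include <stdint.h>

    #define WEIGHT_ONE (1U << 16)    /* assumed fixed-point unit */

    static uint64_t div64_round_up(uint64_t a, uint64_t b)
    {
            return (a + b - 1) / b;  /* DIV64_U64_ROUND_UP stand-in */
    }

    /* shrink the root's child_active_sum so that, once donors give up
     * hw_donating and keep only hw_after, the surviving shares still
     * sum to WEIGHT_ONE */
    static uint64_t root_adjusted_sum(uint64_t child_active_sum,
                                      uint32_t hw_donating, uint32_t hw_after)
    {
            return div64_round_up(child_active_sum * (WEIGHT_ONE - hw_donating),
                                  WEIGHT_ONE - hw_after);
    }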
2007 struct ioc_gq *iocg; in ioc_forgive_debts() local
2059 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
2062 if (!iocg->abs_vdebt && !iocg->delay) in ioc_forgive_debts()
2065 spin_lock(&iocg->waitq.lock); in ioc_forgive_debts()
2067 old_debt = iocg->abs_vdebt; in ioc_forgive_debts()
2068 old_delay = iocg->delay; in ioc_forgive_debts()
2070 if (iocg->abs_vdebt) in ioc_forgive_debts()
2071 iocg->abs_vdebt = iocg->abs_vdebt >> nr_cycles ?: 1; in ioc_forgive_debts()
2072 if (iocg->delay) in ioc_forgive_debts()
2073 iocg->delay = iocg->delay >> nr_cycles ?: 1; in ioc_forgive_debts()
2075 iocg_kick_waitq(iocg, true, now); in ioc_forgive_debts()
2077 TRACE_IOCG_PATH(iocg_forgive_debt, iocg, now, usage_pct, in ioc_forgive_debts()
2078 old_debt, iocg->abs_vdebt, in ioc_forgive_debts()
2079 old_delay, iocg->delay); in ioc_forgive_debts()
2081 spin_unlock(&iocg->waitq.lock); in ioc_forgive_debts()
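Lines 2070-2073 are the forgiveness step proper: debt and delay are halved once per elapsed forgiveness cycle, with the `?: 1` keeping a floor of one so the iocg stays visibly in debt until iocg_kick_waitq() (2075) settles the remainder through the normal pay-debt path. A standalone model of the halving:

    #include <stdint.h>

    /* halve per cycle but never below 1; callers only reach this when
     * the amount is nonzero (the guards at 2070 and 2072) */
    static uint64_t forgive(uint64_t amount, unsigned nr_cycles)
    {
            uint64_t v = nr_cycles >= 64 ? 0 : amount >> nr_cycles;

            return v ? v : 1;
    }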
2088 struct ioc_gq *iocg, *tiocg; in ioc_timer_fn() local
2119 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2120 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && in ioc_timer_fn()
2121 !iocg->delay && !iocg_is_idle(iocg)) in ioc_timer_fn()
2124 spin_lock(&iocg->waitq.lock); in ioc_timer_fn()
2127 if (iocg->wait_since) { in ioc_timer_fn()
2128 iocg->local_stat.wait_us += now.now - iocg->wait_since; in ioc_timer_fn()
2129 iocg->wait_since = now.now; in ioc_timer_fn()
2131 if (iocg->indebt_since) { in ioc_timer_fn()
2132 iocg->local_stat.indebt_us += in ioc_timer_fn()
2133 now.now - iocg->indebt_since; in ioc_timer_fn()
2134 iocg->indebt_since = now.now; in ioc_timer_fn()
2136 if (iocg->indelay_since) { in ioc_timer_fn()
2137 iocg->local_stat.indelay_us += in ioc_timer_fn()
2138 now.now - iocg->indelay_since; in ioc_timer_fn()
2139 iocg->indelay_since = now.now; in ioc_timer_fn()
2142 if (waitqueue_active(&iocg->waitq) || iocg->abs_vdebt || in ioc_timer_fn()
2143 iocg->delay) { in ioc_timer_fn()
2145 iocg_kick_waitq(iocg, true, &now); in ioc_timer_fn()
2146 if (iocg->abs_vdebt || iocg->delay) in ioc_timer_fn()
2148 } else if (iocg_is_idle(iocg)) { in ioc_timer_fn()
2150 u64 vtime = atomic64_read(&iocg->vtime); in ioc_timer_fn()
2163 current_hweight(iocg, NULL, &old_hwi); in ioc_timer_fn()
2168 __propagate_weights(iocg, 0, 0, false, &now); in ioc_timer_fn()
2169 list_del_init(&iocg->active_list); in ioc_timer_fn()
2172 spin_unlock(&iocg->waitq.lock); in ioc_timer_fn()
2183 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2191 vdone = atomic64_read(&iocg->done_vtime); in ioc_timer_fn()
2192 vtime = atomic64_read(&iocg->vtime); in ioc_timer_fn()
2193 current_hweight(iocg, &hw_active, &hw_inuse); in ioc_timer_fn()
2202 !atomic_read(&iocg_to_blkg(iocg)->use_delay) && in ioc_timer_fn()
2213 usage_us = iocg->usage_delta_us; in ioc_timer_fn()
2224 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2225 usage_dur = max_t(u64, now.now - iocg->activated_at, 1); in ioc_timer_fn()
2235 WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); in ioc_timer_fn()
2237 (!waitqueue_active(&iocg->waitq) && in ioc_timer_fn()
2245 current_hweight(iocg, &hwa, &old_hwi); in ioc_timer_fn()
2246 hwm = current_hweight_max(iocg); in ioc_timer_fn()
2247 new_hwi = hweight_after_donation(iocg, old_hwi, hwm, in ioc_timer_fn()
2257 iocg->hweight_donating = hwa; in ioc_timer_fn()
2258 iocg->hweight_after_donation = new_hwi; in ioc_timer_fn()
2259 list_add(&iocg->surplus_list, &surpluses); in ioc_timer_fn()
2260 } else if (!iocg->abs_vdebt) { in ioc_timer_fn()
2271 TRACE_IOCG_PATH(inuse_shortage, iocg, &now, in ioc_timer_fn()
2272 iocg->inuse, iocg->active, in ioc_timer_fn()
2273 iocg->hweight_inuse, new_hwi); in ioc_timer_fn()
2275 __propagate_weights(iocg, iocg->active, in ioc_timer_fn()
2276 iocg->active, true, &now); in ioc_timer_fn()
2291 list_for_each_entry_safe(iocg, tiocg, &surpluses, surplus_list) in ioc_timer_fn()
2292 list_del_init(&iocg->surplus_list); in ioc_timer_fn()
2411 static u64 adjust_inuse_and_calc_cost(struct ioc_gq *iocg, u64 vtime, in adjust_inuse_and_calc_cost() argument
2414 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost()
2416 u32 __maybe_unused old_inuse = iocg->inuse, __maybe_unused old_hwi; in adjust_inuse_and_calc_cost()
2421 current_hweight(iocg, NULL, &hwi); in adjust_inuse_and_calc_cost()
2427 if (iocg->abs_vdebt) in adjust_inuse_and_calc_cost()
2434 if (margin >= iocg->saved_margin || margin >= margins->low || in adjust_inuse_and_calc_cost()
2435 iocg->inuse == iocg->active) in adjust_inuse_and_calc_cost()
2441 if (iocg->abs_vdebt || list_empty(&iocg->active_list)) { in adjust_inuse_and_calc_cost()
2453 new_inuse = iocg->inuse; in adjust_inuse_and_calc_cost()
2454 adj_step = DIV_ROUND_UP(iocg->active * INUSE_ADJ_STEP_PCT, 100); in adjust_inuse_and_calc_cost()
2457 propagate_weights(iocg, iocg->active, new_inuse, true, now); in adjust_inuse_and_calc_cost()
2458 current_hweight(iocg, NULL, &hwi); in adjust_inuse_and_calc_cost()
2461 iocg->inuse != iocg->active); in adjust_inuse_and_calc_cost()
2465 TRACE_IOCG_PATH(inuse_adjust, iocg, now, in adjust_inuse_and_calc_cost()
2466 old_inuse, iocg->inuse, old_hwi, hwi); in adjust_inuse_and_calc_cost()
2471 static void calc_vtime_cost_builtin(struct bio *bio, struct ioc_gq *iocg, in calc_vtime_cost_builtin() argument
2474 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin()
2495 if (iocg->cursor) { in calc_vtime_cost_builtin()
2496 seek_pages = abs(bio->bi_iter.bi_sector - iocg->cursor); in calc_vtime_cost_builtin()
2512 static u64 calc_vtime_cost(struct bio *bio, struct ioc_gq *iocg, bool is_merge) in calc_vtime_cost() argument
2516 calc_vtime_cost_builtin(bio, iocg, is_merge, &cost); in calc_vtime_cost()
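The built-in cost model charges a per-IO cost chosen by whether the seek distance from the last cursor position (2495-2496) exceeds a random-IO threshold, plus a per-page cost; merges skip the per-IO part. A hedged userspace reconstruction, with made-up coefficient values and an assumed sector-to-page shift:

    #include <stdint.h>
    #include <stdbool.h>

    #define SECT_TO_PAGE_SHIFT 3          /* 512-byte sectors -> 4K pages (assumed) */

    struct cost_coefs { uint64_t seqio, randio, page; };   /* illustrative lcoefs */

    static uint64_t bio_cost(const struct cost_coefs *c, uint64_t sectors,
                             uint64_t start_sector, uint64_t cursor,
                             uint64_t randio_pages, bool is_merge)
    {
            uint64_t pages = sectors >> SECT_TO_PAGE_SHIFT;
            uint64_t dist = start_sector > cursor ? start_sector - cursor
                                                  : cursor - start_sector;
            uint64_t seek_pages = cursor ? dist >> SECT_TO_PAGE_SHIFT : 0;
            uint64_t cost = 0;

            if (!pages)
                    pages = 1;                     /* floor, like the kernel's max_t */
            if (!is_merge)                         /* merges pay no per-IO cost */
                    cost += seek_pages > randio_pages ? c->randio : c->seqio;
            return cost + pages * c->page;         /* plus the per-page cost */
    }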
2549 struct ioc_gq *iocg = blkg_to_iocg(blkg); in ioc_rqos_throttle() local
2557 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2561 abs_cost = calc_vtime_cost(bio, iocg, false); in ioc_rqos_throttle()
2565 if (!iocg_activate(iocg, &now)) in ioc_rqos_throttle()
2568 iocg->cursor = bio_end_sector(bio); in ioc_rqos_throttle()
2569 vtime = atomic64_read(&iocg->vtime); in ioc_rqos_throttle()
2570 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); in ioc_rqos_throttle()
2577 if (!waitqueue_active(&iocg->waitq) && !iocg->abs_vdebt && in ioc_rqos_throttle()
2579 iocg_commit_bio(iocg, bio, abs_cost, cost); in ioc_rqos_throttle()
2591 ioc_locked = use_debt || READ_ONCE(iocg->abs_vdebt); in ioc_rqos_throttle()
2593 iocg_lock(iocg, ioc_locked, &flags); in ioc_rqos_throttle()
2602 if (unlikely(list_empty(&iocg->active_list))) { in ioc_rqos_throttle()
2603 iocg_unlock(iocg, ioc_locked, &flags); in ioc_rqos_throttle()
2604 iocg_commit_bio(iocg, bio, abs_cost, cost); in ioc_rqos_throttle()
2626 iocg_incur_debt(iocg, abs_cost, &now); in ioc_rqos_throttle()
2627 if (iocg_kick_delay(iocg, &now)) in ioc_rqos_throttle()
2630 iocg_unlock(iocg, ioc_locked, &flags); in ioc_rqos_throttle()
2635 if (!iocg->abs_vdebt && iocg->inuse != iocg->active) { in ioc_rqos_throttle()
2637 iocg_unlock(iocg, false, &flags); in ioc_rqos_throttle()
2641 propagate_weights(iocg, iocg->active, iocg->active, true, in ioc_rqos_throttle()
2664 __add_wait_queue_entry_tail(&iocg->waitq, &wait.wait); in ioc_rqos_throttle()
2665 iocg_kick_waitq(iocg, ioc_locked, &now); in ioc_rqos_throttle()
2667 iocg_unlock(iocg, ioc_locked, &flags); in ioc_rqos_throttle()
2677 finish_wait(&iocg->waitq, &wait.wait); in ioc_rqos_throttle()
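The throttle path above resolves to a three-way decision: the fast path commits immediately when there is budget and nobody is queued ahead (2577-2579); contexts that must not block take the cost as debt and get throttled via the delay mechanism (2626-2627); everyone else queues on iocg->waitq (2664) and parks until woken as vtime catches up (2677). A standalone sketch of that decision, with vtime wraparound handling elided:

    #include <stdint.h>
    #include <stdbool.h>

    enum throttle_action { ISSUE_NOW, INCUR_DEBT, WAIT_FOR_VTIME };

    static enum throttle_action throttle_decide(bool waitq_active, uint64_t abs_vdebt,
                                                uint64_t vtime, uint64_t cost,
                                                uint64_t vnow, bool may_sleep)
    {
            /* fast path: no one ahead, no debt, budget covers the cost */
            if (!waitq_active && !abs_vdebt && vtime + cost <= vnow)
                    return ISSUE_NOW;

            /* must-not-block contexts absorb the cost as debt instead */
            if (!may_sleep)
                    return INCUR_DEBT;

            /* otherwise sleep on the waitq until the budget arrives */
            return WAIT_FOR_VTIME;
    }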
2683 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg); in ioc_rqos_merge() local
2691 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2694 abs_cost = calc_vtime_cost(bio, iocg, true); in ioc_rqos_merge()
2700 vtime = atomic64_read(&iocg->vtime); in ioc_rqos_merge()
2701 cost = adjust_inuse_and_calc_cost(iocg, vtime, abs_cost, &now); in ioc_rqos_merge()
2705 blk_rq_pos(rq) + blk_rq_sectors(rq) == iocg->cursor) in ioc_rqos_merge()
2706 iocg->cursor = bio_end; in ioc_rqos_merge()
2713 time_before_eq64(atomic64_read(&iocg->vtime) + cost, now.vnow)) { in ioc_rqos_merge()
2714 iocg_commit_bio(iocg, bio, abs_cost, cost); in ioc_rqos_merge()
2724 spin_lock(&iocg->waitq.lock); in ioc_rqos_merge()
2726 if (likely(!list_empty(&iocg->active_list))) { in ioc_rqos_merge()
2727 iocg_incur_debt(iocg, abs_cost, &now); in ioc_rqos_merge()
2728 if (iocg_kick_delay(iocg, &now)) in ioc_rqos_merge()
2732 iocg_commit_bio(iocg, bio, abs_cost, cost); in ioc_rqos_merge()
2735 spin_unlock(&iocg->waitq.lock); in ioc_rqos_merge()
2741 struct ioc_gq *iocg = blkg_to_iocg(bio->bi_blkg); in ioc_rqos_done_bio() local
2743 if (iocg && bio->bi_iocost_cost) in ioc_rqos_done_bio()
2744 atomic64_add(bio->bi_iocost_cost, &iocg->done_vtime); in ioc_rqos_done_bio()
2906 struct ioc_gq *iocg; in ioc_pd_alloc() local
2908 iocg = kzalloc_node(struct_size(iocg, ancestors, levels), gfp, q->node); in ioc_pd_alloc()
2909 if (!iocg) in ioc_pd_alloc()
2912 iocg->pcpu_stat = alloc_percpu_gfp(struct iocg_pcpu_stat, gfp); in ioc_pd_alloc()
2913 if (!iocg->pcpu_stat) { in ioc_pd_alloc()
2914 kfree(iocg); in ioc_pd_alloc()
2918 return &iocg->pd; in ioc_pd_alloc()
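ioc_pd_alloc() sizes the allocation with struct_size() so the trailing ancestors[] flexible array gets one slot per hierarchy level (2908), then unwinds cleanly if the per-cpu stats allocation fails (2913-2914). A userspace model of the same shape:

    #include <stdlib.h>

    struct iocg_alloc_m {
            void *pcpu_stat;                    /* stands in for the percpu stats */
            int level;
            struct iocg_alloc_m *ancestors[];   /* one slot per level */
    };

    static struct iocg_alloc_m *iocg_alloc(int levels)
    {
            struct iocg_alloc_m *iocg;

            iocg = calloc(1, sizeof(*iocg) +
                             (size_t)levels * sizeof(iocg->ancestors[0]));
            if (!iocg)
                    return NULL;

            iocg->pcpu_stat = calloc(1, 64);    /* placeholder second allocation */
            if (!iocg->pcpu_stat) {
                    free(iocg);                 /* unwind the first allocation */
                    return NULL;
            }
            return iocg;
    }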
2923 struct ioc_gq *iocg = pd_to_iocg(pd); in ioc_pd_init() local
2924 struct blkcg_gq *blkg = pd_to_blkg(&iocg->pd); in ioc_pd_init()
2932 iocg->ioc = ioc; in ioc_pd_init()
2933 atomic64_set(&iocg->vtime, now.vnow); in ioc_pd_init()
2934 atomic64_set(&iocg->done_vtime, now.vnow); in ioc_pd_init()
2935 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
2936 INIT_LIST_HEAD(&iocg->active_list); in ioc_pd_init()
2937 INIT_LIST_HEAD(&iocg->walk_list); in ioc_pd_init()
2938 INIT_LIST_HEAD(&iocg->surplus_list); in ioc_pd_init()
2939 iocg->hweight_active = WEIGHT_ONE; in ioc_pd_init()
2940 iocg->hweight_inuse = WEIGHT_ONE; in ioc_pd_init()
2942 init_waitqueue_head(&iocg->waitq); in ioc_pd_init()
2943 hrtimer_init(&iocg->waitq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS); in ioc_pd_init()
2944 iocg->waitq_timer.function = iocg_waitq_timer_fn; in ioc_pd_init()
2946 iocg->level = blkg->blkcg->css.cgroup->level; in ioc_pd_init()
2950 iocg->ancestors[tiocg->level] = tiocg; in ioc_pd_init()
2954 weight_updated(iocg, &now); in ioc_pd_init()
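ioc_pd_init() seeds both vtime clocks at now.vnow (2933-2934), then fills ancestors[] by walking the blkg parent chain; line 2950's `iocg->ancestors[tiocg->level] = tiocg` files each ancestor at the index of its own level. A standalone model of that indexing:

    struct node { struct node *parent; int level; };

    /* after this, ancestors[0] is the root and ancestors[self->level]
     * is the node itself -- the layout the hierarchy walks above rely on */
    static void fill_ancestors(struct node *self, struct node **ancestors)
    {
            for (struct node *t = self; t; t = t->parent)
                    ancestors[t->level] = t;
    }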
2960 struct ioc_gq *iocg = pd_to_iocg(pd); in ioc_pd_free() local
2961 struct ioc *ioc = iocg->ioc; in ioc_pd_free()
2967 if (!list_empty(&iocg->active_list)) { in ioc_pd_free()
2971 propagate_weights(iocg, 0, 0, false, &now); in ioc_pd_free()
2972 list_del_init(&iocg->active_list); in ioc_pd_free()
2975 WARN_ON_ONCE(!list_empty(&iocg->walk_list)); in ioc_pd_free()
2976 WARN_ON_ONCE(!list_empty(&iocg->surplus_list)); in ioc_pd_free()
2980 hrtimer_cancel(&iocg->waitq_timer); in ioc_pd_free()
2982 free_percpu(iocg->pcpu_stat); in ioc_pd_free()
2983 kfree(iocg); in ioc_pd_free()
2988 struct ioc_gq *iocg = pd_to_iocg(pd); in ioc_pd_stat() local
2989 struct ioc *ioc = iocg->ioc; in ioc_pd_stat()
2995 if (iocg->level == 0) { in ioc_pd_stat()
3004 iocg->last_stat.usage_us); in ioc_pd_stat()
3009 iocg->last_stat.wait_us, in ioc_pd_stat()
3010 iocg->last_stat.indebt_us, in ioc_pd_stat()
3011 iocg->last_stat.indelay_us); in ioc_pd_stat()
3020 struct ioc_gq *iocg = pd_to_iocg(pd); in ioc_weight_prfill() local
3022 if (dname && iocg->cfg_weight) in ioc_weight_prfill()
3023 seq_printf(sf, "%s %u\n", dname, iocg->cfg_weight / WEIGHT_ONE); in ioc_weight_prfill()
3046 struct ioc_gq *iocg; in ioc_weight_write() local
3062 struct ioc_gq *iocg = blkg_to_iocg(blkg); in ioc_weight_write() local
3064 if (iocg) { in ioc_weight_write()
3065 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3066 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3067 weight_updated(iocg, &now); in ioc_weight_write()
3068 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3080 iocg = blkg_to_iocg(ctx.blkg); in ioc_weight_write()
3091 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3092 iocg->cfg_weight = v * WEIGHT_ONE; in ioc_weight_write()
3093 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3094 weight_updated(iocg, &now); in ioc_weight_write()
3095 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
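The weight interface keeps cfg_weight in the same WEIGHT_ONE fixed point as everything else: ioc_weight_write() scales user input up (3092), ioc_weight_prfill() divides it back out for display (3023), and every change is pushed through weight_updated() under ioc->lock (3065-3068, 3091-3095). A standalone sketch of the scaling pair, assuming the same WEIGHT_ONE unit as above:

    #include <stdint.h>

    #define WEIGHT_ONE (1U << 16)    /* assumed fixed-point unit */

    static uint64_t weight_store(uint32_t user_value)
    {
            return (uint64_t)user_value * WEIGHT_ONE;   /* the write path (3092) */
    }

    static uint32_t weight_show(uint64_t cfg_weight)
    {
            return cfg_weight / WEIGHT_ONE;             /* the display path (3023) */
    }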