Lines matching refs:ioc (identifier cross-reference over block/blk-iocost.c; each entry shows the source line number, the matching source line, and the enclosing function, with the symbol kind noted for declarations)

404 struct ioc {  struct
462 struct ioc *ioc; argument
660 static struct ioc *rqos_to_ioc(struct rq_qos *rqos) in rqos_to_ioc()
662 return container_of(rqos, struct ioc, rqos); in rqos_to_ioc()
665 static struct ioc *q_to_ioc(struct request_queue *q) in q_to_ioc()
678 static const char __maybe_unused *ioc_name(struct ioc *ioc) in ioc_name() argument
680 return q_name(ioc->rqos.q); in ioc_name()
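The three helpers above recover the enclosing struct ioc from an embedded member: line 662 is the canonical container_of() downcast. A minimal standalone sketch of the same pattern (the struct bodies are stand-ins, not the kernel definitions):

#include <stddef.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

struct rq_qos { int dummy; };            /* stand-in for the kernel type */
struct ioc    { struct rq_qos rqos; };   /* rqos embedded, as at line 404 */

static struct ioc *rqos_to_ioc(struct rq_qos *rqos)
{
        /* subtract the member offset to get back to the container */
        return container_of(rqos, struct ioc, rqos);
}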
737 spin_lock_irqsave(&iocg->ioc->lock, *flags); in iocg_lock()
748 spin_unlock_irqrestore(&iocg->ioc->lock, *flags); in iocg_unlock()
757 static void ioc_refresh_margins(struct ioc *ioc) in ioc_refresh_margins() argument
759 struct ioc_margins *margins = &ioc->margins; in ioc_refresh_margins()
760 u32 period_us = ioc->period_us; in ioc_refresh_margins()
761 u64 vrate = ioc->vtime_base_rate; in ioc_refresh_margins()
769 static void ioc_refresh_period_us(struct ioc *ioc) in ioc_refresh_period_us() argument
773 lockdep_assert_held(&ioc->lock); in ioc_refresh_period_us()
776 if (ioc->params.qos[QOS_RLAT] >= ioc->params.qos[QOS_WLAT]) { in ioc_refresh_period_us()
777 ppm = ioc->params.qos[QOS_RPPM]; in ioc_refresh_period_us()
778 lat = ioc->params.qos[QOS_RLAT]; in ioc_refresh_period_us()
780 ppm = ioc->params.qos[QOS_WPPM]; in ioc_refresh_period_us()
781 lat = ioc->params.qos[QOS_WLAT]; in ioc_refresh_period_us()
800 ioc->period_us = period_us; in ioc_refresh_period_us()
801 ioc->timer_slack_ns = div64_u64( in ioc_refresh_period_us()
804 ioc_refresh_margins(ioc); in ioc_refresh_period_us()
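Lines 776-781 take whichever direction (read or write) has the larger configured latency target and use its ppm/latency pair to size the controller period; line 804 then refreshes the margins derived from it. A sketch of just the selection step (the enum layout is an assumption for illustration):

enum { QOS_RPPM, QOS_RLAT, QOS_WPPM, QOS_WLAT, NR_QOS };  /* assumed layout */

static void pick_dominant_qos(const unsigned int qos[NR_QOS],
                              unsigned int *ppm, unsigned int *lat)
{
        /* the slower side's latency target dominates the period choice */
        if (qos[QOS_RLAT] >= qos[QOS_WLAT]) {
                *ppm = qos[QOS_RPPM];
                *lat = qos[QOS_RLAT];
        } else {
                *ppm = qos[QOS_WPPM];
                *lat = qos[QOS_WLAT];
        }
}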
807 static int ioc_autop_idx(struct ioc *ioc) in ioc_autop_idx() argument
809 int idx = ioc->autop_idx; in ioc_autop_idx()
815 if (!blk_queue_nonrot(ioc->rqos.q)) in ioc_autop_idx()
819 if (blk_queue_depth(ioc->rqos.q) == 1) in ioc_autop_idx()
827 if (ioc->user_qos_params || ioc->user_cost_model) in ioc_autop_idx()
831 vrate_pct = div64_u64(ioc->vtime_base_rate * 100, VTIME_PER_USEC); in ioc_autop_idx()
835 if (!ioc->autop_too_fast_at) in ioc_autop_idx()
836 ioc->autop_too_fast_at = now_ns; in ioc_autop_idx()
837 if (now_ns - ioc->autop_too_fast_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
840 ioc->autop_too_fast_at = 0; in ioc_autop_idx()
844 if (!ioc->autop_too_slow_at) in ioc_autop_idx()
845 ioc->autop_too_slow_at = now_ns; in ioc_autop_idx()
846 if (now_ns - ioc->autop_too_slow_at >= AUTOP_CYCLE_NSEC) in ioc_autop_idx()
849 ioc->autop_too_slow_at = 0; in ioc_autop_idx()
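The autop_too_fast_at/autop_too_slow_at timestamps (lines 835-849) implement hysteresis: the vrate must sit outside its band continuously for AUTOP_CYCLE_NSEC before the auto-parameter index is allowed to move, and any interruption resets the clock. The pattern in isolation (the cycle length is a placeholder):

#include <stdint.h>
#include <stdbool.h>

#define CYCLE_NSEC (10ULL * 1000 * 1000 * 1000)  /* placeholder, not the kernel value */

static bool held_for_cycle(bool cond, uint64_t now_ns, uint64_t *since_ns)
{
        if (cond) {
                if (!*since_ns)
                        *since_ns = now_ns;     /* condition just began */
                return now_ns - *since_ns >= CYCLE_NSEC;
        }
        *since_ns = 0;                          /* condition broke: reset */
        return false;
}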
892 static void ioc_refresh_lcoefs(struct ioc *ioc) in ioc_refresh_lcoefs() argument
894 u64 *u = ioc->params.i_lcoefs; in ioc_refresh_lcoefs()
895 u64 *c = ioc->params.lcoefs; in ioc_refresh_lcoefs()
903 static bool ioc_refresh_params(struct ioc *ioc, bool force) in ioc_refresh_params() argument
908 lockdep_assert_held(&ioc->lock); in ioc_refresh_params()
910 idx = ioc_autop_idx(ioc); in ioc_refresh_params()
913 if (idx == ioc->autop_idx && !force) in ioc_refresh_params()
916 if (idx != ioc->autop_idx) in ioc_refresh_params()
917 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in ioc_refresh_params()
919 ioc->autop_idx = idx; in ioc_refresh_params()
920 ioc->autop_too_fast_at = 0; in ioc_refresh_params()
921 ioc->autop_too_slow_at = 0; in ioc_refresh_params()
923 if (!ioc->user_qos_params) in ioc_refresh_params()
924 memcpy(ioc->params.qos, p->qos, sizeof(p->qos)); in ioc_refresh_params()
925 if (!ioc->user_cost_model) in ioc_refresh_params()
926 memcpy(ioc->params.i_lcoefs, p->i_lcoefs, sizeof(p->i_lcoefs)); in ioc_refresh_params()
928 ioc_refresh_period_us(ioc); in ioc_refresh_params()
929 ioc_refresh_lcoefs(ioc); in ioc_refresh_params()
931 ioc->vrate_min = DIV64_U64_ROUND_UP((u64)ioc->params.qos[QOS_MIN] * in ioc_refresh_params()
933 ioc->vrate_max = div64_u64((u64)ioc->params.qos[QOS_MAX] * in ioc_refresh_params()
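Lines 931-933 are truncated in the listing, but the rounding asymmetry is visible: the lower vrate bound uses DIV64_U64_ROUND_UP while the upper uses plain div64_u64, so the admissible range never widens past what QOS_MIN/QOS_MAX permit. A hedged reconstruction, assuming the per-million divisor implied by the percent*10000 storage format printed at lines 3116-3125:

#include <stdint.h>

#define VTIME_PER_USEC 100000ULL  /* illustrative scale, not the kernel constant */
#define MILLION        1000000ULL

static void refresh_vrate_bounds(uint64_t qos_min, uint64_t qos_max,
                                 uint64_t *vrate_min, uint64_t *vrate_max)
{
        /* min rounds up, max rounds down: the range only ever shrinks */
        *vrate_min = (qos_min * VTIME_PER_USEC + MILLION - 1) / MILLION;
        *vrate_max = qos_max * VTIME_PER_USEC / MILLION;
}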
946 static void ioc_refresh_vrate(struct ioc *ioc, struct ioc_now *now) in ioc_refresh_vrate() argument
948 s64 pleft = ioc->period_at + ioc->period_us - now->now; in ioc_refresh_vrate()
949 s64 vperiod = ioc->period_us * ioc->vtime_base_rate; in ioc_refresh_vrate()
952 lockdep_assert_held(&ioc->lock); in ioc_refresh_vrate()
963 vcomp = -div64_s64(ioc->vtime_err, pleft); in ioc_refresh_vrate()
964 vcomp_min = -(ioc->vtime_base_rate >> 1); in ioc_refresh_vrate()
965 vcomp_max = ioc->vtime_base_rate; in ioc_refresh_vrate()
968 ioc->vtime_err += vcomp * pleft; in ioc_refresh_vrate()
970 atomic64_set(&ioc->vtime_rate, ioc->vtime_base_rate + vcomp); in ioc_refresh_vrate()
973 ioc->vtime_err = clamp(ioc->vtime_err, -vperiod, vperiod); in ioc_refresh_vrate()
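ioc_refresh_vrate() spreads the accumulated vtime error over the time left in the period: the compensation is bounded to [-base_rate/2, +base_rate] (lines 964-965), the amount that will be paid back is charged against the error (line 968), and the residue is clamped to one period's worth (line 973). The arithmetic in isolation (pleft is assumed positive):

#include <stdint.h>

static int64_t clamp64(int64_t v, int64_t lo, int64_t hi)
{
        return v < lo ? lo : (v > hi ? hi : v);
}

static uint64_t compensated_rate(int64_t *vtime_err, uint64_t base_rate,
                                 int64_t pleft /* usecs left in period, > 0 */)
{
        int64_t vcomp = -(*vtime_err / pleft);   /* spread error over what's left */

        vcomp = clamp64(vcomp, -(int64_t)(base_rate >> 1), (int64_t)base_rate);
        *vtime_err += vcomp * pleft;             /* account for the payback */
        return base_rate + vcomp;
}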
977 static void ioc_now(struct ioc *ioc, struct ioc_now *now) in ioc_now() argument
983 now->vrate = atomic64_read(&ioc->vtime_rate); in ioc_now()
994 seq = read_seqcount_begin(&ioc->period_seqcount); in ioc_now()
995 now->vnow = ioc->period_at_vtime + in ioc_now()
996 (now->now - ioc->period_at) * now->vrate; in ioc_now()
997 } while (read_seqcount_retry(&ioc->period_seqcount, seq)); in ioc_now()
1000 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now) in ioc_start_period() argument
1002 WARN_ON_ONCE(ioc->running != IOC_RUNNING); in ioc_start_period()
1004 write_seqcount_begin(&ioc->period_seqcount); in ioc_start_period()
1005 ioc->period_at = now->now; in ioc_start_period()
1006 ioc->period_at_vtime = now->vnow; in ioc_start_period()
1007 write_seqcount_end(&ioc->period_seqcount); in ioc_start_period()
1009 ioc->timer.expires = jiffies + usecs_to_jiffies(ioc->period_us); in ioc_start_period()
1010 add_timer(&ioc->timer); in ioc_start_period()
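ioc_now() and ioc_start_period() are the read and write sides of a seqcount: the writer bumps the sequence around its update of period_at/period_at_vtime, and the reader retries whenever the sequence moved underneath it. A minimal C11 analogue of the protocol (the kernel primitive additionally handles memory ordering, preemption and lockdep; the plain data accesses below are a simplification):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic unsigned seq;
static uint64_t period_at, period_at_vtime;     /* seqcount-protected */

static void writer_update(uint64_t at, uint64_t at_vtime)
{
        atomic_fetch_add(&seq, 1);      /* odd: update in progress */
        period_at = at;
        period_at_vtime = at_vtime;
        atomic_fetch_add(&seq, 1);      /* even: update complete */
}

static void reader_snapshot(uint64_t *at, uint64_t *at_vtime)
{
        unsigned s;

        do {
                while ((s = atomic_load(&seq)) & 1)
                        ;               /* writer active: wait */
                *at = period_at;
                *at_vtime = period_at_vtime;
        } while (atomic_load(&seq) != s);       /* raced a writer: retry */
}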
1021 struct ioc *ioc = iocg->ioc; in __propagate_weights() local
1024 lockdep_assert_held(&ioc->lock); in __propagate_weights()
1078 ioc->weights_updated = true; in __propagate_weights()
1081 static void commit_weights(struct ioc *ioc) in commit_weights() argument
1083 lockdep_assert_held(&ioc->lock); in commit_weights()
1085 if (ioc->weights_updated) { in commit_weights()
1088 atomic_inc(&ioc->hweight_gen); in commit_weights()
1089 ioc->weights_updated = false; in commit_weights()
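commit_weights() publishes an entire batch of weight changes with one generation bump (line 1088); consumers such as current_hweight() tag their cached derived values with the generation they were computed at and recompute lazily on mismatch, and line 1253 primes a deliberately stale tag on activation to force the first refresh. The invalidation pattern in isolation:

#include <stdatomic.h>

static _Atomic int hweight_gen;         /* global weight generation */

struct node {
        int cached_gen;                 /* generation of the cached value */
        unsigned cached_hweight;
};

static unsigned current_hweight(struct node *n)
{
        int gen = atomic_load(&hweight_gen);

        if (n->cached_gen != gen) {
                n->cached_hweight = 0 /* ...recompute from the tree... */;
                n->cached_gen = gen;
        }
        return n->cached_hweight;
}

static void commit_weights(void)
{
        atomic_fetch_add(&hweight_gen, 1);  /* invalidate every cache at once */
}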
1097 commit_weights(iocg->ioc); in propagate_weights()
1102 struct ioc *ioc = iocg->ioc; in current_hweight() local
1108 ioc_gen = atomic_read(&ioc->hweight_gen); in current_hweight()
1165 lockdep_assert_held(&iocg->ioc->lock); in current_hweight_max()
1182 struct ioc *ioc = iocg->ioc; in weight_updated() local
1187 lockdep_assert_held(&ioc->lock); in weight_updated()
1197 struct ioc *ioc = iocg->ioc; in iocg_activate() local
1207 ioc_now(ioc, now); in iocg_activate()
1208 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1218 spin_lock_irq(&ioc->lock); in iocg_activate()
1220 ioc_now(ioc, now); in iocg_activate()
1223 cur_period = atomic64_read(&ioc->cur_period); in iocg_activate()
1241 vtarget = now->vnow - ioc->margins.target; in iocg_activate()
1253 iocg->hweight_gen = atomic_read(&ioc->hweight_gen) - 1; in iocg_activate()
1254 list_add(&iocg->active_list, &ioc->active_iocgs); in iocg_activate()
1264 if (ioc->running == IOC_IDLE) { in iocg_activate()
1265 ioc->running = IOC_RUNNING; in iocg_activate()
1266 ioc->dfgv_period_at = now->now; in iocg_activate()
1267 ioc->dfgv_period_rem = 0; in iocg_activate()
1268 ioc_start_period(ioc, now); in iocg_activate()
1272 spin_unlock_irq(&ioc->lock); in iocg_activate()
1276 spin_unlock_irq(&ioc->lock); in iocg_activate()
1282 struct ioc *ioc = iocg->ioc; in iocg_kick_delay() local
1302 ioc->period_us * ioc->vtime_base_rate); in iocg_kick_delay()
1342 lockdep_assert_held(&iocg->ioc->lock); in iocg_incur_debt()
1365 lockdep_assert_held(&iocg->ioc->lock); in iocg_pay_debt()
1419 struct ioc *ioc = iocg->ioc; in iocg_kick_waitq() local
1436 lockdep_assert_held(&ioc->lock); in iocg_kick_waitq()
1485 DIV64_U64_ROUND_UP(vshortage, ioc->vtime_base_rate) * in iocg_kick_waitq()
1487 expires += ioc->timer_slack_ns; in iocg_kick_waitq()
1492 abs(oexpires - expires) <= ioc->timer_slack_ns) in iocg_kick_waitq()
1496 ioc->timer_slack_ns, HRTIMER_MODE_ABS); in iocg_kick_waitq()
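Lines 1485-1496 size the waitq timer and avoid pointless rearming: the expiry is how long vtime needs to cover the shortage at the base rate, padded by the timer slack, and if the currently armed expiry is already within one slack of that, the hrtimer is left alone. Sketch of both steps (vrate is vtime per microsecond and assumed nonzero):

#include <stdint.h>
#include <stdbool.h>

static int64_t waitq_expiry(int64_t now_ns, uint64_t vshortage,
                            uint64_t vrate, int64_t slack_ns)
{
        /* microseconds to cover the shortage, rounded up, then padded */
        return now_ns + (int64_t)((vshortage + vrate - 1) / vrate) * 1000
                      + slack_ns;
}

static bool need_rearm(int64_t oexpires, int64_t expires,
                       int64_t slack_ns, bool timer_active)
{
        int64_t delta = oexpires - expires;

        if (timer_active && (delta < 0 ? -delta : delta) <= slack_ns)
                return false;   /* close enough: keep the current arming */
        return true;
}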
1506 ioc_now(iocg->ioc, &now); in iocg_waitq_timer_fn()
1515 static void ioc_lat_stat(struct ioc *ioc, u32 *missed_ppm_ar, u32 *rq_wait_pct_p) in ioc_lat_stat() argument
1523 struct ioc_pcpu_stat *stat = per_cpu_ptr(ioc->pcpu_stat, cpu); in ioc_lat_stat()
1551 ioc->period_us * NSEC_PER_USEC); in ioc_lat_stat()
1557 struct ioc *ioc = iocg->ioc; in iocg_is_idle() local
1561 atomic64_read(&ioc->cur_period)) in iocg_is_idle()
1601 struct ioc *ioc = iocg->ioc; in iocg_flush_stat_one() local
1607 lockdep_assert_held(&iocg->ioc->lock); in iocg_flush_stat_one()
1617 iocg->usage_delta_us = div64_u64(vusage_delta, ioc->vtime_base_rate); in iocg_flush_stat_one()
1675 struct ioc *ioc = iocg->ioc; in hweight_after_donation() local
1685 time_after64(vtime, now->vnow - ioc->margins.min)) in hweight_after_donation()
1689 excess = now->vnow - vtime - ioc->margins.target; in hweight_after_donation()
1694 ioc->vtime_err -= div64_u64(excess * old_hwi, WEIGHT_ONE); in hweight_after_donation()
1714 now->vnow - ioc->period_at_vtime); in hweight_after_donation()
2004 static void ioc_forgive_debts(struct ioc *ioc, u64 usage_us_sum, int nr_debtors, in ioc_forgive_debts() argument
2012 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2013 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2014 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2024 if (ioc->busy_level > 0) in ioc_forgive_debts()
2025 usage_us_sum = max_t(u64, usage_us_sum, ioc->period_us); in ioc_forgive_debts()
2027 ioc->dfgv_usage_us_sum += usage_us_sum; in ioc_forgive_debts()
2028 if (time_before64(now->now, ioc->dfgv_period_at + DFGV_PERIOD)) in ioc_forgive_debts()
2035 dur = now->now - ioc->dfgv_period_at; in ioc_forgive_debts()
2036 usage_pct = div64_u64(100 * ioc->dfgv_usage_us_sum, dur); in ioc_forgive_debts()
2038 ioc->dfgv_period_at = now->now; in ioc_forgive_debts()
2039 ioc->dfgv_usage_us_sum = 0; in ioc_forgive_debts()
2043 ioc->dfgv_period_rem = 0; in ioc_forgive_debts()
2056 nr_cycles = dur + ioc->dfgv_period_rem; in ioc_forgive_debts()
2057 ioc->dfgv_period_rem = do_div(nr_cycles, DFGV_PERIOD); in ioc_forgive_debts()
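Lines 2056-2057 carry the division remainder across timer invocations so partial debt-forgiveness cycles are not lost to truncation: the leftover from last time is added before dividing, and the new remainder (what do_div() returns through its argument) is stashed for the next run. Userspace equivalent:

#include <stdint.h>

#define DFGV_PERIOD_US (100ULL * 1000)  /* placeholder cycle length */

/* Whole forgiveness cycles covered by dur, carrying the remainder in *rem. */
static uint64_t whole_cycles(uint64_t dur, uint64_t *rem)
{
        uint64_t total = dur + *rem;    /* include last run's leftover */

        *rem = total % DFGV_PERIOD_US;
        return total / DFGV_PERIOD_US;
}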
2059 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_forgive_debts()
2087 struct ioc *ioc = container_of(timer, struct ioc, timer); in ioc_timer_fn() local
2093 u32 ppm_rthr = MILLION - ioc->params.qos[QOS_RPPM]; in ioc_timer_fn()
2094 u32 ppm_wthr = MILLION - ioc->params.qos[QOS_WPPM]; in ioc_timer_fn()
2100 ioc_lat_stat(ioc, missed_ppm, &rq_wait_pct); in ioc_timer_fn()
2103 spin_lock_irq(&ioc->lock); in ioc_timer_fn()
2105 ioc_now(ioc, &now); in ioc_timer_fn()
2107 period_vtime = now.vnow - ioc->period_at_vtime; in ioc_timer_fn()
2109 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
2119 list_for_each_entry_safe(iocg, tiocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2159 excess = now.vnow - vtime - ioc->margins.target; in ioc_timer_fn()
2164 ioc->vtime_err -= div64_u64(excess * old_hwi, in ioc_timer_fn()
2174 commit_weights(ioc); in ioc_timer_fn()
2180 iocg_flush_stat(&ioc->active_iocgs, &now); in ioc_timer_fn()
2183 list_for_each_entry(iocg, &ioc->active_iocgs, active_list) { in ioc_timer_fn()
2219 ioc->vtime_base_rate); in ioc_timer_fn()
2224 if (time_after64(iocg->activated_at, ioc->period_at)) in ioc_timer_fn()
2227 usage_dur = max_t(u64, now.now - ioc->period_at, 1); in ioc_timer_fn()
2238 time_before64(vtime, now.vnow - ioc->margins.low))) { in ioc_timer_fn()
2288 commit_weights(ioc); in ioc_timer_fn()
2300 prev_busy_level = ioc->busy_level; in ioc_timer_fn()
2305 ioc->busy_level = max(ioc->busy_level, 0); in ioc_timer_fn()
2306 ioc->busy_level++; in ioc_timer_fn()
2316 ioc->busy_level = min(ioc->busy_level, 0); in ioc_timer_fn()
2323 ioc->busy_level--; in ioc_timer_fn()
2331 ioc->busy_level = 0; in ioc_timer_fn()
2335 ioc->busy_level = 0; in ioc_timer_fn()
2338 ioc->busy_level = clamp(ioc->busy_level, -1000, 1000); in ioc_timer_fn()
2340 if (ioc->busy_level > 0 || (ioc->busy_level < 0 && !nr_lagging)) { in ioc_timer_fn()
2341 u64 vrate = ioc->vtime_base_rate; in ioc_timer_fn()
2342 u64 vrate_min = ioc->vrate_min, vrate_max = ioc->vrate_max; in ioc_timer_fn()
2362 int idx = min_t(int, abs(ioc->busy_level), in ioc_timer_fn()
2366 if (ioc->busy_level > 0) in ioc_timer_fn()
2375 trace_iocost_ioc_vrate_adj(ioc, vrate, missed_ppm, rq_wait_pct, in ioc_timer_fn()
2378 ioc->vtime_base_rate = vrate; in ioc_timer_fn()
2379 ioc_refresh_margins(ioc); in ioc_timer_fn()
2380 } else if (ioc->busy_level != prev_busy_level || nr_lagging) { in ioc_timer_fn()
2381 trace_iocost_ioc_vrate_adj(ioc, atomic64_read(&ioc->vtime_rate), in ioc_timer_fn()
2386 ioc_refresh_params(ioc, false); in ioc_timer_fn()
2388 ioc_forgive_debts(ioc, usage_us_sum, nr_debtors, &now); in ioc_timer_fn()
2394 atomic64_inc(&ioc->cur_period); in ioc_timer_fn()
2396 if (ioc->running != IOC_STOP) { in ioc_timer_fn()
2397 if (!list_empty(&ioc->active_iocgs)) { in ioc_timer_fn()
2398 ioc_start_period(ioc, &now); in ioc_timer_fn()
2400 ioc->busy_level = 0; in ioc_timer_fn()
2401 ioc->vtime_err = 0; in ioc_timer_fn()
2402 ioc->running = IOC_IDLE; in ioc_timer_fn()
2405 ioc_refresh_vrate(ioc, &now); in ioc_timer_fn()
2408 spin_unlock_irq(&ioc->lock); in ioc_timer_fn()
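The busy_level bookkeeping inside ioc_timer_fn() (lines 2300-2338) behaves as a saturating up/down counter that snaps to zero whenever the pressure direction flips or the device is in band, so a long stretch of one condition cannot mask a reversal. A condensed sketch (the overload/underutilization tests themselves are simplified away):

#include <stdbool.h>

static int update_busy_level(int busy_level, bool overloaded, bool underused)
{
        if (overloaded) {
                if (busy_level < 0)
                        busy_level = 0;         /* direction flipped: restart */
                busy_level++;
        } else if (underused) {
                if (busy_level > 0)
                        busy_level = 0;
                busy_level--;
        } else {
                busy_level = 0;                 /* in band: reset */
        }

        /* bound accumulated pressure in both directions (line 2338) */
        if (busy_level > 1000)
                busy_level = 1000;
        else if (busy_level < -1000)
                busy_level = -1000;
        return busy_level;
}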
2414 struct ioc *ioc = iocg->ioc; in adjust_inuse_and_calc_cost() local
2415 struct ioc_margins *margins = &ioc->margins; in adjust_inuse_and_calc_cost()
2438 spin_lock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2442 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2463 spin_unlock_irq(&ioc->lock); in adjust_inuse_and_calc_cost()
2474 struct ioc *ioc = iocg->ioc; in calc_vtime_cost_builtin() local
2482 coef_seqio = ioc->params.lcoefs[LCOEF_RSEQIO]; in calc_vtime_cost_builtin()
2483 coef_randio = ioc->params.lcoefs[LCOEF_RRANDIO]; in calc_vtime_cost_builtin()
2484 coef_page = ioc->params.lcoefs[LCOEF_RPAGE]; in calc_vtime_cost_builtin()
2487 coef_seqio = ioc->params.lcoefs[LCOEF_WSEQIO]; in calc_vtime_cost_builtin()
2488 coef_randio = ioc->params.lcoefs[LCOEF_WRANDIO]; in calc_vtime_cost_builtin()
2489 coef_page = ioc->params.lcoefs[LCOEF_WPAGE]; in calc_vtime_cost_builtin()
2520 static void calc_size_vtime_cost_builtin(struct request *rq, struct ioc *ioc, in calc_size_vtime_cost_builtin() argument
2527 *costp = pages * ioc->params.lcoefs[LCOEF_RPAGE]; in calc_size_vtime_cost_builtin()
2530 *costp = pages * ioc->params.lcoefs[LCOEF_WPAGE]; in calc_size_vtime_cost_builtin()
2537 static u64 calc_size_vtime_cost(struct request *rq, struct ioc *ioc) in calc_size_vtime_cost() argument
2541 calc_size_vtime_cost_builtin(rq, ioc, &cost); in calc_size_vtime_cost()
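The built-in cost model (lines 2474-2530) is linear: a fixed per-IO term chosen by direction and by whether the IO is sequential or random, plus a per-page size term; the size-only variant at lines 2520-2530 keeps just the page term. Condensed form (the seq/rand classification itself is elided):

#include <stdint.h>
#include <stdbool.h>

struct lcoefs {
        uint64_t seqio, randio, page;   /* one set each for read and write */
};

static uint64_t vtime_cost(const struct lcoefs *c, bool is_rand, uint64_t pages)
{
        uint64_t cost = is_rand ? c->randio : c->seqio; /* per-IO base term */

        return cost + pages * c->page;                  /* linear size term */
}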
2548 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_throttle() local
2557 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_throttle()
2684 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_merge() local
2691 if (!ioc->enabled || !iocg || !iocg->level) in ioc_rqos_merge()
2698 ioc_now(ioc, &now); in ioc_rqos_merge()
2723 spin_lock_irqsave(&ioc->lock, flags); in ioc_rqos_merge()
2736 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_rqos_merge()
2749 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_done() local
2754 if (!ioc->enabled || !rq->alloc_time_ns || !rq->start_time_ns) in ioc_rqos_done()
2772 size_nsec = div64_u64(calc_size_vtime_cost(rq, ioc), VTIME_PER_NSEC); in ioc_rqos_done()
2774 ccs = get_cpu_ptr(ioc->pcpu_stat); in ioc_rqos_done()
2777 on_q_ns - size_nsec <= ioc->params.qos[pidx] * NSEC_PER_USEC) in ioc_rqos_done()
2789 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_queue_depth_changed() local
2791 spin_lock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2792 ioc_refresh_params(ioc, false); in ioc_rqos_queue_depth_changed()
2793 spin_unlock_irq(&ioc->lock); in ioc_rqos_queue_depth_changed()
2798 struct ioc *ioc = rqos_to_ioc(rqos); in ioc_rqos_exit() local
2802 spin_lock_irq(&ioc->lock); in ioc_rqos_exit()
2803 ioc->running = IOC_STOP; in ioc_rqos_exit()
2804 spin_unlock_irq(&ioc->lock); in ioc_rqos_exit()
2806 del_timer_sync(&ioc->timer); in ioc_rqos_exit()
2807 free_percpu(ioc->pcpu_stat); in ioc_rqos_exit()
2808 kfree(ioc); in ioc_rqos_exit()
2822 struct ioc *ioc; in blk_iocost_init() local
2826 ioc = kzalloc(sizeof(*ioc), GFP_KERNEL); in blk_iocost_init()
2827 if (!ioc) in blk_iocost_init()
2830 ioc->pcpu_stat = alloc_percpu(struct ioc_pcpu_stat); in blk_iocost_init()
2831 if (!ioc->pcpu_stat) { in blk_iocost_init()
2832 kfree(ioc); in blk_iocost_init()
2837 struct ioc_pcpu_stat *ccs = per_cpu_ptr(ioc->pcpu_stat, cpu); in blk_iocost_init()
2846 rqos = &ioc->rqos; in blk_iocost_init()
2851 spin_lock_init(&ioc->lock); in blk_iocost_init()
2852 timer_setup(&ioc->timer, ioc_timer_fn, 0); in blk_iocost_init()
2853 INIT_LIST_HEAD(&ioc->active_iocgs); in blk_iocost_init()
2855 ioc->running = IOC_IDLE; in blk_iocost_init()
2856 ioc->vtime_base_rate = VTIME_PER_USEC; in blk_iocost_init()
2857 atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC); in blk_iocost_init()
2858 seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock); in blk_iocost_init()
2859 ioc->period_at = ktime_to_us(ktime_get()); in blk_iocost_init()
2860 atomic64_set(&ioc->cur_period, 0); in blk_iocost_init()
2861 atomic_set(&ioc->hweight_gen, 0); in blk_iocost_init()
2863 spin_lock_irq(&ioc->lock); in blk_iocost_init()
2864 ioc->autop_idx = AUTOP_INVALID; in blk_iocost_init()
2865 ioc_refresh_params(ioc, true); in blk_iocost_init()
2866 spin_unlock_irq(&ioc->lock); in blk_iocost_init()
2878 free_percpu(ioc->pcpu_stat); in blk_iocost_init()
2879 kfree(ioc); in blk_iocost_init()
2925 struct ioc *ioc = q_to_ioc(blkg->q); in ioc_pd_init() local
2930 ioc_now(ioc, &now); in ioc_pd_init()
2932 iocg->ioc = ioc; in ioc_pd_init()
2935 atomic64_set(&iocg->active_period, atomic64_read(&ioc->cur_period)); in ioc_pd_init()
2953 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_init()
2955 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_init()
2961 struct ioc *ioc = iocg->ioc; in ioc_pd_free() local
2964 if (ioc) { in ioc_pd_free()
2965 spin_lock_irqsave(&ioc->lock, flags); in ioc_pd_free()
2970 ioc_now(ioc, &now); in ioc_pd_free()
2978 spin_unlock_irqrestore(&ioc->lock, flags); in ioc_pd_free()
2989 struct ioc *ioc = iocg->ioc; in ioc_pd_stat() local
2992 if (!ioc->enabled) in ioc_pd_stat()
2997 ioc->vtime_base_rate * 10000, in ioc_pd_stat()
3065 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3066 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3068 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3091 spin_lock(&iocg->ioc->lock); in ioc_weight_write()
3093 ioc_now(iocg->ioc, &now); in ioc_weight_write()
3095 spin_unlock(&iocg->ioc->lock); in ioc_weight_write()
3109 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_qos_prfill() local
3115 dname, ioc->enabled, ioc->user_qos_params ? "user" : "auto", in ioc_qos_prfill()
3116 ioc->params.qos[QOS_RPPM] / 10000, in ioc_qos_prfill()
3117 ioc->params.qos[QOS_RPPM] % 10000 / 100, in ioc_qos_prfill()
3118 ioc->params.qos[QOS_RLAT], in ioc_qos_prfill()
3119 ioc->params.qos[QOS_WPPM] / 10000, in ioc_qos_prfill()
3120 ioc->params.qos[QOS_WPPM] % 10000 / 100, in ioc_qos_prfill()
3121 ioc->params.qos[QOS_WLAT], in ioc_qos_prfill()
3122 ioc->params.qos[QOS_MIN] / 10000, in ioc_qos_prfill()
3123 ioc->params.qos[QOS_MIN] % 10000 / 100, in ioc_qos_prfill()
3124 ioc->params.qos[QOS_MAX] / 10000, in ioc_qos_prfill()
3125 ioc->params.qos[QOS_MAX] % 10000 / 100); in ioc_qos_prfill()
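The arithmetic at lines 3116-3125 decodes parameters stored as percent scaled by 10000: v / 10000 is the integer percent and v % 10000 / 100 the two decimal places. For example:

#include <stdio.h>

int main(void)
{
        unsigned v = 952500;    /* 95.25% stored as percent * 10000 */

        printf("%u.%02u%%\n", v / 10000, v % 10000 / 100);  /* prints 95.25% */
        return 0;
}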
3158 struct ioc *ioc; in ioc_qos_write() local
3168 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3169 if (!ioc) { in ioc_qos_write()
3173 ioc = q_to_ioc(disk->queue); in ioc_qos_write()
3176 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3177 memcpy(qos, ioc->params.qos, sizeof(qos)); in ioc_qos_write()
3178 enable = ioc->enabled; in ioc_qos_write()
3179 user = ioc->user_qos_params; in ioc_qos_write()
3180 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3247 spin_lock_irq(&ioc->lock); in ioc_qos_write()
3250 blk_stat_enable_accounting(ioc->rqos.q); in ioc_qos_write()
3251 blk_queue_flag_set(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); in ioc_qos_write()
3252 ioc->enabled = true; in ioc_qos_write()
3254 blk_queue_flag_clear(QUEUE_FLAG_RQ_ALLOC_TIME, ioc->rqos.q); in ioc_qos_write()
3255 ioc->enabled = false; in ioc_qos_write()
3259 memcpy(ioc->params.qos, qos, sizeof(qos)); in ioc_qos_write()
3260 ioc->user_qos_params = true; in ioc_qos_write()
3262 ioc->user_qos_params = false; in ioc_qos_write()
3265 ioc_refresh_params(ioc, true); in ioc_qos_write()
3266 spin_unlock_irq(&ioc->lock); in ioc_qos_write()
3281 struct ioc *ioc = pd_to_iocg(pd)->ioc; in ioc_cost_model_prfill() local
3282 u64 *u = ioc->params.i_lcoefs; in ioc_cost_model_prfill()
3290 dname, ioc->user_cost_model ? "user" : "auto", in ioc_cost_model_prfill()
3325 struct ioc *ioc; in ioc_cost_model_write() local
3335 ioc = q_to_ioc(disk->queue); in ioc_cost_model_write()
3336 if (!ioc) { in ioc_cost_model_write()
3340 ioc = q_to_ioc(disk->queue); in ioc_cost_model_write()
3343 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3344 memcpy(u, ioc->params.i_lcoefs, sizeof(u)); in ioc_cost_model_write()
3345 user = ioc->user_cost_model; in ioc_cost_model_write()
3346 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()
3383 spin_lock_irq(&ioc->lock); in ioc_cost_model_write()
3385 memcpy(ioc->params.i_lcoefs, u, sizeof(u)); in ioc_cost_model_write()
3386 ioc->user_cost_model = true; in ioc_cost_model_write()
3388 ioc->user_cost_model = false; in ioc_cost_model_write()
3390 ioc_refresh_params(ioc, true); in ioc_cost_model_write()
3391 spin_unlock_irq(&ioc->lock); in ioc_cost_model_write()