Lines matching refs:td (struct throtl_data *) in block/blk-throttle.c
115 struct throtl_data *td; member
276 return tg->td; in sq_to_td()
289 static uint64_t throtl_adjusted_limit(uint64_t low, struct throtl_data *td) in throtl_adjusted_limit() argument
292 if (td->scale < 4096 && time_after_eq(jiffies, in throtl_adjusted_limit()
293 td->low_upgrade_time + td->scale * td->throtl_slice)) in throtl_adjusted_limit()
294 td->scale = (jiffies - td->low_upgrade_time) / td->throtl_slice; in throtl_adjusted_limit()
296 return low + (low >> 1) * td->scale; in throtl_adjusted_limit()
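throtl_adjusted_limit() (lines 289-296) boosts a low limit by 50% of its base value for every throtl_slice elapsed since the last upgrade, with the scale capped at 4096. A minimal userspace sketch of the same arithmetic, with jiffies and the slice length passed in as plain integers and time_after_eq() reduced to >= (ignoring jiffies wraparound); adjusted_limit is an illustrative name:

#include <stdint.h>
#include <stdio.h>

/* Each elapsed slice since the last upgrade adds half of the base
 * low limit; the scale is capped at 4096 to bound the boost. */
uint64_t adjusted_limit(uint64_t low, uint64_t jiffies,
			uint64_t low_upgrade_time,
			unsigned int throtl_slice, unsigned int *scale)
{
	if (*scale < 4096 &&
	    jiffies >= low_upgrade_time + (uint64_t)*scale * throtl_slice)
		*scale = (jiffies - low_upgrade_time) / throtl_slice;

	return low + (low >> 1) * *scale;
}

int main(void)
{
	unsigned int scale = 0;

	/* 5000 jiffies after the upgrade with a 50-jiffy slice:
	 * scale becomes 100, limit becomes 1000 + 500 * 100 = 51000. */
	printf("%llu\n", (unsigned long long)
	       adjusted_limit(1000, 5000, 0, 50, &scale));
	return 0;
}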
302 struct throtl_data *td; in tg_bps_limit() local
308 td = tg->td; in tg_bps_limit()
309 ret = tg->bps[rw][td->limit_index]; in tg_bps_limit()
310 if (ret == 0 && td->limit_index == LIMIT_LOW) { in tg_bps_limit()
313 tg->iops[rw][td->limit_index]) in tg_bps_limit()
319 if (td->limit_index == LIMIT_MAX && tg->bps[rw][LIMIT_LOW] && in tg_bps_limit()
323 adjusted = throtl_adjusted_limit(tg->bps[rw][LIMIT_LOW], td); in tg_bps_limit()
332 struct throtl_data *td; in tg_iops_limit() local
338 td = tg->td; in tg_iops_limit()
339 ret = tg->iops[rw][td->limit_index]; in tg_iops_limit()
340 if (ret == 0 && tg->td->limit_index == LIMIT_LOW) { in tg_iops_limit()
343 tg->bps[rw][td->limit_index]) in tg_iops_limit()
349 if (td->limit_index == LIMIT_MAX && tg->iops[rw][LIMIT_LOW] && in tg_iops_limit()
353 adjusted = throtl_adjusted_limit(tg->iops[rw][LIMIT_LOW], td); in tg_iops_limit()
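tg_bps_limit() and tg_iops_limit() (lines 302-353) share one lookup pattern: read the limit at the current limit_index; treat a missing low limit as unlimited while in low mode; and, while in max mode with a low limit configured, return the boosted low limit clamped to the configured max. A condensed model of the bps side; bps_limit and the adjusted parameter are illustrative names, and the real code additionally special-cases intermediate cgroups and a MIN_THROTL_BPS floor:

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

/* "adjusted" stands for throtl_adjusted_limit(bps[LIMIT_LOW]). */
uint64_t bps_limit(const uint64_t bps[LIMIT_CNT], int limit_index,
		   uint64_t adjusted)
{
	uint64_t ret = bps[limit_index];

	/* No low limit configured while in low mode: unlimited. */
	if (ret == 0 && limit_index == LIMIT_LOW)
		return UINT64_MAX;

	/* In max mode with a low limit set: boosted low limit,
	 * clamped to the configured max, i.e. min(max, adjusted). */
	if (limit_index == LIMIT_MAX && bps[LIMIT_LOW] &&
	    bps[LIMIT_MAX] > bps[LIMIT_LOW] && adjusted < bps[LIMIT_MAX])
		ret = adjusted;

	return ret;
}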
543 struct throtl_data *td = blkg->q->td; in throtl_pd_init() local
559 sq->parent_sq = &td->service_queue; in throtl_pd_init()
562 tg->td = td; in throtl_pd_init()
573 struct throtl_data *td = tg->td; in tg_update_has_rules() local
578 (td->limit_valid[td->limit_index] && in tg_update_has_rules()
593 static void blk_throtl_update_limit_valid(struct throtl_data *td) in blk_throtl_update_limit_valid() argument
600 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in blk_throtl_update_limit_valid()
611 td->limit_valid[LIMIT_LOW] = low_valid; in blk_throtl_update_limit_valid()
614 static void throtl_upgrade_state(struct throtl_data *td);
624 blk_throtl_update_limit_valid(tg->td); in throtl_pd_offline()
626 if (!tg->td->limit_valid[tg->td->limit_index]) in throtl_pd_offline()
627 throtl_upgrade_state(tg->td); in throtl_pd_offline()
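blk_throtl_update_limit_valid() (lines 593-611) rescans every group and records whether any low limit is still configured; throtl_pd_offline() (lines 624-627) then upgrades to max mode if the current mode has become invalid. A flat-array model of the scan; the real code iterates the blkcg hierarchy with blkg_for_each_descendant_post(), and low_limit_valid/grp_model are illustrative names:

#include <stdbool.h>
#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX, LIMIT_CNT };

struct grp_model {
	uint64_t bps[2][LIMIT_CNT];	/* [READ/WRITE][index] */
	unsigned int iops[2][LIMIT_CNT];
};

/* LIMIT_LOW stays valid as long as any group has a low bps or iops
 * limit in either direction. */
bool low_limit_valid(const struct grp_model *grps, int nr)
{
	for (int i = 0; i < nr; i++)
		for (int rw = 0; rw < 2; rw++)
			if (grps[i].bps[rw][LIMIT_LOW] ||
			    grps[i].iops[rw][LIMIT_LOW])
				return true;
	return false;
}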
788 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice_with_credit()
800 tg->slice_end[rw] = jiffies + tg->td->throtl_slice; in throtl_start_new_slice()
813 tg->slice_end[rw] = roundup(jiffy_end, tg->td->throtl_slice); in throtl_set_slice_end()
859 throtl_set_slice_end(tg, rw, jiffies + tg->td->throtl_slice); in throtl_trim_slice()
863 nr_slices = time_elapsed / tg->td->throtl_slice; in throtl_trim_slice()
867 tmp = tg_bps_limit(tg, rw) * tg->td->throtl_slice * nr_slices; in throtl_trim_slice()
871 io_trim = (tg_iops_limit(tg, rw) * tg->td->throtl_slice * nr_slices) / in throtl_trim_slice()
887 tg->slice_start[rw] += nr_slices * tg->td->throtl_slice; in throtl_trim_slice()
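throtl_trim_slice() (lines 859-887) forgives the bytes and I/Os a group was entitled to during fully elapsed slices, so idle credit cannot be hoarded: nr_slices = elapsed / throtl_slice, the trim is limit * throtl_slice * nr_slices scaled by HZ (the divide lands on the next, non-matching source line), and slice_start advances by whole slices only. A worked example, assuming HZ = 1000 and ignoring the carryover term the real code adds:

#include <stdio.h>
#include <stdint.h>

#define HZ 1000 /* assumed tick rate for this example */

int main(void)
{
	uint64_t bps = 8 << 20;           /* 8 MiB/s limit */
	unsigned int throtl_slice = 100;  /* jiffies per slice */
	unsigned long elapsed = 350;      /* jiffies since slice_start */

	unsigned long nr_slices = elapsed / throtl_slice;  /* 3 */
	/* Bytes the group was entitled to over those whole slices. */
	uint64_t bytes_trim = bps * throtl_slice * nr_slices / HZ;

	printf("trim %llu bytes, advance slice_start by %lu jiffies\n",
	       (unsigned long long)bytes_trim, nr_slices * throtl_slice);
	return 0;
}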
912 jiffy_elapsed_rnd = roundup(jiffy_elapsed + 1, tg->td->throtl_slice); in tg_with_in_iops_limit()
961 jiffy_elapsed_rnd = tg->td->throtl_slice; in tg_with_in_bps_limit()
963 jiffy_elapsed_rnd = roundup(jiffy_elapsed_rnd, tg->td->throtl_slice); in tg_with_in_bps_limit()
1029 jiffies + tg->td->throtl_slice)) in tg_may_dispatch()
1031 jiffies + tg->td->throtl_slice); in tg_may_dispatch()
1177 BUG_ON(tg->td->nr_queued[rw] <= 0); in tg_dispatch_one_bio()
1178 tg->td->nr_queued[rw]--; in tg_dispatch_one_bio()
1253 static bool throtl_can_upgrade(struct throtl_data *td,
1274 struct throtl_data *td = sq_to_td(sq); in throtl_pending_timer_fn() local
1275 struct request_queue *q = td->queue; in throtl_pending_timer_fn()
1281 if (throtl_can_upgrade(td, NULL)) in throtl_pending_timer_fn()
1282 throtl_upgrade_state(td); in throtl_pending_timer_fn()
1324 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_pending_timer_fn()
1340 struct throtl_data *td = container_of(work, struct throtl_data, in blk_throtl_dispatch_work_fn() local
1342 struct throtl_service_queue *td_sq = &td->service_queue; in blk_throtl_dispatch_work_fn()
1343 struct request_queue *q = td->queue; in blk_throtl_dispatch_work_fn()
1420 global ? tg->td->queue->root_blkg : tg_to_blkg(tg)) { in tg_conf_updated()
1748 blk_throtl_update_limit_valid(tg->td); in tg_set_limit()
1749 if (tg->td->limit_valid[LIMIT_LOW]) { in tg_set_limit()
1751 tg->td->limit_index = LIMIT_LOW; in tg_set_limit()
1753 tg->td->limit_index = LIMIT_MAX; in tg_set_limit()
1755 tg->td->limit_valid[LIMIT_LOW]); in tg_set_limit()
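tg_set_limit() (lines 1748-1755) revalidates the low limits after a write and re-selects the queue-wide mode. The source line between 1749 and 1751 does not reference td and is elided here; in mainline it checks which limit file was written, which the wrote_low parameter models below (treat that as an assumption, along with the illustrative name select_limit_index):

enum { LIMIT_LOW, LIMIT_MAX };

/* wrote_low models the elided check on which limit file was
 * written: the queue only switches down to low mode on a low-limit
 * write, but always leaves low mode once no low limit remains. */
int select_limit_index(int cur_index, int low_valid, int wrote_low)
{
	if (!low_valid)
		return LIMIT_MAX;
	return wrote_low ? LIMIT_LOW : cur_index;
}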
1784 struct throtl_data *td = q->td; in throtl_shutdown_wq() local
1786 cancel_work_sync(&td->dispatch_work); in throtl_shutdown_wq()
1861 tg->bio_cnt, ret, tg->td->scale); in throtl_tg_is_idle()
1886 tg_last_low_overflow_time(tg) + tg->td->throtl_slice) && in throtl_tg_can_upgrade()
1904 static bool throtl_can_upgrade(struct throtl_data *td, in throtl_can_upgrade() argument
1910 if (td->limit_index != LIMIT_LOW) in throtl_can_upgrade()
1913 if (time_before(jiffies, td->low_downgrade_time + td->throtl_slice)) in throtl_can_upgrade()
1917 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_can_upgrade()
1937 if (tg->td->limit_index != LIMIT_LOW) in throtl_upgrade_check()
1940 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_upgrade_check()
1946 __tg_last_low_overflow_time(tg) + tg->td->throtl_slice)) in throtl_upgrade_check()
1949 if (throtl_can_upgrade(tg->td, NULL)) in throtl_upgrade_check()
1950 throtl_upgrade_state(tg->td); in throtl_upgrade_check()
1953 static void throtl_upgrade_state(struct throtl_data *td) in throtl_upgrade_state() argument
1958 throtl_log(&td->service_queue, "upgrade to max"); in throtl_upgrade_state()
1959 td->limit_index = LIMIT_MAX; in throtl_upgrade_state()
1960 td->low_upgrade_time = jiffies; in throtl_upgrade_state()
1961 td->scale = 0; in throtl_upgrade_state()
1963 blkg_for_each_descendant_post(blkg, pos_css, td->queue->root_blkg) { in throtl_upgrade_state()
1972 throtl_select_dispatch(&td->service_queue); in throtl_upgrade_state()
1973 throtl_schedule_next_dispatch(&td->service_queue, true); in throtl_upgrade_state()
1974 queue_work(kthrotld_workqueue, &td->dispatch_work); in throtl_upgrade_state()
1977 static void throtl_downgrade_state(struct throtl_data *td) in throtl_downgrade_state() argument
1979 td->scale /= 2; in throtl_downgrade_state()
1981 throtl_log(&td->service_queue, "downgrade, scale %d", td->scale); in throtl_downgrade_state()
1982 if (td->scale) { in throtl_downgrade_state()
1983 td->low_upgrade_time = jiffies - td->scale * td->throtl_slice; in throtl_downgrade_state()
1987 td->limit_index = LIMIT_LOW; in throtl_downgrade_state()
1988 td->low_downgrade_time = jiffies; in throtl_downgrade_state()
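throtl_downgrade_state() (lines 1977-1988) halves the boost instead of dropping it outright: while the scale is still nonzero it backdates low_upgrade_time so that throtl_adjusted_limit() recomputes exactly the halved scale, and only at scale zero does the queue fall back to low mode. A userspace model with jiffies passed in; downgrade and td_model are illustrative names:

#include <stdint.h>

enum { LIMIT_LOW, LIMIT_MAX };

struct td_model {
	int limit_index;
	unsigned int scale;
	unsigned int throtl_slice;
	uint64_t low_upgrade_time;
	uint64_t low_downgrade_time;
};

/* Halve the boost; stay in max mode with a backdated upgrade time
 * unless the scale hit zero, then drop to low mode and stamp the
 * downgrade time for the upgrade-side hysteresis. */
void downgrade(struct td_model *td, uint64_t jiffies)
{
	td->scale /= 2;
	if (td->scale) {
		td->low_upgrade_time =
			jiffies - (uint64_t)td->scale * td->throtl_slice;
		return;
	}
	td->limit_index = LIMIT_LOW;
	td->low_downgrade_time = jiffies;
}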
1993 struct throtl_data *td = tg->td; in throtl_tg_can_downgrade() local
2000 if (time_after_eq(now, td->low_upgrade_time + td->throtl_slice) && in throtl_tg_can_downgrade()
2002 td->throtl_slice) && in throtl_tg_can_downgrade()
2028 if (tg->td->limit_index != LIMIT_MAX || in throtl_downgrade_check()
2029 !tg->td->limit_valid[LIMIT_LOW]) in throtl_downgrade_check()
2033 if (time_after(tg->last_check_time + tg->td->throtl_slice, now)) in throtl_downgrade_check()
2040 tg->td->throtl_slice)) in throtl_downgrade_check()
2076 throtl_downgrade_state(tg->td); in throtl_downgrade_check()
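Upgrades and downgrades are rate-limited symmetrically: an upgrade is refused within one throtl_slice of the last downgrade (line 1913), a downgrade needs a full slice since the last upgrade and since the group last exceeded its low limit (lines 2000-2002), and both *_check() helpers self-throttle via last_check_time (lines 1940 and 2033). A model of the two time gates, again with time_before()/time_after_eq() reduced to plain comparisons (ignoring jiffies wraparound):

#include <stdbool.h>
#include <stdint.h>

/* Upgrade gate: too soon after the last downgrade? */
bool upgrade_too_soon(uint64_t jiffies, uint64_t low_downgrade_time,
		      unsigned int throtl_slice)
{
	return jiffies < low_downgrade_time + throtl_slice;
}

/* Downgrade gate: a full slice must have passed since the last
 * upgrade and since the group last ran over its low limit. */
bool can_consider_downgrade(uint64_t jiffies, uint64_t low_upgrade_time,
			    uint64_t last_low_overflow_time,
			    unsigned int throtl_slice)
{
	return jiffies >= low_upgrade_time + throtl_slice &&
	       jiffies >= last_low_overflow_time + throtl_slice;
}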
2102 static void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2109 if (!blk_queue_nonrot(td->queue) || !td->limit_valid[LIMIT_LOW]) in throtl_update_latency_buckets()
2111 if (time_before(jiffies, td->last_calculate_time + HZ)) in throtl_update_latency_buckets()
2113 td->last_calculate_time = jiffies; in throtl_update_latency_buckets()
2118 struct latency_bucket *tmp = &td->tmp_buckets[rw][i]; in throtl_update_latency_buckets()
2124 bucket = per_cpu_ptr(td->latency_buckets[rw], in throtl_update_latency_buckets()
2150 if (td->avg_buckets[rw][i].latency < last_latency[rw]) in throtl_update_latency_buckets()
2151 td->avg_buckets[rw][i].latency = in throtl_update_latency_buckets()
2156 if (!td->avg_buckets[rw][i].valid) in throtl_update_latency_buckets()
2159 latency[rw] = (td->avg_buckets[rw][i].latency * 7 + in throtl_update_latency_buckets()
2162 td->avg_buckets[rw][i].latency = max(latency[rw], in throtl_update_latency_buckets()
2164 td->avg_buckets[rw][i].valid = true; in throtl_update_latency_buckets()
2165 last_latency[rw] = td->avg_buckets[rw][i].latency; in throtl_update_latency_buckets()
2170 throtl_log(&td->service_queue, in throtl_update_latency_buckets()
2173 td->avg_buckets[READ][i].latency, in throtl_update_latency_buckets()
2174 td->avg_buckets[READ][i].valid, in throtl_update_latency_buckets()
2175 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2176 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
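Lines 2159-2165 maintain a 7/8 exponentially weighted moving average per latency bucket, additionally clamped so that averages never decrease from one bucket to the next (last_latency). One worked update, assuming a current average of 800 us and a new 1600 us sample:

#include <stdio.h>

int main(void)
{
	unsigned long avg = 800, sample = 1600;   /* microseconds */

	/* Same smoothing as lines 2159-2161: 7 parts old average,
	 * 1 part new sample. */
	avg = (avg * 7 + sample) / 8;
	printf("new avg: %lu us\n", avg);         /* 900 us */
	return 0;
}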
2179 static inline void throtl_update_latency_buckets(struct throtl_data *td) in throtl_update_latency_buckets() argument
2212 struct throtl_data *td = tg->td; in blk_throtl_bio() local
2231 throtl_update_latency_buckets(td); in blk_throtl_bio()
2250 if (throtl_can_upgrade(td, tg)) { in blk_throtl_bio()
2251 throtl_upgrade_state(td); in blk_throtl_bio()
2295 td->nr_queued[rw]++; in blk_throtl_bio()
2316 if (throttled || !td->track_bio_latency) in blk_throtl_bio()
2324 static void throtl_track_latency(struct throtl_data *td, sector_t size, in throtl_track_latency() argument
2330 if (!td || td->limit_index != LIMIT_LOW || in throtl_track_latency()
2332 !blk_queue_nonrot(td->queue)) in throtl_track_latency()
2337 latency = get_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2340 put_cpu_ptr(td->latency_buckets[op]); in throtl_track_latency()
2346 struct throtl_data *td = q->td; in blk_throtl_stat_add() local
2348 throtl_track_latency(td, blk_rq_stats_sectors(rq), req_op(rq), in blk_throtl_stat_add()
2366 if (!tg->td->limit_valid[LIMIT_LOW]) in blk_throtl_bio_endio()
2380 throtl_track_latency(tg->td, bio_issue_size(&bio->bi_issue), in blk_throtl_bio_endio()
2383 if (tg->latency_target && lat >= tg->td->filtered_latency) { in blk_throtl_bio_endio()
2388 threshold = tg->td->avg_buckets[rw][bucket].latency + in blk_throtl_bio_endio()
2400 tg->bio_cnt_reset_time = tg->td->throtl_slice + jiffies; in blk_throtl_bio_endio()
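On completion (lines 2366-2400), a bio counts as "bad" when its latency exceeds the bucket's smoothed average plus the group's latency_target, but only once the latency clears td->filtered_latency, which filters out measurement noise; the good/bad counters are reset one throtl_slice later via bio_cnt_reset_time. A condensed model of the threshold test (bio_is_bad is an illustrative name):

#include <stdbool.h>

/* A latency_target of 0 disables the check, as in the real code. */
bool bio_is_bad(unsigned long lat, unsigned long latency_target,
		unsigned long filtered_latency, unsigned long bucket_avg)
{
	if (!latency_target || lat < filtered_latency)
		return false;
	return lat > bucket_avg + latency_target;
}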
2409 struct throtl_data *td; in blk_throtl_init() local
2412 td = kzalloc_node(sizeof(*td), GFP_KERNEL, q->node); in blk_throtl_init()
2413 if (!td) in blk_throtl_init()
2415 td->latency_buckets[READ] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2417 if (!td->latency_buckets[READ]) { in blk_throtl_init()
2418 kfree(td); in blk_throtl_init()
2421 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2423 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2424 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2425 kfree(td); in blk_throtl_init()
2429 INIT_WORK(&td->dispatch_work, blk_throtl_dispatch_work_fn); in blk_throtl_init()
2430 throtl_service_queue_init(&td->service_queue); in blk_throtl_init()
2432 q->td = td; in blk_throtl_init()
2433 td->queue = q; in blk_throtl_init()
2435 td->limit_valid[LIMIT_MAX] = true; in blk_throtl_init()
2436 td->limit_index = LIMIT_MAX; in blk_throtl_init()
2437 td->low_upgrade_time = jiffies; in blk_throtl_init()
2438 td->low_downgrade_time = jiffies; in blk_throtl_init()
2443 free_percpu(td->latency_buckets[READ]); in blk_throtl_init()
2444 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2445 kfree(td); in blk_throtl_init()
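blk_throtl_init() (lines 2409-2445) allocates the READ and WRITE per-CPU latency buckets and unwinds in reverse order on any failure, both in the early error paths and in the common error label. The same goto-unwind idiom in miniature, with plain malloc()/free() standing in for __alloc_percpu()/free_percpu():

#include <stdlib.h>

struct td_model { void *buckets_read, *buckets_write; };

/* Allocate both bucket arrays or nothing: free in reverse order on
 * failure, mirroring blk_throtl_init()'s error handling. */
struct td_model *td_alloc(size_t sz)
{
	struct td_model *td = calloc(1, sizeof(*td));

	if (!td)
		return NULL;
	td->buckets_read = malloc(sz);
	if (!td->buckets_read)
		goto err_td;
	td->buckets_write = malloc(sz);
	if (!td->buckets_write)
		goto err_read;
	return td;

err_read:
	free(td->buckets_read);
err_td:
	free(td);
	return NULL;
}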
2452 BUG_ON(!q->td); in blk_throtl_exit()
2453 del_timer_sync(&q->td->service_queue.pending_timer); in blk_throtl_exit()
2456 free_percpu(q->td->latency_buckets[READ]); in blk_throtl_exit()
2457 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2458 kfree(q->td); in blk_throtl_exit()
2463 struct throtl_data *td; in blk_throtl_register_queue() local
2466 td = q->td; in blk_throtl_register_queue()
2467 BUG_ON(!td); in blk_throtl_register_queue()
2470 td->throtl_slice = DFL_THROTL_SLICE_SSD; in blk_throtl_register_queue()
2471 td->filtered_latency = LATENCY_FILTERED_SSD; in blk_throtl_register_queue()
2473 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2474 td->filtered_latency = LATENCY_FILTERED_HD; in blk_throtl_register_queue()
2476 td->avg_buckets[READ][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2477 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
2482 td->throtl_slice = DFL_THROTL_SLICE_HD; in blk_throtl_register_queue()
2485 td->track_bio_latency = !queue_is_mq(q); in blk_throtl_register_queue()
2486 if (!td->track_bio_latency) in blk_throtl_register_queue()
2493 if (!q->td) in blk_throtl_sample_time_show()
2495 return sprintf(page, "%u\n", jiffies_to_msecs(q->td->throtl_slice)); in blk_throtl_sample_time_show()
2504 if (!q->td) in blk_throtl_sample_time_store()
2511 q->td->throtl_slice = t; in blk_throtl_sample_time_store()
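The throtl_sample_time sysfs pair (lines 2493-2511) converts between user-visible milliseconds and the jiffies stored in td->throtl_slice: jiffies_to_msecs() on read and, in the elided store lines, msecs_to_jiffies() plus range validation on write. The round trip, assuming HZ = 250:

#include <stdio.h>

#define HZ 250 /* assumed tick rate for this example */

int main(void)
{
	unsigned int throtl_slice = 25;           /* stored, in jiffies */

	/* show: jiffies -> milliseconds */
	printf("%u ms\n", throtl_slice * 1000 / HZ);   /* 100 ms */

	/* store: milliseconds -> jiffies */
	throtl_slice = 100 * HZ / 1000;
	printf("%u jiffies\n", throtl_slice);          /* 25 jiffies */
	return 0;
}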