Lines Matching refs:WRITE (all hits are in block/blk-throttle.c)

509 for (rw = READ; rw <= WRITE; rw++) { in throtl_pd_alloc()
516 tg->bps[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
518 tg->iops[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
520 tg->bps_conf[WRITE][LIMIT_MAX] = U64_MAX; in throtl_pd_alloc()
522 tg->iops_conf[WRITE][LIMIT_MAX] = UINT_MAX; in throtl_pd_alloc()
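
The throtl_pd_alloc() hits above show the driver's usual idiom: loop rw from READ to WRITE and start every LIMIT_MAX limit as "unlimited". A minimal, self-contained userspace sketch of that pattern follows; the struct and helper names (tg_limits, tg_limits_init) are illustrative stand-ins, not the kernel's definitions.

#include <stdint.h>
#include <limits.h>

enum { READ = 0, WRITE = 1 };                 /* bio direction index */
enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2 };

/* Illustrative stand-in for the bps/iops fields of struct throtl_grp. */
struct tg_limits {
	uint64_t bps[2][LIMIT_CNT];           /* bytes per second */
	unsigned int iops[2][LIMIT_CNT];      /* I/Os per second  */
};

/* Default both directions to "no limit" at LIMIT_MAX, as the matched
 * throtl_pd_alloc() lines do for WRITE (and, symmetrically, READ). */
static void tg_limits_init(struct tg_limits *tg)
{
	int rw;

	for (rw = READ; rw <= WRITE; rw++) {
		tg->bps[rw][LIMIT_MAX] = UINT64_MAX;
		tg->iops[rw][LIMIT_MAX] = UINT_MAX;
	}
}
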
576 for (rw = READ; rw <= WRITE; rw++) in tg_update_has_rules()
603 if (tg->bps[READ][LIMIT_LOW] || tg->bps[WRITE][LIMIT_LOW] || in blk_throtl_update_limit_valid()
604 tg->iops[READ][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) { in blk_throtl_update_limit_valid()
620 tg->bps[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
622 tg->iops[WRITE][LIMIT_LOW] = 0; in throtl_pd_offline()
1119 bio = throtl_peek_queued(&sq->queued[WRITE]); in tg_update_disptime()
1207 while ((bio = throtl_peek_queued(&sq->queued[WRITE])) && in throtl_dispatch_tg()
1290 sq->nr_queued[READ] + sq->nr_queued[WRITE], in throtl_pending_timer_fn()
1291 sq->nr_queued[READ], sq->nr_queued[WRITE]); in throtl_pending_timer_fn()
1352 for (rw = READ; rw <= WRITE; rw++) in blk_throtl_dispatch_work_fn()
1409 tg_bps_limit(tg, READ), tg_bps_limit(tg, WRITE), in tg_conf_updated()
1410 tg_iops_limit(tg, READ), tg_iops_limit(tg, WRITE)); in tg_conf_updated()
1449 throtl_start_new_slice(tg, WRITE); in tg_conf_updated()
1537 .private = offsetof(struct throtl_grp, bps[WRITE][LIMIT_MAX]),
1549 .private = offsetof(struct throtl_grp, iops[WRITE][LIMIT_MAX]),
1599 tg->bps_conf[WRITE][off] == bps_dft && in tg_prfill_limit()
1601 tg->iops_conf[WRITE][off] == iops_dft && in tg_prfill_limit()
1610 if (tg->bps_conf[WRITE][off] != U64_MAX) in tg_prfill_limit()
1612 tg->bps_conf[WRITE][off]); in tg_prfill_limit()
1616 if (tg->iops_conf[WRITE][off] != UINT_MAX) in tg_prfill_limit()
1618 tg->iops_conf[WRITE][off]); in tg_prfill_limit()
1665 v[1] = tg->bps_conf[WRITE][index]; in tg_set_limit()
1667 v[3] = tg->iops_conf[WRITE][index]; in tg_set_limit()
1711 tg->bps_conf[WRITE][index] = v[1]; in tg_set_limit()
1713 tg->iops_conf[WRITE][index] = v[3]; in tg_set_limit()
1717 tg->bps[WRITE][index] = v[1]; in tg_set_limit()
1719 tg->iops[WRITE][index] = v[3]; in tg_set_limit()
1723 tg->bps[WRITE][LIMIT_LOW] = min(tg->bps_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1724 tg->bps_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1727 tg->iops[WRITE][LIMIT_LOW] = min(tg->iops_conf[WRITE][LIMIT_LOW], in tg_set_limit()
1728 tg->iops_conf[WRITE][LIMIT_MAX]); in tg_set_limit()
1734 tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) || in tg_set_limit()
1738 tg->bps[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
1740 tg->iops[WRITE][LIMIT_LOW] = 0; in tg_set_limit()
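
The tg_set_limit() hits show how user-configured values flow from bps_conf/iops_conf into the effective bps/iops arrays: the low limit is always clamped so it can never exceed the configured max, and if no low limit remains set it is cleared back to 0. A hedged sketch of the clamping step, with an illustrative min_u64() helper standing in for the kernel's own min():

#include <stdint.h>

#define min_u64(a, b)	((a) < (b) ? (a) : (b))

enum { READ = 0, WRITE = 1 };
enum { LIMIT_LOW = 0, LIMIT_MAX = 1, LIMIT_CNT = 2 };

struct tg_conf {
	uint64_t bps_conf[2][LIMIT_CNT];      /* values written by the user       */
	uint64_t bps[2][LIMIT_CNT];           /* effective values used at runtime */
};

/* Mirror of the pattern in the matched tg_set_limit() lines: the
 * effective low limit is the configured low limit capped at the
 * configured max limit, per direction. */
static void tg_apply_low_limit(struct tg_conf *tg, int rw)
{
	tg->bps[rw][LIMIT_LOW] = min_u64(tg->bps_conf[rw][LIMIT_LOW],
					 tg->bps_conf[rw][LIMIT_MAX]);
}
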
1806 if (tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]) in __tg_last_low_overflow_time()
1807 wtime = tg->last_low_overflow_time[WRITE]; in __tg_last_low_overflow_time()
1830 !parent->bps[WRITE][LIMIT_LOW] && in tg_last_low_overflow_time()
1831 !parent->iops[WRITE][LIMIT_LOW]) in tg_last_low_overflow_time()
1875 write_limit = tg->bps[WRITE][LIMIT_LOW] || tg->iops[WRITE][LIMIT_LOW]; in throtl_tg_can_upgrade()
1879 (!write_limit || sq->nr_queued[WRITE])) in throtl_tg_can_upgrade()
1881 if (write_limit && sq->nr_queued[WRITE] && in throtl_tg_can_upgrade()
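
Taken together, the __tg_last_low_overflow_time() and throtl_tg_can_upgrade() hits describe when a group may leave the LIMIT_LOW state: a direction with a configured low limit only argues for an upgrade while it has bios queued, and if both directions have low limits both must be backlogged. A simplified, self-contained model of that per-group condition (names are illustrative, and the time-based part of the check is omitted):

#include <stdbool.h>

enum { READ = 0, WRITE = 1 };

struct upgrade_state {
	bool low_limit[2];            /* LIMIT_LOW configured for direction */
	unsigned int nr_queued[2];    /* bios waiting in the service queue  */
};

/* Simplified form of the condition visible in the throtl_tg_can_upgrade()
 * hits: upgrade if no low limits exist, or if every direction that has a
 * low limit is currently backlogged. */
static bool can_upgrade(const struct upgrade_state *s)
{
	bool r = s->low_limit[READ], w = s->low_limit[WRITE];

	if (!r && !w)
		return true;
	if (r && s->nr_queued[READ] && (!w || s->nr_queued[WRITE]))
		return true;
	if (w && s->nr_queued[WRITE] && (!r || s->nr_queued[READ]))
		return true;
	return false;
}
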
2050 if (tg->bps[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2051 bps = tg->last_bytes_disp[WRITE] * HZ; in throtl_downgrade_check()
2053 if (bps >= tg->bps[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2054 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2064 if (tg->iops[WRITE][LIMIT_LOW]) { in throtl_downgrade_check()
2065 tg->last_io_disp[WRITE] += atomic_xchg(&tg->last_io_split_cnt[WRITE], 0); in throtl_downgrade_check()
2066 iops = tg->last_io_disp[WRITE] * HZ / elapsed_time; in throtl_downgrade_check()
2067 if (iops >= tg->iops[WRITE][LIMIT_LOW]) in throtl_downgrade_check()
2068 tg->last_low_overflow_time[WRITE] = now; in throtl_downgrade_check()
2079 tg->last_bytes_disp[WRITE] = 0; in throtl_downgrade_check()
2081 tg->last_io_disp[WRITE] = 0; in throtl_downgrade_check()
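
The throtl_downgrade_check() hits compute the rate actually dispatched during the elapsed window and, whenever it reaches the configured low limit, record the time in last_low_overflow_time before the per-window counters are reset. A simplified userspace model (elapsed_time in jiffies, HZ as an illustrative tick rate; the kernel uses do_div() and also folds in split-bio counts):

#include <stdint.h>

#define HZ 100				/* illustrative tick rate */

enum { READ = 0, WRITE = 1 };

struct tg_window {
	uint64_t last_bytes_disp[2];          /* bytes dispatched this window */
	uint64_t bps_low[2];                  /* configured LIMIT_LOW bps     */
	unsigned long last_low_overflow_time[2];
};

/* As in the matched lines: scale dispatched bytes to a per-second rate
 * over elapsed_time jiffies, note when the low limit was last met or
 * exceeded, then start a fresh accounting window. */
static void downgrade_check_rw(struct tg_window *tg, int rw,
			       unsigned long elapsed_time, unsigned long now)
{
	uint64_t bps;

	if (tg->bps_low[rw] && elapsed_time) {
		bps = tg->last_bytes_disp[rw] * HZ / elapsed_time;
		if (bps >= tg->bps_low[rw])
			tg->last_low_overflow_time[rw] = now;
	}
	tg->last_bytes_disp[rw] = 0;
}
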
2116 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2147 for (rw = READ; rw <= WRITE; rw++) { in throtl_update_latency_buckets()
2175 td->avg_buckets[WRITE][i].latency, in throtl_update_latency_buckets()
2176 td->avg_buckets[WRITE][i].valid); in throtl_update_latency_buckets()
2291 sq->nr_queued[READ], sq->nr_queued[WRITE]); in blk_throtl_bio()
2421 td->latency_buckets[WRITE] = __alloc_percpu(sizeof(struct latency_bucket) * in blk_throtl_init()
2423 if (!td->latency_buckets[WRITE]) { in blk_throtl_init()
2444 free_percpu(td->latency_buckets[WRITE]); in blk_throtl_init()
2457 free_percpu(q->td->latency_buckets[WRITE]); in blk_throtl_exit()
2477 td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY; in blk_throtl_register_queue()
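
Finally, the blk_throtl_init(), blk_throtl_exit() and blk_throtl_register_queue() hits allocate one per-CPU latency_buckets array per direction and seed the WRITE average buckets with a baseline latency. A rough userspace analogue using plain heap allocation (the LATENCY_BUCKET_SIZE and DFL_HD_BASELINE_LATENCY values here are illustrative, and calloc() stands in for the kernel's __alloc_percpu()):

#include <stdlib.h>

enum { READ = 0, WRITE = 1 };
#define LATENCY_BUCKET_SIZE	9	/* illustrative bucket count       */
#define DFL_HD_BASELINE_LATENCY	4000	/* illustrative baseline, in usecs */

struct latency_bucket {
	unsigned long total_latency;
	int samples;
};

struct avg_latency_bucket {
	unsigned long latency;
	int valid;
};

struct throtl_data_model {
	struct latency_bucket *latency_buckets[2];
	struct avg_latency_bucket avg_buckets[2][LATENCY_BUCKET_SIZE];
};

/* Allocate one bucket array per direction and seed the WRITE averages,
 * loosely mirroring the matched init/register lines. */
static int td_latency_init(struct throtl_data_model *td)
{
	int i;

	for (i = READ; i <= WRITE; i++) {
		td->latency_buckets[i] = calloc(LATENCY_BUCKET_SIZE,
						sizeof(struct latency_bucket));
		if (!td->latency_buckets[i]) {
			if (i == WRITE)
				free(td->latency_buckets[READ]);
			return -1;
		}
	}

	for (i = 0; i < LATENCY_BUCKET_SIZE; i++)
		td->avg_buckets[WRITE][i].latency = DFL_HD_BASELINE_LATENCY;

	return 0;
}
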