1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef WB_THROTTLE_H
3*4882a593Smuzhiyun #define WB_THROTTLE_H
4*4882a593Smuzhiyun
5*4882a593Smuzhiyun #include <linux/kernel.h>
6*4882a593Smuzhiyun #include <linux/atomic.h>
7*4882a593Smuzhiyun #include <linux/wait.h>
8*4882a593Smuzhiyun #include <linux/timer.h>
9*4882a593Smuzhiyun #include <linux/ktime.h>
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include "blk-stat.h"
12*4882a593Smuzhiyun #include "blk-rq-qos.h"
13*4882a593Smuzhiyun
/*
 * Per-request wbt state flags, packed into the low WBT_NR_BITS bits.
 * TRACKED/READ/KSWAPD/DISCARD classify how (and whether) a request is
 * accounted for throttling.
 */
enum wbt_flags {
	WBT_TRACKED	= 1 << 0,	/* write, tracked for throttling */
	WBT_READ	= 1 << 1,	/* read */
	WBT_KSWAPD	= 1 << 2,	/* write, from kswapd */
	WBT_DISCARD	= 1 << 3,	/* discard */

	WBT_NR_BITS	= 4,		/* number of bits */
};
22*4882a593Smuzhiyun
/*
 * Indexes into rq_wb->rq_wait[]: one wait queue per class of request
 * that wbt throttles independently.
 */
enum {
	WBT_RWQ_BG = 0,		/* background writes */
	WBT_RWQ_KSWAPD,		/* writes issued by kswapd */
	WBT_RWQ_DISCARD,	/* discard requests */
	WBT_NUM_RWQ,		/* number of wait queues */
};
29*4882a593Smuzhiyun
/*
 * Enable states. Either off, or on by default (done at init time),
 * or on through manual setup in sysfs.
 */
enum {
	WBT_STATE_ON_DEFAULT	= 1,	/* enabled automatically at init time */
	WBT_STATE_ON_MANUAL	= 2,	/* enabled by the user through sysfs */
	WBT_STATE_OFF_DEFAULT	= 3,	/* off; was explicit for consistency with siblings */
};
39*4882a593Smuzhiyun
/*
 * Per-queue writeback throttling state, embedding the generic rq_qos
 * (recovered via RQWB()).
 */
struct rq_wb {
	/*
	 * Settings that govern how we throttle
	 */
	unsigned int wb_background;		/* background writeback */
	unsigned int wb_normal;			/* normal writeback */

	short enable_state;			/* WBT_STATE_* */

	/*
	 * Number of consecutive periods where we don't have enough
	 * information to make a firm scale up/down decision.
	 */
	unsigned int unknown_cnt;

	u64 win_nsec;				/* default window size */
	u64 cur_win_nsec;			/* current window size */

	/* stats callback driving the monitoring window */
	struct blk_stat_callback *cb;

	/* NOTE(review): presumably issue time/identity of the oldest
	 * outstanding sync request — confirm against blk-wbt.c */
	u64 sync_issue;
	void *sync_cookie;

	unsigned int wc;			/* set via wbt_set_write_cache(); presumably write cache state — confirm */

	unsigned long last_issue;		/* last non-throttled issue */
	unsigned long last_comp;		/* last non-throttled comp */
	unsigned long min_lat_nsec;		/* exposed via wbt_get_min_lat()/wbt_set_min_lat() */
	struct rq_qos rqos;			/* embedded rq-qos base; see RQWB() */
	struct rq_wait rq_wait[WBT_NUM_RWQ];	/* per-class throttle wait queues, indexed by WBT_RWQ_* */
	struct rq_depth rq_depth;		/* queue depth scaling state */
};
72*4882a593Smuzhiyun
RQWB(struct rq_qos * rqos)73*4882a593Smuzhiyun static inline struct rq_wb *RQWB(struct rq_qos *rqos)
74*4882a593Smuzhiyun {
75*4882a593Smuzhiyun return container_of(rqos, struct rq_wb, rqos);
76*4882a593Smuzhiyun }
77*4882a593Smuzhiyun
wbt_inflight(struct rq_wb * rwb)78*4882a593Smuzhiyun static inline unsigned int wbt_inflight(struct rq_wb *rwb)
79*4882a593Smuzhiyun {
80*4882a593Smuzhiyun unsigned int i, ret = 0;
81*4882a593Smuzhiyun
82*4882a593Smuzhiyun for (i = 0; i < WBT_NUM_RWQ; i++)
83*4882a593Smuzhiyun ret += atomic_read(&rwb->rq_wait[i].inflight);
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun return ret;
86*4882a593Smuzhiyun }
87*4882a593Smuzhiyun
88*4882a593Smuzhiyun
#ifdef CONFIG_BLK_WBT

/* Set up wbt throttling on a queue; returns 0 or a negative errno. */
int wbt_init(struct request_queue *);
/* NOTE(review): presumably disables wbt only when it was enabled by
 * default rather than via sysfs — confirm in blk-wbt.c */
void wbt_disable_default(struct request_queue *);
void wbt_enable_default(struct request_queue *);

/* Get/set the minimum latency target (nanoseconds). */
u64 wbt_get_min_lat(struct request_queue *q);
void wbt_set_min_lat(struct request_queue *q, u64 val);

/* Inform wbt whether the device has a write cache. */
void wbt_set_write_cache(struct request_queue *, bool);

/* Default latency target for this queue, in nanoseconds. */
u64 wbt_default_latency_nsec(struct request_queue *);

102*4882a593Smuzhiyun #else
103*4882a593Smuzhiyun
/*
 * CONFIG_BLK_WBT=n: no-op stubs so callers compile without wbt support.
 * NOTE(review): wbt_track() has no counterpart prototype in the
 * CONFIG_BLK_WBT branch above — confirm the asymmetry is intentional.
 */
static inline void wbt_track(struct request *rq, enum wbt_flags flags)
{
}
static inline int wbt_init(struct request_queue *q)
{
	/* wbt is not compiled in; report failure to would-be enablers */
	return -EINVAL;
}
static inline void wbt_disable_default(struct request_queue *q)
{
}
static inline void wbt_enable_default(struct request_queue *q)
{
}
static inline void wbt_set_write_cache(struct request_queue *q, bool wc)
{
}
static inline u64 wbt_get_min_lat(struct request_queue *q)
{
	return 0;
}
static inline void wbt_set_min_lat(struct request_queue *q, u64 val)
{
}
static inline u64 wbt_default_latency_nsec(struct request_queue *q)
{
	return 0;
}
131*4882a593Smuzhiyun
132*4882a593Smuzhiyun #endif /* CONFIG_BLK_WBT */
133*4882a593Smuzhiyun
134*4882a593Smuzhiyun #endif
135