/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
#ifndef __GENKSYMS__
#include <linux/blk-mq.h>
#endif

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
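
/*
 * Illustration only: a policy would typically embed blkg_policy_data in
 * its own per-blkg structure and convert back with container_of().
 * "my_pd", "my_policy" and "blkg_to_my_pd" below are hypothetical names,
 * not part of this interface:
 *
 *	struct my_pd {
 *		struct blkg_policy_data pd;	// must be the first member
 *		u64 nr_dispatched;
 *	};
 *
 *	static inline struct my_pd *blkg_to_my_pd(struct blkcg_gq *blkg)
 *	{
 *		struct blkg_policy_data *pd = blkg_to_pd(blkg, &my_policy);
 *
 *		return pd ? container_of(pd, struct my_pd, pd) : NULL;
 *	}
 */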

/*
 * Policies that need to keep per-blkcg data which is independent from any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);
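
/*
 * Illustration only: a policy is normally defined as a static
 * struct blkcg_policy, registered once from its init path and then
 * activated per-queue.  "my_policy" and its callbacks are hypothetical:
 *
 *	static struct blkcg_policy my_policy = {
 *		.pd_alloc_fn	= my_pd_alloc,
 *		.pd_init_fn	= my_pd_init,
 *		.pd_free_fn	= my_pd_free,
 *	};
 *
 *	static int __init my_init(void)
 *	{
 *		return blkcg_policy_register(&my_policy);
 *	}
 *
 * blkcg_activate_policy(q, &my_policy) then allocates pd's for the blkgs
 * on @q and makes blkg_to_pd(blkg, &my_policy) return non-NULL.
 */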

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
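
/*
 * Sketch of the expected calling pattern in a policy's cftype ->write
 * handler (illustrative; "my_policy" is hypothetical and the parsing of
 * ctx.body is policy-specific):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &my_policy, input, &ctx);
 *	if (ret)
 *		return ret;
 *
 *	// ctx.disk and ctx.blkg are valid here; parse ctx.body and
 *	// update blkg_to_pd(ctx.blkg, &my_policy) accordingly.
 *
 *	blkg_conf_finish(&ctx);
 *	return ret;
 */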

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget
 * logic to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by
 * the bio.  This means it does not need to be rcu protected as it cannot go
 * away with the bio owning a reference to it.  However, the latter
 * potentially gets it from task_css().  This can race against task
 * migration and the cgroup dying.  It is also semantically different as it
 * must be called rcu protected and is susceptible to failure when trying to
 * get a reference to it.  Therefore, it is not ok to assume that *_get()
 * will always succeed on the blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @bio: bio of interest
 *
 * Return: %true if this bio needs to be submitted with the root blkg
 * context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as
 * if it were attached to the root blkg, and then backcharge to the actual
 * owning blkg.  The idea is we do bio_blkcg() to look up the actual context
 * for the bio and attach the appropriate blkg to the bio.  Then we call
 * this helper and if it is true run with the root blkg for that queue and
 * then do any backcharging to the originating cgroup once the io is
 * complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}
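
/*
 * Illustrative lookup pattern, not a complete implementation: lookups
 * happen inside an RCU read section and the result must be pinned with
 * blkg_tryget() (declared below) before use outside of it:
 *
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg_tryget(blkg)) {
 *		// safe to use @blkg; drop with blkg_put() when done
 *		blkg_put(blkg);
 *	}
 *	rcu_read_unlock();
 */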

/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}
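
/*
 * Illustrative pairing: each active cgwb is expected to hold one online
 * pin, so blkg destruction is deferred until the last writeback context
 * goes away:
 *
 *	blkcg_pin_online(blkcg);	// cgwb created
 *	...
 *	blkcg_unpin_online(blkcg);	// cgwb released; the last unpin
 *					// triggers blkcg_destroy_blkgs()
 */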

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))
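
/*
 * Sketch of a descendant walk (illustrative); run under rcu_read_lock()
 * or one of the locks documented above:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	blkg_for_each_descendant_pre(blkg, pos_css, p_blkg) {
 *		// visits p_blkg first, then each online descendant
 *	}
 */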

bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root
 * should match.  The latter is necessary as we don't want to throttle e.g.
 * a metadata update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);

#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q) \
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */