/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BLK_CGROUP_H
#define _BLK_CGROUP_H
/*
 * Common Block IO controller cgroup interface
 *
 * Based on ideas and code from CFQ, CFS and BFQ:
 * Copyright (C) 2003 Jens Axboe <axboe@kernel.dk>
 *
 * Copyright (C) 2008 Fabio Checconi <fabio@gandalf.sssup.it>
 *		      Paolo Valente <paolo.valente@unimore.it>
 *
 * Copyright (C) 2009 Vivek Goyal <vgoyal@redhat.com>
 *		      Nauman Rafique <nauman@google.com>
 */

#include <linux/cgroup.h>
#include <linux/percpu.h>
#include <linux/percpu_counter.h>
#include <linux/u64_stats_sync.h>
#include <linux/seq_file.h>
#include <linux/radix-tree.h>
#include <linux/blkdev.h>
#include <linux/atomic.h>
#include <linux/kthread.h>
#include <linux/fs.h>
#ifndef __GENKSYMS__
#include <linux/blk-mq.h>
#endif

/* percpu_counter batch for blkg_[rw]stats, per-cpu drift doesn't matter */
#define BLKG_STAT_CPU_BATCH	(INT_MAX / 2)

/* Max limits for throttle policy */
#define THROTL_IOPS_MAX		UINT_MAX

#ifdef CONFIG_BLK_CGROUP

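/* blkg I/O statistics are split by operation type (read/write/discard) */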
enum blkg_iostat_type {
	BLKG_IOSTAT_READ,
	BLKG_IOSTAT_WRITE,
	BLKG_IOSTAT_DISCARD,

	BLKG_IOSTAT_NR,
};

struct blkcg_gq;

struct blkcg {
	struct cgroup_subsys_state	css;
	spinlock_t			lock;
	refcount_t			online_pin;

	struct radix_tree_root		blkg_tree;
	struct blkcg_gq	__rcu		*blkg_hint;
	struct hlist_head		blkg_list;

	struct blkcg_policy_data	*cpd[BLKCG_MAX_POLS];

	struct list_head		all_blkcgs_node;
#ifdef CONFIG_CGROUP_WRITEBACK
	struct list_head		cgwb_list;
#endif
};

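/*
 * Per-blkg I/O statistics.  ->cur holds the running counters; ->last
 * remembers what has already been propagated to the parent so that only
 * the delta needs to be flushed.  ->sync provides consistent reads of the
 * 64bit counters on 32bit machines.
 */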
struct blkg_iostat {
	u64				bytes[BLKG_IOSTAT_NR];
	u64				ios[BLKG_IOSTAT_NR];
};

struct blkg_iostat_set {
	struct u64_stats_sync		sync;
	struct blkg_iostat		cur;
	struct blkg_iostat		last;
};

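/*
 * Illustrative only: a reader takes a consistent snapshot of a
 * blkg_iostat_set with the u64_stats seqcount helpers ("bis" below is a
 * hypothetical struct blkg_iostat_set pointer):
 *
 *	struct blkg_iostat tmp;
 *	unsigned int seq;
 *
 *	do {
 *		seq = u64_stats_fetch_begin(&bis->sync);
 *		tmp = bis->cur;
 *	} while (u64_stats_fetch_retry(&bis->sync, seq));
 */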
/*
 * A blkcg_gq (blkg) is an association between a block cgroup (blkcg) and a
 * request_queue (q).  This is used by blkcg policies which need to track
 * information per blkcg - q pair.
 *
 * There can be multiple active blkcg policies and each blkg:policy pair is
 * represented by a blkg_policy_data which is allocated and freed by each
 * policy's pd_alloc/free_fn() methods.  A policy can allocate a private
 * data area by allocating a larger data structure which embeds
 * blkg_policy_data at the beginning.
 */
struct blkg_policy_data {
	/* the blkg and policy id this per-policy data belongs to */
	struct blkcg_gq			*blkg;
	int				plid;
};
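
/*
 * Illustrative only (all "foo" names are hypothetical): a policy embeds
 * blkg_policy_data at the start of its own per-blkg structure and maps
 * back and forth with container_of() / pd_to_blkg():
 *
 *	struct foo_blkg_data {
 *		struct blkg_policy_data	pd;	// must come first
 *		u64			nr_foo_ios;
 *	};
 *
 *	static struct foo_blkg_data *pd_to_foo(struct blkg_policy_data *pd)
 *	{
 *		return pd ? container_of(pd, struct foo_blkg_data, pd) : NULL;
 *	}
 */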

/*
 * Policies that need to keep per-blkcg data which is independent of any
 * request_queue associated with it should implement cpd_alloc/free_fn()
 * methods.  A policy can allocate a private data area by allocating a
 * larger data structure which embeds blkcg_policy_data at the beginning.
 * cpd_init() is invoked to let each policy handle per-blkcg data.
 */
struct blkcg_policy_data {
	/* the blkcg and policy id this per-policy data belongs to */
	struct blkcg			*blkcg;
	int				plid;
};
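
/*
 * Illustrative only: per-blkcg (cpd) data is embedded the same way as the
 * per-blkg (pd) data above, using blkcg_to_cpd()/cpd_to_blkcg() for the
 * conversions.
 */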

/* association between a blk cgroup and a request queue */
struct blkcg_gq {
	/* Pointer to the associated request_queue */
	struct request_queue		*q;
	struct list_head		q_node;
	struct hlist_node		blkcg_node;
	struct blkcg			*blkcg;

	/* all non-root blkcg_gq's are guaranteed to have access to parent */
	struct blkcg_gq			*parent;

	/* reference count */
	struct percpu_ref		refcnt;

	/* is this blkg online? protected by both blkcg and q locks */
	bool				online;

	struct blkg_iostat_set __percpu	*iostat_cpu;
	struct blkg_iostat_set		iostat;

	struct blkg_policy_data		*pd[BLKCG_MAX_POLS];

	spinlock_t			async_bio_lock;
	struct bio_list			async_bios;
	struct work_struct		async_bio_work;

	atomic_t			use_delay;
	atomic64_t			delay_nsec;
	atomic64_t			delay_start;
	u64				last_delay;
	int				last_use;

	struct rcu_head			rcu_head;
};

typedef struct blkcg_policy_data *(blkcg_pol_alloc_cpd_fn)(gfp_t gfp);
typedef void (blkcg_pol_init_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_free_cpd_fn)(struct blkcg_policy_data *cpd);
typedef void (blkcg_pol_bind_cpd_fn)(struct blkcg_policy_data *cpd);
typedef struct blkg_policy_data *(blkcg_pol_alloc_pd_fn)(gfp_t gfp,
				struct request_queue *q, struct blkcg *blkcg);
typedef void (blkcg_pol_init_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_online_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_offline_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_free_pd_fn)(struct blkg_policy_data *pd);
typedef void (blkcg_pol_reset_pd_stats_fn)(struct blkg_policy_data *pd);
typedef size_t (blkcg_pol_stat_pd_fn)(struct blkg_policy_data *pd, char *buf,
				      size_t size);

struct blkcg_policy {
	int				plid;
	/* cgroup files for the policy */
	struct cftype			*dfl_cftypes;
	struct cftype			*legacy_cftypes;

	/* operations */
	blkcg_pol_alloc_cpd_fn		*cpd_alloc_fn;
	blkcg_pol_init_cpd_fn		*cpd_init_fn;
	blkcg_pol_free_cpd_fn		*cpd_free_fn;
	blkcg_pol_bind_cpd_fn		*cpd_bind_fn;

	blkcg_pol_alloc_pd_fn		*pd_alloc_fn;
	blkcg_pol_init_pd_fn		*pd_init_fn;
	blkcg_pol_online_pd_fn		*pd_online_fn;
	blkcg_pol_offline_pd_fn		*pd_offline_fn;
	blkcg_pol_free_pd_fn		*pd_free_fn;
	blkcg_pol_reset_pd_stats_fn	*pd_reset_stats_fn;
	blkcg_pol_stat_pd_fn		*pd_stat_fn;
};
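
/*
 * Illustrative only (hypothetical "foo" policy): a policy instance is
 * normally defined statically, registered once from init context and then
 * activated per request_queue:
 *
 *	static struct blkcg_policy blkcg_policy_foo = {
 *		.pd_alloc_fn	= foo_pd_alloc,
 *		.pd_init_fn	= foo_pd_init,
 *		.pd_free_fn	= foo_pd_free,
 *	};
 *
 *	ret = blkcg_policy_register(&blkcg_policy_foo);
 *	...
 *	ret = blkcg_activate_policy(q, &blkcg_policy_foo);
 */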

extern struct blkcg blkcg_root;
extern struct cgroup_subsys_state * const blkcg_root_css;
extern bool blkcg_debug_stats;

struct blkcg_gq *blkg_lookup_slowpath(struct blkcg *blkcg,
				      struct request_queue *q, bool update_hint);
int blkcg_init_queue(struct request_queue *q);
void blkcg_exit_queue(struct request_queue *q);

/* Blkio controller policy registration */
int blkcg_policy_register(struct blkcg_policy *pol);
void blkcg_policy_unregister(struct blkcg_policy *pol);
int blkcg_activate_policy(struct request_queue *q,
			  const struct blkcg_policy *pol);
void blkcg_deactivate_policy(struct request_queue *q,
			     const struct blkcg_policy *pol);

const char *blkg_dev_name(struct blkcg_gq *blkg);
void blkcg_print_blkgs(struct seq_file *sf, struct blkcg *blkcg,
		       u64 (*prfill)(struct seq_file *,
				     struct blkg_policy_data *, int),
		       const struct blkcg_policy *pol, int data,
		       bool show_total);
u64 __blkg_prfill_u64(struct seq_file *sf, struct blkg_policy_data *pd, u64 v);

struct blkg_conf_ctx {
	struct gendisk			*disk;
	struct blkcg_gq			*blkg;
	char				*body;
};

struct gendisk *blkcg_conf_get_disk(char **inputp);
int blkg_conf_prep(struct blkcg *blkcg, const struct blkcg_policy *pol,
		   char *input, struct blkg_conf_ctx *ctx);
void blkg_conf_finish(struct blkg_conf_ctx *ctx);
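
/*
 * Illustrative only: a policy's cftype write handler typically brackets its
 * work with blkg_conf_prep()/blkg_conf_finish() (blkcg_policy_foo being the
 * hypothetical policy from the example above):
 *
 *	struct blkg_conf_ctx ctx;
 *	int ret;
 *
 *	ret = blkg_conf_prep(blkcg, &blkcg_policy_foo, buf, &ctx);
 *	if (ret)
 *		return ret;
 *	// parse ctx.body and update blkg_to_pd(ctx.blkg, &blkcg_policy_foo)
 *	blkg_conf_finish(&ctx);
 */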

/**
 * blkcg_css - find the current css
 *
 * Find the css associated with either the kthread or the current task.
 * This may return a dying css, so it is up to the caller to use tryget logic
 * to confirm it is alive and well.
 */
static inline struct cgroup_subsys_state *blkcg_css(void)
{
	struct cgroup_subsys_state *css;

	css = kthread_blkcg();
	if (css)
		return css;
	return task_css(current, io_cgrp_id);
}

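/* css_to_blkcg - return the blkcg embedding @css, or NULL if @css is NULL */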
static inline struct blkcg *css_to_blkcg(struct cgroup_subsys_state *css)
{
	return css ? container_of(css, struct blkcg, css) : NULL;
}

/**
 * __bio_blkcg - internal, inconsistent version to get blkcg
 *
 * DO NOT USE.
 * This function is inconsistent and consequently is dangerous to use.  The
 * first part of the function returns a blkcg where a reference is owned by the
 * bio.  This means it does not need to be rcu protected as it cannot go away
 * with the bio owning a reference to it.  However, the latter potentially gets
 * it from task_css().  This can race against task migration and the cgroup
 * dying.  It is also semantically different as it must be called rcu protected
 * and is susceptible to failure when trying to get a reference to it.
 * Therefore, it is not ok to assume that *_get() will always succeed on the
 * blkcg returned here.
 */
static inline struct blkcg *__bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return css_to_blkcg(blkcg_css());
}

/**
 * bio_blkcg - grab the blkcg associated with a bio
 * @bio: target bio
 *
 * This returns the blkcg associated with a bio, %NULL if not associated.
 * Callers are expected to either handle %NULL or know association has been
 * done prior to calling this.
 */
static inline struct blkcg *bio_blkcg(struct bio *bio)
{
	if (bio && bio->bi_blkg)
		return bio->bi_blkg->blkcg;
	return NULL;
}

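/*
 * blk_cgroup_congested - test whether the current task's blkcg or any of
 * its ancestors is currently marked congested (non-zero congestion_count,
 * see blkcg_use_delay() below).
 */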
static inline bool blk_cgroup_congested(void)
{
	struct cgroup_subsys_state *css;
	bool ret = false;

	rcu_read_lock();
	css = kthread_blkcg();
	if (!css)
		css = task_css(current, io_cgrp_id);
	while (css) {
		if (atomic_read(&css->cgroup->congestion_count)) {
			ret = true;
			break;
		}
		css = css->parent;
	}
	rcu_read_unlock();
	return ret;
}

/**
 * bio_issue_as_root_blkg - see if this bio needs to be issued as root blkg
 * @return: true if this bio needs to be submitted with the root blkg context.
 *
 * In order to avoid priority inversions we sometimes need to issue a bio as if
 * it were attached to the root blkg, and then backcharge to the actual owning
 * blkg.  The idea is we do bio_blkcg() to look up the actual context for the
 * bio and attach the appropriate blkg to the bio.  Then we call this helper
 * and if it is true run with the root blkg for that queue and then do any
 * backcharging to the originating cgroup once the io is complete.
 */
static inline bool bio_issue_as_root_blkg(struct bio *bio)
{
	return (bio->bi_opf & (REQ_META | REQ_SWAP)) != 0;
}

/**
 * blkcg_parent - get the parent of a blkcg
 * @blkcg: blkcg of interest
 *
 * Return the parent blkcg of @blkcg.  Can be called anytime.
 */
static inline struct blkcg *blkcg_parent(struct blkcg *blkcg)
{
	return css_to_blkcg(blkcg->css.parent);
}

/**
 * __blkg_lookup - internal version of blkg_lookup()
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 * @update_hint: whether to update lookup hint with the result or not
 *
 * This is the internal version and shouldn't be used by policy
 * implementations.  Looks up blkgs for the @blkcg - @q pair regardless of
 * @q's bypass state.  If @update_hint is %true, the caller should be
 * holding @q->queue_lock and lookup hint is updated on success.
 */
static inline struct blkcg_gq *__blkg_lookup(struct blkcg *blkcg,
					     struct request_queue *q,
					     bool update_hint)
{
	struct blkcg_gq *blkg;

	if (blkcg == &blkcg_root)
		return q->root_blkg;

	blkg = rcu_dereference(blkcg->blkg_hint);
	if (blkg && blkg->q == q)
		return blkg;

	return blkg_lookup_slowpath(blkcg, q, update_hint);
}

/**
 * blkg_lookup - lookup blkg for the specified blkcg - q pair
 * @blkcg: blkcg of interest
 * @q: request_queue of interest
 *
 * Lookup blkg for the @blkcg - @q pair.  This function should be called
 * under RCU read lock.
 */
static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg,
					   struct request_queue *q)
{
	WARN_ON_ONCE(!rcu_read_lock_held());
	return __blkg_lookup(blkcg, q, false);
}

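/*
 * Illustrative only: blkg_lookup() must be bracketed by the RCU read lock,
 * which also covers any use of the returned (unreferenced) blkg:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg)
 *		...;
 *	rcu_read_unlock();
 */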
/**
 * blk_queue_root_blkg - return blkg for the (blkcg_root, @q) pair
 * @q: request_queue of interest
 *
 * Lookup blkg for @q at the root level.  See also blkg_lookup().
 */
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{
	return q->root_blkg;
}

/**
 * blkg_to_pd - get policy private data
 * @blkg: blkg of interest
 * @pol: policy of interest
 *
 * Return pointer to private data associated with the @blkg-@pol pair.
 */
static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol)
{
	return blkg ? blkg->pd[pol->plid] : NULL;
}

static inline struct blkcg_policy_data *blkcg_to_cpd(struct blkcg *blkcg,
						     struct blkcg_policy *pol)
{
	return blkcg ? blkcg->cpd[pol->plid] : NULL;
}

/**
 * pd_to_blkg - get blkg associated with policy private data
 * @pd: policy private data of interest
 *
 * @pd is policy private data.  Determine the blkg it's associated with.
 */
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd)
{
	return pd ? pd->blkg : NULL;
}

static inline struct blkcg *cpd_to_blkcg(struct blkcg_policy_data *cpd)
{
	return cpd ? cpd->blkcg : NULL;
}

extern void blkcg_destroy_blkgs(struct blkcg *blkcg);

/**
 * blkcg_pin_online - pin online state
 * @blkcg: blkcg of interest
 *
 * While pinned, a blkcg is kept online.  This is primarily used to
 * impedance-match blkg and cgwb lifetimes so that blkg doesn't go offline
 * while an associated cgwb is still active.
 */
static inline void blkcg_pin_online(struct blkcg *blkcg)
{
	refcount_inc(&blkcg->online_pin);
}

/**
 * blkcg_unpin_online - unpin online state
 * @blkcg: blkcg of interest
 *
 * This is primarily used to impedance-match blkg and cgwb lifetimes so
 * that blkg doesn't go offline while an associated cgwb is still active.
 * When this count goes to zero, all active cgwbs have finished so the
 * blkcg can continue destruction by calling blkcg_destroy_blkgs().
 */
static inline void blkcg_unpin_online(struct blkcg *blkcg)
{
	do {
		if (!refcount_dec_and_test(&blkcg->online_pin))
			break;
		blkcg_destroy_blkgs(blkcg);
		blkcg = blkcg_parent(blkcg);
	} while (blkcg);
}

/**
 * blkg_path - format cgroup path of blkg
 * @blkg: blkg of interest
 * @buf: target buffer
 * @buflen: target buffer length
 *
 * Format the path of the cgroup of @blkg into @buf.
 */
static inline int blkg_path(struct blkcg_gq *blkg, char *buf, int buflen)
{
	return cgroup_path(blkg->blkcg->css.cgroup, buf, buflen);
}

/**
 * blkg_get - get a blkg reference
 * @blkg: blkg to get
 *
 * The caller should be holding an existing reference.
 */
static inline void blkg_get(struct blkcg_gq *blkg)
{
	percpu_ref_get(&blkg->refcnt);
}

/**
 * blkg_tryget - try and get a blkg reference
 * @blkg: blkg to get
 *
 * This is for use when doing an RCU lookup of the blkg.  We may be in the
 * midst of freeing this blkg, so we can only use it if the refcnt is not
 * zero.
 */
static inline bool blkg_tryget(struct blkcg_gq *blkg)
{
	return blkg && percpu_ref_tryget(&blkg->refcnt);
}

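/*
 * Illustrative only: pairing an RCU lookup with blkg_tryget()/blkg_put() so
 * the blkg can keep being used after the RCU section ends:
 *
 *	rcu_read_lock();
 *	blkg = blkg_lookup(blkcg, q);
 *	if (blkg_tryget(blkg)) {
 *		rcu_read_unlock();
 *		...;
 *		blkg_put(blkg);
 *	} else {
 *		rcu_read_unlock();
 *	}
 */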
/**
 * blkg_put - put a blkg reference
 * @blkg: blkg to put
 */
static inline void blkg_put(struct blkcg_gq *blkg)
{
	percpu_ref_put(&blkg->refcnt);
}

/**
 * blkg_for_each_descendant_pre - pre-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Walk @d_blkg through the descendants of @p_blkg.  Must be used with RCU
 * read locked.  If called under either blkcg or queue lock, the iteration
 * is guaranteed to include all and only online blkgs.  The caller may
 * update @pos_css by calling css_rightmost_descendant() to skip subtree.
 * @p_blkg is included in the iteration and the first node to be visited.
 */
#define blkg_for_each_descendant_pre(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_pre((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

/**
 * blkg_for_each_descendant_post - post-order walk of a blkg's descendants
 * @d_blkg: loop cursor pointing to the current descendant
 * @pos_css: used for iteration
 * @p_blkg: target blkg to walk descendants of
 *
 * Similar to blkg_for_each_descendant_pre() but performs post-order
 * traversal instead.  Synchronization rules are the same.  @p_blkg is
 * included in the iteration and the last node to be visited.
 */
#define blkg_for_each_descendant_post(d_blkg, pos_css, p_blkg)		\
	css_for_each_descendant_post((pos_css), &(p_blkg)->blkcg->css)	\
		if (((d_blkg) = __blkg_lookup(css_to_blkcg(pos_css),	\
					      (p_blkg)->q, false)))

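/*
 * Illustrative only: walking every blkg at or below parent_blkg under RCU:
 *
 *	struct cgroup_subsys_state *pos_css;
 *	struct blkcg_gq *blkg;
 *
 *	rcu_read_lock();
 *	blkg_for_each_descendant_pre(blkg, pos_css, parent_blkg) {
 *		...;
 *	}
 *	rcu_read_unlock();
 */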
bool __blkcg_punt_bio_submit(struct bio *bio);

static inline bool blkcg_punt_bio_submit(struct bio *bio)
{
	if (bio->bi_opf & REQ_CGROUP_PUNT)
		return __blkcg_punt_bio_submit(bio);
	else
		return false;
}

static inline void blkcg_bio_issue_init(struct bio *bio)
{
	bio_issue_init(&bio->bi_issue, bio_sectors(bio));
}

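/*
 * use_delay based congestion tracking: the first blkcg_use_delay() on a
 * blkg increments the owning cgroup's congestion_count (so that
 * blk_cgroup_congested() starts returning true for tasks in that subtree)
 * and the matching last blkcg_unuse_delay() decrements it again.
 */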
static inline void blkcg_use_delay(struct blkcg_gq *blkg)
{
	if (WARN_ON_ONCE(atomic_read(&blkg->use_delay) < 0))
		return;
	if (atomic_add_return(1, &blkg->use_delay) == 1)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);
}

static inline int blkcg_unuse_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	if (WARN_ON_ONCE(old < 0))
		return 0;
	if (old == 0)
		return 0;

	/*
	 * We do this song and dance because we can race with somebody else
	 * adding or removing delay.  If we just did an atomic_dec we'd end up
	 * negative and we'd already be in trouble.  We need to subtract 1 and
	 * then check to see if we were the last delay so we can drop the
	 * congestion count on the cgroup.
	 */
	while (old) {
		int cur = atomic_cmpxchg(&blkg->use_delay, old, old - 1);
		if (cur == old)
			break;
		old = cur;
	}

	if (old == 0)
		return 0;
	if (old == 1)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
	return 1;
}

/**
 * blkcg_set_delay - Enable allocator delay mechanism with the specified delay amount
 * @blkg: target blkg
 * @delay: delay duration in nsecs
 *
 * When enabled with this function, the delay is not decayed and must be
 * explicitly cleared with blkcg_clear_delay().  Must not be mixed with
 * blkcg_[un]use_delay() and blkcg_add_delay() usages.
 */
static inline void blkcg_set_delay(struct blkcg_gq *blkg, u64 delay)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person setting the congestion count for this blkg. */
	if (!old && atomic_cmpxchg(&blkg->use_delay, old, -1) == old)
		atomic_inc(&blkg->blkcg->css.cgroup->congestion_count);

	atomic64_set(&blkg->delay_nsec, delay);
}

/**
 * blkcg_clear_delay - Disable allocator delay mechanism
 * @blkg: target blkg
 *
 * Disable use_delay mechanism.  See blkcg_set_delay().
 */
static inline void blkcg_clear_delay(struct blkcg_gq *blkg)
{
	int old = atomic_read(&blkg->use_delay);

	/* We only want 1 person clearing the congestion count for this blkg. */
	if (old && atomic_cmpxchg(&blkg->use_delay, old, 0) == old)
		atomic_dec(&blkg->blkcg->css.cgroup->congestion_count);
}

/**
 * blk_cgroup_mergeable - Determine whether to allow or disallow merges
 * @rq: request to merge into
 * @bio: bio to merge
 *
 * @bio and @rq should belong to the same cgroup and their issue_as_root should
 * match.  The latter is necessary as we don't want to throttle e.g. a metadata
 * update because it happens to be next to a regular IO.
 */
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio)
{
	return rq->bio->bi_blkg == bio->bi_blkg &&
		bio_issue_as_root_blkg(rq->bio) == bio_issue_as_root_blkg(bio);
}

void blk_cgroup_bio_start(struct bio *bio);
void blkcg_add_delay(struct blkcg_gq *blkg, u64 now, u64 delta);
void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay);
void blkcg_maybe_throttle_current(void);
#else	/* CONFIG_BLK_CGROUP */

struct blkcg {
};

struct blkg_policy_data {
};

struct blkcg_policy_data {
};

struct blkcg_gq {
};

struct blkcg_policy {
};

#define blkcg_root_css	((struct cgroup_subsys_state *)ERR_PTR(-EINVAL))

static inline void blkcg_maybe_throttle_current(void) { }
static inline bool blk_cgroup_congested(void) { return false; }

#ifdef CONFIG_BLOCK

static inline void blkcg_schedule_throttle(struct request_queue *q, bool use_memdelay) { }

static inline struct blkcg_gq *blkg_lookup(struct blkcg *blkcg, void *key) { return NULL; }
static inline struct blkcg_gq *blk_queue_root_blkg(struct request_queue *q)
{ return NULL; }
static inline int blkcg_init_queue(struct request_queue *q) { return 0; }
static inline void blkcg_exit_queue(struct request_queue *q) { }
static inline int blkcg_policy_register(struct blkcg_policy *pol) { return 0; }
static inline void blkcg_policy_unregister(struct blkcg_policy *pol) { }
static inline int blkcg_activate_policy(struct request_queue *q,
					const struct blkcg_policy *pol) { return 0; }
static inline void blkcg_deactivate_policy(struct request_queue *q,
					   const struct blkcg_policy *pol) { }

static inline struct blkcg *__bio_blkcg(struct bio *bio) { return NULL; }
static inline struct blkcg *bio_blkcg(struct bio *bio) { return NULL; }

static inline struct blkg_policy_data *blkg_to_pd(struct blkcg_gq *blkg,
						  struct blkcg_policy *pol) { return NULL; }
static inline struct blkcg_gq *pd_to_blkg(struct blkg_policy_data *pd) { return NULL; }
static inline char *blkg_path(struct blkcg_gq *blkg) { return NULL; }
static inline void blkg_get(struct blkcg_gq *blkg) { }
static inline void blkg_put(struct blkcg_gq *blkg) { }

static inline bool blkcg_punt_bio_submit(struct bio *bio) { return false; }
static inline void blkcg_bio_issue_init(struct bio *bio) { }
static inline void blk_cgroup_bio_start(struct bio *bio) { }
static inline bool blk_cgroup_mergeable(struct request *rq, struct bio *bio) { return true; }

#define blk_queue_for_each_rl(rl, q)	\
	for ((rl) = &(q)->root_rl; (rl); (rl) = NULL)

#endif	/* CONFIG_BLOCK */
#endif	/* CONFIG_BLK_CGROUP */
#endif	/* _BLK_CGROUP_H */