xref: /OK3568_Linux_fs/kernel/block/mq-deadline-cgroup.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0

#include <linux/blk-cgroup.h>
#include <linux/ioprio.h>

#include "mq-deadline-cgroup.h"

static struct blkcg_policy dd_blkcg_policy;

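/*
 * cpd_alloc_fn: allocate the per-cgroup policy data (struct dd_blkcg)
 * together with its per-CPU statistics counters.
 */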
static struct blkcg_policy_data *dd_cpd_alloc(gfp_t gfp)
{
	struct dd_blkcg *pd;

	pd = kzalloc(sizeof(*pd), gfp);
	if (!pd)
		return NULL;
	pd->stats = alloc_percpu_gfp(typeof(*pd->stats),
				     GFP_KERNEL | __GFP_ZERO);
	if (!pd->stats) {
		kfree(pd);
		return NULL;
	}
	return &pd->cpd;
}

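/*
 * cpd_free_fn: release the per-CPU statistics and the dd_blkcg structure
 * allocated by dd_cpd_alloc().
 */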
static void dd_cpd_free(struct blkcg_policy_data *cpd)
{
	struct dd_blkcg *dd_blkcg = container_of(cpd, typeof(*dd_blkcg), cpd);

	free_percpu(dd_blkcg->stats);
	kfree(dd_blkcg);
}

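/*
 * Return the dd_blkcg instance of the cgroup that owns the given
 * (blkcg, request queue) policy data.
 */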
static struct dd_blkcg *dd_blkcg_from_pd(struct blkg_policy_data *pd)
{
	return container_of(blkcg_to_cpd(pd->blkg->blkcg, &dd_blkcg_policy),
			    struct dd_blkcg, cpd);
}

/*
 * Convert a bio's association between a block cgroup and a request queue
 * into a pointer to the per-cgroup mq-deadline data (struct dd_blkcg).
 */
struct dd_blkcg *dd_blkcg_from_bio(struct bio *bio)
{
	struct blkg_policy_data *pd;

	pd = blkg_to_pd(bio->bi_blkg, &dd_blkcg_policy);
	if (!pd)
		return NULL;

	return dd_blkcg_from_pd(pd);
}

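/*
 * pd_stat_fn: emit per-I/O-priority-class statistics. The dispatched column
 * reports dispatched + merged - completed and the inserted column reports
 * inserted - completed, i.e. completed requests are subtracted out of both;
 * the merged count is reported as-is.
 */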
static size_t dd_pd_stat(struct blkg_policy_data *pd, char *buf, size_t size)
{
	static const char *const prio_class_name[] = {
		[IOPRIO_CLASS_NONE]	= "NONE",
		[IOPRIO_CLASS_RT]	= "RT",
		[IOPRIO_CLASS_BE]	= "BE",
		[IOPRIO_CLASS_IDLE]	= "IDLE",
	};
	struct dd_blkcg *blkcg = dd_blkcg_from_pd(pd);
	int res = 0;
	u8 prio;

	for (prio = 0; prio < ARRAY_SIZE(blkcg->stats->stats); prio++)
		res += scnprintf(buf + res, size - res,
			" [%s] dispatched=%u inserted=%u merged=%u",
			prio_class_name[prio],
			ddcg_sum(blkcg, dispatched, prio) +
			ddcg_sum(blkcg, merged, prio) -
			ddcg_sum(blkcg, completed, prio),
			ddcg_sum(blkcg, inserted, prio) -
			ddcg_sum(blkcg, completed, prio),
			ddcg_sum(blkcg, merged, prio));

	return res;
}

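/* pd_alloc_fn: allocate the per-(cgroup, request queue) policy data. */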
static struct blkg_policy_data *dd_pd_alloc(gfp_t gfp, struct request_queue *q,
					    struct blkcg *blkcg)
{
	struct dd_blkg *pd;

	pd = kzalloc(sizeof(*pd), gfp);
	if (!pd)
		return NULL;
	return &pd->pd;
}

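/* pd_free_fn: free the per-(cgroup, request queue) policy data. */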
static void dd_pd_free(struct blkg_policy_data *pd)
{
	struct dd_blkg *dd_blkg = container_of(pd, typeof(*dd_blkg), pd);

	kfree(dd_blkg);
}

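/*
 * blk-cgroup policy descriptor for mq-deadline. The per-cgroup data (cpd)
 * carries the statistics reported by dd_pd_stat().
 */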
static struct blkcg_policy dd_blkcg_policy = {
	.cpd_alloc_fn		= dd_cpd_alloc,
	.cpd_free_fn		= dd_cpd_free,

	.pd_alloc_fn		= dd_pd_alloc,
	.pd_free_fn		= dd_pd_free,
	.pd_stat_fn		= dd_pd_stat,
};

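/* Enable this blk-cgroup policy for a request queue. */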
int dd_activate_policy(struct request_queue *q)
{
	return blkcg_activate_policy(q, &dd_blkcg_policy);
}

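/* Disable this blk-cgroup policy for a request queue. */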
void dd_deactivate_policy(struct request_queue *q)
{
	blkcg_deactivate_policy(q, &dd_blkcg_policy);
}

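/* Register the policy with the blk-cgroup core at initialization time. */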
int __init dd_blkcg_init(void)
{
	return blkcg_policy_register(&dd_blkcg_policy);
}

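/* Unregister the policy from the blk-cgroup core on exit. */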
void __exit dd_blkcg_exit(void)
{
	blkcg_policy_unregister(&dd_blkcg_policy);
}