// SPDX-License-Identifier: GPL-2.0
/*
 * Block stat tracking code
 *
 * Copyright (C) 2016 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/rculist.h>
#include <linux/blk-mq.h>

#include "blk-stat.h"
#include "blk-mq.h"
#include "blk.h"

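/*
 * Per-queue statistics state: the list of registered callbacks (walked
 * under RCU from the completion path), the lock serializing updates to
 * that list, and a flag that keeps accounting enabled even when no
 * callbacks are registered.
 */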
struct blk_queue_stats {
	struct list_head callbacks;
	spinlock_t lock;
	bool enable_accounting;
};

void blk_rq_stat_init(struct blk_rq_stat *stat)
{
	stat->min = -1ULL;
	stat->max = stat->nr_samples = stat->mean = 0;
	stat->batch = 0;
}

/* src is a per-cpu stat, mean isn't initialized */
void blk_rq_stat_sum(struct blk_rq_stat *dst, struct blk_rq_stat *src)
{
	if (!src->nr_samples)
		return;

	dst->min = min(dst->min, src->min);
	dst->max = max(dst->max, src->max);

	dst->mean = div_u64(src->batch + dst->mean * dst->nr_samples,
				dst->nr_samples + src->nr_samples);

	dst->nr_samples += src->nr_samples;
}

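/*
 * Worked example for the blk_rq_stat_sum() merge above: with dst->mean = 10
 * over dst->nr_samples = 4 and an unaveraged src->batch = 60 over
 * src->nr_samples = 2, the combined mean is (60 + 10 * 4) / (4 + 2) = 16
 * (div_u64() truncates).
 */
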
void blk_rq_stat_add(struct blk_rq_stat *stat, u64 value)
{
	stat->min = min(stat->min, value);
	stat->max = max(stat->max, value);
	stat->batch += value;
	stat->nr_samples++;
}

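/*
 * Called from the request completion path with the completion time in @now:
 * computes the I/O duration, feeds it to blk-throttle, and adds it to the
 * matching per-cpu bucket of every active callback, all under
 * rcu_read_lock() so callbacks can be removed concurrently.
 */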
void blk_stat_add(struct request *rq, u64 now)
{
	struct request_queue *q = rq->q;
	struct blk_stat_callback *cb;
	struct blk_rq_stat *stat;
	int bucket, cpu;
	u64 value;

	value = (now >= rq->io_start_time_ns) ? now - rq->io_start_time_ns : 0;

	blk_throtl_stat_add(rq, value);

	rcu_read_lock();
	cpu = get_cpu();
	list_for_each_entry_rcu(cb, &q->stats->callbacks, list) {
		if (!blk_stat_is_active(cb))
			continue;

		bucket = cb->bucket_fn(rq);
		if (bucket < 0)
			continue;

		stat = &per_cpu_ptr(cb->cpu_stat, cpu)[bucket];
		blk_rq_stat_add(stat, value);
	}
	put_cpu();
	rcu_read_unlock();
}

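/*
 * Timer callback: folds each online CPU's partial buckets into cb->stat[]
 * (resetting the per-cpu copies) and then hands the aggregated window to
 * the owner's timer_fn.
 */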
static void blk_stat_timer_fn(struct timer_list *t)
{
	struct blk_stat_callback *cb = from_timer(cb, t, timer);
	unsigned int bucket;
	int cpu;

	for (bucket = 0; bucket < cb->buckets; bucket++)
		blk_rq_stat_init(&cb->stat[bucket]);

	for_each_online_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++) {
			blk_rq_stat_sum(&cb->stat[bucket], &cpu_stat[bucket]);
			blk_rq_stat_init(&cpu_stat[bucket]);
		}
	}

	cb->timer_fn(cb);
}

struct blk_stat_callback *
blk_stat_alloc_callback(void (*timer_fn)(struct blk_stat_callback *),
			int (*bucket_fn)(const struct request *),
			unsigned int buckets, void *data)
{
	struct blk_stat_callback *cb;

	cb = kmalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	cb->stat = kmalloc_array(buckets, sizeof(struct blk_rq_stat),
				 GFP_KERNEL);
	if (!cb->stat) {
		kfree(cb);
		return NULL;
	}
	cb->cpu_stat = __alloc_percpu(buckets * sizeof(struct blk_rq_stat),
				      __alignof__(struct blk_rq_stat));
	if (!cb->cpu_stat) {
		kfree(cb->stat);
		kfree(cb);
		return NULL;
	}

	cb->timer_fn = timer_fn;
	cb->bucket_fn = bucket_fn;
	cb->data = data;
	cb->buckets = buckets;
	timer_setup(&cb->timer, blk_stat_timer_fn, 0);

	return cb;
}

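/*
 * Usage sketch (illustrative only; the example_* names are hypothetical):
 * a consumer that tracks reads and writes separately could register a
 * two-bucket callback and arm its timer, e.g. via the
 * blk_stat_activate_msecs() helper declared in blk-stat.h:
 *
 *	static int example_bucket_fn(const struct request *rq)
 *	{
 *		return op_is_write(req_op(rq)) ? 1 : 0;
 *	}
 *
 *	static void example_timer_fn(struct blk_stat_callback *cb)
 *	{
 *		pr_debug("read mean %llu, write mean %llu\n",
 *			 cb->stat[0].mean, cb->stat[1].mean);
 *	}
 *
 *	cb = blk_stat_alloc_callback(example_timer_fn, example_bucket_fn,
 *				     2, NULL);
 *	if (cb) {
 *		blk_stat_add_callback(q, cb);
 *		blk_stat_activate_msecs(cb, 100);
 *	}
 */
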
void blk_stat_add_callback(struct request_queue *q,
			   struct blk_stat_callback *cb)
{
	unsigned int bucket;
	unsigned long flags;
	int cpu;

	for_each_possible_cpu(cpu) {
		struct blk_rq_stat *cpu_stat;

		cpu_stat = per_cpu_ptr(cb->cpu_stat, cpu);
		for (bucket = 0; bucket < cb->buckets; bucket++)
			blk_rq_stat_init(&cpu_stat[bucket]);
	}

	spin_lock_irqsave(&q->stats->lock, flags);
	list_add_tail_rcu(&cb->list, &q->stats->callbacks);
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}

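/*
 * Removal is the mirror image of blk_stat_add_callback(): unlink under the
 * lock, drop QUEUE_FLAG_STATS if nothing needs accounting anymore, then
 * del_timer_sync() so timer_fn cannot be running once this returns.
 * Completion-path readers may still hold the callback until an RCU grace
 * period passes, hence the RCU free below.
 */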
void blk_stat_remove_callback(struct request_queue *q,
			      struct blk_stat_callback *cb)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	list_del_rcu(&cb->list);
	if (list_empty(&q->stats->callbacks) && !q->stats->enable_accounting)
		blk_queue_flag_clear(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);

	del_timer_sync(&cb->timer);
}

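/*
 * RCU callback: by the time this runs, no blk_stat_add() walker can still
 * reference @cb, so its per-cpu buckets and bucket array can be freed.
 */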
static void blk_stat_free_callback_rcu(struct rcu_head *head)
{
	struct blk_stat_callback *cb;

	cb = container_of(head, struct blk_stat_callback, rcu);
	free_percpu(cb->cpu_stat);
	kfree(cb->stat);
	kfree(cb);
}

void blk_stat_free_callback(struct blk_stat_callback *cb)
{
	if (cb)
		call_rcu(&cb->rcu, blk_stat_free_callback_rcu);
}

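/*
 * Permanently enable request statistics for @q even with no callbacks
 * registered: sets QUEUE_FLAG_STATS and records enable_accounting so that
 * blk_stat_remove_callback() never clears the flag again.
 */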
void blk_stat_enable_accounting(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(&q->stats->lock, flags);
	q->stats->enable_accounting = true;
	blk_queue_flag_set(QUEUE_FLAG_STATS, q);
	spin_unlock_irqrestore(&q->stats->lock, flags);
}
EXPORT_SYMBOL_GPL(blk_stat_enable_accounting);

struct blk_queue_stats *blk_alloc_queue_stats(void)
{
	struct blk_queue_stats *stats;

	stats = kmalloc(sizeof(*stats), GFP_KERNEL);
	if (!stats)
		return NULL;

	INIT_LIST_HEAD(&stats->callbacks);
	spin_lock_init(&stats->lock);
	stats->enable_accounting = false;

	return stats;
}

void blk_free_queue_stats(struct blk_queue_stats *stats)
{
	if (!stats)
		return;

	WARN_ON(!list_empty(&stats->callbacks));

	kfree(stats);
}