// SPDX-License-Identifier: GPL-2.0
/* drivers/md/dm-stats.c */
#include <linux/errno.h>
#include <linux/numa.h>
#include <linux/slab.h>
#include <linux/rculist.h>
#include <linux/threads.h>
#include <linux/preempt.h>
#include <linux/irqflags.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/device-mapper.h>

#include "dm-core.h"
#include "dm-stats.h"

#define DM_MSG_PREFIX "stats"

static int dm_stat_need_rcu_barrier;

/*
 * Using 64-bit values to avoid overflow (which is a
 * problem that block/genhd.c's IO accounting has).
 */
struct dm_stat_percpu {
	unsigned long long sectors[2];
	unsigned long long ios[2];
	unsigned long long merges[2];
	unsigned long long ticks[2];
	unsigned long long io_ticks[2];
	unsigned long long io_ticks_total;
	unsigned long long time_in_queue;
	unsigned long long *histogram;
};

struct dm_stat_shared {
	atomic_t in_flight[2];
	unsigned long long stamp;
	struct dm_stat_percpu tmp;
};

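/*
 * One dm_stat describes one statistics region: stat_shared[] holds one
 * entry per step-sized area (a flexible array of n_entries elements), and
 * stat_percpu[] points to per-CPU counter arrays of the same length.
 * The [2] counter pairs throughout are indexed by READ/WRITE.
 */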
struct dm_stat {
	struct list_head list_entry;
	int id;
	unsigned stat_flags;
	size_t n_entries;
	sector_t start;
	sector_t end;
	sector_t step;
	unsigned n_histogram_entries;
	unsigned long long *histogram_boundaries;
	const char *program_id;
	const char *aux_data;
	struct rcu_head rcu_head;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *stat_percpu[NR_CPUS];
	struct dm_stat_shared stat_shared[];
};

#define STAT_PRECISE_TIMESTAMPS		1

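/*
 * Per-CPU record of where the previous bio ended, used only to decide
 * whether the next bio would have been merged with it (see
 * dm_stats_account_io()).
 */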
struct dm_stats_last_position {
	sector_t last_sector;
	unsigned last_rw;
};

/*
 * A typo on the command line could possibly make the kernel run out of memory
 * and crash. To prevent the crash we account all used memory. We fail if we
 * exhaust 1/4 of all memory or 1/2 of vmalloc space.
 */
#define DM_STATS_MEMORY_FACTOR		4
#define DM_STATS_VMALLOC_FACTOR		2
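
/*
 * For example, on a machine with 4 GiB of RAM, allocations start failing
 * once the total accounted statistics memory would exceed 1 GiB (4 GiB / 4).
 */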

static DEFINE_SPINLOCK(shared_memory_lock);

static unsigned long shared_memory_amount;

static bool __check_shared_memory(size_t alloc_size)
{
	size_t a;

	a = shared_memory_amount + alloc_size;
	if (a < shared_memory_amount)
		return false;
	if (a >> PAGE_SHIFT > totalram_pages() / DM_STATS_MEMORY_FACTOR)
		return false;
#ifdef CONFIG_MMU
	if (a > (VMALLOC_END - VMALLOC_START) / DM_STATS_VMALLOC_FACTOR)
		return false;
#endif
	return true;
}

static bool check_shared_memory(size_t alloc_size)
{
	bool ret;

	spin_lock_irq(&shared_memory_lock);

	ret = __check_shared_memory(alloc_size);

	spin_unlock_irq(&shared_memory_lock);

	return ret;
}

static bool claim_shared_memory(size_t alloc_size)
{
	spin_lock_irq(&shared_memory_lock);

	if (!__check_shared_memory(alloc_size)) {
		spin_unlock_irq(&shared_memory_lock);
		return false;
	}

	shared_memory_amount += alloc_size;

	spin_unlock_irq(&shared_memory_lock);

	return true;
}

static void free_shared_memory(size_t alloc_size)
{
	unsigned long flags;

	spin_lock_irqsave(&shared_memory_lock, flags);

	if (WARN_ON_ONCE(shared_memory_amount < alloc_size)) {
		spin_unlock_irqrestore(&shared_memory_lock, flags);
		DMCRIT("Memory usage accounting bug.");
		return;
	}

	shared_memory_amount -= alloc_size;

	spin_unlock_irqrestore(&shared_memory_lock, flags);
}

static void *dm_kvzalloc(size_t alloc_size, int node)
{
	void *p;

	if (!claim_shared_memory(alloc_size))
		return NULL;

	p = kvzalloc_node(alloc_size, GFP_KERNEL | __GFP_NOMEMALLOC, node);
	if (p)
		return p;

	free_shared_memory(alloc_size);

	return NULL;
}

static void dm_kvfree(void *ptr, size_t alloc_size)
{
	if (!ptr)
		return;

	free_shared_memory(alloc_size);

	kvfree(ptr);
}

static void dm_stat_free(struct rcu_head *head)
{
	int cpu;
	struct dm_stat *s = container_of(head, struct dm_stat, rcu_head);

	kfree(s->histogram_boundaries);
	kfree(s->program_id);
	kfree(s->aux_data);
	for_each_possible_cpu(cpu) {
		dm_kvfree(s->stat_percpu[cpu][0].histogram, s->histogram_alloc_size);
		dm_kvfree(s->stat_percpu[cpu], s->percpu_alloc_size);
	}
	dm_kvfree(s->stat_shared[0].tmp.histogram, s->histogram_alloc_size);
	dm_kvfree(s, s->shared_alloc_size);
}

static int dm_stat_in_flight(struct dm_stat_shared *shared)
{
	return atomic_read(&shared->in_flight[READ]) +
	       atomic_read(&shared->in_flight[WRITE]);
}

void dm_stats_init(struct dm_stats *stats)
{
	int cpu;
	struct dm_stats_last_position *last;

	mutex_init(&stats->mutex);
	INIT_LIST_HEAD(&stats->list);
	stats->last = alloc_percpu(struct dm_stats_last_position);
	for_each_possible_cpu(cpu) {
		last = per_cpu_ptr(stats->last, cpu);
		last->last_sector = (sector_t)ULLONG_MAX;
		last->last_rw = UINT_MAX;
	}
}

void dm_stats_cleanup(struct dm_stats *stats)
{
	size_t ni;
	struct dm_stat *s;
	struct dm_stat_shared *shared;

	while (!list_empty(&stats->list)) {
		s = container_of(stats->list.next, struct dm_stat, list_entry);
		list_del(&s->list_entry);
		for (ni = 0; ni < s->n_entries; ni++) {
			shared = &s->stat_shared[ni];
			if (WARN_ON(dm_stat_in_flight(shared))) {
				DMCRIT("leaked in-flight counter at index %lu "
				       "(start %llu, end %llu, step %llu): reads %d, writes %d",
				       (unsigned long)ni,
				       (unsigned long long)s->start,
				       (unsigned long long)s->end,
				       (unsigned long long)s->step,
				       atomic_read(&shared->in_flight[READ]),
				       atomic_read(&shared->in_flight[WRITE]));
			}
			cond_resched();
		}
		dm_stat_free(&s->rcu_head);
	}
	free_percpu(stats->last);
	mutex_destroy(&stats->mutex);
}

static int dm_stats_create(struct dm_stats *stats, sector_t start, sector_t end,
			   sector_t step, unsigned stat_flags,
			   unsigned n_histogram_entries,
			   unsigned long long *histogram_boundaries,
			   const char *program_id, const char *aux_data,
			   void (*suspend_callback)(struct mapped_device *),
			   void (*resume_callback)(struct mapped_device *),
			   struct mapped_device *md)
{
	struct list_head *l;
	struct dm_stat *s, *tmp_s;
	sector_t n_entries;
	size_t ni;
	size_t shared_alloc_size;
	size_t percpu_alloc_size;
	size_t histogram_alloc_size;
	struct dm_stat_percpu *p;
	int cpu;
	int ret_id;
	int r;

	if (end < start || !step)
		return -EINVAL;

	n_entries = end - start;
	if (dm_sector_div64(n_entries, step))
		n_entries++;

	if (n_entries != (size_t)n_entries || !(size_t)(n_entries + 1))
		return -EOVERFLOW;

	shared_alloc_size = struct_size(s, stat_shared, n_entries);
	if ((shared_alloc_size - sizeof(struct dm_stat)) / sizeof(struct dm_stat_shared) != n_entries)
		return -EOVERFLOW;

	percpu_alloc_size = (size_t)n_entries * sizeof(struct dm_stat_percpu);
	if (percpu_alloc_size / sizeof(struct dm_stat_percpu) != n_entries)
		return -EOVERFLOW;

	histogram_alloc_size = (n_histogram_entries + 1) * (size_t)n_entries * sizeof(unsigned long long);
	if (histogram_alloc_size / (n_histogram_entries + 1) != (size_t)n_entries * sizeof(unsigned long long))
		return -EOVERFLOW;

	if (!check_shared_memory(shared_alloc_size + histogram_alloc_size +
				 num_possible_cpus() * (percpu_alloc_size + histogram_alloc_size)))
		return -ENOMEM;

	s = dm_kvzalloc(shared_alloc_size, NUMA_NO_NODE);
	if (!s)
		return -ENOMEM;

	s->stat_flags = stat_flags;
	s->n_entries = n_entries;
	s->start = start;
	s->end = end;
	s->step = step;
	s->shared_alloc_size = shared_alloc_size;
	s->percpu_alloc_size = percpu_alloc_size;
	s->histogram_alloc_size = histogram_alloc_size;

	s->n_histogram_entries = n_histogram_entries;
	s->histogram_boundaries = kmemdup(histogram_boundaries,
					  s->n_histogram_entries * sizeof(unsigned long long), GFP_KERNEL);
	if (!s->histogram_boundaries) {
		r = -ENOMEM;
		goto out;
	}

	s->program_id = kstrdup(program_id, GFP_KERNEL);
	if (!s->program_id) {
		r = -ENOMEM;
		goto out;
	}
	s->aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!s->aux_data) {
		r = -ENOMEM;
		goto out;
	}

	for (ni = 0; ni < n_entries; ni++) {
		atomic_set(&s->stat_shared[ni].in_flight[READ], 0);
		atomic_set(&s->stat_shared[ni].in_flight[WRITE], 0);
		cond_resched();
	}

	if (s->n_histogram_entries) {
		unsigned long long *hi;
		hi = dm_kvzalloc(s->histogram_alloc_size, NUMA_NO_NODE);
		if (!hi) {
			r = -ENOMEM;
			goto out;
		}
		for (ni = 0; ni < n_entries; ni++) {
			s->stat_shared[ni].tmp.histogram = hi;
			hi += s->n_histogram_entries + 1;
			cond_resched();
		}
	}

	for_each_possible_cpu(cpu) {
		p = dm_kvzalloc(percpu_alloc_size, cpu_to_node(cpu));
		if (!p) {
			r = -ENOMEM;
			goto out;
		}
		s->stat_percpu[cpu] = p;
		if (s->n_histogram_entries) {
			unsigned long long *hi;
			hi = dm_kvzalloc(s->histogram_alloc_size, cpu_to_node(cpu));
			if (!hi) {
				r = -ENOMEM;
				goto out;
			}
			for (ni = 0; ni < n_entries; ni++) {
				p[ni].histogram = hi;
				hi += s->n_histogram_entries + 1;
				cond_resched();
			}
		}
	}

	/*
	 * Suspend/resume to make sure there is no i/o in flight,
	 * so that newly created statistics will be exact.
	 *
	 * (note: we couldn't suspend earlier because we must not
	 * allocate memory while suspended)
	 */
	suspend_callback(md);

	mutex_lock(&stats->mutex);
	s->id = 0;
	list_for_each(l, &stats->list) {
		tmp_s = container_of(l, struct dm_stat, list_entry);
		if (WARN_ON(tmp_s->id < s->id)) {
			r = -EINVAL;
			goto out_unlock_resume;
		}
		if (tmp_s->id > s->id)
			break;
		if (unlikely(s->id == INT_MAX)) {
			r = -ENFILE;
			goto out_unlock_resume;
		}
		s->id++;
	}
	ret_id = s->id;
	list_add_tail_rcu(&s->list_entry, l);
	mutex_unlock(&stats->mutex);

	resume_callback(md);

	return ret_id;

out_unlock_resume:
	mutex_unlock(&stats->mutex);
	resume_callback(md);
out:
	dm_stat_free(&s->rcu_head);
	return r;
}

static struct dm_stat *__dm_stats_find(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	list_for_each_entry(s, &stats->list, list_entry) {
		if (s->id > id)
			break;
		if (s->id == id)
			return s;
	}

	return NULL;
}

static int dm_stats_delete(struct dm_stats *stats, int id)
{
	struct dm_stat *s;
	int cpu;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	list_del_rcu(&s->list_entry);
	mutex_unlock(&stats->mutex);

	/*
	 * vfree can't be called from RCU callback
	 */
	for_each_possible_cpu(cpu)
		if (is_vmalloc_addr(s->stat_percpu) ||
		    is_vmalloc_addr(s->stat_percpu[cpu][0].histogram))
			goto do_sync_free;
	if (is_vmalloc_addr(s) ||
	    is_vmalloc_addr(s->stat_shared[0].tmp.histogram)) {
do_sync_free:
		synchronize_rcu_expedited();
		dm_stat_free(&s->rcu_head);
	} else {
		WRITE_ONCE(dm_stat_need_rcu_barrier, 1);
		call_rcu(&s->rcu_head, dm_stat_free);
	}
	return 0;
}

static int dm_stats_list(struct dm_stats *stats, const char *program,
			 char *result, unsigned maxlen)
{
	struct dm_stat *s;
	sector_t len;
	unsigned sz = 0;

	/*
	 * Output format:
	 *   <region_id>: <start_sector>+<length> <step> <program_id> <aux_data>
	 */
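	/*
	 * e.g. a region covering a whole 1 GiB device in 1024-sector areas
	 * might list as "0: 0+2097152 1024 p0 aux" (illustrative values).
	 */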

	mutex_lock(&stats->mutex);
	list_for_each_entry(s, &stats->list, list_entry) {
		if (!program || !strcmp(program, s->program_id)) {
			len = s->end - s->start;
			DMEMIT("%d: %llu+%llu %llu %s %s", s->id,
				(unsigned long long)s->start,
				(unsigned long long)len,
				(unsigned long long)s->step,
				s->program_id,
				s->aux_data);
			if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
				DMEMIT(" precise_timestamps");
			if (s->n_histogram_entries) {
				unsigned i;
				DMEMIT(" histogram:");
				for (i = 0; i < s->n_histogram_entries; i++) {
					if (i)
						DMEMIT(",");
					DMEMIT("%llu", s->histogram_boundaries[i]);
				}
			}
			DMEMIT("\n");
		}
		cond_resched();
	}
	mutex_unlock(&stats->mutex);

	return 1;
}

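/*
 * Charge the time elapsed since the last stamp to whichever directions
 * currently have I/O in flight, then advance the stamp. This mirrors what
 * the block layer's part_round_stats_single() does for generic disk
 * statistics.
 */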
static void dm_stat_round(struct dm_stat *s, struct dm_stat_shared *shared,
			  struct dm_stat_percpu *p)
{
	/*
	 * This is racy, but so is part_round_stats_single.
	 */
	unsigned long long now, difference;
	unsigned in_flight_read, in_flight_write;

	if (likely(!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)))
		now = jiffies;
	else
		now = ktime_to_ns(ktime_get());

	difference = now - shared->stamp;
	if (!difference)
		return;

	in_flight_read = (unsigned)atomic_read(&shared->in_flight[READ]);
	in_flight_write = (unsigned)atomic_read(&shared->in_flight[WRITE]);
	if (in_flight_read)
		p->io_ticks[READ] += difference;
	if (in_flight_write)
		p->io_ticks[WRITE] += difference;
	if (in_flight_read + in_flight_write) {
		p->io_ticks_total += difference;
		p->time_in_queue += (in_flight_read + in_flight_write) * difference;
	}
	shared->stamp = now;
}

static void dm_stat_for_entry(struct dm_stat *s, size_t entry,
			      int idx, sector_t len,
			      struct dm_stats_aux *stats_aux, bool end,
			      unsigned long duration_jiffies)
{
	struct dm_stat_shared *shared = &s->stat_shared[entry];
	struct dm_stat_percpu *p;

	/*
	 * For strict correctness we should use local_irq_save/restore
	 * instead of preempt_disable/enable.
	 *
	 * preempt_disable/enable is racy if the driver finishes bios
	 * from non-interrupt context as well as from interrupt context
	 * or from more different interrupts.
	 *
	 * On 64-bit architectures the race only results in not counting some
	 * events, so it is acceptable.  On 32-bit architectures the race could
	 * cause the counter going off by 2^32, so we need to do proper locking
	 * there.
	 *
	 * part_stat_lock()/part_stat_unlock() have this race too.
	 */
#if BITS_PER_LONG == 32
	unsigned long flags;
	local_irq_save(flags);
#else
	preempt_disable();
#endif
	p = &s->stat_percpu[smp_processor_id()][entry];

	if (!end) {
		dm_stat_round(s, shared, p);
		atomic_inc(&shared->in_flight[idx]);
	} else {
		unsigned long long duration;
		dm_stat_round(s, shared, p);
		atomic_dec(&shared->in_flight[idx]);
		p->sectors[idx] += len;
		p->ios[idx] += 1;
		p->merges[idx] += stats_aux->merged;
		if (!(s->stat_flags & STAT_PRECISE_TIMESTAMPS)) {
			p->ticks[idx] += duration_jiffies;
			duration = jiffies_to_msecs(duration_jiffies);
		} else {
			p->ticks[idx] += stats_aux->duration_ns;
			duration = stats_aux->duration_ns;
		}
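		/*
		 * Binary search for the histogram bin: n boundaries define
		 * n + 1 bins, and bin "lo" ends up holding durations in
		 * [histogram_boundaries[lo - 1], histogram_boundaries[lo]).
		 */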
		if (s->n_histogram_entries) {
			unsigned lo = 0, hi = s->n_histogram_entries + 1;
			while (lo + 1 < hi) {
				unsigned mid = (lo + hi) / 2;
				if (s->histogram_boundaries[mid - 1] > duration)
					hi = mid;
				else
					lo = mid;
			}
			p->histogram[lo]++;
		}
	}

#if BITS_PER_LONG == 32
	local_irq_restore(flags);
#else
	preempt_enable();
#endif
}

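/*
 * Split one bio's sector range across the step-sized areas of a region and
 * update each affected entry. A bio that crosses an area boundary is
 * accounted to every area it touches, with only the overlapping fragment's
 * length added to the sector counters.
 */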
static void __dm_stat_bio(struct dm_stat *s, int bi_rw,
			  sector_t bi_sector, sector_t end_sector,
			  bool end, unsigned long duration_jiffies,
			  struct dm_stats_aux *stats_aux)
{
	sector_t rel_sector, offset, todo, fragment_len;
	size_t entry;

	if (end_sector <= s->start || bi_sector >= s->end)
		return;
	if (unlikely(bi_sector < s->start)) {
		rel_sector = 0;
		todo = end_sector - s->start;
	} else {
		rel_sector = bi_sector - s->start;
		todo = end_sector - bi_sector;
	}
	if (unlikely(end_sector > s->end))
		todo -= (end_sector - s->end);

	offset = dm_sector_div64(rel_sector, s->step);
	entry = rel_sector;
	do {
		if (WARN_ON_ONCE(entry >= s->n_entries)) {
			DMCRIT("Invalid area access in region id %d", s->id);
			return;
		}
		fragment_len = todo;
		if (fragment_len > s->step - offset)
			fragment_len = s->step - offset;
		dm_stat_for_entry(s, entry, bi_rw, fragment_len,
				  stats_aux, end, duration_jiffies);
		todo -= fragment_len;
		entry++;
		offset = 0;
	} while (unlikely(todo != 0));
}

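/*
 * Entry point called by dm core for every bio, once when it is submitted
 * (end == false) and once when it completes (end == true). The submission
 * call records the position for merge detection and bumps the in-flight
 * counters; the completion call accumulates the I/O counters.
 */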
void dm_stats_account_io(struct dm_stats *stats, unsigned long bi_rw,
			 sector_t bi_sector, unsigned bi_sectors, bool end,
			 unsigned long duration_jiffies,
			 struct dm_stats_aux *stats_aux)
{
	struct dm_stat *s;
	sector_t end_sector;
	struct dm_stats_last_position *last;
	bool got_precise_time;

	if (unlikely(!bi_sectors))
		return;

	end_sector = bi_sector + bi_sectors;

	if (!end) {
		/*
		 * A race condition can at worst result in the merged flag being
		 * misrepresented, so we don't have to disable preemption here.
		 */
		last = raw_cpu_ptr(stats->last);
		stats_aux->merged =
			(bi_sector == (READ_ONCE(last->last_sector) &&
				       ((bi_rw == WRITE) ==
					(READ_ONCE(last->last_rw) == WRITE))
				       ));
		WRITE_ONCE(last->last_sector, end_sector);
		WRITE_ONCE(last->last_rw, bi_rw);
	}

	rcu_read_lock();

	got_precise_time = false;
	list_for_each_entry_rcu(s, &stats->list, list_entry) {
		if (s->stat_flags & STAT_PRECISE_TIMESTAMPS && !got_precise_time) {
			if (!end)
				stats_aux->duration_ns = ktime_to_ns(ktime_get());
			else
				stats_aux->duration_ns = ktime_to_ns(ktime_get()) - stats_aux->duration_ns;
			got_precise_time = true;
		}
		__dm_stat_bio(s, bi_rw, bi_sector, end_sector, end, duration_jiffies, stats_aux);
	}

	rcu_read_unlock();
}

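/*
 * Snapshot one entry: round the in-flight time on the local CPU, then sum
 * the per-CPU counters into shared->tmp. The sums are only approximate
 * because other CPUs may keep updating their counters concurrently.
 */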
static void __dm_stat_init_temporary_percpu_totals(struct dm_stat_shared *shared,
						   struct dm_stat *s, size_t x)
{
	int cpu;
	struct dm_stat_percpu *p;

	local_irq_disable();
	p = &s->stat_percpu[smp_processor_id()][x];
	dm_stat_round(s, shared, p);
	local_irq_enable();

	shared->tmp.sectors[READ] = 0;
	shared->tmp.sectors[WRITE] = 0;
	shared->tmp.ios[READ] = 0;
	shared->tmp.ios[WRITE] = 0;
	shared->tmp.merges[READ] = 0;
	shared->tmp.merges[WRITE] = 0;
	shared->tmp.ticks[READ] = 0;
	shared->tmp.ticks[WRITE] = 0;
	shared->tmp.io_ticks[READ] = 0;
	shared->tmp.io_ticks[WRITE] = 0;
	shared->tmp.io_ticks_total = 0;
	shared->tmp.time_in_queue = 0;

	if (s->n_histogram_entries)
		memset(shared->tmp.histogram, 0, (s->n_histogram_entries + 1) * sizeof(unsigned long long));

	for_each_possible_cpu(cpu) {
		p = &s->stat_percpu[cpu][x];
		shared->tmp.sectors[READ] += READ_ONCE(p->sectors[READ]);
		shared->tmp.sectors[WRITE] += READ_ONCE(p->sectors[WRITE]);
		shared->tmp.ios[READ] += READ_ONCE(p->ios[READ]);
		shared->tmp.ios[WRITE] += READ_ONCE(p->ios[WRITE]);
		shared->tmp.merges[READ] += READ_ONCE(p->merges[READ]);
		shared->tmp.merges[WRITE] += READ_ONCE(p->merges[WRITE]);
		shared->tmp.ticks[READ] += READ_ONCE(p->ticks[READ]);
		shared->tmp.ticks[WRITE] += READ_ONCE(p->ticks[WRITE]);
		shared->tmp.io_ticks[READ] += READ_ONCE(p->io_ticks[READ]);
		shared->tmp.io_ticks[WRITE] += READ_ONCE(p->io_ticks[WRITE]);
		shared->tmp.io_ticks_total += READ_ONCE(p->io_ticks_total);
		shared->tmp.time_in_queue += READ_ONCE(p->time_in_queue);
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				shared->tmp.histogram[i] += READ_ONCE(p->histogram[i]);
		}
	}
}

static void __dm_stat_clear(struct dm_stat *s, size_t idx_start, size_t idx_end,
			    bool init_tmp_percpu_totals)
{
	size_t x;
	struct dm_stat_shared *shared;
	struct dm_stat_percpu *p;

	for (x = idx_start; x < idx_end; x++) {
		shared = &s->stat_shared[x];
		if (init_tmp_percpu_totals)
			__dm_stat_init_temporary_percpu_totals(shared, s, x);
		local_irq_disable();
		p = &s->stat_percpu[smp_processor_id()][x];
		p->sectors[READ] -= shared->tmp.sectors[READ];
		p->sectors[WRITE] -= shared->tmp.sectors[WRITE];
		p->ios[READ] -= shared->tmp.ios[READ];
		p->ios[WRITE] -= shared->tmp.ios[WRITE];
		p->merges[READ] -= shared->tmp.merges[READ];
		p->merges[WRITE] -= shared->tmp.merges[WRITE];
		p->ticks[READ] -= shared->tmp.ticks[READ];
		p->ticks[WRITE] -= shared->tmp.ticks[WRITE];
		p->io_ticks[READ] -= shared->tmp.io_ticks[READ];
		p->io_ticks[WRITE] -= shared->tmp.io_ticks[WRITE];
		p->io_ticks_total -= shared->tmp.io_ticks_total;
		p->time_in_queue -= shared->tmp.time_in_queue;
		local_irq_enable();
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++) {
				local_irq_disable();
				p = &s->stat_percpu[smp_processor_id()][x];
				p->histogram[i] -= shared->tmp.histogram[i];
				local_irq_enable();
			}
		}
		cond_resched();
	}
}

static int dm_stats_clear(struct dm_stats *stats, int id)
{
	struct dm_stat *s;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	__dm_stat_clear(s, 0, s->n_entries, true);

	mutex_unlock(&stats->mutex);

	return 1;
}

/*
 * This is like jiffies_to_msec, but works for 64-bit values.
 */
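/*
 * jiffies_to_msecs() returns only an unsigned int, so a full 64-bit jiffies
 * count cannot be converted in one call. The value is split into 22-bit
 * chunks, j = a + b * 2^22 + c * 2^44, and combined below as
 *   result = msecs(a) + mult * msecs(b) + mult^2 * msecs(c)
 * with mult = jiffies_to_msecs(1 << 22).
 */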
static unsigned long long dm_jiffies_to_msec64(struct dm_stat *s, unsigned long long j)
{
	unsigned long long result;
	unsigned mult;

	if (s->stat_flags & STAT_PRECISE_TIMESTAMPS)
		return j;

	result = 0;
	if (j)
		result = jiffies_to_msecs(j & 0x3fffff);
	if (j >= 1 << 22) {
		mult = jiffies_to_msecs(1 << 22);
		result += (unsigned long long)mult * (unsigned long long)jiffies_to_msecs((j >> 22) & 0x3fffff);
	}
	if (j >= 1ULL << 44)
		result += (unsigned long long)mult * (unsigned long long)mult * (unsigned long long)jiffies_to_msecs(j >> 44);

	return result;
}

static int dm_stats_print(struct dm_stats *stats, int id,
			  size_t idx_start, size_t idx_len,
			  bool clear, char *result, unsigned maxlen)
{
	unsigned sz = 0;
	struct dm_stat *s;
	size_t x;
	sector_t start, end, step;
	size_t idx_end;
	struct dm_stat_shared *shared;

	/*
	 * Output format:
	 *   <start_sector>+<length> counters
	 */
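	/*
	 * The counters, in the order emitted below: reads, reads merged,
	 * sectors read, read ticks, writes, writes merged, sectors written,
	 * write ticks, I/Os in flight, io_ticks, time in queue, and the
	 * per-direction io_ticks for reads and writes, optionally followed
	 * by the histogram buckets.
	 */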

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	idx_end = idx_start + idx_len;
	if (idx_end < idx_start ||
	    idx_end > s->n_entries)
		idx_end = s->n_entries;

	if (idx_start > idx_end)
		idx_start = idx_end;

	step = s->step;
	start = s->start + (step * idx_start);

	for (x = idx_start; x < idx_end; x++, start = end) {
		shared = &s->stat_shared[x];
		end = start + step;
		if (unlikely(end > s->end))
			end = s->end;

		__dm_stat_init_temporary_percpu_totals(shared, s, x);

		DMEMIT("%llu+%llu %llu %llu %llu %llu %llu %llu %llu %llu %d %llu %llu %llu %llu",
		       (unsigned long long)start,
		       (unsigned long long)step,
		       shared->tmp.ios[READ],
		       shared->tmp.merges[READ],
		       shared->tmp.sectors[READ],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[READ]),
		       shared->tmp.ios[WRITE],
		       shared->tmp.merges[WRITE],
		       shared->tmp.sectors[WRITE],
		       dm_jiffies_to_msec64(s, shared->tmp.ticks[WRITE]),
		       dm_stat_in_flight(shared),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks_total),
		       dm_jiffies_to_msec64(s, shared->tmp.time_in_queue),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[READ]),
		       dm_jiffies_to_msec64(s, shared->tmp.io_ticks[WRITE]));
		if (s->n_histogram_entries) {
			unsigned i;
			for (i = 0; i < s->n_histogram_entries + 1; i++)
				DMEMIT("%s%llu", !i ? " " : ":", shared->tmp.histogram[i]);
		}
		DMEMIT("\n");

		if (unlikely(sz + 1 >= maxlen))
			goto buffer_overflow;

		cond_resched();
	}

	if (clear)
		__dm_stat_clear(s, idx_start, idx_end, false);

buffer_overflow:
	mutex_unlock(&stats->mutex);

	return 1;
}

static int dm_stats_set_aux(struct dm_stats *stats, int id, const char *aux_data)
{
	struct dm_stat *s;
	const char *new_aux_data;

	mutex_lock(&stats->mutex);

	s = __dm_stats_find(stats, id);
	if (!s) {
		mutex_unlock(&stats->mutex);
		return -ENOENT;
	}

	new_aux_data = kstrdup(aux_data, GFP_KERNEL);
	if (!new_aux_data) {
		mutex_unlock(&stats->mutex);
		return -ENOMEM;
	}

	kfree(s->aux_data);
	s->aux_data = new_aux_data;

	mutex_unlock(&stats->mutex);

	return 0;
}

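/*
 * Parse a comma-separated list of strictly increasing boundaries, e.g.
 * "100,1000" defines three bins: [0,100), [100,1000) and [1000,inf).
 */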
static int parse_histogram(const char *h, unsigned *n_histogram_entries,
			   unsigned long long **histogram_boundaries)
{
	const char *q;
	unsigned n;
	unsigned long long last;

	*n_histogram_entries = 1;
	for (q = h; *q; q++)
		if (*q == ',')
			(*n_histogram_entries)++;

	*histogram_boundaries = kmalloc_array(*n_histogram_entries,
					      sizeof(unsigned long long),
					      GFP_KERNEL);
	if (!*histogram_boundaries)
		return -ENOMEM;

	n = 0;
	last = 0;
	while (1) {
		unsigned long long hi;
		int s;
		char ch;
		s = sscanf(h, "%llu%c", &hi, &ch);
		if (!s || (s == 2 && ch != ','))
			return -EINVAL;
		if (hi <= last)
			return -EINVAL;
		last = hi;
		(*histogram_boundaries)[n] = hi;
		if (s == 1)
			return 0;
		h = strchr(h, ',') + 1;
		n++;
	}
}

static int message_stats_create(struct mapped_device *md,
				unsigned argc, char **argv,
				char *result, unsigned maxlen)
{
	int r;
	int id;
	char dummy;
	unsigned long long start, end, len, step;
	unsigned divisor;
	const char *program_id, *aux_data;
	unsigned stat_flags = 0;

	unsigned n_histogram_entries = 0;
	unsigned long long *histogram_boundaries = NULL;

	struct dm_arg_set as, as_backup;
	const char *a;
	unsigned feature_args;

	/*
	 * Input format:
	 *   <range> <step> [<extra_parameters> <parameters>] [<program_id> [<aux_data>]]
	 */
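	/*
	 * A hypothetical invocation matching the parsing below:
	 *   dmsetup message <dev> 0 @stats_create - /100 1 precise_timestamps p0 aux
	 * which covers the whole device in 100 areas, passes one feature
	 * argument, and sets program_id "p0" and aux_data "aux".
	 */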

	if (argc < 3)
		goto ret_einval;

	as.argc = argc;
	as.argv = argv;
	dm_consume_args(&as, 1);

	a = dm_shift_arg(&as);
	if (!strcmp(a, "-")) {
		start = 0;
		len = dm_get_size(md);
		if (!len)
			len = 1;
	} else if (sscanf(a, "%llu+%llu%c", &start, &len, &dummy) != 2 ||
		   start != (sector_t)start || len != (sector_t)len)
		goto ret_einval;

	end = start + len;
	if (start >= end)
		goto ret_einval;

	a = dm_shift_arg(&as);
	if (sscanf(a, "/%u%c", &divisor, &dummy) == 1) {
		if (!divisor)
			return -EINVAL;
		step = end - start;
		if (do_div(step, divisor))
			step++;
		if (!step)
			step = 1;
	} else if (sscanf(a, "%llu%c", &step, &dummy) != 1 ||
		   step != (sector_t)step || !step)
		goto ret_einval;

	as_backup = as;
	a = dm_shift_arg(&as);
	if (a && sscanf(a, "%u%c", &feature_args, &dummy) == 1) {
		while (feature_args--) {
			a = dm_shift_arg(&as);
			if (!a)
				goto ret_einval;
			if (!strcasecmp(a, "precise_timestamps"))
				stat_flags |= STAT_PRECISE_TIMESTAMPS;
			else if (!strncasecmp(a, "histogram:", 10)) {
				if (n_histogram_entries)
					goto ret_einval;
				if ((r = parse_histogram(a + 10, &n_histogram_entries, &histogram_boundaries)))
					goto ret;
			} else
				goto ret_einval;
		}
	} else {
		as = as_backup;
	}

	program_id = "-";
	aux_data = "-";

	a = dm_shift_arg(&as);
	if (a)
		program_id = a;

	a = dm_shift_arg(&as);
	if (a)
		aux_data = a;

	if (as.argc)
		goto ret_einval;

	/*
	 * If a buffer overflow happens after we created the region,
	 * it's too late (the userspace would retry with a larger
	 * buffer, but the region id that caused the overflow is already
	 * leaked).  So we must detect buffer overflow in advance.
	 */
	snprintf(result, maxlen, "%d", INT_MAX);
	if (dm_message_test_buffer_overflow(result, maxlen)) {
		r = 1;
		goto ret;
	}

	id = dm_stats_create(dm_get_stats(md), start, end, step, stat_flags,
			     n_histogram_entries, histogram_boundaries, program_id, aux_data,
			     dm_internal_suspend_fast, dm_internal_resume_fast, md);
	if (id < 0) {
		r = id;
		goto ret;
	}

	snprintf(result, maxlen, "%d", id);

	r = 1;
	goto ret;

ret_einval:
	r = -EINVAL;
ret:
	kfree(histogram_boundaries);
	return r;
}

static int message_stats_delete(struct mapped_device *md,
				unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_delete(dm_get_stats(md), id);
}

static int message_stats_clear(struct mapped_device *md,
			       unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 2)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_clear(dm_get_stats(md), id);
}

static int message_stats_list(struct mapped_device *md,
			      unsigned argc, char **argv,
			      char *result, unsigned maxlen)
{
	int r;
	const char *program = NULL;

	if (argc < 1 || argc > 2)
		return -EINVAL;

	if (argc > 1) {
		program = kstrdup(argv[1], GFP_KERNEL);
		if (!program)
			return -ENOMEM;
	}

	r = dm_stats_list(dm_get_stats(md), program, result, maxlen);

	kfree(program);

	return r;
}

static int message_stats_print(struct mapped_device *md,
			       unsigned argc, char **argv, bool clear,
			       char *result, unsigned maxlen)
{
	int id;
	char dummy;
	unsigned long idx_start = 0, idx_len = ULONG_MAX;

	if (argc != 2 && argc != 4)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	if (argc > 3) {
		if (strcmp(argv[2], "-") &&
		    sscanf(argv[2], "%lu%c", &idx_start, &dummy) != 1)
			return -EINVAL;
		if (strcmp(argv[3], "-") &&
		    sscanf(argv[3], "%lu%c", &idx_len, &dummy) != 1)
			return -EINVAL;
	}

	return dm_stats_print(dm_get_stats(md), id, idx_start, idx_len, clear,
			      result, maxlen);
}

static int message_stats_set_aux(struct mapped_device *md,
				 unsigned argc, char **argv)
{
	int id;
	char dummy;

	if (argc != 3)
		return -EINVAL;

	if (sscanf(argv[1], "%d%c", &id, &dummy) != 1 || id < 0)
		return -EINVAL;

	return dm_stats_set_aux(dm_get_stats(md), id, argv[2]);
}

int dm_stats_message(struct mapped_device *md, unsigned argc, char **argv,
		     char *result, unsigned maxlen)
{
	int r;

	/* All messages here must start with '@' */
	if (!strcasecmp(argv[0], "@stats_create"))
		r = message_stats_create(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_delete"))
		r = message_stats_delete(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_clear"))
		r = message_stats_clear(md, argc, argv);
	else if (!strcasecmp(argv[0], "@stats_list"))
		r = message_stats_list(md, argc, argv, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print"))
		r = message_stats_print(md, argc, argv, false, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_print_clear"))
		r = message_stats_print(md, argc, argv, true, result, maxlen);
	else if (!strcasecmp(argv[0], "@stats_set_aux"))
		r = message_stats_set_aux(md, argc, argv);
	else
		return 2; /* this wasn't a stats message */

	if (r == -EINVAL)
		DMWARN("Invalid parameters for message %s", argv[0]);

	return r;
}

int __init dm_statistics_init(void)
{
	shared_memory_amount = 0;
	dm_stat_need_rcu_barrier = 0;
	return 0;
}

void dm_statistics_exit(void)
{
	if (dm_stat_need_rcu_barrier)
		rcu_barrier();
	if (WARN_ON(shared_memory_amount))
		DMCRIT("shared_memory_amount leaked: %lu", shared_memory_amount);
}

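/*
 * Read-only view of the accounted memory; with dm-stats built into dm_mod
 * this is presumably visible as
 * /sys/module/dm_mod/parameters/stats_current_allocated_bytes.
 */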
module_param_named(stats_current_allocated_bytes, shared_memory_amount, ulong, S_IRUGO);
MODULE_PARM_DESC(stats_current_allocated_bytes, "Memory currently used by statistics");