// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * net/core/gen_stats.c
 *
 * Authors:  Thomas Graf <tgraf@suug.ch>
 *           Jamal Hadi Salim
 *           Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * See Documentation/networking/gen_stats.rst
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/socket.h>
#include <linux/rtnetlink.h>
#include <linux/gen_stats.h>
#include <net/netlink.h>
#include <net/gen_stats.h>


/* Append one statistics TLV to the dump.  On failure the dump is aborted:
 * the statistics lock (if any) is dropped and any pending compat xstats
 * copy is freed, so callers can simply propagate the error.
 */
static inline int
gnet_stats_copy(struct gnet_dump *d, int type, void *buf, int size, int padattr)
{
	if (nla_put_64bit(d->skb, type, size, buf, padattr))
		goto nla_put_failure;
	return 0;

nla_put_failure:
	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return -1;
}

/**
 * gnet_stats_start_copy_compat - start dumping procedure in compatibility mode
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @tc_stats_type: TLV type for backward compatibility struct tc_stats TLV
 * @xstats_type: TLV type for backward compatibility xstats TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * The dumping handle is marked to be in backward compatibility mode, telling
 * all gnet_stats_copy_XXX() functions to fill a local copy of struct tc_stats.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy_compat(struct sk_buff *skb, int type, int tc_stats_type,
			     int xstats_type, spinlock_t *lock,
			     struct gnet_dump *d, int padattr)
	__acquires(lock)
{
	memset(d, 0, sizeof(*d));

	if (type)
		d->tail = (struct nlattr *)skb_tail_pointer(skb);
	d->skb = skb;
	d->compat_tc_stats = tc_stats_type;
	d->compat_xstats = xstats_type;
	d->padattr = padattr;
	if (lock) {
		d->lock = lock;
		spin_lock_bh(lock);
	}
	if (d->tail) {
		int ret = gnet_stats_copy(d, type, NULL, 0, padattr);

		/* The initial attribute added in gnet_stats_copy() may be
		 * preceded by a padding attribute, in which case d->tail will
		 * end up pointing at the padding instead of the real attribute.
		 * Fix this so gnet_stats_finish_copy() adjusts the length of
		 * the right attribute.
		 */
		if (ret == 0 && d->tail->nla_type == padattr)
			d->tail = (struct nlattr *)((char *)d->tail +
						    NLA_ALIGN(d->tail->nla_len));
		return ret;
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_start_copy_compat);
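
/* Example: a minimal sketch of a compat-mode dump, modelled on how qdisc
 * dumps emit the legacy TCA_STATS/TCA_XSTATS TLVs alongside the nested
 * TCA_STATS2 TLV (the surrounding @skb and error label are assumed):
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy_compat(skb, TCA_STATS2, TCA_STATS,
 *					 TCA_XSTATS, NULL, &d, TCA_PAD) < 0)
 *		goto nla_put_failure;
 */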

/**
 * gnet_stats_start_copy - start dumping procedure
 * @skb: socket buffer to put statistics TLVs into
 * @type: TLV type for top level statistic TLV
 * @lock: statistics lock
 * @d: dumping handle
 * @padattr: padding attribute
 *
 * Initializes the dumping handle, grabs the statistic lock and appends
 * an empty TLV header to the socket buffer for use as a container for all
 * other statistic TLVs.
 *
 * Returns 0 on success or -1 if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_start_copy(struct sk_buff *skb, int type, spinlock_t *lock,
		      struct gnet_dump *d, int padattr)
{
	return gnet_stats_start_copy_compat(skb, type, 0, 0, lock, d, padattr);
}
EXPORT_SYMBOL(gnet_stats_start_copy);
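
/* Example: a minimal sketch of a complete non-compat dump sequence;
 * @bstats, @qstats and @qlen stand in for the caller's own statistics:
 *
 *	struct gnet_dump d;
 *
 *	if (gnet_stats_start_copy(skb, TCA_STATS2, NULL, &d, TCA_PAD) < 0 ||
 *	    gnet_stats_copy_basic(NULL, &d, NULL, &bstats) < 0 ||
 *	    gnet_stats_copy_queue(&d, NULL, &qstats, qlen) < 0 ||
 *	    gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 */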

/* Fold the per-cpu byte/packet counters into @bstats.  Each CPU's pair is
 * read under its u64_stats syncp so the 64-bit counters are consistent on
 * 32-bit hosts; the read is retried if a writer raced with it.
 */
static void
__gnet_stats_copy_basic_cpu(struct gnet_stats_basic_packed *bstats,
			    struct gnet_stats_basic_cpu __percpu *cpu)
{
	int i;

	for_each_possible_cpu(i) {
		struct gnet_stats_basic_cpu *bcpu = per_cpu_ptr(cpu, i);
		unsigned int start;
		u64 bytes, packets;

		do {
			start = u64_stats_fetch_begin_irq(&bcpu->syncp);
			bytes = bcpu->bstats.bytes;
			packets = bcpu->bstats.packets;
		} while (u64_stats_fetch_retry_irq(&bcpu->syncp, start));

		bstats->bytes += bytes;
		bstats->packets += packets;
	}
}

void
__gnet_stats_copy_basic(const seqcount_t *running,
			struct gnet_stats_basic_packed *bstats,
			struct gnet_stats_basic_cpu __percpu *cpu,
			struct gnet_stats_basic_packed *b)
{
	unsigned int seq;

	if (cpu) {
		__gnet_stats_copy_basic_cpu(bstats, cpu);
		return;
	}
	do {
		if (running)
			seq = read_seqcount_begin(running);
		bstats->bytes = b->bytes;
		bstats->packets = b->packets;
	} while (running && read_seqcount_retry(running, seq));
}
EXPORT_SYMBOL(__gnet_stats_copy_basic);
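
/* Example: folding statistics into a local struct without emitting a TLV,
 * e.g. when a caller needs the raw numbers; @q is an assumed owner of both
 * a per-cpu and a flat counter set (only one of the two is read):
 *
 *	struct gnet_stats_basic_packed bstats = {0};
 *
 *	__gnet_stats_copy_basic(running, &bstats, q->cpu_bstats, &q->bstats);
 */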

static int
___gnet_stats_copy_basic(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b,
			 int type)
{
	struct gnet_stats_basic_packed bstats = {0};

	__gnet_stats_copy_basic(running, &bstats, cpu, b);

	if (d->compat_tc_stats && type == TCA_STATS_BASIC) {
		d->tc_stats.bytes = bstats.bytes;
		d->tc_stats.packets = bstats.packets;
	}

	if (d->tail) {
		struct gnet_stats_basic sb;
		int res;

		memset(&sb, 0, sizeof(sb));
		sb.bytes = bstats.bytes;
		sb.packets = bstats.packets;
		res = gnet_stats_copy(d, type, &sb, sizeof(sb), TCA_STATS_PAD);
		if (res < 0 || sb.packets == bstats.packets)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_PKT64, &bstats.packets,
				       sizeof(bstats.packets), TCA_STATS_PAD);
	}
	return 0;
}

/**
 * gnet_stats_copy_basic - copy basic statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic(const seqcount_t *running,
		      struct gnet_dump *d,
		      struct gnet_stats_basic_cpu __percpu *cpu,
		      struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC);
}
EXPORT_SYMBOL(gnet_stats_copy_basic);
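
/* Example: a qdisc-style caller would typically pass the root qdisc's
 * running seqcount so the read is consistent against concurrent updates
 * (a sketch; the qdisc @q and dump handle @d are assumed):
 *
 *	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(q), &d,
 *				  q->cpu_bstats, &q->bstats) < 0)
 *		goto nla_put_failure;
 */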

/**
 * gnet_stats_copy_basic_hw - copy basic hw statistics into statistic TLV
 * @running: seqcount_t pointer
 * @d: dumping handle
 * @cpu: copy statistic per cpu
 * @b: basic statistics
 *
 * Appends the basic hardware statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_basic_hw(const seqcount_t *running,
			 struct gnet_dump *d,
			 struct gnet_stats_basic_cpu __percpu *cpu,
			 struct gnet_stats_basic_packed *b)
{
	return ___gnet_stats_copy_basic(running, d, cpu, b,
					TCA_STATS_BASIC_HW);
}
EXPORT_SYMBOL(gnet_stats_copy_basic_hw);
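
/* Example: callers with hardware-offloaded counters emit both TLVs so user
 * space can tell software and hardware byte counts apart; the @p fields
 * below are assumed names for a separate software/hardware counter pair:
 *
 *	if (gnet_stats_copy_basic(NULL, &d, p->cpu_bstats, &p->bstats) < 0 ||
 *	    gnet_stats_copy_basic_hw(NULL, &d, p->cpu_bstats_hw,
 *				     &p->bstats_hw) < 0)
 *		goto nla_put_failure;
 */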

/**
 * gnet_stats_copy_rate_est - copy rate estimator statistics into statistics TLV
 * @d: dumping handle
 * @rate_est: rate estimator
 *
 * Appends the rate estimator statistics to the top level TLV created by
 * gnet_stats_start_copy().
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_rate_est(struct gnet_dump *d,
			 struct net_rate_estimator __rcu **rate_est)
{
	struct gnet_stats_rate_est64 sample;
	struct gnet_stats_rate_est est;
	int res;

	if (!gen_estimator_read(rate_est, &sample))
		return 0;
	est.bps = min_t(u64, UINT_MAX, sample.bps);
	/* we have some time before reaching 2^32 packets per second */
	est.pps = sample.pps;

	if (d->compat_tc_stats) {
		d->tc_stats.bps = est.bps;
		d->tc_stats.pps = est.pps;
	}

	if (d->tail) {
		res = gnet_stats_copy(d, TCA_STATS_RATE_EST, &est, sizeof(est),
				      TCA_STATS_PAD);
		if (res < 0 || est.bps == sample.bps)
			return res;
		/* emit 64bit stats only if needed */
		return gnet_stats_copy(d, TCA_STATS_RATE_EST64, &sample,
				       sizeof(sample), TCA_STATS_PAD);
	}

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_rate_est);
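
/* Example: dumping an estimator attached to a qdisc or action; if no
 * estimator is installed, gen_estimator_read() fails and nothing is
 * emitted, so the call is safe unconditionally (@q is assumed):
 *
 *	if (gnet_stats_copy_rate_est(&d, &q->rate_est) < 0)
 *		goto nla_put_failure;
 */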

/* Fold the per-cpu queue counters into @qstats.  Queue length is not
 * maintained per CPU, so it is zeroed here and filled in by the caller.
 */
static void
__gnet_stats_copy_queue_cpu(struct gnet_stats_queue *qstats,
			    const struct gnet_stats_queue __percpu *q)
{
	int i;

	for_each_possible_cpu(i) {
		const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);

		qstats->qlen = 0;
		qstats->backlog += qcpu->backlog;
		qstats->drops += qcpu->drops;
		qstats->requeues += qcpu->requeues;
		qstats->overlimits += qcpu->overlimits;
	}
}

void __gnet_stats_copy_queue(struct gnet_stats_queue *qstats,
			     const struct gnet_stats_queue __percpu *cpu,
			     const struct gnet_stats_queue *q,
			     __u32 qlen)
{
	if (cpu) {
		__gnet_stats_copy_queue_cpu(qstats, cpu);
	} else {
		qstats->qlen = q->qlen;
		qstats->backlog = q->backlog;
		qstats->drops = q->drops;
		qstats->requeues = q->requeues;
		qstats->overlimits = q->overlimits;
	}

	qstats->qlen = qlen;
}
EXPORT_SYMBOL(__gnet_stats_copy_queue);
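
/* Example: reading queue counters into a local struct; the caller supplies
 * @qlen explicitly since it is not tracked in the per-cpu counters
 * (the @q field names are assumptions):
 *
 *	struct gnet_stats_queue qstats = {0};
 *
 *	__gnet_stats_copy_queue(&qstats, q->cpu_qstats, &q->qstats, qlen);
 */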

/**
 * gnet_stats_copy_queue - copy queue statistics into statistics TLV
 * @d: dumping handle
 * @cpu_q: per cpu queue statistics
 * @q: queue statistics
 * @qlen: queue length statistics
 *
 * Appends the queue statistics to the top level TLV created by
 * gnet_stats_start_copy(). Per-CPU queue statistics are used
 * if they are available.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_queue(struct gnet_dump *d,
		      struct gnet_stats_queue __percpu *cpu_q,
		      struct gnet_stats_queue *q, __u32 qlen)
{
	struct gnet_stats_queue qstats = {0};

	__gnet_stats_copy_queue(&qstats, cpu_q, q, qlen);

	if (d->compat_tc_stats) {
		d->tc_stats.drops = qstats.drops;
		d->tc_stats.qlen = qstats.qlen;
		d->tc_stats.backlog = qstats.backlog;
		d->tc_stats.overlimits = qstats.overlimits;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_QUEUE,
				       &qstats, sizeof(qstats),
				       TCA_STATS_PAD);

	return 0;
}
EXPORT_SYMBOL(gnet_stats_copy_queue);
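
/* Example: a per-cpu qdisc would sum its queue length first, e.g. with
 * qdisc_qlen_sum(), then emit the TLV (a sketch; @q and @d are assumed):
 *
 *	__u32 qlen = qdisc_qlen_sum(q);
 *
 *	if (gnet_stats_copy_queue(&d, q->cpu_qstats, &q->qstats, qlen) < 0)
 *		goto nla_put_failure;
 */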

/**
 * gnet_stats_copy_app - copy application specific statistics into statistics TLV
 * @d: dumping handle
 * @st: application specific statistics data
 * @len: length of data
 *
 * Appends the application specific statistics to the top level TLV created by
 * gnet_stats_start_copy() and remembers the data for XSTATS if the dumping
 * handle is in backward compatibility mode.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_copy_app(struct gnet_dump *d, void *st, int len)
{
	if (d->compat_xstats) {
		d->xstats = kmemdup(st, len, GFP_ATOMIC);
		if (!d->xstats)
			goto err_out;
		d->xstats_len = len;
	}

	if (d->tail)
		return gnet_stats_copy(d, TCA_STATS_APP, st, len,
				       TCA_STATS_PAD);

	return 0;

err_out:
	if (d->lock)
		spin_unlock_bh(d->lock);
	d->xstats_len = 0;
	return -1;
}
EXPORT_SYMBOL(gnet_stats_copy_app);
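
/* Example: a qdisc exporting private xstats; struct tc_foo_xstats is a
 * placeholder for the caller's own uapi struct:
 *
 *	struct tc_foo_xstats st = { .early_drops = q->early_drops };
 *
 *	return gnet_stats_copy_app(&d, &st, sizeof(st));
 */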

/**
 * gnet_stats_finish_copy - finish dumping procedure
 * @d: dumping handle
 *
 * Corrects the length of the top level TLV to include all TLVs added
 * by gnet_stats_copy_XXX() calls. Adds the backward compatibility TLVs
 * if gnet_stats_start_copy_compat() was used and releases the statistics
 * lock.
 *
 * Returns 0 on success or -1 with the statistic lock released
 * if the room in the socket buffer was not sufficient.
 */
int
gnet_stats_finish_copy(struct gnet_dump *d)
{
	if (d->tail)
		d->tail->nla_len = skb_tail_pointer(d->skb) - (u8 *)d->tail;

	if (d->compat_tc_stats)
		if (gnet_stats_copy(d, d->compat_tc_stats, &d->tc_stats,
				    sizeof(d->tc_stats), d->padattr) < 0)
			return -1;

	if (d->compat_xstats && d->xstats) {
		if (gnet_stats_copy(d, d->compat_xstats, d->xstats,
				    d->xstats_len, d->padattr) < 0)
			return -1;
	}

	if (d->lock)
		spin_unlock_bh(d->lock);
	kfree(d->xstats);
	d->xstats = NULL;
	d->xstats_len = 0;
	return 0;
}
EXPORT_SYMBOL(gnet_stats_finish_copy);
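
/* Example: the tail of a typical dump routine; on success the caller
 * returns the message length accumulated in the skb:
 *
 *	if (gnet_stats_finish_copy(&d) < 0)
 *		goto nla_put_failure;
 *
 *	return skb->len;
 */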