// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */
9*4882a593Smuzhiyun #undef pr_fmt
10*4882a593Smuzhiyun #define pr_fmt(fmt) KBUILD_MODNAME " L" __stringify(__LINE__) ": " fmt
11*4882a593Smuzhiyun
12*4882a593Smuzhiyun #include "rtrs-clt.h"
13*4882a593Smuzhiyun
rtrs_clt_update_wc_stats(struct rtrs_clt_con * con)14*4882a593Smuzhiyun void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con)
15*4882a593Smuzhiyun {
16*4882a593Smuzhiyun struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
17*4882a593Smuzhiyun struct rtrs_clt_stats *stats = sess->stats;
18*4882a593Smuzhiyun struct rtrs_clt_stats_pcpu *s;
19*4882a593Smuzhiyun int cpu;
20*4882a593Smuzhiyun
21*4882a593Smuzhiyun cpu = raw_smp_processor_id();
22*4882a593Smuzhiyun s = this_cpu_ptr(stats->pcpu_stats);
23*4882a593Smuzhiyun if (unlikely(con->cpu != cpu)) {
24*4882a593Smuzhiyun s->cpu_migr.to++;
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /* Careful here, override s pointer */
27*4882a593Smuzhiyun s = per_cpu_ptr(stats->pcpu_stats, con->cpu);
28*4882a593Smuzhiyun atomic_inc(&s->cpu_migr.from);
29*4882a593Smuzhiyun }
30*4882a593Smuzhiyun }
31*4882a593Smuzhiyun
rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats * stats)32*4882a593Smuzhiyun void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *stats)
33*4882a593Smuzhiyun {
34*4882a593Smuzhiyun struct rtrs_clt_stats_pcpu *s;
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun s = this_cpu_ptr(stats->pcpu_stats);
37*4882a593Smuzhiyun s->rdma.failover_cnt++;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun
/*
 * rtrs_clt_stats_migration_cnt_to_str() - format the CPU-migration
 * counters into @buf as a small table.
 *
 * Layout: a header row of CPU ids, a "from:" row (completions that left
 * each CPU) and a "to :" row (completions handled on each CPU).
 *
 * Returns the number of characters written (scnprintf semantics, so the
 * result never exceeds @len).
 */
int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats,
					char *buf, size_t len)
{
	struct rtrs_clt_stats_pcpu *pcpu;
	size_t n;
	int cpu;

	/* Header row, one column per possible CPU. */
	n = scnprintf(buf, len, " ");
	for_each_possible_cpu(cpu)
		n += scnprintf(buf + n, len - n, " CPU%u", cpu);

	n += scnprintf(buf + n, len - n, "\nfrom:");
	for_each_possible_cpu(cpu) {
		pcpu = per_cpu_ptr(stats->pcpu_stats, cpu);
		n += scnprintf(buf + n, len - n, " %d",
			       atomic_read(&pcpu->cpu_migr.from));
	}

	n += scnprintf(buf + n, len - n, "\nto :");
	for_each_possible_cpu(cpu) {
		pcpu = per_cpu_ptr(stats->pcpu_stats, cpu);
		n += scnprintf(buf + n, len - n, " %d", pcpu->cpu_migr.to);
	}
	n += scnprintf(buf + n, len - n, "\n");

	return n;
}
69*4882a593Smuzhiyun
/*
 * rtrs_clt_stats_reconnects_to_str() - format reconnect counters into
 * @buf as "<successful> <failed>\n".  Returns bytes written.
 */
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
				     size_t len)
{
	return scnprintf(buf, len, "%d %d\n",
			 stats->reconnects.successful_cnt,
			 stats->reconnects.fail_cnt);
}
77*4882a593Smuzhiyun
rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats * stats,char * page,size_t len)78*4882a593Smuzhiyun ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
79*4882a593Smuzhiyun char *page, size_t len)
80*4882a593Smuzhiyun {
81*4882a593Smuzhiyun struct rtrs_clt_stats_rdma sum;
82*4882a593Smuzhiyun struct rtrs_clt_stats_rdma *r;
83*4882a593Smuzhiyun int cpu;
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun memset(&sum, 0, sizeof(sum));
86*4882a593Smuzhiyun
87*4882a593Smuzhiyun for_each_possible_cpu(cpu) {
88*4882a593Smuzhiyun r = &per_cpu_ptr(stats->pcpu_stats, cpu)->rdma;
89*4882a593Smuzhiyun
90*4882a593Smuzhiyun sum.dir[READ].cnt += r->dir[READ].cnt;
91*4882a593Smuzhiyun sum.dir[READ].size_total += r->dir[READ].size_total;
92*4882a593Smuzhiyun sum.dir[WRITE].cnt += r->dir[WRITE].cnt;
93*4882a593Smuzhiyun sum.dir[WRITE].size_total += r->dir[WRITE].size_total;
94*4882a593Smuzhiyun sum.failover_cnt += r->failover_cnt;
95*4882a593Smuzhiyun }
96*4882a593Smuzhiyun
97*4882a593Smuzhiyun return scnprintf(page, len, "%llu %llu %llu %llu %u %llu\n",
98*4882a593Smuzhiyun sum.dir[READ].cnt, sum.dir[READ].size_total,
99*4882a593Smuzhiyun sum.dir[WRITE].cnt, sum.dir[WRITE].size_total,
100*4882a593Smuzhiyun atomic_read(&stats->inflight), sum.failover_cnt);
101*4882a593Smuzhiyun }
102*4882a593Smuzhiyun
rtrs_clt_reset_all_help(struct rtrs_clt_stats * s,char * page,size_t len)103*4882a593Smuzhiyun ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *s,
104*4882a593Smuzhiyun char *page, size_t len)
105*4882a593Smuzhiyun {
106*4882a593Smuzhiyun return scnprintf(page, len, "echo 1 to reset all statistics\n");
107*4882a593Smuzhiyun }
108*4882a593Smuzhiyun
/*
 * rtrs_clt_reset_rdma_stats() - zero the RDMA counters on every possible
 * CPU.  Only acts when @enable is true; returns -EINVAL otherwise.
 */
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable)
{
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		struct rtrs_clt_stats_pcpu *pcpu =
			per_cpu_ptr(stats->pcpu_stats, cpu);

		memset(&pcpu->rdma, 0, sizeof(pcpu->rdma));
	}

	return 0;
}
124*4882a593Smuzhiyun
/*
 * rtrs_clt_reset_cpu_migr_stats() - zero the migration counters on every
 * possible CPU.  Only acts when @enable is true; returns -EINVAL otherwise.
 */
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable)
{
	int cpu;

	if (!enable)
		return -EINVAL;

	for_each_possible_cpu(cpu) {
		struct rtrs_clt_stats_pcpu *pcpu =
			per_cpu_ptr(stats->pcpu_stats, cpu);

		memset(&pcpu->cpu_migr, 0, sizeof(pcpu->cpu_migr));
	}

	return 0;
}
140*4882a593Smuzhiyun
/*
 * rtrs_clt_reset_reconnects_stat() - zero the reconnect counters.
 * Only acts when @enable is true; returns -EINVAL otherwise.
 */
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable)
{
	if (enable) {
		memset(&stats->reconnects, 0, sizeof(stats->reconnects));
		return 0;
	}

	return -EINVAL;
}
150*4882a593Smuzhiyun
/*
 * rtrs_clt_reset_all_stats() - reset every statistic group (RDMA, CPU
 * migration, reconnects, inflight).  Only acts when @enable is true;
 * returns -EINVAL otherwise.  The sub-resets cannot fail here because
 * @enable is already known to be true.
 */
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *s, bool enable)
{
	if (!enable)
		return -EINVAL;

	rtrs_clt_reset_rdma_stats(s, enable);
	rtrs_clt_reset_cpu_migr_stats(s, enable);
	rtrs_clt_reset_reconnects_stat(s, enable);
	atomic_set(&s->inflight, 0);

	return 0;
}
163*4882a593Smuzhiyun
/*
 * rtrs_clt_update_rdma_stats() - account one RDMA transfer of @size bytes
 * in direction @d (READ/WRITE) on the current CPU's slot.
 */
static inline void rtrs_clt_update_rdma_stats(struct rtrs_clt_stats *stats,
					      size_t size, int d)
{
	struct rtrs_clt_stats_pcpu *pcpu = this_cpu_ptr(stats->pcpu_stats);

	pcpu->rdma.dir[d].cnt++;
	pcpu->rdma.dir[d].size_total += size;
}
173*4882a593Smuzhiyun
rtrs_clt_update_all_stats(struct rtrs_clt_io_req * req,int dir)174*4882a593Smuzhiyun void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir)
175*4882a593Smuzhiyun {
176*4882a593Smuzhiyun struct rtrs_clt_con *con = req->con;
177*4882a593Smuzhiyun struct rtrs_clt_sess *sess = to_clt_sess(con->c.sess);
178*4882a593Smuzhiyun struct rtrs_clt_stats *stats = sess->stats;
179*4882a593Smuzhiyun unsigned int len;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun len = req->usr_len + req->data_len;
182*4882a593Smuzhiyun rtrs_clt_update_rdma_stats(stats, len, dir);
183*4882a593Smuzhiyun if (sess->clt->mp_policy == MP_POLICY_MIN_INFLIGHT)
184*4882a593Smuzhiyun atomic_inc(&stats->inflight);
185*4882a593Smuzhiyun }
186*4882a593Smuzhiyun
rtrs_clt_init_stats(struct rtrs_clt_stats * stats)187*4882a593Smuzhiyun int rtrs_clt_init_stats(struct rtrs_clt_stats *stats)
188*4882a593Smuzhiyun {
189*4882a593Smuzhiyun stats->pcpu_stats = alloc_percpu(typeof(*stats->pcpu_stats));
190*4882a593Smuzhiyun if (!stats->pcpu_stats)
191*4882a593Smuzhiyun return -ENOMEM;
192*4882a593Smuzhiyun
193*4882a593Smuzhiyun /*
194*4882a593Smuzhiyun * successful_cnt will be set to 0 after session
195*4882a593Smuzhiyun * is established for the first time
196*4882a593Smuzhiyun */
197*4882a593Smuzhiyun stats->reconnects.successful_cnt = -1;
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun return 0;
200*4882a593Smuzhiyun }
201