/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#ifndef RTRS_CLT_H
#define RTRS_CLT_H

#include <linux/device.h>
#include "rtrs-pri.h"

/**
 * enum rtrs_clt_state - Client states.
 */
enum rtrs_clt_state {
	RTRS_CLT_CONNECTING,
	RTRS_CLT_CONNECTING_ERR,
	RTRS_CLT_RECONNECTING,
	RTRS_CLT_CONNECTED,
	RTRS_CLT_CLOSING,
	RTRS_CLT_CLOSED,
	RTRS_CLT_DEAD,
};
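
/*
 * Rough lifecycle (informal summary): a path starts in CONNECTING (or drops
 * to CONNECTING_ERR on a setup failure), reaches CONNECTED on success, falls
 * back to RECONNECTING when the link breaks, and goes through CLOSING and
 * CLOSED to DEAD once the session may be freed.
 */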

enum rtrs_mp_policy {
	MP_POLICY_RR,
	MP_POLICY_MIN_INFLIGHT,
};
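
/*
 * Multipath policy: MP_POLICY_RR spreads requests over the available paths in
 * round-robin fashion, while MP_POLICY_MIN_INFLIGHT prefers the path with the
 * fewest requests currently in flight (see rtrs_clt_stats.inflight).
 */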

/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
struct rtrs_clt_stats_reconnects {
	int successful_cnt;
	int fail_cnt;
};

/* see Documentation/ABI/testing/sysfs-class-rtrs-client for details */
struct rtrs_clt_stats_cpu_migr {
	atomic_t from;
	int to;
};

/* Stats for read and write operations.
 * See Documentation/ABI/testing/sysfs-class-rtrs-client for details.
 */
struct rtrs_clt_stats_rdma {
	struct {
		u64 cnt;
		u64 size_total;
	} dir[2];

	u64 failover_cnt;
};
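
/*
 * Illustrative sketch only (the real accounting lives in rtrs-clt-stats.c;
 * the local names here are assumptions): the per-direction counters above are
 * meant to be bumped on the completing CPU through the per-cpu stats, where
 * @d is READ or WRITE and @len the transferred size:
 *
 *	this_cpu_inc(stats->pcpu_stats->rdma.dir[d].cnt);
 *	this_cpu_add(stats->pcpu_stats->rdma.dir[d].size_total, len);
 */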

struct rtrs_clt_stats_pcpu {
	struct rtrs_clt_stats_cpu_migr cpu_migr;
	struct rtrs_clt_stats_rdma rdma;
};

struct rtrs_clt_stats {
	struct kobject kobj_stats;
	struct rtrs_clt_stats_pcpu __percpu *pcpu_stats;
	struct rtrs_clt_stats_reconnects reconnects;
	atomic_t inflight;
};

struct rtrs_clt_con {
	struct rtrs_con c;
	struct rtrs_iu *rsp_ius;
	u32 queue_size;
	unsigned int cpu;
	atomic_t io_cnt;
	int cm_err;
};

/**
 * rtrs_permit - reserves memory for a future RDMA operation.
 * Combine with IRQ pinning to keep IO on the same CPU.
 */
struct rtrs_permit {
	enum rtrs_clt_con_type con_type;
	unsigned int cpu_id;
	unsigned int mem_id;
	unsigned int mem_off;
};

/**
 * rtrs_clt_io_req - describes one inflight IO request
 */
struct rtrs_clt_io_req {
	struct list_head list;
	struct rtrs_iu *iu;
	struct scatterlist *sglist; /* list holding user data */
	unsigned int sg_cnt;
	unsigned int sg_size;
	unsigned int data_len;
	unsigned int usr_len;
	void *priv;
	bool in_use;
	struct rtrs_clt_con *con;
	struct rtrs_sg_desc *desc;
	struct ib_sge *sge;
	struct rtrs_permit *permit;
	enum dma_data_direction dir;
	void (*conf)(void *priv, int errno);
	unsigned long start_jiffies;

	struct ib_mr *mr;
	struct ib_cqe inv_cqe;
	struct completion inv_comp;
	int inv_errno;
	bool need_inv_comp;
	bool need_inv;
};
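
/*
 * Completion sketch (illustrative, not the literal call site): once the
 * server response for this request arrives, the transport is expected to
 * invoke
 *
 *	req->conf(req->priv, errno);
 *
 * with errno == 0 on success, which lets the upper layer (e.g. a block
 * driver on top of RTRS) complete its own request.
 */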

struct rtrs_rbuf {
	u64 addr;
	u32 rkey;
};
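
/*
 * Descriptive note: an rtrs_rbuf names one server-side buffer (remote address
 * plus rkey) that the client targets with RDMA writes; sess->rbufs is assumed
 * to hold one entry per queue slot.
 */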

struct rtrs_clt_sess {
	struct rtrs_sess s;
	struct rtrs_clt *clt;
	wait_queue_head_t state_wq;
	enum rtrs_clt_state state;
	atomic_t connected_cnt;
	struct mutex init_mutex;
	struct rtrs_clt_io_req *reqs;
	struct delayed_work reconnect_dwork;
	struct work_struct close_work;
	unsigned int reconnect_attempts;
	bool established;
	struct rtrs_rbuf *rbufs;
	size_t max_io_size;
	u32 max_hdr_size;
	u32 chunk_size;
	size_t queue_depth;
	u32 max_pages_per_mr;
	int max_send_sge;
	u32 flags;
	struct kobject kobj;
	u8 for_new_clt;
	struct rtrs_clt_stats *stats;
	/* cache hca_port and hca_name to display in sysfs */
	u8 hca_port;
	char hca_name[IB_DEVICE_NAME_MAX];
	struct list_head __percpu *mp_skip_entry;
};

struct rtrs_clt {
	struct list_head paths_list; /* rcu protected list */
	size_t paths_num;
	struct rtrs_clt_sess __rcu * __percpu *pcpu_path;
	uuid_t paths_uuid;
	int paths_up;
	struct mutex paths_mutex;
	struct mutex paths_ev_mutex;
	char sessname[NAME_MAX];
	u16 port;
	unsigned int max_reconnect_attempts;
	unsigned int reconnect_delay_sec;
	unsigned int max_segments;
	size_t max_segment_size;
	void *permits;
	unsigned long *permits_map;
	size_t queue_depth;
	size_t max_io_size;
	wait_queue_head_t permits_wait;
	size_t pdu_sz;
	void *priv;
	void (*link_ev)(void *priv, enum rtrs_clt_link_ev ev);
	struct device dev;
	struct kobject *kobj_paths;
	enum rtrs_mp_policy mp_policy;
};
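
/*
 * Illustrative only: a user of the client (e.g. a block driver) learns about
 * path state through the link_ev callback. The event names below come from
 * rtrs.h and the helpers are hypothetical:
 *
 *	static void my_link_ev(void *priv, enum rtrs_clt_link_ev ev)
 *	{
 *		if (ev == RTRS_CLT_LINK_EV_DISCONNECTED)
 *			pause_io(priv);		// hypothetical helper
 *		else if (ev == RTRS_CLT_LINK_EV_RECONNECTED)
 *			resume_io(priv);	// hypothetical helper
 *	}
 */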

static inline struct rtrs_clt_con *to_clt_con(struct rtrs_con *c)
{
	return container_of(c, struct rtrs_clt_con, c);
}

static inline struct rtrs_clt_sess *to_clt_sess(struct rtrs_sess *s)
{
	return container_of(s, struct rtrs_clt_sess, s);
}

static inline int permit_size(struct rtrs_clt *clt)
{
	return sizeof(struct rtrs_permit) + clt->pdu_sz;
}

static inline struct rtrs_permit *get_permit(struct rtrs_clt *clt, int idx)
{
	return (struct rtrs_permit *)(clt->permits + permit_size(clt) * idx);
}
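
/*
 * Layout sketch (illustrative; the allocation itself is done in rtrs-clt.c):
 * permits is assumed to be one flat buffer of queue_depth slots, each
 * permit_size(clt) bytes, so a slot holds the struct rtrs_permit immediately
 * followed by the caller's private PDU of clt->pdu_sz bytes:
 *
 *	struct rtrs_permit *permit = get_permit(clt, idx);
 *	void *pdu = permit + 1;		// per-request PDU area
 */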

int rtrs_clt_reconnect_from_sysfs(struct rtrs_clt_sess *sess);
int rtrs_clt_disconnect_from_sysfs(struct rtrs_clt_sess *sess);
int rtrs_clt_create_path_from_sysfs(struct rtrs_clt *clt,
				    struct rtrs_addr *addr);
int rtrs_clt_remove_path_from_sysfs(struct rtrs_clt_sess *sess,
				    const struct attribute *sysfs_self);

void rtrs_clt_set_max_reconnect_attempts(struct rtrs_clt *clt, int value);
int rtrs_clt_get_max_reconnect_attempts(const struct rtrs_clt *clt);
void free_sess(struct rtrs_clt_sess *sess);

/* rtrs-clt-stats.c */

int rtrs_clt_init_stats(struct rtrs_clt_stats *stats);

void rtrs_clt_inc_failover_cnt(struct rtrs_clt_stats *s);

void rtrs_clt_update_wc_stats(struct rtrs_clt_con *con);
void rtrs_clt_update_all_stats(struct rtrs_clt_io_req *req, int dir);

int rtrs_clt_reset_rdma_lat_distr_stats(struct rtrs_clt_stats *stats,
					bool enable);
ssize_t rtrs_clt_stats_rdma_lat_distr_to_str(struct rtrs_clt_stats *stats,
					     char *page, size_t len);
int rtrs_clt_reset_cpu_migr_stats(struct rtrs_clt_stats *stats, bool enable);
int rtrs_clt_stats_migration_cnt_to_str(struct rtrs_clt_stats *stats, char *buf,
					size_t len);
int rtrs_clt_reset_reconnects_stat(struct rtrs_clt_stats *stats, bool enable);
int rtrs_clt_stats_reconnects_to_str(struct rtrs_clt_stats *stats, char *buf,
				     size_t len);
int rtrs_clt_reset_wc_comp_stats(struct rtrs_clt_stats *stats, bool enable);
int rtrs_clt_stats_wc_completion_to_str(struct rtrs_clt_stats *stats, char *buf,
					size_t len);
int rtrs_clt_reset_rdma_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_stats_rdma_to_str(struct rtrs_clt_stats *stats,
				   char *page, size_t len);
int rtrs_clt_reset_all_stats(struct rtrs_clt_stats *stats, bool enable);
ssize_t rtrs_clt_reset_all_help(struct rtrs_clt_stats *stats,
				char *page, size_t len);

/* rtrs-clt-sysfs.c */

int rtrs_clt_create_sysfs_root_files(struct rtrs_clt *clt);
void rtrs_clt_destroy_sysfs_root_folders(struct rtrs_clt *clt);
void rtrs_clt_destroy_sysfs_root_files(struct rtrs_clt *clt);

int rtrs_clt_create_sess_files(struct rtrs_clt_sess *sess);
void rtrs_clt_destroy_sess_files(struct rtrs_clt_sess *sess,
				 const struct attribute *sysfs_self);

#endif /* RTRS_CLT_H */