/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * RDMA Transport Layer
 *
 * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
 * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
 * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
 */

#ifndef RTRS_SRV_H
#define RTRS_SRV_H

#include <linux/device.h>
#include <linux/refcount.h>
#include "rtrs-pri.h"

/*
 * enum rtrs_srv_state - Server states.
 */
enum rtrs_srv_state {
	RTRS_SRV_CONNECTING,
	RTRS_SRV_CONNECTED,
	RTRS_SRV_CLOSING,
	RTRS_SRV_CLOSED,
};
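
/*
 * Note (inferred from the state names, not defined here): a session normally
 * moves CONNECTING -> CONNECTED and is torn down via CLOSING -> CLOSED; see
 * the state-change helpers in rtrs-srv.c for the authoritative transitions.
 */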

/* Stats for read and write operations.
 * See Documentation/ABI/testing/sysfs-class-rtrs-server for details.
 */
struct rtrs_srv_stats_rdma_stats {
	struct {
		atomic64_t	cnt;
		atomic64_t	size_total;
	} dir[2];
};
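
/*
 * Illustrative note (an assumption about usage, not defined in this header):
 * dir[] is indexed by data direction, i.e. dir[READ] and dir[WRITE] using the
 * generic READ/WRITE macros, e.g. accounting one server-side write of @size
 * bytes would do:
 *
 *	atomic64_inc(&stats->rdma_stats.dir[WRITE].cnt);
 *	atomic64_add(size, &stats->rdma_stats.dir[WRITE].size_total);
 *
 * which is what rtrs_srv_update_rdma_stats() below does for one direction.
 */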

struct rtrs_srv_stats {
	struct kobject				kobj_stats;
	struct rtrs_srv_stats_rdma_stats	rdma_stats;
	struct rtrs_srv_sess			*sess;
};

struct rtrs_srv_con {
	struct rtrs_con		c;
	atomic_t		wr_cnt;
	atomic_t		sq_wr_avail;
	struct list_head	rsp_wr_wait_list;
	spinlock_t		rsp_wr_wait_lock;
};

/* Per-IO context in rtrs_srv; each inflight IO has one. */
struct rtrs_srv_op {
	struct rtrs_srv_con		*con;
	u32				msg_id;
	u8				dir;
	struct rtrs_msg_rdma_read	*rd_msg;
	struct ib_rdma_wr		tx_wr;
	struct ib_sge			tx_sg;
	struct list_head		wait_list;
	int				status;
};
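
/*
 * Informational sketch (field usage inferred from rtrs-srv.c, not guaranteed
 * by this header): tx_wr/tx_sg describe the RDMA WRITE carrying the response
 * back to the client, and wait_list queues the op on con->rsp_wr_wait_list
 * when the send queue has no free work requests (sq_wr_avail exhausted).
 */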

/*
 * Server-side memory region context: when always_invalidate=Y, we need
 * queue_depth memory regions so that each memory region can be invalidated.
 */
struct rtrs_srv_mr {
	struct ib_mr	*mr;
	struct sg_table	sgt;
	struct ib_cqe	inv_cqe;	/* only for always_invalidate=true */
	u32		msg_id;		/* only for always_invalidate=true */
	u32		msg_off;	/* only for always_invalidate=true */
	struct rtrs_iu	*iu;		/* send buffer for new rkey msg */
};

struct rtrs_srv_sess {
	struct rtrs_sess	s;
	struct rtrs_srv	*srv;
	struct work_struct	close_work;
	enum rtrs_srv_state	state;
	spinlock_t		state_lock;
	int			cur_cq_vector;
	struct rtrs_srv_op	**ops_ids;
	atomic_t		ids_inflight;
	wait_queue_head_t	ids_waitq;
	struct rtrs_srv_mr	*mrs;
	unsigned int		mrs_num;
	dma_addr_t		*dma_addr;
	bool			established;
	unsigned int		mem_bits;
	struct kobject		kobj;
	struct rtrs_srv_stats	*stats;
};

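/*
 * Informational note (inferred from the fields below; see rtrs-srv.c for the
 * authoritative semantics): one rtrs_srv instance is shared by all paths from
 * the same client, identified by paths_uuid, and each path is represented by
 * a struct rtrs_srv_sess linked into paths_list.
 */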
struct rtrs_srv {
	struct list_head	paths_list;
	int			paths_up;
	struct mutex		paths_ev_mutex;
	size_t			paths_num;
	struct mutex		paths_mutex;
	uuid_t			paths_uuid;
	refcount_t		refcount;
	struct rtrs_srv_ctx	*ctx;
	struct list_head	ctx_list;
	void			*priv;
	size_t			queue_depth;
	struct page		**chunks;
	struct device		dev;
	unsigned int		dev_ref;
	struct kobject		*kobj_paths;
};

struct rtrs_srv_ctx {
	struct rtrs_srv_ops ops;
	struct rdma_cm_id *cm_id_ip;
	struct rdma_cm_id *cm_id_ib;
	struct mutex srv_mutex;
	struct list_head srv_list;
};

struct rtrs_srv_ib_ctx {
	struct rtrs_srv_ctx	*srv_ctx;
	u16			port;
	struct mutex		ib_dev_mutex;
	int			ib_dev_count;
};

extern struct class *rtrs_dev_class;

void close_sess(struct rtrs_srv_sess *sess);

static inline void rtrs_srv_update_rdma_stats(struct rtrs_srv_stats *s,
					      size_t size, int d)
{
	atomic64_inc(&s->rdma_stats.dir[d].cnt);
	atomic64_add(size, &s->rdma_stats.dir[d].size_total);
}
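
/*
 * Typical call pattern (illustrative sketch only; the real call sites live in
 * rtrs-srv.c and may differ), assuming @d is READ or WRITE:
 *
 *	rtrs_srv_update_rdma_stats(sess->stats, data_len, WRITE);
 */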

/* functions which are implemented in rtrs-srv-stats.c */
int rtrs_srv_reset_rdma_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_stats_rdma_to_str(struct rtrs_srv_stats *stats,
				   char *page, size_t len);
int rtrs_srv_reset_wc_completion_stats(struct rtrs_srv_stats *stats,
				       bool enable);
int rtrs_srv_stats_wc_completion_to_str(struct rtrs_srv_stats *stats, char *buf,
					size_t len);
int rtrs_srv_reset_all_stats(struct rtrs_srv_stats *stats, bool enable);
ssize_t rtrs_srv_reset_all_help(struct rtrs_srv_stats *stats,
				char *page, size_t len);

/* functions which are implemented in rtrs-srv-sysfs.c */
int rtrs_srv_create_sess_files(struct rtrs_srv_sess *sess);
void rtrs_srv_destroy_sess_files(struct rtrs_srv_sess *sess);

#endif /* RTRS_SRV_H */