xref: /OK3568_Linux_fs/kernel/drivers/block/rnbd/rnbd-clt.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * RDMA Network Block Driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2014 - 2018 ProfitBricks GmbH. All rights reserved.
6*4882a593Smuzhiyun  * Copyright (c) 2018 - 2019 1&1 IONOS Cloud GmbH. All rights reserved.
7*4882a593Smuzhiyun  * Copyright (c) 2019 - 2020 1&1 IONOS SE. All rights reserved.
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #ifndef RNBD_CLT_H
11*4882a593Smuzhiyun #define RNBD_CLT_H
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/wait.h>
14*4882a593Smuzhiyun #include <linux/in.h>
15*4882a593Smuzhiyun #include <linux/inet.h>
16*4882a593Smuzhiyun #include <linux/blk-mq.h>
17*4882a593Smuzhiyun #include <linux/refcount.h>
18*4882a593Smuzhiyun 
19*4882a593Smuzhiyun #include <rtrs.h>
20*4882a593Smuzhiyun #include "rnbd-proto.h"
21*4882a593Smuzhiyun #include "rnbd-log.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun /* Max. number of segments per IO request, Mellanox Connect X ~ Connect X5,
 * choose minimal 30 for all, minus 1 for internal protocol, so 29.
25*4882a593Smuzhiyun  */
26*4882a593Smuzhiyun #define BMAX_SEGMENTS 29
27*4882a593Smuzhiyun /*  time in seconds between reconnect tries, default to 30 s */
28*4882a593Smuzhiyun #define RECONNECT_DELAY 30
/*
 * Number of times to reconnect on error before giving up, 0 for disabled,
 * -1 for forever
 */
33*4882a593Smuzhiyun #define MAX_RECONNECTS -1
34*4882a593Smuzhiyun 
/*
 * Lifecycle states of a mapped client device (stored in
 * rnbd_clt_dev.dev_state, protected by rnbd_clt_dev.lock).
 */
enum rnbd_clt_dev_state {
	DEV_STATE_INIT,
	DEV_STATE_MAPPED,
	/* session lost; device kept around, presumably awaiting reconnect —
	 * confirm transitions in rnbd-clt.c
	 */
	DEV_STATE_MAPPED_DISCONNECTED,
	DEV_STATE_UNMAPPED,
};
41*4882a593Smuzhiyun 
/*
 * Completion context embedded in an IU: a waiter sleeps on @wait and reads
 * the operation result from @errno once woken.
 */
struct rnbd_iu_comp {
	wait_queue_head_t wait;
	int errno;	/* 0 on success, negative error code otherwise */
};
46*4882a593Smuzhiyun 
/*
 * IO unit: per-transfer context carrying either a block-layer request or a
 * user (control) message, plus the RTRS permit and scatterlist used to send
 * it over the wire.
 */
struct rnbd_iu {
	union {
		struct request *rq; /* for block io */
		void *buf; /* for user messages */
	};
	struct rtrs_permit	*permit;	/* RTRS send permit for this IU */
	union {
		/* use to send msg associated with a dev */
		struct rnbd_clt_dev *dev;
		/* use to send msg associated with a sess */
		struct rnbd_clt_session *sess;
	};
	/* payload scatterlist; sized to the BMAX_SEGMENTS segment limit */
	struct scatterlist	sglist[BMAX_SEGMENTS];
	struct work_struct	work;	/* deferred handling of this IU — see queue_work() callers */
	int			errno;	/* operation result, 0 on success */
	struct rnbd_iu_comp	comp;	/* waiter woken on completion */
	/* NOTE(review): plain atomic_t used as a refcount rather than
	 * refcount_t — no overflow protection; verify get/put pairing.
	 */
	atomic_t		refcount;
};
65*4882a593Smuzhiyun 
/*
 * Per-CPU list of queues with requests waiting to be requeued
 * (one instance per CPU via rnbd_clt_session.cpu_queues).
 */
struct rnbd_cpu_qlist {
	struct list_head	requeue_list;
	spinlock_t		requeue_lock;	/* protects requeue_list */
	unsigned int		cpu;		/* owning CPU number */
};
71*4882a593Smuzhiyun 
/*
 * One client session to a server over RTRS; shared by all devices mapped
 * through it (tracked in @devs_list) and freed when @refcount drops to zero.
 */
struct rnbd_clt_session {
	struct list_head        list;		/* entry in the global session list */
	struct rtrs_clt        *rtrs;		/* underlying RTRS client handle */
	wait_queue_head_t       rtrs_waitq;	/* waiters for rtrs_ready changes */
	bool                    rtrs_ready;
	struct rnbd_cpu_qlist	__percpu
				*cpu_queues;	/* per-CPU requeue lists */
	/* presumably a bit per CPU with a non-empty cpu_queues entry —
	 * confirm against the requeue path in rnbd-clt.c
	 */
	DECLARE_BITMAP(cpu_queues_bm, NR_CPUS);
	int	__percpu	*cpu_rr; /* per-cpu var for CPU round-robin */
	atomic_t		busy;		/* in-flight activity counter */
	size_t			queue_depth;	/* max outstanding IOs on the session */
	u32			max_io_size;	/* max IO size in bytes */
	struct blk_mq_tag_set	tag_set;	/* shared blk-mq tag set for all devs */
	struct mutex		lock; /* protects state and devs_list */
	struct list_head        devs_list; /* list of struct rnbd_clt_dev */
	refcount_t		refcount;
	char			sessname[NAME_MAX];	/* unique session name */
	u8			ver; /* protocol version */
};
91*4882a593Smuzhiyun 
/**
 * Submission queue: per-hardware-queue context tying a blk-mq hctx to its
 * device, with a list head for parking it on a per-CPU requeue list.
 */
struct rnbd_queue {
	struct list_head	requeue_list;	/* entry in rnbd_cpu_qlist.requeue_list */
	unsigned long		in_list;	/* non-zero while linked on a requeue list */
	struct rnbd_clt_dev	*dev;		/* owning device */
	struct blk_mq_hw_ctx	*hctx;		/* associated blk-mq hardware context */
};
101*4882a593Smuzhiyun 
/*
 * A mapped remote block device on the client side: one gendisk backed by a
 * server-side device reached through @sess.  Geometry/limit fields mirror
 * what the server reported at map time.
 */
struct rnbd_clt_dev {
	struct rnbd_clt_session	*sess;		/* session this device belongs to */
	struct request_queue	*queue;		/* blk-mq request queue */
	struct rnbd_queue	*hw_queues;	/* array of per-hctx submission queues */
	u32			device_id;	/* id assigned by the server */
	/* local Idr index - used to track minor number allocations. */
	u32			clt_device_id;
	struct mutex		lock;		/* protects dev_state */
	enum rnbd_clt_dev_state	dev_state;
	char			*pathname;	/* device path on the server */
	enum rnbd_access_mode	access_mode;
	bool			read_only;
	bool			rotational;
	/* queue limits as reported by the server */
	u32			max_hw_sectors;
	u32			max_write_same_sectors;
	u32			max_discard_sectors;
	u32			discard_granularity;
	u32			discard_alignment;
	u16			secure_discard;
	u16			physical_block_size;
	u16			logical_block_size;
	u16			max_segments;
	size_t			nsectors;	/* device size in sectors */
	u64			size;		/* device size in bytes */
	struct list_head        list;		/* entry in sess->devs_list */
	struct gendisk		*gd;
	struct kobject		kobj;		/* sysfs representation */
	char			*blk_symlink_name;	/* /sys/block symlink name */
	refcount_t		refcount;
	struct work_struct	unmap_on_rmmod_work;	/* unmap on module removal */
};
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun /* rnbd-clt.c */
135*4882a593Smuzhiyun 
/*
 * Map a remote device: establish (or reuse) the session @sessname over the
 * given RTRS @paths and create the local block device for @pathname with
 * @access_mode.  Presumably returns ERR_PTR() on failure — confirm against
 * the definition in rnbd-clt.c.
 */
struct rnbd_clt_dev *rnbd_clt_map_device(const char *sessname,
					   struct rtrs_addr *paths,
					   size_t path_cnt, u16 port_nr,
					   const char *pathname,
					   enum rnbd_access_mode access_mode);
/*
 * Unmap @dev; @force unmaps even while busy, @sysfs_self lets the sysfs
 * handler remove its own attribute safely.  Returns 0 or a negative errno.
 */
int rnbd_clt_unmap_device(struct rnbd_clt_dev *dev, bool force,
			   const struct attribute *sysfs_self);

/* Re-map a device whose session was lost.  Returns 0 or a negative errno. */
int rnbd_clt_remap_device(struct rnbd_clt_dev *dev);
/* Resize the local disk to @newsize (units per the server protocol). */
int rnbd_clt_resize_disk(struct rnbd_clt_dev *dev, size_t newsize);
146*4882a593Smuzhiyun 
/* rnbd-clt-sysfs.c */

/* Create the module-level sysfs entries.  Returns 0 or a negative errno. */
int rnbd_clt_create_sysfs_files(void);

/* Tear down the module-level sysfs entries created above. */
void rnbd_clt_destroy_sysfs_files(void);
void rnbd_clt_destroy_default_group(void);

/* Remove the /sys/block symlink created for @dev (see blk_symlink_name). */
void rnbd_clt_remove_dev_symlink(struct rnbd_clt_dev *dev);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun #endif /* RNBD_CLT_H */
157