1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2016 Chelsio Communications, Inc.
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifndef __CXGBIT_H__
7*4882a593Smuzhiyun #define __CXGBIT_H__
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/mutex.h>
10*4882a593Smuzhiyun #include <linux/list.h>
11*4882a593Smuzhiyun #include <linux/spinlock.h>
12*4882a593Smuzhiyun #include <linux/idr.h>
13*4882a593Smuzhiyun #include <linux/completion.h>
14*4882a593Smuzhiyun #include <linux/netdevice.h>
15*4882a593Smuzhiyun #include <linux/sched.h>
16*4882a593Smuzhiyun #include <linux/pci.h>
17*4882a593Smuzhiyun #include <linux/dma-mapping.h>
18*4882a593Smuzhiyun #include <linux/inet.h>
19*4882a593Smuzhiyun #include <linux/wait.h>
20*4882a593Smuzhiyun #include <linux/kref.h>
21*4882a593Smuzhiyun #include <linux/timer.h>
22*4882a593Smuzhiyun #include <linux/io.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include <asm/byteorder.h>
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun #include <net/net_namespace.h>
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun #include <target/iscsi/iscsi_transport.h>
29*4882a593Smuzhiyun #include <iscsi_target_parameters.h>
30*4882a593Smuzhiyun #include <iscsi_target_login.h>
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun #include "t4_regs.h"
33*4882a593Smuzhiyun #include "t4_msg.h"
34*4882a593Smuzhiyun #include "cxgb4.h"
35*4882a593Smuzhiyun #include "cxgb4_uld.h"
36*4882a593Smuzhiyun #include "l2t.h"
37*4882a593Smuzhiyun #include "libcxgb_ppm.h"
38*4882a593Smuzhiyun #include "cxgbit_lro.h"
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun extern struct mutex cdev_list_lock;
41*4882a593Smuzhiyun extern struct list_head cdev_list_head;
42*4882a593Smuzhiyun struct cxgbit_np;
43*4882a593Smuzhiyun
44*4882a593Smuzhiyun struct cxgbit_sock;
45*4882a593Smuzhiyun
/*
 * Per-command driver-private state, used to track DDP (direct data
 * placement) setup and teardown for one iSCSI command.
 */
struct cxgbit_cmd {
	struct scatterlist sg;			/* data buffer scatterlist */
	struct cxgbi_task_tag_info ttinfo;	/* task-tag/ppod info for DDP */
	bool setup_ddp;				/* DDP mapping should be set up */
	bool release;				/* DDP resources need release */
};
52*4882a593Smuzhiyun
/*
 * Largest ISO (segmentation-offload) payload we can hand to the hardware:
 * bounded by what fits in one skb's page frags, capped at 64KB - 1.
 */
#define CXGBIT_MAX_ISO_PAYLOAD	\
	min_t(u32, MAX_SKB_FRAGS * PAGE_SIZE, 65535)
55*4882a593Smuzhiyun
/* Parameters describing one ISO (multi-PDU tx offload) burst. */
struct cxgbit_iso_info {
	u8 flags;	/* ISO CPL flag bits */
	u32 mpdu;	/* per-PDU payload size */
	u32 len;	/* total burst length */
	u32 burst_len;	/* max burst size — presumably from negotiated
			 * MaxBurstLength; confirm against users */
};
62*4882a593Smuzhiyun
/* Flag bits kept in cxgbit_skcb_flags() of each skb's control block. */
enum cxgbit_skcb_flags {
	SKCBF_TX_NEED_HDR	= (1 << 0), /* packet needs a header */
	SKCBF_TX_FLAG_COMPL	= (1 << 1), /* wr completion flag */
	SKCBF_TX_ISO		= (1 << 2), /* iso cpl in tx skb */
	SKCBF_RX_LRO		= (1 << 3), /* lro skb */
};
69*4882a593Smuzhiyun
/* Receive-side skb control-block state. */
struct cxgbit_skb_rx_cb {
	u8 opcode;		/* CPL opcode carried in this skb */
	void *pdu_cb;		/* LRO pdu control block (see cxgbit_lro.h) */
	/* handler to run when the skb is processed off the backlog queue */
	void (*backlog_fn)(struct cxgbit_sock *, struct sk_buff *);
};
75*4882a593Smuzhiyun
/* Transmit-side skb control-block state. */
struct cxgbit_skb_tx_cb {
	u8 submode;	/* digest submode (CXGBIT_SUBMODE_* bits) */
	u32 extra_len;	/* extra bytes (e.g. digests/padding) beyond payload */
};
80*4882a593Smuzhiyun
/*
 * Driver view of skb->cb.  The first anonymous struct is used while the
 * skb sits on driver queues; the second overlays the layout expected by
 * the shared L2T code plus the pending-WR list linkage.
 */
union cxgbit_skb_cb {
	struct {
		u8 flags;			/* enum cxgbit_skcb_flags */
		union {
			struct cxgbit_skb_tx_cb tx;
			struct cxgbit_skb_rx_cb rx;
		};
	};

	struct {
		/* This member must be first. */
		struct l2t_skb_cb l2t;		/* layout required by l2t.c */
		struct sk_buff *wr_next;	/* next skb on pending-WR list */
	};
};
96*4882a593Smuzhiyun
/* Accessors for the driver-private control block stored in skb->cb. */
#define CXGBIT_SKB_CB(skb)	((union cxgbit_skb_cb *)&((skb)->cb[0]))
#define cxgbit_skcb_flags(skb)		(CXGBIT_SKB_CB(skb)->flags)
#define cxgbit_skcb_submode(skb)	(CXGBIT_SKB_CB(skb)->tx.submode)
#define cxgbit_skcb_tx_wr_next(skb)	(CXGBIT_SKB_CB(skb)->wr_next)
#define cxgbit_skcb_tx_extralen(skb)	(CXGBIT_SKB_CB(skb)->tx.extra_len)
#define cxgbit_skcb_rx_opcode(skb)	(CXGBIT_SKB_CB(skb)->rx.opcode)
#define cxgbit_skcb_rx_backlog_fn(skb)	(CXGBIT_SKB_CB(skb)->rx.backlog_fn)
#define cxgbit_rx_pdu_cb(skb)		(CXGBIT_SKB_CB(skb)->rx.pdu_cb)
105*4882a593Smuzhiyun
cplhdr(struct sk_buff * skb)106*4882a593Smuzhiyun static inline void *cplhdr(struct sk_buff *skb)
107*4882a593Smuzhiyun {
108*4882a593Smuzhiyun return skb->data;
109*4882a593Smuzhiyun }
110*4882a593Smuzhiyun
/* Bit numbers for cxgbit_device->flags (used with test_bit/set_bit). */
enum cxgbit_cdev_flags {
	CDEV_STATE_UP = 0,	/* device is up and usable */
	CDEV_ISO_ENABLE,	/* hardware ISO tx offload available */
	CDEV_DDP_ENABLE,	/* hardware DDP rx placement available */
};
116*4882a593Smuzhiyun
#define NP_INFO_HASH_SIZE 32

/* Hash-chain node mapping a hardware server tid (stid) to its cxgbit_np. */
struct np_info {
	struct np_info *next;		/* next entry on the hash chain */
	struct cxgbit_np *cnp;		/* listening endpoint for this stid */
	unsigned int stid;		/* hardware server tid */
};
124*4882a593Smuzhiyun
/* A list head bundled with the spinlock that protects it. */
struct cxgbit_list_head {
	struct list_head list;
	/* device lock */
	spinlock_t lock;
};
130*4882a593Smuzhiyun
/* Per-adapter state, one instance per Chelsio device bound to this ULD. */
struct cxgbit_device {
	struct list_head list;		/* entry on global cdev_list_head */
	struct cxgb4_lld_info lldi;	/* lower-level driver info from cxgb4 */
	struct np_info *np_hash_tab[NP_INFO_HASH_SIZE];	/* stid -> cnp map */
	/* np lock */
	spinlock_t np_lock;
	u8 selectq[MAX_NPORTS][2];	/* per-port queue selection table */
	struct cxgbit_list_head cskq;	/* locked list of connections */
	u32 mdsl;			/* max data segment length */
	struct kref kref;		/* released via _cxgbit_free_cdev() */
	unsigned long flags;		/* enum cxgbit_cdev_flags bits */
};
143*4882a593Smuzhiyun
/* Completion used to wait for a firmware work-request reply. */
struct cxgbit_wr_wait {
	struct completion completion;
	int ret;			/* result reported by the reply */
};
148*4882a593Smuzhiyun
/* Connection state machine for a cxgbit_sock. */
enum cxgbit_csk_state {
	CSK_STATE_IDLE = 0,
	CSK_STATE_LISTEN,
	CSK_STATE_CONNECTING,
	CSK_STATE_ESTABLISHED,
	CSK_STATE_ABORTING,
	CSK_STATE_CLOSING,
	CSK_STATE_MORIBUND,
	CSK_STATE_DEAD,
};
159*4882a593Smuzhiyun
/* Bit numbers for cxgbit_sock_common->flags (used with test_bit/set_bit). */
enum cxgbit_csk_flags {
	CSK_TX_DATA_SENT = 0,	/* first tx data (flowc) has been sent */
	CSK_LOGIN_PDU_DONE,
	CSK_LOGIN_DONE,
	CSK_DDP_ENABLE,		/* DDP enabled on this connection */
	CSK_ABORT_RPL_WAIT,	/* waiting for abort reply from hw */
};
167*4882a593Smuzhiyun
/* State shared between listening endpoints (cxgbit_np) and connections. */
struct cxgbit_sock_common {
	struct cxgbit_device *cdev;		/* owning adapter */
	struct sockaddr_storage local_addr;
	struct sockaddr_storage remote_addr;
	struct cxgbit_wr_wait wr_wait;		/* fw work-request rendezvous */
	enum cxgbit_csk_state state;
	unsigned long flags;			/* enum cxgbit_csk_flags bits */
};
176*4882a593Smuzhiyun
/* A listening endpoint (network portal) offloaded to the adapter. */
struct cxgbit_np {
	struct cxgbit_sock_common com;
	wait_queue_head_t accept_wait;	/* waiters for incoming connections */
	struct iscsi_np *np;		/* core iSCSI network portal */
	struct completion accept_comp;
	struct list_head np_accept_list; /* connections awaiting accept */
	/* np accept lock */
	spinlock_t np_accept_lock;
	struct kref kref;		/* released via _cxgbit_free_cnp() */
	unsigned int stid;		/* hardware server tid */
};
188*4882a593Smuzhiyun
/*
 * Per-connection state for one offloaded iSCSI target TCP connection.
 * Protected by ->lock unless noted; tx credits and sequence numbers
 * mirror the hardware connection state.
 */
struct cxgbit_sock {
	struct cxgbit_sock_common com;
	struct cxgbit_np *cnp;		/* listening endpoint we arrived on */
	struct iscsi_conn *conn;	/* core iSCSI connection */
	struct l2t_entry *l2t;		/* L2 (neighbour) table entry */
	struct dst_entry *dst;		/* cached route */
	struct list_head list;		/* entry on cdev->cskq */
	struct sk_buff_head rxq;	/* received pdus awaiting processing */
	struct sk_buff_head txq;	/* skbs queued for transmit */
	struct sk_buff_head ppodq;	/* ppod writes for DDP setup */
	struct sk_buff_head backlogq;	/* rx deferred while lock is owned */
	struct sk_buff_head skbq;
	struct sk_buff *wr_pending_head; /* singly-linked list (via wr_next) */
	struct sk_buff *wr_pending_tail; /* of WRs awaiting hw credit return */
	struct sk_buff *skb;
	struct sk_buff *lro_skb;	/* current LRO aggregation skb */
	struct sk_buff *lro_hskb;	/* LRO header skb */
	struct list_head accept_node;	/* entry on cnp->np_accept_list */
	/* socket lock */
	spinlock_t lock;
	wait_queue_head_t waitq;
	bool lock_owner;		/* true while a thread owns the sock */
	struct kref kref;		/* released via _cxgbit_free_csk() */
	u32 max_iso_npdu;		/* max pdus per ISO burst */
	u32 wr_cred;			/* available work-request credits */
	u32 wr_una_cred;		/* credits not yet acked by hw */
	u32 wr_max_cred;		/* total credit budget */
	u32 snd_una;			/* oldest unacked tx sequence */
	u32 tid;			/* hardware connection tid */
	u32 snd_nxt;
	u32 rcv_nxt;
	u32 smac_idx;			/* source MAC index */
	u32 tx_chan;			/* tx channel */
	u32 mtu;
	u32 write_seq;
	u32 rx_credits;			/* rx credits to return to hw */
	u32 snd_win;
	u32 rcv_win;
	u16 mss;
	u16 emss;			/* effective mss */
	u16 plen;
	u16 rss_qid;			/* rx queue for this connection */
	u16 txq_idx;
	u16 ctrlq_idx;
	u8 tos;
	u8 port_id;
/* digest offload submode bits (also stored per-skb in tx cb) */
#define CXGBIT_SUBMODE_HCRC 0x1
#define CXGBIT_SUBMODE_DCRC 0x2
	u8 submode;
#ifdef CONFIG_CHELSIO_T4_DCB
	u8 dcb_priority;		/* DCB priority for this connection */
#endif
	u8 snd_wscale;			/* negotiated TCP window scale */
};
243*4882a593Smuzhiyun
/* kref release callbacks — invoked when the last reference is dropped. */
void _cxgbit_free_cdev(struct kref *kref);
void _cxgbit_free_csk(struct kref *kref);
void _cxgbit_free_cnp(struct kref *kref);
247*4882a593Smuzhiyun
/* Take a reference on @cdev. */
static inline void cxgbit_get_cdev(struct cxgbit_device *cdev)
{
	kref_get(&cdev->kref);
}
252*4882a593Smuzhiyun
/* Drop a reference on @cdev; frees it when the count reaches zero. */
static inline void cxgbit_put_cdev(struct cxgbit_device *cdev)
{
	kref_put(&cdev->kref, _cxgbit_free_cdev);
}
257*4882a593Smuzhiyun
/* Take a reference on connection @csk. */
static inline void cxgbit_get_csk(struct cxgbit_sock *csk)
{
	kref_get(&csk->kref);
}
262*4882a593Smuzhiyun
/* Drop a reference on @csk; frees it when the count reaches zero. */
static inline void cxgbit_put_csk(struct cxgbit_sock *csk)
{
	kref_put(&csk->kref, _cxgbit_free_csk);
}
267*4882a593Smuzhiyun
/* Take a reference on listening endpoint @cnp. */
static inline void cxgbit_get_cnp(struct cxgbit_np *cnp)
{
	kref_get(&cnp->kref);
}
272*4882a593Smuzhiyun
/* Drop a reference on @cnp; frees it when the count reaches zero. */
static inline void cxgbit_put_cnp(struct cxgbit_np *cnp)
{
	kref_put(&cnp->kref, _cxgbit_free_cnp);
}
277*4882a593Smuzhiyun
cxgbit_sock_reset_wr_list(struct cxgbit_sock * csk)278*4882a593Smuzhiyun static inline void cxgbit_sock_reset_wr_list(struct cxgbit_sock *csk)
279*4882a593Smuzhiyun {
280*4882a593Smuzhiyun csk->wr_pending_tail = NULL;
281*4882a593Smuzhiyun csk->wr_pending_head = NULL;
282*4882a593Smuzhiyun }
283*4882a593Smuzhiyun
/* Return the oldest pending work-request skb without dequeuing it. */
static inline struct sk_buff *cxgbit_sock_peek_wr(const struct cxgbit_sock *csk)
{
	return csk->wr_pending_head;
}
288*4882a593Smuzhiyun
289*4882a593Smuzhiyun static inline void
cxgbit_sock_enqueue_wr(struct cxgbit_sock * csk,struct sk_buff * skb)290*4882a593Smuzhiyun cxgbit_sock_enqueue_wr(struct cxgbit_sock *csk, struct sk_buff *skb)
291*4882a593Smuzhiyun {
292*4882a593Smuzhiyun cxgbit_skcb_tx_wr_next(skb) = NULL;
293*4882a593Smuzhiyun
294*4882a593Smuzhiyun skb_get(skb);
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun if (!csk->wr_pending_head)
297*4882a593Smuzhiyun csk->wr_pending_head = skb;
298*4882a593Smuzhiyun else
299*4882a593Smuzhiyun cxgbit_skcb_tx_wr_next(csk->wr_pending_tail) = skb;
300*4882a593Smuzhiyun csk->wr_pending_tail = skb;
301*4882a593Smuzhiyun }
302*4882a593Smuzhiyun
cxgbit_sock_dequeue_wr(struct cxgbit_sock * csk)303*4882a593Smuzhiyun static inline struct sk_buff *cxgbit_sock_dequeue_wr(struct cxgbit_sock *csk)
304*4882a593Smuzhiyun {
305*4882a593Smuzhiyun struct sk_buff *skb = csk->wr_pending_head;
306*4882a593Smuzhiyun
307*4882a593Smuzhiyun if (likely(skb)) {
308*4882a593Smuzhiyun csk->wr_pending_head = cxgbit_skcb_tx_wr_next(skb);
309*4882a593Smuzhiyun cxgbit_skcb_tx_wr_next(skb) = NULL;
310*4882a593Smuzhiyun }
311*4882a593Smuzhiyun return skb;
312*4882a593Smuzhiyun }
313*4882a593Smuzhiyun
/* Handler signature for incoming CPL messages, indexed by opcode. */
typedef void (*cxgbit_cplhandler_func)(struct cxgbit_device *,
				       struct sk_buff *);

/* connection setup / teardown (cxgbit_cm.c) */
int cxgbit_setup_np(struct iscsi_np *, struct sockaddr_storage *);
int cxgbit_setup_conn_digest(struct cxgbit_sock *);
int cxgbit_accept_np(struct iscsi_np *, struct iscsi_conn *);
void cxgbit_free_np(struct iscsi_np *);
void cxgbit_abort_conn(struct cxgbit_sock *csk);
void cxgbit_free_conn(struct iscsi_conn *);
extern cxgbit_cplhandler_func cxgbit_cplhandlers[NUM_CPL_CMDS];

/* pdu tx/rx and work-request plumbing */
int cxgbit_get_login_rx(struct iscsi_conn *, struct iscsi_login *);
int cxgbit_rx_data_ack(struct cxgbit_sock *);
int cxgbit_l2t_send(struct cxgbit_device *, struct sk_buff *,
		    struct l2t_entry *);
void cxgbit_push_tx_frames(struct cxgbit_sock *);
int cxgbit_put_login_tx(struct iscsi_conn *, struct iscsi_login *, u32);
int cxgbit_xmit_pdu(struct iscsi_conn *, struct iscsi_cmd *,
		    struct iscsi_datain_req *, const void *, u32);
void cxgbit_get_r2t_ttt(struct iscsi_conn *, struct iscsi_cmd *,
			struct iscsi_r2t *);
u32 cxgbit_send_tx_flowc_wr(struct cxgbit_sock *);
int cxgbit_ofld_send(struct cxgbit_device *, struct sk_buff *);
void cxgbit_get_rx_pdu(struct iscsi_conn *);
int cxgbit_validate_params(struct iscsi_conn *);
struct cxgbit_device *cxgbit_find_device(struct net_device *, u8 *);

/* DDP */
int cxgbit_ddp_init(struct cxgbit_device *);
int cxgbit_setup_conn_pgidx(struct cxgbit_sock *, u32);
int cxgbit_reserve_ttt(struct cxgbit_sock *, struct iscsi_cmd *);
void cxgbit_unmap_cmd(struct iscsi_conn *, struct iscsi_cmd *);
345*4882a593Smuzhiyun
/* Return the page-pod manager associated with @cdev's lower-level driver. */
static inline
struct cxgbi_ppm *cdev2ppm(struct cxgbit_device *cdev)
{
	return (struct cxgbi_ppm *)(*cdev->lldi.iscsi_ppm);
}
351*4882a593Smuzhiyun #endif /* __CXGBIT_H__ */
352