/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 */

#ifndef RXE_HDR_H
#define RXE_HDR_H

/* Extracted information about a packet is carried in the sk_buff
 * control block (cb) array, so it must fit in 48 bytes. For received
 * packets it is stored in the control block of the sk_buff.
 */
struct rxe_pkt_info {
	struct rxe_dev *rxe;		/* device that owns packet */
	struct rxe_qp *qp;		/* qp that owns packet */
	struct rxe_send_wqe *wqe;	/* send wqe */
	u8 *hdr;			/* points to bth */
	u32 mask;			/* useful info about pkt */
	u32 psn;			/* bth psn of packet */
	u16 pkey_index;			/* partition of pkt */
	u16 paylen;			/* length from bth start through icrc */
	u8 port_num;			/* port pkt received on */
	u8 opcode;			/* bth opcode of packet */
	u8 offset;			/* bth offset from pkt->hdr */
};

/* These accessors should be used only on received skbs */
static inline struct rxe_pkt_info *SKB_TO_PKT(struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(struct rxe_pkt_info) > sizeof(skb->cb));
	return (void *)skb->cb;
}

static inline struct sk_buff *PKT_TO_SKB(struct rxe_pkt_info *pkt)
{
	return container_of((void *)pkt, struct sk_buff, cb);
}
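
/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * how a receive-side caller is expected to use the accessor above.
 * It assumes the skb was queued by rxe, so its control block really
 * holds a struct rxe_pkt_info filled in by the rx path.
 */
static inline struct rxe_dev *example_pkt_dev(struct sk_buff *skb)
{
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	/* rxe, port_num, etc. are set by the receive path before use */
	return pkt->rxe;
}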

/*
 * IBA header types and methods
 *
 * Some of these are included for reference and completeness only since
 * rxe does not currently support the RD transport. Most of this could
 * be moved into the IB core; ib_pack.h has part of it but is incomplete.
 *
 * Header specific routines insert/extract values to/from headers.
 * The routines named __hhh_(set_)fff() take a pointer to an hhh header
 * and get (set) the fff field. The routines named hhh_(set_)fff() take
 * a packet info struct and locate the header and field based on the
 * opcode in the packet. Conversion between network and cpu byte order
 * is also done. See the illustrative sketch after bth_init() below.
 */

#define RXE_ICRC_SIZE (4)
#define RXE_MAX_HDR_LENGTH (80)

/******************************************************************************
 * Base Transport Header
 ******************************************************************************/
struct rxe_bth {
	u8 opcode;
	u8 flags;
	__be16 pkey;
	__be32 qpn;
	__be32 apsn;
};

#define BTH_TVER (0)
#define BTH_DEF_PKEY (0xffff)

#define BTH_SE_MASK (0x80)
#define BTH_MIG_MASK (0x40)
#define BTH_PAD_MASK (0x30)
#define BTH_TVER_MASK (0x0f)
#define BTH_FECN_MASK (0x80000000)
#define BTH_BECN_MASK (0x40000000)
#define BTH_RESV6A_MASK (0x3f000000)
#define BTH_QPN_MASK (0x00ffffff)
#define BTH_ACK_MASK (0x80000000)
#define BTH_RESV7_MASK (0x7f000000)
#define BTH_PSN_MASK (0x00ffffff)

static inline u8 __bth_opcode(void *arg)
{
	struct rxe_bth *bth = arg;

	return bth->opcode;
}

static inline void __bth_set_opcode(void *arg, u8 opcode)
{
	struct rxe_bth *bth = arg;

	bth->opcode = opcode;
}

static inline u8 __bth_se(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_SE_MASK & bth->flags);
}

static inline void __bth_set_se(void *arg, int se)
{
	struct rxe_bth *bth = arg;

	if (se)
		bth->flags |= BTH_SE_MASK;
	else
		bth->flags &= ~BTH_SE_MASK;
}

static inline u8 __bth_mig(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (BTH_MIG_MASK & bth->flags);
}

static inline void __bth_set_mig(void *arg, u8 mig)
{
	struct rxe_bth *bth = arg;

	if (mig)
		bth->flags |= BTH_MIG_MASK;
	else
		bth->flags &= ~BTH_MIG_MASK;
}

static inline u8 __bth_pad(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_PAD_MASK & bth->flags) >> 4;
}

static inline void __bth_set_pad(void *arg, u8 pad)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_PAD_MASK & (pad << 4)) |
			(~BTH_PAD_MASK & bth->flags);
}

static inline u8 __bth_tver(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_TVER_MASK & bth->flags;
}

static inline void __bth_set_tver(void *arg, u8 tver)
{
	struct rxe_bth *bth = arg;

	bth->flags = (BTH_TVER_MASK & tver) |
			(~BTH_TVER_MASK & bth->flags);
}

static inline u16 __bth_pkey(void *arg)
{
	struct rxe_bth *bth = arg;

	return be16_to_cpu(bth->pkey);
}

static inline void __bth_set_pkey(void *arg, u16 pkey)
{
	struct rxe_bth *bth = arg;

	bth->pkey = cpu_to_be16(pkey);
}

static inline u32 __bth_qpn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_QPN_MASK & be32_to_cpu(bth->qpn);
}

static inline void __bth_set_qpn(void *arg, u32 qpn)
{
	struct rxe_bth *bth = arg;
	u32 resvqpn = be32_to_cpu(bth->qpn);

	bth->qpn = cpu_to_be32((BTH_QPN_MASK & qpn) |
			       (~BTH_QPN_MASK & resvqpn));
}

static inline int __bth_fecn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_FECN_MASK) & bth->qpn);
}

static inline void __bth_set_fecn(void *arg, int fecn)
{
	struct rxe_bth *bth = arg;

	if (fecn)
		bth->qpn |= cpu_to_be32(BTH_FECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_FECN_MASK);
}

static inline int __bth_becn(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_BECN_MASK) & bth->qpn);
}

static inline void __bth_set_becn(void *arg, int becn)
{
	struct rxe_bth *bth = arg;

	if (becn)
		bth->qpn |= cpu_to_be32(BTH_BECN_MASK);
	else
		bth->qpn &= ~cpu_to_be32(BTH_BECN_MASK);
}

static inline u8 __bth_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	return (BTH_RESV6A_MASK & be32_to_cpu(bth->qpn)) >> 24;
}

static inline void __bth_set_resv6a(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->qpn = cpu_to_be32(~BTH_RESV6A_MASK);
}

static inline int __bth_ack(void *arg)
{
	struct rxe_bth *bth = arg;

	return 0 != (cpu_to_be32(BTH_ACK_MASK) & bth->apsn);
}

static inline void __bth_set_ack(void *arg, int ack)
{
	struct rxe_bth *bth = arg;

	if (ack)
		bth->apsn |= cpu_to_be32(BTH_ACK_MASK);
	else
		bth->apsn &= ~cpu_to_be32(BTH_ACK_MASK);
}

static inline void __bth_set_resv7(void *arg)
{
	struct rxe_bth *bth = arg;

	bth->apsn &= ~cpu_to_be32(BTH_RESV7_MASK);
}

static inline u32 __bth_psn(void *arg)
{
	struct rxe_bth *bth = arg;

	return BTH_PSN_MASK & be32_to_cpu(bth->apsn);
}

static inline void __bth_set_psn(void *arg, u32 psn)
{
	struct rxe_bth *bth = arg;
	u32 apsn = be32_to_cpu(bth->apsn);

	bth->apsn = cpu_to_be32((BTH_PSN_MASK & psn) |
				(~BTH_PSN_MASK & apsn));
}

static inline u8 bth_opcode(struct rxe_pkt_info *pkt)
{
	return __bth_opcode(pkt->hdr + pkt->offset);
}

static inline void bth_set_opcode(struct rxe_pkt_info *pkt, u8 opcode)
{
	__bth_set_opcode(pkt->hdr + pkt->offset, opcode);
}

static inline u8 bth_se(struct rxe_pkt_info *pkt)
{
	return __bth_se(pkt->hdr + pkt->offset);
}

static inline void bth_set_se(struct rxe_pkt_info *pkt, int se)
{
	__bth_set_se(pkt->hdr + pkt->offset, se);
}

static inline u8 bth_mig(struct rxe_pkt_info *pkt)
{
	return __bth_mig(pkt->hdr + pkt->offset);
}

static inline void bth_set_mig(struct rxe_pkt_info *pkt, u8 mig)
{
	__bth_set_mig(pkt->hdr + pkt->offset, mig);
}

static inline u8 bth_pad(struct rxe_pkt_info *pkt)
{
	return __bth_pad(pkt->hdr + pkt->offset);
}

static inline void bth_set_pad(struct rxe_pkt_info *pkt, u8 pad)
{
	__bth_set_pad(pkt->hdr + pkt->offset, pad);
}

static inline u8 bth_tver(struct rxe_pkt_info *pkt)
{
	return __bth_tver(pkt->hdr + pkt->offset);
}

static inline void bth_set_tver(struct rxe_pkt_info *pkt, u8 tver)
{
	__bth_set_tver(pkt->hdr + pkt->offset, tver);
}

static inline u16 bth_pkey(struct rxe_pkt_info *pkt)
{
	return __bth_pkey(pkt->hdr + pkt->offset);
}

static inline void bth_set_pkey(struct rxe_pkt_info *pkt, u16 pkey)
{
	__bth_set_pkey(pkt->hdr + pkt->offset, pkey);
}

static inline u32 bth_qpn(struct rxe_pkt_info *pkt)
{
	return __bth_qpn(pkt->hdr + pkt->offset);
}

static inline void bth_set_qpn(struct rxe_pkt_info *pkt, u32 qpn)
{
	__bth_set_qpn(pkt->hdr + pkt->offset, qpn);
}

static inline int bth_fecn(struct rxe_pkt_info *pkt)
{
	return __bth_fecn(pkt->hdr + pkt->offset);
}

static inline void bth_set_fecn(struct rxe_pkt_info *pkt, int fecn)
{
	__bth_set_fecn(pkt->hdr + pkt->offset, fecn);
}

static inline int bth_becn(struct rxe_pkt_info *pkt)
{
	return __bth_becn(pkt->hdr + pkt->offset);
}

static inline void bth_set_becn(struct rxe_pkt_info *pkt, int becn)
{
	__bth_set_becn(pkt->hdr + pkt->offset, becn);
}

static inline u8 bth_resv6a(struct rxe_pkt_info *pkt)
{
	return __bth_resv6a(pkt->hdr + pkt->offset);
}

static inline void bth_set_resv6a(struct rxe_pkt_info *pkt)
{
	__bth_set_resv6a(pkt->hdr + pkt->offset);
}

static inline int bth_ack(struct rxe_pkt_info *pkt)
{
	return __bth_ack(pkt->hdr + pkt->offset);
}

static inline void bth_set_ack(struct rxe_pkt_info *pkt, int ack)
{
	__bth_set_ack(pkt->hdr + pkt->offset, ack);
}

static inline void bth_set_resv7(struct rxe_pkt_info *pkt)
{
	__bth_set_resv7(pkt->hdr + pkt->offset);
}

static inline u32 bth_psn(struct rxe_pkt_info *pkt)
{
	return __bth_psn(pkt->hdr + pkt->offset);
}

static inline void bth_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}

static inline void bth_init(struct rxe_pkt_info *pkt, u8 opcode, int se,
			    int mig, int pad, u16 pkey, u32 qpn, int ack_req,
			    u32 psn)
{
	struct rxe_bth *bth = (struct rxe_bth *)(pkt->hdr + pkt->offset);

	bth->opcode = opcode;
	bth->flags = (pad << 4) & BTH_PAD_MASK;
	if (se)
		bth->flags |= BTH_SE_MASK;
	if (mig)
		bth->flags |= BTH_MIG_MASK;
	bth->pkey = cpu_to_be16(pkey);
	bth->qpn = cpu_to_be32(qpn & BTH_QPN_MASK);
	psn &= BTH_PSN_MASK;
	if (ack_req)
		psn |= BTH_ACK_MASK;
	bth->apsn = cpu_to_be32(psn);
}
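
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * the two access levels described at the top of this file.
 * bth_set_psn() locates the BTH from the packet info, while
 * __bth_set_psn() takes a pointer to the header itself; both handle
 * the byte-order conversion.
 */
static inline void example_set_psn(struct rxe_pkt_info *pkt, u32 psn)
{
	/* per-packet form: finds the BTH at pkt->hdr + pkt->offset */
	bth_set_psn(pkt, psn);

	/* raw form: equivalent when handed the BTH pointer directly */
	__bth_set_psn(pkt->hdr + pkt->offset, psn);
}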

/******************************************************************************
 * Reliable Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_rdeth {
	__be32 een;
};

#define RDETH_EEN_MASK (0x00ffffff)

static inline u32 __rdeth_een(void *arg)
{
	struct rxe_rdeth *rdeth = arg;

	return RDETH_EEN_MASK & be32_to_cpu(rdeth->een);
}

static inline void __rdeth_set_een(void *arg, u32 een)
{
	struct rxe_rdeth *rdeth = arg;

	rdeth->een = cpu_to_be32(RDETH_EEN_MASK & een);
}

static inline u32 rdeth_een(struct rxe_pkt_info *pkt)
{
	return __rdeth_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH]);
}

static inline void rdeth_set_een(struct rxe_pkt_info *pkt, u32 een)
{
	__rdeth_set_een(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RDETH], een);
}

/******************************************************************************
 * Datagram Extended Transport Header
 ******************************************************************************/
struct rxe_deth {
	__be32 qkey;
	__be32 sqp;
};

#define GSI_QKEY (0x80010000)
#define DETH_SQP_MASK (0x00ffffff)

static inline u32 __deth_qkey(void *arg)
{
	struct rxe_deth *deth = arg;

	return be32_to_cpu(deth->qkey);
}

static inline void __deth_set_qkey(void *arg, u32 qkey)
{
	struct rxe_deth *deth = arg;

	deth->qkey = cpu_to_be32(qkey);
}

static inline u32 __deth_sqp(void *arg)
{
	struct rxe_deth *deth = arg;

	return DETH_SQP_MASK & be32_to_cpu(deth->sqp);
}

static inline void __deth_set_sqp(void *arg, u32 sqp)
{
	struct rxe_deth *deth = arg;

	deth->sqp = cpu_to_be32(DETH_SQP_MASK & sqp);
}

static inline u32 deth_qkey(struct rxe_pkt_info *pkt)
{
	return __deth_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_qkey(struct rxe_pkt_info *pkt, u32 qkey)
{
	__deth_set_qkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], qkey);
}

static inline u32 deth_sqp(struct rxe_pkt_info *pkt)
{
	return __deth_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH]);
}

static inline void deth_set_sqp(struct rxe_pkt_info *pkt, u32 sqp)
{
	__deth_set_sqp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_DETH], sqp);
}

/******************************************************************************
 * RDMA Extended Transport Header
 ******************************************************************************/
struct rxe_reth {
	__be64 va;
	__be32 rkey;
	__be32 len;
};

static inline u64 __reth_va(void *arg)
{
	struct rxe_reth *reth = arg;

	return be64_to_cpu(reth->va);
}

static inline void __reth_set_va(void *arg, u64 va)
{
	struct rxe_reth *reth = arg;

	reth->va = cpu_to_be64(va);
}

static inline u32 __reth_rkey(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->rkey);
}

static inline void __reth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_reth *reth = arg;

	reth->rkey = cpu_to_be32(rkey);
}

static inline u32 __reth_len(void *arg)
{
	struct rxe_reth *reth = arg;

	return be32_to_cpu(reth->len);
}

static inline void __reth_set_len(void *arg, u32 len)
{
	struct rxe_reth *reth = arg;

	reth->len = cpu_to_be32(len);
}

static inline u64 reth_va(struct rxe_pkt_info *pkt)
{
	return __reth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__reth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], va);
}

static inline u32 reth_rkey(struct rxe_pkt_info *pkt)
{
	return __reth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__reth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], rkey);
}

static inline u32 reth_len(struct rxe_pkt_info *pkt)
{
	return __reth_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH]);
}

static inline void reth_set_len(struct rxe_pkt_info *pkt, u32 len)
{
	__reth_set_len(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_RETH], len);
}

/******************************************************************************
 * Atomic Extended Transport Header
 ******************************************************************************/
struct rxe_atmeth {
	__be64 va;
	__be32 rkey;
	__be64 swap_add;
	__be64 comp;
} __packed;

static inline u64 __atmeth_va(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->va);
}

static inline void __atmeth_set_va(void *arg, u64 va)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->va = cpu_to_be64(va);
}

static inline u32 __atmeth_rkey(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be32_to_cpu(atmeth->rkey);
}

static inline void __atmeth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->rkey = cpu_to_be32(rkey);
}

static inline u64 __atmeth_swap_add(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->swap_add);
}

static inline void __atmeth_set_swap_add(void *arg, u64 swap_add)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->swap_add = cpu_to_be64(swap_add);
}

static inline u64 __atmeth_comp(void *arg)
{
	struct rxe_atmeth *atmeth = arg;

	return be64_to_cpu(atmeth->comp);
}

static inline void __atmeth_set_comp(void *arg, u64 comp)
{
	struct rxe_atmeth *atmeth = arg;

	atmeth->comp = cpu_to_be64(comp);
}

static inline u64 atmeth_va(struct rxe_pkt_info *pkt)
{
	return __atmeth_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_va(struct rxe_pkt_info *pkt, u64 va)
{
	__atmeth_set_va(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], va);
}

static inline u32 atmeth_rkey(struct rxe_pkt_info *pkt)
{
	return __atmeth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__atmeth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], rkey);
}

static inline u64 atmeth_swap_add(struct rxe_pkt_info *pkt)
{
	return __atmeth_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_swap_add(struct rxe_pkt_info *pkt, u64 swap_add)
{
	__atmeth_set_swap_add(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], swap_add);
}

static inline u64 atmeth_comp(struct rxe_pkt_info *pkt)
{
	return __atmeth_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH]);
}

static inline void atmeth_set_comp(struct rxe_pkt_info *pkt, u64 comp)
{
	__atmeth_set_comp(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMETH], comp);
}

/******************************************************************************
 * Ack Extended Transport Header
 ******************************************************************************/
struct rxe_aeth {
	__be32 smsn;
};

#define AETH_SYN_MASK (0xff000000)
#define AETH_MSN_MASK (0x00ffffff)

enum aeth_syndrome {
	AETH_TYPE_MASK = 0xe0,
	AETH_ACK = 0x00,
	AETH_RNR_NAK = 0x20,
	AETH_RSVD = 0x40,
	AETH_NAK = 0x60,
	AETH_ACK_UNLIMITED = 0x1f,
	AETH_NAK_PSN_SEQ_ERROR = 0x60,
	AETH_NAK_INVALID_REQ = 0x61,
	AETH_NAK_REM_ACC_ERR = 0x62,
	AETH_NAK_REM_OP_ERR = 0x63,
	AETH_NAK_INV_RD_REQ = 0x64,
};

static inline u8 __aeth_syn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return (AETH_SYN_MASK & be32_to_cpu(aeth->smsn)) >> 24;
}

static inline void __aeth_set_syn(void *arg, u8 syn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_SYN_MASK & (syn << 24)) |
			(~AETH_SYN_MASK & smsn));
}

static inline u32 __aeth_msn(void *arg)
{
	struct rxe_aeth *aeth = arg;

	return AETH_MSN_MASK & be32_to_cpu(aeth->smsn);
}

static inline void __aeth_set_msn(void *arg, u32 msn)
{
	struct rxe_aeth *aeth = arg;
	u32 smsn = be32_to_cpu(aeth->smsn);

	aeth->smsn = cpu_to_be32((AETH_MSN_MASK & msn) |
			(~AETH_MSN_MASK & smsn));
}

static inline u8 aeth_syn(struct rxe_pkt_info *pkt)
{
	return __aeth_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_syn(struct rxe_pkt_info *pkt, u8 syn)
{
	__aeth_set_syn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], syn);
}

static inline u32 aeth_msn(struct rxe_pkt_info *pkt)
{
	return __aeth_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH]);
}

static inline void aeth_set_msn(struct rxe_pkt_info *pkt, u32 msn)
{
	__aeth_set_msn(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_AETH], msn);
}

/******************************************************************************
 * Atomic Ack Extended Transport Header
 ******************************************************************************/
struct rxe_atmack {
	__be64 orig;
};

static inline u64 __atmack_orig(void *arg)
{
	struct rxe_atmack *atmack = arg;

	return be64_to_cpu(atmack->orig);
}

static inline void __atmack_set_orig(void *arg, u64 orig)
{
	struct rxe_atmack *atmack = arg;

	atmack->orig = cpu_to_be64(orig);
}

static inline u64 atmack_orig(struct rxe_pkt_info *pkt)
{
	return __atmack_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK]);
}

static inline void atmack_set_orig(struct rxe_pkt_info *pkt, u64 orig)
{
	__atmack_set_orig(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_ATMACK], orig);
}

/******************************************************************************
 * Immediate Extended Transport Header
 ******************************************************************************/
struct rxe_immdt {
	__be32 imm;
};

static inline __be32 __immdt_imm(void *arg)
{
	struct rxe_immdt *immdt = arg;

	return immdt->imm;
}

static inline void __immdt_set_imm(void *arg, __be32 imm)
{
	struct rxe_immdt *immdt = arg;

	immdt->imm = imm;
}

static inline __be32 immdt_imm(struct rxe_pkt_info *pkt)
{
	return __immdt_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT]);
}

static inline void immdt_set_imm(struct rxe_pkt_info *pkt, __be32 imm)
{
	__immdt_set_imm(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IMMDT], imm);
}

/******************************************************************************
 * Invalidate Extended Transport Header
 ******************************************************************************/
struct rxe_ieth {
	__be32 rkey;
};

static inline u32 __ieth_rkey(void *arg)
{
	struct rxe_ieth *ieth = arg;

	return be32_to_cpu(ieth->rkey);
}

static inline void __ieth_set_rkey(void *arg, u32 rkey)
{
	struct rxe_ieth *ieth = arg;

	ieth->rkey = cpu_to_be32(rkey);
}

static inline u32 ieth_rkey(struct rxe_pkt_info *pkt)
{
	return __ieth_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH]);
}

static inline void ieth_set_rkey(struct rxe_pkt_info *pkt, u32 rkey)
{
	__ieth_set_rkey(pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_IETH], rkey);
}

enum rxe_hdr_length {
	RXE_BTH_BYTES = sizeof(struct rxe_bth),
	RXE_DETH_BYTES = sizeof(struct rxe_deth),
	RXE_IMMDT_BYTES = sizeof(struct rxe_immdt),
	RXE_RETH_BYTES = sizeof(struct rxe_reth),
	RXE_AETH_BYTES = sizeof(struct rxe_aeth),
	RXE_ATMACK_BYTES = sizeof(struct rxe_atmack),
	RXE_ATMETH_BYTES = sizeof(struct rxe_atmeth),
	RXE_IETH_BYTES = sizeof(struct rxe_ieth),
	RXE_RDETH_BYTES = sizeof(struct rxe_rdeth),
};

static inline size_t header_size(struct rxe_pkt_info *pkt)
{
	return pkt->offset + rxe_opcode[pkt->opcode].length;
}

static inline void *payload_addr(struct rxe_pkt_info *pkt)
{
	return pkt->hdr + pkt->offset
		+ rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD];
}

static inline size_t payload_size(struct rxe_pkt_info *pkt)
{
	return pkt->paylen - rxe_opcode[pkt->opcode].offset[RXE_PAYLOAD]
		- bth_pad(pkt) - RXE_ICRC_SIZE;
}
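
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * how the length helpers fit together. pkt->paylen runs from the BTH
 * through the ICRC, and the sketch assumes the RXE_PAYLOAD offset in
 * rxe_opcode equals the header length for the opcode, so the total
 * below is simply pkt->offset + pkt->paylen.
 */
static inline size_t example_total_length(struct rxe_pkt_info *pkt)
{
	/* bytes from pkt->hdr through the end of the ICRC */
	return header_size(pkt) + payload_size(pkt) +
		bth_pad(pkt) + RXE_ICRC_SIZE;
}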

#endif /* RXE_HDR_H */