/*
 * Copyright (c) 2018 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <rdma/rdma_cm.h>

#include "iw_cxgb4.h"
#include <rdma/restrack.h>
#include <uapi/rdma/rdma_netlink.h>

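/*
 * Restrack callbacks for iw_cxgb4: each c4iw_fill_res_*_entry() below
 * exports driver-specific state for one resource (QP, CM ID, CQ or MR)
 * as a nested RDMA_NLDEV_ATTR_DRIVER table of key/value attributes.
 * The rdma_nl_put_driver_*() helpers return nonzero once the skb runs
 * out of room, which is why every failure path here ends in -EMSGSIZE.
 *
 * Hedged usage sketch (iproute2's rdma tool; exact flags may vary by
 * version) -- driver details are shown with the driver-detail flag:
 *
 *	rdma resource show qp -dd
 *	rdma resource show cm_id -dd
 */
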
static int fill_sq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* WQ+SQ */
	if (rdma_nl_put_driver_u32(msg, "sqid", wq->sq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", wq->flushed))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->sq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->sq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->sq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->sq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flush_cidx", wq->sq.flush_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->sq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->sq.size))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "flags", wq->sq.flags))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_rq(struct sk_buff *msg, struct t4_wq *wq)
{
	/* RQ */
	if (rdma_nl_put_driver_u32(msg, "rqid", wq->rq.qid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", wq->rq.memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", wq->rq.cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "pidx", wq->rq.pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "wq_pidx", wq->rq.wq_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "msn", wq->rq.msn))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "rqt_hwaddr", wq->rq.rqt_hwaddr))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "rqt_size", wq->rq.rqt_size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "in_use", wq->rq.in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", wq->rq.size))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

static int fill_swsqe(struct sk_buff *msg, struct t4_sq *sq, u16 idx,
		      struct t4_swsqe *sqe)
{
	if (rdma_nl_put_driver_u32(msg, "idx", idx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "opcode", sqe->opcode))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "complete", sqe->complete))
		goto err;
	if (sqe->complete &&
	    rdma_nl_put_driver_u32(msg, "cqe_status", CQE_STATUS(&sqe->cqe)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "signaled", sqe->signaled))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "flushed", sqe->flushed))
		goto err;
	return 0;
err:
	return -EMSGSIZE;
}

/*
 * Dump the first and last pending sqes.
 */
static int fill_swsqes(struct sk_buff *msg, struct t4_sq *sq,
		       u16 first_idx, struct t4_swsqe *first_sqe,
		       u16 last_idx, struct t4_swsqe *last_sqe)
{
	if (!first_sqe)
		goto out;
	if (fill_swsqe(msg, sq, first_idx, first_sqe))
		goto err;
	if (!last_sqe)
		goto out;
	if (fill_swsqe(msg, sq, last_idx, last_sqe))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

int c4iw_fill_res_qp_entry(struct sk_buff *msg, struct ib_qp *ibqp)
{
	struct t4_swsqe *fsp = NULL, *lsp = NULL;
	struct c4iw_qp *qhp = to_c4iw_qp(ibqp);
	u16 first_sq_idx = 0, last_sq_idx = 0;
	struct t4_swsqe first_sqe, last_sqe;
	struct nlattr *table_attr;
	struct t4_wq wq;

	/* User qp state is not available, so don't dump user qps */
	if (qhp->ucontext)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&qhp->lock);
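	/*
	 * Copy the whole t4_wq by value so the spinlock need not be held
	 * while the netlink attributes are built below; the live ring can
	 * keep moving once the lock is dropped.
	 */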
	wq = qhp->wq;

	/* If there are any pending sqes, copy the first and last */
	if (wq.sq.cidx != wq.sq.pidx) {
		first_sq_idx = wq.sq.cidx;
		first_sqe = qhp->wq.sq.sw_sq[first_sq_idx];
		fsp = &first_sqe;
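		/*
		 * pidx names the next slot to be produced, so the newest
		 * pending sqe sits at pidx - 1, wrapping to the top of
		 * the ring when pidx is 0.
		 */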
		last_sq_idx = wq.sq.pidx;
		if (last_sq_idx-- == 0)
			last_sq_idx = wq.sq.size - 1;
		if (last_sq_idx != first_sq_idx) {
			last_sqe = qhp->wq.sq.sw_sq[last_sq_idx];
			lsp = &last_sqe;
		}
	}
	spin_unlock_irq(&qhp->lock);

	if (fill_sq(msg, &wq))
		goto err_cancel_table;

	if (fill_swsqes(msg, &wq.sq, first_sq_idx, fsp, last_sq_idx, lsp))
		goto err_cancel_table;

	if (fill_rq(msg, &wq))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

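/*
 * Scratch space big enough for either endpoint flavor, so a single
 * heap allocation can hold a snapshot of whichever type the cm_id
 * turns out to reference (heap rather than stack, as these structs
 * are sizeable).
 */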
union union_ep {
	struct c4iw_listen_ep lep;
	struct c4iw_ep ep;
};

int c4iw_fill_res_cm_id_entry(struct sk_buff *msg,
			      struct rdma_cm_id *cm_id)
{
	struct nlattr *table_attr;
	struct c4iw_ep_common *epcp;
	struct c4iw_listen_ep *listen_ep = NULL;
	struct c4iw_ep *ep = NULL;
	struct iw_cm_id *iw_cm_id;
	union union_ep *uep;

	iw_cm_id = rdma_iw_cm_id(cm_id);
	if (!iw_cm_id)
		return 0;
	epcp = (struct c4iw_ep_common *)iw_cm_id->provider_data;
	if (!epcp)
		return 0;
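	/*
	 * On allocation failure return 0 rather than an error,
	 * presumably so the entry is dumped without driver details
	 * instead of failing the whole restrack dump.
	 */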
	uep = kcalloc(1, sizeof(*uep), GFP_KERNEL);
	if (!uep)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err_free_uep;

	/* Get a consistent snapshot */
	mutex_lock(&epcp->mutex);
	if (epcp->state == LISTEN) {
		uep->lep = *(struct c4iw_listen_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		listen_ep = &uep->lep;
		epcp = &listen_ep->com;
	} else {
		uep->ep = *(struct c4iw_ep *)epcp;
		mutex_unlock(&epcp->mutex);
		ep = &uep->ep;
		epcp = &ep->com;
	}

	if (rdma_nl_put_driver_u32(msg, "state", epcp->state))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", epcp->flags))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64_hex(msg, "history", epcp->history))
		goto err_cancel_table;

	if (epcp->state == LISTEN) {
		if (rdma_nl_put_driver_u32(msg, "stid", listen_ep->stid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "backlog", listen_ep->backlog))
			goto err_cancel_table;
	} else {
		if (rdma_nl_put_driver_u32(msg, "hwtid", ep->hwtid))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ord", ep->ord))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "ird", ep->ird))
			goto err_cancel_table;
		if (rdma_nl_put_driver_u32(msg, "emss", ep->emss))
			goto err_cancel_table;

		if (!ep->parent_ep && rdma_nl_put_driver_u32(msg, "atid",
							     ep->atid))
			goto err_cancel_table;
	}
	nla_nest_end(msg, table_attr);
	kfree(uep);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err_free_uep:
	kfree(uep);
	return -EMSGSIZE;
}

static int fill_cq(struct sk_buff *msg, struct t4_cq *cq)
{
	if (rdma_nl_put_driver_u32(msg, "cqid", cq->cqid))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "memsize", cq->memsize))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "size", cq->size))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx", cq->cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "cidx_inc", cq->cidx_inc))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_cidx", cq->sw_cidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_pidx", cq->sw_pidx))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "sw_in_use", cq->sw_in_use))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "vector", cq->vector))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "gen", cq->gen))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "error", cq->error))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cq->bits_type_ts)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "flags", cq->flags))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}

static int fill_cqe(struct sk_buff *msg, struct t4_cqe *cqe, u16 idx,
		    const char *qstr)
{
	if (rdma_nl_put_driver_u32(msg, qstr, idx))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "header",
				       be32_to_cpu(cqe->header)))
		goto err;
	if (rdma_nl_put_driver_u32(msg, "len", be32_to_cpu(cqe->len)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_hi",
				       be32_to_cpu(cqe->u.gen.wrid_hi)))
		goto err;
	if (rdma_nl_put_driver_u32_hex(msg, "wrid_low",
				       be32_to_cpu(cqe->u.gen.wrid_low)))
		goto err;
	if (rdma_nl_put_driver_u64_hex(msg, "bits_type_ts",
				       be64_to_cpu(cqe->bits_type_ts)))
		goto err;

	return 0;

err:
	return -EMSGSIZE;
}

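/*
 * Dump the two hw cqes snapshotted by c4iw_fill_res_cq_entry(): the
 * most recently consumed entry (cidx - 1, with wraparound) and the
 * one the driver will look at next (cidx).
 */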
static int fill_hwcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	idx = (cq->cidx > 0) ? cq->cidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes, idx, "hwcq_idx"))
		goto err;
	idx = cq->cidx;
	if (fill_cqe(msg, cqes + 1, idx, "hwcq_idx"))
		goto err;

	return 0;
err:
	return -EMSGSIZE;
}

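/*
 * Dump the oldest (sw_cidx) and, when more than one is queued, the
 * newest (sw_pidx - 1) entries of the software cq, which holds
 * completions the driver synthesized (e.g. for flushed work requests).
 */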
static int fill_swcqes(struct sk_buff *msg, struct t4_cq *cq,
		       struct t4_cqe *cqes)
{
	u16 idx;

	if (!cq->sw_in_use)
		return 0;

	idx = cq->sw_cidx;
	if (fill_cqe(msg, cqes, idx, "swcq_idx"))
		goto err;
	if (cq->sw_in_use == 1)
		goto out;
	idx = (cq->sw_pidx > 0) ? cq->sw_pidx - 1 : cq->size - 1;
	if (fill_cqe(msg, cqes + 1, idx, "swcq_idx"))
		goto err;
out:
	return 0;
err:
	return -EMSGSIZE;
}

int c4iw_fill_res_cq_entry(struct sk_buff *msg, struct ib_cq *ibcq)
{
	struct c4iw_cq *chp = to_c4iw_cq(ibcq);
	struct nlattr *table_attr;
	struct t4_cqe hwcqes[2];
	struct t4_cqe swcqes[2];
	struct t4_cq cq;
	u16 idx;

	/* User cq state is not available, so don't dump user cqs */
	if (ibcq->uobject)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	/* Get a consistent snapshot */
	spin_lock_irq(&chp->lock);

	/* t4_cq struct */
	cq = chp->cq;

	/* get 2 hw cqes: cidx-1, and cidx */
	idx = (cq.cidx > 0) ? cq.cidx - 1 : cq.size - 1;
	hwcqes[0] = chp->cq.queue[idx];

	idx = cq.cidx;
	hwcqes[1] = chp->cq.queue[idx];

	/* get first and last sw cqes */
	if (cq.sw_in_use) {
		swcqes[0] = chp->cq.sw_queue[cq.sw_cidx];
		if (cq.sw_in_use > 1) {
			idx = (cq.sw_pidx > 0) ? cq.sw_pidx - 1 : cq.size - 1;
			swcqes[1] = chp->cq.sw_queue[idx];
		}
	}

	spin_unlock_irq(&chp->lock);

	if (fill_cq(msg, &cq))
		goto err_cancel_table;

	if (fill_swcqes(msg, &cq, swcqes))
		goto err_cancel_table;

	if (fill_hwcqes(msg, &cq, hwcqes))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}

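/*
 * MR details come straight from the adapter: cxgb4_read_tpte() fetches
 * the on-chip TPTE for this stag. The "idx"/"key" split below reflects
 * the stag layout used here: the upper 24 bits index the TPT and the
 * low byte is the key.
 */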
int c4iw_fill_res_mr_entry(struct sk_buff *msg, struct ib_mr *ibmr)
{
	struct c4iw_mr *mhp = to_c4iw_mr(ibmr);
	struct c4iw_dev *dev = mhp->rhp;
	u32 stag = mhp->attr.stag;
	struct nlattr *table_attr;
	struct fw_ri_tpte tpte;
	int ret;

	if (!stag)
		return 0;

	table_attr = nla_nest_start_noflag(msg, RDMA_NLDEV_ATTR_DRIVER);
	if (!table_attr)
		goto err;

	ret = cxgb4_read_tpte(dev->rdev.lldi.ports[0], stag, (__be32 *)&tpte);
	if (ret) {
		dev_err(&dev->rdev.lldi.pdev->dev,
			"%s cxgb4_read_tpte err %d\n", __func__, ret);
		return 0;
	}

	if (rdma_nl_put_driver_u32_hex(msg, "idx", stag >> 8))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "valid",
			FW_RI_TPTE_VALID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "key", stag & 0xff))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "state",
			FW_RI_TPTE_STAGSTATE_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "pdid",
			FW_RI_TPTE_PDID_G(ntohl(tpte.valid_to_pdid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "perm",
			FW_RI_TPTE_PERM_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32(msg, "ps",
			FW_RI_TPTE_PS_G(ntohl(tpte.locread_to_qpid))))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u64(msg, "len",
		      ((u64)ntohl(tpte.len_hi) << 32) | ntohl(tpte.len_lo)))
		goto err_cancel_table;
	if (rdma_nl_put_driver_u32_hex(msg, "pbl_addr",
			FW_RI_TPTE_PBLADDR_G(ntohl(tpte.nosnoop_pbladdr))))
		goto err_cancel_table;

	nla_nest_end(msg, table_attr);
	return 0;

err_cancel_table:
	nla_nest_cancel(msg, table_attr);
err:
	return -EMSGSIZE;
}