/*
 * cxgb4_uld.c: Chelsio Upper Layer Driver Interface for T4/T5/T6 SGE management
 *
 * Copyright (c) 2016 Chelsio Communications, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 *  Written by: Atul Gupta (atul.gupta@chelsio.com)
 *  Written by: Hariprasad Shenai (hariprasad@chelsio.com)
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/debugfs.h>
#include <linux/export.h>
#include <linux/list.h>
#include <linux/skbuff.h>
#include <linux/pci.h>

#include "cxgb4.h"
#include "cxgb4_uld.h"
#include "t4_regs.h"
#include "t4fw_api.h"
#include "t4_msg.h"

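/* for_each_uldrxq() walks every rx queue a ULD owns in one pass: first
 * the plain offload rxqs ([0, nrxq)), then the concentrator queues
 * ([nrxq, nrxq + nciq)), which sit directly after them in uldrxq[]
 * (see alloc_uld_rxqs() and free_sge_queues_uld() below).
 */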
#define for_each_uldrxq(m, i) for (i = 0; i < ((m)->nrxq + (m)->nciq); i++)

/* Flush the aggregated lro sessions */
static void uldrx_flush_handler(struct sge_rspq *q)
{
	struct adapter *adap = q->adap;

	if (adap->uld[q->uld].lro_flush)
		adap->uld[q->uld].lro_flush(&q->lro_mgr);
}

/**
 *	uldrx_handler - response queue handler for ULD queues
 *	@q: the response queue that received the packet
 *	@rsp: the response queue descriptor holding the offload message
 *	@gl: the gather list of packet fragments
 *
 *	Deliver an ingress offload packet to a ULD.  All processing is done
 *	by the ULD; we just maintain statistics.
 */
static int uldrx_handler(struct sge_rspq *q, const __be64 *rsp,
			 const struct pkt_gl *gl)
{
	struct adapter *adap = q->adap;
	struct sge_ofld_rxq *rxq = container_of(q, struct sge_ofld_rxq, rspq);
	int ret;

	/* FW can send CPLs encapsulated in a CPL_FW4_MSG */
	if (((const struct rss_header *)rsp)->opcode == CPL_FW4_MSG &&
	    ((const struct cpl_fw4_msg *)(rsp + 1))->type == FW_TYPE_RSSCPL)
		rsp += 2;

	if (q->flush_handler)
		ret = adap->uld[q->uld].lro_rx_handler(adap->uld[q->uld].handle,
				rsp, gl, &q->lro_mgr,
				&q->napi);
	else
		ret = adap->uld[q->uld].rx_handler(adap->uld[q->uld].handle,
				rsp, gl);

	if (ret) {
		rxq->stats.nomem++;
		return -1;
	}

	if (!gl)
		rxq->stats.imm++;
	else if (gl == CXGB4_MSG_AN)
		rxq->stats.an++;
	else
		rxq->stats.pkts++;
	return 0;
}
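
#if 0	/* Illustrative sketch only, not part of this driver: the shape of
	 * the ULD-side callback that uldrx_handler() dispatches to via
	 * adap->uld[q->uld].rx_handler(handle, rsp, gl).  A nonzero return
	 * is counted in rxq->stats.nomem above and reported as a failure.
	 * my_uld_state and my_uld_queue_work() are hypothetical names.
	 */
static int my_uld_rx_handler(void *handle, const __be64 *rsp,
			     const struct pkt_gl *gl)
{
	struct my_uld_state *uld = handle;

	/* Hand the offload message to the ULD's own processing path and
	 * report failure back so the caller bumps its nomem counter.
	 */
	if (!my_uld_queue_work(uld, rsp, gl))
		return -ENOMEM;
	return 0;
}
#endif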

static int alloc_uld_rxqs(struct adapter *adap,
			  struct sge_uld_rxq_info *rxq_info, bool lro)
{
	unsigned int nq = rxq_info->nrxq + rxq_info->nciq;
	struct sge_ofld_rxq *q = rxq_info->uldrxq;
	unsigned short *ids = rxq_info->rspq_id;
	int i, err, msi_idx, que_idx = 0;
	struct sge *s = &adap->sge;
	unsigned int per_chan;

	per_chan = rxq_info->nrxq / adap->params.nports;

	if (adap->flags & CXGB4_USING_MSIX)
		msi_idx = 1;
	else
		msi_idx = -((int)s->intrq.abs_id + 1);

	for (i = 0; i < nq; i++, q++) {
		if (i == rxq_info->nrxq) {
			/* start allocation of concentrator queues */
			per_chan = rxq_info->nciq / adap->params.nports;
			que_idx = 0;
		}

		if (msi_idx >= 0) {
			msi_idx = cxgb4_get_msix_idx_from_bmap(adap);
			if (msi_idx < 0) {
				err = -ENOSPC;
				goto freeout;
			}

			snprintf(adap->msix_info[msi_idx].desc,
				 sizeof(adap->msix_info[msi_idx].desc),
				 "%s-%s%d",
				 adap->port[0]->name, rxq_info->name, i);

			q->msix = &adap->msix_info[msi_idx];
		}
		err = t4_sge_alloc_rxq(adap, &q->rspq, false,
				       adap->port[que_idx++ / per_chan],
				       msi_idx,
				       q->fl.size ? &q->fl : NULL,
				       uldrx_handler,
				       lro ? uldrx_flush_handler : NULL,
				       0);
		if (err)
			goto freeout;

		memset(&q->stats, 0, sizeof(q->stats));
		if (ids)
			ids[i] = q->rspq.abs_id;
	}
	return 0;
freeout:
	q = rxq_info->uldrxq;
	for ( ; i; i--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
		if (q->msix)
			cxgb4_free_msix_idx_in_bmap(adap, q->msix->idx);
	}
	return err;
}

static int
setup_sge_queues_uld(struct adapter *adap, unsigned int uld_type, bool lro)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int i, ret;

	ret = alloc_uld_rxqs(adap, rxq_info, lro);
	if (ret)
		return ret;

	/* Tell uP to route control queue completions to rdma rspq */
	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		unsigned int cmplqid;
		u32 param, cmdop;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			cmplqid = rxq_info->uldrxq[i].rspq.cntxt_id;
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &param, &cmplqid);
		}
	}
	return ret;
}

static void t4_free_uld_rxqs(struct adapter *adap, int n,
			     struct sge_ofld_rxq *q)
{
	for ( ; n; n--, q++) {
		if (q->rspq.desc)
			free_rspq_fl(adap, &q->rspq,
				     q->fl.size ? &q->fl : NULL);
	}
}

static void free_sge_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	if (adap->flags & CXGB4_FULL_INIT_DONE && uld_type == CXGB4_ULD_RDMA) {
		struct sge *s = &adap->sge;
		u32 param, cmdop, cmplqid = 0;
		int i;

		cmdop = FW_PARAMS_PARAM_DMAQ_EQ_CMPLIQID_CTRL;
		for_each_port(adap, i) {
			param = (FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DMAQ) |
				 FW_PARAMS_PARAM_X_V(cmdop) |
				 FW_PARAMS_PARAM_YZ_V(s->ctrlq[i].q.cntxt_id));
			t4_set_params(adap, adap->mbox, adap->pf,
				      0, 1, &param, &cmplqid);
		}
	}

	if (rxq_info->nciq)
		t4_free_uld_rxqs(adap, rxq_info->nciq,
				 rxq_info->uldrxq + rxq_info->nrxq);
	t4_free_uld_rxqs(adap, rxq_info->nrxq, rxq_info->uldrxq);
}

static int cfg_queues_uld(struct adapter *adap, unsigned int uld_type,
			  const struct cxgb4_uld_info *uld_info)
{
	struct sge *s = &adap->sge;
	struct sge_uld_rxq_info *rxq_info;
	int i, nrxq, ciq_size;

	rxq_info = kzalloc(sizeof(*rxq_info), GFP_KERNEL);
	if (!rxq_info)
		return -ENOMEM;

	if (adap->flags & CXGB4_USING_MSIX && uld_info->nrxq > s->nqs_per_uld) {
		i = s->nqs_per_uld;
		rxq_info->nrxq = roundup(i, adap->params.nports);
	} else {
		i = min_t(int, uld_info->nrxq,
			  num_online_cpus());
		rxq_info->nrxq = roundup(i, adap->params.nports);
	}
	if (!uld_info->ciq) {
		rxq_info->nciq = 0;
	} else {
		if (adap->flags & CXGB4_USING_MSIX)
			rxq_info->nciq = min_t(int, s->nqs_per_uld,
					       num_online_cpus());
		else
			rxq_info->nciq = min_t(int, MAX_OFLD_QSETS,
					       num_online_cpus());
		rxq_info->nciq = ((rxq_info->nciq / adap->params.nports) *
				  adap->params.nports);
		rxq_info->nciq = max_t(int, rxq_info->nciq,
				       adap->params.nports);
	}

	nrxq = rxq_info->nrxq + rxq_info->nciq; /* total rxq's */
	rxq_info->uldrxq = kcalloc(nrxq, sizeof(struct sge_ofld_rxq),
				   GFP_KERNEL);
	if (!rxq_info->uldrxq) {
		kfree(rxq_info);
		return -ENOMEM;
	}

	rxq_info->rspq_id = kcalloc(nrxq, sizeof(unsigned short), GFP_KERNEL);
	if (!rxq_info->rspq_id) {
		kfree(rxq_info->uldrxq);
		kfree(rxq_info);
		return -ENOMEM;
	}

	for (i = 0; i < rxq_info->nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, uld_info->rxq_size, 64);
		r->rspq.uld = uld_type;
		r->fl.size = 72;
	}

	ciq_size = 64 + adap->vres.cq.size + adap->tids.nftids;
	if (ciq_size > SGE_MAX_IQ_SIZE) {
		dev_warn(adap->pdev_dev, "CIQ size too small for available IQs\n");
		ciq_size = SGE_MAX_IQ_SIZE;
	}

	for (i = rxq_info->nrxq; i < nrxq; i++) {
		struct sge_ofld_rxq *r = &rxq_info->uldrxq[i];

		init_rspq(adap, &r->rspq, 5, 1, ciq_size, 64);
		r->rspq.uld = uld_type;
	}

	memcpy(rxq_info->name, uld_info->name, IFNAMSIZ);
	adap->sge.uld_rxq_info[uld_type] = rxq_info;

	return 0;
}

static void free_queues_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];

	adap->sge.uld_rxq_info[uld_type] = NULL;
	kfree(rxq_info->rspq_id);
	kfree(rxq_info->uldrxq);
	kfree(rxq_info);
}

static int
request_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;
	int err = 0;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		err = request_irq(minfo->vec,
				  t4_sge_intr_msix, 0,
				  minfo->desc,
				  &rxq_info->uldrxq[idx].rspq);
		if (err)
			goto unwind;

		cxgb4_set_msix_aff(adap, minfo->vec,
				   &minfo->aff_mask, idx);
	}
	return 0;

unwind:
	while (idx-- > 0) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
	return err;
}

static void
free_msix_queue_irqs_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	struct msix_info *minfo;
	unsigned int idx;

	for_each_uldrxq(rxq_info, idx) {
		minfo = rxq_info->uldrxq[idx].msix;
		cxgb4_clear_msix_aff(minfo->vec, minfo->aff_mask);
		cxgb4_free_msix_idx_in_bmap(adap, minfo->idx);
		free_irq(minfo->vec, &rxq_info->uldrxq[idx].rspq);
	}
}

static void enable_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_enable_rx(adap, q);
	}
}

static void quiesce_rx_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int idx;

	for_each_uldrxq(rxq_info, idx) {
		struct sge_rspq *q = &rxq_info->uldrxq[idx].rspq;

		if (!q)
			continue;

		cxgb4_quiesce_rx(q);
	}
}

static void
free_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info)
{
	int nq = txq_info->ntxq;
	int i;

	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		if (txq && txq->q.desc) {
			tasklet_kill(&txq->qresume_tsk);
			t4_ofld_eq_free(adap, adap->mbox, adap->pf, 0,
					txq->q.cntxt_id);
			free_tx_desc(adap, &txq->q, txq->q.in_use, false);
			kfree(txq->q.sdesc);
			__skb_queue_purge(&txq->sendq);
			free_txq(adap, &txq->q);
		}
	}
}

static int
alloc_sge_txq_uld(struct adapter *adap, struct sge_uld_txq_info *txq_info,
		  unsigned int uld_type)
{
	struct sge *s = &adap->sge;
	int nq = txq_info->ntxq;
	int i, j, err;

	j = nq / adap->params.nports;
	for (i = 0; i < nq; i++) {
		struct sge_uld_txq *txq = &txq_info->uldtxq[i];

		txq->q.size = 1024;
		err = t4_sge_alloc_uld_txq(adap, txq, adap->port[i / j],
					   s->fw_evtq.cntxt_id, uld_type);
		if (err)
			goto freeout;
	}
	return 0;
freeout:
	free_sge_txq_uld(adap, txq_info);
	return err;
}

static void
release_sge_txq_uld(struct adapter *adap, unsigned int uld_type)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type = TX_ULD(uld_type);

	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if (txq_info && atomic_dec_and_test(&txq_info->users)) {
		free_sge_txq_uld(adap, txq_info);
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		adap->sge.uld_txq_info[tx_uld_type] = NULL;
	}
}
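
/* The CXGB4_TX_OFLD txq set allocated below is shared by all offload
 * ULDs: the first setup_sge_txq_uld() caller allocates it, later
 * callers only take a reference (txq_info->users), and
 * release_sge_txq_uld() above frees it when the last reference drops.
 */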

static int
setup_sge_txq_uld(struct adapter *adap, unsigned int uld_type,
		  const struct cxgb4_uld_info *uld_info)
{
	struct sge_uld_txq_info *txq_info = NULL;
	int tx_uld_type, i;

	tx_uld_type = TX_ULD(uld_type);
	txq_info = adap->sge.uld_txq_info[tx_uld_type];

	if ((tx_uld_type == CXGB4_TX_OFLD) && txq_info &&
	    (atomic_inc_return(&txq_info->users) > 1))
		return 0;

	txq_info = kzalloc(sizeof(*txq_info), GFP_KERNEL);
	if (!txq_info)
		return -ENOMEM;
	if (uld_type == CXGB4_ULD_CRYPTO) {
		i = min_t(int, adap->vres.ncrypto_fc,
			  num_online_cpus());
		txq_info->ntxq = rounddown(i, adap->params.nports);
		if (txq_info->ntxq <= 0) {
			dev_warn(adap->pdev_dev, "Crypto Tx Queues can't be zero\n");
			kfree(txq_info);
			return -EINVAL;
		}

	} else {
		i = min_t(int, uld_info->ntxq, num_online_cpus());
		txq_info->ntxq = roundup(i, adap->params.nports);
	}
	txq_info->uldtxq = kcalloc(txq_info->ntxq, sizeof(struct sge_uld_txq),
				   GFP_KERNEL);
	if (!txq_info->uldtxq) {
		kfree(txq_info);
		return -ENOMEM;
	}

	if (alloc_sge_txq_uld(adap, txq_info, tx_uld_type)) {
		kfree(txq_info->uldtxq);
		kfree(txq_info);
		return -ENOMEM;
	}

	atomic_inc(&txq_info->users);
	adap->sge.uld_txq_info[tx_uld_type] = txq_info;
	return 0;
}

static void uld_queue_init(struct adapter *adap, unsigned int uld_type,
			   struct cxgb4_lld_info *lli)
{
	struct sge_uld_rxq_info *rxq_info = adap->sge.uld_rxq_info[uld_type];
	int tx_uld_type = TX_ULD(uld_type);
	struct sge_uld_txq_info *txq_info = adap->sge.uld_txq_info[tx_uld_type];

	lli->rxq_ids = rxq_info->rspq_id;
	lli->nrxq = rxq_info->nrxq;
	lli->ciq_ids = rxq_info->rspq_id + rxq_info->nrxq;
	lli->nciq = rxq_info->nciq;
	lli->ntxq = txq_info->ntxq;
}

int t4_uld_mem_alloc(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	adap->uld = kcalloc(CXGB4_ULD_MAX, sizeof(*adap->uld), GFP_KERNEL);
	if (!adap->uld)
		return -ENOMEM;

	s->uld_rxq_info = kcalloc(CXGB4_ULD_MAX,
				  sizeof(struct sge_uld_rxq_info *),
				  GFP_KERNEL);
	if (!s->uld_rxq_info)
		goto err_uld;

	s->uld_txq_info = kcalloc(CXGB4_TX_MAX,
				  sizeof(struct sge_uld_txq_info *),
				  GFP_KERNEL);
	if (!s->uld_txq_info)
		goto err_uld_rx;
	return 0;

err_uld_rx:
	kfree(s->uld_rxq_info);
err_uld:
	kfree(adap->uld);
	return -ENOMEM;
}

void t4_uld_mem_free(struct adapter *adap)
{
	struct sge *s = &adap->sge;

	kfree(s->uld_txq_info);
	kfree(s->uld_rxq_info);
	kfree(adap->uld);
}

/* This function should be called with uld_mutex taken. */
static void cxgb4_shutdown_uld_adapter(struct adapter *adap, enum cxgb4_uld type)
{
	if (adap->uld[type].handle) {
		adap->uld[type].handle = NULL;
		adap->uld[type].add = NULL;
		release_sge_txq_uld(adap, type);

		if (adap->flags & CXGB4_FULL_INIT_DONE)
			quiesce_rx_uld(adap, type);

		if (adap->flags & CXGB4_USING_MSIX)
			free_msix_queue_irqs_uld(adap, type);

		free_sge_queues_uld(adap, type);
		free_queues_uld(adap, type);
	}
}

void t4_uld_clean_up(struct adapter *adap)
{
	unsigned int i;

	if (!is_uld(adap))
		return;

	mutex_lock(&uld_mutex);
	for (i = 0; i < CXGB4_ULD_MAX; i++) {
		if (!adap->uld[i].handle)
			continue;

		cxgb4_shutdown_uld_adapter(adap, i);
	}
	mutex_unlock(&uld_mutex);
}

static void uld_init(struct adapter *adap, struct cxgb4_lld_info *lld)
{
	int i;

	lld->pdev = adap->pdev;
	lld->pf = adap->pf;
	lld->l2t = adap->l2t;
	lld->tids = &adap->tids;
	lld->ports = adap->port;
	lld->vr = &adap->vres;
	lld->mtus = adap->params.mtus;
	lld->nchan = adap->params.nports;
	lld->nports = adap->params.nports;
	lld->wr_cred = adap->params.ofldq_wr_cred;
	lld->crypto = adap->params.crypto;
	lld->iscsi_iolen = MAXRXDATA_G(t4_read_reg(adap, TP_PARA_REG2_A));
	lld->iscsi_tagmask = t4_read_reg(adap, ULP_RX_ISCSI_TAGMASK_A);
	lld->iscsi_pgsz_order = t4_read_reg(adap, ULP_RX_ISCSI_PSZ_A);
	lld->iscsi_llimit = t4_read_reg(adap, ULP_RX_ISCSI_LLIMIT_A);
	lld->iscsi_ppm = &adap->iscsi_ppm;
	lld->adapter_type = adap->params.chip;
	lld->cclk_ps = 1000000000 / adap->params.vpd.cclk;
	lld->udb_density = 1 << adap->params.sge.eq_qpp;
	lld->ucq_density = 1 << adap->params.sge.iq_qpp;
	lld->sge_host_page_size = 1 << (adap->params.sge.hps + 10);
	lld->filt_mode = adap->params.tp.vlan_pri_map;
	/* MODQ_REQ_MAP sets queues 0-3 to chan 0-3 */
	for (i = 0; i < NCHAN; i++)
		lld->tx_modq[i] = i;
	lld->gts_reg = adap->regs + MYPF_REG(SGE_PF_GTS_A);
	lld->db_reg = adap->regs + MYPF_REG(SGE_PF_KDOORBELL_A);
	lld->fw_vers = adap->params.fw_vers;
	lld->dbfifo_int_thresh = dbfifo_int_thresh;
	lld->sge_ingpadboundary = adap->sge.fl_align;
	lld->sge_egrstatuspagesize = adap->sge.stat_len;
	lld->sge_pktshift = adap->sge.pktshift;
	lld->ulp_crypto = adap->params.crypto;
	lld->enable_fw_ofld_conn = adap->flags & CXGB4_FW_OFLD_CONN;
	lld->max_ordird_qp = adap->params.max_ordird_qp;
	lld->max_ird_adapter = adap->params.max_ird_adapter;
	lld->ulptx_memwrite_dsgl = adap->params.ulptx_memwrite_dsgl;
	lld->nodeid = dev_to_node(adap->pdev_dev);
	lld->fr_nsmr_tpte_wr_support = adap->params.fr_nsmr_tpte_wr_support;
	lld->write_w_imm_support = adap->params.write_w_imm_support;
	lld->write_cmpl_support = adap->params.write_cmpl_support;
}

static int uld_attach(struct adapter *adap, unsigned int uld)
{
	struct cxgb4_lld_info lli;
	void *handle;

	uld_init(adap, &lli);
	uld_queue_init(adap, uld, &lli);

	handle = adap->uld[uld].add(&lli);
	if (IS_ERR(handle)) {
		dev_warn(adap->pdev_dev,
			 "could not attach to the %s driver, error %ld\n",
			 adap->uld[uld].name, PTR_ERR(handle));
		return PTR_ERR(handle);
	}

	adap->uld[uld].handle = handle;
	t4_register_netevent_notifier();

	if (adap->flags & CXGB4_FULL_INIT_DONE)
		adap->uld[uld].state_change(handle, CXGB4_STATE_UP);

	return 0;
}

#if IS_ENABLED(CONFIG_CHELSIO_TLS_DEVICE)
static bool cxgb4_uld_in_use(struct adapter *adap)
{
	const struct tid_info *t = &adap->tids;

	return (atomic_read(&t->conns_in_use) || t->stids_in_use);
}

/* cxgb4_set_ktls_feature: request FW to enable/disable kTLS settings.
 * @adap: adapter info
 * @enable: 1 to enable / 0 to disable kTLS settings.
 */
int cxgb4_set_ktls_feature(struct adapter *adap, bool enable)
{
	int ret = 0;
	u32 params =
		FW_PARAMS_MNEM_V(FW_PARAMS_MNEM_DEV) |
		FW_PARAMS_PARAM_X_V(FW_PARAMS_PARAM_DEV_KTLS_HW) |
		FW_PARAMS_PARAM_Y_V(enable) |
		FW_PARAMS_PARAM_Z_V(FW_PARAMS_PARAM_DEV_KTLS_HW_USER_ENABLE);

	if (enable) {
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount)) {
			/* If any ULD connections are up at this point, some
			 * other ULD is already active; return failure.
			 */
			if (cxgb4_uld_in_use(adap)) {
				dev_dbg(adap->pdev_dev,
					"ULD connections (tid/stid) active. Can't enable kTLS\n");
				return -EINVAL;
			}
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			refcount_set(&adap->chcr_ktls.ktls_refcount, 1);
			pr_debug("kTLS has been enabled. Restrictions placed on ULD support\n");
		} else {
			/* kTLS is already enabled; just take another
			 * reference.
			 */
			refcount_inc(&adap->chcr_ktls.ktls_refcount);
		}
	} else {
		/* Return failure if the refcount is already 0. */
		if (!refcount_read(&adap->chcr_ktls.ktls_refcount))
			return -EINVAL;
		/* Drop the reference; if it was the last one, disable the
		 * kTLS feature, else just report success.
		 */
		if (refcount_dec_and_test(&adap->chcr_ktls.ktls_refcount)) {
			ret = t4_set_params(adap, adap->mbox, adap->pf,
					    0, 1, &params, &params);
			if (ret)
				return ret;
			pr_debug("kTLS is disabled. Restrictions on ULD support removed\n");
		}
	}

	return ret;
}
#endif
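
#if 0	/* Usage sketch for cxgb4_set_ktls_feature(); illustrative only.
	 * A kTLS device callback would take a reference before programming
	 * a connection and drop it on teardown.  Enabling fails with
	 * -EINVAL while other ULD connections hold tids/stids (see
	 * cxgb4_uld_in_use() above).  example_setup_ktls_conn() is a
	 * hypothetical helper.
	 */
static int example_setup_ktls_conn(struct adapter *adap)
{
	int ret;

	ret = cxgb4_set_ktls_feature(adap, true);	/* takes a reference */
	if (ret)
		return ret;

	/* ... program the kTLS connection state here ... */

	cxgb4_set_ktls_feature(adap, false);		/* drops the reference */
	return 0;
}
#endif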

static void cxgb4_uld_alloc_resources(struct adapter *adap,
				      enum cxgb4_uld type,
				      const struct cxgb4_uld_info *p)
{
	int ret = 0;

	if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
	    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
		return;
	if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
		return;
	ret = cfg_queues_uld(adap, type, p);
	if (ret)
		goto out;
	ret = setup_sge_queues_uld(adap, type, p->lro);
	if (ret)
		goto free_queues;
	if (adap->flags & CXGB4_USING_MSIX) {
		ret = request_msix_queue_irqs_uld(adap, type);
		if (ret)
			goto free_rxq;
	}
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		enable_rx_uld(adap, type);
	if (adap->uld[type].add)
		goto free_irq;
	ret = setup_sge_txq_uld(adap, type, p);
	if (ret)
		goto free_irq;
	adap->uld[type] = *p;
	ret = uld_attach(adap, type);
	if (ret)
		goto free_txq;
	return;
free_txq:
	release_sge_txq_uld(adap, type);
free_irq:
	if (adap->flags & CXGB4_FULL_INIT_DONE)
		quiesce_rx_uld(adap, type);
	if (adap->flags & CXGB4_USING_MSIX)
		free_msix_queue_irqs_uld(adap, type);
free_rxq:
	free_sge_queues_uld(adap, type);
free_queues:
	free_queues_uld(adap, type);
out:
	dev_warn(adap->pdev_dev,
		 "ULD registration failed for uld type %d\n", type);
}

void cxgb4_uld_enable(struct adapter *adap)
{
	struct cxgb4_uld_list *uld_entry;

	mutex_lock(&uld_mutex);
	list_add_tail(&adap->list_node, &adapter_list);
	list_for_each_entry(uld_entry, &uld_list, list_node)
		cxgb4_uld_alloc_resources(adap, uld_entry->uld_type,
					  &uld_entry->uld_info);
	mutex_unlock(&uld_mutex);
}

/* cxgb4_register_uld - register an upper-layer driver
 * @type: the ULD type
 * @p: the ULD methods
 *
 * Registers an upper-layer driver with this driver and notifies the ULD
 * about any presently available devices that support its type.
 */
void cxgb4_register_uld(enum cxgb4_uld type,
			const struct cxgb4_uld_info *p)
{
	struct cxgb4_uld_list *uld_entry;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return;

	uld_entry = kzalloc(sizeof(*uld_entry), GFP_KERNEL);
	if (!uld_entry)
		return;

	memcpy(&uld_entry->uld_info, p, sizeof(struct cxgb4_uld_info));
	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node)
		cxgb4_uld_alloc_resources(adap, type, p);

	uld_entry->uld_type = type;
	list_add_tail(&uld_entry->list_node, &uld_list);
	mutex_unlock(&uld_mutex);
	return;
}
EXPORT_SYMBOL(cxgb4_register_uld);
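
#if 0	/* Usage sketch: how an upper-layer driver might register and
	 * unregister itself; illustrative only.  The my_uld_* names and
	 * CXGB4_ULD_MY are hypothetical; the cxgb4_uld_info fields shown
	 * are the ones this file consumes (add, rx_handler, state_change,
	 * nrxq, ntxq, rxq_size, lro, ciq, name).
	 */
static const struct cxgb4_uld_info my_uld_info = {
	.name = "my_uld",
	.nrxq = 8,
	.ntxq = 8,
	.rxq_size = 1024,
	.lro = false,
	.ciq = false,
	.add = my_uld_add,		/* returns a handle or an ERR_PTR() */
	.rx_handler = my_uld_rx_handler,
	.state_change = my_uld_state_change,
};

static int __init my_uld_init_module(void)
{
	cxgb4_register_uld(CXGB4_ULD_MY, &my_uld_info);
	return 0;
}

static void __exit my_uld_exit_module(void)
{
	cxgb4_unregister_uld(CXGB4_ULD_MY);
}
#endif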

/**
 *	cxgb4_unregister_uld - unregister an upper-layer driver
 *	@type: the ULD type
 *
 *	Unregisters an existing upper-layer driver.
 */
int cxgb4_unregister_uld(enum cxgb4_uld type)
{
	struct cxgb4_uld_list *uld_entry, *tmp;
	struct adapter *adap;

	if (type >= CXGB4_ULD_MAX)
		return -EINVAL;

	mutex_lock(&uld_mutex);
	list_for_each_entry(adap, &adapter_list, list_node) {
		if ((type == CXGB4_ULD_CRYPTO && !is_pci_uld(adap)) ||
		    (type != CXGB4_ULD_CRYPTO && !is_offload(adap)))
			continue;
		if (type == CXGB4_ULD_ISCSIT && is_t4(adap->params.chip))
			continue;

		cxgb4_shutdown_uld_adapter(adap, type);
	}

	list_for_each_entry_safe(uld_entry, tmp, &uld_list, list_node) {
		if (uld_entry->uld_type == type) {
			list_del(&uld_entry->list_node);
			kfree(uld_entry);
		}
	}
	mutex_unlock(&uld_mutex);

	return 0;
}
EXPORT_SYMBOL(cxgb4_unregister_uld);