/*
 * Copyright (c) 2006-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/list.h>
#include <linux/slab.h>
#include <net/neighbour.h>
#include <linux/notifier.h>
#include <linux/atomic.h>
#include <linux/proc_fs.h>
#include <linux/if_vlan.h>
#include <net/netevent.h>
#include <linux/highmem.h>
#include <linux/vmalloc.h>
#include <linux/export.h>

#include "common.h"
#include "regs.h"
#include "cxgb3_ioctl.h"
#include "cxgb3_ctl_defs.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "firmware_exports.h"
#include "cxgb3_offload.h"

static LIST_HEAD(client_list);
static LIST_HEAD(ofld_dev_list);
static DEFINE_MUTEX(cxgb3_db_lock);

static DEFINE_RWLOCK(adapter_list_lock);
static LIST_HEAD(adapter_list);

static const unsigned int MAX_ATIDS = 64 * 1024;
static const unsigned int ATID_BASE = 0x10000;

static void cxgb_neigh_update(struct neighbour *neigh);
static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
			  struct neighbour *neigh, const void *daddr);

static inline int offload_activated(struct t3cdev *tdev)
{
	const struct adapter *adapter = tdev2adap(tdev);

	return test_bit(OFFLOAD_DEVMAP_BIT, &adapter->open_device_map);
}

/**
 *	cxgb3_register_client - register an offload client
 *	@client: the client
 *
 *	Add the client to the client list,
 *	and call back the client for each activated offload device.
 */
void cxgb3_register_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_add_tail(&client->client_list, &client_list);

	if (client->add) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->add(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_register_client);

/**
 *	cxgb3_unregister_client - unregister an offload client
 *	@client: the client
 *
 *	Remove the client from the client list,
 *	and call back the client for each activated offload device.
 */
void cxgb3_unregister_client(struct cxgb3_client *client)
{
	struct t3cdev *tdev;

	mutex_lock(&cxgb3_db_lock);
	list_del(&client->client_list);

	if (client->remove) {
		list_for_each_entry(tdev, &ofld_dev_list, ofld_dev_list) {
			if (offload_activated(tdev))
				client->remove(tdev);
		}
	}
	mutex_unlock(&cxgb3_db_lock);
}

EXPORT_SYMBOL(cxgb3_unregister_client);

/**
 *	cxgb3_add_clients - activate registered clients for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is activated.
 */
void cxgb3_add_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->add)
			client->add(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

/**
 *	cxgb3_remove_clients - deactivate registered clients
 *	for an offload device
 *	@tdev: the offload device
 *
 *	Call back all registered clients once an offload device is deactivated.
 */
void cxgb3_remove_clients(struct t3cdev *tdev)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->remove)
			client->remove(tdev);
	}
	mutex_unlock(&cxgb3_db_lock);
}

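/*
 * Notify all registered clients that have an event handler of an
 * adapter event affecting the given port.
 */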
void cxgb3_event_notify(struct t3cdev *tdev, u32 event, u32 port)
{
	struct cxgb3_client *client;

	mutex_lock(&cxgb3_db_lock);
	list_for_each_entry(client, &client_list, client_list) {
		if (client->event_handler)
			client->event_handler(tdev, event, port);
	}
	mutex_unlock(&cxgb3_db_lock);
}

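/*
 * Return the adapter port whose MAC address matches @mac.  When a valid
 * VLAN id is supplied the matching VLAN device is returned instead, and
 * bond slaves are resolved to their topmost master device.  Returns NULL
 * if no port matches.
 */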
static struct net_device *get_iff_from_mac(struct adapter *adapter,
					   const unsigned char *mac,
					   unsigned int vlan)
{
	int i;

	for_each_port(adapter, i) {
		struct net_device *dev = adapter->port[i];

		if (ether_addr_equal(dev->dev_addr, mac)) {
			rcu_read_lock();
			if (vlan && vlan != VLAN_VID_MASK) {
				dev = __vlan_find_dev_deep_rcu(dev, htons(ETH_P_8021Q), vlan);
			} else if (netif_is_bond_slave(dev)) {
				struct net_device *upper_dev;

				while ((upper_dev =
					netdev_master_upper_dev_get_rcu(dev)))
					dev = upper_dev;
			}
			rcu_read_unlock();
			return dev;
		}
	}
	return NULL;
}

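/*
 * Handle iSCSI ULP control requests: report the adapter's iSCSI limits,
 * tag mask, and page sizes, or program the tag mask and DDP page sizes.
 */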
static int cxgb_ulp_iscsi_ctl(struct adapter *adapter, unsigned int req,
			      void *data)
{
	int i;
	int ret = 0;
	unsigned int val = 0;
	struct ulp_iscsi_info *uiip = data;

	switch (req) {
	case ULP_ISCSI_GET_PARAMS:
		uiip->pdev = adapter->pdev;
		uiip->llimit = t3_read_reg(adapter, A_ULPRX_ISCSI_LLIMIT);
		uiip->ulimit = t3_read_reg(adapter, A_ULPRX_ISCSI_ULIMIT);
		uiip->tagmask = t3_read_reg(adapter, A_ULPRX_ISCSI_TAGMASK);

		val = t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ);
		for (i = 0; i < 4; i++, val >>= 8)
			uiip->pgsz_factor[i] = val & 0xFF;

		val = t3_read_reg(adapter, A_TP_PARA_REG7);
		uiip->max_txsz =
		uiip->max_rxsz = min((val >> S_PMMAXXFERLEN0)&M_PMMAXXFERLEN0,
				     (val >> S_PMMAXXFERLEN1)&M_PMMAXXFERLEN1);
		/*
		 * On tx, the iscsi pdu has to be <= tx page size and has to
		 * fit into the Tx PM FIFO.
		 */
		val = min(adapter->params.tp.tx_pg_size,
			  t3_read_reg(adapter, A_PM1_TX_CFG) >> 17);
		uiip->max_txsz = min(val, uiip->max_txsz);

		/* set MaxRxData to 16224 */
		val = t3_read_reg(adapter, A_TP_PARA_REG2);
		if ((val >> S_MAXRXDATA) != 0x3f60) {
			val &= (M_RXCOALESCESIZE << S_RXCOALESCESIZE);
			val |= V_MAXRXDATA(0x3f60);
			pr_info("%s, iscsi set MaxRxData to 16224 (0x%x)\n",
				adapter->name, val);
			t3_write_reg(adapter, A_TP_PARA_REG2, val);
		}

		/*
		 * On rx, the iscsi pdu has to be < rx page size and the
		 * max rx data length programmed in TP.
		 */
		val = min(adapter->params.tp.rx_pg_size,
			  ((t3_read_reg(adapter, A_TP_PARA_REG2)) >>
			   S_MAXRXDATA) & M_MAXRXDATA);
		uiip->max_rxsz = min(val, uiip->max_rxsz);
		break;
	case ULP_ISCSI_SET_PARAMS:
		t3_write_reg(adapter, A_ULPRX_ISCSI_TAGMASK, uiip->tagmask);
		/* program the ddp page sizes */
		for (i = 0; i < 4; i++)
			val |= (uiip->pgsz_factor[i] & 0xF) << (8 * i);
		if (val && (val != t3_read_reg(adapter, A_ULPRX_ISCSI_PSZ))) {
			pr_info("%s, setting iscsi pgsz 0x%x, %u,%u,%u,%u\n",
				adapter->name, val, uiip->pgsz_factor[0],
				uiip->pgsz_factor[1], uiip->pgsz_factor[2],
				uiip->pgsz_factor[3]);
			t3_write_reg(adapter, A_ULPRX_ISCSI_PSZ, val);
		}
		break;
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

/* Response queue used for RDMA events. */
#define ASYNC_NOTIF_RSPQ 0

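/*
 * Handle RDMA control requests: parameter queries, CQ context operations,
 * control QP setup, and reads of adapter memory and MIB statistics.
 */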
static int cxgb_rdma_ctl(struct adapter *adapter, unsigned int req, void *data)
{
	int ret = 0;

	switch (req) {
	case RDMA_GET_PARAMS: {
		struct rdma_info *rdma = data;
		struct pci_dev *pdev = adapter->pdev;

		rdma->udbell_physbase = pci_resource_start(pdev, 2);
		rdma->udbell_len = pci_resource_len(pdev, 2);
		rdma->tpt_base =
			t3_read_reg(adapter, A_ULPTX_TPT_LLIMIT);
		rdma->tpt_top = t3_read_reg(adapter, A_ULPTX_TPT_ULIMIT);
		rdma->pbl_base =
			t3_read_reg(adapter, A_ULPTX_PBL_LLIMIT);
		rdma->pbl_top = t3_read_reg(adapter, A_ULPTX_PBL_ULIMIT);
		rdma->rqt_base = t3_read_reg(adapter, A_ULPRX_RQ_LLIMIT);
		rdma->rqt_top = t3_read_reg(adapter, A_ULPRX_RQ_ULIMIT);
		rdma->kdb_addr = adapter->regs + A_SG_KDOORBELL;
		rdma->pdev = pdev;
		break;
	}
	case RDMA_CQ_OP: {
		unsigned long flags;
		struct rdma_cq_op *rdma = data;

		/* may be called in any context */
		spin_lock_irqsave(&adapter->sge.reg_lock, flags);
		ret = t3_sge_cqcntxt_op(adapter, rdma->id, rdma->op,
					rdma->credits);
		spin_unlock_irqrestore(&adapter->sge.reg_lock, flags);
		break;
	}
	case RDMA_GET_MEM: {
		struct ch_mem_range *t = data;
		struct mc7 *mem;

		if ((t->addr & 7) || (t->len & 7))
			return -EINVAL;
		if (t->mem_id == MEM_CM)
			mem = &adapter->cm;
		else if (t->mem_id == MEM_PMRX)
			mem = &adapter->pmrx;
		else if (t->mem_id == MEM_PMTX)
			mem = &adapter->pmtx;
		else
			return -EINVAL;

		ret = t3_mc7_bd_read(mem, t->addr / 8, t->len / 8,
				     (u64 *) t->buf);
		if (ret)
			return ret;
		break;
	}
	case RDMA_CQ_SETUP: {
		struct rdma_cq_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_cqcntxt(adapter, rdma->id,
					  rdma->base_addr, rdma->size,
					  ASYNC_NOTIF_RSPQ,
					  rdma->ovfl_mode, rdma->credits,
					  rdma->credit_thres);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_CQ_DISABLE:
		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_disable_cqcntxt(adapter, *(unsigned int *)data);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	case RDMA_CTRL_QP_SETUP: {
		struct rdma_ctrlqp_setup *rdma = data;

		spin_lock_irq(&adapter->sge.reg_lock);
		ret = t3_sge_init_ecntxt(adapter, FW_RI_SGEEC_START, 0,
					 SGE_CNTXT_RDMA,
					 ASYNC_NOTIF_RSPQ,
					 rdma->base_addr, rdma->size,
					 FW_RI_TID_START, 1, 0);
		spin_unlock_irq(&adapter->sge.reg_lock);
		break;
	}
	case RDMA_GET_MIB: {
		spin_lock(&adapter->stats_lock);
		t3_tp_get_mib_stats(adapter, (struct tp_mib_stats *)data);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		ret = -EOPNOTSUPP;
	}
	return ret;
}

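/*
 * Top-level control operation handler for an offload device.  Simple
 * queries are answered directly; iSCSI and RDMA requests are forwarded
 * to the handlers above once offload mode is running.
 */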
static int cxgb_offload_ctl(struct t3cdev *tdev, unsigned int req, void *data)
{
	struct adapter *adapter = tdev2adap(tdev);
	struct tid_range *tid;
	struct mtutab *mtup;
	struct iff_mac *iffmacp;
	struct ddp_params *ddpp;
	struct adap_ports *ports;
	struct ofld_page_info *rx_page_info;
	struct tp_params *tp = &adapter->params.tp;
	int i;

	switch (req) {
	case GET_MAX_OUTSTANDING_WR:
		*(unsigned int *)data = FW_WR_NUM;
		break;
	case GET_WR_LEN:
		*(unsigned int *)data = WR_FLITS;
		break;
	case GET_TX_MAX_CHUNK:
		*(unsigned int *)data = 1 << 20;	/* 1MB */
		break;
	case GET_TID_RANGE:
		tid = data;
		tid->num = t3_mc5_size(&adapter->mc5) -
		    adapter->params.mc5.nroutes -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nservers;
		tid->base = 0;
		break;
	case GET_STID_RANGE:
		tid = data;
		tid->num = adapter->params.mc5.nservers;
		tid->base = t3_mc5_size(&adapter->mc5) - tid->num -
		    adapter->params.mc5.nfilters - adapter->params.mc5.nroutes;
		break;
	case GET_L2T_CAPACITY:
		*(unsigned int *)data = 2048;
		break;
	case GET_MTUS:
		mtup = data;
		mtup->size = NMTUS;
		mtup->mtus = adapter->params.mtus;
		break;
	case GET_IFF_FROM_MAC:
		iffmacp = data;
		iffmacp->dev = get_iff_from_mac(adapter, iffmacp->mac_addr,
						iffmacp->vlan_tag &
						VLAN_VID_MASK);
		break;
	case GET_DDP_PARAMS:
		ddpp = data;
		ddpp->llimit = t3_read_reg(adapter, A_ULPRX_TDDP_LLIMIT);
		ddpp->ulimit = t3_read_reg(adapter, A_ULPRX_TDDP_ULIMIT);
		ddpp->tag_mask = t3_read_reg(adapter, A_ULPRX_TDDP_TAGMASK);
		break;
	case GET_PORTS:
		ports = data;
		ports->nports = adapter->params.nports;
		for_each_port(adapter, i)
			ports->lldevs[i] = adapter->port[i];
		break;
	case ULP_ISCSI_GET_PARAMS:
	case ULP_ISCSI_SET_PARAMS:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_ulp_iscsi_ctl(adapter, req, data);
	case RDMA_GET_PARAMS:
	case RDMA_CQ_OP:
	case RDMA_CQ_SETUP:
	case RDMA_CQ_DISABLE:
	case RDMA_CTRL_QP_SETUP:
	case RDMA_GET_MEM:
	case RDMA_GET_MIB:
		if (!offload_running(adapter))
			return -EAGAIN;
		return cxgb_rdma_ctl(adapter, req, data);
	case GET_RX_PAGE_INFO:
		rx_page_info = data;
		rx_page_info->page_size = tp->rx_pg_size;
		rx_page_info->num = tp->rx_num_pgs;
		break;
	case GET_ISCSI_IPV4ADDR: {
		struct iscsi_ipv4addr *p = data;
		struct port_info *pi = netdev_priv(p->dev);

		p->ipv4addr = pi->iscsi_ipv4addr;
		break;
	}
	case GET_EMBEDDED_INFO: {
		struct ch_embedded_info *e = data;

		spin_lock(&adapter->stats_lock);
		t3_get_fw_version(adapter, &e->fw_vers);
		t3_get_tp_version(adapter, &e->tp_vers);
		spin_unlock(&adapter->stats_lock);
		break;
	}
	default:
		return -EOPNOTSUPP;
	}
	return 0;
}

/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is set up.  This drops the packets as it isn't
 * normal to get offload packets at this stage.
 */
static int rx_offload_blackhole(struct t3cdev *dev, struct sk_buff **skbs,
				int n)
{
	while (n--)
		dev_kfree_skb_any(skbs[n]);
	return 0;
}

static void dummy_neigh_update(struct t3cdev *dev, struct neighbour *neigh)
{
}

void cxgb3_set_dummy_ops(struct t3cdev *dev)
{
	dev->recv = rx_offload_blackhole;
	dev->neigh_update = dummy_neigh_update;
}

/*
 * Free an active-open TID.
 */
void *cxgb3_free_atid(struct t3cdev *tdev, int atid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union active_open_entry *p = atid2entry(t, atid);
	void *ctx = p->t3c_tid.ctx;

	spin_lock_bh(&t->atid_lock);
	p->next = t->afree;
	t->afree = p;
	t->atids_in_use--;
	spin_unlock_bh(&t->atid_lock);

	return ctx;
}

EXPORT_SYMBOL(cxgb3_free_atid);

/*
 * Free a server TID and return it to the free pool.
 */
void cxgb3_free_stid(struct t3cdev *tdev, int stid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;
	union listen_entry *p = stid2entry(t, stid);

	spin_lock_bh(&t->stid_lock);
	p->next = t->sfree;
	t->sfree = p;
	t->stids_in_use--;
	spin_unlock_bh(&t->stid_lock);
}

EXPORT_SYMBOL(cxgb3_free_stid);

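/* Associate a hardware TID with a client and its connection context. */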
void cxgb3_insert_tid(struct t3cdev *tdev, struct cxgb3_client *client,
		      void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	t->tid_tab[tid].client = client;
	t->tid_tab[tid].ctx = ctx;
	atomic_inc(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_insert_tid);

/*
 * Populate a TID_RELEASE WR.  The skb must be already properly sized.
 */
static inline void mk_tid_release(struct sk_buff *skb, unsigned int tid)
{
	struct cpl_tid_release *req;

	skb->priority = CPL_PRIORITY_SETUP;
	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_TID_RELEASE, tid));
}

static void t3_process_tid_release_list(struct work_struct *work)
{
	struct t3c_data *td = container_of(work, struct t3c_data,
					   tid_release_task);
	struct sk_buff *skb;
	struct t3cdev *tdev = td->dev;

	spin_lock_bh(&td->tid_release_lock);
	while (td->tid_release_list) {
		struct t3c_tid_entry *p = td->tid_release_list;

		td->tid_release_list = p->ctx;
		spin_unlock_bh(&td->tid_release_lock);

		skb = alloc_skb(sizeof(struct cpl_tid_release),
				GFP_KERNEL);
		if (!skb)
			skb = td->nofail_skb;
		if (!skb) {
			spin_lock_bh(&td->tid_release_lock);
			p->ctx = (void *)td->tid_release_list;
			td->tid_release_list = p;
			break;
		}
		mk_tid_release(skb, p - td->tid_maps.tid_tab);
		cxgb3_ofld_send(tdev, skb);
		p->ctx = NULL;
		if (skb == td->nofail_skb)
			td->nofail_skb =
				alloc_skb(sizeof(struct cpl_tid_release),
					  GFP_KERNEL);
		spin_lock_bh(&td->tid_release_lock);
	}
	td->release_list_incomplete = (td->tid_release_list == NULL) ? 0 : 1;
	spin_unlock_bh(&td->tid_release_lock);

	if (!td->nofail_skb)
		td->nofail_skb =
			alloc_skb(sizeof(struct cpl_tid_release),
				  GFP_KERNEL);
}

/* use ctx as a next pointer in the tid release list */
void cxgb3_queue_tid_release(struct t3cdev *tdev, unsigned int tid)
{
	struct t3c_data *td = T3C_DATA(tdev);
	struct t3c_tid_entry *p = &td->tid_maps.tid_tab[tid];

	spin_lock_bh(&td->tid_release_lock);
	p->ctx = (void *)td->tid_release_list;
	p->client = NULL;
	td->tid_release_list = p;
	if (!p->ctx || td->release_list_incomplete)
		schedule_work(&td->tid_release_task);
	spin_unlock_bh(&td->tid_release_lock);
}

EXPORT_SYMBOL(cxgb3_queue_tid_release);

/*
 * Remove a tid from the TID table.  A client may defer processing its last
 * CPL message if it is locked at the time it arrives, and while the message
 * sits in the client's backlog the TID may be reused for another connection.
 * To handle this we atomically switch the TID association if it still points
 * to the original client context.
 */
void cxgb3_remove_tid(struct t3cdev *tdev, void *ctx, unsigned int tid)
{
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	BUG_ON(tid >= t->ntids);
	if (tdev->type == T3A)
		(void)cmpxchg(&t->tid_tab[tid].ctx, ctx, NULL);
	else {
		struct sk_buff *skb;

		skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_ATOMIC);
		if (likely(skb)) {
			mk_tid_release(skb, tid);
			cxgb3_ofld_send(tdev, skb);
			t->tid_tab[tid].ctx = NULL;
		} else
			cxgb3_queue_tid_release(tdev, tid);
	}
	atomic_dec(&t->tids_in_use);
}

EXPORT_SYMBOL(cxgb3_remove_tid);

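/*
 * Allocate an active-open TID, recording the owning client and context.
 * Fails (returns -1) if no TIDs are free or the allocation would leave
 * fewer than MC5_MIN_TIDS entries available.
 */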
int cxgb3_alloc_atid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int atid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->atid_lock);
	if (t->afree &&
	    t->atids_in_use + atomic_read(&t->tids_in_use) + MC5_MIN_TIDS <=
	    t->ntids) {
		union active_open_entry *p = t->afree;

		atid = (p - t->atid_tab) + t->atid_base;
		t->afree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->atids_in_use++;
	}
	spin_unlock_bh(&t->atid_lock);
	return atid;
}

EXPORT_SYMBOL(cxgb3_alloc_atid);

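/*
 * Allocate a server TID, recording the owning client and context.
 * Returns -1 if the free list is empty.
 */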
int cxgb3_alloc_stid(struct t3cdev *tdev, struct cxgb3_client *client,
		     void *ctx)
{
	int stid = -1;
	struct tid_info *t = &(T3C_DATA(tdev))->tid_maps;

	spin_lock_bh(&t->stid_lock);
	if (t->sfree) {
		union listen_entry *p = t->sfree;

		stid = (p - t->stid_tab) + t->stid_base;
		t->sfree = p->next;
		p->t3c_tid.ctx = ctx;
		p->t3c_tid.client = client;
		t->stids_in_use++;
	}
	spin_unlock_bh(&t->stid_lock);
	return stid;
}

EXPORT_SYMBOL(cxgb3_alloc_stid);

/* Get the t3cdev associated with a net_device */
struct t3cdev *dev2t3cdev(struct net_device *dev)
{
	const struct port_info *pi = netdev_priv(dev);

	return (struct t3cdev *)pi->adapter;
}

EXPORT_SYMBOL(dev2t3cdev);

static int do_smt_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_smt_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected SMT_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_l2t_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_l2t_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected L2T_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

static int do_rte_write_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_rte_write_rpl *rpl = cplhdr(skb);

	if (rpl->status != CPL_ERR_NONE)
		pr_err("Unexpected RTE_WRITE_RPL status %u for entry %u\n",
		       rpl->status, GET_TID(rpl));

	return CPL_RET_BUF_DONE;
}

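/*
 * The dispatchers below route CPL messages to the handler registered by
 * the client that owns the affected TID.  Messages arriving for a TID
 * without a client context are reported and their buffers freed.
 */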
static int do_act_open_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_open_rpl *rpl = cplhdr(skb);
	unsigned int atid = G_TID(ntohl(rpl->atid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_atid(&(T3C_DATA(dev))->tid_maps, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client &&
	    t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_OPEN_RPL]) {
		return t3c_tid->client->handlers[CPL_ACT_OPEN_RPL] (dev, skb,
								    t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_OPEN_RPL);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_stid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int stid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_stid(&(T3C_DATA(dev))->tid_maps, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode] (dev, skb,
							     t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_hwtid_rpl(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, p->opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int do_cr(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_pass_accept_req *req = cplhdr(skb);
	unsigned int stid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: passive open TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_stid(t, stid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]) {
		return t3c_tid->client->handlers[CPL_PASS_ACCEPT_REQ]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_PASS_ACCEPT_REQ);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

/*
 * Returns an sk_buff for a reply CPL message of size len.  If the input
 * sk_buff has no other users it is trimmed and reused, otherwise a new buffer
 * is allocated.  The input skb must be of size at least len.  Note that this
 * operation does not destroy the original skb data even if it decides to reuse
 * the buffer.
 */
static struct sk_buff *cxgb3_get_cpl_reply_skb(struct sk_buff *skb, size_t len,
					       gfp_t gfp)
{
	if (likely(!skb_cloned(skb))) {
		BUG_ON(skb->len < len);
		__skb_trim(skb, len);
		skb_get(skb);
	} else {
		skb = alloc_skb(len, gfp);
		if (skb)
			__skb_put(skb, len);
	}
	return skb;
}

static int do_abort_req_rss(struct t3cdev *dev, struct sk_buff *skb)
{
	union opcode_tid *p = cplhdr(skb);
	unsigned int hwtid = G_TID(ntohl(p->opcode_tid));
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[p->opcode]) {
		return t3c_tid->client->handlers[p->opcode]
		    (dev, skb, t3c_tid->ctx);
	} else {
		struct cpl_abort_req_rss *req = cplhdr(skb);
		struct cpl_abort_rpl *rpl;
		struct sk_buff *reply_skb;
		unsigned int tid = GET_TID(req);
		u8 cmd = req->status;

		if (req->status == CPL_ERR_RTX_NEG_ADVICE ||
		    req->status == CPL_ERR_PERSIST_NEG_ADVICE)
			goto out;

		reply_skb = cxgb3_get_cpl_reply_skb(skb,
						    sizeof(struct
							   cpl_abort_rpl),
						    GFP_ATOMIC);

		if (!reply_skb) {
			pr_err("do_abort_req_rss: couldn't get skb!\n");
			goto out;
		}
		reply_skb->priority = CPL_PRIORITY_DATA;
		__skb_put(reply_skb, sizeof(struct cpl_abort_rpl));
		rpl = cplhdr(reply_skb);
		rpl->wr.wr_hi =
		    htonl(V_WR_OP(FW_WROPCODE_OFLD_HOST_ABORT_CON_RPL));
		rpl->wr.wr_lo = htonl(V_WR_TID(tid));
		OPCODE_TID(rpl) = htonl(MK_OPCODE_TID(CPL_ABORT_RPL, tid));
		rpl->cmd = cmd;
		cxgb3_ofld_send(dev, reply_skb);
out:
		return CPL_RET_BUF_DONE;
	}
}

static int do_act_establish(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_act_establish *req = cplhdr(skb);
	unsigned int atid = G_PASS_OPEN_TID(ntohl(req->tos_tid));
	struct tid_info *t = &(T3C_DATA(dev))->tid_maps;
	struct t3c_tid_entry *t3c_tid;
	unsigned int tid = GET_TID(req);

	if (unlikely(tid >= t->ntids)) {
		pr_err("%s: active establish TID %u too large\n",
		       dev->name, tid);
		t3_fatal_err(tdev2adap(dev));
		return CPL_RET_BUF_DONE;
	}

	t3c_tid = lookup_atid(t, atid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[CPL_ACT_ESTABLISH]) {
		return t3c_tid->client->handlers[CPL_ACT_ESTABLISH]
		    (dev, skb, t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, CPL_ACT_ESTABLISH);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

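/*
 * Deliver an adapter trace packet (CPL_TRACE_PKT) to the network stack
 * on the low-level device, stripped of its CPL header.
 */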
static int do_trace(struct t3cdev *dev, struct sk_buff *skb)
{
	struct cpl_trace_pkt *p = cplhdr(skb);

	skb->protocol = htons(0xffff);
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb_reset_mac_header(skb);
	netif_receive_skb(skb);
	return 0;
}

/*
 * That skb would better have come from process_responses() where we abuse
 * ->priority and ->csum to carry our data.  NB: if we get to per-arch
 * ->csum, the things might get really interesting here.
 */

static inline u32 get_hwtid(struct sk_buff *skb)
{
	return ntohl((__force __be32)skb->priority) >> 8 & 0xfffff;
}

static inline u32 get_opcode(struct sk_buff *skb)
{
	return G_OPCODE(ntohl((__force __be32)skb->csum));
}

static int do_term(struct t3cdev *dev, struct sk_buff *skb)
{
	unsigned int hwtid = get_hwtid(skb);
	unsigned int opcode = get_opcode(skb);
	struct t3c_tid_entry *t3c_tid;

	t3c_tid = lookup_tid(&(T3C_DATA(dev))->tid_maps, hwtid);
	if (t3c_tid && t3c_tid->ctx && t3c_tid->client->handlers &&
	    t3c_tid->client->handlers[opcode]) {
		return t3c_tid->client->handlers[opcode] (dev, skb,
							  t3c_tid->ctx);
	} else {
		pr_err("%s: received clientless CPL command 0x%x\n",
		       dev->name, opcode);
		return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
	}
}

static int nb_callback(struct notifier_block *self, unsigned long event,
		       void *ctx)
{
	switch (event) {
	case (NETEVENT_NEIGH_UPDATE):{
		cxgb_neigh_update((struct neighbour *)ctx);
		break;
	}
	case (NETEVENT_REDIRECT):{
		struct netevent_redirect *nr = ctx;
		cxgb_redirect(nr->old, nr->new, nr->neigh,
			      nr->daddr);
		cxgb_neigh_update(nr->neigh);
		break;
	}
	default:
		break;
	}
	return 0;
}

static struct notifier_block nb = {
	.notifier_call = nb_callback
};

/*
 * Process a received packet with an unknown/unexpected CPL opcode.
 */
static int do_bad_cpl(struct t3cdev *dev, struct sk_buff *skb)
{
	pr_err("%s: received bad CPL command 0x%x\n", dev->name, *skb->data);
	return CPL_RET_BUF_DONE | CPL_RET_BAD_MSG;
}

/*
 * Handlers for each CPL opcode
 */
static cpl_handler_func cpl_handlers[NUM_CPL_CMDS];

/*
 * Add a new handler to the CPL dispatch table.  A NULL handler may be supplied
 * to unregister an existing handler.
 */
void t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
{
	if (opcode < NUM_CPL_CMDS)
		cpl_handlers[opcode] = h ? h : do_bad_cpl;
	else
		pr_err("T3C: handler registration for opcode %x failed\n",
		       opcode);
}

EXPORT_SYMBOL(t3_register_cpl_handler);

/*
 * T3CDEV's receive method.
 */
static int process_rx(struct t3cdev *dev, struct sk_buff **skbs, int n)
{
	while (n--) {
		struct sk_buff *skb = *skbs++;
		unsigned int opcode = get_opcode(skb);
		int ret = cpl_handlers[opcode] (dev, skb);

#if VALIDATE_TID
		if (ret & CPL_RET_UNKNOWN_TID) {
			union opcode_tid *p = cplhdr(skb);

			pr_err("%s: CPL message (opcode %u) had unknown TID %u\n",
			       dev->name, opcode, G_TID(ntohl(p->opcode_tid)));
		}
#endif
		if (ret & CPL_RET_BUF_DONE)
			kfree_skb(skb);
	}
	return 0;
}

/*
 * Sends an sk_buff to a T3C driver after dealing with any active network taps.
 */
int cxgb3_ofld_send(struct t3cdev *dev, struct sk_buff *skb)
{
	int r;

	local_bh_disable();
	r = dev->send(dev, skb);
	local_bh_enable();
	return r;
}

EXPORT_SYMBOL(cxgb3_ofld_send);

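/* Return whether @dev is a port of a registered offload adapter. */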
static int is_offloading(struct net_device *dev)
{
	struct adapter *adapter;
	int i;

	read_lock_bh(&adapter_list_lock);
	list_for_each_entry(adapter, &adapter_list, adapter_list) {
		for_each_port(adapter, i) {
			if (dev == adapter->port[i]) {
				read_unlock_bh(&adapter_list_lock);
				return 1;
			}
		}
	}
	read_unlock_bh(&adapter_list_lock);
	return 0;
}

static void cxgb_neigh_update(struct neighbour *neigh)
{
	struct net_device *dev;

	if (!neigh)
		return;
	dev = neigh->dev;
	if (dev && (is_offloading(dev))) {
		struct t3cdev *tdev = dev2t3cdev(dev);

		BUG_ON(!tdev);
		t3_l2t_update(tdev, neigh);
	}
}

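/*
 * Build and send a CPL_SET_TCB_FIELD request that repoints connection
 * @tid's TCB at L2T entry @e.
 */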
set_l2t_ix(struct t3cdev * tdev,u32 tid,struct l2t_entry * e)1088*4882a593Smuzhiyun static void set_l2t_ix(struct t3cdev *tdev, u32 tid, struct l2t_entry *e)
1089*4882a593Smuzhiyun {
1090*4882a593Smuzhiyun struct sk_buff *skb;
1091*4882a593Smuzhiyun struct cpl_set_tcb_field *req;
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
1094*4882a593Smuzhiyun if (!skb) {
1095*4882a593Smuzhiyun pr_err("%s: cannot allocate skb!\n", __func__);
1096*4882a593Smuzhiyun return;
1097*4882a593Smuzhiyun }
1098*4882a593Smuzhiyun skb->priority = CPL_PRIORITY_CONTROL;
1099*4882a593Smuzhiyun req = skb_put(skb, sizeof(*req));
1100*4882a593Smuzhiyun req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
1101*4882a593Smuzhiyun OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SET_TCB_FIELD, tid));
1102*4882a593Smuzhiyun req->reply = 0;
1103*4882a593Smuzhiyun req->cpu_idx = 0;
1104*4882a593Smuzhiyun req->word = htons(W_TCB_L2T_IX);
1105*4882a593Smuzhiyun req->mask = cpu_to_be64(V_TCB_L2T_IX(M_TCB_L2T_IX));
1106*4882a593Smuzhiyun req->val = cpu_to_be64(V_TCB_L2T_IX(e->idx));
1107*4882a593Smuzhiyun tdev->send(tdev, skb);
1108*4882a593Smuzhiyun }
1109*4882a593Smuzhiyun
cxgb_redirect(struct dst_entry * old,struct dst_entry * new,struct neighbour * neigh,const void * daddr)1110*4882a593Smuzhiyun static void cxgb_redirect(struct dst_entry *old, struct dst_entry *new,
1111*4882a593Smuzhiyun struct neighbour *neigh,
1112*4882a593Smuzhiyun const void *daddr)
1113*4882a593Smuzhiyun {
1114*4882a593Smuzhiyun struct net_device *dev;
1115*4882a593Smuzhiyun struct tid_info *ti;
1116*4882a593Smuzhiyun struct t3cdev *tdev;
1117*4882a593Smuzhiyun u32 tid;
1118*4882a593Smuzhiyun int update_tcb;
1119*4882a593Smuzhiyun struct l2t_entry *e;
1120*4882a593Smuzhiyun struct t3c_tid_entry *te;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun dev = neigh->dev;
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun if (!is_offloading(dev))
1125*4882a593Smuzhiyun return;
1126*4882a593Smuzhiyun tdev = dev2t3cdev(dev);
1127*4882a593Smuzhiyun BUG_ON(!tdev);
1128*4882a593Smuzhiyun
1129*4882a593Smuzhiyun /* Add new L2T entry */
1130*4882a593Smuzhiyun e = t3_l2t_get(tdev, new, dev, daddr);
1131*4882a593Smuzhiyun if (!e) {
1132*4882a593Smuzhiyun pr_err("%s: couldn't allocate new l2t entry!\n", __func__);
1133*4882a593Smuzhiyun return;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
1136*4882a593Smuzhiyun /* Walk tid table and notify clients of dst change. */
1137*4882a593Smuzhiyun ti = &(T3C_DATA(tdev))->tid_maps;
1138*4882a593Smuzhiyun for (tid = 0; tid < ti->ntids; tid++) {
1139*4882a593Smuzhiyun te = lookup_tid(ti, tid);
1140*4882a593Smuzhiyun BUG_ON(!te);
1141*4882a593Smuzhiyun if (te && te->ctx && te->client && te->client->redirect) {
1142*4882a593Smuzhiyun update_tcb = te->client->redirect(te->ctx, old, new, e);
1143*4882a593Smuzhiyun if (update_tcb) {
1144*4882a593Smuzhiyun rcu_read_lock();
1145*4882a593Smuzhiyun l2t_hold(L2DATA(tdev), e);
1146*4882a593Smuzhiyun rcu_read_unlock();
1147*4882a593Smuzhiyun set_l2t_ix(tdev, tid, e);
1148*4882a593Smuzhiyun }
1149*4882a593Smuzhiyun }
1150*4882a593Smuzhiyun }
1151*4882a593Smuzhiyun l2t_release(tdev, e);
1152*4882a593Smuzhiyun }
1153*4882a593Smuzhiyun
/*
 * Allocate and initialize the TID tables.  Returns 0 on success.
 * All three tables (connection TIDs, server TIDs, active-open TIDs)
 * are carved out of a single kvzalloc'ed block, and the stid and atid
 * tables are threaded onto free lists for O(1) allocation.
 */
static int init_tid_tabs(struct tid_info *t, unsigned int ntids,
			 unsigned int natids, unsigned int nstids,
			 unsigned int atid_base, unsigned int stid_base)
{
	unsigned long size = ntids * sizeof(*t->tid_tab) +
	    natids * sizeof(*t->atid_tab) + nstids * sizeof(*t->stid_tab);

	t->tid_tab = kvzalloc(size, GFP_KERNEL);
	if (!t->tid_tab)
		return -ENOMEM;

	t->stid_tab = (union listen_entry *)&t->tid_tab[ntids];
	t->atid_tab = (union active_open_entry *)&t->stid_tab[nstids];
	t->ntids = ntids;
	t->nstids = nstids;
	t->stid_base = stid_base;
	t->sfree = NULL;
	t->natids = natids;
	t->atid_base = atid_base;
	t->afree = NULL;
	t->stids_in_use = t->atids_in_use = 0;
	atomic_set(&t->tids_in_use, 0);
	spin_lock_init(&t->stid_lock);
	spin_lock_init(&t->atid_lock);

	/*
	 * Setup the free lists for stid_tab and atid_tab.
	 */
	if (nstids) {
		while (--nstids)
			t->stid_tab[nstids - 1].next = &t->stid_tab[nstids];
		t->sfree = t->stid_tab;
	}
	if (natids) {
		while (--natids)
			t->atid_tab[natids - 1].next = &t->atid_tab[natids];
		t->afree = t->atid_tab;
	}
	return 0;
}

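/*
 * Release the TID tables.  A single kvfree() suffices because the
 * stid and atid tables live inside the block allocated for tid_tab.
 */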
static void free_tid_maps(struct tid_info *t)
{
	kvfree(t->tid_tab);
}

static inline void add_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_add_tail(&adap->adapter_list, &adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

static inline void remove_adapter(struct adapter *adap)
{
	write_lock_bh(&adapter_list_lock);
	list_del(&adap->adapter_list);
	write_unlock_bh(&adapter_list_lock);
}

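/*
 * Bring up offload support for an adapter: query the hardware limits
 * through the ->ctl() hook, size the L2 and TID tables from the
 * results, install the receive and neighbour-update callbacks, and
 * register the netevent notifier when the first adapter appears.
 */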
int cxgb3_offload_activate(struct adapter *adapter)
{
	struct t3cdev *dev = &adapter->tdev;
	int natids, err;
	struct t3c_data *t;
	struct tid_range stid_range, tid_range;
	struct mtutab mtutab;
	unsigned int l2t_capacity;
	struct l2t_data *l2td;

	t = kzalloc(sizeof(*t), GFP_KERNEL);
	if (!t)
		return -ENOMEM;

	err = -EOPNOTSUPP;
	if (dev->ctl(dev, GET_TX_MAX_CHUNK, &t->tx_max_chunk) < 0 ||
	    dev->ctl(dev, GET_MAX_OUTSTANDING_WR, &t->max_wrs) < 0 ||
	    dev->ctl(dev, GET_L2T_CAPACITY, &l2t_capacity) < 0 ||
	    dev->ctl(dev, GET_MTUS, &mtutab) < 0 ||
	    dev->ctl(dev, GET_TID_RANGE, &tid_range) < 0 ||
	    dev->ctl(dev, GET_STID_RANGE, &stid_range) < 0)
		goto out_free;

	err = -ENOMEM;
	l2td = t3_init_l2t(l2t_capacity);
	if (!l2td)
		goto out_free;

	natids = min(tid_range.num / 2, MAX_ATIDS);
	err = init_tid_tabs(&t->tid_maps, tid_range.num, natids,
			    stid_range.num, ATID_BASE, stid_range.base);
	if (err)
		goto out_free_l2t;

	t->mtus = mtutab.mtus;
	t->nmtus = mtutab.size;

	INIT_WORK(&t->tid_release_task, t3_process_tid_release_list);
	spin_lock_init(&t->tid_release_lock);
	INIT_LIST_HEAD(&t->list_node);
	t->dev = dev;

	RCU_INIT_POINTER(dev->l2opt, l2td);
	T3C_DATA(dev) = t;
	dev->recv = process_rx;
	dev->neigh_update = t3_l2t_update;

	/* Register netevent handler once */
	if (list_empty(&adapter_list))
		register_netevent_notifier(&nb);

	t->nofail_skb = alloc_skb(sizeof(struct cpl_tid_release), GFP_KERNEL);
	t->release_list_incomplete = 0;

	add_adapter(adapter);
	return 0;

out_free_l2t:
	kvfree(l2td);
out_free:
	kfree(t);
	return err;
}

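/* RCU callback: free the L2 table once all readers have drained. */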
static void clean_l2_data(struct rcu_head *head)
{
	struct l2t_data *d = container_of(head, struct l2t_data, rcu_head);
	kvfree(d);
}

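/*
 * Tear down offload state for an adapter.  The L2 table pointer is
 * cleared under RCU and the table itself freed via call_rcu(), so
 * readers still dereferencing tdev->l2opt see either the old table or
 * NULL, never freed memory.
 */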
void cxgb3_offload_deactivate(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;
	struct t3c_data *t = T3C_DATA(tdev);
	struct l2t_data *d;

	remove_adapter(adapter);
	if (list_empty(&adapter_list))
		unregister_netevent_notifier(&nb);

	free_tid_maps(&t->tid_maps);
	T3C_DATA(tdev) = NULL;
	rcu_read_lock();
	d = L2DATA(tdev);
	rcu_read_unlock();
	RCU_INIT_POINTER(tdev->l2opt, NULL);
	call_rcu(&d->rcu_head, clean_l2_data);
	kfree_skb(t->nofail_skb);
	kfree(t);
}

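/*
 * Give each offload device a unique "ofld_dev%d" name and track it on
 * the global device list; both operations are serialized by
 * cxgb3_db_lock.
 */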
static inline void register_tdev(struct t3cdev *tdev)
{
	static int unit;

	mutex_lock(&cxgb3_db_lock);
	snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
	list_add_tail(&tdev->ofld_dev_list, &ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

static inline void unregister_tdev(struct t3cdev *tdev)
{
	mutex_lock(&cxgb3_db_lock);
	list_del(&tdev->ofld_dev_list);
	mutex_unlock(&cxgb3_db_lock);
}

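/* Map the hardware revision to the coarse T3A/T3B/T3C device type. */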
static inline int adap2type(struct adapter *adapter)
{
	int type = 0;

	switch (adapter->params.rev) {
	case T3_REV_A:
		type = T3A;
		break;
	case T3_REV_B:
	case T3_REV_B2:
		type = T3B;
		break;
	case T3_REV_C:
		type = T3C;
		break;
	}
	return type;
}

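/*
 * Set up the offload device embedded in the adapter: default ops via
 * cxgb3_set_dummy_ops(), the real transmit and control entry points,
 * and the device type, then register it on the global offload list.
 */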
void cxgb3_adapter_ofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	INIT_LIST_HEAD(&tdev->ofld_dev_list);

	cxgb3_set_dummy_ops(tdev);
	tdev->send = t3_offload_tx;
	tdev->ctl = cxgb_offload_ctl;
	tdev->type = adap2type(adapter);

	register_tdev(tdev);
}

void cxgb3_adapter_unofld(struct adapter *adapter)
{
	struct t3cdev *tdev = &adapter->tdev;

	tdev->recv = NULL;
	tdev->neigh_update = NULL;

	unregister_tdev(tdev);
}

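/*
 * One-time module setup of the CPL dispatch table: every opcode
 * defaults to do_bad_cpl(), then each opcode the driver understands
 * is pointed at its handler.  Handlers can be replaced later through
 * t3_register_cpl_handler().
 */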
void __init cxgb3_offload_init(void)
{
	int i;

	for (i = 0; i < NUM_CPL_CMDS; ++i)
		cpl_handlers[i] = do_bad_cpl;

	t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
	t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
	t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
	t3_register_cpl_handler(CPL_PASS_OPEN_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_LISTSRV_RPL, do_stid_rpl);
	t3_register_cpl_handler(CPL_PASS_ACCEPT_REQ, do_cr);
	t3_register_cpl_handler(CPL_PASS_ESTABLISH, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL_RSS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_URG_NOTIFY, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DATA, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DATA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TX_DMA_ACK, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ACT_OPEN_RPL, do_act_open_rpl);
	t3_register_cpl_handler(CPL_PEER_CLOSE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_CLOSE_CON_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ABORT_REQ_RSS, do_abort_req_rss);
	t3_register_cpl_handler(CPL_ACT_ESTABLISH, do_act_establish);
	t3_register_cpl_handler(CPL_SET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_GET_TCB_RPL, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RDMA_TERMINATE, do_term);
	t3_register_cpl_handler(CPL_RDMA_EC_STATUS, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
	t3_register_cpl_handler(CPL_RX_DATA_DDP, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_RX_DDP_COMPLETE, do_hwtid_rpl);
	t3_register_cpl_handler(CPL_ISCSI_HDR, do_hwtid_rpl);
}