/*
 * Copyright (c) 2003-2008 Chelsio, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <linux/jhash.h>
#include <linux/slab.h>
#include <linux/export.h>
#include <net/neighbour.h>
#include "common.h"
#include "t3cdev.h"
#include "cxgb3_defs.h"
#include "l2t.h"
#include "t3_cpl.h"
#include "firmware_exports.h"

#define VLAN_NONE 0xfff

/*
 * Module locking notes:  There is an RW lock protecting the L2 table as a
 * whole plus a spinlock per L2T entry.  Entry lookups and allocations happen
 * under the protection of the table lock; individual entry changes happen
 * while holding that entry's spinlock.  The table lock nests outside the
 * entry locks.  Allocations of new entries take the table lock as writers so
 * no other lookups can happen while allocating new entries.  Entry updates
 * take the table lock as readers so multiple entries can be updated in
 * parallel.  An L2T entry is dropped by decrementing its reference count;
 * this can therefore happen in parallel with entry allocation, but no entry
 * can change state or increment its ref count during allocation, as both of
 * those operations perform lookups.
 */

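/*
 * For illustration, the nesting described above looks like this on a
 * hypothetical allocation path (a sketch, not code from this driver):
 *
 *	write_lock_bh(&d->lock);	table lock as writer: no lookups
 *	e = alloc_l2e(d);
 *	spin_lock(&e->lock);		entry lock nests inside table lock
 *	... initialize *e ...
 *	spin_unlock(&e->lock);
 *	write_unlock_bh(&d->lock);
 */

/* The 802.1p priority is the top 3 bits (15:13) of the stored VLAN tag. */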
static inline unsigned int vlan_prio(const struct l2t_entry *e)
{
	return e->vlan >> 13;
}

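/*
 * Hash a next-hop IP address and interface index into the L2 table.
 * d->nentries is a power of 2, so masking with nentries - 1 produces a
 * valid table index.
 */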
static inline unsigned int arp_hash(u32 key, int ifindex,
				    const struct l2t_data *d)
{
	return jhash_2words(key, ifindex, 0) & (d->nentries - 1);
}

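/*
 * Point an entry at a new neighbour, taking a reference on the new
 * neighbour before releasing the old one so the entry never holds a
 * dangling pointer.
 */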
static inline void neigh_replace(struct l2t_entry *e, struct neighbour *n)
{
	neigh_hold(n);
	if (e->neigh)
		neigh_release(e->neigh);
	e->neigh = n;
}

/*
 * Set up an L2T entry and send any packets waiting in the arp queue.  The
 * supplied skb is used for the CPL_L2T_WRITE_REQ.  Must be called with the
 * entry locked.
 */
static int setup_l2e_send_pending(struct t3cdev *dev, struct sk_buff *skb,
				  struct l2t_entry *e)
{
	struct cpl_l2t_write_req *req;
	struct sk_buff *tmp;

	if (!skb) {
		skb = alloc_skb(sizeof(*req), GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;
	}

	req = __skb_put(skb, sizeof(*req));
	req->wr.wr_hi = htonl(V_WR_OP(FW_WROPCODE_FORWARD));
	OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_L2T_WRITE_REQ, e->idx));
	req->params = htonl(V_L2T_W_IDX(e->idx) | V_L2T_W_IFF(e->smt_idx) |
			    V_L2T_W_VLAN(e->vlan & VLAN_VID_MASK) |
			    V_L2T_W_PRIO(vlan_prio(e)));
	memcpy(e->dmac, e->neigh->ha, sizeof(e->dmac));
	memcpy(req->dst_mac, e->dmac, sizeof(req->dst_mac));
	skb->priority = CPL_PRIORITY_CONTROL;
	cxgb3_ofld_send(dev, skb);

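	/*
	 * The write request is on its way to hardware; flush any packets
	 * that were queued on this entry while its address was resolving.
	 */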
	skb_queue_walk_safe(&e->arpq, skb, tmp) {
		__skb_unlink(skb, &e->arpq);
		cxgb3_ofld_send(dev, skb);
	}
	e->state = L2T_STATE_VALID;

	return 0;
}

/*
 * Add a packet to an L2T entry's queue of packets awaiting resolution.
 * Must be called with the entry's lock held.
 */
static inline void arpq_enqueue(struct l2t_entry *e, struct sk_buff *skb)
{
	__skb_queue_tail(&e->arpq, skb);
}

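/*
 * Send an skb through L2T entry e, resolving the destination MAC first if
 * necessary.  While the entry is in L2T_STATE_RESOLVING the skb is queued
 * on the entry's arpq and flushed by setup_l2e_send_pending() once
 * resolution succeeds; a stale entry is revalidated and used immediately.
 */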
int t3_l2t_send_slow(struct t3cdev *dev, struct sk_buff *skb,
		     struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		fallthrough;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return cxgb3_ofld_send(dev, skb);
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		arpq_enqueue(e, skb);
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb below can fail,
		 * we allow each packet added to the arpq to retry resolution
		 * as a way of recovering from transient memory exhaustion.
		 * A better way would be to use a work request to retry L2T
		 * entries when there's no memory.
		 */
		if (!neigh_event_send(e->neigh, NULL)) {
			skb = alloc_skb(sizeof(struct cpl_l2t_write_req),
					GFP_ATOMIC);
			if (!skb)
				break;

			spin_lock_bh(&e->lock);
			if (!skb_queue_empty(&e->arpq))
				setup_l2e_send_pending(dev, skb, e);
			else	/* we lost the race */
				__kfree_skb(skb);
			spin_unlock_bh(&e->lock);
		}
	}
	return 0;
}

EXPORT_SYMBOL(t3_l2t_send_slow);

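/*
 * Called from the neighbour-event path for an entry that is loaded in the
 * hardware L2 table: revalidate a stale entry or re-kick resolution for one
 * still resolving.  Unlike t3_l2t_send_slow(), no packet is sent here.
 */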
void t3_l2t_send_event(struct t3cdev *dev, struct l2t_entry *e)
{
again:
	switch (e->state) {
	case L2T_STATE_STALE:	/* entry is stale, kick off revalidation */
		neigh_event_send(e->neigh, NULL);
		spin_lock_bh(&e->lock);
		if (e->state == L2T_STATE_STALE)
			e->state = L2T_STATE_VALID;
		spin_unlock_bh(&e->lock);
		return;
	case L2T_STATE_VALID:	/* fast-path, send the packet on */
		return;
	case L2T_STATE_RESOLVING:
		spin_lock_bh(&e->lock);
		if (e->state != L2T_STATE_RESOLVING) {
			/* ARP already completed */
			spin_unlock_bh(&e->lock);
			goto again;
		}
		spin_unlock_bh(&e->lock);

		/*
		 * Only the first packet added to the arpq should kick off
		 * resolution.  However, because the alloc_skb in
		 * t3_l2t_send_slow() can fail, each packet added to the arpq
		 * is allowed to retry resolution as a way of recovering from
		 * transient memory exhaustion.  A better way would be to use
		 * a work request to retry L2T entries when there's no memory.
		 */
		neigh_event_send(e->neigh, NULL);
	}
}

EXPORT_SYMBOL(t3_l2t_send_event);

/*
 * Allocate a free L2T entry.  Must be called with l2t_data.lock held.
 */
static struct l2t_entry *alloc_l2e(struct l2t_data *d)
{
	struct l2t_entry *end, *e, **p;

	if (!atomic_read(&d->nfree))
		return NULL;

	/* there's definitely a free entry */
	for (e = d->rover, end = &d->l2tab[d->nentries]; e != end; ++e)
		if (atomic_read(&e->refcnt) == 0)
			goto found;

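	/*
	 * The scan from ->rover reached the end of the table without finding
	 * a free entry; nfree says one exists, so rescan from entry 1
	 * (entry 0 is never used).
	 */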
	for (e = &d->l2tab[1]; atomic_read(&e->refcnt); ++e)
		;
found:
	d->rover = e + 1;
	atomic_dec(&d->nfree);

	/*
	 * The entry we found may be an inactive entry that is
	 * presently in the hash table.  We need to remove it.
	 */
	if (e->state != L2T_STATE_UNUSED) {
		int hash = arp_hash(e->addr, e->ifindex, d);

		for (p = &d->l2tab[hash].first; *p; p = &(*p)->next)
			if (*p == e) {
				*p = e->next;
				break;
			}
		e->state = L2T_STATE_UNUSED;
	}
	return e;
}

/*
 * Called when an L2T entry has no more users.  The entry is left in the hash
 * table since it is likely to be reused but we also bump nfree to indicate
 * that the entry can be reallocated for a different neighbor.  We also drop
 * the existing neighbor reference in case the neighbor is going away and is
 * waiting on our reference.
 *
 * Because entries can be reallocated to other neighbors once their ref count
 * drops to 0 we need to take the entry's lock to avoid races with a new
 * incarnation.
 */
void t3_l2e_free(struct l2t_data *d, struct l2t_entry *e)
{
	spin_lock_bh(&e->lock);
	if (atomic_read(&e->refcnt) == 0) {	/* hasn't been recycled */
		if (e->neigh) {
			neigh_release(e->neigh);
			e->neigh = NULL;
		}
	}
	spin_unlock_bh(&e->lock);
	atomic_inc(&d->nfree);
}

EXPORT_SYMBOL(t3_l2e_free);

/*
 * Update an L2T entry that was previously used for the same next hop as neigh.
 * Must be called with softirqs disabled.
 */
static inline void reuse_entry(struct l2t_entry *e, struct neighbour *neigh)
{
	unsigned int nud_state;

	spin_lock(&e->lock);	/* avoid race with t3_l2e_free */

	if (neigh != e->neigh)
		neigh_replace(e, neigh);
	nud_state = neigh->nud_state;
	if (memcmp(e->dmac, neigh->ha, sizeof(e->dmac)) ||
	    !(nud_state & NUD_VALID))
		e->state = L2T_STATE_RESOLVING;
	else if (nud_state & NUD_CONNECTED)
		e->state = L2T_STATE_VALID;
	else
		e->state = L2T_STATE_STALE;
	spin_unlock(&e->lock);
}

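/*
 * Look up, or allocate, the L2T entry for the next hop resolved from
 * dst/daddr as seen through net_device dev.  On success the entry is
 * returned with its reference count raised; the caller drops that reference
 * with l2t_release() (see l2t.h) when the connection goes away.  Returns
 * NULL if no neighbour, no L2 table, or no free entry is found.
 */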
struct l2t_entry *t3_l2t_get(struct t3cdev *cdev, struct dst_entry *dst,
			     struct net_device *dev, const void *daddr)
{
	struct l2t_entry *e = NULL;
	struct neighbour *neigh;
	struct port_info *p;
	struct l2t_data *d;
	int hash;
	u32 addr;
	int ifidx;
	int smt_idx;

	rcu_read_lock();
	neigh = dst_neigh_lookup(dst, daddr);
	if (!neigh)
		goto done_rcu;

	addr = *(u32 *) neigh->primary_key;
	ifidx = neigh->dev->ifindex;

	if (!dev)
		dev = neigh->dev;
	p = netdev_priv(dev);
	smt_idx = p->port_id;

	d = L2DATA(cdev);
	if (!d)
		goto done_rcu;

	hash = arp_hash(addr, ifidx, d);

	write_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx &&
		    e->smt_idx == smt_idx) {
			l2t_hold(d, e);
			if (atomic_read(&e->refcnt) == 1)
				reuse_entry(e, neigh);
			goto done_unlock;
		}

	/* Need to allocate a new entry */
	e = alloc_l2e(d);
	if (e) {
		spin_lock(&e->lock);	/* avoid race with t3_l2e_free */
		e->next = d->l2tab[hash].first;
		d->l2tab[hash].first = e;
		e->state = L2T_STATE_RESOLVING;
		e->addr = addr;
		e->ifindex = ifidx;
		e->smt_idx = smt_idx;
		atomic_set(&e->refcnt, 1);
		neigh_replace(e, neigh);
		if (is_vlan_dev(neigh->dev))
			e->vlan = vlan_dev_vlan_id(neigh->dev);
		else
			e->vlan = VLAN_NONE;
		spin_unlock(&e->lock);
	}
done_unlock:
	write_unlock_bh(&d->lock);
done_rcu:
	if (neigh)
		neigh_release(neigh);
	rcu_read_unlock();
	return e;
}

EXPORT_SYMBOL(t3_l2t_get);
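
/*
 * Typical use by an offload driver, as an illustrative sketch only (the
 * identifiers below belong to a hypothetical caller, not this file;
 * l2t_release() is the refcount-dropping helper declared in l2t.h):
 *
 *	struct l2t_entry *e = t3_l2t_get(tdev, dst, dev, daddr);
 *
 *	if (!e)
 *		return -ENOMEM;
 *	...
 *	t3_l2t_send_slow(tdev, skb, e);		queues skb while resolving
 *	...
 *	l2t_release(tdev, e);			drop the reference when done
 */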

/*
 * Called when address resolution fails for an L2T entry to handle packets
 * on the arpq head.  If a packet specifies a failure handler it is invoked,
 * otherwise the packet is sent to the offload device.
 *
 * XXX: maybe we should abandon the latter behavior and just require a failure
 * handler.
 */
static void handle_failed_resolution(struct t3cdev *dev, struct sk_buff_head *arpq)
{
	struct sk_buff *skb, *tmp;

	skb_queue_walk_safe(arpq, skb, tmp) {
		struct l2t_skb_cb *cb = L2T_SKB_CB(skb);

		__skb_unlink(skb, arpq);
		if (cb->arp_failure_handler)
			cb->arp_failure_handler(dev, skb);
		else
			cxgb3_ofld_send(dev, skb);
	}
}

/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct sk_buff_head arpq;
	struct l2t_entry *e;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
	__skb_queue_head_init(&arpq);

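	/*
	 * Drop the table lock but leave BHs disabled: e->lock was taken
	 * under read_lock_bh() above, and the spin_unlock_bh() at the end
	 * of the function re-enables them.
	 */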
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED)
				skb_queue_splice_init(&e->arpq, &arpq);
			else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (!ether_addr_equal(e->dmac, neigh->ha))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (!skb_queue_empty(&arpq))
		handle_failed_resolution(dev, &arpq);
}

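/*
 * Allocate and initialize an L2T table with l2t_capacity entries.  Entry 0
 * is reserved, so l2t_capacity - 1 entries are usable; arp_hash() masks
 * with nentries - 1, so the capacity is expected to be a power of 2.
 */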
struct l2t_data *t3_init_l2t(unsigned int l2t_capacity)
{
	struct l2t_data *d;
	int i;

	d = kvzalloc(struct_size(d, l2tab, l2t_capacity), GFP_KERNEL);
	if (!d)
		return NULL;

	d->nentries = l2t_capacity;
	d->rover = &d->l2tab[1];	/* entry 0 is not used */
	atomic_set(&d->nfree, l2t_capacity - 1);
	rwlock_init(&d->lock);

	for (i = 0; i < l2t_capacity; ++i) {
		d->l2tab[i].idx = i;
		d->l2tab[i].state = L2T_STATE_UNUSED;
		__skb_queue_head_init(&d->l2tab[i].arpq);
		spin_lock_init(&d->l2tab[i].lock);
		atomic_set(&d->l2tab[i].refcnt, 0);
	}
	return d;
}
466