// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2014 Broadcom Corporation
 */


#include <linux/types.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <brcmu_utils.h>

#include "core.h"
#include "debug.h"
#include "bus.h"
#include "proto.h"
#include "flowring.h"
#include "msgbuf.h"
#include "common.h"

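/* Flow control watermarks: a flowring blocks its netif queue once more than
 * BRCMF_FLOWRING_HIGH packets are pending and releases it again when the
 * backlog drops below BRCMF_FLOWRING_LOW.
 */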
#define BRCMF_FLOWRING_HIGH		1024
#define BRCMF_FLOWRING_LOW		(BRCMF_FLOWRING_HIGH - 256)
#define BRCMF_FLOWRING_INVALID_IFIDX	0xff

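/* Hash seed for a flowring entry. In direct (AP) mode the destination MAC
 * takes part so each peer gets its own ring per fifo; in indirect (STA) mode
 * all traffic goes via the AP and only fifo and interface index are used.
 */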
#define BRCMF_FLOWRING_HASH_AP(da, fifo, ifidx) (da[5] * 2 + fifo + ifidx * 16)
#define BRCMF_FLOWRING_HASH_STA(fifo, ifidx) (fifo + ifidx * 16)

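/* Map 802.1d priority to a device fifo: priorities 0/3, 1/2, 4/5 and 6/7
 * each share one fifo, i.e. one fifo per access category.
 */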
static const u8 brcmf_flowring_prio2fifo[] = {
	0,
	1,
	1,
	0,
	2,
	2,
	3,
	3
};

static const u8 ALLFFMAC[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };


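/* Check whether @mac matches one of the registered TDLS peers. */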
static bool
brcmf_flowring_is_tdls_mac(struct brcmf_flowring *flow, u8 mac[ETH_ALEN])
{
	struct brcmf_flowring_tdls_entry *search;

	search = flow->tdls_entry;

	while (search) {
		if (memcmp(search->mac, mac, ETH_ALEN) == 0)
			return true;
		search = search->next;
	}

	return false;
}


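/* Find the flowring used for @da with priority @prio on interface @ifidx.
 * Returns BRCMF_FLOWRING_INVALID_ID when no matching ring exists.
 */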
u32 brcmf_flowring_lookup(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_hash *hash;
	u16 hash_idx;
	u32 i;
	bool found;
	bool sta;
	u8 fifo;
	u8 *mac;

	fifo = brcmf_flowring_prio2fifo[prio];
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		sta = false;
	}
	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	found = false;
	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((sta || (memcmp(hash[hash_idx].mac, mac, ETH_ALEN) == 0)) &&
		    (hash[hash_idx].fifo == fifo) &&
		    (hash[hash_idx].ifidx == ifidx)) {
			found = true;
			break;
		}
		hash_idx++;
		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	}
	if (found)
		return hash[hash_idx].flowid;

	return BRCMF_FLOWRING_INVALID_ID;
}


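/* Create a flowring for @da/@prio/@ifidx: claim a free hash slot and ring
 * index and set up the ring in RING_CLOSED state. Returns the new ring id,
 * BRCMF_FLOWRING_INVALID_ID when the hash table is full, or -ENOMEM.
 */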
u32 brcmf_flowring_create(struct brcmf_flowring *flow, u8 da[ETH_ALEN],
			  u8 prio, u8 ifidx)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_flowring_hash *hash;
	u16 hash_idx;
	u32 i;
	bool found;
	u8 fifo;
	bool sta;
	u8 *mac;

	fifo = brcmf_flowring_prio2fifo[prio];
	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);
	mac = da;
	if ((!sta) && (is_multicast_ether_addr(da))) {
		mac = (u8 *)ALLFFMAC;
		fifo = 0;
	}
	if ((sta) && (flow->tdls_active) &&
	    (brcmf_flowring_is_tdls_mac(flow, da))) {
		sta = false;
	}
	hash_idx =  sta ? BRCMF_FLOWRING_HASH_STA(fifo, ifidx) :
			  BRCMF_FLOWRING_HASH_AP(mac, fifo, ifidx);
	hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	found = false;
	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((hash[hash_idx].ifidx == BRCMF_FLOWRING_INVALID_IFIDX) &&
		    (is_zero_ether_addr(hash[hash_idx].mac))) {
			found = true;
			break;
		}
		hash_idx++;
		hash_idx &= (BRCMF_FLOWRING_HASHSIZE - 1);
	}
	if (found) {
		for (i = 0; i < flow->nrofrings; i++) {
			if (flow->rings[i] == NULL)
				break;
		}
		if (i == flow->nrofrings)
			return -ENOMEM;

		ring = kzalloc(sizeof(*ring), GFP_ATOMIC);
		if (!ring)
			return -ENOMEM;

		memcpy(hash[hash_idx].mac, mac, ETH_ALEN);
		hash[hash_idx].fifo = fifo;
		hash[hash_idx].ifidx = ifidx;
		hash[hash_idx].flowid = i;

		ring->hash_id = hash_idx;
		ring->status = RING_CLOSED;
		skb_queue_head_init(&ring->skblist);
		flow->rings[i] = ring;

		return i;
	}
	return BRCMF_FLOWRING_INVALID_ID;
}


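/* Return the fifo (traffic class) the given flowring was created for. */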
u8 brcmf_flowring_tid(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	return flow->hash[ring->hash_id].fifo;
}


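/* Block or unblock transmission for the interface owning @flowid. The
 * netif queue itself is only toggled when no other open ring on the same
 * interface is still blocked.
 */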
static void brcmf_flowring_block(struct brcmf_flowring *flow, u16 flowid,
				 bool blocked)
{
	struct brcmf_flowring_ring *ring;
	struct brcmf_bus *bus_if;
	struct brcmf_pub *drvr;
	struct brcmf_if *ifp;
	bool currently_blocked;
	int i;
	u8 ifidx;
	unsigned long flags;

	spin_lock_irqsave(&flow->block_lock, flags);

	ring = flow->rings[flowid];
	if (ring->blocked == blocked) {
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}
	ifidx = brcmf_flowring_ifidx_get(flow, flowid);

	currently_blocked = false;
	for (i = 0; i < flow->nrofrings; i++) {
		if ((flow->rings[i]) && (i != flowid)) {
			ring = flow->rings[i];
			if ((ring->status == RING_OPEN) &&
			    (brcmf_flowring_ifidx_get(flow, i) == ifidx)) {
				if (ring->blocked) {
					currently_blocked = true;
					break;
				}
			}
		}
	}
	flow->rings[flowid]->blocked = blocked;
	if (currently_blocked) {
		spin_unlock_irqrestore(&flow->block_lock, flags);
		return;
	}

	bus_if = dev_get_drvdata(flow->dev);
	drvr = bus_if->drvr;
	ifp = brcmf_get_ifp(drvr, ifidx);
	brcmf_txflowblock_if(ifp, BRCMF_NETIF_STOP_REASON_FLOW, blocked);

	spin_unlock_irqrestore(&flow->block_lock, flags);
}


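/* Tear down a flowring: free its hash slot, complete all queued skbs as
 * failed and release the ring.
 */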
void brcmf_flowring_delete(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_flowring_ring *ring;
	struct brcmf_if *ifp;
	u16 hash_idx;
	u8 ifidx;
	struct sk_buff *skb;

	ring = flow->rings[flowid];
	if (!ring)
		return;

	ifidx = brcmf_flowring_ifidx_get(flow, flowid);
	ifp = brcmf_get_ifp(bus_if->drvr, ifidx);

	brcmf_flowring_block(flow, flowid, false);
	hash_idx = ring->hash_id;
	flow->hash[hash_idx].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
	eth_zero_addr(flow->hash[hash_idx].mac);
	flow->rings[flowid] = NULL;

	skb = skb_dequeue(&ring->skblist);
	while (skb) {
		brcmf_txfinalize(ifp, skb, false);
		skb = skb_dequeue(&ring->skblist);
	}

	kfree(ring);
}


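/* Queue an skb on a flowring and block the netif queue once the backlog
 * exceeds BRCMF_FLOWRING_HIGH. Returns the resulting queue length.
 */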
u32 brcmf_flowring_enqueue(struct brcmf_flowring *flow, u16 flowid,
			   struct sk_buff *skb)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	skb_queue_tail(&ring->skblist, skb);

	if (!ring->blocked &&
	    (skb_queue_len(&ring->skblist) > BRCMF_FLOWRING_HIGH)) {
		brcmf_flowring_block(flow, flowid, true);
		brcmf_dbg(MSGBUF, "Flowcontrol: BLOCK for ring %d\n", flowid);
		/* To prevent (work around) possible race condition, check
		 * queue len again. It is also possible to use locking to
		 * protect, but that is undesirable for every enqueue and
		 * dequeue. This simple check will solve a possible race
		 * condition if it occurs.
		 */
		if (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)
			brcmf_flowring_block(flow, flowid, false);
	}
	return skb_queue_len(&ring->skblist);
}


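/* Dequeue the next skb from an open flowring and lift the block once the
 * backlog has dropped below BRCMF_FLOWRING_LOW. Returns NULL when the ring
 * is not open or is empty.
 */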
struct sk_buff *brcmf_flowring_dequeue(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;
	struct sk_buff *skb;

	ring = flow->rings[flowid];
	if (ring->status != RING_OPEN)
		return NULL;

	skb = skb_dequeue(&ring->skblist);

	if (ring->blocked &&
	    (skb_queue_len(&ring->skblist) < BRCMF_FLOWRING_LOW)) {
		brcmf_flowring_block(flow, flowid, false);
		brcmf_dbg(MSGBUF, "Flowcontrol: OPEN for ring %d\n", flowid);
	}

	return skb;
}


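/* Put an skb back at the head of a flowring, undoing a previous dequeue. */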
void brcmf_flowring_reinsert(struct brcmf_flowring *flow, u16 flowid,
			     struct sk_buff *skb)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];

	skb_queue_head(&ring->skblist, skb);
}


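/* Return the number of skbs queued on an open flowring, or 0 otherwise. */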
u32 brcmf_flowring_qlen(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];
	if (!ring)
		return 0;

	if (ring->status != RING_OPEN)
		return 0;

	return skb_queue_len(&ring->skblist);
}


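/* Mark a flowring RING_OPEN; only open rings are served by the dequeue
 * path.
 */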
void brcmf_flowring_open(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;

	ring = flow->rings[flowid];
	if (!ring) {
		brcmf_err("Ring NULL, for flowid %d\n", flowid);
		return;
	}

	ring->status = RING_OPEN;
}


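/* Return the interface index the given flowring belongs to. */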
u8 brcmf_flowring_ifidx_get(struct brcmf_flowring *flow, u16 flowid)
{
	struct brcmf_flowring_ring *ring;
	u16 hash_idx;

	ring = flow->rings[flowid];
	hash_idx = ring->hash_id;

	return flow->hash[hash_idx].ifidx;
}


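/* Allocate and initialise flowring bookkeeping for @nrofrings rings.
 * Returns NULL on allocation failure.
 */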
struct brcmf_flowring *brcmf_flowring_attach(struct device *dev, u16 nrofrings)
{
	struct brcmf_flowring *flow;
	u32 i;

	flow = kzalloc(sizeof(*flow), GFP_KERNEL);
	if (flow) {
		flow->dev = dev;
		flow->nrofrings = nrofrings;
		spin_lock_init(&flow->block_lock);
		for (i = 0; i < ARRAY_SIZE(flow->addr_mode); i++)
			flow->addr_mode[i] = ADDR_INDIRECT;
		for (i = 0; i < ARRAY_SIZE(flow->hash); i++)
			flow->hash[i].ifidx = BRCMF_FLOWRING_INVALID_IFIDX;
		flow->rings = kcalloc(nrofrings, sizeof(*flow->rings),
				      GFP_KERNEL);
		if (!flow->rings) {
			kfree(flow);
			flow = NULL;
		}
	}

	return flow;
}


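/* Counterpart of brcmf_flowring_attach(): delete all remaining rings via
 * msgbuf, free the TDLS peer list and release the bookkeeping structures.
 */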
void brcmf_flowring_detach(struct brcmf_flowring *flow)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_flowring_tdls_entry *search;
	struct brcmf_flowring_tdls_entry *remove;
	u16 flowid;

	for (flowid = 0; flowid < flow->nrofrings; flowid++) {
		if (flow->rings[flowid])
			brcmf_msgbuf_delete_flowring(drvr, flowid);
	}

	search = flow->tdls_entry;
	while (search) {
		remove = search;
		search = search->next;
		kfree(remove);
	}
	kfree(flow->rings);
	kfree(flow);
}


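/* Change the address mode of an interface. Open rings created for the
 * previous mode on that interface are closed and deleted since their hash
 * entries no longer apply.
 */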
void brcmf_flowring_configure_addr_mode(struct brcmf_flowring *flow, int ifidx,
					enum proto_addr_mode addr_mode)
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	u32 i;
	u16 flowid;

	if (flow->addr_mode[ifidx] != addr_mode) {
		for (i = 0; i < ARRAY_SIZE(flow->hash); i++) {
			if (flow->hash[i].ifidx == ifidx) {
				flowid = flow->hash[i].flowid;
				if (flow->rings[flowid]->status != RING_OPEN)
					continue;
				flow->rings[flowid]->status = RING_CLOSING;
				brcmf_msgbuf_delete_flowring(drvr, flowid);
			}
		}
		flow->addr_mode[ifidx] = addr_mode;
	}
}


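/* Delete the rings associated with @peer on @ifidx (all rings of the
 * interface when it is in indirect/STA mode) and remove the peer from the
 * TDLS list if present.
 */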
void brcmf_flowring_delete_peer(struct brcmf_flowring *flow, int ifidx,
				u8 peer[ETH_ALEN])
{
	struct brcmf_bus *bus_if = dev_get_drvdata(flow->dev);
	struct brcmf_pub *drvr = bus_if->drvr;
	struct brcmf_flowring_hash *hash;
	struct brcmf_flowring_tdls_entry *prev;
	struct brcmf_flowring_tdls_entry *search;
	u32 i;
	u16 flowid;
	bool sta;

	sta = (flow->addr_mode[ifidx] == ADDR_INDIRECT);

	search = flow->tdls_entry;
	prev = NULL;
	while (search) {
		if (memcmp(search->mac, peer, ETH_ALEN) == 0) {
			sta = false;
			break;
		}
		prev = search;
		search = search->next;
	}

	hash = flow->hash;
	for (i = 0; i < BRCMF_FLOWRING_HASHSIZE; i++) {
		if ((sta || (memcmp(hash[i].mac, peer, ETH_ALEN) == 0)) &&
		    (hash[i].ifidx == ifidx)) {
			flowid = flow->hash[i].flowid;
			if (flow->rings[flowid]->status == RING_OPEN) {
				flow->rings[flowid]->status = RING_CLOSING;
				brcmf_msgbuf_delete_flowring(drvr, flowid);
			}
		}
	}

	if (search) {
		if (prev)
			prev->next = search->next;
		else
			flow->tdls_entry = search->next;
		kfree(search);
		if (flow->tdls_entry == NULL)
			flow->tdls_active = false;
	}
}


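/* Register a TDLS peer so traffic towards it gets dedicated flowrings even
 * while the interface is in indirect (STA) mode. Duplicate entries are
 * ignored.
 */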
void brcmf_flowring_add_tdls_peer(struct brcmf_flowring *flow, int ifidx,
				  u8 peer[ETH_ALEN])
{
	struct brcmf_flowring_tdls_entry *tdls_entry;
	struct brcmf_flowring_tdls_entry *search;

	tdls_entry = kzalloc(sizeof(*tdls_entry), GFP_ATOMIC);
	if (tdls_entry == NULL)
		return;

	memcpy(tdls_entry->mac, peer, ETH_ALEN);
	tdls_entry->next = NULL;
	if (flow->tdls_entry == NULL) {
		flow->tdls_entry = tdls_entry;
	} else {
		search = flow->tdls_entry;
		if (memcmp(search->mac, peer, ETH_ALEN) == 0)
			goto free_entry;
		while (search->next) {
			search = search->next;
			if (memcmp(search->mac, peer, ETH_ALEN) == 0)
				goto free_entry;
		}
		search->next = tdls_entry;
	}

	flow->tdls_active = true;
	return;

free_entry:
	kfree(tdls_entry);
}