/*
 * @file Broadcom Dongle Host Driver (DHD), Flow ring specific code at top level
 *
 * Flow rings are transmit traffic (=propagating towards antenna) related entities
 *
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

/** XXX Twiki: [PCIeFullDongleArchitecture] */

#include <typedefs.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <dngl_stats.h>

#include <dhd.h>

#include <dhd_flowring.h>
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <802.1d.h>
#include <pcie_core.h>
#include <bcmmsgbuf.h>
#include <dhd_pcie.h>
#include <dhd_config.h>

static INLINE int dhd_flow_queue_throttle(flow_queue_t *queue);

static INLINE uint16 dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex,
                                     uint8 prio, char *sa, char *da);

static INLINE uint16 dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex,
                                      uint8 prio, char *sa, char *da);

static INLINE int dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
                                uint8 prio, char *sa, char *da, uint16 *flowid);
int dhd_flow_queue_overflow(flow_queue_t *queue, void *pkt);

#define FLOW_QUEUE_PKT_NEXT(p)          PKTLINK(p)
#define FLOW_QUEUE_PKT_SETNEXT(p, x)    PKTSETLINK((p), (x))

#ifdef DHD_REPLACE_LOG_INFO_TO_TRACE
#define DHD_FLOWRING_INFO DHD_TRACE
#else
#define DHD_FLOWRING_INFO DHD_INFO
#endif /* DHD_REPLACE_LOG_INFO_TO_TRACE */

const uint8 prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 };
const uint8 prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
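
/*
 * Illustrative sketch (not part of the original file): how these maps are
 * typically consulted when choosing a flowring for a packet. PKTPRIO() is
 * assumed here to return the packet's 802.1D priority (0..7).
 *
 *   uint8 prio = PKTPRIO(pkt) % NUMPRIO;  // 802.1D priority, 0..7
 *   uint8 ac   = prio2ac[prio];           // e.g. prio 5 -> 2 (video AC)
 *   uint8 tid  = prio2tid[prio];          // identity map: prio 5 -> TID 5
 */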

/** Queue overflow throttle. Return value: TRUE if throttle needs to be applied */
static INLINE int
dhd_flow_queue_throttle(flow_queue_t *queue)
{
#if defined(BCM_ROUTER_DHD)
	/* Two tests:
	 * 1) Test whether the overall level 2 (grandparent) cumulative threshold is crossed.
	 * 2) Or test whether the queue's budget and the overall cumulative threshold are crossed.
	 */
	void *gp_clen_ptr = DHD_FLOW_QUEUE_L2CLEN_PTR(queue);
	void *parent_clen_ptr = DHD_FLOW_QUEUE_CLEN_PTR(queue);
	int gp_cumm_threshold = DHD_FLOW_QUEUE_L2THRESHOLD(queue);
	int cumm_threshold = DHD_FLOW_QUEUE_THRESHOLD(queue);

	int ret = ((DHD_CUMM_CTR_READ(gp_clen_ptr) > gp_cumm_threshold) ||
		((DHD_FLOW_QUEUE_OVFL(queue, DHD_FLOW_QUEUE_MAX(queue))) &&
		(DHD_CUMM_CTR_READ(parent_clen_ptr) > cumm_threshold)));
	return ret;
#else
	return DHD_FLOW_QUEUE_FULL(queue);
#endif /* BCM_ROUTER_DHD */
}

int
BCMFASTPATH(dhd_flow_queue_overflow)(flow_queue_t *queue, void *pkt)
{
	return BCME_NORESOURCE;
}

/** Returns flow ring given a flowid */
flow_ring_node_t *
dhd_flow_ring_node(dhd_pub_t *dhdp, uint16 flowid)
{
	flow_ring_node_t * flow_ring_node;

	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(flowid <= dhdp->max_tx_flowid);
	if (flowid > dhdp->max_tx_flowid) {
		return NULL;
	}

	flow_ring_node = &(((flow_ring_node_t*)(dhdp->flow_ring_table))[flowid]);

	ASSERT(flow_ring_node->flowid == flowid);
	return flow_ring_node;
}

/** Returns 'backup' queue given a flowid */
flow_queue_t *
dhd_flow_queue(dhd_pub_t *dhdp, uint16 flowid)
{
	flow_ring_node_t * flow_ring_node = NULL;

	flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
	if (flow_ring_node)
		return &flow_ring_node->queue;
	else
		return NULL;
}
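
/*
 * Illustrative sketch: resolving a flowid to its node and backup queue. Both
 * helpers validate the flowid against dhdp->max_tx_flowid and return NULL on
 * failure, so callers are expected to check the result.
 *
 *   flow_ring_node_t *node = dhd_flow_ring_node(dhdp, flowid);
 *   flow_queue_t *queue = dhd_flow_queue(dhdp, flowid);
 *   if ((node == NULL) || (queue == NULL))
 *       return BCME_BADARG;  // hypothetical caller error path
 */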

/* Flow ring's queue management functions */

/** Reinitialize a flow ring's queue. */
void
dhd_flow_queue_reinit(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
	ASSERT((queue != NULL) && (max > 0));

	queue->head = queue->tail = NULL;
	queue->len = 0;

	/* Set queue's threshold and queue's parent cumulative length counter */
	ASSERT(max > 1);
	DHD_FLOW_QUEUE_SET_MAX(queue, max);
	DHD_FLOW_QUEUE_SET_THRESHOLD(queue, max);
	DHD_FLOW_QUEUE_SET_CLEN(queue, &dhdp->cumm_ctr);
	DHD_FLOW_QUEUE_SET_L2CLEN(queue, &dhdp->l2cumm_ctr);

	queue->failures = 0U;
	queue->cb = &dhd_flow_queue_overflow;
}

/** Initialize a flow ring's queue, called on driver initialization. */
void
dhd_flow_queue_init(dhd_pub_t *dhdp, flow_queue_t *queue, int max)
{
	ASSERT((queue != NULL) && (max > 0));

	dll_init(&queue->list);
	dhd_flow_queue_reinit(dhdp, queue, max);
}

/** Register an enqueue overflow callback handler */
void
dhd_flow_queue_register(flow_queue_t *queue, flow_queue_cb_t cb)
{
	ASSERT(queue != NULL);
	queue->cb = cb;
}
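
/*
 * Illustrative sketch: a queue is initialized once and may then be given a
 * custom overflow handler; by default dhd_flow_queue_overflow() simply
 * rejects the packet with BCME_NORESOURCE. my_overflow_cb is hypothetical.
 *
 *   dhd_flow_queue_init(dhdp, queue, max_budget);
 *   dhd_flow_queue_register(queue, my_overflow_cb);
 */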

/**
 * Enqueue an 802.3 packet at the back of a flow ring's queue. From there it
 * later travels to the flow ring itself.
 */
int
BCMFASTPATH(dhd_flow_queue_enqueue)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	int ret = BCME_OK;

	ASSERT(queue != NULL);

	if (dhd_flow_queue_throttle(queue)) {
		queue->failures++;
		ret = (*queue->cb)(queue, pkt);
		goto done;
	}

	if (queue->head) {
		FLOW_QUEUE_PKT_SETNEXT(queue->tail, pkt);
	} else {
		queue->head = pkt;
	}

	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);

	queue->tail = pkt; /* at tail */

	queue->len++;
	/* increment parent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
	/* increment grandparent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
	return ret;
}

int BCMFASTPATH
(dhd_flow_queue_enqueue_head)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	int ret = BCME_OK;

	ASSERT(queue != NULL);

	if (dhd_flow_queue_throttle(queue)) {
		queue->failures++;
		ret = (*queue->cb)(queue, pkt);
		goto done;
	}

	if (queue->head) {
		FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
		queue->head = pkt;

	} else {
		queue->head = pkt;
		FLOW_QUEUE_PKT_SETNEXT(pkt, NULL);
		queue->tail = pkt; /* at tail */
	}

	queue->len++;
	/* increment parent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
	/* increment grandparent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

done:
	return ret;
}

/** Dequeue an 802.3 packet from a flow ring's queue, from head (FIFO) */
void *
BCMFASTPATH(dhd_flow_queue_dequeue)(dhd_pub_t *dhdp, flow_queue_t *queue)
{
	void * pkt;

	ASSERT(queue != NULL);

	pkt = queue->head; /* from head */

	if (pkt == NULL) {
		ASSERT((queue->len == 0) && (queue->tail == NULL));
		goto done;
	}

	queue->head = FLOW_QUEUE_PKT_NEXT(pkt);
	if (queue->head == NULL)
		queue->tail = NULL;

	queue->len--;
	/* decrement parent's cumulative length */
	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
	/* decrement grandparent's cumulative length */
	DHD_CUMM_CTR_DECR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));

	FLOW_QUEUE_PKT_SETNEXT(pkt, NULL); /* detach packet from queue */

done:
	return pkt;
}
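
/*
 * Illustrative FIFO round trip (sketch, not from this file): packets enqueued
 * at the tail come back out of the head in the same order, with the parent
 * and grandparent cumulative counters incremented and decremented in step.
 *
 *   (void)dhd_flow_queue_enqueue(dhdp, queue, pkt1);  // queue: pkt1
 *   (void)dhd_flow_queue_enqueue(dhdp, queue, pkt2);  // queue: pkt1 -> pkt2
 *   pkt = dhd_flow_queue_dequeue(dhdp, queue);        // pkt == pkt1
 */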

/** Reinsert a dequeued 802.3 packet back at the head */
void
BCMFASTPATH(dhd_flow_queue_reinsert)(dhd_pub_t *dhdp, flow_queue_t *queue, void *pkt)
{
	if (queue->head == NULL) {
		queue->tail = pkt;
	}

	FLOW_QUEUE_PKT_SETNEXT(pkt, queue->head);
	queue->head = pkt;
	queue->len++;
	/* increment parent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_CLEN_PTR(queue));
	/* increment grandparent's cumulative length */
	DHD_CUMM_CTR_INCR(DHD_FLOW_QUEUE_L2CLEN_PTR(queue));
}

/** Fetch the backup queue for a flowring, and assign flow control thresholds */
void
dhd_flow_ring_config_thresholds(dhd_pub_t *dhdp, uint16 flowid,
                     int queue_budget, int cumm_threshold, void *cumm_ctr,
                     int l2cumm_threshold, void *l2cumm_ctr)
{
	flow_queue_t * queue = NULL;

	ASSERT(dhdp != (dhd_pub_t*)NULL);
	ASSERT(queue_budget > 1);
	ASSERT(cumm_threshold > 1);
	ASSERT(cumm_ctr != (void*)NULL);
	ASSERT(l2cumm_threshold > 1);
	ASSERT(l2cumm_ctr != (void*)NULL);

	queue = dhd_flow_queue(dhdp, flowid);
	if (queue) {
		DHD_FLOW_QUEUE_SET_MAX(queue, queue_budget); /* Max queue length */

		/* Set the queue's parent threshold and cumulative counter */
		DHD_FLOW_QUEUE_SET_THRESHOLD(queue, cumm_threshold);
		DHD_FLOW_QUEUE_SET_CLEN(queue, cumm_ctr);

		/* Set the queue's grandparent threshold and cumulative counter */
		DHD_FLOW_QUEUE_SET_L2THRESHOLD(queue, l2cumm_threshold);
		DHD_FLOW_QUEUE_SET_L2CLEN(queue, l2cumm_ctr);
	}
}
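
/*
 * Illustrative sketch: several queues can share one parent ("cumm") and one
 * grandparent ("l2cumm") counter, so the throttle in dhd_flow_queue_throttle()
 * can cap a whole interface or device in addition to a single queue. The
 * counters and threshold values below are hypothetical.
 *
 *   dhd_flow_ring_config_thresholds(dhdp, flowid,
 *       512,                     // per-queue budget
 *       1024, &if_cumm_ctr,      // parent threshold and counter
 *       4096, &dev_l2cumm_ctr);  // grandparent threshold and counter
 */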

/*
 * This function returns the total number of flowrings that can be created for an INFRA STA.
 * For prio2ac mapping, it will return 4, prio2ac[8] = { 0, 1, 1, 0, 2, 2, 3, 3 }
 * For prio2tid mapping, it will return 8, prio2tid[8] = { 0, 1, 2, 3, 4, 5, 6, 7 }
 */
uint8
dhd_num_prio_supported_per_flow_ring(dhd_pub_t *dhdp)
{
	uint8 prio_count = 0;
	int i;
	/* Pick all elements one by one */
	for (i = 0; i < NUMPRIO; i++)
	{
		/* Check if the picked element is already counted */
		int j;
		for (j = 0; j < i; j++) {
			if (dhdp->flow_prio_map[i] == dhdp->flow_prio_map[j]) {
				break;
			}
		}
		/* If not counted earlier, then count it */
		if (i == j) {
			prio_count++;
		}
	}

	return prio_count;
}

uint8
dhd_get_max_multi_client_flow_rings(dhd_pub_t *dhdp)
{
	uint8 reserved_infra_sta_flow_rings = dhd_num_prio_supported_per_flow_ring(dhdp);
	uint8 total_tx_flow_rings = (uint8)dhd_get_max_flow_rings(dhdp);
	uint8 max_multi_client_flow_rings = total_tx_flow_rings - reserved_infra_sta_flow_rings;
	return max_multi_client_flow_rings;
}
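
/*
 * Worked example with hypothetical totals: with the default prio2ac map an
 * INFRA STA reserves 4 flowrings, so a device exposing 36 Tx flowrings in
 * total would leave 36 - 4 = 32 rings for multi-client use.
 */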

int
dhd_flowid_map_init(dhd_pub_t *dhdp, uint16 max_tx_flow_rings)
{
#if defined(DHD_HTPUT_TUNABLES)
	uint16 max_normal_tx_flow_rings = max_tx_flow_rings - HTPUT_TOTAL_FLOW_RINGS;
#else
	uint16 max_normal_tx_flow_rings = max_tx_flow_rings;
#endif /* DHD_HTPUT_TUNABLES */

	/* Construct a normal flowid allocator from FLOWID_RESERVED to
	 * (max_normal_tx_flow_rings - 1)
	 */
	dhdp->flowid_allocator = id16_map_init(dhdp->osh, max_normal_tx_flow_rings,
		FLOWID_RESERVED);
	if (dhdp->flowid_allocator == NULL) {
		DHD_ERROR(("%s: flowid allocator init failure\n", __FUNCTION__));
		return BCME_NOMEM;
	}

#if defined(DHD_HTPUT_TUNABLES)
	if (HTPUT_TOTAL_FLOW_RINGS > 0) {
		dhdp->htput_flow_ring_start = max_normal_tx_flow_rings + FLOWID_RESERVED;
		/* Construct a htput flowid allocator from htput_flow_ring_start to
		 * (htput_flow_ring_start + HTPUT_TOTAL_FLOW_RINGS - 1)
		 */
		dhdp->htput_flowid_allocator = id16_map_init(dhdp->osh, HTPUT_TOTAL_FLOW_RINGS,
			dhdp->htput_flow_ring_start);
		if (dhdp->htput_flowid_allocator == NULL) {
			DHD_ERROR(("%s: htput flowid allocator init failure\n", __FUNCTION__));
			return BCME_NOMEM;
		}
		dhdp->htput_client_flow_rings = 0u;
	}
#endif /* DHD_HTPUT_TUNABLES */

	return BCME_OK;
}

void
dhd_flowid_map_deinit(dhd_pub_t *dhdp)
{
	if (dhdp->flowid_allocator) {
		dhdp->flowid_allocator = id16_map_fini(dhdp->osh, dhdp->flowid_allocator);
	}
	ASSERT(dhdp->flowid_allocator == NULL);

#if defined(DHD_HTPUT_TUNABLES)
	if (dhdp->htput_flowid_allocator) {
		dhdp->htput_flowid_allocator = id16_map_fini(dhdp->osh,
			dhdp->htput_flowid_allocator);
		ASSERT(dhdp->htput_flowid_allocator == NULL);
	}
	dhdp->htput_client_flow_rings = 0u;
#endif /* DHD_HTPUT_TUNABLES */
	return;
}

/** Initializes data structures of multiple flow rings
 * num_h2d_rings - max_h2d_rings including static and dynamic rings
 */
int
dhd_flow_rings_init(dhd_pub_t *dhdp, uint32 num_h2d_rings)
{
	uint32 idx;
	uint32 flow_ring_table_sz = 0;
	uint32 if_flow_lkup_sz = 0;
	flow_ring_table_t *flow_ring_table = NULL;
	if_flow_lkup_t *if_flow_lkup = NULL;
	void *lock = NULL;
	void *list_lock = NULL;
	unsigned long flags;
	uint16 max_tx_flow_rings;

	DHD_INFO(("%s\n", __FUNCTION__));

	/*
	 * A 16-bit flowid map is allocated only for the actual number of Tx flowrings,
	 * excluding the common rings.
	 * All remaining flowring data structures are allocated for all num_h2d_rings.
	 */
	max_tx_flow_rings = dhd_get_max_flow_rings(dhdp);
	if (dhd_flowid_map_init(dhdp, max_tx_flow_rings) != BCME_OK) {
		DHD_ERROR(("%s: dhd_flowid_map_init failure\n", __FUNCTION__));
		goto fail;
	}

	/* No Tx flowid may exceed max_tx_flowid */
	dhdp->max_tx_flowid = max_tx_flow_rings + FLOWID_RESERVED - 1;

	/* Allocate a flow ring table, comprising of requested number of rings */
	flow_ring_table_sz = (num_h2d_rings * sizeof(flow_ring_node_t));
	flow_ring_table = (flow_ring_table_t *)MALLOCZ(dhdp->osh, flow_ring_table_sz);
	if (flow_ring_table == NULL) {
		DHD_ERROR(("%s: flow ring table alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize flow ring table state */
	DHD_CUMM_CTR_INIT(&dhdp->cumm_ctr);
	DHD_CUMM_CTR_INIT(&dhdp->l2cumm_ctr);
	bzero((uchar *)flow_ring_table, flow_ring_table_sz);
	for (idx = 0; idx < num_h2d_rings; idx++) {
		flow_ring_table[idx].status = FLOW_RING_STATUS_CLOSED;
		flow_ring_table[idx].flowid = (uint16)idx;
		flow_ring_table[idx].lock = osl_spin_lock_init(dhdp->osh);
#ifdef IDLE_TX_FLOW_MGMT
		flow_ring_table[idx].last_active_ts = OSL_SYSUPTIME();
#endif /* IDLE_TX_FLOW_MGMT */
		if (flow_ring_table[idx].lock == NULL) {
			DHD_ERROR(("%s: Failed to init spinlock for queue!\n", __FUNCTION__));
			goto fail;
		}

		dll_init(&flow_ring_table[idx].list);

		/* Initialize the per flow ring backup queue */
		dhd_flow_queue_init(dhdp, &flow_ring_table[idx].queue,
		                    dhdp->conf->flow_ring_queue_threshold);
	}

	/* Allocate per interface hash table (for fast lookup from interface to flow ring) */
	if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
	if_flow_lkup = (if_flow_lkup_t *)DHD_OS_PREALLOC(dhdp,
		DHD_PREALLOC_IF_FLOW_LKUP, if_flow_lkup_sz);
	if (if_flow_lkup == NULL) {
		DHD_ERROR(("%s: if flow lkup alloc failure\n", __FUNCTION__));
		goto fail;
	}

	/* Initialize per interface hash table */
	for (idx = 0; idx < DHD_MAX_IFS; idx++) {
		int hash_ix;
		if_flow_lkup[idx].status = 0;
		if_flow_lkup[idx].role = 0;
		for (hash_ix = 0; hash_ix < DHD_FLOWRING_HASH_SIZE; hash_ix++)
			if_flow_lkup[idx].fl_hash[hash_ix] = NULL;
	}

	lock = osl_spin_lock_init(dhdp->osh);
	if (lock == NULL)
		goto fail;

	list_lock = osl_spin_lock_init(dhdp->osh);
	if (list_lock == NULL)
		goto lock_fail;

	dhdp->flow_prio_map_type = DHD_FLOW_PRIO_AC_MAP;
	bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);
	dhdp->multi_client_flow_rings = 0U;

#ifdef DHD_LOSSLESS_ROAMING
	dhdp->dequeue_prec_map = ALLPRIO;
#endif
	/* Now populate into dhd pub */
	DHD_FLOWID_LOCK(lock, flags);
	dhdp->num_h2d_rings = num_h2d_rings;
	dhdp->flow_ring_table = (void *)flow_ring_table;
	dhdp->if_flow_lkup = (void *)if_flow_lkup;
	dhdp->flowid_lock = lock;
	dhdp->flow_rings_inited = TRUE;
	dhdp->flowring_list_lock = list_lock;
	DHD_FLOWID_UNLOCK(lock, flags);

	DHD_INFO(("%s done\n", __FUNCTION__));
	return BCME_OK;

lock_fail:
	/* deinit the spinlock */
	osl_spin_lock_deinit(dhdp->osh, lock);

fail:
	/* Destruct the per interface flow lkup table */
	if (if_flow_lkup != NULL) {
		DHD_OS_PREFREE(dhdp, if_flow_lkup, if_flow_lkup_sz);
	}
	if (flow_ring_table != NULL) {
		for (idx = 0; idx < num_h2d_rings; idx++) {
			if (flow_ring_table[idx].lock != NULL)
				osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
		}
		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
	}
	dhd_flowid_map_deinit(dhdp);

	return BCME_NOMEM;
}
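
/*
 * Illustrative sketch: dhd_flow_rings_init() is expected to be paired with
 * dhd_flow_rings_deinit() on teardown; num_h2d_rings is a hypothetical value
 * covering the common rings plus all Tx flowrings.
 *
 *   if (dhd_flow_rings_init(dhdp, num_h2d_rings) != BCME_OK)
 *       goto fail;  // hypothetical caller error path
 *   ...
 *   dhd_flow_rings_deinit(dhdp);
 */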

/** Deinit Flow Ring specific data structures */
void dhd_flow_rings_deinit(dhd_pub_t *dhdp)
{
	uint16 idx;
	uint32 flow_ring_table_sz;
	uint32 if_flow_lkup_sz;
	flow_ring_table_t *flow_ring_table;
	unsigned long flags;
	void *lock;

	DHD_INFO(("dhd_flow_rings_deinit\n"));

	if (!(dhdp->flow_rings_inited)) {
		DHD_ERROR(("dhd_flow_rings not initialized!\n"));
		return;
	}

	if (dhdp->flow_ring_table != NULL) {

		ASSERT(dhdp->num_h2d_rings > 0);

		DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
		flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
		dhdp->flow_ring_table = NULL;
		DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
		for (idx = 0; idx < dhdp->num_h2d_rings; idx++) {
			if (flow_ring_table[idx].active) {
				dhd_bus_clean_flow_ring(dhdp->bus, &flow_ring_table[idx]);
			}
			ASSERT(DHD_FLOW_QUEUE_EMPTY(&flow_ring_table[idx].queue));

			/* Deinit flow ring queue locks before destroying flow ring table */
			if (flow_ring_table[idx].lock != NULL) {
				osl_spin_lock_deinit(dhdp->osh, flow_ring_table[idx].lock);
			}
			flow_ring_table[idx].lock = NULL;

		}

		/* Destruct the flow ring table */
		flow_ring_table_sz = dhdp->num_h2d_rings * sizeof(flow_ring_table_t);
		MFREE(dhdp->osh, flow_ring_table, flow_ring_table_sz);
	}

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);

	/* Destruct the per interface flow lkup table */
	if (dhdp->if_flow_lkup != NULL) {
		if_flow_lkup_sz = sizeof(if_flow_lkup_t) * DHD_MAX_IFS;
		bzero((uchar *)dhdp->if_flow_lkup, if_flow_lkup_sz);
		DHD_OS_PREFREE(dhdp, dhdp->if_flow_lkup, if_flow_lkup_sz);
		dhdp->if_flow_lkup = NULL;
	}

	/* Destruct the flowid allocator */
	dhd_flowid_map_deinit(dhdp);

	dhdp->num_h2d_rings = 0U;
	bzero(dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

	dhdp->max_multi_client_flow_rings = 0U;
	dhdp->multi_client_flow_rings = 0U;

	lock = dhdp->flowid_lock;
	dhdp->flowid_lock = NULL;

	if (lock) {
		DHD_FLOWID_UNLOCK(lock, flags);
		osl_spin_lock_deinit(dhdp->osh, lock);
	}

	osl_spin_lock_deinit(dhdp->osh, dhdp->flowring_list_lock);
	dhdp->flowring_list_lock = NULL;

	ASSERT(dhdp->if_flow_lkup == NULL);
	ASSERT(dhdp->flow_ring_table == NULL);
	dhdp->flow_rings_inited = FALSE;
}

/** Uses hash table to quickly map from ifindex to a flow ring 'role' (STA/AP) */
uint8
dhd_flow_rings_ifindex2role(dhd_pub_t *dhdp, uint8 ifindex)
{
	if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
	ASSERT(if_flow_lkup);
	return if_flow_lkup[ifindex].role;
}

#ifdef WLTDLS
bool is_tdls_destination(dhd_pub_t *dhdp, uint8 *da)
{
	unsigned long flags;
	tdls_peer_node_t *cur = NULL;

	DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
	/* Check only if tdls peer is added */
	if (dhdp->peer_tbl.tdls_peer_count && !(ETHER_ISMULTI(da))) {
		cur = dhdp->peer_tbl.node;

		while (cur != NULL) {
			if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
				DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
				return TRUE;
			}
			cur = cur->next;
		}
	}
	DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
	return FALSE;
}
#endif /* WLTDLS */

/** Uses hash table to quickly map from ifindex+prio+da to a flow ring id */
static INLINE uint16
dhd_flowid_find(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
	int hash;
	bool ismcast = FALSE;
	flow_hash_info_t *cur;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return FLOWID_INVALID;

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	ASSERT(if_flow_lkup);

	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
#ifdef WLTDLS
		if (is_tdls_destination(dhdp, da)) {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
			cur = if_flow_lkup[ifindex].fl_hash[hash];
			while (cur != NULL) {
				if (!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN)) {
					DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
					return cur->flowid;
				}
				cur = cur->next;
			}
			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
			return FLOWID_INVALID;
		}
#endif /* WLTDLS */
		/* For STA non-TDLS destinations and WDS destinations, the flow ring id is
		 * mapped based on prio only
		 */
		cur = if_flow_lkup[ifindex].fl_hash[prio];
		if (cur) {
			DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
			return cur->flowid;
		}
	} else {

		if (ETHER_ISMULTI(da)) {
			ismcast = TRUE;
			hash = 0;
		} else {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
		}

		cur = if_flow_lkup[ifindex].fl_hash[hash];

		while (cur) {
			if ((ismcast && ETHER_ISMULTI(cur->flow_info.da)) ||
				(!memcmp(cur->flow_info.da, da, ETHER_ADDR_LEN) &&
				(cur->flow_info.tid == prio))) {
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				return cur->flowid;
			}
			cur = cur->next;
		}
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

#ifdef DHD_EFI
	DHD_TRACE(("%s: cannot find flowid\n", __FUNCTION__));
#else
	DHD_FLOWRING_INFO(("%s: cannot find flowid\n", __FUNCTION__));
#endif
	return FLOWID_INVALID;
} /* dhd_flowid_find */
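
/*
 * Illustrative bucket layout (sketch): for a generic STA role the 802.1D
 * priority itself indexes fl_hash[], so at most one ring exists per priority
 * (TDLS destinations excepted); for other roles the index is derived from the
 * destination address, with all bcast/mcast traffic sharing slot 0.
 *
 *   hash = prio;                             // STA, non-TDLS destination
 *   hash = DHD_FLOWRING_HASHINDEX(da, prio); // unicast, non-STA role
 *   hash = 0;                                // bcast/mcast, non-STA role
 */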

static uint16
dhd_flowid_map_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *da)
{
	uint16 flowid = FLOWID_INVALID;
	ASSERT(dhdp->flowid_allocator != NULL);

#if defined(DHD_HTPUT_TUNABLES)
	if (dhdp->htput_flowid_allocator) {
		if (prio == HTPUT_FLOW_RING_PRIO) {
			if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
				/* For the STA case, only one flowring per PRIO is created,
				 * so there is no need for a HTPUT counter variable.
				 * If a HTPUT flowring is already allocated for the given
				 * HTPUT_PRIO, this function is not even called, as
				 * dhd_flowid_find will assign the same flowid for those
				 * HTPUT_PRIO packets.
				 */
				flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
			} else if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex) && !ETHER_ISMULTI(da)) {
				/* Use HTPUT flowrings for only HTPUT_NUM_CLIENT_FLOW_RINGS */
				if (dhdp->htput_client_flow_rings < HTPUT_NUM_CLIENT_FLOW_RINGS) {
					flowid = id16_map_alloc(dhdp->htput_flowid_allocator);
					/* increment htput client counter */
					if (flowid != FLOWID_INVALID) {
						dhdp->htput_client_flow_rings++;
					}
				}
			}
		}
	}
#endif /* DHD_HTPUT_TUNABLES */

	BCM_REFERENCE(flowid);

	/*
	 * In the HTPUT case, if the high throughput flowrings are already allocated
	 * for the given role, control reaches here.
	 */
	if (flowid == FLOWID_INVALID) {
		flowid = id16_map_alloc(dhdp->flowid_allocator);
	}

	return flowid;
}

/** Create unique Flow ID, called when a flow ring is created. */
static INLINE uint16
dhd_flowid_alloc(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, char *sa, char *da)
{
	flow_hash_info_t *fl_hash_node, *cur;
	if_flow_lkup_t *if_flow_lkup;
	int hash;
	uint16 flowid;
	unsigned long flags;

	fl_hash_node = (flow_hash_info_t *) MALLOCZ(dhdp->osh, sizeof(flow_hash_info_t));
	if (fl_hash_node == NULL) {
		DHD_ERROR(("%s: flow_hash_info_t memory allocation failed\n", __FUNCTION__));
		return FLOWID_INVALID;
	}
	memcpy(fl_hash_node->flow_info.da, da, sizeof(fl_hash_node->flow_info.da));

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	flowid = dhd_flowid_map_alloc(dhdp, ifindex, prio, da);
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	if (flowid == FLOWID_INVALID) {
		MFREE(dhdp->osh, fl_hash_node, sizeof(flow_hash_info_t));
		DHD_ERROR_RLMT(("%s: cannot get free flowid\n", __FUNCTION__));
		return FLOWID_INVALID;
	}

	fl_hash_node->flowid = flowid;
	fl_hash_node->flow_info.tid = prio;
	fl_hash_node->flow_info.ifindex = ifindex;
	fl_hash_node->next = NULL;

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	if (DHD_IF_ROLE_GENERIC_STA(dhdp, ifindex)) {
		/* For STA/GC non-TDLS dest and WDS dest we allocate the entry based on prio only */
#ifdef WLTDLS
		if (is_tdls_destination(dhdp, da)) {
			hash = DHD_FLOWRING_HASHINDEX(da, prio);
			cur = if_flow_lkup[ifindex].fl_hash[hash];
			if (cur) {
				while (cur->next) {
					cur = cur->next;
				}
				cur->next = fl_hash_node;
			} else {
				if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
			}
		} else
#endif /* WLTDLS */
			if_flow_lkup[ifindex].fl_hash[prio] = fl_hash_node;
	} else {

		/* For bcast/mcast assign the first slot in the interface */
		hash = ETHER_ISMULTI(da) ? 0 : DHD_FLOWRING_HASHINDEX(da, prio);
		cur = if_flow_lkup[ifindex].fl_hash[hash];
		if (cur) {
			while (cur->next) {
				cur = cur->next;
			}
			cur->next = fl_hash_node;
		} else
			if_flow_lkup[ifindex].fl_hash[hash] = fl_hash_node;
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	DHD_FLOWRING_INFO(("%s: allocated flowid %d\n", __FUNCTION__, fl_hash_node->flowid));

	if (fl_hash_node->flowid > dhdp->max_tx_flowid) {
		DHD_ERROR(("%s: flowid=%d max_tx_flowid=%d ifindex=%d prio=%d role=%d\n",
			__FUNCTION__, fl_hash_node->flowid, dhdp->max_tx_flowid,
			ifindex, prio, if_flow_lkup[ifindex].role));
		dhd_prhex("da", (uchar *)da, ETHER_ADDR_LEN, DHD_ERROR_VAL);
		dhd_prhex("sa", (uchar *)sa, ETHER_ADDR_LEN, DHD_ERROR_VAL);
		return FLOWID_INVALID;
	}

	return fl_hash_node->flowid;
} /* dhd_flowid_alloc */

/** Get flow ring ID, if not present try to create one */
static INLINE int
dhd_flowid_lookup(dhd_pub_t *dhdp, uint8 ifindex,
                  uint8 prio, char *sa, char *da, uint16 *flowid)
{
	uint16 id;
	flow_ring_node_t *flow_ring_node;
	flow_ring_table_t *flow_ring_table;
	unsigned long flags;
	int ret;

	DHD_TRACE(("%s\n", __FUNCTION__));

	if (!dhdp->flow_ring_table) {
		return BCME_ERROR;
	}

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return BCME_BADARG;

	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

	id = dhd_flowid_find(dhdp, ifindex, prio, sa, da);

	if (id == FLOWID_INVALID) {
		bool if_role_multi_client;
		if_flow_lkup_t *if_flow_lkup;
		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

		if (!if_flow_lkup[ifindex].status)
			return BCME_ERROR;

		/* check role for multi client case */
		if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

		/* Abort flowring creation if multi client flowrings crossed the threshold */
#ifdef DHD_LIMIT_MULTI_CLIENT_FLOWRINGS
		if (if_role_multi_client &&
			(dhdp->multi_client_flow_rings >= dhdp->max_multi_client_flow_rings)) {
			DHD_ERROR_RLMT(("%s: Max multi client flow rings reached: %d:%d\n",
				__FUNCTION__, dhdp->multi_client_flow_rings,
				dhdp->max_multi_client_flow_rings));
			return BCME_ERROR;
		}
#endif /* DHD_LIMIT_MULTI_CLIENT_FLOWRINGS */

		/* Do not create flowring if peer is not associated */
#if (defined(linux) || defined(LINUX)) && defined(PCIE_FULL_DONGLE)
		if (if_role_multi_client && !ETHER_ISMULTI(da) &&
			!dhd_sta_associated(dhdp, ifindex, (uint8 *)da)) {
			DHD_ERROR_RLMT(("%s: Skip send pkt without peer addition\n", __FUNCTION__));
			return BCME_ERROR;
		}
#endif /* (linux || LINUX) && PCIE_FULL_DONGLE */

		id = dhd_flowid_alloc(dhdp, ifindex, prio, sa, da);
		if (id == FLOWID_INVALID) {
			DHD_ERROR_RLMT(("%s: alloc flowid ifindex %u status %u\n",
				__FUNCTION__, ifindex, if_flow_lkup[ifindex].status));
			return BCME_ERROR;
		}

		ASSERT(id <= dhdp->max_tx_flowid);

		/* Only after flowid alloc, increment multi_client_flow_rings */
		if (if_role_multi_client) {
			dhdp->multi_client_flow_rings++;
		}

		/* register this flowid in dhd_pub */
		dhd_add_flowid(dhdp, ifindex, prio, da, id);

		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];

		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		/* Init Flow info */
		memcpy(flow_ring_node->flow_info.sa, sa, sizeof(flow_ring_node->flow_info.sa));
		memcpy(flow_ring_node->flow_info.da, da, sizeof(flow_ring_node->flow_info.da));
		flow_ring_node->flow_info.tid = prio;
		flow_ring_node->flow_info.ifindex = ifindex;
		flow_ring_node->active = TRUE;
		flow_ring_node->status = FLOW_RING_STATUS_CREATE_PENDING;

#ifdef DEVICE_TX_STUCK_DETECT
		flow_ring_node->tx_cmpl = flow_ring_node->tx_cmpl_prev = OSL_SYSUPTIME();
		flow_ring_node->stuck_count = 0;
#endif /* DEVICE_TX_STUCK_DETECT */
#ifdef TX_STATUS_LATENCY_STATS
		flow_ring_node->flow_info.num_tx_status = 0;
		flow_ring_node->flow_info.cum_tx_status_latency = 0;
		flow_ring_node->flow_info.num_tx_pkts = 0;
#endif /* TX_STATUS_LATENCY_STATS */
#ifdef BCMDBG
		bzero(&flow_ring_node->flow_info.tx_status[0],
			sizeof(uint32) * DHD_MAX_TX_STATUS_MSGS);
#endif
		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);

		/* Create and inform device about the new flow */
		if (dhd_bus_flow_ring_create_request(dhdp->bus, (void *)flow_ring_node)
				!= BCME_OK) {
			DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
			flow_ring_node->status = FLOW_RING_STATUS_CLOSED;
			flow_ring_node->active = FALSE;
			DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
			DHD_ERROR(("%s: create error %d\n", __FUNCTION__, id));
			return BCME_ERROR;
		}

		*flowid = id;
		return BCME_OK;
	} else {
		/* The flow id was found in the hash */

		if (id > dhdp->max_tx_flowid) {
			DHD_ERROR(("%s: Invalid flow id : %u, max_tx_flowid : %u\n",
				__FUNCTION__, id, dhdp->max_tx_flowid));
			*flowid = FLOWID_INVALID;
			ASSERT(0);
			return BCME_ERROR;
		}

		flow_ring_node = (flow_ring_node_t *) &flow_ring_table[id];
		DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);

		/*
		 * If the flow_ring_node is in the Open state or a status-pending state, then
		 * we can return the flow id to the caller. If the flow_ring_node is in
		 * FLOW_RING_STATUS_PENDING, the creation is in progress and
		 * hence the packets should be queued.
		 *
		 * If the flow_ring_node is in FLOW_RING_STATUS_DELETE_PENDING or
		 * FLOW_RING_STATUS_CLOSED, then we should return an error.
		 * Note that if the flowring is being deleted we mark it as
		 * FLOW_RING_STATUS_DELETE_PENDING. Before the dongle responds and
		 * before we mark it as FLOW_RING_STATUS_CLOSED, we could still get tx packets.
		 * We should drop the packets in that case.
		 * The decision to return OK should NOT be based on the 'active' variable,
		 * because active is made TRUE when a flow_ring_node gets allocated and is made
		 * FALSE when the flow ring gets removed, so it does not reflect the true state
		 * of the flow ring.
		 * In case IDLE_TX_FLOW_MGMT is defined, we have to handle two more flowring
		 * states. If the flow_ring_node's status is FLOW_RING_STATUS_SUSPENDED, the flowid
		 * is to be returned and from dhd_bus_txdata, the flowring would be resumed again.
		 * The status FLOW_RING_STATUS_RESUME_PENDING is equivalent to
		 * FLOW_RING_STATUS_CREATE_PENDING.
		 */
		if (flow_ring_node->status == FLOW_RING_STATUS_DELETE_PENDING ||
			flow_ring_node->status == FLOW_RING_STATUS_CLOSED) {
			*flowid = FLOWID_INVALID;
			ret = BCME_ERROR;
		} else {
			*flowid = id;
			ret = BCME_OK;
		}

		DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
		return ret;
	} /* Flow id found in the hash */
} /* dhd_flowid_lookup */

int
dhd_flowid_find_by_ifidx(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
	int hashidx = 0;
	bool found = FALSE;
	flow_hash_info_t *cur;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	if (!dhdp->flow_ring_table) {
		DHD_ERROR(("%s : dhd->flow_ring_table is NULL\n", __FUNCTION__));
		return BCME_ERROR;
	}

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
	for (hashidx = 0; hashidx < DHD_FLOWRING_HASH_SIZE; hashidx++) {
		cur = if_flow_lkup[ifindex].fl_hash[hashidx];
		if (cur) {
			if (cur->flowid == flowid) {
				found = TRUE;
			}

			while (!found && cur) {
				if (cur->flowid == flowid) {
					found = TRUE;
					break;
				}
				cur = cur->next;
			}

			if (found) {
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				return BCME_OK;
			}
		}
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	return BCME_ERROR;
}

int
dhd_flowid_debug_create(dhd_pub_t *dhdp, uint8 ifindex,
	uint8 prio, char *sa, char *da, uint16 *flowid)
{
	return dhd_flowid_lookup(dhdp, ifindex, prio, sa, da, flowid);
}

/**
 * Assign existing or newly created flowid to an 802.3 packet. This flowid is later on used to
 * select the flowring to send the packet to the dongle.
 */
int
BCMFASTPATH(dhd_flowid_update)(dhd_pub_t *dhdp, uint8 ifindex, uint8 prio, void *pktbuf)
{
	uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
	struct ether_header *eh = (struct ether_header *)pktdata;
	uint16 flowid = 0;

	ASSERT(ifindex < DHD_MAX_IFS);

	if (ifindex >= DHD_MAX_IFS) {
		return BCME_BADARG;
	}

	if (!dhdp->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
		return BCME_ERROR;
	}

	if (dhd_flowid_lookup(dhdp, ifindex, prio, (char *)eh->ether_shost, (char *)eh->ether_dhost,
		&flowid) != BCME_OK) {
		return BCME_ERROR;
	}

	DHD_FLOWRING_INFO(("%s: prio %d flowid %d\n", __FUNCTION__, prio, flowid));

	/* Tag the packet with flowid */
	DHD_PKT_SET_FLOWID(pktbuf, flowid);
	return BCME_OK;
}
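
/*
 * Illustrative tx-path usage (sketch): a caller tags the packet before
 * handing it to the bus layer. DHD_PKT_GET_FLOWID() is assumed to be the
 * accessor matching DHD_PKT_SET_FLOWID() above.
 *
 *   if (dhd_flowid_update(dhdp, ifidx, prio, pktbuf) != BCME_OK)
 *       return BCME_ERROR;  // no flowring available for this packet
 *   flowid = DHD_PKT_GET_FLOWID(pktbuf);
 */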

static void
dhd_flowid_map_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
#if defined(DHD_HTPUT_TUNABLES)
	if (dhdp->htput_flowid_allocator) {
		if (DHD_IS_FLOWID_HTPUT(dhdp, flowid)) {
			id16_map_free(dhdp->htput_flowid_allocator, flowid);
			/* decrement htput client counter */
			if (DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex)) {
				dhdp->htput_client_flow_rings--;
			}
			return;
		}
	}
#endif /* DHD_HTPUT_TUNABLES */

	id16_map_free(dhdp->flowid_allocator, flowid);

	return;
}

void
dhd_flowid_free(dhd_pub_t *dhdp, uint8 ifindex, uint16 flowid)
{
	int hashix;
	bool found = FALSE;
	flow_hash_info_t *cur, *prev;
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;
	bool if_role_multi_client;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	if_role_multi_client = DHD_IF_ROLE_MULTI_CLIENT(dhdp, ifindex);

	for (hashix = 0; hashix < DHD_FLOWRING_HASH_SIZE; hashix++) {

		cur = if_flow_lkup[ifindex].fl_hash[hashix];

		if (cur) {
			if (cur->flowid == flowid) {
				found = TRUE;
			}

			prev = NULL;
			while (!found && cur) {
				if (cur->flowid == flowid) {
					found = TRUE;
					break;
				}
				prev = cur;
				cur = cur->next;
			}
			if (found) {
				if (!prev) {
					if_flow_lkup[ifindex].fl_hash[hashix] = cur->next;
				} else {
					prev->next = cur->next;
				}

				/* Decrement multi_client_flow_rings */
				if (if_role_multi_client) {
					dhdp->multi_client_flow_rings--;
				}

				/* deregister flowid from dhd_pub. */
				dhd_del_flowid(dhdp, ifindex, flowid);

				dhd_flowid_map_free(dhdp, ifindex, flowid);
				DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
				MFREE(dhdp->osh, cur, sizeof(flow_hash_info_t));

				return;
			}
		}
	}

	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
	DHD_ERROR(("%s: could not free flow ring hash entry flowid %d\n",
	           __FUNCTION__, flowid));
} /* dhd_flowid_free */

/**
 * Delete all flow rings associated with the given interface. Called when e.g. the dongle
 * indicates that a wireless link has gone down.
 */
void
dhd_flow_rings_delete(dhd_pub_t *dhdp, uint8 ifindex)
{
	uint32 id;
	flow_ring_table_t *flow_ring_table;

	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	if (!dhdp->flow_ring_table)
		return;

	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
	for (id = 0; id < dhdp->num_h2d_rings; id++) {
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
			dhd_bus_flow_ring_delete_request(dhdp->bus,
			                                 (void *) &flow_ring_table[id]);
		}
	}
}

void
dhd_flow_rings_flush(dhd_pub_t *dhdp, uint8 ifindex)
{
	uint32 id;
	flow_ring_table_t *flow_ring_table;

	DHD_INFO(("%s: ifindex %u\n", __FUNCTION__, ifindex));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	if (!dhdp->flow_ring_table)
		return;
	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;

	for (id = 0; id < dhdp->num_h2d_rings; id++) {
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(flow_ring_table[id].status == FLOW_RING_STATUS_OPEN)) {
			dhd_bus_flow_ring_flush_request(dhdp->bus,
			                                 (void *) &flow_ring_table[id]);
		}
	}
}

/** Delete flow ring(s) for given peer address. */
void
dhd_flow_rings_delete_for_peer(dhd_pub_t *dhdp, uint8 ifindex, char *addr)
{
	uint32 id;
	flow_ring_table_t *flow_ring_table;

	DHD_ERROR(("%s: ifindex %u\n", __FUNCTION__, ifindex));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	if (!dhdp->flow_ring_table)
		return;

	flow_ring_table = (flow_ring_table_t *)dhdp->flow_ring_table;
	for (id = 0; id < dhdp->num_h2d_rings; id++) {
		/*
		 * Send a flowring delete request even if the flowring status is
		 * FLOW_RING_STATUS_CREATE_PENDING, to handle cases where the DISASSOC_IND
		 * event comes ahead of the flowring create response.
		 * Otherwise the flowring will not be deleted later, as there will not be any
		 * DISASSOC_IND event. With this change, when the create response event comes to
		 * DHD, it will change the status to FLOW_RING_STATUS_OPEN and soon the delete
		 * response event will come, upon which DHD will delete the flowring.
		 */
		if (flow_ring_table[id].active &&
			(flow_ring_table[id].flow_info.ifindex == ifindex) &&
			(!memcmp(flow_ring_table[id].flow_info.da, addr, ETHER_ADDR_LEN)) &&
			((flow_ring_table[id].status == FLOW_RING_STATUS_OPEN) ||
			(flow_ring_table[id].status == FLOW_RING_STATUS_CREATE_PENDING))) {
			DHD_ERROR(("%s: deleting flowid %d\n",
				__FUNCTION__, flow_ring_table[id].flowid));
			dhd_bus_flow_ring_delete_request(dhdp->bus,
				(void *) &flow_ring_table[id]);
		}
	}
}

/** Handles interface ADD, CHANGE, DEL indications from the dongle */
void
dhd_update_interface_flow_info(dhd_pub_t *dhdp, uint8 ifindex,
                               uint8 op, uint8 role)
{
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;

	DHD_INFO(("%s: ifindex %u op %u role is %u\n",
	          __FUNCTION__, ifindex, op, role));
	if (!dhdp->flowid_allocator) {
		DHD_ERROR(("%s: Flow ring not initialized yet\n", __FUNCTION__));
		return;
	}

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	if (op == WLC_E_IF_ADD || op == WLC_E_IF_CHANGE) {

		if_flow_lkup[ifindex].role = role;

		if (role == WLC_E_IF_ROLE_WDS) {
			/**
			 * The WDS role does not send a WLC_E_LINK event after the interface is up.
			 * So to create flowrings for WDS, set status to TRUE in WLC_E_IF itself.
			 * The same holds when setting the status to FALSE.
			 * TODO: Fix FW to send WLC_E_LINK for the WDS role as well, so that all
			 * interfaces are handled uniformly.
			 */
			if_flow_lkup[ifindex].status = TRUE;
			DHD_INFO(("%s: Mcast Flow ring for ifindex %d role is %d\n",
			          __FUNCTION__, ifindex, role));
		}
	} else if ((op == WLC_E_IF_DEL) && (role == WLC_E_IF_ROLE_WDS)) {
		if_flow_lkup[ifindex].status = FALSE;
		DHD_INFO(("%s: cleanup all Flow rings for ifindex %d role is %d\n",
		          __FUNCTION__, ifindex, role));
	}
	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
}

/** Handles a STA 'link' indication from the dongle */
int
dhd_update_interface_link_status(dhd_pub_t *dhdp, uint8 ifindex, uint8 status)
{
	if_flow_lkup_t *if_flow_lkup;
	unsigned long flags;

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return BCME_BADARG;

	DHD_INFO(("%s: ifindex %d status %d\n", __FUNCTION__, ifindex, status));

	DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
	if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;

	if (status) {
		if_flow_lkup[ifindex].status = TRUE;
	} else {
		if_flow_lkup[ifindex].status = FALSE;
	}

	DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);

	return BCME_OK;
}

/** Update flow priority mapping, called on IOVAR */
int dhd_update_flow_prio_map(dhd_pub_t *dhdp, uint8 map)
{
	uint16 flowid;
	flow_ring_node_t *flow_ring_node;

	if (map > DHD_FLOW_PRIO_LLR_MAP)
		return BCME_BADOPTION;

	/* Check if we need to change prio map */
	if (map == dhdp->flow_prio_map_type)
		return BCME_OK;

	/* If any ring is active we cannot change priority mapping for flow rings */
	for (flowid = 0; flowid < dhdp->num_h2d_rings; flowid++) {
		flow_ring_node = DHD_FLOW_RING(dhdp, flowid);
		if (flow_ring_node->active)
			return BCME_EPERM;
	}

	/* Inform firmware about new mapping type */
	if (BCME_OK != dhd_flow_prio_map(dhdp, &map, TRUE))
		return BCME_ERROR;

	/* update internal structures */
	dhdp->flow_prio_map_type = map;
	if (dhdp->flow_prio_map_type == DHD_FLOW_PRIO_TID_MAP)
		bcopy(prio2tid, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);
	else
		bcopy(prio2ac, dhdp->flow_prio_map, sizeof(uint8) * NUMPRIO);

	dhdp->max_multi_client_flow_rings = dhd_get_max_multi_client_flow_rings(dhdp);

	return BCME_OK;
}
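
/*
 * Illustrative sketch: switching from AC-based to TID-based flowrings. This
 * only succeeds while no flowring is active, and it pushes the new map to the
 * firmware via the "bus:fl_prio_map" iovar handled below.
 *
 *   int err = dhd_update_flow_prio_map(dhdp, DHD_FLOW_PRIO_TID_MAP);
 *   // err == BCME_EPERM if any ring is still active
 */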

/** Inform firmware on updated flow priority mapping, called on IOVAR */
int dhd_flow_prio_map(dhd_pub_t *dhd, uint8 *map, bool set)
{
	uint8 iovbuf[WLC_IOCTL_SMLEN];
	int len;
	uint32 val;
	if (!set) {
		bzero(&iovbuf, sizeof(iovbuf));
		len = bcm_mkiovar("bus:fl_prio_map", NULL, 0, (char*)iovbuf, sizeof(iovbuf));
		if (len == 0) {
			return BCME_BUFTOOSHORT;
		}
		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0) < 0) {
			DHD_ERROR(("%s: failed to get fl_prio_map\n", __FUNCTION__));
			return BCME_ERROR;
		}
		*map = iovbuf[0];
		return BCME_OK;
	}
	val = (uint32)map[0];
	len = bcm_mkiovar("bus:fl_prio_map", (char *)&val, sizeof(val),
		(char*)iovbuf, sizeof(iovbuf));
	if (len == 0) {
		return BCME_BUFTOOSHORT;
	}
	if (dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, len, TRUE, 0) < 0) {
		DHD_ERROR(("%s: failed to set fl_prio_map\n", __FUNCTION__));
		return BCME_ERROR;
	}
	return BCME_OK;
}

uint32
dhd_active_tx_flowring_bkpq_len(dhd_pub_t *dhd)
{
	unsigned long list_lock_flags;
	dll_t *item, *prev;
	flow_ring_node_t *flow_ring_node;
	dhd_bus_t *bus = dhd->bus;
	uint32 active_tx_flowring_qlen = 0;

	DHD_FLOWRING_LIST_LOCK(bus->dhd->flowring_list_lock, list_lock_flags);

	for (item = dll_tail_p(&bus->flowring_active_list);
			!dll_end(&bus->flowring_active_list, item); item = prev) {

		prev = dll_prev_p(item);

		flow_ring_node = dhd_constlist_to_flowring(item);
		if (flow_ring_node->active) {
			DHD_INFO(("%s :%d\n", __FUNCTION__, flow_ring_node->queue.len));
			active_tx_flowring_qlen += flow_ring_node->queue.len;
		}
	}
	DHD_FLOWRING_LIST_UNLOCK(bus->dhd->flowring_list_lock, list_lock_flags);
	return active_tx_flowring_qlen;
}

#ifdef DHD_AWDL
/**
 * Handle/intercept the awdl peer op IOVAR fired by the user.
 * buf = NULL means delete all peers on the awdl interface.
 */
void
dhd_awdl_peer_op(dhd_pub_t *dhdp, uint8 ifindex, void *buf, uint32 buflen)
{
	awdl_peer_op_t	*peer = (awdl_peer_op_t *)buf;
	DHD_TRACE(("%s\n", __FUNCTION__));

	ASSERT(ifindex < DHD_MAX_IFS);
	if (ifindex >= DHD_MAX_IFS)
		return;
	if (!buf) {
		/* Delete all peers on the awdl interface */
		if_flow_lkup_t *if_flow_lkup;
		if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
		if (if_flow_lkup[ifindex].role != WLC_E_IF_ROLE_AWDL) {
			DHD_ERROR(("%s: Interface %d is not an awdl peer\n",
				__FUNCTION__, ifindex));
			return;
		}
		dhd_flow_rings_delete(dhdp, ifindex);
		return;
	}
	/* Parse awdl_peer_op info now */
	if (buflen < sizeof(awdl_peer_op_t)) {
		DHD_ERROR(("%s: cannot handle awdl_peer_op add/del\n", __FUNCTION__));
		return;
	}
	/**
	 * Only flowring deletion is handled here.
	 * Flowring addition is taken care of in dhd_flowid_lookup.
	 */
	if (peer->opcode == AWDL_PEER_OP_DEL) {
		dhd_del_sta(dhdp, ifindex, &peer->addr.octet[0]);
		dhd_flow_rings_delete_for_peer(dhdp, ifindex, (char *)&peer->addr.octet[0]);
	} else if (peer->opcode == AWDL_PEER_OP_ADD) {
		dhd_findadd_sta(dhdp, ifindex, &peer->addr.octet[0]);
	}
	return;
}
#endif /* DHD_AWDL */