xref: /OK3568_Linux_fs/external/rkwifibt/drivers/bcmdhd/dhd_linux_sock_qos.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun  * Source file for DHD QOS on Socket Flow.
3*4882a593Smuzhiyun  *
4*4882a593Smuzhiyun  * Defines a socket flow and maintains a table of socket flows
5*4882a593Smuzhiyun  * for further analysis in order to upgrade the QOS of the flow.
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun  * Copyright (C) 2020, Broadcom.
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  *      Unless you and Broadcom execute a separate written software license
10*4882a593Smuzhiyun  * agreement governing use of this software, this software is licensed to you
11*4882a593Smuzhiyun  * under the terms of the GNU General Public License version 2 (the "GPL"),
12*4882a593Smuzhiyun  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
13*4882a593Smuzhiyun  * following added to such license:
14*4882a593Smuzhiyun  *
15*4882a593Smuzhiyun  *      As a special exception, the copyright holders of this software give you
16*4882a593Smuzhiyun  * permission to link this software with independent modules, and to copy and
17*4882a593Smuzhiyun  * distribute the resulting executable under terms of your choice, provided that
18*4882a593Smuzhiyun  * you also meet, for each linked independent module, the terms and conditions of
19*4882a593Smuzhiyun  * the license of that module.  An independent module is a module which is not
20*4882a593Smuzhiyun  * derived from this software.  The special exception does not apply to any
21*4882a593Smuzhiyun  * modifications of the software.
22*4882a593Smuzhiyun  *
23*4882a593Smuzhiyun  *
24*4882a593Smuzhiyun  * <<Broadcom-WL-IPTag/Open:>>
25*4882a593Smuzhiyun  *
26*4882a593Smuzhiyun  * $Id$
27*4882a593Smuzhiyun  *
28*4882a593Smuzhiyun  */
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun #include <dhd_linux_priv.h>
31*4882a593Smuzhiyun #include <dhd_dbg.h>
32*4882a593Smuzhiyun #include <bcmstdlib_s.h>
33*4882a593Smuzhiyun #include <bcmendian.h>
34*4882a593Smuzhiyun #include <dhd_linux_sock_qos.h>
35*4882a593Smuzhiyun #include <dhd_qos_algo.h>
36*4882a593Smuzhiyun #include <dhd.h>
37*4882a593Smuzhiyun 
38*4882a593Smuzhiyun #include <net/sock.h>
39*4882a593Smuzhiyun #include <linux/sock_diag.h>
40*4882a593Smuzhiyun #include <linux/netlink.h>
41*4882a593Smuzhiyun #include <linux/list.h>
42*4882a593Smuzhiyun #include <linux/sched.h>
43*4882a593Smuzhiyun #include <linux/math64.h>
44*4882a593Smuzhiyun #include <linux/pkt_sched.h>
45*4882a593Smuzhiyun #include <linux_pkt.h>
46*4882a593Smuzhiyun #include <net/tcp.h>
47*4882a593Smuzhiyun 
/* Maximum number of Socket Flows supported */
#define MAX_SOCK_FLOW	(1024UL)

/* Maximum number of socket flows that may be in the upgraded state at
 * the same time; further upgrade recommendations are rejected once this
 * many flows are upgraded (see num_skfl_upgraded / skfl_upgrade_thresh).
 */
#define SOCK_FLOW_UPGRADE_THRESHOLD	(3)
/*
 * Mark a Socket Flow as inactive and free the resources
 * if there is no packet received for SOCK_IDLE_THRESHOLD_MS
 * of time. Note that this parameter is in milli seconds.
 */
#define SOCK_IDLE_THRESHOLD_MS	(2000UL)

/* IPv4 TOS byte for DSCP class CS7 (highest priority); written into the
 * IP header of packets that belong to an upgraded flow.
 */
#define DSCP_TOS_CS7 0XE0u

/* Watchdog interval in milliseconds, defined by the DHD core */
extern uint dhd_watchdog_ms;
62*4882a593Smuzhiyun 
/* Defines Socket Flow */
struct dhd_sock_flow_info
{
	/* Unique identifiers */
	struct sock *sk;	/* kernel socket this flow tracks */
	unsigned long ino;	/* socket inode number (from sock_i_ino), lookup key */

	/* statistics */
	qos_stat_t stats;	/* per-flow tx counters fed to the QoS algorithm */
	u64 last_pkt_ns;	/* local_clock() timestamp of the last tx packet;
				 * used for idle-flow cleanup
				 */
	kuid_t uid;		/* owning uid; currently not populated (see
				 * dhd_update_sock_flows TODO)
				 */

	/* Elements related to upgrade management */

	/* 0 - No upgrade
	 * 1 - Upgrade
	 */
	unsigned int cur_up_state;	/* state currently applied to the flow */
	unsigned int rcm_up_state;	/* state recommended by dhd_qos_algo() */
	unsigned int bus_flow_id;

	/* TODO:
	 * Handling Out Of Order during upgrade
	 * Once an upgrade is decided we cannot handover the skb to
	 * FW in the upgraded Flow Ring ... it will create Out of Order Packets.
	 * Instead we can have a output_q per socket flow. Once the upgrade is
	 * decided, we can start adding skbs to the output_q. The last 'skb' given
	 * to the actual Flow ring should be remembered in 'last_skb_orig_fl'.
	 * Once we get a  Tx completion for last_skb_orig_fl we can flush the
	 * contents of output_q to the 'upgraded flowring'. In this solution,
	 * we should also handle the case where output_q hits the watermark
	 * before the completion for 'last_skb_orig_fl' is received. If this condition
	 * happens, not to worry about OOO and flush the contents of output_q.
	 * Probably the last_skb_orig_fl is not sent out due latency in the
	 * existing flow ... the actual problem we are trying to solve.
	 */

	/* Management elements */
	struct list_head list;	/* linkage into dhd_sock_qos_info::sk_fl_list_head */
	unsigned int in_use;	/* 0 = free table slot, 1 = active flow */
};
104*4882a593Smuzhiyun 
/* Host-side cache of the FW frameburst on/off state; kept in sync every
 * time the host changes frameburst (see dhd_change_frameburst_state).
 */
typedef enum _frameburst_state
{
	FRMBRST_DISABLED = 0,
	FRMBRST_ENABLED = 1
} frameburst_state_t;
110*4882a593Smuzhiyun 
/* Sock QOS Module Structure */
typedef struct dhd_sock_qos_info
{
	/* Table of Socket Flows (array of max_sock_fl entries; a slot with
	 * in_use == 0 is free)
	 */
	struct dhd_sock_flow_info *sk_fl;
	/* maximum number for socket flows supported */
	uint32 max_sock_fl;

	/* TODO: need to make it per flow later on */
	/* global qos algo parameters */
	qos_algo_params_t qos_params;
	/* List in which active Socket Flows live */
	struct list_head sk_fl_list_head;
	/* spinlock (osl_spin_lock) protecting sk_fl_list_head and the
	 * upgrade bookkeeping below
	 */
	void *list_lock;

	/* Time interval a socket flow resource is moved out of the active list */
	uint32 sock_idle_thresh;
	/*
	 * Keep track of number of flows upgraded.
	 * If it reaches a threshold we should stop ugrading
	 * This is to avoid the problem where we overwhelm
	 * the Dongle with upgraded traffic.
	 */
	int num_skfl_upgraded;
	int skfl_upgrade_thresh;

	/* flag that is set to true when the first flow is upgraded
	 * so that FW frameburst is disabled, and set to false
	 * when no more flows are in upgraded state, so that
	 * FW frameburst is re-enabled
	 */
	bool upgrade_active;
	/* fw frameburst state */
	frameburst_state_t frmbrst_state;

	/* feature on/off switch; checked via dhd_sock_qos_get_status() */
	atomic_t on_off;
	atomic_t force_upgrade;

	/* required for enabling/disabling watchdog timer at runtime */
	uint watchdog_ms;
} dhd_sock_qos_info_t;
152*4882a593Smuzhiyun 
/* Acquire/release the spinlock guarding the active-flow list and the
 * upgrade counters; 'flags' receives/restores the saved IRQ state.
 */
#define SK_FL_LIST_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
#define SK_FL_LIST_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun int
dhd_init_sock_flows_buf(dhd_info_t * dhd,uint watchdog_ms)157*4882a593Smuzhiyun dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	unsigned long sz;
160*4882a593Smuzhiyun 	unsigned int i;
161*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl = NULL;
162*4882a593Smuzhiyun 	int val = 0, ret = 0;
163*4882a593Smuzhiyun 
164*4882a593Smuzhiyun 	if (dhd == NULL)
165*4882a593Smuzhiyun 		return BCME_BADARG;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	dhd->psk_qos = MALLOCZ(dhd->pub.osh, sizeof(dhd_sock_qos_info_t));
168*4882a593Smuzhiyun 	if (dhd->psk_qos == NULL) {
169*4882a593Smuzhiyun 		DHD_ERROR(("%s(): Failed to alloc psk_qos ! \n", __FUNCTION__));
170*4882a593Smuzhiyun 		return BCME_NOMEM;
171*4882a593Smuzhiyun 	}
172*4882a593Smuzhiyun 	dhd->psk_qos->max_sock_fl = MAX_SOCK_FLOW;
173*4882a593Smuzhiyun 	sz = sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW;
174*4882a593Smuzhiyun 	dhd->psk_qos->sk_fl = MALLOCZ(dhd->pub.osh, sz);
175*4882a593Smuzhiyun 	if (dhd->psk_qos->sk_fl == NULL) {
176*4882a593Smuzhiyun 		DHD_ERROR(("%s(): Failed to allocated sk_fl \r\n", __FUNCTION__));
177*4882a593Smuzhiyun 		return BCME_NOMEM;
178*4882a593Smuzhiyun 	}
179*4882a593Smuzhiyun 
180*4882a593Smuzhiyun 	sk_fl = dhd->psk_qos->sk_fl;
181*4882a593Smuzhiyun 	for (i = 0; i < MAX_SOCK_FLOW; i++, sk_fl++) {
182*4882a593Smuzhiyun 		sk_fl->in_use = 0;
183*4882a593Smuzhiyun 	}
184*4882a593Smuzhiyun 
185*4882a593Smuzhiyun 	dhd->psk_qos->sock_idle_thresh = SOCK_IDLE_THRESHOLD_MS;
186*4882a593Smuzhiyun 
187*4882a593Smuzhiyun 	dhd->psk_qos->skfl_upgrade_thresh = SOCK_FLOW_UPGRADE_THRESHOLD;
188*4882a593Smuzhiyun 
189*4882a593Smuzhiyun 	INIT_LIST_HEAD(&dhd->psk_qos->sk_fl_list_head);
190*4882a593Smuzhiyun 	dhd->psk_qos->list_lock = osl_spin_lock_init(dhd->pub.osh);
191*4882a593Smuzhiyun 
192*4882a593Smuzhiyun 	dhd->psk_qos->watchdog_ms = watchdog_ms;
193*4882a593Smuzhiyun 	/* feature is DISABLED by default */
194*4882a593Smuzhiyun 	dhd_sock_qos_set_status(dhd, 0);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	qos_algo_params_init(&dhd->psk_qos->qos_params);
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun 	dhd->psk_qos->frmbrst_state = FRMBRST_ENABLED;
199*4882a593Smuzhiyun 	/* read the initial state of frameburst from FW, cannot
200*4882a593Smuzhiyun 	 * assume that it will always be in enabled state by default.
201*4882a593Smuzhiyun 	 * We will cache the FW frameburst state in host and change
202*4882a593Smuzhiyun 	 * it everytime we change it from host during QoS upgrade.
203*4882a593Smuzhiyun 	 * This decision is taken, because firing an iovar everytime
204*4882a593Smuzhiyun 	 * to query FW frameburst state before deciding whether to
205*4882a593Smuzhiyun 	 * changing the frameburst state or not from host, is sub-optimal,
206*4882a593Smuzhiyun 	 * especially in the Tx path.
207*4882a593Smuzhiyun 	 */
208*4882a593Smuzhiyun 	ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
209*4882a593Smuzhiyun 		sizeof(val), FALSE, 0);
210*4882a593Smuzhiyun 	if (ret != BCME_OK) {
211*4882a593Smuzhiyun 		DHD_ERROR(("%s: get fw frameburst failed,"
212*4882a593Smuzhiyun 			" err=%d\n", __FUNCTION__, ret));
213*4882a593Smuzhiyun 	} else {
214*4882a593Smuzhiyun 		DHD_INFO(("%s:fw frameburst = %d", __FUNCTION__, val));
215*4882a593Smuzhiyun 		dhd->psk_qos->frmbrst_state =
216*4882a593Smuzhiyun 			(val == 1) ? FRMBRST_ENABLED : FRMBRST_DISABLED;
217*4882a593Smuzhiyun 	}
218*4882a593Smuzhiyun 	return BCME_OK;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun 
221*4882a593Smuzhiyun int
dhd_deinit_sock_flows_buf(dhd_info_t * dhd)222*4882a593Smuzhiyun dhd_deinit_sock_flows_buf(dhd_info_t *dhd)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun 	if (dhd == NULL)
225*4882a593Smuzhiyun 		return BCME_BADARG;
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	if (dhd->psk_qos->sk_fl) {
228*4882a593Smuzhiyun 		MFREE(dhd->pub.osh, dhd->psk_qos->sk_fl,
229*4882a593Smuzhiyun 			sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW);
230*4882a593Smuzhiyun 		dhd->psk_qos->sk_fl = NULL;
231*4882a593Smuzhiyun 	}
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	osl_spin_lock_deinit(dhd->pub.osh, dhd->psk_qos->list_lock);
234*4882a593Smuzhiyun 	MFREE(dhd->pub.osh, dhd->psk_qos, sizeof(dhd_sock_qos_info_t));
235*4882a593Smuzhiyun 	dhd->psk_qos = NULL;
236*4882a593Smuzhiyun 
237*4882a593Smuzhiyun 	return BCME_OK;
238*4882a593Smuzhiyun }
239*4882a593Smuzhiyun 
240*4882a593Smuzhiyun /* Caller should hold list_lock */
241*4882a593Smuzhiyun static inline struct dhd_sock_flow_info *
__dhd_find_sock_stream_info(dhd_sock_qos_info_t * psk_qos,unsigned long ino)242*4882a593Smuzhiyun __dhd_find_sock_stream_info(dhd_sock_qos_info_t *psk_qos, unsigned long ino)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl = NULL;
245*4882a593Smuzhiyun #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
246*4882a593Smuzhiyun #pragma GCC diagnostic push
247*4882a593Smuzhiyun #pragma GCC diagnostic ignored "-Wcast-qual"
248*4882a593Smuzhiyun #endif
249*4882a593Smuzhiyun 	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head,
250*4882a593Smuzhiyun 			list)  {
251*4882a593Smuzhiyun 		if (sk_fl && (sk_fl->ino == ino)) {
252*4882a593Smuzhiyun 			return sk_fl;
253*4882a593Smuzhiyun 		}
254*4882a593Smuzhiyun 	} /* end of list iteration */
255*4882a593Smuzhiyun #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
256*4882a593Smuzhiyun #pragma GCC diagnostic pop
257*4882a593Smuzhiyun #endif
258*4882a593Smuzhiyun 	/* If control comes here, the ino is not found */
259*4882a593Smuzhiyun 	DHD_INFO(("%s(): ino:%lu not found \r\n", __FUNCTION__, ino));
260*4882a593Smuzhiyun 
261*4882a593Smuzhiyun 	return NULL;
262*4882a593Smuzhiyun }
263*4882a593Smuzhiyun 
264*4882a593Smuzhiyun static struct dhd_sock_flow_info *
dhd_alloc_sock_stream_info(dhd_sock_qos_info_t * psk_qos)265*4882a593Smuzhiyun dhd_alloc_sock_stream_info(dhd_sock_qos_info_t *psk_qos)
266*4882a593Smuzhiyun {
267*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl = psk_qos->sk_fl;
268*4882a593Smuzhiyun 	int i;
269*4882a593Smuzhiyun 
270*4882a593Smuzhiyun 	for (i = 0; i < psk_qos->max_sock_fl; i++, sk_fl++) {
271*4882a593Smuzhiyun 		if (sk_fl->in_use == 0) {
272*4882a593Smuzhiyun 			DHD_ERROR(("%s: Use sk_fl %p \r\n", __FUNCTION__, sk_fl));
273*4882a593Smuzhiyun 			return sk_fl;
274*4882a593Smuzhiyun 		}
275*4882a593Smuzhiyun 	}
276*4882a593Smuzhiyun 	DHD_INFO(("No Free Socket Stream info \r\n"));
277*4882a593Smuzhiyun 	return NULL;
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun 
280*4882a593Smuzhiyun /* Caller should hold list_lock */
281*4882a593Smuzhiyun static inline void
__dhd_free_sock_stream_info(dhd_sock_qos_info_t * psk_qos,struct dhd_sock_flow_info * sk_fl)282*4882a593Smuzhiyun __dhd_free_sock_stream_info(dhd_sock_qos_info_t *psk_qos,
283*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl)
284*4882a593Smuzhiyun {
285*4882a593Smuzhiyun 	/*
286*4882a593Smuzhiyun 	 * If the socket flow getting freed is an upgraded socket flow,
287*4882a593Smuzhiyun 	 * we can upgrade one more flow.
288*4882a593Smuzhiyun 	 */
289*4882a593Smuzhiyun 	if (sk_fl->cur_up_state == 1) {
290*4882a593Smuzhiyun 		--psk_qos->num_skfl_upgraded;
291*4882a593Smuzhiyun 		ASSERT(psk_qos->num_skfl_upgraded >= 0);
292*4882a593Smuzhiyun 	}
293*4882a593Smuzhiyun 
294*4882a593Smuzhiyun 	/* Remove the flow from active list */
295*4882a593Smuzhiyun 	list_del(&sk_fl->list);
296*4882a593Smuzhiyun 
297*4882a593Smuzhiyun 	DHD_ERROR(("%s(): Cleaning Socket Flow ino:%lu psk_qos->num_skfl_upgraded=%d\r\n",
298*4882a593Smuzhiyun 		__FUNCTION__, sk_fl->ino, psk_qos->num_skfl_upgraded));
299*4882a593Smuzhiyun 
300*4882a593Smuzhiyun 	/* Clear its content */
301*4882a593Smuzhiyun 	memset_s(sk_fl, sizeof(*sk_fl), 0, sizeof(*sk_fl));
302*4882a593Smuzhiyun 
303*4882a593Smuzhiyun 	return;
304*4882a593Smuzhiyun }
305*4882a593Smuzhiyun 
306*4882a593Smuzhiyun static void
dhd_clean_idle_sock_streams(dhd_sock_qos_info_t * psk_qos)307*4882a593Smuzhiyun dhd_clean_idle_sock_streams(dhd_sock_qos_info_t *psk_qos)
308*4882a593Smuzhiyun {
309*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl = NULL, *next = NULL;
310*4882a593Smuzhiyun 	u64 now;
311*4882a593Smuzhiyun 	u64 diff;
312*4882a593Smuzhiyun 	unsigned long flags = 0;
313*4882a593Smuzhiyun 	now = local_clock();
314*4882a593Smuzhiyun 
315*4882a593Smuzhiyun 	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
318*4882a593Smuzhiyun #pragma GCC diagnostic push
319*4882a593Smuzhiyun #pragma GCC diagnostic ignored "-Wcast-qual"
320*4882a593Smuzhiyun #endif
321*4882a593Smuzhiyun 	list_for_each_entry_safe(sk_fl, next, &psk_qos->sk_fl_list_head, list)  {
322*4882a593Smuzhiyun 		if (sk_fl) {
323*4882a593Smuzhiyun 
324*4882a593Smuzhiyun 			if (sk_fl->in_use == 0) {
325*4882a593Smuzhiyun 				DHD_ERROR_RLMT(("%s:Something wrong,"
326*4882a593Smuzhiyun 					" a free sk_fl living in active stream\n",
327*4882a593Smuzhiyun 					__FUNCTION__));
328*4882a593Smuzhiyun 				DHD_ERROR_RLMT(("sk_fl:%p sk:%p ino:%lu \r\n",
329*4882a593Smuzhiyun 					sk_fl, sk_fl->sk, sk_fl->ino));
330*4882a593Smuzhiyun 				continue;
331*4882a593Smuzhiyun 			}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 			/* XXX: TODO: need to investigate properly in future.
334*4882a593Smuzhiyun 			 * it is observed that in some hosts (FC25), the
335*4882a593Smuzhiyun 			 * current timestamp is lesser than previous timestamp
336*4882a593Smuzhiyun 			 * leading to false cleanups
337*4882a593Smuzhiyun 			 */
338*4882a593Smuzhiyun 			if (now <= sk_fl->last_pkt_ns)
339*4882a593Smuzhiyun 				continue;
340*4882a593Smuzhiyun 
341*4882a593Smuzhiyun 			diff = now - sk_fl->last_pkt_ns;
342*4882a593Smuzhiyun 
343*4882a593Smuzhiyun 			/* Convert diff which is in ns to ms */
344*4882a593Smuzhiyun 			diff = div64_u64(diff, 1000000UL);
345*4882a593Smuzhiyun 			if (diff >= psk_qos->sock_idle_thresh) {
346*4882a593Smuzhiyun 				DHD_ERROR(("sk_fl->sk:%p sk_fl->i_no:%lu \r\n",
347*4882a593Smuzhiyun 					sk_fl->sk, sk_fl->ino));
348*4882a593Smuzhiyun 				if (sk_fl->cur_up_state == 1 &&
349*4882a593Smuzhiyun 					psk_qos->num_skfl_upgraded == 1) {
350*4882a593Smuzhiyun 					psk_qos->upgrade_active = FALSE;
351*4882a593Smuzhiyun 				}
352*4882a593Smuzhiyun 				__dhd_free_sock_stream_info(psk_qos, sk_fl);
353*4882a593Smuzhiyun 			}
354*4882a593Smuzhiyun 		}
355*4882a593Smuzhiyun 	} /* end of list iteration */
356*4882a593Smuzhiyun #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
357*4882a593Smuzhiyun #pragma GCC diagnostic pop
358*4882a593Smuzhiyun #endif
359*4882a593Smuzhiyun 	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
360*4882a593Smuzhiyun 
361*4882a593Smuzhiyun }
362*4882a593Smuzhiyun 
363*4882a593Smuzhiyun static inline int
__dhd_upgrade_sock_flow(dhd_info_t * dhd,struct dhd_sock_flow_info * sk_fl,struct sk_buff * skb)364*4882a593Smuzhiyun __dhd_upgrade_sock_flow(dhd_info_t *dhd,
365*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl,
366*4882a593Smuzhiyun 	struct sk_buff *skb)
367*4882a593Smuzhiyun {
368*4882a593Smuzhiyun 	dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;
369*4882a593Smuzhiyun #ifdef DHD_HP2P
370*4882a593Smuzhiyun 	dhd_pub_t *dhdp = &dhd->pub;
371*4882a593Smuzhiyun #endif
372*4882a593Smuzhiyun 	uint8 *pktdat = NULL;
373*4882a593Smuzhiyun 	struct ether_header *eh = NULL;
374*4882a593Smuzhiyun 	struct iphdr *iph = NULL;
375*4882a593Smuzhiyun 
376*4882a593Smuzhiyun 	/* Before upgrading a flow,
377*4882a593Smuzhiyun 	 * Check the bound to control the number of flows getting upgraded
378*4882a593Smuzhiyun 	 */
379*4882a593Smuzhiyun 	if ((sk_fl->rcm_up_state == 1) && (sk_fl->cur_up_state == 0)) {
380*4882a593Smuzhiyun 		if (psk_qos->num_skfl_upgraded >= psk_qos->skfl_upgrade_thresh) {
381*4882a593Smuzhiyun 			DHD_ERROR_RLMT(("%s(): Thresh hit num_skfl_upgraded:%d"
382*4882a593Smuzhiyun 				"skfl_upgrade_thresh:%d \r\n",
383*4882a593Smuzhiyun 				__FUNCTION__, psk_qos->num_skfl_upgraded,
384*4882a593Smuzhiyun 				psk_qos->skfl_upgrade_thresh));
385*4882a593Smuzhiyun 			return BCME_ERROR;
386*4882a593Smuzhiyun 		} else {
387*4882a593Smuzhiyun 			if (psk_qos->num_skfl_upgraded == 0) {
388*4882a593Smuzhiyun 				/* if no flows upgraded till now, and this is the
389*4882a593Smuzhiyun 				 * first flow to be upgraded,
390*4882a593Smuzhiyun 				 * then disable frameburst in FW.
391*4882a593Smuzhiyun 				 * The actual iovar to disable frameburst cannot
392*4882a593Smuzhiyun 				 * be fired here because Tx can happen in atomic context
393*4882a593Smuzhiyun 				 * and dhd_iovar can sleep due to proto_block lock being
394*4882a593Smuzhiyun 				 * held. Instead the flag is checked from
395*4882a593Smuzhiyun 				 * 'dhd_analyze_sock_flows' which execs in non-atomic context
396*4882a593Smuzhiyun 				 * and the iovar is fired from there
397*4882a593Smuzhiyun 				 */
398*4882a593Smuzhiyun 				DHD_TRACE(("%s: disable frameburst ..", __FUNCTION__));
399*4882a593Smuzhiyun 				psk_qos->upgrade_active = TRUE;
400*4882a593Smuzhiyun 			}
401*4882a593Smuzhiyun 			++psk_qos->num_skfl_upgraded;
402*4882a593Smuzhiyun 			DHD_ERROR_RLMT(("%s(): upgrade flow sk_fl %p,"
403*4882a593Smuzhiyun 				"num_skfl_upgraded:%d skfl_upgrade_thresh:%d \r\n",
404*4882a593Smuzhiyun 				__FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded,
405*4882a593Smuzhiyun 				psk_qos->skfl_upgrade_thresh));
406*4882a593Smuzhiyun 		}
407*4882a593Smuzhiyun 	}
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	/* Upgrade the skb */
410*4882a593Smuzhiyun #ifdef DHD_HP2P
411*4882a593Smuzhiyun 	if (dhdp->hp2p_capable)
412*4882a593Smuzhiyun 		skb->priority = TC_PRIO_CONTROL;
413*4882a593Smuzhiyun 	else
414*4882a593Smuzhiyun 		skb->priority = TC_PRIO_INTERACTIVE;
415*4882a593Smuzhiyun #else
416*4882a593Smuzhiyun 	skb->priority = TC_PRIO_INTERACTIVE;
417*4882a593Smuzhiyun #endif /* DHD_HP2P  */
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	pktdat = PKTDATA(dhd->pub.osh, skb);
420*4882a593Smuzhiyun 	eh = (struct ether_header *) pktdat;
421*4882a593Smuzhiyun 	if (pktdat && (eh->ether_type == hton16(ETHER_TYPE_IP))) {
422*4882a593Smuzhiyun 		/* 'upgrade' DSCP also, else it is observed that on
423*4882a593Smuzhiyun 		 * AP side if DSCP value is not in sync with L2 prio
424*4882a593Smuzhiyun 		 * then out of order packets are observed
425*4882a593Smuzhiyun 		 */
426*4882a593Smuzhiyun 		iph = (struct iphdr *)(pktdat + sizeof(struct ether_header));
427*4882a593Smuzhiyun 		iph->tos = DSCP_TOS_CS7;
428*4882a593Smuzhiyun 		/* re-compute ip hdr checksum
429*4882a593Smuzhiyun 		 * NOTE: this takes around 1us, need to profile more
430*4882a593Smuzhiyun 		 * accurately to get the number of cpu cycles it takes
431*4882a593Smuzhiyun 		 * in order to get a better idea of the impact of
432*4882a593Smuzhiyun 		 * re computing ip hdr chksum in data path
433*4882a593Smuzhiyun 		 */
434*4882a593Smuzhiyun 		ip_send_check(iph);
435*4882a593Smuzhiyun 	 }
436*4882a593Smuzhiyun 
437*4882a593Smuzhiyun 	/* Mark the Flow as 'upgraded' */
438*4882a593Smuzhiyun 	if (sk_fl->cur_up_state == 0)
439*4882a593Smuzhiyun 		sk_fl->cur_up_state = 1;
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	return BCME_OK;
442*4882a593Smuzhiyun }
443*4882a593Smuzhiyun 
444*4882a593Smuzhiyun static inline int
__dhd_downgrade_sock_flow(dhd_info_t * dhd,struct dhd_sock_flow_info * sk_fl,struct sk_buff * skb)445*4882a593Smuzhiyun __dhd_downgrade_sock_flow(dhd_info_t *dhd,
446*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl,
447*4882a593Smuzhiyun 	struct sk_buff *skb)
448*4882a593Smuzhiyun {
449*4882a593Smuzhiyun 	dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;
450*4882a593Smuzhiyun 
451*4882a593Smuzhiyun 	if ((sk_fl->rcm_up_state == 0) && (sk_fl->cur_up_state == 1)) {
452*4882a593Smuzhiyun 		/* sanity check */
453*4882a593Smuzhiyun 		ASSERT(psk_qos->num_skfl_upgraded > 0);
454*4882a593Smuzhiyun 		if (psk_qos->num_skfl_upgraded <= 0) {
455*4882a593Smuzhiyun 			DHD_ERROR_RLMT(("%s(): FATAL ! no upgraded flows !\n",
456*4882a593Smuzhiyun 					__FUNCTION__));
457*4882a593Smuzhiyun 			return BCME_ERROR;
458*4882a593Smuzhiyun 		}
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 		if (psk_qos->num_skfl_upgraded == 1) {
461*4882a593Smuzhiyun 			/* if this is the
462*4882a593Smuzhiyun 			 * last flow to be downgraded,
463*4882a593Smuzhiyun 			 * then re-enable frameburst in FW.
464*4882a593Smuzhiyun 			 * The actual iovar to enable frameburst cannot
465*4882a593Smuzhiyun 			 * be fired here because Tx can happen in atomic context
466*4882a593Smuzhiyun 			 * and dhd_iovar can sleep due to proto_block lock being
467*4882a593Smuzhiyun 			 * held. Instead the flag is checked from
468*4882a593Smuzhiyun 			 * 'dhd_analyze_sock_flows' which execs in non-atomic context
469*4882a593Smuzhiyun 			 * and the iovar is fired from there
470*4882a593Smuzhiyun 			 */
471*4882a593Smuzhiyun 			DHD_TRACE(("%s: enable frameburst ..", __FUNCTION__));
472*4882a593Smuzhiyun 			psk_qos->upgrade_active = FALSE;
473*4882a593Smuzhiyun 		}
474*4882a593Smuzhiyun 		--psk_qos->num_skfl_upgraded;
475*4882a593Smuzhiyun 		DHD_ERROR_RLMT(("%s(): downgrade flow sk_fl %p,"
476*4882a593Smuzhiyun 			"num_skfl_upgraded:%d \r\n",
477*4882a593Smuzhiyun 			__FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded));
478*4882a593Smuzhiyun 	}
479*4882a593Smuzhiyun 
480*4882a593Smuzhiyun 	/* Mark the Flow as 'downgraded' */
481*4882a593Smuzhiyun 	if (sk_fl->cur_up_state == 1)
482*4882a593Smuzhiyun 		sk_fl->cur_up_state = 0;
483*4882a593Smuzhiyun 
484*4882a593Smuzhiyun 	return BCME_OK;
485*4882a593Smuzhiyun }
486*4882a593Smuzhiyun 
487*4882a593Smuzhiyun /*
488*4882a593Smuzhiyun  * Update the stats of a Socket flow.
489*4882a593Smuzhiyun  * Create a new flow if need be.
490*4882a593Smuzhiyun  * If a socket flow has been recommended for upgrade, do so.
491*4882a593Smuzhiyun  */
492*4882a593Smuzhiyun void
dhd_update_sock_flows(dhd_info_t * dhd,struct sk_buff * skb)493*4882a593Smuzhiyun dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb)
494*4882a593Smuzhiyun {
495*4882a593Smuzhiyun 	struct sock *sk = NULL;
496*4882a593Smuzhiyun 	unsigned long ino = 0;
497*4882a593Smuzhiyun 	struct dhd_sock_flow_info *sk_fl = NULL;
498*4882a593Smuzhiyun 	dhd_sock_qos_info_t *psk_qos = NULL;
499*4882a593Smuzhiyun 	unsigned long flags = 0;
500*4882a593Smuzhiyun 	uint8 prio;
501*4882a593Smuzhiyun 
502*4882a593Smuzhiyun 	BCM_REFERENCE(prio);
503*4882a593Smuzhiyun 
504*4882a593Smuzhiyun 	if ((dhd == NULL) || (skb == NULL)) {
505*4882a593Smuzhiyun 		DHD_ERROR_RLMT(("%s: Invalid args \n", __FUNCTION__));
506*4882a593Smuzhiyun 		return;
507*4882a593Smuzhiyun 	}
508*4882a593Smuzhiyun 
509*4882a593Smuzhiyun 	/* If the Feature is disabled, return */
510*4882a593Smuzhiyun 	if (dhd_sock_qos_get_status(dhd) == 0)
511*4882a593Smuzhiyun 		return;
512*4882a593Smuzhiyun 
513*4882a593Smuzhiyun 	psk_qos = dhd->psk_qos;
514*4882a593Smuzhiyun 	sk = (struct sock *)PKTSOCK(dhd->pub.osh, skb);
515*4882a593Smuzhiyun 
516*4882a593Smuzhiyun 	/* TODO:
517*4882a593Smuzhiyun 	 * Some times sk is NULL, what does that mean ...
518*4882a593Smuzhiyun 	 * is it a broadcast packet generated by Network Stack ????
519*4882a593Smuzhiyun 	 */
520*4882a593Smuzhiyun 	if (sk == NULL) {
521*4882a593Smuzhiyun 		return;
522*4882a593Smuzhiyun 	}
523*4882a593Smuzhiyun 	ino = sock_i_ino(sk);
524*4882a593Smuzhiyun 
525*4882a593Smuzhiyun 	/* TODO:
526*4882a593Smuzhiyun 	 * List Lock need not be held for allocating sock stream .. optimize
527*4882a593Smuzhiyun 	 */
528*4882a593Smuzhiyun 	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
529*4882a593Smuzhiyun 
530*4882a593Smuzhiyun 	sk_fl = __dhd_find_sock_stream_info(psk_qos, ino);
531*4882a593Smuzhiyun 	if (sk_fl == NULL) {
532*4882a593Smuzhiyun 		/* Allocate new sock stream */
533*4882a593Smuzhiyun 		sk_fl = dhd_alloc_sock_stream_info(psk_qos);
534*4882a593Smuzhiyun 		if (sk_fl == NULL) {
535*4882a593Smuzhiyun 			SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
536*4882a593Smuzhiyun 			goto done;
537*4882a593Smuzhiyun 		}
538*4882a593Smuzhiyun 		else {
539*4882a593Smuzhiyun 			/* SK Flow elements updated first time */
540*4882a593Smuzhiyun 			sk_fl->in_use = 1;
541*4882a593Smuzhiyun 			sk_fl->sk = sk;
542*4882a593Smuzhiyun 			sk_fl->ino = ino;
543*4882a593Smuzhiyun 			/* TODO: Seeing a Kernel Warning ... check */
544*4882a593Smuzhiyun 			/* sk_fl->uid = sock_i_uid(sk); */
545*4882a593Smuzhiyun 			sk_fl->cur_up_state = 0;
546*4882a593Smuzhiyun 			list_add_tail(&sk_fl->list, &psk_qos->sk_fl_list_head);
547*4882a593Smuzhiyun 			DHD_ERROR(("%s(): skb %p sk %p sk_fl %p ino %lu"
548*4882a593Smuzhiyun 				" prio 0x%x \r\n", __FUNCTION__, skb,
549*4882a593Smuzhiyun 				sk, sk_fl, ino, skb->priority));
550*4882a593Smuzhiyun 		} /* end of new sk flow allocation */
551*4882a593Smuzhiyun 	} /* end of case when sk flow is found */
552*4882a593Smuzhiyun 
553*4882a593Smuzhiyun 	sk_fl->stats.tx_pkts++;
554*4882a593Smuzhiyun 	sk_fl->stats.tx_bytes += skb->len;
555*4882a593Smuzhiyun 	sk_fl->last_pkt_ns = local_clock();
556*4882a593Smuzhiyun 
557*4882a593Smuzhiyun 	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
558*4882a593Smuzhiyun 
559*4882a593Smuzhiyun 	if (sk_fl->rcm_up_state == 1) {
560*4882a593Smuzhiyun 		__dhd_upgrade_sock_flow(dhd, sk_fl, skb);
561*4882a593Smuzhiyun 	} else {
562*4882a593Smuzhiyun 		__dhd_downgrade_sock_flow(dhd, sk_fl, skb);
563*4882a593Smuzhiyun 	}
564*4882a593Smuzhiyun 
565*4882a593Smuzhiyun 	prio = PKTPRIO(skb);
566*4882a593Smuzhiyun 	DHD_INFO(("%s(): skb:%p skb->priority 0x%x prio %d sk_fl %p\r\n", __FUNCTION__, skb,
567*4882a593Smuzhiyun 		skb->priority, prio, sk_fl));
568*4882a593Smuzhiyun done:
569*4882a593Smuzhiyun 	return;
570*4882a593Smuzhiyun }
571*4882a593Smuzhiyun 
572*4882a593Smuzhiyun static int
dhd_change_frameburst_state(frameburst_state_t newstate,dhd_info_t * dhd)573*4882a593Smuzhiyun dhd_change_frameburst_state(frameburst_state_t newstate, dhd_info_t *dhd)
574*4882a593Smuzhiyun {
575*4882a593Smuzhiyun 	int ret = 0, val = 0;
576*4882a593Smuzhiyun 	dhd_sock_qos_info_t *psk_qos = NULL;
577*4882a593Smuzhiyun 
578*4882a593Smuzhiyun 	if (!dhd)
579*4882a593Smuzhiyun 		return BCME_BADARG;
580*4882a593Smuzhiyun 	if (!dhd->psk_qos)
581*4882a593Smuzhiyun 		return BCME_BADARG;
582*4882a593Smuzhiyun 
583*4882a593Smuzhiyun 	psk_qos = dhd->psk_qos;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	/* Check with the cached frameburst state on host
586*4882a593Smuzhiyun 	 * instead of querying FW frameburst state.
587*4882a593Smuzhiyun 	 * This decision is taken, because firing an iovar everytime
588*4882a593Smuzhiyun 	 * to query FW frameburst state before deciding whether to
589*4882a593Smuzhiyun 	 * changing the frameburst state or not is sub-optimal,
590*4882a593Smuzhiyun 	 * especially in the Tx path.
591*4882a593Smuzhiyun 	 */
592*4882a593Smuzhiyun 	if (psk_qos->frmbrst_state == newstate)
593*4882a593Smuzhiyun 		return BCME_BADOPTION;
594*4882a593Smuzhiyun 
595*4882a593Smuzhiyun 	val = (newstate == FRMBRST_ENABLED) ? 1 : 0;
596*4882a593Smuzhiyun 	ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
597*4882a593Smuzhiyun 		sizeof(val), TRUE, 0);
598*4882a593Smuzhiyun 	if (ret != BCME_OK) {
599*4882a593Smuzhiyun 		DHD_ERROR_RLMT(("%s: set frameburst=%d failed,"
600*4882a593Smuzhiyun 			" err=%d\n", __FUNCTION__, val, ret));
601*4882a593Smuzhiyun 	} else {
602*4882a593Smuzhiyun 		/* change the state */
603*4882a593Smuzhiyun 		DHD_INFO(("%s: set frameburst=%d\n", __FUNCTION__, val));
604*4882a593Smuzhiyun 		psk_qos->frmbrst_state = newstate;
605*4882a593Smuzhiyun 	}
606*4882a593Smuzhiyun 
607*4882a593Smuzhiyun 	return ret;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun 
/*
 * Periodic (watchdog-driven) analysis of all tracked socket flows.
 *
 * Runs dhd_qos_algo() on every flow in psk_qos->sk_fl_list_head under the
 * flow-list lock, records each flow's recommended upgrade state, and then
 * toggles FW frameburst based on whether any upgrade is active.
 *
 * @dhd:         driver info; NULL is rejected with a rate-limited error.
 * @watchdog_ms: watchdog period in ms. NOTE(review): currently unused in
 *               this function body -- presumably kept for the watchdog
 *               callback signature; confirm against the caller.
 *
 * Locking: the list walk holds SK_FL_LIST_LOCK; the frameburst ioctl is
 * issued only after the lock is dropped.
 */
void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
{
	struct dhd_sock_flow_info *sk_fl = NULL;
	dhd_sock_qos_info_t *psk_qos = NULL;
	unsigned long flags = 0;

	if (dhd == NULL) {
		DHD_ERROR_RLMT(("%s: Bad argument \r\n", __FUNCTION__));
		return;
	}

	/* Check whether the feature is disabled */
	if (dhd_sock_qos_get_status(dhd) == 0)
		return;

	psk_qos = dhd->psk_qos;

	/* drop flows that have been idle too long before analyzing */
	dhd_clean_idle_sock_streams(dhd->psk_qos);

	/* TODO: Plug in the QoS Algorithm here */
	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list)  {

		/* per-flow upgrade recommendation: 1 = upgrade, 0 = don't */
		sk_fl->rcm_up_state = dhd_qos_algo(dhd, &sk_fl->stats, &psk_qos->qos_params);

		/* TODO: Handle downgrades */

		/* update sk_flow previous elements on every sampling interval */
		sk_fl->stats.tx_pkts_prev = sk_fl->stats.tx_pkts;
		sk_fl->stats.tx_bytes_prev = sk_fl->stats.tx_bytes;

		/* TODO: Handle the condition where num_skfl_upgraded reaches the threshold */

		/* TODO: Handle the condition where we upgrade all the socket flows
		 * of the uid on which one flow is detected to be upgraded.
		 */

	} /* end of list iteration */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	/* disable frameburst in FW on the first flow upgraded */
	if (psk_qos->upgrade_active) {
		dhd_change_frameburst_state(FRMBRST_DISABLED, dhd);
	} else {
		/* if no upgraded flows remain, either after cleanup,
		 * or after a downgrade,
		 * then re-enable frameburst in FW
		 */
		dhd_change_frameburst_state(FRMBRST_ENABLED, dhd);
	}

	return;
}
670*4882a593Smuzhiyun 
/*
 * Stub: bus flow-id tracking is not implemented in this build.
 * All parameters are intentionally unused.
 */
void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
	uint32 bus_flow_id)
{
	/* silence unused-parameter warnings */
	BCM_REFERENCE(bus_flow_id);
	BCM_REFERENCE(pktbuf);
	BCM_REFERENCE(dhd);
}
679*4882a593Smuzhiyun 
680*4882a593Smuzhiyun /* ================= Sysfs interfce support functions ======================== */
681*4882a593Smuzhiyun 
/*
 * Return the on/off state of the socket QoS feature (sysfs "status" read).
 * Returns 0 (off) when dhd or its QoS context is unavailable.
 */
unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd)
{
	/* Guard psk_qos as well: the atomic_read below dereferences it,
	 * and other paths in this file (e.g. the frameburst changer)
	 * treat a NULL psk_qos as a valid bad-arg condition.
	 */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return 0;

	return (atomic_read(&dhd->psk_qos->on_off));
}
689*4882a593Smuzhiyun 
/*
 * Enable/disable the socket QoS feature (sysfs "status" write).
 *
 * Enabling shortens the DHD watchdog period to the QoS sampling interval
 * so flow analysis runs frequently; disabling restores the watchdog period
 * cached in psk_qos->watchdog_ms.
 */
void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off)
{
	/* guard psk_qos too -- it is dereferenced unconditionally below */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	atomic_set(&dhd->psk_qos->on_off, on_off);
	if (on_off) {
		dhd_watchdog_ms = QOS_SAMPLING_INTVL_MS;
		/* enable watchdog to monitor the socket flows */
		dhd_os_wd_timer(&dhd->pub, QOS_SAMPLING_INTVL_MS);
	} else {
		dhd_watchdog_ms = dhd->psk_qos->watchdog_ms;
		/* disable watchdog or set it back to the original value */
		dhd_os_wd_timer(&dhd->pub, dhd->psk_qos->watchdog_ms);
	}
	return;
}
707*4882a593Smuzhiyun 
/*
 * Dump per-socket-flow tx statistics into 'buf' (sysfs "stats" read).
 *
 * @dhd: driver info; NULL (or missing QoS context) returns -1.
 * @buf: caller-supplied output buffer.
 * @sz:  capacity of 'buf' in bytes.
 *
 * Returns bytes written + 1 (the original contract of this handler),
 * or -1 on bad argument.
 */
ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf,
	ssize_t sz)
{
	dhd_sock_qos_info_t *psk_qos = NULL;
	struct dhd_sock_flow_info *sk_fl = NULL;
	unsigned long flags = 0;
	ssize_t	ret = 0;
	ssize_t	len = 0;
	char *p = buf;

	/* TODO: Should be actual record length */
	unsigned long rec_len = 100;

	if (dhd == NULL || dhd->psk_qos == NULL)
		return -1;

	psk_qos = dhd->psk_qos;

	len = scnprintf(p, sz-ret-1, "\nino\t sk\t\t\t tx_pkts\t tx_bytes\t"
		"last_pkt_ns\r\n");
	ret += len;
	p += len;

	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list)  {
		/* Protect the buffer from over run */
		if (ret + rec_len >= sz)
			break;

		/* BUGFIX: advance 'p' by the length of the record just
		 * written, not by the cumulative total 'ret'. The old
		 * 'p += ret' jumped ahead by an ever-growing amount each
		 * iteration, corrupting the output and potentially writing
		 * past the end of 'buf'.
		 */
		len = scnprintf(p, sz-ret-1, "%lu\t %p\t %lu\t %lu\t %llu\t \r\n",
			sk_fl->ino, sk_fl->sk, sk_fl->stats.tx_pkts, sk_fl->stats.tx_bytes,
			sk_fl->last_pkt_ns);
		ret += len;
		p += len;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	return ret + 1;
}
753*4882a593Smuzhiyun 
/*
 * Reset the tx counters and last-packet timestamp of every tracked
 * socket flow (sysfs "clear stats" write). The flow entries themselves
 * are left on the list.
 */
void dhd_sock_qos_clear_stats(dhd_info_t *dhd)
{
	dhd_sock_qos_info_t *qos_info = NULL;
	struct dhd_sock_flow_info *flow = NULL;
	unsigned long lock_flags = 0;

	if (dhd == NULL)
		return;

	qos_info = dhd->psk_qos;

	/* zero all counters under the list lock */
	SK_FL_LIST_LOCK(qos_info->list_lock, lock_flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(flow, &qos_info->sk_fl_list_head, list)  {
		flow->last_pkt_ns = 0;
		flow->stats.tx_bytes = 0;
		flow->stats.tx_bytes_prev = 0;
		flow->stats.tx_pkts = 0;
		flow->stats.tx_pkts_prev = 0;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(qos_info->list_lock, lock_flags);
}
784*4882a593Smuzhiyun 
/*
 * Return the force-upgrade flag (sysfs read). 0 when the QoS context
 * is unavailable.
 */
unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd)
{
	/* guard psk_qos too -- the atomic_read below dereferences it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return 0;

	return (atomic_read(&dhd->psk_qos->force_upgrade));
}
792*4882a593Smuzhiyun 
/*
 * Set the force-upgrade flag (sysfs write). When set, dhd_qos_algo()
 * recommends an upgrade for every flow unconditionally.
 */
void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade)
{
	/* guard psk_qos too -- the atomic_set below dereferences it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	atomic_set(&dhd->psk_qos->force_upgrade, force_upgrade);
	return;
}
801*4882a593Smuzhiyun 
/*
 * Return the max number of socket flows allowed to be upgraded (sysfs
 * read). 0 when the QoS context is unavailable.
 */
int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd)
{
	/* guard psk_qos too -- it is dereferenced below */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return 0;

	return dhd->psk_qos->skfl_upgrade_thresh;
}
809*4882a593Smuzhiyun 
/*
 * Set the max number of socket flows allowed to be upgraded (sysfs
 * write). No range validation is performed here.
 */
void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd,
		int upgrade_thresh)
{
	/* guard psk_qos too -- it is dereferenced below */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	dhd->psk_qos->skfl_upgrade_thresh = upgrade_thresh;
	return;
}
819*4882a593Smuzhiyun 
/*
 * Read the average-packet-size window used by the QoS algorithm
 * (sysfs read). Out-parameters are untouched on bad argument.
 */
void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
		unsigned long *avgpktsize_low,
		unsigned long *avgpktsize_high)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL ||
		avgpktsize_low == NULL || avgpktsize_high == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*avgpktsize_low = pqos_params->avg_pkt_size_low_thresh;
	*avgpktsize_high = pqos_params->avg_pkt_size_high_thresh;
	return;
}
836*4882a593Smuzhiyun 
/*
 * Set the average-packet-size window used by the QoS algorithm
 * (sysfs write). No validation that low < high is performed here.
 */
void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
		unsigned long avgpktsize_low,
		unsigned long avgpktsize_high)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->avg_pkt_size_low_thresh = avgpktsize_low;
	pqos_params->avg_pkt_size_high_thresh = avgpktsize_high;
	return;
}
851*4882a593Smuzhiyun 
/*
 * Read the packets-per-interval window used by the QoS algorithm
 * (sysfs read). Out-parameters are untouched on bad argument.
 */
void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
		unsigned long *numpkts_low,
		unsigned long *numpkts_high)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL ||
		numpkts_low == NULL || numpkts_high == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*numpkts_low = pqos_params->num_pkts_low_thresh;
	*numpkts_high = pqos_params->num_pkts_high_thresh;
}
867*4882a593Smuzhiyun 
/*
 * Set the packets-per-interval window used by the QoS algorithm
 * (sysfs write). No validation that low < high is performed here.
 */
void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
		unsigned long numpkts_low,
		unsigned long numpkts_high)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;
	pqos_params = QOS_PARAMS(dhd);
	pqos_params->num_pkts_low_thresh = numpkts_low;
	pqos_params->num_pkts_high_thresh = numpkts_high;
	return;
}
881*4882a593Smuzhiyun 
/*
 * Read the detect-counter increment/decrement thresholds used by the
 * QoS hysteresis (sysfs read). Out-parameters untouched on bad argument.
 */
void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
		unsigned char *detectcnt_inc,
		unsigned char *detectcnt_dec)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL ||
		detectcnt_inc == NULL || detectcnt_dec == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*detectcnt_inc = pqos_params->detect_cnt_inc_thresh;
	*detectcnt_dec = pqos_params->detect_cnt_dec_thresh;
}
897*4882a593Smuzhiyun 
/*
 * Set the detect-counter increment/decrement thresholds used by the
 * QoS hysteresis (sysfs write).
 */
void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
		unsigned char detectcnt_inc,
		unsigned char detectcnt_dec)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->detect_cnt_inc_thresh = detectcnt_inc;
	pqos_params->detect_cnt_dec_thresh = detectcnt_dec;
	return;
}
912*4882a593Smuzhiyun 
/*
 * Return the detect-count value above which a flow is marked
 * low-latency (sysfs read). 0 when the QoS context is unavailable.
 */
int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return 0;

	pqos_params = QOS_PARAMS(dhd);
	return pqos_params->detect_cnt_upgrade_thresh;
}
923*4882a593Smuzhiyun 
/*
 * Set the detect-count value above which a flow is marked low-latency
 * (sysfs write).
 */
void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
		unsigned char detect_upgrd_thresh)
{
	qos_algo_params_t *pqos_params = NULL;

	/* guard psk_qos: QOS_PARAMS(dhd) presumably reaches through it */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->detect_cnt_upgrade_thresh = detect_upgrd_thresh;
}
935*4882a593Smuzhiyun 
/*
 * Return the maximum number of socket flows tracked (sysfs read).
 * 0 when the QoS context is unavailable.
 */
int dhd_sock_qos_get_maxfl(dhd_info_t *dhd)
{
	/* guard psk_qos too -- it is dereferenced below */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return 0;

	return dhd->psk_qos->max_sock_fl;
}
943*4882a593Smuzhiyun 
/*
 * Set the maximum number of socket flows tracked (sysfs write).
 */
void dhd_sock_qos_set_maxfl(dhd_info_t *dhd,
		unsigned int maxfl)
{
	/* guard psk_qos too -- it is dereferenced below */
	if (dhd == NULL || dhd->psk_qos == NULL)
		return;

	dhd->psk_qos->max_sock_fl = maxfl;
}
952*4882a593Smuzhiyun /* ================= End of Sysfs interfce support functions ======================== */
953*4882a593Smuzhiyun 
954*4882a593Smuzhiyun /* ================= QOS Algorithm ======================== */
955*4882a593Smuzhiyun 
956*4882a593Smuzhiyun /*
957*4882a593Smuzhiyun  * Operates on a flow and returns 1 for upgrade and 0 for
958*4882a593Smuzhiyun  * no up-grade - Has the potential of moving into a separate file
959*4882a593Smuzhiyun  * Takes the dhd pointer too in case if it has to access any platform
960*4882a593Smuzhiyun  * functions like MALLOC that takes dhd->pub.osh as argument.
961*4882a593Smuzhiyun  */
/*
 * Classify one socket flow for a QoS upgrade based on its tx activity
 * over the last sampling interval.
 *
 * A flow whose average packet size and packet count both fall inside the
 * configured low/high windows bumps a detect counter (capped at the
 * configured increment threshold); otherwise the counter decays toward
 * the decrement threshold. Crossing the upgrade threshold latches the
 * flow as low-latency; the latch clears only when the counter reaches 0
 * (hysteresis).
 *
 * Returns 1 to recommend an upgrade, 0 otherwise. The sysfs
 * force_upgrade flag short-circuits the algorithm entirely.
 */
int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *pqos_params)
{
	unsigned long delta_bytes, delta_pkts, avg_sz;
	int in_window = 0;

	if (!dhd || !qos || !pqos_params) {
		return 0;
	}

	/* user override via sysfs: upgrade every flow unconditionally */
	if (atomic_read(&dhd->psk_qos->force_upgrade) == 1) {
		return 1;
	}

	DHD_TRACE(("%s(): avgpktsize_thrsh %lu:%lu; "
		"numpkts_thrs %lu:%lu; detectcnt_thrs %d:%d;"
		" detectcnt_upgrd_thrs %d\n", __FUNCTION__,
		pqos_params->avg_pkt_size_low_thresh,
		pqos_params->avg_pkt_size_high_thresh,
		pqos_params->num_pkts_low_thresh,
		pqos_params->num_pkts_high_thresh,
		pqos_params->detect_cnt_inc_thresh,
		pqos_params->detect_cnt_dec_thresh,
		pqos_params->detect_cnt_upgrade_thresh));

	/* activity since the previous sampling interval */
	delta_bytes = qos->tx_bytes - qos->tx_bytes_prev;
	delta_pkts = qos->tx_pkts - qos->tx_pkts_prev;
	if (delta_bytes == 0 || delta_pkts == 0) {
		return 0;
	}

	avg_sz = delta_bytes / delta_pkts;

	in_window = (avg_sz > pqos_params->avg_pkt_size_low_thresh) &&
		(avg_sz < pqos_params->avg_pkt_size_high_thresh) &&
		(delta_pkts > pqos_params->num_pkts_low_thresh) &&
		(delta_pkts < pqos_params->num_pkts_high_thresh);

	if (in_window) {
		/* bump the detect counter, saturating at the inc threshold */
		if (qos->lowlat_detect_count < pqos_params->detect_cnt_inc_thresh) {
			qos->lowlat_detect_count++;
		}
	} else if (qos->lowlat_detect_count > pqos_params->detect_cnt_dec_thresh) {
		/* decay toward the dec threshold when outside the window */
		qos->lowlat_detect_count--;
	}

	/* hysteresis: latch on above the upgrade threshold, off at zero */
	if (qos->lowlat_detect_count > pqos_params->detect_cnt_upgrade_thresh) {
		qos->lowlat_flow = TRUE;
	} else if (qos->lowlat_detect_count == 0) {
		qos->lowlat_flow = FALSE;
	}

	DHD_TRACE(("%s(): TX:%lu:%lu:%lu, PUBG:%d::%d\n",
		__FUNCTION__, avg_sz, delta_bytes, delta_pkts,
		qos->lowlat_detect_count, qos->lowlat_flow));

	return (qos->lowlat_flow == TRUE) ? 1 : 0;
}
1017*4882a593Smuzhiyun 
/*
 * Load the compile-time default tuning parameters into pqos_params.
 * Returns BCME_BADARG on NULL input, BCME_OK otherwise.
 */
int qos_algo_params_init(qos_algo_params_t *pqos_params)
{
	if (!pqos_params) {
		return BCME_BADARG;
	}

	/* start from a clean slate, then apply the LOWLAT_* defaults */
	memset(pqos_params, 0, sizeof(*pqos_params));

	/* average-packet-size window */
	pqos_params->avg_pkt_size_high_thresh = LOWLAT_AVG_PKT_SIZE_HIGH;
	pqos_params->avg_pkt_size_low_thresh = LOWLAT_AVG_PKT_SIZE_LOW;

	/* packets-per-interval window */
	pqos_params->num_pkts_high_thresh = LOWLAT_NUM_PKTS_HIGH;
	pqos_params->num_pkts_low_thresh = LOWLAT_NUM_PKTS_LOW;

	/* detect-counter hysteresis thresholds */
	pqos_params->detect_cnt_dec_thresh = LOWLAT_DETECT_CNT_DEC_THRESH;
	pqos_params->detect_cnt_inc_thresh = LOWLAT_DETECT_CNT_INC_THRESH;
	pqos_params->detect_cnt_upgrade_thresh = LOWLAT_DETECT_CNT_UPGRADE_THRESH;

	return BCME_OK;
}
1034*4882a593Smuzhiyun /* ================= End of QOS Algorithm ======================== */
1035