/*
 * Source file for DHD QOS on Socket Flow.
 *
 * Defines a socket flow and maintains a table of socket flows
 * for further analysis in order to upgrade the QOS of the flow.
 *
 * Copyright (C) 2020, Broadcom.
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 *
 */

#include <dhd_linux_priv.h>
#include <dhd_dbg.h>
#include <bcmstdlib_s.h>
#include <bcmendian.h>
#include <dhd_linux_sock_qos.h>
#include <dhd_qos_algo.h>
#include <dhd.h>

#include <net/sock.h>
#include <linux/sock_diag.h>
#include <linux/netlink.h>
#include <linux/list.h>
#include <linux/sched.h>
#include <linux/math64.h>
#include <linux/pkt_sched.h>
#include <linux_pkt.h>
#include <net/tcp.h>

/* Maximum number of Socket Flows supported */
#define MAX_SOCK_FLOW	(1024UL)

#define SOCK_FLOW_UPGRADE_THRESHOLD	(3)
/*
 * Mark a Socket Flow as inactive and free the resources
 * if no packet is received for SOCK_IDLE_THRESHOLD_MS.
 * Note that this parameter is in milliseconds.
 */
#define SOCK_IDLE_THRESHOLD_MS	(2000UL)

#define DSCP_TOS_CS7	0xE0u
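/* CS7 is DSCP codepoint 56; the DSCP field occupies the upper six bits of
 * the IPv4 TOS byte, hence the value above is 56 << 2 == 0xE0.
 */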

extern uint dhd_watchdog_ms;

/* Defines Socket Flow */
struct dhd_sock_flow_info
{
	/* Unique identifiers */
	struct sock *sk;
	unsigned long ino;

	/* statistics */
	qos_stat_t stats;
	u64 last_pkt_ns;
	kuid_t uid;

	/* Elements related to upgrade management */

	/* 0 - No upgrade
	 * 1 - Upgrade
	 */
	unsigned int cur_up_state;
	unsigned int rcm_up_state;
	unsigned int bus_flow_id;

	/* TODO:
	 * Handling Out Of Order during upgrade.
	 * Once an upgrade is decided, we cannot hand the skb over to the
	 * FW in the upgraded Flow Ring ... it will create Out of Order packets.
	 * Instead we can have an output_q per socket flow. Once the upgrade is
	 * decided, we can start adding skbs to the output_q. The last 'skb' given
	 * to the actual Flow ring should be remembered in 'last_skb_orig_fl'.
	 * Once we get a Tx completion for last_skb_orig_fl, we can flush the
	 * contents of output_q to the 'upgraded flowring'. In this solution,
	 * we should also handle the case where output_q hits the watermark
	 * before the completion for 'last_skb_orig_fl' is received. If this
	 * condition happens, stop worrying about OOO and flush the contents of
	 * output_q. Probably the last_skb_orig_fl was not sent out due to
	 * latency in the existing flow ... the actual problem we are trying
	 * to solve.
	 */

	/* Management elements */
	struct list_head list;
	unsigned int in_use;
};
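
/*
 * Illustrative-only sketch (compiled out) of the output_q scheme described
 * in the TODO above. Every name below that is not already in this file
 * (output_q, upgrade_pending, last_skb_orig_fl, OUTPUT_Q_WATERMARK and the
 * two hooks themselves) is hypothetical, not an existing DHD API.
 */
#if 0
/* Tx path: while the upgrade is pending, park skbs instead of mixing rings */
static void dhd_sk_fl_tx_park(struct dhd_sock_flow_info *sk_fl, struct sk_buff *skb)
{
	if (sk_fl->upgrade_pending) {
		__skb_queue_tail(&sk_fl->output_q, skb);
		/* Watermark hit: give up on strict ordering and flush now */
		if (skb_queue_len(&sk_fl->output_q) >= OUTPUT_Q_WATERMARK)
			dhd_sk_fl_flush_to_upgraded_ring(sk_fl);
	}
}

/* Tx-completion hook: the original ring has drained up to the remembered skb */
static void dhd_sk_fl_txcomplete(struct dhd_sock_flow_info *sk_fl, struct sk_buff *skb)
{
	if (skb == sk_fl->last_skb_orig_fl)
		dhd_sk_fl_flush_to_upgraded_ring(sk_fl);
}
#endif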

typedef enum _frameburst_state
{
	FRMBRST_DISABLED = 0,
	FRMBRST_ENABLED = 1
} frameburst_state_t;

/* Sock QOS Module Structure */
typedef struct dhd_sock_qos_info
{
	/* Table of Socket Flows */
	struct dhd_sock_flow_info *sk_fl;
	/* maximum number of socket flows supported */
	uint32 max_sock_fl;

	/* TODO: need to make it per flow later on */
	/* global qos algo parameters */
	qos_algo_params_t qos_params;
	/* List in which active Socket Flows live */
	struct list_head sk_fl_list_head;
	void *list_lock;

	/* Idle time after which a socket flow is moved out of the active list */
	uint32 sock_idle_thresh;
	/*
	 * Keep track of the number of flows upgraded.
	 * If it reaches a threshold we should stop upgrading.
	 * This is to avoid the problem where we overwhelm
	 * the Dongle with upgraded traffic.
	 */
	int num_skfl_upgraded;
	int skfl_upgrade_thresh;

	/* flag that is set to true when the first flow is upgraded,
	 * so that FW frameburst is disabled, and set to false
	 * when no more flows are in upgraded state, so that
	 * FW frameburst is re-enabled
	 */
	bool upgrade_active;
	/* fw frameburst state */
	frameburst_state_t frmbrst_state;

	atomic_t on_off;
	atomic_t force_upgrade;

	/* required for enabling/disabling watchdog timer at runtime */
	uint watchdog_ms;
} dhd_sock_qos_info_t;

#define SK_FL_LIST_LOCK(lock, flags)	(flags) = osl_spin_lock(lock)
#define SK_FL_LIST_UNLOCK(lock, flags)	osl_spin_unlock((lock), (flags))
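/*
 * Typical usage, as followed throughout this file:
 *	unsigned long flags = 0;
 *	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
 *	... walk or modify psk_qos->sk_fl_list_head ...
 *	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
 */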

int
dhd_init_sock_flows_buf(dhd_info_t *dhd, uint watchdog_ms)
{
	unsigned long sz;
	unsigned int i;
	struct dhd_sock_flow_info *sk_fl = NULL;
	int val = 0, ret = 0;

	if (dhd == NULL)
		return BCME_BADARG;

	dhd->psk_qos = MALLOCZ(dhd->pub.osh, sizeof(dhd_sock_qos_info_t));
	if (dhd->psk_qos == NULL) {
		DHD_ERROR(("%s(): Failed to alloc psk_qos ! \n", __FUNCTION__));
		return BCME_NOMEM;
	}
	dhd->psk_qos->max_sock_fl = MAX_SOCK_FLOW;
	sz = sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW;
	dhd->psk_qos->sk_fl = MALLOCZ(dhd->pub.osh, sz);
	if (dhd->psk_qos->sk_fl == NULL) {
		DHD_ERROR(("%s(): Failed to allocate sk_fl \r\n", __FUNCTION__));
		/* do not leak psk_qos on the error path */
		MFREE(dhd->pub.osh, dhd->psk_qos, sizeof(dhd_sock_qos_info_t));
		dhd->psk_qos = NULL;
		return BCME_NOMEM;
	}

	sk_fl = dhd->psk_qos->sk_fl;
	for (i = 0; i < MAX_SOCK_FLOW; i++, sk_fl++) {
		sk_fl->in_use = 0;
	}

	dhd->psk_qos->sock_idle_thresh = SOCK_IDLE_THRESHOLD_MS;

	dhd->psk_qos->skfl_upgrade_thresh = SOCK_FLOW_UPGRADE_THRESHOLD;

	INIT_LIST_HEAD(&dhd->psk_qos->sk_fl_list_head);
	dhd->psk_qos->list_lock = osl_spin_lock_init(dhd->pub.osh);

	dhd->psk_qos->watchdog_ms = watchdog_ms;
	/* feature is DISABLED by default */
	dhd_sock_qos_set_status(dhd, 0);

	qos_algo_params_init(&dhd->psk_qos->qos_params);

	dhd->psk_qos->frmbrst_state = FRMBRST_ENABLED;
	/* Read the initial state of frameburst from FW; we cannot
	 * assume that it will always be enabled by default.
	 * We cache the FW frameburst state on the host and update the
	 * cache every time we change the state from the host during a
	 * QoS upgrade. This is done because firing an iovar every time
	 * to query the FW frameburst state, before deciding whether to
	 * change it from the host, is sub-optimal, especially in the
	 * Tx path.
	 */
	ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_GET_FAKEFRAG, (char *)&val,
			sizeof(val), FALSE, 0);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: get fw frameburst failed,"
			" err=%d\n", __FUNCTION__, ret));
	} else {
		DHD_INFO(("%s: fw frameburst = %d\n", __FUNCTION__, val));
		dhd->psk_qos->frmbrst_state =
			(val == 1) ? FRMBRST_ENABLED : FRMBRST_DISABLED;
	}
	return BCME_OK;
}

int
dhd_deinit_sock_flows_buf(dhd_info_t *dhd)
{
	if ((dhd == NULL) || (dhd->psk_qos == NULL))
		return BCME_BADARG;

	if (dhd->psk_qos->sk_fl) {
		MFREE(dhd->pub.osh, dhd->psk_qos->sk_fl,
			sizeof(struct dhd_sock_flow_info) * MAX_SOCK_FLOW);
		dhd->psk_qos->sk_fl = NULL;
	}

	osl_spin_lock_deinit(dhd->pub.osh, dhd->psk_qos->list_lock);
	MFREE(dhd->pub.osh, dhd->psk_qos, sizeof(dhd_sock_qos_info_t));
	dhd->psk_qos = NULL;

	return BCME_OK;
}

/* Caller should hold list_lock */
static inline struct dhd_sock_flow_info *
__dhd_find_sock_stream_info(dhd_sock_qos_info_t *psk_qos, unsigned long ino)
{
	struct dhd_sock_flow_info *sk_fl = NULL;
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head,
		list) {
		if (sk_fl && (sk_fl->ino == ino)) {
			return sk_fl;
		}
	} /* end of list iteration */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	/* If control comes here, the ino is not found */
	DHD_INFO(("%s(): ino:%lu not found \r\n", __FUNCTION__, ino));

	return NULL;
}

static struct dhd_sock_flow_info *
dhd_alloc_sock_stream_info(dhd_sock_qos_info_t *psk_qos)
{
	struct dhd_sock_flow_info *sk_fl = psk_qos->sk_fl;
	int i;

	for (i = 0; i < psk_qos->max_sock_fl; i++, sk_fl++) {
		if (sk_fl->in_use == 0) {
			DHD_ERROR(("%s: Use sk_fl %p \r\n", __FUNCTION__, sk_fl));
			return sk_fl;
		}
	}
	DHD_INFO(("No Free Socket Stream info \r\n"));
	return NULL;
}

/* Caller should hold list_lock */
static inline void
__dhd_free_sock_stream_info(dhd_sock_qos_info_t *psk_qos,
	struct dhd_sock_flow_info *sk_fl)
{
	/*
	 * If the socket flow getting freed is an upgraded socket flow,
	 * we can upgrade one more flow.
	 */
	if (sk_fl->cur_up_state == 1) {
		--psk_qos->num_skfl_upgraded;
		ASSERT(psk_qos->num_skfl_upgraded >= 0);
	}

	/* Remove the flow from the active list */
	list_del(&sk_fl->list);

	DHD_ERROR(("%s(): Cleaning Socket Flow ino:%lu psk_qos->num_skfl_upgraded=%d\r\n",
		__FUNCTION__, sk_fl->ino, psk_qos->num_skfl_upgraded));

	/* Clear its content */
	memset_s(sk_fl, sizeof(*sk_fl), 0, sizeof(*sk_fl));

	return;
}

static void
dhd_clean_idle_sock_streams(dhd_sock_qos_info_t *psk_qos)
{
	struct dhd_sock_flow_info *sk_fl = NULL, *next = NULL;
	u64 now;
	u64 diff;
	unsigned long flags = 0;
	now = local_clock();

	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry_safe(sk_fl, next, &psk_qos->sk_fl_list_head, list) {
		if (sk_fl) {

			if (sk_fl->in_use == 0) {
				DHD_ERROR_RLMT(("%s: Something is wrong,"
					" a free sk_fl is on the active list\n",
					__FUNCTION__));
				DHD_ERROR_RLMT(("sk_fl:%p sk:%p ino:%lu \r\n",
					sk_fl, sk_fl->sk, sk_fl->ino));
				continue;
			}

			/* XXX: TODO: need to investigate properly in future.
			 * It is observed that on some hosts (FC25) the
			 * current timestamp is less than the previous timestamp,
			 * leading to false cleanups.
			 */
			if (now <= sk_fl->last_pkt_ns)
				continue;

			diff = now - sk_fl->last_pkt_ns;

			/* Convert diff which is in ns to ms */
			diff = div64_u64(diff, 1000000UL);
			if (diff >= psk_qos->sock_idle_thresh) {
				DHD_ERROR(("sk_fl->sk:%p sk_fl->i_no:%lu \r\n",
					sk_fl->sk, sk_fl->ino));
				if (sk_fl->cur_up_state == 1 &&
					psk_qos->num_skfl_upgraded == 1) {
					psk_qos->upgrade_active = FALSE;
				}
				__dhd_free_sock_stream_info(psk_qos, sk_fl);
			}
		}
	} /* end of list iteration */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

}

static inline int
__dhd_upgrade_sock_flow(dhd_info_t *dhd,
	struct dhd_sock_flow_info *sk_fl,
	struct sk_buff *skb)
{
	dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;
#ifdef DHD_HP2P
	dhd_pub_t *dhdp = &dhd->pub;
#endif
	uint8 *pktdat = NULL;
	struct ether_header *eh = NULL;
	struct iphdr *iph = NULL;

	/* Before upgrading a flow,
	 * check the bound to control the number of flows getting upgraded
	 */
	if ((sk_fl->rcm_up_state == 1) && (sk_fl->cur_up_state == 0)) {
		if (psk_qos->num_skfl_upgraded >= psk_qos->skfl_upgrade_thresh) {
			DHD_ERROR_RLMT(("%s(): Thresh hit num_skfl_upgraded:%d"
				" skfl_upgrade_thresh:%d \r\n",
				__FUNCTION__, psk_qos->num_skfl_upgraded,
				psk_qos->skfl_upgrade_thresh));
			return BCME_ERROR;
		} else {
			if (psk_qos->num_skfl_upgraded == 0) {
				/* If no flows have been upgraded till now and this
				 * is the first flow to be upgraded,
				 * then disable frameburst in FW.
				 * The actual iovar to disable frameburst cannot
				 * be fired here because Tx can happen in atomic context
				 * and dhd_iovar can sleep due to the proto_block lock
				 * being held. Instead the flag is checked from
				 * 'dhd_analyze_sock_flows', which executes in non-atomic
				 * context, and the iovar is fired from there.
				 */
				DHD_TRACE(("%s: disable frameburst ..", __FUNCTION__));
				psk_qos->upgrade_active = TRUE;
			}
			++psk_qos->num_skfl_upgraded;
			DHD_ERROR_RLMT(("%s(): upgrade flow sk_fl %p,"
				" num_skfl_upgraded:%d skfl_upgrade_thresh:%d \r\n",
				__FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded,
				psk_qos->skfl_upgrade_thresh));
		}
	}

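	/*
	 * TC_PRIO_CONTROL (7) and TC_PRIO_INTERACTIVE (6) both map to 802.1D
	 * priorities that fall into the WMM AC_VO access category; priority 7
	 * is used on HP2P-capable firmware, presumably so that these packets
	 * are steered onto the HP2P high-priority ring.
	 */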
	/* Upgrade the skb */
#ifdef DHD_HP2P
	if (dhdp->hp2p_capable)
		skb->priority = TC_PRIO_CONTROL;
	else
		skb->priority = TC_PRIO_INTERACTIVE;
#else
	skb->priority = TC_PRIO_INTERACTIVE;
#endif /* DHD_HP2P */

	pktdat = PKTDATA(dhd->pub.osh, skb);
	eh = (struct ether_header *) pktdat;
	if (pktdat && (eh->ether_type == hton16(ETHER_TYPE_IP))) {
		/* 'Upgrade' the DSCP as well; otherwise it is observed that,
		 * on the AP side, if the DSCP value is not in sync with the
		 * L2 prio, out of order packets are seen.
		 */
		iph = (struct iphdr *)(pktdat + sizeof(struct ether_header));
		iph->tos = DSCP_TOS_CS7;
		/* Re-compute the ip hdr checksum.
		 * NOTE: this takes around 1us; need to profile more
		 * accurately to get the number of cpu cycles it takes,
		 * in order to get a better idea of the impact of
		 * re-computing the ip hdr chksum in the data path.
		 */
		ip_send_check(iph);
	}

	/* Mark the Flow as 'upgraded' */
	if (sk_fl->cur_up_state == 0)
		sk_fl->cur_up_state = 1;

	return BCME_OK;
}

static inline int
__dhd_downgrade_sock_flow(dhd_info_t *dhd,
	struct dhd_sock_flow_info *sk_fl,
	struct sk_buff *skb)
{
	dhd_sock_qos_info_t *psk_qos = dhd->psk_qos;

	if ((sk_fl->rcm_up_state == 0) && (sk_fl->cur_up_state == 1)) {
		/* sanity check */
		ASSERT(psk_qos->num_skfl_upgraded > 0);
		if (psk_qos->num_skfl_upgraded <= 0) {
			DHD_ERROR_RLMT(("%s(): FATAL ! no upgraded flows !\n",
				__FUNCTION__));
			return BCME_ERROR;
		}

		if (psk_qos->num_skfl_upgraded == 1) {
			/* If this is the last flow to be downgraded,
			 * then re-enable frameburst in FW.
			 * The actual iovar to enable frameburst cannot
			 * be fired here because Tx can happen in atomic context
			 * and dhd_iovar can sleep due to the proto_block lock
			 * being held. Instead the flag is checked from
			 * 'dhd_analyze_sock_flows', which executes in non-atomic
			 * context, and the iovar is fired from there.
			 */
			DHD_TRACE(("%s: enable frameburst ..", __FUNCTION__));
			psk_qos->upgrade_active = FALSE;
		}
		--psk_qos->num_skfl_upgraded;
		DHD_ERROR_RLMT(("%s(): downgrade flow sk_fl %p,"
			" num_skfl_upgraded:%d \r\n",
			__FUNCTION__, sk_fl, psk_qos->num_skfl_upgraded));
	}

	/* Mark the Flow as 'downgraded' */
	if (sk_fl->cur_up_state == 1)
		sk_fl->cur_up_state = 0;

	return BCME_OK;
}

/*
 * Update the stats of a Socket flow.
 * Create a new flow if need be.
 * If a socket flow has been recommended for upgrade, do so.
 */
void
dhd_update_sock_flows(dhd_info_t *dhd, struct sk_buff *skb)
{
	struct sock *sk = NULL;
	unsigned long ino = 0;
	struct dhd_sock_flow_info *sk_fl = NULL;
	dhd_sock_qos_info_t *psk_qos = NULL;
	unsigned long flags = 0;
	uint8 prio;

	BCM_REFERENCE(prio);

	if ((dhd == NULL) || (skb == NULL)) {
		DHD_ERROR_RLMT(("%s: Invalid args \n", __FUNCTION__));
		return;
	}

	/* If the Feature is disabled, return */
	if (dhd_sock_qos_get_status(dhd) == 0)
		return;

	psk_qos = dhd->psk_qos;
	sk = (struct sock *)PKTSOCK(dhd->pub.osh, skb);

	/* TODO:
	 * Sometimes sk is NULL; what does that mean ...
	 * is it a broadcast packet generated by the Network Stack ????
	 */
	if (sk == NULL) {
		return;
	}
	ino = sock_i_ino(sk);

	/* TODO:
	 * List Lock need not be held for allocating a sock stream .. optimize
	 */
	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);

	sk_fl = __dhd_find_sock_stream_info(psk_qos, ino);
	if (sk_fl == NULL) {
		/* Allocate a new sock stream */
		sk_fl = dhd_alloc_sock_stream_info(psk_qos);
		if (sk_fl == NULL) {
			SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);
			goto done;
		}
		else {
			/* SK Flow elements updated the first time */
			sk_fl->in_use = 1;
			sk_fl->sk = sk;
			sk_fl->ino = ino;
			/* TODO: Seeing a Kernel Warning ... check */
			/* sk_fl->uid = sock_i_uid(sk); */
			sk_fl->cur_up_state = 0;
			list_add_tail(&sk_fl->list, &psk_qos->sk_fl_list_head);
			DHD_ERROR(("%s(): skb %p sk %p sk_fl %p ino %lu"
				" prio 0x%x \r\n", __FUNCTION__, skb,
				sk, sk_fl, ino, skb->priority));
		} /* end of new sk flow allocation */
	} /* end of handling when the sk flow is not found */

	sk_fl->stats.tx_pkts++;
	sk_fl->stats.tx_bytes += skb->len;
	sk_fl->last_pkt_ns = local_clock();

	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	if (sk_fl->rcm_up_state == 1) {
		__dhd_upgrade_sock_flow(dhd, sk_fl, skb);
	} else {
		__dhd_downgrade_sock_flow(dhd, sk_fl, skb);
	}

	prio = PKTPRIO(skb);
	DHD_INFO(("%s(): skb:%p skb->priority 0x%x prio %d sk_fl %p\r\n", __FUNCTION__, skb,
		skb->priority, prio, sk_fl));
done:
	return;
}

static int
dhd_change_frameburst_state(frameburst_state_t newstate, dhd_info_t *dhd)
{
	int ret = 0, val = 0;
	dhd_sock_qos_info_t *psk_qos = NULL;

	if (!dhd)
		return BCME_BADARG;
	if (!dhd->psk_qos)
		return BCME_BADARG;

	psk_qos = dhd->psk_qos;

	/* Check against the cached frameburst state on the host
	 * instead of querying the FW frameburst state.
	 * This is done because firing an iovar every time
	 * to query the FW frameburst state, before deciding whether to
	 * change the frameburst state, is sub-optimal,
	 * especially in the Tx path.
	 */
	if (psk_qos->frmbrst_state == newstate)
		return BCME_BADOPTION;

	val = (newstate == FRMBRST_ENABLED) ? 1 : 0;
	ret = dhd_wl_ioctl_cmd(&dhd->pub, WLC_SET_FAKEFRAG, (char *)&val,
			sizeof(val), TRUE, 0);
	if (ret != BCME_OK) {
		DHD_ERROR_RLMT(("%s: set frameburst=%d failed,"
			" err=%d\n", __FUNCTION__, val, ret));
	} else {
		/* change the state */
		DHD_INFO(("%s: set frameburst=%d\n", __FUNCTION__, val));
		psk_qos->frmbrst_state = newstate;
	}

	return ret;
}

void dhd_analyze_sock_flows(dhd_info_t *dhd, uint32 watchdog_ms)
{
	struct dhd_sock_flow_info *sk_fl = NULL;
	dhd_sock_qos_info_t *psk_qos = NULL;
	unsigned long flags = 0;

	if (dhd == NULL) {
		DHD_ERROR_RLMT(("%s: Bad argument \r\n", __FUNCTION__));
		return;
	}

	/* Check whether the feature is disabled */
	if (dhd_sock_qos_get_status(dhd) == 0)
		return;

	psk_qos = dhd->psk_qos;

	dhd_clean_idle_sock_streams(dhd->psk_qos);

	/* TODO: Plug in the QoS Algorithm here */
	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {

		sk_fl->rcm_up_state = dhd_qos_algo(dhd, &sk_fl->stats, &psk_qos->qos_params);

		/* TODO: Handle downgrades */

		/* update sk_flow previous elements on every sampling interval */
		sk_fl->stats.tx_pkts_prev = sk_fl->stats.tx_pkts;
		sk_fl->stats.tx_bytes_prev = sk_fl->stats.tx_bytes;

		/* TODO: Handle the condition where num_skfl_upgraded reaches the threshold */

		/* TODO: Handle the condition where we upgrade all the socket flows
		 * of the uid on which one flow is detected to be upgraded.
		 */

	} /* end of list iteration */
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	/* disable frameburst in FW on the first flow upgraded */
	if (psk_qos->upgrade_active) {
		dhd_change_frameburst_state(FRMBRST_DISABLED, dhd);
	} else {
		/* if no upgraded flows remain, either after cleanup,
		 * or after a downgrade,
		 * then re-enable frameburst in FW
		 */
		dhd_change_frameburst_state(FRMBRST_ENABLED, dhd);
	}

	return;
}

void dhd_sock_qos_update_bus_flowid(dhd_info_t *dhd, void *pktbuf,
	uint32 bus_flow_id)
{
	BCM_REFERENCE(dhd);
	BCM_REFERENCE(pktbuf);
	BCM_REFERENCE(bus_flow_id);
	return;
}

/* ================= Sysfs interface support functions ======================== */

unsigned long dhd_sock_qos_get_status(dhd_info_t *dhd)
{
	if (dhd == NULL)
		return 0;

	return (atomic_read(&dhd->psk_qos->on_off));
}

void dhd_sock_qos_set_status(dhd_info_t *dhd, unsigned long on_off)
{
	if (dhd == NULL)
		return;

	atomic_set(&dhd->psk_qos->on_off, on_off);
	if (on_off) {
		dhd_watchdog_ms = QOS_SAMPLING_INTVL_MS;
		/* enable watchdog to monitor the socket flows */
		dhd_os_wd_timer(&dhd->pub, QOS_SAMPLING_INTVL_MS);
	} else {
		dhd_watchdog_ms = dhd->psk_qos->watchdog_ms;
		/* disable watchdog or set it back to the original value */
		dhd_os_wd_timer(&dhd->pub, dhd->psk_qos->watchdog_ms);
	}
	return;
}

ssize_t dhd_sock_qos_show_stats(dhd_info_t *dhd, char *buf,
	ssize_t sz)
{
	dhd_sock_qos_info_t *psk_qos = NULL;
	struct dhd_sock_flow_info *sk_fl = NULL;
	unsigned long flags = 0;
	ssize_t ret = 0;
	char *p = buf;

	/* TODO: Should be the actual record length */
	unsigned long rec_len = 100;

	if (dhd == NULL)
		return -1;

	psk_qos = dhd->psk_qos;

	ret += scnprintf(p, sz - ret - 1, "\nino\t sk\t\t\t tx_pkts\t tx_bytes\t"
		"last_pkt_ns\r\n");
	p = buf + ret;

	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
		/* Protect the buffer from overrun */
		if (ret + rec_len >= sz)
			break;

		ret += scnprintf(p, sz - ret - 1, "%lu\t %p\t %lu\t %lu\t %llu\t \r\n",
			sk_fl->ino, sk_fl->sk, sk_fl->stats.tx_pkts, sk_fl->stats.tx_bytes,
			sk_fl->last_pkt_ns);

		/* 'ret' is the cumulative length, so re-base the write pointer
		 * on 'buf' rather than advancing 'p' by the running total.
		 */
		p = buf + ret;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	return ret + 1;
}

void dhd_sock_qos_clear_stats(dhd_info_t *dhd)
{
	dhd_sock_qos_info_t *psk_qos = NULL;
	struct dhd_sock_flow_info *sk_fl = NULL;
	unsigned long flags = 0;

	if (dhd == NULL)
		return;

	psk_qos = dhd->psk_qos;

	SK_FL_LIST_LOCK(psk_qos->list_lock, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif
	list_for_each_entry(sk_fl, &psk_qos->sk_fl_list_head, list) {
		sk_fl->stats.tx_pkts = 0;
		sk_fl->stats.tx_bytes = 0;
		sk_fl->stats.tx_pkts_prev = 0;
		sk_fl->stats.tx_bytes_prev = 0;
		sk_fl->last_pkt_ns = 0;
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif
	SK_FL_LIST_UNLOCK(psk_qos->list_lock, flags);

	return;
}

unsigned long dhd_sock_qos_get_force_upgrade(dhd_info_t *dhd)
{
	if (dhd == NULL)
		return 0;

	return (atomic_read(&dhd->psk_qos->force_upgrade));
}

void dhd_sock_qos_set_force_upgrade(dhd_info_t *dhd, unsigned long force_upgrade)
{
	if (dhd == NULL)
		return;

	atomic_set(&dhd->psk_qos->force_upgrade, force_upgrade);
	return;
}

int dhd_sock_qos_get_numfl_upgrd_thresh(dhd_info_t *dhd)
{
	if (dhd == NULL)
		return 0;

	return dhd->psk_qos->skfl_upgrade_thresh;
}

void dhd_sock_qos_set_numfl_upgrd_thresh(dhd_info_t *dhd,
	int upgrade_thresh)
{
	if (dhd == NULL)
		return;

	dhd->psk_qos->skfl_upgrade_thresh = upgrade_thresh;
	return;
}

void dhd_sock_qos_get_avgpktsize_thresh(dhd_info_t *dhd,
	unsigned long *avgpktsize_low,
	unsigned long *avgpktsize_high)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL || avgpktsize_low == NULL ||
		avgpktsize_high == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*avgpktsize_low = pqos_params->avg_pkt_size_low_thresh;
	*avgpktsize_high = pqos_params->avg_pkt_size_high_thresh;
	return;
}

void dhd_sock_qos_set_avgpktsize_thresh(dhd_info_t *dhd,
	unsigned long avgpktsize_low,
	unsigned long avgpktsize_high)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->avg_pkt_size_low_thresh = avgpktsize_low;
	pqos_params->avg_pkt_size_high_thresh = avgpktsize_high;
	return;
}

void dhd_sock_qos_get_numpkts_thresh(dhd_info_t *dhd,
	unsigned long *numpkts_low,
	unsigned long *numpkts_high)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL || numpkts_low == NULL ||
		numpkts_high == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*numpkts_low = pqos_params->num_pkts_low_thresh;
	*numpkts_high = pqos_params->num_pkts_high_thresh;
}

void dhd_sock_qos_set_numpkts_thresh(dhd_info_t *dhd,
	unsigned long numpkts_low,
	unsigned long numpkts_high)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL)
		return;
	pqos_params = QOS_PARAMS(dhd);
	pqos_params->num_pkts_low_thresh = numpkts_low;
	pqos_params->num_pkts_high_thresh = numpkts_high;
	return;
}

void dhd_sock_qos_get_detectcnt_thresh(dhd_info_t *dhd,
	unsigned char *detectcnt_inc,
	unsigned char *detectcnt_dec)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL || detectcnt_inc == NULL ||
		detectcnt_dec == NULL) {
		return;
	}

	pqos_params = QOS_PARAMS(dhd);
	*detectcnt_inc = pqos_params->detect_cnt_inc_thresh;
	*detectcnt_dec = pqos_params->detect_cnt_dec_thresh;
}

void dhd_sock_qos_set_detectcnt_thresh(dhd_info_t *dhd,
	unsigned char detectcnt_inc,
	unsigned char detectcnt_dec)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->detect_cnt_inc_thresh = detectcnt_inc;
	pqos_params->detect_cnt_dec_thresh = detectcnt_dec;
	return;
}

int dhd_sock_qos_get_detectcnt_upgrd_thresh(dhd_info_t *dhd)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL)
		return 0;

	pqos_params = QOS_PARAMS(dhd);
	return pqos_params->detect_cnt_upgrade_thresh;
}

void dhd_sock_qos_set_detectcnt_upgrd_thresh(dhd_info_t *dhd,
	unsigned char detect_upgrd_thresh)
{
	qos_algo_params_t *pqos_params = NULL;

	if (dhd == NULL)
		return;

	pqos_params = QOS_PARAMS(dhd);
	pqos_params->detect_cnt_upgrade_thresh = detect_upgrd_thresh;
}

int dhd_sock_qos_get_maxfl(dhd_info_t *dhd)
{
	if (dhd == NULL)
		return 0;

	return dhd->psk_qos->max_sock_fl;
}

void dhd_sock_qos_set_maxfl(dhd_info_t *dhd,
	unsigned int maxfl)
{
	if (dhd == NULL)
		return;

	/* The flow table is allocated once, for MAX_SOCK_FLOW entries,
	 * so the runtime limit must not exceed that.
	 */
	if (maxfl > MAX_SOCK_FLOW)
		maxfl = MAX_SOCK_FLOW;

	dhd->psk_qos->max_sock_fl = maxfl;
}
/* ================= End of Sysfs interface support functions ======================== */

/* ================= QOS Algorithm ======================== */

/*
 * Operates on a flow and returns 1 for upgrade and 0 for
 * no upgrade - has the potential of moving into a separate file.
 * Takes the dhd pointer too, in case it has to access any platform
 * functions like MALLOC that take dhd->pub.osh as an argument.
 */
int dhd_qos_algo(dhd_info_t *dhd, qos_stat_t *qos, qos_algo_params_t *pqos_params)
{
	unsigned long tx_bytes, tx_pkts, tx_avg_pkt_size;

	if (!dhd || !qos || !pqos_params) {
		return 0;
	}

	/* if the user has set the sysfs variable to force upgrade */
	if (atomic_read(&dhd->psk_qos->force_upgrade) == 1) {
		return 1;
	}

	DHD_TRACE(("%s(): avgpktsize_thrsh %lu:%lu; "
		"numpkts_thrs %lu:%lu; detectcnt_thrs %d:%d;"
		" detectcnt_upgrd_thrs %d\n", __FUNCTION__,
		pqos_params->avg_pkt_size_low_thresh,
		pqos_params->avg_pkt_size_high_thresh,
		pqos_params->num_pkts_low_thresh,
		pqos_params->num_pkts_high_thresh,
		pqos_params->detect_cnt_inc_thresh,
		pqos_params->detect_cnt_dec_thresh,
		pqos_params->detect_cnt_upgrade_thresh));

	tx_bytes = qos->tx_bytes - qos->tx_bytes_prev;
	tx_pkts = qos->tx_pkts - qos->tx_pkts_prev;
	if ((tx_bytes == 0) || (tx_pkts == 0)) {
		return 0;
	}

	tx_avg_pkt_size = tx_bytes / tx_pkts;

	if ((tx_avg_pkt_size > pqos_params->avg_pkt_size_low_thresh) &&
		(tx_avg_pkt_size < pqos_params->avg_pkt_size_high_thresh) &&
		(tx_pkts > pqos_params->num_pkts_low_thresh) &&
		(tx_pkts < pqos_params->num_pkts_high_thresh)) {
		if (qos->lowlat_detect_count < pqos_params->detect_cnt_inc_thresh) {
			qos->lowlat_detect_count++;
		}
	} else if (qos->lowlat_detect_count > pqos_params->detect_cnt_dec_thresh) {
		qos->lowlat_detect_count--;
	}

	if (qos->lowlat_detect_count > pqos_params->detect_cnt_upgrade_thresh) {
		qos->lowlat_flow = TRUE;
	} else if (qos->lowlat_detect_count == 0) {
		qos->lowlat_flow = FALSE;
	}

	DHD_TRACE(("%s(): TX:%lu:%lu:%lu, PUBG:%d::%d\n",
		__FUNCTION__, tx_avg_pkt_size, tx_bytes, tx_pkts,
		qos->lowlat_detect_count, qos->lowlat_flow));

	return (qos->lowlat_flow == TRUE) ? 1 : 0;
}
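
/*
 * Worked example of the hysteresis above (illustrative thresholds, not the
 * shipped LOWLAT_* defaults): with detect_cnt_inc_thresh=10,
 * detect_cnt_dec_thresh=0 and detect_cnt_upgrade_thresh=4, a flow whose
 * per-interval average packet size and packet count both stay inside the
 * (low, high) windows bumps lowlat_detect_count once per sampling interval;
 * after five such intervals the count exceeds 4 and the flow is recommended
 * for upgrade. Intervals outside the windows decrement the count, and only
 * once it decays to 0 is the flow marked non-low-latency again.
 */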

int qos_algo_params_init(qos_algo_params_t *pqos_params)
{
	if (!pqos_params)
		return BCME_BADARG;

	memset(pqos_params, 0, sizeof(*pqos_params));
	pqos_params->avg_pkt_size_low_thresh = LOWLAT_AVG_PKT_SIZE_LOW;
	pqos_params->avg_pkt_size_high_thresh = LOWLAT_AVG_PKT_SIZE_HIGH;
	pqos_params->num_pkts_low_thresh = LOWLAT_NUM_PKTS_LOW;
	pqos_params->num_pkts_high_thresh = LOWLAT_NUM_PKTS_HIGH;
	pqos_params->detect_cnt_inc_thresh = LOWLAT_DETECT_CNT_INC_THRESH;
	pqos_params->detect_cnt_dec_thresh = LOWLAT_DETECT_CNT_DEC_THRESH;
	pqos_params->detect_cnt_upgrade_thresh = LOWLAT_DETECT_CNT_UPGRADE_THRESH;

	return BCME_OK;
}
/* ================= End of QOS Algorithm ======================== */