xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_common.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Broadcom Dongle Host Driver (DHD), common DHD core.
3  *
4  * Copyright (C) 2020, Broadcom.
5  *
6  *      Unless you and Broadcom execute a separate written software license
7  * agreement governing use of this software, this software is licensed to you
8  * under the terms of the GNU General Public License version 2 (the "GPL"),
9  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10  * following added to such license:
11  *
12  *      As a special exception, the copyright holders of this software give you
13  * permission to link this software with independent modules, and to copy and
14  * distribute the resulting executable under terms of your choice, provided that
15  * you also meet, for each linked independent module, the terms and conditions of
16  * the license of that module.  An independent module is a module which is not
17  * derived from this software.  The special exception does not apply to any
18  * modifications of the software.
19  *
20  *
21  * <<Broadcom-WL-IPTag/Open:>>
22  *
23  * $Id$
24  */
25 #include <typedefs.h>
26 #include <osl.h>
27 
28 #include <epivers.h>
29 #include <bcmutils.h>
30 #include <bcmstdlib_s.h>
31 
32 #include <bcmendian.h>
33 #include <dngl_stats.h>
34 #include <dhd.h>
35 #include <dhd_ip.h>
36 #include <bcmevent.h>
37 #include <dhdioctl.h>
38 #ifdef DHD_SDTC_ETB_DUMP
39 #include <bcmiov.h>
40 #endif /* DHD_SDTC_ETB_DUMP */
41 
42 #ifdef BCMDBG
43 #include <dhd_macdbg.h>
44 #endif /* BCMDBG */
45 
46 #ifdef PCIE_FULL_DONGLE
47 #include <bcmmsgbuf.h>
48 #endif /* PCIE_FULL_DONGLE */
49 
50 #ifdef SHOW_LOGTRACE
51 #include <event_log.h>
52 #endif /* SHOW_LOGTRACE */
53 
54 #ifdef BCMPCIE
55 #include <dhd_flowring.h>
56 #endif
57 
58 #include <dhd_bus.h>
59 #include <dhd_proto.h>
60 #include <bcmsdbus.h>
61 #include <dhd_dbg.h>
62 #include <802.1d.h>
63 #include <dhd_debug.h>
64 #include <dhd_dbg_ring.h>
65 #include <dhd_mschdbg.h>
66 #include <msgtrace.h>
67 #include <dhd_config.h>
68 #include <wl_android.h>
69 
70 #ifdef WL_CFG80211
71 #include <wl_cfg80211.h>
72 #include <wl_cfgvif.h>
73 #endif
74 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
75 #include <dhd_pno.h>
76 #endif /* (OEM_ANDROID) && (PNO_SUPPORT) */
77 #ifdef RTT_SUPPORT
78 #include <dhd_rtt.h>
79 #endif
80 
81 #ifdef DNGL_EVENT_SUPPORT
82 #include <dnglevent.h>
83 #endif
84 
85 #ifdef IL_BIGENDIAN
86 #include <bcmendian.h>
87 #define htod32(i) (bcmswap32(i))
88 #define htod16(i) (bcmswap16(i))
89 #define dtoh32(i) (bcmswap32(i))
90 #define dtoh16(i) (bcmswap16(i))
91 #define htodchanspec(i) htod16(i)
92 #define dtohchanspec(i) dtoh16(i)
93 #else
94 #define htod32(i) (i)
95 #define htod16(i) (i)
96 #define dtoh32(i) (i)
97 #define dtoh16(i) (i)
98 #define htodchanspec(i) (i)
99 #define dtohchanspec(i) (i)
#endif /* IL_BIGENDIAN */
101 
102 #ifdef PROP_TXSTATUS
103 #include <wlfc_proto.h>
104 #include <dhd_wlfc.h>
105 #endif
106 
107 #if defined(__linux__)
108 #include <dhd_linux.h>
109 #endif /* __linux__ */
110 
111 #ifdef DHD_WMF
112 #include <dhd_wmf_linux.h>
113 #endif /* DHD_WMF */
114 
115 #ifdef DHD_L2_FILTER
116 #include <dhd_l2_filter.h>
117 #endif /* DHD_L2_FILTER */
118 
119 #ifdef DHD_PSTA
120 #include <dhd_psta.h>
121 #endif /* DHD_PSTA */
122 #ifdef DHD_TIMESYNC
123 #include <dhd_timesync.h>
124 #endif /* DHD_TIMESYNC */
125 
126 #ifdef DHD_WET
127 #include <dhd_wet.h>
128 #endif /* DHD_WET */
129 #if defined(NDIS)
130 #include <siutils.h>
131 #endif
132 
133 #ifdef DHD_LOG_DUMP
134 #include <dhd_dbg.h>
135 #ifdef DHD_PKT_LOGGING
136 #include <dhd_pktlog.h>
137 #endif
138 #endif /* DHD_LOG_DUMP */
139 
140 #ifdef DHD_LOG_PRINT_RATE_LIMIT
141 int log_print_threshold = 0;
142 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
143 
144 #ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
145 int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL
146 		| DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;
147 int dhd_msg_level = DHD_ERROR_VAL;
148 #else
149 int dbgring_msg_level = 0;
150 /* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
151 int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
152 #endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
153 
154 #ifdef NDIS
155 extern uint wl_msg_level;
156 #endif
157 
158 #if defined(WL_WLC_SHIM)
159 #include <wl_shim.h>
160 #else
161 #if defined(NDIS)
162 #include <wl_port_if.h>
163 #endif
164 #endif /* WL_WLC_SHIM */
165 
166 #ifdef DHD_DEBUG
167 #include <sdiovar.h>
168 #endif /* DHD_DEBUG */
169 
170 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
171 #include <linux/pm_runtime.h>
172 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
173 
174 #ifdef CSI_SUPPORT
175 #include <dhd_csi.h>
176 #endif /* CSI_SUPPORT */
177 
178 #if defined(BTLOG) && !defined(BCMPCIE)
179 #error "BT logging supported only with PCIe"
180 #endif  /* defined(BTLOG) && !defined(BCMPCIE) */
181 
182 #ifdef SOFTAP
183 char fw_path2[MOD_PARAM_PATHLEN];
184 extern bool softap_enabled;
185 #endif
186 #ifdef PROP_TXSTATUS
187 extern int disable_proptx;
188 #endif /* PROP_TXSTATUS */
189 
190 #ifdef REPORT_FATAL_TIMEOUTS
191 #ifdef BCMINTERNAL
192 /*
193  * Internal Builds are used by DVT.
194  * The timeouts are not required for DVT builds, since they use IOVARs like
195  * SROM programming etc, that takes long time. So make the timeout values
196  * as 0. If DVT needs to use this feature they can enable them using IOVAR
197  *
198  * SVT any way uses external builds
199  */
200 #define SCAN_TIMEOUT_DEFAULT	0
201 #define JOIN_TIMEOUT_DEFAULT	0
202 #define BUS_TIMEOUT_DEFAULT     0
203 #define CMD_TIMEOUT_DEFAULT     0
204 #else
205 /* Default timeout value in ms */
206 #ifdef DHD_EFI
207 #define BUS_TIMEOUT_DEFAULT     800  /* 800ms */
208 #define CMD_TIMEOUT_DEFAULT     1500 /* 1.5s */
209 #define SCAN_TIMEOUT_DEFAULT    0
210 #define JOIN_TIMEOUT_DEFAULT    0
211 #else
212 #define BUS_TIMEOUT_DEFAULT     800
213 #define CMD_TIMEOUT_DEFAULT     1200
214 #define SCAN_TIMEOUT_DEFAULT    17000
215 #define JOIN_TIMEOUT_DEFAULT    7500
216 #endif /* DHD_EFI */
217 #endif /* BCMINTERNAL */
218 #endif /* REPORT_FATAL_TIMEOUTS */
219 
220 #ifdef SHOW_LOGTRACE
221 #define BYTES_AHEAD_NUM		10	/* address in map file is before these many bytes */
222 #define READ_NUM_BYTES		1000 /* read map file each time this No. of bytes */
223 #define GO_BACK_FILE_POS_NUM_BYTES	100 /* set file pos back to cur pos */
224 static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */
225 static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */
226 static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */
227 #define RAMSTART_BIT	0x01
228 #define RDSTART_BIT		0x02
229 #define RDEND_BIT		0x04
230 #define ALL_MAP_VAL		(RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
231 #endif /* SHOW_LOGTRACE */
232 
233 #ifdef SHOW_LOGTRACE
234 #if defined(LINUX) || defined(linux)
235 /* the fw file path is taken from either the module parameter at
236  * insmod time or is defined as a constant of different values
237  * for different platforms
238  */
239 extern char *st_str_file_path;
240 #else
241 static char *st_str_file_path = "rtecdc.bin";
242 #endif /* LINUX */
243 #endif /* SHOW_LOGTRACE */
244 
245 #ifdef EWP_EDL
246 typedef struct msg_hdr_edl {
247 	uint32 infobuf_ver;
248 	info_buf_payload_hdr_t pyld_hdr;
249 	msgtrace_hdr_t trace_hdr;
250 } msg_hdr_edl_t;
251 #endif /* EWP_EDL */
252 
253 #define DHD_TPUT_MAX_TX_PKTS_BATCH	1000
254 
255 /* Last connection success/failure status */
256 uint32 dhd_conn_event;
257 uint32 dhd_conn_status;
258 uint32 dhd_conn_reason;
259 
260 extern int dhd_iscan_request(void * dhdp, uint16 action);
261 extern void dhd_ind_scan_confirm(void *h, bool status);
262 extern int dhd_iscan_in_progress(void *h);
263 void dhd_iscan_lock(void);
264 void dhd_iscan_unlock(void);
265 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
266 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
267 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
268 #endif
269 
270 extern int dhd_socram_dump(struct dhd_bus *bus);
271 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
272 
273 #ifdef DNGL_EVENT_SUPPORT
274 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
275 	bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
276 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
277 	size_t pktlen);
278 #endif /* DNGL_EVENT_SUPPORT */
279 
280 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
281 static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
282 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
283 
284 #ifdef REPORT_FATAL_TIMEOUTS
285 static void dhd_set_join_error(dhd_pub_t *pub, uint32 mask);
286 #endif /* REPORT_FATAL_TIMEOUTS */
287 
288 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
289 #define MAX_IOCTL_SUSPEND_ERROR	10
290 static int ioctl_suspend_error = 0;
291 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
292 
293 /* Should ideally read this from target(taken from wlu) */
294 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
295 
296 #if defined(OEM_ANDROID)
297 /* note these variables will be used with wext */
298 bool ap_cfg_running = FALSE;
299 bool ap_fw_loaded = FALSE;
#endif /* defined(OEM_ANDROID) */
301 
302 #ifdef WLEASYMESH
303 extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
304 extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
305 #endif /* WLEASYMESH */
306 
307 #define CHIPID_MISMATCH	8
308 
309 #define DHD_VERSION "Dongle Host Driver, version " EPI_VERSION_STR "\n"
310 
311 #if defined(DHD_DEBUG) && defined(DHD_COMPILED)
312 const char dhd_version[] = DHD_VERSION DHD_COMPILED " compiled on "
313 			__DATE__ " at " __TIME__ "\n\0<TIMESTAMP>";
314 #else
315 const char dhd_version[] = DHD_VERSION;
316 #endif /* DHD_DEBUG && DHD_COMPILED */
317 
318 char fw_version[FW_VER_STR_LEN] = "\0";
319 char clm_version[CLM_VER_STR_LEN] = "\0";
320 
321 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
322 
323 void dhd_set_timer(void *bus, uint wdtick);
324 
325 #if defined(BCM_ROUTER_DHD)
326 static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
327 	trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
328 #endif
329 
330 static char* ioctl2str(uint32 ioctl);
331 
/* IOVar table: enumerator IDs for the DHD host-side iovars.
 * Each IOV_* value is matched against the `varid` field of the
 * corresponding dhd_iovars[] entry below, so the enumerators here and the
 * table entries must stay in sync.  Values are assigned sequentially from
 * 1, and many entries are conditionally compiled — the numeric values are
 * therefore build-configuration dependent and must never be used as a
 * wire/ABI contract.
 */
enum {
	IOV_VERSION = 1,
	IOV_WLMSGLEVEL,
	IOV_MSGLEVEL,
	IOV_BCMERRORSTR,
	IOV_BCMERROR,
	IOV_WDTICK,
	IOV_DUMP,
	IOV_CLEARCOUNTS,
	IOV_LOGDUMP,
	IOV_LOGCAL,
	IOV_LOGSTAMP,
	IOV_GPIOOB,
	IOV_IOCTLTIMEOUT,
	IOV_CONS,
	IOV_DCONSOLE_POLL,
#if defined(DHD_DEBUG)
	IOV_DHD_JOIN_TIMEOUT_DBG,
	IOV_SCAN_TIMEOUT,
	IOV_MEM_DEBUG,
#ifdef BCMPCIE
	IOV_FLOW_RING_DEBUG,
#endif /* BCMPCIE */
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
	IOV_PROPTXSTATUS_ENABLE,
	IOV_PROPTXSTATUS_MODE,
	IOV_PROPTXSTATUS_OPT,
#ifdef QMONITOR
	IOV_QMON_TIME_THRES,
	IOV_QMON_TIME_PERCENT,
#endif /* QMONITOR */
	IOV_PROPTXSTATUS_MODULE_IGNORE,
	IOV_PROPTXSTATUS_CREDIT_IGNORE,
	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
	IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
	IOV_BUS_TYPE,
	IOV_CHANGEMTU,
	IOV_HOSTREORDER_FLOWS,
#ifdef DHDTCPACK_SUPPRESS
	IOV_TCPACK_SUPPRESS,
#endif /* DHDTCPACK_SUPPRESS */
#ifdef DHD_WMF
	IOV_WMF_BSS_ENAB,
	IOV_WMF_UCAST_IGMP,
	IOV_WMF_MCAST_DATA_SENDUP,
#ifdef WL_IGMP_UCQUERY
	IOV_WMF_UCAST_IGMP_QUERY,
#endif /* WL_IGMP_UCQUERY */
#ifdef DHD_UCAST_UPNP
	IOV_WMF_UCAST_UPNP,
#endif /* DHD_UCAST_UPNP */
	IOV_WMF_PSTA_DISABLE,
#endif /* DHD_WMF */
#if defined(BCM_ROUTER_DHD)
	IOV_TRAFFIC_MGMT_DWM,
#endif /* BCM_ROUTER_DHD */
	IOV_AP_ISOLATE,
#ifdef DHD_L2_FILTER
	IOV_DHCP_UNICAST,
	IOV_BLOCK_PING,
	IOV_PROXY_ARP,
	IOV_GRAT_ARP,
	IOV_BLOCK_TDLS,
#endif /* DHD_L2_FILTER */
	IOV_DHD_IE,
#ifdef DHD_PSTA
	IOV_PSTA,
#endif /* DHD_PSTA */
#ifdef DHD_WET
	IOV_WET,
	IOV_WET_HOST_IPV4,
	IOV_WET_HOST_MAC,
#endif /* DHD_WET */
	IOV_CFG80211_OPMODE,
	IOV_ASSERT_TYPE,
#if defined(NDIS)
	IOV_WAKEIND,
#endif /* NDIS */
#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
	IOV_LMTEST,
#endif
#ifdef DHD_MCAST_REGEN
	IOV_MCAST_REGEN_BSS_ENABLE,
#endif
#ifdef BCMDBG
	IOV_MACDBG_PD11REGS,
	IOV_MACDBG_REGLIST,
	IOV_MACDBG_PSVMPMEMS,
#endif /* BCMDBG */
#ifdef SHOW_LOGTRACE
	IOV_DUMP_TRACE_LOG,
#endif /* SHOW_LOGTRACE */
#ifdef REPORT_FATAL_TIMEOUTS
	IOV_SCAN_TO,
	IOV_JOIN_TO,
	IOV_CMD_TO,
	IOV_OQS_TO,
#endif /* REPORT_FATAL_TIMEOUTS */
	IOV_DONGLE_TRAP_TYPE,
	IOV_DONGLE_TRAP_INFO,
	IOV_BPADDR,
	IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
#if defined(DHD_LOG_DUMP)
#if defined(DHD_EFI)
	IOV_LOG_CAPTURE_ENABLE,
#endif
	IOV_LOG_DUMP,
#endif /* DHD_LOG_DUMP */
#ifdef BTLOG
	IOV_DUMP_BT_LOG,
	IOV_BTLOG,
#endif	/* BTLOG */
#ifdef SNAPSHOT_UPLOAD
	IOV_BT_MEM_DUMP,
	IOV_BT_UPLOAD,
#endif	/* SNAPSHOT_UPLOAD */
	IOV_TPUT_TEST,
#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
	IOV_PKT_LATENCY,
#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
	IOV_DEBUG_BUF_DEST_STAT,
#ifdef DHD_PKTTS
	IOV_PKTTS_ENAB,
	IOV_PKTTS_FLOW,
#endif /* DHD_PKTTS */
#ifdef DHD_DEBUG
	IOV_INDUCE_ERROR,
#endif /* DHD_DEBUG */
#if defined(DHD_EFI)
	IOV_INTR_POLL,
#endif
	IOV_FIS_TRIGGER,
#ifdef WL_IFACE_MGMT_CONF
#ifdef WL_CFG80211
#ifdef WL_NANP2P
	IOV_CONC_DISC,
#endif /* WL_NANP2P */
#ifdef WL_IFACE_MGMT
	IOV_IFACE_POLICY,
#endif /* WL_IFACE_MGMT */
#endif /* WL_CFG80211 */
#endif /* WL_IFACE_MGMT_CONF */
#ifdef RTT_GEOFENCE_CONT
#if defined (RTT_SUPPORT) && defined (WL_NAN)
	IOV_RTT_GEOFENCE_TYPE_OVRD,
#endif /* RTT_SUPPORT && WL_NAN */
#endif /* RTT_GEOFENCE_CONT */
	IOV_FW_VBS,
#ifdef DHD_TX_PROFILE
	IOV_TX_PROFILE_TAG,
	IOV_TX_PROFILE_ENABLE,
	IOV_TX_PROFILE_DUMP,
#endif /* defined(DHD_TX_PROFILE) */
	IOV_CHECK_TRAP_ROT,
#if defined(DHD_AWDL)
	IOV_AWDL_LLC_ENABLE,
#endif
#ifdef WLEASYMESH
	IOV_1905_AL_UCAST,
	IOV_1905_AL_MCAST,
#endif /* WLEASYMESH */
	IOV_LAST /* sentinel: must remain the final enumerator */
};
498 
499 const bcm_iovar_t dhd_iovars[] = {
500 	/* name         varid                   flags   flags2 type     minlen */
501 	{"version",	IOV_VERSION,		0,	0, IOVT_BUFFER,	0},
502 	{"wlmsglevel",	IOV_WLMSGLEVEL,	0,	0,	IOVT_UINT32,	0 },
503 #ifdef DHD_DEBUG
504 	{"msglevel",	IOV_MSGLEVEL,		0,	0, IOVT_UINT32,	0},
505 	{"mem_debug",   IOV_MEM_DEBUG,  0,      0,      IOVT_BUFFER,    0 },
506 #ifdef BCMPCIE
507 	{"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
508 #endif /* BCMPCIE */
509 #ifdef NDIS
510 	{"wlmsglevel",	IOV_WLMSGLEVEL,		0,	0, IOVT_UINT32,	0},
511 #endif /* NDIS */
512 #endif /* DHD_DEBUG */
513 	{"bcmerrorstr", IOV_BCMERRORSTR,	0,	0, IOVT_BUFFER,	BCME_STRLEN},
514 	{"bcmerror",	IOV_BCMERROR,		0,	0, IOVT_INT8,	0},
515 	{"wdtick",	IOV_WDTICK,		0,	0, IOVT_UINT32,	0},
516 	{"dump",	IOV_DUMP,		0,	0, IOVT_BUFFER,	DHD_IOCTL_MAXLEN_32K},
517 	{"cons",	IOV_CONS,		0,	0, IOVT_BUFFER,	0},
518 	{"dconpoll",	IOV_DCONSOLE_POLL,	0,	0, IOVT_UINT32,	0},
519 	{"clearcounts", IOV_CLEARCOUNTS,	0,	0, IOVT_VOID,	0},
520 #ifdef BCMPERFSTATS
521 	{"logdump", IOV_LOGDUMP,		0,	0, IOVT_BUFFER,	DHD_IOCTL_MAXLEN},
522 	{"logcal",	IOV_LOGCAL,		0,	0, IOVT_UINT32,	0},
523 	{"logstamp",	IOV_LOGSTAMP,		0,	0, IOVT_BUFFER,	0},
524 #endif
525 	{"gpioob",	IOV_GPIOOB,		0,	0, IOVT_UINT32,	0},
526 	{"ioctl_timeout", IOV_IOCTLTIMEOUT,	0,	0, IOVT_UINT32,	0},
527 #ifdef PROP_TXSTATUS
528 	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	0, IOVT_BOOL,	0 },
529 	/*
530 	set the proptxtstatus operation mode:
531 	0 - Do not do any proptxtstatus flow control
532 	1 - Use implied credit from a packet status
533 	2 - Use explicit credit
534 	*/
535 	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	0, IOVT_UINT32,	0 },
536 	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	0, IOVT_UINT32,	0 },
537 #ifdef QMONITOR
538 	{"qtime_thres",	IOV_QMON_TIME_THRES,	0,	0, IOVT_UINT32,	0 },
539 	{"qtime_percent", IOV_QMON_TIME_PERCENT, 0,	0, IOVT_UINT32,	0 },
540 #endif /* QMONITOR */
541 	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
542 	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
543 	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0,  IOVT_BOOL, 0 },
544 	{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
545 #endif /* PROP_TXSTATUS */
546 	{"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
547 	{"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
548 	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
549 	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
550 #ifdef DHDTCPACK_SUPPRESS
551 	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	0, IOVT_UINT8,	0 },
552 #endif /* DHDTCPACK_SUPPRESS */
553 #ifdef DHD_WMF
554 	{"wmf_bss_enable", IOV_WMF_BSS_ENAB,	0,	0, IOVT_BOOL,	0 },
555 	{"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP,	0,	0, IOVT_BOOL,	0 },
556 	{"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP,	0,	0, IOVT_BOOL,	0 },
557 #ifdef WL_IGMP_UCQUERY
558 	{"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
559 #endif /* WL_IGMP_UCQUERY */
560 #ifdef DHD_UCAST_UPNP
561 	{"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
562 #endif /* DHD_UCAST_UPNP */
563 	{"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
564 #endif /* DHD_WMF */
565 #if defined(BCM_ROUTER_DHD)
566 	{"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
567 #endif /* BCM_ROUTER_DHD */
568 #ifdef DHD_L2_FILTER
569 	{"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
570 #endif /* DHD_L2_FILTER */
571 	{"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
572 #ifdef DHD_L2_FILTER
573 	{"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
574 	{"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
575 	{"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
576 	{"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
577 #endif /* DHD_L2_FILTER */
578 	{"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
579 #ifdef DHD_PSTA
580 	/* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */
581 	{"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
582 #endif /* DHD PSTA */
583 #ifdef DHD_WET
584 	/* WET Mode configuration. 0: DIABLED 1: WET */
585 	{"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
586 	{"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
587 	{"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
588 #endif /* DHD WET */
589 	{"op_mode",	IOV_CFG80211_OPMODE,	0,	0, IOVT_UINT32,	0 },
590 	{"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
591 #if defined(NDIS)
592 	{ "wowl_wakeind", IOV_WAKEIND, 0, 0, IOVT_UINT32, 0 },
593 #endif /* NDIS */
594 #if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
595 	{"lmtest", IOV_LMTEST,	0,	0, IOVT_UINT32,	0 },
596 #endif
597 #ifdef DHD_MCAST_REGEN
598 	{"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
599 #endif
600 #ifdef BCMDBG
601 	{"pd11regs", IOV_MACDBG_PD11REGS, 0, 0, IOVT_BUFFER, 0},
602 	{"mreglist", IOV_MACDBG_REGLIST, 0, 0, IOVT_BUFFER, 0},
603 	{"psvmpmems", IOV_MACDBG_PSVMPMEMS, 0, 0, IOVT_BUFFER, 0},
604 #endif /* BCMDBG */
605 #ifdef SHOW_LOGTRACE
606 	{"dump_trace_buf", IOV_DUMP_TRACE_LOG,	0, 0, IOVT_BUFFER,	sizeof(trace_buf_info_t) },
607 #endif /* SHOW_LOGTRACE */
608 #ifdef REPORT_FATAL_TIMEOUTS
609 	{"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
610 	{"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
611 	{"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
612 	{"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
613 #endif /* REPORT_FATAL_TIMEOUTS */
614 	{"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
615 	{"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
616 #ifdef DHD_DEBUG
617 	{"bpaddr", IOV_BPADDR,	0, 0, IOVT_BUFFER,	sizeof(sdreg_t) },
618 #endif /* DHD_DEBUG */
619 	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
620 	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
621 #if defined(DHD_LOG_DUMP)
622 #if defined(DHD_EFI)
623 	{"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
624 #endif
625 	{"log_dump", IOV_LOG_DUMP,	0, 0, IOVT_UINT8, 0},
626 #endif /* DHD_LOG_DUMP */
627 #ifdef BTLOG
628 	{"dump_bt_log", IOV_DUMP_BT_LOG, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
629 	{"btlog", IOV_BTLOG, 0, 0, IOVT_UINT32, 0 },
630 #endif	/* BTLOG */
631 #ifdef SNAPSHOT_UPLOAD
632 	{"bt_mem_dump", IOV_BT_MEM_DUMP, 0, 0, IOVT_UINT32, 0},
633 	{"bt_upload", IOV_BT_UPLOAD, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
634 #endif	/* SNAPSHOT_UPLOAD */
635 	{"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
636 	{"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
637 #ifdef DHD_PKTTS
638 	{"pktts_enab", IOV_PKTTS_ENAB, (0), 0, IOVT_BOOL, 0 },
639 	{"pktts_flow", IOV_PKTTS_FLOW, (0), 0, IOVT_BUFFER, sizeof(tput_test_t) },
640 #endif /* DHD_PKTTS */
641 #if defined(DHD_EFI)
642 	{"intr_poll", IOV_INTR_POLL, 0, 0, IOVT_BUFFER, sizeof(intr_poll_t)},
643 #endif
644 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
645 	{"pkt_latency",	IOV_PKT_LATENCY,	0,	0,	IOVT_UINT32,	0 },
646 #endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
647 #if defined(DHD_SSSR_DUMP)
648 	{"fis_trigger", IOV_FIS_TRIGGER, 0, 0, IOVT_UINT32, 0},
649 #endif
650 #ifdef DHD_DEBUG
651 	{"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
652 #endif /* DHD_DEBUG */
653 #ifdef WL_IFACE_MGMT_CONF
654 #ifdef WL_CFG80211
655 #ifdef WL_NANP2P
656 	{"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
657 #endif /* WL_NANP2P */
658 #ifdef WL_IFACE_MGMT
659 	{"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
660 #endif /* WL_IFACE_MGMT */
661 #endif /* WL_CFG80211 */
662 #endif /* WL_IFACE_MGMT_CONF */
663 #ifdef RTT_GEOFENCE_CONT
664 #if defined (RTT_SUPPORT) && defined (WL_NAN)
665 	{"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
666 #endif /* RTT_SUPPORT && WL_NAN */
667 #endif /* RTT_GEOFENCE_CONT */
668 	{"fw_verbose", IOV_FW_VBS, 0, 0, IOVT_UINT32, 0},
669 #ifdef DHD_TX_PROFILE
670 	{"tx_profile_tag", IOV_TX_PROFILE_TAG, 0, 0, IOVT_BUFFER,
671 	sizeof(dhd_tx_profile_protocol_t)},
672 	{"tx_profile_enable",	IOV_TX_PROFILE_ENABLE,	0,	0,	IOVT_BOOL,	0},
673 	{"tx_profile_dump",	IOV_TX_PROFILE_DUMP,	0,	0,	IOVT_UINT32,	0},
674 #endif /* defined(DHD_TX_PROFILE) */
675 	{"check_trap_rot", IOV_CHECK_TRAP_ROT, (0), 0, IOVT_BOOL, 0},
676 #if defined(DHD_AWDL)
677 	{"awdl_llc_enable", IOV_AWDL_LLC_ENABLE, 0, 0, IOVT_BOOL, 0},
678 #endif
679 	/* --- add new iovars *ABOVE* this line --- */
680 #ifdef WLEASYMESH
681 	{"1905_al_ucast", IOV_1905_AL_UCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
682 	{"1905_al_mcast", IOV_1905_AL_MCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
683 #endif /* WLEASYMESH */
684 	{NULL, 0, 0, 0, 0, 0 }
685 };
686 
687 #define DHD_IOVAR_BUF_SIZE	128
688 
689 #if defined(LINUX) || defined(linux) || defined(DHD_EFI)
690 fw_download_status_t
dhd_fw_download_status(dhd_pub_t * dhd_pub)691 dhd_fw_download_status(dhd_pub_t * dhd_pub)
692 {
693 	return dhd_pub->fw_download_status;
694 }
695 #endif /* defined(LINUX) || defined(linux) || defined(DHD_EFI) */
696 
/* Check whether any fatal dongle/bus error has been latched in the public
 * DHD state (dongle reset/trap, iovar or D3ACK timeout, livelock, pktid
 * audit failure, interface-op failure, scan timeout/busy, AXI error, PCIe
 * link-down or CTO recovery).  Returns TRUE when at least one error flag
 * is set, meaning further bus traffic should not be attempted.
 * Side effect: on an Android build a firmware trap additionally triggers a
 * HANG message to the framework.
 * (Note: "erros" typo is kept — the name is part of the driver-wide API.)
 */
bool
dhd_query_bus_erros(dhd_pub_t *dhdp)
{
	bool ret = FALSE;

	if (dhdp->dongle_reset) {
		DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->dongle_trap_occured) {
		DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
#ifdef OEM_ANDROID
		/* A firmware trap is unrecoverable: escalate to the Android
		 * framework via the hang-notification path.
		 */
		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
		dhd_os_send_hang_message(dhdp);
#endif /* OEM_ANDROID */
	}

	if (dhdp->iovar_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->d3ack_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->livelock_occured) {
		DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->pktid_audit_failed) {
		DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* PCIE_FULL_DONGLE */

	if (dhdp->iface_op_failed) {
		DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_timeout_occurred) {
		DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_busy_occurred) {
		DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef DNGL_AXI_ERROR_LOGGING
	if (dhdp->axi_error) {
		DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* DNGL_AXI_ERROR_LOGGING */

#if defined(BCMPCIE)
	/* Bus-level conditions are queried from the bus layer rather than
	 * from latched dhd_pub flags.
	 */
	if (dhd_bus_get_linkdown(dhdp)) {
		DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhd_bus_get_cto(dhdp)) {
		DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif

	return ret;
}
785 
786 void
dhd_clear_bus_errors(dhd_pub_t * dhdp)787 dhd_clear_bus_errors(dhd_pub_t *dhdp)
788 {
789 	if (!dhdp)
790 		return;
791 
792 	dhdp->dongle_reset = FALSE;
793 	dhdp->dongle_trap_occured = FALSE;
794 	dhdp->iovar_timeout_occured = FALSE;
795 #ifdef PCIE_FULL_DONGLE
796 	dhdp->d3ack_timeout_occured = FALSE;
797 	dhdp->livelock_occured = FALSE;
798 	dhdp->pktid_audit_failed = FALSE;
799 #endif
800 	dhdp->iface_op_failed = FALSE;
801 	dhdp->scan_timeout_occurred = FALSE;
802 	dhdp->scan_busy_occurred = FALSE;
803 #ifdef BT_OVER_PCIE
804 	dhdp->dongle_trap_due_to_bt = FALSE;
805 #endif
806 }
807 
808 #ifdef DHD_SSSR_DUMP
809 
810 /* This can be overwritten by module parameter defined in dhd_linux.c */
811 uint sssr_enab = TRUE;
812 
813 #ifdef DHD_FIS_DUMP
814 uint fis_enab = TRUE;
815 #else
816 uint fis_enab = FALSE;
817 #endif /* DHD_FIS_DUMP */
818 
819 int
dhd_sssr_mempool_init(dhd_pub_t * dhd)820 dhd_sssr_mempool_init(dhd_pub_t *dhd)
821 {
822 	dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
823 	if (dhd->sssr_mempool == NULL) {
824 		DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
825 			__FUNCTION__));
826 		return BCME_ERROR;
827 	}
828 	return BCME_OK;
829 }
830 
831 void
dhd_sssr_mempool_deinit(dhd_pub_t * dhd)832 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
833 {
834 	if (dhd->sssr_mempool) {
835 		MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
836 		dhd->sssr_mempool = NULL;
837 	}
838 }
839 
840 int
dhd_sssr_reg_info_init(dhd_pub_t * dhd)841 dhd_sssr_reg_info_init(dhd_pub_t *dhd)
842 {
843 	dhd->sssr_reg_info = (sssr_reg_info_cmn_t *) MALLOCZ(dhd->osh, sizeof(sssr_reg_info_cmn_t));
844 	if (dhd->sssr_reg_info == NULL) {
845 		DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n",
846 			__FUNCTION__));
847 		return BCME_ERROR;
848 	}
849 	return BCME_OK;
850 }
851 
852 void
dhd_sssr_reg_info_deinit(dhd_pub_t * dhd)853 dhd_sssr_reg_info_deinit(dhd_pub_t *dhd)
854 {
855 	if (dhd->sssr_reg_info) {
856 		MFREE(dhd->osh, dhd->sssr_reg_info, sizeof(sssr_reg_info_cmn_t));
857 		dhd->sssr_reg_info = NULL;
858 	}
859 }
860 
861 #ifdef DHD_PCIE_REG_ACCESS
/* Print (via DHD_ERROR) the rev-2 SSSR register-info layout previously
 * populated in dhd->sssr_reg_info: PMU, chipcommon, ARM and PCIe register
 * offsets, then the per-D11-core MAC register set including the reset
 * sequence values, and finally the digital SR memory window.
 * Debug aid only — reads state, produces no side effects beyond logging.
 */
static void
dhd_dump_sssr_reg_info_v2(dhd_pub_t *dhd)
{
	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
	/* view the common blob through the rev-2 layout */
	sssr_reg_info_v2_t *sssr_reg_info = (sssr_reg_info_v2_t *)&sssr_reg_info_cmn->rev2;
	int i, j;
	uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
	DHD_ERROR(("pmu_regs\n"));
	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
	DHD_ERROR(("chipcommon_regs\n"));
	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
		sssr_reg_info->chipcommon_regs.base_regs.intmask,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
	DHD_ERROR(("arm_regs\n"));
	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
		" resetctrl=0x%x extrsrcreq=0x%x\n",
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
		sssr_reg_info->arm_regs.wrapper_regs.extrsrcreq));
	DHD_ERROR(("pcie_regs\n"));
	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
		"clockcontrolstatus_val=0x%x extrsrcreq=0x%x\n",
		sssr_reg_info->pcie_regs.base_regs.ltrstate,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->pcie_regs.wrapper_regs.extrsrcreq));

	/* one MAC register block per D11 slice/core */
	for (i = 0; i < num_d11cores; i++) {
		DHD_ERROR(("mac_regs core[%d]\n", i));
		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
			"clockcontrolstatus_val=0x%x\n",
			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
		DHD_ERROR(("resetctrl=0x%x extrsrcreq=0x%x ioctrl=0x%x\n",
			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
			sssr_reg_info->mac_regs[i].wrapper_regs.extrsrcreq,
			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
		}
		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
	}
	DHD_ERROR(("dig_regs\n"));
	DHD_ERROR(("dig_sr_addr=0x%x dig_sr_size=0x%x\n",
		sssr_reg_info->dig_mem_info.dig_sr_addr,
		sssr_reg_info->dig_mem_info.dig_sr_size));
}
921 
/* Print the rev-3 SSSR register-info layout.  Rev-3 is a superset of
 * rev-2, so the rev-2 dump is emitted first, followed by the rev-3-only
 * fields: the firmware FIS-enable flag and the HWA reset register set with
 * its reset value sequence.  Debug logging only.
 */
static void
dhd_dump_sssr_reg_info_v3(dhd_pub_t *dhd)
{
	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
	/* view the common blob through the rev-3 layout */
	sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
	int i;

	/* rev-3 extends rev-2: dump the common portion first */
	dhd_dump_sssr_reg_info_v2(dhd);

	DHD_ERROR(("FIS Enab in fw : %d\n", sssr_reg_info->fis_enab));

	DHD_ERROR(("HWA regs for reset \n"));
	DHD_ERROR(("clkenable 0x%x, clkgatingenable 0x%x, clkext 0x%x, "
		"clkctlstatus 0x%x, ioctrl 0x%x, resetctrl 0x%x\n",
		sssr_reg_info->hwa_regs.base_regs.clkenable,
		sssr_reg_info->hwa_regs.base_regs.clkgatingenable,
		sssr_reg_info->hwa_regs.base_regs.clkext,
		sssr_reg_info->hwa_regs.base_regs.clkctlstatus,
		sssr_reg_info->hwa_regs.wrapper_regs.ioctrl,
		sssr_reg_info->hwa_regs.wrapper_regs.resetctrl));
	DHD_ERROR(("HWA regs value seq for reset \n"));
	for (i = 0; i < SSSR_HWA_RESET_SEQ_STEPS; i++) {
		DHD_ERROR(("hwa_resetseq_val[%d] 0x%x", i,
			sssr_reg_info->hwa_regs.hwa_resetseq_val[i]));
	}
}
948 
949 static void
dhd_dump_sssr_reg_info_v1(dhd_pub_t * dhd)950 dhd_dump_sssr_reg_info_v1(dhd_pub_t *dhd)
951 {
952 	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
953 	sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
954 	int i, j;
955 	uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
956 
957 	DHD_ERROR(("pmu_regs\n"));
958 	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
959 		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
960 		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
961 		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
962 		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
963 		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
964 		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
965 	DHD_ERROR(("chipcommon_regs\n"));
966 	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
967 		sssr_reg_info->chipcommon_regs.base_regs.intmask,
968 		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
969 		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
970 		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
971 	DHD_ERROR(("arm_regs\n"));
972 	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
973 		" resetctrl=0x%x itopoobb=0x%x\n",
974 		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
975 		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
976 		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
977 		sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
978 	DHD_ERROR(("pcie_regs\n"));
979 	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
980 		"clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
981 		sssr_reg_info->pcie_regs.base_regs.ltrstate,
982 		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
983 		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
984 		sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
985 	DHD_ERROR(("vasip_regs\n"));
986 	DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
987 		sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
988 		sssr_reg_info->vasip_regs.vasip_sr_addr,
989 		sssr_reg_info->vasip_regs.vasip_sr_size));
990 
991 	for (i = 0; i < num_d11cores; i++) {
992 		DHD_ERROR(("mac_regs core[%d]\n", i));
993 		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
994 			"clockcontrolstatus_val=0x%x\n",
995 			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
996 			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
997 			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
998 			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
999 		DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
1000 			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
1001 			sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
1002 			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
1003 		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
1004 			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
1005 				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
1006 		}
1007 		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
1008 	}
1009 }
1010 
1011 #endif /* DHD_PCIE_REG_ACCESS */
1012 
1013 void
dhd_dump_sssr_reg_info(dhd_pub_t * dhd)1014 dhd_dump_sssr_reg_info(dhd_pub_t *dhd)
1015 {
1016 #ifdef DHD_PCIE_REG_ACCESS
1017 	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
1018 	sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
1019 
1020 	DHD_ERROR(("************** SSSR REG INFO start version:%d ****************\n",
1021 		sssr_reg_info->version));
1022 	switch (sssr_reg_info->version) {
1023 		case SSSR_REG_INFO_VER_3 :
1024 			dhd_dump_sssr_reg_info_v3(dhd);
1025 			break;
1026 		case SSSR_REG_INFO_VER_2 :
1027 			dhd_dump_sssr_reg_info_v2(dhd);
1028 			break;
1029 		default:
1030 			dhd_dump_sssr_reg_info_v1(dhd);
1031 			break;
1032 	}
1033 	DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
1034 #endif /* DHD_PCIE_REG_ACCESS */
1035 }
1036 
1037 int
dhd_get_sssr_reg_info(dhd_pub_t * dhd)1038 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
1039 {
1040 	int ret;
1041 	/* get sssr_reg_info from firmware */
1042 	ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0,  (char *)dhd->sssr_reg_info,
1043 		sizeof(sssr_reg_info_cmn_t), FALSE);
1044 	if (ret < 0) {
1045 		DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
1046 			__FUNCTION__, ret));
1047 		return BCME_ERROR;
1048 	}
1049 
1050 	dhd_dump_sssr_reg_info(dhd);
1051 	return BCME_OK;
1052 }
1053 
/*
 * Compute the total number of bytes needed to hold the SSSR dump, based on
 * the firmware-reported register-info revision: the sum of the per-D11-core
 * SR memory sizes plus (where applicable) the VASIP or DIG region size.
 * Doubled when DHD_SSSR_DUMP_BEFORE_SR is set, since separate before/after
 * snapshots are kept.
 */
uint32
dhd_get_sssr_bufsize(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhd);

	/* The version field is at the same offset in every revision, so it is
	 * safe to read through the rev2 view before knowing the real revision.
	 */
	switch (dhd->sssr_reg_info->rev2.version) {
		case SSSR_REG_INFO_VER_3 :
			/* intentional fall through */
		case SSSR_REG_INFO_VER_2 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
			}
			/* DIG region size accounting for v2/v3 is not implemented yet */
			if ((dhd->sssr_reg_info->rev2.length >
			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
			 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
				sssr_bufsize += 0; /* TBD */
			}
			break;
		case SSSR_REG_INFO_VER_1 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
			}
			/* rev1 carries either a VASIP region or, on longer layouts
			 * that include dig_mem_info, a DIG region -- never both.
			 */
			if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
			} else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
				dig_mem_info)) && dhd->sssr_reg_info->rev1.
				dig_mem_info.dig_sr_addr) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
			}
			break;
		case SSSR_REG_INFO_VER_0 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
			}
			if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
			}
			break;
		default :
			DHD_ERROR(("invalid sssr_reg_ver"));
			/* NOTE(review): BCME_UNSUPPORTED is negative; returned through a
			 * uint32 it becomes a huge value. The visible caller
			 * (dhd_sssr_dump_init) validates the version before calling, so
			 * this path appears unreachable there -- confirm other callers.
			 */
			return BCME_UNSUPPORTED;
	}

#ifdef DHD_SSSR_DUMP_BEFORE_SR
	/* Double the size as different dumps will be saved before and after SR */
	sssr_bufsize = 2 * sssr_bufsize;
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

	return sssr_bufsize;
}
1108 
1109 int
dhd_sssr_dump_init(dhd_pub_t * dhd)1110 dhd_sssr_dump_init(dhd_pub_t *dhd)
1111 {
1112 	int i;
1113 	uint32 sssr_bufsize;
1114 	uint32 mempool_used = 0;
1115 	uint8 num_d11cores = 0;
1116 	bool alloc_sssr = FALSE;
1117 	uint32 sr_size = 0;
1118 
1119 	dhd->sssr_inited = FALSE;
1120 	if (!sssr_enab) {
1121 		DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
1122 		return BCME_OK;
1123 	}
1124 
1125 	/* check if sssr mempool is allocated */
1126 	if (dhd->sssr_mempool == NULL) {
1127 		DHD_ERROR(("%s: sssr_mempool is not allocated\n",
1128 			__FUNCTION__));
1129 		return BCME_ERROR;
1130 	}
1131 
1132 	/* check if sssr mempool is allocated */
1133 	if (dhd->sssr_reg_info == NULL) {
1134 		DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
1135 			__FUNCTION__));
1136 		return BCME_ERROR;
1137 	}
1138 
1139 	/* Get SSSR reg info */
1140 	if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
1141 		DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
1142 		printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__);
1143 		return BCME_ERROR;
1144 	}
1145 
1146 	num_d11cores = dhd_d11_slices_num_get(dhd);
1147 	/* Validate structure version and length */
1148 	switch (dhd->sssr_reg_info->rev2.version) {
1149 		case SSSR_REG_INFO_VER_3 :
1150 			if (dhd->sssr_reg_info->rev3.length != sizeof(sssr_reg_info_v3_t)) {
1151 				DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
1152 					 "mismatch on rev2\n", __FUNCTION__,
1153 					 (int)dhd->sssr_reg_info->rev3.length,
1154 					 (int)sizeof(sssr_reg_info_v3_t)));
1155 				return BCME_ERROR;
1156 			}
1157 			break;
1158 		case SSSR_REG_INFO_VER_2 :
1159 			if (dhd->sssr_reg_info->rev2.length != sizeof(sssr_reg_info_v2_t)) {
1160 				DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
1161 					 "mismatch on rev2\n", __FUNCTION__,
1162 					 (int)dhd->sssr_reg_info->rev2.length,
1163 					 (int)sizeof(sssr_reg_info_v2_t)));
1164 				return BCME_ERROR;
1165 			}
1166 			break;
1167 		case SSSR_REG_INFO_VER_1 :
1168 			if (dhd->sssr_reg_info->rev1.length != sizeof(sssr_reg_info_v1_t)) {
1169 				DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
1170 					 "mismatch on rev1\n", __FUNCTION__,
1171 					 (int)dhd->sssr_reg_info->rev1.length,
1172 					 (int)sizeof(sssr_reg_info_v1_t)));
1173 				return BCME_ERROR;
1174 			}
1175 			break;
1176 		case SSSR_REG_INFO_VER_0 :
1177 			if (dhd->sssr_reg_info->rev0.length != sizeof(sssr_reg_info_v0_t)) {
1178 				DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
1179 					 "mismatch on rev0\n", __FUNCTION__,
1180 					 (int)dhd->sssr_reg_info->rev0.length,
1181 					 (int)sizeof(sssr_reg_info_v0_t)));
1182 				return BCME_ERROR;
1183 			}
1184 			break;
1185 		default :
1186 			DHD_ERROR(("invalid sssr_reg_ver"));
1187 			return BCME_UNSUPPORTED;
1188 	}
1189 
1190 	/* validate fifo size */
1191 	sssr_bufsize = dhd_get_sssr_bufsize(dhd);
1192 	if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
1193 		DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
1194 			__FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
1195 		return BCME_ERROR;
1196 	}
1197 
1198 	/* init all pointers to NULL */
1199 	for (i = 0; i < num_d11cores; i++) {
1200 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1201 		dhd->sssr_d11_before[i] = NULL;
1202 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1203 		dhd->sssr_d11_after[i] = NULL;
1204 	}
1205 
1206 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1207 	dhd->sssr_dig_buf_before = NULL;
1208 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1209 	dhd->sssr_dig_buf_after = NULL;
1210 
1211 	/* Allocate memory */
1212 	for (i = 0; i < num_d11cores; i++) {
1213 		alloc_sssr = FALSE;
1214 		sr_size = 0;
1215 
1216 		switch (dhd->sssr_reg_info->rev2.version) {
1217 			case SSSR_REG_INFO_VER_3 :
1218 				/* intentional fall through */
1219 			case SSSR_REG_INFO_VER_2 :
1220 				if (dhd->sssr_reg_info->rev2.mac_regs[i].sr_size) {
1221 					alloc_sssr = TRUE;
1222 					sr_size = dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
1223 				}
1224 				break;
1225 			case SSSR_REG_INFO_VER_1 :
1226 				if (dhd->sssr_reg_info->rev1.mac_regs[i].sr_size) {
1227 					alloc_sssr = TRUE;
1228 					sr_size = dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
1229 				}
1230 				break;
1231 			case SSSR_REG_INFO_VER_0 :
1232 				if (dhd->sssr_reg_info->rev0.mac_regs[i].sr_size) {
1233 					alloc_sssr = TRUE;
1234 					sr_size = dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
1235 				}
1236 				break;
1237 			default :
1238 				DHD_ERROR(("invalid sssr_reg_ver"));
1239 				return BCME_UNSUPPORTED;
1240 		}
1241 
1242 		if (alloc_sssr) {
1243 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1244 			dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
1245 			mempool_used += sr_size;
1246 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1247 
1248 			dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
1249 			mempool_used += sr_size;
1250 		}
1251 	}
1252 
1253 	/* Allocate dump memory for VASIP (version 0 or 1) or digital core (version 0, 1, or 2) */
1254 	alloc_sssr = FALSE;
1255 	sr_size = 0;
1256 	switch (dhd->sssr_reg_info->rev2.version) {
1257 		case SSSR_REG_INFO_VER_3 :
1258 			/* intentional fall through */
1259 		case SSSR_REG_INFO_VER_2 :
1260 			if ((dhd->sssr_reg_info->rev2.length >
1261 			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
1262 			 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
1263 				alloc_sssr = TRUE;
1264 				sr_size = dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
1265 			}
1266 			break;
1267 		case SSSR_REG_INFO_VER_1 :
1268 			if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
1269 				alloc_sssr = TRUE;
1270 				sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
1271 			} else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
1272 				dig_mem_info)) && dhd->sssr_reg_info->rev1.
1273 				dig_mem_info.dig_sr_addr) {
1274 				alloc_sssr = TRUE;
1275 				sr_size = dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
1276 			}
1277 			break;
1278 		case SSSR_REG_INFO_VER_0 :
1279 			if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
1280 				alloc_sssr = TRUE;
1281 				sr_size = dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
1282 			}
1283 			break;
1284 		default :
1285 			DHD_ERROR(("invalid sssr_reg_ver"));
1286 			return BCME_UNSUPPORTED;
1287 	}
1288 
1289 	if (alloc_sssr) {
1290 		dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
1291 		mempool_used += sr_size;
1292 
1293 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1294 		/* DIG dump before suspend is not applicable. */
1295 		dhd->sssr_dig_buf_before = NULL;
1296 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1297 	}
1298 
1299 	dhd->sssr_inited = TRUE;
1300 
1301 	return BCME_OK;
1302 
1303 }
1304 
1305 void
dhd_sssr_dump_deinit(dhd_pub_t * dhd)1306 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
1307 {
1308 	int i;
1309 
1310 	dhd->sssr_inited = FALSE;
1311 	/* init all pointers to NULL */
1312 	for (i = 0; i < MAX_NUM_D11_CORES_WITH_SCAN; i++) {
1313 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1314 		dhd->sssr_d11_before[i] = NULL;
1315 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1316 		dhd->sssr_d11_after[i] = NULL;
1317 	}
1318 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1319 	dhd->sssr_dig_buf_before = NULL;
1320 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1321 	dhd->sssr_dig_buf_after = NULL;
1322 
1323 	return;
1324 }
1325 
1326 void
dhd_sssr_print_filepath(dhd_pub_t * dhd,char * path)1327 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
1328 {
1329 	bool print_info = FALSE;
1330 	int dump_mode;
1331 
1332 	if (!dhd || !path) {
1333 		DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
1334 			__FUNCTION__));
1335 		return;
1336 	}
1337 
1338 	if (!dhd->sssr_dump_collected) {
1339 		/* SSSR dump is not collected */
1340 		return;
1341 	}
1342 
1343 	dump_mode = dhd->sssr_dump_mode;
1344 
1345 	if (bcmstrstr(path, "core_0_before")) {
1346 		if (dhd->sssr_d11_outofreset[0] &&
1347 			dump_mode == SSSR_DUMP_MODE_SSSR) {
1348 			print_info = TRUE;
1349 		}
1350 	} else if (bcmstrstr(path, "core_0_after")) {
1351 		if (dhd->sssr_d11_outofreset[0]) {
1352 			print_info = TRUE;
1353 		}
1354 	} else if (bcmstrstr(path, "core_1_before")) {
1355 		if (dhd->sssr_d11_outofreset[1] &&
1356 			dump_mode == SSSR_DUMP_MODE_SSSR) {
1357 			print_info = TRUE;
1358 		}
1359 	} else if (bcmstrstr(path, "core_1_after")) {
1360 		if (dhd->sssr_d11_outofreset[1]) {
1361 			print_info = TRUE;
1362 		}
1363 	} else if (bcmstrstr(path, "core_2_before")) {
1364 		if (dhd->sssr_d11_outofreset[2] &&
1365 			dump_mode == SSSR_DUMP_MODE_SSSR) {
1366 			print_info = TRUE;
1367 		}
1368 	} else if (bcmstrstr(path, "core_2_after")) {
1369 		if (dhd->sssr_d11_outofreset[2]) {
1370 			print_info = TRUE;
1371 		}
1372 	} else {
1373 		print_info = TRUE;
1374 	}
1375 
1376 	if (print_info) {
1377 		DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
1378 			path, FILE_NAME_HAL_TAG));
1379 	}
1380 }
1381 #endif /* DHD_SSSR_DUMP */
1382 
1383 #ifdef DHD_SDTC_ETB_DUMP
1384 /*
1385  * sdtc: system debug trace controller
1386  * etb: embedded trace buf
1387  */
/*
 * Query the dongle for its ETB (embedded trace buffer) address info via the
 * "sdtc" iovar and cache the result in dhd->etb_addr_info. On success,
 * dhd->sdtc_etb_inited is set TRUE so a later debug dump can fetch the
 * trace memory. Best-effort: any failure simply leaves the flag FALSE.
 */
void
dhd_sdtc_etb_init(dhd_pub_t *dhd)
{
	bcm_iov_buf_t *iov_req = NULL;
	etb_addr_info_t *p_etb_addr_info = NULL;
	bcm_iov_buf_t *iov_resp = NULL;
	uint8 *buf = NULL;
	int ret = 0;
	uint16 iovlen = 0;
	uint16 version = 0;

	BCM_REFERENCE(p_etb_addr_info);
	dhd->sdtc_etb_inited = FALSE;

	/* Small buffer for the request header, full-size buffer for the reply */
	iov_req = MALLOCZ(dhd->osh, WLC_IOCTL_SMLEN);
	if (iov_req == NULL) {
		DHD_ERROR(("%s: Failed to alloc buffer for iovar request\n", __FUNCTION__));
		goto exit;
	}

	buf = MALLOCZ(dhd->osh, WLC_IOCTL_MAXLEN);
	if (buf == NULL) {
		DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__));
		goto exit;
	}

	/* fill header */
	iov_req->version = WL_SDTC_IOV_VERSION;
	iov_req->id = WL_SDTC_CMD_ETB_INFO;
	iov_req->len = sizeof(etb_addr_info_t);
	iovlen = OFFSETOF(bcm_iov_buf_t, data) + iov_req->len;

	ret = dhd_iovar(dhd, 0, "sdtc", (char *)iov_req, iovlen,
		(char *)buf, WLC_IOCTL_MAXLEN, FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* First uint16 of the response is the iov-buf version field */
	version = dtoh16(*(uint16 *)buf);
	/* Check for version */
	if (version != WL_SDTC_IOV_VERSION) {
		DHD_ERROR(("%s WL_SDTC_IOV_VERSION mis match\n", __FUNCTION__));
		goto exit;
	}
	iov_resp = (bcm_iov_buf_t *)buf;
	if (iov_resp->id == iov_req->id) {
		/* Cache the firmware-reported ETB info on the dhd instance */
		p_etb_addr_info = (etb_addr_info_t*)iov_resp->data;
		dhd->etb_addr_info.version = p_etb_addr_info->version;
		dhd->etb_addr_info.len = p_etb_addr_info->len;
		dhd->etb_addr_info.etbinfo_addr = p_etb_addr_info->etbinfo_addr;

		DHD_ERROR(("%s etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__,
			dhd->etb_addr_info.version, dhd->etb_addr_info.len,
			dhd->etb_addr_info.etbinfo_addr));
	} else {
		DHD_ERROR(("%s Unknown CMD-ID (%d) as  response for request ID %d\n",
			__FUNCTION__, iov_resp->id, iov_req->id));
		goto exit;
	}

	/* since all the requirements for SDTC and ETB are met mark the capability as TRUE */
	dhd->sdtc_etb_inited = TRUE;
	DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__, dhd->sdtc_etb_inited));
exit:
	if (iov_req) {
		MFREE(dhd->osh, iov_req, WLC_IOCTL_SMLEN);
	}
	if (buf) {
		MFREE(dhd->osh, buf, WLC_IOCTL_MAXLEN);
	}
	return;
}
1461 
/*
 * Mark SDTC/ETB support uninitialized. The dump mempool (if any) is
 * released separately by dhd_sdtc_etb_mempool_deinit().
 */
void
dhd_sdtc_etb_deinit(dhd_pub_t *dhd)
{
	dhd->sdtc_etb_inited = FALSE;
}
1467 
1468 int
dhd_sdtc_etb_mempool_init(dhd_pub_t * dhd)1469 dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd)
1470 {
1471 	dhd->sdtc_etb_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SDTC_ETB_MEMPOOL_SIZE);
1472 	if (dhd->sdtc_etb_mempool == NULL) {
1473 		DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n",
1474 			__FUNCTION__));
1475 		return BCME_ERROR;
1476 	}
1477 	return BCME_OK;
1478 }
1479 
1480 void
dhd_sdtc_etb_mempool_deinit(dhd_pub_t * dhd)1481 dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd)
1482 {
1483 	if (dhd->sdtc_etb_mempool) {
1484 		MFREE(dhd->osh, dhd->sdtc_etb_mempool, DHD_SDTC_ETB_MEMPOOL_SIZE);
1485 		dhd->sdtc_etb_mempool = NULL;
1486 	}
1487 }
1488 #endif /* DHD_SDTC_ETB_DUMP */
1489 
1490 #ifdef DHD_FW_COREDUMP
/*
 * Return (allocating on first use) the buffer that receives the firmware
 * SOC RAM snapshot after a dongle crash. On success the buffer is zeroed
 * and dhd_pub->soc_ram_length is set to 'length'; on allocation failure
 * NULL is returned and soc_ram_length is cleared.
 *
 * NOTE(review): when soc_ram already exists, a call with a larger 'length'
 * memsets and records 'length' against the old allocation — callers appear
 * to pass a fixed RAM size, but confirm.
 */
void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
{
	if (!dhd_pub->soc_ram) {
#if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
		/* Use the pre-reserved static region: avoids allocating at crash time */
		dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
			DHD_PREALLOC_MEMDUMP_RAM, length);
#else
		dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);

		/* Physically-contiguous alloc failed; fall back to vmalloc when
		 * sleeping is allowed in this context.
		 */
		if ((dhd_pub->soc_ram == NULL) && CAN_SLEEP()) {
			DHD_ERROR(("%s: Try to allocate virtual memory for fw crash snap shot.\n",
				__FUNCTION__));
			dhd_pub->soc_ram = (uint8*) VMALLOC(dhd_pub->osh, length);
		}
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
	}

	if (dhd_pub->soc_ram == NULL) {
		DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
			__FUNCTION__));
		dhd_pub->soc_ram_length = 0;
	} else {
		memset(dhd_pub->soc_ram, 0, length);
		dhd_pub->soc_ram_length = length;
	}

	/* soc_ram free handled in dhd_{free,clear} */
	return dhd_pub->soc_ram;
}
1520 #endif /* DHD_FW_COREDUMP */
1521 
1522 /* to NDIS developer, the structure dhd_common is redundant,
1523  * please do NOT merge it back from other branches !!!
1524  */
1525 
/*
 * Trigger a dump of the dongle's SOC RAM over the bus.
 * Under BCMDBUS builds there is no direct bus access, so this is a no-op
 * returning 0; otherwise it returns the result of dhd_socram_dump().
 */
int
dhd_common_socram_dump(dhd_pub_t *dhdp)
{
#ifdef BCMDBUS
	return 0;
#else
	return dhd_socram_dump(dhdp->bus);
#endif /* BCMDBUS */
}
1535 
1536 int
dhd_dump(dhd_pub_t * dhdp,char * buf,int buflen)1537 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
1538 {
1539 	struct bcmstrbuf b;
1540 	struct bcmstrbuf *strbuf = &b;
1541 #ifdef DHD_MEM_STATS
1542 	uint64 malloc_mem = 0;
1543 	uint64 total_txpath_mem = 0;
1544 	uint64 txpath_bkpq_len = 0;
1545 	uint64 txpath_bkpq_mem = 0;
1546 	uint64 total_dhd_mem = 0;
1547 #endif /* DHD_MEM_STATS */
1548 
1549 	if (!dhdp || !dhdp->prot || !buf) {
1550 		return BCME_ERROR;
1551 	}
1552 
1553 	bcm_binit(strbuf, buf, buflen);
1554 
1555 	/* Base DHD info */
1556 	bcm_bprintf(strbuf, "%s\n", dhd_version);
1557 	bcm_bprintf(strbuf, "\n");
1558 	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
1559 	            dhdp->up, dhdp->txoff, dhdp->busstate);
1560 	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
1561 	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
1562 	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
1563 	            dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
1564 	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
1565 
1566 	bcm_bprintf(strbuf, "dongle stats:\n");
1567 	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
1568 	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
1569 	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
1570 	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
1571 	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
1572 	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
1573 	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
1574 
1575 	bcm_bprintf(strbuf, "bus stats:\n");
1576 	bcm_bprintf(strbuf, "tx_packets %lu  tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
1577 	            dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
1578 	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
1579 	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
1580 	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
1581 	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
1582 	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
1583 	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
1584 	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
1585 	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
1586 	bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
1587 	            dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
1588 	bcm_bprintf(strbuf, "tx_big_packets %lu\n",
1589 	            dhdp->tx_big_packets);
1590 	bcm_bprintf(strbuf, "\n");
1591 #ifdef DMAMAP_STATS
1592 	/* Add DMA MAP info */
1593 	bcm_bprintf(strbuf, "DMA MAP stats: \n");
1594 	bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
1595 			dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
1596 			dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
1597 #ifndef IOCTLRESP_USE_CONSTMEM
1598 	bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
1599 			dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
1600 #endif /* !IOCTLRESP_USE_CONSTMEM */
1601 	bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
1602 			"TSBUF RX: %lu size %luK\n",
1603 			dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
1604 			dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
1605 			dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
1606 	bcm_bprintf(strbuf, "Total : %luK \n",
1607 			KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
1608 			dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
1609 			dhdp->dma_stats.tsbuf_rx_sz));
1610 #endif /* DMAMAP_STATS */
1611 	bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
1612 	/* Add any prot info */
1613 	dhd_prot_dump(dhdp, strbuf);
1614 	bcm_bprintf(strbuf, "\n");
1615 
1616 	/* Add any bus info */
1617 	dhd_bus_dump(dhdp, strbuf);
1618 #if defined(BCM_ROUTER_DHD) && defined(HNDCTF)
1619 	/* Add ctf info */
1620 	dhd_ctf_dump(dhdp, strbuf);
1621 #endif /* BCM_ROUTER_DHD && HNDCTF */
1622 
1623 #if defined(DHD_LB_STATS)
1624 	dhd_lb_stats_dump(dhdp, strbuf);
1625 #endif /* DHD_LB_STATS */
1626 
1627 #ifdef DHD_MEM_STATS
1628 
1629 	malloc_mem = MALLOCED(dhdp->osh);
1630 
1631 	txpath_bkpq_len = dhd_active_tx_flowring_bkpq_len(dhdp);
1632 	/*
1633 	 * Instead of traversing the entire queue to find the skbs length,
1634 	 * considering MAX_MTU_SZ as lenth of each skb.
1635 	 */
1636 	txpath_bkpq_mem = (txpath_bkpq_len* MAX_MTU_SZ);
1637 	total_txpath_mem = dhdp->txpath_mem + txpath_bkpq_mem;
1638 
1639 	bcm_bprintf(strbuf, "\nDHD malloc memory_usage: %llubytes %lluKB\n",
1640 		malloc_mem, (malloc_mem / 1024));
1641 
1642 	bcm_bprintf(strbuf, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n",
1643 		txpath_bkpq_len, txpath_bkpq_mem, (txpath_bkpq_mem / 1024));
1644 	bcm_bprintf(strbuf, "DHD tx-path memory_usage: %llubytes %lluKB\n",
1645 		total_txpath_mem, (total_txpath_mem / 1024));
1646 
1647 	total_dhd_mem = malloc_mem + total_txpath_mem;
1648 #if defined(DHD_LB_STATS)
1649 	total_dhd_mem += dhd_lb_mem_usage(dhdp, strbuf);
1650 #endif /* DHD_LB_STATS */
1651 	bcm_bprintf(strbuf, "\nDHD Totoal memory_usage: %llubytes %lluKB \n",
1652 		total_dhd_mem, (total_dhd_mem / 1024));
1653 #endif /* DHD_MEM_STATS */
1654 #if defined(DHD_LB_STATS)
1655 	bcm_bprintf(strbuf, "\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
1656 		dhdp->lb_rxp_stop_thr_hitcnt, dhdp->lb_rxp_strt_thr_hitcnt);
1657 	bcm_bprintf(strbuf, "\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
1658 		dhdp->lb_rxp_napi_sched_cnt, dhdp->lb_rxp_napi_complete_cnt);
1659 #endif /* DHD_LB_STATS */
1660 
1661 #if defined(DHD_MQ) && defined(DHD_MQ_STATS)
1662 	dhd_mqstats_dump(dhdp, strbuf);
1663 #endif
1664 
1665 #ifdef DHD_WET
1666 	if (dhd_get_wet_mode(dhdp)) {
1667 		bcm_bprintf(strbuf, "Wet Dump:\n");
1668 		dhd_wet_dump(dhdp, strbuf);
1669 		}
1670 #endif /* DHD_WET */
1671 
1672 	DHD_ERROR(("%s bufsize: %d free: %d\n", __FUNCTION__, buflen, strbuf->size));
1673 	/* return remaining buffer length */
1674 	return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
1675 }
1676 
1677 void
dhd_dump_to_kernelog(dhd_pub_t * dhdp)1678 dhd_dump_to_kernelog(dhd_pub_t *dhdp)
1679 {
1680 	char buf[512];
1681 
1682 	DHD_ERROR(("F/W version: %s\n", fw_version));
1683 	bcm_bprintf_bypass = TRUE;
1684 	dhd_dump(dhdp, buf, sizeof(buf));
1685 	bcm_bprintf_bypass = FALSE;
1686 }
1687 
1688 int
dhd_wl_ioctl_cmd(dhd_pub_t * dhd_pub,int cmd,void * arg,int len,uint8 set,int ifidx)1689 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
1690 {
1691 	wl_ioctl_t ioc;
1692 
1693 	ioc.cmd = cmd;
1694 	ioc.buf = arg;
1695 	ioc.len = len;
1696 	ioc.set = set;
1697 
1698 	return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
1699 }
1700 
1701 int
dhd_wl_ioctl_get_intiovar(dhd_pub_t * dhd_pub,char * name,uint * pval,int cmd,uint8 set,int ifidx)1702 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
1703 	int cmd, uint8 set, int ifidx)
1704 {
1705 	char iovbuf[WLC_IOCTL_SMLEN];
1706 	int ret = -1;
1707 
1708 	memset(iovbuf, 0, sizeof(iovbuf));
1709 	if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
1710 		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
1711 		if (!ret) {
1712 			*pval = ltoh32(*((uint*)iovbuf));
1713 		} else {
1714 			DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
1715 				__FUNCTION__, name, ret));
1716 		}
1717 	} else {
1718 		DHD_ERROR(("%s: mkiovar %s failed\n",
1719 			__FUNCTION__, name));
1720 	}
1721 
1722 	return ret;
1723 }
1724 
1725 int
dhd_wl_ioctl_set_intiovar(dhd_pub_t * dhd_pub,char * name,uint val,int cmd,uint8 set,int ifidx)1726 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
1727 	int cmd, uint8 set, int ifidx)
1728 {
1729 	char iovbuf[WLC_IOCTL_SMLEN];
1730 	int ret = -1;
1731 	int lval = htol32(val);
1732 	uint len;
1733 
1734 	len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
1735 
1736 	if (len) {
1737 		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
1738 		if (ret) {
1739 			DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1740 				__FUNCTION__, name, ret));
1741 		}
1742 	} else {
1743 		DHD_ERROR(("%s: mkiovar %s failed\n",
1744 			__FUNCTION__, name));
1745 	}
1746 
1747 	return ret;
1748 }
1749 
/* Mapping from common WLC ioctl command codes to printable names, used only
 * for debug logging (see ioctl2str()). The terminating {0, NULL} entry is
 * the end-of-table sentinel.
 */
static struct ioctl2str_s {
	uint32 ioctl;	/* WLC_xxx ioctl command code */
	char *name;	/* printable name for the log */
} ioctl2str_array[] = {
	{WLC_UP, "UP"},
	{WLC_DOWN, "DOWN"},
	{WLC_SET_PROMISC, "SET_PROMISC"},
	{WLC_SET_INFRA, "SET_INFRA"},
	{WLC_SET_AUTH, "SET_AUTH"},
	{WLC_SET_SSID, "SET_SSID"},
	{WLC_RESTART, "RESTART"},
	{WLC_SET_CHANNEL, "SET_CHANNEL"},
	{WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
	{WLC_SET_KEY, "SET_KEY"},
	{WLC_SCAN, "SCAN"},
	{WLC_DISASSOC, "DISASSOC"},
	{WLC_REASSOC, "REASSOC"},
	{WLC_SET_COUNTRY, "SET_COUNTRY"},
	{WLC_SET_WAKE, "SET_WAKE"},
	{WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
	{WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
	{WLC_SET_WSEC, "SET_WSEC"},
	{WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
	{WLC_SET_RADAR, "SET_RADAR"},
	{0, NULL}
};
1776 
1777 static char *
ioctl2str(uint32 ioctl)1778 ioctl2str(uint32 ioctl)
1779 {
1780 	struct ioctl2str_s *p = ioctl2str_array;
1781 
1782 	while (p->name != NULL) {
1783 		if (p->ioctl == ioctl) {
1784 			return p->name;
1785 		}
1786 		p++;
1787 	}
1788 
1789 	return "";
1790 }
1791 
/**
 * Send an ioctl/iovar to the dongle, serialized against all other callers
 * via the proto lock, with bus-state checks before the transfer.
 *
 * @param dhd_pub      DHD public context.
 * @param ifidx        Interface index the ioctl is issued on.
 * @param ioc          IO control struct, members are partially used by this function.
 * @param buf [inout]  Contains parameters to send to dongle, contains dongle response on return.
 * @param len          Maximum number of bytes that dongle is allowed to write into 'buf'.
 *
 * @return BCME_xxx status or -ENODEV when the bus is down/suspended/in error.
 */
int
dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
{
	int ret = BCME_ERROR;
	unsigned long flags;
#ifdef DUMP_IOCTL_IOV_LIST
	dhd_iov_li_t *iov_li;
#endif /* DUMP_IOCTL_IOV_LIST */
#ifdef REPORT_FATAL_TIMEOUTS
	wl_escan_params_t *eparams;
	uint8 *buf_ptr = (uint8 *)buf;
	uint16 action = 0;
#endif /* REPORT_FATAL_TIMEOUTS */
	int hostsleep_set = 0;
	int hostsleep_val = 0;

	/* Refuse new ioctls once a bus error has been latched */
	if (dhd_query_bus_erros(dhd_pub)) {
		return -ENODEV;
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/* Resume the PCIe device before touching it; bail out if resume fails */
	DHD_OS_WAKE_LOCK(dhd_pub);
	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
		DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
		DHD_OS_WAKE_UNLOCK(dhd_pub);
		return BCME_ERROR;
	}
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef KEEPIF_ON_DEVICE_RESET
		/* Mirror wowl_activate/wowl_clear queries into the DBUS config so
		 * the interface setting survives a device reset.  general_param is
		 * stored +1 so that 0 can mean "nothing to do".
		 */
		if (ioc->cmd == WLC_GET_VAR) {
			dbus_config_t config;
			config.general_param = 0;
			if (buf) {
				if (!strcmp(buf, "wowl_activate")) {
					 /* 1 (TRUE) after decreased by 1 */
					config.general_param = 2;
				} else if (!strcmp(buf, "wowl_clear")) {
					 /* 0 (FALSE) after decreased by 1 */
					config.general_param = 1;
				}
			}
			if (config.general_param) {
				config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
				config.general_param--;
				dbus_set_config(dhd_pub->dbus, &config);
			}
		}
#endif /* KEEPIF_ON_DEVICE_RESET */

	/* Serialize: only one ioctl may be in flight at a time */
	if (dhd_os_proto_block(dhd_pub))
	{
#ifdef DHD_LOG_DUMP
		int slen, val, lval, min_len;
		char *msg, tmp[64];

		/* WLC_GET_VAR: snapshot the iovar name into 'tmp' now, since the
		 * dongle response will overwrite 'buf' before the logging below.
		 */
		if (ioc->cmd == WLC_GET_VAR && buf) {
			min_len = MIN(sizeof(tmp) - 1, strlen(buf));
			memset(tmp, 0, sizeof(tmp));
			bcopy(buf, tmp, min_len);
			tmp[min_len] = '\0';
		}
#endif /* DHD_LOG_DUMP */

#ifdef DHD_DISCONNECT_TRACE
		if (WLC_DISASSOC == ioc->cmd || WLC_DOWN == ioc->cmd ||
			WLC_DISASSOC_MYAP == ioc->cmd) {
			DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd))
		}
#endif /* DHD_DISCONNECT_TRACE */
		/* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
		if (ioc->set == TRUE) {
			char *pars = (char *)buf; // points at user buffer
			if (ioc->cmd == WLC_SET_VAR && buf) {
				DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
				if (ioc->len > 1 + sizeof(uint32)) {
					// skip iovar name:
					pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
					pars++;               // skip NULL character
				}
			} else {
				DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
					ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
			}
			if (pars != NULL) {
				DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
			} else {
				DHD_DNGL_IOVAR_SET((" NULL\n"));
			}
		}

		/* Mark the bus busy with an ioctl, unless it is (going) down */
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
#ifdef DHD_EFI
			/* NOTE(review): both arms of this #ifdef are identical; the
			 * conditional could be collapsed.
			 */
			DHD_INFO(("%s: returning as busstate=%d\n",
				__FUNCTION__, dhd_pub->busstate));
#else
			DHD_INFO(("%s: returning as busstate=%d\n",
				__FUNCTION__, dhd_pub->busstate));
#endif /* DHD_EFI */
			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
			dhd_os_proto_unblock(dhd_pub);
			return -ENODEV;
		}
		DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
		dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
#endif /* DHD_PCIE_RUNTIMEPM */

		/* A suspended bus cannot service the ioctl: fail it, optionally
		 * escalating to a hang report after repeated occurrences.
		 */
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub) ||
			dhd_pub->dhd_induce_error == DHD_INDUCE_IOCTL_SUSPEND_ERROR) {
			DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
				__FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
			ioctl_suspend_error++;
			if (ioctl_suspend_error > MAX_IOCTL_SUSPEND_ERROR) {
				dhd_pub->hang_reason = HANG_REASON_IOCTL_SUSPEND_ERROR;
				dhd_os_send_hang_message(dhd_pub);
				ioctl_suspend_error = 0;
			}
#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
			DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
			dhd_os_busbusy_wake(dhd_pub);
			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
			dhd_os_proto_unblock(dhd_pub);
			return -ENODEV;
		}
#ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
		ioctl_suspend_error = 0;
#endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#if defined(WL_WLC_SHIM)
		/* Route the ioctl through the WLC shim layer */
		{
			struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);

			wl_io_pport_t io_pport;
			io_pport.dhd_pub = dhd_pub;
			io_pport.ifidx = ifidx;

			ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
			if (ret != BCME_OK) {
				DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
					__FUNCTION__, ioc->cmd, ret));
			}
		}
#else
#ifdef DUMP_IOCTL_IOV_LIST
		/* Record this command in the recent-iovar list so it can be
		 * dumped if a timeout is detected later.
		 */
		if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
			if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
				DHD_ERROR(("iovar dump list item allocation Failed\n"));
			} else {
				iov_li->cmd = ioc->cmd;
				if (buf)
					bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
				dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
						&iov_li->list);
			}
		}
#endif /* DUMP_IOCTL_IOV_LIST */

#ifdef REPORT_FATAL_TIMEOUTS
		/* fill in the sync_id to ensure that the scan timeout is always for the
		* current running escan in the FW - the wl app does not fill in an
		* incrementing number for sync_id, it only fills in a random number which
		* increases the chance of 2 consecutive escans having the same sync id
		* This should happen here after dhd_proto_block()
		* is called, so that sync_id does not
		* get incremented if 2 consecutive escans are fired in quick succession
		*/
		if ((ioc->cmd == WLC_SET_VAR &&
				buf != NULL &&
				strcmp("escan", buf) == 0)) {
			/* escan params follow the iovar name and its NUL terminator */
			eparams = (wl_escan_params_t *) (buf_ptr + strlen("escan") + 1);
			action = dtoh16(eparams->action);
			if (action == WL_SCAN_ACTION_START) {
				++dhd_pub->esync_id;
				/* sync id of 0 is not used for escan,
				* it is used to indicate
				* a normal scan timer is running, so as
				* to ensure that escan abort event
				* does not cancel a normal scan timeout
				*/
				if (dhd_pub->esync_id == 0)
					++dhd_pub->esync_id;
				DHD_INFO(("%s:escan sync id set to = %u \n",
					__FUNCTION__, dhd_pub->esync_id));
				eparams->sync_id = htod16(dhd_pub->esync_id);
			}
		}
#endif /* REPORT_FATAL_TIMEOUTS */

		/* The actual transfer to the dongle, bracketed by hostsleep handling */
		if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
				&hostsleep_set, &hostsleep_val, &ret))
			goto exit;
		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
		dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);

#ifdef DUMP_IOCTL_IOV_LIST
		if (ret == -ETIMEDOUT) {
			DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
				IOV_LIST_MAX_LEN));
			dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
		}
#endif /* DUMP_IOCTL_IOV_LIST */
#endif /* defined(WL_WLC_SHIM) */
#ifdef WL_CFGVENDOR_SEND_HANG_EVENT
		if (ret == -ETIMEDOUT) {
			copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
		}
#endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
#ifdef DHD_LOG_DUMP
		/* Log the completed iovar/ioctl together with its leading int value */
		if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
				buf != NULL) {
			if (buf) {
				/* NOTE(review): 'buf' was already checked non-NULL in the
				 * condition above, so the else branch below is unreachable.
				 */
				lval = 0;
				slen = strlen(buf) + 1;
				msg = (char*)buf;
				if (len >= slen + sizeof(lval)) {
					if (ioc->cmd == WLC_GET_VAR) {
						/* use the name saved in 'tmp'; 'buf' now
						 * holds the response value
						 */
						msg = tmp;
						lval = *(int*)buf;
					} else {
						min_len = MIN(ioc->len - slen, sizeof(int));
						bcopy((msg + slen), &lval, min_len);
					}
					/* do not log the MAC address value itself */
					if (!strncmp(msg, "cur_etheraddr",
						strlen("cur_etheraddr"))) {
						lval = 0;
					}
				}
				DHD_IOVAR_MEM((
					"%s: cmd: %d, msg: %s val: 0x%x,"
					" len: %d, set: %d, txn-id: %d\n",
					ioc->cmd == WLC_GET_VAR ?
					"WLC_GET_VAR" : "WLC_SET_VAR",
					ioc->cmd, msg, lval, ioc->len, ioc->set,
					dhd_prot_get_ioctl_trans_id(dhd_pub)));
			} else {
				DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
					ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
					ioc->cmd, ioc->len, ioc->set,
					dhd_prot_get_ioctl_trans_id(dhd_pub)));
			}
		} else {
			slen = ioc->len;
			if (buf != NULL && slen != 0) {
				/* log at most the first 4 bytes of the payload */
				if (slen >= 4) {
					val = *(int*)buf;
				} else if (slen >= 2) {
					val = *(short*)buf;
				} else {
					val = *(char*)buf;
				}
				/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
				if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
					DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
						"set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
				}
			} else {
				DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
			}
		}
#endif /* DHD_LOG_DUMP */
#if defined(OEM_ANDROID)
		if (ret && dhd_pub->up) {
			/* Send hang event only if dhd_open() was success */
			dhd_os_check_hang(dhd_pub, ifidx, ret);
		}

		if (ret == -ETIMEDOUT && !dhd_pub->up) {
			DHD_ERROR(("%s: 'resumed on timeout' error is "
				"occurred before the interface does not"
				" bring up\n", __FUNCTION__));
		}
#endif /* defined(OEM_ANDROID) */

exit:
		/* Clear the in-ioctl busy state and wake anyone waiting on the bus */
		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
		DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
		dhd_os_busbusy_wake(dhd_pub);
		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef REPORT_FATAL_TIMEOUTS
		/* Arm the scan timeout only after the escan start was accepted */
		if ((ret == BCME_OK && ioc->cmd == WLC_SET_VAR &&
				buf != NULL &&
				strcmp("escan", buf) == 0)) {
			if (action == WL_SCAN_ACTION_START)
				dhd_start_scan_timer(dhd_pub, TRUE);
		}
#endif /* REPORT_FATAL_TIMEOUTS */

		dhd_os_proto_unblock(dhd_pub);

#ifdef DETAIL_DEBUG_LOG_FOR_IOCTL
		/* Verbose failure logging, with friendly text for the two most
		 * common benign errors (unsupported / not associated).
		 */
		if (ret < 0) {
			if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
					buf != NULL) {
				if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
					DHD_ERROR_MEM(("%s: %s: %s, %s\n",
						__FUNCTION__, ioc->cmd == WLC_GET_VAR ?
						"WLC_GET_VAR" : "WLC_SET_VAR",
						buf? (char *)buf:"NO MESSAGE",
						ret == BCME_UNSUPPORTED ? "UNSUPPORTED"
						: "NOT ASSOCIATED"));
				} else {
					DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n",
						__FUNCTION__, ioc->cmd == WLC_GET_VAR ?
						"WLC_GET_VAR" : "WLC_SET_VAR",
						(char *)buf, ret));
				}
			} else {
				if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
					DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n",
						__FUNCTION__, ioc->cmd,
						ret == BCME_UNSUPPORTED ? "UNSUPPORTED" :
						"NOT ASSOCIATED"));
				} else {
					DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
						__FUNCTION__, ioc->cmd, ret));
				}
			}
		}
#endif /* DETAIL_DEBUG_LOG_FOR_IOCTL */
	}

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	/* Allow the device to autosuspend again */
	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));

	DHD_OS_WAKE_UNLOCK(dhd_pub);
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef WL_MONITOR
	/* Intercept monitor ioctl here, add/del monitor if */
	if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
		int val = 0;
		if (buf != NULL && len != 0) {
			if (len >= 4) {
				val = *(int*)buf;
			} else if (len >= 2) {
				val = *(short*)buf;
			} else {
				val = *(char*)buf;
			}
		}
		dhd_set_monitor(dhd_pub, ifidx, val);
	}
#endif /* WL_MONITOR */

	return ret;
}
2153 
wl_get_port_num(wl_io_pport_t * io_pport)2154 uint wl_get_port_num(wl_io_pport_t *io_pport)
2155 {
2156 	return 0;
2157 }
2158 
2159 /* Get bssidx from iovar params
2160  * Input:   dhd_pub - pointer to dhd_pub_t
2161  *	    params  - IOVAR params
2162  * Output:  idx	    - BSS index
2163  *	    val	    - ponter to the IOVAR arguments
2164  */
2165 static int
dhd_iovar_parse_bssidx(dhd_pub_t * dhd_pub,const char * params,uint32 * idx,const char ** val)2166 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
2167 {
2168 	char *prefix = "bsscfg:";
2169 	uint32	bssidx;
2170 
2171 	if (!(strncmp(params, prefix, strlen(prefix)))) {
2172 		/* per bss setting should be prefixed with 'bsscfg:' */
2173 		const char *p = params + strlen(prefix);
2174 
2175 		/* Skip Name */
2176 		while (*p != '\0')
2177 			p++;
2178 		/* consider null */
2179 		p = p + 1;
2180 		bcopy(p, &bssidx, sizeof(uint32));
2181 		/* Get corresponding dhd index */
2182 		bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
2183 
2184 		if (bssidx >= DHD_MAX_IFS) {
2185 			DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
2186 			return BCME_ERROR;
2187 		}
2188 
2189 		/* skip bss idx */
2190 		p += sizeof(uint32);
2191 		*val = p;
2192 		*idx = bssidx;
2193 	} else {
2194 		DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
2195 		return BCME_ERROR;
2196 	}
2197 
2198 	return BCME_OK;
2199 }
2200 
2201 #if defined(DHD_DEBUG) && defined(BCMDBUS)
2202 /* USB Device console input function */
dhd_bus_console_in(dhd_pub_t * dhd,uchar * msg,uint msglen)2203 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
2204 {
2205 	DHD_TRACE(("%s \n", __FUNCTION__));
2206 
2207 	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
2208 
2209 }
2210 #endif /* DHD_DEBUG && BCMDBUS  */
2211 
2212 #ifdef DHD_DEBUG
/* Debug hook for the firmware "memwaste" facility:
 *   "query"      - list the host-tracked <id, size> chunks
 *   "alloc <KB>" - ask the dongle to allocate a chunk and track it
 *   "free <id>"  - unlink the tracked chunk and ask the dongle to free it
 * 'msg' is both the command string and the binary scratch buffer used to
 * exchange handle/size values with the dongle.  Returns 0 or BCME_BADARG.
 */
int
dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	unsigned long int_arg = 0;
	char *p;
	char *end_ptr = NULL;
	dhd_dbg_mwli_t *mw_li;
	dll_t *item, *next;
	/* check if mwalloc, mwquery or mwfree was supplied argument with space */
	p = bcmstrstr((char *)msg, " ");
	if (p != NULL) {
		/* space should be converted to null as separation flag for firmware */
		*p = '\0';
		/* store the argument in int_arg */
		int_arg = bcm_strtoul(p+1, &end_ptr, 10);
	}

	if (!p && !strcmp(msg, "query")) {
		/* lets query the list internally */
		if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
			DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
		} else {
			for (item = dll_head_p(&dhd->mw_list_head);
					!dll_end(&dhd->mw_list_head, item); item = next) {
				next = dll_next_p(item);
				mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
				DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
			}
		}
	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
		int32 alloc_handle;
		/* convert size into KB and append as integer */
		/* NOTE(review): writes an int32 at p+1, which may be an unaligned
		 * address - confirm this is safe on all supported targets.
		 */
		*((int32 *)(p+1)) = int_arg*1024;
		*(p+1+sizeof(int32)) = '\0';

		/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
		 * 1 byte for null character
		 */
		msglen = strlen(msg) + sizeof(int32) + 1;
		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
			DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
		}

		/* returned allocated handle from dongle, basically address of the allocated unit */
		alloc_handle = *((int32 *)msg);

		/* add a node in the list with tuple <id, handle, size> */
		if (alloc_handle == 0) {
			DHD_ERROR(("Reuqested size could not be allocated\n"));
		} else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
			DHD_ERROR(("mw list item allocation Failed\n"));
		} else {
			mw_li->id = dhd->mw_id++;
			mw_li->handle = alloc_handle;
			mw_li->size = int_arg;
			/* append the node in the list */
			dll_append(&dhd->mw_list_head, &mw_li->list);
		}
	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
		/* inform dongle to free wasted chunk */
		int handle = 0;
		int size = 0;
		/* find (and unlink) the tracked node whose id matches the argument */
		for (item = dll_head_p(&dhd->mw_list_head);
				!dll_end(&dhd->mw_list_head, item); item = next) {
			next = dll_next_p(item);
			mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);

			if (mw_li->id == (int)int_arg) {
				handle = mw_li->handle;
				size = mw_li->size;
				dll_delete(item);
				MFREE(dhd->osh, mw_li, sizeof(*mw_li));
				if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
					/* reset the id */
					dhd->mw_id = 0;
				}
			}
		}
		if (handle) {
			int len;
			/* append the free handle and the chunk size in first 8 bytes
			 * after the command and null character
			 */
			*((int32 *)(p+1)) = handle;
			*((int32 *)((p+1)+sizeof(int32))) = size;
			/* append null as terminator */
			*(p+1+2*sizeof(int32)) = '\0';
			/* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
			 * + 1 byte for null character
			 */
			len = strlen(msg) + 2*sizeof(int32) + 1;
			/* send iovar to free the chunk */
			if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
				DHD_ERROR(("IOCTL failed for memdebug free\n"));
			}
		} else {
			DHD_ERROR(("specified id does not exist\n"));
		}
	} else {
		/* for all the wrong argument formats */
		return BCME_BADARG;
	}
	return 0;
}
2317 extern void
dhd_mw_list_delete(dhd_pub_t * dhd,dll_t * list_head)2318 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
2319 {
2320 	dll_t *item;
2321 	dhd_dbg_mwli_t *mw_li;
2322 	while (!(dll_empty(list_head))) {
2323 		item = dll_head_p(list_head);
2324 		mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
2325 		dll_delete(item);
2326 		MFREE(dhd->osh, mw_li, sizeof(*mw_li));
2327 	}
2328 }
2329 #ifdef BCMPCIE
/* Debug hook to exercise flow rings on interface 0:
 *   "create <src mac> <dst mac> <prio>" - create a flow ring
 *   "delete <flowid>"                   - delete a previously created ring
 * MAC addresses are colon-separated hex; prio is decimal 0-7.
 * Returns BCME_OK on success, BCME_ERROR on any parse or request failure.
 */
int
dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
{
	flow_ring_table_t *flow_ring_table;
	char *cmd;
	char *end_ptr = NULL;
	uint8 prio;
	uint16 flowid;
	int i;
	int ret = 0;
	cmd = bcmstrstr(msg, " ");
	BCM_REFERENCE(prio);
	if (cmd != NULL) {
		/* in order to use string operations append null */
		*cmd = '\0';
	} else {
		DHD_ERROR(("missing: create/delete args\n"));
		return BCME_ERROR;
	}
	if (cmd && !strcmp(msg, "create")) {
		/* extract <"source address", "destination address", "priority"> */
		uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
		BCM_REFERENCE(sa);
		BCM_REFERENCE(da);
		msg = msg + strlen("create") + 1;
		/* fill ethernet source address */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
			/* every octet except the last must be followed by ':' */
			if (*end_ptr == ':') {
				msg = (end_ptr + 1);
			} else if (i != 5) {
				DHD_ERROR(("not a valid source mac addr\n"));
				return BCME_ERROR;
			}
		}
		/* the source address must be followed by a space separator */
		if (*end_ptr != ' ') {
			DHD_ERROR(("missing: destiantion mac id\n"));
			return BCME_ERROR;
		} else {
			/* skip space */
			msg = end_ptr + 1;
		}
		/* fill ethernet destination address */
		for (i = 0; i < ETHER_ADDR_LEN; i++) {
			da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
			if (*end_ptr == ':') {
				msg = (end_ptr + 1);
			} else if (i != 5) {
				DHD_ERROR(("not a valid destination  mac addr\n"));
				return BCME_ERROR;
			}
		}
		if (*end_ptr != ' ') {
			DHD_ERROR(("missing: priority\n"));
			return BCME_ERROR;
		} else {
			msg = end_ptr + 1;
		}
		/* parse priority */
		prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
		if (prio > MAXPRIO) {
			DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
				__FUNCTION__));
			return BCME_ERROR;
		}

		/* priority must be the last token in the command */
		if (*end_ptr != '\0') {
			DHD_ERROR(("msg not truncated with NULL character\n"));
			return BCME_ERROR;
		}
		ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
			return BCME_ERROR;
		}
		return BCME_OK;

	} else if (cmd && !strcmp(msg, "delete")) {
		msg = msg + strlen("delete") + 1;
		/* parse flowid */
		flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
		if (*end_ptr != '\0') {
			DHD_ERROR(("msg not truncated with NULL character\n"));
			return BCME_ERROR;
		}

		/* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
		if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
		{
			DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
			return BCME_ERROR;
		}

		flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
		ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
		if (ret != BCME_OK) {
			DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
			return BCME_ERROR;
		}
		return BCME_OK;
	}
	DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
	return BCME_ERROR;
}
2434 #endif /* BCMPCIE */
2435 #endif /* DHD_DEBUG */
2436 
2437 static int
dhd_doiovar(dhd_pub_t * dhd_pub,const bcm_iovar_t * vi,uint32 actionid,const char * name,void * params,int plen,void * arg,uint len,int val_size)2438 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
2439             void *params, int plen, void *arg, uint len, int val_size)
2440 {
2441 	int bcmerror = 0;
2442 	int32 int_val = 0;
2443 	uint32 dhd_ver_len, bus_api_rev_len;
2444 
2445 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2446 	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
2447 
2448 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
2449 		goto exit;
2450 
2451 	if (plen >= (int)sizeof(int_val))
2452 		bcopy(params, &int_val, sizeof(int_val));
2453 
2454 	switch (actionid) {
2455 	case IOV_GVAL(IOV_VERSION):
2456 		/* Need to have checked buffer length */
2457 		dhd_ver_len = sizeof(dhd_version) - 1;
2458 		bus_api_rev_len = strlen(bus_api_revision);
2459 		if (len > dhd_ver_len + bus_api_rev_len) {
2460 			bcmerror = memcpy_s((char *)arg, len, dhd_version, dhd_ver_len);
2461 			if (bcmerror != BCME_OK) {
2462 				break;
2463 			}
2464 			bcmerror = memcpy_s((char *)arg + dhd_ver_len, len - dhd_ver_len,
2465 				bus_api_revision, bus_api_rev_len);
2466 			if (bcmerror != BCME_OK) {
2467 				break;
2468 			}
2469 			*((char *)arg + dhd_ver_len + bus_api_rev_len) = '\0';
2470 		}
2471 		break;
2472 
2473 	case IOV_GVAL(IOV_WLMSGLEVEL):
2474 		printf("android_msg_level=0x%x\n", android_msg_level);
2475 		printf("config_msg_level=0x%x\n", config_msg_level);
2476 #if defined(WL_WIRELESS_EXT)
2477 		int_val = (int32)iw_msg_level;
2478 		bcopy(&int_val, arg, val_size);
2479 		printf("iw_msg_level=0x%x\n", iw_msg_level);
2480 #endif
2481 #ifdef WL_CFG80211
2482 		int_val = (int32)wl_dbg_level;
2483 		bcopy(&int_val, arg, val_size);
2484 		printf("cfg_msg_level=0x%x\n", wl_dbg_level);
2485 #endif
2486 		break;
2487 
2488 	case IOV_SVAL(IOV_WLMSGLEVEL):
2489 		if (int_val & DHD_ANDROID_VAL) {
2490 			android_msg_level = (uint)(int_val & 0xFFFF);
2491 			printf("android_msg_level=0x%x\n", android_msg_level);
2492 		}
2493 		if (int_val & DHD_CONFIG_VAL) {
2494 			config_msg_level = (uint)(int_val & 0xFFFF);
2495 			printf("config_msg_level=0x%x\n", config_msg_level);
2496 		}
2497 #if defined(WL_WIRELESS_EXT)
2498 		if (int_val & DHD_IW_VAL) {
2499 			iw_msg_level = (uint)(int_val & 0xFFFF);
2500 			printf("iw_msg_level=0x%x\n", iw_msg_level);
2501 		}
2502 #endif
2503 #ifdef WL_CFG80211
2504 		if (int_val & DHD_CFG_VAL) {
2505 			wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
2506 		}
2507 #endif
2508 		break;
2509 
2510 	case IOV_GVAL(IOV_MSGLEVEL):
2511 		int_val = (int32)dhd_msg_level;
2512 		bcopy(&int_val, arg, val_size);
2513 		break;
2514 
2515 	case IOV_SVAL(IOV_MSGLEVEL):
2516 		dhd_msg_level = int_val;
2517 		break;
2518 
2519 	case IOV_GVAL(IOV_BCMERRORSTR):
2520 		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
2521 		((char *)arg)[BCME_STRLEN - 1] = 0x00;
2522 		break;
2523 
2524 	case IOV_GVAL(IOV_BCMERROR):
2525 		int_val = (int32)dhd_pub->bcmerror;
2526 		bcopy(&int_val, arg, val_size);
2527 		break;
2528 
2529 #ifndef BCMDBUS
2530 	case IOV_GVAL(IOV_WDTICK):
2531 		int_val = (int32)dhd_watchdog_ms;
2532 		bcopy(&int_val, arg, val_size);
2533 		break;
2534 #endif /* !BCMDBUS */
2535 
2536 	case IOV_SVAL(IOV_WDTICK):
2537 		if (!dhd_pub->up) {
2538 			bcmerror = BCME_NOTUP;
2539 			break;
2540 		}
2541 
2542 		dhd_watchdog_ms = (uint)int_val;
2543 
2544 		dhd_os_wd_timer(dhd_pub, (uint)int_val);
2545 		break;
2546 
2547 	case IOV_GVAL(IOV_DUMP):
2548 		if (dhd_dump(dhd_pub, arg, len) <= 0)
2549 			bcmerror = BCME_ERROR;
2550 		else
2551 			bcmerror = BCME_OK;
2552 		break;
2553 
2554 #ifndef BCMDBUS
2555 	case IOV_GVAL(IOV_DCONSOLE_POLL):
2556 		int_val = (int32)dhd_pub->dhd_console_ms;
2557 		bcopy(&int_val, arg, val_size);
2558 		break;
2559 
2560 	case IOV_SVAL(IOV_DCONSOLE_POLL):
2561 		dhd_pub->dhd_console_ms = (uint)int_val;
2562 		break;
2563 
2564 #if defined(DHD_DEBUG)
2565 	case IOV_SVAL(IOV_CONS):
2566 		if (len > 0) {
2567 #ifdef CONSOLE_DPC
2568 			bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
2569 #else
2570 			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
2571 #endif
2572 		}
2573 		break;
2574 #endif /* DHD_DEBUG */
2575 #endif /* !BCMDBUS */
2576 
2577 	case IOV_SVAL(IOV_CLEARCOUNTS):
2578 		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
2579 		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
2580 		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
2581 		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
2582 		dhd_pub->tx_dropped = 0;
2583 		dhd_pub->rx_dropped = 0;
2584 		dhd_pub->tx_pktgetfail = 0;
2585 		dhd_pub->rx_pktgetfail = 0;
2586 		dhd_pub->rx_readahead_cnt = 0;
2587 		dhd_pub->tx_realloc = 0;
2588 		dhd_pub->wd_dpc_sched = 0;
2589 		dhd_pub->tx_big_packets = 0;
2590 		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
2591 		dhd_bus_clearcounts(dhd_pub);
2592 #ifdef PROP_TXSTATUS
2593 		/* clear proptxstatus related counters */
2594 		dhd_wlfc_clear_counts(dhd_pub);
2595 #endif /* PROP_TXSTATUS */
2596 #if defined(DHD_LB_STATS)
2597 		DHD_LB_STATS_RESET(dhd_pub);
2598 #endif /* DHD_LB_STATS */
2599 		break;
2600 
2601 #ifdef BCMPERFSTATS
2602 	case IOV_GVAL(IOV_LOGDUMP): {
2603 		bcmdumplog((char*)arg, len);
2604 		break;
2605 	}
2606 
2607 	case IOV_SVAL(IOV_LOGCAL): {
2608 		bcmlog("Starting OSL_DELAY (%d usecs)", (uint)int_val, 0);
2609 		OSL_DELAY((uint)int_val);
2610 		bcmlog("Finished OSL_DELAY (%d usecs)", (uint)int_val, 0);
2611 		break;
2612 	}
2613 
2614 	case IOV_SVAL(IOV_LOGSTAMP): {
2615 		int int_val2;
2616 
2617 		if (plen >= 2 * sizeof(int)) {
2618 			bcopy((char *)params + sizeof(int_val), &int_val2, sizeof(int_val2));
2619 			bcmlog("User message %d %d", (uint)int_val, (uint)int_val2);
2620 		} else if (plen >= sizeof(int)) {
2621 			bcmlog("User message %d", (uint)int_val, 0);
2622 		} else {
2623 			bcmlog("User message", 0, 0);
2624 		}
2625 		break;
2626 	}
2627 #endif /* BCMPERFSTATS */
2628 
2629 	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
2630 		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
2631 		bcopy(&int_val, arg, sizeof(int_val));
2632 		break;
2633 	}
2634 
2635 	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
2636 		if (int_val <= 0)
2637 			bcmerror = BCME_BADARG;
2638 		else
2639 			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
2640 		break;
2641 	}
2642 
2643 #ifdef PROP_TXSTATUS
2644 	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
2645 		bool wlfc_enab = FALSE;
2646 		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2647 		if (bcmerror != BCME_OK)
2648 			goto exit;
2649 		int_val = wlfc_enab ? 1 : 0;
2650 		bcopy(&int_val, arg, val_size);
2651 		break;
2652 	}
2653 	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
2654 		bool wlfc_enab = FALSE;
2655 		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2656 		if (bcmerror != BCME_OK)
2657 			goto exit;
2658 
2659 		/* wlfc is already set as desired */
2660 		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
2661 			goto exit;
2662 
2663 		if (int_val == TRUE && disable_proptx) {
2664 			disable_proptx = 0;
2665 		}
2666 
2667 		if (int_val == TRUE)
2668 			bcmerror = dhd_wlfc_init(dhd_pub);
2669 		else
2670 			bcmerror = dhd_wlfc_deinit(dhd_pub);
2671 
2672 		break;
2673 	}
2674 	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
2675 		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
2676 		if (bcmerror != BCME_OK)
2677 			goto exit;
2678 		bcopy(&int_val, arg, val_size);
2679 		break;
2680 
2681 	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
2682 		dhd_wlfc_set_mode(dhd_pub, int_val);
2683 		break;
2684 #ifdef QMONITOR
2685 	case IOV_GVAL(IOV_QMON_TIME_THRES): {
2686 		int_val = dhd_qmon_thres(dhd_pub, FALSE, 0);
2687 		bcopy(&int_val, arg, val_size);
2688 		break;
2689 	}
2690 
2691 	case IOV_SVAL(IOV_QMON_TIME_THRES): {
2692 		dhd_qmon_thres(dhd_pub, TRUE, int_val);
2693 		break;
2694 	}
2695 
2696 	case IOV_GVAL(IOV_QMON_TIME_PERCENT): {
2697 		int_val = dhd_qmon_getpercent(dhd_pub);
2698 		bcopy(&int_val, arg, val_size);
2699 		break;
2700 	}
2701 #endif /* QMONITOR */
2702 
2703 	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2704 		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
2705 		if (bcmerror != BCME_OK)
2706 			goto exit;
2707 		bcopy(&int_val, arg, val_size);
2708 		break;
2709 
2710 	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2711 		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
2712 		break;
2713 
2714 	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2715 		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
2716 		if (bcmerror != BCME_OK)
2717 			goto exit;
2718 		bcopy(&int_val, arg, val_size);
2719 		break;
2720 
2721 	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2722 		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
2723 		break;
2724 
2725 	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2726 		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
2727 		if (bcmerror != BCME_OK)
2728 			goto exit;
2729 		bcopy(&int_val, arg, val_size);
2730 		break;
2731 
2732 	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2733 		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
2734 		break;
2735 
2736 	case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2737 		bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
2738 		if (bcmerror != BCME_OK)
2739 			goto exit;
2740 		bcopy(&int_val, arg, val_size);
2741 		break;
2742 
2743 	case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2744 		dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
2745 		break;
2746 
2747 #endif /* PROP_TXSTATUS */
2748 
2749 	case IOV_GVAL(IOV_BUS_TYPE):
2750 		/* The dhd application queries the driver to check if its usb or sdio.  */
2751 #ifdef BCMDBUS
2752 		int_val = BUS_TYPE_USB;
2753 #endif
2754 #ifdef BCMSDIO
2755 		int_val = BUS_TYPE_SDIO;
2756 #endif
2757 #ifdef PCIE_FULL_DONGLE
2758 		int_val = BUS_TYPE_PCIE;
2759 #endif
2760 		bcopy(&int_val, arg, val_size);
2761 		break;
2762 
2763 	case IOV_SVAL(IOV_CHANGEMTU):
2764 		int_val &= 0xffff;
2765 		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
2766 		break;
2767 
2768 	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
2769 	{
2770 		uint i = 0;
2771 		uint8 *ptr = (uint8 *)arg;
2772 		uint8 count = 0;
2773 
2774 		ptr++;
2775 		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
2776 			if (dhd_pub->reorder_bufs[i] != NULL) {
2777 				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
2778 				ptr++;
2779 				count++;
2780 			}
2781 		}
2782 		ptr = (uint8 *)arg;
2783 		*ptr = count;
2784 		break;
2785 	}
2786 #ifdef DHDTCPACK_SUPPRESS
2787 	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
2788 		int_val = (uint32)dhd_pub->tcpack_sup_mode;
2789 		bcopy(&int_val, arg, val_size);
2790 		break;
2791 	}
2792 	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
2793 		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
2794 		break;
2795 	}
2796 #endif /* DHDTCPACK_SUPPRESS */
2797 #ifdef DHD_WMF
2798 	case IOV_GVAL(IOV_WMF_BSS_ENAB): {
2799 		uint32	bssidx;
2800 		dhd_wmf_t *wmf;
2801 		const char *val;
2802 
2803 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2804 			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
2805 			bcmerror = BCME_BADARG;
2806 			break;
2807 		}
2808 
2809 		wmf = dhd_wmf_conf(dhd_pub, bssidx);
2810 		int_val = wmf->wmf_enable ? 1 :0;
2811 		bcopy(&int_val, arg, val_size);
2812 		break;
2813 	}
2814 	case IOV_SVAL(IOV_WMF_BSS_ENAB): {
2815 		/* Enable/Disable WMF */
2816 		uint32	bssidx;
2817 		dhd_wmf_t *wmf;
2818 		const char *val;
2819 
2820 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2821 			DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
2822 			bcmerror = BCME_BADARG;
2823 			break;
2824 		}
2825 
2826 		ASSERT(val);
2827 		bcopy(val, &int_val, sizeof(uint32));
2828 		wmf = dhd_wmf_conf(dhd_pub, bssidx);
2829 		if (wmf->wmf_enable == int_val)
2830 			break;
2831 		if (int_val) {
2832 			/* Enable WMF */
2833 			if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
2834 				DHD_ERROR(("%s: Error in creating WMF instance\n",
2835 				__FUNCTION__));
2836 				break;
2837 			}
2838 			if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
2839 				DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
2840 				break;
2841 			}
2842 			wmf->wmf_enable = TRUE;
2843 		} else {
2844 			/* Disable WMF */
2845 			wmf->wmf_enable = FALSE;
2846 			dhd_wmf_stop(dhd_pub, bssidx);
2847 			dhd_wmf_instance_del(dhd_pub, bssidx);
2848 		}
2849 		break;
2850 	}
2851 	case IOV_GVAL(IOV_WMF_UCAST_IGMP):
2852 		int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
2853 		bcopy(&int_val, arg, val_size);
2854 		break;
2855 	case IOV_SVAL(IOV_WMF_UCAST_IGMP):
2856 		if (dhd_pub->wmf_ucast_igmp == int_val)
2857 			break;
2858 
2859 		if (int_val >= OFF && int_val <= ON)
2860 			dhd_pub->wmf_ucast_igmp = int_val;
2861 		else
2862 			bcmerror = BCME_RANGE;
2863 		break;
2864 	case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
2865 		int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
2866 		bcopy(&int_val, arg, val_size);
2867 		break;
2868 	case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
2869 		dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
2870 		break;
2871 
2872 #ifdef WL_IGMP_UCQUERY
2873 	case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
2874 		int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
2875 		bcopy(&int_val, arg, val_size);
2876 		break;
2877 	case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
2878 		if (dhd_pub->wmf_ucast_igmp_query == int_val)
2879 			break;
2880 
2881 		if (int_val >= OFF && int_val <= ON)
2882 			dhd_pub->wmf_ucast_igmp_query = int_val;
2883 		else
2884 			bcmerror = BCME_RANGE;
2885 		break;
2886 #endif /* WL_IGMP_UCQUERY */
2887 #ifdef DHD_UCAST_UPNP
2888 	case IOV_GVAL(IOV_WMF_UCAST_UPNP):
2889 		int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
2890 		bcopy(&int_val, arg, val_size);
2891 		break;
2892 	case IOV_SVAL(IOV_WMF_UCAST_UPNP):
2893 		if (dhd_pub->wmf_ucast_upnp == int_val)
2894 			break;
2895 
2896 		if (int_val >= OFF && int_val <= ON)
2897 			dhd_pub->wmf_ucast_upnp = int_val;
2898 		else
2899 			bcmerror = BCME_RANGE;
2900 		break;
2901 #endif /* DHD_UCAST_UPNP */
2902 
2903 	case IOV_GVAL(IOV_WMF_PSTA_DISABLE): {
2904 		uint32	bssidx;
2905 		const char *val;
2906 
2907 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2908 			DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
2909 			bcmerror = BCME_BADARG;
2910 			break;
2911 		}
2912 
2913 		int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx);
2914 		bcopy(&int_val, arg, val_size);
2915 		break;
2916 	}
2917 
2918 	case IOV_SVAL(IOV_WMF_PSTA_DISABLE): {
2919 		uint32	bssidx;
2920 		const char *val;
2921 
2922 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2923 			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2924 			bcmerror = BCME_BADARG;
2925 			break;
2926 		}
2927 
2928 		ASSERT(val);
2929 		bcopy(val, &int_val, sizeof(uint32));
2930 		dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val);
2931 		break;
2932 	}
2933 #endif /* DHD_WMF */
2934 
2935 #if defined(BCM_ROUTER_DHD)
2936 	case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): {
2937 			trf_mgmt_filter_list_t   *trf_mgmt_filter_list =
2938 				(trf_mgmt_filter_list_t *)(arg);
2939 			bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len);
2940 		}
2941 		break;
2942 #endif /* BCM_ROUTER_DHD */
2943 
2944 #ifdef DHD_L2_FILTER
2945 	case IOV_GVAL(IOV_DHCP_UNICAST): {
2946 		uint32 bssidx;
2947 		const char *val;
2948 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2949 			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2950 				__FUNCTION__, name));
2951 			bcmerror = BCME_BADARG;
2952 			break;
2953 		}
2954 		int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
2955 		memcpy(arg, &int_val, val_size);
2956 		break;
2957 	}
2958 	case IOV_SVAL(IOV_DHCP_UNICAST): {
2959 		uint32	bssidx;
2960 		const char *val;
2961 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2962 			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2963 				__FUNCTION__, name));
2964 			bcmerror = BCME_BADARG;
2965 			break;
2966 		}
2967 		memcpy(&int_val, val, sizeof(int_val));
2968 		bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
2969 		break;
2970 	}
2971 	case IOV_GVAL(IOV_BLOCK_PING): {
2972 		uint32 bssidx;
2973 		const char *val;
2974 
2975 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2976 			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2977 			bcmerror = BCME_BADARG;
2978 			break;
2979 		}
2980 		int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
2981 		memcpy(arg, &int_val, val_size);
2982 		break;
2983 	}
2984 	case IOV_SVAL(IOV_BLOCK_PING): {
2985 		uint32	bssidx;
2986 		const char *val;
2987 
2988 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2989 			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2990 			bcmerror = BCME_BADARG;
2991 			break;
2992 		}
2993 		memcpy(&int_val, val, sizeof(int_val));
2994 		bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
2995 		break;
2996 	}
2997 	case IOV_GVAL(IOV_PROXY_ARP): {
2998 		uint32	bssidx;
2999 		const char *val;
3000 
3001 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3002 			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
3003 			bcmerror = BCME_BADARG;
3004 			break;
3005 		}
3006 		int_val = dhd_get_parp_status(dhd_pub, bssidx);
3007 		bcopy(&int_val, arg, val_size);
3008 		break;
3009 	}
3010 	case IOV_SVAL(IOV_PROXY_ARP): {
3011 		uint32	bssidx;
3012 		const char *val;
3013 
3014 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3015 			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
3016 			bcmerror = BCME_BADARG;
3017 			break;
3018 		}
3019 		bcopy(val, &int_val, sizeof(int_val));
3020 
3021 		/* Issue a iovar request to WL to update the proxy arp capability bit
3022 		 * in the Extended Capability IE of beacons/probe responses.
3023 		 */
3024 		bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
3025 				NULL, 0, TRUE);
3026 		if (bcmerror == BCME_OK) {
3027 			dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
3028 		}
3029 		break;
3030 	}
3031 	case IOV_GVAL(IOV_GRAT_ARP): {
3032 		uint32 bssidx;
3033 		const char *val;
3034 
3035 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3036 			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
3037 			bcmerror = BCME_BADARG;
3038 			break;
3039 		}
3040 		int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
3041 		memcpy(arg, &int_val, val_size);
3042 		break;
3043 	}
3044 	case IOV_SVAL(IOV_GRAT_ARP): {
3045 		uint32	bssidx;
3046 		const char *val;
3047 
3048 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3049 			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
3050 			bcmerror = BCME_BADARG;
3051 			break;
3052 		}
3053 		memcpy(&int_val, val, sizeof(int_val));
3054 		bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
3055 		break;
3056 	}
3057 	case IOV_GVAL(IOV_BLOCK_TDLS): {
3058 		uint32 bssidx;
3059 		const char *val;
3060 
3061 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3062 			DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
3063 			bcmerror = BCME_BADARG;
3064 			break;
3065 		}
3066 		int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
3067 		memcpy(arg, &int_val, val_size);
3068 		break;
3069 	}
3070 	case IOV_SVAL(IOV_BLOCK_TDLS): {
3071 		uint32	bssidx;
3072 		const char *val;
3073 
3074 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3075 			DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
3076 			bcmerror = BCME_BADARG;
3077 			break;
3078 		}
3079 		memcpy(&int_val, val, sizeof(int_val));
3080 		bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
3081 		break;
3082 	}
3083 #endif /* DHD_L2_FILTER */
3084 	case IOV_SVAL(IOV_DHD_IE): {
3085 		uint32	bssidx;
3086 		const char *val;
3087 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
3088 		uint8 ie_type;
3089 		bcm_tlv_t *qos_map_ie = NULL;
3090 		ie_setbuf_t *ie_getbufp = (ie_setbuf_t *)(arg+4);
3091 		ie_type = ie_getbufp->ie_buffer.ie_list[0].ie_data.id;
3092 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
3093 
3094 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3095 			DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
3096 			bcmerror = BCME_BADARG;
3097 			break;
3098 		}
3099 
3100 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
3101 		qos_map_ie = (bcm_tlv_t *)(&(ie_getbufp->ie_buffer.ie_list[0].ie_data));
3102 		if (qos_map_ie != NULL && (ie_type == DOT11_MNG_QOS_MAP_ID)) {
3103 				bcmerror = dhd_set_qosmap_up_table(dhd_pub, bssidx, qos_map_ie);
3104 		}
3105 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
3106 		break;
3107 	}
3108 	case IOV_GVAL(IOV_AP_ISOLATE): {
3109 		uint32	bssidx;
3110 		const char *val;
3111 
3112 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3113 			DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
3114 			bcmerror = BCME_BADARG;
3115 			break;
3116 		}
3117 
3118 		int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
3119 		bcopy(&int_val, arg, val_size);
3120 		break;
3121 	}
3122 	case IOV_SVAL(IOV_AP_ISOLATE): {
3123 		uint32	bssidx;
3124 		const char *val;
3125 
3126 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3127 			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
3128 			bcmerror = BCME_BADARG;
3129 			break;
3130 		}
3131 
3132 		ASSERT(val);
3133 		bcopy(val, &int_val, sizeof(uint32));
3134 		dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
3135 		break;
3136 	}
3137 #ifdef DHD_PSTA
3138 	case IOV_GVAL(IOV_PSTA): {
3139 		int_val = dhd_get_psta_mode(dhd_pub);
3140 		bcopy(&int_val, arg, val_size);
3141 		break;
3142 		}
3143 	case IOV_SVAL(IOV_PSTA): {
3144 		if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
3145 			dhd_set_psta_mode(dhd_pub, int_val);
3146 		} else {
3147 			bcmerror = BCME_RANGE;
3148 		}
3149 		break;
3150 		}
3151 #endif /* DHD_PSTA */
3152 #ifdef DHD_WET
3153 	case IOV_GVAL(IOV_WET):
3154 		 int_val = dhd_get_wet_mode(dhd_pub);
3155 		 bcopy(&int_val, arg, val_size);
3156 		 break;
3157 
3158 	case IOV_SVAL(IOV_WET):
3159 		 if (int_val == 0 || int_val == 1) {
3160 			 dhd_set_wet_mode(dhd_pub, int_val);
3161 			 /* Delete the WET DB when disabled */
3162 			 if (!int_val) {
3163 				 dhd_wet_sta_delete_list(dhd_pub);
3164 			 }
3165 		 } else {
3166 			 bcmerror = BCME_RANGE;
3167 		 }
3168 				 break;
3169 	case IOV_SVAL(IOV_WET_HOST_IPV4):
3170 			dhd_set_wet_host_ipv4(dhd_pub, params, plen);
3171 			break;
3172 	case IOV_SVAL(IOV_WET_HOST_MAC):
3173 			dhd_set_wet_host_mac(dhd_pub, params, plen);
3174 		break;
3175 #endif /* DHD_WET */
3176 #ifdef DHD_MCAST_REGEN
3177 	case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
3178 		uint32	bssidx;
3179 		const char *val;
3180 
3181 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3182 			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
3183 			bcmerror = BCME_BADARG;
3184 			break;
3185 		}
3186 
3187 		int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
3188 		bcopy(&int_val, arg, val_size);
3189 		break;
3190 	}
3191 
3192 	case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
3193 		uint32	bssidx;
3194 		const char *val;
3195 
3196 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3197 			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
3198 			bcmerror = BCME_BADARG;
3199 			break;
3200 		}
3201 
3202 		ASSERT(val);
3203 		bcopy(val, &int_val, sizeof(uint32));
3204 		dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
3205 		break;
3206 	}
3207 #endif /* DHD_MCAST_REGEN */
3208 
3209 	case IOV_GVAL(IOV_CFG80211_OPMODE): {
3210 		int_val = (int32)dhd_pub->op_mode;
3211 		bcopy(&int_val, arg, sizeof(int_val));
3212 		break;
3213 		}
3214 	case IOV_SVAL(IOV_CFG80211_OPMODE): {
3215 		if (int_val <= 0)
3216 			bcmerror = BCME_BADARG;
3217 		else
3218 			dhd_pub->op_mode = int_val;
3219 		break;
3220 	}
3221 
3222 	case IOV_GVAL(IOV_ASSERT_TYPE):
3223 		int_val = g_assert_type;
3224 		bcopy(&int_val, arg, val_size);
3225 		break;
3226 
3227 	case IOV_SVAL(IOV_ASSERT_TYPE):
3228 		g_assert_type = (uint32)int_val;
3229 		break;
3230 
3231 #if defined(NDIS)
3232 	case IOV_GVAL(IOV_WAKEIND):
3233 		dhd_os_wakeind(dhd_pub, &int_val);
3234 		bcopy(&int_val, arg, val_size);
3235 		break;
3236 #endif /* NDIS */
3237 
3238 #if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
3239 	case IOV_GVAL(IOV_LMTEST): {
3240 		*(uint32 *)arg = (uint32)lmtest;
3241 		break;
3242 	}
3243 
3244 	case IOV_SVAL(IOV_LMTEST): {
3245 		uint32 val = *(uint32 *)arg;
3246 		if (val > 50)
3247 			bcmerror = BCME_BADARG;
3248 		else {
3249 			lmtest = (uint)val;
3250 			DHD_ERROR(("%s: lmtest %s\n",
3251 				__FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
3252 		}
3253 		break;
3254 	}
3255 #endif /* !NDIS && !BCM_ROUTER_DHD */
3256 #ifdef BCMDBG
3257 	case IOV_GVAL(IOV_MACDBG_PD11REGS):
3258 		bcmerror = dhd_macdbg_pd11regs(dhd_pub, params, plen, arg, len);
3259 		break;
3260 	case IOV_GVAL(IOV_MACDBG_REGLIST):
3261 		bcmerror = dhd_macdbg_reglist(dhd_pub, arg, len);
3262 		break;
3263 	case IOV_GVAL(IOV_MACDBG_PSVMPMEMS):
3264 		bcmerror = dhd_macdbg_psvmpmems(dhd_pub, params, plen, arg, len);
3265 		break;
3266 #endif /* BCMDBG */
3267 
3268 #ifdef SHOW_LOGTRACE
3269 	case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
3270 		trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
3271 		dhd_dbg_ring_t *dbg_verbose_ring = NULL;
3272 
3273 		dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
3274 		if (dbg_verbose_ring == NULL) {
3275 			DHD_ERROR(("dbg_verbose_ring is NULL\n"));
3276 			bcmerror = BCME_UNSUPPORTED;
3277 			break;
3278 		}
3279 
3280 		if (trace_buf_info != NULL) {
3281 			bzero(trace_buf_info, sizeof(trace_buf_info_t));
3282 			dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
3283 		} else {
3284 			DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
3285 			bcmerror = BCME_NOMEM;
3286 		}
3287 		break;
3288 	}
3289 #endif /* SHOW_LOGTRACE */
3290 #ifdef BTLOG
3291 	case IOV_GVAL(IOV_DUMP_BT_LOG): {
3292 		bt_log_buf_info_t *bt_log_buf_info = (bt_log_buf_info_t *)arg;
3293 		uint32 rlen;
3294 
3295 		rlen = dhd_dbg_pull_single_from_ring(dhd_pub, BT_LOG_RING_ID, bt_log_buf_info->buf,
3296 			BT_LOG_BUF_MAX_SIZE, TRUE);
3297 		bt_log_buf_info->size = rlen;
3298 		bt_log_buf_info->availability = BT_LOG_NEXT_BUF_NOT_AVAIL;
3299 		if (rlen == 0) {
3300 			bt_log_buf_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
3301 		} else {
3302 			dhd_dbg_ring_status_t ring_status;
3303 			dhd_dbg_get_ring_status(dhd_pub, BT_LOG_RING_ID, &ring_status);
3304 			if (ring_status.written_bytes != ring_status.read_bytes) {
3305 				bt_log_buf_info->availability = BT_LOG_NEXT_BUF_AVAIL;
3306 			}
3307 		}
3308 		break;
3309 	}
3310 	case IOV_GVAL(IOV_BTLOG):
3311 	{
3312 		uint32 btlog_val = dhd_pub->bt_logging_enabled ? 1 : 0;
3313 		bcopy(&btlog_val, arg, val_size);
3314 	}
3315 		break;
3316 	case IOV_SVAL(IOV_BTLOG):
3317 	{
3318 		if (dhd_pub->busstate != DHD_BUS_DOWN) {
3319 			DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3320 				__FUNCTION__));
3321 			bcmerror = BCME_NOTDOWN;
3322 			break;
3323 		}
3324 		if (int_val)
3325 			dhd_pub->bt_logging_enabled = TRUE;
3326 		else
3327 			dhd_pub->bt_logging_enabled = FALSE;
3328 	}
3329 		break;
3330 
3331 #endif	/* BTLOG */
3332 #ifdef SNAPSHOT_UPLOAD
3333 	case IOV_SVAL(IOV_BT_MEM_DUMP): {
3334 		dhd_prot_send_snapshot_request(dhd_pub, SNAPSHOT_TYPE_BT, int_val);
3335 		break;
3336 	}
3337 	case IOV_GVAL(IOV_BT_UPLOAD): {
3338 		int status;
3339 		bt_mem_req_t req;
3340 		bt_log_buf_info_t *mem_info = (bt_log_buf_info_t *)arg;
3341 		uint32 size;
3342 		bool is_more;
3343 
3344 		memcpy(&req, params, sizeof(req));
3345 
3346 		status = dhd_prot_get_snapshot(dhd_pub, SNAPSHOT_TYPE_BT, req.offset,
3347 			req.buf_size, mem_info->buf, &size, &is_more);
3348 		if (status == BCME_OK) {
3349 			mem_info->size = size;
3350 			mem_info->availability = is_more ?
3351 				BT_LOG_NEXT_BUF_AVAIL : BT_LOG_NEXT_BUF_NOT_AVAIL;
3352 		} else if (status == BCME_NOTREADY) {
3353 			mem_info->size = 0;
3354 			mem_info->availability = BT_LOG_NOT_READY;
3355 		} else {
3356 			mem_info->size = 0;
3357 			mem_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
3358 		}
3359 		break;
3360 	}
3361 #endif	/* SNAPSHOT_UPLOAD */
3362 #ifdef REPORT_FATAL_TIMEOUTS
3363 	case IOV_GVAL(IOV_SCAN_TO): {
3364 		dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val);
3365 		bcopy(&int_val, arg, val_size);
3366 		break;
3367 	}
3368 	case IOV_SVAL(IOV_SCAN_TO): {
3369 		dhd_set_scan_to_val(dhd_pub, (uint32)int_val);
3370 		break;
3371 	}
3372 	case IOV_GVAL(IOV_JOIN_TO): {
3373 		dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val);
3374 		bcopy(&int_val, arg, val_size);
3375 		break;
3376 	}
3377 	case IOV_SVAL(IOV_JOIN_TO): {
3378 		dhd_set_join_to_val(dhd_pub, (uint32)int_val);
3379 		break;
3380 	}
3381 	case IOV_GVAL(IOV_CMD_TO): {
3382 		dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val);
3383 		bcopy(&int_val, arg, val_size);
3384 		break;
3385 	}
3386 	case IOV_SVAL(IOV_CMD_TO): {
3387 		dhd_set_cmd_to_val(dhd_pub, (uint32)int_val);
3388 		break;
3389 	}
3390 	case IOV_GVAL(IOV_OQS_TO): {
3391 		dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val);
3392 		bcopy(&int_val, arg, val_size);
3393 		break;
3394 	}
3395 	case IOV_SVAL(IOV_OQS_TO): {
3396 		dhd_set_bus_to_val(dhd_pub, (uint32)int_val);
3397 		break;
3398 	}
3399 #endif /* REPORT_FATAL_TIMEOUTS */
3400 	case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
3401 		if (dhd_pub->dongle_trap_occured)
3402 			int_val = ltoh32(dhd_pub->last_trap_info.type);
3403 		else
3404 			int_val = 0;
3405 		bcopy(&int_val, arg, val_size);
3406 		break;
3407 
3408 	case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
3409 	{
3410 		struct bcmstrbuf strbuf;
3411 		bcm_binit(&strbuf, arg, len);
3412 		if (dhd_pub->dongle_trap_occured == FALSE) {
3413 			bcm_bprintf(&strbuf, "no trap recorded\n");
3414 			break;
3415 		}
3416 #ifndef BCMDBUS
3417 		dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
3418 #endif /* BCMDBUS */
3419 		break;
3420 	}
3421 #ifdef DHD_DEBUG
3422 #if defined(BCMSDIO) || defined(BCMPCIE)
3423 
3424 	case IOV_GVAL(IOV_BPADDR):
3425 		{
3426 			sdreg_t sdreg;
3427 			uint32 addr, size;
3428 
3429 			memcpy(&sdreg, params, sizeof(sdreg));
3430 
3431 			addr = sdreg.offset;
3432 			size = sdreg.func;
3433 
3434 			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
3435 				(uint *)&int_val, TRUE);
3436 
3437 			memcpy(arg, &int_val, sizeof(int32));
3438 
3439 			break;
3440 		}
3441 
3442 	case IOV_SVAL(IOV_BPADDR):
3443 		{
3444 			sdreg_t sdreg;
3445 			uint32 addr, size;
3446 
3447 			memcpy(&sdreg, params, sizeof(sdreg));
3448 
3449 			addr = sdreg.offset;
3450 			size = sdreg.func;
3451 
3452 			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
3453 				(uint *)&sdreg.value,
3454 				FALSE);
3455 
3456 			break;
3457 		}
3458 #endif /* BCMSDIO || BCMPCIE */
3459 #ifdef BCMPCIE
3460 	case IOV_SVAL(IOV_FLOW_RING_DEBUG):
3461 		{
3462 			bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
3463 			break;
3464 		}
3465 #endif /* BCMPCIE */
3466 	case IOV_SVAL(IOV_MEM_DEBUG):
3467 		if (len > 0) {
3468 			bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
3469 		}
3470 		break;
3471 #endif /* DHD_DEBUG */
3472 #if defined(DHD_LOG_DUMP)
3473 #if defined(DHD_EFI)
3474 	case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE):
3475 		{
3476 			int_val = dhd_pub->log_capture_enable;
3477 			bcopy(&int_val, arg, val_size);
3478 			break;
3479 		}
3480 
3481 	case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE):
3482 		{
3483 			dhd_pub->log_capture_enable = (uint8)int_val;
3484 			break;
3485 		}
3486 #endif /* DHD_EFI */
3487 	case IOV_GVAL(IOV_LOG_DUMP):
3488 		{
3489 			dhd_prot_debug_info_print(dhd_pub);
3490 			dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
3491 			break;
3492 		}
3493 #endif /* DHD_LOG_DUMP */
3494 
3495 	case IOV_GVAL(IOV_TPUT_TEST):
3496 		{
3497 			tput_test_t *tput_data = NULL;
3498 			if (params && plen >= sizeof(tput_test_t)) {
3499 				tput_data = (tput_test_t *)params;
3500 				bcmerror = dhd_tput_test(dhd_pub, tput_data);
3501 			} else {
3502 				DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__));
3503 				bcmerror = BCME_BADARG;
3504 			}
3505 			break;
3506 		}
3507 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
3508 	case IOV_SVAL(IOV_PKT_LATENCY):
3509 		dhd_pub->pkt_latency = (uint32)int_val;
3510 		break;
3511 	case IOV_GVAL(IOV_PKT_LATENCY):
3512 		int_val = (int32)dhd_pub->pkt_latency;
3513 		bcopy(&int_val, arg, val_size);
3514 		break;
3515 #endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)  */
3516 	case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
3517 		{
3518 			if (dhd_pub->debug_buf_dest_support) {
3519 				debug_buf_dest_stat_t *debug_buf_dest_stat =
3520 					(debug_buf_dest_stat_t *)arg;
3521 				memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
3522 					sizeof(dhd_pub->debug_buf_dest_stat));
3523 			} else {
3524 				bcmerror = BCME_DISABLED;
3525 			}
3526 			break;
3527 		}
3528 
3529 #ifdef DHD_PKTTS
3530 	case IOV_GVAL(IOV_PKTTS_ENAB): {
3531 		int_val = dhd_get_pktts_enab(dhd_pub);
3532 		(void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3533 		break;
3534 	}
3535 	case IOV_SVAL(IOV_PKTTS_ENAB): {
3536 		dhd_set_pktts_enab(dhd_pub, !!int_val);
3537 		break;
3538 	}
3539 
3540 	case IOV_GVAL(IOV_PKTTS_FLOW): {
3541 		bcmerror = dhd_get_pktts_flow(dhd_pub, arg, len);
3542 		break;
3543 	}
3544 	case IOV_SVAL(IOV_PKTTS_FLOW): {
3545 		bcmerror = dhd_set_pktts_flow(dhd_pub, params, plen);
3546 		break;
3547 	}
3548 #endif /* DHD_PKTTS */
3549 
3550 #if defined(DHD_EFI)
3551 	case IOV_SVAL(IOV_INTR_POLL):
3552 		bcmerror = dhd_intr_poll(dhd_pub->bus, arg, len, TRUE);
3553 		break;
3554 
3555 	case IOV_GVAL(IOV_INTR_POLL):
3556 		bcmerror = dhd_intr_poll(dhd_pub->bus, params, plen, FALSE);
3557 		break;
3558 #endif /* DHD_EFI */
3559 
3560 #if defined(DHD_SSSR_DUMP)
3561 	case IOV_GVAL(IOV_FIS_TRIGGER):
3562 		bcmerror = dhd_bus_fis_trigger(dhd_pub);
3563 
3564 		if (bcmerror == BCME_OK) {
3565 			bcmerror = dhd_bus_fis_dump(dhd_pub);
3566 		}
3567 
3568 		int_val = bcmerror;
3569 		bcopy(&int_val, arg, val_size);
3570 		break;
3571 #endif /* defined(DHD_SSSR_DUMP) */
3572 
3573 #ifdef DHD_DEBUG
3574 	case IOV_SVAL(IOV_INDUCE_ERROR): {
3575 		if (int_val >= DHD_INDUCE_ERROR_MAX) {
3576 			DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
3577 		} else {
3578 			dhd_pub->dhd_induce_error = (uint16)int_val;
3579 #ifdef BCMPCIE
3580 			if (dhd_pub->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
3581 				dhdpcie_induce_cbp_hang(dhd_pub);
3582 			}
3583 #endif /* BCMPCIE */
3584 		}
3585 		break;
3586 	}
3587 #endif /* DHD_DEBUG */
3588 #ifdef WL_IFACE_MGMT_CONF
3589 #ifdef WL_CFG80211
3590 #ifdef WL_NANP2P
3591 	case IOV_GVAL(IOV_CONC_DISC): {
3592 		int_val = wl_cfg80211_get_iface_conc_disc(
3593 			dhd_linux_get_primary_netdev(dhd_pub));
3594 		bcopy(&int_val, arg, sizeof(int_val));
3595 		break;
3596 	}
3597 	case IOV_SVAL(IOV_CONC_DISC): {
3598 		bcmerror = wl_cfg80211_set_iface_conc_disc(
3599 			dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
3600 		break;
3601 	}
3602 #endif /* WL_NANP2P */
3603 #ifdef WL_IFACE_MGMT
3604 	case IOV_GVAL(IOV_IFACE_POLICY): {
3605 		int_val = wl_cfg80211_get_iface_policy(
3606 		dhd_linux_get_primary_netdev(dhd_pub));
3607 		bcopy(&int_val, arg, sizeof(int_val));
3608 		break;
3609 	}
3610 	case IOV_SVAL(IOV_IFACE_POLICY): {
3611 		bcmerror = wl_cfg80211_set_iface_policy(
3612 			dhd_linux_get_primary_netdev(dhd_pub),
3613 			arg, len);
3614 		break;
3615 	}
3616 #endif /* WL_IFACE_MGMT */
3617 #endif /* WL_CFG80211 */
3618 #endif /* WL_IFACE_MGMT_CONF */
3619 #ifdef RTT_GEOFENCE_CONT
3620 #if defined (RTT_SUPPORT) && defined (WL_NAN)
3621 	case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
3622 		bool enable = 0;
3623 		dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
3624 		int_val = enable ? 1 : 0;
3625 		bcopy(&int_val, arg, val_size);
3626 		break;
3627 	}
3628 	case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
3629 		bool enable = *(bool *)arg;
3630 		dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
3631 		break;
3632 	}
3633 #endif /* RTT_SUPPORT && WL_NAN */
3634 #endif /* RTT_GEOFENCE_CONT */
3635 	case IOV_GVAL(IOV_FW_VBS): {
3636 		*(uint32 *)arg = (uint32)dhd_dbg_get_fwverbose(dhd_pub);
3637 		break;
3638 	}
3639 
3640 	case IOV_SVAL(IOV_FW_VBS): {
3641 		if (int_val < 0) {
3642 			int_val = 0;
3643 		}
3644 		dhd_dbg_set_fwverbose(dhd_pub, (uint32)int_val);
3645 		break;
3646 	}
3647 
3648 #ifdef DHD_TX_PROFILE
3649 	case IOV_SVAL(IOV_TX_PROFILE_TAG):
3650 	{
3651 		/* note: under the current implementation only one type of packet may be
3652 		 * tagged per profile
3653 		 */
3654 		const dhd_tx_profile_protocol_t *protocol = NULL;
3655 		/* for example, we might have a profile of profile_index 6, but at
3656 		 * offset 2 from dhd_pub->protocol_filters.
3657 		 */
3658 		uint8 offset;
3659 
3660 		if (params == NULL) {
3661 			bcmerror = BCME_ERROR;
3662 			break;
3663 		}
3664 
3665 		protocol = (dhd_tx_profile_protocol_t *)params;
3666 
3667 		/* validate */
3668 		if (protocol->version != DHD_TX_PROFILE_VERSION) {
3669 			bcmerror = BCME_VERSION;
3670 			break;
3671 		}
3672 		if (protocol->profile_index > DHD_MAX_PROFILE_INDEX) {
3673 			DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n",
3674 					__FUNCTION__, DHD_MAX_PROFILE_INDEX));
3675 			bcmerror = BCME_RANGE;
3676 			break;
3677 		}
3678 		if (protocol->layer != DHD_TX_PROFILE_DATA_LINK_LAYER && protocol->layer
3679 				!= DHD_TX_PROFILE_NETWORK_LAYER) {
3680 			DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__,
3681 					DHD_TX_PROFILE_DATA_LINK_LAYER,
3682 					DHD_TX_PROFILE_NETWORK_LAYER));
3683 			bcmerror = BCME_BADARG;
3684 			break;
3685 		}
3686 		if (protocol->protocol_number > __UINT16_MAX__) {
3687 			DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__,
3688 					__UINT16_MAX__));
3689 			bcmerror = BCME_BADLEN;
3690 			break;
3691 		}
3692 
3693 		/* find the dhd_tx_profile_protocol_t */
3694 		for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
3695 			if (dhd_pub->protocol_filters[offset].profile_index ==
3696 					protocol->profile_index) {
3697 				break;
3698 			}
3699 		}
3700 
3701 		if (offset >= DHD_MAX_PROFILES) {
3702 #if DHD_MAX_PROFILES > 1
3703 			DHD_ERROR(("%s:\tonly %d profiles supported at present\n",
3704 					__FUNCTION__, DHD_MAX_PROFILES));
3705 #else /* DHD_MAX_PROFILES > 1 */
3706 			DHD_ERROR(("%s:\tonly %d profile supported at present\n",
3707 					__FUNCTION__, DHD_MAX_PROFILES));
3708 			DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__,
3709 					dhd_pub->protocol_filters->profile_index));
3710 #endif /* DHD_MAX_PROFILES > 1 */
3711 			bcmerror = BCME_NOMEM;
3712 			break;
3713 		}
3714 
3715 		/* memory already allocated in dhd_attach; just assign the value */
3716 		dhd_pub->protocol_filters[offset] = *protocol;
3717 
3718 		if (offset >= dhd_pub->num_profiles) {
3719 			dhd_pub->num_profiles = offset + 1;
3720 		}
3721 
3722 		break;
3723 	}
3724 
3725 	case IOV_SVAL(IOV_TX_PROFILE_ENABLE):
3726 		dhd_pub->tx_profile_enab = int_val ? TRUE : FALSE;
3727 		break;
3728 
3729 	case IOV_GVAL(IOV_TX_PROFILE_ENABLE):
3730 		int_val = dhd_pub->tx_profile_enab;
3731 		bcmerror = memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3732 		break;
3733 
3734 	case IOV_SVAL(IOV_TX_PROFILE_DUMP):
3735 	{
3736 		const dhd_tx_profile_protocol_t *protocol = NULL;
3737 		uint8 offset;
3738 		char *format = "%s:\ttx_profile %s: %d\n";
3739 
3740 		for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
3741 			if (dhd_pub->protocol_filters[offset].profile_index == int_val) {
3742 				protocol = &(dhd_pub->protocol_filters[offset]);
3743 				break;
3744 			}
3745 		}
3746 
3747 		if (protocol == NULL) {
3748 			DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__,
3749 					int_val));
3750 			bcmerror = BCME_ERROR;
3751 			break;
3752 		}
3753 
3754 		printf(format, __FUNCTION__, "profile_index", protocol->profile_index);
3755 		printf(format, __FUNCTION__, "layer", protocol->layer);
3756 		printf(format, __FUNCTION__, "protocol_number", protocol->protocol_number);
3757 		printf(format, __FUNCTION__, "src_port", protocol->src_port);
3758 		printf(format, __FUNCTION__, "dest_port", protocol->dest_port);
3759 
3760 		break;
3761 	}
3762 #endif /* defined(DHD_TX_PROFILE) */
3763 
3764 	case IOV_GVAL(IOV_CHECK_TRAP_ROT): {
3765 		int_val = dhd_pub->check_trap_rot? 1 : 0;
3766 		(void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3767 		break;
3768 	}
3769 	case IOV_SVAL(IOV_CHECK_TRAP_ROT): {
3770 		dhd_pub->check_trap_rot = *(bool *)arg;
3771 		break;
3772 	}
3773 
3774 #if defined(DHD_AWDL)
3775 	case IOV_SVAL(IOV_AWDL_LLC_ENABLE): {
3776 		bool bval = *(bool *)arg;
3777 		if (bval != 0 && bval != 1)
3778 			bcmerror = BCME_ERROR;
3779 		else
3780 			dhd_pub->awdl_llc_enabled = bval;
3781 		break;
3782 	}
3783 	case IOV_GVAL(IOV_AWDL_LLC_ENABLE):
3784 		int_val = dhd_pub->awdl_llc_enabled;
3785 		(void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3786 		break;
3787 #endif
3788 #ifdef WLEASYMESH
3789 	case IOV_SVAL(IOV_1905_AL_UCAST): {
3790 		uint32  bssidx;
3791 		const char *val;
3792 		uint8 ea[6] = {0};
3793 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3794 			DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
3795 			bcmerror = BCME_BADARG;
3796 			break;
3797 		}
3798 		bcopy(val, ea, ETHER_ADDR_LEN);
3799 		printf("IOV_1905_AL_UCAST:" MACDBG "\n", MAC2STRDBG(ea));
3800 		bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, FALSE);
3801 		break;
3802 	}
3803 	case IOV_GVAL(IOV_1905_AL_UCAST): {
3804 		uint32  bssidx;
3805 		const char *val;
3806 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3807 			DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
3808 			bcmerror = BCME_BADARG;
3809 			break;
3810 		}
3811 
3812 		bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, FALSE);
3813 		break;
3814 	}
3815 	case IOV_SVAL(IOV_1905_AL_MCAST): {
3816 		uint32  bssidx;
3817 		const char *val;
3818 		uint8 ea[6] = {0};
3819 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3820 			DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
3821 			bcmerror = BCME_BADARG;
3822 			break;
3823 		}
3824 		bcopy(val, ea, ETHER_ADDR_LEN);
3825 		printf("IOV_1905_AL_MCAST:" MACDBG "\n", MAC2STRDBG(ea));
3826 		bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, TRUE);
3827 		break;
3828 	}
3829 	case IOV_GVAL(IOV_1905_AL_MCAST): {
3830 		uint32  bssidx;
3831 		const char *val;
3832 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3833 			DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
3834 			bcmerror = BCME_BADARG;
3835 			break;
3836 		}
3837 
3838 		bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, TRUE);
3839 		break;
3840 	}
3841 #endif /* WLEASYMESH */
3842 
3843 	default:
3844 		bcmerror = BCME_UNSUPPORTED;
3845 		break;
3846 	}
3847 
3848 exit:
3849 	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
3850 	return bcmerror;
3851 }
3852 
3853 #ifdef BCMDONGLEHOST
3854 /* Store the status of a connection attempt for later retrieval by an iovar */
3855 void
dhd_store_conn_status(uint32 event,uint32 status,uint32 reason)3856 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
3857 {
3858 	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
3859 	 * because an encryption/rsn mismatch results in both events, and
3860 	 * the important information is in the WLC_E_PRUNE.
3861 	 */
3862 	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
3863 	      dhd_conn_event == WLC_E_PRUNE)) {
3864 		dhd_conn_event = event;
3865 		dhd_conn_status = status;
3866 		dhd_conn_reason = reason;
3867 	}
3868 }
3869 #else
3870 #error "BCMDONGLEHOST not defined"
3871 #endif /* BCMDONGLEHOST */
3872 
3873 bool
dhd_prec_enq(dhd_pub_t * dhdp,struct pktq * q,void * pkt,int prec)3874 dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
3875 {
3876 	void *p;
3877 	int eprec = -1;		/* precedence to evict from */
3878 	bool discard_oldest;
3879 
3880 	/* Fast case, precedence queue is not full and we are also not
3881 	 * exceeding total queue length
3882 	 */
3883 	if (!pktqprec_full(q, prec) && !pktq_full(q)) {
3884 		pktq_penq(q, prec, pkt);
3885 		return TRUE;
3886 	}
3887 
3888 	/* Determine precedence from which to evict packet, if any */
3889 	if (pktqprec_full(q, prec))
3890 		eprec = prec;
3891 	else if (pktq_full(q)) {
3892 		p = pktq_peek_tail(q, &eprec);
3893 		ASSERT(p);
3894 		if (eprec > prec || eprec < 0)
3895 			return FALSE;
3896 	}
3897 
3898 	/* Evict if needed */
3899 	if (eprec >= 0) {
3900 		/* Detect queueing to unconfigured precedence */
3901 		ASSERT(!pktqprec_empty(q, eprec));
3902 		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
3903 		if (eprec == prec && !discard_oldest)
3904 			return FALSE;		/* refuse newer (incoming) packet */
3905 		/* Evict packet according to discard policy */
3906 		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
3907 		ASSERT(p);
3908 #ifdef DHDTCPACK_SUPPRESS
3909 		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
3910 			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
3911 				__FUNCTION__, __LINE__));
3912 			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
3913 		}
3914 #endif /* DHDTCPACK_SUPPRESS */
3915 		PKTFREE(dhdp->osh, p, TRUE);
3916 	}
3917 
3918 	/* Enqueue */
3919 	p = pktq_penq(q, prec, pkt);
3920 	ASSERT(p);
3921 
3922 	return TRUE;
3923 }
3924 
/*
 * Functions to drop proper pkts from queue:
 *	If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
 *	If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
 *	If can't find pkts matching upper 2 cases, drop first pkt anyway
 *
 * 'fn', when non-NULL, is invoked for each dropped packet (accounting /
 * freeing is the callback's responsibility). Returns TRUE when at least
 * one packet was removed, FALSE only when the precedence queue is empty.
 */
bool
dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
{
	struct pktq_prec *q = NULL;
	/* [first, last] bounds the run of packets to drop; prev_first is the
	 * packet linked just before 'first' (NULL when 'first' is the head)
	 */
	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
	pkt_frag_t frag_info;

	ASSERT(dhdp && pq);
	ASSERT(prec >= 0 && prec < pq->num_prec);

	q = &pq->q[prec];
	p = q->head;

	if (p == NULL)
		return FALSE;

	/* Scan for either a non-fragmented packet or a complete
	 * FIRST..LAST fragment set.
	 */
	while (p) {
		frag_info = pkt_frag_info(dhdp->osh, p);
		if (frag_info == DHD_PKT_FRAG_NONE) {
			break;
		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
			if (first) {
				/* No last frag pkt, use prev as last */
				last = prev;
				break;
			} else {
				first = p;
				prev_first = prev;
			}
		} else if (frag_info == DHD_PKT_FRAG_LAST) {
			if (first) {
				last = p;
				break;
			}
		}

		prev = p;
		p = PKTLINK(p);
	}

	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
		/* Not found matching pkts, use oldest */
		prev = NULL;
		p = q->head;
		/* NOTE(review): assumes DHD_PKT_FRAG_NONE == 0 — confirm in dhd_ip.h */
		frag_info = 0;
	}

	if (frag_info == DHD_PKT_FRAG_NONE) {
		/* single-packet run: drop just 'p' */
		first = last = p;
		prev_first = prev;
	}

	/* Unlink and drop every packet from 'first' through 'last' */
	p = first;
	while (p) {
		next = PKTLINK(p);
		q->n_pkts--;
		pq->n_pkts_tot--;

#ifdef WL_TXQ_STALL
		q->dequeue_count++;
#endif

		PKTSETLINK(p, NULL);

		/* the callback owns (and frees) the dropped packet */
		if (fn)
			fn(dhdp, prec, p, TRUE);

		if (p == last)
			break;

		p = next;
	}

	/* Stitch the queue back together around the removed run;
	 * 'next' is the packet that followed 'last' (possibly NULL).
	 */
	if (prev_first == NULL) {
		if ((q->head = next) == NULL)
			q->tail = NULL;
	} else {
		PKTSETLINK(prev_first, next);
		if (!next)
			q->tail = prev_first;
	}

	return TRUE;
}
4015 
4016 static int
dhd_iovar_op(dhd_pub_t * dhd_pub,const char * name,void * params,int plen,void * arg,uint len,bool set)4017 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
4018 	void *params, int plen, void *arg, uint len, bool set)
4019 {
4020 	int bcmerror = 0;
4021 	uint val_size;
4022 	const bcm_iovar_t *vi = NULL;
4023 	uint32 actionid;
4024 
4025 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4026 
4027 	ASSERT(name);
4028 
4029 	/* Get MUST have return space */
4030 	ASSERT(set || (arg && len));
4031 
4032 	/* Set does NOT take qualifiers */
4033 	ASSERT(!set || (!params && !plen));
4034 
4035 	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
4036 		bcmerror = BCME_UNSUPPORTED;
4037 		goto exit;
4038 	}
4039 
4040 	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4041 		name, (set ? "set" : "get"), len, plen));
4042 
4043 	/* set up 'params' pointer in case this is a set command so that
4044 	 * the convenience int and bool code can be common to set and get
4045 	 */
4046 	if (params == NULL) {
4047 		params = arg;
4048 		plen = len;
4049 	}
4050 
4051 	if (vi->type == IOVT_VOID)
4052 		val_size = 0;
4053 	else if (vi->type == IOVT_BUFFER)
4054 		val_size = len;
4055 	else
4056 		/* all other types are integer sized */
4057 		val_size = sizeof(int);
4058 
4059 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4060 
4061 	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
4062 
4063 exit:
4064 	return bcmerror;
4065 }
4066 
/*
 * Dispatch a DHD-private ioctl (DHD_GET_MAGIC / DHD_GET_VERSION /
 * DHD_GET_VAR / DHD_SET_VAR). For the iovar commands, the NUL-terminated
 * name at the start of 'buf' is tried against the generic DHD table,
 * then the protocol module, then the bus module (and the timesync module
 * when DHD_TIMESYNC is defined).
 *
 * Returns a BCME_xxx code, or -ENODEV when the bus is down/suspended and
 * the iovar is not one of the few permitted in that state.
 */
int
dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
{
	int bcmerror = 0;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!buf) {
		return BCME_BADARG;
	}

	/* serialize all DHD iovar processing */
	dhd_os_dhdiovar_lock(dhd_pub);
	switch (ioc->cmd) {
		case DHD_GET_MAGIC:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_MAGIC;
			break;

		case DHD_GET_VERSION:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_VERSION;
			break;

		case DHD_GET_VAR:
		case DHD_SET_VAR:
			{
				char *arg;
				uint arglen;

				/* While the bus is down, only "devreset" (and anything
				 * fired before FW download completes) is allowed through.
				 */
				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
					bcmstricmp((char *)buf, "devreset")) {
					/* In platforms like FC19, the FW download is done via IOCTL
					 * and should not return error for IOCTLs fired before FW
					 * Download is done
					 */
					if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
						DHD_ERROR(("%s: return as fw_download_status=%d\n",
							__FUNCTION__,
							dhd_fw_download_status(dhd_pub)));
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* mark the bus busy so suspend/devreset waits for us;
				 * cleared on every exit path below
				 */
				DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
				dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
#endif /* DHD_PCIE_RUNTIMEPM */

				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
					/* If Suspend/Resume is tested via pcie_suspend IOVAR
					 * then continue to execute the IOVAR, return from here for
					 * other IOVARs, also include pciecfgreg and devreset to go
					 * through.
					 */
#ifdef DHD_EFI
					if (bcmstricmp((char *)buf, "pcie_suspend") &&
						bcmstricmp((char *)buf, "pciecfgreg") &&
						bcmstricmp((char *)buf, "devreset") &&
						bcmstricmp((char *)buf, "sdio_suspend") &&
						bcmstricmp((char *)buf, "control_signal"))
#else
					if (bcmstricmp((char *)buf, "pcie_suspend") &&
					    bcmstricmp((char *)buf, "pciecfgreg") &&
					    bcmstricmp((char *)buf, "devreset") &&
					    bcmstricmp((char *)buf, "sdio_suspend"))
#endif /* DHD_EFI */
					{
						DHD_ERROR(("%s: bus is in suspend(%d)"
							"or suspending(0x%x) state\n",
							__FUNCTION__, dhd_pub->busstate,
							dhd_pub->dhd_bus_busy_state));
						DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
						dhd_os_busbusy_wake(dhd_pub);
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
				 * which will wait for all the busy contexts to get over for
				 * particular time and call ASSERT if timeout happens. As during
				 * devreset ioctl, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR,
				 * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
				 * not used in Production platforms but only used in FC19 setups.
				 */
				if (!bcmstricmp((char *)buf, "devreset") ||
#ifdef BCMPCIE
				    (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
				    !bcmstricmp((char *)buf, "dwnldstate")) ||
#endif /* BCMPCIE */
#if defined(DHD_EFI) && defined (BT_OVER_PCIE)
					!bcmstricmp((char *)buf, "btop_test") ||
					!bcmstricmp((char *)buf, "control_signal") ||
#endif /* DHD_EFI && BT_OVER_PCIE */
				    FALSE)
				{
					DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
				}
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

				/* scan past the name to any arguments */
				for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
					;

				if (arglen == 0 || *arg) {
					/* no NUL terminator found inside the buffer */
					bcmerror = BCME_BUFTOOSHORT;
					goto unlock_exit;
				}

				/* account for the NUL terminator */
				arg++, arglen--;
				/* call with the appropriate arguments */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
							buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
							arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* not in generic table, try protocol module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
							arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* if still not found, try bus module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							arg, arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

#ifdef DHD_TIMESYNC
				/* check TS module */
				if (ioc->cmd == DHD_GET_VAR)
					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
						arglen, buf, buflen, IOV_GET);
				else
					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
						NULL, 0, arg, arglen, IOV_SET);
#endif /* DHD_TIMESYNC */
			}
			goto unlock_exit;

		default:
			bcmerror = BCME_UNSUPPORTED;
	}
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;

unlock_exit:
	/* clear the busy flag set above and wake anyone waiting on it */
	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
	dhd_os_busbusy_wake(dhd_pub);
	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;
}
4250 
4251 #ifdef SHOW_EVENTS
4252 
4253 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
4254 static void
dhd_update_awdl_stats(dhd_pub_t * dhd_pub,const awdl_aws_event_data_t * aw)4255 dhd_update_awdl_stats(dhd_pub_t *dhd_pub, const awdl_aws_event_data_t *aw)
4256 {
4257 	dhd_awdl_stats_t *awdl_stats;
4258 	unsigned long lock_flags;
4259 
4260 	/* since AWDL stats are read on clear to protect against clear,
4261 	 * lock before update
4262 	 */
4263 	DHD_AWDL_STATS_LOCK(dhd_pub->awdl_stats_lock, lock_flags);
4264 	/* Start of AWDL slot */
4265 	if (!(aw->flags & AWDL_AW_LAST_EXT)) {
4266 		dhd_pub->awdl_tx_status_slot =
4267 			((aw->aw_counter/AWDL_SLOT_MULT) % AWDL_NUM_SLOTS);
4268 		awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
4269 		awdl_stats->slot_start_time = OSL_SYSUPTIME_US();
4270 		awdl_stats->fw_slot_start_time = ntoh32_ua(&aw->fw_time);
4271 		awdl_stats->num_slots++;
4272 	} else {
4273 		/* End of AWDL slot */
4274 		awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
4275 		if (awdl_stats->slot_start_time) {
4276 			awdl_stats->cum_slot_time +=
4277 				OSL_SYSUPTIME_US() - awdl_stats->slot_start_time;
4278 			/* FW reports time in us in a 32bit number.
4279 			 * This 32bit number wrap-arround in ~90 minutes.
4280 			 * Below logic considers wrap-arround too
4281 			 */
4282 			awdl_stats->fw_cum_slot_time +=
4283 				((ntoh32_ua(&aw->fw_time) - awdl_stats->fw_slot_start_time) &
4284 					(UINT_MAX));
4285 
4286 		}
4287 	}
4288 	DHD_AWDL_STATS_UNLOCK(dhd_pub->awdl_stats_lock, lock_flags);
4289 }
4290 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
4291 
4292 static void
wl_show_roam_event(dhd_pub_t * dhd_pub,uint status,uint datalen,const char * event_name,char * eabuf,void * event_data)4293 wl_show_roam_event(dhd_pub_t *dhd_pub, uint status, uint datalen,
4294 	const char *event_name, char *eabuf, void *event_data)
4295 {
4296 #ifdef REPORT_FATAL_TIMEOUTS
4297 	OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
4298 	dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
4299 #endif /* REPORT_FATAL_TIMEOUTS */
4300 	if (status == WLC_E_STATUS_SUCCESS) {
4301 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4302 	} else {
4303 #ifdef REPORT_FATAL_TIMEOUTS
4304 		/*
4305 		 * For secure join if WLC_E_SET_SSID returns with any failure case,
4306 		 * donot expect WLC_E_PSK_SUP. So clear the mask.
4307 		 */
4308 		dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4309 #endif /* REPORT_FATAL_TIMEOUTS */
4310 		if (status == WLC_E_STATUS_FAIL) {
4311 			DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
4312 		} else if (status == WLC_E_STATUS_NO_NETWORKS) {
4313 			if (datalen) {
4314 				uint8 id = *((uint8 *)event_data);
4315 				if (id != DOT11_MNG_PROPR_ID) {
4316 					wl_roam_event_t *roam_data =
4317 						(wl_roam_event_t *)event_data;
4318 					bcm_xtlv_t *tlv = (bcm_xtlv_t *)roam_data->xtlvs;
4319 					if (tlv->id == WLC_ROAM_NO_NETWORKS_TLV_ID) {
4320 						uint32 *fail_reason = (uint32 *)tlv->data;
4321 						switch (*fail_reason) {
4322 							case WLC_E_REASON_NO_NETWORKS:
4323 								DHD_EVENT(("MACEVENT: %s,"
4324 									" no networks found\n",
4325 									event_name));
4326 								break;
4327 							case WLC_E_REASON_NO_NETWORKS_BY_SCORE:
4328 								DHD_EVENT(("MACEVENT: %s,"
4329 								" no networks found by score\n",
4330 									event_name));
4331 								break;
4332 							default:
4333 								DHD_ERROR(("MACEVENT: %s,"
4334 								" unknown fail reason 0x%x\n",
4335 									event_name,
4336 									*fail_reason));
4337 								ASSERT(0);
4338 						}
4339 					} else {
4340 						DHD_EVENT(("MACEVENT: %s,"
4341 							" no networks found\n",
4342 							event_name));
4343 					}
4344 				} else {
4345 					DHD_EVENT(("MACEVENT: %s,"
4346 						" no networks found\n",
4347 						event_name));
4348 				}
4349 			} else {
4350 				DHD_EVENT(("MACEVENT: %s, no networks found\n",
4351 					event_name));
4352 			}
4353 		} else {
4354 			DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
4355 				event_name, (int)status));
4356 		}
4357 	}
4358 }
4359 
4360 static void
wl_show_roam_cache_update_event(const char * name,uint status,uint reason,uint datalen,void * event_data)4361 wl_show_roam_cache_update_event(const char *name, uint status,
4362 	uint reason, uint datalen, void *event_data)
4363 {
4364 	wlc_roam_cache_update_event_t *cache_update;
4365 	uint16 len_of_tlvs;
4366 	void *val_tlv_ptr;
4367 	bcm_xtlv_t *val_xtlv;
4368 	char ntoa_buf[ETHER_ADDR_STR_LEN];
4369 	uint idx;
4370 	const char* reason_name = NULL;
4371 	const char* status_name = NULL;
4372 	static struct {
4373 		uint event;
4374 		const char *event_name;
4375 	} reason_names[] = {
4376 		{WLC_E_REASON_INITIAL_ASSOC, "INITIAL ASSOCIATION"},
4377 		{WLC_E_REASON_LOW_RSSI, "LOW_RSSI"},
4378 		{WLC_E_REASON_DEAUTH, "RECEIVED DEAUTHENTICATION"},
4379 		{WLC_E_REASON_DISASSOC, "RECEIVED DISASSOCATION"},
4380 		{WLC_E_REASON_BCNS_LOST, "BEACONS LOST"},
4381 		{WLC_E_REASON_BETTER_AP, "BETTER AP FOUND"},
4382 		{WLC_E_REASON_MINTXRATE, "STUCK AT MIN TX RATE"},
4383 		{WLC_E_REASON_BSSTRANS_REQ, "REQUESTED ROAM"},
4384 		{WLC_E_REASON_TXFAIL, "TOO MANY TXFAILURES"}
4385 	};
4386 
4387 	static struct {
4388 		uint event;
4389 		const char *event_name;
4390 	} status_names[] = {
4391 		{WLC_E_STATUS_SUCCESS, "operation was successful"},
4392 		{WLC_E_STATUS_FAIL, "operation failed"},
4393 		{WLC_E_STATUS_TIMEOUT, "operation timed out"},
4394 		{WLC_E_STATUS_NO_NETWORKS, "failed due to no matching network found"},
4395 		{WLC_E_STATUS_ABORT, "operation was aborted"},
4396 		{WLC_E_STATUS_NO_ACK, "protocol failure: packet not ack'd"},
4397 		{WLC_E_STATUS_UNSOLICITED, "AUTH or ASSOC packet was unsolicited"},
4398 		{WLC_E_STATUS_ATTEMPT, "attempt to assoc to an auto auth configuration"},
4399 		{WLC_E_STATUS_PARTIAL, "scan results are incomplete"},
4400 		{WLC_E_STATUS_NEWSCAN, "scan aborted by another scan"},
4401 		{WLC_E_STATUS_NEWASSOC, "scan aborted due to assoc in progress"},
4402 		{WLC_E_STATUS_11HQUIET, "802.11h quiet period started"},
4403 		{WLC_E_STATUS_SUPPRESS, "user disabled scanning"},
4404 		{WLC_E_STATUS_NOCHANS, "no allowable channels to scan"},
4405 		{WLC_E_STATUS_CS_ABORT, "abort channel select"},
4406 		{WLC_E_STATUS_ERROR, "request failed due to error"},
4407 		{WLC_E_STATUS_INVALID, "Invalid status code"}
4408 	};
4409 
4410 	switch (reason) {
4411 	case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE:
4412 		DHD_EVENT(("Current roam cache status %d, "
4413 			"reason for cache update is new roam cache\n", status));
4414 		break;
4415 	case WLC_ROAM_CACHE_UPDATE_JOIN:
4416 		DHD_EVENT(("Current roam cache status %d, "
4417 			"reason for cache update is start of join\n", status));
4418 		break;
4419 	case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA:
4420 		DHD_EVENT(("Current roam cache status %d, "
4421 			"reason for cache update is delta in rssi\n", status));
4422 		break;
4423 	case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA:
4424 		DHD_EVENT(("Current roam cache status %d, "
4425 			"reason for cache update is motion delta in rssi\n", status));
4426 		break;
4427 	case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS:
4428 		DHD_EVENT(("Current roam cache status %d, "
4429 			"reason for cache update is missed channel\n", status));
4430 		break;
4431 	case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN:
4432 		DHD_EVENT(("Current roam cache status %d, "
4433 			"reason for cache update is start of split scan\n", status));
4434 		break;
4435 	case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN:
4436 		DHD_EVENT(("Current roam cache status %d, "
4437 			"reason for cache update is start of full scan\n", status));
4438 		break;
4439 	case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC:
4440 		DHD_EVENT(("Current roam cache status %d, "
4441 			"reason for cache update is init association\n", status));
4442 		break;
4443 	case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED:
4444 		DHD_EVENT(("Current roam cache status %d, "
4445 			"reason for cache update is failure in full scan\n", status));
4446 		break;
4447 	case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND:
4448 		DHD_EVENT(("Current roam cache status %d, "
4449 			"reason for cache update is empty scan result\n", status));
4450 		break;
4451 	case WLC_ROAM_CACHE_UPDATE_MISSING_AP:
4452 		DHD_EVENT(("Current roam cache status %d, "
4453 			"reason for cache update is missed ap\n", status));
4454 		break;
4455 	default:
4456 		DHD_EVENT(("Current roam cache status %d, "
4457 			"reason for cache update is unknown %d\n", status, reason));
4458 		break;
4459 	}
4460 
4461 	if (datalen < sizeof(wlc_roam_cache_update_event_t)) {
4462 		DHD_ERROR(("MACEVENT: %s, missing event data\n", name));
4463 		return;
4464 	}
4465 
4466 	cache_update = (wlc_roam_cache_update_event_t *)event_data;
4467 	val_tlv_ptr = (void *)cache_update->xtlvs;
4468 	len_of_tlvs = datalen - sizeof(wlc_roam_cache_update_event_t);
4469 	val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
4470 	if (val_xtlv->id != WL_RMC_RPT_CMD_DATA) {
4471 		DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
4472 			name, val_xtlv->id));
4473 		return;
4474 	}
4475 	val_tlv_ptr = (uint8 *)val_tlv_ptr + BCM_XTLV_HDR_SIZE;
4476 	len_of_tlvs = val_xtlv->len;
4477 
4478 	while (len_of_tlvs && len_of_tlvs > BCM_XTLV_HDR_SIZE) {
4479 		val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
4480 		switch (val_xtlv->id) {
4481 			case WL_RMC_RPT_XTLV_BSS_INFO:
4482 			{
4483 				rmc_bss_info_v1_t *bss_info = (rmc_bss_info_v1_t *)(val_xtlv->data);
4484 				DHD_EVENT(("\t Current BSS INFO:\n"));
4485 				DHD_EVENT(("\t\tRSSI: %d\n", bss_info->rssi));
4486 				DHD_EVENT(("\t\tNumber of full scans performed "
4487 					"on current BSS: %d\n", bss_info->fullscan_count));
4488 				for (idx = 0; idx < ARRAYSIZE(reason_names); idx++) {
4489 					if (reason_names[idx].event == bss_info->reason) {
4490 						reason_name = reason_names[idx].event_name;
4491 					}
4492 				}
4493 				DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n",
4494 					reason_name, bss_info->reason));
4495 				DHD_EVENT(("\t\tDelta between current time and "
4496 					"last full scan: %d\n", bss_info->time_full_scan));
4497 				for (idx = 0; idx < ARRAYSIZE(status_names); idx++) {
4498 					if (status_names[idx].event == bss_info->status)
4499 						status_name = status_names[idx].event_name;
4500 				}
4501 				DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n",
4502 					status_name, bss_info->status));
4503 
4504 			}
4505 				break;
4506 			case WL_RMC_RPT_XTLV_CANDIDATE_INFO:
4507 			case WL_RMC_RPT_XTLV_USER_CACHE_INFO:
4508 			{
4509 				rmc_candidate_info_v1_t *candidate_info =
4510 					(rmc_candidate_info_v1_t *)(val_xtlv->data);
4511 				if (val_xtlv->id == WL_RMC_RPT_XTLV_CANDIDATE_INFO) {
4512 					DHD_EVENT(("\t Candidate INFO:\n"));
4513 				} else {
4514 					DHD_EVENT(("\t User Candidate INFO:\n"));
4515 				}
4516 				DHD_EVENT(("\t\tBSSID: %s\n",
4517 					bcm_ether_ntoa((const struct ether_addr *)
4518 					&candidate_info->bssid, ntoa_buf)));
4519 				DHD_EVENT(("\t\tRSSI: %d\n", candidate_info->rssi));
4520 				DHD_EVENT(("\t\tChannel: %d\n", candidate_info->ctl_channel));
4521 				DHD_EVENT(("\t\tDelta between current time and last "
4522 					"seen time: %d\n", candidate_info->time_last_seen));
4523 				DHD_EVENT(("\t\tBSS load: %d\n", candidate_info->bss_load));
4524 			}
4525 				break;
4526 			default:
4527 				DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
4528 					name, val_xtlv->id));
4529 				return;
4530 		}
4531 		val_tlv_ptr = (uint8 *)val_tlv_ptr + bcm_xtlv_size(val_xtlv,
4532 			BCM_XTLV_OPTION_NONE);
4533 		len_of_tlvs -= (uint16)bcm_xtlv_size(val_xtlv, BCM_XTLV_OPTION_NONE);
4534 	}
4535 }
4536 
4537 static void
wl_show_host_event(dhd_pub_t * dhd_pub,wl_event_msg_t * event,void * event_data,void * raw_event_ptr,char * eventmask)4538 wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
4539 	void *raw_event_ptr, char *eventmask)
4540 {
4541 	uint i, status, reason;
4542 	bool group = FALSE, flush_txq = FALSE, link = FALSE;
4543 	bool host_data = FALSE; /* prints  event data after the case  when set */
4544 	const char *auth_str;
4545 	const char *event_name;
4546 	const uchar *buf;
4547 	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
4548 	uint event_type, flags, auth_type, datalen;
4549 
4550 	event_type = ntoh32(event->event_type);
4551 	flags = ntoh16(event->flags);
4552 	status = ntoh32(event->status);
4553 	reason = ntoh32(event->reason);
4554 	BCM_REFERENCE(reason);
4555 	auth_type = ntoh32(event->auth_type);
4556 	datalen = (event_data != NULL) ? ntoh32(event->datalen) : 0;
4557 
4558 	/* debug dump of event messages */
4559 	snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
4560 
4561 	event_name = bcmevent_get_name(event_type);
4562 	BCM_REFERENCE(event_name);
4563 
4564 	if (flags & WLC_EVENT_MSG_LINK)
4565 		link = TRUE;
4566 	if (flags & WLC_EVENT_MSG_GROUP)
4567 		group = TRUE;
4568 	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
4569 		flush_txq = TRUE;
4570 
4571 	switch (event_type) {
4572 	case WLC_E_START:
4573 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4574 		break;
4575 	case WLC_E_DEAUTH:
4576 	case WLC_E_DISASSOC:
4577 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4578 #ifdef REPORT_FATAL_TIMEOUTS
4579 		dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4580 #endif /* REPORT_FATAL_TIMEOUTS */
4581 		break;
4582 
4583 	case WLC_E_ASSOC_IND:
4584 	case WLC_E_REASSOC_IND:
4585 
4586 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4587 #ifdef REPORT_FATAL_TIMEOUTS
4588 		if (status != WLC_E_STATUS_SUCCESS) {
4589 			dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4590 		}
4591 #endif /* REPORT_FATAL_TIMEOUTS */
4592 
4593 		break;
4594 
4595 	case WLC_E_ASSOC:
4596 	case WLC_E_REASSOC:
4597 		if (status == WLC_E_STATUS_SUCCESS) {
4598 			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
4599 		} else if (status == WLC_E_STATUS_TIMEOUT) {
4600 			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
4601 		} else if (status == WLC_E_STATUS_FAIL) {
4602 			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
4603 			       event_name, eabuf, (int)status, (int)reason));
4604 		} else if (status == WLC_E_STATUS_SUPPRESS) {
4605 			DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name, eabuf));
4606 		} else if (status == WLC_E_STATUS_NO_ACK) {
4607 			DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name, eabuf));
4608 		} else {
4609 			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
4610 			       event_name, eabuf, (int)status));
4611 		}
4612 #ifdef REPORT_FATAL_TIMEOUTS
4613 		if (status != WLC_E_STATUS_SUCCESS) {
4614 			dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4615 		}
4616 #endif /* REPORT_FATAL_TIMEOUTS */
4617 
4618 		break;
4619 
4620 	case WLC_E_DEAUTH_IND:
4621 	case WLC_E_DISASSOC_IND:
4622 #ifdef REPORT_FATAL_TIMEOUTS
4623 		dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4624 #endif /* REPORT_FATAL_TIMEOUTS */
4625 		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
4626 		break;
4627 
4628 	case WLC_E_AUTH:
4629 	case WLC_E_AUTH_IND:
4630 		if (auth_type == DOT11_OPEN_SYSTEM)
4631 			auth_str = "Open System";
4632 		else if (auth_type == DOT11_SHARED_KEY)
4633 			auth_str = "Shared Key";
4634 		else if (auth_type == DOT11_SAE)
4635 			auth_str = "SAE";
4636 		else {
4637 			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
4638 			auth_str = err_msg;
4639 		}
4640 
4641 		if (event_type == WLC_E_AUTH_IND) {
4642 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
4643 		} else if (status == WLC_E_STATUS_SUCCESS) {
4644 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
4645 				event_name, eabuf, auth_str));
4646 		} else if (status == WLC_E_STATUS_TIMEOUT) {
4647 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
4648 				event_name, eabuf, auth_str));
4649 		} else if (status == WLC_E_STATUS_FAIL) {
4650 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
4651 			       event_name, eabuf, auth_str, (int)status, (int)reason));
4652 		} else if (status == WLC_E_STATUS_SUPPRESS) {
4653 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
4654 			       event_name, eabuf, auth_str));
4655 		} else if (status == WLC_E_STATUS_NO_ACK) {
4656 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
4657 			       event_name, eabuf, auth_str));
4658 		} else {
4659 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
4660 				event_name, eabuf, auth_str, (int)status, (int)reason));
4661 		}
4662 		BCM_REFERENCE(auth_str);
4663 #ifdef REPORT_FATAL_TIMEOUTS
4664 		if (status != WLC_E_STATUS_SUCCESS) {
4665 			dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4666 		}
4667 #endif /* REPORT_FATAL_TIMEOUTS */
4668 
4669 		break;
4670 
4671 	case WLC_E_ROAM:
4672 		wl_show_roam_event(dhd_pub, status, datalen,
4673 			event_name, eabuf, event_data);
4674 		break;
4675 	case WLC_E_ROAM_START:
4676 		if (datalen >= sizeof(wlc_roam_start_event_t)) {
4677 			const wlc_roam_start_event_t *roam_start =
4678 				(wlc_roam_start_event_t *)event_data;
4679 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
4680 				" reason %d, auth %d, current bss rssi %d\n",
4681 				event_name, event_type, eabuf, (int)status, (int)reason,
4682 				(int)auth_type, (int)roam_start->rssi));
4683 		} else {
4684 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
4685 				event_name, event_type, eabuf, (int)status, (int)reason,
4686 				(int)auth_type));
4687 		}
4688 		break;
4689 	case WLC_E_ROAM_PREP:
4690 		if (datalen >= sizeof(wlc_roam_prep_event_t)) {
4691 			const wlc_roam_prep_event_t *roam_prep =
4692 				(wlc_roam_prep_event_t *)event_data;
4693 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
4694 				" reason %d, auth %d, target bss rssi %d\n",
4695 				event_name, event_type, eabuf, (int)status, (int)reason,
4696 				(int)auth_type, (int)roam_prep->rssi));
4697 		} else {
4698 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
4699 				event_name, event_type, eabuf, (int)status, (int)reason,
4700 				(int)auth_type));
4701 		}
4702 		break;
4703 	case WLC_E_ROAM_CACHE_UPDATE:
4704 		DHD_EVENT(("MACEVENT: %s\n", event_name));
4705 		wl_show_roam_cache_update_event(event_name, status,
4706 			reason, datalen, event_data);
4707 		break;
4708 	case WLC_E_JOIN:
4709 	case WLC_E_SET_SSID:
4710 #ifdef REPORT_FATAL_TIMEOUTS
4711 		OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
4712 		dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
4713 #endif /* REPORT_FATAL_TIMEOUTS */
4714 		if (status == WLC_E_STATUS_SUCCESS) {
4715 			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4716 		} else {
4717 #ifdef REPORT_FATAL_TIMEOUTS
4718 			/*
4719 			 * For secure join if WLC_E_SET_SSID returns with any failure case,
4720 			 * donot expect WLC_E_PSK_SUP. So clear the mask.
4721 			 */
4722 			dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4723 #endif /* REPORT_FATAL_TIMEOUTS */
4724 			if (status == WLC_E_STATUS_FAIL) {
4725 				DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
4726 			} else if (status == WLC_E_STATUS_NO_NETWORKS) {
4727 				DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
4728 			} else {
4729 				DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
4730 					event_name, (int)status));
4731 			}
4732 		}
4733 		break;
4734 
4735 	case WLC_E_BEACON_RX:
4736 		if (status == WLC_E_STATUS_SUCCESS) {
4737 			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
4738 		} else if (status == WLC_E_STATUS_FAIL) {
4739 			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
4740 		} else {
4741 			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
4742 		}
4743 		break;
4744 
4745 	case WLC_E_LINK:
4746 		DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
4747 			event_name, link?"UP":"DOWN", flags, status, reason));
4748 #ifdef PCIE_FULL_DONGLE
4749 #ifdef REPORT_FATAL_TIMEOUTS
4750 		{
4751 			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
4752 			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
4753 			if ((role == WLC_E_IF_ROLE_STA) && (!link)) {
4754 				dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4755 			}
4756 		}
4757 #endif /* PCIE_FULL_DONGLE */
4758 #endif /* REPORT_FATAL_TIMEOUTS */
4759 		BCM_REFERENCE(link);
4760 		break;
4761 
4762 	case WLC_E_MIC_ERROR:
4763 		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
4764 		       event_name, eabuf, group, flush_txq));
4765 		BCM_REFERENCE(group);
4766 		BCM_REFERENCE(flush_txq);
4767 		break;
4768 
4769 	case WLC_E_ICV_ERROR:
4770 	case WLC_E_UNICAST_DECODE_ERROR:
4771 	case WLC_E_MULTICAST_DECODE_ERROR:
4772 		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
4773 		       event_name, eabuf));
4774 		break;
4775 
4776 	case WLC_E_TXFAIL:
4777 		DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
4778 		break;
4779 
4780 	case WLC_E_ASSOC_REQ_IE:
4781 	case WLC_E_ASSOC_RESP_IE:
4782 	case WLC_E_PMKID_CACHE:
4783 		DHD_EVENT(("MACEVENT: %s\n", event_name));
4784 		break;
4785 
4786 	case WLC_E_SCAN_COMPLETE:
4787 		DHD_EVENT(("MACEVENT: %s\n", event_name));
4788 #ifdef REPORT_FATAL_TIMEOUTS
4789 		dhd_stop_scan_timer(dhd_pub, FALSE, 0);
4790 #endif /* REPORT_FATAL_TIMEOUTS */
4791 		break;
4792 	case WLC_E_RSSI_LQM:
4793 	case WLC_E_PFN_NET_FOUND:
4794 	case WLC_E_PFN_NET_LOST:
4795 	case WLC_E_PFN_SCAN_COMPLETE:
4796 	case WLC_E_PFN_SCAN_NONE:
4797 	case WLC_E_PFN_SCAN_ALLGONE:
4798 	case WLC_E_PFN_GSCAN_FULL_RESULT:
4799 	case WLC_E_PFN_SSID_EXT:
4800 		DHD_EVENT(("PNOEVENT: %s\n", event_name));
4801 		break;
4802 
4803 	case WLC_E_PFN_SCAN_BACKOFF:
4804 	case WLC_E_PFN_BSSID_SCAN_BACKOFF:
4805 		DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
4806 		           event_name, (int)status, (int)reason));
4807 		break;
4808 
4809 	case WLC_E_PSK_SUP:
4810 	case WLC_E_PRUNE:
4811 		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
4812 		           event_name, (int)status, (int)reason));
4813 #ifdef REPORT_FATAL_TIMEOUTS
4814 		dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4815 #endif /* REPORT_FATAL_TIMEOUTS */
4816 		break;
4817 
4818 #ifdef WIFI_ACT_FRAME
4819 	case WLC_E_ACTION_FRAME:
4820 		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
4821 		break;
4822 	case WLC_E_ACTION_FRAME_COMPLETE:
4823 		if (datalen >= sizeof(uint32)) {
4824 			const uint32 *pktid = event_data;
4825 			BCM_REFERENCE(pktid);
4826 			DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
4827 				event_name, (int)status, (int)reason, *pktid));
4828 		}
4829 		break;
4830 #endif /* WIFI_ACT_FRAME */
4831 
4832 #ifdef SHOW_LOGTRACE
4833 	case WLC_E_TRACE:
4834 	{
4835 		dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
4836 		break;
4837 	}
4838 #endif /* SHOW_LOGTRACE */
4839 
4840 	case WLC_E_RSSI:
4841 		if (datalen >= sizeof(int)) {
4842 			DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
4843 		}
4844 		break;
4845 
4846 	case WLC_E_SERVICE_FOUND:
4847 	case WLC_E_P2PO_ADD_DEVICE:
4848 	case WLC_E_P2PO_DEL_DEVICE:
4849 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4850 		break;
4851 
4852 #ifdef BT_WIFI_HANDOBER
4853 	case WLC_E_BT_WIFI_HANDOVER_REQ:
4854 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4855 		break;
4856 #endif
4857 #ifdef DHD_AWDL
4858 	case WLC_E_AWDL_AW:
4859 		if (datalen >= sizeof(awdl_aws_event_data_t)) {
4860 			const awdl_aws_event_data_t *aw =
4861 				(awdl_aws_event_data_t *)event_data;
4862 			BCM_REFERENCE(aw);
4863 			DHD_EVENT(("MACEVENT: %s, MAC %s aw_cnt %u ext_cnt %u flags %u "
4864 					"aw_ch %u\n", event_name, eabuf, aw->aw_counter,
4865 					aw->aw_ext_count, aw->flags, CHSPEC_CHANNEL(aw->aw_chan)));
4866 			host_data = TRUE;
4867 
4868 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
4869 			dhd_update_awdl_stats(dhd_pub, aw);
4870 			/* Store last received aw counter */
4871 			dhd_pub->awdl_aw_counter = aw->aw_counter;
4872 #endif /* DHD_AWDL */
4873 		}
4874 		break;
4875 	case WLC_E_AWDL_ROLE:
4876 		DHD_EVENT(("MACEVENT: %s, MAC %s ROLE %d\n", event_name, eabuf, (int)status));
4877 		break;
4878 	case WLC_E_AWDL_EVENT:
4879 		DHD_EVENT(("MACEVENT: %s, MAC %s status %d reason %d\n",
4880 			event_name, eabuf, (int)status, (int)reason));
4881 		if (datalen >= OFFSETOF(awdl_scan_event_data_t, chan_list)) {
4882 			const awdl_scan_event_data_t *scan_evt =
4883 				(awdl_scan_event_data_t *)event_data;
4884 			BCM_REFERENCE(scan_evt);
4885 			DHD_EVENT(("scan_usage %d, nscan_chans %d, ncached_chans %d, "
4886 				"iscan_flags 0x%x\n", scan_evt->scan_usage,
4887 				scan_evt->nscan_chans, scan_evt->ncached_chans,
4888 				scan_evt->flags));
4889 			host_data = TRUE;
4890 		}
4891 		break;
4892 #endif /* DHD_AWDL  */
4893 
4894 	case WLC_E_CCA_CHAN_QUAL:
4895 		/* I would like to check here that datalen >= sizeof(cca_chan_qual_event_t)
4896 		 * but since definition of cca_chan_qual_event_t is different
4897 		 * between blazar and legacy firmware, I will
4898 		 * check only that datalen is bigger than 0.
4899 		 */
4900 		if (datalen > 0) {
4901 			const cca_chan_qual_event_t *cca_event =
4902 				(cca_chan_qual_event_t *)event_data;
4903 			if ((cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) ||
4904 			    (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE)) {
4905 				const cca_only_chan_qual_event_t *cca_only_event =
4906 					(const cca_only_chan_qual_event_t *)cca_event;
4907 				BCM_REFERENCE(cca_only_event);
4908 				DHD_EVENT((
4909 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4910 					" channel 0x%02x\n",
4911 					event_name, event_type, eabuf, (int)status,
4912 					(int)reason, (int)auth_type, cca_event->chanspec));
4913 				DHD_EVENT((
4914 					"\tTOTAL (dur %dms me %dms notme %dms interf %dms"
4915 					" ts 0x%08x)\n",
4916 					cca_only_event->cca_busy_ext.duration,
4917 					cca_only_event->cca_busy_ext.congest_ibss,
4918 					cca_only_event->cca_busy_ext.congest_obss,
4919 					cca_only_event->cca_busy_ext.interference,
4920 					cca_only_event->cca_busy_ext.timestamp));
4921 				DHD_EVENT((
4922 					"\t  !PM (dur %dms me %dms notme %dms interf %dms)\n",
4923 					cca_only_event->cca_busy_nopm.duration,
4924 					cca_only_event->cca_busy_nopm.congest_ibss,
4925 					cca_only_event->cca_busy_nopm.congest_obss,
4926 					cca_only_event->cca_busy_nopm.interference));
4927 				DHD_EVENT((
4928 					"\t   PM (dur %dms me %dms notme %dms interf %dms)\n",
4929 					cca_only_event->cca_busy_pm.duration,
4930 					cca_only_event->cca_busy_pm.congest_ibss,
4931 					cca_only_event->cca_busy_pm.congest_obss,
4932 					cca_only_event->cca_busy_pm.interference));
4933 				if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE) {
4934 					DHD_EVENT(("\t OFDM desense %d\n",
4935 						((const cca_only_chan_qual_event_v2_t *)
4936 						cca_only_event)->ofdm_desense));
4937 				}
4938 			} else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
4939 				DHD_EVENT((
4940 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4941 					" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
4942 					" ts 0x%08x)\n",
4943 					event_name, event_type, eabuf, (int)status,
4944 					(int)reason, (int)auth_type, cca_event->chanspec,
4945 					cca_event->cca_busy_ext.duration,
4946 					cca_event->cca_busy_ext.congest_ibss,
4947 					cca_event->cca_busy_ext.congest_obss,
4948 					cca_event->cca_busy_ext.interference,
4949 					cca_event->cca_busy_ext.timestamp));
4950 			} else if (cca_event->id == WL_CHAN_QUAL_CCA) {
4951 				DHD_EVENT((
4952 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4953 					" channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
4954 					event_name, event_type, eabuf, (int)status,
4955 					(int)reason, (int)auth_type, cca_event->chanspec,
4956 					cca_event->cca_busy.duration,
4957 					cca_event->cca_busy.congest,
4958 					cca_event->cca_busy.timestamp));
4959 			} else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
4960 			           (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
4961 				DHD_EVENT((
4962 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4963 					" channel 0x%02x (NF[%d] %ddB)\n",
4964 					event_name, event_type, eabuf, (int)status,
4965 					(int)reason, (int)auth_type, cca_event->chanspec,
4966 					cca_event->id, cca_event->noise));
4967 			} else {
4968 				DHD_EVENT((
4969 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4970 					" channel 0x%02x (unknown ID %d)\n",
4971 					event_name, event_type, eabuf, (int)status,
4972 					(int)reason, (int)auth_type, cca_event->chanspec,
4973 					cca_event->id));
4974 			}
4975 		}
4976 		break;
4977 	case WLC_E_ESCAN_RESULT:
4978 		if (datalen >= sizeof(wl_escan_result_v2_t)) {
4979 			const wl_escan_result_v2_t *escan_result =
4980 				(wl_escan_result_v2_t *)event_data;
4981 			BCM_REFERENCE(escan_result);
4982 #ifdef OEM_ANDROID
4983 			/* Because WLC_E_ESCAN_RESULT event log are being print too many.
4984 			* So, DHD_EVENT() changes to be used DHD_TRACE() in HW4 platform.
4985 			*/
4986 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
4987 				event_name, event_type, eabuf, (int)status));
4988 #else
4989 			DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
4990 				event_name, event_type, eabuf,
4991 				(int)status, dtoh16(escan_result->sync_id)));
4992 #endif /* CUSTOMER_HW4 */
4993 #ifdef REPORT_FATAL_TIMEOUTS
4994 			/* a 'partial' status means the escan is still in progress
4995 			* any other status implies the escan has either finished or aborted
4996 			*/
4997 			if (status != WLC_E_STATUS_PARTIAL) {
4998 				unsigned long timeout_flags = 0;
4999 				uint16 syncid = dtoh16(escan_result->sync_id);
5000 				/* this is to take care of the specific case where
5001 				* escan event returns abort and is processed immediately
5002 				* by dhd before the escan iovar has returned. In that case
5003 				* if the iovar returns success, then we will be starting a
5004 				* timeout even though the escan has already been aborted !
5005 				* So the flag below is checked before starting the escan timeout
5006 				*/
5007 				if (dhd_pub->timeout_info) {
5008 					DHD_TIMER_LOCK(dhd_pub->timeout_info->scan_timer_lock,
5009 						timeout_flags);
5010 					if (!dhd_pub->timeout_info->scan_timer_active &&
5011 						syncid == dhd_pub->esync_id) {
5012 						dhd_pub->timeout_info->escan_aborted = TRUE;
5013 						dhd_pub->timeout_info->abort_syncid = syncid;
5014 						DHD_TIMER_UNLOCK(
5015 							dhd_pub->timeout_info->scan_timer_lock,
5016 							timeout_flags);
5017 						break;
5018 					} else {
5019 						dhd_pub->timeout_info->escan_aborted = FALSE;
5020 					}
5021 					DHD_TIMER_UNLOCK(dhd_pub->timeout_info->scan_timer_lock,
5022 						timeout_flags);
5023 				}
5024 				dhd_stop_scan_timer(dhd_pub, TRUE, dtoh16(escan_result->sync_id));
5025 			}
5026 #endif /* REPORT_FATAL_TIMEOUTS */
5027 		}
5028 		break;
5029 	case WLC_E_IF:
5030 		if (datalen >= sizeof(struct wl_event_data_if)) {
5031 			const struct wl_event_data_if *ifevent =
5032 				(struct wl_event_data_if *)event_data;
5033 			BCM_REFERENCE(ifevent);
5034 
5035 			DHD_EVENT(("MACEVENT: %s, opcode:0x%d  ifidx:%d role:%d\n",
5036 				event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
5037 		}
5038 		break;
5039 #ifdef SHOW_LOGTRACE
5040 	case WLC_E_MSCH:
5041 	{
5042 		wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
5043 		break;
5044 	}
5045 #endif /* SHOW_LOGTRACE */
5046 
5047 	case WLC_E_PSK_AUTH:
5048 		DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
5049 			event_name, eabuf, status, reason));
5050 		break;
5051 	case WLC_E_AGGR_EVENT:
5052 		if (datalen >= sizeof(event_aggr_data_t)) {
5053 			const event_aggr_data_t *aggrbuf = event_data;
5054 			int j = 0, len = 0;
5055 			const uint8 *data = aggrbuf->data;
5056 			DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
5057 				event_name, aggrbuf->num_events, aggrbuf->len));
5058 			for (j = 0; j < aggrbuf->num_events; j++)
5059 			{
5060 				const wl_event_msg_t * sub_event = (const wl_event_msg_t *)data;
5061 				if (len > aggrbuf->len) {
5062 					DHD_ERROR(("%s: Aggr events corrupted!",
5063 						__FUNCTION__));
5064 					break;
5065 				}
5066 				DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
5067 				len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
5068 					sizeof(wl_event_msg_t)), sizeof(uint64));
5069 				buf = (const uchar *)(data + sizeof(wl_event_msg_t));
5070 				BCM_REFERENCE(buf);
5071 				DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
5072 				for (i = 0; i < ntoh32(sub_event->datalen); i++) {
5073 					DHD_EVENT((" 0x%02x ", buf[i]));
5074 				}
5075 				data = aggrbuf->data + len;
5076 			}
5077 			DHD_EVENT(("\n"));
5078 		}
5079 		break;
5080 	case WLC_E_PHY_CAL:
5081 		{
5082 			DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
5083 			break;
5084 		}
5085 	case WLC_E_NAN_CRITICAL:
5086 		{
5087 			DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
5088 			break;
5089 		}
5090 	case WLC_E_NAN_NON_CRITICAL:
5091 		{
5092 			DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
5093 			break;
5094 		}
5095 	case WLC_E_PROXD:
5096 		if (datalen >= sizeof(wl_proxd_event_t)) {
5097 			const wl_proxd_event_t *proxd =
5098 				(wl_proxd_event_t*)event_data;
5099 			DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
5100 				event_name, proxd->type, reason));
5101 		}
5102 		break;
5103 	case WLC_E_RPSNOA:
5104 		if (datalen >= sizeof(rpsnoa_stats_t)) {
5105 			const rpsnoa_stats_t *stat = event_data;
5106 			if (datalen == sizeof(*stat)) {
5107 				DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
5108 					(stat->band == WLC_BAND_2G) ? "2G":"5G",
5109 					stat->state, stat->last_pps));
5110 			}
5111 		}
5112 		break;
5113 	case WLC_E_WA_LQM:
5114 		if (datalen >= sizeof(wl_event_wa_lqm_t)) {
5115 			const wl_event_wa_lqm_t *event_wa_lqm =
5116 				(wl_event_wa_lqm_t *)event_data;
5117 			const bcm_xtlv_t *subevent;
5118 			const wl_event_wa_lqm_basic_t *elqm_basic;
5119 
5120 			if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
5121 			    (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
5122 				DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
5123 					event_name, event_wa_lqm->ver, event_wa_lqm->len));
5124 				break;
5125 			}
5126 
5127 			subevent = (const bcm_xtlv_t *)event_wa_lqm->subevent;
5128 			 if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
5129 			     (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
5130 				DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
5131 					event_name, subevent->id, subevent->len));
5132 				break;
5133 			}
5134 
5135 			elqm_basic = (const wl_event_wa_lqm_basic_t *)subevent->data;
5136 			BCM_REFERENCE(elqm_basic);
5137 			DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
5138 				event_name, elqm_basic->rssi, elqm_basic->snr,
5139 				elqm_basic->tx_rate, elqm_basic->rx_rate));
5140 		}
5141 		break;
5142 
5143 	case WLC_E_OBSS_DETECTION:
5144 		{
5145 			DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
5146 			break;
5147 		}
5148 
5149 	case WLC_E_AP_BCN_MUTE:
5150 		if (datalen >= sizeof(wlc_bcn_mute_miti_event_data_v1_t)) {
5151 			const wlc_bcn_mute_miti_event_data_v1_t
5152 				*bcn_mute_miti_evnt_data = event_data;
5153 			DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n",
5154 				event_name, reason, bcn_mute_miti_evnt_data->uatbtt_count));
5155 		}
5156 		break;
5157 
5158 	case WLC_E_TWT_SETUP:
5159 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
5160 		break;
5161 	case WLC_E_TWT_TEARDOWN:
5162 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
5163 		break;
5164 	case WLC_E_TWT_INFO_FRM:
5165 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
5166 		break;
5167 	default:
5168 		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
5169 		       event_name, event_type, eabuf, (int)status, (int)reason,
5170 		       (int)auth_type));
5171 		break;
5172 	}
5173 
5174 	/* show any appended data if message level is set to bytes or host_data is set */
5175 	if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
5176 		buf = (uchar *) event_data;
5177 		BCM_REFERENCE(buf);
5178 		DHD_EVENT((" data (%d) : ", datalen));
5179 		for (i = 0; i < datalen; i++) {
5180 			DHD_EVENT((" 0x%02x ", buf[i]));
5181 		}
5182 		DHD_EVENT(("\n"));
5183 	}
5184 } /* wl_show_host_event */
5185 #endif /* SHOW_EVENTS */
5186 
5187 #ifdef DNGL_EVENT_SUPPORT
5188 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
5189 int
dngl_host_event(dhd_pub_t * dhdp,void * pktdata,bcm_dngl_event_msg_t * dngl_event,size_t pktlen)5190 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
5191 {
5192 	bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
5193 
5194 	dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
5195 	return BCME_OK;
5196 }
5197 
5198 #ifdef PARSE_DONGLE_HOST_EVENT
/* One entry mapping a numeric health-check code to a printable name. */
typedef struct hck_id_to_str_s {
	uint32 id;	/* health-check event/reason code */
	char *name;	/* symbolic name for logging; NULL marks end of a table */
} hck_id_to_str_t;
5203 
/*
 * Dongle SW health-check descriptor IDs -> printable names.
 * Scanned linearly by dhd_print_dongle_hck_id(); the {0, NULL}
 * entry terminates the table.
 */
hck_id_to_str_t hck_sw_id_to_str[] = {
	{WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
	{WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
	{WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
	{WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
	{WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
	{WL_HC_DD_PHY, "WL_HC_DD_PHY"},
	{WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
	{WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
	{0, NULL}	/* terminator */
};
5215 
/*
 * PCIe health-check indication codes -> printable names.
 * Scanned linearly by dhd_print_dongle_hck_id(); the {0, NULL}
 * entry terminates the table.
 */
hck_id_to_str_t hck_pcie_module_to_str[] = {
	{HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
	{HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
	{HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
	{HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
	{0, NULL}	/* terminator */
};
5227 
/*
 * RX-stall (v2) health-check reason codes -> printable names.
 * Scanned linearly by dhd_print_dongle_hck_id(); the {0, NULL}
 * entry terminates the table.
 */
hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
	{BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
	{BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
	{BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
	{BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
	{BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
	{BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
	{BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
	{0, NULL}	/* terminator */
};
5238 
5239 static void
dhd_print_dongle_hck_id(uint32 id,hck_id_to_str_t * hck)5240 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
5241 {
5242 	while (hck->name != NULL) {
5243 		if (hck->id == id) {
5244 			DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
5245 			return;
5246 		}
5247 		hck++;
5248 	}
5249 }
5250 
5251 void
dhd_parse_hck_common_sw_event(bcm_xtlv_t * wl_hc)5252 dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
5253 {
5254 
5255 	wl_rx_hc_info_v2_t *hck_rx_stall_v2;
5256 	uint16 id;
5257 
5258 	id = ltoh16(wl_hc->id);
5259 
5260 	if (id == WL_HC_DD_RX_STALL_V2) {
5261 		/*  map the hck_rx_stall_v2 structure to the value of the XTLV */
5262 		hck_rx_stall_v2 =
5263 			(wl_rx_hc_info_v2_t*)wl_hc;
5264 		DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
5265 			" drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
5266 			hck_rx_stall_v2->type,
5267 			hck_rx_stall_v2->length,
5268 			hck_rx_stall_v2->if_idx,
5269 			hck_rx_stall_v2->ac,
5270 			hck_rx_stall_v2->rx_hc_pkts,
5271 			hck_rx_stall_v2->rx_hc_dropped_all,
5272 			hck_rx_stall_v2->rx_hc_alert_th,
5273 			hck_rx_stall_v2->reason,
5274 			ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
5275 		dhd_print_dongle_hck_id(
5276 				ltoh32(hck_rx_stall_v2->reason),
5277 				hck_rx_stall_v2_to_str);
5278 	} else {
5279 		dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
5280 				hck_sw_id_to_str);
5281 	}
5282 
5283 }
5284 
5285 #endif /* PARSE_DONGLE_HOST_EVENT */
5286 
5287 void
dngl_host_event_process(dhd_pub_t * dhdp,bcm_dngl_event_t * event,bcm_dngl_event_msg_t * dngl_event,size_t pktlen)5288 dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
5289 	bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
5290 {
5291 	uint8 *p = (uint8 *)(event + 1);
5292 	uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
5293 	uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
5294 	uint16 version = ntoh16_ua((void *)&dngl_event->version);
5295 
5296 	DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
5297 	if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
5298 		return;
5299 	}
5300 	if (version != BCM_DNGL_EVENT_MSG_VERSION) {
5301 		DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
5302 			version, BCM_DNGL_EVENT_MSG_VERSION));
5303 		return;
5304 	}
5305 	switch (type) {
5306 	   case DNGL_E_SOCRAM_IND:
5307 		{
5308 		   bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
5309 		   uint16 tag = ltoh32(socramind_ptr->tag);
5310 		   uint16 taglen = ltoh32(socramind_ptr->length);
5311 		   p = (uint8 *)socramind_ptr->value;
5312 		   DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
5313 		   switch (tag) {
5314 			case SOCRAM_IND_ASSERT_TAG:
5315 			    {
5316 				/*
5317 				* The payload consists of -
5318 				* null terminated function name padded till 32 bit boundary +
5319 				* Line number - (32 bits)
5320 				* Caller address (32 bits)
5321 				*/
5322 				char *fnname = (char *)p;
5323 				if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
5324 					sizeof(uint32) * 2)) {
5325 					DHD_ERROR(("Wrong length:%d\n", datalen));
5326 					return;
5327 				}
5328 				DHD_EVENT(("ASSRT Function:%s ", p));
5329 				p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
5330 				DHD_EVENT(("Line:%d ", *(uint32 *)p));
5331 				p += sizeof(uint32);
5332 				DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
5333 #ifdef PARSE_DONGLE_HOST_EVENT
5334 				DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
5335 #endif /* PARSE_DONGLE_HOST_EVENT */
5336 				break;
5337 			    }
5338 			case SOCRAM_IND_TAG_HEALTH_CHECK:
5339 			   {
5340 				bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
5341 				DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
5342 					ltoh32(dngl_hc->top_module_tag),
5343 					ltoh32(dngl_hc->top_module_len),
5344 					datalen));
5345 				if (DHD_EVENT_ON()) {
5346 					prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
5347 						+ BCM_XTLV_HDR_SIZE, datalen));
5348 				}
5349 #ifdef DHD_LOG_DUMP
5350 				memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
5351 				memcpy(dhdp->health_chk_event_data, p,
5352 						MIN(ltoh32(dngl_hc->top_module_len),
5353 						HEALTH_CHK_BUF_SIZE));
5354 #endif /* DHD_LOG_DUMP */
5355 				p = (uint8 *)dngl_hc->value;
5356 
5357 				switch (ltoh32(dngl_hc->top_module_tag)) {
5358 					case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
5359 					   {
5360 						bcm_dngl_pcie_hc_t *pcie_hc;
5361 						pcie_hc = (bcm_dngl_pcie_hc_t *)p;
5362 						BCM_REFERENCE(pcie_hc);
5363 						if (ltoh32(dngl_hc->top_module_len) <
5364 								sizeof(bcm_dngl_pcie_hc_t)) {
5365 							DHD_ERROR(("Wrong length:%d\n",
5366 								ltoh32(dngl_hc->top_module_len)));
5367 							return;
5368 						}
5369 						DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
5370 							" control:0x%x\n",
5371 							ltoh32(pcie_hc->version),
5372 							ltoh32(pcie_hc->pcie_err_ind_type),
5373 							ltoh32(pcie_hc->pcie_flag),
5374 							ltoh32(pcie_hc->pcie_control_reg)));
5375 #ifdef PARSE_DONGLE_HOST_EVENT
5376 						dhd_print_dongle_hck_id(
5377 							ltoh32(pcie_hc->pcie_err_ind_type),
5378 								hck_pcie_module_to_str);
5379 #endif /* PARSE_DONGLE_HOST_EVENT */
5380 						break;
5381 					   }
5382 #ifdef HCHK_COMMON_SW_EVENT
5383 					case HCHK_SW_ENTITY_WL_PRIMARY:
5384 					case HCHK_SW_ENTITY_WL_SECONDARY:
5385 					{
5386 						bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
5387 
5388 						if (ltoh32(dngl_hc->top_module_len) <
5389 								sizeof(bcm_xtlv_t)) {
5390 							DHD_ERROR(("WL SW HC Wrong length:%d\n",
5391 								ltoh32(dngl_hc->top_module_len)));
5392 							return;
5393 						}
5394 						BCM_REFERENCE(wl_hc);
5395 						DHD_EVENT(("WL SW HC type %d len %d\n",
5396 						ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
5397 
5398 #ifdef PARSE_DONGLE_HOST_EVENT
5399 						dhd_parse_hck_common_sw_event(wl_hc);
5400 #endif /* PARSE_DONGLE_HOST_EVENT */
5401 						break;
5402 
5403 					}
5404 #endif /* HCHK_COMMON_SW_EVENT */
5405 					default:
5406 					{
5407 						DHD_ERROR(("%s:Unknown module TAG:%d\n",
5408 						  __FUNCTION__,
5409 						  ltoh32(dngl_hc->top_module_tag)));
5410 						break;
5411 					}
5412 				}
5413 				break;
5414 			   }
5415 			default:
5416 			   DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
5417 			   if (p && DHD_EVENT_ON()) {
5418 				   prhex("SOCRAMIND", p, taglen);
5419 			   }
5420 			   break;
5421 		   }
5422 		   break;
5423 		}
5424 	   default:
5425 		DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
5426 		if (p && DHD_EVENT_ON()) {
5427 			prhex("SOCRAMIND", p, datalen);
5428 		}
5429 		break;
5430 	}
5431 #ifndef BCMDBUS
5432 #ifdef DHD_FW_COREDUMP
5433 	if (dhdp->memdump_enabled) {
5434 		dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
5435 		if (
5436 #ifdef GDB_PROXY
5437 			!dhdp->gdb_proxy_active &&
5438 #endif /* GDB_PROXY */
5439 			dhd_schedule_socram_dump(dhdp)) {
5440 				DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
5441 		}
5442 	}
5443 #else
5444 	dhd_dbg_send_urgent_evt(dhdp, p, datalen);
5445 #endif /* DHD_FW_COREDUMP */
5446 #endif /* !BCMDBUS */
5447 }
5448 
5449 #endif /* DNGL_EVENT_SUPPORT */
5450 
/* Stub for now. Will become a real function as soon as the shim
 * is integrated into Android, Linux, etc.
 */
5454 #if !defined(NDIS)
5455 int
wl_event_process_default(wl_event_msg_t * event,struct wl_evt_pport * evt_pport)5456 wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
5457 {
5458 	return BCME_OK;
5459 }
5460 #endif
5461 
5462 int
wl_event_process(dhd_pub_t * dhd_pub,int * ifidx,void * pktdata,uint pktlen,void ** data_ptr,void * raw_event)5463 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
5464 	uint pktlen, void **data_ptr, void *raw_event)
5465 {
5466 	wl_evt_pport_t evt_pport;
5467 	wl_event_msg_t event;
5468 	bcm_event_msg_u_t evu;
5469 	int ret;
5470 
5471 	/* make sure it is a BRCM event pkt and record event data */
5472 	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5473 	if (ret != BCME_OK) {
5474 		return ret;
5475 	}
5476 
5477 	memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5478 
5479 	/* convert event from network order to host order */
5480 	wl_event_to_host_order(&event);
5481 
5482 	/* record event params to evt_pport */
5483 	evt_pport.dhd_pub = dhd_pub;
5484 	evt_pport.ifidx = ifidx;
5485 	evt_pport.pktdata = pktdata;
5486 	evt_pport.data_ptr = data_ptr;
5487 	evt_pport.raw_event = raw_event;
5488 	evt_pport.data_len = pktlen;
5489 
5490 #if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
5491 	{
5492 		struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
5493 		if (shim) {
5494 			ret = wl_shim_event_process(shim, &event, &evt_pport);
5495 		} else {
5496 			/* events can come even before shim is initialized
5497 			 (when waiting for "wlc_ver" response)
5498 			 * handle them in a non-shim way.
5499 			 */
5500 			DHD_ERROR(("%s: Events coming before shim initialization!\n",
5501 				__FUNCTION__));
5502 			ret = wl_event_process_default(&event, &evt_pport);
5503 		}
5504 	}
5505 #else
5506 	ret = wl_event_process_default(&event, &evt_pport);
5507 #endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */
5508 
5509 	return ret;
5510 } /* wl_event_process */
5511 
5512 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
5513 int
wl_host_event_get_data(void * pktdata,uint pktlen,bcm_event_msg_u_t * evu)5514 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
5515 {
5516 	int ret;
5517 
5518 	ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
5519 	if (ret != BCME_OK) {
5520 		DHD_ERROR(("%s: Invalid event frame, err = %d\n",
5521 			__FUNCTION__, ret));
5522 	}
5523 
5524 	return ret;
5525 }
5526 
5527 int
wl_process_host_event(dhd_pub_t * dhd_pub,int * ifidx,void * pktdata,uint pktlen,wl_event_msg_t * event,void ** data_ptr,void * raw_event)5528 wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
5529 	wl_event_msg_t *event, void **data_ptr, void *raw_event)
5530 {
5531 	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
5532 	bcm_event_msg_u_t evu;
5533 	uint8 *event_data;
5534 	uint32 type, status, datalen, reason;
5535 	uint16 flags;
5536 	uint evlen;
5537 	int ret;
5538 	uint16 usr_subtype;
5539 #if defined(__linux__)
5540 	dhd_if_t *ifp = NULL;
5541 	BCM_REFERENCE(ifp);
5542 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
5543 
5544 	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5545 	if (ret != BCME_OK) {
5546 		return ret;
5547 	}
5548 
5549 	usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
5550 	switch (usr_subtype) {
5551 	case BCMILCP_BCM_SUBTYPE_EVENT:
5552 		memcpy(event, &evu.event, sizeof(wl_event_msg_t));
5553 		*data_ptr = &pvt_data[1];
5554 		break;
5555 	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
5556 #ifdef DNGL_EVENT_SUPPORT
5557 		/* If it is a DNGL event process it first */
5558 		if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
5559 			/*
5560 			 * Return error purposely to prevent DNGL event being processed
5561 			 * as BRCM event
5562 			 */
5563 			return BCME_ERROR;
5564 		}
5565 #endif /* DNGL_EVENT_SUPPORT */
5566 		return BCME_NOTFOUND;
5567 	default:
5568 		return BCME_NOTFOUND;
5569 	}
5570 
5571 	/* start wl_event_msg process */
5572 	event_data = *data_ptr;
5573 	type = ntoh32_ua((void *)&event->event_type);
5574 	flags = ntoh16_ua((void *)&event->flags);
5575 	status = ntoh32_ua((void *)&event->status);
5576 	reason = ntoh32_ua((void *)&event->reason);
5577 	datalen = ntoh32_ua((void *)&event->datalen);
5578 	evlen = datalen + sizeof(bcm_event_t);
5579 
5580 	switch (type) {
5581 #ifdef PROP_TXSTATUS
5582 	case WLC_E_FIFO_CREDIT_MAP:
5583 		dhd_wlfc_enable(dhd_pub);
5584 		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
5585 		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
5586 			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
5587 			event_data[2],
5588 			event_data[3], event_data[4], event_data[5]));
5589 		break;
5590 
5591 	case WLC_E_BCMC_CREDIT_SUPPORT:
5592 		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
5593 		break;
5594 #ifdef LIMIT_BORROW
5595 	case WLC_E_ALLOW_CREDIT_BORROW:
5596 		dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
5597 		break;
5598 #endif /* LIMIT_BORROW */
5599 #endif /* PROP_TXSTATUS */
5600 
5601 	case WLC_E_ULP:
5602 		break;
5603 	case WLC_E_TDLS_PEER_EVENT:
5604 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
5605 		{
5606 			dhd_tdls_event_handler(dhd_pub, event);
5607 		}
5608 #endif
5609 		break;
5610 
5611 	case WLC_E_IF:
5612 		{
5613 		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
5614 
5615 		/* Ignore the event if NOIF is set */
5616 		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
5617 			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
5618 			return (BCME_UNSUPPORTED);
5619 		}
5620 #ifdef PCIE_FULL_DONGLE
5621 		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
5622 			ifevent->opcode, ifevent->role);
5623 #endif
5624 #ifdef PROP_TXSTATUS
5625 		{
5626 			uint8* ea = pvt_data->eth.ether_dhost;
5627 			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
5628 						  ifevent->ifidx,
5629 						  ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
5630 						  ((ifevent->role == 0) ? "STA":"AP "),
5631 						  MAC2STRDBG(ea)));
5632 			(void)ea;
5633 
5634 			if (ifevent->opcode == WLC_E_IF_CHANGE)
5635 				dhd_wlfc_interface_event(dhd_pub,
5636 					eWLFC_MAC_ENTRY_ACTION_UPDATE,
5637 					ifevent->ifidx, ifevent->role, ea);
5638 			else
5639 				dhd_wlfc_interface_event(dhd_pub,
5640 					((ifevent->opcode == WLC_E_IF_ADD) ?
5641 					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
5642 					ifevent->ifidx, ifevent->role, ea);
5643 
5644 			/* dhd already has created an interface by default, for 0 */
5645 			if (ifevent->ifidx == 0)
5646 				break;
5647 		}
5648 #endif /* PROP_TXSTATUS */
5649 
5650 		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
5651 			if (ifevent->opcode == WLC_E_IF_ADD) {
5652 				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
5653 					event->addr.octet)) {
5654 
5655 					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
5656 						__FUNCTION__, ifevent->ifidx, event->ifname));
5657 					return (BCME_ERROR);
5658 				}
5659 			} else if (ifevent->opcode == WLC_E_IF_DEL) {
5660 #ifdef PCIE_FULL_DONGLE
5661 				dhd_flow_rings_delete(dhd_pub,
5662 					(uint8)dhd_ifname2idx(dhd_pub->info, event->ifname));
5663 #endif /* PCIE_FULL_DONGLE */
5664 				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
5665 					event->addr.octet);
5666 			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
5667 #ifdef WL_CFG80211
5668 				dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
5669 					event->addr.octet);
5670 #endif /* WL_CFG80211 */
5671 			}
5672 		} else {
5673 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
5674 			DHD_INFO(("%s: Invalid ifidx %d for %s\n",
5675 			   __FUNCTION__, ifevent->ifidx, event->ifname));
5676 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
5677 		}
5678 			/* send up the if event: btamp user needs it */
5679 			*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
5680 			/* push up to external supp/auth */
5681 			dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
5682 		break;
5683 	}
5684 
5685 	case WLC_E_NDIS_LINK:
5686 		break;
5687 	case WLC_E_PFN_NET_FOUND:
5688 	case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
5689 	case WLC_E_PFN_NET_LOST:
5690 		break;
5691 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
5692 	case WLC_E_PFN_BSSID_NET_FOUND:
5693 	case WLC_E_PFN_BEST_BATCHING:
5694 		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
5695 		break;
5696 #endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
5697 #if defined(RTT_SUPPORT)
5698 	case WLC_E_PROXD:
5699 #ifndef WL_CFG80211
5700 		dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
5701 #endif /* WL_CFG80211 */
5702 		break;
5703 #endif /* RTT_SUPPORT */
5704 		/* These are what external supplicant/authenticator wants */
5705 	case WLC_E_ASSOC_IND:
5706 	case WLC_E_AUTH_IND:
5707 	case WLC_E_REASSOC_IND:
5708 		dhd_findadd_sta(dhd_pub,
5709 			dhd_ifname2idx(dhd_pub->info, event->ifname),
5710 			&event->addr.octet);
5711 		break;
5712 #if !defined(BCMDBUS) && defined(DHD_FW_COREDUMP)
5713 	case WLC_E_PSM_WATCHDOG:
5714 		DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
5715 		if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
5716 			DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
5717 		}
5718 	break;
5719 #endif
5720 #ifdef DHD_WMF
5721 	case WLC_E_PSTA_PRIMARY_INTF_IND:
5722 		dhd_update_psta_interface_for_sta(dhd_pub, event->ifname,
5723 			(void *)(event->addr.octet), (void*) event_data);
5724 		break;
5725 #endif
5726 #ifdef BCM_ROUTER_DHD
5727 	case WLC_E_DPSTA_INTF_IND:
5728 		dhd_update_dpsta_interface_for_sta(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
5729 			event->ifname), (void*) event_data);
5730 		break;
5731 #endif /* BCM_ROUTER_DHD */
5732 #ifdef BCMDBG
5733 	case WLC_E_MACDBG:
5734 		dhd_macdbg_event_handler(dhd_pub, reason, event_data, datalen);
5735 		break;
5736 #endif /* BCMDBG */
5737 	case WLC_E_NATOE_NFCT:
5738 #ifdef WL_NATOE
5739 		DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
5740 		dhd_natoe_ct_event(dhd_pub, event_data);
5741 #endif /* WL_NATOE */
5742 	break;
5743 	case WLC_E_SLOTTED_BSS_PEER_OP:
5744 		DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
5745 			"" MACDBG ", status = %d\n",
5746 			__FUNCTION__, MAC2STRDBG(event->addr.octet), status));
5747 		if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
5748 			dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
5749 				event->ifname), &event->addr.octet);
5750 		} else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
5751 			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
5752 			BCM_REFERENCE(ifindex);
5753 			dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
5754 				event->ifname), &event->addr.octet);
5755 #ifdef PCIE_FULL_DONGLE
5756 			dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
5757 				(char *)&event->addr.octet[0]);
5758 #endif
5759 		} else {
5760 			DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
5761 				__FUNCTION__, status));
5762 		}
5763 		break;
5764 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
5765 	case WLC_E_REASSOC:
5766 		ifp = dhd_get_ifp(dhd_pub, event->ifidx);
5767 
5768 		if (!ifp)
5769 			break;
5770 
5771 		/* Consider STA role only since roam is disabled on P2P GC.
5772 		 * Drop EAPOL M1 frame only if roam is done to same BSS.
5773 		 */
5774 		if ((status == WLC_E_STATUS_SUCCESS) &&
5775 			IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
5776 			wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
5777 			ifp->recv_reassoc_evt = TRUE;
5778 		}
5779 		break;
5780 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
5781 #if defined(CSI_SUPPORT)
5782 	case WLC_E_CSI:
5783 		dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
5784 		break;
5785 #endif /* CSI_SUPPORT */
5786 	case WLC_E_LINK:
5787 #ifdef PCIE_FULL_DONGLE
5788 		if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
5789 			event->ifname), (uint8)flags) != BCME_OK) {
5790 			DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
5791 				__FUNCTION__));
5792 			break;
5793 		}
5794 		if (!flags) {
5795 			DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
5796 				__FUNCTION__));
5797 			/* Delete all sta and flowrings */
5798 			dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
5799 			dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
5800 				event->ifname));
5801 		}
5802 		/* fall through */
5803 #endif /* PCIE_FULL_DONGLE */
5804 	case WLC_E_DEAUTH:
5805 	case WLC_E_DEAUTH_IND:
5806 	case WLC_E_DISASSOC:
5807 	case WLC_E_DISASSOC_IND:
5808 #ifdef PCIE_FULL_DONGLE
5809 		if (type != WLC_E_LINK) {
5810 			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
5811 			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
5812 			uint8 del_sta = TRUE;
5813 #ifdef WL_CFG80211
5814 			if (role == WLC_E_IF_ROLE_STA &&
5815 				!wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
5816 					!wl_cfg80211_is_event_from_connected_bssid(
5817 						dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
5818 				del_sta = FALSE;
5819 			}
5820 #endif /* WL_CFG80211 */
5821 			DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
5822 				__FUNCTION__, type, flags, status, role, del_sta));
5823 
5824 			if (del_sta) {
5825 				DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
5826 					__FUNCTION__, MAC2STRDBG(event->addr.octet)));
5827 
5828 				dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
5829 					event->ifname), &event->addr.octet);
5830 				/* Delete all flowrings for STA and P2P Client */
5831 				if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
5832 					dhd_flow_rings_delete(dhd_pub, ifindex);
5833 				} else {
5834 					dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
5835 						(char *)&event->addr.octet[0]);
5836 				}
5837 			}
5838 		}
5839 #endif /* PCIE_FULL_DONGLE */
5840 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
5841 		/* fall through */
5842 		ifp = dhd_get_ifp(dhd_pub, event->ifidx);
5843 		if (ifp) {
5844 			ifp->recv_reassoc_evt = FALSE;
5845 			ifp->post_roam_evt = FALSE;
5846 		}
5847 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
5848 		/* fall through */
5849 	default:
5850 		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
5851 #ifdef DHD_UPDATE_INTF_MAC
5852 		if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) {
5853 			dhd_event_ifchange(dhd_pub->info,
5854 			(struct wl_event_data_if *)event,
5855 			event->ifname,
5856 			event->addr.octet);
5857 		}
5858 #endif /* DHD_UPDATE_INTF_MAC */
5859 		/* push up to external supp/auth */
5860 		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
5861 		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
5862 			__FUNCTION__, type, flags, status));
5863 		BCM_REFERENCE(flags);
5864 		BCM_REFERENCE(status);
5865 		BCM_REFERENCE(reason);
5866 
5867 		break;
5868 	}
5869 #if defined(BCM_ROUTER_DHD) || defined(STBAP)
5870 	/* For routers, EAPD will be working on these events.
5871 	 * Overwrite interface name to that event is pushed
5872 	 * to host with its registered interface name
5873 	 */
5874 	memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
5875 #endif
5876 
5877 #ifdef DHD_STATUS_LOGGING
5878 	if (dhd_pub->statlog) {
5879 		dhd_statlog_process_event(dhd_pub, type, *ifidx,
5880 			status, reason, flags);
5881 	}
5882 #endif /* DHD_STATUS_LOGGING */
5883 
5884 #ifdef SHOW_EVENTS
5885 	if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
5886 		wl_show_host_event(dhd_pub, event,
5887 			(void *)event_data, raw_event, dhd_pub->enable_log);
5888 	}
5889 #endif /* SHOW_EVENTS */
5890 
5891 	return (BCME_OK);
5892 } /* wl_process_host_event */
5893 
5894 int
wl_host_event(dhd_pub_t * dhd_pub,int * ifidx,void * pktdata,uint pktlen,wl_event_msg_t * event,void ** data_ptr,void * raw_event)5895 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
5896 	wl_event_msg_t *event, void **data_ptr, void *raw_event)
5897 {
5898 	return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
5899 			raw_event);
5900 }
5901 
/* Hex-dump 'len' bytes of 'pbuf' to the console, 'bytes_per_line' bytes
 * per row separated by ':' (0 means everything on one row). Compiled out
 * unless DHD_DEBUG is defined.
 */
void
dhd_print_buf(void *pbuf, int len, int bytes_per_line)
{
#ifdef DHD_DEBUG
	const unsigned char *bytes = pbuf;
	int idx;

	if (bytes_per_line == 0) {
		bytes_per_line = len;
	}

	for (idx = 0; idx < len; idx++) {
		printf("%2.2x", bytes[idx]);
		/* newline at the end of each row, ':' between bytes otherwise */
		if (((idx + 1) % bytes_per_line) == 0) {
			printf("\n");
		} else {
			printf(":");
		}
	}
	printf("\n");
#endif /* DHD_DEBUG */
}
5926 #ifndef strtoul
5927 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
5928 #endif
5929 
5930 /* Convert user's input in hex pattern to byte-size mask */
5931 int
wl_pattern_atoh(char * src,char * dst)5932 wl_pattern_atoh(char *src, char *dst)
5933 {
5934 	int i;
5935 	if (strncmp(src, "0x", 2) != 0 &&
5936 	    strncmp(src, "0X", 2) != 0) {
5937 		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
5938 		return -1;
5939 	}
5940 	src = src + 2; /* Skip past 0x */
5941 	if (strlen(src) % 2 != 0) {
5942 		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
5943 		return -1;
5944 	}
5945 	for (i = 0; *src != '\0'; i++) {
5946 		char num[3];
5947 		bcm_strncpy_s(num, sizeof(num), src, 2);
5948 		num[2] = '\0';
5949 		dst[i] = (uint8)strtoul(num, NULL, 16);
5950 		src += 2;
5951 	}
5952 	return i;
5953 }
5954 
5955 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
5956 int
pattern_atoh_len(char * src,char * dst,int len)5957 pattern_atoh_len(char *src, char *dst, int len)
5958 {
5959 	int i;
5960 	if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
5961 			strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
5962 		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
5963 		return -1;
5964 	}
5965 	src = src + HD_PREFIX_SIZE; /* Skip past 0x */
5966 	if (strlen(src) % HD_BYTE_SIZE != 0) {
5967 		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
5968 		return -1;
5969 	}
5970 	for (i = 0; *src != '\0'; i++) {
5971 		char num[HD_BYTE_SIZE + 1];
5972 
5973 		if (i > len - 1) {
5974 			DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
5975 			return -1;
5976 		}
5977 		bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
5978 		num[HD_BYTE_SIZE] = '\0';
5979 		dst[i] = (uint8)strtoul(num, NULL, 16);
5980 		src += HD_BYTE_SIZE;
5981 	}
5982 	return i;
5983 }
5984 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
5985 
5986 #ifdef PKT_FILTER_SUPPORT
/* Enable or disable an already-installed packet filter in the dongle.
 *
 * 'arg' is a space-separated string whose first token is the numeric
 * filter id; 'enable' selects enable/disable; 'master_mode' is then
 * written to the "pkt_filter_mode" iovar. No return value: failures are
 * logged, and after the first failure the whole filter set is
 * re-programmed once via dhd_set_packet_filter() and the enable retried.
 */
void
dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
{
	char				*argv[8];
	int					i = 0;
	const char			*str;
	int					buf_len;
	int					str_len;
	char				*arg_save = 0, *arg_org = 0;
	int					rc;
	char				buf[32] = {0};
	wl_pkt_filter_enable_t	enable_parm;
	wl_pkt_filter_enable_t	* pkt_filterp;

	if (!arg)
		return;

	/* work on a private copy: bcmstrtok modifies the string in place */
	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
		goto fail;
	}
	arg_org = arg_save;
	memcpy(arg_save, arg, strlen(arg) + 1);

	argv[i] = bcmstrtok(&arg_save, " ", 0);

	i = 0;
	if (argv[i] == NULL) {
		DHD_ERROR(("No args provided\n"));
		goto fail;
	}

	/* iovar buffer layout: "pkt_filter_enable\0" followed by the
	 * wl_pkt_filter_enable_t payload
	 */
	str = "pkt_filter_enable";
	str_len = strlen(str);
	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
	buf[ sizeof(buf) - 1 ] = '\0';
	buf_len = str_len + 1;

	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);

	/* Parse packet filter id. */
	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
	if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
		goto fail;

	/* Parse enable/disable value. */
	enable_parm.enable = htod32(enable);

	buf_len += sizeof(enable_parm);
	/* memcpy rather than struct assignment: pkt_filterp may be unaligned */
	memcpy((char *)pkt_filterp,
	       &enable_parm,
	       sizeof(enable_parm));

	/* Enable/disable the specified filter. */
	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc) {
		DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
		__FUNCTION__, enable?"enable":"disable", arg, rc));
		/* re-program the full filter set once, then retry the enable */
		dhd_set_packet_filter(dhd);
		rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
		rc = rc >= 0 ? 0 : rc;
		if (rc) {
			DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
			__FUNCTION__, arg, rc));
		} else {
			DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
			__FUNCTION__, arg));
		}
	}
	else
		DHD_TRACE(("%s: successfully %s pktfilter %s\n",
		__FUNCTION__, enable?"enable":"disable", arg));

	/* Control the master mode */
	rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
		master_mode, WLC_SET_VAR, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc)
		DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
			__FUNCTION__, master_mode, rc));

fail:
	/* arg_save is advanced by bcmstrtok; free the saved original pointer */
	if (arg_org)
		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
}
6073 
/* Packet filter section: extended filters have named offsets, add table here */
typedef struct {
	char *name;	/* symbolic base name (uppercase in the table) */
	uint16 base;	/* numeric base-offset id sent to the firmware */
} wl_pfbase_t;

/* name -> id lookup table, searched by wl_pkt_filter_base_parse() */
static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
6081 
6082 static int
wl_pkt_filter_base_parse(char * name)6083 wl_pkt_filter_base_parse(char *name)
6084 {
6085 	uint i;
6086 	char *bname, *uname;
6087 
6088 	for (i = 0; i < ARRAYSIZE(basenames); i++) {
6089 		bname = basenames[i].name;
6090 		for (uname = name; *uname; bname++, uname++) {
6091 			if (*bname != bcm_toupper(*uname)) {
6092 				break;
6093 			}
6094 		}
6095 		if (!*uname && !*bname) {
6096 			break;
6097 		}
6098 	}
6099 
6100 	if (i < ARRAYSIZE(basenames)) {
6101 		return basenames[i].base;
6102 	} else {
6103 		return -1;
6104 	}
6105 }
6106 
6107 void
dhd_pktfilter_offload_set(dhd_pub_t * dhd,char * arg)6108 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
6109 {
6110 	const char			*str;
6111 	wl_pkt_filter_t			pkt_filter;
6112 	wl_pkt_filter_t			*pkt_filterp;
6113 	int				buf_len;
6114 	int				str_len;
6115 	int				rc = -1;
6116 	uint32				mask_size;
6117 	uint32				pattern_size;
6118 	char				*argv[MAXPKT_ARG] = {0}, * buf = 0;
6119 	int				i = 0;
6120 	char				*arg_save = 0, *arg_org = 0;
6121 
6122 	if (!arg)
6123 		return;
6124 
6125 	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
6126 		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
6127 		goto fail;
6128 	}
6129 
6130 	arg_org = arg_save;
6131 
6132 	if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
6133 		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
6134 		goto fail;
6135 	}
6136 
6137 	memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
6138 	memcpy(arg_save, arg, strlen(arg) + 1);
6139 
6140 	if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
6141 		DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
6142 		goto fail;
6143 	}
6144 
6145 	argv[i] = bcmstrtok(&arg_save, " ", 0);
6146 	while (argv[i++]) {
6147 		if (i >= MAXPKT_ARG) {
6148 			DHD_ERROR(("Invalid args provided\n"));
6149 			goto fail;
6150 		}
6151 		argv[i] = bcmstrtok(&arg_save, " ", 0);
6152 	}
6153 
6154 	i = 0;
6155 	if (argv[i] == NULL) {
6156 		DHD_ERROR(("No args provided\n"));
6157 		goto fail;
6158 	}
6159 
6160 	str = "pkt_filter_add";
6161 	str_len = strlen(str);
6162 	bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
6163 	buf[ str_len ] = '\0';
6164 	buf_len = str_len + 1;
6165 
6166 	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
6167 
6168 	/* Parse packet filter id. */
6169 	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
6170 
6171 	if (argv[++i] == NULL) {
6172 		DHD_ERROR(("Polarity not provided\n"));
6173 		goto fail;
6174 	}
6175 
6176 	/* Parse filter polarity. */
6177 	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
6178 
6179 	if (argv[++i] == NULL) {
6180 		DHD_ERROR(("Filter type not provided\n"));
6181 		goto fail;
6182 	}
6183 
6184 	/* Parse filter type. */
6185 	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
6186 
6187 	if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
6188 		if (argv[++i] == NULL) {
6189 			DHD_ERROR(("Offset not provided\n"));
6190 			goto fail;
6191 		}
6192 
6193 		/* Parse pattern filter offset. */
6194 		pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
6195 
6196 		if (argv[++i] == NULL) {
6197 			DHD_ERROR(("Bitmask not provided\n"));
6198 			goto fail;
6199 		}
6200 
6201 		/* Parse pattern filter mask. */
6202 		rc  = wl_pattern_atoh(argv[i],
6203 			(char *) pkt_filterp->u.pattern.mask_and_pattern);
6204 
6205 		if (rc == -1) {
6206 			DHD_ERROR(("Rejecting: %s\n", argv[i]));
6207 			goto fail;
6208 		}
6209 		mask_size = htod32(rc);
6210 		if (argv[++i] == NULL) {
6211 			DHD_ERROR(("Pattern not provided\n"));
6212 			goto fail;
6213 		}
6214 
6215 		/* Parse pattern filter pattern. */
6216 		rc = wl_pattern_atoh(argv[i],
6217 			(char *) &pkt_filterp->u.pattern.mask_and_pattern[rc]);
6218 
6219 		if (rc == -1) {
6220 			DHD_ERROR(("Rejecting: %s\n", argv[i]));
6221 			goto fail;
6222 		}
6223 		pattern_size = htod32(rc);
6224 		if (mask_size != pattern_size) {
6225 			DHD_ERROR(("Mask and pattern not the same size\n"));
6226 			goto fail;
6227 		}
6228 
6229 		pkt_filter.u.pattern.size_bytes = mask_size;
6230 		buf_len += WL_PKT_FILTER_FIXED_LEN;
6231 		buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * rc);
6232 
6233 		/* Keep-alive attributes are set in local	variable (keep_alive_pkt), and
6234 		 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
6235 		 * guarantee that the buffer is properly aligned.
6236 		 */
6237 		memcpy((char *)pkt_filterp,
6238 			&pkt_filter,
6239 			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
6240 	} else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
6241 		int list_cnt = 0;
6242 		char *endptr = NULL;
6243 		wl_pkt_filter_pattern_listel_t *pf_el =
6244 			(wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
6245 
6246 		while (argv[++i] != NULL) {
6247 			/* Check valid buffer size. */
6248 			if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
6249 				DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
6250 				goto fail;
6251 			}
6252 
6253 			/* Parse pattern filter base and offset. */
6254 			if (bcm_isdigit(*argv[i])) {
6255 				/* Numeric base */
6256 				rc = strtoul(argv[i], &endptr, 0);
6257 			} else {
6258 				endptr = strchr(argv[i], ':');
6259 				if (endptr) {
6260 					*endptr = '\0';
6261 					rc = wl_pkt_filter_base_parse(argv[i]);
6262 					if (rc == -1) {
6263 						 printf("Invalid base %s\n", argv[i]);
6264 						goto fail;
6265 					}
6266 					*endptr = ':';
6267 				}
6268 			}
6269 
6270 			if (endptr == NULL) {
6271 				printf("Invalid [base:]offset format: %s\n", argv[i]);
6272 				goto fail;
6273 			}
6274 
6275 			if (*endptr == ':') {
6276 				pf_el->base_offs = htod16(rc);
6277 				rc = strtoul(endptr + 1, &endptr, 0);
6278 			} else {
6279 				/* Must have had a numeric offset only */
6280 				pf_el->base_offs = htod16(0);
6281 			}
6282 
6283 			if (*endptr) {
6284 				printf("Invalid [base:]offset format: %s\n", argv[i]);
6285 				goto fail;
6286 			}
6287 			if (rc > 0x0000FFFF) {
6288 				printf("Offset too large\n");
6289 				goto fail;
6290 			}
6291 			pf_el->rel_offs = htod16(rc);
6292 
6293 			/* Clear match_flag (may be set in parsing which follows) */
6294 			pf_el->match_flags = htod16(0);
6295 
6296 			/* Parse pattern filter mask and pattern directly into ioctl buffer */
6297 			if (argv[++i] == NULL) {
6298 				printf("Bitmask not provided\n");
6299 				goto fail;
6300 			}
6301 			rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
6302 			if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
6303 				printf("Rejecting: %s\n", argv[i]);
6304 				goto fail;
6305 			}
6306 			mask_size = htod16(rc);
6307 
6308 			if (argv[++i] == NULL) {
6309 				printf("Pattern not provided\n");
6310 				goto fail;
6311 			}
6312 
6313 			endptr = argv[i];
6314 			if (*endptr == '!') {
6315 				pf_el->match_flags =
6316 					htod16(WL_PKT_FILTER_MFLAG_NEG);
6317 				if (*(++endptr) == '\0') {
6318 					printf("Pattern not provided\n");
6319 					goto fail;
6320 				}
6321 			}
6322 			rc = wl_pattern_atoh(endptr, (char*)&pf_el->mask_and_data[rc]);
6323 			if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
6324 				printf("Rejecting: %s\n", argv[i]);
6325 				goto fail;
6326 			}
6327 			pattern_size = htod16(rc);
6328 
6329 			if (mask_size != pattern_size) {
6330 				printf("Mask and pattern not the same size\n");
6331 				goto fail;
6332 			}
6333 
6334 			pf_el->size_bytes = mask_size;
6335 
6336 			/* Account for the size of this pattern element */
6337 			buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
6338 
6339 			/* Move to next element location in ioctl buffer */
6340 			pf_el = (wl_pkt_filter_pattern_listel_t*)
6341 				((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
6342 
6343 			/* Count list element */
6344 			list_cnt++;
6345 		}
6346 
6347 		/* Account for initial fixed size, and copy initial fixed fields */
6348 		buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
6349 
6350 		if (buf_len > MAX_PKTFLT_BUF_SIZE) {
6351 			DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
6352 			goto fail;
6353 		}
6354 
6355 		/* Update list count and total size */
6356 		pkt_filter.u.patlist.list_cnt = list_cnt;
6357 		pkt_filter.u.patlist.PAD1[0] = 0;
6358 		pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
6359 		pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
6360 
6361 		memcpy((char *)pkt_filterp, &pkt_filter,
6362 			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
6363 	} else {
6364 		DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
6365 		goto fail;
6366 	}
6367 
6368 	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
6369 	rc = rc >= 0 ? 0 : rc;
6370 
6371 	if (rc)
6372 		DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
6373 		__FUNCTION__, arg, rc));
6374 	else
6375 		DHD_TRACE(("%s: successfully added pktfilter %s\n",
6376 		__FUNCTION__, arg));
6377 
6378 fail:
6379 	if (arg_org)
6380 		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
6381 
6382 	if (buf)
6383 		MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
6384 }
6385 
6386 void
dhd_pktfilter_offload_delete(dhd_pub_t * dhd,int id)6387 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
6388 {
6389 	int ret;
6390 
6391 	ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
6392 		id, WLC_SET_VAR, TRUE, 0);
6393 	if (ret < 0) {
6394 		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
6395 			__FUNCTION__, id, ret));
6396 	}
6397 	else
6398 		DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
6399 		__FUNCTION__, id));
6400 }
6401 #endif /* PKT_FILTER_SUPPORT */
6402 
6403 /* ========================== */
6404 /* ==== ARP OFFLOAD SUPPORT = */
6405 /* ========================== */
6406 #ifdef ARP_OFFLOAD_SUPPORT
6407 void
dhd_arp_offload_set(dhd_pub_t * dhd,int arp_mode)6408 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
6409 {
6410 	int retcode;
6411 
6412 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
6413 		arp_mode, WLC_SET_VAR, TRUE, 0);
6414 
6415 	retcode = retcode >= 0 ? 0 : retcode;
6416 	if (retcode) {
6417 		DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
6418 			__FUNCTION__, arp_mode, retcode));
6419 	} else {
6420 		DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
6421 			__FUNCTION__, arp_mode));
6422 		dhd->arpol_configured = TRUE;
6423 	}
6424 }
6425 
6426 void
dhd_arp_offload_enable(dhd_pub_t * dhd,int arp_enable)6427 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
6428 {
6429 	int retcode;
6430 
6431 	if (!dhd->arpol_configured) {
6432 		/* If arpol is not applied, apply it */
6433 		dhd_arp_offload_set(dhd, dhd_arp_mode);
6434 	}
6435 
6436 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
6437 		arp_enable, WLC_SET_VAR, TRUE, 0);
6438 	retcode = retcode >= 0 ? 0 : retcode;
6439 	if (retcode)
6440 		DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
6441 			__FUNCTION__, arp_enable, retcode));
6442 	else
6443 #ifdef DHD_LOG_DUMP
6444 		DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
6445 			__FUNCTION__, arp_enable));
6446 #else
6447 		DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
6448 			__FUNCTION__, arp_enable));
6449 #endif /* DHD_LOG_DUMP */
6450 	if (arp_enable) {
6451 		uint32 version;
6452 		retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
6453 			&version, WLC_GET_VAR, FALSE, 0);
6454 		if (retcode) {
6455 			DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
6456 				__FUNCTION__, retcode));
6457 			dhd->arp_version = 1;
6458 		}
6459 		else {
6460 			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
6461 			dhd->arp_version = version;
6462 		}
6463 	}
6464 }
6465 
6466 /* XXX ANDREY: clear AOE arp_table  */
6467 void
dhd_aoe_arp_clr(dhd_pub_t * dhd,int idx)6468 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
6469 {
6470 	int ret = 0;
6471 
6472 	if (dhd == NULL) return;
6473 	if (dhd->arp_version == 1)
6474 		idx = 0;
6475 
6476 	ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
6477 	if (ret < 0)
6478 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
6479 	else {
6480 #ifdef DHD_LOG_DUMP
6481 		DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
6482 #else
6483 		DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
6484 #endif /* DHD_LOG_DUMP */
6485 	}
6486 	/* mac address isn't cleared here but it will be cleared after dongle off */
6487 	dhd->hmac_updated = 0;
6488 }
6489 
6490 /* XXX ANDREY: clear hostip table  */
6491 void
dhd_aoe_hostip_clr(dhd_pub_t * dhd,int idx)6492 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
6493 {
6494 	int ret = 0;
6495 
6496 	if (dhd == NULL) return;
6497 	if (dhd->arp_version == 1)
6498 		idx = 0;
6499 
6500 	ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
6501 	if (ret < 0)
6502 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
6503 	else {
6504 #ifdef DHD_LOG_DUMP
6505 		DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
6506 #else
6507 		DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
6508 #endif /* DHD_LOG_DUMP */
6509 	}
6510 }
6511 
6512 void
dhd_arp_offload_add_ip(dhd_pub_t * dhd,uint32 ipaddr,int idx)6513 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
6514 {
6515 	int ret;
6516 
6517 	if (dhd == NULL) return;
6518 	if (dhd->arp_version == 1)
6519 		idx = 0;
6520 
6521 	ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
6522 			NULL, 0, TRUE);
6523 	if (ret < 0)
6524 		DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
6525 	else {
6526 		/* mac address is updated in the dongle */
6527 		dhd->hmac_updated = 1;
6528 #ifdef DHD_LOG_DUMP
6529 		DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
6530 #else
6531 		DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
6532 #endif /* DHD_LOG_DUMP */
6533 	}
6534 }
6535 
6536 int
dhd_arp_get_arp_hostip_table(dhd_pub_t * dhd,void * buf,int buflen,int idx)6537 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
6538 {
6539 	int ret, i;
6540 	uint32 *ptr32 = buf;
6541 	bool clr_bottom = FALSE;
6542 
6543 	if (!buf)
6544 		return -1;
6545 	if (dhd == NULL) return -1;
6546 	if (dhd->arp_version == 1)
6547 		idx = 0;
6548 
6549 	ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
6550 			FALSE);
6551 	if (ret) {
6552 		DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
6553 		__FUNCTION__, ret));
6554 
6555 		return -1;
6556 	}
6557 
6558 	/* clean up the buf, ascii reminder */
6559 	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
6560 		if (!clr_bottom) {
6561 			if (*ptr32 == 0)
6562 				clr_bottom = TRUE;
6563 		} else {
6564 			*ptr32 = 0;
6565 		}
6566 		ptr32++;
6567 	}
6568 
6569 	return 0;
6570 }
6571 #endif /* ARP_OFFLOAD_SUPPORT  */
6572 
6573 /*
6574  * Neighbor Discovery Offload: enable NDO feature
6575  * Called  by ipv6 event handler when interface comes up/goes down
6576  */
6577 int
dhd_ndo_enable(dhd_pub_t * dhd,int ndo_enable)6578 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
6579 {
6580 	int retcode;
6581 
6582 	if (dhd == NULL)
6583 		return -1;
6584 
6585 #if defined(WL_CFG80211) && defined(WL_NAN)
6586 	if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
6587 		/* If nan dp is active, skip NDO */
6588 		DHD_INFO(("Active NAN DP, skip NDO\n"));
6589 		return 0;
6590 	}
6591 #endif /* WL_CFG80211 && WL_NAN */
6592 #ifdef WL_CFG80211
6593 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6594 		/* NDO disable on STA+SOFTAP mode */
6595 		ndo_enable = FALSE;
6596 	}
6597 #endif /* WL_CFG80211 */
6598 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
6599 		ndo_enable, WLC_SET_VAR, TRUE, 0);
6600 	if (retcode)
6601 		DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
6602 			__FUNCTION__, ndo_enable, retcode));
6603 	else
6604 		DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
6605 			__FUNCTION__, ndo_enable));
6606 
6607 	return retcode;
6608 }
6609 
6610 /*
6611  * Neighbor Discover Offload: enable NDO feature
6612  * Called  by ipv6 event handler when interface comes up
6613  */
6614 int
dhd_ndo_add_ip(dhd_pub_t * dhd,char * ipv6addr,int idx)6615 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
6616 {
6617 	int iov_len = 0;
6618 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6619 	int retcode;
6620 
6621 	if (dhd == NULL)
6622 		return -1;
6623 
6624 	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
6625 		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
6626 	if (!iov_len) {
6627 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6628 			__FUNCTION__, sizeof(iovbuf)));
6629 		return -1;
6630 	}
6631 	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6632 
6633 	if (retcode)
6634 		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
6635 		__FUNCTION__, retcode));
6636 	else
6637 		DHD_TRACE(("%s: ndo ipaddr entry added \n",
6638 		__FUNCTION__));
6639 
6640 	return retcode;
6641 }
6642 
6643 /*
6644  * Neighbor Discover Offload: enable NDO feature
6645  * Called  by ipv6 event handler when interface goes down
6646  */
6647 int
dhd_ndo_remove_ip(dhd_pub_t * dhd,int idx)6648 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
6649 {
6650 	int iov_len = 0;
6651 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6652 	int retcode;
6653 
6654 	if (dhd == NULL)
6655 		return -1;
6656 
6657 	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
6658 		0, iovbuf, sizeof(iovbuf));
6659 	if (!iov_len) {
6660 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6661 			__FUNCTION__, sizeof(iovbuf)));
6662 		return -1;
6663 	}
6664 	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6665 
6666 	if (retcode)
6667 		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
6668 		__FUNCTION__, retcode));
6669 	else
6670 		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
6671 		__FUNCTION__));
6672 
6673 	return retcode;
6674 }
/* Enhanced ND offload */
/*
 * Query the firmware for the "nd_hostip" iovar version.
 * Returns the iovar version (0 when the versioned iovar is unsupported).
 *
 * NOTE(review): on a NULL dhdp this returns BCME_ERROR through a uint16
 * return type, which truncates the negative code to a large positive
 * value (0xFFFF) — callers cannot distinguish it from a version; confirm
 * whether any caller depends on this.
 */
uint16
dhd_ndo_get_version(dhd_pub_t *dhdp)
{
	char iovbuf[DHD_IOVAR_BUF_SIZE];
	wl_nd_hostip_t ndo_get_ver;
	int iov_len;
	int retcode;
	uint16 ver = 0;

	if (dhdp == NULL) {
		return BCME_ERROR;
	}

	/* build a WL_ND_HOSTIP_OP_VER request; lengths in dongle byte order */
	memset(&iovbuf, 0, sizeof(iovbuf));
	ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
	ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
	ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
	ndo_get_ver.u.version = 0;
	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
		WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));

	if (!iov_len) {
		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
			__FUNCTION__, sizeof(iovbuf)));
		return BCME_ERROR;
	}

	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);

	if (retcode) {
		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
		/* ver iovar not supported. NDO version is 0 */
		ver = 0;
	} else {
		wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;

		/* only accept the version if the echoed header matches the request */
		if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
				(dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
				(dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
					+ sizeof(uint16))) {
			/* nd_hostip iovar version */
			ver = dtoh16(ndo_ver_ret->u.version);
		}

		DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
	}

	return ver;
}
6725 
6726 int
dhd_ndo_add_ip_with_type(dhd_pub_t * dhdp,char * ipv6addr,uint8 type,int idx)6727 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
6728 {
6729 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6730 	wl_nd_hostip_t ndo_add_addr;
6731 	int iov_len;
6732 	int retcode;
6733 
6734 	if (dhdp == NULL || ipv6addr == 0) {
6735 		return BCME_ERROR;
6736 	}
6737 
6738 	/* wl_nd_hostip_t fixed param */
6739 	ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6740 	ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
6741 	ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
6742 	/* wl_nd_host_ip_addr_t param for add */
6743 	memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
6744 	ndo_add_addr.u.host_ip.type = type;
6745 
6746 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
6747 		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
6748 	if (!iov_len) {
6749 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6750 			__FUNCTION__, sizeof(iovbuf)));
6751 		return BCME_ERROR;
6752 	}
6753 
6754 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6755 	if (retcode) {
6756 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6757 #ifdef NDO_CONFIG_SUPPORT
6758 		if (retcode == BCME_NORESOURCE) {
6759 			/* number of host ip addr exceeds FW capacity, Deactivate ND offload */
6760 			DHD_INFO(("%s: Host IP count exceed device capacity,"
6761 				"ND offload deactivated\n", __FUNCTION__));
6762 			dhdp->ndo_host_ip_overflow = TRUE;
6763 			dhd_ndo_enable(dhdp, FALSE);
6764 		}
6765 #endif /* NDO_CONFIG_SUPPORT */
6766 	} else {
6767 		DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
6768 	}
6769 
6770 	return retcode;
6771 }
6772 
6773 int
dhd_ndo_remove_ip_by_addr(dhd_pub_t * dhdp,char * ipv6addr,int idx)6774 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
6775 {
6776 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6777 	wl_nd_hostip_t ndo_del_addr;
6778 	int iov_len;
6779 	int retcode;
6780 
6781 	if (dhdp == NULL || ipv6addr == 0) {
6782 		return BCME_ERROR;
6783 	}
6784 
6785 	/* wl_nd_hostip_t fixed param */
6786 	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6787 	ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
6788 	ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
6789 	/* wl_nd_host_ip_addr_t param for del */
6790 	memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
6791 	ndo_del_addr.u.host_ip.type = 0;	/* don't care */
6792 
6793 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
6794 		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
6795 
6796 	if (!iov_len) {
6797 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6798 			__FUNCTION__, sizeof(iovbuf)));
6799 		return BCME_ERROR;
6800 	}
6801 
6802 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6803 	if (retcode) {
6804 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6805 	} else {
6806 		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
6807 	}
6808 
6809 	return retcode;
6810 }
6811 
6812 int
dhd_ndo_remove_ip_by_type(dhd_pub_t * dhdp,uint8 type,int idx)6813 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
6814 {
6815 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6816 	wl_nd_hostip_t ndo_del_addr;
6817 	int iov_len;
6818 	int retcode;
6819 
6820 	if (dhdp == NULL) {
6821 		return BCME_ERROR;
6822 	}
6823 
6824 	/* wl_nd_hostip_t fixed param */
6825 	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6826 	if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
6827 		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
6828 	} else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
6829 		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
6830 	} else {
6831 		return BCME_BADARG;
6832 	}
6833 	ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
6834 
6835 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
6836 			iovbuf, sizeof(iovbuf));
6837 
6838 	if (!iov_len) {
6839 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6840 			__FUNCTION__, sizeof(iovbuf)));
6841 		return BCME_ERROR;
6842 	}
6843 
6844 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6845 	if (retcode) {
6846 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6847 	} else {
6848 		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
6849 	}
6850 
6851 	return retcode;
6852 }
6853 
6854 int
dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t * dhdp,int enable)6855 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
6856 {
6857 	char iovbuf[DHD_IOVAR_BUF_SIZE];
6858 	int iov_len;
6859 	int retcode;
6860 
6861 	if (dhdp == NULL) {
6862 		return BCME_ERROR;
6863 	}
6864 
6865 	iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
6866 			iovbuf, sizeof(iovbuf));
6867 
6868 	if (!iov_len) {
6869 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6870 			__FUNCTION__, sizeof(iovbuf)));
6871 		return BCME_ERROR;
6872 	}
6873 
6874 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
6875 	if (retcode)
6876 		DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
6877 			__FUNCTION__, enable, retcode));
6878 	else {
6879 		DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
6880 			__FUNCTION__, enable));
6881 	}
6882 
6883 	return retcode;
6884 }
6885 #ifdef SIMPLE_ISCAN
6886 
/* Id of the thread created by dhd_iscan_attach to service iscan results */
uint iscan_thread_id = 0;
/* Head of the linked list of iscan result buffers, guarded by dhd_iscan_lock() */
iscan_buf_t * iscan_chain = 0;
6889 
6890 iscan_buf_t *
dhd_iscan_allocate_buf(dhd_pub_t * dhd,iscan_buf_t ** iscanbuf)6891 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
6892 {
6893 	iscan_buf_t *iscanbuf_alloc = 0;
6894 	iscan_buf_t *iscanbuf_head;
6895 
6896 	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
6897 	dhd_iscan_lock();
6898 
6899 	iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
6900 	if (iscanbuf_alloc == NULL)
6901 		goto fail;
6902 
6903 	iscanbuf_alloc->next = NULL;
6904 	iscanbuf_head = *iscanbuf;
6905 
6906 	DHD_ISCAN(("%s: addr of allocated node = 0x%X"
6907 		   "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
6908 		   __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
6909 
6910 	if (iscanbuf_head == NULL) {
6911 		*iscanbuf = iscanbuf_alloc;
6912 		DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
6913 		goto fail;
6914 	}
6915 
6916 	while (iscanbuf_head->next)
6917 		iscanbuf_head = iscanbuf_head->next;
6918 
6919 	iscanbuf_head->next = iscanbuf_alloc;
6920 
6921 fail:
6922 	dhd_iscan_unlock();
6923 	return iscanbuf_alloc;
6924 }
6925 
6926 void
dhd_iscan_free_buf(void * dhdp,iscan_buf_t * iscan_delete)6927 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
6928 {
6929 	iscan_buf_t *iscanbuf_free = 0;
6930 	iscan_buf_t *iscanbuf_prv = 0;
6931 	iscan_buf_t *iscanbuf_cur;
6932 	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
6933 	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
6934 
6935 	dhd_iscan_lock();
6936 
6937 	iscanbuf_cur = iscan_chain;
6938 
6939 	/* If iscan_delete is null then delete the entire
6940 	 * chain or else delete specific one provided
6941 	 */
6942 	if (!iscan_delete) {
6943 		while (iscanbuf_cur) {
6944 			iscanbuf_free = iscanbuf_cur;
6945 			iscanbuf_cur = iscanbuf_cur->next;
6946 			iscanbuf_free->next = 0;
6947 			MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
6948 		}
6949 		iscan_chain = 0;
6950 	} else {
6951 		while (iscanbuf_cur) {
6952 			if (iscanbuf_cur == iscan_delete)
6953 				break;
6954 			iscanbuf_prv = iscanbuf_cur;
6955 			iscanbuf_cur = iscanbuf_cur->next;
6956 		}
6957 		if (iscanbuf_prv)
6958 			iscanbuf_prv->next = iscan_delete->next;
6959 
6960 		iscan_delete->next = 0;
6961 		MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
6962 
6963 		if (!iscanbuf_prv)
6964 			iscan_chain = 0;
6965 	}
6966 	dhd_iscan_unlock();
6967 }
6968 
/* Return the head of the iscan result buffer chain (may be NULL).
 * Caller is expected to hold dhd_iscan_lock() while traversing.
 */
iscan_buf_t *
dhd_iscan_result_buf(void)
{
	return iscan_chain;
}
6974 
6975 int
dhd_iscan_issue_request(void * dhdp,wl_iscan_params_t * pParams,uint32 size)6976 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
6977 {
6978 	int rc = -1;
6979 	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
6980 	char *buf;
6981 	char iovar[] = "iscan";
6982 	uint32 allocSize = 0;
6983 	wl_ioctl_t ioctl;
6984 	int len;
6985 
6986 	if (pParams) {
6987 		allocSize = (size + strlen(iovar) + 1);
6988 		if ((allocSize < size) || (allocSize < strlen(iovar)))
6989 		{
6990 			DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
6991 				__FUNCTION__, allocSize, size, strlen(iovar)));
6992 			goto cleanUp;
6993 		}
6994 		buf = MALLOC(dhd->osh, allocSize);
6995 
6996 		if (buf == NULL)
6997 			{
6998 			DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
6999 			goto cleanUp;
7000 			}
7001 		ioctl.cmd = WLC_SET_VAR;
7002 		len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
7003 		if (len == 0) {
7004 			rc = BCME_BUFTOOSHORT;
7005 			goto cleanUp;
7006 		}
7007 		rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
7008 	}
7009 
7010 cleanUp:
7011 	if (buf) {
7012 		MFREE(dhd->osh, buf, allocSize);
7013 	}
7014 
7015 	return rc;
7016 }
7017 
/*
 * Pull one batch of incremental scan results from the dongle into a newly
 * allocated node on the iscan_chain, and report how many BSS entries it
 * holds via *scan_count. Returns the dongle's iscan status code
 * (WL_SCAN_RESULTS_*), or -1 / BCME_BUFTOOSHORT on setup failure.
 */
static int
dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
{
	wl_iscan_results_t *list_buf;
	wl_iscan_results_t list;
	wl_scan_results_t *results;
	iscan_buf_t *iscan_cur;
	int status = -1;
	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
	int rc;
	wl_ioctl_t ioctl;
	int len;

	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));

	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
	if (!iscan_cur) {
		/* allocation failed: tear down the whole chain and abort the scan */
		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		goto fail;
	}

	dhd_iscan_lock();

	/* prime the result header before handing the buffer to the dongle */
	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
	results = &list_buf->results;
	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
	results->version = 0;
	results->count = 0;

	memset(&list, 0, sizeof(list));
	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
	len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
	if (len == 0) {
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		status = BCME_BUFTOOSHORT;
		goto fail;
	}
	ioctl.cmd = WLC_GET_VAR;
	ioctl.set = FALSE;
	/* NOTE(review): rc is never checked; on ioctl failure the stale buffer
	 * contents below are still parsed — confirm whether that is intended
	 */
	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);

	/* results arrive in dongle byte order; convert in place */
	results->buflen = dtoh32(results->buflen);
	results->version = dtoh32(results->version);
	*scan_count = results->count = dtoh32(results->count);
	status = dtoh32(list_buf->status);
	DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));

	dhd_iscan_unlock();

	if (!(*scan_count)) {
		 /* TODO: race condition when FLUSH already called */
		dhd_iscan_free_buf(dhdp, 0);
	}
fail:
	return status;
}
7081 
7082 #ifdef NDIS
7083 /* XXXX Following code had bit of OS dependency.
7084  * Cleanup to move the OS dependency to other
7085  * per port code so that iscan logic here can be
7086  * leveraged across all OS's
7087  */
/* Event signalled by dhd_iscan_notify() to wake the iscan service thread */
NDIS_EVENT iscan_event;
/* Handle of the iscan service thread created in dhd_iscan_attach() */
HANDLE tHandle;
/* Spin lock protecting the iscan_chain result list */
NDIS_SPIN_LOCK	dhd_iscan_queue_lock;
7091 
/* Acquire the spin lock protecting the iscan result chain. */
void
dhd_iscan_lock(void)
{
	NdisAcquireSpinLock(&dhd_iscan_queue_lock);
}
7097 
/* Release the spin lock protecting the iscan result chain. */
void
dhd_iscan_unlock(void)
{
	NdisReleaseSpinLock(&dhd_iscan_queue_lock);
}
7103 
/* Wake the iscan service thread so it fetches the next partial result. */
void
dhd_iscan_notify(void)
{
	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
	NdisSetEvent(&iscan_event);
}
7110 
/*
 * Body of the iscan service thread: wait on iscan_event, pull partial
 * results from the dongle, and continue/confirm/abort the scan based on
 * the returned status. Loops forever; the thread is torn down externally
 * via dhd_iscan_deattach().
 */
static void
dhd_iscan_func(void *h)
{
	int status;
	uint scan_count;
	dhd_pub_t *dhd = dhd_bus_pub(h);

	/* Read the priority from registry */
	CeSetThreadPriority(GetCurrentThread(), 128);
	DHD_ISCAN(("%s: thread created\n", __FUNCTION__));

	while (TRUE) {
		NdisWaitEvent(&iscan_event, 0);		/* wait forever */
		NdisResetEvent(&iscan_event);		/* reset the event */
		DHD_ISCAN(("%s: thread scheduled\n", __FUNCTION__));

		status = dhd_iscan_get_partial_result(h, &scan_count);

		if (status == WL_SCAN_RESULTS_PARTIAL) {
			/* more results pending: ask the dongle to keep scanning */
			dhd_iscan_request(h, WL_SCAN_ACTION_CONTINUE);
		} else if (status == WL_SCAN_RESULTS_SUCCESS) {
			if (dhd_iscan_in_progress(h)) {
				dhd_ind_scan_confirm(h, TRUE);
			}
		} else if (status == WL_SCAN_RESULTS_ABORTED ||
			status == WL_SCAN_RESULTS_NO_MEM) {
			dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
			dhd_ind_scan_confirm(h, FALSE);
		} else {
			/* unknown status: treat as failure and abort */
			dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
			dhd_ind_scan_confirm(h, FALSE);
		}
	}
}
7145 
/*
 * Initialize iscan support: set up the wake event and chain lock, then
 * spawn the service thread running dhd_iscan_func. Returns
 * NDIS_STATUS_SUCCESS, or NDIS_STATUS_FAILURE if the thread could not
 * be created.
 */
int
dhd_iscan_attach(void *dhdp)
{
	DHD_ISCAN(("%s: dhdp = 0x%x\n", __FUNCTION__, dhdp));

	NdisInitializeEvent(&iscan_event);
	NdisResetEvent(&iscan_event);
	NdisAllocateSpinLock(&dhd_iscan_queue_lock);

	/* XXX - should move to ndishared sublayer */
	tHandle = CreateThread(NULL,
		0,
		(LPTHREAD_START_ROUTINE)dhd_iscan_func,
		(void *)dhdp,
		0,
		&iscan_thread_id);

	/* CreateThread stores the new thread id through the last argument;
	 * zero means creation failed
	 */
	if (!iscan_thread_id)
		return NDIS_STATUS_FAILURE;

	return NDIS_STATUS_SUCCESS;
}
7168 
7169 void
dhd_iscan_deattach(void * dhdp)7170 dhd_iscan_deattach(void *dhdp)
7171 {
7172 	if (iscan_thread_id)
7173 	{
7174 	NdisFreeEvent(&iscan_event);
7175 	NdisFreeSpinLock(&dhd_iscan_queue_lock);
7176 	CloseHandle(tHandle);
7177 		iscan_thread_id = 0;
7178 	}
7179 }
7180 #endif /* NDIS */
7181 #endif /* SIMPLE_ISCAN */
7182 
7183 /*
7184  * returns = TRUE if associated, FALSE if not associated
7185  */
dhd_is_associated(dhd_pub_t * dhd,uint8 ifidx,int * retval)7186 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
7187 {
7188 	char bssid[6], zbuf[6];
7189 	int ret = -1;
7190 
7191 	bzero(bssid, 6);
7192 	bzero(zbuf, 6);
7193 
7194 	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
7195 		ETHER_ADDR_LEN, FALSE, ifidx);
7196 	/* XXX:AS!!! res can be: -17(BCME_NOTASSOCIATED),-22(BCME_NORESOURCE), and 0(OK)
7197 	  OK - doesn't mean associated yet, the returned bssid
7198 	  still needs to be checked for non zero array
7199 	*/
7200 	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
7201 
7202 	if (ret == BCME_NOTASSOCIATED) {
7203 		DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__));
7204 	}
7205 
7206 	if (retval)
7207 		*retval = ret;
7208 
7209 	if (ret < 0)
7210 		return FALSE;
7211 
7212 	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
7213 		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
7214 		return FALSE;
7215 	}
7216 	return TRUE;
7217 }
7218 
7219 /* Function to estimate possible DTIM_SKIP value */
7220 #if defined(OEM_ANDROID) && defined(BCMPCIE)
/*
 * Estimate the bcn_li_dtim value (how many DTIM beacons the chip may skip
 * while the host is suspended) from the associated AP's beacon interval
 * and DTIM period. The AP's DTIM period and beacon interval are returned
 * through the out parameters. Returns the skip count; 1 (or NO_DTIM_SKIP)
 * means no skipping, 0 means DTIM handling is disabled in suspend.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
{
	int bcn_li_dtim = 1; /* default: no dtim skip setting */
	int ret = -1;
	int allowed_skip_dtim_cnt = 0;

	/* explicit user/config override: never skip DTIM beacons in suspend */
	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		return bcn_li_dtim;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	if (dtim_period == NULL || bcn_interval == NULL)
		return bcn_li_dtim;

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* read associated AP dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* if not associated just return */
	if (*dtim_period == 0) {
		return bcn_li_dtim;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the wakeup interval as far as MAX_DTIM_ALLOWED_INTERVAL permits */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
			return bcn_li_dtim;
		}

		/* cap the total wakeup interval at MAX_DTIM_ALLOWED_INTERVAL */
		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* vendor config override (rockchip dhd->conf); negative means "no override" */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));

	return bcn_li_dtim;
}
7302 #else /* OEM_ANDROID && BCMPCIE */
/*
 * Non-(OEM_ANDROID && BCMPCIE) variant: estimate the bcn_li_dtim value
 * (how many DTIM beacons the chip may skip while the host is suspended)
 * from the associated AP's beacon interval and DTIM period, which are
 * queried locally here rather than returned to the caller. Returns the
 * skip count; 1 (or NO_DTIM_SKIP) means no skipping, 0 means DTIM
 * handling is disabled in suspend.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
{
	int bcn_li_dtim = 1; /* default: no dtim skip setting */
	int ret = -1;
	int dtim_period = 0;
	int ap_beacon = 0;
	int allowed_skip_dtim_cnt = 0;

	/* explicit user/config override: never skip DTIM beacons in suspend */
	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		goto exit;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated ap's dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* if not associated just exit */
	if (dtim_period == 0) {
		goto exit;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the wakeup interval as far as MAX_DTIM_ALLOWED_INTERVAL permits */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
			goto exit;
		}

		/* cap the total wakeup interval at MAX_DTIM_ALLOWED_INTERVAL */
		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* vendor config override (rockchip dhd->conf); negative means "no override" */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));

exit:
	return bcn_li_dtim;
}
7384 #endif /* OEM_ANDROID && BCMPCIE */
7385 
7386 #ifdef CONFIG_SILENT_ROAM
/*
 * Turn silent-roam monitor mode on or off in the dongle via the "sroam"
 * iovar (read-modify-write: fetch the current structure, flip sroam_on,
 * write it back). Skipped (returns BCME_OK) when not associated, in
 * AP/P2P modes for enable, or when sroam is not turned on in the host.
 * Returns BCME_OK, BCME_NOMEM, BCME_VERSION on a firmware structure
 * version mismatch, or the iovar error code.
 */
int
dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
{
	int ret = BCME_OK;
	wlc_sroam_t *psroam;
	wlc_sroam_info_t *sroam;
	uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
		return ret;
	}

	/* silent roam only applies to plain STA operation */
	if (set && (dhd->op_mode &
		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
		DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
		return ret;
	}

	if (!dhd->sroam_turn_on) {
		DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
		return ret;
	}
	psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
	if (!psroam) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* read the current sroam config from the dongle */
	ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
		goto done;
	}

	/* refuse to modify a structure layout we don't understand */
	if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
		ret = BCME_VERSION;
		goto done;
	}

	sroam = (wlc_sroam_info_t *)psroam->data;
	sroam->sroam_on = set;
	DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));

	/* write the modified config back */
	ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
	}

done:
	if (psroam) {
	    MFREE(dhd->osh, psroam, sroamlen);
	}

	return ret;
}
7444 #endif /* CONFIG_SILENT_ROAM */
7445 
7446 /* Check if the mode supports STA MODE */
dhd_support_sta_mode(dhd_pub_t * dhd)7447 bool dhd_support_sta_mode(dhd_pub_t *dhd)
7448 {
7449 
7450 #ifdef  WL_CFG80211
7451 	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
7452 		return FALSE;
7453 	else
7454 #endif /* WL_CFG80211 */
7455 		return TRUE;
7456 }
7457 
7458 #if defined(KEEP_ALIVE)
dhd_keep_alive_onoff(dhd_pub_t * dhd)7459 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
7460 {
7461 	char				buf[32] = {0};
7462 	const char			*str;
7463 	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
7464 	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
7465 	int					buf_len;
7466 	int					str_len;
7467 	int res					= -1;
7468 
7469 	if (!dhd_support_sta_mode(dhd))
7470 		return res;
7471 
7472 	DHD_TRACE(("%s execution\n", __FUNCTION__));
7473 
7474 	str = "mkeep_alive";
7475 	str_len = strlen(str);
7476 	strlcpy(buf, str, sizeof(buf));
7477 	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
7478 	mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
7479 	buf_len = str_len + 1;
7480 	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
7481 	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
7482 	/* Setup keep alive zero for null packet generation */
7483 	mkeep_alive_pkt.keep_alive_id = 0;
7484 	mkeep_alive_pkt.len_bytes = 0;
7485 	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
7486 	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
7487 	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
7488 	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
7489 	 * guarantee that the buffer is properly aligned.
7490 	 */
7491 	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
7492 
7493 	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
7494 
7495 	return res;
7496 }
7497 #endif /* defined(KEEP_ALIVE) */
7498 #if defined(OEM_ANDROID)
7499 #define	CSCAN_TLV_TYPE_SSID_IE	'S'
7500 /*
7501  *  SSIDs list parsing from cscan tlv list
7502  */
/*
 * Parse up to 'max' SSID entries from a cscan TLV stream.
 * Each entry is encoded as: 'S' (CSCAN_TLV_TYPE_SSID_IE), a length byte,
 * then that many SSID bytes; a zero length denotes a broadcast scan.
 * On return *list_str points past the consumed bytes and *bytes_left is
 * decremented accordingly. Returns the number of SSIDs parsed (stopping
 * at the first non-'S' byte) or BCME_BADARG on malformed input.
 */
int
wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
{
	char* str;
	int idx = 0;
	uint8 len;

	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
		return BCME_BADARG;
	}
	str = *list_str;
	while (*bytes_left > 0) {
		/* a byte other than 'S' marks the end of the SSID section */
		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
			*list_str = str;
			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
			return idx;
		}

		if (idx >= max) {
			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
			return BCME_BADARG;
		}

		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
		*bytes_left -= 1;
		if (*bytes_left == 0) {
			/* type byte was the last byte: length field is missing */
			DHD_ERROR(("%s no length field.\n", __FUNCTION__));
			return BCME_BADARG;
		}
		str += 1;
		ssid[idx].rssi_thresh = 0;
		ssid[idx].flags = 0;
		len = str[0];
		if (len == 0) {
			/* Broadcast SSID */
			ssid[idx].SSID_len = 0;
			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
			*bytes_left -= 1;
			str += 1;

			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
		} else if (len <= DOT11_MAX_SSID_LEN) {
			/* Get proper SSID size */
			ssid[idx].SSID_len = len;
			*bytes_left -= 1;
			/* Get SSID */
			if (ssid[idx].SSID_len > *bytes_left) {
				/* declared length runs past the end of the buffer */
				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
				return BCME_BADARG;
			}
			str += 1;
			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);

			*bytes_left -= ssid[idx].SSID_len;
			str += ssid[idx].SSID_len;
			ssid[idx].hidden = TRUE;

			DHD_TRACE(("%s :size=%d left=%d\n",
				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
		} else {
			DHD_ERROR(("### SSID size more than %d\n", str[0]));
			return BCME_BADARG;
		}
		idx++;
	}

	*list_str = str;
	return idx;
}
7574 
7575 #if defined(WL_WIRELESS_EXT)
7576 /* Android ComboSCAN support */
7577 
7578 /*
7579  *  data parsing from ComboScan tlv list
7580 */
7581 int
wl_iw_parse_data_tlv(char ** list_str,void * dst,int dst_size,const char token,int input_size,int * bytes_left)7582 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
7583                      int input_size, int *bytes_left)
7584 {
7585 	char* str;
7586 	uint16 short_temp;
7587 	uint32 int_temp;
7588 
7589 	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
7590 		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
7591 		return -1;
7592 	}
7593 	str = *list_str;
7594 
7595 	/* Clean all dest bytes */
7596 	memset(dst, 0, dst_size);
7597 	if (*bytes_left > 0) {
7598 
7599 		if (str[0] != token) {
7600 			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
7601 				__FUNCTION__, token, str[0], *bytes_left));
7602 			return -1;
7603 		}
7604 
7605 		*bytes_left -= 1;
7606 		str += 1;
7607 
7608 		if (input_size == 1) {
7609 			memcpy(dst, str, input_size);
7610 		}
7611 		else if (input_size == 2) {
7612 			memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
7613 				input_size);
7614 		}
7615 		else if (input_size == 4) {
7616 			memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
7617 				input_size);
7618 		}
7619 
7620 		*bytes_left -= input_size;
7621 		str += input_size;
7622 		*list_str = str;
7623 		return 1;
7624 	}
7625 	return 1;
7626 }
7627 
7628 /*
7629  *  channel list parsing from cscan tlv list
7630 */
7631 int
wl_iw_parse_channel_list_tlv(char ** list_str,uint16 * channel_list,int channel_num,int * bytes_left)7632 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
7633                              int channel_num, int *bytes_left)
7634 {
7635 	char* str;
7636 	int idx = 0;
7637 
7638 	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
7639 		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
7640 		return -1;
7641 	}
7642 	str = *list_str;
7643 
7644 	while (*bytes_left > 0) {
7645 
7646 		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
7647 			*list_str = str;
7648 			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
7649 			return idx;
7650 		}
7651 		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
7652 		*bytes_left -= 1;
7653 		str += 1;
7654 
7655 		if (str[0] == 0) {
7656 			/* All channels */
7657 			channel_list[idx] = 0x0;
7658 		}
7659 		else {
7660 			channel_list[idx] = (uint16)str[0];
7661 			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
7662 		}
7663 		*bytes_left -= 1;
7664 		str += 1;
7665 
7666 		if (idx++ > 255) {
7667 			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
7668 			return -1;
7669 		}
7670 	}
7671 
7672 	*list_str = str;
7673 	return idx;
7674 }
7675 
7676 /* Parse a comma-separated list from list_str into ssid array, starting
7677  * at index idx.  Max specifies size of the ssid array.  Parses ssids
7678  * and returns updated idx; if idx >= max not all fit, the excess have
7679  * not been copied.  Returns -1 on empty string, or on ssid too long.
7680  */
7681 int
wl_iw_parse_ssid_list(char ** list_str,wlc_ssid_t * ssid,int idx,int max)7682 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
7683 {
7684 	char* str, *ptr;
7685 
7686 	if ((list_str == NULL) || (*list_str == NULL))
7687 		return -1;
7688 
7689 	for (str = *list_str; str != NULL; str = ptr) {
7690 
7691 		/* check for next TAG */
7692 		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
7693 			*list_str	 = str + strlen(GET_CHANNEL);
7694 			return idx;
7695 		}
7696 
7697 		if ((ptr = strchr(str, ',')) != NULL) {
7698 			*ptr++ = '\0';
7699 		}
7700 
7701 		if (strlen(str) > DOT11_MAX_SSID_LEN) {
7702 			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
7703 			return -1;
7704 		}
7705 
7706 		if (strlen(str) == 0)
7707 			ssid[idx].SSID_len = 0;
7708 
7709 		if (idx < max) {
7710 			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
7711 			strlcpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID));
7712 			ssid[idx].SSID_len = sizeof(ssid[idx].SSID);
7713 		}
7714 		idx++;
7715 	}
7716 	return idx;
7717 }
7718 
7719 /*
7720  * Parse channel list from iwpriv CSCAN
7721  */
7722 int
wl_iw_parse_channel_list(char ** list_str,uint16 * channel_list,int channel_num)7723 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
7724 {
7725 	int num;
7726 	int val;
7727 	char* str;
7728 	char* endptr = NULL;
7729 
7730 	if ((list_str == NULL)||(*list_str == NULL))
7731 		return -1;
7732 
7733 	str = *list_str;
7734 	num = 0;
7735 	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
7736 		val = (int)strtoul(str, &endptr, 0);
7737 		if (endptr == str) {
7738 			printf("could not parse channel number starting at"
7739 				" substring \"%s\" in list:\n%s\n",
7740 				str, *list_str);
7741 			return -1;
7742 		}
7743 		str = endptr + strspn(endptr, " ,");
7744 
7745 		if (num == channel_num) {
7746 			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
7747 				channel_num, *list_str));
7748 			return -1;
7749 		}
7750 
7751 		channel_list[num++] = (uint16)val;
7752 	}
7753 	*list_str = str;
7754 	return num;
7755 }
7756 #endif
7757 #endif /* defined(OEM_ANDROID) */
7758 
7759 #if defined(BCM_ROUTER_DHD)
/*
 * Rebuild the host-side DSCP-to-WMM (DWM) mapping table from a traffic
 * management filter list. Returns BCME_OK, BCME_BUFTOOSHORT when 'len'
 * cannot hold the advertised filters, or BCME_BADARG on a DSCP index
 * outside the table.
 */
static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
	trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len)
{
	int err = 0;
	uint32              idx;
	trf_mgmt_filter_t   *filter;
	uint8               entry;
	uint32              dscp = 0;
	uint16              is_dwm_filter = 0;

	/* Reject a buffer too small for the advertised filter count */
	if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) +
		trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) {
		return BCME_BUFTOOSHORT;
	}

	bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));

	for (idx = 0; idx < trf_mgmt_filter_list->num_filters; idx++) {
		filter = &trf_mgmt_filter_list->filter[idx];

		is_dwm_filter = (filter->flags & TRF_FILTER_DWM);

		if (is_dwm_filter) {
			dscp = filter->dscp;
			if (dscp >= DHD_DWM_TBL_SIZE) {
				return BCME_BADARG;
			}
		}

		/* NOTE(review): the table entry below is written even when
		 * TRF_FILTER_DWM is not set, reusing the previous (or zero)
		 * dscp index; behavior kept as-is — confirm intent upstream.
		 */
		dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1;
		/* set WMM AC bits */
		entry = (uint8) filter->priority;
		DHD_TRF_MGMT_DWM_SET_FILTER(entry);

		/* set favored bits */
		if (filter->flags & TRF_FILTER_FAVORED)
			DHD_TRF_MGMT_DWM_SET_FAVORED(entry);

		dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = entry;
	}
	return err;
}
7805 #endif /* BCM_ROUTER_DHD */
7806 
7807 #ifdef DHD_LINUX_STD_FW_API
/*
 * Return a buffer holding the requested download component (CLM_BLOB,
 * NVRAM, ...). Prefers an OTA-delivered image when SUPPORT_OTA_UPDATE is
 * enabled; otherwise loads 'file_path' via the Linux request_firmware API
 * into a VMALLOCZ'ed buffer the caller must release. On entry *length is
 * the caller's size budget; on success it holds the image size.
 * On a too-small budget, *length is set to the required size and
 * BCME_BUFTOOSHORT is returned.
 */
int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
	char ** buffer, int *length)
{
	int ret = BCME_ERROR;
	const struct firmware *fw = NULL;
#ifdef SUPPORT_OTA_UPDATE
	uint8 *buf = NULL;
	int len = 0;
	ota_update_info_t *ota_info = &dhd->ota_update_info;
#endif /* SUPPORT_OTA_UPDATE */

#ifdef SUPPORT_OTA_UPDATE
	/* Prefer an image already delivered over-the-air, if present */
	if (component == CLM_BLOB) {
		if (ota_info->clm_len) {
			DHD_ERROR(("Using OTA CLM_BLOB\n"));
			buf = ota_info->clm_buf;
			len = ota_info->clm_len;
		}
	}
	else if (component == NVRAM) {
		if (ota_info->nvram_len) {
			DHD_ERROR(("Using OTA NVRAM.\n"));
			buf = ota_info->nvram_buf;
			len = ota_info->nvram_len;
		}
	}
#endif /* SUPPORT_OTA_UPDATE */

#ifdef SUPPORT_OTA_UPDATE
	if (len) {
		*buffer = (char *)buf;
		*length = len;
		/* Fix: the OTA buffer is valid — report success instead of
		 * falling through to 'err' with the initial BCME_ERROR.
		 */
		ret = BCME_OK;
	}
	else
#endif /* SUPPORT_OTA_UPDATE */
	{
		if (file_path) {
			ret = dhd_os_get_img_fwreq(&fw, file_path);
			if (ret < 0) {
				DHD_ERROR(("dhd_os_get_img(Request Firmware API) error : %d\n",
					ret));
				goto err;
			} else {
				if ((fw->size <= 0 || fw->size > *length)) {
					DHD_ERROR(("fw->size = %zu, *length = %d\n", fw->size, *length));
					/* report the required size to the caller */
					*length = fw->size;
					/* Fix: don't leak the successful fwreq
					 * status (0) when the image does not
					 * fit — no buffer was handed out.
					 */
					ret = BCME_BUFTOOSHORT;
					goto err;
				}
				*buffer = VMALLOCZ(dhd->osh, fw->size);
				if (*buffer == NULL) {
					DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
						__FUNCTION__, (int)fw->size));
					ret = BCME_NOMEM;
					goto err;
				}
				*length = fw->size;
				ret = memcpy_s(*buffer, fw->size, fw->data, fw->size);
				if (ret != BCME_OK) {
					DHD_ERROR(("%s: memcpy_s failed, err : %d\n",
							__FUNCTION__, ret));
					goto err;
				}
				ret = BCME_OK;
			}
		}
	}
err:
	if (fw) {
		dhd_os_close_img_fwreq(fw);
	}
	return ret;
}
7880 
7881 #else
7882 
/* Given a filename and download type, returns a buffer pointer and length
 * for download to f/w. Type can be FW, NVRAM, CLM_BLOB, or TXCAP_BLOB.
 */
/*
 * On entry *length is the caller's size budget; on success it is set to
 * the number of valid bytes in *buffer and BCME_OK is returned. Under
 * CACHE_FW_IMAGES the cache retains ownership of the buffer; otherwise
 * the caller releases it via dhd_free_download_buffer().
 */
int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
	char ** buffer, int *length)

{
	int ret = BCME_ERROR;
	int len = 0;
	int file_len;
	void *image = NULL;
	uint8 *buf = NULL;

	/* Point to cache if available. */
#ifdef CACHE_FW_IMAGES
	if (component == FW) {
		if (dhd->cached_fw_length) {
			len = dhd->cached_fw_length;
			buf = dhd->cached_fw;
		}
	} else if (component == NVRAM) {
		if (dhd->cached_nvram_length) {
			len = dhd->cached_nvram_length;
			buf = dhd->cached_nvram;
		}
	} else if (component == CLM_BLOB) {
		if (dhd->cached_clm_length) {
			len = dhd->cached_clm_length;
			buf = dhd->cached_clm;
		}
	} else if (component == TXCAP_BLOB) {
		if (dhd->cached_txcap_length) {
			len = dhd->cached_txcap_length;
			buf = dhd->cached_txcap;
		}
	} else {
		DHD_ERROR(("%s: Invalid component arg %d\n",
			__FUNCTION__, component));
		ret = BCME_BADARG;
		return ret;
	}
#endif /* CACHE_FW_IMAGES */
	/* No Valid cache found on this call */
	if (!len) {
		/* consume the caller's budget; restored on success below */
		file_len = *length;
		*length = 0;

		if (file_path) {
			image = dhd_os_open_image1(dhd, file_path);
			if (image == NULL) {
				printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
				goto err;
			}
		}

		buf = MALLOCZ(dhd->osh, file_len);
		if (buf == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
				__FUNCTION__, file_len));
			goto err;
		}

		/* Download image */
#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
		/* NOTE(review): with no image handle the embedded nvram_arr is
		 * used regardless of 'component' — presumably only NVRAM is
		 * requested this way on EFI builds; confirm.
		 */
		if (!image) {
			memcpy(buf, nvram_arr, sizeof(nvram_arr));
			len = sizeof(nvram_arr);
		} else {
			len = dhd_os_get_image_block((char *)buf, file_len, image);
			if ((len <= 0 || len > file_len)) {
				MFREE(dhd->osh, buf, file_len);
				goto err;
			}
		}
#else
		len = dhd_os_get_image_block((char *)buf, file_len, image);
		if ((len <= 0 || len > file_len)) {
			MFREE(dhd->osh, buf, file_len);
			goto err;
		}
#endif /* DHD_EFI */
	}

	ret = BCME_OK;
	*length = len;
	*buffer = (char *)buf;

	/* Cache if first call. */
#ifdef CACHE_FW_IMAGES
	if (component == FW) {
		if (!dhd->cached_fw_length) {
			dhd->cached_fw = buf;
			dhd->cached_fw_length = len;
		}
	} else if (component == NVRAM) {
		if (!dhd->cached_nvram_length) {
			dhd->cached_nvram = buf;
			dhd->cached_nvram_length = len;
		}
	} else if (component == CLM_BLOB) {
		if (!dhd->cached_clm_length) {
			dhd->cached_clm = buf;
			dhd->cached_clm_length = len;
		}
	} else if (component == TXCAP_BLOB) {
		if (!dhd->cached_txcap_length) {
			dhd->cached_txcap = buf;
			dhd->cached_txcap_length = len;
		}
	}
#endif /* CACHE_FW_IMAGES */

err:
	if (image)
		dhd_os_close_image1(dhd, image);

	return ret;
}
8002 #endif /* DHD_LINUX_STD_FW_API */
8003 
/*
 * Wrap one chunk of a blob in a wl_dload_data header and push it to the
 * dongle through the named WLC_SET_VAR iovar (e.g. "clmload").
 * 'dload_buf' already contains the payload after the header area; 'len'
 * is header + payload bytes. Returns a BCME_* status.
 */
int
dhd_download_2_dongle(dhd_pub_t	*dhd, char *iovar, uint16 flag, uint16 dload_type,
	unsigned char *dload_buf, int len)
{
	struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
	int err = 0;
	int dload_data_offset;
	/* NOTE(review): 'static' makes this one shared scratch buffer, so
	 * concurrent downloads would corrupt each other's iovar — presumably
	 * callers serialize downloads; confirm.
	 */
	static char iovar_buf[WLC_IOCTL_MEDLEN];
	int iovar_len;

	memset(iovar_buf, 0, sizeof(iovar_buf));

	/* Stamp the download header (handler version + caller flags, type,
	 * payload length) in front of the payload the caller placed.
	 */
	dload_data_offset = OFFSETOF(wl_dload_data_t, data);
	dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
	dload_ptr->dload_type = dload_type;
	dload_ptr->len = htod32(len - dload_data_offset);
	dload_ptr->crc = 0;
	/* round the transfer to 8 bytes; callers in this file allocate
	 * dload_buf rounded up the same way, so the tail is readable
	 */
	len = ROUNDUP(len, 8);

	iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
		(uint)len, iovar_buf, sizeof(iovar_buf));
	if (iovar_len == 0) {
		DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
		           __FUNCTION__, iovar));
		return BCME_BUFTOOSHORT;
	}

	err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
			iovar_len, IOV_SET, 0);

	return err;
}
8036 
/*
 * Stream a blob of 'len' bytes to the dongle in MAX_CHUNK_LEN pieces via
 * dhd_download_2_dongle(). Depending on build flags, 'buf' is either an
 * in-memory image (copied chunk by chunk) or an opaque OS image handle
 * (read with dhd_os_get_image_block). Returns a BCME_* status.
 */
int
dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
		uint32 len, char *iovar)

{
	int chunk_len;
#if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
	int cumulative_len = 0;
#endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */
	int size2alloc;
	unsigned char *new_buf;
	int err = 0, data_offset;
	uint16 dl_flag = DL_BEGIN;

	/* scratch buffer = download header + one max chunk, rounded to 8
	 * bytes to match dhd_download_2_dongle()'s ROUNDUP of the length
	 */
	data_offset = OFFSETOF(wl_dload_data_t, data);
	size2alloc = data_offset + MAX_CHUNK_LEN;
	size2alloc = ROUNDUP(size2alloc, 8);

	if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
		do {
#if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
			/* 'buf' is an in-memory image: copy the next chunk */
			if (len >= MAX_CHUNK_LEN)
				chunk_len = MAX_CHUNK_LEN;
			else
				chunk_len = len;

			memcpy(new_buf + data_offset, buf + cumulative_len, chunk_len);
			cumulative_len += chunk_len;
#else
			/* 'buf' is an OS image handle: read the next chunk */
			chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
				MAX_CHUNK_LEN, buf);
			if (chunk_len < 0) {
				DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
					__FUNCTION__, chunk_len));
				err = BCME_ERROR;
				goto exit;
			}
#endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */
			/* tag the final chunk so the dongle finalizes the load */
			if (len - chunk_len == 0)
				dl_flag |= DL_END;

			err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
				new_buf, data_offset + chunk_len);

			/* only the first chunk carries DL_BEGIN */
			dl_flag &= ~DL_BEGIN;

			len = len - chunk_len;
		} while ((len > 0) && (err == 0));
		/* this build variant has no 'exit' label, so free here */
#if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
		MFREE(dhd->osh, new_buf, size2alloc);
#endif /* !LINUX && !linux */
	} else {
		err = BCME_NOMEM;
	}
	/* the 'exit' label (and its free) exists only on the Linux
	 * non-STD-FW build; the other variant frees inside the block above,
	 * so new_buf is never freed twice
	 */
#if (defined(LINUX) || defined(linux)) && !defined(DHD_LINUX_STD_FW_API)
exit:
	if (new_buf) {
		MFREE(dhd->osh, new_buf, size2alloc);
	}
#endif /* LINUX || linux */
	return err;
}
8099 
8100 #if defined(CACHE_FW_IMAGES)
/*
 * Like dhd_download_blob(), but sources the image through
 * dhd_get_download_buffer() so a cached copy is reused when available.
 * 'iovar' selects the dongle handler and must be "clmload" or
 * "txcapload". Returns a BCME_* status.
 */
int
dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
	uint32 len, char *iovar)
{
	int ret = BCME_ERROR;
	uint chunk_len, size2alloc, data_offset, file_offset;
	unsigned char *pay_load, *dnld_buf;
	char *memblock;
	uint16 dl_flag = DL_BEGIN;
	download_type_t dl_type;

	/* scratch buffer = download header + one max chunk, 8-byte rounded */
	data_offset = OFFSETOF(wl_dload_data_t, data);
	size2alloc = data_offset + MAX_CHUNK_LEN;
	size2alloc = ROUNDUP(size2alloc, 8);
	file_offset = 0;

	if ((dnld_buf = MALLOCZ(dhd->osh, size2alloc)) == NULL) {
		ret = BCME_NOMEM;
		goto exit;
	}
	pay_load = (dnld_buf + data_offset);

	/* map the iovar name onto the cache component to fetch */
	if (!memcmp("clmload", iovar, strlen("clmload"))) {
		dl_type = CLM_BLOB;
	} else if (!memcmp("txcapload", iovar, strlen("txcapload"))) {
		dl_type = TXCAP_BLOB;
	} else {
		DHD_ERROR(("%s Invalid iovar :%s \n", __FUNCTION__, iovar));
		ret = BCME_BADARG;
		goto exit;
	}

	/* on success 'len' is replaced with the actual image size; memblock
	 * is owned by the cache (dhd_free_download_buffer is a no-op under
	 * CACHE_FW_IMAGES), which is why it is not freed here
	 */
	ret = dhd_get_download_buffer(dhd, file_path, dl_type, &memblock, (int *)&len);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: error getting buffer for %s, %s \n", __FUNCTION__,
			file_path, bcmerrorstr(ret)));
		goto exit;
	}

	do {
		chunk_len = MIN(len, MAX_CHUNK_LEN);
		memcpy(pay_load, memblock + file_offset, chunk_len);
		if (len - chunk_len == 0) {
			/* final chunk */
			dl_flag |= DL_END;
		}

		/* NOTE(review): DL_TYPE_CLM is passed even for "txcapload";
		 * presumably the iovar name selects the handler and this
		 * field is ignored — confirm against dongle code.
		 */
		ret = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
			dnld_buf, data_offset + chunk_len);

		dl_flag &= ~DL_BEGIN;
		len = len - chunk_len;
		file_offset += chunk_len;
	} while ((len > 0) && (ret == 0));

exit:
	if (dnld_buf) {
		MFREE(dhd->osh, dnld_buf, size2alloc);
	}

	return ret;
}
8162 
8163 int
dhd_apply_default_txcap(dhd_pub_t * dhd,char * path)8164 dhd_apply_default_txcap(dhd_pub_t  *dhd, char *path)
8165 {
8166 	int ret = BCME_ERROR;
8167 	ret = dhd_download_blob_cached(dhd, path, MAX_TXCAP_BUF_SIZE, "txcapload");
8168 	if (ret) {
8169 		DHD_ERROR(("%s: error downloading blob: %s \n", __FUNCTION__, bcmerrorstr(ret)));
8170 	}
8171 	return ret;
8172 }
8173 
/*
 * Download the default CLM blob (from the module-parameter path when set,
 * otherwise the built-in vendor path) via the cached download path, then
 * verify that the firmware reports a valid country code.
 * Returns BCME_OK, or a BCME_* error on download/verification failure.
 */
int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	char *clm_blob_path;
	int len;
	unsigned char *imgbuf = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	wl_country_t *cspec;

	if (clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
	}

	/* If CLM blob file is found on the filesystem, download the file.
	* After CLM file download or If the blob file is not present,
	* validate the country code before proceeding with the initialization.
	* If country code is not valid, fail the initialization.
	*/

	/* NOTE(review): when the blob cannot be opened we jump straight to
	 * exit with err still BCME_OK, skipping the country-code check —
	 * confirm that is intended.
	 */
	imgbuf = dhd_os_open_image((char *)clm_blob_path);
	if (imgbuf == NULL) {
		goto exit;
	}

	len = dhd_os_get_image_size(imgbuf);

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) {
		/* query the current country to detect pre-existing CLM data */
		len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
		if (len == 0) {
			err = BCME_BUFTOOSHORT;
			goto exit;
		}
		err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
		if (err) {
			DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
			goto exit;
		}

		cspec = (wl_country_t *)iovbuf;
		if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) {
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
		}

		/* Found blob file. Download the file */
		DHD_ERROR(("clm file download from %s \n", clm_blob_path));
		/* close our handle first: dhd_download_blob_cached re-opens
		 * the blob by path through dhd_get_download_buffer()
		 */
		if (imgbuf) {
			dhd_os_close_image(imgbuf);
			imgbuf = NULL;
		}
		err = dhd_download_blob_cached(dhd, clm_blob_path, MAX_CLM_BUF_SIZE, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			if (!dhd_bus_skip_clm(dhd)) {
				/* Retrieve clmload_status and print */
				len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf,
						sizeof(iovbuf));
				if (len == 0) {
					err = BCME_BUFTOOSHORT;
					goto exit;
				}
				err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0);
				if (err) {
					DHD_ERROR(("%s: clmload_status get failed err=%d \n",
						__FUNCTION__, err));
				} else {
					DHD_ERROR(("%s: clmload_status: %d \n",
						__FUNCTION__, *((int *)iovbuf)));
					if (*((int *)iovbuf) == CHIPID_MISMATCH) {
						DHD_ERROR(("Chip ID mismatch error \n"));
					}
				}
				/* download failure is fatal regardless of the
				 * clmload_status query outcome
				 */
				err = BCME_ERROR;
				goto exit;
			}
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
#ifdef DHD_USE_CLMINFO_PARSER
		err = BCME_ERROR;
		goto exit;
#endif /* DHD_USE_CLMINFO_PARSER */
	}

	/* Verify country code */
	len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
	if (len == 0) {
		err = BCME_BUFTOOSHORT;
		goto exit;
	}
	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
	if (err) {
		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
		goto exit;
	}

	cspec = (wl_country_t *)iovbuf;
	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = BCME_ERROR;
	}
exit:

	if (imgbuf) {
		dhd_os_close_image(imgbuf);
	}

	return err;
}
8296 #else
8297 
8298 int
dhd_apply_default_txcap(dhd_pub_t * dhd,char * path)8299 dhd_apply_default_txcap(dhd_pub_t  *dhd, char *path)
8300 {
8301 	return 0;
8302 }
8303 
8304 int
dhd_check_current_clm_data(dhd_pub_t * dhd)8305 dhd_check_current_clm_data(dhd_pub_t *dhd)
8306 {
8307 	char iovbuf[WLC_IOCTL_SMLEN];
8308 	wl_country_t *cspec;
8309 	int err = BCME_OK;
8310 
8311 	memset(iovbuf, 0, sizeof(iovbuf));
8312 	err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
8313 	if (err == 0) {
8314 		err = BCME_BUFTOOSHORT;
8315 		DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
8316 		return err;
8317 	}
8318 	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
8319 	if (err) {
8320 		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
8321 		return err;
8322 	}
8323 	cspec = (wl_country_t *)iovbuf;
8324 	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
8325 		DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
8326 			__FUNCTION__));
8327 		return FALSE;
8328 	}
8329 	DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
8330 		__FUNCTION__));
8331 	return TRUE;
8332 }
8333 
/*
 * Download the default CLM blob (from the module-parameter path when set,
 * otherwise the built-in default) and verify the firmware then reports a
 * valid country code. The image is sourced through whichever loader the
 * build selects (request_firmware, OS image API, or raw buffer).
 * Returns BCME_OK, or a BCME_* error on download/verification failure.
 */
int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	char *clm_blob_path;
	int len = 0, memblock_len = 0;
	char *memblock = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	int status = FALSE;

	if (clm_path && clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
#ifdef DHD_LINUX_STD_FW_API
		clm_blob_path = DHD_CLM_NAME;
#else
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
#endif /* DHD_LINUX_STD_FW_API */
	}

	/* If CLM blob file is found on the filesystem, download the file.
	 * After CLM file download or If the blob file is not present,
	 * validate the country code before proceeding with the initialization.
	 * If country code is not valid, fail the initialization.
	 */
#if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
	/* memblock is a buffer here; memblock_len is what must be freed */
	len = MAX_CLM_BUF_SIZE;
	dhd_get_download_buffer(dhd, clm_blob_path, CLM_BLOB, &memblock, &len);
#ifdef DHD_LINUX_STD_FW_API
	memblock_len = len;
#else
	memblock_len = MAX_CLM_BUF_SIZE;
#endif /* DHD_LINUX_STD_FW_API */
#else
	/* memblock is an OS image handle on this build, not a buffer */
	memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
	len = dhd_os_get_image_size(memblock);
	BCM_REFERENCE(memblock_len);
#endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */

#if defined(LINUX) || defined(linux)
	if (memblock == NULL) {
		printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
		/* a blob-based firmware cannot run without its blob; for
		 * non-blob firmware, accept the built-in CLM if present
		 */
		if (dhd->is_blob) {
			err = BCME_ERROR;
		} else {
			status = dhd_check_current_clm_data(dhd);
			if (status == TRUE) {
				err = BCME_OK;
			} else {
				err = status;
			}
		}
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		goto exit;
	}
#endif /* LINUX || linux */

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
		/* status is TRUE/FALSE or a negative BCME_* error */
		status = dhd_check_current_clm_data(dhd);
		if (status == TRUE) {
#if defined(DHD_BLOB_EXISTENCE_CHECK)
			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
				if (dhd->is_blob) {
					err = BCME_ERROR;
				}
				goto exit;
			}
#else
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		} else if (status != FALSE) {
			err = status;
			goto exit;
		}

		/* Found blob file. Download the file */
		DHD_TRACE(("clm file download from %s \n", clm_blob_path));
		err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			/* Retrieve clmload_status and print */
			memset(iovbuf, 0, sizeof(iovbuf));
			len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
			if (len == 0) {
				err = BCME_BUFTOOSHORT;
				goto exit;
			}
			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
			if (err) {
				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
					__FUNCTION__, err));
			} else {
				DHD_ERROR(("%s: clmload_status: %d \n",
					__FUNCTION__, *((int *)iovbuf)));
				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
					DHD_ERROR(("Chip ID mismatch error \n"));
				}
			}
			/* download failure is fatal regardless of the
			 * clmload_status query outcome
			 */
			err = BCME_ERROR;
			goto exit;
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
	}

	/* Verify country code */
	status = dhd_check_current_clm_data(dhd);

	if (status != TRUE) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = status;
	}
exit:

	if (memblock) {
		/* release matches the acquisition above: image handle vs buffer */
#if (defined(LINUX) || defined(linux)) && !defined(DHD_LINUX_STD_FW_API)
		dhd_os_close_image1(dhd, memblock);
#else
		dhd_free_download_buffer(dhd, memblock, memblock_len);
#endif /* LINUX || linux */
	}

	return err;
}
8469 #endif /* defined(CACHE_FW_IMAGES) */
8470 
/*
 * Release a buffer obtained from dhd_get_download_buffer(). Under
 * CACHE_FW_IMAGES the cache retains ownership, so this is a no-op.
 */
void dhd_free_download_buffer(dhd_pub_t	*dhd, void *buffer, int length)
{
#ifdef CACHE_FW_IMAGES
	/* Fix: the original left the free calls compiled but unreachable
	 * after an unconditional return; exclude them with #else instead.
	 */
	BCM_REFERENCE(dhd);
	BCM_REFERENCE(buffer);
	BCM_REFERENCE(length);
#else
#if defined(DHD_LINUX_STD_FW_API)
	VMFREE(dhd->osh, buffer, length);
#else
	MFREE(dhd->osh, buffer, length);
#endif /* DHD_LINUX_STD_FW_API */
#endif /* CACHE_FW_IMAGES */
}
8482 
8483 #ifdef REPORT_FATAL_TIMEOUTS
8484 void
init_dhd_timeouts(dhd_pub_t * pub)8485 init_dhd_timeouts(dhd_pub_t *pub)
8486 {
8487 	pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t));
8488 	if (pub->timeout_info == NULL) {
8489 		DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__));
8490 	} else {
8491 		DHD_INFO(("Initializing dhd_timeouts\n"));
8492 		pub->timeout_info->scan_timer_lock = osl_spin_lock_init(pub->osh);
8493 		pub->timeout_info->join_timer_lock = osl_spin_lock_init(pub->osh);
8494 		pub->timeout_info->bus_timer_lock = osl_spin_lock_init(pub->osh);
8495 		pub->timeout_info->cmd_timer_lock = osl_spin_lock_init(pub->osh);
8496 		pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT;
8497 		pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT;
8498 		pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT;
8499 		pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT;
8500 		pub->timeout_info->scan_timer_active = FALSE;
8501 		pub->timeout_info->join_timer_active = FALSE;
8502 		pub->timeout_info->cmd_timer_active = FALSE;
8503 		pub->timeout_info->bus_timer_active = FALSE;
8504 		pub->timeout_info->cmd_join_error = FALSE;
8505 		pub->timeout_info->cmd_request_id = 0;
8506 		OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE);
8507 	}
8508 }
8509 
8510 void
deinit_dhd_timeouts(dhd_pub_t * pub)8511 deinit_dhd_timeouts(dhd_pub_t *pub)
8512 {
8513 	/* stop the join, scan bus, cmd timers
8514 	 * as failing to do so may cause a kernel panic if
8515 	 * an rmmod is done
8516 	 */
8517 	if (!pub->timeout_info) {
8518 		DHD_ERROR(("%s timeout_info pointer is NULL\n", __FUNCTION__));
8519 		ASSERT(0);
8520 		return;
8521 	}
8522 	if (dhd_stop_scan_timer(pub, FALSE, 0)) {
8523 		DHD_ERROR(("%s dhd_stop_scan_timer failed\n", __FUNCTION__));
8524 		ASSERT(0);
8525 	}
8526 	if (dhd_stop_bus_timer(pub)) {
8527 		DHD_ERROR(("%s dhd_stop_bus_timer failed\n", __FUNCTION__));
8528 		ASSERT(0);
8529 	}
8530 	if (dhd_stop_cmd_timer(pub)) {
8531 		DHD_ERROR(("%s dhd_stop_cmd_timer failed\n", __FUNCTION__));
8532 		ASSERT(0);
8533 	}
8534 	if (dhd_stop_join_timer(pub)) {
8535 		DHD_ERROR(("%s dhd_stop_join_timer failed\n", __FUNCTION__));
8536 		ASSERT(0);
8537 	}
8538 
8539 	osl_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock);
8540 	osl_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock);
8541 	osl_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock);
8542 	osl_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock);
8543 	MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t));
8544 }
8545 
8546 static void
dhd_cmd_timeout(void * ctx)8547 dhd_cmd_timeout(void *ctx)
8548 {
8549 	dhd_pub_t *pub = (dhd_pub_t *)ctx;
8550 	unsigned long flags;
8551 
8552 	if (!pub->timeout_info) {
8553 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8554 		ASSERT(0);
8555 		return;
8556 	}
8557 
8558 	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
8559 	if (pub->timeout_info && pub->timeout_info->cmd_timer_active) {
8560 		DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val));
8561 		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
8562 #ifdef PCIE_OOB
8563 		/* Assert device_wake so that UART_Rx is available */
8564 		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
8565 			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
8566 			ASSERT(0);
8567 		}
8568 #endif /* PCIE_OOB */
8569 		if (dhd_stop_cmd_timer(pub)) {
8570 			DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__));
8571 			ASSERT(0);
8572 		}
8573 		dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR);
8574 		if (!dhd_query_bus_erros(pub))
8575 			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO);
8576 	} else {
8577 		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
8578 	}
8579 }
8580 
8581 int
dhd_start_cmd_timer(dhd_pub_t * pub)8582 dhd_start_cmd_timer(dhd_pub_t *pub)
8583 {
8584 	int ret = BCME_OK;
8585 	unsigned long flags = 0;
8586 	uint32 cmd_to_ms;
8587 
8588 	if (!pub->timeout_info) {
8589 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8590 		ret = BCME_ERROR;
8591 		ASSERT(0);
8592 		goto exit_null;
8593 	}
8594 	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
8595 	cmd_to_ms = pub->timeout_info->cmd_timeout_val;
8596 
8597 	if (pub->timeout_info->cmd_timeout_val == 0) {
8598 		/* Disable Command timer timeout */
8599 		DHD_INFO(("DHD: Command Timeout Disabled\n"));
8600 		goto exit;
8601 	}
8602 	if (pub->timeout_info->cmd_timer_active) {
8603 		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
8604 		ret = BCME_ERROR;
8605 		ASSERT(0);
8606 	} else {
8607 		pub->timeout_info->cmd_timer = osl_timer_init(pub->osh,
8608 			"cmd_timer", dhd_cmd_timeout, pub);
8609 		osl_timer_update(pub->osh, pub->timeout_info->cmd_timer,
8610 			cmd_to_ms, 0);
8611 		pub->timeout_info->cmd_timer_active = TRUE;
8612 	}
8613 	if (ret == BCME_OK) {
8614 		DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__));
8615 	}
8616 exit:
8617 	DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
8618 exit_null:
8619 	return ret;
8620 }
8621 
8622 int
dhd_stop_cmd_timer(dhd_pub_t * pub)8623 dhd_stop_cmd_timer(dhd_pub_t *pub)
8624 {
8625 	int ret = BCME_OK;
8626 	unsigned long flags = 0;
8627 
8628 	if (!pub) {
8629 		DHD_ERROR(("DHD: pub NULL\n"));
8630 		ASSERT(0);
8631 		return BCME_ERROR;
8632 	}
8633 
8634 	if (!pub->timeout_info) {
8635 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8636 		ret = BCME_ERROR;
8637 		ASSERT(0);
8638 		goto exit;
8639 	}
8640 	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
8641 
8642 	if (pub->timeout_info->cmd_timer_active) {
8643 		osl_timer_del(pub->osh, pub->timeout_info->cmd_timer);
8644 		pub->timeout_info->cmd_timer_active = FALSE;
8645 	}
8646 	else {
8647 		DHD_INFO(("DHD: CMD timer is not active\n"));
8648 	}
8649 	if (ret == BCME_OK) {
8650 		DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__));
8651 	}
8652 	DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
8653 exit:
8654 	return ret;
8655 }
8656 
8657 static int
__dhd_stop_join_timer(dhd_pub_t * pub)8658 __dhd_stop_join_timer(dhd_pub_t *pub)
8659 {
8660 	int ret = BCME_OK;
8661 	if (!pub) {
8662 		DHD_ERROR(("DHD: pub NULL\n"));
8663 		ASSERT(0);
8664 		return BCME_ERROR;
8665 	}
8666 	if (!pub->timeout_info) {
8667 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8668 		ASSERT(0);
8669 		return BCME_ERROR;
8670 	}
8671 
8672 	if (pub->timeout_info->join_timer_active) {
8673 		osl_timer_del(pub->osh, pub->timeout_info->join_timer);
8674 		pub->timeout_info->join_timer_active = FALSE;
8675 		DHD_INFO(("%s join timer stopped\n", __FUNCTION__));
8676 	} else {
8677 		DHD_INFO(("%s join timer is not active\n", __FUNCTION__));
8678 	}
8679 
8680 	return ret;
8681 }
8682 
8683 static void
dhd_join_timeout(void * ctx)8684 dhd_join_timeout(void *ctx)
8685 {
8686 	dhd_pub_t *pub = (dhd_pub_t *)ctx;
8687 	unsigned long flags;
8688 
8689 	if (!pub->timeout_info) {
8690 		DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
8691 		ASSERT(0);
8692 		return;
8693 	}
8694 
8695 	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8696 	if (pub->timeout_info->join_timer_active) {
8697 		if (__dhd_stop_join_timer(pub)) {
8698 			DHD_ERROR(("%s: __dhd_stop_join_timer() failed\n", __FUNCTION__));
8699 			ASSERT(0);
8700 		}
8701 		if (pub->timeout_info->cmd_join_error) {
8702 			DHD_ERROR(("\n%s ERROR JOIN TIMEOUT TO:%d:0x%x\n", __FUNCTION__,
8703 				pub->timeout_info->join_timeout_val,
8704 				pub->timeout_info->cmd_join_error));
8705 			if (!dhd_query_bus_erros(pub)) {
8706 				dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_JOIN_TO);
8707 			}
8708 			pub->timeout_info->cmd_join_error = 0;
8709 		}
8710 	}
8711 	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8712 }
8713 
8714 int
dhd_start_join_timer(dhd_pub_t * pub)8715 dhd_start_join_timer(dhd_pub_t *pub)
8716 {
8717 	int ret = BCME_OK;
8718 	unsigned long flags = 0;
8719 	uint32 join_to_ms;
8720 
8721 	if (!pub->timeout_info) {
8722 		DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
8723 		ret = BCME_ERROR;
8724 		ASSERT(0);
8725 		goto exit;
8726 	}
8727 
8728 	join_to_ms = pub->timeout_info->join_timeout_val;
8729 	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8730 	if (pub->timeout_info->join_timer_active) {
8731 		DHD_ERROR(("%s: stopping active timer\n", __FUNCTION__));
8732 		__dhd_stop_join_timer(pub);
8733 	}
8734 	if (pub->timeout_info->join_timeout_val == 0) {
8735 		/* Disable Join timer timeout */
8736 		DHD_INFO(("%s DHD: join timeout disabled\n", __FUNCTION__));
8737 	} else {
8738 		pub->timeout_info->join_timer = osl_timer_init(pub->osh,
8739 			"join_timer", dhd_join_timeout, pub);
8740 		osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0);
8741 		pub->timeout_info->join_timer_active = TRUE;
8742 		pub->timeout_info->cmd_join_error = 0;
8743 		dhd_set_join_error(pub, WLC_SSID_MASK);
8744 		if (pub->secure_join) {
8745 			dhd_set_join_error(pub, WLC_WPA_MASK);
8746 		}
8747 		DHD_ERROR(("%s: join timer started 0x%x\n", __FUNCTION__,
8748 			pub->timeout_info->cmd_join_error));
8749 	}
8750 	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8751 exit:
8752 	return ret;
8753 }
8754 
8755 int
dhd_stop_join_timer(dhd_pub_t * pub)8756 dhd_stop_join_timer(dhd_pub_t *pub)
8757 {
8758 	int ret = BCME_OK;
8759 	unsigned long flags;
8760 
8761 	if (!pub) {
8762 		DHD_ERROR(("%s DHD: pub NULL\n", __FUNCTION__));
8763 		ASSERT(0);
8764 		return BCME_ERROR;
8765 	}
8766 
8767 	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8768 	ret = __dhd_stop_join_timer(pub);
8769 	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8770 	return ret;
8771 }
8772 
8773 static void
dhd_set_join_error(dhd_pub_t * pub,uint32 mask)8774 dhd_set_join_error(dhd_pub_t *pub, uint32 mask)
8775 {
8776 	DHD_INFO(("Setting join Error %d\n", mask));
8777 	if (pub->timeout_info) {
8778 		pub->timeout_info->cmd_join_error |= mask;
8779 	}
8780 }
8781 
8782 void
dhd_clear_join_error(dhd_pub_t * pub,uint32 mask)8783 dhd_clear_join_error(dhd_pub_t *pub, uint32 mask)
8784 {
8785 	unsigned long flags;
8786 
8787 	DHD_INFO(("%s clear join error %d\n", __FUNCTION__, mask));
8788 	if (!(pub->timeout_info)) {
8789 		return;
8790 	}
8791 
8792 	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8793 	pub->timeout_info->cmd_join_error &= ~mask;
8794 	/* If both WLC_SSID_MASK, WLC_WPA_MASK are received cancel the timer */
8795 	if (!(pub->timeout_info->cmd_join_error)) {
8796 		if (__dhd_stop_join_timer(pub)) {
8797 			DHD_ERROR(("%s: dhd_stop_join_timer failed\n", __FUNCTION__));
8798 			ASSERT(0);
8799 		}
8800 	}
8801 	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8802 }
8803 
8804 static void
dhd_scan_timeout(void * ctx)8805 dhd_scan_timeout(void *ctx)
8806 {
8807 	dhd_pub_t *pub = (dhd_pub_t *)ctx;
8808 	unsigned long flags;
8809 
8810 	if (!pub) {
8811 		DHD_ERROR(("DHD: pub NULL\n"));
8812 		ASSERT(0);
8813 		return;
8814 	}
8815 
8816 	if (pub->timeout_info == NULL) {
8817 		DHD_ERROR(("timeout_info pointer is NULL\n"));
8818 		ASSERT(0);
8819 		return;
8820 	}
8821 	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
8822 	if (pub->timeout_info->scan_timer_active) {
8823 		DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val));
8824 		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
8825 		dhd_stop_scan_timer(pub, FALSE, 0);
8826 		if (!dhd_query_bus_erros(pub))
8827 			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO);
8828 	} else {
8829 		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
8830 	}
8831 }
8832 
/* Arm the one-shot scan timer.
 * For escans the firmware sync id is recorded so that only the matching
 * completion can stop the timer; if the escan was already aborted under
 * the current sync id, the timer is deliberately not started.
 * A zero scan_timeout_val disables scan timeout detection.
 * Returns BCME_OK (or BCME_ERROR when timeout_info is NULL).
 */
int
dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan)
{
	int ret = BCME_OK;
	unsigned long flags = 0;
	uint32 scan_to_ms;

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ret = BCME_ERROR;
		ASSERT(0);
		/* exit_null skips the unlock: the lock was never taken */
		goto exit_null;
	}
	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
	scan_to_ms = pub->timeout_info->scan_timeout_val;

	if (is_escan) {
		/* Abort already seen for this sync id: consume the flag and
		 * leave the timer disarmed (still returns BCME_OK).
		 */
		if (pub->timeout_info->escan_aborted &&
				pub->esync_id == pub->timeout_info->abort_syncid) {
			pub->timeout_info->escan_aborted = FALSE;
			DHD_INFO(("%s: escan already aborted, do not start timer \n",
				__FUNCTION__));
			goto exit;
		}
		pub->timeout_info->escan_syncid = pub->esync_id;
	} else {
		/* Non-escan: no sync id to match on stop */
		pub->timeout_info->escan_syncid = 0;
	}

	if (pub->timeout_info->scan_timer_active) {
		/* cancel any earlier running timer */
		DHD_INFO(("%s:Timer already active, stopping it.\n", __FUNCTION__));
		osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
		pub->timeout_info->scan_timer_active = FALSE;
	}

	if (pub->timeout_info->scan_timeout_val == 0) {
		/* Disable Scan timer timeout */
		DHD_INFO(("DHD: Scan Timeout Disabled\n"));
	} else {
		pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer",
			dhd_scan_timeout, pub);
		pub->timeout_info->scan_timer_active = TRUE;
		osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0);
		DHD_INFO(("%s Scan Timer started\n", __FUNCTION__));
	}

exit:
	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
exit_null:
	return ret;
}
8885 
8886 int
dhd_stop_scan_timer(dhd_pub_t * pub,bool is_escan,uint16 sync_id)8887 dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id)
8888 {
8889 	int ret = BCME_OK;
8890 	unsigned long flags = 0;
8891 
8892 	if (!pub) {
8893 		DHD_ERROR(("DHD: pub NULL\n"));
8894 		ASSERT(0);
8895 		return BCME_ERROR;
8896 	}
8897 
8898 	if (!pub->timeout_info) {
8899 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8900 		ret = BCME_ERROR;
8901 		ASSERT(0);
8902 		goto exit_null;
8903 	}
8904 
8905 	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
8906 
8907 	if (pub->timeout_info->scan_timer_active) {
8908 		if (is_escan) {
8909 			if (sync_id == pub->timeout_info->escan_syncid) {
8910 				osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
8911 				pub->timeout_info->scan_timer_active = FALSE;
8912 				DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
8913 			}
8914 		} else {
8915 			osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
8916 			pub->timeout_info->scan_timer_active = FALSE;
8917 			DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
8918 		}
8919 
8920 	} else {
8921 		DHD_INFO(("DHD: SCAN timer is not active\n"));
8922 	}
8923 
8924 	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
8925 
8926 exit_null:
8927 	return ret;
8928 }
8929 
8930 static void
dhd_bus_timeout(void * ctx)8931 dhd_bus_timeout(void *ctx)
8932 {
8933 	dhd_pub_t *pub = (dhd_pub_t *)ctx;
8934 	unsigned long flags;
8935 
8936 	if (pub->timeout_info == NULL) {
8937 		DHD_ERROR(("timeout_info pointer is NULL\n"));
8938 		ASSERT(0);
8939 		return;
8940 	}
8941 
8942 	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
8943 	if (pub->timeout_info && pub->timeout_info->bus_timer_active) {
8944 		DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val));
8945 		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
8946 #ifdef PCIE_OOB
8947 		/* Assert device_wake so that UART_Rx is available */
8948 		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
8949 			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
8950 			ASSERT(0);
8951 		}
8952 #endif /* PCIE_OOB */
8953 		if (dhd_stop_bus_timer(pub)) {
8954 			DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__));
8955 			ASSERT(0);
8956 		}
8957 		if (!dhd_query_bus_erros(pub)) {
8958 			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO);
8959 		}
8960 #ifdef BCMPCIE
8961 		dhd_msgbuf_iovar_timeout_dump(pub);
8962 #endif /* BCMPCIE */
8963 	} else {
8964 		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
8965 	}
8966 }
8967 
8968 int
dhd_start_bus_timer(dhd_pub_t * pub)8969 dhd_start_bus_timer(dhd_pub_t *pub)
8970 {
8971 	int ret = BCME_OK;
8972 	unsigned long flags = 0;
8973 	uint32 bus_to_ms;
8974 
8975 	if (!pub->timeout_info) {
8976 		DHD_ERROR(("DHD: timeout_info NULL\n"));
8977 		ret = BCME_ERROR;
8978 		ASSERT(0);
8979 		goto exit_null;
8980 	}
8981 	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
8982 	bus_to_ms = pub->timeout_info->bus_timeout_val;
8983 
8984 	if (pub->timeout_info->bus_timeout_val == 0) {
8985 		/* Disable Bus timer timeout */
8986 		DHD_INFO(("DHD: Bus Timeout Disabled\n"));
8987 		goto exit;
8988 	}
8989 	if (pub->timeout_info->bus_timer_active) {
8990 		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
8991 		ret = BCME_ERROR;
8992 		ASSERT(0);
8993 	} else {
8994 		pub->timeout_info->bus_timer = osl_timer_init(pub->osh,
8995 			"bus_timer", dhd_bus_timeout, pub);
8996 		pub->timeout_info->bus_timer_active = TRUE;
8997 		osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0);
8998 	}
8999 	if (ret == BCME_OK) {
9000 		DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__));
9001 	}
9002 exit:
9003 	DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
9004 exit_null:
9005 	return ret;
9006 }
9007 
9008 int
dhd_stop_bus_timer(dhd_pub_t * pub)9009 dhd_stop_bus_timer(dhd_pub_t *pub)
9010 {
9011 	int ret = BCME_OK;
9012 	unsigned long flags;
9013 
9014 	if (!pub) {
9015 		DHD_ERROR(("DHD: pub NULL\n"));
9016 		ASSERT(0);
9017 		return BCME_ERROR;
9018 	}
9019 
9020 	if (!pub->timeout_info) {
9021 		DHD_ERROR(("DHD: timeout_info NULL\n"));
9022 		ret = BCME_ERROR;
9023 		ASSERT(0);
9024 		goto exit;
9025 	}
9026 
9027 	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
9028 
9029 	if (pub->timeout_info->bus_timer_active) {
9030 		osl_timer_del(pub->osh, pub->timeout_info->bus_timer);
9031 		pub->timeout_info->bus_timer_active = FALSE;
9032 	}
9033 	else {
9034 		DHD_INFO(("DHD: BUS timer is not active\n"));
9035 	}
9036 	if (ret == BCME_OK) {
9037 		DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__));
9038 	}
9039 	DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
9040 exit:
9041 	return ret;
9042 }
9043 
9044 int
dhd_set_request_id(dhd_pub_t * pub,uint16 id,uint32 cmd)9045 dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd)
9046 {
9047 	DHD_INFO(("%s: id:%d\n", __FUNCTION__, id));
9048 	if (pub->timeout_info) {
9049 		pub->timeout_info->cmd_request_id = id;
9050 		pub->timeout_info->cmd = cmd;
9051 		return BCME_OK;
9052 	} else {
9053 		return BCME_ERROR;
9054 	}
9055 }
9056 
9057 uint16
dhd_get_request_id(dhd_pub_t * pub)9058 dhd_get_request_id(dhd_pub_t *pub)
9059 {
9060 	if (pub->timeout_info) {
9061 		return (pub->timeout_info->cmd_request_id);
9062 	} else {
9063 		return 0;
9064 	}
9065 }
9066 
9067 void
dhd_get_scan_to_val(dhd_pub_t * pub,uint32 * to_val)9068 dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val)
9069 {
9070 	if (pub->timeout_info) {
9071 		*to_val = pub->timeout_info->scan_timeout_val;
9072 	} else {
9073 		*to_val = 0;
9074 	}
9075 }
9076 
9077 void
dhd_set_scan_to_val(dhd_pub_t * pub,uint32 to_val)9078 dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val)
9079 {
9080 	if (pub->timeout_info) {
9081 		DHD_INFO(("Setting scan TO val:%d\n", to_val));
9082 		pub->timeout_info->scan_timeout_val = to_val;
9083 	}
9084 }
9085 
9086 void
dhd_get_join_to_val(dhd_pub_t * pub,uint32 * to_val)9087 dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val)
9088 {
9089 	if (pub->timeout_info) {
9090 		*to_val = pub->timeout_info->join_timeout_val;
9091 	} else {
9092 		*to_val = 0;
9093 	}
9094 }
9095 
9096 void
dhd_set_join_to_val(dhd_pub_t * pub,uint32 to_val)9097 dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val)
9098 {
9099 	if (pub->timeout_info) {
9100 		DHD_INFO(("Setting join TO val:%d\n", to_val));
9101 		pub->timeout_info->join_timeout_val = to_val;
9102 	}
9103 }
9104 
9105 void
dhd_get_cmd_to_val(dhd_pub_t * pub,uint32 * to_val)9106 dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val)
9107 {
9108 	if (pub->timeout_info) {
9109 		*to_val = pub->timeout_info->cmd_timeout_val;
9110 	} else {
9111 		*to_val = 0;
9112 	}
9113 }
9114 
9115 void
dhd_set_cmd_to_val(dhd_pub_t * pub,uint32 to_val)9116 dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val)
9117 {
9118 	if (pub->timeout_info) {
9119 		DHD_INFO(("Setting cmd TO val:%d\n", to_val));
9120 		pub->timeout_info->cmd_timeout_val = to_val;
9121 	}
9122 }
9123 
9124 void
dhd_get_bus_to_val(dhd_pub_t * pub,uint32 * to_val)9125 dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val)
9126 {
9127 	if (pub->timeout_info) {
9128 		*to_val = pub->timeout_info->bus_timeout_val;
9129 	} else {
9130 		*to_val = 0;
9131 	}
9132 }
9133 
9134 void
dhd_set_bus_to_val(dhd_pub_t * pub,uint32 to_val)9135 dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val)
9136 {
9137 	if (pub->timeout_info) {
9138 		DHD_INFO(("Setting bus TO val:%d\n", to_val));
9139 		pub->timeout_info->bus_timeout_val = to_val;
9140 	}
9141 }
9142 #endif /* REPORT_FATAL_TIMEOUTS */
9143 
9144 #ifdef SHOW_LOGTRACE
/* Parse a logstrs.bin image (already loaded into raw_fmts, logstrs_size
 * bytes) and populate event_log with the lognum/logstr format tables used
 * to decode firmware event-log traces.
 *
 * Three layouts are supported, detected via a trailer at the end of the
 * file: header v1, header v2 (which additionally requires the FWID embedded
 * in logstrs.bin to match the FWID string at the end of the firmware
 * binary), and a legacy headerless RAM-only layout.
 *
 * Returns BCME_OK on success; BCME_DECERR on a v2 FWID mismatch/IO failure;
 * BCME_ERROR on other parse failures.
 */
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
		dhd_event_log_t *event_log)
{
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	logstr_trailer_t *trailer = NULL;
	int ram_index = 0;
	char **fmts = NULL;
	int num_fmts = 0;
	/* Only cleared after a successful v2 FWID match; v1/legacy paths
	 * never consult it.
	 */
	bool match_fail = TRUE;
	int32 i = 0;
	uint8 *pfw_id = NULL;
	uint32 fwid = 0;
#ifdef DHD_LINUX_STD_FW_API
	int err = 0;
	const struct firmware *fw = NULL;
#else
	void *file = NULL;
	int file_len = 0;
#endif /* DHD_LINUX_STD_FW_API */
	char fwid_str[FWID_STR_LEN];
	uint32 hdr_logstrs_size = 0;

	/* Read last three words in the logstrs.bin file */
	trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_trailer_t));

	if (trailer->log_magic == LOGSTRS_MAGIC) {
		/*
		* logstrs.bin has a header.
		*/
		if (trailer->version == 1) {
			logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
					logstrs_size - sizeof(logstr_header_v1_t));
			DHD_INFO(("%s: logstr header version = %u\n",
					__FUNCTION__, hdr_v1->version));
			num_fmts =	hdr_v1->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr_v1->ram_lognums_offset -
				hdr_v1->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
			logstrs = (char *)	 &raw_fmts[hdr_v1->rom_logstrs_offset];
			hdr_logstrs_size = hdr_v1->logstrs_size;
		} else if (trailer->version == 2) {
			logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
					sizeof(logstr_header_t));
			DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
					__FUNCTION__, hdr->version, hdr->flags));

			/* For ver. 2 of the header, need to match fwid of
			 *  both logstrs.bin and fw bin
			 */

#ifdef DHD_LINUX_STD_FW_API
			err = dhd_os_get_img_fwreq(&fw, st_str_file_path);
			if (err < 0) {
				DHD_ERROR(("dhd_os_get_img(Request Firmware API) error : %d\n",
					err));
				goto error;
			}
			memset(fwid_str, 0, sizeof(fwid_str));
			/* The FWID string sits in the last bytes of the fw image */
			err = memcpy_s(fwid_str, (sizeof(fwid_str) - 1),
				&(fw->data[fw->size - (sizeof(fwid_str) - 1)]),
				(sizeof(fwid_str) - 1));
			if (err) {
				DHD_ERROR(("%s: failed to copy raw_fmts, err=%d\n",
					__FUNCTION__, err));
				goto error;
			}
#else
			/* read the FWID from fw bin */
			file = dhd_os_open_image1(NULL, st_str_file_path);
			if (!file) {
				DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
				goto error;
			}
			file_len = dhd_os_get_image_size(file);
			if (file_len <= 0) {
				DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
				goto error;
			}
			/* fwid is at the end of fw bin in string format */
			if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
				DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
				goto error;
			}

			memset(fwid_str, 0, sizeof(fwid_str));
			if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
				DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
				goto error;
			}
#endif /* DHD_LINUX_STD_FW_API */
			/* Look for either FWID marker string within the tail bytes */
			pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_1, strlen(FWID_STR_1));
			if (!pfw_id) {
				pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_2, strlen(FWID_STR_2));
				if (!pfw_id) {
					DHD_ERROR(("%s: could not find id in FW bin!\n",
							__FUNCTION__));
					goto error;
				}
			}
			/* search for the '-' in the fw id str, after which the
			 * actual 4 byte fw id is present
			 */
			/* NOTE(review): if no '-' follows the marker, this walk
			 * can run past the end of fwid_str before the NUL is
			 * guaranteed — relies on the marker format always
			 * containing '-'; confirm FWID_STR_1/2 layout.
			 */
			while (pfw_id && *pfw_id != '-') {
				++pfw_id;
			}
			++pfw_id;
			fwid = bcm_strtoul((char *)pfw_id, NULL, 16);

			/* check if fw id in logstrs.bin matches the fw one */
			if (hdr->fw_id != fwid) {
				DHD_ERROR(("%s: logstr id does not match FW!"
					"logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
					__FUNCTION__, hdr->fw_id, fwid));
				goto error;
			}

			match_fail = FALSE;
			num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr->ram_lognums_offset -
				hdr->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
			logstrs = (char *)	 &raw_fmts[hdr->rom_logstrs_offset];
			hdr_logstrs_size = hdr->logstrs_size;

/* Cleanup target for the v2 path only: close the fw handle, then fail
 * out with BCME_DECERR unless the FWID match succeeded above.
 */
error:
#ifdef DHD_LINUX_STD_FW_API
			if (fw) {
				dhd_os_close_img_fwreq(fw);
			}
#else
			if (file) {
				dhd_os_close_image1(NULL, file);
			}
#endif /* DHD_LINUX_STD_FW_API */
			if (match_fail) {
				return BCME_DECERR;
			}
		} else {
			DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
					trailer->version));
			return BCME_ERROR;
		}
		/* Header-reported size must agree with the bytes we were given */
		if (logstrs_size != hdr_logstrs_size) {
			DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
			return BCME_ERROR;
		}
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);

		/* Legacy RAM-only logstrs.bin format:
		 *	  - RAM 'lognums' section
		 *	  - RAM 'logstrs' section.
		 *
		 * 'lognums' is an array of indexes for the strings in the
		 * 'logstrs' section. The first uint32 is an index to the
		 * start of 'logstrs'. Therefore, if this index is divided
		 * by 'sizeof(uint32)' it provides the number of logstr
		 *	entries.
		 */
		ram_index = 0;
		lognums = (uint32 *) raw_fmts;
		logstrs = (char *) &raw_fmts[num_fmts << 2];
	}
	if (num_fmts) {
		if (event_log->fmts != NULL) {
			fmts = event_log->fmts;	/* reuse existing malloced fmts */
		} else {
			fmts = MALLOC(osh, num_fmts  * sizeof(char *));
		}
	}
	if (fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
		return BCME_ERROR;
	}
	event_log->fmts_size = num_fmts  * sizeof(char *);

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		* (they are 0-indexed relative to 'rom_logstrs_offset').
		*
		* RAM lognums are already indexed to point to the correct RAM logstrs (they
		* are 0-indexed relative to the start of the logstrs.bin file).
		*/
		if (i == ram_index) {
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	event_log->fmts = fmts;
	event_log->raw_fmts_size = logstrs_size;
	event_log->raw_fmts = raw_fmts;
	event_log->num_fmts = num_fmts;
	return BCME_OK;
} /* dhd_parse_logstrs_file */
9347 
9348 #ifdef DHD_LINUX_STD_FW_API
/* Scan the firmware map file (a 'struct firmware' image in 'ptr') in
 * READ_NUM_BYTES-sized chunks for the text_start (ramstart), rodata_start
 * and rodata_end symbol lines, and return their addresses.
 * Chunks overlap by GO_BACK_FILE_POS_NUM_BYTES so a symbol split across a
 * chunk boundary is not missed.
 *
 * Returns BCME_OK when all three values were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *ptr, uint32 *ramstart, uint32 *rodata_start,
		uint32 *rodata_end)
{
	char *raw_fmts =  NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES, offset = 0;
	/* Fix: read_size may shrink on the final chunk (read_size =
	 * size - offset), but MFREE must be given the size that was actually
	 * allocated. Previously the fail path freed with the possibly-shrunk
	 * read_size + 1, misreporting the allocation size.
	 */
	uint32 alloc_size = READ_NUM_BYTES + 1;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;
	uint32 size = 0;

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;
	size = (uint32)(((struct firmware *)ptr)->size);

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, alloc_size);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map  file */
	while (count != ALL_MAP_VAL)
	{
		/* Bound check for size before doing memcpy() */
		if ((offset + read_size) > size) {
			read_size = size - offset;
		}

		error = memcpy_s(raw_fmts, read_size,
			(((char *)((struct firmware *)ptr)->data) + offset), read_size);
		if (error) {
			DHD_ERROR(("%s: failed to copy raw_fmts, err=%d\n",
				__FUNCTION__, error));
			goto fail;
		}
		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			/* the hex address precedes the symbol name on the line */
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if ((offset + read_size) >= size) {
			/* whole image scanned */
			break;
		}

		memset(raw_fmts, 0, read_size);
		/* overlap chunks so a split symbol string is still found */
		offset += (read_size - GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, alloc_size);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
				count));
		return BCME_ERROR;
	}
} /* dhd_parse_map_file */
9442 #else
/* Scan the firmware map file (opaque 'file' handle read via dhd_os_*
 * helpers) in READ_NUM_BYTES-sized chunks for the text_start (ramstart),
 * rodata_start and rodata_end symbol lines, and return their addresses.
 * After each chunk the file position is rewound GO_BACK_FILE_POS_NUM_BYTES
 * so a symbol split across a chunk boundary is not missed.
 *
 * Returns BCME_OK when all three values were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
		uint32 *rodata_end)
{
	char *raw_fmts =  NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES;
	int error = 0;
	char * cptr = NULL;
	char c;
	/* bitmask of RAMSTART_BIT/RDSTART_BIT/RDEND_BIT found so far */
	uint8 count = 0;

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, read_size + 1);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map  file */
	while (count != ALL_MAP_VAL)
	{
		/* error holds the byte count read on success */
		error = dhd_os_read_file(file, raw_fmts, read_size);
		if (error < 0) {
			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
					error));
			goto fail;
		}

		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			/* the hex address precedes the symbol name on the line */
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if (error < (int)read_size) {
			/*
			* since we reset file pos back to earlier pos by
			* GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
			* The reason for this is if string is spreaded across
			* bytes, the read function should not miss it.
			* So if ret value is less than read_size, reached EOF don't read further
			*/
			break;
		}
		memset(raw_fmts, 0, read_size);
		/*
		* go back to predefined NUM of bytes so that we won't miss
		* the string and  addr even if it comes as splited in next read.
		*/
		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, read_size + 1);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
				count));
		return BCME_ERROR;
	}

} /* dhd_parse_map_file */
9540 #endif /* DHD_LINUX_STD_FW_API */
9541 
9542 #ifdef PCIE_FULL_DONGLE
/*
 * Validate and dispatch one info-buffer packet received from the dongle.
 *
 * Expected layout (little-endian, consumed front to back with PKTPULL):
 *   |infobuf version (u32)|info_buf_payload_hdr_t (type/length)|payload|
 * Only version PCIE_INFOBUF_V1 with payload type
 * PCIE_INFOBUF_V1_TYPE_LOGTRACE is accepted; the payload is then handed
 * to dhd_dbg_trace_evnt_handler().
 *
 * Returns BCME_OK on successful dispatch, BCME_ERROR if any length,
 * version or type check fails (the packet is left partially pulled in
 * that case).
 */
int
dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
		dhd_event_log_t *event_data)
{
	uint32 infobuf_version;
	info_buf_payload_hdr_t *payload_hdr_ptr;
	uint16 payload_hdr_type;
	uint16 payload_hdr_length;

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	/* must hold at least the 32-bit version word */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
		DHD_ERROR(("%s: infobuf too small for version field\n",
			__FUNCTION__));
		goto exit;
	}
	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
	if (infobuf_version != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
			__FUNCTION__, infobuf_version));
		goto exit;
	}

	/* Version 1 infobuf has a single type/length (and then value) field */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length  fields\n",
			__FUNCTION__));
		goto exit;
	}
	/* Process/parse the common info payload header (type/length) */
	payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
	payload_hdr_type = ltoh16(payload_hdr_ptr->type);
	payload_hdr_length = ltoh16(payload_hdr_ptr->length);
	if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, payload_hdr_type));
		goto exit;
	}
	PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));

	/* Validate that the specified length isn't bigger than the
	 * provided data.
	 */
	if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
		DHD_ERROR(("%s: infobuf logtrace length is bigger"
			" than actual buffer data\n", __FUNCTION__));
		goto exit;
	}
	dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
		event_data, payload_hdr_length);

	return BCME_OK;

exit:
	return BCME_ERROR;
} /* dhd_event_logtrace_infobuf_pkt_process */
9600 #endif /* PCIE_FULL_DONGLE */
9601 #endif /* SHOW_LOGTRACE */
9602 
9603 #ifdef BTLOG
/*
 * Forward a BT-log packet received from the dongle to the debug
 * subsystem. The packet payload (data pointer + length) is handed to
 * dhd_dbg_bt_log_handler() as-is; always returns BCME_OK.
 */
int
dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf)
{
	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	dhd_dbg_bt_log_handler(dhdp,
		PKTDATA(dhdp->osh, pktbuf), PKTLEN(dhdp->osh, pktbuf));

	return BCME_OK;
}
9614 #endif /* BTLOG */
9615 
9616 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
9617 
9618 /* To handle the TDLS event in the dhd_common.c
9619  */
/*
 * Handle a TDLS event by refreshing the cached TDLS peer table from the
 * event payload. Returns the status from dhd_tdls_update_peer_info().
 */
int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
{
	int status;

	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
	status = dhd_tdls_update_peer_info(dhd_pub, event);
	GCC_DIAGNOSTIC_POP()

	return status;
}
9630 
/*
 * Release every node of the TDLS peer list and reset the table state
 * (node pointer and peer count). Returns BCME_ERROR when dhd_pub is
 * NULL or when the table is already empty, BCME_OK otherwise.
 */
int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
{
	tdls_peer_node_t *node = NULL, *doomed = NULL;

	if (!dhd_pub)
		return BCME_ERROR;

	node = dhd_pub->peer_tbl.node;
	if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
		return BCME_ERROR;

	/* walk the singly-linked list, freeing each node after advancing */
	while (node != NULL) {
		doomed = node;
		node = node->next;
		MFREE(dhd_pub->osh, doomed, sizeof(tdls_peer_node_t));
	}

	dhd_pub->peer_tbl.tdls_peer_count = 0;
	dhd_pub->peer_tbl.node = NULL;
	return BCME_OK;
}
9650 #endif	/* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
9651 
9652 /* pretty hex print a contiguous buffer
9653 * based on the debug level specified
9654 */
9655 void
dhd_prhex(const char * msg,volatile uchar * buf,uint nbytes,uint8 dbg_level)9656 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
9657 {
9658 	char line[128], *p;
9659 	int len = sizeof(line);
9660 	int nchar;
9661 	uint i;
9662 
9663 	if (msg && (msg[0] != '\0')) {
9664 		if (dbg_level == DHD_ERROR_VAL)
9665 			DHD_ERROR(("%s:\n", msg));
9666 		else if (dbg_level == DHD_INFO_VAL)
9667 			DHD_INFO(("%s:\n", msg));
9668 		else if (dbg_level == DHD_TRACE_VAL)
9669 			DHD_TRACE(("%s:\n", msg));
9670 	}
9671 
9672 	p = line;
9673 	for (i = 0; i < nbytes; i++) {
9674 		if (i % 16 == 0) {
9675 			nchar = snprintf(p, len, "  %04x: ", i);	/* line prefix */
9676 			p += nchar;
9677 			len -= nchar;
9678 		}
9679 		if (len > 0) {
9680 			nchar = snprintf(p, len, "%02x ", buf[i]);
9681 			p += nchar;
9682 			len -= nchar;
9683 		}
9684 
9685 		if (i % 16 == 15) {
9686 			/* flush line */
9687 			if (dbg_level == DHD_ERROR_VAL)
9688 				DHD_ERROR(("%s:\n", line));
9689 			else if (dbg_level == DHD_INFO_VAL)
9690 				DHD_INFO(("%s:\n", line));
9691 			else if (dbg_level == DHD_TRACE_VAL)
9692 				DHD_TRACE(("%s:\n", line));
9693 			p = line;
9694 			len = sizeof(line);
9695 		}
9696 	}
9697 
9698 	/* flush last partial line */
9699 	if (p != line) {
9700 		if (dbg_level == DHD_ERROR_VAL)
9701 			DHD_ERROR(("%s:\n", line));
9702 		else if (dbg_level == DHD_INFO_VAL)
9703 			DHD_INFO(("%s:\n", line));
9704 		else if (dbg_level == DHD_TRACE_VAL)
9705 			DHD_TRACE(("%s:\n", line));
9706 	}
9707 }
9708 
/*
 * Run a host<->dongle throughput test described by 'tput_data'.
 *
 * TX direction: builds tput_pkt_t frames (optionally prefixed with an
 * Ethernet header), sends them in batches bounded by the available TX
 * buffers, appends a final TPUT_PKT_TYPE_STOP packet, and waits for
 * completion of each batch. RX direction: simply waits for the RX side
 * (dhd_tput_test_rx) to signal completion or times out.
 *
 * On success the measured results (time_ms, tput_bps, packet counters)
 * are copied back into 'tput_data'. Returns BCME_OK or a BCME_ error
 * code; the per-test counters are logged on every exit path.
 */
int
dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
{
	struct ether_header ether_hdr;
	tput_pkt_t tput_pkt;
	void *pkt = NULL;
	uint8 *pktdata = NULL;
	uint32 pktsize = 0;
	uint64 total_size = 0;
	uint32 *crc = 0;
	uint32 pktid = 0;
	uint32 total_num_tx_pkts = 0;
	int err = 0, err_exit = 0;
	uint32 i = 0;
	uint64 time_taken = 0;
	int max_txbufs = 0;
	uint32 n_batches = 0;	/* number of full-size TX batches */
	uint32 n_remain = 0;	/* packets left over after the full batches */
	uint8 tput_pkt_hdr_size = 0;
	bool batch_cnt = FALSE;
	bool tx_stop_pkt = FALSE;

#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
	uint32 cur_intr_poll_period = 0;
	cur_intr_poll_period = dhd_os_get_intr_poll_period();
	/* before running tput_test, set interrupt poll period to a lesser value */
	dhd_os_set_intr_poll_period(dhd->bus, INTR_POLL_PERIOD_CRITICAL);
#endif	/* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */

	/* reject callers built against a different tput_test_t layout */
	if (tput_data->version != TPUT_TEST_T_VER ||
		tput_data->length != TPUT_TEST_T_LEN) {
		DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
		err_exit = BCME_BADARG;
		goto exit_error;
	}

	if (dhd->tput_data.tput_test_running) {
		DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
		err_exit = BCME_BUSY;
		goto exit_error;
	}
#ifdef PCIE_FULL_DONGLE
	/*
	 * 100 bytes to accommodate ether header and tput header. As of today
	 * both occupy 30 bytes. Rest is reserved.
	 */
	if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
		(tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
		DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
			__FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
			(DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
		err_exit = BCME_BUFTOOLONG;
		goto exit_error;
	}
#endif
	max_txbufs = dhd_get_max_txbufs(dhd);
	max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);

	if (!(tput_data->num_pkts > 0)) {
		DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
			__FUNCTION__, tput_data->num_pkts));
		err_exit = BCME_ERROR;
		goto exit_error;
	}

	/* take a private copy of the request and reset all counters */
	memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
	memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
	dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
	dhd->tput_data.pkts_cmpl = 0;
	dhd->tput_start_ts = dhd->tput_stop_ts = 0;

	if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
		pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
				(tput_data->payload_size - 12);
	} else {
		pktsize = sizeof(tput_pkt_t) +
				(tput_data->payload_size - 12);
	}

	/* size of the tput header fields preceding the crc32 member */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
			(uint8 *)&tput_pkt.mac_sta);

	/* mark the tput test as started */
	dhd->tput_data.tput_test_running = TRUE;

	if (tput_data->direction == TPUT_DIR_TX) {
		/* for ethernet header */
		memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
		memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
		ether_hdr.ether_type = hton16(ETHER_TYPE_IP);

		/* fill in the tput pkt */
		memset(&tput_pkt, 0, sizeof(tput_pkt));
		memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
		memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
		tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
		tput_pkt.num_pkts = hton32(tput_data->num_pkts);

		/* split the requested count into max_txbufs-sized batches */
		if (tput_data->num_pkts > (uint32)max_txbufs) {
			n_batches = tput_data->num_pkts / max_txbufs;
			n_remain = tput_data->num_pkts % max_txbufs;
		} else {
			n_batches = 0;
			n_remain = tput_data->num_pkts;
		}
		DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
			__FUNCTION__, tput_data->num_pkts, n_batches, n_remain));

		do {
			/* reset before every batch */
			dhd->batch_tx_pkts_cmpl = 0;
			if (n_batches) {
				dhd->batch_tx_num_pkts = max_txbufs;
				--n_batches;
			} else if (n_remain) {
				dhd->batch_tx_num_pkts = n_remain;
				n_remain = 0;
			} else {
				DHD_ERROR(("Invalid. This should not hit\n"));
			}

			dhd->tput_start_ts = OSL_SYSUPTIME_US();
			/* loop runs one extra iteration when tx_stop_pkt is set,
			 * to send the trailing STOP packet
			 */
			for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
				pkt = PKTGET(dhd->osh, pktsize, TRUE);
				if (!pkt) {
					dhd->tput_data.tput_test_running = FALSE;
					DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
						__FUNCTION__));
					DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
						__FUNCTION__, dhd->tput_data.pkts_good,
						dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
					err_exit = BCME_NOMEM;
					goto exit_error;
				}
				pktdata = PKTDATA(dhd->osh, pkt);
				PKTSETLEN(dhd->osh, pkt, pktsize);
				memset(pktdata, 0, pktsize);
				if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
					memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
					pktdata += sizeof(ether_hdr);
				}
				/* send stop pkt as last pkt */
				if (tx_stop_pkt) {
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
					tx_stop_pkt = FALSE;
				} else
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
				tput_pkt.pkt_id = hton32(pktid++);
				tput_pkt.crc32 = 0;
				memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
				/* compute crc32 over the pkt-id, num-pkts and data fields */
				crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
				*crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
						8 + (tput_data->payload_size - 12),
						CRC32_INIT_VALUE));

				err = dhd_sendpkt(dhd, 0, pkt);
				if (err != BCME_OK) {
					DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
						__FUNCTION__, pktid, err));
					dhd->tput_data.pkts_bad++;
				}
				total_num_tx_pkts++;
				if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
					tx_stop_pkt = TRUE;
				}
			}
			DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
			if (!dhd_os_tput_test_wait(dhd, NULL,
					TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
				dhd->tput_stop_ts = OSL_SYSUPTIME_US();
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: TX completion timeout !"
					" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
					__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			/* record elapsed time only for the first completed batch */
			if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
				(dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
				if (!time_taken) {
					time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
				}
			} else {
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
					__FUNCTION__));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			if (n_batches || n_remain) {
				batch_cnt = TRUE;
			} else {
				batch_cnt = FALSE;
			}
		} while (batch_cnt);
	} else {
		/* TPUT_DIR_RX */
		DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
		if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
			DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
			dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		}
	}

	/* calculate the throughput in bits per sec */
	if (dhd->tput_start_ts && dhd->tput_stop_ts &&
		(dhd->tput_stop_ts > dhd->tput_start_ts)) {
		time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
		time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
		dhd->tput_data.time_ms = time_taken;
		if (time_taken) {
			total_size = pktsize * dhd->tput_data.pkts_cmpl * 8;
			dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
			/* convert from ms to seconds */
			dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
		}
	} else {
		DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
	}
	DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
		dhd->tput_data.tput_bps, dhd->tput_data.time_ms));

	/* return measured results to the caller */
	memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));

	dhd->tput_data.tput_test_running = FALSE;

	err_exit = BCME_OK;

exit_error:
	DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
		__FUNCTION__, dhd->tput_data.pkts_good,
		dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
	/* restore interrupt poll period to the previous existing value */
	dhd_os_set_intr_poll_period(dhd->bus, cur_intr_poll_period);
#endif	/* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */

	return err_exit;
}
9949 
/*
 * RX-side handler for the throughput test: validates each received
 * tput packet (AP/STA MAC match, CRC32 over id/num-pkts/payload),
 * updates the pkts_cmpl/pkts_good/pkts_bad counters, and wakes the
 * waiter in dhd_tput_test() when the STOP packet or the full expected
 * count has arrived.
 */
void
dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
{
	uint8 *pktdata = NULL;
	tput_pkt_t *tput_pkt = NULL;
	uint32 crc = 0;
	uint8 tput_pkt_hdr_size = 0;

	pktdata = PKTDATA(dhd->osh, pkt);
	if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
		pktdata += sizeof(struct ether_header);
	tput_pkt = (tput_pkt_t *)pktdata;

	/* record the timestamp of the first packet received */
	if (dhd->tput_data.pkts_cmpl == 0) {
		dhd->tput_start_ts = OSL_SYSUPTIME_US();
	}

	/* STOP packets are not counted towards pkts_cmpl */
	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
			dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
		dhd->tput_data.pkts_cmpl++;
	}
	/* drop rx packets received beyond the specified # */
	if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
		return;

	DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
		ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));

	/* discard if mac addr of AP/STA does not match the specified ones */
	if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
			ETHER_ADDR_LEN) != 0) ||
		(memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
			ETHER_ADDR_LEN) != 0)) {
		dhd->tput_data.pkts_bad++;
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		return;
	}

	/* recompute the CRC the TX side stored (see dhd_tput_test) and compare */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
			(uint8 *)&tput_pkt->mac_sta);
	pktdata += tput_pkt_hdr_size + 4;
	crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
			CRC32_INIT_VALUE);
	if (crc != ntoh32(tput_pkt->crc32)) {
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		dhd->tput_data.pkts_bad++;
		return;
	}

	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
		dhd->tput_data.pkts_good++;

	/* if we have received the stop packet or all the # of pkts, we're done */
	if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
			dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
		dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		dhd_os_tput_test_wake(dhd);
	}
}
10012 
10013 #ifdef DUMP_IOCTL_IOV_LIST
10014 void
dhd_iov_li_append(dhd_pub_t * dhd,dll_t * list_head,dll_t * node)10015 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
10016 {
10017 	dll_t *item;
10018 	dhd_iov_li_t *iov_li;
10019 	dhd->dump_iovlist_len++;
10020 
10021 	if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
10022 		item = dll_head_p(list_head);
10023 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10024 		dll_delete(item);
10025 		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
10026 		dhd->dump_iovlist_len--;
10027 	}
10028 	dll_append(list_head, node);
10029 }
10030 
10031 void
dhd_iov_li_print(dll_t * list_head)10032 dhd_iov_li_print(dll_t *list_head)
10033 {
10034 	dhd_iov_li_t *iov_li;
10035 	dll_t *item, *next;
10036 	uint8 index = 0;
10037 	for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
10038 		next = dll_next_p(item);
10039 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10040 		DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
10041 	}
10042 }
10043 
10044 void
dhd_iov_li_delete(dhd_pub_t * dhd,dll_t * list_head)10045 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
10046 {
10047 	dll_t *item;
10048 	dhd_iov_li_t *iov_li;
10049 	while (!(dll_empty(list_head))) {
10050 		item = dll_head_p(list_head);
10051 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10052 		dll_delete(item);
10053 		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
10054 	}
10055 }
10056 #endif /* DUMP_IOCTL_IOV_LIST */
10057 
10058 #ifdef EWP_EDL
10059 /* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT
10060 * The reason being that, in hikey, if we try to DMA_MAP prealloced memory
10061 * it is failing with an 'out of space in SWIOTLB' error
10062 */
10063 int
dhd_edl_mem_init(dhd_pub_t * dhd)10064 dhd_edl_mem_init(dhd_pub_t *dhd)
10065 {
10066 	int ret = 0;
10067 
10068 	memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
10069 	ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
10070 	if (ret != BCME_OK) {
10071 		DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
10072 			__FUNCTION__));
10073 		return BCME_ERROR;
10074 	}
10075 	return BCME_OK;
10076 }
10077 
10078 /*
10079  * NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
10080  * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
10081  */
10082 void
dhd_edl_mem_deinit(dhd_pub_t * dhd)10083 dhd_edl_mem_deinit(dhd_pub_t *dhd)
10084 {
10085 	if (dhd->edl_ring_mem.va != NULL)
10086 		dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
10087 }
10088 
/*
 * Validate one EDL ring work item and dispatch its logtrace payload.
 *
 * Work item layout: |cmn_msg_hdr_t|payload|cmn_msg_hdr_t|, where
 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|data|.
 * In EDL the cmn_msg_hdr request_id field carries the payload length.
 *
 * Returns BCME_OK on dispatch, or a specific BCME_ code identifying
 * which validation step failed.
 */
int
dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
		void *evt_decode_data)
{
	msg_hdr_edl_t *msg = NULL;
	cmn_msg_hdr_t *cmn_msg_hdr = NULL;
	uint8 *buf = NULL;

	if (!data || !dhdp || !evt_decode_data) {
		DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* format of data in each work item in the EDL ring:
	* |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
	* payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
	*/
	cmn_msg_hdr = (cmn_msg_hdr_t *)data;
	msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
	buf = (uint8 *)msg;
	/* validate the fields */
	if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
			" expected (0x%x)\n", __FUNCTION__,
			msg->infobuf_ver, PCIE_INFOBUF_V1));
		return BCME_VERSION;
	}

	/* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
	if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		return BCME_BUFTOOLONG;
	}

	if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, ltoh16(msg->pyld_hdr.type)));
		return BCME_BADOPTION;
	}

	if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
			" than available buffer size %u\n", __FUNCTION__,
			ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
		return BCME_BADLEN;
	}

	/* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
	buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
	dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
		ltoh16(msg->pyld_hdr.length));

	/*
	 * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
	 * copy the event data to the skb and send it up the stack
	 */
	if (dhdp->logtrace_pkt_sendup) {
		DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
				(uint32)(ltoh16(msg->pyld_hdr.length) +
				sizeof(info_buf_payload_hdr_t) + 4)));
		dhd_sendup_info_buf(dhdp, (uint8 *)msg);
	}

	return BCME_OK;
}
10155 #endif /* EWP_EDL */
10156 
10157 #ifdef DHD_LOG_DUMP
10158 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC	4
/*
 * Trigger a full debug dump for 'subcmd' (one of the CMD_* debug-dump
 * sub-commands). Rate-limited: a dump requested within
 * DEBUG_DUMP_TRIGGER_INTERVAL_SEC of the previous one is skipped.
 * Depending on build options this schedules the log dump, a firmware
 * core dump, and a packet-log dump.
 */
void
dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
{
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	log_dump_type_t *flush_type;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
	uint64 current_time_sec;

	if (!dhdp) {
		DHD_ERROR(("dhdp is NULL !\n"));
		return;
	}

	if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
		DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
		return;
	}

	current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

	DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
		__FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
		DEBUG_DUMP_TRIGGER_INTERVAL_SEC));

	/* throttle back-to-back dump requests */
	if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
		DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
			__FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
		return;
	}

	clear_debug_dump_time(dhdp->debug_dump_time_str);
#ifdef DHD_PCIE_RUNTIMEPM
	/* wake up RPM if SYSDUMP is triggered */
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */
	/*  */

	dhdp->debug_dump_subcmd = subcmd;

	/* remember when this dump was triggered, for the throttle above */
	dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	/* flush_type is freed at do_dhd_log_dump function */
	flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
	if (flush_type) {
		*flush_type = DLD_BUF_TYPE_ALL;
		dhd_schedule_log_dump(dhdp, flush_type);
	} else {
		DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
		return;
	}
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/* Inside dhd_mem_dump, event notification will be sent to HAL and
	 * from other context DHD pushes memdump, debug_dump and pktlog dump
	 * to HAL and HAL will write into file
	 */
#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
	dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */

#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	dhd_schedule_pktlog_dump(dhdp);
#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
}
10225 #endif /* DHD_LOG_DUMP */
10226 
10227 #if (defined(LINUX) || defined(DHD_EFI)) && defined(SHOW_LOGTRACE)
10228 int
dhd_print_fw_ver_from_file(dhd_pub_t * dhdp,char * fwpath)10229 dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
10230 {
10231 	void *file = NULL;
10232 	int size = 0;
10233 	char buf[FW_VER_STR_LEN];
10234 	char *str = NULL;
10235 	int ret = BCME_OK;
10236 
10237 	if (!fwpath)
10238 		return BCME_BADARG;
10239 
10240 	file = dhd_os_open_image1(dhdp, fwpath);
10241 	if (!file) {
10242 		ret = BCME_ERROR;
10243 		goto exit;
10244 	}
10245 	size = dhd_os_get_image_size(file);
10246 	if (!size) {
10247 		ret = BCME_ERROR;
10248 		goto exit;
10249 	}
10250 
10251 	/* seek to the last 'X' bytes in the file */
10252 	if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
10253 		ret = BCME_ERROR;
10254 		goto exit;
10255 	}
10256 
10257 	/* read the last 'X' bytes of the file to a buffer */
10258 	memset(buf, 0, FW_VER_STR_LEN);
10259 	if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
10260 		ret = BCME_ERROR;
10261 		goto exit;
10262 	}
10263 	/* search for 'Version' in the buffer */
10264 	str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
10265 	if (!str) {
10266 		ret = BCME_ERROR;
10267 		goto exit;
10268 	}
10269 	/* go back in the buffer to the last ascii character */
10270 	while (str != buf &&
10271 		(*str >= ' ' && *str <= '~')) {
10272 		--str;
10273 	}
10274 	/* reverse the final decrement, so that str is pointing
10275 	* to the first ascii character in the buffer
10276 	*/
10277 	++str;
10278 
10279 	if (strlen(str) > (FW_VER_STR_LEN - 1)) {
10280 		ret = BCME_BADLEN;
10281 		goto exit;
10282 	}
10283 
10284 	DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
10285 	/* copy to global variable, so that in case FW load fails, the
10286 	* core capture logs will contain FW version read from the file
10287 	*/
10288 	memset(fw_version, 0, FW_VER_STR_LEN);
10289 	strlcpy(fw_version, str, FW_VER_STR_LEN);
10290 
10291 exit:
10292 	if (file)
10293 		dhd_os_close_image1(dhdp, file);
10294 
10295 	return ret;
10296 }
10297 #endif /* LINUX || DHD_EFI */
10298 
10299 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
/*
 * Zero the per-slot AWDL statistics under the AWDL stats spinlock.
 */
void
dhd_clear_awdl_stats(dhd_pub_t *dhd)
{
	unsigned long flags;
	/*
	 * Since event path(ex: WLC_E_AWDL_AW) and bus path(tx status process) update
	 * the AWDL data acquire lock before clearing the AWDL stats.
	 */
	DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, flags);
	memset(dhd->awdl_stats, 0, sizeof(dhd->awdl_stats));
	DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, flags);
}
10312 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
10313 
10314 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
10315 
/*
 * Build the vendor HANG-event extended-info string for an ioctl
 * response timeout into dhd->hang_info.
 *
 * Format: "<reason> <ver> <timestamp> <ifidx> <cmd> <len> <set> <used>
 * <needed>" followed by the ioctl buffer contents as space/HANG_RAW_DEL
 * separated 32-bit hex words, capped at HANG_FIELD_CNT_MAX fields.
 * dhd->hang_info_cnt tracks how many fields were written.
 */
static void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
	int remain_len;
	int i;
	int *cnt;
	char *dest;
	int bytes_written;
	uint32 ioc_dwlen = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	cnt = &dhd->hang_info_cnt;
	dest = dhd->hang_info;

	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
	(*cnt) = 0;

	bytes_written = 0;
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;

	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	/* fixed header fields: reason, version, timestamp and ioctl metadata */
	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
			HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
			dhd->debug_dump_time_hang_str,
			ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	/* Access ioc->buf only if the ioc->len is more than 4 bytes */
	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
	if (ioc_dwlen > 0) {
		const uint32 *ioc_buf = (const uint32 *)ioc->buf;

		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
		/* first word has no leading delimiter */
		bytes_written += scnprintf(&dest[bytes_written], remain_len,
			"%08x", *(uint32 *)(ioc_buf++));
		GCC_DIAGNOSTIC_POP();
		(*cnt)++;
		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
			return;
		}

		/* NOTE(review): this bound uses '<= HANG_FIELD_CNT_MAX' while the
		 * check above uses '>='; confirm whether one extra field is intended
		 */
		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
			GCC_DIAGNOSTIC_POP();
		}
	}

	DHD_INFO(("%s hang info len: %d data: %s\n",
		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
10380 
10381 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10382 
10383 #if defined(DHD_H2D_LOG_TIME_SYNC)
10384 /*
10385  * Helper function:
10386  * Used for Dongle console message time syncing with Host printk
10387  */
dhd_h2d_log_time_sync(dhd_pub_t * dhd)10388 void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
10389 {
10390 	uint64 ts;
10391 
10392 	/*
10393 	 * local_clock() returns time in nano seconds.
10394 	 * Dongle understand only milli seconds time.
10395 	 */
10396 	ts = local_clock();
10397 	/* Nano seconds to milli seconds */
10398 	do_div(ts, 1000000);
10399 	if (dhd_wl_ioctl_set_intiovar(dhd,  "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
10400 		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
10401 		/* Stopping HOST Dongle console time syncing */
10402 		dhd->dhd_rte_time_sync_ms = 0;
10403 	}
10404 }
10405 #endif /* DHD_H2D_LOG_TIME_SYNC */
10406 
10407 #if defined(LINUX) || defined(linux)
/* Configurations of ecounters to be enabled by default in FW.
 * Each row requests one stats report (the XTLV id in the third column)
 * at one of three scopes (first column): global, per-slice, or
 * per-interface.  The second column is the slice/interface index and is
 * meaningful only for the SLICE/IFACE scopes.
 */
static ecounters_cfg_t ecounters_cfg_tbl[] = {
	/* Global ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},

	/* Slice specific ecounters (slice 0 and slice 1) */
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},

	/* Interface specific ecounters (primary interface 0) */
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},

	/* secondary interface */
	/* XXX REMOVE for temporal, will be enabled after decision
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT},
	*/
};
10434 
/* Event-triggered ecounters configuration.
 * XXX: Same event id shall be defined in consecutive order in the below
 * table — __dhd_event_ecounters_get_event_id_array() relies on rows with
 * equal event_id being adjacent when it builds its index array.
 */
static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
	/* Interface specific event ecounters */
	{WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
};
10440 
/* Accepts an argument to -s, -g or -f and creates an XTLV.
 *
 * Builds a WL_ECOUNTERS_XTLV_REPORT_REQ container XTLV that wraps one
 * ecounters_stats_types_report_req_t, which in turn carries a single
 * empty XTLV whose id is the requested stats report type.
 *
 * dhd          : driver context (used for OSL allocation).
 * type         : ECOUNTERS_STATS_TYPES_FLAG_{GLOBAL,SLICE,IFACE}.
 * if_slice_idx : slice index (SLICE scope) or interface index (IFACE
 *                scope); unused for GLOBAL.
 * stats_rep    : XTLV id of the stats report being requested.
 * xtlv         : out parameter. On BCME_OK it points at a freshly
 *                allocated buffer whose ownership passes to the caller;
 *                on failure it is set to NULL and nothing is leaked.
 *
 * Returns BCME_OK, BCME_NOMEM, or BCME_ERROR.
 */
int
dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
	uint16 stats_rep, uint8 **xtlv)
{
	uint8 *req_xtlv = NULL;
	ecounters_stats_types_report_req_t *req;
	bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
	ecountersv2_xtlv_list_elt_t temp;
	uint16 xtlv_len = 0, total_len = 0;
	int rc = BCME_OK;

	/* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
	temp.id = stats_rep;
	temp.len = 0;

	/* Hence len/data = 0/NULL: the inner XTLV is header-only */
	xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;

	/* Total length of the container: outer XTLV header + fixed part of
	 * the report request + the inner XTLV computed above.
	 */
	total_len = BCM_XTLV_HDR_SIZE +
		OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;

	/* Now allocate a structure for the entire request */
	if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
		rc = BCME_NOMEM;
		goto fail;
	}

	/* container XTLV context */
	bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
		BCM_XTLV_OPTION_ALIGN32);

	/* Fill other XTLVs in the container. Leave space for XTLV headers */
	req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
	req->flags = type;
	if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
		req->slice_mask = 0x1 << if_slice_idx;
	} else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
		req->if_index = if_slice_idx;
	}

	/* Fill remaining XTLVs: the header-only stats-type XTLV */
	bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
		BCM_XTLV_OPTION_ALIGN32);
	if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
		DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
		rc = BCME_ERROR;
		goto fail;
	}

	/* fill the top level container and get done with the XTLV container.
	 * NOTE: this rewrites the outer header in-place over the already
	 * populated req body, so it must come after the body is filled.
	 */
	rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
		bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
		stats_types_req));

	if (rc) {
		DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
		goto fail;
	}

fail:
	/* Success also falls through here; only free on error */
	if (rc && req_xtlv) {
		MFREE(dhd->osh, req_xtlv, total_len);
		req_xtlv = NULL;
	}

	/* update the xtlv pointer */
	*xtlv = req_xtlv;
	return rc;
}
10512 
10513 static int
dhd_ecounter_autoconfig(dhd_pub_t * dhd)10514 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
10515 {
10516 	int rc = BCME_OK;
10517 	uint32 buf;
10518 	rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
10519 
10520 	if (rc != BCME_OK) {
10521 
10522 		if (rc != BCME_UNSUPPORTED) {
10523 			rc = BCME_OK;
10524 			DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
10525 		} else {
10526 			DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
10527 		}
10528 	}
10529 
10530 	return rc;
10531 }
10532 
10533 int
dhd_ecounter_configure(dhd_pub_t * dhd,bool enable)10534 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
10535 {
10536 	int rc = BCME_OK;
10537 	if (enable) {
10538 		if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
10539 			if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
10540 				DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
10541 			} else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
10542 				DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
10543 			}
10544 		}
10545 	} else {
10546 		if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
10547 			DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
10548 		} else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
10549 			DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
10550 		}
10551 	}
10552 	return rc;
10553 }
10554 
10555 int
dhd_start_ecounters(dhd_pub_t * dhd)10556 dhd_start_ecounters(dhd_pub_t *dhd)
10557 {
10558 	uint8 i = 0;
10559 	uint8 *start_ptr;
10560 	int rc = BCME_OK;
10561 	bcm_xtlv_t *elt;
10562 	ecounters_config_request_v2_t *req = NULL;
10563 	ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
10564 	ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
10565 	uint16 total_processed_containers_len = 0;
10566 
10567 	for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
10568 		ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
10569 
10570 		if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
10571 			MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
10572 			DHD_ERROR(("Ecounters v2: No memory to process\n"));
10573 			goto fail;
10574 		}
10575 
10576 		rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
10577 			ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
10578 
10579 		if (rc) {
10580 			DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
10581 				ecounter_stat->stats_rep, rc));
10582 
10583 			/* Free allocated memory and go to fail to release any memories allocated
10584 			 * in previous iterations. Note that list_elt->data gets populated in
10585 			 * dhd_create_ecounters_params() and gets freed there itself.
10586 			 */
10587 			MFREE(dhd->osh, list_elt, sizeof(*list_elt));
10588 			list_elt = NULL;
10589 			goto fail;
10590 		}
10591 		elt = (bcm_xtlv_t *) list_elt->data;
10592 
10593 		/* Put the elements in the order they are processed */
10594 		if (processed_containers_list == NULL) {
10595 			processed_containers_list = list_elt;
10596 		} else {
10597 			tail->next = list_elt;
10598 		}
10599 		tail = list_elt;
10600 		/* Size of the XTLV returned */
10601 		total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
10602 	}
10603 
10604 	/* Now create ecounters config request with totallength */
10605 	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
10606 		total_processed_containers_len);
10607 
10608 	if (req == NULL) {
10609 		rc = BCME_NOMEM;
10610 		goto fail;
10611 	}
10612 
10613 	req->version = ECOUNTERS_VERSION_2;
10614 	req->logset = EVENT_LOG_SET_ECOUNTERS;
10615 	req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
10616 	req->num_reports = ECOUNTERS_NUM_REPORTS;
10617 	req->len = total_processed_containers_len +
10618 		OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
10619 
10620 	/* Copy config */
10621 	start_ptr = req->ecounters_xtlvs;
10622 
10623 	/* Now go element by element in the list */
10624 	while (processed_containers_list) {
10625 		list_elt = processed_containers_list;
10626 
10627 		elt = (bcm_xtlv_t *)list_elt->data;
10628 
10629 		memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10630 		start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10631 		processed_containers_list = processed_containers_list->next;
10632 
10633 		/* Free allocated memories */
10634 		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
10635 		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
10636 	}
10637 
10638 	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
10639 		DHD_ERROR(("failed to start ecounters\n"));
10640 	}
10641 
10642 fail:
10643 	if (req) {
10644 		MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
10645 	}
10646 
10647 	/* Now go element by element in the list */
10648 	while (processed_containers_list) {
10649 		list_elt = processed_containers_list;
10650 		elt = (bcm_xtlv_t *)list_elt->data;
10651 		processed_containers_list = processed_containers_list->next;
10652 
10653 		/* Free allocated memories */
10654 		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
10655 		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
10656 	}
10657 	return rc;
10658 }
10659 
10660 int
dhd_stop_ecounters(dhd_pub_t * dhd)10661 dhd_stop_ecounters(dhd_pub_t *dhd)
10662 {
10663 	int rc = BCME_OK;
10664 	ecounters_config_request_v2_t *req;
10665 
10666 	/* Now create ecounters config request with totallength */
10667 	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
10668 
10669 	if (req == NULL) {
10670 		rc = BCME_NOMEM;
10671 		goto fail;
10672 	}
10673 
10674 	req->version = ECOUNTERS_VERSION_2;
10675 	req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
10676 
10677 	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
10678 		DHD_ERROR(("failed to stop ecounters\n"));
10679 	}
10680 
10681 fail:
10682 	if (req) {
10683 		MFREE(dhd->osh, req, sizeof(*req));
10684 	}
10685 	return rc;
10686 }
10687 
/* Configured event_id_array for event ecounters.
 * event_id : WLC_E_* event id that triggers a report.
 * str_idx  : index of the FIRST row carrying this event id in
 *            event_ecounters_cfg_tbl[] (rows with the same id must be
 *            consecutive in that table).
 */
typedef struct event_id_array {
	uint8	event_id;
	uint8	str_idx;
} event_id_array_t;
10693 
10694 /* get event id array only from event_ecounters_cfg_tbl[] */
__dhd_event_ecounters_get_event_id_array(event_id_array_t * event_array)10695 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
10696 {
10697 	uint8 i;
10698 	uint8 idx = 0;
10699 	int32 prev_evt_id = -1;
10700 
10701 	for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
10702 		if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
10703 			if (prev_evt_id >= 0)
10704 				idx++;
10705 			event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
10706 			event_array[idx].str_idx = i;
10707 		}
10708 		prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
10709 	}
10710 	return idx;
10711 }
10712 
10713 /* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
10714 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
10715 
10716 int
dhd_start_event_ecounters(dhd_pub_t * dhd)10717 dhd_start_event_ecounters(dhd_pub_t *dhd)
10718 {
10719 	uint8 i, j = 0;
10720 	uint8 event_id_cnt = 0;
10721 	uint16 processed_containers_len = 0;
10722 	uint16 max_xtlv_len = 0;
10723 	int rc = BCME_OK;
10724 	uint8 *ptr;
10725 	uint8 *data;
10726 	event_id_array_t *id_array;
10727 	bcm_xtlv_t *elt = NULL;
10728 	event_ecounters_config_request_v2_t *req = NULL;
10729 
10730 	/* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */
10731 	id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
10732 		ARRAYSIZE(event_ecounters_cfg_tbl));
10733 
10734 	if (id_array == NULL) {
10735 		rc = BCME_NOMEM;
10736 		goto fail;
10737 	}
10738 	event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
10739 
10740 	max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
10741 		OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
10742 		ECNTRS_MAX_XTLV_NUM);
10743 
10744 	/* Now create ecounters config request with max allowed length */
10745 	req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
10746 		sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
10747 
10748 	if (req == NULL) {
10749 		rc = BCME_NOMEM;
10750 		goto fail;
10751 	}
10752 
10753 	for (i = 0; i <= event_id_cnt; i++) {
10754 		/* req initialization by event id */
10755 		req->version = ECOUNTERS_VERSION_2;
10756 		req->logset = EVENT_LOG_SET_ECOUNTERS;
10757 		req->event_id = id_array[i].event_id;
10758 		req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
10759 		req->len = 0;
10760 		processed_containers_len = 0;
10761 
10762 		/* Copy config */
10763 		ptr = req->ecounters_xtlvs;
10764 
10765 		for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
10766 			event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
10767 			if (id_array[i].event_id != event_ecounter_stat->event_id)
10768 				break;
10769 
10770 			rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
10771 				event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
10772 				&data);
10773 
10774 			if (rc) {
10775 				DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
10776 					__FUNCTION__, event_ecounter_stat->stats_rep, rc));
10777 				goto fail;
10778 			}
10779 
10780 			elt = (bcm_xtlv_t *)data;
10781 
10782 			memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10783 			ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10784 			processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
10785 
10786 			/* Free allocated memories alloced by dhd_create_ecounters_params */
10787 			MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
10788 
10789 			if (processed_containers_len > max_xtlv_len) {
10790 				DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
10791 					__FUNCTION__));
10792 				rc = BCME_BADLEN;
10793 				goto fail;
10794 			}
10795 		}
10796 
10797 		req->len = processed_containers_len +
10798 			OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
10799 
10800 		DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
10801 			__FUNCTION__, req->version, req->logset, req->event_id,
10802 			req->flags, req->len));
10803 
10804 		rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
10805 
10806 		if (rc < 0) {
10807 			DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
10808 				req->event_id, rc));
10809 			goto fail;
10810 		}
10811 	}
10812 
10813 fail:
10814 	/* Free allocated memories */
10815 	if (req) {
10816 		MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
10817 	}
10818 	if (id_array) {
10819 		MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
10820 			ARRAYSIZE(event_ecounters_cfg_tbl));
10821 	}
10822 
10823 	return rc;
10824 }
10825 
10826 int
dhd_stop_event_ecounters(dhd_pub_t * dhd)10827 dhd_stop_event_ecounters(dhd_pub_t *dhd)
10828 {
10829 	int rc = BCME_OK;
10830 	event_ecounters_config_request_v2_t *req;
10831 
10832 	/* Now create ecounters config request with totallength */
10833 	req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
10834 
10835 	if (req == NULL) {
10836 		rc = BCME_NOMEM;
10837 		goto fail;
10838 	}
10839 
10840 	req->version = ECOUNTERS_VERSION_2;
10841 	req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
10842 	req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
10843 
10844 	if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
10845 		DHD_ERROR(("failed to stop event_ecounters\n"));
10846 	}
10847 
10848 fail:
10849 	if (req) {
10850 		MFREE(dhd->osh, req, sizeof(*req));
10851 	}
10852 	return rc;
10853 }
10854 #ifdef DHD_LOG_DUMP
/*
 * Drain one debug ring and export its records via dhd_export_debug_data().
 *
 * Output layout: text_hdr banner, then a log_dump_section_hdr_t, then the
 * raw ring records. The section header is written first with length 0 and
 * re-written afterwards at the remembered position with the final length.
 *
 * dhdp     : driver context; its concise_dbg_buf is borrowed as staging
 *            space for ring records (function logs and bails if absent).
 * ring_ptr : dhd_dbg_ring_t to dump; suspended for the duration, and its
 *            read/write pointers are reset to 0 on exit.
 * user_buf : destination handle forwarded to dhd_export_debug_data()
 *            (presumably a user-space buffer — confirm against callers).
 * sec_hdr  : caller-provided header; type/length are filled in here.
 * text_hdr : NUL-terminated banner string written before the header.
 * buflen   : not used by this function.
 * sec_type : value stored into sec_hdr->type.
 *
 * Returns the last dhd_export_debug_data() result, or BCME_BADARG on a
 * NULL argument.  NOTE(review): intermediate export errors are
 * overwritten by later calls and not individually checked.
 */
int
dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
		log_dump_section_hdr_t *sec_hdr,
		char *text_hdr, int buflen, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0;
	void *data = NULL;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
	int pos = 0;
	int fpos_sechdr = 0;

	if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
		return BCME_BADARG;
	}
	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		fpos_sechdr = pos;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &pos);
		do {
			/* Pull one record at a time until the ring is empty */
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
			}
			DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
		} while ((rlen > 0));
		/* now update the section header length in the file */
		/* Complete ring size is dumped by HAL, hence updating length to ring size */
		sec_hdr->length = ring->ring_size;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &fpos_sechdr);
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	return ret;
}
10922 
/*
 * Drain one debug ring into a file via dhd_os_write_file_posn().
 *
 * Same layout as dhd_dump_debug_ring(): text_hdr banner, then a
 * log_dump_section_hdr_t (written with length 0 and patched at the
 * remembered file offset once the real length is known), then the raw
 * ring records.  The ring is suspended while being drained and its
 * read/write pointers are reset on exit — including the early-return
 * error path.
 *
 * dhdp      : driver context; concise_dbg_buf is borrowed as staging
 *             space (function logs and skips the dump if absent).
 * ring_ptr  : dhd_dbg_ring_t to dump.
 * file      : open file handle for dhd_os_write_file_posn().
 * file_posn : in/out file offset, advanced by every write.
 * sec_hdr   : caller-provided header; initialized and filled in here.
 * text_hdr  : NUL-terminated banner string.
 * sec_type  : value stored into sec_hdr->type.
 *
 * Returns BCME_OK, BCME_ERROR on a failed record write, or BCME_BADARG.
 */
int
dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
		unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
		char *text_hdr, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0, total_len = 0;
	void *data = NULL;
	unsigned long fpos_sechdr = 0;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;

	if (!dhdp || !ring || !file || !sec_hdr ||
		!file_posn || !text_hdr)
		return BCME_BADARG;

	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		dhd_os_write_file_posn(file, file_posn, text_hdr,
				strlen(text_hdr));
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		dhd_init_sec_hdr(sec_hdr);
		fpos_sechdr = *file_posn;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
				sizeof(*sec_hdr));
		do {
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
				if (ret < 0) {
					DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
					/* reactivate the ring before bailing out */
					DHD_DBG_RING_LOCK(ring->lock, flags);
					ring->state = RING_ACTIVE;
					DHD_DBG_RING_UNLOCK(ring->lock, flags);
					return BCME_ERROR;
				}
			}
			total_len += rlen;
		} while (rlen > 0);
		/* now update the section header length in the file */
		sec_hdr->length = total_len;
		dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}

	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);
	return BCME_OK;
}
10997 
10998 /* logdump cookie */
10999 #define MAX_LOGUDMP_COOKIE_CNT	10u
11000 #define LOGDUMP_COOKIE_STR_LEN	50u
11001 int
dhd_logdump_cookie_init(dhd_pub_t * dhdp,uint8 * buf,uint32 buf_size)11002 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
11003 {
11004 	uint32 ring_size;
11005 
11006 	if (!dhdp || !buf) {
11007 		DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
11008 		return BCME_ERROR;
11009 	}
11010 
11011 	ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
11012 	if (buf_size < ring_size) {
11013 		DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
11014 			ring_size, buf_size));
11015 		return BCME_ERROR;
11016 	}
11017 
11018 	dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
11019 		LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
11020 		DHD_RING_TYPE_FIXED);
11021 	if (!dhdp->logdump_cookie) {
11022 		DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
11023 		return BCME_ERROR;
11024 	}
11025 
11026 	return BCME_OK;
11027 }
11028 
11029 void
dhd_logdump_cookie_deinit(dhd_pub_t * dhdp)11030 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
11031 {
11032 	if (!dhdp) {
11033 		return;
11034 	}
11035 	if (dhdp->logdump_cookie) {
11036 		dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
11037 	}
11038 
11039 	return;
11040 }
11041 
11042 #ifdef DHD_TX_PROFILE
11043 int
dhd_tx_profile_detach(dhd_pub_t * dhdp)11044 dhd_tx_profile_detach(dhd_pub_t *dhdp)
11045 {
11046 	int result = BCME_ERROR;
11047 
11048 	if (dhdp != NULL && dhdp->protocol_filters != NULL) {
11049 		MFREE(dhdp->osh, dhdp->protocol_filters, DHD_MAX_PROFILES *
11050 				sizeof(*(dhdp->protocol_filters)));
11051 		dhdp->protocol_filters = NULL;
11052 
11053 		result = BCME_OK;
11054 	}
11055 
11056 	return result;
11057 }
11058 
11059 int
dhd_tx_profile_attach(dhd_pub_t * dhdp)11060 dhd_tx_profile_attach(dhd_pub_t *dhdp)
11061 {
11062 	int result = BCME_ERROR;
11063 
11064 	if (dhdp != NULL) {
11065 		dhdp->protocol_filters = (dhd_tx_profile_protocol_t*)MALLOCZ(dhdp->osh,
11066 				DHD_MAX_PROFILES * sizeof(*(dhdp->protocol_filters)));
11067 
11068 		if (dhdp->protocol_filters != NULL) {
11069 			result = BCME_OK;
11070 		}
11071 	}
11072 
11073 	if (result != BCME_OK) {
11074 		DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n",
11075 			__FUNCTION__));
11076 	}
11077 
11078 	return result;
11079 }
11080 #endif /* defined(DHD_TX_PROFILE) */
11081 
11082 void
dhd_logdump_cookie_save(dhd_pub_t * dhdp,char * cookie,char * type)11083 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
11084 {
11085 	char *ptr;
11086 
11087 	if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
11088 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
11089 			" type = %p, cookie_cfg:%p\n", __FUNCTION__,
11090 			dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
11091 		return;
11092 	}
11093 	ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
11094 	if (ptr == NULL) {
11095 		DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
11096 		return;
11097 	}
11098 	scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
11099 	return;
11100 }
11101 
11102 int
dhd_logdump_cookie_get(dhd_pub_t * dhdp,char * ret_cookie,uint32 buf_size)11103 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
11104 {
11105 	char *ptr;
11106 
11107 	if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
11108 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
11109 			"cookie=%p cookie_cfg:%p\n", __FUNCTION__,
11110 			dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
11111 		return BCME_ERROR;
11112 	}
11113 	ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
11114 	if (ptr == NULL) {
11115 		DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
11116 		return BCME_ERROR;
11117 	}
11118 	memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
11119 	dhd_ring_free_first(dhdp->logdump_cookie);
11120 	return BCME_OK;
11121 }
11122 
11123 int
dhd_logdump_cookie_count(dhd_pub_t * dhdp)11124 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
11125 {
11126 	if (!dhdp || !dhdp->logdump_cookie) {
11127 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
11128 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
11129 		return 0;
11130 	}
11131 	return dhd_ring_get_cur_size(dhdp->logdump_cookie);
11132 }
11133 
/*
 * Format the whole cookie ring into buf and export it (banner, section
 * header, then the concatenated cookie strings) through
 * dhd_export_debug_data() to either a file (fp) or a user buffer.
 *
 * The ring's read/write indices are snapshotted before draining and
 * restored afterwards, so reading here does not consume the entries.
 * buf must be buf_size bytes and is wholly owned by the caller.
 *
 * Returns the last export result (negative on write failure) or a
 * dhd_logdump_cookie_get() error.
 */
static inline int
__dhd_log_dump_cookie_to_file(
	dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
	char *buf, uint32 buf_size)
{

	uint32 remain = buf_size;
	int ret = BCME_ERROR;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	uint32 read_idx;
	uint32 write_idx;

	/* snapshot ring indices so the drain below is non-destructive */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			return ret;
		}
		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
	}
	/* restore the indices consumed by the drain above */
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);

	ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
		return ret;
	}
	sec_hdr.magic = LOG_DUMP_MAGIC;
	sec_hdr.timestamp = local_clock();
	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
	/* length = bytes actually formatted into buf */
	sec_hdr.length = buf_size - remain;

	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
		return ret;
	}

	ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
	}

	return ret;
}
11183 
/*
 * Compute the byte length the cookie section will occupy in a log dump:
 * banner + section header + the total length of all cookie strings.
 *
 * The ring is drained into a scratch buffer to measure the strings and
 * its read/write indices are restored afterwards, so the contents are
 * preserved.  Returns 0 on NULL arguments, allocation failure, or a
 * ring read error.
 */
uint32
dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
{
	int len = 0;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	char *buf = NULL;
	int ret = BCME_ERROR;
	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
	uint32 read_idx;
	uint32 write_idx;
	uint32 remain;

	remain = buf_size;

	if (!dhdp || !dhdp->logdump_cookie) {
		DHD_ERROR(("%s At least one ptr is NULL "
			"dhdp = %p cookie %p\n",
			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
		goto exit;
	}

	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
	if (!buf) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		goto exit;
	}

	/* snapshot ring indices so the measuring drain is non-destructive */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			goto exit;
		}
		remain -= (uint32)strlen(tmp_buf);
	}
	/* restore the indices consumed above */
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
	len += strlen(COOKIE_LOG_HDR);
	len += sizeof(sec_hdr);
	len += (buf_size - remain);
exit:
	if (buf)
		MFREE(dhdp->osh, buf, buf_size);
	return len;
}
11232 
/*
 * Export the cookie section of a log dump into a user buffer:
 * banner, then a log_dump_section_hdr_t, then all cookie strings.
 *
 * The ring is drained into a temporary buffer and its read/write
 * indices are restored afterwards, so the entries are preserved.
 *
 * Returns the last dhd_export_debug_data() result, or BCME_ERROR /
 * a cookie-get error on failure.
 */
int
dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
{
	int ret = BCME_ERROR;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	char *buf = NULL;
	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
	int pos = 0;
	uint32 read_idx;
	uint32 write_idx;
	uint32 remain;

	remain = buf_size;

	if (!dhdp || !dhdp->logdump_cookie) {
		DHD_ERROR(("%s At least one ptr is NULL "
			"dhdp = %p cookie %p\n",
			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
		goto exit;
	}

	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
	if (!buf) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		goto exit;
	}

	/* snapshot ring indices so the drain below is non-destructive */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			goto exit;
		}
		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
	}
	/* restore the indices consumed above */
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
	ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
	sec_hdr.magic = LOG_DUMP_MAGIC;
	sec_hdr.timestamp = local_clock();
	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
	/* length = bytes actually formatted into buf */
	sec_hdr.length = buf_size - remain;
	ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
	ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
exit:
	if (buf)
		MFREE(dhdp->osh, buf, buf_size);
	return ret;
}
11285 
11286 int
dhd_log_dump_cookie_to_file(dhd_pub_t * dhdp,void * fp,const void * user_buf,unsigned long * f_pos)11287 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
11288 {
11289 	char *buf;
11290 	int ret = BCME_ERROR;
11291 	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
11292 
11293 	if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
11294 		DHD_ERROR(("%s At least one ptr is NULL "
11295 			"dhdp = %p cookie %p fp = %p f_pos = %p\n",
11296 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
11297 		return ret;
11298 	}
11299 
11300 	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
11301 	if (!buf) {
11302 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11303 		return ret;
11304 	}
11305 	ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
11306 	MFREE(dhdp->osh, buf, buf_size);
11307 
11308 	return ret;
11309 }
11310 #endif /* DHD_LOG_DUMP */
11311 #endif /* LINUX || linux */
11312 
11313 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
11314 int
dhd_control_he_enab(dhd_pub_t * dhd,uint8 he_enab)11315 dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
11316 {
11317 	int ret = BCME_OK;
11318 	bcm_xtlv_t *pxtlv = NULL;
11319 	uint8 mybuf[DHD_IOVAR_BUF_SIZE];
11320 	uint16 mybuf_len = sizeof(mybuf);
11321 	pxtlv = (bcm_xtlv_t *)mybuf;
11322 
11323 	ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
11324 			&he_enab, BCM_XTLV_OPTION_ALIGN32);
11325 
11326 	if (ret != BCME_OK) {
11327 		ret = -EINVAL;
11328 		DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
11329 		return ret;
11330 	}
11331 
11332 	ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
11333 	if (ret < 0) {
11334 		DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
11335 				__FUNCTION__, he_enab, bcmerrorstr(ret)));
11336 	} else {
11337 		DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
11338 	}
11339 
11340 	return ret;
11341 }
11342 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
11343 
11344 #ifdef CONFIG_ROAM_RSSI_LIMIT
11345 int
dhd_roam_rssi_limit_get(dhd_pub_t * dhd,int * lmt2g,int * lmt5g)11346 dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g)
11347 {
11348 	wlc_roam_rssi_limit_t *plmt;
11349 	wlc_roam_rssi_lmt_info_v1_t *pinfo;
11350 	int ret = BCME_OK;
11351 	int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
11352 
11353 	plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
11354 	if (!plmt) {
11355 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11356 		return BCME_NOMEM;
11357 	}
11358 
11359 	/* Get roam rssi limit */
11360 	ret = dhd_iovar(dhd, 0, "roam_rssi_limit", NULL, 0, (char *)plmt, plmt_len, FALSE);
11361 	if (ret < 0) {
11362 		DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
11363 		goto done;
11364 	}
11365 
11366 	if (plmt->ver != WLC_ROAM_RSSI_LMT_VER_1) {
11367 	    ret = BCME_VERSION;
11368 	    goto done;
11369 	}
11370 
11371 	pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
11372 	*lmt2g = (int)pinfo->rssi_limit_2g;
11373 	*lmt5g = (int)pinfo->rssi_limit_5g;
11374 
11375 done:
11376 	if (plmt) {
11377 		MFREE(dhd->osh, plmt, plmt_len);
11378 	}
11379 	return ret;
11380 }
11381 
11382 int
dhd_roam_rssi_limit_set(dhd_pub_t * dhd,int lmt2g,int lmt5g)11383 dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g)
11384 {
11385 	wlc_roam_rssi_limit_t *plmt;
11386 	wlc_roam_rssi_lmt_info_v1_t *pinfo;
11387 	int ret = BCME_OK;
11388 	int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
11389 
11390 	/* Sanity check RSSI limit Value */
11391 	if ((lmt2g < ROAMRSSI_2G_MIN) || (lmt2g > ROAMRSSI_2G_MAX)) {
11392 		DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__));
11393 		return BCME_RANGE;
11394 	}
11395 	if ((lmt2g < ROAMRSSI_5G_MIN) || (lmt2g > ROAMRSSI_5G_MAX)) {
11396 		DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__));
11397 		return BCME_RANGE;
11398 	}
11399 
11400 	plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
11401 	if (!plmt) {
11402 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11403 		return BCME_NOMEM;
11404 	}
11405 	plmt->ver = WLC_ROAM_RSSI_LMT_VER_1;
11406 	plmt->len = sizeof(*pinfo);
11407 	pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
11408 	pinfo->rssi_limit_2g = (int16)lmt2g;
11409 	pinfo->rssi_limit_5g = (int16)lmt5g;
11410 
11411 	/* Set roam rssi limit */
11412 	ret = dhd_iovar(dhd, 0, "roam_rssi_limit", (char *)plmt, plmt_len, NULL, 0, TRUE);
11413 	if (ret < 0) {
11414 		DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
11415 		goto done;
11416 	}
11417 done:
11418 	if (plmt) {
11419 		MFREE(dhd->osh, plmt, plmt_len);
11420 	}
11421 	return ret;
11422 }
11423 #endif /* CONFIG_ROAM_RSSI_LIMIT */
11424 
11425 #ifdef CONFIG_ROAM_MIN_DELTA
11426 int
dhd_roam_min_delta_get(dhd_pub_t * dhd,uint32 * dt2g,uint32 * dt5g)11427 dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g)
11428 {
11429 	wlc_roam_min_delta_t *pmin_delta;
11430 	wlc_roam_min_delta_info_v1_t *pmin_delta_info;
11431 	int ret = BCME_OK;
11432 	int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
11433 
11434 	pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
11435 	if (!pmin_delta) {
11436 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11437 		return BCME_NOMEM;
11438 	}
11439 
11440 	/* Get Minimum ROAM score delta */
11441 	ret = dhd_iovar(dhd, 0, "roam_min_delta", NULL, 0, (char *)pmin_delta, plen, FALSE);
11442 	if (ret < 0) {
11443 		DHD_ERROR(("%s Failed to Get roam_min_delta %d\n", __FUNCTION__, ret));
11444 		goto done;
11445 	}
11446 
11447 	if (pmin_delta->ver != WLC_ROAM_MIN_DELTA_VER_1) {
11448 		ret = BCME_VERSION;
11449 		goto done;
11450 	}
11451 
11452 	pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
11453 	*dt2g = (uint32)pmin_delta_info->roam_min_delta_2g;
11454 	*dt5g = (uint32)pmin_delta_info->roam_min_delta_5g;
11455 
11456 done:
11457 	if (pmin_delta) {
11458 		MFREE(dhd->osh, pmin_delta, plen);
11459 	}
11460 	return ret;
11461 }
11462 
11463 int
dhd_roam_min_delta_set(dhd_pub_t * dhd,uint32 dt2g,uint32 dt5g)11464 dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g)
11465 {
11466 	wlc_roam_min_delta_t *pmin_delta;
11467 	wlc_roam_min_delta_info_v1_t *pmin_delta_info;
11468 	int ret = BCME_OK;
11469 	int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
11470 
11471 	/* Sanity check Minimum ROAM score delta */
11472 	if ((dt2g > ROAM_MIN_DELTA_MAX) || (dt5g > ROAM_MIN_DELTA_MAX)) {
11473 		DHD_ERROR(("%s Not In Range Minimum ROAM score delta, 2G: %d, 5G: %d\n",
11474 			__FUNCTION__, dt2g, dt5g));
11475 		return BCME_RANGE;
11476 	}
11477 
11478 	pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
11479 	if (!pmin_delta) {
11480 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11481 		return BCME_NOMEM;
11482 	}
11483 	pmin_delta->ver = WLC_ROAM_MIN_DELTA_VER_1;
11484 	pmin_delta->len = sizeof(*pmin_delta_info);
11485 	pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
11486 	pmin_delta_info->roam_min_delta_2g = (uint32)dt2g;
11487 	pmin_delta_info->roam_min_delta_5g = (uint32)dt5g;
11488 
11489 	/* Set Minimum ROAM score delta */
11490 	ret = dhd_iovar(dhd, 0, "roam_min_delta", (char *)pmin_delta, plen, NULL, 0, TRUE);
11491 	if (ret < 0) {
11492 		DHD_ERROR(("%s Failed to Set roam_min_delta %d\n", __FUNCTION__, ret));
11493 		goto done;
11494 	}
11495 done:
11496 	if (pmin_delta) {
11497 		MFREE(dhd->osh, pmin_delta, plen);
11498 	}
11499 	return ret;
11500 }
11501 #endif /* CONFIG_ROAM_MIN_DELTA */
11502 
11503 #ifdef HOST_SFH_LLC
11504 #define SSTLOOKUP(proto) (((proto) == 0x80f3) || ((proto) == 0x8137))
11505 /** Convert Ethernet to 802.3 per 802.1H (use bridge-tunnel if type in SST)
11506  * Note:- This function will overwrite the ethernet header in the pkt
11507  * with a 802.3 ethernet + LLC/SNAP header by utilising the headroom
11508  * in the packet. The pkt data pointer should be pointing to the
11509  * start of the packet (at the ethernet header) when the function is called.
11510  * The pkt data pointer will be pointing to the
11511  * start of the new 802.3 header if the function returns successfully
11512  *
11513  *
11514  * Original Ethernet (header length = 14):
11515  * ----------------------------------------------------------------------------------------
11516  * |                                                     |   DA   |   SA   | T |  Data... |
11517  * ----------------------------------------------------------------------------------------
11518  *                                                            6        6     2
11519  *
11520  * Conversion to 802.3 (header length = 22):
11521  *                     (LLC includes ether_type in last 2 bytes):
11522  * ----------------------------------------------------------------------------------------
11523  * |                                      |   DA   |   SA   | L | LLC/SNAP | T |  Data... |
11524  * ----------------------------------------------------------------------------------------
11525  *                                             6        6     2       6      2
11526  */
int
BCMFASTPATH(dhd_ether_to_8023_hdr)(osl_t *osh, struct ether_header *eh, void *p)
{
	struct ether_header *neh;
	struct dot11_llc_snap_header *lsh;
	uint16 plen, ether_type;

	/* The new header is created by growing the frame into the packet
	 * headroom, so DOT11_LLC_SNAP_HDR_LEN free bytes are required.
	 */
	if (PKTHEADROOM(osh, p) < DOT11_LLC_SNAP_HDR_LEN) {
		DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
		ASSERT(0);
		return BCME_BUFTOOSHORT;
	}

	/* Capture the ethertype before PKTPUSH: 'eh' points into the packet
	 * and the new 802.3 header partially overlays the old ethernet one.
	 */
	ether_type = ntoh16(eh->ether_type);
	neh = (struct ether_header *)PKTPUSH(osh, p, DOT11_LLC_SNAP_HDR_LEN);

	/* 802.3 MAC header */
	eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
	eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
	plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
	neh->ether_type = hton16(plen);	/* 802.3 carries a length, not a type */

	/* 802.2 LLC header */
	lsh = (struct dot11_llc_snap_header *)&neh[1];
	lsh->dsap = 0xaa;
	lsh->ssap = 0xaa;
	lsh->ctl = 0x03;

	/* 802.2 SNAP header Use RFC1042 or bridge-tunnel if type in SST per 802.1H */
	lsh->oui[0] = 0x00;
	lsh->oui[1] = 0x00;
	if (SSTLOOKUP(ether_type))
		lsh->oui[2] = 0xf8;	/* bridge-tunnel OUI 00:00:F8 */
	else
		lsh->oui[2] = 0x00;	/* RFC1042 OUI 00:00:00 */
	lsh->type = hton16(ether_type);

	return BCME_OK;
}
11566 
11567 /** Convert 802.3+LLC to ethernet
11568  * Note:- This function will overwrite the 802.3+LLC hdr in the pkt
11569  * with an ethernet header. The pkt data pointer should be pointing to the
11570  * start of the packet (at the 802.3 header) when the function is called.
11571  * The pkt data pointer will be pointing to the
11572  * start of the ethernet header if the function returns successfully
11573  */
int
BCMFASTPATH(dhd_8023_llc_to_ether_hdr)(osl_t *osh, struct ether_header *eh8023, void *p)
{
	struct dot11_llc_snap_header *lsh = NULL;
	uint16 ether_type = 0;
	uint8 *pdata = NULL;

	if (!p || !eh8023)
		return BCME_BADARG;

	pdata = PKTDATA(osh, p);
	ether_type = ntoh16(eh8023->ether_type);
	/* ether type in 802.3 hdr for sfh llc host insertion case
	 * contains length, replace it with actual ether type at the
	 * end of the LLC hdr
	 */
	if (ether_type < ETHER_TYPE_MIN) {
		/* 802.2 LLC header */
		lsh = (struct dot11_llc_snap_header *)(pdata + sizeof(*eh8023));
		/* lsh->type is already in network byte order; no swap needed */
		eh8023->ether_type = lsh->type;
		/* Strip the LLC/SNAP header, then rewrite the new frame head
		 * with the patched-up ethernet header.
		 */
		pdata = PKTPULL(osh, p, DOT11_LLC_SNAP_HDR_LEN);
		memcpy_s(pdata, sizeof(*eh8023), eh8023, sizeof(*eh8023));
	 } else {
		/* A value >= ETHER_TYPE_MIN means the frame is already
		 * ethernet-framed; refuse rather than corrupt it.
		 */
		DHD_ERROR_RLMT(("ethertype 0x%x is not a length !\n", ether_type));
		return BCME_BADARG;
	 }

	return BCME_OK;
}
11603 #endif /* HOST_SFH_LLC */
11604 
11605 #ifdef DHD_AWDL
11606 
11607 #define AWDL_MIN_EXTENSION_DEFAULT 0x3u
11608 #define AWDL_PRESENCE_MODE_DEFAULT 0x4u
11609 #define AWDL_FLAGS_DEFAULT 0x0000u
11610 #define AWDL_PID 0x0800u
11611 #define AWDL_USERDATA_SIZE 6u
11612 /** Convert Ethernet to 802.3 + AWDL LLC SNAP header
11613  * Note:- This function will overwrite the ethernet header in the pkt 'p'
11614  * with a 802.3 ethernet + AWDL LLC/SNAP header by utilising the headroom
11615  * in the packet. The pkt data pointer should be pointing to the
11616  * start of the packet (at the ethernet header) when the function is called.
11617  * The pkt data pointer will be pointing to the
11618  * start of the new 802.3 header if the function returns successfully
11619  */
int
BCMFASTPATH(dhd_ether_to_awdl_llc_hdr)(struct dhd_pub *dhd, struct ether_header *eh, void *p)
{
	osl_t *osh = dhd->osh;
	struct ether_header *neh;
	struct dot11_llc_snap_header *lsh;
	uint16 plen, ether_type;
	uint8 *awdl_data = NULL;
	uint16 *seq = NULL;
	uint16 *flags = NULL;
	uint16 *type = NULL;

	/* AWDL needs two LLC/SNAP-sized chunks of headroom: one for the
	 * SNAP header itself and one for the AWDL user data that follows.
	 */
	if (PKTHEADROOM(osh, p) < (2 * DOT11_LLC_SNAP_HDR_LEN)) {
		DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
		ASSERT(0);
		return BCME_BUFTOOSHORT;
	}

	/* Read the ethertype before PKTPUSH; the new header overlays the
	 * original ethernet header that 'eh' points at.
	 */
	ether_type = ntoh16(eh->ether_type);
	neh = (struct ether_header *)PKTPUSH(osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);

	/* 802.3 MAC header */
	eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
	eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
	plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
	neh->ether_type = hton16(plen);	/* 802.3 length field */

	/* 802.2 LLC header */
	lsh = (struct dot11_llc_snap_header *)&neh[1];
	lsh->dsap = 0xaa;
	lsh->ssap = 0xaa;
	lsh->ctl = 0x03;

	/* 802.2 SNAP header */
	lsh->oui[0] = 0x00;
	lsh->oui[1] = 0x17;
	lsh->oui[2] = 0xf2;	/* Apple OUI 00:17:F2 used by AWDL */
	lsh->type = hton16(AWDL_PID);

	/* AWDL upper layer data */
	awdl_data = (uint8 *)&lsh[1];

	awdl_data[0] = dhd->awdl_minext;
	awdl_data[1] = dhd->awdl_presmode;

	/* NOTE(review): seq is stored host-order while flags/type use
	 * hton16 - presumably intentional, but worth confirming.
	 */
	seq = (uint16 *)&awdl_data[2];
	*seq = dhd->awdl_seq++;

	flags = (uint16 *)&awdl_data[4];
	*flags = hton16(AWDL_FLAGS_DEFAULT);

	type = (uint16 *)&awdl_data[6];
	*type = hton16(ether_type);	/* original ethertype closes the header */

	return BCME_OK;
}
11676 
11677 /** Convert 802.3 + AWDL LLC SNAP header to ethernet header
11678  * Note:- This function will overwrite the existing
11679  * 802.3 ethernet + AWDL LLC/SNAP header in the packet 'p'
11680  * with a 14 byte ethernet header
11681  * The pkt data pointer should be pointing to the
11682  * start of the packet (at the 802.3 header) when the function is called.
11683  * The pkt data pointer will be pointing to the
11684  * start of the new ethernet header if the function returns successfully
11685  */
int
dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p)
{
	uint16 *ethertype = NULL;
	uint8 *ptr = NULL;

	if (!eh || !p || !dhd)
		return BCME_BADARG;

	ptr = PKTDATA(dhd->osh, p);

	/* copy ether type instead of length from the
	 * end of the awdl llc header to the ethernet header
	 */
	ptr += sizeof(*eh) + DOT11_LLC_SNAP_HDR_LEN + AWDL_USERDATA_SIZE;
	ethertype = (uint16 *)ptr;
	/* value is copied as-is (already network byte order on the wire) */
	eh->ether_type = *ethertype;

	/* overwrite awdl llc header with ethernet header */
	PKTPULL(dhd->osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);
	ptr = PKTDATA(dhd->osh, p);
	memcpy_s(ptr, sizeof(*eh), eh, sizeof(*eh));
	return BCME_OK;
}
11710 #endif /* DHD_AWDL */
11711 
/**
 * Generic iovar GET/SET helper: builds the "name\0params" buffer with
 * bcm_mkiovar() and issues WLC_SET_VAR / WLC_GET_VAR via dhd_wl_ioctl().
 *
 * @param pub       dhd public context
 * @param ifidx     interface index the ioctl is issued on
 * @param name      NUL-terminated iovar name
 * @param param_buf input parameters (may be NULL when param_len is 0)
 * @param param_len length of param_buf in bytes (<= WLC_IOCTL_MAXLEN)
 * @param res_buf   GET only: output buffer (must be NULL for SET)
 * @param res_len   GET only: size of res_buf (must be 0 for SET)
 * @param set       TRUE for SET, FALSE for GET
 * @return BCME_OK on success, else a BCME_xxx error code
 */
int
dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
		uint res_len, bool set)
{
	char *buf = NULL;	/* scratch buffer, allocated only when needed */
	uint input_len;
	wl_ioctl_t ioc;
	int ret;

	if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
		return BCME_BADARG;

	/* name + its NUL terminator + parameter payload */
	input_len = strlen(name) + 1 + param_len;

	/* WAR to fix GET iovar returning buf too short error
	 * If param len is 0 for get iovar, increment input_len by sizeof(int)
	 * to avoid the length check error in fw
	 */
	if (!set && !param_len) {
		input_len += sizeof(int);
	}
	if (input_len > WLC_IOCTL_MAXLEN)
		return BCME_BADARG;

	buf = NULL;
	if (set) {
		/* SET must not supply a result buffer */
		if (res_buf || res_len != 0) {
			DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
			ret = BCME_BADARG;
			goto exit;
		}
		buf = MALLOCZ(pub->osh, input_len);
		if (!buf) {
			DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
			ret = BCME_NOMEM;
			goto exit;
		}
		/* bcm_mkiovar returns 0 on failure, buffer length on success */
		ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
		if (!ret) {
			ret = BCME_NOMEM;
			goto exit;
		}

		ioc.cmd = WLC_SET_VAR;
		ioc.buf = buf;
		ioc.len = input_len;
		ioc.set = set;

		ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
	} else {
		if (!res_buf || !res_len) {
			DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
			ret = BCME_BADARG;
			goto exit;
		}

		if (res_len < input_len) {
			/* Result buffer too small to hold the request: stage
			 * the request in a temp buffer and copy back only
			 * res_len bytes of the response.
			 */
			DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
					res_len, input_len));
			buf = MALLOCZ(pub->osh, input_len);
			if (!buf) {
				DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
				ret = BCME_NOMEM;
				goto exit;
			}
			ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
			if (!ret) {
				ret = BCME_NOMEM;
				goto exit;
			}

			ioc.cmd = WLC_GET_VAR;
			ioc.buf = buf;
			ioc.len = input_len;
			ioc.set = set;

			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);

			if (ret == BCME_OK) {
				memcpy(res_buf, buf, res_len);
			}
		} else {
			/* Request fits in res_buf: build it there and let the
			 * ioctl overwrite it in place with the response.
			 */
			memset(res_buf, 0, res_len);
			ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
			if (!ret) {
				ret = BCME_NOMEM;
				goto exit;
			}

			ioc.cmd = WLC_GET_VAR;
			ioc.buf = res_buf;
			ioc.len = res_len;
			ioc.set = set;

			ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
		}
	}
exit:
	if (buf) {
		MFREE(pub->osh, buf, input_len);
	}
	return ret;
}
11815