xref: /OK3568_Linux_fs/kernel/drivers/net/wireless/rockchip_wlan/infineon/bcmdhd/dhd_common.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1 /*
2  * Broadcom Dongle Host Driver (DHD), common DHD core.
3  *
4  * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
5  *
6  * Copyright (C) 1999-2017, Broadcom Corporation
7  *
8  *      Unless you and Broadcom execute a separate written software license
9  * agreement governing use of this software, this software is licensed to you
10  * under the terms of the GNU General Public License version 2 (the "GPL"),
11  * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12  * following added to such license:
13  *
14  *      As a special exception, the copyright holders of this software give you
15  * permission to link this software with independent modules, and to copy and
16  * distribute the resulting executable under terms of your choice, provided that
17  * you also meet, for each linked independent module, the terms and conditions of
18  * the license of that module.  An independent module is a module which is not
19  * derived from this software.  The special exception does not apply to any
20  * modifications of the software.
21  *
22  *      Notwithstanding the above, under no circumstances may you combine this
23  * software in any way with any other Broadcom software provided under a license
24  * other than the GPL, without Broadcom's express prior written consent.
25  *
26  *
27  * <<Broadcom-WL-IPTag/Open:>>
28  *
29  * $Id: dhd_common.c 701858 2017-05-26 20:20:58Z $
30  */
31 #include <typedefs.h>
32 #include <osl.h>
33 
34 #include <epivers.h>
35 #include <bcmutils.h>
36 #include <bcmstdlib_s.h>
37 
38 #include <bcmendian.h>
39 #include <dngl_stats.h>
40 #include <dhd.h>
41 #include <dhd_ip.h>
42 #include <bcmevent.h>
43 #include <dhdioctl.h>
44 
45 #ifdef PCIE_FULL_DONGLE
46 #include <bcmmsgbuf.h>
47 #endif /* PCIE_FULL_DONGLE */
48 
49 #ifdef SHOW_LOGTRACE
50 #include <event_log.h>
51 #endif /* SHOW_LOGTRACE */
52 
53 #ifdef BCMPCIE
54 #include <dhd_flowring.h>
55 #endif // endif
56 
57 #include <dhd_bus.h>
58 #include <dhd_proto.h>
59 #include <dhd_dbg.h>
60 #include <802.1d.h>
61 #include <dhd_debug.h>
62 #include <dhd_dbg_ring.h>
63 #include <dhd_mschdbg.h>
64 #include <msgtrace.h>
65 
66 #ifdef WL_CFG80211
67 #include <wl_cfg80211.h>
68 #endif // endif
69 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
70 #include <dhd_pno.h>
71 #endif /* OEM_ANDROID && PNO_SUPPORT */
72 #ifdef RTT_SUPPORT
73 #include <dhd_rtt.h>
74 #endif // endif
75 
76 #ifdef DNGL_EVENT_SUPPORT
77 #include <dnglevent.h>
78 #endif // endif
79 
80 #define htod32(i) (i)
81 #define htod16(i) (i)
82 #define dtoh32(i) (i)
83 #define dtoh16(i) (i)
84 #define htodchanspec(i) (i)
85 #define dtohchanspec(i) (i)
86 
87 #ifdef PROP_TXSTATUS
88 #include <wlfc_proto.h>
89 #include <dhd_wlfc.h>
90 #endif // endif
91 
92 #include <dhd_linux_priv.h>
93 #if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
94 #include <dhd_linux.h>
95 #endif // endif
96 
97 #ifdef DHD_L2_FILTER
98 #include <dhd_l2_filter.h>
99 #endif /* DHD_L2_FILTER */
100 
101 #ifdef DHD_PSTA
102 #include <dhd_psta.h>
103 #endif /* DHD_PSTA */
104 
105 #ifdef DHD_WET
106 #include <dhd_wet.h>
107 #endif /* DHD_WET */
108 
109 #ifdef DHD_LOG_DUMP
110 #include <dhd_dbg.h>
111 #ifdef DHD_PKT_LOGGING
112 #include <dhd_pktlog.h>
113 #endif /* DHD_PKT_LOGGING */
114 #endif /* DHD_LOG_DUMP */
115 
116 #ifdef DHD_LOG_PRINT_RATE_LIMIT
117 int log_print_threshold = 0;
118 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
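/*
 * Default host debug-message mask. DHD_ERROR_VAL, DHD_FWLOG_VAL and
 * DHD_EVENT_VAL are always enabled; the remaining bits depend on the build
 * flags below. The mask can be changed at runtime through the "msglevel"
 * DHD iovar (IOV_MSGLEVEL, handled in dhd_doiovar()).
 */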
119 int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL
120 	/* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
121 #if !defined(BOARD_HIKEY)
122 	| DHD_IOVAR_MEM_VAL
123 #endif // endif
124 #ifndef OEM_ANDROID
125 	| DHD_MSGTRACE_VAL
126 #endif /* OEM_ANDROID */
127 	| DHD_PKT_MON_VAL;
128 
129 #if defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT)
130 #include <wl_iw.h>
131 #endif /* defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT) */
132 
133 #ifdef DHD_ULP
134 #include <dhd_ulp.h>
135 #endif /* DHD_ULP */
136 
137 #ifdef DHD_DEBUG
138 #include <sdiovar.h>
139 #endif /* DHD_DEBUG */
140 
141 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
142 #include <linux/pm_runtime.h>
143 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
144 
145 #ifdef SOFTAP
146 char fw_path2[MOD_PARAM_PATHLEN];
147 extern bool softap_enabled;
148 #endif // endif
149 
150 #ifdef SHOW_LOGTRACE
151 #define BYTES_AHEAD_NUM		10	/* the address in the map file precedes the marker string by this many bytes */
152 #define READ_NUM_BYTES		1000 /* number of bytes to read from the map file per iteration */
153 #define GO_BACK_FILE_POS_NUM_BYTES	100 /* number of bytes to rewind the file position between reads */
154 static char *ramstart_str = " text_start"; /* marker string in the map file for the ramstart address */
155 static char *rodata_start_str = " rodata_start"; /* marker string in the map file for the rodata start address */
156 static char *rodata_end_str = " rodata_end"; /* marker string in the map file for the rodata end address */
157 #define RAMSTART_BIT	0x01
158 #define RDSTART_BIT		0x02
159 #define RDEND_BIT		0x04
160 #define ALL_MAP_VAL		(RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
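/*
 * Note: these constants drive the scan of the firmware map file. The file is
 * read READ_NUM_BYTES at a time, the address associated with each marker
 * string sits BYTES_AHEAD_NUM bytes before the match, and the scan is only
 * considered complete once all three markers have been found (ALL_MAP_VAL).
 */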
161 #endif /* SHOW_LOGTRACE */
162 
163 #ifdef SHOW_LOGTRACE
164 /* the fw file path is taken from either the module parameter at
165  * insmod time or is defined as a constant of different values
166  * for different platforms
167  */
168 extern char *st_str_file_path;
169 #endif /* SHOW_LOGTRACE */
170 
171 #define DHD_TPUT_MAX_TX_PKTS_BATCH	1000
172 
173 #ifdef EWP_EDL
174 typedef struct msg_hdr_edl {
175 	uint32 infobuf_ver;
176 	info_buf_payload_hdr_t pyld_hdr;
177 	msgtrace_hdr_t trace_hdr;
178 } msg_hdr_edl_t;
179 #endif /* EWP_EDL */
180 
181 /* Last connection success/failure status */
182 uint32 dhd_conn_event;
183 uint32 dhd_conn_status;
184 uint32 dhd_conn_reason;
185 
186 extern int dhd_iscan_request(void * dhdp, uint16 action);
187 extern void dhd_ind_scan_confirm(void *h, bool status);
188 extern int dhd_iscan_in_progress(void *h);
189 void dhd_iscan_lock(void);
190 void dhd_iscan_unlock(void);
191 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
192 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
193 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
194 #endif // endif
195 
196 extern int dhd_socram_dump(struct dhd_bus *bus);
197 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
198 
199 #ifdef DNGL_EVENT_SUPPORT
200 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
201 	bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
202 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
203 	size_t pktlen);
204 #endif /* DNGL_EVENT_SUPPORT */
205 
206 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
207 static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
208 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
209 
210 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
211 
212 #if defined(OEM_ANDROID)
213 bool ap_cfg_running = FALSE;
214 bool ap_fw_loaded = FALSE;
215 #endif /* defined(OEM_ANDROID) */
216 
217 /* Version string to report */
218 #ifdef DHD_DEBUG
219 #ifndef SRCBASE
220 #define SRCBASE        "drivers/net/wireless/bcmdhd"
221 #endif // endif
222 #define DHD_COMPILED "\nCompiled in " SRCBASE
223 #endif /* DHD_DEBUG */
224 
225 #define CHIPID_MISMATCH	8
226 
227 #if defined(DHD_DEBUG)
228 const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
229 	DHD_COMPILED " on " __DATE__ " at " __TIME__;
230 #else
231 const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
232 #endif // endif
233 char fw_version[FW_VER_STR_LEN] = "\0";
234 char clm_version[CLM_VER_STR_LEN] = "\0";
235 
236 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
237 
238 void dhd_set_timer(void *bus, uint wdtick);
239 
240 static char* ioctl2str(uint32 ioctl);
241 
242 /* IOVar table */
243 enum {
244 	IOV_VERSION = 1,
245 	IOV_MSGLEVEL,
246 	IOV_BCMERRORSTR,
247 	IOV_BCMERROR,
248 	IOV_WDTICK,
249 	IOV_DUMP,
250 	IOV_CLEARCOUNTS,
251 	IOV_LOGDUMP,
252 	IOV_LOGCAL,
253 	IOV_LOGSTAMP,
254 	IOV_GPIOOB,
255 	IOV_IOCTLTIMEOUT,
256 	IOV_CONS,
257 	IOV_DCONSOLE_POLL,
258 #if defined(DHD_DEBUG)
259 	IOV_DHD_JOIN_TIMEOUT_DBG,
260 	IOV_SCAN_TIMEOUT,
261 	IOV_MEM_DEBUG,
262 #ifdef BCMPCIE
263 	IOV_FLOW_RING_DEBUG,
264 #endif /* BCMPCIE */
265 #endif /* defined(DHD_DEBUG) */
266 #ifdef PROP_TXSTATUS
267 	IOV_PROPTXSTATUS_ENABLE,
268 	IOV_PROPTXSTATUS_MODE,
269 	IOV_PROPTXSTATUS_OPT,
270 	IOV_PROPTXSTATUS_MODULE_IGNORE,
271 	IOV_PROPTXSTATUS_CREDIT_IGNORE,
272 	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
273 	IOV_PROPTXSTATUS_RXPKT_CHK,
274 #endif /* PROP_TXSTATUS */
275 	IOV_BUS_TYPE,
276 	IOV_CHANGEMTU,
277 	IOV_HOSTREORDER_FLOWS,
278 #ifdef DHDTCPACK_SUPPRESS
279 	IOV_TCPACK_SUPPRESS,
280 #endif /* DHDTCPACK_SUPPRESS */
281 	IOV_AP_ISOLATE,
282 #ifdef DHD_L2_FILTER
283 	IOV_DHCP_UNICAST,
284 	IOV_BLOCK_PING,
285 	IOV_PROXY_ARP,
286 	IOV_GRAT_ARP,
287 	IOV_BLOCK_TDLS,
288 #endif /* DHD_L2_FILTER */
289 	IOV_DHD_IE,
290 #ifdef DHD_PSTA
291 	IOV_PSTA,
292 #endif /* DHD_PSTA */
293 #ifdef DHD_WET
294 	IOV_WET,
295 	IOV_WET_HOST_IPV4,
296 	IOV_WET_HOST_MAC,
297 #endif /* DHD_WET */
298 	IOV_CFG80211_OPMODE,
299 	IOV_ASSERT_TYPE,
300 	IOV_LMTEST,
301 #ifdef DHD_MCAST_REGEN
302 	IOV_MCAST_REGEN_BSS_ENABLE,
303 #endif // endif
304 #ifdef SHOW_LOGTRACE
305 	IOV_DUMP_TRACE_LOG,
306 #endif /* SHOW_LOGTRACE */
307 	IOV_DONGLE_TRAP_TYPE,
308 	IOV_DONGLE_TRAP_INFO,
309 	IOV_BPADDR,
310 	IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
311 #if defined(DHD_LOG_DUMP)
312 	IOV_LOG_DUMP,
313 #endif /* DHD_LOG_DUMP */
314 	IOV_TPUT_TEST,
315 	IOV_FIS_TRIGGER,
316 	IOV_DEBUG_BUF_DEST_STAT,
317 #ifdef DHD_DEBUG
318 	IOV_INDUCE_ERROR,
319 #endif /* DHD_DEBUG */
320 #ifdef WL_IFACE_MGMT_CONF
321 #ifdef WL_CFG80211
322 #ifdef WL_NANP2P
323 	IOV_CONC_DISC,
324 #endif /* WL_NANP2P */
325 #ifdef WL_IFACE_MGMT
326 	IOV_IFACE_POLICY,
327 #endif /* WL_IFACE_MGMT */
328 #endif /* WL_CFG80211 */
329 #endif /* WL_IFACE_MGMT_CONF */
330 	IOV_LAST
331 };
332 
333 const bcm_iovar_t dhd_iovars[] = {
334 	/* name         varid                   flags   flags2 type     minlen */
335 	{"version",	IOV_VERSION,		0,	0, IOVT_BUFFER,	sizeof(dhd_version)},
336 #ifdef DHD_DEBUG
337 	{"msglevel",	IOV_MSGLEVEL,		0,	0, IOVT_UINT32,	0},
338 	{"mem_debug",   IOV_MEM_DEBUG,  0,      0,      IOVT_BUFFER,    0 },
339 #ifdef BCMPCIE
340 	{"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
341 #endif /* BCMPCIE */
342 #endif /* DHD_DEBUG */
343 	{"bcmerrorstr", IOV_BCMERRORSTR,	0,	0, IOVT_BUFFER,	BCME_STRLEN},
344 	{"bcmerror",	IOV_BCMERROR,		0,	0, IOVT_INT8,	0},
345 	{"wdtick",	IOV_WDTICK,		0,	0, IOVT_UINT32,	0},
346 	{"dump",	IOV_DUMP,		0,	0, IOVT_BUFFER,	DHD_IOCTL_MAXLEN},
347 	{"cons",	IOV_CONS,		0,	0, IOVT_BUFFER,	0},
348 	{"dconpoll",	IOV_DCONSOLE_POLL,	0,	0, IOVT_UINT32,	0},
349 	{"clearcounts", IOV_CLEARCOUNTS,	0,	0, IOVT_VOID,	0},
350 	{"gpioob",	IOV_GPIOOB,		0,	0, IOVT_UINT32,	0},
351 	{"ioctl_timeout", IOV_IOCTLTIMEOUT,	0,	0, IOVT_UINT32,	0},
352 #ifdef PROP_TXSTATUS
353 	{"proptx",	IOV_PROPTXSTATUS_ENABLE,	0,	0, IOVT_BOOL,	0 },
354 	/*
355 	set the proptxstatus operation mode:
356 	0 - Do not do any proptxtstatus flow control
357 	1 - Use implied credit from a packet status
358 	2 - Use explicit credit
359 	*/
360 	{"ptxmode",	IOV_PROPTXSTATUS_MODE,	0,	0, IOVT_UINT32,	0 },
361 	{"proptx_opt", IOV_PROPTXSTATUS_OPT,	0,	0, IOVT_UINT32,	0 },
362 	{"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
363 	{"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
364 	{"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0,  IOVT_BOOL, 0 },
365 	{"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
366 #endif /* PROP_TXSTATUS */
367 	{"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
368 	{"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
369 	{"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
370 	(WLHOST_REORDERDATA_MAXFLOWS + 1) },
371 #ifdef DHDTCPACK_SUPPRESS
372 	{"tcpack_suppress",	IOV_TCPACK_SUPPRESS,	0,	0, IOVT_UINT8,	0 },
373 #endif /* DHDTCPACK_SUPPRESS */
374 #ifdef DHD_L2_FILTER
375 	{"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
376 #endif /* DHD_L2_FILTER */
377 	{"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
378 #ifdef DHD_L2_FILTER
379 	{"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
380 	{"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
381 	{"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
382 	{"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
383 #endif /* DHD_L2_FILTER */
384 	{"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
385 #ifdef DHD_PSTA
386 	/* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
387 	{"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
388 #endif /* DHD_PSTA */
389 #ifdef DHD_WET
390 	/* WET Mode configuration. 0: DISABLED 1: WET */
391 	{"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
392 	{"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
393 	{"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
394 #endif /* DHD_WET */
395 	{"op_mode",	IOV_CFG80211_OPMODE,	0,	0, IOVT_UINT32,	0 },
396 	{"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
397 	{"lmtest", IOV_LMTEST,	0,	0, IOVT_UINT32,	0 },
398 #ifdef DHD_MCAST_REGEN
399 	{"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
400 #endif // endif
401 #ifdef SHOW_LOGTRACE
402 	{"dump_trace_buf", IOV_DUMP_TRACE_LOG,	0, 0, IOVT_BUFFER,	sizeof(trace_buf_info_t) },
403 #endif /* SHOW_LOGTRACE */
404 	{"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
405 	{"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
406 #ifdef DHD_DEBUG
407 	{"bpaddr", IOV_BPADDR,	0, 0, IOVT_BUFFER,	sizeof(sdreg_t) },
408 #endif /* DHD_DEBUG */
409 	{"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
410 	MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
411 #if defined(DHD_LOG_DUMP)
412 	{"log_dump", IOV_LOG_DUMP,	0, 0, IOVT_UINT8, 0},
413 #endif /* DHD_LOG_DUMP */
414 #ifndef OEM_ANDROID
415 	{"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
416 #endif // endif
417 	{"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
418 #ifdef DHD_DEBUG
419 	{"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
420 #endif /* DHD_DEBUG */
421 #ifdef WL_IFACE_MGMT_CONF
422 #ifdef WL_CFG80211
423 #ifdef WL_NANP2P
424 	{"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
425 #endif /* WL_NANP2P */
426 #ifdef WL_IFACE_MGMT
427 	{"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
428 #endif /* WL_IFACE_MGMT */
429 #endif /* WL_CFG80211 */
430 #endif /* WL_IFACE_MGMT_CONF */
431 	{NULL, 0, 0, 0, 0, 0 }
432 };
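/*
 * The table above is matched by iovar name when a request arrives; the
 * entry's varid, combined with the get/set direction via IOV_GVAL()/IOV_SVAL(),
 * forms the actionid that the switch in dhd_doiovar() dispatches on, after
 * bcm_iovar_lencheck() has validated the buffer against the declared type and
 * minimum length.
 */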
433 
434 #define DHD_IOVAR_BUF_SIZE	128
435 
436 bool
437 dhd_query_bus_erros(dhd_pub_t *dhdp)
438 {
439 	bool ret = FALSE;
440 
441 	if (dhdp->dongle_reset) {
442 		DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
443 			__FUNCTION__));
444 		ret = TRUE;
445 	}
446 
447 	if (dhdp->dongle_trap_occured) {
448 		DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
449 			__FUNCTION__));
450 		ret = TRUE;
451 #ifdef OEM_ANDROID
452 		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
453 		dhd_os_send_hang_message(dhdp);
454 #endif /* OEM_ANDROID */
455 	}
456 
457 	if (dhdp->iovar_timeout_occured) {
458 		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
459 			__FUNCTION__));
460 		ret = TRUE;
461 	}
462 
463 #ifdef PCIE_FULL_DONGLE
464 	if (dhdp->d3ack_timeout_occured) {
465 		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
466 			__FUNCTION__));
467 		ret = TRUE;
468 	}
469 	if (dhdp->livelock_occured) {
470 		DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
471 			__FUNCTION__));
472 		ret = TRUE;
473 	}
474 
475 	if (dhdp->pktid_audit_failed) {
476 		DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
477 			__FUNCTION__));
478 		ret = TRUE;
479 	}
480 #endif /* PCIE_FULL_DONGLE */
481 
482 	if (dhdp->iface_op_failed) {
483 		DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
484 			__FUNCTION__));
485 		ret = TRUE;
486 	}
487 
488 	if (dhdp->scan_timeout_occurred) {
489 		DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
490 			__FUNCTION__));
491 		ret = TRUE;
492 	}
493 
494 	if (dhdp->scan_busy_occurred) {
495 		DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
496 			__FUNCTION__));
497 		ret = TRUE;
498 	}
499 
500 #ifdef DNGL_AXI_ERROR_LOGGING
501 	if (dhdp->axi_error) {
502 		DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
503 			__FUNCTION__));
504 		ret = TRUE;
505 	}
506 #endif /* DNGL_AXI_ERROR_LOGGING */
507 
508 	if (dhd_bus_get_linkdown(dhdp)) {
509 		DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
510 			__FUNCTION__));
511 		ret = TRUE;
512 	}
513 
514 	if (dhd_bus_get_cto(dhdp)) {
515 		DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
516 			__FUNCTION__));
517 		ret = TRUE;
518 	}
519 
520 	return ret;
521 }
522 
523 void
524 dhd_clear_bus_errors(dhd_pub_t *dhdp)
525 {
526 	if (!dhdp)
527 		return;
528 
529 	dhdp->dongle_reset = FALSE;
530 	dhdp->dongle_trap_occured = FALSE;
531 	dhdp->iovar_timeout_occured = FALSE;
532 #ifdef PCIE_FULL_DONGLE
533 	dhdp->d3ack_timeout_occured = FALSE;
534 	dhdp->livelock_occured = FALSE;
535 	dhdp->pktid_audit_failed = FALSE;
536 #endif // endif
537 	dhdp->iface_op_failed = FALSE;
538 	dhdp->scan_timeout_occurred = FALSE;
539 	dhdp->scan_busy_occurred = FALSE;
540 }
541 
542 #ifdef DHD_SSSR_DUMP
543 
544 /* This can be overwritten by module parameter defined in dhd_linux.c */
545 uint support_sssr_dump = TRUE;
546 
547 int
548 dhd_sssr_mempool_init(dhd_pub_t *dhd)
549 {
550 	dhd->sssr_mempool = (uint8 *) VMALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
551 	if (dhd->sssr_mempool == NULL) {
552 		DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
553 			__FUNCTION__));
554 		return BCME_ERROR;
555 	}
556 	return BCME_OK;
557 }
558 
559 void
560 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
561 {
562 	if (dhd->sssr_mempool) {
563 		VMFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
564 		dhd->sssr_mempool = NULL;
565 	}
566 }
567 
568 void
569 dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
570 {
571 #ifdef DHD_PCIE_REG_ACCESS
572 	int i, j;
573 	DHD_ERROR(("************** SSSR REG INFO start ****************\n"));
574 	DHD_ERROR(("pmu_regs\n"));
575 	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
576 		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
577 		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
578 		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
579 		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
580 		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
581 		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
582 	DHD_ERROR(("chipcommon_regs\n"));
583 	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
584 		sssr_reg_info->chipcommon_regs.base_regs.intmask,
585 		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
586 		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
587 		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
588 	DHD_ERROR(("arm_regs\n"));
589 	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
590 		" resetctrl=0x%x itopoobb=0x%x\n",
591 		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
592 		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
593 		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
594 		sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
595 	DHD_ERROR(("pcie_regs\n"));
596 	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
597 		"clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
598 		sssr_reg_info->pcie_regs.base_regs.ltrstate,
599 		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
600 		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
601 		sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
602 	DHD_ERROR(("vasip_regs\n"));
603 	DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
604 		sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
605 		sssr_reg_info->vasip_regs.vasip_sr_addr,
606 		sssr_reg_info->vasip_regs.vasip_sr_size));
607 
608 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
609 		DHD_ERROR(("mac_regs core[%d]\n", i));
610 		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
611 			"clockcontrolstatus_val=0x%x\n",
612 			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
613 			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
614 			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
615 			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
616 		DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
617 			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
618 			sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
619 			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
620 		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
621 			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
622 				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
623 		}
624 		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
625 	}
626 	DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
627 #endif /* DHD_PCIE_REG_ACCESS */
628 }
629 
630 int
631 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
632 {
633 	int ret;
634 	/* get sssr_reg_info from firmware */
635 	memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
636 	ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0,  (char *)&dhd->sssr_reg_info,
637 		sizeof(dhd->sssr_reg_info), FALSE);
638 	if (ret < 0) {
639 		DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
640 			__FUNCTION__, ret));
641 		return BCME_ERROR;
642 	}
643 
644 	dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
645 	return BCME_OK;
646 }
647 
648 uint32
649 dhd_get_sssr_bufsize(dhd_pub_t *dhd)
650 {
651 	int i;
652 	uint32 sssr_bufsize = 0;
653 	/* Sum up the save/restore region sizes of all D11 cores */
654 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
655 		sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
656 	}
657 	sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
658 
659 	/* Double the size as different dumps will be saved before and after SR */
660 	sssr_bufsize = 2 * sssr_bufsize;
661 
662 	return sssr_bufsize;
663 }
664 
665 int
666 dhd_sssr_dump_init(dhd_pub_t *dhd)
667 {
668 	int i;
669 	uint32 sssr_bufsize;
670 	uint32 mempool_used = 0;
671 
672 	dhd->sssr_inited = FALSE;
673 
674 	if (!support_sssr_dump) {
675 		DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
676 		return BCME_OK;
677 	}
678 
679 	/* check if sssr mempool is allocated */
680 	if (dhd->sssr_mempool == NULL) {
681 		DHD_ERROR(("%s: sssr_mempool is not allocated\n",
682 			__FUNCTION__));
683 		return BCME_ERROR;
684 	}
685 
686 	/* Get SSSR reg info */
687 	if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
688 		DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
689 		return BCME_ERROR;
690 	}
691 
692 	/* Validate structure version */
693 	if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) {
694 		DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
695 			__FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER));
696 		return BCME_ERROR;
697 	}
698 
699 	/* Validate structure length */
700 	if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) {
701 		DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
702 			__FUNCTION__, (int)dhd->sssr_reg_info.length,
703 			(int)sizeof(dhd->sssr_reg_info)));
704 		return BCME_ERROR;
705 	}
706 
707 	/* validate fifo size */
708 	sssr_bufsize = dhd_get_sssr_bufsize(dhd);
709 	if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
710 		DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
711 			__FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
712 		return BCME_ERROR;
713 	}
714 
715 	/* init all pointers to NULL */
716 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
717 		dhd->sssr_d11_before[i] = NULL;
718 		dhd->sssr_d11_after[i] = NULL;
719 	}
720 	dhd->sssr_dig_buf_before = NULL;
721 	dhd->sssr_dig_buf_after = NULL;
722 
723 	/* Allocate memory */
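	/*
	 * Carve the preallocated sssr_mempool sequentially: for every D11 core
	 * with a non-zero sr_size a "before" and an "after" buffer of that size
	 * are reserved, followed by a before/after pair for the digital core
	 * (vasip, or dig_mem_info on newer reg-info layouts). The
	 * dhd_get_sssr_bufsize() check above bounds the D11/vasip portion
	 * against DHD_SSSR_MEMPOOL_SIZE.
	 */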
724 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
725 		if (dhd->sssr_reg_info.mac_regs[i].sr_size) {
726 			dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
727 			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
728 
729 			dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
730 			mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
731 		}
732 	}
733 
734 	if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
735 		dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
736 		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
737 
738 		dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
739 		mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
740 	} else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
741 		dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
742 		dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
743 		mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
744 
745 		dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
746 		mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
747 	}
748 
749 	dhd->sssr_inited = TRUE;
750 
751 	return BCME_OK;
752 
753 }
754 
755 void
756 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
757 {
758 	int i;
759 
760 	dhd->sssr_inited = FALSE;
761 	/* init all pointers to NULL */
762 	for (i = 0; i < MAX_NUM_D11CORES; i++) {
763 		dhd->sssr_d11_before[i] = NULL;
764 		dhd->sssr_d11_after[i] = NULL;
765 	}
766 	dhd->sssr_dig_buf_before = NULL;
767 	dhd->sssr_dig_buf_after = NULL;
768 
769 	return;
770 }
771 
772 void
773 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
774 {
775 	bool print_info = FALSE;
776 	int dump_mode;
777 
778 	if (!dhd || !path) {
779 		DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
780 			__FUNCTION__));
781 		return;
782 	}
783 
784 	if (!dhd->sssr_dump_collected) {
785 		/* SSSR dump is not collected */
786 		return;
787 	}
788 
789 	dump_mode = dhd->sssr_dump_mode;
790 
791 	if (bcmstrstr(path, "core_0_before")) {
792 		if (dhd->sssr_d11_outofreset[0] &&
793 			dump_mode == SSSR_DUMP_MODE_SSSR) {
794 			print_info = TRUE;
795 		}
796 	} else if (bcmstrstr(path, "core_0_after")) {
797 		if (dhd->sssr_d11_outofreset[0]) {
798 			print_info = TRUE;
799 		}
800 	} else if (bcmstrstr(path, "core_1_before")) {
801 		if (dhd->sssr_d11_outofreset[1] &&
802 			dump_mode == SSSR_DUMP_MODE_SSSR) {
803 			print_info = TRUE;
804 		}
805 	} else if (bcmstrstr(path, "core_1_after")) {
806 		if (dhd->sssr_d11_outofreset[1]) {
807 			print_info = TRUE;
808 		}
809 	} else {
810 		print_info = TRUE;
811 	}
812 
813 	if (print_info) {
814 		DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
815 			path, FILE_NAME_HAL_TAG));
816 	}
817 }
818 #endif /* DHD_SSSR_DUMP */
819 
820 #ifdef DHD_FW_COREDUMP
821 void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
822 {
823 	if (!dhd_pub->soc_ram) {
824 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
825 		dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
826 			DHD_PREALLOC_MEMDUMP_RAM, length);
827 #else
828 		dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
829 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
830 	}
831 
832 	if (dhd_pub->soc_ram == NULL) {
833 		DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
834 			__FUNCTION__));
835 		dhd_pub->soc_ram_length = 0;
836 	} else {
837 		memset(dhd_pub->soc_ram, 0, length);
838 		dhd_pub->soc_ram_length = length;
839 	}
840 
841 	/* soc_ram free handled in dhd_{free,clear} */
842 	return dhd_pub->soc_ram;
843 }
844 #endif /* DHD_FW_COREDUMP */
845 
846 /* to NDIS developer, the structure dhd_common is redundant,
847  * please do NOT merge it back from other branches !!!
848  */
849 
850 int
851 dhd_common_socram_dump(dhd_pub_t *dhdp)
852 {
853 	return dhd_socram_dump(dhdp->bus);
854 }
855 
856 int
857 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
858 {
859 	struct bcmstrbuf b;
860 	struct bcmstrbuf *strbuf = &b;
861 
862 	if (!dhdp || !dhdp->prot || !buf) {
863 		return BCME_ERROR;
864 	}
865 
866 	bcm_binit(strbuf, buf, buflen);
867 
868 	/* Base DHD info */
869 	bcm_bprintf(strbuf, "%s\n", dhd_version);
870 	bcm_bprintf(strbuf, "\n");
871 	bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
872 	            dhdp->up, dhdp->txoff, dhdp->busstate);
873 	bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
874 	            dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
875 	bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
876 	            dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
877 	bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
878 
879 	bcm_bprintf(strbuf, "dongle stats:\n");
880 	bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
881 	            dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
882 	            dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
883 	bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
884 	            dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
885 	            dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
886 	bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
887 
888 	bcm_bprintf(strbuf, "bus stats:\n");
889 	bcm_bprintf(strbuf, "tx_packets %lu  tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
890 	            dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
891 	bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
892 	            dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
893 	bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
894 	            dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
895 	bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
896 	            dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
897 	bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
898 	            dhdp->rx_readahead_cnt, dhdp->tx_realloc);
899 	bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
900 	            dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
901 	bcm_bprintf(strbuf, "tx_big_packets %lu\n",
902 	            dhdp->tx_big_packets);
903 	bcm_bprintf(strbuf, "\n");
904 #ifdef DMAMAP_STATS
905 	/* Add DMA MAP info */
906 	bcm_bprintf(strbuf, "DMA MAP stats: \n");
907 	bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
908 			dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
909 			dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
910 #ifndef IOCTLRESP_USE_CONSTMEM
911 	bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
912 			dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
913 #endif /* !IOCTLRESP_USE_CONSTMEM */
914 	bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
915 			"TSBUF RX: %lu size %luK\n",
916 			dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
917 			dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
918 			dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
919 	bcm_bprintf(strbuf, "Total : %luK \n",
920 			KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
921 			dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
922 			dhdp->dma_stats.tsbuf_rx_sz));
923 #endif /* DMAMAP_STATS */
924 	bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
925 	/* Add any prot info */
926 	dhd_prot_dump(dhdp, strbuf);
927 	bcm_bprintf(strbuf, "\n");
928 
929 	/* Add any bus info */
930 	dhd_bus_dump(dhdp, strbuf);
931 
932 #if defined(DHD_LB_STATS)
933 	dhd_lb_stats_dump(dhdp, strbuf);
934 #endif /* DHD_LB_STATS */
935 #ifdef DHD_WET
936 	if (dhd_get_wet_mode(dhdp)) {
937 		bcm_bprintf(strbuf, "Wet Dump:\n");
938 		dhd_wet_dump(dhdp, strbuf);
939 		}
940 #endif /* DHD_WET */
941 
942 	/* return remaining buffer length */
943 	return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
944 }
945 
946 void
947 dhd_dump_to_kernelog(dhd_pub_t *dhdp)
948 {
949 	char buf[512];
950 
951 	DHD_ERROR(("F/W version: %s\n", fw_version));
952 	bcm_bprintf_bypass = TRUE;
953 	dhd_dump(dhdp, buf, sizeof(buf));
954 	bcm_bprintf_bypass = FALSE;
955 }
956 
957 int
958 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
959 {
960 	wl_ioctl_t ioc;
961 
962 	ioc.cmd = cmd;
963 	ioc.buf = arg;
964 	ioc.len = len;
965 	ioc.set = set;
966 
967 	return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
968 }
969 
970 int
971 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
972 	int cmd, uint8 set, int ifidx)
973 {
974 	char iovbuf[WLC_IOCTL_SMLEN];
975 	int ret = -1;
976 
977 	memset(iovbuf, 0, sizeof(iovbuf));
978 	if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
979 		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
980 		if (!ret) {
981 			*pval = ltoh32(*((uint*)iovbuf));
982 		} else {
983 			DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
984 				__FUNCTION__, name, ret));
985 		}
986 	} else {
987 		DHD_ERROR(("%s: mkiovar %s failed\n",
988 			__FUNCTION__, name));
989 	}
990 
991 	return ret;
992 }
993 
994 int
995 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
996 	int cmd, uint8 set, int ifidx)
997 {
998 	char iovbuf[WLC_IOCTL_SMLEN];
999 	int ret = -1;
1000 	int lval = htol32(val);
1001 	uint len;
1002 
1003 	len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
1004 
1005 	if (len) {
1006 		ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
1007 		if (ret) {
1008 			DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1009 				__FUNCTION__, name, ret));
1010 		}
1011 	} else {
1012 		DHD_ERROR(("%s: mkiovar %s failed\n",
1013 			__FUNCTION__, name));
1014 	}
1015 
1016 	return ret;
1017 }
1018 
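/*
 * Note: both int-iovar helpers above rely on bcm_mkiovar() packing the request
 * as "<iovar name>" '\0' [<int32 value, little-endian>] into iovbuf; for GET
 * the dongle overwrites the buffer in place and the value is read back with
 * ltoh32().
 */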
1019 static struct ioctl2str_s {
1020 	uint32 ioctl;
1021 	char *name;
1022 } ioctl2str_array[] = {
1023 	{WLC_UP, "UP"},
1024 	{WLC_DOWN, "DOWN"},
1025 	{WLC_SET_PROMISC, "SET_PROMISC"},
1026 	{WLC_SET_INFRA, "SET_INFRA"},
1027 	{WLC_SET_AUTH, "SET_AUTH"},
1028 	{WLC_SET_SSID, "SET_SSID"},
1029 	{WLC_RESTART, "RESTART"},
1030 	{WLC_SET_CHANNEL, "SET_CHANNEL"},
1031 	{WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
1032 	{WLC_SET_KEY, "SET_KEY"},
1033 	{WLC_SCAN, "SCAN"},
1034 	{WLC_DISASSOC, "DISASSOC"},
1035 	{WLC_REASSOC, "REASSOC"},
1036 	{WLC_SET_COUNTRY, "SET_COUNTRY"},
1037 	{WLC_SET_WAKE, "SET_WAKE"},
1038 	{WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
1039 	{WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
1040 	{WLC_SET_WSEC, "SET_WSEC"},
1041 	{WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
1042 	{WLC_SET_RADAR, "SET_RADAR"},
1043 	{0, NULL}
1044 };
1045 
1046 static char *
1047 ioctl2str(uint32 ioctl)
1048 {
1049 	struct ioctl2str_s *p = ioctl2str_array;
1050 
1051 	while (p->name != NULL) {
1052 		if (p->ioctl == ioctl) {
1053 			return p->name;
1054 		}
1055 		p++;
1056 	}
1057 
1058 	return "";
1059 }
1060 
1061 /**
1062  * @param ioc          IO control struct, members are partially used by this function.
1063  * @param buf [inout]  Contains parameters to send to dongle, contains dongle response on return.
1064  * @param len          Maximum number of bytes that dongle is allowed to write into 'buf'.
1065  */
1066 int
1067 dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
1068 {
1069 	int ret = BCME_ERROR;
1070 	unsigned long flags;
1071 #ifdef DUMP_IOCTL_IOV_LIST
1072 	dhd_iov_li_t *iov_li;
1073 #endif /* DUMP_IOCTL_IOV_LIST */
1074 
1075 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1076 	DHD_OS_WAKE_LOCK(dhd_pub);
1077 	if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
1078 		DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
1079 		DHD_OS_WAKE_UNLOCK(dhd_pub);
1080 		return BCME_ERROR;
1081 	}
1082 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1083 
1084 #ifdef KEEPIF_ON_DEVICE_RESET
1085 		if (ioc->cmd == WLC_GET_VAR) {
1086 			dbus_config_t config;
1087 			config.general_param = 0;
1088 			if (buf) {
1089 				if (!strcmp(buf, "wowl_activate")) {
1090 					 /* 1 (TRUE) after decreased by 1 */
1091 					config.general_param = 2;
1092 				} else if (!strcmp(buf, "wowl_clear")) {
1093 					 /* 0 (FALSE) after decreased by 1 */
1094 					config.general_param = 1;
1095 				}
1096 			}
1097 			if (config.general_param) {
1098 				config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
1099 				config.general_param--;
1100 				dbus_set_config(dhd_pub->dbus, &config);
1101 			}
1102 		}
1103 #endif /* KEEPIF_ON_DEVICE_RESET */
1104 
1105 	if (dhd_os_proto_block(dhd_pub))
1106 	{
1107 #ifdef DHD_LOG_DUMP
1108 		int slen, val, lval, min_len;
1109 		char *msg, tmp[64];
1110 
1111 		/* WLC_GET_VAR */
1112 		if (ioc->cmd == WLC_GET_VAR && buf) {
1113 			min_len = MIN(sizeof(tmp) - 1, strlen(buf));
1114 			memset(tmp, 0, sizeof(tmp));
1115 			bcopy(buf, tmp, min_len);
1116 			tmp[min_len] = '\0';
1117 		}
1118 #endif /* DHD_LOG_DUMP */
1119 
1120 #ifdef DHD_DISCONNECT_TRACE
1121 		if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
1122 			(WLC_DISASSOC_MYAP == ioc->cmd)) {
1123 			DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
1124 		}
1125 #endif /* DHD_DISCONNECT_TRACE */
1126 
1127 		/* logging of iovars that are sent to the dongle, ./dhd msglevel +iovar */
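		/*
		 * For WLC_SET_VAR the payload is laid out as "<iovar name>" '\0'
		 * <value>, so the code below advances 'pars' past the name and its
		 * terminator before logging the first 32 bits of the value portion.
		 */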
1128 		if (ioc->set == TRUE) {
1129 			char *pars = (char *)buf; // points at user buffer
1130 			if (ioc->cmd == WLC_SET_VAR && buf) {
1131 				DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
1132 				if (ioc->len > 1 + sizeof(uint32)) {
1133 					// skip iovar name:
1134 					pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
1135 					pars++;               // skip NULL character
1136 				}
1137 			} else {
1138 				DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
1139 					ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
1140 			}
1141 			if (pars != NULL) {
1142 				DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
1143 			} else {
1144 				DHD_DNGL_IOVAR_SET((" NULL\n"));
1145 			}
1146 		}
1147 
1148 		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1149 		if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
1150 			DHD_INFO(("%s: returning as busstate=%d\n",
1151 				__FUNCTION__, dhd_pub->busstate));
1152 			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1153 			dhd_os_proto_unblock(dhd_pub);
1154 			return -ENODEV;
1155 		}
1156 		DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
1157 		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1158 
1159 #ifdef DHD_PCIE_RUNTIMEPM
1160 		dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
1161 #endif /* DHD_PCIE_RUNTIMEPM */
1162 
1163 		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1164 		if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
1165 			DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1166 				__FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
1167 			DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1168 			dhd_os_busbusy_wake(dhd_pub);
1169 			DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1170 			dhd_os_proto_unblock(dhd_pub);
1171 			return -ENODEV;
1172 		}
1173 		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1174 
1175 #ifdef DUMP_IOCTL_IOV_LIST
1176 		if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
1177 			if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
1178 				DHD_ERROR(("iovar dump list item allocation Failed\n"));
1179 			} else {
1180 				iov_li->cmd = ioc->cmd;
1181 				if (buf)
1182 					bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
1183 				dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
1184 						&iov_li->list);
1185 			}
1186 		}
1187 #endif /* DUMP_IOCTL_IOV_LIST */
1188 
1189 		ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
1190 
1191 #ifdef DUMP_IOCTL_IOV_LIST
1192 		if (ret == -ETIMEDOUT) {
1193 			DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
1194 				IOV_LIST_MAX_LEN));
1195 			dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
1196 		}
1197 #endif /* DUMP_IOCTL_IOV_LIST */
1198 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1199 		if (ret == -ETIMEDOUT) {
1200 			copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
1201 		}
1202 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1203 #ifdef DHD_LOG_DUMP
1204 		if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
1205 				buf != NULL) {
1206 			if (buf) {
1207 				lval = 0;
1208 				slen = strlen(buf) + 1;
1209 				msg = (char*)buf;
1210 				if (len >= slen + sizeof(lval)) {
1211 					if (ioc->cmd == WLC_GET_VAR) {
1212 						msg = tmp;
1213 						lval = *(int*)buf;
1214 					} else {
1215 						min_len = MIN(ioc->len - slen, sizeof(int));
1216 						bcopy((msg + slen), &lval, min_len);
1217 					}
1218 					if (!strncmp(msg, "cur_etheraddr",
1219 						strlen("cur_etheraddr"))) {
1220 						lval = 0;
1221 					}
1222 				}
1223 				DHD_IOVAR_MEM((
1224 					"%s: cmd: %d, msg: %s val: 0x%x,"
1225 					" len: %d, set: %d, txn-id: %d\n",
1226 					ioc->cmd == WLC_GET_VAR ?
1227 					"WLC_GET_VAR" : "WLC_SET_VAR",
1228 					ioc->cmd, msg, lval, ioc->len, ioc->set,
1229 					dhd_prot_get_ioctl_trans_id(dhd_pub)));
1230 			} else {
1231 				DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
1232 					ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
1233 					ioc->cmd, ioc->len, ioc->set,
1234 					dhd_prot_get_ioctl_trans_id(dhd_pub)));
1235 			}
1236 		} else {
1237 			slen = ioc->len;
1238 			if (buf != NULL && slen != 0) {
1239 				if (slen >= 4) {
1240 					val = *(int*)buf;
1241 				} else if (slen >= 2) {
1242 					val = *(short*)buf;
1243 				} else {
1244 					val = *(char*)buf;
1245 				}
1246 				/* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
1247 				if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
1248 					DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
1249 						"set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
1250 			} else {
1251 				DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
1252 			}
1253 		}
1254 #endif /* DHD_LOG_DUMP */
1255 #if defined(OEM_ANDROID)
1256 		if (ret && dhd_pub->up) {
1257 			/* Send hang event only if dhd_open() was success */
1258 			dhd_os_check_hang(dhd_pub, ifidx, ret);
1259 		}
1260 
1261 		if (ret == -ETIMEDOUT && !dhd_pub->up) {
1262 			DHD_ERROR(("%s: 'resumed on timeout' error is "
1263 				"occurred before the interface does not"
1264 				" bring up\n", __FUNCTION__));
1265 		}
1266 #endif /* defined(OEM_ANDROID) */
1267 
1268 		DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1269 		DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1270 		dhd_os_busbusy_wake(dhd_pub);
1271 		DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1272 
1273 		dhd_os_proto_unblock(dhd_pub);
1274 
1275 	}
1276 
1277 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1278 	pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
1279 	pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
1280 
1281 	DHD_OS_WAKE_UNLOCK(dhd_pub);
1282 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1283 
1284 	return ret;
1285 }
1286 
1287 uint wl_get_port_num(wl_io_pport_t *io_pport)
1288 {
1289 	return 0;
1290 }
1291 
1292 /* Get bssidx from iovar params
1293  * Input:   dhd_pub - pointer to dhd_pub_t
1294  *	    params  - IOVAR params
1295  * Output:  idx	    - BSS index
1296  *	    val	    - pointer to the IOVAR arguments
1297  */
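/*
 * Expected layout of 'params' (illustrative):
 *   "bsscfg:" <iovar name> '\0' <uint32 bssidx, dongle byte order> <iovar args>
 * The bssidx is translated to the host interface index with dhd_bssidx2idx()
 * and *val is left pointing at the iovar arguments that follow it.
 */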
1298 static int
1299 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
1300 {
1301 	char *prefix = "bsscfg:";
1302 	uint32	bssidx;
1303 
1304 	if (!(strncmp(params, prefix, strlen(prefix)))) {
1305 		/* per bss setting should be prefixed with 'bsscfg:' */
1306 		const char *p = params + strlen(prefix);
1307 
1308 		/* Skip Name */
1309 		while (*p != '\0')
1310 			p++;
1311 		/* consider null */
1312 		p = p + 1;
1313 		bcopy(p, &bssidx, sizeof(uint32));
1314 		/* Get corresponding dhd index */
1315 		bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
1316 
1317 		if (bssidx >= DHD_MAX_IFS) {
1318 			DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
1319 			return BCME_ERROR;
1320 		}
1321 
1322 		/* skip bss idx */
1323 		p += sizeof(uint32);
1324 		*val = p;
1325 		*idx = bssidx;
1326 	} else {
1327 		DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
1328 		return BCME_ERROR;
1329 	}
1330 
1331 	return BCME_OK;
1332 }
1333 
1334 #if defined(DHD_DEBUG) && defined(BCMDHDUSB)
1335 /* USB Device console input function */
1336 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
1337 {
1338 	DHD_TRACE(("%s \n", __FUNCTION__));
1339 
1340 	return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
1341 
1342 }
1343 #endif /* DHD_DEBUG && BCMDHDUSB  */
1344 
1345 #ifdef DHD_DEBUG
1346 int
1347 dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
1348 {
1349 	unsigned long int_arg = 0;
1350 	char *p;
1351 	char *end_ptr = NULL;
1352 	dhd_dbg_mwli_t *mw_li;
1353 	dll_t *item, *next;
1354 	/* check if mwalloc, mwquery or mwfree was supplied an argument separated by a space */
1355 	p = bcmstrstr((char *)msg, " ");
1356 	if (p != NULL) {
1357 		/* space should be converted to null as separation flag for firmware */
1358 		*p = '\0';
1359 		/* store the argument in int_arg */
1360 		int_arg = bcm_strtoul(p+1, &end_ptr, 10);
1361 	}
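	/*
	 * Command formats parsed by this function:
	 *   "query"       - list the <id, size> chunks currently tracked by the host
	 *   "alloc <KB>"  - request the dongle to waste <KB> kilobytes of memory
	 *   "free <id>"   - release the chunk recorded under <id>
	 * For alloc/free the space separator is replaced by '\0' and binary int32
	 * argument(s) are appended after it before the buffer is sent to the dongle.
	 */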
1362 
1363 	if (!p && !strcmp(msg, "query")) {
1364 		/* let's query the list internally */
1365 		if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1366 			DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
1367 		} else {
1368 			for (item = dll_head_p(&dhd->mw_list_head);
1369 					!dll_end(&dhd->mw_list_head, item); item = next) {
1370 				next = dll_next_p(item);
1371 				mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1372 				DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
1373 			}
1374 		}
1375 	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
1376 		int32 alloc_handle;
1377 		/* convert size into KB and append as integer */
1378 		*((int32 *)(p+1)) = int_arg*1024;
1379 		*(p+1+sizeof(int32)) = '\0';
1380 
1381 		/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1382 		 * 1 byte for the null character
1383 		 */
1384 		msglen = strlen(msg) + sizeof(int32) + 1;
1385 		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
1386 			DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1387 		}
1388 
1389 		/* returned allocated handle from dongle, basically address of the allocated unit */
1390 		alloc_handle = *((int32 *)msg);
1391 
1392 		/* add a node in the list with tuple <id, handle, size> */
1393 		if (alloc_handle == 0) {
1394 			DHD_ERROR(("Reuqested size could not be allocated\n"));
1395 		} else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
1396 			DHD_ERROR(("mw list item allocation Failed\n"));
1397 		} else {
1398 			mw_li->id = dhd->mw_id++;
1399 			mw_li->handle = alloc_handle;
1400 			mw_li->size = int_arg;
1401 			/* append the node in the list */
1402 			dll_append(&dhd->mw_list_head, &mw_li->list);
1403 		}
1404 	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
1405 		/* inform dongle to free wasted chunk */
1406 		int handle = 0;
1407 		int size = 0;
1408 		for (item = dll_head_p(&dhd->mw_list_head);
1409 				!dll_end(&dhd->mw_list_head, item); item = next) {
1410 			next = dll_next_p(item);
1411 			mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1412 
1413 			if (mw_li->id == (int)int_arg) {
1414 				handle = mw_li->handle;
1415 				size = mw_li->size;
1416 				dll_delete(item);
1417 				MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1418 				if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1419 					/* reset the id */
1420 					dhd->mw_id = 0;
1421 				}
1422 			}
1423 		}
1424 		if (handle) {
1425 			int len;
1426 			/* append the free handle and the chunk size in first 8 bytes
1427 			 * after the command and null character
1428 			 */
1429 			*((int32 *)(p+1)) = handle;
1430 			*((int32 *)((p+1)+sizeof(int32))) = size;
1431 			/* append null as terminator */
1432 			*(p+1+2*sizeof(int32)) = '\0';
1433 			/* recalculated length -> 4 bytes for "free" + 8 bytes for hadnle and size
1434 			 * + 1 bytes for null caracter
1435 			 */
1436 			len = strlen(msg) + 2*sizeof(int32) + 1;
1437 			/* send iovar to free the chunk */
1438 			if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
1439 				DHD_ERROR(("IOCTL failed for memdebug free\n"));
1440 			}
1441 		} else {
1442 			DHD_ERROR(("specified id does not exist\n"));
1443 		}
1444 	} else {
1445 		/* for all the wrong argument formats */
1446 		return BCME_BADARG;
1447 	}
1448 	return 0;
1449 }
1450 extern void
1451 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
1452 {
1453 	dll_t *item;
1454 	dhd_dbg_mwli_t *mw_li;
1455 	while (!(dll_empty(list_head))) {
1456 		item = dll_head_p(list_head);
1457 		mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1458 		dll_delete(item);
1459 		MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1460 	}
1461 }
1462 #ifdef BCMPCIE
1463 int
1464 dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
1465 {
1466 	flow_ring_table_t *flow_ring_table;
1467 	char *cmd;
1468 	char *end_ptr = NULL;
1469 	uint8 prio;
1470 	uint16 flowid;
1471 	int i;
1472 	int ret = 0;
1473 	cmd = bcmstrstr(msg, " ");
1474 	BCM_REFERENCE(prio);
1475 	if (cmd != NULL) {
1476 		/* in order to use string operations append null */
1477 		*cmd = '\0';
1478 	} else {
1479 		DHD_ERROR(("missing: create/delete args\n"));
1480 		return BCME_ERROR;
1481 	}
1482 	if (cmd && !strcmp(msg, "create")) {
1483 		/* extract <"source address", "destination address", "priority"> */
1484 		uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
1485 		BCM_REFERENCE(sa);
1486 		BCM_REFERENCE(da);
1487 		msg = msg + strlen("create") + 1;
1488 		/* fill ethernet source address */
1489 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1490 			sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1491 			if (*end_ptr == ':') {
1492 				msg = (end_ptr + 1);
1493 			} else if (i != 5) {
1494 				DHD_ERROR(("not a valid source mac addr\n"));
1495 				return BCME_ERROR;
1496 			}
1497 		}
1498 		if (*end_ptr != ' ') {
1499 			DHD_ERROR(("missing: destiantion mac id\n"));
1500 			return BCME_ERROR;
1501 		} else {
1502 			/* skip space */
1503 			msg = end_ptr + 1;
1504 		}
1505 		/* fill ethernet destination address */
1506 		for (i = 0; i < ETHER_ADDR_LEN; i++) {
1507 			da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1508 			if (*end_ptr == ':') {
1509 				msg = (end_ptr + 1);
1510 			} else if (i != 5) {
1511 				DHD_ERROR(("not a valid destination  mac addr\n"));
1512 				return BCME_ERROR;
1513 			}
1514 		}
1515 		if (*end_ptr != ' ') {
1516 			DHD_ERROR(("missing: priority\n"));
1517 			return BCME_ERROR;
1518 		} else {
1519 			msg = end_ptr + 1;
1520 		}
1521 		/* parse priority */
1522 		prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
1523 		if (prio > MAXPRIO) {
1524 			DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1525 				__FUNCTION__));
1526 			return BCME_ERROR;
1527 		}
1528 
1529 		if (*end_ptr != '\0') {
1530 			DHD_ERROR(("msg not truncated with NULL character\n"));
1531 			return BCME_ERROR;
1532 		}
1533 		ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
1534 		if (ret != BCME_OK) {
1535 			DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
1536 			return BCME_ERROR;
1537 		}
1538 		return BCME_OK;
1539 
1540 	} else if (cmd && !strcmp(msg, "delete")) {
1541 		msg = msg + strlen("delete") + 1;
1542 		/* parse flowid */
1543 		flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
1544 		if (*end_ptr != '\0') {
1545 			DHD_ERROR(("msg not truncated with NULL character\n"));
1546 			return BCME_ERROR;
1547 		}
1548 
1549 		/* Find flowid from ifidx 0 since this IOVAR creates flowrings with ifidx 0 */
1550 		if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
1551 		{
1552 			DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
1553 			return BCME_ERROR;
1554 		}
1555 
1556 		flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
1557 		ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
1558 		if (ret != BCME_OK) {
1559 			DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
1560 			return BCME_ERROR;
1561 		}
1562 		return BCME_OK;
1563 	}
1564 	DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
1565 	return BCME_ERROR;
1566 }
1567 #endif /* BCMPCIE */
1568 #endif /* DHD_DEBUG */
1569 
1570 static int
1571 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
1572             void *params, int plen, void *arg, int len, int val_size)
1573 {
1574 	int bcmerror = 0;
1575 	int32 int_val = 0;
1576 	uint32 dhd_ver_len, bus_api_rev_len;
1577 
1578 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1579 	DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
1580 
1581 	if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
1582 		goto exit;
1583 
1584 	if (plen >= (int)sizeof(int_val))
1585 		bcopy(params, &int_val, sizeof(int_val));
1586 
1587 	switch (actionid) {
1588 	case IOV_GVAL(IOV_VERSION):
1589 		/* Need to have checked buffer length */
1590 		dhd_ver_len = strlen(dhd_version);
1591 		bus_api_rev_len = strlen(bus_api_revision);
1592 		if (dhd_ver_len)
1593 			bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len);
1594 		if (bus_api_rev_len)
1595 			bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision,
1596 				bus_api_rev_len);
1597 		break;
1598 
1599 	case IOV_GVAL(IOV_MSGLEVEL):
1600 		int_val = (int32)dhd_msg_level;
1601 		bcopy(&int_val, arg, val_size);
1602 		break;
1603 
1604 	case IOV_SVAL(IOV_MSGLEVEL):
1605 #ifdef WL_CFG80211
1606 		/* Enable DHD and WL logs in oneshot */
1607 		if (int_val & DHD_WL_VAL2)
1608 			wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
1609 		else if (int_val & DHD_WL_VAL)
1610 			wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
1611 		if (!(int_val & DHD_WL_VAL2))
1612 #endif /* WL_CFG80211 */
1613 		dhd_msg_level = int_val;
1614 		break;
1615 	case IOV_GVAL(IOV_BCMERRORSTR):
1616 		bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
1617 		((char *)arg)[BCME_STRLEN - 1] = 0x00;
1618 		break;
1619 
1620 	case IOV_GVAL(IOV_BCMERROR):
1621 		int_val = (int32)dhd_pub->bcmerror;
1622 		bcopy(&int_val, arg, val_size);
1623 		break;
1624 
1625 	case IOV_GVAL(IOV_WDTICK):
1626 		int_val = (int32)dhd_watchdog_ms;
1627 		bcopy(&int_val, arg, val_size);
1628 		break;
1629 
1630 	case IOV_SVAL(IOV_WDTICK):
1631 		if (!dhd_pub->up) {
1632 			bcmerror = BCME_NOTUP;
1633 			break;
1634 		}
1635 
1636 		dhd_watchdog_ms = (uint)int_val;
1637 
1638 		dhd_os_wd_timer(dhd_pub, (uint)int_val);
1639 		break;
1640 
1641 	case IOV_GVAL(IOV_DUMP):
1642 		if (dhd_dump(dhd_pub, arg, len) <= 0)
1643 			bcmerror = BCME_ERROR;
1644 		else
1645 			bcmerror = BCME_OK;
1646 		break;
1647 
1648 	case IOV_GVAL(IOV_DCONSOLE_POLL):
1649 		int_val = (int32)dhd_pub->dhd_console_ms;
1650 		bcopy(&int_val, arg, val_size);
1651 		break;
1652 
1653 	case IOV_SVAL(IOV_DCONSOLE_POLL):
1654 		dhd_pub->dhd_console_ms = (uint)int_val;
1655 		break;
1656 
1657 #if defined(DHD_DEBUG)
1658 	case IOV_SVAL(IOV_CONS):
1659 		if (len > 0)
1660 			bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
1661 		break;
1662 #endif /* DHD_DEBUG */
1663 
1664 	case IOV_SVAL(IOV_CLEARCOUNTS):
1665 		dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
1666 		dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
1667 		dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
1668 		dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
1669 		dhd_pub->tx_dropped = 0;
1670 		dhd_pub->rx_dropped = 0;
1671 		dhd_pub->tx_pktgetfail = 0;
1672 		dhd_pub->rx_pktgetfail = 0;
1673 		dhd_pub->rx_readahead_cnt = 0;
1674 		dhd_pub->tx_realloc = 0;
1675 		dhd_pub->wd_dpc_sched = 0;
1676 		dhd_pub->tx_big_packets = 0;
1677 		memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
1678 		dhd_bus_clearcounts(dhd_pub);
1679 #ifdef PROP_TXSTATUS
1680 		/* clear proptxstatus related counters */
1681 		dhd_wlfc_clear_counts(dhd_pub);
1682 #endif /* PROP_TXSTATUS */
1683 #if defined(DHD_LB_STATS)
1684 		DHD_LB_STATS_RESET(dhd_pub);
1685 #endif /* DHD_LB_STATS */
1686 		break;
1687 
1688 	case IOV_GVAL(IOV_IOCTLTIMEOUT): {
1689 		int_val = (int32)dhd_os_get_ioctl_resp_timeout();
1690 		bcopy(&int_val, arg, sizeof(int_val));
1691 		break;
1692 	}
1693 
1694 	case IOV_SVAL(IOV_IOCTLTIMEOUT): {
1695 		if (int_val <= 0)
1696 			bcmerror = BCME_BADARG;
1697 		else
1698 			dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
1699 		break;
1700 	}
1701 
1702 #ifdef PROP_TXSTATUS
1703 	case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
1704 		bool wlfc_enab = FALSE;
1705 		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1706 		if (bcmerror != BCME_OK)
1707 			goto exit;
1708 		int_val = wlfc_enab ? 1 : 0;
1709 		bcopy(&int_val, arg, val_size);
1710 		break;
1711 	}
1712 	case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
1713 		bool wlfc_enab = FALSE;
1714 		bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1715 		if (bcmerror != BCME_OK)
1716 			goto exit;
1717 
1718 		/* wlfc is already set as desired */
1719 		if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
1720 			goto exit;
1721 
1722 		if (int_val == TRUE)
1723 			bcmerror = dhd_wlfc_init(dhd_pub);
1724 		else
1725 			bcmerror = dhd_wlfc_deinit(dhd_pub);
1726 
1727 		break;
1728 	}
1729 	case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
1730 		bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
1731 		if (bcmerror != BCME_OK)
1732 			goto exit;
1733 		bcopy(&int_val, arg, val_size);
1734 		break;
1735 
1736 	case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
1737 		dhd_wlfc_set_mode(dhd_pub, int_val);
1738 		break;
1739 
1740 	case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1741 		bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
1742 		if (bcmerror != BCME_OK)
1743 			goto exit;
1744 		bcopy(&int_val, arg, val_size);
1745 		break;
1746 
1747 	case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1748 		dhd_wlfc_set_module_ignore(dhd_pub, int_val);
1749 		break;
1750 
1751 	case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1752 		bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
1753 		if (bcmerror != BCME_OK)
1754 			goto exit;
1755 		bcopy(&int_val, arg, val_size);
1756 		break;
1757 
1758 	case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1759 		dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
1760 		break;
1761 
1762 	case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1763 		bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
1764 		if (bcmerror != BCME_OK)
1765 			goto exit;
1766 		bcopy(&int_val, arg, val_size);
1767 		break;
1768 
1769 	case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1770 		dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
1771 		break;
1772 
1773 	case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1774 		bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
1775 		if (bcmerror != BCME_OK)
1776 			goto exit;
1777 		bcopy(&int_val, arg, val_size);
1778 		break;
1779 
1780 	case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1781 		dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
1782 		break;
1783 
1784 #endif /* PROP_TXSTATUS */
1785 
1786 	case IOV_GVAL(IOV_BUS_TYPE):
1787 		/* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
1788 #ifdef BCMDHDUSB
1789 		int_val = BUS_TYPE_USB;
1790 #endif // endif
1791 #ifdef BCMSDIO
1792 		int_val = BUS_TYPE_SDIO;
1793 #endif // endif
1794 #ifdef PCIE_FULL_DONGLE
1795 		int_val = BUS_TYPE_PCIE;
1796 #endif // endif
1797 		bcopy(&int_val, arg, val_size);
1798 		break;
1799 
1800 	case IOV_SVAL(IOV_CHANGEMTU):
1801 		int_val &= 0xffff;
1802 		bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
1803 		break;
1804 
1805 	case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
1806 	{
1807 		uint i = 0;
1808 		uint8 *ptr = (uint8 *)arg;
1809 		uint8 count = 0;
1810 
1811 		ptr++;
1812 		for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
1813 			if (dhd_pub->reorder_bufs[i] != NULL) {
1814 				*ptr = dhd_pub->reorder_bufs[i]->flow_id;
1815 				ptr++;
1816 				count++;
1817 			}
1818 		}
1819 		ptr = (uint8 *)arg;
1820 		*ptr = count;
1821 		break;
1822 	}
1823 #ifdef DHDTCPACK_SUPPRESS
1824 	case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
1825 		int_val = (uint32)dhd_pub->tcpack_sup_mode;
1826 		bcopy(&int_val, arg, val_size);
1827 		break;
1828 	}
1829 	case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
1830 		bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
1831 		break;
1832 	}
1833 #endif /* DHDTCPACK_SUPPRESS */
1834 
1835 #ifdef DHD_L2_FILTER
1836 	case IOV_GVAL(IOV_DHCP_UNICAST): {
1837 		uint32 bssidx;
1838 		const char *val;
1839 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1840 			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
1841 				__FUNCTION__, name));
1842 			bcmerror = BCME_BADARG;
1843 			break;
1844 		}
1845 		int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
1846 		memcpy(arg, &int_val, val_size);
1847 		break;
1848 	}
1849 	case IOV_SVAL(IOV_DHCP_UNICAST): {
1850 		uint32	bssidx;
1851 		const char *val;
1852 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1853 			DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter, name = %s\n",
1854 				__FUNCTION__, name));
1855 			bcmerror = BCME_BADARG;
1856 			break;
1857 		}
1858 		memcpy(&int_val, val, sizeof(int_val));
1859 		bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
1860 		break;
1861 	}
1862 	case IOV_GVAL(IOV_BLOCK_PING): {
1863 		uint32 bssidx;
1864 		const char *val;
1865 
1866 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1867 			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1868 			bcmerror = BCME_BADARG;
1869 			break;
1870 		}
1871 		int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
1872 		memcpy(arg, &int_val, val_size);
1873 		break;
1874 	}
1875 	case IOV_SVAL(IOV_BLOCK_PING): {
1876 		uint32	bssidx;
1877 		const char *val;
1878 
1879 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1880 			DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1881 			bcmerror = BCME_BADARG;
1882 			break;
1883 		}
1884 		memcpy(&int_val, val, sizeof(int_val));
1885 		bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
1886 		break;
1887 	}
1888 	case IOV_GVAL(IOV_PROXY_ARP): {
1889 		uint32	bssidx;
1890 		const char *val;
1891 
1892 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1893 			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1894 			bcmerror = BCME_BADARG;
1895 			break;
1896 		}
1897 		int_val = dhd_get_parp_status(dhd_pub, bssidx);
1898 		bcopy(&int_val, arg, val_size);
1899 		break;
1900 	}
1901 	case IOV_SVAL(IOV_PROXY_ARP): {
1902 		uint32	bssidx;
1903 		const char *val;
1904 
1905 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1906 			DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1907 			bcmerror = BCME_BADARG;
1908 			break;
1909 		}
1910 		bcopy(val, &int_val, sizeof(int_val));
1911 
1912 		/* Issue an iovar request to WL to update the proxy arp capability bit
1913 		 * in the Extended Capability IE of beacons/probe responses.
1914 		 */
1915 		bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
1916 				NULL, 0, TRUE);
1917 		if (bcmerror == BCME_OK) {
1918 			dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1919 		}
1920 		break;
1921 	}
1922 	case IOV_GVAL(IOV_GRAT_ARP): {
1923 		uint32 bssidx;
1924 		const char *val;
1925 
1926 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1927 			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1928 			bcmerror = BCME_BADARG;
1929 			break;
1930 		}
1931 		int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
1932 		memcpy(arg, &int_val, val_size);
1933 		break;
1934 	}
1935 	case IOV_SVAL(IOV_GRAT_ARP): {
1936 		uint32	bssidx;
1937 		const char *val;
1938 
1939 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1940 			DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1941 			bcmerror = BCME_BADARG;
1942 			break;
1943 		}
1944 		memcpy(&int_val, val, sizeof(int_val));
1945 		bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1946 		break;
1947 	}
1948 	case IOV_GVAL(IOV_BLOCK_TDLS): {
1949 		uint32 bssidx;
1950 		const char *val;
1951 
1952 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1953 			DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1954 			bcmerror = BCME_BADARG;
1955 			break;
1956 		}
1957 		int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
1958 		memcpy(arg, &int_val, val_size);
1959 		break;
1960 	}
1961 	case IOV_SVAL(IOV_BLOCK_TDLS): {
1962 		uint32	bssidx;
1963 		const char *val;
1964 
1965 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1966 			DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1967 			bcmerror = BCME_BADARG;
1968 			break;
1969 		}
1970 		memcpy(&int_val, val, sizeof(int_val));
1971 		bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
1972 		break;
1973 	}
1974 #endif /* DHD_L2_FILTER */
1975 	case IOV_SVAL(IOV_DHD_IE): {
1976 		uint32	bssidx;
1977 		const char *val;
1978 
1979 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1980 			DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
1981 			bcmerror = BCME_BADARG;
1982 			break;
1983 		}
1984 
1985 		break;
1986 	}
1987 	case IOV_GVAL(IOV_AP_ISOLATE): {
1988 		uint32	bssidx;
1989 		const char *val;
1990 
1991 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1992 			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
1993 			bcmerror = BCME_BADARG;
1994 			break;
1995 		}
1996 
1997 		int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
1998 		bcopy(&int_val, arg, val_size);
1999 		break;
2000 	}
2001 	case IOV_SVAL(IOV_AP_ISOLATE): {
2002 		uint32	bssidx;
2003 		const char *val;
2004 
2005 		if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2006 			DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2007 			bcmerror = BCME_BADARG;
2008 			break;
2009 		}
2010 
2011 		ASSERT(val);
2012 		bcopy(val, &int_val, sizeof(uint32));
2013 		dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
2014 		break;
2015 	}
2016 #ifdef DHD_PSTA
2017 	case IOV_GVAL(IOV_PSTA): {
2018 		int_val = dhd_get_psta_mode(dhd_pub);
2019 		bcopy(&int_val, arg, val_size);
2020 		break;
2021 		}
2022 	case IOV_SVAL(IOV_PSTA): {
2023 		if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
2024 			dhd_set_psta_mode(dhd_pub, int_val);
2025 		} else {
2026 			bcmerror = BCME_RANGE;
2027 		}
2028 		break;
2029 		}
2030 #endif /* DHD_PSTA */
2031 #ifdef DHD_WET
2032 	case IOV_GVAL(IOV_WET):
2033 		 int_val = dhd_get_wet_mode(dhd_pub);
2034 		 bcopy(&int_val, arg, val_size);
2035 		 break;
2036 
2037 	case IOV_SVAL(IOV_WET):
2038 		 if (int_val == 0 || int_val == 1) {
2039 			 dhd_set_wet_mode(dhd_pub, int_val);
2040 			 /* Delete the WET DB when disabled */
2041 			 if (!int_val) {
2042 				 dhd_wet_sta_delete_list(dhd_pub);
2043 			 }
2044 		 } else {
2045 			 bcmerror = BCME_RANGE;
2046 		 }
2047 		 break;
2048 	case IOV_SVAL(IOV_WET_HOST_IPV4):
2049 		dhd_set_wet_host_ipv4(dhd_pub, params, plen);
2050 		break;
2051 	case IOV_SVAL(IOV_WET_HOST_MAC):
2052 		dhd_set_wet_host_mac(dhd_pub, params, plen);
2053 		break;
2054 #endif /* DHD_WET */
2055 #ifdef DHD_MCAST_REGEN
2056 	case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2057 		uint32	bssidx;
2058 		const char *val;
2059 
2060 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2061 			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2062 			bcmerror = BCME_BADARG;
2063 			break;
2064 		}
2065 
2066 		int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
2067 		bcopy(&int_val, arg, val_size);
2068 		break;
2069 	}
2070 
2071 	case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2072 		uint32	bssidx;
2073 		const char *val;
2074 
2075 		if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2076 			DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2077 			bcmerror = BCME_BADARG;
2078 			break;
2079 		}
2080 
2081 		ASSERT(val);
2082 		bcopy(val, &int_val, sizeof(uint32));
2083 		dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
2084 		break;
2085 	}
2086 #endif /* DHD_MCAST_REGEN */
2087 
2088 	case IOV_GVAL(IOV_CFG80211_OPMODE): {
2089 		int_val = (int32)dhd_pub->op_mode;
2090 		bcopy(&int_val, arg, sizeof(int_val));
2091 		break;
2092 		}
2093 	case IOV_SVAL(IOV_CFG80211_OPMODE): {
2094 		if (int_val <= 0)
2095 			bcmerror = BCME_BADARG;
2096 		else
2097 			dhd_pub->op_mode = int_val;
2098 		break;
2099 	}
2100 
2101 	case IOV_GVAL(IOV_ASSERT_TYPE):
2102 		int_val = g_assert_type;
2103 		bcopy(&int_val, arg, val_size);
2104 		break;
2105 
2106 	case IOV_SVAL(IOV_ASSERT_TYPE):
2107 		g_assert_type = (uint32)int_val;
2108 		break;
2109 
2110 #if !defined(MACOSX_DHD)
2111 	case IOV_GVAL(IOV_LMTEST): {
2112 		*(uint32 *)arg = (uint32)lmtest;
2113 		break;
2114 	}
2115 
2116 	case IOV_SVAL(IOV_LMTEST): {
2117 		uint32 val = *(uint32 *)arg;
2118 		if (val > 50)
2119 			bcmerror = BCME_BADARG;
2120 		else {
2121 			lmtest = (uint)val;
2122 			DHD_ERROR(("%s: lmtest %s\n",
2123 				__FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
2124 		}
2125 		break;
2126 	}
2127 #endif // endif
2128 
2129 #ifdef SHOW_LOGTRACE
2130 	case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
2131 		trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
2132 		dhd_dbg_ring_t *dbg_verbose_ring = NULL;
2133 
2134 		dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
2135 		if (dbg_verbose_ring == NULL) {
2136 			DHD_ERROR(("dbg_verbose_ring is NULL\n"));
2137 			bcmerror = BCME_UNSUPPORTED;
2138 			break;
2139 		}
2140 
2141 		if (trace_buf_info != NULL) {
2142 			bzero(trace_buf_info, sizeof(trace_buf_info_t));
2143 			dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
2144 		} else {
2145 			DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
2146 			bcmerror = BCME_NOMEM;
2147 		}
2148 		break;
2149 	}
2150 #endif /* SHOW_LOGTRACE */
2151 	case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
2152 		if (dhd_pub->dongle_trap_occured)
2153 			int_val = ltoh32(dhd_pub->last_trap_info.type);
2154 		else
2155 			int_val = 0;
2156 		bcopy(&int_val, arg, val_size);
2157 		break;
2158 
2159 	case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
2160 	{
2161 		struct bcmstrbuf strbuf;
2162 		bcm_binit(&strbuf, arg, len);
2163 		if (dhd_pub->dongle_trap_occured == FALSE) {
2164 			bcm_bprintf(&strbuf, "no trap recorded\n");
2165 			break;
2166 		}
2167 		dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
2168 		break;
2169 	}
2170 #ifdef DHD_DEBUG
2171 #if defined(BCMSDIO) || defined(BCMPCIE)
2172 
2173 	case IOV_GVAL(IOV_BPADDR):
2174 		{
2175 			sdreg_t sdreg;
2176 			uint32 addr, size;
2177 
2178 			memcpy(&sdreg, params, sizeof(sdreg));
2179 
2180 			addr = sdreg.offset;
2181 			size = sdreg.func;
2182 
2183 			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2184 				(uint *)&int_val, TRUE);
2185 
2186 			memcpy(arg, &int_val, sizeof(int32));
2187 
2188 			break;
2189 		}
2190 
2191 	case IOV_SVAL(IOV_BPADDR):
2192 		{
2193 			sdreg_t sdreg;
2194 			uint32 addr, size;
2195 
2196 			memcpy(&sdreg, params, sizeof(sdreg));
2197 
2198 			addr = sdreg.offset;
2199 			size = sdreg.func;
2200 
2201 			bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2202 				(uint *)&sdreg.value,
2203 				FALSE);
2204 
2205 			break;
2206 		}
2207 #endif /* BCMSDIO || BCMPCIE */
2208 #ifdef BCMPCIE
2209 	case IOV_SVAL(IOV_FLOW_RING_DEBUG):
2210 		{
2211 			bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
2212 			break;
2213 		}
2214 #endif /* BCMPCIE */
2215 	case IOV_SVAL(IOV_MEM_DEBUG):
2216 		if (len > 0) {
2217 			bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
2218 		}
2219 		break;
2220 #endif /* DHD_DEBUG */
2221 #if defined(DHD_LOG_DUMP)
2222 	case IOV_GVAL(IOV_LOG_DUMP):
2223 		{
2224 			dhd_prot_debug_info_print(dhd_pub);
2225 			dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
2226 			break;
2227 		}
2228 #endif /* DHD_LOG_DUMP */
2229 #ifndef OEM_ANDROID
2230 	case IOV_GVAL(IOV_TPUT_TEST):
2231 		{
2232 			tput_test_t *tput_data = NULL;
2233 			if (params && plen >= sizeof(tput_test_t)) {
2234 				tput_data = (tput_test_t *)params;
2235 				bcmerror = dhd_tput_test(dhd_pub, tput_data);
2236 			} else {
2237 				DHD_ERROR(("%s: tput test - no input params!\n", __FUNCTION__));
2238 				bcmerror = BCME_BADARG;
2239 			}
2240 			break;
2241 		}
2242 #endif /* OEM_ANDROID */
2243 	case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
2244 		{
2245 			if (dhd_pub->debug_buf_dest_support) {
2246 				debug_buf_dest_stat_t *debug_buf_dest_stat =
2247 					(debug_buf_dest_stat_t *)arg;
2248 				memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
2249 					sizeof(dhd_pub->debug_buf_dest_stat));
2250 			} else {
2251 				bcmerror = BCME_DISABLED;
2252 			}
2253 			break;
2254 		}
2255 
2256 #ifdef DHD_DEBUG
2257 	case IOV_SVAL(IOV_INDUCE_ERROR): {
2258 		if (int_val >= DHD_INDUCE_ERROR_MAX) {
2259 			DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
2260 		} else {
2261 			dhd_pub->dhd_induce_error = (uint16)int_val;
2262 		}
2263 		break;
2264 	}
2265 #endif /* DHD_DEBUG */
2266 
2267 #ifdef WL_IFACE_MGMT_CONF
2268 #ifdef WL_CFG80211
2269 #ifdef WL_NANP2P
2270 	case IOV_GVAL(IOV_CONC_DISC): {
2271 		int_val = wl_cfg80211_get_iface_conc_disc(
2272 			dhd_linux_get_primary_netdev(dhd_pub));
2273 		bcopy(&int_val, arg, sizeof(int_val));
2274 		break;
2275 	}
2276 	case IOV_SVAL(IOV_CONC_DISC): {
2277 		bcmerror = wl_cfg80211_set_iface_conc_disc(
2278 			dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
2279 		break;
2280 	}
2281 #endif /* WL_NANP2P */
2282 #ifdef WL_IFACE_MGMT
2283 	case IOV_GVAL(IOV_IFACE_POLICY): {
2284 		int_val = wl_cfg80211_get_iface_policy(
2285 			dhd_linux_get_primary_netdev(dhd_pub));
2286 		bcopy(&int_val, arg, sizeof(int_val));
2287 		break;
2288 	}
2289 	case IOV_SVAL(IOV_IFACE_POLICY): {
2290 		bcmerror = wl_cfg80211_set_iface_policy(
2291 			dhd_linux_get_primary_netdev(dhd_pub),
2292 			arg, len);
2293 		break;
2294 	}
2295 #endif /* WL_IFACE_MGMT */
2296 #endif /* WL_CFG80211 */
2297 #endif /* WL_IFACE_MGMT_CONF */
2298 	default:
2299 		bcmerror = BCME_UNSUPPORTED;
2300 		break;
2301 	}
2302 
2303 exit:
2304 	DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
2305 	return bcmerror;
2306 }
2307 
2308 /* Store the status of a connection attempt for later retrieval by an iovar */
2309 void
2310 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
2311 {
2312 	/* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2313 	 * because an encryption/rsn mismatch results in both events, and
2314 	 * the important information is in the WLC_E_PRUNE.
2315 	 */
2316 	if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
2317 	      dhd_conn_event == WLC_E_PRUNE)) {
2318 		dhd_conn_event = event;
2319 		dhd_conn_status = status;
2320 		dhd_conn_reason = reason;
2321 	}
2322 }
2323 
2324 bool
2325 dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
2326 {
2327 	void *p;
2328 	int eprec = -1;		/* precedence to evict from */
2329 	bool discard_oldest;
2330 
2331 	/* Fast case: the precedence queue is not full and the total
2332 	 * queue length is not exceeded
2333 	 */
2334 	if (!pktqprec_full(q, prec) && !pktq_full(q)) {
2335 		pktq_penq(q, prec, pkt);
2336 		return TRUE;
2337 	}
2338 
2339 	/* Determine precedence from which to evict packet, if any */
2340 	if (pktqprec_full(q, prec))
2341 		eprec = prec;
2342 	else if (pktq_full(q)) {
2343 		p = pktq_peek_tail(q, &eprec);
2344 		ASSERT(p);
2345 		if (eprec > prec || eprec < 0)
2346 			return FALSE;
2347 	}
2348 
2349 	/* Evict if needed */
2350 	if (eprec >= 0) {
2351 		/* Detect queueing to unconfigured precedence */
2352 		ASSERT(!pktqprec_empty(q, eprec));
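		/*
		 * dhdp->wme_dp is a per-AC bitmap selecting the discard policy:
		 * a set bit evicts the oldest queued packet (head), a clear bit
		 * evicts the newest (tail); if the victim precedence equals the
		 * incoming one, the incoming packet is refused instead.
		 */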
2353 		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
2354 		if (eprec == prec && !discard_oldest)
2355 			return FALSE;		/* refuse newer (incoming) packet */
2356 		/* Evict packet according to discard policy */
2357 		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
2358 		ASSERT(p);
2359 #ifdef DHDTCPACK_SUPPRESS
2360 		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
2361 			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2362 				__FUNCTION__, __LINE__));
2363 			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
2364 		}
2365 #endif /* DHDTCPACK_SUPPRESS */
2366 		PKTFREE(dhdp->osh, p, TRUE);
2367 	}
2368 
2369 	/* Enqueue */
2370 	p = pktq_penq(q, prec, pkt);
2371 	ASSERT(p);
2372 
2373 	return TRUE;
2374 }
2375 
2376 /*
2377  * Drop the most suitable pkts from the queue:
2378  *	If a non-fragmented pkt is queued, drop only the first non-fragmented pkt.
2379  *	If every queued pkt is a fragment, find and drop one complete set of fragments.
2380  *	If neither of the above matches, drop the first pkt anyway.
2381  */
2382 bool
2383 dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
2384 {
2385 	struct pktq_prec *q = NULL;
2386 	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
2387 	pkt_frag_t frag_info;
2388 
2389 	ASSERT(dhdp && pq);
2390 	ASSERT(prec >= 0 && prec < pq->num_prec);
2391 
2392 	q = &pq->q[prec];
2393 	p = q->head;
2394 
2395 	if (p == NULL)
2396 		return FALSE;
2397 
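	/*
	 * Walk the queue looking for either a non-fragmented packet or a
	 * complete FIRST..LAST fragment set; prev_first remembers the node
	 * preceding the span so the list can be re-linked after the drop.
	 */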
2398 	while (p) {
2399 		frag_info = pkt_frag_info(dhdp->osh, p);
2400 		if (frag_info == DHD_PKT_FRAG_NONE) {
2401 			break;
2402 		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
2403 			if (first) {
2404 				/* No last frag pkt, use prev as last */
2405 				last = prev;
2406 				break;
2407 			} else {
2408 				first = p;
2409 				prev_first = prev;
2410 			}
2411 		} else if (frag_info == DHD_PKT_FRAG_LAST) {
2412 			if (first) {
2413 				last = p;
2414 				break;
2415 			}
2416 		}
2417 
2418 		prev = p;
2419 		p = PKTLINK(p);
2420 	}
2421 
2422 	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
2423 		/* No matching pkts found, fall back to the oldest */
2424 		prev = NULL;
2425 		p = q->head;
2426 		frag_info = 0;
2427 	}
2428 
2429 	if (frag_info == DHD_PKT_FRAG_NONE) {
2430 		first = last = p;
2431 		prev_first = prev;
2432 	}
2433 
2434 	p = first;
2435 	while (p) {
2436 		next = PKTLINK(p);
2437 		q->n_pkts--;
2438 		pq->n_pkts_tot--;
2439 
2440 #ifdef WL_TXQ_STALL
2441 		q->dequeue_count++;
2442 #endif // endif
2443 
2444 		PKTSETLINK(p, NULL);
2445 
2446 		if (fn)
2447 			fn(dhdp, prec, p, TRUE);
2448 
2449 		if (p == last)
2450 			break;
2451 
2452 		p = next;
2453 	}
2454 
2455 	if (prev_first == NULL) {
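	/* Re-link the queue around the dropped span and fix up head/tail. */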
2456 		if ((q->head = next) == NULL)
2457 			q->tail = NULL;
2458 	} else {
2459 		PKTSETLINK(prev_first, next);
2460 		if (!next)
2461 			q->tail = prev_first;
2462 	}
2463 
2464 	return TRUE;
2465 }
2466 
2467 static int
2468 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
2469 	void *params, int plen, void *arg, int len, bool set)
2470 {
2471 	int bcmerror = 0;
2472 	int val_size;
2473 	const bcm_iovar_t *vi = NULL;
2474 	uint32 actionid;
2475 
2476 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2477 
2478 	ASSERT(name);
2479 	ASSERT(len >= 0);
2480 
2481 	/* Get MUST have return space */
2482 	ASSERT(set || (arg && len));
2483 
2484 	/* Set does NOT take qualifiers */
2485 	ASSERT(!set || (!params && !plen));
2486 
2487 	if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
2488 		bcmerror = BCME_UNSUPPORTED;
2489 		goto exit;
2490 	}
2491 
2492 	DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2493 		name, (set ? "set" : "get"), len, plen));
2494 
2495 	/* set up 'params' pointer in case this is a set command so that
2496 	 * the convenience int and bool code can be common to set and get
2497 	 */
2498 	if (params == NULL) {
2499 		params = arg;
2500 		plen = len;
2501 	}
2502 
2503 	if (vi->type == IOVT_VOID)
2504 		val_size = 0;
2505 	else if (vi->type == IOVT_BUFFER)
2506 		val_size = len;
2507 	else
2508 		/* all other types are integer sized */
2509 		val_size = sizeof(int);
2510 
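	/*
	 * Fold the get/set direction into the iovar id; dhd_doiovar() switches
	 * on this combined action id (IOV_GVAL/IOV_SVAL of each IOV_xxx).
	 */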
2511 	actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2512 
2513 	bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
2514 
2515 exit:
2516 	return bcmerror;
2517 }
2518 
2519 int
2520 dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
2521 {
2522 	int bcmerror = 0;
2523 	unsigned long flags;
2524 
2525 	DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2526 
2527 	if (!buf) {
2528 		return BCME_BADARG;
2529 	}
2530 
2531 	dhd_os_dhdiovar_lock(dhd_pub);
2532 	switch (ioc->cmd) {
2533 		case DHD_GET_MAGIC:
2534 			if (buflen < sizeof(int))
2535 				bcmerror = BCME_BUFTOOSHORT;
2536 			else
2537 				*(int*)buf = DHD_IOCTL_MAGIC;
2538 			break;
2539 
2540 		case DHD_GET_VERSION:
2541 			if (buflen < sizeof(int))
2542 				bcmerror = BCME_BUFTOOSHORT;
2543 			else
2544 				*(int*)buf = DHD_IOCTL_VERSION;
2545 			break;
2546 
2547 		case DHD_GET_VAR:
2548 		case DHD_SET_VAR:
2549 			{
2550 				char *arg;
2551 				uint arglen;
2552 
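				/*
				 * The iovar name sits at the start of 'buf'; any value follows
				 * its NUL terminator. Lookup falls through three tables in turn:
				 * the DHD generic table, the protocol module, then the bus module.
				 */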
2553 				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2554 				if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
2555 					bcmstricmp((char *)buf, "devreset")) {
2556 					/* On platforms like FC19 the FW download is done via IOCTL,
2557 					 * so do not return an error for IOCTLs fired before the FW
2558 					 * download completes.
2559 					 */
2560 					if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
2561 						DHD_ERROR(("%s: returning as busstate=%d\n",
2562 								__FUNCTION__, dhd_pub->busstate));
2563 						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2564 						dhd_os_dhdiovar_unlock(dhd_pub);
2565 						return -ENODEV;
2566 					}
2567 				}
2568 				DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
2569 				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2570 
2571 #ifdef DHD_PCIE_RUNTIMEPM
2572 				dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
2573 #endif /* DHD_PCIE_RUNTIMEPM */
2574 
2575 				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2576 				if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
2577 					/* If Suspend/Resume is tested via the pcie_suspend IOVAR,
2578 					 * continue to execute that IOVAR and return from here for
2579 					 * all other IOVARs; pciecfgreg, devreset and sdio_suspend
2580 					 * are also allowed through.
2581 					 */
2582 					if (bcmstricmp((char *)buf, "pcie_suspend") &&
2583 					    bcmstricmp((char *)buf, "pciecfgreg") &&
2584 					    bcmstricmp((char *)buf, "devreset") &&
2585 					    bcmstricmp((char *)buf, "sdio_suspend")) {
2586 						DHD_ERROR(("%s: bus is in suspend(%d)"
2587 							"or suspending(0x%x) state\n",
2588 							__FUNCTION__, dhd_pub->busstate,
2589 							dhd_pub->dhd_bus_busy_state));
2590 						DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2591 						dhd_os_busbusy_wake(dhd_pub);
2592 						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2593 						dhd_os_dhdiovar_unlock(dhd_pub);
2594 						return -ENODEV;
2595 					}
2596 				}
2597 				/* During the devreset ioctl we call dhdpcie_advertise_bus_cleanup,
2598 				 * which waits a fixed time for all busy contexts to finish and
2599 				 * calls ASSERT on timeout. Because DHD_BUS_BUSY_SET_IN_DHD_IOVAR
2600 				 * was set for this ioctl, clear the IOCTL busy state here to
2601 				 * avoid that ASSERT. The "devreset" ioctl is not used on
2602 				 * production platforms; it is only used in FC19 setups.
2603 				 */
2604 				if (!bcmstricmp((char *)buf, "devreset") ||
2605 #ifdef BCMPCIE
2606 					(dhd_bus_is_multibp_capable(dhd_pub->bus) &&
2607 					!bcmstricmp((char *)buf, "dwnldstate")) ||
2608 #endif /* BCMPCIE */
2609 					FALSE)
2610 				{
2611 					DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2612 				}
2613 				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2614 
2615 				/* scan past the name to any arguments */
2616 				for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
2617 					;
2618 
2619 				if (*arg) {
2620 					bcmerror = BCME_BUFTOOSHORT;
2621 					goto unlock_exit;
2622 				}
2623 
2624 				/* account for the NUL terminator */
2625 				arg++, arglen--;
2626 				/* call with the appropriate arguments */
2627 				if (ioc->cmd == DHD_GET_VAR) {
2628 					bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
2629 							buf, buflen, IOV_GET);
2630 				} else {
2631 					bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
2632 							arg, arglen, IOV_SET);
2633 				}
2634 				if (bcmerror != BCME_UNSUPPORTED) {
2635 					goto unlock_exit;
2636 				}
2637 
2638 				/* not in generic table, try protocol module */
2639 				if (ioc->cmd == DHD_GET_VAR) {
2640 					bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
2641 							arglen, buf, buflen, IOV_GET);
2642 				} else {
2643 					bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
2644 							NULL, 0, arg, arglen, IOV_SET);
2645 				}
2646 				if (bcmerror != BCME_UNSUPPORTED) {
2647 					goto unlock_exit;
2648 				}
2649 
2650 				/* if still not found, try bus module */
2651 				if (ioc->cmd == DHD_GET_VAR) {
2652 					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2653 							arg, arglen, buf, buflen, IOV_GET);
2654 				} else {
2655 					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2656 							NULL, 0, arg, arglen, IOV_SET);
2657 				}
2658 				if (bcmerror != BCME_UNSUPPORTED) {
2659 					goto unlock_exit;
2660 				}
2661 
2662 			}
2663 			goto unlock_exit;
2664 
2665 		default:
2666 			bcmerror = BCME_UNSUPPORTED;
2667 	}
2668 	dhd_os_dhdiovar_unlock(dhd_pub);
2669 	return bcmerror;
2670 
2671 unlock_exit:
2672 	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2673 	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2674 	dhd_os_busbusy_wake(dhd_pub);
2675 	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2676 	dhd_os_dhdiovar_unlock(dhd_pub);
2677 	return bcmerror;
2678 }
2679 
2680 #ifdef SHOW_EVENTS
2681 
2682 static void
2683 wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
2684 	void *raw_event_ptr, char *eventmask)
2685 {
2686 	uint i, status, reason;
2687 	bool group = FALSE, flush_txq = FALSE, link = FALSE;
2688 	bool host_data = FALSE; /* when set, print the event data after the switch */
2689 	const char *auth_str;
2690 	const char *event_name;
2691 	uchar *buf;
2692 	char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
2693 	uint event_type, flags, auth_type, datalen;
2694 
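	/* Event header fields arrive in network byte order; convert before use. */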
2695 	event_type = ntoh32(event->event_type);
2696 	flags = ntoh16(event->flags);
2697 	status = ntoh32(event->status);
2698 	reason = ntoh32(event->reason);
2699 	BCM_REFERENCE(reason);
2700 	auth_type = ntoh32(event->auth_type);
2701 	datalen = ntoh32(event->datalen);
2702 
2703 	/* debug dump of event messages */
2704 	snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
2705 
2706 	event_name = bcmevent_get_name(event_type);
2707 	BCM_REFERENCE(event_name);
2708 
2709 	if (flags & WLC_EVENT_MSG_LINK)
2710 		link = TRUE;
2711 	if (flags & WLC_EVENT_MSG_GROUP)
2712 		group = TRUE;
2713 	if (flags & WLC_EVENT_MSG_FLUSHTXQ)
2714 		flush_txq = TRUE;
2715 
2716 	switch (event_type) {
2717 	case WLC_E_START:
2718 	case WLC_E_DEAUTH:
2719 	case WLC_E_DISASSOC:
2720 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2721 		break;
2722 
2723 	case WLC_E_ASSOC_IND:
2724 	case WLC_E_REASSOC_IND:
2725 
2726 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2727 
2728 		break;
2729 
2730 	case WLC_E_ASSOC:
2731 	case WLC_E_REASSOC:
2732 		if (status == WLC_E_STATUS_SUCCESS) {
2733 			DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
2734 		} else if (status == WLC_E_STATUS_TIMEOUT) {
2735 			DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
2736 		} else if (status == WLC_E_STATUS_FAIL) {
2737 			DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
2738 			       event_name, eabuf, (int)status, (int)reason));
2739 		} else {
2740 			DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
2741 			       event_name, eabuf, (int)status));
2742 		}
2743 
2744 		break;
2745 
2746 	case WLC_E_DEAUTH_IND:
2747 	case WLC_E_DISASSOC_IND:
2748 		DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
2749 		break;
2750 
2751 	case WLC_E_AUTH:
2752 	case WLC_E_AUTH_IND:
2753 		if (auth_type == DOT11_OPEN_SYSTEM)
2754 			auth_str = "Open System";
2755 		else if (auth_type == DOT11_SHARED_KEY)
2756 			auth_str = "Shared Key";
2757 		else if (auth_type == DOT11_SAE)
2758 			auth_str = "SAE";
2759 		else {
2760 			snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
2761 			auth_str = err_msg;
2762 		}
2763 
2764 		if (event_type == WLC_E_AUTH_IND) {
2765 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
2766 		} else if (status == WLC_E_STATUS_SUCCESS) {
2767 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
2768 				event_name, eabuf, auth_str));
2769 		} else if (status == WLC_E_STATUS_TIMEOUT) {
2770 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
2771 				event_name, eabuf, auth_str));
2772 		} else if (status == WLC_E_STATUS_FAIL) {
2773 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
2774 			       event_name, eabuf, auth_str, (int)status, (int)reason));
2775 		} else if (status == WLC_E_STATUS_NO_ACK) {
2776 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
2777 			       event_name, eabuf, auth_str));
2778 		} else {
2779 			DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
2780 				event_name, eabuf, auth_str, (int)status, (int)reason));
2781 		}
2782 		BCM_REFERENCE(auth_str);
2783 
2784 		break;
2785 
2786 	case WLC_E_JOIN:
2787 	case WLC_E_ROAM:
2788 	case WLC_E_SET_SSID:
2789 		if (status == WLC_E_STATUS_SUCCESS) {
2790 			DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2791 		} else {
2792 			if (status == WLC_E_STATUS_FAIL) {
2793 				DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
2794 			} else if (status == WLC_E_STATUS_NO_NETWORKS) {
2795 				DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
2796 			} else {
2797 				DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
2798 					event_name, (int)status));
2799 			}
2800 		}
2801 		break;
2802 
2803 	case WLC_E_BEACON_RX:
2804 		if (status == WLC_E_STATUS_SUCCESS) {
2805 			DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
2806 		} else if (status == WLC_E_STATUS_FAIL) {
2807 			DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
2808 		} else {
2809 			DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
2810 		}
2811 		break;
2812 
2813 	case WLC_E_LINK:
2814 		DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
2815 			event_name, link?"UP":"DOWN", flags, status));
2816 		BCM_REFERENCE(link);
2817 		break;
2818 
2819 	case WLC_E_MIC_ERROR:
2820 		DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
2821 		       event_name, eabuf, group, flush_txq));
2822 		BCM_REFERENCE(group);
2823 		BCM_REFERENCE(flush_txq);
2824 		break;
2825 
2826 	case WLC_E_ICV_ERROR:
2827 	case WLC_E_UNICAST_DECODE_ERROR:
2828 	case WLC_E_MULTICAST_DECODE_ERROR:
2829 		DHD_EVENT(("MACEVENT: %s, MAC %s\n",
2830 		       event_name, eabuf));
2831 		break;
2832 
2833 	case WLC_E_TXFAIL:
2834 		DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
2835 		break;
2836 
2837 	case WLC_E_ASSOC_REQ_IE:
2838 	case WLC_E_ASSOC_RESP_IE:
2839 	case WLC_E_PMKID_CACHE:
2840 		DHD_EVENT(("MACEVENT: %s\n", event_name));
2841 		break;
2842 
2843 	case WLC_E_SCAN_COMPLETE:
2844 		DHD_EVENT(("MACEVENT: %s\n", event_name));
2845 		break;
2846 
2847 	case WLC_E_IND_DOS_STATUS:
2848 		DHD_EVENT(("MACEVENT: %s\n", event_name));
2849 		break;
2850 
2851 	case WLC_E_RSSI_LQM:
2852 	case WLC_E_PFN_NET_FOUND:
2853 	case WLC_E_PFN_NET_LOST:
2854 	case WLC_E_PFN_SCAN_COMPLETE:
2855 	case WLC_E_PFN_SCAN_NONE:
2856 	case WLC_E_PFN_SCAN_ALLGONE:
2857 	case WLC_E_PFN_GSCAN_FULL_RESULT:
2858 	case WLC_E_PFN_SSID_EXT:
2859 		DHD_EVENT(("PNOEVENT: %s\n", event_name));
2860 		break;
2861 
2862 	case WLC_E_PFN_SCAN_BACKOFF:
2863 	case WLC_E_PFN_BSSID_SCAN_BACKOFF:
2864 		DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
2865 		           event_name, (int)status, (int)reason));
2866 		break;
2867 
2868 	case WLC_E_PSK_SUP:
2869 	case WLC_E_PRUNE:
2870 		DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
2871 		           event_name, (int)status, (int)reason));
2872 		break;
2873 
2874 #ifdef WIFI_ACT_FRAME
2875 	case WLC_E_ACTION_FRAME:
2876 		DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
2877 		break;
2878 #endif /* WIFI_ACT_FRAME */
2879 
2880 #ifdef SHOW_LOGTRACE
2881 	case WLC_E_TRACE:
2882 	{
2883 		dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
2884 		break;
2885 	}
2886 #endif /* SHOW_LOGTRACE */
2887 
2888 	case WLC_E_RSSI:
2889 		DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
2890 		break;
2891 
2892 	case WLC_E_SERVICE_FOUND:
2893 	case WLC_E_P2PO_ADD_DEVICE:
2894 	case WLC_E_P2PO_DEL_DEVICE:
2895 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2896 		break;
2897 
2898 #ifdef BT_WIFI_HANDOBER
2899 	case WLC_E_BT_WIFI_HANDOVER_REQ:
2900 		DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2901 		break;
2902 #endif // endif
2903 
2904 	case WLC_E_CCA_CHAN_QUAL:
2905 		if (datalen) {
2906 			cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
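			/*
			 * The embedded id selects the payload layout: full PM/no-PM CCA
			 * stats, plain CCA busy stats, or a noise-floor report.
			 */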
2907 			if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
2908 				cca_only_chan_qual_event_t *cca_only_event =
2909 					(cca_only_chan_qual_event_t *)cca_event;
2910 				BCM_REFERENCE(cca_only_event);
2911 				DHD_EVENT((
2912 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2913 					" channel 0x%02x\n",
2914 					event_name, event_type, eabuf, (int)status,
2915 					(int)reason, (int)auth_type, cca_event->chanspec));
2916 				DHD_EVENT((
2917 					"\tTOTAL (dur %dms me %dms notme %dms interf %dms"
2918 					" ts 0x%08x)\n",
2919 					cca_only_event->cca_busy_ext.duration,
2920 					cca_only_event->cca_busy_ext.congest_ibss,
2921 					cca_only_event->cca_busy_ext.congest_obss,
2922 					cca_only_event->cca_busy_ext.interference,
2923 					cca_only_event->cca_busy_ext.timestamp));
2924 				DHD_EVENT((
2925 					"\t  !PM (dur %dms me %dms notme %dms interf %dms)\n",
2926 					cca_only_event->cca_busy_nopm.duration,
2927 					cca_only_event->cca_busy_nopm.congest_ibss,
2928 					cca_only_event->cca_busy_nopm.congest_obss,
2929 					cca_only_event->cca_busy_nopm.interference));
2930 				DHD_EVENT((
2931 					"\t   PM (dur %dms me %dms notme %dms interf %dms)\n",
2932 					cca_only_event->cca_busy_pm.duration,
2933 					cca_only_event->cca_busy_pm.congest_ibss,
2934 					cca_only_event->cca_busy_pm.congest_obss,
2935 					cca_only_event->cca_busy_pm.interference));
2936 			} else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
2937 				DHD_EVENT((
2938 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2939 					" channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
2940 					" ts 0x%08x)\n",
2941 					event_name, event_type, eabuf, (int)status,
2942 					(int)reason, (int)auth_type, cca_event->chanspec,
2943 					cca_event->cca_busy_ext.duration,
2944 					cca_event->cca_busy_ext.congest_ibss,
2945 					cca_event->cca_busy_ext.congest_obss,
2946 					cca_event->cca_busy_ext.interference,
2947 					cca_event->cca_busy_ext.timestamp));
2948 			} else if (cca_event->id == WL_CHAN_QUAL_CCA) {
2949 				DHD_EVENT((
2950 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2951 					" channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
2952 					event_name, event_type, eabuf, (int)status,
2953 					(int)reason, (int)auth_type, cca_event->chanspec,
2954 					cca_event->cca_busy.duration,
2955 					cca_event->cca_busy.congest,
2956 					cca_event->cca_busy.timestamp));
2957 			} else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
2958 			           (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
2959 				DHD_EVENT((
2960 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2961 					" channel 0x%02x (NF[%d] %ddB)\n",
2962 					event_name, event_type, eabuf, (int)status,
2963 					(int)reason, (int)auth_type, cca_event->chanspec,
2964 					cca_event->id, cca_event->noise));
2965 			} else {
2966 				DHD_EVENT((
2967 					"MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2968 					" channel 0x%02x (unknown ID %d)\n",
2969 					event_name, event_type, eabuf, (int)status,
2970 					(int)reason, (int)auth_type, cca_event->chanspec,
2971 					cca_event->id));
2972 			}
2973 		}
2974 		break;
2975 	case WLC_E_ESCAN_RESULT:
2976 	{
2977 		wl_escan_result_v2_t *escan_result =
2978 				(wl_escan_result_v2_t *)event_data;
2979 		BCM_REFERENCE(escan_result);
2980 #ifdef OEM_ANDROID
2981 		if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
2982 			DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
2983 				event_name, event_type, (int)status,
2984 				dtoh16(escan_result->sync_id)));
2985 		} else {
2986 			DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
2987 				event_name, event_type, eabuf, (int)status));
2988 		}
2989 #else
2990 		DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
2991 		       event_name, event_type, eabuf, (int)status, dtoh16(escan_result->sync_id)));
2992 #endif // endif
2993 
2994 		break;
2995 	}
2996 	case WLC_E_IF:
2997 	{
2998 		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
2999 		BCM_REFERENCE(ifevent);
3000 
3001 		DHD_EVENT(("MACEVENT: %s, opcode:0x%x  ifidx:%d role:%d\n",
3002 		event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
3003 		break;
3004 	}
3005 #ifdef SHOW_LOGTRACE
3006 	case WLC_E_MSCH:
3007 	{
3008 		wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
3009 		break;
3010 	}
3011 #endif /* SHOW_LOGTRACE */
3012 
3013 	case WLC_E_PSK_AUTH:
3014 		DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
3015 			event_name, eabuf, status, reason));
3016 		break;
3017 	case WLC_E_AGGR_EVENT:
3018 		{
3019 			event_aggr_data_t *aggrbuf = event_data;
3020 			int j = 0, len = 0;
3021 			uint8 *data = aggrbuf->data;
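			/*
			 * Each sub-event is a wl_event_msg_t header plus its data,
			 * padded to a 64-bit boundary; len accumulates the padded
			 * sizes so data can step to the next sub-event.
			 */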
3022 			DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
3023 					event_name, aggrbuf->num_events, aggrbuf->len));
3024 			for (j = 0; j < aggrbuf->num_events; j++)
3025 			{
3026 				wl_event_msg_t * sub_event = (wl_event_msg_t *)data;
3027 				if (len > aggrbuf->len) {
3028 					DHD_ERROR(("%s: Aggr events corrupted!",
3029 						__FUNCTION__));
3030 					break;
3031 				}
3032 				DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
3033 				len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
3034 						sizeof(wl_event_msg_t)), sizeof(uint64));
3035 				buf = (uchar *)(data + sizeof(wl_event_msg_t));
3036 				BCM_REFERENCE(buf);
3037 				DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
3038 				for (i = 0; i < ntoh32(sub_event->datalen); i++) {
3039 					DHD_EVENT((" 0x%02x ", buf[i]));
3040 				}
3041 				data = aggrbuf->data + len;
3042 			}
3043 			DHD_EVENT(("\n"));
3044 		}
3045 		break;
3046 	case WLC_E_NAN_CRITICAL:
3047 		{
3048 			DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
3049 			break;
3050 		}
3051 	case WLC_E_NAN_NON_CRITICAL:
3052 		{
3053 			DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
3054 			break;
3055 		}
3056 	case WLC_E_PROXD:
3057 		{
3058 			wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
3059 			DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
3060 				event_name, proxd->type, reason));
3061 			break;
3062 		}
3063 	case WLC_E_RPSNOA:
3064 		{
3065 			rpsnoa_stats_t *stat = event_data;
3066 			if (datalen == sizeof(*stat)) {
3067 				DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
3068 					(stat->band == WLC_BAND_2G) ? "2G":"5G",
3069 					stat->state, stat->last_pps));
3070 			}
3071 			break;
3072 		}
3073 	case WLC_E_PHY_CAL:
3074 		{
3075 			DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
3076 			break;
3077 		}
3078 	case WLC_E_WA_LQM:
3079 		{
3080 			wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
3081 			bcm_xtlv_t *subevent;
3082 			wl_event_wa_lqm_basic_t *elqm_basic;
3083 
3084 			if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
3085 			    (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
3086 				DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
3087 					event_name, event_wa_lqm->ver, event_wa_lqm->len));
3088 				break;
3089 			}
3090 
3091 			subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
3092 			if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
3093 			     (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
3094 				DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
3095 					event_name, subevent->id, subevent->len));
3096 				break;
3097 			}
3098 
3099 			elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
3100 			BCM_REFERENCE(elqm_basic);
3101 			DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
3102 				event_name, elqm_basic->rssi, elqm_basic->snr,
3103 				elqm_basic->tx_rate, elqm_basic->rx_rate));
3104 			break;
3105 		}
3106 	case WLC_E_OVERTEMP:
3107 	{
3108 		DHD_EVENT(("MACEVENT: %s\n", event_name));
3109 		break;
3110 	}
3111 	default:
3112 		DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
3113 		       event_name, event_type, eabuf, (int)status, (int)reason,
3114 		       (int)auth_type));
3115 		break;
3116 	}
3117 
3118 	/* show any appended data if message level is set to bytes or host_data is set */
3119 	if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
3120 		buf = (uchar *) event_data;
3121 		BCM_REFERENCE(buf);
3122 		DHD_EVENT((" data (%d) : ", datalen));
3123 		for (i = 0; i < datalen; i++) {
3124 			DHD_EVENT((" 0x%02x ", buf[i]));
3125 		}
3126 		DHD_EVENT(("\n"));
3127 	}
3128 } /* wl_show_host_event */
3129 #endif /* SHOW_EVENTS */
3130 
3131 #ifdef DNGL_EVENT_SUPPORT
3132 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
3133 int
3134 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3135 {
3136 	bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
3137 
3138 	dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
3139 	return BCME_OK;
3140 }
3141 
3142 #ifdef PARSE_DONGLE_HOST_EVENT
3143 typedef struct hck_id_to_str_s {
3144 	uint32 id;
3145 	char *name;
3146 } hck_id_to_str_t;
3147 
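/*
 * Tables mapping health-check IDs to readable names for DONGLE_HCK_EVENT
 * logging; each table is terminated by a {0, NULL} sentinel entry.
 */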
3148 hck_id_to_str_t hck_sw_id_to_str[] = {
3149 	{WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
3150 	{WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
3151 	{WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
3152 	{WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
3153 	{WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
3154 	{WL_HC_DD_PHY, "WL_HC_DD_PHY"},
3155 	{WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
3156 	{WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
3157 	{0, NULL}
3158 };
3159 
3160 hck_id_to_str_t hck_pcie_module_to_str[] = {
3161 	{HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
3162 	{HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
3163 	{HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
3164 	{HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
3165 	{HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
3166 	{HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
3167 	{HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
3168 	{HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
3169 	{0, NULL}
3170 };
3171 
3172 static void
3173 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
3174 {
3175 	while (hck->name != NULL) {
3176 		if (hck->id == id) {
3177 			DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
3178 			return;
3179 		}
3180 		hck++;
3181 	}
3182 }
3183 #endif /* PARSE_DONGLE_HOST_EVENT */
3184 
3185 void
3186 dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
3187 	bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3188 {
3189 	uint8 *p = (uint8 *)(event + 1);
3190 	uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
3191 	uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
3192 	uint16 version = ntoh16_ua((void *)&dngl_event->version);
3193 
3194 	DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
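	/* Drop events whose advertised datalen does not fit in the received packet. */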
3195 	if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
3196 		return;
3197 	}
3198 	if (version != BCM_DNGL_EVENT_MSG_VERSION) {
3199 		DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
3200 			version, BCM_DNGL_EVENT_MSG_VERSION));
3201 		return;
3202 	}
3203 	switch (type) {
3204 	   case DNGL_E_SOCRAM_IND:
3205 		{
3206 		   bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
3207 		   uint16 tag = ltoh32(socramind_ptr->tag);
3208 		   uint16 taglen = ltoh32(socramind_ptr->length);
3209 		   p = (uint8 *)socramind_ptr->value;
3210 		   DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
3211 		   switch (tag) {
3212 			case SOCRAM_IND_ASSERT_TAG:
3213 			    {
3214 				/*
3215 				 * The payload consists of:
3216 				 *  - a NUL-terminated function name, padded to a 32-bit boundary,
3217 				 *  - the line number (32 bits),
3218 				 *  - the caller address (32 bits).
3219 				 */
3220 				char *fnname = (char *)p;
3221 				if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
3222 					sizeof(uint32) * 2)) {
3223 					DHD_ERROR(("Wrong length:%d\n", datalen));
3224 					return;
3225 				}
3226 				DHD_EVENT(("ASSRT Function:%s ", p));
3227 				p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
3228 				DHD_EVENT(("Line:%d ", *(uint32 *)p));
3229 				p += sizeof(uint32);
3230 				DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
3231 #ifdef PARSE_DONGLE_HOST_EVENT
3232 				DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
3233 #endif /* PARSE_DONGLE_HOST_EVENT */
3234 				break;
3235 			    }
3236 			case SOCRAM_IND_TAG_HEALTH_CHECK:
3237 			   {
3238 				bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
3239 				DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
3240 					ltoh32(dngl_hc->top_module_tag),
3241 					ltoh32(dngl_hc->top_module_len),
3242 					datalen));
3243 				if (DHD_EVENT_ON()) {
3244 					prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
3245 						+ BCM_XTLV_HDR_SIZE, datalen));
3246 				}
3247 #ifdef DHD_LOG_DUMP
3248 				memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
3249 				memcpy(dhdp->health_chk_event_data, p,
3250 						MIN(ltoh32(dngl_hc->top_module_len),
3251 						HEALTH_CHK_BUF_SIZE));
3252 #endif /* DHD_LOG_DUMP */
3253 				p = (uint8 *)dngl_hc->value;
3254 
3255 				switch (ltoh32(dngl_hc->top_module_tag)) {
3256 					case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
3257 					   {
3258 						bcm_dngl_pcie_hc_t *pcie_hc;
3259 						pcie_hc = (bcm_dngl_pcie_hc_t *)p;
3260 						BCM_REFERENCE(pcie_hc);
3261 						if (ltoh32(dngl_hc->top_module_len) <
3262 								sizeof(bcm_dngl_pcie_hc_t)) {
3263 							DHD_ERROR(("Wrong length:%d\n",
3264 								ltoh32(dngl_hc->top_module_len)));
3265 							return;
3266 						}
3267 						DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
3268 							" control:0x%x\n",
3269 							ltoh32(pcie_hc->version),
3270 							ltoh32(pcie_hc->pcie_err_ind_type),
3271 							ltoh32(pcie_hc->pcie_flag),
3272 							ltoh32(pcie_hc->pcie_control_reg)));
3273 #ifdef PARSE_DONGLE_HOST_EVENT
3274 						dhd_print_dongle_hck_id(
3275 							ltoh32(pcie_hc->pcie_err_ind_type),
3276 								hck_pcie_module_to_str);
3277 #endif /* PARSE_DONGLE_HOST_EVENT */
3278 						break;
3279 					   }
3280 #ifdef HCHK_COMMON_SW_EVENT
3281 					case HCHK_SW_ENTITY_WL_PRIMARY:
3282 					case HCHK_SW_ENTITY_WL_SECONDARY:
3283 					{
3284 						bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
3285 
3286 						if (ltoh32(dngl_hc->top_module_len) <
3287 								sizeof(bcm_xtlv_t)) {
3288 							DHD_ERROR(("WL SW HC Wrong length:%d\n",
3289 								ltoh32(dngl_hc->top_module_len)));
3290 							return;
3291 						}
3292 						BCM_REFERENCE(wl_hc);
3293 						DHD_EVENT(("WL SW HC type %d len %d\n",
3294 						ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
3295 #ifdef PARSE_DONGLE_HOST_EVENT
3296 						dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
3297 							hck_sw_id_to_str);
3298 #endif /* PARSE_DONGLE_HOST_EVENT */
3299 						break;
3300 
3301 					}
3302 #endif /* HCHK_COMMON_SW_EVENT */
3303 					default:
3304 					{
3305 						DHD_ERROR(("%s:Unknown module TAG:%d\n",
3306 						  __FUNCTION__,
3307 						  ltoh32(dngl_hc->top_module_tag)));
3308 						break;
3309 					}
3310 				}
3311 				break;
3312 			   }
3313 			default:
3314 			   DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
3315 			   if (p && DHD_EVENT_ON()) {
3316 				   prhex("SOCRAMIND", p, taglen);
3317 			   }
3318 			   break;
3319 		   }
3320 		   break;
3321 		}
3322 	   default:
3323 		DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
3324 		if (p && DHD_EVENT_ON()) {
3325 			prhex("SOCRAMIND", p, datalen);
3326 		}
3327 		break;
3328 	}
3329 #ifdef DHD_FW_COREDUMP
3330 	if (dhdp->memdump_enabled) {
3331 		dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
3332 		if (dhd_socram_dump(dhdp->bus)) {
3333 			DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
3334 		}
3335 	}
3336 #else
3337 	dhd_dbg_send_urgent_evt(dhdp, p, datalen);
3338 #endif /* DHD_FW_COREDUMP */
3339 }
3340 
3341 #endif /* DNGL_EVENT_SUPPORT */
3342 
3343 /* Stub for now. Will become a real function once the shim
3344  * is integrated into Android, Linux, etc.
3345  */
3346 int
3347 wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
3348 {
3349 	return BCME_OK;
3350 }
3351 
3352 int
3353 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
3354 	uint pktlen, void **data_ptr, void *raw_event)
3355 {
3356 	wl_evt_pport_t evt_pport;
3357 	wl_event_msg_t event;
3358 	bcm_event_msg_u_t evu;
3359 	int ret;
3360 
3361 	/* make sure it is a BRCM event pkt and record event data */
3362 	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3363 	if (ret != BCME_OK) {
3364 		return ret;
3365 	}
3366 
3367 	memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
3368 
3369 	/* convert event from network order to host order */
3370 	wl_event_to_host_order(&event);
3371 
3372 	/* record event params to evt_pport */
3373 	evt_pport.dhd_pub = dhd_pub;
3374 	evt_pport.ifidx = ifidx;
3375 	evt_pport.pktdata = pktdata;
3376 	evt_pport.data_ptr = data_ptr;
3377 	evt_pport.raw_event = raw_event;
3378 	evt_pport.data_len = pktlen;
3379 
3380 	ret = wl_event_process_default(&event, &evt_pport);
3381 
3382 	return ret;
3383 } /* wl_event_process */
3384 
3385 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
3386 int
3387 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
3388 {
3389 	int ret;
3390 
3391 	ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
3392 	if (ret != BCME_OK) {
3393 		DHD_ERROR(("%s: Invalid event frame, err = %d\n",
3394 			__FUNCTION__, ret));
3395 	}
3396 
3397 	return ret;
3398 }
3399 
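/*
 * wl_process_host_event(): host-side event demux. After validating the
 * frame it switches on bcm_hdr.usr_subtype: BCM events are copied out and
 * dispatched by event_type below, while dongle (DNGL) events are handled
 * by dngl_host_event() and deliberately not forwarded as BRCM events.
 * The default path resolves *ifidx from the event's ifname and pushes the
 * event up to the external supplicant/authenticator via dhd_event().
 */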
3400 int
3401 wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3402 	wl_event_msg_t *event, void **data_ptr, void *raw_event)
3403 {
3404 	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
3405 	bcm_event_msg_u_t evu;
3406 	uint8 *event_data;
3407 	uint32 type, status, datalen, reason;
3408 	uint16 flags;
3409 	uint evlen;
3410 	int ret;
3411 	uint16 usr_subtype;
3412 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3413 	dhd_if_t *ifp = NULL;
3414 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3415 
3416 	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3417 	if (ret != BCME_OK) {
3418 		return ret;
3419 	}
3420 
3421 	usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
3422 	switch (usr_subtype) {
3423 	case BCMILCP_BCM_SUBTYPE_EVENT:
3424 		memcpy(event, &evu.event, sizeof(wl_event_msg_t));
3425 		*data_ptr = &pvt_data[1];
3426 		break;
3427 	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
3428 #ifdef DNGL_EVENT_SUPPORT
3429 		/* If it is a DNGL event process it first */
3430 		if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
3431 			/*
3432 			 * Return error purposely to prevent DNGL event being processed
3433 			 * as BRCM event
3434 			 */
3435 			return BCME_ERROR;
3436 		}
3437 #endif /* DNGL_EVENT_SUPPORT */
3438 		return BCME_NOTFOUND;
3439 	default:
3440 		return BCME_NOTFOUND;
3441 	}
3442 
3443 	/* start wl_event_msg process */
3444 	event_data = *data_ptr;
3445 	type = ntoh32_ua((void *)&event->event_type);
3446 	flags = ntoh16_ua((void *)&event->flags);
3447 	status = ntoh32_ua((void *)&event->status);
3448 	reason = ntoh32_ua((void *)&event->reason);
3449 	datalen = ntoh32_ua((void *)&event->datalen);
3450 	evlen = datalen + sizeof(bcm_event_t);
3451 
3452 	switch (type) {
3453 #ifdef PROP_TXSTATUS
3454 	case WLC_E_FIFO_CREDIT_MAP:
3455 		dhd_wlfc_enable(dhd_pub);
3456 		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
3457 		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
3458 			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
3459 			event_data[2],
3460 			event_data[3], event_data[4], event_data[5]));
3461 		break;
3462 
3463 	case WLC_E_BCMC_CREDIT_SUPPORT:
3464 		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
3465 		break;
3466 #ifdef LIMIT_BORROW
3467 	case WLC_E_ALLOW_CREDIT_BORROW:
3468 		dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
3469 		break;
3470 #endif /* LIMIT_BORROW */
3471 #endif /* PROP_TXSTATUS */
3472 
3473 	case WLC_E_ULP:
3474 #ifdef DHD_ULP
3475 	{
3476 		wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data;
3477 
3478 		/* Flush and disable console messages */
3479 		if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) {
3480 #ifdef DHD_ULP_NOT_USED
3481 			dhd_bus_ulp_disable_console(dhd_pub);
3482 #endif /* DHD_ULP_NOT_USED */
3483 		}
3484 		if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) {
3485 			dhd_bus_ucode_download(dhd_pub->bus);
3486 		}
3487 	}
3488 #endif /* DHD_ULP */
3489 		break;
3490 	case WLC_E_TDLS_PEER_EVENT:
3491 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
3492 		{
3493 			dhd_tdls_event_handler(dhd_pub, event);
3494 		}
3495 #endif // endif
3496 		break;
3497 
3498 	case WLC_E_IF:
3499 		{
3500 		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
3501 
3502 		/* Ignore the event if NOIF is set */
3503 		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
3504 			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
3505 			return (BCME_UNSUPPORTED);
3506 		}
3507 #ifdef PCIE_FULL_DONGLE
3508 		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
3509 			ifevent->opcode, ifevent->role);
3510 #endif // endif
3511 #ifdef PROP_TXSTATUS
3512 		{
3513 			uint8* ea = &event->addr.octet[0];
3514 			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n",
3515 						  ifevent->ifidx,
3516 						  ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
3517 						  ((ifevent->role == 0) ? "STA":"AP "),
3518 						  MAC2STRDBG(ea)));
3519 			(void)ea;
3520 
3521 			if (ifevent->opcode == WLC_E_IF_CHANGE)
3522 				dhd_wlfc_interface_event(dhd_pub,
3523 					eWLFC_MAC_ENTRY_ACTION_UPDATE,
3524 					ifevent->ifidx, ifevent->role, ea);
3525 			else
3526 				dhd_wlfc_interface_event(dhd_pub,
3527 					((ifevent->opcode == WLC_E_IF_ADD) ?
3528 					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
3529 					ifevent->ifidx, ifevent->role, ea);
3530 
3531 			/* dhd already has created an interface by default, for 0 */
3532 			if (ifevent->ifidx == 0)
3533 				break;
3534 		}
3535 #endif /* PROP_TXSTATUS */
3536 
3537 		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
3538 			if (ifevent->opcode == WLC_E_IF_ADD) {
3539 				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
3540 					event->addr.octet)) {
3541 
3542 					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d  %s\n",
3543 						__FUNCTION__, ifevent->ifidx, event->ifname));
3544 					return (BCME_ERROR);
3545 				}
3546 			} else if (ifevent->opcode == WLC_E_IF_DEL) {
3547 #ifdef PCIE_FULL_DONGLE
3548 				/* Delete flowrings unconditionally for i/f delete */
3549 				dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3550 					event->ifname));
3551 #endif /* PCIE_FULL_DONGLE */
3552 				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
3553 					event->addr.octet);
3554 			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
3555 #ifdef WL_CFG80211
3556 				dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
3557 					event->addr.octet);
3558 #endif /* WL_CFG80211 */
3559 			}
3560 		} else {
3561 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
3562 			DHD_INFO(("%s: Invalid ifidx %d for %s\n",
3563 			   __FUNCTION__, ifevent->ifidx, event->ifname));
3564 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
3565 		}
3566 			/* send up the if event: btamp user needs it */
3567 			*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3568 			/* push up to external supp/auth */
3569 			dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3570 		break;
3571 	}
3572 #ifdef WL_CFG80211
3573 	case WLC_E_OVERTEMP:
3574 	{
3575 		wl_cfg80211_overtemp_event(dhd_idx2net(dhd_pub, event->ifidx));
3576 		break;
3577 	}
3578 #endif /* WL_CFG80211 */
3579 
3580 	case WLC_E_NDIS_LINK:
3581 		break;
3582 	case WLC_E_PFN_NET_FOUND:
3583 	case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
3584 	case WLC_E_PFN_NET_LOST:
3585 		break;
3586 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
3587 	case WLC_E_PFN_BSSID_NET_FOUND:
3588 	case WLC_E_PFN_BEST_BATCHING:
3589 		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
3590 		break;
3591 #endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
3592 #if defined(RTT_SUPPORT)
3593 	case WLC_E_PROXD:
3594 #ifndef WL_CFG80211
3595 		dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
3596 #endif /* WL_CFG80211 */
3597 		break;
3598 #endif /* RTT_SUPPORT */
3599 		/* These are what external supplicant/authenticator wants */
3600 	case WLC_E_ASSOC_IND:
3601 	case WLC_E_AUTH_IND:
3602 	case WLC_E_REASSOC_IND:
3603 		dhd_findadd_sta(dhd_pub,
3604 			dhd_ifname2idx(dhd_pub->info, event->ifname),
3605 			&event->addr.octet);
3606 		break;
3607 #if defined(DHD_FW_COREDUMP)
3608 	case WLC_E_PSM_WATCHDOG:
3609 		DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
3610 		if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
3611 			DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
3612 		}
3613 	break;
3614 #endif // endif
3615 	case WLC_E_NATOE_NFCT:
3616 #ifdef WL_NATOE
3617 		DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
3618 		dhd_natoe_ct_event(dhd_pub, event_data);
3619 #endif /* WL_NATOE */
3620 	break;
3621 #ifdef WL_NAN
3622 	case WLC_E_SLOTTED_BSS_PEER_OP:
3623 		DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
3624 			"" MACDBG ", status = %d\n",
3625 			__FUNCTION__, MAC2STRDBG(event->addr.octet), status));
3626 		if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
3627 			dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3628 				event->ifname), &event->addr.octet);
3629 		} else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
3630 			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3631 			BCM_REFERENCE(ifindex);
3632 			dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3633 				event->ifname), &event->addr.octet);
3634 #ifdef PCIE_FULL_DONGLE
3635 			dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3636 				(char *)&event->addr.octet[0]);
3637 #endif // endif
3638 		} else {
3639 			DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
3640 				__FUNCTION__, status));
3641 		}
3642 		break;
3643 #endif /* WL_NAN */
3644 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3645 	case WLC_E_REASSOC:
3646 		ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3647 
3648 		if (!ifp)
3649 			break;
3650 
3651 		/* Consider STA role only since roam is disabled on P2P GC.
3652 		 * Drop EAPOL M1 frame only if roam is done to same BSS.
3653 		 */
3654 		if ((status == WLC_E_STATUS_SUCCESS) &&
3655 			IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
3656 			wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
3657 			ifp->recv_reassoc_evt = TRUE;
3658 		}
3659 		break;
3660 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3661 #ifdef BCMSDIO
3662 	case WLC_E_AP_STARTED:
3663 		if (FW_SUPPORTED(dhd_pub, idsup)) {
3664 			dhd_pub->info->iflist[*ifidx]->role = WLC_E_IF_ROLE_AP;
3665 		}
3666 		break;
3667 #endif /* BCMSDIO */
3668 	case WLC_E_LINK:
3669 #ifdef PCIE_FULL_DONGLE
3670 		if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3671 			event->ifname), (uint8)flags) != BCME_OK) {
3672 			DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
3673 				__FUNCTION__));
3674 			break;
3675 		}
3676 		if (!flags) {
3677 			DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
3678 				__FUNCTION__));
3679 			/* Delete all sta and flowrings */
3680 			dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
3681 			dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3682 				event->ifname));
3683 		}
3684 #endif /* PCIE_FULL_DONGLE */
3685 		/* fall through */
3686 	case WLC_E_DEAUTH:
3687 	case WLC_E_DEAUTH_IND:
3688 	case WLC_E_DISASSOC:
3689 	case WLC_E_DISASSOC_IND:
3690 #ifdef PCIE_FULL_DONGLE
3691 		if (type != WLC_E_LINK) {
3692 			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3693 			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
3694 			uint8 del_sta = TRUE;
3695 #ifdef WL_CFG80211
3696 			if (role == WLC_E_IF_ROLE_STA &&
3697 				!wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
3698 					!wl_cfg80211_is_event_from_connected_bssid(
3699 						dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
3700 				del_sta = FALSE;
3701 			}
3702 #endif /* WL_CFG80211 */
3703 			DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
3704 				__FUNCTION__, type, flags, status, role, del_sta));
3705 
3706 			if (del_sta) {
3707 				DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
3708 					__FUNCTION__, MAC2STRDBG(event->addr.octet)));
3709 
3710 				dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3711 					event->ifname), &event->addr.octet);
3712 				/* Delete all flowrings for STA and P2P Client */
3713 				if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
3714 					dhd_flow_rings_delete(dhd_pub, ifindex);
3715 				} else {
3716 					dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3717 						(char *)&event->addr.octet[0]);
3718 				}
3719 			}
3720 		}
3721 #endif /* PCIE_FULL_DONGLE */
3722 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3723 		/* fall through */
3724 		ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3725 		if (ifp) {
3726 			ifp->recv_reassoc_evt = FALSE;
3727 			ifp->post_roam_evt = FALSE;
3728 		}
3729 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3730 		/* fall through */
3731 	default:
3732 		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3733 		/* push up to external supp/auth */
3734 		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3735 		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
3736 			__FUNCTION__, type, flags, status));
3737 		BCM_REFERENCE(flags);
3738 		BCM_REFERENCE(status);
3739 		BCM_REFERENCE(reason);
3740 
3741 		break;
3742 	}
3743 #if defined(STBAP)
3744 	/* For routers, EAPD will be working on these events.
3745 	 * Overwrite the interface name so that the event is pushed
3746 	 * to the host with its registered interface name.
3747 	 */
3748 	memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
3749 #endif // endif
3750 
3751 #ifdef DHD_STATUS_LOGGING
3752 	if (dhd_pub->statlog) {
3753 		dhd_statlog_process_event(dhd_pub, type, *ifidx,
3754 			status, reason, flags);
3755 	}
3756 #endif /* DHD_STATUS_LOGGING */
3757 
3758 #ifdef SHOW_EVENTS
3759 	if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
3760 		wl_show_host_event(dhd_pub, event,
3761 			(void *)event_data, raw_event, dhd_pub->enable_log);
3762 	}
3763 #endif /* SHOW_EVENTS */
3764 
3765 	return (BCME_OK);
3766 } /* wl_process_host_event */
3767 
3768 int
3769 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3770 	wl_event_msg_t *event, void **data_ptr, void *raw_event)
3771 {
3772 	return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
3773 			raw_event);
3774 }
3775 
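/*
 * dhd_print_buf(): debug helper that dumps 'len' bytes as two-digit hex,
 * 'bytes_per_line' values per line separated by ':' (0 prints the whole
 * buffer on one line). Compiled only when DHD_DEBUG is defined.
 */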
3776 void
3777 dhd_print_buf(void *pbuf, int len, int bytes_per_line)
3778 {
3779 #ifdef DHD_DEBUG
3780 	int i, j = 0;
3781 	unsigned char *buf = pbuf;
3782 
3783 	if (bytes_per_line == 0) {
3784 		bytes_per_line = len;
3785 	}
3786 
3787 	for (i = 0; i < len; i++) {
3788 		printf("%2.2x", *buf++);
3789 		j++;
3790 		if (j == bytes_per_line) {
3791 			printf("\n");
3792 			j = 0;
3793 		} else {
3794 			printf(":");
3795 		}
3796 	}
3797 	printf("\n");
3798 #endif /* DHD_DEBUG */
3799 }
3800 #ifndef strtoul
3801 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
3802 #endif // endif
3803 
3804 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
3805 /* Convert user's input in hex pattern to byte-size mask */
3806 int
3807 wl_pattern_atoh(char *src, char *dst)
3808 {
3809 	int i;
3810 	if (strncmp(src, "0x", 2) != 0 &&
3811 	    strncmp(src, "0X", 2) != 0) {
3812 		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3813 		return -1;
3814 	}
3815 	src = src + 2; /* Skip past 0x */
3816 	if (strlen(src) % 2 != 0) {
3817 		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3818 		return -1;
3819 	}
3820 	for (i = 0; *src != '\0'; i++) {
3821 		char num[3];
3822 		bcm_strncpy_s(num, sizeof(num), src, 2);
3823 		num[2] = '\0';
3824 		dst[i] = (uint8)strtoul(num, NULL, 16);
3825 		src += 2;
3826 	}
3827 	return i;
3828 }
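/*
 * Example (illustrative): wl_pattern_atoh("0x00ff12", dst) writes
 * dst[0]=0x00, dst[1]=0xff, dst[2]=0x12 and returns 3. Input without a
 * leading "0x"/"0X" or with an odd number of hex digits returns -1.
 * pattern_atoh_len() below behaves the same but also rejects patterns
 * longer than 'len' bytes.
 */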
3829 
3830 int
3831 pattern_atoh_len(char *src, char *dst, int len)
3832 {
3833 	int i;
3834 	if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
3835 			strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
3836 		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3837 		return -1;
3838 	}
3839 	src = src + HD_PREFIX_SIZE; /* Skip past 0x */
3840 	if (strlen(src) % HD_BYTE_SIZE != 0) {
3841 		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3842 		return -1;
3843 	}
3844 	for (i = 0; *src != '\0'; i++) {
3845 		char num[HD_BYTE_SIZE + 1];
3846 
3847 		if (i > len - 1) {
3848 			DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
3849 			return -1;
3850 		}
3851 		bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
3852 		num[HD_BYTE_SIZE] = '\0';
3853 		dst[i] = (uint8)strtoul(num, NULL, 16);
3854 		src += HD_BYTE_SIZE;
3855 	}
3856 	return i;
3857 }
3858 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
3859 
3860 #ifdef PKT_FILTER_SUPPORT
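/*
 * dhd_pktfilter_offload_enable(): builds the "pkt_filter_enable" iovar in
 * 'buf' as the NUL-terminated iovar name followed by a
 * wl_pkt_filter_enable_t { id, enable }, sends it via WLC_SET_VAR (on
 * failure it calls dhd_set_packet_filter() and retries once), and then
 * programs "pkt_filter_mode" with 'master_mode'.
 */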
3861 void
3862 dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
3863 {
3864 	char				*argv[8];
3865 	int					i = 0;
3866 	const char			*str;
3867 	int					buf_len;
3868 	int					str_len;
3869 	char				*arg_save = 0, *arg_org = 0;
3870 	int					rc;
3871 	char				buf[32] = {0};
3872 	wl_pkt_filter_enable_t	enable_parm;
3873 	wl_pkt_filter_enable_t	* pkt_filterp;
3874 
3875 	if (!arg)
3876 		return;
3877 
3878 	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
3879 		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3880 		goto fail;
3881 	}
3882 	arg_org = arg_save;
3883 	memcpy(arg_save, arg, strlen(arg) + 1);
3884 
3885 	argv[i] = bcmstrtok(&arg_save, " ", 0);
3886 
3887 	i = 0;
3888 	if (argv[i] == NULL) {
3889 		DHD_ERROR(("No args provided\n"));
3890 		goto fail;
3891 	}
3892 
3893 	str = "pkt_filter_enable";
3894 	str_len = strlen(str);
3895 	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
3896 	buf[ sizeof(buf) - 1 ] = '\0';
3897 	buf_len = str_len + 1;
3898 
3899 	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
3900 
3901 	/* Parse packet filter id. */
3902 	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
3903 
3904 	/* Parse enable/disable value. */
3905 	enable_parm.enable = htod32(enable);
3906 
3907 	buf_len += sizeof(enable_parm);
3908 	memcpy((char *)pkt_filterp,
3909 	       &enable_parm,
3910 	       sizeof(enable_parm));
3911 
3912 	/* Enable/disable the specified filter. */
3913 	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3914 	rc = rc >= 0 ? 0 : rc;
3915 	if (rc) {
3916 		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3917 		__FUNCTION__, arg, rc));
3918 		dhd_set_packet_filter(dhd);
3919 		rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3920 		rc = rc >= 0 ? 0 : rc;
3921 		if (rc) {
3922 			DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
3923 			__FUNCTION__, arg, rc));
3924 		} else {
3925 			DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
3926 			__FUNCTION__, arg));
3927 		}
3928 	}
3929 	else
3930 		DHD_TRACE(("%s: successfully added pktfilter %s\n",
3931 		__FUNCTION__, arg));
3932 
3933 	/* Control the master mode */
3934 	rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
3935 		master_mode, WLC_SET_VAR, TRUE, 0);
3936 	rc = rc >= 0 ? 0 : rc;
3937 	if (rc)
3938 		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3939 		__FUNCTION__, arg, rc));
3940 
3941 fail:
3942 	if (arg_org)
3943 		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
3944 }
3945 
3946 /* Packet filter section: extended filters have named offsets, add table here */
3947 typedef struct {
3948 	char *name;
3949 	uint16 base;
3950 } wl_pfbase_t;
3951 
3952 static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
3953 
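/*
 * wl_pkt_filter_base_parse(): case-insensitive match of 'name' against the
 * WL_PKT_FILTER_BASE_NAMES table; returns the corresponding base id used in
 * "[base:]offset" filter specs, or -1 if the name is unknown.
 */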
3954 static int
3955 wl_pkt_filter_base_parse(char *name)
3956 {
3957 	uint i;
3958 	char *bname, *uname;
3959 
3960 	for (i = 0; i < ARRAYSIZE(basenames); i++) {
3961 		bname = basenames[i].name;
3962 		for (uname = name; *uname; bname++, uname++) {
3963 			if (*bname != bcm_toupper(*uname)) {
3964 				break;
3965 			}
3966 		}
3967 		if (!*uname && !*bname) {
3968 			break;
3969 		}
3970 	}
3971 
3972 	if (i < ARRAYSIZE(basenames)) {
3973 		return basenames[i].base;
3974 	} else {
3975 		return -1;
3976 	}
3977 }
3978 
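/*
 * dhd_pktfilter_offload_set(): parses a space-separated filter spec of the
 * form "<id> <polarity> <type> ..." into a "pkt_filter_add" iovar. For
 * pattern filters (type 0/1) the remainder is "<offset> <mask> <pattern>";
 * for pattern-list filters (type 2/6) it is a repeated
 * "[base:]<offset> <mask> [!]<pattern>" sequence. Mask and pattern are
 * 0x-prefixed hex strings and must decode to the same length.
 */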
3979 void
3980 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
3981 {
3982 	const char			*str;
3983 	wl_pkt_filter_t			pkt_filter;
3984 	wl_pkt_filter_t			*pkt_filterp;
3985 	int				buf_len;
3986 	int				str_len;
3987 	int				rc = -1;
3988 	uint32				mask_size;
3989 	uint32				pattern_size;
3990 	char				*argv[MAXPKT_ARG] = {0}, * buf = 0;
3991 	int				i = 0;
3992 	char				*arg_save = 0, *arg_org = 0;
3993 
3994 	if (!arg)
3995 		return;
3996 
3997 	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
3998 		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3999 		goto fail;
4000 	}
4001 
4002 	arg_org = arg_save;
4003 
4004 	if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
4005 		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
4006 		goto fail;
4007 	}
4008 
4009 	memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
4010 	memcpy(arg_save, arg, strlen(arg) + 1);
4011 
4012 	if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
4013 		DHD_ERROR(("Not enough buffer: arg length %d exceeds %d\n", (int)strlen(arg), MAX_PKTFLT_BUF_SIZE));
4014 		goto fail;
4015 	}
4016 
4017 	argv[i] = bcmstrtok(&arg_save, " ", 0);
4018 	while (argv[i++]) {
4019 		if (i >= MAXPKT_ARG) {
4020 			DHD_ERROR(("Invalid args provided\n"));
4021 			goto fail;
4022 		}
4023 		argv[i] = bcmstrtok(&arg_save, " ", 0);
4024 	}
4025 
4026 	i = 0;
4027 	if (argv[i] == NULL) {
4028 		DHD_ERROR(("No args provided\n"));
4029 		goto fail;
4030 	}
4031 
4032 	str = "pkt_filter_add";
4033 	str_len = strlen(str);
4034 	bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
4035 	buf[ str_len ] = '\0';
4036 	buf_len = str_len + 1;
4037 
4038 	pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
4039 
4040 	/* Parse packet filter id. */
4041 	pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
4042 
4043 	if (argv[++i] == NULL) {
4044 		DHD_ERROR(("Polarity not provided\n"));
4045 		goto fail;
4046 	}
4047 
4048 	/* Parse filter polarity. */
4049 	pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
4050 
4051 	if (argv[++i] == NULL) {
4052 		DHD_ERROR(("Filter type not provided\n"));
4053 		goto fail;
4054 	}
4055 
4056 	/* Parse filter type. */
4057 	pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
4058 
4059 	if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
4060 		if (argv[++i] == NULL) {
4061 			DHD_ERROR(("Offset not provided\n"));
4062 			goto fail;
4063 		}
4064 
4065 		/* Parse pattern filter offset. */
4066 		pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
4067 
4068 		if (argv[++i] == NULL) {
4069 			DHD_ERROR(("Bitmask not provided\n"));
4070 			goto fail;
4071 		}
4072 
4073 		/* Parse pattern filter mask. */
4074 		rc  = wl_pattern_atoh(argv[i],
4075 			(char *) pkt_filterp->u.pattern.mask_and_pattern);
4076 
4077 		if (rc == -1) {
4078 			DHD_ERROR(("Rejecting: %s\n", argv[i]));
4079 			goto fail;
4080 		}
4081 		mask_size = htod32(rc);
4082 		if (argv[++i] == NULL) {
4083 			DHD_ERROR(("Pattern not provided\n"));
4084 			goto fail;
4085 		}
4086 
4087 		/* Parse pattern filter pattern. */
4088 		rc = wl_pattern_atoh(argv[i],
4089 			(char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
4090 
4091 		if (rc == -1) {
4092 			DHD_ERROR(("Rejecting: %s\n", argv[i]));
4093 			goto fail;
4094 		}
4095 		pattern_size = htod32(rc);
4096 		if (mask_size != pattern_size) {
4097 			DHD_ERROR(("Mask and pattern not the same size\n"));
4098 			goto fail;
4099 		}
4100 
4101 		pkt_filter.u.pattern.size_bytes = mask_size;
4102 		buf_len += WL_PKT_FILTER_FIXED_LEN;
4103 		buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
4104 
4105 		/* Filter attributes are set in a local variable (pkt_filter) and
4106 		 * then memcpy'ed into the ioctl buffer (pkt_filterp) since there is no
4107 		 * guarantee that the buffer is properly aligned.
4108 		 */
4109 		memcpy((char *)pkt_filterp,
4110 			&pkt_filter,
4111 			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
4112 	} else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
4113 		int list_cnt = 0;
4114 		char *endptr = NULL;
4115 		wl_pkt_filter_pattern_listel_t *pf_el =
4116 			(wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
4117 
4118 		while (argv[++i] != NULL) {
4119 			/* Check valid buffer size. */
4120 			if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
4121 				DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
4122 				goto fail;
4123 			}
4124 
4125 			/* Parse pattern filter base and offset. */
4126 			if (bcm_isdigit(*argv[i])) {
4127 				/* Numeric base */
4128 				rc = strtoul(argv[i], &endptr, 0);
4129 			} else {
4130 				endptr = strchr(argv[i], ':');
4131 				if (endptr) {
4132 					*endptr = '\0';
4133 					rc = wl_pkt_filter_base_parse(argv[i]);
4134 					if (rc == -1) {
4135 						printf("Invalid base %s\n", argv[i]);
4136 						goto fail;
4137 					}
4138 					*endptr = ':';
4139 				}
4140 			}
4141 
4142 			if (endptr == NULL) {
4143 				printf("Invalid [base:]offset format: %s\n", argv[i]);
4144 				goto fail;
4145 			}
4146 
4147 			if (*endptr == ':') {
4148 				pf_el->base_offs = htod16(rc);
4149 				rc = strtoul(endptr + 1, &endptr, 0);
4150 			} else {
4151 				/* Must have had a numeric offset only */
4152 				pf_el->base_offs = htod16(0);
4153 			}
4154 
4155 			if (*endptr) {
4156 				printf("Invalid [base:]offset format: %s\n", argv[i]);
4157 				goto fail;
4158 			}
4159 			if (rc > 0x0000FFFF) {
4160 				printf("Offset too large\n");
4161 				goto fail;
4162 			}
4163 			pf_el->rel_offs = htod16(rc);
4164 
4165 			/* Clear match_flag (may be set in parsing which follows) */
4166 			pf_el->match_flags = htod16(0);
4167 
4168 			/* Parse pattern filter mask and pattern directly into ioctl buffer */
4169 			if (argv[++i] == NULL) {
4170 				printf("Bitmask not provided\n");
4171 				goto fail;
4172 			}
4173 			rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
4174 			if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4175 				printf("Rejecting: %s\n", argv[i]);
4176 				goto fail;
4177 			}
4178 			mask_size = htod16(rc);
4179 
4180 			if (argv[++i] == NULL) {
4181 				printf("Pattern not provided\n");
4182 				goto fail;
4183 			}
4184 
4185 			if (*argv[i] == '!') {
4186 				pf_el->match_flags =
4187 					htod16(WL_PKT_FILTER_MFLAG_NEG);
4188 				(argv[i])++;
4189 			}
4190 			if (*argv[i] == '\0') {
4191 				printf("Pattern not provided\n");
4192 				goto fail;
4193 			}
4194 			rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
4195 			if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4196 				printf("Rejecting: %s\n", argv[i]);
4197 				goto fail;
4198 			}
4199 			pattern_size = htod16(rc);
4200 
4201 			if (mask_size != pattern_size) {
4202 				printf("Mask and pattern not the same size\n");
4203 				goto fail;
4204 			}
4205 
4206 			pf_el->size_bytes = mask_size;
4207 
4208 			/* Account for the size of this pattern element */
4209 			buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
4210 
4211 			/* Move to next element location in ioctl buffer */
4212 			pf_el = (wl_pkt_filter_pattern_listel_t*)
4213 				((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
4214 
4215 			/* Count list element */
4216 			list_cnt++;
4217 		}
4218 
4219 		/* Account for initial fixed size, and copy initial fixed fields */
4220 		buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
4221 
4222 		if (buf_len > MAX_PKTFLT_BUF_SIZE) {
4223 			DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
4224 			goto fail;
4225 		}
4226 		/* Update list count and total size */
4227 		pkt_filter.u.patlist.list_cnt = list_cnt;
4228 		pkt_filter.u.patlist.PAD1[0] = 0;
4229 		pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
4230 		pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
4231 
4232 		memcpy((char *)pkt_filterp, &pkt_filter,
4233 			WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
4234 	} else {
4235 		DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
4236 		goto fail;
4237 	}
4238 
4239 	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
4240 	rc = rc >= 0 ? 0 : rc;
4241 
4242 	if (rc)
4243 		DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
4244 		__FUNCTION__, arg, rc));
4245 	else
4246 		DHD_TRACE(("%s: successfully added pktfilter %s\n",
4247 		__FUNCTION__, arg));
4248 
4249 fail:
4250 	if (arg_org)
4251 		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
4252 
4253 	if (buf)
4254 		MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
4255 }
4256 
4257 void
4258 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
4259 {
4260 	int ret;
4261 
4262 	ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
4263 		id, WLC_SET_VAR, TRUE, 0);
4264 	if (ret < 0) {
4265 		DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
4266 			__FUNCTION__, id, ret));
4267 	}
4268 }
4269 #endif /* PKT_FILTER_SUPPORT */
4270 
4271 /* ========================== */
4272 /* ==== ARP OFFLOAD SUPPORT = */
4273 /* ========================== */
4274 #ifdef ARP_OFFLOAD_SUPPORT
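/*
 * ARP offload helpers: dhd_arp_offload_set() programs the "arp_ol" mode
 * bitmask, dhd_arp_offload_enable() toggles "arpoe" and caches the
 * firmware's "arp_version", and the remaining helpers manage the ARP and
 * host-IP tables through "arp_table_clear", "arp_hostip_clear" and
 * "arp_hostip". With arp_version 1 firmware the per-interface index is
 * ignored (forced to 0).
 */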
4275 void
4276 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
4277 {
4278 	int retcode;
4279 
4280 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
4281 		arp_mode, WLC_SET_VAR, TRUE, 0);
4282 
4283 	retcode = retcode >= 0 ? 0 : retcode;
4284 	if (retcode)
4285 		DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
4286 			__FUNCTION__, arp_mode, retcode));
4287 	else
4288 		DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
4289 			__FUNCTION__, arp_mode));
4290 }
4291 
4292 void
4293 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
4294 {
4295 	int retcode;
4296 #ifdef WL_CFG80211
4297 	/* Do not enable ARP offload while non-STA (virtual) interfaces are active */
4298 	if (arp_enable &&
4299 		(wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
4300 		DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
4301 			__FUNCTION__));
4302 		return;
4303 	}
4304 #endif /* WL_CFG80211 */
4305 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
4306 		arp_enable, WLC_SET_VAR, TRUE, 0);
4307 
4308 	retcode = retcode >= 0 ? 0 : retcode;
4309 	if (retcode)
4310 		DHD_TRACE(("%s: failed to enable ARP offload to %d, retcode = %d\n",
4311 			__FUNCTION__, arp_enable, retcode));
4312 	else
4313 		DHD_TRACE(("%s: successfully enabled ARP offload to %d\n",
4314 			__FUNCTION__, arp_enable));
4315 	if (arp_enable) {
4316 		uint32 version;
4317 		retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
4318 			&version, WLC_GET_VAR, FALSE, 0);
4319 		if (retcode) {
4320 			DHD_INFO(("%s: failed to get ARP version (assuming version 1), retcode = %d\n",
4321 				__FUNCTION__, retcode));
4322 			dhd->arp_version = 1;
4323 		}
4324 		else {
4325 			DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
4326 			dhd->arp_version = version;
4327 		}
4328 	}
4329 }
4330 
4331 void
4332 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
4333 {
4334 	int ret = 0;
4335 
4336 	if (dhd == NULL) return;
4337 	if (dhd->arp_version == 1)
4338 		idx = 0;
4339 
4340 	ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
4341 	if (ret < 0)
4342 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4343 }
4344 
4345 void
4346 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
4347 {
4348 	int ret = 0;
4349 
4350 	if (dhd == NULL) return;
4351 	if (dhd->arp_version == 1)
4352 		idx = 0;
4353 
4354 	ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
4355 	if (ret < 0)
4356 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4357 }
4358 
4359 void
4360 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
4361 {
4362 	int ret;
4363 
4364 	if (dhd == NULL) return;
4365 	if (dhd->arp_version == 1)
4366 		idx = 0;
4367 
4368 	ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
4369 			NULL, 0, TRUE);
4370 	if (ret)
4371 		DHD_TRACE(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
4372 	else
4373 		DHD_TRACE(("%s: ARP host ipaddr entry added\n",
4374 		__FUNCTION__));
4375 }
4376 
4377 int
4378 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
4379 {
4380 	int ret, i;
4381 	uint32 *ptr32 = buf;
4382 	bool clr_bottom = FALSE;
4383 
4384 	if (!buf)
4385 		return -1;
4386 	if (dhd == NULL) return -1;
4387 	if (dhd->arp_version == 1)
4388 		idx = 0;
4389 
4390 	ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
4391 			FALSE);
4392 	if (ret) {
4393 		DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
4394 		__FUNCTION__, ret));
4395 
4396 		return -1;
4397 	}
4398 
4399 	/* zero out the remainder of the buffer after the first empty entry */
4400 	for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
4401 		if (!clr_bottom) {
4402 			if (*ptr32 == 0)
4403 				clr_bottom = TRUE;
4404 		} else {
4405 			*ptr32 = 0;
4406 		}
4407 		ptr32++;
4408 	}
4409 
4410 	return 0;
4411 }
4412 #endif /* ARP_OFFLOAD_SUPPORT  */
4413 
4414 /*
4415  * Neighbor Discovery Offload: enable NDO feature
4416  * Called  by ipv6 event handler when interface comes up/goes down
4417  */
4418 int
4419 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
4420 {
4421 	int retcode;
4422 
4423 	if (dhd == NULL)
4424 		return -1;
4425 
4426 #if defined(WL_CFG80211) && defined(WL_NAN)
4427 	if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
4428 		/* If nan dp is active, skip NDO */
4429 		DHD_INFO(("Active NAN DP, skip NDO\n"));
4430 		return 0;
4431 	}
4432 #endif /* WL_CFG80211 && WL_NAN */
4433 #ifdef WL_CFG80211
4434 	if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
4435 		/* NDO disable on STA+SOFTAP mode */
4436 		ndo_enable = FALSE;
4437 	}
4438 #endif /* WL_CFG80211 */
4439 	retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
4440 		ndo_enable, WLC_SET_VAR, TRUE, 0);
4441 	if (retcode)
4442 		DHD_ERROR(("%s: failed to enable ndo to %d, retcode = %d\n",
4443 			__FUNCTION__, ndo_enable, retcode));
4444 	else
4445 		DHD_TRACE(("%s: successfully enabled ndo offload to %d\n",
4446 			__FUNCTION__, ndo_enable));
4447 
4448 	return retcode;
4449 }
4450 
4451 /*
4452  * Neighbor Discovery Offload: add a host IPv6 address
4453  * Called by ipv6 event handler when interface comes up
4454  */
4455 int
4456 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
4457 {
4458 	int iov_len = 0;
4459 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4460 	int retcode;
4461 
4462 	if (dhd == NULL)
4463 		return -1;
4464 
4465 	iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
4466 		IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
4467 	if (!iov_len) {
4468 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4469 			__FUNCTION__, sizeof(iovbuf)));
4470 		return -1;
4471 	}
4472 	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4473 
4474 	if (retcode)
4475 		DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
4476 		__FUNCTION__, retcode));
4477 	else
4478 		DHD_TRACE(("%s: ndo ipaddr entry added \n",
4479 		__FUNCTION__));
4480 
4481 	return retcode;
4482 }
4483 
4484 #ifdef REVERSE_AIFSN
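/*
 * check_reverse_aifsn_condition(): reads the per-AC EDCF parameters via the
 * "wme_ac_sta" iovar and sets dhd->aifsn_reverse when AC_VI reports an
 * AIFSN of 10, which the REVERSE_AIFSN workaround treats as a reversed
 * AIFSN setting.
 */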
4485 int
4486 check_reverse_aifsn_condition(dhd_pub_t *dhd, struct net_device *ndev)
4487 {
4488 	int iov_len = 0;
4489 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4490 	edcf_acparam_t *ac_params = NULL;
4491 	int retcode;
4492 	u8 aci, aifsn;
4493 	int ifidx;
4494 
4495 	if (dhd == NULL)
4496 		return -1;
4497 
4498 	ifidx = dhd_net2idx(dhd->info, ndev);
4499 	if (ifidx == DHD_BAD_IF)
4500 		return -1;
4501 
4502 	dhd->aifsn_reverse = FALSE;
4503 
4504 	strcpy(iovbuf, "wme_ac_sta");
4505 	iov_len = sizeof(iovbuf);
4506 
4507 	retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, iov_len, FALSE, ifidx);
4508 	if (retcode) {
4509 		DHD_ERROR(("%s: could not get wme_ac_sta params (%d)\n",
4510 			__FUNCTION__, retcode));
4511 		return -1;
4512 	}
4513 
4514 	ac_params = (edcf_acparam_t *)iovbuf;
4515 	for (aci = 0; aci < AC_COUNT; aci++) {
4516 		aifsn = ac_params[aci].ACI & EDCF_AIFSN_MASK;
4517 		if (aci == AC_VI && aifsn == 10) {
4518 			DHD_ERROR(("[%s] Reverse AIFSN for AC_VI:10 \n", __FUNCTION__));
4519 			dhd->aifsn_reverse = TRUE;
4520 			break;
4521 		}
4522 	}
4523 	return 0;
4524 }
4525 #endif /* REVERSE_AIFSN */
4526 
4527 /*
4528  * Neighbor Discovery Offload: clear host IPv6 addresses
4529  * Called by ipv6 event handler when interface goes down
4530  */
4531 int
4532 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
4533 {
4534 	int iov_len = 0;
4535 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4536 	int retcode;
4537 
4538 	if (dhd == NULL)
4539 		return -1;
4540 
4541 	iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
4542 		0, iovbuf, sizeof(iovbuf));
4543 	if (!iov_len) {
4544 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4545 			__FUNCTION__, sizeof(iovbuf)));
4546 		return -1;
4547 	}
4548 	retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4549 
4550 	if (retcode)
4551 		DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
4552 		__FUNCTION__, retcode));
4553 	else
4554 		DHD_TRACE(("%s: ndo ipaddr entry removed \n",
4555 		__FUNCTION__));
4556 
4557 	return retcode;
4558 }
4559 /* Enhanced ND offload */
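/*
 * Enhanced ND offload uses the versioned "nd_hostip" iovar: each request is
 * a wl_nd_hostip_t with { version, op_type, length } followed, for add or
 * delete-by-address, by a wl_nd_host_ip_addr_t { ip_addr, type }.
 * dhd_ndo_get_version() returns the iovar version reported by firmware, or
 * 0 when the versioned form is not supported.
 */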
4560 uint16
4561 dhd_ndo_get_version(dhd_pub_t *dhdp)
4562 {
4563 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4564 	wl_nd_hostip_t ndo_get_ver;
4565 	int iov_len;
4566 	int retcode;
4567 	uint16 ver = 0;
4568 
4569 	if (dhdp == NULL) {
4570 		return BCME_ERROR;
4571 	}
4572 
4573 	memset(&iovbuf, 0, sizeof(iovbuf));
4574 	ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
4575 	ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
4576 	ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
4577 	ndo_get_ver.u.version = 0;
4578 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
4579 		WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
4580 
4581 	if (!iov_len) {
4582 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4583 			__FUNCTION__, sizeof(iovbuf)));
4584 		return BCME_ERROR;
4585 	}
4586 
4587 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
4588 
4589 	if (retcode) {
4590 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4591 		/* ver iovar not supported. NDO version is 0 */
4592 		ver = 0;
4593 	} else {
4594 		wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
4595 
4596 		if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
4597 				(dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
4598 				(dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
4599 					+ sizeof(uint16))) {
4600 			/* nd_hostip iovar version */
4601 			ver = dtoh16(ndo_ver_ret->u.version);
4602 		}
4603 
4604 		DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
4605 	}
4606 
4607 	return ver;
4608 }
4609 
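/*
 * dhd_ndo_add_ip_with_type() and the remove-by-addr/by-type variants below
 * issue WL_ND_HOSTIP_OP_ADD / OP_DEL / OP_DEL_UC / OP_DEL_AC requests. When
 * an add returns BCME_NORESOURCE (host IP table full) and NDO_CONFIG_SUPPORT
 * is enabled, ND offload is deactivated and ndo_host_ip_overflow is set.
 */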
4610 int
4611 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
4612 {
4613 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4614 	wl_nd_hostip_t ndo_add_addr;
4615 	int iov_len;
4616 	int retcode;
4617 
4618 	if (dhdp == NULL || ipv6addr == 0) {
4619 		return BCME_ERROR;
4620 	}
4621 
4622 	/* wl_nd_hostip_t fixed param */
4623 	ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4624 	ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
4625 	ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4626 	/* wl_nd_host_ip_addr_t param for add */
4627 	memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4628 	ndo_add_addr.u.host_ip.type = type;
4629 
4630 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
4631 		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4632 	if (!iov_len) {
4633 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4634 			__FUNCTION__, sizeof(iovbuf)));
4635 		return BCME_ERROR;
4636 	}
4637 
4638 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4639 	if (retcode) {
4640 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4641 #ifdef NDO_CONFIG_SUPPORT
4642 		if (retcode == BCME_NORESOURCE) {
4643 			/* number of host ip addr exceeds FW capacity, Deactivate ND offload */
4644 			DHD_INFO(("%s: Host IP count exceeds device capacity, "
4645 				"ND offload deactivated\n", __FUNCTION__));
4646 			dhdp->ndo_host_ip_overflow = TRUE;
4647 			dhd_ndo_enable(dhdp, FALSE);
4648 		}
4649 #endif /* NDO_CONFIG_SUPPORT */
4650 	} else {
4651 		DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
4652 	}
4653 
4654 	return retcode;
4655 }
4656 
4657 int
4658 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
4659 {
4660 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4661 	wl_nd_hostip_t ndo_del_addr;
4662 	int iov_len;
4663 	int retcode;
4664 
4665 	if (dhdp == NULL || ipv6addr == 0) {
4666 		return BCME_ERROR;
4667 	}
4668 
4669 	/* wl_nd_hostip_t fixed param */
4670 	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4671 	ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
4672 	ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4673 	/* wl_nd_host_ip_addr_t param for del */
4674 	memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4675 	ndo_del_addr.u.host_ip.type = 0;	/* don't care */
4676 
4677 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
4678 		WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4679 
4680 	if (!iov_len) {
4681 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4682 			__FUNCTION__, sizeof(iovbuf)));
4683 		return BCME_ERROR;
4684 	}
4685 
4686 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4687 	if (retcode) {
4688 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4689 	} else {
4690 		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4691 	}
4692 
4693 	return retcode;
4694 }
4695 
4696 int
4697 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
4698 {
4699 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4700 	wl_nd_hostip_t ndo_del_addr;
4701 	int iov_len;
4702 	int retcode;
4703 
4704 	if (dhdp == NULL) {
4705 		return BCME_ERROR;
4706 	}
4707 
4708 	/* wl_nd_hostip_t fixed param */
4709 	ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4710 	if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
4711 		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
4712 	} else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
4713 		ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
4714 	} else {
4715 		return BCME_BADARG;
4716 	}
4717 	ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
4718 
4719 	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
4720 			iovbuf, sizeof(iovbuf));
4721 
4722 	if (!iov_len) {
4723 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4724 			__FUNCTION__, sizeof(iovbuf)));
4725 		return BCME_ERROR;
4726 	}
4727 
4728 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4729 	if (retcode) {
4730 		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4731 	} else {
4732 		DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4733 	}
4734 
4735 	return retcode;
4736 }
4737 
4738 int
4739 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
4740 {
4741 	char iovbuf[DHD_IOVAR_BUF_SIZE];
4742 	int iov_len;
4743 	int retcode;
4744 
4745 	if (dhdp == NULL) {
4746 		return BCME_ERROR;
4747 	}
4748 
4749 	iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
4750 			iovbuf, sizeof(iovbuf));
4751 
4752 	if (!iov_len) {
4753 		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4754 			__FUNCTION__, sizeof(iovbuf)));
4755 		return BCME_ERROR;
4756 	}
4757 
4758 	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
4759 	if (retcode)
4760 		DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
4761 			__FUNCTION__, enable, retcode));
4762 	else {
4763 		DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
4764 			__FUNCTION__, enable));
4765 	}
4766 
4767 	return retcode;
4768 }
4769 #ifdef SIMPLE_ISCAN
4770 
4771 uint iscan_thread_id = 0;
4772 iscan_buf_t * iscan_chain = 0;
4773 
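/*
 * SIMPLE_ISCAN keeps incremental scan results in 'iscan_chain', a singly
 * linked list of iscan_buf_t nodes guarded by dhd_iscan_lock()/unlock().
 * dhd_iscan_allocate_buf() appends a node, dhd_iscan_free_buf() frees one
 * node or the whole chain, and dhd_iscan_get_partial_result() fills the
 * newest node from the "iscanresults" iovar.
 */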
4774 iscan_buf_t *
4775 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
4776 {
4777 	iscan_buf_t *iscanbuf_alloc = 0;
4778 	iscan_buf_t *iscanbuf_head;
4779 
4780 	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4781 	dhd_iscan_lock();
4782 
4783 	iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
4784 	if (iscanbuf_alloc == NULL)
4785 		goto fail;
4786 
4787 	iscanbuf_alloc->next = NULL;
4788 	iscanbuf_head = *iscanbuf;
4789 
4790 	DHD_ISCAN(("%s: addr of allocated node = 0x%X"
4791 		   "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
4792 		   __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
4793 
4794 	if (iscanbuf_head == NULL) {
4795 		*iscanbuf = iscanbuf_alloc;
4796 		DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
4797 		goto fail;
4798 	}
4799 
4800 	while (iscanbuf_head->next)
4801 		iscanbuf_head = iscanbuf_head->next;
4802 
4803 	iscanbuf_head->next = iscanbuf_alloc;
4804 
4805 fail:
4806 	dhd_iscan_unlock();
4807 	return iscanbuf_alloc;
4808 }
4809 
4810 void
4811 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
4812 {
4813 	iscan_buf_t *iscanbuf_free = 0;
4814 	iscan_buf_t *iscanbuf_prv = 0;
4815 	iscan_buf_t *iscanbuf_cur;
4816 	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4817 	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4818 
4819 	dhd_iscan_lock();
4820 
4821 	iscanbuf_cur = iscan_chain;
4822 
4823 	/* If iscan_delete is null then delete the entire
4824 	 * chain or else delete specific one provided
4825 	 */
4826 	if (!iscan_delete) {
4827 		while (iscanbuf_cur) {
4828 			iscanbuf_free = iscanbuf_cur;
4829 			iscanbuf_cur = iscanbuf_cur->next;
4830 			iscanbuf_free->next = 0;
4831 			MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
4832 		}
4833 		iscan_chain = 0;
4834 	} else {
4835 		while (iscanbuf_cur) {
4836 			if (iscanbuf_cur == iscan_delete)
4837 				break;
4838 			iscanbuf_prv = iscanbuf_cur;
4839 			iscanbuf_cur = iscanbuf_cur->next;
4840 		}
4841 		if (iscanbuf_prv)
4842 			iscanbuf_prv->next = iscan_delete->next;
4843 
4844 		iscan_delete->next = 0;
4845 		MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
4846 
4847 		if (!iscanbuf_prv)
4848 			iscan_chain = 0;
4849 	}
4850 	dhd_iscan_unlock();
4851 }
4852 
4853 iscan_buf_t *
4854 dhd_iscan_result_buf(void)
4855 {
4856 	return iscan_chain;
4857 }
4858 
4859 int
4860 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
4861 {
4862 	int rc = -1;
4863 	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4864 	char *buf = NULL; /* must start NULL: checked at cleanUp even when pParams is NULL */
4865 	char iovar[] = "iscan";
4866 	uint32 allocSize = 0;
4867 	wl_ioctl_t ioctl;
4868 	int len;
4869 
4870 	if (pParams) {
4871 		allocSize = (size + strlen(iovar) + 1);
4872 		if ((allocSize < size) || (allocSize < strlen(iovar)))
4873 		{
4874 			DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
4875 				__FUNCTION__, allocSize, size, (int)strlen(iovar)));
4876 			goto cleanUp;
4877 		}
4878 		buf = MALLOC(dhd->osh, allocSize);
4879 
4880 		if (buf == NULL)
4881 			{
4882 			DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
4883 			goto cleanUp;
4884 			}
4885 		ioctl.cmd = WLC_SET_VAR;
4886 		len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
4887 		if (len == 0) {
4888 			rc = BCME_BUFTOOSHORT;
4889 			goto cleanUp;
4890 		}
4891 		rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
4892 	}
4893 
4894 cleanUp:
4895 	if (buf) {
4896 		MFREE(dhd->osh, buf, allocSize);
4897 	}
4898 
4899 	return rc;
4900 }
4901 
4902 static int
4903 dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
4904 {
4905 	wl_iscan_results_t *list_buf;
4906 	wl_iscan_results_t list;
4907 	wl_scan_results_t *results;
4908 	iscan_buf_t *iscan_cur;
4909 	int status = -1;
4910 	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4911 	int rc;
4912 	wl_ioctl_t ioctl;
4913 	int len;
4914 
4915 	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
4916 
4917 	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
4918 	if (!iscan_cur) {
4919 		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
4920 		dhd_iscan_free_buf(dhdp, 0);
4921 		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4922 		dhd_ind_scan_confirm(dhdp, FALSE);
4923 		goto fail;
4924 	}
4925 
4926 	dhd_iscan_lock();
4927 
4928 	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
4929 	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
4930 	results = &list_buf->results;
4931 	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
4932 	results->version = 0;
4933 	results->count = 0;
4934 
4935 	memset(&list, 0, sizeof(list));
4936 	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
4937 	len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
4938 		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4939 	if (len == 0) {
4940 		dhd_iscan_free_buf(dhdp, 0);
4941 		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4942 		dhd_ind_scan_confirm(dhdp, FALSE);
4943 		status = BCME_BUFTOOSHORT;
4944 		goto fail;
4945 	}
4946 	ioctl.cmd = WLC_GET_VAR;
4947 	ioctl.set = FALSE;
4948 	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4949 
4950 	results->buflen = dtoh32(results->buflen);
4951 	results->version = dtoh32(results->version);
4952 	*scan_count = results->count = dtoh32(results->count);
4953 	status = dtoh32(list_buf->status);
4954 	DHD_ISCAN(("%s: Got %d results, status = (%x)\n", __FUNCTION__, results->count, status));
4955 
4956 	dhd_iscan_unlock();
4957 
4958 	if (!(*scan_count)) {
4959 		 /* TODO: race condition when FLUSH already called */
4960 		dhd_iscan_free_buf(dhdp, 0);
4961 	}
4962 fail:
4963 	return status;
4964 }
4965 
4966 #endif /* SIMPLE_ISCAN */
4967 
4968 /*
4969  * Returns TRUE if associated, FALSE otherwise.
4970  */
4971 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
4972 {
4973 	char bssid[6], zbuf[6];
4974 	int ret = -1;
4975 
4976 	bzero(bssid, 6);
4977 	bzero(zbuf, 6);
4978 
4979 	ret  = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
4980 		ETHER_ADDR_LEN, FALSE, ifidx);
4981 	DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
4982 
4983 	if (ret == BCME_NOTASSOCIATED) {
4984 		DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
4985 	}
4986 
4987 	if (retval)
4988 		*retval = ret;
4989 
4990 	if (ret < 0)
4991 		return FALSE;
4992 
4993 	if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
4994 		DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
4995 		return FALSE;
4996 	}
4997 	return TRUE;
4998 }
4999 
5000 /* Function to estimate possible DTIM_SKIP value */
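/*
 * Worked example (illustrative, assuming MAX_DTIM_ALLOWED_INTERVAL is 600
 * and CUSTOM_LISTEN_INTERVAL is 20): with a 100 TU beacon interval and a
 * DTIM period of 3, the max_dtim_enable path computes 600 / (3 * 100) = 2,
 * i.e. wake on every second DTIM while suspended; a result of 0 is clamped
 * to 1. In the non-max path the skip count starts from the platform value
 * and is further bounded by MAX_DTIM_ALLOWED_INTERVAL and the STA listen
 * interval.
 */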
5001 #if defined(OEM_ANDROID) && defined(BCMPCIE)
5002 int
5003 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
5004 {
5005 	int bcn_li_dtim = 1; /* default: no dtim skip */
5006 	int ret = -1;
5007 	int allowed_skip_dtim_cnt = 0;
5008 
5009 	if (dhd->disable_dtim_in_suspend) {
5010 		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
5011 		bcn_li_dtim = 0;
5012 		return bcn_li_dtim;
5013 	}
5014 
5015 	/* Check if associated */
5016 	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5017 		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
5018 		return bcn_li_dtim;
5019 	}
5020 
5021 	if (dtim_period == NULL || bcn_interval == NULL)
5022 		return bcn_li_dtim;
5023 
5024 	/* read associated AP beacon interval */
5025 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5026 		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
5027 		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5028 		return bcn_li_dtim;
5029 	}
5030 
5031 	/* read associated AP dtim setup */
5032 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5033 		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
5034 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5035 		return bcn_li_dtim;
5036 	}
5037 
5038 	/* if not associated just return */
5039 	if (*dtim_period == 0) {
5040 		return bcn_li_dtim;
5041 	}
5042 
5043 	if (dhd->max_dtim_enable) {
5044 		bcn_li_dtim =
5045 			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
5046 		if (bcn_li_dtim == 0) {
5047 			bcn_li_dtim = 1;
5048 		}
5049 	} else {
5050 		/* attempt to use platform-defined dtim skip interval */
5051 		bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5052 
5053 		/* check if sta listen interval fits into AP dtim */
5054 		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
5055 			/* AP DTIM too big for our Listen Interval: no dtim skipping */
5056 			bcn_li_dtim = NO_DTIM_SKIP;
5057 			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5058 				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5059 			return bcn_li_dtim;
5060 		}
5061 
5062 		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5063 			allowed_skip_dtim_cnt =
5064 				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
5065 			bcn_li_dtim =
5066 				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5067 		}
5068 
5069 		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
5070 			/* Round up dtim_skip to fit into STAs Listen Interval */
5071 			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
5072 			DHD_TRACE(("%s adjust dtim_skip to %d\n", __FUNCTION__, bcn_li_dtim));
5073 		}
5074 	}
5075 
5076 	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5077 		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5078 
5079 	return bcn_li_dtim;
5080 }
5081 #else /* OEM_ANDROID && BCMPCIE */
5082 int
5083 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
5084 {
5085 	int bcn_li_dtim = 1; /* default: no dtim skip */
5086 	int ret = -1;
5087 	int dtim_period = 0;
5088 	int ap_beacon = 0;
5089 	int allowed_skip_dtim_cnt = 0;
5090 
5091 	if (dhd->disable_dtim_in_suspend) {
5092 		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
5093 		bcn_li_dtim = 0;
5094 		goto exit;
5095 	}
5096 
5097 	/* Check if associated */
5098 	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5099 		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
5100 		goto exit;
5101 	}
5102 
5103 	/* read associated AP beacon interval */
5104 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5105 		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
5106 		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5107 		goto exit;
5108 	}
5109 
5110 	/* read associated ap's dtim setup */
5111 	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5112 		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
5113 		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5114 		goto exit;
5115 	}
5116 
5117 	/* if not associated just exit */
5118 	if (dtim_period == 0) {
5119 		goto exit;
5120 	}
5121 
5122 	if (dhd->max_dtim_enable) {
5123 		bcn_li_dtim =
5124 			(int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
5125 		if (bcn_li_dtim == 0) {
5126 			bcn_li_dtim = 1;
5127 		}
5128 	} else {
5129 		/* attempt to use platform-defined dtim skip interval */
5130 		bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5131 
5132 		/* check if sta listen interval fits into AP dtim */
5133 		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
5134 			/* AP DTIM too big for our Listen Interval : no dtim skipping */
5135 			bcn_li_dtim = NO_DTIM_SKIP;
5136 			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5137 				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
5138 			goto exit;
5139 		}
5140 
5141 		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5142 			allowed_skip_dtim_cnt =
5143 				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
5144 			bcn_li_dtim =
5145 				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5146 		}
5147 
5148 		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
5149 			/* Round up dtim_skip to fit into STAs Listen Interval */
5150 			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
5151 			DHD_TRACE(("%s adjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
5152 		}
5153 	}
5154 
5155 	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5156 		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
5157 
5158 exit:
5159 	return bcn_li_dtim;
5160 }
5161 #endif /* OEM_ANDROID && BCMPCIE */
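
/*
 * Editor's note: a minimal, self-contained sketch of the DTIM-skip clamping
 * applied in the non-max_dtim branches above.  The guard macro
 * DHD_DOC_EXAMPLES and the helper example_clamp_dtim_skip() are hypothetical
 * (defined nowhere in this driver); the sketch only models the arithmetic,
 * using 1 to mean "listen at every DTIM, i.e. no skipping".
 */
#ifdef DHD_DOC_EXAMPLES
static int
example_clamp_dtim_skip(int dtim_period, int bcn_interval, int wanted_skip,
	int listen_interval, int max_interval)
{
	int skip = wanted_skip;

	/* One AP DTIM already exceeds the STA listen interval: no skipping. */
	if (dtim_period > listen_interval)
		return 1;

	/* Keep (dtim_period * bcn_interval * skip) within the allowed window. */
	if ((dtim_period * bcn_interval * skip) > max_interval) {
		skip = max_interval / (dtim_period * bcn_interval);
		if (skip == 0)
			skip = 1;
	}

	/* Shrink the skip so it still fits inside the STA listen interval. */
	if ((skip * dtim_period) > listen_interval)
		skip = listen_interval / dtim_period;

	return skip;
}
#endif /* DHD_DOC_EXAMPLES */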
5162 
5163 #ifdef CONFIG_SILENT_ROAM
5164 int
5165 dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
5166 {
5167 	int ret = BCME_OK;
5168 	wlc_sroam_t *psroam;
5169 	wlc_sroam_info_t *sroam;
5170 	uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
5171 
5172 	/* Check if associated */
5173 	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5174 		DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
5175 		return ret;
5176 	}
5177 
5178 	if (set && (dhd->op_mode &
5179 		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
5180 		DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
5181 		return ret;
5182 	}
5183 
5184 	if (!dhd->sroam_turn_on) {
5185 		DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
5186 		return ret;
5187 	}
5188 	psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
5189 	if (!psroam) {
5190 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
5191 		return BCME_NOMEM;
5192 	}
5193 
5194 	ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
5195 	if (ret < 0) {
5196 		DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
5197 		goto done;
5198 	}
5199 
5200 	if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
5201 		ret = BCME_VERSION;
5202 		goto done;
5203 	}
5204 
5205 	sroam = (wlc_sroam_info_t *)psroam->data;
5206 	sroam->sroam_on = set;
5207 	DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
5208 
5209 	ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
5210 	if (ret < 0) {
5211 		DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
5212 	}
5213 
5214 done:
5215 	if (psroam) {
5216 		MFREE(dhd->osh, psroam, sroamlen);
5217 	}
5218 	return ret;
5219 
5220 }
5221 #endif /* CONFIG_SILENT_ROAM */
5222 
5223 /* Check if the mode supports STA MODE */
5224 bool dhd_support_sta_mode(dhd_pub_t *dhd)
5225 {
5226 
5227 #ifdef  WL_CFG80211
5228 	if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
5229 		return FALSE;
5230 	else
5231 #endif /* WL_CFG80211 */
5232 		return TRUE;
5233 }
5234 
5235 #if defined(KEEP_ALIVE)
5236 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
5237 {
5238 	char				buf[32] = {0};
5239 	const char			*str;
5240 	wl_mkeep_alive_pkt_t	mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
5241 	wl_mkeep_alive_pkt_t	*mkeep_alive_pktp;
5242 	int					buf_len;
5243 	int					str_len;
5244 	int res					= -1;
5245 
5246 	if (!dhd_support_sta_mode(dhd))
5247 		return res;
5248 
5249 	DHD_TRACE(("%s execution\n", __FUNCTION__));
5250 
5251 	str = "mkeep_alive";
5252 	str_len = strlen(str);
5253 	strncpy(buf, str, sizeof(buf) - 1);
5254 	buf[ sizeof(buf) - 1 ] = '\0';
5255 	mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
5256 	mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
5257 	buf_len = str_len + 1;
5258 	mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
5259 	mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
5260 	/* Setup keep alive zero for null packet generation */
5261 	mkeep_alive_pkt.keep_alive_id = 0;
5262 	mkeep_alive_pkt.len_bytes = 0;
5263 	buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
5264 	bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
5265 	/* Keep-alive attributes are set in local	variable (mkeep_alive_pkt), and
5266 	 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
5267 	 * guarantee that the buffer is properly aligned.
5268 	 */
5269 	memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
5270 
5271 	res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
5272 
5273 	return res;
5274 }
5275 #endif /* defined(KEEP_ALIVE) */
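
/*
 * Editor's note: dhd_keep_alive_onoff() above hand-packs the iovar buffer as
 * the NUL-terminated iovar name followed by the fixed-length request struct.
 * The sketch below shows that packing in isolation; DHD_DOC_EXAMPLES and
 * example_pack_iovar() are hypothetical names used only for illustration.
 */
#ifdef DHD_DOC_EXAMPLES
/* Pack "<name>\0<payload bytes>" into buf; returns total length or -1. */
static int
example_pack_iovar(char *buf, int buf_len, const char *name,
	const void *payload, int payload_len)
{
	int name_len = (int)strlen(name) + 1;	/* include the terminating NUL */

	if ((name_len + payload_len) > buf_len)
		return -1;
	memcpy(buf, name, name_len);
	memcpy(buf + name_len, payload, payload_len);
	return name_len + payload_len;
}
#endif /* DHD_DOC_EXAMPLES */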
5276 #if defined(OEM_ANDROID)
5277 #define	CSCAN_TLV_TYPE_SSID_IE	'S'
5278 /*
5279  *  SSIDs list parsing from cscan tlv list
5280  */
5281 int
5282 wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
5283 {
5284 	char* str;
5285 	int idx = 0;
5286 	uint8 len;
5287 
5288 	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
5289 		DHD_ERROR(("%s invalid parameters\n", __FUNCTION__));
5290 		return BCME_BADARG;
5291 	}
5292 	str = *list_str;
5293 	while (*bytes_left > 0) {
5294 		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
5295 			*list_str = str;
5296 			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5297 			return idx;
5298 		}
5299 
5300 		if (idx >= max) {
5301 			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
5302 			return BCME_BADARG;
5303 		}
5304 
5305 		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
5306 		*bytes_left -= 1;
5307 		if (*bytes_left == 0) {
5308 			DHD_ERROR(("%s no length field.\n", __FUNCTION__));
5309 			return BCME_BADARG;
5310 		}
5311 		str += 1;
5312 		ssid[idx].rssi_thresh = 0;
5313 		ssid[idx].flags = 0;
5314 		len = str[0];
5315 		if (len == 0) {
5316 			/* Broadcast SSID */
5317 			ssid[idx].SSID_len = 0;
5318 			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
5319 			*bytes_left -= 1;
5320 			str += 1;
5321 
5322 			DHD_TRACE(("BROADCAST SCAN  left=%d\n", *bytes_left));
5323 		} else if (len <= DOT11_MAX_SSID_LEN) {
5324 			/* Get proper SSID size */
5325 			ssid[idx].SSID_len = len;
5326 			*bytes_left -= 1;
5327 			/* Get SSID */
5328 			if (ssid[idx].SSID_len > *bytes_left) {
5329 				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
5330 				__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
5331 				return BCME_BADARG;
5332 			}
5333 			str += 1;
5334 			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
5335 
5336 			*bytes_left -= ssid[idx].SSID_len;
5337 			str += ssid[idx].SSID_len;
5338 			ssid[idx].hidden = TRUE;
5339 
5340 			DHD_TRACE(("%s :size=%d left=%d\n",
5341 				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
5342 		} else {
5343 			DHD_ERROR(("### SSID size more than %d\n", str[0]));
5344 			return BCME_BADARG;
5345 		}
5346 		idx++;
5347 	}
5348 
5349 	*list_str = str;
5350 	return idx;
5351 }
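
/*
 * Editor's note: the records consumed by wl_parse_ssid_list_tlv() above are a
 * flat byte stream of 'S' <len> <len SSID bytes>, repeated per SSID, where a
 * zero length means a broadcast (wildcard) scan.  The encoder below is a
 * hypothetical illustration of that layout (DHD_DOC_EXAMPLES and
 * example_put_cscan_ssid() are not real driver symbols).
 */
#ifdef DHD_DOC_EXAMPLES
static int
example_put_cscan_ssid(unsigned char *buf, int buf_len,
	const unsigned char *ssid, unsigned char ssid_len)
{
	if (buf_len < (2 + ssid_len))
		return -1;
	buf[0] = 'S';		/* CSCAN_TLV_TYPE_SSID_IE */
	buf[1] = ssid_len;	/* 0 == broadcast SSID */
	if (ssid_len)
		memcpy(&buf[2], ssid, ssid_len);
	return 2 + ssid_len;
}
#endif /* DHD_DOC_EXAMPLES */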
5352 /* Android ComboSCAN support */
5353 
5354 /*
5355  *  data parsing from ComboScan tlv list
5356 */
5357 int
5358 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
5359                      int input_size, int *bytes_left)
5360 {
5361 	char* str;
5362 	uint16 short_temp;
5363 	uint32 int_temp;
5364 
5365 	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5366 		DHD_ERROR(("%s invalid parameters\n", __FUNCTION__));
5367 		return -1;
5368 	}
5369 	str = *list_str;
5370 
5371 	/* Clean all dest bytes */
5372 	memset(dst, 0, dst_size);
5373 	if (*bytes_left > 0) {
5374 
5375 		if (str[0] != token) {
5376 			DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
5377 				__FUNCTION__, token, str[0], *bytes_left));
5378 			return -1;
5379 		}
5380 
5381 		*bytes_left -= 1;
5382 		str += 1;
5383 
5384 		if (input_size == 1) {
5385 			memcpy(dst, str, input_size);
5386 		}
5387 		else if (input_size == 2) {
5388 			memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
5389 				input_size);
5390 		}
5391 		else if (input_size == 4) {
5392 			memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
5393 				input_size);
5394 		}
5395 
5396 		*bytes_left -= input_size;
5397 		str += input_size;
5398 		*list_str = str;
5399 		return 1;
5400 	}
5401 	return 1;
5402 }
5403 
5404 /*
5405  *  channel list parsing from cscan tlv list
5406 */
5407 int
5408 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
5409                              int channel_num, int *bytes_left)
5410 {
5411 	char* str;
5412 	int idx = 0;
5413 
5414 	if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5415 		DHD_ERROR(("%s invalid parameters\n", __FUNCTION__));
5416 		return -1;
5417 	}
5418 	str = *list_str;
5419 
5420 	while (*bytes_left > 0) {
5421 
5422 		if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
5423 			*list_str = str;
5424 			DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5425 			return idx;
5426 		}
5427 		/* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
5428 		*bytes_left -= 1;
5429 		str += 1;
5430 
5431 		if (str[0] == 0) {
5432 			/* All channels */
5433 			channel_list[idx] = 0x0;
5434 		}
5435 		else {
5436 			channel_list[idx] = (uint16)str[0];
5437 			DHD_TRACE(("%s channel=%d \n", __FUNCTION__,  channel_list[idx]));
5438 		}
5439 		*bytes_left -= 1;
5440 		str += 1;
5441 
5442 		if (idx++ > 255) {
5443 			DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
5444 			return -1;
5445 		}
5446 	}
5447 
5448 	*list_str = str;
5449 	return idx;
5450 }
5451 
5452 /* Parse a comma-separated list from list_str into ssid array, starting
5453  * at index idx.  Max specifies size of the ssid array.  Parses ssids
5454  * and returns the updated idx; if the returned idx >= max, not all fit and the excess have
5455  * not been copied.  Returns -1 on empty string, or on ssid too long.
5456  */
5457 int
5458 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
5459 {
5460 	char* str, *ptr;
5461 
5462 	if ((list_str == NULL) || (*list_str == NULL))
5463 		return -1;
5464 
5465 	for (str = *list_str; str != NULL; str = ptr) {
5466 
5467 		/* check for next TAG */
5468 		if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
5469 			*list_str	 = str + strlen(GET_CHANNEL);
5470 			return idx;
5471 		}
5472 
5473 		if ((ptr = strchr(str, ',')) != NULL) {
5474 			*ptr++ = '\0';
5475 		}
5476 
5477 		if (strlen(str) > DOT11_MAX_SSID_LEN) {
5478 			DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
5479 			return -1;
5480 		}
5481 
5482 		if (strlen(str) == 0)
5483 			ssid[idx].SSID_len = 0;
5484 
5485 		if (idx < max) {
5486 			bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
5487 			strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
5488 			ssid[idx].SSID_len = strlen(str);
5489 		}
5490 		idx++;
5491 	}
5492 	return idx;
5493 }
5494 
5495 /*
5496  * Parse channel list from iwpriv CSCAN
5497  */
5498 int
5499 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
5500 {
5501 	int num;
5502 	int val;
5503 	char* str;
5504 	char* endptr = NULL;
5505 
5506 	if ((list_str == NULL)||(*list_str == NULL))
5507 		return -1;
5508 
5509 	str = *list_str;
5510 	num = 0;
5511 	while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
5512 		val = (int)strtoul(str, &endptr, 0);
5513 		if (endptr == str) {
5514 			printf("could not parse channel number starting at"
5515 				" substring \"%s\" in list:\n%s\n",
5516 				str, *list_str);
5517 			return -1;
5518 		}
5519 		str = endptr + strspn(endptr, " ,");
5520 
5521 		if (num == channel_num) {
5522 			DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
5523 				channel_num, *list_str));
5524 			return -1;
5525 		}
5526 
5527 		channel_list[num++] = (uint16)val;
5528 	}
5529 	*list_str = str;
5530 	return num;
5531 }
5532 #endif /* defined(OEM_ANDROID) */
5533 
5534 /* Given filename and download type,  returns a buffer pointer and length
5535 * for download to f/w. Type can be FW or NVRAM.
5536 *
5537 */
5538 int dhd_get_download_buffer(dhd_pub_t	*dhd, char *file_path, download_type_t component,
5539 	char ** buffer, int *length)
5540 
5541 {
5542 	int ret = BCME_ERROR;
5543 	int len = 0;
5544 	int file_len;
5545 	void *image = NULL;
5546 	uint8 *buf = NULL;
5547 
5548 	/* Point to cache if available. */
5549 	/* No Valid cache found on this call */
5550 	if (!len) {
5551 		file_len = *length;
5552 		*length = 0;
5553 
5554 		if (file_path) {
5555 			image = dhd_os_open_image1(dhd, file_path);
5556 			if (image == NULL) {
5557 				goto err;
5558 			}
5559 		}
5560 
5561 		buf = MALLOCZ(dhd->osh, file_len);
5562 		if (buf == NULL) {
5563 			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
5564 				__FUNCTION__, file_len));
5565 			goto err;
5566 		}
5567 
5568 		/* Download image */
5569 		len = dhd_os_get_image_block((char *)buf, file_len, image);
5570 		if ((len <= 0 || len > file_len)) {
5571 			MFREE(dhd->osh, buf, file_len);
5572 			goto err;
5573 		}
5574 	}
5575 
5576 	ret = BCME_OK;
5577 	*length = len;
5578 	*buffer = (char *)buf;
5579 
5580 	/* Cache if first call. */
5581 
5582 err:
5583 	if (image)
5584 		dhd_os_close_image1(dhd, image);
5585 
5586 	return ret;
5587 }
5588 
5589 int
5590 dhd_download_2_dongle(dhd_pub_t	*dhd, char *iovar, uint16 flag, uint16 dload_type,
5591 	unsigned char *dload_buf, int len)
5592 {
5593 	struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
5594 	int err = 0;
5595 	int dload_data_offset;
5596 	static char iovar_buf[WLC_IOCTL_MEDLEN];
5597 	int iovar_len;
5598 
5599 	memset(iovar_buf, 0, sizeof(iovar_buf));
5600 
5601 	dload_data_offset = OFFSETOF(wl_dload_data_t, data);
5602 	dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
5603 	dload_ptr->dload_type = dload_type;
5604 	dload_ptr->len = htod32(len - dload_data_offset);
5605 	dload_ptr->crc = 0;
5606 	len = ROUNDUP(len, 8);
5607 
5608 	iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
5609 		(uint)len, iovar_buf, sizeof(iovar_buf));
5610 	if (iovar_len == 0) {
5611 		DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
5612 		           __FUNCTION__, iovar));
5613 		return BCME_BUFTOOSHORT;
5614 	}
5615 
5616 	err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
5617 			iovar_len, IOV_SET, 0);
5618 
5619 	return err;
5620 }
5621 
5622 int
5623 dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
5624 		uint32 len, char *iovar)
5625 
5626 {
5627 	int chunk_len;
5628 	int size2alloc;
5629 	unsigned char *new_buf;
5630 	int err = 0, data_offset;
5631 	uint16 dl_flag = DL_BEGIN;
5632 
5633 	data_offset = OFFSETOF(wl_dload_data_t, data);
5634 	size2alloc = data_offset + MAX_CHUNK_LEN;
5635 	size2alloc = ROUNDUP(size2alloc, 8);
5636 
5637 	if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
5638 		do {
5639 			chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
5640 				MAX_CHUNK_LEN, buf);
5641 			if (chunk_len < 0) {
5642 				DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
5643 					__FUNCTION__, chunk_len));
5644 				err = BCME_ERROR;
5645 				goto exit;
5646 			}
5647 			if (len - chunk_len == 0)
5648 				dl_flag |= DL_END;
5649 
5650 			err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
5651 				new_buf, data_offset + chunk_len);
5652 
5653 			dl_flag &= ~DL_BEGIN;
5654 
5655 			len = len - chunk_len;
5656 		} while ((len > 0) && (err == 0));
5657 	} else {
5658 		err = BCME_NOMEM;
5659 	}
5660 exit:
5661 	if (new_buf) {
5662 		MFREE(dhd->osh, new_buf, size2alloc);
5663 	}
5664 	return err;
5665 }
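
/*
 * Editor's note: dhd_download_blob() above streams the blob in chunks and
 * tags the first chunk with DL_BEGIN and the last with DL_END (a blob that
 * fits in one chunk carries both).  The sketch below models just that flag
 * handling over an in-memory buffer; the EX_DL_* values, the send callback
 * and example_send_in_chunks() are hypothetical illustrations.
 */
#ifdef DHD_DOC_EXAMPLES
#define EX_DL_BEGIN	0x1u
#define EX_DL_END	0x2u

static int
example_send_in_chunks(const unsigned char *blob, int blob_len, int chunk_len,
	int (*send)(const unsigned char *chunk, int len, unsigned int flags))
{
	unsigned int flags = EX_DL_BEGIN;
	int err = 0;

	while ((blob_len > 0) && (err == 0)) {
		int this_len = (blob_len < chunk_len) ? blob_len : chunk_len;

		if (blob_len == this_len)
			flags |= EX_DL_END;	/* last chunk */
		err = send(blob, this_len, flags);
		flags &= ~EX_DL_BEGIN;		/* only the first chunk is BEGIN */
		blob += this_len;
		blob_len -= this_len;
	}
	return err;
}
#endif /* DHD_DOC_EXAMPLES */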
5666 
5667 int
5668 dhd_apply_default_txcap(dhd_pub_t  *dhd, char *path)
5669 {
5670 	return 0;
5671 }
5672 
5673 int
5674 dhd_check_current_clm_data(dhd_pub_t *dhd)
5675 {
5676 	char iovbuf[WLC_IOCTL_SMLEN];
5677 	wl_country_t *cspec;
5678 	int err = BCME_OK;
5679 
5680 	memset(iovbuf, 0, sizeof(iovbuf));
5681 	err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
5682 	if (err == 0) {
5683 		err = BCME_BUFTOOSHORT;
5684 		DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
5685 		return err;
5686 	}
5687 	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5688 	if (err) {
5689 		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
5690 		return err;
5691 	}
5692 	cspec = (wl_country_t *)iovbuf;
5693 	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
5694 		DHD_ERROR(("%s: ----- This FW does not include CLM data -----\n",
5695 			__FUNCTION__));
5696 		return FALSE;
5697 	}
5698 	DHD_ERROR(("%s: ----- This FW includes CLM data -----\n",
5699 		__FUNCTION__));
5700 	return TRUE;
5701 }
5702 
5703 int
5704 dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
5705 {
5706 	char *clm_blob_path;
5707 	int len;
5708 	char *memblock = NULL;
5709 	int err = BCME_OK;
5710 	char iovbuf[WLC_IOCTL_SMLEN];
5711 	int status = FALSE;
5712 
5713 	if (clm_path[0] != '\0') {
5714 		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
5715 			DHD_ERROR(("clm path exceeds max len\n"));
5716 			return BCME_ERROR;
5717 		}
5718 		clm_blob_path = "/vendor/etc/firmware/4359_cypress_auto.clm_blob";//clm_path;
5719 		DHD_TRACE(("clm path from module param:%s\n", clm_path));
5720 	} else {
5721 		clm_blob_path = "/vendor/etc/firmware/4359_cypress_auto.clm_blob";//VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
5722 	}
5723 
5724 	/* If CLM blob file is found on the filesystem, download the file.
5725 	 * After CLM file download or If the blob file is not present,
5726 	 * validate the country code before proceeding with the initialization.
5727 	 * If country code is not valid, fail the initialization.
5728 	 */
5729 	memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
5730 	if (memblock == NULL) {
5731 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5732 		if (dhd->is_blob) {
5733 			err = BCME_ERROR;
5734 		} else {
5735 			status = dhd_check_current_clm_data(dhd);
5736 			if (status == TRUE) {
5737 				err = BCME_OK;
5738 			} else {
5739 				err = status;
5740 			}
5741 		}
5742 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5743 		goto exit;
5744 	}
5745 
5746 	len = dhd_os_get_image_size(memblock);
5747 
5748 	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
5749 		status = dhd_check_current_clm_data(dhd);
5750 		if (status == TRUE) {
5751 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5752 			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
5753 				if (dhd->is_blob) {
5754 					err = BCME_ERROR;
5755 				}
5756 				goto exit;
5757 			}
5758 #else
5759 			DHD_ERROR(("%s: CLM already exists in F/W, "
5760 				"new CLM data will be added to the end of existing CLM data!\n",
5761 				__FUNCTION__));
5762 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5763 		} else if (status != FALSE) {
5764 			err = status;
5765 			goto exit;
5766 		}
5767 
5768 		/* Found blob file. Download the file */
5769 		DHD_TRACE(("clm file download from %s \n", clm_blob_path));
5770 		err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
5771 		if (err) {
5772 			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
5773 			/* Retrieve clmload_status and print */
5774 			memset(iovbuf, 0, sizeof(iovbuf));
5775 			len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
5776 			if (len == 0) {
5777 				err = BCME_BUFTOOSHORT;
5778 				goto exit;
5779 			}
5780 			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5781 			if (err) {
5782 				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
5783 					__FUNCTION__, err));
5784 			} else {
5785 				DHD_ERROR(("%s: clmload_status: %d \n",
5786 					__FUNCTION__, *((int *)iovbuf)));
5787 				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
5788 					DHD_ERROR(("Chip ID mismatch error \n"));
5789 				}
5790 			}
5791 			err = BCME_ERROR;
5792 			goto exit;
5793 		} else {
5794 			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
5795 		}
5796 	} else {
5797 		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
5798 	}
5799 
5800 	/* Verify country code */
5801 	status = dhd_check_current_clm_data(dhd);
5802 
5803 	if (status != TRUE) {
5804 		/* Country code not initialized or CLM download not proper */
5805 		DHD_ERROR(("country code not initialized\n"));
5806 		err = status;
5807 	}
5808 exit:
5809 
5810 	if (memblock) {
5811 		dhd_os_close_image1(dhd, memblock);
5812 	}
5813 
5814 	return err;
5815 }
5816 
5817 void dhd_free_download_buffer(dhd_pub_t	*dhd, void *buffer, int length)
5818 {
5819 	MFREE(dhd->osh, buffer, length);
5820 }
5821 
5822 #ifdef SHOW_LOGTRACE
5823 int
5824 dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
5825 		dhd_event_log_t *event_log)
5826 {
5827 	uint32 *lognums = NULL;
5828 	char *logstrs = NULL;
5829 	logstr_trailer_t *trailer = NULL;
5830 	int ram_index = 0;
5831 	char **fmts = NULL;
5832 	int num_fmts = 0;
5833 	bool match_fail = TRUE;
5834 	int32 i = 0;
5835 	uint8 *pfw_id = NULL;
5836 	uint32 fwid = 0;
5837 	void *file = NULL;
5838 	int file_len = 0;
5839 	char fwid_str[FWID_STR_LEN];
5840 	uint32 hdr_logstrs_size = 0;
5841 
5842 	/* Read last three words in the logstrs.bin file */
5843 	trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
5844 		sizeof(logstr_trailer_t));
5845 
5846 	if (trailer->log_magic == LOGSTRS_MAGIC) {
5847 		/*
5848 		* logstrs.bin has a header.
5849 		*/
5850 		if (trailer->version == 1) {
5851 			logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
5852 					logstrs_size - sizeof(logstr_header_v1_t));
5853 			DHD_INFO(("%s: logstr header version = %u\n",
5854 					__FUNCTION__, hdr_v1->version));
5855 			num_fmts =	hdr_v1->rom_logstrs_offset / sizeof(uint32);
5856 			ram_index = (hdr_v1->ram_lognums_offset -
5857 				hdr_v1->rom_lognums_offset) / sizeof(uint32);
5858 			lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
5859 			logstrs = (char *)	 &raw_fmts[hdr_v1->rom_logstrs_offset];
5860 			hdr_logstrs_size = hdr_v1->logstrs_size;
5861 		} else if (trailer->version == 2) {
5862 			logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
5863 					sizeof(logstr_header_t));
5864 			DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
5865 					__FUNCTION__, hdr->trailer.version, hdr->trailer.flags));
5866 
5867 			/* For ver. 2 of the header, need to match fwid of
5868 			 *  both logstrs.bin and fw bin
5869 			 */
5870 
5871 			/* read the FWID from fw bin */
5872 			file = dhd_os_open_image1(NULL, st_str_file_path);
5873 			if (!file) {
5874 				DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
5875 				goto error;
5876 			}
5877 			file_len = dhd_os_get_image_size(file);
5878 			if (file_len <= 0) {
5879 				DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
5880 				goto error;
5881 			}
5882 			/* fwid is at the end of fw bin in string format */
5883 			if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
5884 				DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
5885 				goto error;
5886 			}
5887 
5888 			memset(fwid_str, 0, sizeof(fwid_str));
5889 			if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
5890 				DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
5891 				goto error;
5892 			}
5893 			pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5894 					FWID_STR_1, strlen(FWID_STR_1));
5895 			if (!pfw_id) {
5896 				pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5897 					FWID_STR_2, strlen(FWID_STR_2));
5898 				if (!pfw_id) {
5899 					DHD_ERROR(("%s: could not find id in FW bin!\n",
5900 						__FUNCTION__));
5901 					goto error;
5902 				}
5903 			}
5904 			/* search for the '-' in the fw id str, after which the
5905 			 * actual 4 byte fw id is present
5906 			 */
5907 			while (pfw_id && *pfw_id != '-') {
5908 				++pfw_id;
5909 			}
5910 			++pfw_id;
5911 			fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
5912 
5913 			/* check if fw id in logstrs.bin matches the fw one */
5914 			if (hdr->trailer.fw_id != fwid) {
5915 				DHD_ERROR(("%s: logstr id does not match FW!"
5916 					"logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
5917 					__FUNCTION__, hdr->trailer.fw_id, fwid));
5918 				goto error;
5919 			}
5920 
5921 			match_fail = FALSE;
5922 			num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
5923 			ram_index = (hdr->ram_lognums_offset -
5924 				hdr->rom_lognums_offset) / sizeof(uint32);
5925 			lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
5926 			logstrs = (char *)	 &raw_fmts[hdr->rom_logstrs_offset];
5927 			hdr_logstrs_size = hdr->logstrs_size;
5928 
5929 error:
5930 			if (file) {
5931 				dhd_os_close_image1(NULL, file);
5932 			}
5933 			if (match_fail) {
5934 				return BCME_DECERR;
5935 			}
5936 		} else {
5937 			DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
5938 					trailer->version));
5939 			return BCME_ERROR;
5940 		}
5941 		if (logstrs_size != hdr_logstrs_size) {
5942 			DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
5943 			return BCME_ERROR;
5944 		}
5945 	} else {
5946 		/*
5947 		 * Legacy logstrs.bin format without header.
5948 		 */
5949 		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
5950 
5951 		/* Legacy RAM-only logstrs.bin format:
5952 		 *	  - RAM 'lognums' section
5953 		 *	  - RAM 'logstrs' section.
5954 		 *
5955 		 * 'lognums' is an array of indexes for the strings in the
5956 		 * 'logstrs' section. The first uint32 is an index to the
5957 		 * start of 'logstrs'. Therefore, if this index is divided
5958 		 * by 'sizeof(uint32)' it provides the number of logstr
5959 		 *	entries.
5960 		 */
5961 		ram_index = 0;
5962 		lognums = (uint32 *) raw_fmts;
5963 		logstrs = (char *) &raw_fmts[num_fmts << 2];
5964 	}
5965 	if (num_fmts)
5966 		fmts = MALLOC(osh, num_fmts  * sizeof(char *));
5967 	if (fmts == NULL) {
5968 		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
5969 		return BCME_ERROR;
5970 	}
5971 	event_log->fmts_size = num_fmts  * sizeof(char *);
5972 
5973 	for (i = 0; i < num_fmts; i++) {
5974 		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
5975 		* (they are 0-indexed relative to 'rom_logstrs_offset').
5976 		*
5977 		* RAM lognums are already indexed to point to the correct RAM logstrs (they
5978 		* are 0-indexed relative to the start of the logstrs.bin file).
5979 		*/
5980 		if (i == ram_index) {
5981 			logstrs = raw_fmts;
5982 		}
5983 		fmts[i] = &logstrs[lognums[i]];
5984 	}
5985 	event_log->fmts = fmts;
5986 	event_log->raw_fmts_size = logstrs_size;
5987 	event_log->raw_fmts = raw_fmts;
5988 	event_log->num_fmts = num_fmts;
5989 	return BCME_OK;
5990 } /* dhd_parse_logstrs_file */
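
/*
 * Editor's note: for the legacy (headerless) logstrs.bin layout handled at
 * the end of dhd_parse_logstrs_file() above, the file begins with the uint32
 * lognums[] array and lognums[0] is the byte offset of the string pool, so
 * lognums[0] / 4 is also the number of entries.  The helper below is a
 * hypothetical illustration of that split (DHD_DOC_EXAMPLES is not defined
 * anywhere in this driver).
 */
#ifdef DHD_DOC_EXAMPLES
static void
example_split_legacy_logstrs(char *raw, unsigned int *num_fmts_out,
	unsigned int **lognums_out, char **logstrs_out)
{
	unsigned int *lognums = (unsigned int *)raw;

	*num_fmts_out = lognums[0] / sizeof(unsigned int);
	*lognums_out = lognums;
	*logstrs_out = raw + lognums[0];	/* string pool starts here */
}
#endif /* DHD_DOC_EXAMPLES */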
5991 
5992 int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
5993 		uint32 *rodata_end)
5994 {
5995 	char *raw_fmts =  NULL, *raw_fmts_loc = NULL;
5996 	uint32 read_size = READ_NUM_BYTES;
5997 	int error = 0;
5998 	char * cptr = NULL;
5999 	char c;
6000 	uint8 count = 0;
6001 
6002 	*ramstart = 0;
6003 	*rodata_start = 0;
6004 	*rodata_end = 0;
6005 
6006 	/* Allocate 1 byte more than read_size to terminate it with NULL */
6007 	raw_fmts = MALLOCZ(osh, read_size + 1);
6008 	if (raw_fmts == NULL) {
6009 		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
6010 		goto fail;
6011 	}
6012 
6013 	/* read ram start, rodata_start and rodata_end values from map  file */
6014 	while (count != ALL_MAP_VAL)
6015 	{
6016 		error = dhd_os_read_file(file, raw_fmts, read_size);
6017 		if (error < 0) {
6018 			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
6019 					error));
6020 			goto fail;
6021 		}
6022 
6023 		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
6024 		raw_fmts[read_size] = '\0';
6025 
6026 		/* Get ramstart address */
6027 		raw_fmts_loc = raw_fmts;
6028 		if (!(count & RAMSTART_BIT) &&
6029 			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
6030 			strlen(ramstart_str)))) {
6031 			cptr = cptr - BYTES_AHEAD_NUM;
6032 			sscanf(cptr, "%x %c text_start", ramstart, &c);
6033 			count |= RAMSTART_BIT;
6034 		}
6035 
6036 		/* Get ram rodata start address */
6037 		raw_fmts_loc = raw_fmts;
6038 		if (!(count & RDSTART_BIT) &&
6039 			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
6040 			strlen(rodata_start_str)))) {
6041 			cptr = cptr - BYTES_AHEAD_NUM;
6042 			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6043 			count |= RDSTART_BIT;
6044 		}
6045 
6046 		/* Get ram rodata end address */
6047 		raw_fmts_loc = raw_fmts;
6048 		if (!(count & RDEND_BIT) &&
6049 			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
6050 			strlen(rodata_end_str)))) {
6051 			cptr = cptr - BYTES_AHEAD_NUM;
6052 			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6053 			count |= RDEND_BIT;
6054 		}
6055 
6056 		if (error < (int)read_size) {
6057 			/*
6058 			* Since we reset the file position back by
6059 			* GO_BACK_FILE_POS_NUM_BYTES bytes on each pass, we will not hit
6060 			* EOF; the rewind exists so that a string split across two reads
6061 			* is not missed. So if the return value is less than read_size,
6062 			* we have reached EOF and should stop reading.
6063 			*/
6064 			break;
6065 		}
6066 		memset(raw_fmts, 0, read_size);
6067 		/*
6068 		* go back to predefined NUM of bytes so that we won't miss
6069 		* go back by a predefined number of bytes so that we won't miss
6070 		* the string and addr even if it is split across the next read.
6071 		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
6072 	}
6073 
6074 fail:
6075 	if (raw_fmts) {
6076 		MFREE(osh, raw_fmts, read_size + 1);
6077 		raw_fmts = NULL;
6078 	}
6079 	if (count == ALL_MAP_VAL) {
6080 		return BCME_OK;
6081 	}
6082 	else {
6083 		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
6084 				count));
6085 		return BCME_ERROR;
6086 	}
6087 
6088 } /* dhd_parse_map_file */
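
/*
 * Editor's note: the sscanf() calls in dhd_parse_map_file() above expect map
 * lines shaped roughly like "00180000 T text_start" (hex address, symbol
 * type letter, symbol name).  The fragment below shows that pattern on a
 * single line; example_parse_map_line() and DHD_DOC_EXAMPLES are hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
static int
example_parse_map_line(const char *line, unsigned int *addr)
{
	char type;

	/* pull out the hex address and the single symbol-type letter */
	return (sscanf(line, "%x %c", addr, &type) == 2) ? 0 : -1;
}
#endif /* DHD_DOC_EXAMPLES */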
6089 
6090 #ifdef PCIE_FULL_DONGLE
6091 int
6092 dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
6093 		dhd_event_log_t *event_data)
6094 {
6095 	uint32 infobuf_version;
6096 	info_buf_payload_hdr_t *payload_hdr_ptr;
6097 	uint16 payload_hdr_type;
6098 	uint16 payload_hdr_length;
6099 
6100 	DHD_TRACE(("%s:Enter\n", __FUNCTION__));
6101 
6102 	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
6103 		DHD_ERROR(("%s: infobuf too small for version field\n",
6104 			__FUNCTION__));
6105 		goto exit;
6106 	}
6107 	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
6108 	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
6109 	if (infobuf_version != PCIE_INFOBUF_V1) {
6110 		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
6111 			__FUNCTION__, infobuf_version));
6112 		goto exit;
6113 	}
6114 
6115 	/* Version 1 infobuf has a single type/length (and then value) field */
6116 	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
6117 		DHD_ERROR(("%s: infobuf too small for v1 type/length  fields\n",
6118 			__FUNCTION__));
6119 		goto exit;
6120 	}
6121 	/* Process/parse the common info payload header (type/length) */
6122 	payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
6123 	payload_hdr_type = ltoh16(payload_hdr_ptr->type);
6124 	payload_hdr_length = ltoh16(payload_hdr_ptr->length);
6125 	if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
6126 		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
6127 			__FUNCTION__, payload_hdr_type));
6128 		goto exit;
6129 	}
6130 	PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
6131 
6132 	/* Validate that the specified length isn't bigger than the
6133 	 * provided data.
6134 	 */
6135 	if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
6136 		DHD_ERROR(("%s: infobuf logtrace length is bigger"
6137 			" than actual buffer data\n", __FUNCTION__));
6138 		goto exit;
6139 	}
6140 	dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
6141 		event_data, payload_hdr_length);
6142 
6143 	return BCME_OK;
6144 
6145 exit:
6146 	return BCME_ERROR;
6147 } /* dhd_event_logtrace_infobuf_pkt_process */
6148 #endif /* PCIE_FULL_DONGLE */
6149 #endif /* SHOW_LOGTRACE */
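
/*
 * Editor's note: the v1 info-buffer framing validated in
 * dhd_event_logtrace_infobuf_pkt_process() above is a 32-bit version word
 * followed by a 16-bit type / 16-bit length header and then `length` payload
 * bytes.  The struct below is only a schematic model of that concatenation
 * (it is not the driver's own type, and DHD_DOC_EXAMPLES is hypothetical).
 */
#ifdef DHD_DOC_EXAMPLES
struct example_infobuf_v1_frame {
	unsigned int version;		/* must be PCIE_INFOBUF_V1 */
	unsigned short payload_type;	/* e.g. the logtrace payload type */
	unsigned short payload_length;	/* number of payload bytes that follow */
	/* payload bytes follow here */
};
#endif /* DHD_DOC_EXAMPLES */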
6150 
6151 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
6152 
6153 /* To handle the TDLS event in dhd_common.c
6154  */
6155 int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
6156 {
6157 	int ret = BCME_OK;
6158 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6159 #pragma GCC diagnostic push
6160 #pragma GCC diagnostic ignored "-Wcast-qual"
6161 #endif // endif
6162 	ret = dhd_tdls_update_peer_info(dhd_pub, event);
6163 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6164 #pragma GCC diagnostic pop
6165 #endif // endif
6166 	return ret;
6167 }
6168 
6169 int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
6170 {
6171 	tdls_peer_node_t *cur = NULL, *prev = NULL;
6172 	if (!dhd_pub)
6173 		return BCME_ERROR;
6174 	cur = dhd_pub->peer_tbl.node;
6175 
6176 	if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
6177 		return BCME_ERROR;
6178 
6179 	while (cur != NULL) {
6180 		prev = cur;
6181 		cur = cur->next;
6182 		MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
6183 	}
6184 	dhd_pub->peer_tbl.tdls_peer_count = 0;
6185 	dhd_pub->peer_tbl.node = NULL;
6186 	return BCME_OK;
6187 }
6188 #endif	/* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
6189 
6190 /* pretty hex print a contiguous buffer
6191 * based on the debug level specified
6192 */
6193 void
6194 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
6195 {
6196 	char line[128], *p;
6197 	int len = sizeof(line);
6198 	int nchar;
6199 	uint i;
6200 
6201 	if (msg && (msg[0] != '\0')) {
6202 		if (dbg_level == DHD_ERROR_VAL)
6203 			DHD_ERROR(("%s:\n", msg));
6204 		else if (dbg_level == DHD_INFO_VAL)
6205 			DHD_INFO(("%s:\n", msg));
6206 		else if (dbg_level == DHD_TRACE_VAL)
6207 			DHD_TRACE(("%s:\n", msg));
6208 	}
6209 
6210 	p = line;
6211 	for (i = 0; i < nbytes; i++) {
6212 		if (i % 16 == 0) {
6213 			nchar = snprintf(p, len, "  %04x: ", i);	/* line prefix */
6214 			p += nchar;
6215 			len -= nchar;
6216 		}
6217 		if (len > 0) {
6218 			nchar = snprintf(p, len, "%02x ", buf[i]);
6219 			p += nchar;
6220 			len -= nchar;
6221 		}
6222 
6223 		if (i % 16 == 15) {
6224 			/* flush line */
6225 			if (dbg_level == DHD_ERROR_VAL)
6226 				DHD_ERROR(("%s:\n", line));
6227 			else if (dbg_level == DHD_INFO_VAL)
6228 				DHD_INFO(("%s:\n", line));
6229 			else if (dbg_level == DHD_TRACE_VAL)
6230 				DHD_TRACE(("%s:\n", line));
6231 			p = line;
6232 			len = sizeof(line);
6233 		}
6234 	}
6235 
6236 	/* flush last partial line */
6237 	if (p != line) {
6238 		if (dbg_level == DHD_ERROR_VAL)
6239 			DHD_ERROR(("%s:\n", line));
6240 		else if (dbg_level == DHD_INFO_VAL)
6241 			DHD_INFO(("%s:\n", line));
6242 		else if (dbg_level == DHD_TRACE_VAL)
6243 			DHD_TRACE(("%s:\n", line));
6244 	}
6245 }
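
/*
 * Editor's note: a compact model of the formatting done by dhd_prhex() above:
 * 16 bytes per line, each line prefixed with its offset.  Output goes through
 * a caller-supplied emit() callback instead of the DHD_* log macros;
 * example_hexdump() and DHD_DOC_EXAMPLES are hypothetical names.
 */
#ifdef DHD_DOC_EXAMPLES
static void
example_hexdump(const unsigned char *buf, unsigned int nbytes,
	void (*emit)(const char *line))
{
	char line[128];
	int pos = 0;
	unsigned int i;

	for (i = 0; i < nbytes; i++) {
		if ((i % 16) == 0)
			pos = snprintf(line, sizeof(line), "  %04x: ", i);
		pos += snprintf(line + pos, sizeof(line) - pos, "%02x ", buf[i]);
		if ((i % 16) == 15) {
			emit(line);	/* flush a full line of 16 bytes */
			pos = 0;
		}
	}
	if (pos)
		emit(line);		/* flush a trailing partial line */
}
#endif /* DHD_DOC_EXAMPLES */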
6246 
6247 #ifndef OEM_ANDROID
6248 int
6249 dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
6250 {
6251 	struct ether_header ether_hdr;
6252 	tput_pkt_t tput_pkt;
6253 	void *pkt = NULL;
6254 	uint8 *pktdata = NULL;
6255 	uint32 pktsize = 0;
6256 	uint64 total_size = 0;
6257 	uint32 *crc = 0;
6258 	uint32 pktid = 0;
6259 	uint32 total_num_tx_pkts = 0;
6260 	int err = 0, err_exit = 0;
6261 	uint32 i = 0;
6262 	uint64 time_taken = 0;
6263 	int max_txbufs = 0;
6264 	uint32 n_batches = 0;
6265 	uint32 n_remain = 0;
6266 	uint8 tput_pkt_hdr_size = 0;
6267 	bool batch_cnt = FALSE;
6268 	bool tx_stop_pkt = FALSE;
6269 
6270 	if (tput_data->version != TPUT_TEST_T_VER ||
6271 		tput_data->length != TPUT_TEST_T_LEN) {
6272 		DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
6273 		err_exit = BCME_BADARG;
6274 		goto exit_error;
6275 	}
6276 
6277 	if (dhd->tput_data.tput_test_running) {
6278 		DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
6279 		err_exit = BCME_BUSY;
6280 		goto exit_error;
6281 	}
6282 #ifdef PCIE_FULL_DONGLE
6283 	/*
6284 	 * 100 bytes to accommodate ether header and tput header. As of today
6285 	 * both occupy 30 bytes. Rest is reserved.
6286 	 */
6287 	if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
6288 		(tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
6289 		DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
6290 			__FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
6291 			(DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
6292 		err_exit = BCME_BUFTOOLONG;
6293 		goto exit_error;
6294 	}
6295 #endif // endif
6296 	max_txbufs = dhd_get_max_txbufs(dhd);
6297 	max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);
6298 
6299 	if (!(tput_data->num_pkts > 0)) {
6300 		DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
6301 			__FUNCTION__, tput_data->num_pkts));
6302 		err_exit = BCME_ERROR;
6303 		goto exit_error;
6304 	}
6305 
6306 	memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
6307 	memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
6308 	dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
6309 	dhd->tput_data.pkts_cmpl = 0;
6310 	dhd->tput_start_ts = dhd->tput_stop_ts = 0;
6311 
6312 	if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
6313 		pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
6314 				(tput_data->payload_size - 12);
6315 	} else {
6316 		pktsize = sizeof(tput_pkt_t) +
6317 				(tput_data->payload_size - 12);
6318 	}
6319 
6320 	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
6321 			(uint8 *)&tput_pkt.mac_sta);
6322 
6323 	/* mark the tput test as started */
6324 	dhd->tput_data.tput_test_running = TRUE;
6325 
6326 	if (tput_data->direction == TPUT_DIR_TX) {
6327 		/* for ethernet header */
6328 		memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
6329 		memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
6330 		ether_hdr.ether_type = hton16(ETHER_TYPE_IP);
6331 
6332 		/* fill in the tput pkt */
6333 		memset(&tput_pkt, 0, sizeof(tput_pkt));
6334 		memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
6335 		memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
6336 		tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
6337 		tput_pkt.num_pkts = hton32(tput_data->num_pkts);
6338 
6339 		if (tput_data->num_pkts > (uint32)max_txbufs) {
6340 			n_batches = tput_data->num_pkts / max_txbufs;
6341 			n_remain = tput_data->num_pkts % max_txbufs;
6342 		} else {
6343 			n_batches = 0;
6344 			n_remain = tput_data->num_pkts;
6345 		}
6346 		DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
6347 			__FUNCTION__, tput_data->num_pkts, n_batches, n_remain));
6348 
6349 		do {
6350 			/* reset before every batch */
6351 			dhd->batch_tx_pkts_cmpl = 0;
6352 			if (n_batches) {
6353 				dhd->batch_tx_num_pkts = max_txbufs;
6354 				--n_batches;
6355 			} else if (n_remain) {
6356 				dhd->batch_tx_num_pkts = n_remain;
6357 				n_remain = 0;
6358 			} else {
6359 				DHD_ERROR(("Invalid. This should not hit\n"));
6360 			}
6361 
6362 			dhd->tput_start_ts = OSL_SYSUPTIME_US();
6363 			for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
6364 				pkt = PKTGET(dhd->osh, pktsize, TRUE);
6365 				if (!pkt) {
6366 					dhd->tput_data.tput_test_running = FALSE;
6367 					DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
6368 						__FUNCTION__));
6369 					DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
6370 						__FUNCTION__, dhd->tput_data.pkts_good,
6371 						dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
6372 					err_exit = BCME_NOMEM;
6373 					goto exit_error;
6374 				}
6375 				pktdata = PKTDATA(dhd->osh, pkt);
6376 				PKTSETLEN(dhd->osh, pkt, pktsize);
6377 				memset(pktdata, 0, pktsize);
6378 				if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
6379 					memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
6380 					pktdata += sizeof(ether_hdr);
6381 				}
6382 				/* send stop pkt as last pkt */
6383 				if (tx_stop_pkt) {
6384 					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
6385 					tx_stop_pkt = FALSE;
6386 				} else
6387 					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
6388 				tput_pkt.pkt_id = hton32(pktid++);
6389 				tput_pkt.crc32 = 0;
6390 				memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
6391 				/* compute crc32 over the pkt-id, num-pkts and data fields */
6392 				crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
6393 				*crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
6394 						8 + (tput_data->payload_size - 12),
6395 						CRC32_INIT_VALUE));
6396 
6397 				err = dhd_sendpkt(dhd, 0, pkt);
6398 				if (err != BCME_OK) {
6399 					DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
6400 						__FUNCTION__, pktid, err));
6401 					dhd->tput_data.pkts_bad++;
6402 				}
6403 				total_num_tx_pkts++;
6404 				if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
6405 					tx_stop_pkt = TRUE;
6406 				}
6407 			}
6408 			DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
6409 			if (!dhd_os_tput_test_wait(dhd, NULL,
6410 					TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
6411 				dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6412 				dhd->tput_data.tput_test_running = FALSE;
6413 				DHD_ERROR(("%s: TX completion timeout !"
6414 					" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
6415 					__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
6416 				err_exit = BCME_ERROR;
6417 				goto exit_error;
6418 			}
6419 			if (dhd->tput_start_ts && dhd->tput_stop_ts &&
6420 					(dhd->tput_stop_ts > dhd->tput_start_ts)) {
6421 				time_taken += dhd->tput_stop_ts - dhd->tput_start_ts;
6422 			} else {
6423 				dhd->tput_data.tput_test_running = FALSE;
6424 				DHD_ERROR(("%s: bad timestamp while calculating tx batch time\n",
6425 					__FUNCTION__));
6426 				err_exit = BCME_ERROR;
6427 				goto exit_error;
6428 			}
6429 			if (n_batches || n_remain) {
6430 				batch_cnt = TRUE;
6431 			} else {
6432 				batch_cnt = FALSE;
6433 			}
6434 		} while (batch_cnt);
6435 	} else {
6436 		/* TPUT_DIR_RX */
6437 		DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
6438 		if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
6439 			DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
6440 			dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6441 		}
6442 	}
6443 
6444 	/* calculate the throughput in bits per sec */
6445 	if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
6446 		(dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
6447 		if (!time_taken) {
6448 			time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
6449 		}
6450 		time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
6451 		dhd->tput_data.time_ms = time_taken;
6452 		if (time_taken) {
6453 			total_size = (pktsize * dhd->tput_data.pkts_cmpl * 8);
6454 			dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
6455 			/* convert from ms to seconds */
6456 			dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * MSEC_PER_SEC;
6457 		}
6458 	} else {
6459 		DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
6460 	}
6461 	DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
6462 		dhd->tput_data.tput_bps, dhd->tput_data.time_ms));
6463 	memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));
6464 
6465 	dhd->tput_data.tput_test_running = FALSE;
6466 
6467 	err_exit = BCME_OK;
6468 
6469 exit_error:
6470 	DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
6471 		__FUNCTION__, dhd->tput_data.pkts_good,
6472 		dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
6473 
6474 	return err_exit;
6475 }
6476 
6477 void
6478 dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
6479 {
6480 	uint8 *pktdata = NULL;
6481 	tput_pkt_t *tput_pkt = NULL;
6482 	uint32 crc = 0;
6483 	uint8 tput_pkt_hdr_size = 0;
6484 
6485 	pktdata = PKTDATA(dhd->osh, pkt);
6486 	if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
6487 		pktdata += sizeof(struct ether_header);
6488 	tput_pkt = (tput_pkt_t *)pktdata;
6489 
6490 	/* record the timestamp of the first packet received */
6491 	if (dhd->tput_data.pkts_cmpl == 0) {
6492 		dhd->tput_start_ts = OSL_SYSUPTIME_US();
6493 	}
6494 
6495 	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
6496 			dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
6497 		dhd->tput_data.pkts_cmpl++;
6498 	}
6499 	/* drop rx packets received beyond the specified # */
6500 	if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
6501 		return;
6502 
6503 	DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
6504 		ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));
6505 
6506 	/* discard if mac addr of AP/STA does not match the specified ones */
6507 	if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
6508 			ETHER_ADDR_LEN) != 0) ||
6509 		(memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
6510 			ETHER_ADDR_LEN) != 0)) {
6511 		dhd->tput_data.pkts_bad++;
6512 		DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
6513 			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
6514 		return;
6515 	}
6516 
6517 	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
6518 			(uint8 *)&tput_pkt->mac_sta);
6519 	pktdata += tput_pkt_hdr_size + 4;
6520 	crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
6521 			CRC32_INIT_VALUE);
6522 	if (crc != ntoh32(tput_pkt->crc32)) {
6523 		DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
6524 			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
6525 		dhd->tput_data.pkts_bad++;
6526 		return;
6527 	}
6528 
6529 	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
6530 		dhd->tput_data.pkts_good++;
6531 
6532 	/* if we have received the stop packet or all the # of pkts, we're done */
6533 	if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
6534 			dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
6535 		dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6536 		dhd_os_tput_test_wake(dhd);
6537 	}
6538 }
6539 #endif /* OEM_ANDROID */
6540 
6541 #ifdef DUMP_IOCTL_IOV_LIST
6542 void
6543 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
6544 {
6545 	dll_t *item;
6546 	dhd_iov_li_t *iov_li;
6547 	dhd->dump_iovlist_len++;
6548 
6549 	if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
6550 		item = dll_head_p(list_head);
6551 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6552 		dll_delete(item);
6553 		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6554 		dhd->dump_iovlist_len--;
6555 	}
6556 	dll_append(list_head, node);
6557 }
6558 
6559 void
6560 dhd_iov_li_print(dll_t *list_head)
6561 {
6562 	dhd_iov_li_t *iov_li;
6563 	dll_t *item, *next;
6564 	uint8 index = 0;
6565 	for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
6566 		next = dll_next_p(item);
6567 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6568 		DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
6569 	}
6570 }
6571 
6572 void
6573 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
6574 {
6575 	dll_t *item;
6576 	dhd_iov_li_t *iov_li;
6577 	while (!(dll_empty(list_head))) {
6578 		item = dll_head_p(list_head);
6579 		iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6580 		dll_delete(item);
6581 		MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6582 	}
6583 }
6584 #endif /* DUMP_IOCTL_IOV_LIST */
6585 
6586 /* configurations of ecounters to be enabled by default in FW */
6587 static ecounters_cfg_t ecounters_cfg_tbl[] = {
6588 	/* Global ecounters */
6589 	{ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
6590 	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
6591 	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
6592 
6593 	/* Slice specific ecounters */
6594 	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6595 	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6596 	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
6597 
6598 	/* Interface specific ecounters */
6599 	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
6600 	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
6601 	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
6602 	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
6603 
6604 	/* secondary interface */
6605 };
6606 
6607 static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
6608 	/* Interface specific event ecounters */
6609 	{WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
6610 };
6611 
6612 /* Accepts an argument to -s, -g or -f and creates an XTLV */
6613 int
6614 dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
6615 	uint16 stats_rep, uint8 **xtlv)
6616 {
6617 	uint8 *req_xtlv = NULL;
6618 	ecounters_stats_types_report_req_t *req;
6619 	bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
6620 	ecountersv2_xtlv_list_elt_t temp;
6621 	uint16 xtlv_len = 0, total_len = 0;
6622 	int rc = BCME_OK;
6623 
6624 	/* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
6625 	temp.id = stats_rep;
6626 	temp.len = 0;
6627 
6628 	/* Hence len/data = 0/NULL */
6629 	xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
6630 
6631 	/* Total length of the container */
6632 	total_len = BCM_XTLV_HDR_SIZE +
6633 		OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
6634 
6635 	/* Now allocate a structure for the entire request */
6636 	if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
6637 		rc = BCME_NOMEM;
6638 		goto fail;
6639 	}
6640 
6641 	/* container XTLV context */
6642 	bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
6643 		BCM_XTLV_OPTION_ALIGN32);
6644 
6645 	/* Fill other XTLVs in the container. Leave space for XTLV headers */
6646 	req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
6647 	req->flags = type;
6648 	if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
6649 		req->slice_mask = 0x1 << if_slice_idx;
6650 	} else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
6651 		req->if_index = if_slice_idx;
6652 	}
6653 
6654 	/* Fill remaining XTLVs */
6655 	bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
6656 		BCM_XTLV_OPTION_ALIGN32);
6657 	if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
6658 		DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
6659 		rc = BCME_ERROR;
6660 		goto fail;
6661 	}
6662 
6663 	/* fill the top level container and get done with the XTLV container */
6664 	rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
6665 		bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
6666 		stats_types_req));
6667 
6668 	if (rc) {
6669 		DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
6670 		goto fail;
6671 	}
6672 
6673 fail:
6674 	if (rc && req_xtlv) {
6675 		MFREE(dhd->osh, req_xtlv, total_len);
6676 		req_xtlv = NULL;
6677 	}
6678 
6679 	/* update the xtlv pointer */
6680 	*xtlv = req_xtlv;
6681 	return rc;
6682 }
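
/*
 * Editor's note: the request built above is one outer "report request" XTLV
 * wrapping a fixed header plus one zero-length inner XTLV naming the stats
 * type.  The sizing sketch below mirrors that arithmetic; EX_XTLV_HDR stands
 * in for BCM_XTLV_HDR_SIZE (2-byte id + 2-byte len) and, like
 * example_ecounters_req_len() and DHD_DOC_EXAMPLES, is hypothetical.
 */
#ifdef DHD_DOC_EXAMPLES
#define EX_XTLV_HDR	4u

static unsigned int
example_ecounters_req_len(unsigned int fixed_hdr_len, unsigned int n_types)
{
	/* each requested stats type is an empty XTLV: header only, len == 0 */
	unsigned int inner_len = n_types * EX_XTLV_HDR;

	return EX_XTLV_HDR + fixed_hdr_len + inner_len;	/* outer hdr + body */
}
#endif /* DHD_DOC_EXAMPLES */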
6683 
6684 int
6685 dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
6686 {
6687 	wl_el_set_type_t logset_type, logset_op;
6688 	int ret = BCME_ERROR;
6689 	int i = 0, err = 0;
6690 
6691 	if (!dhd || !logset_mask)
6692 		return BCME_BADARG;
6693 
6694 	*logset_mask = 0;
6695 	memset(&logset_type, 0, sizeof(logset_type));
6696 	memset(&logset_op, 0, sizeof(logset_op));
6697 	logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
6698 	logset_type.len = htod16(sizeof(wl_el_set_type_t));
6699 	for (i = 0; i < dhd->event_log_max_sets; i++) {
6700 		logset_type.set = i;
6701 		err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
6702 				sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
6703 		/* the iovar may return 'unsupported' error if a log set number is not present
6704 		* in the fw, so we should not return on error !
6705 		*/
6706 		if (err == BCME_OK &&
6707 				logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
6708 			*logset_mask |= 0x01u << i;
6709 			ret = BCME_OK;
6710 			DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
6711 		}
6712 	}
6713 
6714 	return ret;
6715 }
6716 
6717 static int
6718 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
6719 {
6720 	int rc = BCME_OK;
6721 	uint32 buf;
6722 	rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6723 
6724 	if (rc != BCME_OK) {
6725 
6726 		if (rc != BCME_UNSUPPORTED) {
6727 			DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
6728 			rc = BCME_OK;
6729 		} else {
6730 			DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
6731 		}
6732 	}
6733 
6734 	return rc;
6735 }
6736 
6737 int
6738 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
6739 {
6740 	int rc = BCME_OK;
6741 	if (enable) {
6742 		if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
6743 			if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
6744 				DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
6745 			} else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
6746 				DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
6747 			}
6748 		}
6749 	} else {
6750 		if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
6751 			DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
6752 		} else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
6753 			DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
6754 		}
6755 	}
6756 	return rc;
6757 }
6758 
6759 int
6760 dhd_start_ecounters(dhd_pub_t *dhd)
6761 {
6762 	uint8 i = 0;
6763 	uint8 *start_ptr;
6764 	int rc = BCME_OK;
6765 	bcm_xtlv_t *elt;
6766 	ecounters_config_request_v2_t *req = NULL;
6767 	ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
6768 	ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
6769 	uint16 total_processed_containers_len = 0;
6770 
6771 	for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
6772 		ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
6773 
6774 		if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
6775 			MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
6776 			DHD_ERROR(("Ecounters v2: No memory to process\n"));
6777 			goto fail;
6778 		}
6779 
6780 		rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
6781 			ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
6782 
6783 		if (rc) {
6784 			DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
6785 				ecounter_stat->stats_rep, rc));
6786 
6787 			/* Free allocated memory and go to fail to release any memories allocated
6788 			 * in previous iterations. Note that list_elt->data gets populated in
6789 			 * dhd_create_ecounters_params() and gets freed there itself.
6790 			 */
6791 			MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6792 			list_elt = NULL;
6793 			goto fail;
6794 		}
6795 		elt = (bcm_xtlv_t *) list_elt->data;
6796 
6797 		/* Put the elements in the order they are processed */
6798 		if (processed_containers_list == NULL) {
6799 			processed_containers_list = list_elt;
6800 		} else {
6801 			tail->next = list_elt;
6802 		}
6803 		tail = list_elt;
6804 		/* Size of the XTLV returned */
6805 		total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6806 	}
6807 
6808 	/* Now create the ecounters config request with the total length */
6809 	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
6810 		total_processed_containers_len);
6811 
6812 	if (req == NULL) {
6813 		rc = BCME_NOMEM;
6814 		goto fail;
6815 	}
6816 
6817 	req->version = ECOUNTERS_VERSION_2;
6818 	req->logset = EVENT_LOG_SET_ECOUNTERS;
6819 	req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
6820 	req->num_reports = ECOUNTERS_NUM_REPORTS;
6821 	req->len = total_processed_containers_len +
6822 		OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6823 
6824 	/* Copy config */
6825 	start_ptr = req->ecounters_xtlvs;
6826 
6827 	/* Now go element by element in the list */
6828 	while (processed_containers_list) {
6829 		list_elt = processed_containers_list;
6830 
6831 		elt = (bcm_xtlv_t *)list_elt->data;
6832 
6833 		memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6834 		start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6835 		processed_containers_list = processed_containers_list->next;
6836 
6837 		/* Free allocated memories */
6838 		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6839 		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6840 	}
6841 
6842 	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6843 		DHD_ERROR(("failed to start ecounters\n"));
6844 	}
6845 
6846 fail:
6847 	if (req) {
6848 		MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
6849 	}
6850 
6851 	/* Now go element by element in the list */
6852 	while (processed_containers_list) {
6853 		list_elt = processed_containers_list;
6854 		elt = (bcm_xtlv_t *)list_elt->data;
6855 		processed_containers_list = processed_containers_list->next;
6856 
6857 		/* Free allocated memories */
6858 		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6859 		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6860 	}
6861 	return rc;
6862 }
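/*
 * Sizing note (illustrative): each processed container above is one XTLV, so
 * its footprint in the request is BCM_XTLV_HDR_SIZE + BCM_XTLV_LEN(elt), and
 * the final request length is the sum of those footprints plus the fixed
 * header up to the flexible ecounters_xtlvs[] member. For two hypothetical
 * containers with payload lengths len0 and len1:
 *
 *	req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs) +
 *		(BCM_XTLV_HDR_SIZE + len0) + (BCM_XTLV_HDR_SIZE + len1);
 */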
6863 
6864 int
6865 dhd_stop_ecounters(dhd_pub_t *dhd)
6866 {
6867 	int rc = BCME_OK;
6868 	ecounters_config_request_v2_t *req;
6869 
6870 	/* Create the ecounters config request (header only, no XTLVs) */
6871 	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
6872 
6873 	if (req == NULL) {
6874 		rc = BCME_NOMEM;
6875 		goto fail;
6876 	}
6877 
6878 	req->version = ECOUNTERS_VERSION_2;
6879 	req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6880 
6881 	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6882 		DHD_ERROR(("failed to stop ecounters\n"));
6883 	}
6884 
6885 fail:
6886 	if (req) {
6887 		MFREE(dhd->osh, req, sizeof(*req));
6888 	}
6889 	return rc;
6890 }
6891 
6892 /* configured event_id_array for event ecounters */
6893 typedef struct event_id_array {
6894 	uint8	event_id;
6895 	uint8	str_idx;
6896 } event_id_array_t;
6897 
6898 /* build the array of unique event ids from event_ecounters_cfg_tbl[] */
6899 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
6900 {
6901 	uint8 i;
6902 	uint8 idx = 0;
6903 	int32 prev_evt_id = -1;
6904 
6905 	for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
6906 		if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
6907 			if (prev_evt_id >= 0)
6908 				idx++;
6909 			event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
6910 			event_array[idx].str_idx = i;
6911 		}
6912 		prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
6913 	}
6914 	return idx;
6915 }
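/*
 * Worked example (table contents are hypothetical): if the event ids in
 * event_ecounters_cfg_tbl[] were {4, 4, 16, 54, 54}, the function would fill
 *
 *	event_array[0] = { .event_id = 4,  .str_idx = 0 }
 *	event_array[1] = { .event_id = 16, .str_idx = 2 }
 *	event_array[2] = { .event_id = 54, .str_idx = 3 }
 *
 * and return 2, i.e. the index of the last unique entry, which is why the
 * caller below iterates with 'i <= event_id_cnt'. str_idx records where each
 * event id's run of rows begins in the table.
 */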
6916 
6917 /* Each event id may request only a limited number of XTLVs: wl_ifstats_xtlv_id count * 2 interfaces */
6918 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
6919 
6920 int
6921 dhd_start_event_ecounters(dhd_pub_t *dhd)
6922 {
6923 	uint8 i, j = 0;
6924 	uint8 event_id_cnt = 0;
6925 	uint16 processed_containers_len = 0;
6926 	uint16 max_xtlv_len = 0;
6927 	int rc = BCME_OK;
6928 	uint8 *ptr;
6929 	uint8 *data;
6930 	event_id_array_t *id_array;
6931 	bcm_xtlv_t *elt = NULL;
6932 	event_ecounters_config_request_v2_t *req = NULL;
6933 
6934 	id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
6935 		ARRAYSIZE(event_ecounters_cfg_tbl));
6936 
6937 	if (id_array == NULL) {
6938 		rc = BCME_NOMEM;
6939 		goto fail;
6940 	}
6941 	event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
6942 
6943 	max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
6944 		OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
6945 		ECNTRS_MAX_XTLV_NUM);
6946 
6947 	/* Now create ecounters config request with max allowed length */
6948 	req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
6949 		sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len);
6950 
6951 	if (req == NULL) {
6952 		rc = BCME_NOMEM;
6953 		goto fail;
6954 	}
6955 
6956 	for (i = 0; i <= event_id_cnt; i++) {
6957 		/* req initialization by event id */
6958 		req->version = ECOUNTERS_VERSION_2;
6959 		req->logset = EVENT_LOG_SET_ECOUNTERS;
6960 		req->event_id = id_array[i].event_id;
6961 		req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
6962 		req->len = 0;
6963 		processed_containers_len = 0;
6964 
6965 		/* Copy config */
6966 		ptr = req->ecounters_xtlvs;
6967 
6968 		for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
6969 			event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
6970 			if (id_array[i].event_id != event_ecounter_stat->event_id)
6971 				break;
6972 
6973 			rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
6974 				event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
6975 				&data);
6976 
6977 			if (rc) {
6978 				DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
6979 					__FUNCTION__, event_ecounter_stat->stats_rep, rc));
6980 				goto fail;
6981 			}
6982 
6983 			elt = (bcm_xtlv_t *)data;
6984 
6985 			memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6986 			ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6987 			processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6988 
6989 			/* Free the memory allocated by dhd_create_ecounters_params */
6990 			MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6991 
6992 			if (processed_containers_len > max_xtlv_len) {
6993 				DHD_ERROR(("%s XTLV data exceeds the allowed maximum!!\n",
6994 					__FUNCTION__));
6995 				rc = BCME_BADLEN;
6996 				goto fail;
6997 			}
6998 		}
6999 
7000 		req->len = processed_containers_len +
7001 			OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
7002 
7003 		DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
7004 			__FUNCTION__, req->version, req->logset, req->event_id,
7005 			req->flags, req->len));
7006 
7007 		rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
7008 
7009 		if (rc < 0) {
7010 			DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
7011 				req->event_id, rc));
7012 			goto fail;
7013 		}
7014 	}
7015 
7016 fail:
7017 	/* Free allocated memories */
7018 	if (req) {
7019 		MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len);
7020 	}
7021 	if (id_array) {
7022 		MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
7023 			ARRAYSIZE(event_ecounters_cfg_tbl));
7024 	}
7025 
7026 	return rc;
7027 }
7028 
7029 int
7030 dhd_stop_event_ecounters(dhd_pub_t *dhd)
7031 {
7032 	int rc = BCME_OK;
7033 	event_ecounters_config_request_v2_t *req;
7034 
7035 	/* Create the event ecounters config request (header only, no XTLVs) */
7036 	req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
7037 
7038 	if (req == NULL) {
7039 		rc = BCME_NOMEM;
7040 		goto fail;
7041 	}
7042 
7043 	req->version = ECOUNTERS_VERSION_2;
7044 	req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
7045 	req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
7046 
7047 	if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
7048 		DHD_ERROR(("failed to stop event_ecounters\n"));
7049 	}
7050 
7051 fail:
7052 	if (req) {
7053 		MFREE(dhd->osh, req, sizeof(*req));
7054 	}
7055 	return rc;
7056 }
7057 
7058 #ifdef DHD_LOG_DUMP
7059 int
7060 dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
7061 		log_dump_section_hdr_t *sec_hdr,
7062 		char *text_hdr, int buflen, uint32 sec_type)
7063 {
7064 	uint32 rlen = 0;
7065 	uint32 data_len = 0;
7066 	void *data = NULL;
7067 	unsigned long flags = 0;
7068 	int ret = 0;
7069 	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
7070 	int pos = 0;
7071 	int fpos_sechdr = 0;
7072 
7073 	if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
7074 		return BCME_BADARG;
7075 	}
7076 	/* do not allow further writes to the ring
7077 	 * till we flush it
7078 	 */
7079 	DHD_DBG_RING_LOCK(ring->lock, flags);
7080 	ring->state = RING_SUSPEND;
7081 	DHD_DBG_RING_UNLOCK(ring->lock, flags);
7082 
7083 	if (dhdp->concise_dbg_buf) {
7084 		/* re-use concise debug buffer temporarily
7085 		 * to pull ring data, to write
7086 		 * record by record to file
7087 		 */
7088 		data_len = CONCISE_DUMP_BUFLEN;
7089 		data = dhdp->concise_dbg_buf;
7090 		ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
7091 		/* write the section header now with zero length,
7092 		 * once the correct length is found out, update
7093 		 * it later
7094 		 */
7095 		fpos_sechdr = pos;
7096 		sec_hdr->type = sec_type;
7097 		sec_hdr->length = 0;
7098 		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
7099 			sizeof(*sec_hdr), &pos);
7100 		do {
7101 			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
7102 			if (rlen > 0) {
7103 				/* write the log */
7104 				ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
7105 			}
7106 			DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
7107 		} while ((rlen > 0));
7108 		/* now update the section header length in the file */
7109 		/* Complete ring size is dumped by HAL, hence updating length to ring size */
7110 		sec_hdr->length = ring->ring_size;
7111 		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
7112 			sizeof(*sec_hdr), &fpos_sechdr);
7113 	} else {
7114 		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
7115 	}
7116 	DHD_DBG_RING_LOCK(ring->lock, flags);
7117 	ring->state = RING_ACTIVE;
7118 	/* Resetting both read and write pointer,
7119 	 * since all items are read.
7120 	 */
7121 	ring->rp = ring->wp = 0;
7122 	DHD_DBG_RING_UNLOCK(ring->lock, flags);
7123 
7124 	return ret;
7125 }
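/*
 * Pattern sketch (pseudo-code, not a real helper): both ring dump paths use
 * the same "placeholder header, drain, backpatch" sequence. With 'pos'
 * tracking the output position:
 *
 *	fpos_sechdr = pos;                     remember where the header lands
 *	sec_hdr->length = 0;
 *	write(sec_hdr, &pos);                  placeholder section header
 *	while ((rlen = pull_one_record()) > 0)
 *		write(record, &pos);           ring contents
 *	sec_hdr->length = final_length;
 *	write(sec_hdr, &fpos_sechdr);          rewrite the header in place
 */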
7126 
7127 int
7128 dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
7129 		unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
7130 		char *text_hdr, uint32 sec_type)
7131 {
7132 	uint32 rlen = 0;
7133 	uint32 data_len = 0, total_len = 0;
7134 	void *data = NULL;
7135 	unsigned long fpos_sechdr = 0;
7136 	unsigned long flags = 0;
7137 	int ret = 0;
7138 	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
7139 
7140 	if (!dhdp || !ring || !file || !sec_hdr ||
7141 		!file_posn || !text_hdr)
7142 		return BCME_BADARG;
7143 
7144 	/* do not allow further writes to the ring
7145 	 * till we flush it
7146 	 */
7147 	DHD_DBG_RING_LOCK(ring->lock, flags);
7148 	ring->state = RING_SUSPEND;
7149 	DHD_DBG_RING_UNLOCK(ring->lock, flags);
7150 
7151 	if (dhdp->concise_dbg_buf) {
7152 		/* re-use concise debug buffer temporarily
7153 		 * to pull ring data, to write
7154 		 * record by record to file
7155 		 */
7156 		data_len = CONCISE_DUMP_BUFLEN;
7157 		data = dhdp->concise_dbg_buf;
7158 		dhd_os_write_file_posn(file, file_posn, text_hdr,
7159 				strlen(text_hdr));
7160 		/* write the section header now with zero length,
7161 		 * once the correct length is found out, update
7162 		 * it later
7163 		 */
7164 		dhd_init_sec_hdr(sec_hdr);
7165 		fpos_sechdr = *file_posn;
7166 		sec_hdr->type = sec_type;
7167 		sec_hdr->length = 0;
7168 		dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
7169 				sizeof(*sec_hdr));
7170 		do {
7171 			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
7172 			if (rlen > 0) {
7173 				/* write the log */
7174 				ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
7175 				if (ret < 0) {
7176 					DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7177 					DHD_DBG_RING_LOCK(ring->lock, flags);
7178 					ring->state = RING_ACTIVE;
7179 					DHD_DBG_RING_UNLOCK(ring->lock, flags);
7180 					return BCME_ERROR;
7181 				}
7182 			}
7183 			total_len += rlen;
7184 		} while (rlen > 0);
7185 		/* now update the section header length in the file */
7186 		sec_hdr->length = total_len;
7187 		dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
7188 	} else {
7189 		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
7190 	}
7191 
7192 	DHD_DBG_RING_LOCK(ring->lock, flags);
7193 	ring->state = RING_ACTIVE;
7194 	/* Resetting both read and write pointer,
7195 	 * since all items are read.
7196 	 */
7197 	ring->rp = ring->wp = 0;
7198 	DHD_DBG_RING_UNLOCK(ring->lock, flags);
7199 	return BCME_OK;
7200 }
7201 
7202 /* logdump cookie */
7203 #define MAX_LOGUDMP_COOKIE_CNT	10u
7204 #define LOGDUMP_COOKIE_STR_LEN	50u
7205 int
7206 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
7207 {
7208 	uint32 ring_size;
7209 
7210 	if (!dhdp || !buf) {
7211 		DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
7212 		return BCME_ERROR;
7213 	}
7214 
7215 	ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
7216 	if (buf_size < ring_size) {
7217 		DHD_ERROR(("BUF SIZE IS TOO SHORT: req:%d buf_size:%d\n",
7218 			ring_size, buf_size));
7219 		return BCME_ERROR;
7220 	}
7221 
7222 	dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
7223 		LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
7224 		DHD_RING_TYPE_FIXED);
7225 	if (!dhdp->logdump_cookie) {
7226 		DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
7227 		return BCME_ERROR;
7228 	}
7229 
7230 	return BCME_OK;
7231 }
7232 
7233 void
7234 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
7235 {
7236 	if (!dhdp) {
7237 		return;
7238 	}
7239 	if (dhdp->logdump_cookie) {
7240 		dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
7241 	}
7242 
7243 	return;
7244 }
7245 
7246 void
7247 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
7248 {
7249 	char *ptr;
7250 
7251 	if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
7252 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
7253 			" type = %p, cookie_cfg:%p\n", __FUNCTION__,
7254 			dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
7255 		return;
7256 	}
7257 	ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
7258 	if (ptr == NULL) {
7259 		DHD_ERROR(("%s : Skipping save due to locking\n", __FUNCTION__));
7260 		return;
7261 	}
7262 	scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
7263 	return;
7264 }
7265 
7266 int
7267 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
7268 {
7269 	char *ptr;
7270 
7271 	if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
7272 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
7273 			" cookie=%p cookie_cfg:%p\n", __FUNCTION__,
7274 			dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
7275 		return BCME_ERROR;
7276 	}
7277 	ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
7278 	if (ptr == NULL) {
7279 		DHD_ERROR(("%s : Skipping get due to locking\n", __FUNCTION__));
7280 		return BCME_ERROR;
7281 	}
7282 	memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
7283 	dhd_ring_free_first(dhdp->logdump_cookie);
7284 	return BCME_OK;
7285 }
7286 
7287 int
7288 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
7289 {
7290 	if (!dhdp || !dhdp->logdump_cookie) {
7291 		DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
7292 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
7293 		return 0;
7294 	}
7295 	return dhd_ring_get_cur_size(dhdp->logdump_cookie);
7296 }
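/*
 * Usage sketch (illustrative only; the buffer size and the cookie/type
 * strings are assumptions): the cookie ring is laid over a caller-supplied
 * buffer and then used as a small FIFO of "<type>: <cookie>" strings.
 *
 *	uint8 cookie_buf[2048];
 *	if (dhd_logdump_cookie_init(dhdp, cookie_buf, sizeof(cookie_buf)) == BCME_OK) {
 *		char one[LOGDUMP_COOKIE_STR_LEN] = {0};
 *		dhd_logdump_cookie_save(dhdp, "0xdeadbeef", "[DUMP_TYPE]");
 *		while (dhd_logdump_cookie_count(dhdp) > 0) {
 *			if (dhd_logdump_cookie_get(dhdp, one, sizeof(one)) != BCME_OK)
 *				break;
 *		}
 *		dhd_logdump_cookie_deinit(dhdp);
 *	}
 */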
7297 
7298 static inline int
7299 __dhd_log_dump_cookie_to_file(
7300 	dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
7301 	char *buf, uint32 buf_size)
7302 {
7303 
7304 	uint32 remain = buf_size;
7305 	int ret = BCME_ERROR;
7306 	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7307 	log_dump_section_hdr_t sec_hdr;
7308 	uint32 read_idx;
7309 	uint32 write_idx;
7310 
7311 	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7312 	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7313 	while (dhd_logdump_cookie_count(dhdp) > 0) {
7314 		memset(tmp_buf, 0, sizeof(tmp_buf));
7315 		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7316 		if (ret != BCME_OK) {
7317 			return ret;
7318 		}
7319 		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7320 	}
7321 	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7322 	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7323 
7324 	ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
7325 	if (ret < 0) {
7326 		DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
7327 		return ret;
7328 	}
7329 	sec_hdr.magic = LOG_DUMP_MAGIC;
7330 	sec_hdr.timestamp = local_clock();
7331 	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7332 	sec_hdr.length = buf_size - remain;
7333 
7334 	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
7335 	if (ret < 0) {
7336 		DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
7337 		return ret;
7338 	}
7339 
7340 	ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
7341 	if (ret < 0) {
7342 		DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
7343 	}
7344 
7345 	return ret;
7346 }
7347 
7348 uint32
7349 dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
7350 {
7351 	int len = 0;
7352 	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7353 	log_dump_section_hdr_t sec_hdr;
7354 	char *buf = NULL;
7355 	int ret = BCME_ERROR;
7356 	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7357 	uint32 read_idx;
7358 	uint32 write_idx;
7359 	uint32 remain;
7360 
7361 	remain = buf_size;
7362 
7363 	if (!dhdp || !dhdp->logdump_cookie) {
7364 		DHD_ERROR(("%s At least one ptr is NULL "
7365 			"dhdp = %p cookie %p\n",
7366 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7367 		goto exit;
7368 	}
7369 
7370 	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7371 	if (!buf) {
7372 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7373 		goto exit;
7374 	}
7375 
7376 	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7377 	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7378 	while (dhd_logdump_cookie_count(dhdp) > 0) {
7379 		memset(tmp_buf, 0, sizeof(tmp_buf));
7380 		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7381 		if (ret != BCME_OK) {
7382 			goto exit;
7383 		}
7384 		remain -= (uint32)strlen(tmp_buf);
7385 	}
7386 	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7387 	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7388 	len += strlen(COOKIE_LOG_HDR);
7389 	len += sizeof(sec_hdr);
7390 	len += (buf_size - remain);
7391 exit:
7392 	if (buf)
7393 		MFREE(dhdp->osh, buf, buf_size);
7394 	return len;
7395 }
7396 
7397 int
7398 dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
7399 {
7400 	int ret = BCME_ERROR;
7401 	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7402 	log_dump_section_hdr_t sec_hdr;
7403 	char *buf = NULL;
7404 	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7405 	int pos = 0;
7406 	uint32 read_idx;
7407 	uint32 write_idx;
7408 	uint32 remain;
7409 
7410 	remain = buf_size;
7411 
7412 	if (!dhdp || !dhdp->logdump_cookie) {
7413 		DHD_ERROR(("%s At least one ptr is NULL "
7414 			"dhdp = %p cookie %p\n",
7415 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7416 		goto exit;
7417 	}
7418 
7419 	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7420 	if (!buf) {
7421 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7422 		goto exit;
7423 	}
7424 
7425 	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7426 	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7427 	while (dhd_logdump_cookie_count(dhdp) > 0) {
7428 		memset(tmp_buf, 0, sizeof(tmp_buf));
7429 		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7430 		if (ret != BCME_OK) {
7431 			goto exit;
7432 		}
7433 		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7434 	}
7435 	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7436 	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7437 	ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
7438 	sec_hdr.magic = LOG_DUMP_MAGIC;
7439 	sec_hdr.timestamp = local_clock();
7440 	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7441 	sec_hdr.length = buf_size - remain;
7442 	ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
7443 	ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
7444 exit:
7445 	if (buf)
7446 		MFREE(dhdp->osh, buf, buf_size);
7447 	return ret;
7448 }
7449 
7450 int
7451 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
7452 {
7453 	char *buf;
7454 	int ret = BCME_ERROR;
7455 	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7456 
7457 	if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
7458 		DHD_ERROR(("%s At least one ptr is NULL "
7459 			"dhdp = %p cookie %p fp = %p f_pos = %p\n",
7460 			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
7461 		return ret;
7462 	}
7463 
7464 	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7465 	if (!buf) {
7466 		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7467 		return ret;
7468 	}
7469 	ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
7470 	MFREE(dhdp->osh, buf, buf_size);
7471 
7472 	return ret;
7473 }
7474 
7475 #endif /* DHD_LOG_DUMP */
7476 
7477 #ifdef DHD_LOG_DUMP
7478 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC	4
7479 void
7480 dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
7481 {
7482 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7483 	log_dump_type_t *flush_type;
7484 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7485 	uint64 current_time_sec;
7486 
7487 	if (!dhdp) {
7488 		DHD_ERROR(("dhdp is NULL !\n"));
7489 		return;
7490 	}
7491 
7492 	if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
7493 		DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
7494 		return;
7495 	}
7496 
7497 	current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7498 
7499 	DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
7500 		__FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
7501 		DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7502 
7503 	if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
7504 		DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
7505 			__FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7506 		return;
7507 	}
7508 
7509 	clear_debug_dump_time(dhdp->debug_dump_time_str);
7510 #ifdef DHD_PCIE_RUNTIMEPM
7511 	/* wake up RPM if SYSDUMP is triggered */
7512 	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
7513 #endif /* DHD_PCIE_RUNTIMEPM */
7514 	/*  */
7515 	dhdp->debug_dump_subcmd = subcmd;
7516 
7517 	dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7518 
7519 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7520 	/* flush_type is freed at do_dhd_log_dump function */
7521 	flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
7522 	if (flush_type) {
7523 		*flush_type = DLD_BUF_TYPE_ALL;
7524 		dhd_schedule_log_dump(dhdp, flush_type);
7525 	} else {
7526 		DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
7527 		return;
7528 	}
7529 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7530 
7531 	/* Inside dhd_mem_dump, event notification will be sent to HAL and
7532 	 * from other context DHD pushes memdump, debug_dump and pktlog dump
7533 	 * to HAL and HAL will write into file
7534 	 */
7535 #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
7536 	dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
7537 	dhd_bus_mem_dump(dhdp);
7538 #endif /* (BCMPCIE || BCMSDIO) && DHD_FW_COREDUMP */
7539 
7540 #if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7541 	dhd_schedule_pktlog_dump(dhdp);
7542 #endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7543 }
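/*
 * Debounce sketch (values are hypothetical): the trigger is rate limited by
 * comparing boot time in seconds against the time of the previous dump.
 *
 *	now  = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);    e.g. 1000
 *	last = dhdp->debug_dump_time_sec;                           e.g.  998
 *	now - last = 2 < DEBUG_DUMP_TRIGGER_INTERVAL_SEC (4)  ->  dump is skipped
 */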
7544 #endif /* DHD_LOG_DUMP */
7545 
7546 #ifdef EWP_EDL
7547 /* For now, memory for the EDL ring is allocated with DMA_ALLOC_CONSISTENT,
7548 * because on hikey, DMA_MAPping preallocated memory fails with an
7549 * 'out of space in SWIOTLB' error.
7550 */
7551 int
7552 dhd_edl_mem_init(dhd_pub_t *dhd)
7553 {
7554 	int ret = 0;
7555 
7556 	memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
7557 	ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
7558 	if (ret != BCME_OK) {
7559 		DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
7560 			__FUNCTION__));
7561 		return BCME_ERROR;
7562 	}
7563 	return BCME_OK;
7564 }
7565 
7566 /* NOTE: dhd_edl_mem_deinit() need not be called explicitly, because the dma_buf
7567 * for EDL is freed in 'dhd_prot_detach_edl_rings', which is called during de-init.
7568 */
7569 void
7570 dhd_edl_mem_deinit(dhd_pub_t *dhd)
7571 {
7572 	if (dhd->edl_ring_mem.va != NULL)
7573 		dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
7574 }
7575 
7576 int
7577 dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
7578 		void *evt_decode_data)
7579 {
7580 	msg_hdr_edl_t *msg = NULL;
7581 	cmn_msg_hdr_t *cmn_msg_hdr = NULL;
7582 	uint8 *buf = NULL;
7583 
7584 	if (!data || !dhdp || !evt_decode_data) {
7585 		DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
7586 		return BCME_ERROR;
7587 	}
7588 
7589 	/* format of data in each work item in the EDL ring:
7590 	* |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
7591 	* payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
7592 	*/
7593 	cmn_msg_hdr = (cmn_msg_hdr_t *)data;
7594 	msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
7595 	buf = (uint8 *)msg;
7596 	/* validate the fields */
7597 	if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
7598 		DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
7599 			" expected (0x%x)\n", __FUNCTION__,
7600 			msg->infobuf_ver, PCIE_INFOBUF_V1));
7601 		return BCME_VERSION;
7602 	}
7603 
7604 	/* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
7605 	if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
7606 		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
7607 			__FUNCTION__));
7608 		return BCME_BUFTOOLONG;
7609 		return BCME_BUFTOOSHORT;
7610 
7611 	if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
7612 		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
7613 			__FUNCTION__, ltoh16(msg->pyld_hdr.type)));
7614 		return BCME_BADOPTION;
7615 	}
7616 
7617 	if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
7618 		DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
7619 			" than available buffer size %u\n", __FUNCTION__,
7620 			ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
7621 		return BCME_BADLEN;
7622 	}
7623 
7624 	/* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
7625 	buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
7626 	dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
7627 		ltoh16(msg->pyld_hdr.length));
7628 
7629 	/* check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
7630 	* copy the event data to the skb and send it up the stack
7631 	*/
7632 #ifdef BCMPCIE
7633 	if (dhdp->logtrace_pkt_sendup) {
7634 		DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
7635 			(uint32)(ltoh16(msg->pyld_hdr.length) +
7636 			sizeof(info_buf_payload_hdr_t) + 4)));
7637 		dhd_sendup_info_buf(dhdp, (uint8 *)msg);
7638 	}
7639 #endif /* BCMPCIE */
7640 
7641 	return BCME_OK;
7642 }
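/*
 * Layout sketch (restates the format comment above; only the relationships,
 * not absolute byte offsets, are asserted):
 *
 *	data
 *	+-- cmn_msg_hdr_t              request_id carries the payload length
 *	+-- msg (msg_hdr_edl_t)
 *	    +-- infobuf_ver (u32)      must equal PCIE_INFOBUF_V1
 *	    +-- pyld_hdr               type/length of the logtrace payload
 *	    +-- msgtrace_hdr_t + data  handed to dhd_dbg_trace_evnt_handler()
 *	                               starting at buf + sizeof(infobuf_ver)
 *	                               + sizeof(pyld_hdr)
 */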
7643 #endif /* EWP_EDL */
7644 
7645 #if defined(SHOW_LOGTRACE)
7646 int
7647 dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
7648 {
7649 	void *file = NULL;
7650 	int size = 0;
7651 	char buf[FW_VER_STR_LEN];
7652 	char *str = NULL;
7653 	int ret = BCME_OK;
7654 
7655 	if (!fwpath)
7656 		return BCME_BADARG;
7657 
7658 	file = dhd_os_open_image1(dhdp, fwpath);
7659 	if (!file) {
7660 		ret = BCME_ERROR;
7661 		goto exit;
7662 	}
7663 	size = dhd_os_get_image_size(file);
7664 	if (!size) {
7665 		ret = BCME_ERROR;
7666 		goto exit;
7667 	}
7668 
7669 	/* seek to the last FW_VER_STR_LEN bytes of the file */
7670 	if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
7671 		ret = BCME_ERROR;
7672 		goto exit;
7673 	}
7674 
7675 	/* read the last FW_VER_STR_LEN bytes of the file into a buffer */
7676 	memset(buf, 0, FW_VER_STR_LEN);
7677 	if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
7678 		ret = BCME_ERROR;
7679 		goto exit;
7680 	}
7681 	/* search for 'Version' in the buffer */
7682 	str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
7683 	if (!str) {
7684 		ret = BCME_ERROR;
7685 		goto exit;
7686 	}
7687 	/* walk backwards over printable ascii characters */
7688 	while (str != buf &&
7689 		(*str >= ' ' && *str <= '~')) {
7690 		--str;
7691 	}
7692 	/* reverse the final decrement, so that str is pointing
7693 	* to the first ascii character in the buffer
7694 	*/
7695 	++str;
7696 
7697 	if (strlen(str) > (FW_VER_STR_LEN - 1)) {
7698 		ret = BCME_BADLEN;
7699 		goto exit;
7700 	}
7701 
7702 	DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
7703 	/* copy to global variable, so that in case FW load fails, the
7704 	* core capture logs will contain FW version read from the file
7705 	*/
7706 	memset(fw_version, 0, FW_VER_STR_LEN);
7707 	strlcpy(fw_version, str, FW_VER_STR_LEN);
7708 
7709 exit:
7710 	if (file)
7711 		dhd_os_close_image1(dhdp, file);
7712 
7713 	return ret;
7714 }
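/*
 * Usage sketch (the call site and the 'fw_path' variable are assumptions):
 * invoking this before a firmware download attempt populates the global
 * fw_version string, so the core capture logs carry the version even if the
 * download later fails.
 *
 *	if (dhd_print_fw_ver_from_file(dhdp, fw_path) != BCME_OK)
 *		DHD_ERROR(("could not read FW version from %s\n", fw_path));
 */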
7715 #endif // endif
7716 
7717 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
7718 
7719 /* Ignore compiler warnings due to -Werror=cast-qual */
7720 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
7721 #pragma GCC diagnostic push
7722 #pragma GCC diagnostic ignored "-Wcast-qual"
7723 #endif // endif
7724 
7725 static void
7726 copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
7727 {
7728 	int remain_len;
7729 	int i;
7730 	int *cnt;
7731 	char *dest;
7732 	int bytes_written;
7733 	uint32 ioc_dwlen = 0;
7734 
7735 	if (!dhd || !dhd->hang_info) {
7736 		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
7737 			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
7738 		return;
7739 	}
7740 
7741 	cnt = &dhd->hang_info_cnt;
7742 	dest = dhd->hang_info;
7743 
7744 	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
7745 	(*cnt) = 0;
7746 
7747 	bytes_written = 0;
7748 	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7749 
7750 	get_debug_dump_time(dhd->debug_dump_time_hang_str);
7751 	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
7752 
7753 	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
7754 			HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
7755 			dhd->debug_dump_time_hang_str,
7756 			ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
7757 	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;
7758 
7759 	clear_debug_dump_time(dhd->debug_dump_time_hang_str);
7760 
7761 	/* Access ioc->buf only if ioc->len is at least 4 bytes */
7762 	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
7763 	if (ioc_dwlen > 0) {
7764 		const uint32 *ioc_buf = (const uint32 *)ioc->buf;
7765 
7766 		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7767 		bytes_written += scnprintf(&dest[bytes_written], remain_len,
7768 			"%08x", *(uint32 *)(ioc_buf++));
7769 		(*cnt)++;
7770 		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
7771 			return;
7772 		}
7773 
7774 		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
7775 			i++, (*cnt)++) {
7776 			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7777 			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
7778 				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
7779 		}
7780 	}
7781 
7782 	DHD_INFO(("%s hang info len: %d data: %s\n",
7783 		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
7784 }
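/*
 * Format sketch (the delimiter character shown is made up for illustration):
 * for an IOCTL response timeout the hang_info string built above has the shape
 *
 *	"<reason> <ver> <timestamp> <ifidx> <cmd> <len> <set> <used> <needed> <dword0>@<dword1>..."
 *
 * i.e. the nine space-separated header fields written by the first scnprintf,
 * followed by up to HANG_FIELD_CNT_MAX dwords of ioc->buf, each subsequent
 * dword prefixed with the HANG_RAW_DEL character by the second loop.
 */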
7785 
7786 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
7787 #pragma GCC diagnostic pop
7788 #endif // endif
7789 
7790 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
7791 
7792 #if defined(DHD_H2D_LOG_TIME_SYNC)
7793 /*
7794  * Helper function:
7795  * Used for Dongle console message time syncing with Host printk
7796  */
7797 void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
7798 {
7799 	uint64 ts;
7800 
7801 	/*
7802 	 * local_clock() returns time in nanoseconds.
7803 	 * The dongle understands only millisecond time.
7804 	 */
7805 	ts = local_clock();
7806 	/* Nanoseconds to milliseconds */
7807 	do_div(ts, 1000000);
7808 	if (dhd_wl_ioctl_set_intiovar(dhd,  "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
7809 		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
7810 		/* Stopping HOST Dongle console time syncing */
7811 		dhd->dhd_rte_time_sync_ms = 0;
7812 	}
7813 }
7814 #endif /* DHD_H2D_LOG_TIME_SYNC */
7815 
7816 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
7817 int
7818 dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
7819 {
7820 	int ret = BCME_OK;
7821 	bcm_xtlv_t *pxtlv = NULL;
7822 	uint8 mybuf[DHD_IOVAR_BUF_SIZE];
7823 	uint16 mybuf_len = sizeof(mybuf);
7824 	pxtlv = (bcm_xtlv_t *)mybuf;
7825 
7826 	ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
7827 			&he_enab, BCM_XTLV_OPTION_ALIGN32);
7828 
7829 	if (ret != BCME_OK) {
7830 		DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
7831 		ret = -EINVAL;
7832 		return ret;
7833 	}
7834 
7835 	ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
7836 	if (ret < 0) {
7837 		DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
7838 			__FUNCTION__, he_enab, bcmerrorstr(ret)));
7839 	} else {
7840 		DHD_ERROR(("%s he_enab (%d) set succeeded\n", __FUNCTION__, he_enab));
7841 	}
7842 
7843 	return ret;
7844 }
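/*
 * Usage sketch (the call site is an assumption): HE can be force-disabled by
 * packing a single WL_HE_CMD_ENAB XTLV with value 0, which is exactly what
 * the helper above does.
 *
 *	if (dhd_control_he_enab(dhdp, 0) != BCME_OK)
 *		DHD_ERROR(("failed to disable HE\n"));
 */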
7845 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
7846