1 /*
2 * Broadcom Dongle Host Driver (DHD), common DHD core.
3 *
4 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
5 *
6 * Copyright (C) 1999-2017, Broadcom Corporation
7 *
8 * Unless you and Broadcom execute a separate written software license
9 * agreement governing use of this software, this software is licensed to you
10 * under the terms of the GNU General Public License version 2 (the "GPL"),
11 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
12 * following added to such license:
13 *
14 * As a special exception, the copyright holders of this software give you
15 * permission to link this software with independent modules, and to copy and
16 * distribute the resulting executable under terms of your choice, provided that
17 * you also meet, for each linked independent module, the terms and conditions of
18 * the license of that module. An independent module is a module which is not
19 * derived from this software. The special exception does not apply to any
20 * modifications of the software.
21 *
22 * Notwithstanding the above, under no circumstances may you combine this
23 * software in any way with any other Broadcom software provided under a license
24 * other than the GPL, without Broadcom's express prior written consent.
25 *
26 *
27 * <<Broadcom-WL-IPTag/Open:>>
28 *
29 * $Id: dhd_common.c 701858 2017-05-26 20:20:58Z $
30 */
31 #include <typedefs.h>
32 #include <osl.h>
33
34 #include <epivers.h>
35 #include <bcmutils.h>
36 #include <bcmstdlib_s.h>
37
38 #include <bcmendian.h>
39 #include <dngl_stats.h>
40 #include <dhd.h>
41 #include <dhd_ip.h>
42 #include <bcmevent.h>
43 #include <dhdioctl.h>
44
45 #ifdef PCIE_FULL_DONGLE
46 #include <bcmmsgbuf.h>
47 #endif /* PCIE_FULL_DONGLE */
48
49 #ifdef SHOW_LOGTRACE
50 #include <event_log.h>
51 #endif /* SHOW_LOGTRACE */
52
53 #ifdef BCMPCIE
54 #include <dhd_flowring.h>
55 #endif // endif
56
57 #include <dhd_bus.h>
58 #include <dhd_proto.h>
59 #include <dhd_dbg.h>
60 #include <802.1d.h>
61 #include <dhd_debug.h>
62 #include <dhd_dbg_ring.h>
63 #include <dhd_mschdbg.h>
64 #include <msgtrace.h>
65
66 #ifdef WL_CFG80211
67 #include <wl_cfg80211.h>
68 #endif // endif
69 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
70 #include <dhd_pno.h>
71 #endif /* OEM_ANDROID && PNO_SUPPORT */
72 #ifdef RTT_SUPPORT
73 #include <dhd_rtt.h>
74 #endif // endif
75
76 #ifdef DNGL_EVENT_SUPPORT
77 #include <dnglevent.h>
78 #endif // endif
79
80 #define htod32(i) (i)
81 #define htod16(i) (i)
82 #define dtoh32(i) (i)
83 #define dtoh16(i) (i)
84 #define htodchanspec(i) (i)
85 #define dtohchanspec(i) (i)
86
87 #ifdef PROP_TXSTATUS
88 #include <wlfc_proto.h>
89 #include <dhd_wlfc.h>
90 #endif // endif
91
92 #if defined(DHD_POST_EAPOL_M1_AFTER_ROAM_EVT)
93 #include <dhd_linux.h>
94 #endif // endif
95
96 #ifdef DHD_L2_FILTER
97 #include <dhd_l2_filter.h>
98 #endif /* DHD_L2_FILTER */
99
100 #ifdef DHD_PSTA
101 #include <dhd_psta.h>
102 #endif /* DHD_PSTA */
103
104 #ifdef DHD_WET
105 #include <dhd_wet.h>
106 #endif /* DHD_WET */
107
108 #ifdef DHD_LOG_DUMP
109 #include <dhd_dbg.h>
110 #ifdef DHD_PKT_LOGGING
111 #include <dhd_pktlog.h>
112 #endif /* DHD_PKT_LOGGING */
113 #endif /* DHD_LOG_DUMP */
114
115 #ifdef DHD_LOG_PRINT_RATE_LIMIT
116 int log_print_threshold = 0;
117 #endif /* DHD_LOG_PRINT_RATE_LIMIT */
118 int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_EVENT_VAL
119 /* For CUSTOMER_HW4 do not enable DHD_IOVAR_MEM_VAL by default */
120 #if !defined(BOARD_HIKEY)
121 | DHD_IOVAR_MEM_VAL
122 #endif // endif
123 #ifndef OEM_ANDROID
124 | DHD_MSGTRACE_VAL
125 #endif /* OEM_ANDROID */
126 | DHD_PKT_MON_VAL;
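/*
 * Note: the default trace mask above enables ERROR, FWLOG, EVENT and PKT_MON logging
 * plus the conditional IOVAR_MEM / MSGTRACE bits. It can be changed at runtime through
 * the "msglevel" iovar handled by IOV_MSGLEVEL in dhd_doiovar() below, e.g. from the
 * dhd utility (illustrative value only): "dhd msglevel 0x0001".
 */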
127
128 #if defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT)
129 #include <wl_iw.h>
130 #endif /* defined(OEM_ANDROID) && defined(WL_WIRELESS_EXT) */
131
132 #ifdef DHD_ULP
133 #include <dhd_ulp.h>
134 #endif /* DHD_ULP */
135
136 #ifdef DHD_DEBUG
137 #include <sdiovar.h>
138 #endif /* DHD_DEBUG */
139
140 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
141 #include <linux/pm_runtime.h>
142 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
143
144 #ifdef SOFTAP
145 char fw_path2[MOD_PARAM_PATHLEN];
146 extern bool softap_enabled;
147 #endif // endif
148
149 #ifdef SHOW_LOGTRACE
150 #define BYTES_AHEAD_NUM 10 /* the address in the map file precedes the symbol by this many bytes */
151 #define READ_NUM_BYTES 1000 /* number of bytes to read from the map file per iteration */
152 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* number of bytes to rewind the file position by */
153 static char *ramstart_str = " text_start"; /* map file string marking the RAM start address */
154 static char *rodata_start_str = " rodata_start"; /* map file string marking the rodata start address */
155 static char *rodata_end_str = " rodata_end"; /* map file string marking the rodata end address */
156 #define RAMSTART_BIT 0x01
157 #define RDSTART_BIT 0x02
158 #define RDEND_BIT 0x04
159 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
160 #endif /* SHOW_LOGTRACE */
161
162 #ifdef SHOW_LOGTRACE
163 /* the fw file path is taken either from the module parameter at
164 * insmod time or from a constant whose value differs per platform
165 */
167 extern char *st_str_file_path;
168 #endif /* SHOW_LOGTRACE */
169
170 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
171
172 #ifdef EWP_EDL
173 typedef struct msg_hdr_edl {
174 uint32 infobuf_ver;
175 info_buf_payload_hdr_t pyld_hdr;
176 msgtrace_hdr_t trace_hdr;
177 } msg_hdr_edl_t;
178 #endif /* EWP_EDL */
179
180 /* Last connection success/failure status */
181 uint32 dhd_conn_event;
182 uint32 dhd_conn_status;
183 uint32 dhd_conn_reason;
184
185 extern int dhd_iscan_request(void * dhdp, uint16 action);
186 extern void dhd_ind_scan_confirm(void *h, bool status);
187 extern int dhd_iscan_in_progress(void *h);
188 void dhd_iscan_lock(void);
189 void dhd_iscan_unlock(void);
190 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
191 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
192 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
193 #endif // endif
194
195 extern int dhd_socram_dump(struct dhd_bus *bus);
196 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
197
198 #ifdef DNGL_EVENT_SUPPORT
199 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
200 bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
201 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
202 size_t pktlen);
203 #endif /* DNGL_EVENT_SUPPORT */
204
205 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
206 static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
207 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
208
209 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
210
211 #if defined(OEM_ANDROID)
212 bool ap_cfg_running = FALSE;
213 bool ap_fw_loaded = FALSE;
214 #endif /* defined(OEM_ANDROID) */
215
216 /* Version string to report */
217 #ifdef DHD_DEBUG
218 #ifndef SRCBASE
219 #define SRCBASE "drivers/net/wireless/bcmdhd"
220 #endif // endif
221 #define DHD_COMPILED "\nCompiled in " SRCBASE
222 #endif /* DHD_DEBUG */
223
224 #define CHIPID_MISMATCH 8
225
226 #if defined(DHD_DEBUG)
227 const char dhd_version[] = "Dongle Host Driver, version " EPI_VERSION_STR
228 DHD_COMPILED " on " __DATE__ " at " __TIME__;
229 #else
230 const char dhd_version[] = "\nDongle Host Driver, version " EPI_VERSION_STR "\nCompiled from ";
231 #endif // endif
232 char fw_version[FW_VER_STR_LEN] = "\0";
233 char clm_version[CLM_VER_STR_LEN] = "\0";
234
235 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
236
237 void dhd_set_timer(void *bus, uint wdtick);
238
239 static char* ioctl2str(uint32 ioctl);
240
241 /* IOVar table */
242 enum {
243 IOV_VERSION = 1,
244 IOV_MSGLEVEL,
245 IOV_BCMERRORSTR,
246 IOV_BCMERROR,
247 IOV_WDTICK,
248 IOV_DUMP,
249 IOV_CLEARCOUNTS,
250 IOV_LOGDUMP,
251 IOV_LOGCAL,
252 IOV_LOGSTAMP,
253 IOV_GPIOOB,
254 IOV_IOCTLTIMEOUT,
255 IOV_CONS,
256 IOV_DCONSOLE_POLL,
257 #if defined(DHD_DEBUG)
258 IOV_DHD_JOIN_TIMEOUT_DBG,
259 IOV_SCAN_TIMEOUT,
260 IOV_MEM_DEBUG,
261 #ifdef BCMPCIE
262 IOV_FLOW_RING_DEBUG,
263 #endif /* BCMPCIE */
264 #endif /* defined(DHD_DEBUG) */
265 #ifdef PROP_TXSTATUS
266 IOV_PROPTXSTATUS_ENABLE,
267 IOV_PROPTXSTATUS_MODE,
268 IOV_PROPTXSTATUS_OPT,
269 IOV_PROPTXSTATUS_MODULE_IGNORE,
270 IOV_PROPTXSTATUS_CREDIT_IGNORE,
271 IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
272 IOV_PROPTXSTATUS_RXPKT_CHK,
273 #endif /* PROP_TXSTATUS */
274 IOV_BUS_TYPE,
275 IOV_CHANGEMTU,
276 IOV_HOSTREORDER_FLOWS,
277 #ifdef DHDTCPACK_SUPPRESS
278 IOV_TCPACK_SUPPRESS,
279 #endif /* DHDTCPACK_SUPPRESS */
280 IOV_AP_ISOLATE,
281 #ifdef DHD_L2_FILTER
282 IOV_DHCP_UNICAST,
283 IOV_BLOCK_PING,
284 IOV_PROXY_ARP,
285 IOV_GRAT_ARP,
286 IOV_BLOCK_TDLS,
287 #endif /* DHD_L2_FILTER */
288 IOV_DHD_IE,
289 #ifdef DHD_PSTA
290 IOV_PSTA,
291 #endif /* DHD_PSTA */
292 #ifdef DHD_WET
293 IOV_WET,
294 IOV_WET_HOST_IPV4,
295 IOV_WET_HOST_MAC,
296 #endif /* DHD_WET */
297 IOV_CFG80211_OPMODE,
298 IOV_ASSERT_TYPE,
299 IOV_LMTEST,
300 #ifdef DHD_MCAST_REGEN
301 IOV_MCAST_REGEN_BSS_ENABLE,
302 #endif // endif
303 #ifdef SHOW_LOGTRACE
304 IOV_DUMP_TRACE_LOG,
305 #endif /* SHOW_LOGTRACE */
306 IOV_DONGLE_TRAP_TYPE,
307 IOV_DONGLE_TRAP_INFO,
308 IOV_BPADDR,
309 IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
310 #if defined(DHD_LOG_DUMP)
311 IOV_LOG_DUMP,
312 #endif /* DHD_LOG_DUMP */
313 IOV_TPUT_TEST,
314 IOV_FIS_TRIGGER,
315 IOV_DEBUG_BUF_DEST_STAT,
316 #ifdef DHD_DEBUG
317 IOV_INDUCE_ERROR,
318 #endif /* DHD_DEBUG */
319 #ifdef WL_IFACE_MGMT_CONF
320 #ifdef WL_CFG80211
321 #ifdef WL_NANP2P
322 IOV_CONC_DISC,
323 #endif /* WL_NANP2P */
324 #ifdef WL_IFACE_MGMT
325 IOV_IFACE_POLICY,
326 #endif /* WL_IFACE_MGMT */
327 #endif /* WL_CFG80211 */
328 #endif /* WL_IFACE_MGMT_CONF */
329 IOV_LAST
330 };
331
332 const bcm_iovar_t dhd_iovars[] = {
333 /* name varid flags flags2 type minlen */
334 {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, sizeof(dhd_version)},
335 #ifdef DHD_DEBUG
336 {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
337 {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
338 #ifdef BCMPCIE
339 {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
340 #endif /* BCMPCIE */
341 #endif /* DHD_DEBUG */
342 {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
343 {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
344 {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
345 {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
346 {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
347 {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
348 {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
349 {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
350 {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
351 #ifdef PROP_TXSTATUS
352 {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
353 /*
354 set the proptxstatus operation mode:
355 0 - Do not do any proptxstatus flow control
356 1 - Use implied credit from a packet status
357 2 - Use explicit credit
358 */
359 {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
360 {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
361 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
362 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
363 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
364 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
365 #endif /* PROP_TXSTATUS */
366 {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
367 {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
368 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
369 (WLHOST_REORDERDATA_MAXFLOWS + 1) },
370 #ifdef DHDTCPACK_SUPPRESS
371 {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
372 #endif /* DHDTCPACK_SUPPRESS */
373 #ifdef DHD_L2_FILTER
374 {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
375 #endif /* DHD_L2_FILTER */
376 {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
377 #ifdef DHD_L2_FILTER
378 {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
379 {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
380 {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
381 {"block_tdls", IOV_BLOCK_TDLS, (0), 0, IOVT_BOOL, 0},
382 #endif /* DHD_L2_FILTER */
383 {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
384 #ifdef DHD_PSTA
385 /* PSTA/PSR Mode configuration. 0: DISABLED 1: PSTA 2: PSR */
386 {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
387 #endif /* DHD_PSTA */
388 #ifdef DHD_WET
389 /* WET Mode configuration. 0: DISABLED 1: WET */
390 {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
391 {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
392 {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
393 #endif /* DHD_WET */
394 {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
395 {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
396 {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
397 #ifdef DHD_MCAST_REGEN
398 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
399 #endif // endif
400 #ifdef SHOW_LOGTRACE
401 {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
402 #endif /* SHOW_LOGTRACE */
403 {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
404 {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
405 #ifdef DHD_DEBUG
406 {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
407 #endif /* DHD_DEBUG */
408 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
409 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
410 #if defined(DHD_LOG_DUMP)
411 {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
412 #endif /* DHD_LOG_DUMP */
413 #ifndef OEM_ANDROID
414 {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
415 #endif // endif
416 {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
417 #ifdef DHD_DEBUG
418 {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
419 #endif /* DHD_DEBUG */
420 #ifdef WL_IFACE_MGMT_CONF
421 #ifdef WL_CFG80211
422 #ifdef WL_NANP2P
423 {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
424 #endif /* WL_NANP2P */
425 #ifdef WL_IFACE_MGMT
426 {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
427 #endif /* WL_IFACE_MGMT */
428 #endif /* WL_CFG80211 */
429 #endif /* WL_IFACE_MGMT_CONF */
430 {NULL, 0, 0, 0, 0, 0 }
431 };
432
433 #define DHD_IOVAR_BUF_SIZE 128
434
435 bool
436 dhd_query_bus_erros(dhd_pub_t *dhdp)
437 {
438 bool ret = FALSE;
439
440 if (dhdp->dongle_reset) {
441 DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
442 __FUNCTION__));
443 ret = TRUE;
444 }
445
446 if (dhdp->dongle_trap_occured) {
447 DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
448 __FUNCTION__));
449 ret = TRUE;
450 #ifdef OEM_ANDROID
451 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
452 dhd_os_send_hang_message(dhdp);
453 #endif /* OEM_ANDROID */
454 }
455
456 if (dhdp->iovar_timeout_occured) {
457 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
458 __FUNCTION__));
459 ret = TRUE;
460 }
461
462 #ifdef PCIE_FULL_DONGLE
463 if (dhdp->d3ack_timeout_occured) {
464 DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
465 __FUNCTION__));
466 ret = TRUE;
467 }
468 if (dhdp->livelock_occured) {
469 DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
470 __FUNCTION__));
471 ret = TRUE;
472 }
473
474 if (dhdp->pktid_audit_failed) {
475 DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
476 __FUNCTION__));
477 ret = TRUE;
478 }
479 #endif /* PCIE_FULL_DONGLE */
480
481 if (dhdp->iface_op_failed) {
482 DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
483 __FUNCTION__));
484 ret = TRUE;
485 }
486
487 if (dhdp->scan_timeout_occurred) {
488 DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
489 __FUNCTION__));
490 ret = TRUE;
491 }
492
493 if (dhdp->scan_busy_occurred) {
494 DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
495 __FUNCTION__));
496 ret = TRUE;
497 }
498
499 #ifdef DNGL_AXI_ERROR_LOGGING
500 if (dhdp->axi_error) {
501 DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
502 __FUNCTION__));
503 ret = TRUE;
504 }
505 #endif /* DNGL_AXI_ERROR_LOGGING */
506
507 if (dhd_bus_get_linkdown(dhdp)) {
508 DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
509 __FUNCTION__));
510 ret = TRUE;
511 }
512
513 if (dhd_bus_get_cto(dhdp)) {
514 DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
515 __FUNCTION__));
516 ret = TRUE;
517 }
518
519 return ret;
520 }
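/*
 * Usage sketch (illustrative): callers typically consult this helper before issuing
 * new bus or ioctl work and bail out early once a fatal condition has been latched:
 *
 *	if (dhd_query_bus_erros(dhdp)) {
 *		return BCME_ERROR;
 *	}
 */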
521
522 void
523 dhd_clear_bus_errors(dhd_pub_t *dhdp)
524 {
525 if (!dhdp)
526 return;
527
528 dhdp->dongle_reset = FALSE;
529 dhdp->dongle_trap_occured = FALSE;
530 dhdp->iovar_timeout_occured = FALSE;
531 #ifdef PCIE_FULL_DONGLE
532 dhdp->d3ack_timeout_occured = FALSE;
533 dhdp->livelock_occured = FALSE;
534 dhdp->pktid_audit_failed = FALSE;
535 #endif // endif
536 dhdp->iface_op_failed = FALSE;
537 dhdp->scan_timeout_occurred = FALSE;
538 dhdp->scan_busy_occurred = FALSE;
539 }
540
541 #ifdef DHD_SSSR_DUMP
542
543 /* This can be overwritten by module parameter defined in dhd_linux.c */
544 uint support_sssr_dump = TRUE;
545
546 int
547 dhd_sssr_mempool_init(dhd_pub_t *dhd)
548 {
549 dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
550 if (dhd->sssr_mempool == NULL) {
551 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
552 __FUNCTION__));
553 return BCME_ERROR;
554 }
555 return BCME_OK;
556 }
557
558 void
559 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
560 {
561 if (dhd->sssr_mempool) {
562 MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
563 dhd->sssr_mempool = NULL;
564 }
565 }
566
567 void
568 dhd_dump_sssr_reg_info(sssr_reg_info_v1_t *sssr_reg_info)
569 {
570 #ifdef DHD_PCIE_REG_ACCESS
571 int i, j;
572 DHD_ERROR(("************** SSSR REG INFO start ****************\n"));
573 DHD_ERROR(("pmu_regs\n"));
574 DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
575 "macresreqtimer=0x%x macresreqtimer1=0x%x\n",
576 sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
577 sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
578 sssr_reg_info->pmu_regs.base_regs.resreqtimer,
579 sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
580 sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
581 DHD_ERROR(("chipcommon_regs\n"));
582 DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
583 sssr_reg_info->chipcommon_regs.base_regs.intmask,
584 sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
585 sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
586 sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
587 DHD_ERROR(("arm_regs\n"));
588 DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
589 " resetctrl=0x%x itopoobb=0x%x\n",
590 sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
591 sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
592 sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
593 sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
594 DHD_ERROR(("pcie_regs\n"));
595 DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
596 "clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
597 sssr_reg_info->pcie_regs.base_regs.ltrstate,
598 sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
599 sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
600 sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
601 DHD_ERROR(("vasip_regs\n"));
602 DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
603 sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
604 sssr_reg_info->vasip_regs.vasip_sr_addr,
605 sssr_reg_info->vasip_regs.vasip_sr_size));
606
607 for (i = 0; i < MAX_NUM_D11CORES; i++) {
608 DHD_ERROR(("mac_regs core[%d]\n", i));
609 DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
610 "clockcontrolstatus_val=0x%x\n",
611 sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
612 sssr_reg_info->mac_regs[i].base_regs.xmtdata,
613 sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
614 sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
615 DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
616 sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
617 sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
618 sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
619 for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
620 DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
621 sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
622 }
623 DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
624 }
625 DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
626 #endif /* DHD_PCIE_REG_ACCESS */
627 }
628
629 int
630 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
631 {
632 int ret;
633 /* get sssr_reg_info from firmware */
634 memset((void *)&dhd->sssr_reg_info, 0, sizeof(dhd->sssr_reg_info));
635 ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)&dhd->sssr_reg_info,
636 sizeof(dhd->sssr_reg_info), FALSE);
637 if (ret < 0) {
638 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
639 __FUNCTION__, ret));
640 return BCME_ERROR;
641 }
642
643 dhd_dump_sssr_reg_info(&dhd->sssr_reg_info);
644 return BCME_OK;
645 }
646
647 uint32
648 dhd_get_sssr_bufsize(dhd_pub_t *dhd)
649 {
650 int i;
651 uint32 sssr_bufsize = 0;
652 /* Sum up the save/restore region sizes of all D11 cores */
653 for (i = 0; i < MAX_NUM_D11CORES; i++) {
654 sssr_bufsize += dhd->sssr_reg_info.mac_regs[i].sr_size;
655 }
656 sssr_bufsize += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
657
658 /* Double the size as different dumps will be saved before and after SR */
659 sssr_bufsize = 2 * sssr_bufsize;
660
661 return sssr_bufsize;
662 }
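/*
 * Sizing example (illustrative numbers only): with MAX_NUM_D11CORES == 2,
 * sr_size == 0x8000 for each D11 core and vasip_sr_size == 0x10000, the sum above is
 * 0x8000 + 0x8000 + 0x10000 = 0x20000 bytes; doubling for the "before" and "after"
 * snapshots gives a required pool of 0x40000 bytes, which must not exceed
 * DHD_SSSR_MEMPOOL_SIZE (checked in dhd_sssr_dump_init()).
 */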
663
664 int
665 dhd_sssr_dump_init(dhd_pub_t *dhd)
666 {
667 int i;
668 uint32 sssr_bufsize;
669 uint32 mempool_used = 0;
670
671 dhd->sssr_inited = FALSE;
672
673 if (!support_sssr_dump) {
674 DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
675 return BCME_OK;
676 }
677
678 /* check if sssr mempool is allocated */
679 if (dhd->sssr_mempool == NULL) {
680 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
681 __FUNCTION__));
682 return BCME_ERROR;
683 }
684
685 /* Get SSSR reg info */
686 if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
687 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
688 return BCME_ERROR;
689 }
690
691 /* Validate structure version */
692 if (dhd->sssr_reg_info.version > SSSR_REG_INFO_VER_1) {
693 DHD_ERROR(("%s: dhd->sssr_reg_info.version (%d : %d) mismatch\n",
694 __FUNCTION__, (int)dhd->sssr_reg_info.version, SSSR_REG_INFO_VER));
695 return BCME_ERROR;
696 }
697
698 /* Validate structure length */
699 if (dhd->sssr_reg_info.length < sizeof(sssr_reg_info_v0_t)) {
700 DHD_ERROR(("%s: dhd->sssr_reg_info.length (%d : %d) mismatch\n",
701 __FUNCTION__, (int)dhd->sssr_reg_info.length,
702 (int)sizeof(dhd->sssr_reg_info)));
703 return BCME_ERROR;
704 }
705
706 /* validate fifo size */
707 sssr_bufsize = dhd_get_sssr_bufsize(dhd);
708 if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
709 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
710 __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
711 return BCME_ERROR;
712 }
713
714 /* init all pointers to NULL */
715 for (i = 0; i < MAX_NUM_D11CORES; i++) {
716 dhd->sssr_d11_before[i] = NULL;
717 dhd->sssr_d11_after[i] = NULL;
718 }
719 dhd->sssr_dig_buf_before = NULL;
720 dhd->sssr_dig_buf_after = NULL;
721
722 /* Allocate memory */
723 for (i = 0; i < MAX_NUM_D11CORES; i++) {
724 if (dhd->sssr_reg_info.mac_regs[i].sr_size) {
725 dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
726 mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
727
728 dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
729 mempool_used += dhd->sssr_reg_info.mac_regs[i].sr_size;
730 }
731 }
732
733 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
734 dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
735 mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
736
737 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
738 mempool_used += dhd->sssr_reg_info.vasip_regs.vasip_sr_size;
739 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
740 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
741 dhd->sssr_dig_buf_before = (uint32 *)(dhd->sssr_mempool + mempool_used);
742 mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
743
744 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
745 mempool_used += dhd->sssr_reg_info.dig_mem_info.dig_sr_size;
746 }
747
748 dhd->sssr_inited = TRUE;
749
750 return BCME_OK;
751
752 }
753
754 void
755 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
756 {
757 int i;
758
759 dhd->sssr_inited = FALSE;
760 /* init all pointers to NULL */
761 for (i = 0; i < MAX_NUM_D11CORES; i++) {
762 dhd->sssr_d11_before[i] = NULL;
763 dhd->sssr_d11_after[i] = NULL;
764 }
765 dhd->sssr_dig_buf_before = NULL;
766 dhd->sssr_dig_buf_after = NULL;
767
768 return;
769 }
770
771 void
772 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
773 {
774 bool print_info = FALSE;
775 int dump_mode;
776
777 if (!dhd || !path) {
778 DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
779 __FUNCTION__));
780 return;
781 }
782
783 if (!dhd->sssr_dump_collected) {
784 /* SSSR dump is not collected */
785 return;
786 }
787
788 dump_mode = dhd->sssr_dump_mode;
789
790 if (bcmstrstr(path, "core_0_before")) {
791 if (dhd->sssr_d11_outofreset[0] &&
792 dump_mode == SSSR_DUMP_MODE_SSSR) {
793 print_info = TRUE;
794 }
795 } else if (bcmstrstr(path, "core_0_after")) {
796 if (dhd->sssr_d11_outofreset[0]) {
797 print_info = TRUE;
798 }
799 } else if (bcmstrstr(path, "core_1_before")) {
800 if (dhd->sssr_d11_outofreset[1] &&
801 dump_mode == SSSR_DUMP_MODE_SSSR) {
802 print_info = TRUE;
803 }
804 } else if (bcmstrstr(path, "core_1_after")) {
805 if (dhd->sssr_d11_outofreset[1]) {
806 print_info = TRUE;
807 }
808 } else {
809 print_info = TRUE;
810 }
811
812 if (print_info) {
813 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
814 path, FILE_NAME_HAL_TAG));
815 }
816 }
817 #endif /* DHD_SSSR_DUMP */
818
819 #ifdef DHD_FW_COREDUMP
820 void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
821 {
822 if (!dhd_pub->soc_ram) {
823 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
824 dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
825 DHD_PREALLOC_MEMDUMP_RAM, length);
826 #else
827 dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
828 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
829 }
830
831 if (dhd_pub->soc_ram == NULL) {
832 DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
833 __FUNCTION__));
834 dhd_pub->soc_ram_length = 0;
835 } else {
836 memset(dhd_pub->soc_ram, 0, length);
837 dhd_pub->soc_ram_length = length;
838 }
839
840 /* soc_ram free handled in dhd_{free,clear} */
841 return dhd_pub->soc_ram;
842 }
843 #endif /* DHD_FW_COREDUMP */
844
845 /* to NDIS developer, the structure dhd_common is redundant,
846 * please do NOT merge it back from other branches !!!
847 */
848
849 int
850 dhd_common_socram_dump(dhd_pub_t *dhdp)
851 {
852 return dhd_socram_dump(dhdp->bus);
853 }
854
855 int
856 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
857 {
858 struct bcmstrbuf b;
859 struct bcmstrbuf *strbuf = &b;
860
861 if (!dhdp || !dhdp->prot || !buf) {
862 return BCME_ERROR;
863 }
864
865 bcm_binit(strbuf, buf, buflen);
866
867 /* Base DHD info */
868 bcm_bprintf(strbuf, "%s\n", dhd_version);
869 bcm_bprintf(strbuf, "\n");
870 bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
871 dhdp->up, dhdp->txoff, dhdp->busstate);
872 bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
873 dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
874 bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
875 dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
876 bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
877
878 bcm_bprintf(strbuf, "dongle stats:\n");
879 bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
880 dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
881 dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
882 bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
883 dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
884 dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
885 bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
886
887 bcm_bprintf(strbuf, "bus stats:\n");
888 bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
889 dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
890 bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
891 dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
892 bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
893 dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
894 bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
895 dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
896 bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
897 dhdp->rx_readahead_cnt, dhdp->tx_realloc);
898 bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
899 dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
900 bcm_bprintf(strbuf, "tx_big_packets %lu\n",
901 dhdp->tx_big_packets);
902 bcm_bprintf(strbuf, "\n");
903 #ifdef DMAMAP_STATS
904 /* Add DMA MAP info */
905 bcm_bprintf(strbuf, "DMA MAP stats: \n");
906 bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
907 dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
908 dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
909 #ifndef IOCTLRESP_USE_CONSTMEM
910 bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
911 dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
912 #endif /* !IOCTLRESP_USE_CONSTMEM */
913 bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
914 "TSBUF RX: %lu size %luK\n",
915 dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
916 dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
917 dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
918 bcm_bprintf(strbuf, "Total : %luK \n",
919 KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
920 dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
921 dhdp->dma_stats.tsbuf_rx_sz));
922 #endif /* DMAMAP_STATS */
923 bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
924 /* Add any prot info */
925 dhd_prot_dump(dhdp, strbuf);
926 bcm_bprintf(strbuf, "\n");
927
928 /* Add any bus info */
929 dhd_bus_dump(dhdp, strbuf);
930
931 #if defined(DHD_LB_STATS)
932 dhd_lb_stats_dump(dhdp, strbuf);
933 #endif /* DHD_LB_STATS */
934 #ifdef DHD_WET
935 if (dhd_get_wet_mode(dhdp)) {
936 bcm_bprintf(strbuf, "Wet Dump:\n");
937 dhd_wet_dump(dhdp, strbuf);
938 }
939 #endif /* DHD_WET */
940
941 /* return remaining buffer length */
942 return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
943 }
944
945 void
946 dhd_dump_to_kernelog(dhd_pub_t *dhdp)
947 {
948 char buf[512];
949
950 DHD_ERROR(("F/W version: %s\n", fw_version));
951 bcm_bprintf_bypass = TRUE;
952 dhd_dump(dhdp, buf, sizeof(buf));
953 bcm_bprintf_bypass = FALSE;
954 }
955
956 int
957 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
958 {
959 wl_ioctl_t ioc;
960
961 ioc.cmd = cmd;
962 ioc.buf = arg;
963 ioc.len = len;
964 ioc.set = set;
965
966 return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
967 }
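/*
 * Usage sketch (illustrative): this wrapper is the common entry point for fixed-size
 * ioctls, e.g. bringing the dongle up on primary interface 0:
 *
 *	int err = dhd_wl_ioctl_cmd(dhd_pub, WLC_UP, NULL, 0, TRUE, 0);
 *	if (err != BCME_OK)
 *		DHD_ERROR(("WLC_UP failed %d\n", err));
 */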
968
969 int
970 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
971 int cmd, uint8 set, int ifidx)
972 {
973 char iovbuf[WLC_IOCTL_SMLEN];
974 int ret = -1;
975
976 memset(iovbuf, 0, sizeof(iovbuf));
977 if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
978 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
979 if (!ret) {
980 *pval = ltoh32(*((uint*)iovbuf));
981 } else {
982 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
983 __FUNCTION__, name, ret));
984 }
985 } else {
986 DHD_ERROR(("%s: mkiovar %s failed\n",
987 __FUNCTION__, name));
988 }
989
990 return ret;
991 }
992
993 int
994 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
995 int cmd, uint8 set, int ifidx)
996 {
997 char iovbuf[WLC_IOCTL_SMLEN];
998 int ret = -1;
999 int lval = htol32(val);
1000 uint len;
1001
1002 len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
1003
1004 if (len) {
1005 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
1006 if (ret) {
1007 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1008 __FUNCTION__, name, ret));
1009 }
1010 } else {
1011 DHD_ERROR(("%s: mkiovar %s failed\n",
1012 __FUNCTION__, name));
1013 }
1014
1015 return ret;
1016 }
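/*
 * Usage sketch (illustrative): the two helpers above wrap bcm_mkiovar() plus
 * dhd_wl_ioctl_cmd() for 32-bit iovars. "mpc" is used here only as a familiar
 * firmware iovar name; any 32-bit iovar follows the same pattern:
 *
 *	uint mpc = 0;
 *	(void)dhd_wl_ioctl_get_intiovar(dhd_pub, "mpc", &mpc, WLC_GET_VAR, FALSE, 0);
 *	(void)dhd_wl_ioctl_set_intiovar(dhd_pub, "mpc", 0, WLC_SET_VAR, TRUE, 0);
 */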
1017
1018 static struct ioctl2str_s {
1019 uint32 ioctl;
1020 char *name;
1021 } ioctl2str_array[] = {
1022 {WLC_UP, "UP"},
1023 {WLC_DOWN, "DOWN"},
1024 {WLC_SET_PROMISC, "SET_PROMISC"},
1025 {WLC_SET_INFRA, "SET_INFRA"},
1026 {WLC_SET_AUTH, "SET_AUTH"},
1027 {WLC_SET_SSID, "SET_SSID"},
1028 {WLC_RESTART, "RESTART"},
1029 {WLC_SET_CHANNEL, "SET_CHANNEL"},
1030 {WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
1031 {WLC_SET_KEY, "SET_KEY"},
1032 {WLC_SCAN, "SCAN"},
1033 {WLC_DISASSOC, "DISASSOC"},
1034 {WLC_REASSOC, "REASSOC"},
1035 {WLC_SET_COUNTRY, "SET_COUNTRY"},
1036 {WLC_SET_WAKE, "SET_WAKE"},
1037 {WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
1038 {WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
1039 {WLC_SET_WSEC, "SET_WSEC"},
1040 {WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
1041 {WLC_SET_RADAR, "SET_RADAR"},
1042 {0, NULL}
1043 };
1044
1045 static char *
1046 ioctl2str(uint32 ioctl)
1047 {
1048 struct ioctl2str_s *p = ioctl2str_array;
1049
1050 while (p->name != NULL) {
1051 if (p->ioctl == ioctl) {
1052 return p->name;
1053 }
1054 p++;
1055 }
1056
1057 return "";
1058 }
1059
1060 /**
1061 * @param ioc IO control struct, members are partially used by this function.
1062 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
1063 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
1064 */
1065 int
1066 dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
1067 {
1068 int ret = BCME_ERROR;
1069 unsigned long flags;
1070 #ifdef DUMP_IOCTL_IOV_LIST
1071 dhd_iov_li_t *iov_li;
1072 #endif /* DUMP_IOCTL_IOV_LIST */
1073
1074 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1075 DHD_OS_WAKE_LOCK(dhd_pub);
1076 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
1077 DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
1078 DHD_OS_WAKE_UNLOCK(dhd_pub);
1079 return BCME_ERROR;
1080 }
1081 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1082
1083 #ifdef KEEPIF_ON_DEVICE_RESET
1084 if (ioc->cmd == WLC_GET_VAR) {
1085 dbus_config_t config;
1086 config.general_param = 0;
1087 if (buf) {
1088 if (!strcmp(buf, "wowl_activate")) {
1089 /* 1 (TRUE) after decreased by 1 */
1090 config.general_param = 2;
1091 } else if (!strcmp(buf, "wowl_clear")) {
1092 /* 0 (FALSE) after decreased by 1 */
1093 config.general_param = 1;
1094 }
1095 }
1096 if (config.general_param) {
1097 config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
1098 config.general_param--;
1099 dbus_set_config(dhd_pub->dbus, &config);
1100 }
1101 }
1102 #endif /* KEEPIF_ON_DEVICE_RESET */
1103
1104 if (dhd_os_proto_block(dhd_pub))
1105 {
1106 #ifdef DHD_LOG_DUMP
1107 int slen, val, lval, min_len;
1108 char *msg, tmp[64];
1109
1110 /* WLC_GET_VAR */
1111 if (ioc->cmd == WLC_GET_VAR && buf) {
1112 min_len = MIN(sizeof(tmp) - 1, strlen(buf));
1113 memset(tmp, 0, sizeof(tmp));
1114 bcopy(buf, tmp, min_len);
1115 tmp[min_len] = '\0';
1116 }
1117 #endif /* DHD_LOG_DUMP */
1118
1119 #ifdef DHD_DISCONNECT_TRACE
1120 if ((WLC_DISASSOC == ioc->cmd) || (WLC_DOWN == ioc->cmd) ||
1121 (WLC_DISASSOC_MYAP == ioc->cmd)) {
1122 DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
1123 }
1124 #endif /* DHD_DISCONNECT_TRACE */
1125
1126 /* logging of iovars that are sent to the dongle, ./dhd msglevel +iovar */
1127 if (ioc->set == TRUE) {
1128 char *pars = (char *)buf; // points at user buffer
1129 if (ioc->cmd == WLC_SET_VAR && buf) {
1130 DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
1131 if (ioc->len > 1 + sizeof(uint32)) {
1132 // skip iovar name:
1133 pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
1134 pars++; // skip NULL character
1135 }
1136 } else {
1137 DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
1138 ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
1139 }
1140 if (pars != NULL) {
1141 DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
1142 } else {
1143 DHD_DNGL_IOVAR_SET((" NULL\n"));
1144 }
1145 }
1146
1147 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1148 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
1149 DHD_INFO(("%s: returning as busstate=%d\n",
1150 __FUNCTION__, dhd_pub->busstate));
1151 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1152 dhd_os_proto_unblock(dhd_pub);
1153 return -ENODEV;
1154 }
1155 DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
1156 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1157
1158 #ifdef DHD_PCIE_RUNTIMEPM
1159 dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
1160 #endif /* DHD_PCIE_RUNTIMEPM */
1161
1162 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1163 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
1164 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1165 __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
1166 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1167 dhd_os_busbusy_wake(dhd_pub);
1168 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1169 dhd_os_proto_unblock(dhd_pub);
1170 return -ENODEV;
1171 }
1172 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1173
1174 #ifdef DUMP_IOCTL_IOV_LIST
1175 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
1176 if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
1177 DHD_ERROR(("iovar dump list item allocation Failed\n"));
1178 } else {
1179 iov_li->cmd = ioc->cmd;
1180 if (buf)
1181 bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
1182 dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
1183 &iov_li->list);
1184 }
1185 }
1186 #endif /* DUMP_IOCTL_IOV_LIST */
1187
1188 ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
1189
1190 #ifdef DUMP_IOCTL_IOV_LIST
1191 if (ret == -ETIMEDOUT) {
1192 DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
1193 IOV_LIST_MAX_LEN));
1194 dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
1195 }
1196 #endif /* DUMP_IOCTL_IOV_LIST */
1197 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
1198 if (ret == -ETIMEDOUT) {
1199 copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
1200 }
1201 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
1202 #ifdef DHD_LOG_DUMP
1203 if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
1204 buf != NULL) {
1205 if (buf) {
1206 lval = 0;
1207 slen = strlen(buf) + 1;
1208 msg = (char*)buf;
1209 if (len >= slen + sizeof(lval)) {
1210 if (ioc->cmd == WLC_GET_VAR) {
1211 msg = tmp;
1212 lval = *(int*)buf;
1213 } else {
1214 min_len = MIN(ioc->len - slen, sizeof(int));
1215 bcopy((msg + slen), &lval, min_len);
1216 }
1217 if (!strncmp(msg, "cur_etheraddr",
1218 strlen("cur_etheraddr"))) {
1219 lval = 0;
1220 }
1221 }
1222 DHD_IOVAR_MEM((
1223 "%s: cmd: %d, msg: %s val: 0x%x,"
1224 " len: %d, set: %d, txn-id: %d\n",
1225 ioc->cmd == WLC_GET_VAR ?
1226 "WLC_GET_VAR" : "WLC_SET_VAR",
1227 ioc->cmd, msg, lval, ioc->len, ioc->set,
1228 dhd_prot_get_ioctl_trans_id(dhd_pub)));
1229 } else {
1230 DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
1231 ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
1232 ioc->cmd, ioc->len, ioc->set,
1233 dhd_prot_get_ioctl_trans_id(dhd_pub)));
1234 }
1235 } else {
1236 slen = ioc->len;
1237 if (buf != NULL && slen != 0) {
1238 if (slen >= 4) {
1239 val = *(int*)buf;
1240 } else if (slen >= 2) {
1241 val = *(short*)buf;
1242 } else {
1243 val = *(char*)buf;
1244 }
1245 /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
1246 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION)
1247 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
1248 "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
1249 } else {
1250 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
1251 }
1252 }
1253 #endif /* DHD_LOG_DUMP */
1254 #if defined(OEM_ANDROID)
1255 if (ret && dhd_pub->up) {
1256 /* Send hang event only if dhd_open() was success */
1257 dhd_os_check_hang(dhd_pub, ifidx, ret);
1258 }
1259
1260 if (ret == -ETIMEDOUT && !dhd_pub->up) {
1261 DHD_ERROR(("%s: 'resumed on timeout' error occurred "
1262 "before the interface was brought up\n", __FUNCTION__));
1264 }
1265 #endif /* defined(OEM_ANDROID) */
1266
1267 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1268 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1269 dhd_os_busbusy_wake(dhd_pub);
1270 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1271
1272 dhd_os_proto_unblock(dhd_pub);
1273
1274 }
1275
1276 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1277 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
1278 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
1279
1280 DHD_OS_WAKE_UNLOCK(dhd_pub);
1281 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1282
1283 return ret;
1284 }
1285
1286 uint wl_get_port_num(wl_io_pport_t *io_pport)
1287 {
1288 return 0;
1289 }
1290
1291 /* Get bssidx from iovar params
1292 * Input: dhd_pub - pointer to dhd_pub_t
1293 * params - IOVAR params
1294 * Output: idx - BSS index
1295 * val - pointer to the IOVAR arguments
1296 */
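/*
 * Expected parameter layout (a sketch derived from the parsing code below):
 *
 *	"bsscfg:" <iovar-name> '\0' <32-bit bssidx> <iovar arguments...>
 *
 * e.g. a per-BSS "ap_isolate" set arrives as "bsscfg:ap_isolate\0" followed by the
 * 32-bit BSS index and the value bytes; the index is translated to the DHD interface
 * index via dhd_bssidx2idx().
 */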
1297 static int
1298 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
1299 {
1300 char *prefix = "bsscfg:";
1301 uint32 bssidx;
1302
1303 if (!(strncmp(params, prefix, strlen(prefix)))) {
1304 /* per bss setting should be prefixed with 'bsscfg:' */
1305 const char *p = params + strlen(prefix);
1306
1307 /* Skip Name */
1308 while (*p != '\0')
1309 p++;
1310 /* consider null */
1311 p = p + 1;
1312 bcopy(p, &bssidx, sizeof(uint32));
1313 /* Get corresponding dhd index */
1314 bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
1315
1316 if (bssidx >= DHD_MAX_IFS) {
1317 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
1318 return BCME_ERROR;
1319 }
1320
1321 /* skip bss idx */
1322 p += sizeof(uint32);
1323 *val = p;
1324 *idx = bssidx;
1325 } else {
1326 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
1327 return BCME_ERROR;
1328 }
1329
1330 return BCME_OK;
1331 }
1332
1333 #if defined(DHD_DEBUG) && defined(BCMDHDUSB)
1334 /* USB Device console input function */
1335 int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
1336 {
1337 DHD_TRACE(("%s \n", __FUNCTION__));
1338
1339 return dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);
1340
1341 }
1342 #endif /* DHD_DEBUG && BCMDHDUSB */
1343
1344 #ifdef DHD_DEBUG
1345 int
1346 dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
1347 {
1348 unsigned long int_arg = 0;
1349 char *p;
1350 char *end_ptr = NULL;
1351 dhd_dbg_mwli_t *mw_li;
1352 dll_t *item, *next;
1353 /* check if mwalloc, mwquery or mwfree was supplied an argument separated by a space */
1354 p = bcmstrstr((char *)msg, " ");
1355 if (p != NULL) {
1356 /* space should be converted to null as separation flag for firmware */
1357 *p = '\0';
1358 /* store the argument in int_arg */
1359 int_arg = bcm_strtoul(p+1, &end_ptr, 10);
1360 }
1361
1362 if (!p && !strcmp(msg, "query")) {
1363 /* query the list internally */
1364 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1365 DHD_ERROR(("memwaste list is empty, call mwalloc <size> to allocate\n"));
1366 } else {
1367 for (item = dll_head_p(&dhd->mw_list_head);
1368 !dll_end(&dhd->mw_list_head, item); item = next) {
1369 next = dll_next_p(item);
1370 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1371 DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
1372 }
1373 }
1374 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
1375 int32 alloc_handle;
1376 /* convert size from KB into bytes and append as integer */
1377 *((int32 *)(p+1)) = int_arg*1024;
1378 *(p+1+sizeof(int32)) = '\0';
1379
1380 /* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
1381 * 1 byte for the null character
1382 */
1383 msglen = strlen(msg) + sizeof(int32) + 1;
1384 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
1385 DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
1386 }
1387
1388 /* returned allocated handle from dongle, basically address of the allocated unit */
1389 alloc_handle = *((int32 *)msg);
1390
1391 /* add a node in the list with tuple <id, handle, size> */
1392 if (alloc_handle == 0) {
1393 DHD_ERROR(("Requested size could not be allocated\n"));
1394 } else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
1395 DHD_ERROR(("mw list item allocation Failed\n"));
1396 } else {
1397 mw_li->id = dhd->mw_id++;
1398 mw_li->handle = alloc_handle;
1399 mw_li->size = int_arg;
1400 /* append the node in the list */
1401 dll_append(&dhd->mw_list_head, &mw_li->list);
1402 }
1403 } else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
1404 /* inform dongle to free wasted chunk */
1405 int handle = 0;
1406 int size = 0;
1407 for (item = dll_head_p(&dhd->mw_list_head);
1408 !dll_end(&dhd->mw_list_head, item); item = next) {
1409 next = dll_next_p(item);
1410 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1411
1412 if (mw_li->id == (int)int_arg) {
1413 handle = mw_li->handle;
1414 size = mw_li->size;
1415 dll_delete(item);
1416 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1417 if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
1418 /* reset the id */
1419 dhd->mw_id = 0;
1420 }
1421 }
1422 }
1423 if (handle) {
1424 int len;
1425 /* append the free handle and the chunk size in first 8 bytes
1426 * after the command and null character
1427 */
1428 *((int32 *)(p+1)) = handle;
1429 *((int32 *)((p+1)+sizeof(int32))) = size;
1430 /* append null as terminator */
1431 *(p+1+2*sizeof(int32)) = '\0';
1432 /* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
1433 * + 1 byte for the null character
1434 */
1435 len = strlen(msg) + 2*sizeof(int32) + 1;
1436 /* send iovar to free the chunk */
1437 if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
1438 DHD_ERROR(("IOCTL failed for memdebug free\n"));
1439 }
1440 } else {
1441 DHD_ERROR(("specified id does not exist\n"));
1442 }
1443 } else {
1444 /* for all the wrong argument formats */
1445 return BCME_BADARG;
1446 }
1447 return 0;
1448 }
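/*
 * Command format sketch (derived from the parsing above; dhd utility invocations are
 * illustrative): the "mem_debug" iovar accepts
 *
 *	"query"       - list the currently allocated memwaste chunks
 *	"alloc <KB>"  - ask the dongle to waste <KB> kilobytes, e.g. "alloc 4"
 *	"free <id>"   - free the chunk whose id was reported by "query"
 *
 * e.g. "dhd mem_debug alloc 4" followed by "dhd mem_debug query".
 */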
1449 extern void
1450 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
1451 {
1452 dll_t *item;
1453 dhd_dbg_mwli_t *mw_li;
1454 while (!(dll_empty(list_head))) {
1455 item = dll_head_p(list_head);
1456 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
1457 dll_delete(item);
1458 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
1459 }
1460 }
1461 #ifdef BCMPCIE
1462 int
1463 dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
1464 {
1465 flow_ring_table_t *flow_ring_table;
1466 char *cmd;
1467 char *end_ptr = NULL;
1468 uint8 prio;
1469 uint16 flowid;
1470 int i;
1471 int ret = 0;
1472 cmd = bcmstrstr(msg, " ");
1473 BCM_REFERENCE(prio);
1474 if (cmd != NULL) {
1475 /* in order to use string operations append null */
1476 *cmd = '\0';
1477 } else {
1478 DHD_ERROR(("missing: create/delete args\n"));
1479 return BCME_ERROR;
1480 }
1481 if (cmd && !strcmp(msg, "create")) {
1482 /* extract <"source address", "destination address", "priority"> */
1483 uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
1484 BCM_REFERENCE(sa);
1485 BCM_REFERENCE(da);
1486 msg = msg + strlen("create") + 1;
1487 /* fill ethernet source address */
1488 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1489 sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1490 if (*end_ptr == ':') {
1491 msg = (end_ptr + 1);
1492 } else if (i != 5) {
1493 DHD_ERROR(("not a valid source mac addr\n"));
1494 return BCME_ERROR;
1495 }
1496 }
1497 if (*end_ptr != ' ') {
1498 DHD_ERROR(("missing: destination mac address\n"));
1499 return BCME_ERROR;
1500 } else {
1501 /* skip space */
1502 msg = end_ptr + 1;
1503 }
1504 /* fill ethernet destination address */
1505 for (i = 0; i < ETHER_ADDR_LEN; i++) {
1506 da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
1507 if (*end_ptr == ':') {
1508 msg = (end_ptr + 1);
1509 } else if (i != 5) {
1510 DHD_ERROR(("not a valid destination mac addr\n"));
1511 return BCME_ERROR;
1512 }
1513 }
1514 if (*end_ptr != ' ') {
1515 DHD_ERROR(("missing: priority\n"));
1516 return BCME_ERROR;
1517 } else {
1518 msg = end_ptr + 1;
1519 }
1520 /* parse priority */
1521 prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
1522 if (prio > MAXPRIO) {
1523 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
1524 __FUNCTION__));
1525 return BCME_ERROR;
1526 }
1527
1528 if (*end_ptr != '\0') {
1529 DHD_ERROR(("msg not terminated with NULL character\n"));
1530 return BCME_ERROR;
1531 }
1532 ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
1533 if (ret != BCME_OK) {
1534 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
1535 return BCME_ERROR;
1536 }
1537 return BCME_OK;
1538
1539 } else if (cmd && !strcmp(msg, "delete")) {
1540 msg = msg + strlen("delete") + 1;
1541 /* parse flowid */
1542 flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
1543 if (*end_ptr != '\0') {
1544 DHD_ERROR(("msg not terminated with NULL character\n"));
1545 return BCME_ERROR;
1546 }
1547
1548 /* Find flowid from ifidx 0 since this IOVAR creates flowrings with ifidx 0 */
1549 if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
1550 {
1551 DHD_ERROR(("%s : cannot delete flowid %u that was never created\n", __FUNCTION__, flowid));
1552 return BCME_ERROR;
1553 }
1554
1555 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
1556 ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
1557 if (ret != BCME_OK) {
1558 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
1559 return BCME_ERROR;
1560 }
1561 return BCME_OK;
1562 }
1563 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
1564 return BCME_ERROR;
1565 }
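/*
 * Command format sketch (derived from the parsing above; addresses and priority are
 * illustrative): the "flow_ring_debug" iovar accepts
 *
 *	"create <sa> <da> <prio>"  e.g. "create 00:90:4c:11:22:33 00:90:4c:aa:bb:cc 5"
 *	"delete <flowid>"          e.g. "delete 3"
 *
 * where <sa>/<da> are colon-separated MAC addresses, <prio> is 0-7 and <flowid> is
 * the id assigned when the ring was created on ifidx 0.
 */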
1566 #endif /* BCMPCIE */
1567 #endif /* DHD_DEBUG */
1568
1569 static int
1570 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
1571 void *params, int plen, void *arg, int len, int val_size)
1572 {
1573 int bcmerror = 0;
1574 int32 int_val = 0;
1575 uint32 dhd_ver_len, bus_api_rev_len;
1576
1577 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
1578 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
1579
1580 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
1581 goto exit;
1582
1583 if (plen >= (int)sizeof(int_val))
1584 bcopy(params, &int_val, sizeof(int_val));
1585
1586 switch (actionid) {
1587 case IOV_GVAL(IOV_VERSION):
1588 /* Need to have checked buffer length */
1589 dhd_ver_len = strlen(dhd_version);
1590 bus_api_rev_len = strlen(bus_api_revision);
1591 if (dhd_ver_len)
1592 bcm_strncpy_s((char*)arg, dhd_ver_len, dhd_version, dhd_ver_len);
1593 if (bus_api_rev_len)
1594 bcm_strncat_s((char*)arg + dhd_ver_len, bus_api_rev_len, bus_api_revision,
1595 bus_api_rev_len);
1596 break;
1597
1598 case IOV_GVAL(IOV_MSGLEVEL):
1599 int_val = (int32)dhd_msg_level;
1600 bcopy(&int_val, arg, val_size);
1601 break;
1602
1603 case IOV_SVAL(IOV_MSGLEVEL):
1604 #ifdef WL_CFG80211
1605 /* Enable DHD and WL logs in one shot */
1606 if (int_val & DHD_WL_VAL2)
1607 wl_cfg80211_enable_trace(TRUE, int_val & (~DHD_WL_VAL2));
1608 else if (int_val & DHD_WL_VAL)
1609 wl_cfg80211_enable_trace(FALSE, WL_DBG_DBG);
1610 if (!(int_val & DHD_WL_VAL2))
1611 #endif /* WL_CFG80211 */
1612 dhd_msg_level = int_val;
1613 break;
1614 case IOV_GVAL(IOV_BCMERRORSTR):
1615 bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
1616 ((char *)arg)[BCME_STRLEN - 1] = 0x00;
1617 break;
1618
1619 case IOV_GVAL(IOV_BCMERROR):
1620 int_val = (int32)dhd_pub->bcmerror;
1621 bcopy(&int_val, arg, val_size);
1622 break;
1623
1624 case IOV_GVAL(IOV_WDTICK):
1625 int_val = (int32)dhd_watchdog_ms;
1626 bcopy(&int_val, arg, val_size);
1627 break;
1628
1629 case IOV_SVAL(IOV_WDTICK):
1630 if (!dhd_pub->up) {
1631 bcmerror = BCME_NOTUP;
1632 break;
1633 }
1634
1635 dhd_watchdog_ms = (uint)int_val;
1636
1637 dhd_os_wd_timer(dhd_pub, (uint)int_val);
1638 break;
1639
1640 case IOV_GVAL(IOV_DUMP):
1641 if (dhd_dump(dhd_pub, arg, len) <= 0)
1642 bcmerror = BCME_ERROR;
1643 else
1644 bcmerror = BCME_OK;
1645 break;
1646
1647 case IOV_GVAL(IOV_DCONSOLE_POLL):
1648 int_val = (int32)dhd_pub->dhd_console_ms;
1649 bcopy(&int_val, arg, val_size);
1650 break;
1651
1652 case IOV_SVAL(IOV_DCONSOLE_POLL):
1653 dhd_pub->dhd_console_ms = (uint)int_val;
1654 break;
1655
1656 #if defined(DHD_DEBUG)
1657 case IOV_SVAL(IOV_CONS):
1658 if (len > 0)
1659 bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
1660 break;
1661 #endif /* DHD_DEBUG */
1662
1663 case IOV_SVAL(IOV_CLEARCOUNTS):
1664 dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
1665 dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
1666 dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
1667 dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
1668 dhd_pub->tx_dropped = 0;
1669 dhd_pub->rx_dropped = 0;
1670 dhd_pub->tx_pktgetfail = 0;
1671 dhd_pub->rx_pktgetfail = 0;
1672 dhd_pub->rx_readahead_cnt = 0;
1673 dhd_pub->tx_realloc = 0;
1674 dhd_pub->wd_dpc_sched = 0;
1675 dhd_pub->tx_big_packets = 0;
1676 memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
1677 dhd_bus_clearcounts(dhd_pub);
1678 #ifdef PROP_TXSTATUS
1679 /* clear proptxstatus related counters */
1680 dhd_wlfc_clear_counts(dhd_pub);
1681 #endif /* PROP_TXSTATUS */
1682 #if defined(DHD_LB_STATS)
1683 DHD_LB_STATS_RESET(dhd_pub);
1684 #endif /* DHD_LB_STATS */
1685 break;
1686
1687 case IOV_GVAL(IOV_IOCTLTIMEOUT): {
1688 int_val = (int32)dhd_os_get_ioctl_resp_timeout();
1689 bcopy(&int_val, arg, sizeof(int_val));
1690 break;
1691 }
1692
1693 case IOV_SVAL(IOV_IOCTLTIMEOUT): {
1694 if (int_val <= 0)
1695 bcmerror = BCME_BADARG;
1696 else
1697 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
1698 break;
1699 }
1700
1701 #ifdef PROP_TXSTATUS
1702 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
1703 bool wlfc_enab = FALSE;
1704 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1705 if (bcmerror != BCME_OK)
1706 goto exit;
1707 int_val = wlfc_enab ? 1 : 0;
1708 bcopy(&int_val, arg, val_size);
1709 break;
1710 }
1711 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
1712 bool wlfc_enab = FALSE;
1713 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
1714 if (bcmerror != BCME_OK)
1715 goto exit;
1716
1717 /* wlfc is already set as desired */
1718 if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
1719 goto exit;
1720
1721 if (int_val == TRUE)
1722 bcmerror = dhd_wlfc_init(dhd_pub);
1723 else
1724 bcmerror = dhd_wlfc_deinit(dhd_pub);
1725
1726 break;
1727 }
1728 case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
1729 bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
1730 if (bcmerror != BCME_OK)
1731 goto exit;
1732 bcopy(&int_val, arg, val_size);
1733 break;
1734
1735 case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
1736 dhd_wlfc_set_mode(dhd_pub, int_val);
1737 break;
1738
1739 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1740 bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
1741 if (bcmerror != BCME_OK)
1742 goto exit;
1743 bcopy(&int_val, arg, val_size);
1744 break;
1745
1746 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
1747 dhd_wlfc_set_module_ignore(dhd_pub, int_val);
1748 break;
1749
1750 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1751 bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
1752 if (bcmerror != BCME_OK)
1753 goto exit;
1754 bcopy(&int_val, arg, val_size);
1755 break;
1756
1757 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
1758 dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
1759 break;
1760
1761 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1762 bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
1763 if (bcmerror != BCME_OK)
1764 goto exit;
1765 bcopy(&int_val, arg, val_size);
1766 break;
1767
1768 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
1769 dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
1770 break;
1771
1772 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1773 bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
1774 if (bcmerror != BCME_OK)
1775 goto exit;
1776 bcopy(&int_val, arg, val_size);
1777 break;
1778
1779 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
1780 dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
1781 break;
1782
1783 #endif /* PROP_TXSTATUS */
1784
1785 case IOV_GVAL(IOV_BUS_TYPE):
1786 /* The dhd application queries the driver to check whether the bus is USB, SDIO, or PCIe. */
1787 #ifdef BCMDHDUSB
1788 int_val = BUS_TYPE_USB;
1789 #endif // endif
1790 #ifdef BCMSDIO
1791 int_val = BUS_TYPE_SDIO;
1792 #endif // endif
1793 #ifdef PCIE_FULL_DONGLE
1794 int_val = BUS_TYPE_PCIE;
1795 #endif // endif
1796 bcopy(&int_val, arg, val_size);
1797 break;
1798
1799 case IOV_SVAL(IOV_CHANGEMTU):
1800 int_val &= 0xffff;
1801 bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
1802 break;
1803
1804 case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
1805 {
1806 uint i = 0;
1807 uint8 *ptr = (uint8 *)arg;
1808 uint8 count = 0;
1809
1810 ptr++;
1811 for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
1812 if (dhd_pub->reorder_bufs[i] != NULL) {
1813 *ptr = dhd_pub->reorder_bufs[i]->flow_id;
1814 ptr++;
1815 count++;
1816 }
1817 }
1818 ptr = (uint8 *)arg;
1819 *ptr = count;
1820 break;
1821 }
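/* Illustrative only: with reorder buffers active for flows 3 and 7 the
 * returned buffer is laid out as { 0x02, 0x03, 0x07, ... } -- the first
 * byte is the count, followed by one flow id per active buffer.
 */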
1822 #ifdef DHDTCPACK_SUPPRESS
1823 case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
1824 int_val = (uint32)dhd_pub->tcpack_sup_mode;
1825 bcopy(&int_val, arg, val_size);
1826 break;
1827 }
1828 case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
1829 bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
1830 break;
1831 }
1832 #endif /* DHDTCPACK_SUPPRESS */
1833
1834 #ifdef DHD_L2_FILTER
1835 case IOV_GVAL(IOV_DHCP_UNICAST): {
1836 uint32 bssidx;
1837 const char *val;
1838 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1839 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter and name = %s\n",
1840 __FUNCTION__, name));
1841 bcmerror = BCME_BADARG;
1842 break;
1843 }
1844 int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
1845 memcpy(arg, &int_val, val_size);
1846 break;
1847 }
1848 case IOV_SVAL(IOV_DHCP_UNICAST): {
1849 uint32 bssidx;
1850 const char *val;
1851 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1852 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameter and name = %s\n",
1853 __FUNCTION__, name));
1854 bcmerror = BCME_BADARG;
1855 break;
1856 }
1857 memcpy(&int_val, val, sizeof(int_val));
1858 bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
1859 break;
1860 }
1861 case IOV_GVAL(IOV_BLOCK_PING): {
1862 uint32 bssidx;
1863 const char *val;
1864
1865 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1866 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1867 bcmerror = BCME_BADARG;
1868 break;
1869 }
1870 int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
1871 memcpy(arg, &int_val, val_size);
1872 break;
1873 }
1874 case IOV_SVAL(IOV_BLOCK_PING): {
1875 uint32 bssidx;
1876 const char *val;
1877
1878 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1879 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
1880 bcmerror = BCME_BADARG;
1881 break;
1882 }
1883 memcpy(&int_val, val, sizeof(int_val));
1884 bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
1885 break;
1886 }
1887 case IOV_GVAL(IOV_PROXY_ARP): {
1888 uint32 bssidx;
1889 const char *val;
1890
1891 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1892 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1893 bcmerror = BCME_BADARG;
1894 break;
1895 }
1896 int_val = dhd_get_parp_status(dhd_pub, bssidx);
1897 bcopy(&int_val, arg, val_size);
1898 break;
1899 }
1900 case IOV_SVAL(IOV_PROXY_ARP): {
1901 uint32 bssidx;
1902 const char *val;
1903
1904 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1905 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
1906 bcmerror = BCME_BADARG;
1907 break;
1908 }
1909 bcopy(val, &int_val, sizeof(int_val));
1910
1911 /* Issue an iovar request to WL to update the proxy arp capability bit
1912 * in the Extended Capability IE of beacons/probe responses.
1913 */
1914 bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
1915 NULL, 0, TRUE);
1916 if (bcmerror == BCME_OK) {
1917 dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1918 }
1919 break;
1920 }
1921 case IOV_GVAL(IOV_GRAT_ARP): {
1922 uint32 bssidx;
1923 const char *val;
1924
1925 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1926 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1927 bcmerror = BCME_BADARG;
1928 break;
1929 }
1930 int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
1931 memcpy(arg, &int_val, val_size);
1932 break;
1933 }
1934 case IOV_SVAL(IOV_GRAT_ARP): {
1935 uint32 bssidx;
1936 const char *val;
1937
1938 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1939 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
1940 bcmerror = BCME_BADARG;
1941 break;
1942 }
1943 memcpy(&int_val, val, sizeof(int_val));
1944 bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
1945 break;
1946 }
1947 case IOV_GVAL(IOV_BLOCK_TDLS): {
1948 uint32 bssidx;
1949 const char *val;
1950
1951 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1952 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1953 bcmerror = BCME_BADARG;
1954 break;
1955 }
1956 int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
1957 memcpy(arg, &int_val, val_size);
1958 break;
1959 }
1960 case IOV_SVAL(IOV_BLOCK_TDLS): {
1961 uint32 bssidx;
1962 const char *val;
1963
1964 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1965 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
1966 bcmerror = BCME_BADARG;
1967 break;
1968 }
1969 memcpy(&int_val, val, sizeof(int_val));
1970 bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
1971 break;
1972 }
1973 #endif /* DHD_L2_FILTER */
1974 case IOV_SVAL(IOV_DHD_IE): {
1975 uint32 bssidx;
1976 const char *val;
1977
1978 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1979 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
1980 bcmerror = BCME_BADARG;
1981 break;
1982 }
1983
1984 break;
1985 }
1986 case IOV_GVAL(IOV_AP_ISOLATE): {
1987 uint32 bssidx;
1988 const char *val;
1989
1990 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
1991 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
1992 bcmerror = BCME_BADARG;
1993 break;
1994 }
1995
1996 int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
1997 bcopy(&int_val, arg, val_size);
1998 break;
1999 }
2000 case IOV_SVAL(IOV_AP_ISOLATE): {
2001 uint32 bssidx;
2002 const char *val;
2003
2004 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2005 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2006 bcmerror = BCME_BADARG;
2007 break;
2008 }
2009
2010 ASSERT(val);
2011 bcopy(val, &int_val, sizeof(uint32));
2012 dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
2013 break;
2014 }
2015 #ifdef DHD_PSTA
2016 case IOV_GVAL(IOV_PSTA): {
2017 int_val = dhd_get_psta_mode(dhd_pub);
2018 bcopy(&int_val, arg, val_size);
2019 break;
2020 }
2021 case IOV_SVAL(IOV_PSTA): {
2022 if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
2023 dhd_set_psta_mode(dhd_pub, int_val);
2024 } else {
2025 bcmerror = BCME_RANGE;
2026 }
2027 break;
2028 }
2029 #endif /* DHD_PSTA */
2030 #ifdef DHD_WET
2031 case IOV_GVAL(IOV_WET):
2032 int_val = dhd_get_wet_mode(dhd_pub);
2033 bcopy(&int_val, arg, val_size);
2034 break;
2035
2036 case IOV_SVAL(IOV_WET):
2037 if (int_val == 0 || int_val == 1) {
2038 dhd_set_wet_mode(dhd_pub, int_val);
2039 /* Delete the WET DB when disabled */
2040 if (!int_val) {
2041 dhd_wet_sta_delete_list(dhd_pub);
2042 }
2043 } else {
2044 bcmerror = BCME_RANGE;
2045 }
2046 break;
2047 case IOV_SVAL(IOV_WET_HOST_IPV4):
2048 dhd_set_wet_host_ipv4(dhd_pub, params, plen);
2049 break;
2050 case IOV_SVAL(IOV_WET_HOST_MAC):
2051 dhd_set_wet_host_mac(dhd_pub, params, plen);
2052 break;
2053 #endif /* DHD_WET */
2054 #ifdef DHD_MCAST_REGEN
2055 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2056 uint32 bssidx;
2057 const char *val;
2058
2059 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2060 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2061 bcmerror = BCME_BADARG;
2062 break;
2063 }
2064
2065 int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
2066 bcopy(&int_val, arg, val_size);
2067 break;
2068 }
2069
2070 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
2071 uint32 bssidx;
2072 const char *val;
2073
2074 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2075 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
2076 bcmerror = BCME_BADARG;
2077 break;
2078 }
2079
2080 ASSERT(val);
2081 bcopy(val, &int_val, sizeof(uint32));
2082 dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
2083 break;
2084 }
2085 #endif /* DHD_MCAST_REGEN */
2086
2087 case IOV_GVAL(IOV_CFG80211_OPMODE): {
2088 int_val = (int32)dhd_pub->op_mode;
2089 bcopy(&int_val, arg, sizeof(int_val));
2090 break;
2091 }
2092 case IOV_SVAL(IOV_CFG80211_OPMODE): {
2093 if (int_val <= 0)
2094 bcmerror = BCME_BADARG;
2095 else
2096 dhd_pub->op_mode = int_val;
2097 break;
2098 }
2099
2100 case IOV_GVAL(IOV_ASSERT_TYPE):
2101 int_val = g_assert_type;
2102 bcopy(&int_val, arg, val_size);
2103 break;
2104
2105 case IOV_SVAL(IOV_ASSERT_TYPE):
2106 g_assert_type = (uint32)int_val;
2107 break;
2108
2109 #if !defined(MACOSX_DHD)
2110 case IOV_GVAL(IOV_LMTEST): {
2111 *(uint32 *)arg = (uint32)lmtest;
2112 break;
2113 }
2114
2115 case IOV_SVAL(IOV_LMTEST): {
2116 uint32 val = *(uint32 *)arg;
2117 if (val > 50)
2118 bcmerror = BCME_BADARG;
2119 else {
2120 lmtest = (uint)val;
2121 DHD_ERROR(("%s: lmtest %s\n",
2122 __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
2123 }
2124 break;
2125 }
2126 #endif // endif
2127
2128 #ifdef SHOW_LOGTRACE
2129 case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
2130 trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
2131 dhd_dbg_ring_t *dbg_verbose_ring = NULL;
2132
2133 dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
2134 if (dbg_verbose_ring == NULL) {
2135 DHD_ERROR(("dbg_verbose_ring is NULL\n"));
2136 bcmerror = BCME_UNSUPPORTED;
2137 break;
2138 }
2139
2140 if (trace_buf_info != NULL) {
2141 bzero(trace_buf_info, sizeof(trace_buf_info_t));
2142 dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
2143 } else {
2144 DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
2145 bcmerror = BCME_NOMEM;
2146 }
2147 break;
2148 }
2149 #endif /* SHOW_LOGTRACE */
2150 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
2151 if (dhd_pub->dongle_trap_occured)
2152 int_val = ltoh32(dhd_pub->last_trap_info.type);
2153 else
2154 int_val = 0;
2155 bcopy(&int_val, arg, val_size);
2156 break;
2157
2158 case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
2159 {
2160 struct bcmstrbuf strbuf;
2161 bcm_binit(&strbuf, arg, len);
2162 if (dhd_pub->dongle_trap_occured == FALSE) {
2163 bcm_bprintf(&strbuf, "no trap recorded\n");
2164 break;
2165 }
2166 dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
2167 break;
2168 }
2169 #ifdef DHD_DEBUG
2170 #if defined(BCMSDIO) || defined(BCMPCIE)
2171
2172 case IOV_GVAL(IOV_BPADDR):
2173 {
2174 sdreg_t sdreg;
2175 uint32 addr, size;
2176
2177 memcpy(&sdreg, params, sizeof(sdreg));
2178
2179 addr = sdreg.offset;
2180 size = sdreg.func;
2181
2182 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2183 (uint *)&int_val, TRUE);
2184
2185 memcpy(arg, &int_val, sizeof(int32));
2186
2187 break;
2188 }
2189
2190 case IOV_SVAL(IOV_BPADDR):
2191 {
2192 sdreg_t sdreg;
2193 uint32 addr, size;
2194
2195 memcpy(&sdreg, params, sizeof(sdreg));
2196
2197 addr = sdreg.offset;
2198 size = sdreg.func;
2199
2200 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
2201 (uint *)&sdreg.value,
2202 FALSE);
2203
2204 break;
2205 }
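/* Illustrative only: the sdreg_t fields are reused here -- 'offset' carries
 * the backplane address and 'func' the access width in bytes, so a 4-byte
 * read of address 0x18000000 would pass { .func = 4, .offset = 0x18000000 }.
 */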
2206 #endif /* BCMSDIO || BCMPCIE */
2207 #ifdef BCMPCIE
2208 case IOV_SVAL(IOV_FLOW_RING_DEBUG):
2209 {
2210 bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
2211 break;
2212 }
2213 #endif /* BCMPCIE */
2214 case IOV_SVAL(IOV_MEM_DEBUG):
2215 if (len > 0) {
2216 bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
2217 }
2218 break;
2219 #endif /* DHD_DEBUG */
2220 #if defined(DHD_LOG_DUMP)
2221 case IOV_GVAL(IOV_LOG_DUMP):
2222 {
2223 dhd_prot_debug_info_print(dhd_pub);
2224 dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
2225 break;
2226 }
2227 #endif /* DHD_LOG_DUMP */
2228 #ifndef OEM_ANDROID
2229 case IOV_GVAL(IOV_TPUT_TEST):
2230 {
2231 tput_test_t *tput_data = NULL;
2232 if (params && plen >= sizeof(tput_test_t)) {
2233 tput_data = (tput_test_t *)params;
2234 bcmerror = dhd_tput_test(dhd_pub, tput_data);
2235 } else {
2236 DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__));
2237 bcmerror = BCME_BADARG;
2238 }
2239 break;
2240 }
2241 #endif /* OEM_ANDROID */
2242 case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
2243 {
2244 if (dhd_pub->debug_buf_dest_support) {
2245 debug_buf_dest_stat_t *debug_buf_dest_stat =
2246 (debug_buf_dest_stat_t *)arg;
2247 memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
2248 sizeof(dhd_pub->debug_buf_dest_stat));
2249 } else {
2250 bcmerror = BCME_DISABLED;
2251 }
2252 break;
2253 }
2254
2255 #ifdef DHD_DEBUG
2256 case IOV_SVAL(IOV_INDUCE_ERROR): {
2257 if (int_val >= DHD_INDUCE_ERROR_MAX) {
2258 DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
2259 } else {
2260 dhd_pub->dhd_induce_error = (uint16)int_val;
2261 }
2262 break;
2263 }
2264 #endif /* DHD_DEBUG */
2265
2266 #ifdef WL_IFACE_MGMT_CONF
2267 #ifdef WL_CFG80211
2268 #ifdef WL_NANP2P
2269 case IOV_GVAL(IOV_CONC_DISC): {
2270 int_val = wl_cfg80211_get_iface_conc_disc(
2271 dhd_linux_get_primary_netdev(dhd_pub));
2272 bcopy(&int_val, arg, sizeof(int_val));
2273 break;
2274 }
2275 case IOV_SVAL(IOV_CONC_DISC): {
2276 bcmerror = wl_cfg80211_set_iface_conc_disc(
2277 dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
2278 break;
2279 }
2280 #endif /* WL_NANP2P */
2281 #ifdef WL_IFACE_MGMT
2282 case IOV_GVAL(IOV_IFACE_POLICY): {
2283 int_val = wl_cfg80211_get_iface_policy(
2284 dhd_linux_get_primary_netdev(dhd_pub));
2285 bcopy(&int_val, arg, sizeof(int_val));
2286 break;
2287 }
2288 case IOV_SVAL(IOV_IFACE_POLICY): {
2289 bcmerror = wl_cfg80211_set_iface_policy(
2290 dhd_linux_get_primary_netdev(dhd_pub),
2291 arg, len);
2292 break;
2293 }
2294 #endif /* WL_IFACE_MGMT */
2295 #endif /* WL_CFG80211 */
2296 #endif /* WL_IFACE_MGMT_CONF */
2297 default:
2298 bcmerror = BCME_UNSUPPORTED;
2299 break;
2300 }
2301
2302 exit:
2303 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
2304 return bcmerror;
2305 }
2306
2307 /* Store the status of a connection attempt for later retrieval by an iovar */
2308 void
2309 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
2310 {
2311 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
2312 * because an encryption/rsn mismatch results in both events, and
2313 * the important information is in the WLC_E_PRUNE.
2314 */
2315 if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
2316 dhd_conn_event == WLC_E_PRUNE)) {
2317 dhd_conn_event = event;
2318 dhd_conn_status = status;
2319 dhd_conn_reason = reason;
2320 }
2321 }
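/*
 * Illustrative only: on an encryption/RSN mismatch the dongle typically
 * raises WLC_E_PRUNE first and then a failed WLC_E_SET_SSID, so the guard
 * above keeps the more informative PRUNE record, e.g.
 *
 *   dhd_store_conn_status(WLC_E_PRUNE, status, reason);
 *   dhd_store_conn_status(WLC_E_SET_SSID, WLC_E_STATUS_FAIL, 0);
 *     --> dhd_conn_event remains WLC_E_PRUNE
 *
 * where status/reason stand for whatever the PRUNE event carried.
 */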
2322
2323 bool
2324 dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
2325 {
2326 void *p;
2327 int eprec = -1; /* precedence to evict from */
2328 bool discard_oldest;
2329
2330 /* Fast case, precedence queue is not full and we are also not
2331 * exceeding total queue length
2332 */
2333 if (!pktqprec_full(q, prec) && !pktq_full(q)) {
2334 pktq_penq(q, prec, pkt);
2335 return TRUE;
2336 }
2337
2338 /* Determine precedence from which to evict packet, if any */
2339 if (pktqprec_full(q, prec))
2340 eprec = prec;
2341 else if (pktq_full(q)) {
2342 p = pktq_peek_tail(q, &eprec);
2343 ASSERT(p);
2344 if (eprec > prec || eprec < 0)
2345 return FALSE;
2346 }
2347
2348 /* Evict if needed */
2349 if (eprec >= 0) {
2350 /* Detect queueing to unconfigured precedence */
2351 ASSERT(!pktqprec_empty(q, eprec));
2352 discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
2353 if (eprec == prec && !discard_oldest)
2354 return FALSE; /* refuse newer (incoming) packet */
2355 /* Evict packet according to discard policy */
2356 p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
2357 ASSERT(p);
2358 #ifdef DHDTCPACK_SUPPRESS
2359 if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
2360 DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
2361 __FUNCTION__, __LINE__));
2362 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
2363 }
2364 #endif /* DHDTCPACK_SUPPRESS */
2365 PKTFREE(dhdp->osh, p, TRUE);
2366 }
2367
2368 /* Enqueue */
2369 p = pktq_penq(q, prec, pkt);
2370 ASSERT(p);
2371
2372 return TRUE;
2373 }
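/*
 * Usage sketch (illustrative only, not compiled): a typical caller maps the
 * packet priority to a queue precedence and lets dhd_prec_enq() handle any
 * eviction. PRIO2PREC() is assumed to be provided by the bus layer and
 * example_tx_enqueue is a hypothetical name.
 */
#if 0
static bool
example_tx_enqueue(dhd_pub_t *dhdp, struct pktq *txq, void *pkt)
{
	int prec = PRIO2PREC(PKTPRIO(pkt) & PRIOMASK);

	/* FALSE means the packet was refused and the caller must free it */
	return dhd_prec_enq(dhdp, txq, pkt, prec);
}
#endif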
2374
2375 /*
2376 * Drop the appropriate packets from the queue (see the usage sketch after the function):
2377 * - If any packet in the queue is non-fragmented, drop only the first non-fragmented packet.
2378 * - If all packets in the queue are fragmented, find and drop one complete set of fragments.
2379 * - If neither of the above matches, drop the first packet anyway.
2380 */
2381 bool
2382 dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
2383 {
2384 struct pktq_prec *q = NULL;
2385 void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
2386 pkt_frag_t frag_info;
2387
2388 ASSERT(dhdp && pq);
2389 ASSERT(prec >= 0 && prec < pq->num_prec);
2390
2391 q = &pq->q[prec];
2392 p = q->head;
2393
2394 if (p == NULL)
2395 return FALSE;
2396
2397 while (p) {
2398 frag_info = pkt_frag_info(dhdp->osh, p);
2399 if (frag_info == DHD_PKT_FRAG_NONE) {
2400 break;
2401 } else if (frag_info == DHD_PKT_FRAG_FIRST) {
2402 if (first) {
2403 /* No last frag pkt, use prev as last */
2404 last = prev;
2405 break;
2406 } else {
2407 first = p;
2408 prev_first = prev;
2409 }
2410 } else if (frag_info == DHD_PKT_FRAG_LAST) {
2411 if (first) {
2412 last = p;
2413 break;
2414 }
2415 }
2416
2417 prev = p;
2418 p = PKTLINK(p);
2419 }
2420
2421 if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
2422 /* No matching packets found; fall back to the oldest */
2423 prev = NULL;
2424 p = q->head;
2425 frag_info = 0;
2426 }
2427
2428 if (frag_info == DHD_PKT_FRAG_NONE) {
2429 first = last = p;
2430 prev_first = prev;
2431 }
2432
2433 p = first;
2434 while (p) {
2435 next = PKTLINK(p);
2436 q->n_pkts--;
2437 pq->n_pkts_tot--;
2438
2439 #ifdef WL_TXQ_STALL
2440 q->dequeue_count++;
2441 #endif // endif
2442
2443 PKTSETLINK(p, NULL);
2444
2445 if (fn)
2446 fn(dhdp, prec, p, TRUE);
2447
2448 if (p == last)
2449 break;
2450
2451 p = next;
2452 }
2453
2454 if (prev_first == NULL) {
2455 if ((q->head = next) == NULL)
2456 q->tail = NULL;
2457 } else {
2458 PKTSETLINK(prev_first, next);
2459 if (!next)
2460 q->tail = prev_first;
2461 }
2462
2463 return TRUE;
2464 }
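/*
 * Usage sketch (illustrative only, not compiled) for dhd_prec_drop_pkts():
 * the caller supplies a drop callback whose signature mirrors the
 * fn(dhdp, prec, p, TRUE) call above. example_drop_cb and example_make_room
 * are hypothetical names.
 */
#if 0
static void
example_drop_cb(dhd_pub_t *dhdp, int prec, void *p, bool in_queue)
{
	BCM_REFERENCE(prec);
	BCM_REFERENCE(in_queue);
	PKTFREE(dhdp->osh, p, TRUE);
}

static bool
example_make_room(dhd_pub_t *dhdp, struct pktq *pq, int prec)
{
	/* Drops one non-fragmented packet, one whole fragment set, or the
	 * oldest packet as a last resort (see the policy comment above).
	 */
	return dhd_prec_drop_pkts(dhdp, pq, prec, example_drop_cb);
}
#endif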
2465
2466 static int
2467 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
2468 void *params, int plen, void *arg, int len, bool set)
2469 {
2470 int bcmerror = 0;
2471 int val_size;
2472 const bcm_iovar_t *vi = NULL;
2473 uint32 actionid;
2474
2475 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2476
2477 ASSERT(name);
2478 ASSERT(len >= 0);
2479
2480 /* Get MUST have return space */
2481 ASSERT(set || (arg && len));
2482
2483 /* Set does NOT take qualifiers */
2484 ASSERT(!set || (!params && !plen));
2485
2486 if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
2487 bcmerror = BCME_UNSUPPORTED;
2488 goto exit;
2489 }
2490
2491 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
2492 name, (set ? "set" : "get"), len, plen));
2493
2494 /* set up 'params' pointer in case this is a set command so that
2495 * the convenience int and bool code can be common to set and get
2496 */
2497 if (params == NULL) {
2498 params = arg;
2499 plen = len;
2500 }
2501
2502 if (vi->type == IOVT_VOID)
2503 val_size = 0;
2504 else if (vi->type == IOVT_BUFFER)
2505 val_size = len;
2506 else
2507 /* all other types are integer sized */
2508 val_size = sizeof(int);
2509
2510 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
2511
2512 bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
2513
2514 exit:
2515 return bcmerror;
2516 }
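/*
 * Usage sketch (illustrative only, not compiled): reading and writing an
 * integer-sized iovar through the generic table. "wdtick" is assumed to be
 * the table name registered for IOV_WDTICK; example_get_wdtick and
 * example_set_wdtick are hypothetical helpers.
 */
#if 0
static int
example_get_wdtick(dhd_pub_t *dhd_pub, uint32 *val)
{
	/* GETs must supply a return buffer (see the ASSERTs above) */
	return dhd_iovar_op(dhd_pub, "wdtick", NULL, 0, val, sizeof(*val), IOV_GET);
}

static int
example_set_wdtick(dhd_pub_t *dhd_pub, uint32 val)
{
	/* SETs pass the value in 'arg' and no separate params buffer */
	return dhd_iovar_op(dhd_pub, "wdtick", NULL, 0, &val, sizeof(val), IOV_SET);
}
#endif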
2517
2518 int
2519 dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
2520 {
2521 int bcmerror = 0;
2522 unsigned long flags;
2523
2524 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2525
2526 if (!buf) {
2527 return BCME_BADARG;
2528 }
2529
2530 dhd_os_dhdiovar_lock(dhd_pub);
2531 switch (ioc->cmd) {
2532 case DHD_GET_MAGIC:
2533 if (buflen < sizeof(int))
2534 bcmerror = BCME_BUFTOOSHORT;
2535 else
2536 *(int*)buf = DHD_IOCTL_MAGIC;
2537 break;
2538
2539 case DHD_GET_VERSION:
2540 if (buflen < sizeof(int))
2541 bcmerror = BCME_BUFTOOSHORT;
2542 else
2543 *(int*)buf = DHD_IOCTL_VERSION;
2544 break;
2545
2546 case DHD_GET_VAR:
2547 case DHD_SET_VAR:
2548 {
2549 char *arg;
2550 uint arglen;
2551
2552 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2553 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
2554 bcmstricmp((char *)buf, "devreset")) {
2555 /* On platforms like FC19 the FW download is done via IOCTL,
2556 * so do not return an error for IOCTLs fired before the FW
2557 * download has completed.
2558 */
2559 if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
2560 DHD_ERROR(("%s: returning as busstate=%d\n",
2561 __FUNCTION__, dhd_pub->busstate));
2562 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2563 dhd_os_dhdiovar_unlock(dhd_pub);
2564 return -ENODEV;
2565 }
2566 }
2567 DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
2568 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2569
2570 #ifdef DHD_PCIE_RUNTIMEPM
2571 dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
2572 #endif /* DHD_PCIE_RUNTIMEPM */
2573
2574 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2575 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
2576 /* If suspend/resume is being tested via the pcie_suspend IOVAR,
2577 * continue executing that IOVAR but return from here for other
2578 * IOVARs; pciecfgreg and devreset are also allowed to go
2579 * through.
2580 */
2581 if (bcmstricmp((char *)buf, "pcie_suspend") &&
2582 bcmstricmp((char *)buf, "pciecfgreg") &&
2583 bcmstricmp((char *)buf, "devreset") &&
2584 bcmstricmp((char *)buf, "sdio_suspend")) {
2585 DHD_ERROR(("%s: bus is in suspend(%d) "
2586 "or suspending(0x%x) state\n",
2587 __FUNCTION__, dhd_pub->busstate,
2588 dhd_pub->dhd_bus_busy_state));
2589 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2590 dhd_os_busbusy_wake(dhd_pub);
2591 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2592 dhd_os_dhdiovar_unlock(dhd_pub);
2593 return -ENODEV;
2594 }
2595 }
2596 /* During the devreset ioctl we call dhdpcie_advertise_bus_cleanup,
2597 * which waits a fixed time for all busy contexts to finish and
2598 * calls ASSERT on timeout. Since DHD_BUS_BUSY_SET_IN_DHD_IOVAR was
2599 * set for the devreset ioctl, clear the IOCTL busy state here to
2600 * avoid that ASSERT. The "devreset" ioctl is not used on production
2601 * platforms; it is only used in FC19 setups.
2602 */
2603 if (!bcmstricmp((char *)buf, "devreset") ||
2604 #ifdef BCMPCIE
2605 (dhd_bus_is_multibp_capable(dhd_pub->bus) &&
2606 !bcmstricmp((char *)buf, "dwnldstate")) ||
2607 #endif /* BCMPCIE */
2608 FALSE)
2609 {
2610 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2611 }
2612 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2613
2614 /* scan past the name to any arguments */
2615 for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
2616 ;
2617
2618 if (*arg) {
2619 bcmerror = BCME_BUFTOOSHORT;
2620 goto unlock_exit;
2621 }
2622
2623 /* account for the NUL terminator */
2624 arg++, arglen--;
2625 /* call with the appropriate arguments */
2626 if (ioc->cmd == DHD_GET_VAR) {
2627 bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
2628 buf, buflen, IOV_GET);
2629 } else {
2630 bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
2631 arg, arglen, IOV_SET);
2632 }
2633 if (bcmerror != BCME_UNSUPPORTED) {
2634 goto unlock_exit;
2635 }
2636
2637 /* not in generic table, try protocol module */
2638 if (ioc->cmd == DHD_GET_VAR) {
2639 bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
2640 arglen, buf, buflen, IOV_GET);
2641 } else {
2642 bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
2643 NULL, 0, arg, arglen, IOV_SET);
2644 }
2645 if (bcmerror != BCME_UNSUPPORTED) {
2646 goto unlock_exit;
2647 }
2648
2649 /* if still not found, try bus module */
2650 if (ioc->cmd == DHD_GET_VAR) {
2651 bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2652 arg, arglen, buf, buflen, IOV_GET);
2653 } else {
2654 bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
2655 NULL, 0, arg, arglen, IOV_SET);
2656 }
2657 if (bcmerror != BCME_UNSUPPORTED) {
2658 goto unlock_exit;
2659 }
2660
2661 }
2662 goto unlock_exit;
2663
2664 default:
2665 bcmerror = BCME_UNSUPPORTED;
2666 }
2667 dhd_os_dhdiovar_unlock(dhd_pub);
2668 return bcmerror;
2669
2670 unlock_exit:
2671 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2672 DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
2673 dhd_os_busbusy_wake(dhd_pub);
2674 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2675 dhd_os_dhdiovar_unlock(dhd_pub);
2676 return bcmerror;
2677 }
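/*
 * Illustrative only (not compiled): DHD_GET_VAR/DHD_SET_VAR buffers carry the
 * iovar name, a NUL terminator and then any argument bytes, which is the
 * layout the "scan past the name" loop in dhd_ioctl() relies on.
 * example_fill_setvar_buf is a hypothetical helper, e.g. for "wdtick":
 *
 *   buf: 'w' 'd' 't' 'i' 'c' 'k' '\0' <4-byte value>
 */
#if 0
static uint
example_fill_setvar_buf(char *buf, uint buflen, const char *name, uint32 val)
{
	uint namelen = strlen(name) + 1;	/* include the NUL */

	if (namelen + sizeof(val) > buflen)
		return 0;
	memcpy(buf, name, namelen);
	memcpy(buf + namelen, &val, sizeof(val));
	return (uint)(namelen + sizeof(val));
}
#endif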
2678
2679 #ifdef SHOW_EVENTS
2680
2681 static void
2682 wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
2683 void *raw_event_ptr, char *eventmask)
2684 {
2685 uint i, status, reason;
2686 bool group = FALSE, flush_txq = FALSE, link = FALSE;
2687 bool host_data = FALSE; /* prints event data after the case when set */
2688 const char *auth_str;
2689 const char *event_name;
2690 uchar *buf;
2691 char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
2692 uint event_type, flags, auth_type, datalen;
2693
2694 event_type = ntoh32(event->event_type);
2695 flags = ntoh16(event->flags);
2696 status = ntoh32(event->status);
2697 reason = ntoh32(event->reason);
2698 BCM_REFERENCE(reason);
2699 auth_type = ntoh32(event->auth_type);
2700 datalen = ntoh32(event->datalen);
2701
2702 /* debug dump of event messages */
2703 snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
2704
2705 event_name = bcmevent_get_name(event_type);
2706 BCM_REFERENCE(event_name);
2707
2708 if (flags & WLC_EVENT_MSG_LINK)
2709 link = TRUE;
2710 if (flags & WLC_EVENT_MSG_GROUP)
2711 group = TRUE;
2712 if (flags & WLC_EVENT_MSG_FLUSHTXQ)
2713 flush_txq = TRUE;
2714
2715 switch (event_type) {
2716 case WLC_E_START:
2717 case WLC_E_DEAUTH:
2718 case WLC_E_DISASSOC:
2719 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2720 break;
2721
2722 case WLC_E_ASSOC_IND:
2723 case WLC_E_REASSOC_IND:
2724
2725 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2726
2727 break;
2728
2729 case WLC_E_ASSOC:
2730 case WLC_E_REASSOC:
2731 if (status == WLC_E_STATUS_SUCCESS) {
2732 DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
2733 } else if (status == WLC_E_STATUS_TIMEOUT) {
2734 DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
2735 } else if (status == WLC_E_STATUS_FAIL) {
2736 DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
2737 event_name, eabuf, (int)status, (int)reason));
2738 } else {
2739 DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
2740 event_name, eabuf, (int)status));
2741 }
2742
2743 break;
2744
2745 case WLC_E_DEAUTH_IND:
2746 case WLC_E_DISASSOC_IND:
2747 DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
2748 break;
2749
2750 case WLC_E_AUTH:
2751 case WLC_E_AUTH_IND:
2752 if (auth_type == DOT11_OPEN_SYSTEM)
2753 auth_str = "Open System";
2754 else if (auth_type == DOT11_SHARED_KEY)
2755 auth_str = "Shared Key";
2756 else if (auth_type == DOT11_SAE)
2757 auth_str = "SAE";
2758 else {
2759 snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
2760 auth_str = err_msg;
2761 }
2762
2763 if (event_type == WLC_E_AUTH_IND) {
2764 DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
2765 } else if (status == WLC_E_STATUS_SUCCESS) {
2766 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
2767 event_name, eabuf, auth_str));
2768 } else if (status == WLC_E_STATUS_TIMEOUT) {
2769 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
2770 event_name, eabuf, auth_str));
2771 } else if (status == WLC_E_STATUS_FAIL) {
2772 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
2773 event_name, eabuf, auth_str, (int)status, (int)reason));
2774 } else if (status == WLC_E_STATUS_NO_ACK) {
2775 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
2776 event_name, eabuf, auth_str));
2777 } else {
2778 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
2779 event_name, eabuf, auth_str, (int)status, (int)reason));
2780 }
2781 BCM_REFERENCE(auth_str);
2782
2783 break;
2784
2785 case WLC_E_JOIN:
2786 case WLC_E_ROAM:
2787 case WLC_E_SET_SSID:
2788 if (status == WLC_E_STATUS_SUCCESS) {
2789 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2790 } else {
2791 if (status == WLC_E_STATUS_FAIL) {
2792 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
2793 } else if (status == WLC_E_STATUS_NO_NETWORKS) {
2794 DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
2795 } else {
2796 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
2797 event_name, (int)status));
2798 }
2799 }
2800 break;
2801
2802 case WLC_E_BEACON_RX:
2803 if (status == WLC_E_STATUS_SUCCESS) {
2804 DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
2805 } else if (status == WLC_E_STATUS_FAIL) {
2806 DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
2807 } else {
2808 DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
2809 }
2810 break;
2811
2812 case WLC_E_LINK:
2813 DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d\n",
2814 event_name, link?"UP":"DOWN", flags, status));
2815 BCM_REFERENCE(link);
2816 break;
2817
2818 case WLC_E_MIC_ERROR:
2819 DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
2820 event_name, eabuf, group, flush_txq));
2821 BCM_REFERENCE(group);
2822 BCM_REFERENCE(flush_txq);
2823 break;
2824
2825 case WLC_E_ICV_ERROR:
2826 case WLC_E_UNICAST_DECODE_ERROR:
2827 case WLC_E_MULTICAST_DECODE_ERROR:
2828 DHD_EVENT(("MACEVENT: %s, MAC %s\n",
2829 event_name, eabuf));
2830 break;
2831
2832 case WLC_E_TXFAIL:
2833 DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
2834 break;
2835
2836 case WLC_E_ASSOC_REQ_IE:
2837 case WLC_E_ASSOC_RESP_IE:
2838 case WLC_E_PMKID_CACHE:
2839 DHD_EVENT(("MACEVENT: %s\n", event_name));
2840 break;
2841
2842 case WLC_E_SCAN_COMPLETE:
2843 DHD_EVENT(("MACEVENT: %s\n", event_name));
2844 break;
2845
2846 case WLC_E_IND_DOS_STATUS:
2847 DHD_EVENT(("MACEVENT: %s\n", event_name));
2848 break;
2849
2850 case WLC_E_RSSI_LQM:
2851 case WLC_E_PFN_NET_FOUND:
2852 case WLC_E_PFN_NET_LOST:
2853 case WLC_E_PFN_SCAN_COMPLETE:
2854 case WLC_E_PFN_SCAN_NONE:
2855 case WLC_E_PFN_SCAN_ALLGONE:
2856 case WLC_E_PFN_GSCAN_FULL_RESULT:
2857 case WLC_E_PFN_SSID_EXT:
2858 DHD_EVENT(("PNOEVENT: %s\n", event_name));
2859 break;
2860
2861 case WLC_E_PFN_SCAN_BACKOFF:
2862 case WLC_E_PFN_BSSID_SCAN_BACKOFF:
2863 DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
2864 event_name, (int)status, (int)reason));
2865 break;
2866
2867 case WLC_E_PSK_SUP:
2868 case WLC_E_PRUNE:
2869 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
2870 event_name, (int)status, (int)reason));
2871 break;
2872
2873 #ifdef WIFI_ACT_FRAME
2874 case WLC_E_ACTION_FRAME:
2875 DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
2876 break;
2877 #endif /* WIFI_ACT_FRAME */
2878
2879 #ifdef SHOW_LOGTRACE
2880 case WLC_E_TRACE:
2881 {
2882 dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
2883 break;
2884 }
2885 #endif /* SHOW_LOGTRACE */
2886
2887 case WLC_E_RSSI:
2888 DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
2889 break;
2890
2891 case WLC_E_SERVICE_FOUND:
2892 case WLC_E_P2PO_ADD_DEVICE:
2893 case WLC_E_P2PO_DEL_DEVICE:
2894 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2895 break;
2896
2897 #ifdef BT_WIFI_HANDOBER
2898 case WLC_E_BT_WIFI_HANDOVER_REQ:
2899 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
2900 break;
2901 #endif // endif
2902
2903 case WLC_E_CCA_CHAN_QUAL:
2904 if (datalen) {
2905 cca_chan_qual_event_t *cca_event = (cca_chan_qual_event_t *)event_data;
2906 if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) {
2907 cca_only_chan_qual_event_t *cca_only_event =
2908 (cca_only_chan_qual_event_t *)cca_event;
2909 BCM_REFERENCE(cca_only_event);
2910 DHD_EVENT((
2911 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2912 " channel 0x%02x\n",
2913 event_name, event_type, eabuf, (int)status,
2914 (int)reason, (int)auth_type, cca_event->chanspec));
2915 DHD_EVENT((
2916 "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
2917 " ts 0x%08x)\n",
2918 cca_only_event->cca_busy_ext.duration,
2919 cca_only_event->cca_busy_ext.congest_ibss,
2920 cca_only_event->cca_busy_ext.congest_obss,
2921 cca_only_event->cca_busy_ext.interference,
2922 cca_only_event->cca_busy_ext.timestamp));
2923 DHD_EVENT((
2924 "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
2925 cca_only_event->cca_busy_nopm.duration,
2926 cca_only_event->cca_busy_nopm.congest_ibss,
2927 cca_only_event->cca_busy_nopm.congest_obss,
2928 cca_only_event->cca_busy_nopm.interference));
2929 DHD_EVENT((
2930 "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
2931 cca_only_event->cca_busy_pm.duration,
2932 cca_only_event->cca_busy_pm.congest_ibss,
2933 cca_only_event->cca_busy_pm.congest_obss,
2934 cca_only_event->cca_busy_pm.interference));
2935 } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
2936 DHD_EVENT((
2937 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2938 " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
2939 " ts 0x%08x)\n",
2940 event_name, event_type, eabuf, (int)status,
2941 (int)reason, (int)auth_type, cca_event->chanspec,
2942 cca_event->cca_busy_ext.duration,
2943 cca_event->cca_busy_ext.congest_ibss,
2944 cca_event->cca_busy_ext.congest_obss,
2945 cca_event->cca_busy_ext.interference,
2946 cca_event->cca_busy_ext.timestamp));
2947 } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
2948 DHD_EVENT((
2949 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2950 " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
2951 event_name, event_type, eabuf, (int)status,
2952 (int)reason, (int)auth_type, cca_event->chanspec,
2953 cca_event->cca_busy.duration,
2954 cca_event->cca_busy.congest,
2955 cca_event->cca_busy.timestamp));
2956 } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
2957 (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
2958 DHD_EVENT((
2959 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2960 " channel 0x%02x (NF[%d] %ddB)\n",
2961 event_name, event_type, eabuf, (int)status,
2962 (int)reason, (int)auth_type, cca_event->chanspec,
2963 cca_event->id, cca_event->noise));
2964 } else {
2965 DHD_EVENT((
2966 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
2967 " channel 0x%02x (unknown ID %d)\n",
2968 event_name, event_type, eabuf, (int)status,
2969 (int)reason, (int)auth_type, cca_event->chanspec,
2970 cca_event->id));
2971 }
2972 }
2973 break;
2974 case WLC_E_ESCAN_RESULT:
2975 {
2976 wl_escan_result_v2_t *escan_result =
2977 (wl_escan_result_v2_t *)event_data;
2978 BCM_REFERENCE(escan_result);
2979 #ifdef OEM_ANDROID
2980 if ((status == WLC_E_STATUS_SUCCESS) || (status == WLC_E_STATUS_ABORT)) {
2981 DHD_EVENT(("MACEVENT: %s %d, status %d sync-id %u\n",
2982 event_name, event_type, (int)status,
2983 dtoh16(escan_result->sync_id)));
2984 } else {
2985 DHD_TRACE(("MACEVENT: %s %d, MAC %s, status %d \n",
2986 event_name, event_type, eabuf, (int)status));
2987 }
2988 #else
2989 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
2990 event_name, event_type, eabuf, (int)status, dtoh16(escan_result->sync_id)));
2991 #endif // endif
2992
2993 break;
2994 }
2995 case WLC_E_IF:
2996 {
2997 struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
2998 BCM_REFERENCE(ifevent);
2999
3000 DHD_EVENT(("MACEVENT: %s, opcode:0x%x ifidx:%d role:%d\n",
3001 event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
3002 break;
3003 }
3004 #ifdef SHOW_LOGTRACE
3005 case WLC_E_MSCH:
3006 {
3007 wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
3008 break;
3009 }
3010 #endif /* SHOW_LOGTRACE */
3011
3012 case WLC_E_PSK_AUTH:
3013 DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
3014 event_name, eabuf, status, reason));
3015 break;
3016 case WLC_E_AGGR_EVENT:
3017 {
3018 event_aggr_data_t *aggrbuf = event_data;
3019 int j = 0, len = 0;
3020 uint8 *data = aggrbuf->data;
3021 DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
3022 event_name, aggrbuf->num_events, aggrbuf->len));
3023 for (j = 0; j < aggrbuf->num_events; j++)
3024 {
3025 wl_event_msg_t * sub_event = (wl_event_msg_t *)data;
3026 if (len > aggrbuf->len) {
3027 DHD_ERROR(("%s: Aggr events corrupted!",
3028 __FUNCTION__));
3029 break;
3030 }
3031 DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
3032 len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
3033 sizeof(wl_event_msg_t)), sizeof(uint64));
3034 buf = (uchar *)(data + sizeof(wl_event_msg_t));
3035 BCM_REFERENCE(buf);
3036 DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
3037 for (i = 0; i < ntoh32(sub_event->datalen); i++) {
3038 DHD_EVENT((" 0x%02x ", buf[i]));
3039 }
3040 data = aggrbuf->data + len;
3041 }
3042 DHD_EVENT(("\n"));
3043 }
3044 break;
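/* Illustrative note for the WLC_E_AGGR_EVENT case above: each sub-event is a
 * wl_event_msg_t header followed by its payload, and the next sub-event
 * starts at the previous offset rounded up with
 * ALIGN_SIZE(datalen + sizeof(wl_event_msg_t), sizeof(uint64)).
 */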
3045 case WLC_E_NAN_CRITICAL:
3046 {
3047 DHD_LOG_MEM(("MACEVENT: %s, type:%d\n", event_name, reason));
3048 break;
3049 }
3050 case WLC_E_NAN_NON_CRITICAL:
3051 {
3052 DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
3053 break;
3054 }
3055 case WLC_E_PROXD:
3056 {
3057 wl_proxd_event_t *proxd = (wl_proxd_event_t*)event_data;
3058 DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
3059 event_name, proxd->type, reason));
3060 break;
3061 }
3062 case WLC_E_RPSNOA:
3063 {
3064 rpsnoa_stats_t *stat = event_data;
3065 if (datalen == sizeof(*stat)) {
3066 DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
3067 (stat->band == WLC_BAND_2G) ? "2G":"5G",
3068 stat->state, stat->last_pps));
3069 }
3070 break;
3071 }
3072 case WLC_E_PHY_CAL:
3073 {
3074 DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
3075 break;
3076 }
3077 case WLC_E_WA_LQM:
3078 {
3079 wl_event_wa_lqm_t *event_wa_lqm = (wl_event_wa_lqm_t *)event_data;
3080 bcm_xtlv_t *subevent;
3081 wl_event_wa_lqm_basic_t *elqm_basic;
3082
3083 if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
3084 (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
3085 DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
3086 event_name, event_wa_lqm->ver, event_wa_lqm->len));
3087 break;
3088 }
3089
3090 subevent = (bcm_xtlv_t *)event_wa_lqm->subevent;
3091 if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
3092 (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
3093 DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
3094 event_name, subevent->id, subevent->len));
3095 break;
3096 }
3097
3098 elqm_basic = (wl_event_wa_lqm_basic_t *)subevent->data;
3099 BCM_REFERENCE(elqm_basic);
3100 DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
3101 event_name, elqm_basic->rssi, elqm_basic->snr,
3102 elqm_basic->tx_rate, elqm_basic->rx_rate));
3103 break;
3104 }
3105 default:
3106 DHD_INFO(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
3107 event_name, event_type, eabuf, (int)status, (int)reason,
3108 (int)auth_type));
3109 break;
3110 }
3111
3112 /* show any appended data if message level is set to bytes or host_data is set */
3113 if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
3114 buf = (uchar *) event_data;
3115 BCM_REFERENCE(buf);
3116 DHD_EVENT((" data (%d) : ", datalen));
3117 for (i = 0; i < datalen; i++) {
3118 DHD_EVENT((" 0x%02x ", buf[i]));
3119 }
3120 DHD_EVENT(("\n"));
3121 }
3122 } /* wl_show_host_event */
3123 #endif /* SHOW_EVENTS */
3124
3125 #ifdef DNGL_EVENT_SUPPORT
3126 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
3127 int
3128 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3129 {
3130 bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
3131
3132 dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
3133 return BCME_OK;
3134 }
3135
3136 #ifdef PARSE_DONGLE_HOST_EVENT
3137 typedef struct hck_id_to_str_s {
3138 uint32 id;
3139 char *name;
3140 } hck_id_to_str_t;
3141
3142 hck_id_to_str_t hck_sw_id_to_str[] = {
3143 {WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
3144 {WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
3145 {WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
3146 {WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
3147 {WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
3148 {WL_HC_DD_PHY, "WL_HC_DD_PHY"},
3149 {WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
3150 {WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
3151 {0, NULL}
3152 };
3153
3154 hck_id_to_str_t hck_pcie_module_to_str[] = {
3155 {HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
3156 {HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
3157 {HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
3158 {HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
3159 {HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
3160 {HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
3161 {HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
3162 {HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
3163 {0, NULL}
3164 };
3165
3166 static void
3167 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
3168 {
3169 while (hck->name != NULL) {
3170 if (hck->id == id) {
3171 DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
3172 return;
3173 }
3174 hck++;
3175 }
3176 }
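/* Illustrative only: both tables above are scanned the same way, e.g.
 *
 *   dhd_print_dongle_hck_id(HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND,
 *           hck_pcie_module_to_str);
 *
 * prints "DONGLE_HCK_EVENT: PCIEDEV_D3ACK_STALL_IND"; unknown ids are
 * silently ignored.
 */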
3177 #endif /* PARSE_DONGLE_HOST_EVENT */
3178
3179 void
3180 dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
3181 bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
3182 {
3183 uint8 *p = (uint8 *)(event + 1);
3184 uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
3185 uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
3186 uint16 version = ntoh16_ua((void *)&dngl_event->version);
3187
3188 DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
3189 if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
3190 return;
3191 }
3192 if (version != BCM_DNGL_EVENT_MSG_VERSION) {
3193 DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
3194 version, BCM_DNGL_EVENT_MSG_VERSION));
3195 return;
3196 }
3197 switch (type) {
3198 case DNGL_E_SOCRAM_IND:
3199 {
3200 bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
3201 uint16 tag = ltoh32(socramind_ptr->tag);
3202 uint16 taglen = ltoh32(socramind_ptr->length);
3203 p = (uint8 *)socramind_ptr->value;
3204 DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
3205 switch (tag) {
3206 case SOCRAM_IND_ASSERT_TAG:
3207 {
3208 /*
3209 * The payload consists of:
3210 * a NUL-terminated function name padded to a 32-bit boundary,
3211 * the line number (32 bits), and
3212 * the caller address (32 bits).
3213 */
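/* Illustrative only: for an assert in foo_bar() at line 123 called from
 * address 0x0018ab00 the payload would be
 *   "foo_bar\0"  (8 bytes, already 32-bit aligned)
 *   0x0000007b   (line number)
 *   0x0018ab00   (caller address)
 * so datalen must be at least 16 bytes in this example.
 */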
3214 char *fnname = (char *)p;
3215 if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
3216 sizeof(uint32) * 2)) {
3217 DHD_ERROR(("Wrong length:%d\n", datalen));
3218 return;
3219 }
3220 DHD_EVENT(("ASSRT Function:%s ", p));
3221 p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
3222 DHD_EVENT(("Line:%d ", *(uint32 *)p));
3223 p += sizeof(uint32);
3224 DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
3225 #ifdef PARSE_DONGLE_HOST_EVENT
3226 DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
3227 #endif /* PARSE_DONGLE_HOST_EVENT */
3228 break;
3229 }
3230 case SOCRAM_IND_TAG_HEALTH_CHECK:
3231 {
3232 bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
3233 DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
3234 ltoh32(dngl_hc->top_module_tag),
3235 ltoh32(dngl_hc->top_module_len),
3236 datalen));
3237 if (DHD_EVENT_ON()) {
3238 prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
3239 + BCM_XTLV_HDR_SIZE, datalen));
3240 }
3241 #ifdef DHD_LOG_DUMP
3242 memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
3243 memcpy(dhdp->health_chk_event_data, p,
3244 MIN(ltoh32(dngl_hc->top_module_len),
3245 HEALTH_CHK_BUF_SIZE));
3246 #endif /* DHD_LOG_DUMP */
3247 p = (uint8 *)dngl_hc->value;
3248
3249 switch (ltoh32(dngl_hc->top_module_tag)) {
3250 case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
3251 {
3252 bcm_dngl_pcie_hc_t *pcie_hc;
3253 pcie_hc = (bcm_dngl_pcie_hc_t *)p;
3254 BCM_REFERENCE(pcie_hc);
3255 if (ltoh32(dngl_hc->top_module_len) <
3256 sizeof(bcm_dngl_pcie_hc_t)) {
3257 DHD_ERROR(("Wrong length:%d\n",
3258 ltoh32(dngl_hc->top_module_len)));
3259 return;
3260 }
3261 DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
3262 " control:0x%x\n",
3263 ltoh32(pcie_hc->version),
3264 ltoh32(pcie_hc->pcie_err_ind_type),
3265 ltoh32(pcie_hc->pcie_flag),
3266 ltoh32(pcie_hc->pcie_control_reg)));
3267 #ifdef PARSE_DONGLE_HOST_EVENT
3268 dhd_print_dongle_hck_id(
3269 ltoh32(pcie_hc->pcie_err_ind_type),
3270 hck_pcie_module_to_str);
3271 #endif /* PARSE_DONGLE_HOST_EVENT */
3272 break;
3273 }
3274 #ifdef HCHK_COMMON_SW_EVENT
3275 case HCHK_SW_ENTITY_WL_PRIMARY:
3276 case HCHK_SW_ENTITY_WL_SECONDARY:
3277 {
3278 bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;
3279
3280 if (ltoh32(dngl_hc->top_module_len) <
3281 sizeof(bcm_xtlv_t)) {
3282 DHD_ERROR(("WL SW HC Wrong length:%d\n",
3283 ltoh32(dngl_hc->top_module_len)));
3284 return;
3285 }
3286 BCM_REFERENCE(wl_hc);
3287 DHD_EVENT(("WL SW HC type %d len %d\n",
3288 ltoh16(wl_hc->id), ltoh16(wl_hc->len)));
3289 #ifdef PARSE_DONGLE_HOST_EVENT
3290 dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
3291 hck_sw_id_to_str);
3292 #endif /* PARSE_DONGLE_HOST_EVENT */
3293 break;
3294
3295 }
3296 #endif /* HCHK_COMMON_SW_EVENT */
3297 default:
3298 {
3299 DHD_ERROR(("%s:Unknown module TAG:%d\n",
3300 __FUNCTION__,
3301 ltoh32(dngl_hc->top_module_tag)));
3302 break;
3303 }
3304 }
3305 break;
3306 }
3307 default:
3308 DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
3309 if (p && DHD_EVENT_ON()) {
3310 prhex("SOCRAMIND", p, taglen);
3311 }
3312 break;
3313 }
3314 break;
3315 }
3316 default:
3317 DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
3318 if (p && DHD_EVENT_ON()) {
3319 prhex("SOCRAMIND", p, datalen);
3320 }
3321 break;
3322 }
3323 #ifdef DHD_FW_COREDUMP
3324 if (dhdp->memdump_enabled) {
3325 dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
3326 if (dhd_socram_dump(dhdp->bus)) {
3327 DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
3328 }
3329 }
3330 #else
3331 dhd_dbg_send_urgent_evt(dhdp, p, datalen);
3332 #endif /* DHD_FW_COREDUMP */
3333 }
3334
3335 #endif /* DNGL_EVENT_SUPPORT */
3336
3337 /* Stub for now. This will become a real function once the shim
3338 * is integrated into Android, Linux, etc.
3339 */
3340 int
3341 wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
3342 {
3343 return BCME_OK;
3344 }
3345
3346 int
3347 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
3348 uint pktlen, void **data_ptr, void *raw_event)
3349 {
3350 wl_evt_pport_t evt_pport;
3351 wl_event_msg_t event;
3352 bcm_event_msg_u_t evu;
3353 int ret;
3354
3355 /* make sure it is a BRCM event pkt and record event data */
3356 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3357 if (ret != BCME_OK) {
3358 return ret;
3359 }
3360
3361 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
3362
3363 /* convert event from network order to host order */
3364 wl_event_to_host_order(&event);
3365
3366 /* record event params to evt_pport */
3367 evt_pport.dhd_pub = dhd_pub;
3368 evt_pport.ifidx = ifidx;
3369 evt_pport.pktdata = pktdata;
3370 evt_pport.data_ptr = data_ptr;
3371 evt_pport.raw_event = raw_event;
3372 evt_pport.data_len = pktlen;
3373
3374 ret = wl_event_process_default(&event, &evt_pport);
3375
3376 return ret;
3377 } /* wl_event_process */
3378
3379 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
3380 int
3381 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
3382 {
3383 int ret;
3384
3385 ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
3386 if (ret != BCME_OK) {
3387 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
3388 __FUNCTION__, ret));
3389 }
3390
3391 return ret;
3392 }
3393
3394 int
3395 wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3396 wl_event_msg_t *event, void **data_ptr, void *raw_event)
3397 {
3398 bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
3399 bcm_event_msg_u_t evu;
3400 uint8 *event_data;
3401 uint32 type, status, datalen, reason;
3402 uint16 flags;
3403 uint evlen;
3404 int ret;
3405 uint16 usr_subtype;
3406 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3407 dhd_if_t *ifp = NULL;
3408 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3409
3410 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3411 if (ret != BCME_OK) {
3412 return ret;
3413 }
3414
3415 usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
3416 switch (usr_subtype) {
3417 case BCMILCP_BCM_SUBTYPE_EVENT:
3418 memcpy(event, &evu.event, sizeof(wl_event_msg_t));
3419 *data_ptr = &pvt_data[1];
3420 break;
3421 case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
3422 #ifdef DNGL_EVENT_SUPPORT
3423 /* If it is a DNGL event, process it first */
3424 if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
3425 /*
3426 * Return an error on purpose to prevent the DNGL event from
3427 * being processed as a BRCM event.
3428 */
3429 return BCME_ERROR;
3430 }
3431 #endif /* DNGL_EVENT_SUPPORT */
3432 return BCME_NOTFOUND;
3433 default:
3434 return BCME_NOTFOUND;
3435 }
3436
3437 /* start wl_event_msg process */
3438 event_data = *data_ptr;
3439 type = ntoh32_ua((void *)&event->event_type);
3440 flags = ntoh16_ua((void *)&event->flags);
3441 status = ntoh32_ua((void *)&event->status);
3442 reason = ntoh32_ua((void *)&event->reason);
3443 datalen = ntoh32_ua((void *)&event->datalen);
3444 evlen = datalen + sizeof(bcm_event_t);
3445
3446 switch (type) {
3447 #ifdef PROP_TXSTATUS
3448 case WLC_E_FIFO_CREDIT_MAP:
3449 dhd_wlfc_enable(dhd_pub);
3450 dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
3451 WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
3452 "(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
3453 event_data[2],
3454 event_data[3], event_data[4], event_data[5]));
3455 break;
3456
3457 case WLC_E_BCMC_CREDIT_SUPPORT:
3458 dhd_wlfc_BCMCCredit_support_event(dhd_pub);
3459 break;
3460 #ifdef LIMIT_BORROW
3461 case WLC_E_ALLOW_CREDIT_BORROW:
3462 dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
3463 break;
3464 #endif /* LIMIT_BORROW */
3465 #endif /* PROP_TXSTATUS */
3466
3467 case WLC_E_ULP:
3468 #ifdef DHD_ULP
3469 {
3470 wl_ulp_event_t *ulp_evt = (wl_ulp_event_t *)event_data;
3471
3472 /* Flush and disable console messages */
3473 if (ulp_evt->ulp_dongle_action == WL_ULP_DISABLE_CONSOLE) {
3474 #ifdef DHD_ULP_NOT_USED
3475 dhd_bus_ulp_disable_console(dhd_pub);
3476 #endif /* DHD_ULP_NOT_USED */
3477 }
3478 if (ulp_evt->ulp_dongle_action == WL_ULP_UCODE_DOWNLOAD) {
3479 dhd_bus_ucode_download(dhd_pub->bus);
3480 }
3481 }
3482 #endif /* DHD_ULP */
3483 break;
3484 case WLC_E_TDLS_PEER_EVENT:
3485 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
3486 {
3487 dhd_tdls_event_handler(dhd_pub, event);
3488 }
3489 #endif // endif
3490 break;
3491
3492 case WLC_E_IF:
3493 {
3494 struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;
3495
3496 /* Ignore the event if NOIF is set */
3497 if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
3498 DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
3499 return (BCME_UNSUPPORTED);
3500 }
3501 #ifdef PCIE_FULL_DONGLE
3502 dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
3503 ifevent->opcode, ifevent->role);
3504 #endif // endif
3505 #ifdef PROP_TXSTATUS
3506 {
3507 uint8* ea = &event->addr.octet[0];
3508 WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n",
3509 ifevent->ifidx,
3510 ((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
3511 ((ifevent->role == 0) ? "STA":"AP "),
3512 MAC2STRDBG(ea)));
3513 (void)ea;
3514
3515 if (ifevent->opcode == WLC_E_IF_CHANGE)
3516 dhd_wlfc_interface_event(dhd_pub,
3517 eWLFC_MAC_ENTRY_ACTION_UPDATE,
3518 ifevent->ifidx, ifevent->role, ea);
3519 else
3520 dhd_wlfc_interface_event(dhd_pub,
3521 ((ifevent->opcode == WLC_E_IF_ADD) ?
3522 eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
3523 ifevent->ifidx, ifevent->role, ea);
3524
3525 /* dhd has already created the default interface (ifidx 0) */
3526 if (ifevent->ifidx == 0)
3527 break;
3528 }
3529 #endif /* PROP_TXSTATUS */
3530
3531 if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
3532 if (ifevent->opcode == WLC_E_IF_ADD) {
3533 if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
3534 event->addr.octet)) {
3535
3536 DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
3537 __FUNCTION__, ifevent->ifidx, event->ifname));
3538 return (BCME_ERROR);
3539 }
3540 } else if (ifevent->opcode == WLC_E_IF_DEL) {
3541 #ifdef PCIE_FULL_DONGLE
3542 /* Delete flowrings unconditionally for i/f delete */
3543 dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3544 event->ifname));
3545 #endif /* PCIE_FULL_DONGLE */
3546 dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
3547 event->addr.octet);
3548 } else if (ifevent->opcode == WLC_E_IF_CHANGE) {
3549 #ifdef WL_CFG80211
3550 dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
3551 event->addr.octet);
3552 #endif /* WL_CFG80211 */
3553 }
3554 } else {
3555 #if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
3556 DHD_INFO(("%s: Invalid ifidx %d for %s\n",
3557 __FUNCTION__, ifevent->ifidx, event->ifname));
3558 #endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
3559 }
3560 /* send up the if event: btamp user needs it */
3561 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3562 /* push up to external supp/auth */
3563 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3564 break;
3565 }
3566
3567 case WLC_E_NDIS_LINK:
3568 break;
3569 case WLC_E_PFN_NET_FOUND:
3570 case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
3571 case WLC_E_PFN_NET_LOST:
3572 break;
3573 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
3574 case WLC_E_PFN_BSSID_NET_FOUND:
3575 case WLC_E_PFN_BEST_BATCHING:
3576 dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
3577 break;
3578 #endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
3579 #if defined(RTT_SUPPORT)
3580 case WLC_E_PROXD:
3581 #ifndef WL_CFG80211
3582 dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
3583 #endif /* WL_CFG80211 */
3584 break;
3585 #endif /* RTT_SUPPORT */
3586 /* These are what external supplicant/authenticator wants */
3587 case WLC_E_ASSOC_IND:
3588 case WLC_E_AUTH_IND:
3589 case WLC_E_REASSOC_IND:
3590 dhd_findadd_sta(dhd_pub,
3591 dhd_ifname2idx(dhd_pub->info, event->ifname),
3592 &event->addr.octet);
3593 break;
3594 #if defined(DHD_FW_COREDUMP)
3595 case WLC_E_PSM_WATCHDOG:
3596 DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
3597 if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
3598 DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
3599 }
3600 break;
3601 #endif // endif
3602 case WLC_E_NATOE_NFCT:
3603 #ifdef WL_NATOE
3604 DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
3605 dhd_natoe_ct_event(dhd_pub, event_data);
3606 #endif /* WL_NATOE */
3607 break;
3608 #ifdef WL_NAN
3609 case WLC_E_SLOTTED_BSS_PEER_OP:
3610 DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
3611 "" MACDBG ", status = %d\n",
3612 __FUNCTION__, MAC2STRDBG(event->addr.octet), status));
3613 if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
3614 dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3615 event->ifname), &event->addr.octet);
3616 } else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
3617 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3618 BCM_REFERENCE(ifindex);
3619 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3620 event->ifname), &event->addr.octet);
3621 #ifdef PCIE_FULL_DONGLE
3622 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3623 (char *)&event->addr.octet[0]);
3624 #endif // endif
3625 } else {
3626 DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
3627 __FUNCTION__, status));
3628 }
3629 break;
3630 #endif /* WL_NAN */
3631 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3632 case WLC_E_REASSOC:
3633 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3634
3635 if (!ifp)
3636 break;
3637
3638 /* Consider STA role only since roam is disabled on P2P GC.
3639 * Drop EAPOL M1 frame only if roam is done to same BSS.
3640 */
3641 if ((status == WLC_E_STATUS_SUCCESS) &&
3642 IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
3643 wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
3644 ifp->recv_reassoc_evt = TRUE;
3645 }
3646 break;
3647 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3648 case WLC_E_LINK:
3649 #ifdef PCIE_FULL_DONGLE
3650 if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3651 event->ifname), (uint8)flags) != BCME_OK) {
3652 DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
3653 __FUNCTION__));
3654 break;
3655 }
3656 if (!flags) {
3657 DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
3658 __FUNCTION__));
3659 /* Delete all sta and flowrings */
3660 dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
3661 dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
3662 event->ifname));
3663 }
3664 #endif /* PCIE_FULL_DONGLE */
3665 /* fall through */
3666 case WLC_E_DEAUTH:
3667 case WLC_E_DEAUTH_IND:
3668 case WLC_E_DISASSOC:
3669 case WLC_E_DISASSOC_IND:
3670 #ifdef PCIE_FULL_DONGLE
3671 if (type != WLC_E_LINK) {
3672 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
3673 uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
3674 uint8 del_sta = TRUE;
3675 #ifdef WL_CFG80211
3676 if (role == WLC_E_IF_ROLE_STA &&
3677 !wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
3678 !wl_cfg80211_is_event_from_connected_bssid(
3679 dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
3680 del_sta = FALSE;
3681 }
3682 #endif /* WL_CFG80211 */
3683 DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
3684 __FUNCTION__, type, flags, status, role, del_sta));
3685
3686 if (del_sta) {
3687 DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
3688 __FUNCTION__, MAC2STRDBG(event->addr.octet)));
3689
3690 dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
3691 event->ifname), &event->addr.octet);
3692 /* Delete all flowrings for STA and P2P Client */
3693 if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
3694 dhd_flow_rings_delete(dhd_pub, ifindex);
3695 } else {
3696 dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
3697 (char *)&event->addr.octet[0]);
3698 }
3699 }
3700 }
3701 #endif /* PCIE_FULL_DONGLE */
3702 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
3703 /* fall through */
3704 ifp = dhd_get_ifp(dhd_pub, event->ifidx);
3705 if (ifp) {
3706 ifp->recv_reassoc_evt = FALSE;
3707 ifp->post_roam_evt = FALSE;
3708 }
3709 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
3710 /* fall through */
3711 default:
3712 *ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
3713 /* push up to external supp/auth */
3714 dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
3715 DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
3716 __FUNCTION__, type, flags, status));
3717 BCM_REFERENCE(flags);
3718 BCM_REFERENCE(status);
3719 BCM_REFERENCE(reason);
3720
3721 break;
3722 }
3723 #if defined(STBAP)
3724 /* For routers, EAPD will be working on these events.
3725 * Overwrite the interface name so that the event is pushed
3726 * to the host with its registered interface name
3727 */
3728 memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
3729 #endif // endif
3730
3731 #ifdef DHD_STATUS_LOGGING
3732 if (dhd_pub->statlog) {
3733 dhd_statlog_process_event(dhd_pub, type, *ifidx,
3734 status, reason, flags);
3735 }
3736 #endif /* DHD_STATUS_LOGGING */
3737
3738 #ifdef SHOW_EVENTS
3739 if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
3740 wl_show_host_event(dhd_pub, event,
3741 (void *)event_data, raw_event, dhd_pub->enable_log);
3742 }
3743 #endif /* SHOW_EVENTS */
3744
3745 return (BCME_OK);
3746 } /* wl_process_host_event */
3747
3748 int
3749 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
3750 wl_event_msg_t *event, void **data_ptr, void *raw_event)
3751 {
3752 return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
3753 raw_event);
3754 }
3755
3756 void
3757 dhd_print_buf(void *pbuf, int len, int bytes_per_line)
3758 {
3759 #ifdef DHD_DEBUG
3760 int i, j = 0;
3761 unsigned char *buf = pbuf;
3762
3763 if (bytes_per_line == 0) {
3764 bytes_per_line = len;
3765 }
3766
3767 for (i = 0; i < len; i++) {
3768 printf("%2.2x", *buf++);
3769 j++;
3770 if (j == bytes_per_line) {
3771 printf("\n");
3772 j = 0;
3773 } else {
3774 printf(":");
3775 }
3776 }
3777 printf("\n");
3778 #endif /* DHD_DEBUG */
3779 }
3780 #ifndef strtoul
3781 #define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
3782 #endif // endif
3783
3784 #if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
3785 /* Convert user's input in hex pattern to byte-size mask */
3786 int
3787 wl_pattern_atoh(char *src, char *dst)
3788 {
3789 int i;
3790 if (strncmp(src, "0x", 2) != 0 &&
3791 strncmp(src, "0X", 2) != 0) {
3792 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3793 return -1;
3794 }
3795 src = src + 2; /* Skip past 0x */
3796 if (strlen(src) % 2 != 0) {
3797 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3798 return -1;
3799 }
3800 for (i = 0; *src != '\0'; i++) {
3801 char num[3];
3802 bcm_strncpy_s(num, sizeof(num), src, 2);
3803 num[2] = '\0';
3804 dst[i] = (uint8)strtoul(num, NULL, 16);
3805 src += 2;
3806 }
3807 return i;
3808 }
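/*
 * Illustrative sketch (not called anywhere in the driver): wl_pattern_atoh()
 * turns an ASCII hex string into raw bytes and returns the byte count. The
 * destination array below is a hypothetical caller-sized buffer; the function
 * itself does no bounds checking, which is why pattern_atoh_len() follows.
 *
 *	char mask[4];
 *	int n = wl_pattern_atoh("0x00FF", mask);
 *	// n == 2, mask[0] == 0x00, mask[1] == 0xFF
 *	// "00FF" (missing 0x prefix) or "0xFFF" (odd length) would return -1
 */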
3809
3810 int
3811 pattern_atoh_len(char *src, char *dst, int len)
3812 {
3813 int i;
3814 if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
3815 strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
3816 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
3817 return -1;
3818 }
3819 src = src + HD_PREFIX_SIZE; /* Skip past 0x */
3820 if (strlen(src) % HD_BYTE_SIZE != 0) {
3821 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
3822 return -1;
3823 }
3824 for (i = 0; *src != '\0'; i++) {
3825 char num[HD_BYTE_SIZE + 1];
3826
3827 if (i > len - 1) {
3828 DHD_ERROR(("pattern not in range, idx: %d len: %d\n", i, len));
3829 return -1;
3830 }
3831 bcm_strncpy_s(num, sizeof(num), src, HD_BYTE_SIZE);
3832 num[HD_BYTE_SIZE] = '\0';
3833 dst[i] = (uint8)strtoul(num, NULL, 16);
3834 src += HD_BYTE_SIZE;
3835 }
3836 return i;
3837 }
3838 #endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
3839
3840 #ifdef PKT_FILTER_SUPPORT
3841 void
3842 dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
3843 {
3844 char *argv[8];
3845 int i = 0;
3846 const char *str;
3847 int buf_len;
3848 int str_len;
3849 char *arg_save = 0, *arg_org = 0;
3850 int rc;
3851 char buf[32] = {0};
3852 wl_pkt_filter_enable_t enable_parm;
3853 wl_pkt_filter_enable_t * pkt_filterp;
3854
3855 if (!arg)
3856 return;
3857
3858 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
3859 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3860 goto fail;
3861 }
3862 arg_org = arg_save;
3863 memcpy(arg_save, arg, strlen(arg) + 1);
3864
3865 argv[i] = bcmstrtok(&arg_save, " ", 0);
3866
3867 i = 0;
3868 if (argv[i] == NULL) {
3869 DHD_ERROR(("No args provided\n"));
3870 goto fail;
3871 }
3872
3873 str = "pkt_filter_enable";
3874 str_len = strlen(str);
3875 bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
3876 buf[ sizeof(buf) - 1 ] = '\0';
3877 buf_len = str_len + 1;
3878
3879 pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);
3880
3881 /* Parse packet filter id. */
3882 enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
3883
3884 /* Parse enable/disable value. */
3885 enable_parm.enable = htod32(enable);
3886
3887 buf_len += sizeof(enable_parm);
3888 memcpy((char *)pkt_filterp,
3889 &enable_parm,
3890 sizeof(enable_parm));
3891
3892 /* Enable/disable the specified filter. */
3893 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3894 rc = rc >= 0 ? 0 : rc;
3895 if (rc) {
3896 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3897 __FUNCTION__, arg, rc));
3898 dhd_set_packet_filter(dhd);
3899 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
3900 rc = rc >= 0 ? 0 : rc;
3901 if (rc) {
3902 DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
3903 __FUNCTION__, arg, rc));
3904 } else {
3905 DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
3906 __FUNCTION__, arg));
3907 }
3908 }
3909 else
3910 DHD_TRACE(("%s: successfully added pktfilter %s\n",
3911 __FUNCTION__, arg));
3912
3913 /* Control the master mode */
3914 rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
3915 master_mode, WLC_SET_VAR, TRUE, 0);
3916 rc = rc >= 0 ? 0 : rc;
3917 if (rc)
3918 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
3919 __FUNCTION__, arg, rc));
3920
3921 fail:
3922 if (arg_org)
3923 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
3924 }
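/*
 * Sketch of the "pkt_filter_enable" ioctl buffer built above, assuming a
 * filter id of 100 is passed in 'arg' and enable == 1 (values illustrative
 * only). The iovar name is laid down first, NUL-terminated, followed by the
 * packed wl_pkt_filter_enable_t:
 *
 *	buf: "pkt_filter_enable\0" | id = 100 (uint32) | enable = 1 (uint32)
 *	buf_len = strlen("pkt_filter_enable") + 1 + sizeof(wl_pkt_filter_enable_t)
 *
 * A typical call would then look like:
 *	dhd_pktfilter_offload_enable(dhd, "100", 1, master_mode);
 */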
3925
3926 /* Packet filter section: extended filters have named offsets, add table here */
3927 typedef struct {
3928 char *name;
3929 uint16 base;
3930 } wl_pfbase_t;
3931
3932 static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
3933
3934 static int
3935 wl_pkt_filter_base_parse(char *name)
3936 {
3937 uint i;
3938 char *bname, *uname;
3939
3940 for (i = 0; i < ARRAYSIZE(basenames); i++) {
3941 bname = basenames[i].name;
3942 for (uname = name; *uname; bname++, uname++) {
3943 if (*bname != bcm_toupper(*uname)) {
3944 break;
3945 }
3946 }
3947 if (!*uname && !*bname) {
3948 break;
3949 }
3950 }
3951
3952 if (i < ARRAYSIZE(basenames)) {
3953 return basenames[i].base;
3954 } else {
3955 return -1;
3956 }
3957 }
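/*
 * Example of how the name table above is used (illustrative only; the actual
 * base names come from WL_PKT_FILTER_BASE_NAMES in the WL headers, so "ETH_H"
 * here is an assumed entry). A list-filter token such as "ETH_H:12" is split
 * at the ':' and the leading name is matched, effectively case-insensitively,
 * against the upper-case entries in basenames[]:
 *
 *	rc = wl_pkt_filter_base_parse("ETH_H");
 *	// returns the matching .base value, or -1 if no entry matches
 */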
3958
3959 void
3960 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
3961 {
3962 const char *str;
3963 wl_pkt_filter_t pkt_filter;
3964 wl_pkt_filter_t *pkt_filterp;
3965 int buf_len;
3966 int str_len;
3967 int rc = -1;
3968 uint32 mask_size;
3969 uint32 pattern_size;
3970 char *argv[MAXPKT_ARG] = {0}, * buf = 0;
3971 int i = 0;
3972 char *arg_save = 0, *arg_org = 0;
3973
3974 if (!arg)
3975 return;
3976
3977 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
3978 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3979 goto fail;
3980 }
3981
3982 arg_org = arg_save;
3983
3984 if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
3985 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
3986 goto fail;
3987 }
3988
3989 memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
3990 memcpy(arg_save, arg, strlen(arg) + 1);
3991
3992 if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
3993 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
3994 goto fail;
3995 }
3996
3997 argv[i] = bcmstrtok(&arg_save, " ", 0);
3998 while (argv[i++]) {
3999 if (i >= MAXPKT_ARG) {
4000 DHD_ERROR(("Invalid args provided\n"));
4001 goto fail;
4002 }
4003 argv[i] = bcmstrtok(&arg_save, " ", 0);
4004 }
4005
4006 i = 0;
4007 if (argv[i] == NULL) {
4008 DHD_ERROR(("No args provided\n"));
4009 goto fail;
4010 }
4011
4012 str = "pkt_filter_add";
4013 str_len = strlen(str);
4014 bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
4015 buf[ str_len ] = '\0';
4016 buf_len = str_len + 1;
4017
4018 pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
4019
4020 /* Parse packet filter id. */
4021 pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
4022
4023 if (argv[++i] == NULL) {
4024 DHD_ERROR(("Polarity not provided\n"));
4025 goto fail;
4026 }
4027
4028 /* Parse filter polarity. */
4029 pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
4030
4031 if (argv[++i] == NULL) {
4032 DHD_ERROR(("Filter type not provided\n"));
4033 goto fail;
4034 }
4035
4036 /* Parse filter type. */
4037 pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
4038
4039 if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
4040 if (argv[++i] == NULL) {
4041 DHD_ERROR(("Offset not provided\n"));
4042 goto fail;
4043 }
4044
4045 /* Parse pattern filter offset. */
4046 pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
4047
4048 if (argv[++i] == NULL) {
4049 DHD_ERROR(("Bitmask not provided\n"));
4050 goto fail;
4051 }
4052
4053 /* Parse pattern filter mask. */
4054 rc = wl_pattern_atoh(argv[i],
4055 (char *) pkt_filterp->u.pattern.mask_and_pattern);
4056
4057 if (rc == -1) {
4058 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4059 goto fail;
4060 }
4061 mask_size = htod32(rc);
4062 if (argv[++i] == NULL) {
4063 DHD_ERROR(("Pattern not provided\n"));
4064 goto fail;
4065 }
4066
4067 /* Parse pattern filter pattern. */
4068 rc = wl_pattern_atoh(argv[i],
4069 (char *) &pkt_filterp->u.pattern.mask_and_pattern[mask_size]);
4070
4071 if (rc == -1) {
4072 DHD_ERROR(("Rejecting: %s\n", argv[i]));
4073 goto fail;
4074 }
4075 pattern_size = htod32(rc);
4076 if (mask_size != pattern_size) {
4077 DHD_ERROR(("Mask and pattern not the same size\n"));
4078 goto fail;
4079 }
4080
4081 pkt_filter.u.pattern.size_bytes = mask_size;
4082 buf_len += WL_PKT_FILTER_FIXED_LEN;
4083 buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * mask_size);
4084
4085 /* Filter attributes are set in a local variable (pkt_filter), and
4086 * then memcpy'ed into the ioctl buffer (pkt_filterp) since there is no
4087 * guarantee that the buffer is properly aligned.
4088 */
4089 memcpy((char *)pkt_filterp,
4090 &pkt_filter,
4091 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
4092 } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
4093 int list_cnt = 0;
4094 char *endptr = NULL;
4095 wl_pkt_filter_pattern_listel_t *pf_el =
4096 (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
4097
4098 while (argv[++i] != NULL) {
4099 /* Check valid buffer size. */
4100 if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
4101 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
4102 goto fail;
4103 }
4104
4105 /* Parse pattern filter base and offset. */
4106 if (bcm_isdigit(*argv[i])) {
4107 /* Numeric base */
4108 rc = strtoul(argv[i], &endptr, 0);
4109 } else {
4110 endptr = strchr(argv[i], ':');
4111 if (endptr) {
4112 *endptr = '\0';
4113 rc = wl_pkt_filter_base_parse(argv[i]);
4114 if (rc == -1) {
4115 printf("Invalid base %s\n", argv[i]);
4116 goto fail;
4117 }
4118 *endptr = ':';
4119 }
4120 }
4121
4122 if (endptr == NULL) {
4123 printf("Invalid [base:]offset format: %s\n", argv[i]);
4124 goto fail;
4125 }
4126
4127 if (*endptr == ':') {
4128 pf_el->base_offs = htod16(rc);
4129 rc = strtoul(endptr + 1, &endptr, 0);
4130 } else {
4131 /* Must have had a numeric offset only */
4132 pf_el->base_offs = htod16(0);
4133 }
4134
4135 if (*endptr) {
4136 printf("Invalid [base:]offset format: %s\n", argv[i]);
4137 goto fail;
4138 }
4139 if (rc > 0x0000FFFF) {
4140 printf("Offset too large\n");
4141 goto fail;
4142 }
4143 pf_el->rel_offs = htod16(rc);
4144
4145 /* Clear match_flag (may be set in parsing which follows) */
4146 pf_el->match_flags = htod16(0);
4147
4148 /* Parse pattern filter mask and pattern directly into ioctl buffer */
4149 if (argv[++i] == NULL) {
4150 printf("Bitmask not provided\n");
4151 goto fail;
4152 }
4153 rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
4154 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4155 printf("Rejecting: %s\n", argv[i]);
4156 goto fail;
4157 }
4158 mask_size = htod16(rc);
4159
4160 if (argv[++i] == NULL) {
4161 printf("Pattern not provided\n");
4162 goto fail;
4163 }
4164
4165 if (*argv[i] == '!') {
4166 pf_el->match_flags =
4167 htod16(WL_PKT_FILTER_MFLAG_NEG);
4168 (argv[i])++;
4169 }
4170 if (*argv[i] == '\0') {
4171 printf("Pattern not provided\n");
4172 goto fail;
4173 }
4174 rc = wl_pattern_atoh(argv[i], (char*)&pf_el->mask_and_data[rc]);
4175 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
4176 printf("Rejecting: %s\n", argv[i]);
4177 goto fail;
4178 }
4179 pattern_size = htod16(rc);
4180
4181 if (mask_size != pattern_size) {
4182 printf("Mask and pattern not the same size\n");
4183 goto fail;
4184 }
4185
4186 pf_el->size_bytes = mask_size;
4187
4188 /* Account for the size of this pattern element */
4189 buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
4190
4191 /* Move to next element location in ioctl buffer */
4192 pf_el = (wl_pkt_filter_pattern_listel_t*)
4193 ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
4194
4195 /* Count list element */
4196 list_cnt++;
4197 }
4198
4199 /* Account for initial fixed size, and copy initial fixed fields */
4200 buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
4201
4202 if (buf_len > MAX_PKTFLT_BUF_SIZE) {
4203 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
4204 goto fail;
4205 }
4206 /* Update list count and total size */
4207 pkt_filter.u.patlist.list_cnt = list_cnt;
4208 pkt_filter.u.patlist.PAD1[0] = 0;
4209 pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
4210 pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
4211
4212 memcpy((char *)pkt_filterp, &pkt_filter,
4213 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
4214 } else {
4215 DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
4216 goto fail;
4217 }
4218
4219 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
4220 rc = rc >= 0 ? 0 : rc;
4221
4222 if (rc)
4223 DHD_TRACE(("%s: failed to add pktfilter %s, retcode = %d\n",
4224 __FUNCTION__, arg, rc));
4225 else
4226 DHD_TRACE(("%s: successfully added pktfilter %s\n",
4227 __FUNCTION__, arg));
4228
4229 fail:
4230 if (arg_org)
4231 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
4232
4233 if (buf)
4234 MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
4235 }
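/*
 * Sketch of the 'arg' string format parsed above (values are illustrative,
 * not taken from any particular platform config). For a plain pattern filter
 * (type 0) the fields are: <id> <polarity> <type> <offset> <mask> <pattern>:
 *
 *	dhd_pktfilter_offload_set(dhd, "102 0 0 0 0xFFFFFF 0x01005E");
 *	// id 102, match (not negate), pattern filter, offset 0,
 *	// 3-byte mask/pattern matching the IPv4 multicast MAC prefix 01:00:5e
 *
 * For list filters (type 2 or 6) each element is "<[base:]offset> <mask>
 * <[!]pattern>", repeated, as parsed in the while() loop above.
 */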
4236
4237 void
4238 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
4239 {
4240 int ret;
4241
4242 ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
4243 id, WLC_SET_VAR, TRUE, 0);
4244 if (ret < 0) {
4245 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
4246 __FUNCTION__, id, ret));
4247 }
4248 }
4249 #endif /* PKT_FILTER_SUPPORT */
4250
4251 /* ========================== */
4252 /* ==== ARP OFFLOAD SUPPORT = */
4253 /* ========================== */
4254 #ifdef ARP_OFFLOAD_SUPPORT
4255 void
4256 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
4257 {
4258 int retcode;
4259
4260 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
4261 arp_mode, WLC_SET_VAR, TRUE, 0);
4262
4263 retcode = retcode >= 0 ? 0 : retcode;
4264 if (retcode)
4265 DHD_TRACE(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
4266 __FUNCTION__, arp_mode, retcode));
4267 else
4268 DHD_TRACE(("%s: successfully set ARP offload mode to 0x%x\n",
4269 __FUNCTION__, arp_mode));
4270 }
4271
4272 void
4273 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
4274 {
4275 int retcode;
4276 #ifdef WL_CFG80211
4277 /* Do not enable arp offload in case of non-STA interfaces active */
4278 if (arp_enable &&
4279 (wl_cfg80211_check_vif_in_use(dhd_linux_get_primary_netdev(dhd)))) {
4280 DHD_TRACE(("%s: Virtual interfaces active, ignore arp offload request \n",
4281 __FUNCTION__));
4282 return;
4283 }
4284 #endif /* WL_CFG80211 */
4285 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
4286 arp_enable, WLC_SET_VAR, TRUE, 0);
4287
4288 retcode = retcode >= 0 ? 0 : retcode;
4289 if (retcode)
4290 DHD_TRACE(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
4291 __FUNCTION__, arp_enable, retcode));
4292 else
4293 DHD_TRACE(("%s: successfully enabed ARP offload to %d\n",
4294 __FUNCTION__, arp_enable));
4295 if (arp_enable) {
4296 uint32 version;
4297 retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
4298 &version, WLC_GET_VAR, FALSE, 0);
4299 if (retcode) {
4300 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
4301 __FUNCTION__, retcode));
4302 dhd->arp_version = 1;
4303 }
4304 else {
4305 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
4306 dhd->arp_version = version;
4307 }
4308 }
4309 }
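/*
 * Typical bring-up sequence for ARP offload, sketched here for reference
 * (the exact arp_ol mode bits are platform policy and are defined elsewhere
 * in the DHD/WL headers, so arp_mode below is only a placeholder):
 *
 *	dhd_arp_offload_set(dhd, arp_mode);        // program "arp_ol" bits
 *	dhd_arp_offload_enable(dhd, TRUE);         // turn on "arpoe"
 *	dhd_arp_offload_add_ip(dhd, ipaddr, idx);  // push host IPv4 addr (below)
 */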
4310
4311 void
4312 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
4313 {
4314 int ret = 0;
4315
4316 if (dhd == NULL) return;
4317 if (dhd->arp_version == 1)
4318 idx = 0;
4319
4320 ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
4321 if (ret < 0)
4322 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4323 }
4324
4325 void
4326 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
4327 {
4328 int ret = 0;
4329
4330 if (dhd == NULL) return;
4331 if (dhd->arp_version == 1)
4332 idx = 0;
4333
4334 ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
4335 if (ret < 0)
4336 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
4337 }
4338
4339 void
4340 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
4341 {
4342 int ret;
4343
4344 if (dhd == NULL) return;
4345 if (dhd->arp_version == 1)
4346 idx = 0;
4347
4348 ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
4349 NULL, 0, TRUE);
4350 if (ret)
4351 DHD_TRACE(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
4352 else
4353 DHD_TRACE(("%s: sARP H ipaddr entry added \n",
4354 __FUNCTION__));
4355 }
4356
4357 int
4358 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
4359 {
4360 int ret, i;
4361 uint32 *ptr32 = buf;
4362 bool clr_bottom = FALSE;
4363
4364 if (!buf)
4365 return -1;
4366 if (dhd == NULL) return -1;
4367 if (dhd->arp_version == 1)
4368 idx = 0;
4369
4370 ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
4371 FALSE);
4372 if (ret) {
4373 DHD_TRACE(("%s: ioctl WLC_GET_VAR error %d\n",
4374 __FUNCTION__, ret));
4375
4376 return -1;
4377 }
4378
4379 /* clean up the buf: zero the entries after the first empty slot */
4380 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
4381 if (!clr_bottom) {
4382 if (*ptr32 == 0)
4383 clr_bottom = TRUE;
4384 } else {
4385 *ptr32 = 0;
4386 }
4387 ptr32++;
4388 }
4389
4390 return 0;
4391 }
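/*
 * Usage sketch for the table read above: the caller supplies a buffer of at
 * least MAX_IPV4_ENTRIES uint32 slots; the loop above zeroes everything after
 * the first empty slot, so the caller can stop at the first 0 entry.
 *
 *	uint32 hostip[MAX_IPV4_ENTRIES];
 *	if (dhd_arp_get_arp_hostip_table(dhd, hostip, sizeof(hostip), 0) == 0) {
 *		// hostip[0..n-1] hold the programmed addresses, 0-terminated
 *	}
 */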
4392 #endif /* ARP_OFFLOAD_SUPPORT */
4393
4394 /*
4395 * Neighbor Discovery Offload: enable NDO feature
4396 * Called by ipv6 event handler when interface comes up/goes down
4397 */
4398 int
4399 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
4400 {
4401 int retcode;
4402
4403 if (dhd == NULL)
4404 return -1;
4405
4406 #if defined(WL_CFG80211) && defined(WL_NAN)
4407 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
4408 /* If nan dp is active, skip NDO */
4409 DHD_INFO(("Active NAN DP, skip NDO\n"));
4410 return 0;
4411 }
4412 #endif /* WL_CFG80211 && WL_NAN */
4413 #ifdef WL_CFG80211
4414 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
4415 /* NDO disable on STA+SOFTAP mode */
4416 ndo_enable = FALSE;
4417 }
4418 #endif /* WL_CFG80211 */
4419 retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
4420 ndo_enable, WLC_SET_VAR, TRUE, 0);
4421 if (retcode)
4422 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
4423 __FUNCTION__, ndo_enable, retcode));
4424 else
4425 DHD_TRACE(("%s: successfully enabled ndo offload to %d\n",
4426 __FUNCTION__, ndo_enable));
4427
4428 return retcode;
4429 }
4430
4431 /*
4432 * Neighbor Discovery Offload: add a host IPv6 address
4433 * Called by ipv6 event handler when interface comes up
4434 */
4435 int
4436 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
4437 {
4438 int iov_len = 0;
4439 char iovbuf[DHD_IOVAR_BUF_SIZE];
4440 int retcode;
4441
4442 if (dhd == NULL)
4443 return -1;
4444
4445 iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
4446 IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
4447 if (!iov_len) {
4448 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4449 __FUNCTION__, sizeof(iovbuf)));
4450 return -1;
4451 }
4452 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4453
4454 if (retcode)
4455 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
4456 __FUNCTION__, retcode));
4457 else
4458 DHD_TRACE(("%s: ndo ipaddr entry added \n",
4459 __FUNCTION__));
4460
4461 return retcode;
4462 }
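/*
 * Sketch of the legacy "nd_hostip" iovar built above with bcm_mkiovar():
 * the buffer receives the NUL-terminated iovar name followed by the raw
 * 16-byte IPv6 address, and the returned length covers both:
 *
 *	iovbuf: "nd_hostip\0" | 16-byte IPv6 address
 *	iov_len = strlen("nd_hostip") + 1 + IPV6_ADDR_LEN
 *
 * The enhanced, versioned form of the same iovar is used further below in
 * dhd_ndo_add_ip_with_type().
 */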
4463
4464 #ifdef REVERSE_AIFSN
4465 int
4466 check_reverse_aifsn_condition(dhd_pub_t *dhd, struct net_device *ndev)
4467 {
4468 int iov_len = 0;
4469 char iovbuf[DHD_IOVAR_BUF_SIZE];
4470 edcf_acparam_t *ac_params = NULL;
4471 int retcode;
4472 u8 aci, aifsn;
4473 int ifidx;
4474
4475 if (dhd == NULL)
4476 return -1;
4477
4478 ifidx = dhd_net2idx(dhd->info, ndev);
4479 if (ifidx == DHD_BAD_IF)
4480 return -1;
4481
4482 dhd->aifsn_reverse = FALSE;
4483
4484 strcpy(iovbuf, "wme_ac_sta");
4485 iov_len = sizeof(iovbuf);
4486
4487 retcode = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, iov_len, FALSE, ifidx);
4488 if (retcode) {
4489 DHD_ERROR(("%s: could not get wme_ac_sta params(%d)\n\n",
4490 __FUNCTION__, retcode));
4491 return -1;
4492 }
4493
4494 ac_params = (edcf_acparam_t *)iovbuf;
4495 for (aci = 0; aci < AC_COUNT; aci++) {
4496 aifsn = ac_params[aci].ACI & EDCF_AIFSN_MASK;
4497 if (aci == AC_VI && aifsn == 10) {
4498 DHD_ERROR(("[%s] Reverse AIFSN for AC_VI:10 \n", __FUNCTION__));
4499 dhd->aifsn_reverse = TRUE;
4500 break;
4501 }
4502 }
4503 return 0;
4504 }
4505 #endif /* REVERSE_AIFSN */
4506
4507 /*
4508 * Neighbor Discovery Offload: clear host IPv6 addresses
4509 * Called by ipv6 event handler when interface goes down
4510 */
4511 int
4512 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
4513 {
4514 int iov_len = 0;
4515 char iovbuf[DHD_IOVAR_BUF_SIZE];
4516 int retcode;
4517
4518 if (dhd == NULL)
4519 return -1;
4520
4521 iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
4522 0, iovbuf, sizeof(iovbuf));
4523 if (!iov_len) {
4524 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4525 __FUNCTION__, sizeof(iovbuf)));
4526 return -1;
4527 }
4528 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4529
4530 if (retcode)
4531 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
4532 __FUNCTION__, retcode));
4533 else
4534 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
4535 __FUNCTION__));
4536
4537 return retcode;
4538 }
4539 /* Enhanced ND offload */
4540 uint16
4541 dhd_ndo_get_version(dhd_pub_t *dhdp)
4542 {
4543 char iovbuf[DHD_IOVAR_BUF_SIZE];
4544 wl_nd_hostip_t ndo_get_ver;
4545 int iov_len;
4546 int retcode;
4547 uint16 ver = 0;
4548
4549 if (dhdp == NULL) {
4550 return BCME_ERROR;
4551 }
4552
4553 memset(&iovbuf, 0, sizeof(iovbuf));
4554 ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
4555 ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
4556 ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
4557 ndo_get_ver.u.version = 0;
4558 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
4559 WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));
4560
4561 if (!iov_len) {
4562 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4563 __FUNCTION__, sizeof(iovbuf)));
4564 return BCME_ERROR;
4565 }
4566
4567 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);
4568
4569 if (retcode) {
4570 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4571 /* ver iovar not supported. NDO version is 0 */
4572 ver = 0;
4573 } else {
4574 wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;
4575
4576 if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
4577 (dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
4578 (dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
4579 + sizeof(uint16))) {
4580 /* nd_hostip iovar version */
4581 ver = dtoh16(ndo_ver_ret->u.version);
4582 }
4583
4584 DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
4585 }
4586
4587 return ver;
4588 }
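/*
 * Illustrative use of the version probe above (a sketch, not existing driver
 * flow): callers can fall back to the legacy single-address iovar when the
 * firmware reports version 0. 'ip6' stands in for a 16-byte IPv6 address
 * buffer owned by the caller.
 *
 *	if (dhd_ndo_get_version(dhdp) > 0)
 *		dhd_ndo_add_ip_with_type(dhdp, ip6, WL_ND_IPV6_ADDR_TYPE_UNICAST, idx);
 *	else
 *		dhd_ndo_add_ip(dhdp, ip6, idx);
 */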
4589
4590 int
4591 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
4592 {
4593 char iovbuf[DHD_IOVAR_BUF_SIZE];
4594 wl_nd_hostip_t ndo_add_addr;
4595 int iov_len;
4596 int retcode;
4597
4598 if (dhdp == NULL || ipv6addr == 0) {
4599 return BCME_ERROR;
4600 }
4601
4602 /* wl_nd_hostip_t fixed param */
4603 ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4604 ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
4605 ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4606 /* wl_nd_host_ip_addr_t param for add */
4607 memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4608 ndo_add_addr.u.host_ip.type = type;
4609
4610 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
4611 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4612 if (!iov_len) {
4613 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4614 __FUNCTION__, sizeof(iovbuf)));
4615 return BCME_ERROR;
4616 }
4617
4618 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4619 if (retcode) {
4620 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4621 #ifdef NDO_CONFIG_SUPPORT
4622 if (retcode == BCME_NORESOURCE) {
4623 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
4624 DHD_INFO(("%s: Host IP count exceed device capacity,"
4625 "ND offload deactivated\n", __FUNCTION__));
4626 dhdp->ndo_host_ip_overflow = TRUE;
4627 dhd_ndo_enable(dhdp, FALSE);
4628 }
4629 #endif /* NDO_CONFIG_SUPPORT */
4630 } else {
4631 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
4632 }
4633
4634 return retcode;
4635 }
4636
4637 int
4638 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
4639 {
4640 char iovbuf[DHD_IOVAR_BUF_SIZE];
4641 wl_nd_hostip_t ndo_del_addr;
4642 int iov_len;
4643 int retcode;
4644
4645 if (dhdp == NULL || ipv6addr == 0) {
4646 return BCME_ERROR;
4647 }
4648
4649 /* wl_nd_hostip_t fixed param */
4650 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4651 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
4652 ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
4653 /* wl_nd_host_ip_addr_t param for del */
4654 memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
4655 ndo_del_addr.u.host_ip.type = 0; /* don't care */
4656
4657 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
4658 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
4659
4660 if (!iov_len) {
4661 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4662 __FUNCTION__, sizeof(iovbuf)));
4663 return BCME_ERROR;
4664 }
4665
4666 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4667 if (retcode) {
4668 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4669 } else {
4670 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4671 }
4672
4673 return retcode;
4674 }
4675
4676 int
4677 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
4678 {
4679 char iovbuf[DHD_IOVAR_BUF_SIZE];
4680 wl_nd_hostip_t ndo_del_addr;
4681 int iov_len;
4682 int retcode;
4683
4684 if (dhdp == NULL) {
4685 return BCME_ERROR;
4686 }
4687
4688 /* wl_nd_hostip_t fixed param */
4689 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
4690 if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
4691 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
4692 } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
4693 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
4694 } else {
4695 return BCME_BADARG;
4696 }
4697 ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
4698
4699 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
4700 iovbuf, sizeof(iovbuf));
4701
4702 if (!iov_len) {
4703 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4704 __FUNCTION__, sizeof(iovbuf)));
4705 return BCME_ERROR;
4706 }
4707
4708 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
4709 if (retcode) {
4710 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
4711 } else {
4712 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
4713 }
4714
4715 return retcode;
4716 }
4717
4718 int
4719 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
4720 {
4721 char iovbuf[DHD_IOVAR_BUF_SIZE];
4722 int iov_len;
4723 int retcode;
4724
4725 if (dhdp == NULL) {
4726 return BCME_ERROR;
4727 }
4728
4729 iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
4730 iovbuf, sizeof(iovbuf));
4731
4732 if (!iov_len) {
4733 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
4734 __FUNCTION__, sizeof(iovbuf)));
4735 return BCME_ERROR;
4736 }
4737
4738 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
4739 if (retcode)
4740 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
4741 __FUNCTION__, enable, retcode));
4742 else {
4743 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
4744 __FUNCTION__, enable));
4745 }
4746
4747 return retcode;
4748 }
4749 #ifdef SIMPLE_ISCAN
4750
4751 uint iscan_thread_id = 0;
4752 iscan_buf_t * iscan_chain = 0;
4753
4754 iscan_buf_t *
4755 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
4756 {
4757 iscan_buf_t *iscanbuf_alloc = 0;
4758 iscan_buf_t *iscanbuf_head;
4759
4760 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4761 dhd_iscan_lock();
4762
4763 iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
4764 if (iscanbuf_alloc == NULL)
4765 goto fail;
4766
4767 iscanbuf_alloc->next = NULL;
4768 iscanbuf_head = *iscanbuf;
4769
4770 DHD_ISCAN(("%s: addr of allocated node = 0x%X "
4771 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
4772 __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
4773
4774 if (iscanbuf_head == NULL) {
4775 *iscanbuf = iscanbuf_alloc;
4776 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
4777 goto fail;
4778 }
4779
4780 while (iscanbuf_head->next)
4781 iscanbuf_head = iscanbuf_head->next;
4782
4783 iscanbuf_head->next = iscanbuf_alloc;
4784
4785 fail:
4786 dhd_iscan_unlock();
4787 return iscanbuf_alloc;
4788 }
4789
4790 void
4791 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
4792 {
4793 iscan_buf_t *iscanbuf_free = 0;
4794 iscan_buf_t *iscanbuf_prv = 0;
4795 iscan_buf_t *iscanbuf_cur;
4796 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4797 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
4798
4799 dhd_iscan_lock();
4800
4801 iscanbuf_cur = iscan_chain;
4802
4803 /* If iscan_delete is null then delete the entire
4804 * chain or else delete specific one provided
4805 */
4806 if (!iscan_delete) {
4807 while (iscanbuf_cur) {
4808 iscanbuf_free = iscanbuf_cur;
4809 iscanbuf_cur = iscanbuf_cur->next;
4810 iscanbuf_free->next = 0;
4811 MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
4812 }
4813 iscan_chain = 0;
4814 } else {
4815 while (iscanbuf_cur) {
4816 if (iscanbuf_cur == iscan_delete)
4817 break;
4818 iscanbuf_prv = iscanbuf_cur;
4819 iscanbuf_cur = iscanbuf_cur->next;
4820 }
4821 if (iscanbuf_prv)
4822 iscanbuf_prv->next = iscan_delete->next;
4823
4824 iscan_delete->next = 0;
4825 MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
4826
4827 if (!iscanbuf_prv)
4828 iscan_chain = 0;
4829 }
4830 dhd_iscan_unlock();
4831 }
4832
4833 iscan_buf_t *
4834 dhd_iscan_result_buf(void)
4835 {
4836 return iscan_chain;
4837 }
4838
4839 int
4840 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
4841 {
4842 int rc = -1;
4843 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4844 char *buf = NULL;
4845 char iovar[] = "iscan";
4846 uint32 allocSize = 0;
4847 wl_ioctl_t ioctl;
4848 int len;
4849
4850 if (pParams) {
4851 allocSize = (size + strlen(iovar) + 1);
4852 if ((allocSize < size) || (allocSize < strlen(iovar)))
4853 {
4854 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
4855 __FUNCTION__, allocSize, size, strlen(iovar)));
4856 goto cleanUp;
4857 }
4858 buf = MALLOC(dhd->osh, allocSize);
4859
4860 if (buf == NULL)
4861 {
4862 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
4863 goto cleanUp;
4864 }
4865 ioctl.cmd = WLC_SET_VAR;
4866 len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
4867 if (len == 0) {
4868 rc = BCME_BUFTOOSHORT;
4869 goto cleanUp;
4870 }
4871 rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
4872 }
4873
4874 cleanUp:
4875 if (buf) {
4876 MFREE(dhd->osh, buf, allocSize);
4877 }
4878
4879 return rc;
4880 }
4881
4882 static int
4883 dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
4884 {
4885 wl_iscan_results_t *list_buf;
4886 wl_iscan_results_t list;
4887 wl_scan_results_t *results;
4888 iscan_buf_t *iscan_cur;
4889 int status = -1;
4890 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
4891 int rc;
4892 wl_ioctl_t ioctl;
4893 int len;
4894
4895 DHD_ISCAN(("%s: Enter\n", __FUNCTION__));
4896
4897 iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
4898 if (!iscan_cur) {
4899 DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
4900 dhd_iscan_free_buf(dhdp, 0);
4901 dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4902 dhd_ind_scan_confirm(dhdp, FALSE);
4903 goto fail;
4904 }
4905
4906 dhd_iscan_lock();
4907
4908 memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
4909 list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
4910 results = &list_buf->results;
4911 results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
4912 results->version = 0;
4913 results->count = 0;
4914
4915 memset(&list, 0, sizeof(list));
4916 list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
4917 len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
4918 iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4919 if (len == 0) {
4920 dhd_iscan_free_buf(dhdp, 0);
4921 dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
4922 dhd_ind_scan_confirm(dhdp, FALSE);
4923 status = BCME_BUFTOOSHORT;
4924 goto fail;
4925 }
4926 ioctl.cmd = WLC_GET_VAR;
4927 ioctl.set = FALSE;
4928 rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
4929
4930 results->buflen = dtoh32(results->buflen);
4931 results->version = dtoh32(results->version);
4932 *scan_count = results->count = dtoh32(results->count);
4933 status = dtoh32(list_buf->status);
4934 DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));
4935
4936 dhd_iscan_unlock();
4937
4938 if (!(*scan_count)) {
4939 /* TODO: race condition when FLUSH already called */
4940 dhd_iscan_free_buf(dhdp, 0);
4941 }
4942 fail:
4943 return status;
4944 }
4945
4946 #endif /* SIMPLE_ISCAN */
4947
4948 /*
4949 * returns = TRUE if associated, FALSE if not associated
4950 */
4951 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
4952 {
4953 char bssid[6], zbuf[6];
4954 int ret = -1;
4955
4956 bzero(bssid, 6);
4957 bzero(zbuf, 6);
4958
4959 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
4960 ETHER_ADDR_LEN, FALSE, ifidx);
4961 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
4962
4963 if (ret == BCME_NOTASSOCIATED) {
4964 DHD_TRACE(("%s: not associated! res:%d\n", __FUNCTION__, ret));
4965 }
4966
4967 if (retval)
4968 *retval = ret;
4969
4970 if (ret < 0)
4971 return FALSE;
4972
4973 if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
4974 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
4975 return FALSE;
4976 }
4977 return TRUE;
4978 }
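/*
 * Usage sketch: most callers in this file only care about the boolean and
 * pass NULL for retval; the WLC_GET_BSSID error code is available when the
 * caller wants it.
 *
 *	int err = 0;
 *	if (dhd_is_associated(dhd, 0, &err) == FALSE) {
 *		DHD_TRACE(("not associated, WLC_GET_BSSID err %d\n", err));
 *	}
 */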
4979
4980 /* Function to estimate possible DTIM_SKIP value */
4981 #if defined(OEM_ANDROID) && defined(BCMPCIE)
4982 int
4983 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
4984 {
4985 int bcn_li_dtim = 1; /* default: no dtim skip setting */
4986 int ret = -1;
4987 int allowed_skip_dtim_cnt = 0;
4988
4989 if (dhd->disable_dtim_in_suspend) {
4990 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
4991 bcn_li_dtim = 0;
4992 return bcn_li_dtim;
4993 }
4994
4995 /* Check if associated */
4996 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
4997 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
4998 return bcn_li_dtim;
4999 }
5000
5001 if (dtim_period == NULL || bcn_interval == NULL)
5002 return bcn_li_dtim;
5003
5004 /* read associated AP beacon interval */
5005 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5006 bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
5007 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5008 return bcn_li_dtim;
5009 }
5010
5011 /* read associated AP dtim setup */
5012 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5013 dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
5014 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5015 return bcn_li_dtim;
5016 }
5017
5018 /* if not associated just return */
5019 if (*dtim_period == 0) {
5020 return bcn_li_dtim;
5021 }
5022
5023 if (dhd->max_dtim_enable) {
5024 bcn_li_dtim =
5025 (int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
5026 if (bcn_li_dtim == 0) {
5027 bcn_li_dtim = 1;
5028 }
5029 } else {
5030 /* attempt to use platform-defined dtim skip interval */
5031 bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5032
5033 /* check if sta listen interval fits into AP dtim */
5034 if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
5035 /* AP DTIM too big for our Listen Interval : no dtim skipping */
5036 bcn_li_dtim = NO_DTIM_SKIP;
5037 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5038 __FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5039 return bcn_li_dtim;
5040 }
5041
5042 if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5043 allowed_skip_dtim_cnt =
5044 MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
5045 bcn_li_dtim =
5046 (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5047 }
5048
5049 if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
5050 /* Round up dtim_skip to fit into STAs Listen Interval */
5051 bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
5052 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
5053 }
5054 }
5055
5056 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5057 __FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));
5058
5059 return bcn_li_dtim;
5060 }
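/*
 * Worked example of the calculation above (numbers are illustrative; the real
 * MAX_DTIM_ALLOWED_INTERVAL, CUSTOM_LISTEN_INTERVAL and suspend_bcn_li_dtim
 * values are platform-defined). Assume beacon interval 100, DTIM period 2,
 * listen interval 10, a configured suspend_bcn_li_dtim of 3, and
 * MAX_DTIM_ALLOWED_INTERVAL taken as 600:
 *
 *	max_dtim_enable path: bcn_li_dtim = 600 / (2 * 100) = 3
 *	fixed path:           2 * 100 * 3 = 600 <= 600, and 3 * 2 = 6 <= 10,
 *	                      so the configured skip of 3 is kept
 *
 * i.e. in suspend the host wakes for every 3rd DTIM beacon instead of every
 * DTIM beacon.
 */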
5061 #else /* OEM_ANDROID && BCMPCIE */
5062 int
5063 dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
5064 {
5065 int bcn_li_dtim = 1; /* default: no dtim skip setting */
5066 int ret = -1;
5067 int dtim_period = 0;
5068 int ap_beacon = 0;
5069 int allowed_skip_dtim_cnt = 0;
5070
5071 if (dhd->disable_dtim_in_suspend) {
5072 DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
5073 bcn_li_dtim = 0;
5074 goto exit;
5075 }
5076
5077 /* Check if associated */
5078 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5079 DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
5080 goto exit;
5081 }
5082
5083 /* read associated AP beacon interval */
5084 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
5085 &ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
5086 DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
5087 goto exit;
5088 }
5089
5090 /* read associated ap's dtim setup */
5091 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
5092 &dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
5093 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
5094 goto exit;
5095 }
5096
5097 /* if not associated just exit */
5098 if (dtim_period == 0) {
5099 goto exit;
5100 }
5101
5102 if (dhd->max_dtim_enable) {
5103 bcn_li_dtim =
5104 (int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
5105 if (bcn_li_dtim == 0) {
5106 bcn_li_dtim = 1;
5107 }
5108 } else {
5109 /* attempt to use platform-defined dtim skip interval */
5110 bcn_li_dtim = dhd->suspend_bcn_li_dtim;
5111
5112 /* check if sta listen interval fits into AP dtim */
5113 if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
5114 /* AP DTIM too big for our Listen Interval : no dtim skipping */
5115 bcn_li_dtim = NO_DTIM_SKIP;
5116 DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
5117 __FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
5118 goto exit;
5119 }
5120
5121 if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
5122 allowed_skip_dtim_cnt =
5123 MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
5124 bcn_li_dtim =
5125 (allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
5126 }
5127
5128 if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
5129 /* Round up dtim_skip to fit into STAs Listen Interval */
5130 bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
5131 DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
5132 }
5133 }
5134
5135 DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
5136 __FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));
5137
5138 exit:
5139 return bcn_li_dtim;
5140 }
5141 #endif /* OEM_ANDROID && BCMPCIE */
5142
5143 #ifdef CONFIG_SILENT_ROAM
5144 int
5145 dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
5146 {
5147 int ret = BCME_OK;
5148 wlc_sroam_t *psroam;
5149 wlc_sroam_info_t *sroam;
5150 uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;
5151
5152 /* Check if associated */
5153 if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
5154 DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
5155 return ret;
5156 }
5157
5158 if (set && (dhd->op_mode &
5159 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
5160 DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
5161 return ret;
5162 }
5163
5164 if (!dhd->sroam_turn_on) {
5165 DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
5166 return ret;
5167 }
5168 psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
5169 if (!psroam) {
5170 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
5171 return BCME_NOMEM;
5172 }
5173
5174 ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
5175 if (ret < 0) {
5176 DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
5177 goto done;
5178 }
5179
5180 if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
5181 ret = BCME_VERSION;
5182 goto done;
5183 }
5184
5185 sroam = (wlc_sroam_info_t *)psroam->data;
5186 sroam->sroam_on = set;
5187 DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));
5188
5189 ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
5190 if (ret < 0) {
5191 DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
5192 }
5193
5194 done:
5195 if (psroam) {
5196 MFREE(dhd->osh, psroam, sroamlen);
5197 }
5198 return ret;
5199
5200 }
5201 #endif /* CONFIG_SILENT_ROAM */
5202
5203 /* Check if the mode supports STA MODE */
5204 bool dhd_support_sta_mode(dhd_pub_t *dhd)
5205 {
5206
5207 #ifdef WL_CFG80211
5208 if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
5209 return FALSE;
5210 else
5211 #endif /* WL_CFG80211 */
5212 return TRUE;
5213 }
5214
5215 #if defined(KEEP_ALIVE)
5216 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
5217 {
5218 char buf[32] = {0};
5219 const char *str;
5220 wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
5221 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
5222 int buf_len;
5223 int str_len;
5224 int res = -1;
5225
5226 if (!dhd_support_sta_mode(dhd))
5227 return res;
5228
5229 DHD_TRACE(("%s execution\n", __FUNCTION__));
5230
5231 str = "mkeep_alive";
5232 str_len = strlen(str);
5233 strncpy(buf, str, sizeof(buf) - 1);
5234 buf[ sizeof(buf) - 1 ] = '\0';
5235 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
5236 mkeep_alive_pkt.period_msec = CUSTOM_KEEP_ALIVE_SETTING;
5237 buf_len = str_len + 1;
5238 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
5239 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
5240 /* Setup keep alive zero for null packet generation */
5241 mkeep_alive_pkt.keep_alive_id = 0;
5242 mkeep_alive_pkt.len_bytes = 0;
5243 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
5244 bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
5245 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
5246 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
5247 * guarantee that the buffer is properly aligned.
5248 */
5249 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
5250
5251 res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
5252
5253 return res;
5254 }
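/*
 * Sketch of the "mkeep_alive" ioctl buffer assembled above for the NULL
 * keep-alive (id 0, zero-length payload):
 *
 *	buf: "mkeep_alive\0" | wl_mkeep_alive_pkt_t {
 *		version       = WL_MKEEP_ALIVE_VERSION,
 *		length        = WL_MKEEP_ALIVE_FIXED_LEN,
 *		period_msec   = CUSTOM_KEEP_ALIVE_SETTING,
 *		keep_alive_id = 0,
 *		len_bytes     = 0 }
 *	buf_len = strlen("mkeep_alive") + 1 + WL_MKEEP_ALIVE_FIXED_LEN
 *
 * The struct is built in a local and memcpy'ed in because the offset after
 * the iovar name is not guaranteed to be aligned for direct member writes.
 */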
5255 #endif /* defined(KEEP_ALIVE) */
5256 #if defined(OEM_ANDROID)
5257 #define CSCAN_TLV_TYPE_SSID_IE 'S'
5258 /*
5259 * SSIDs list parsing from cscan tlv list
5260 */
5261 int
5262 wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
5263 {
5264 char* str;
5265 int idx = 0;
5266 uint8 len;
5267
5268 if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
5269 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5270 return BCME_BADARG;
5271 }
5272 str = *list_str;
5273 while (*bytes_left > 0) {
5274 if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
5275 *list_str = str;
5276 DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5277 return idx;
5278 }
5279
5280 if (idx >= max) {
5281 DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
5282 return BCME_BADARG;
5283 }
5284
5285 /* Get proper CSCAN_TLV_TYPE_SSID_IE */
5286 *bytes_left -= 1;
5287 if (*bytes_left == 0) {
5288 DHD_ERROR(("%s no length field.\n", __FUNCTION__));
5289 return BCME_BADARG;
5290 }
5291 str += 1;
5292 ssid[idx].rssi_thresh = 0;
5293 ssid[idx].flags = 0;
5294 len = str[0];
5295 if (len == 0) {
5296 /* Broadcast SSID */
5297 ssid[idx].SSID_len = 0;
5298 memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
5299 *bytes_left -= 1;
5300 str += 1;
5301
5302 DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
5303 } else if (len <= DOT11_MAX_SSID_LEN) {
5304 /* Get proper SSID size */
5305 ssid[idx].SSID_len = len;
5306 *bytes_left -= 1;
5307 /* Get SSID */
5308 if (ssid[idx].SSID_len > *bytes_left) {
5309 DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
5310 __FUNCTION__, ssid[idx].SSID_len, *bytes_left));
5311 return BCME_BADARG;
5312 }
5313 str += 1;
5314 memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);
5315
5316 *bytes_left -= ssid[idx].SSID_len;
5317 str += ssid[idx].SSID_len;
5318 ssid[idx].hidden = TRUE;
5319
5320 DHD_TRACE(("%s :size=%d left=%d\n",
5321 (char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
5322 } else {
5323 DHD_ERROR(("### SSID size more than %d\n", str[0]));
5324 return BCME_BADARG;
5325 }
5326 idx++;
5327 }
5328
5329 *list_str = str;
5330 return idx;
5331 }
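/*
 * Byte layout of the cscan SSID TLVs consumed above (illustrative; the
 * surrounding cscan command framing comes from the Android private command
 * path and is not shown here). Each entry is:
 *
 *	'S' | len (1 byte) | SSID bytes (len bytes, omitted when len == 0)
 *
 * e.g.  'S' 0x07 "example" 'S' 0x00  parses into two entries: a hidden
 * 7-byte SSID "example" and a broadcast (wildcard) SSID; the function then
 * returns 2 with *bytes_left reduced accordingly.
 */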
5332 /* Android ComboSCAN support */
5333
5334 /*
5335 * data parsing from ComboScan tlv list
5336 */
5337 int
5338 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
5339 int input_size, int *bytes_left)
5340 {
5341 char* str;
5342 uint16 short_temp;
5343 uint32 int_temp;
5344
5345 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5346 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5347 return -1;
5348 }
5349 str = *list_str;
5350
5351 /* Clean all dest bytes */
5352 memset(dst, 0, dst_size);
5353 if (*bytes_left > 0) {
5354
5355 if (str[0] != token) {
5356 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
5357 __FUNCTION__, token, str[0], *bytes_left));
5358 return -1;
5359 }
5360
5361 *bytes_left -= 1;
5362 str += 1;
5363
5364 if (input_size == 1) {
5365 memcpy(dst, str, input_size);
5366 }
5367 else if (input_size == 2) {
5368 memcpy(&short_temp, str, input_size);
5369 short_temp = htod16(short_temp); memcpy(dst, &short_temp, input_size);
5370 }
5371 else if (input_size == 4) {
5372 memcpy(&int_temp, str, input_size);
5373 int_temp = htod32(int_temp); memcpy(dst, &int_temp, input_size);
5374 }
5375
5376 *bytes_left -= input_size;
5377 str += input_size;
5378 *list_str = str;
5379 return 1;
5380 }
5381 return 1;
5382 }
5383
5384 /*
5385 * channel list parsing from cscan tlv list
5386 */
5387 int
5388 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
5389 int channel_num, int *bytes_left)
5390 {
5391 char* str;
5392 int idx = 0;
5393
5394 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
5395 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
5396 return -1;
5397 }
5398 str = *list_str;
5399
5400 while (*bytes_left > 0) {
5401
5402 if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
5403 *list_str = str;
5404 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
5405 return idx;
5406 }
5407 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
5408 *bytes_left -= 1;
5409 str += 1;
5410
5411 if (str[0] == 0) {
5412 /* All channels */
5413 channel_list[idx] = 0x0;
5414 }
5415 else {
5416 channel_list[idx] = (uint16)str[0];
5417 DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
5418 }
5419 *bytes_left -= 1;
5420 str += 1;
5421
5422 if (idx++ > 255) {
5423 DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
5424 return -1;
5425 }
5426 }
5427
5428 *list_str = str;
5429 return idx;
5430 }
5431
5432 /* Parse a comma-separated list from list_str into ssid array, starting
5433 * at index idx. Max specifies size of the ssid array. Parses ssids
5434 * and returns the updated idx; if the returned idx >= max, not all SSIDs
5435 * fit and the excess were not copied. Returns -1 on empty string or on ssid too long.
5436 */
5437 int
5438 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
5439 {
5440 char* str, *ptr;
5441
5442 if ((list_str == NULL) || (*list_str == NULL))
5443 return -1;
5444
5445 for (str = *list_str; str != NULL; str = ptr) {
5446
5447 /* check for next TAG */
5448 if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
5449 *list_str = str + strlen(GET_CHANNEL);
5450 return idx;
5451 }
5452
5453 if ((ptr = strchr(str, ',')) != NULL) {
5454 *ptr++ = '\0';
5455 }
5456
5457 if (strlen(str) > DOT11_MAX_SSID_LEN) {
5458 DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
5459 return -1;
5460 }
5461
5462 if (strlen(str) == 0)
5463 ssid[idx].SSID_len = 0;
5464
5465 if (idx < max) {
5466 bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
5467 strncpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID) - 1);
5468 ssid[idx].SSID_len = strlen(str);
5469 }
5470 idx++;
5471 }
5472 return idx;
5473 }
5474
5475 /*
5476 * Parse channel list from iwpriv CSCAN
5477 */
5478 int
5479 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
5480 {
5481 int num;
5482 int val;
5483 char* str;
5484 char* endptr = NULL;
5485
5486 if ((list_str == NULL)||(*list_str == NULL))
5487 return -1;
5488
5489 str = *list_str;
5490 num = 0;
5491 while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
5492 val = (int)strtoul(str, &endptr, 0);
5493 if (endptr == str) {
5494 printf("could not parse channel number starting at"
5495 " substring \"%s\" in list:\n%s\n",
5496 str, *list_str);
5497 return -1;
5498 }
5499 str = endptr + strspn(endptr, " ,");
5500
5501 if (num == channel_num) {
5502 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
5503 channel_num, *list_str));
5504 return -1;
5505 }
5506
5507 channel_list[num++] = (uint16)val;
5508 }
5509 *list_str = str;
5510 return num;
5511 }
5512 #endif /* defined(OEM_ANDROID) */
5513
5514 /* Given filename and download type, returns a buffer pointer and length
5515 * for download to f/w. Type can be FW or NVRAM.
5516 *
5517 */
5518 int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
5519 char ** buffer, int *length)
5520
5521 {
5522 int ret = BCME_ERROR;
5523 int len = 0;
5524 int file_len;
5525 void *image = NULL;
5526 uint8 *buf = NULL;
5527
5528 /* Point to cache if available. */
5529 /* No Valid cache found on this call */
5530 if (!len) {
5531 file_len = *length;
5532 *length = 0;
5533
5534 if (file_path) {
5535 image = dhd_os_open_image1(dhd, file_path);
5536 if (image == NULL) {
5537 goto err;
5538 }
5539 }
5540
5541 buf = MALLOCZ(dhd->osh, file_len);
5542 if (buf == NULL) {
5543 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
5544 __FUNCTION__, file_len));
5545 goto err;
5546 }
5547
5548 /* Download image */
5549 len = dhd_os_get_image_block((char *)buf, file_len, image);
5550 if ((len <= 0 || len > file_len)) {
5551 MFREE(dhd->osh, buf, file_len);
5552 goto err;
5553 }
5554 }
5555
5556 ret = BCME_OK;
5557 *length = len;
5558 *buffer = (char *)buf;
5559
5560 /* Cache if first call. */
5561
5562 err:
5563 if (image)
5564 dhd_os_close_image1(dhd, image);
5565
5566 return ret;
5567 }
5568
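/* Fill in the wl_dload_data_t header (flags, type, length, crc) at the
 * start of dload_buf, then send the buffer to the dongle with the named
 * iovar via WLC_SET_VAR.
 */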
5569 int
5570 dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
5571 unsigned char *dload_buf, int len)
5572 {
5573 struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
5574 int err = 0;
5575 int dload_data_offset;
5576 static char iovar_buf[WLC_IOCTL_MEDLEN];
5577 int iovar_len;
5578
5579 memset(iovar_buf, 0, sizeof(iovar_buf));
5580
5581 dload_data_offset = OFFSETOF(wl_dload_data_t, data);
5582 dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
5583 dload_ptr->dload_type = dload_type;
5584 dload_ptr->len = htod32(len - dload_data_offset);
5585 dload_ptr->crc = 0;
5586 len = ROUNDUP(len, 8);
5587
5588 iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
5589 (uint)len, iovar_buf, sizeof(iovar_buf));
5590 if (iovar_len == 0) {
5591 DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
5592 __FUNCTION__, iovar));
5593 return BCME_BUFTOOSHORT;
5594 }
5595
5596 err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
5597 iovar_len, IOV_SET, 0);
5598
5599 return err;
5600 }
5601
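/* Stream an image (e.g. a CLM blob) to the dongle in MAX_CHUNK_LEN
 * chunks using dhd_download_2_dongle(); the first chunk carries
 * DL_BEGIN and the final chunk carries DL_END so the firmware can
 * reassemble the blob.
 */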
5602 int
5603 dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
5604 uint32 len, char *iovar)
5605
5606 {
5607 int chunk_len;
5608 int size2alloc;
5609 unsigned char *new_buf;
5610 int err = 0, data_offset;
5611 uint16 dl_flag = DL_BEGIN;
5612
5613 data_offset = OFFSETOF(wl_dload_data_t, data);
5614 size2alloc = data_offset + MAX_CHUNK_LEN;
5615 size2alloc = ROUNDUP(size2alloc, 8);
5616
5617 if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
5618 do {
5619 chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
5620 MAX_CHUNK_LEN, buf);
5621 if (chunk_len < 0) {
5622 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
5623 __FUNCTION__, chunk_len));
5624 err = BCME_ERROR;
5625 goto exit;
5626 }
5627 if (len - chunk_len == 0)
5628 dl_flag |= DL_END;
5629
5630 err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
5631 new_buf, data_offset + chunk_len);
5632
5633 dl_flag &= ~DL_BEGIN;
5634
5635 len = len - chunk_len;
5636 } while ((len > 0) && (err == 0));
5637 } else {
5638 err = BCME_NOMEM;
5639 }
5640 exit:
5641 if (new_buf) {
5642 MFREE(dhd->osh, new_buf, size2alloc);
5643 }
5644 return err;
5645 }
5646
5647 int
5648 dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
5649 {
5650 return 0;
5651 }
5652
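/* Query the "country" iovar to infer whether the running firmware
 * already carries CLM data: returns TRUE if a real country code is set,
 * FALSE if the null country code is reported, or a negative BCME_xxx
 * error code if the query itself fails.
 */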
5653 int
5654 dhd_check_current_clm_data(dhd_pub_t *dhd)
5655 {
5656 char iovbuf[WLC_IOCTL_SMLEN];
5657 wl_country_t *cspec;
5658 int err = BCME_OK;
5659
5660 memset(iovbuf, 0, sizeof(iovbuf));
5661 err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
5662 if (err == 0) {
5663 err = BCME_BUFTOOSHORT;
5664 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
5665 return err;
5666 }
5667 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5668 if (err) {
5669 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
5670 return err;
5671 }
5672 cspec = (wl_country_t *)iovbuf;
5673 if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
5674 DHD_ERROR(("%s: ----- This FW does not include CLM data -----\n",
5675 __FUNCTION__));
5676 return FALSE;
5677 }
5678 DHD_ERROR(("%s: ----- This FW includes CLM data -----\n",
5679 __FUNCTION__));
5680 return TRUE;
5681 }
5682
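/* Pick the CLM blob path (module parameter if given, otherwise the
 * built-in default), download it with the "clmload" iovar if the file
 * exists, and finally verify that a valid country code is present.
 */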
5683 int
5684 dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
5685 {
5686 #ifdef DHD_WITHOUT_CLM_SUPPORT
5687 return BCME_OK;
5688 #else /* DHD_WITHOUT_CLM_SUPPORT */
5689 char *clm_blob_path;
5690 int len;
5691 char *memblock = NULL;
5692 int err = BCME_OK;
5693 char iovbuf[WLC_IOCTL_SMLEN];
5694 int status = FALSE;
5695
5696 if (clm_path[0] != '\0') {
5697 if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
5698 DHD_ERROR(("clm path exceeds max len\n"));
5699 return BCME_ERROR;
5700 }
5701 clm_blob_path = clm_path;
5702 DHD_TRACE(("clm path from module param:%s\n", clm_path));
5703 } else {
5704 clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
5705 }
5706
5707 /* If CLM blob file is found on the filesystem, download the file.
5708 * After CLM file download or If the blob file is not present,
5709 * validate the country code before proceeding with the initialization.
5710 * If country code is not valid, fail the initialization.
5711 */
5712 memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
5713 if (memblock == NULL) {
5714 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5715 if (dhd->is_blob) {
5716 err = BCME_ERROR;
5717 } else {
5718 status = dhd_check_current_clm_data(dhd);
5719 if (status == TRUE) {
5720 err = BCME_OK;
5721 } else {
5722 err = status;
5723 }
5724 }
5725 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5726 goto exit;
5727 }
5728
5729 len = dhd_os_get_image_size(memblock);
5730
5731 if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
5732 status = dhd_check_current_clm_data(dhd);
5733 if (status == TRUE) {
5734 #if defined(DHD_BLOB_EXISTENCE_CHECK)
5735 if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
5736 if (dhd->is_blob) {
5737 err = BCME_ERROR;
5738 }
5739 goto exit;
5740 }
5741 #else
5742 DHD_ERROR(("%s: CLM already exists in F/W, "
5743 "new CLM data will be added to the end of existing CLM data!\n",
5744 __FUNCTION__));
5745 #endif /* DHD_BLOB_EXISTENCE_CHECK */
5746 } else if (status != FALSE) {
5747 err = status;
5748 goto exit;
5749 }
5750
5751 /* Found blob file. Download the file */
5752 DHD_TRACE(("clm file download from %s \n", clm_blob_path));
5753 err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
5754 if (err) {
5755 DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
5756 /* Retrieve clmload_status and print */
5757 memset(iovbuf, 0, sizeof(iovbuf));
5758 len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
5759 if (len == 0) {
5760 err = BCME_BUFTOOSHORT;
5761 goto exit;
5762 }
5763 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
5764 if (err) {
5765 DHD_ERROR(("%s: clmload_status get failed err=%d \n",
5766 __FUNCTION__, err));
5767 } else {
5768 DHD_ERROR(("%s: clmload_status: %d \n",
5769 __FUNCTION__, *((int *)iovbuf)));
5770 if (*((int *)iovbuf) == CHIPID_MISMATCH) {
5771 DHD_ERROR(("Chip ID mismatch error \n"));
5772 }
5773 }
5774 err = BCME_ERROR;
5775 goto exit;
5776 } else {
5777 DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
5778 }
5779 } else {
5780 DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
5781 }
5782
5783 /* Verify country code */
5784 status = dhd_check_current_clm_data(dhd);
5785
5786 if (status != TRUE) {
5787 /* Country code not initialized or CLM download not proper */
5788 DHD_ERROR(("country code not initialized\n"));
5789 err = status;
5790 }
5791 exit:
5792
5793 if (memblock) {
5794 dhd_os_close_image1(dhd, memblock);
5795 }
5796
5797 return err;
5798 #endif /* DHD_WITHOUT_CLM_SUPPORT */
5799 }
5800
5801 void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
5802 {
5803 MFREE(dhd->osh, buffer, length);
5804 }
5805
5806 #ifdef SHOW_LOGTRACE
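/* Parse logstrs.bin into an array of format strings for event log
 * tracing. Supports the legacy headerless layout as well as trailer
 * versions 1 and 2; version 2 additionally requires the FWID embedded
 * in the firmware binary to match the one recorded in logstrs.bin.
 */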
5807 int
5808 dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
5809 dhd_event_log_t *event_log)
5810 {
5811 uint32 *lognums = NULL;
5812 char *logstrs = NULL;
5813 logstr_trailer_t *trailer = NULL;
5814 int ram_index = 0;
5815 char **fmts = NULL;
5816 int num_fmts = 0;
5817 bool match_fail = TRUE;
5818 int32 i = 0;
5819 uint8 *pfw_id = NULL;
5820 uint32 fwid = 0;
5821 void *file = NULL;
5822 int file_len = 0;
5823 char fwid_str[FWID_STR_LEN];
5824 uint32 hdr_logstrs_size = 0;
5825
5826 /* Read last three words in the logstrs.bin file */
5827 trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
5828 sizeof(logstr_trailer_t));
5829
5830 if (trailer->log_magic == LOGSTRS_MAGIC) {
5831 /*
5832 * logstrs.bin has a header.
5833 */
5834 if (trailer->version == 1) {
5835 logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
5836 logstrs_size - sizeof(logstr_header_v1_t));
5837 DHD_INFO(("%s: logstr header version = %u\n",
5838 __FUNCTION__, hdr_v1->version));
5839 num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
5840 ram_index = (hdr_v1->ram_lognums_offset -
5841 hdr_v1->rom_lognums_offset) / sizeof(uint32);
5842 lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
5843 logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
5844 hdr_logstrs_size = hdr_v1->logstrs_size;
5845 } else if (trailer->version == 2) {
5846 logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
5847 sizeof(logstr_header_t));
5848 DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
5849 __FUNCTION__, hdr->trailer.version, hdr->trailer.flags));
5850
5851 /* For ver. 2 of the header, need to match fwid of
5852 * both logstrs.bin and fw bin
5853 */
5854
5855 /* read the FWID from fw bin */
5856 file = dhd_os_open_image1(NULL, st_str_file_path);
5857 if (!file) {
5858 DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
5859 goto error;
5860 }
5861 file_len = dhd_os_get_image_size(file);
5862 if (file_len <= 0) {
5863 DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
5864 goto error;
5865 }
5866 /* fwid is at the end of fw bin in string format */
5867 if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
5868 DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
5869 goto error;
5870 }
5871
5872 memset(fwid_str, 0, sizeof(fwid_str));
5873 if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
5874 DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
5875 goto error;
5876 }
5877 pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5878 FWID_STR_1, strlen(FWID_STR_1));
5879 if (!pfw_id) {
5880 pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
5881 FWID_STR_2, strlen(FWID_STR_2));
5882 if (!pfw_id) {
5883 DHD_ERROR(("%s: could not find id in FW bin!\n",
5884 __FUNCTION__));
5885 goto error;
5886 }
5887 }
5888 /* search for the '-' in the fw id str, after which the
5889 * actual 4 byte fw id is present
5890 */
5891 while (pfw_id && *pfw_id != '-') {
5892 ++pfw_id;
5893 }
5894 ++pfw_id;
5895 fwid = bcm_strtoul((char *)pfw_id, NULL, 16);
5896
5897 /* check if fw id in logstrs.bin matches the fw one */
5898 if (hdr->trailer.fw_id != fwid) {
5899 DHD_ERROR(("%s: logstr id does not match FW! "
5900 "logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
5901 __FUNCTION__, hdr->trailer.fw_id, fwid));
5902 goto error;
5903 }
5904
5905 match_fail = FALSE;
5906 num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
5907 ram_index = (hdr->ram_lognums_offset -
5908 hdr->rom_lognums_offset) / sizeof(uint32);
5909 lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
5910 logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
5911 hdr_logstrs_size = hdr->logstrs_size;
5912
5913 error:
5914 if (file) {
5915 dhd_os_close_image1(NULL, file);
5916 }
5917 if (match_fail) {
5918 return BCME_DECERR;
5919 }
5920 } else {
5921 DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
5922 trailer->version));
5923 return BCME_ERROR;
5924 }
5925 if (logstrs_size != hdr_logstrs_size) {
5926 DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
5927 return BCME_ERROR;
5928 }
5929 } else {
5930 /*
5931 * Legacy logstrs.bin format without header.
5932 */
5933 num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);
5934
5935 /* Legacy RAM-only logstrs.bin format:
5936 * - RAM 'lognums' section
5937 * - RAM 'logstrs' section.
5938 *
5939 * 'lognums' is an array of indexes for the strings in the
5940 * 'logstrs' section. The first uint32 is an index to the
5941 * start of 'logstrs'. Therefore, if this index is divided
5942 * by 'sizeof(uint32)' it provides the number of logstr
5943 * entries.
5944 */
5945 ram_index = 0;
5946 lognums = (uint32 *) raw_fmts;
5947 logstrs = (char *) &raw_fmts[num_fmts << 2];
5948 }
5949 if (num_fmts)
5950 fmts = MALLOC(osh, num_fmts * sizeof(char *));
5951 if (fmts == NULL) {
5952 DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
5953 return BCME_ERROR;
5954 }
5955 event_log->fmts_size = num_fmts * sizeof(char *);
5956
5957 for (i = 0; i < num_fmts; i++) {
5958 /* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
5959 * (they are 0-indexed relative to 'rom_logstrs_offset').
5960 *
5961 * RAM lognums are already indexed to point to the correct RAM logstrs (they
5962 * are 0-indexed relative to the start of the logstrs.bin file).
5963 */
5964 if (i == ram_index) {
5965 logstrs = raw_fmts;
5966 }
5967 fmts[i] = &logstrs[lognums[i]];
5968 }
5969 event_log->fmts = fmts;
5970 event_log->raw_fmts_size = logstrs_size;
5971 event_log->raw_fmts = raw_fmts;
5972 event_log->num_fmts = num_fmts;
5973 return BCME_OK;
5974 } /* dhd_parse_logstrs_file */
5975
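/* Scan the firmware map file in READ_NUM_BYTES chunks for the
 * text_start, rodata_start and rodata_end symbols and return their
 * addresses. The file position is rewound between reads so a symbol
 * split across chunk boundaries is still found.
 */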
5976 int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
5977 uint32 *rodata_end)
5978 {
5979 char *raw_fmts = NULL, *raw_fmts_loc = NULL;
5980 uint32 read_size = READ_NUM_BYTES;
5981 int error = 0;
5982 char * cptr = NULL;
5983 char c;
5984 uint8 count = 0;
5985
5986 *ramstart = 0;
5987 *rodata_start = 0;
5988 *rodata_end = 0;
5989
5990 /* Allocate 1 byte more than read_size to terminate it with NULL */
5991 raw_fmts = MALLOCZ(osh, read_size + 1);
5992 if (raw_fmts == NULL) {
5993 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
5994 goto fail;
5995 }
5996
5997 /* read ram start, rodata_start and rodata_end values from map file */
5998 while (count != ALL_MAP_VAL)
5999 {
6000 error = dhd_os_read_file(file, raw_fmts, read_size);
6001 if (error < 0) {
6002 DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
6003 error));
6004 goto fail;
6005 }
6006
6007 /* End raw_fmts with NULL as strstr expects NULL terminated strings */
6008 raw_fmts[read_size] = '\0';
6009
6010 /* Get ramstart address */
6011 raw_fmts_loc = raw_fmts;
6012 if (!(count & RAMSTART_BIT) &&
6013 (cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
6014 strlen(ramstart_str)))) {
6015 cptr = cptr - BYTES_AHEAD_NUM;
6016 sscanf(cptr, "%x %c text_start", ramstart, &c);
6017 count |= RAMSTART_BIT;
6018 }
6019
6020 /* Get ram rodata start address */
6021 raw_fmts_loc = raw_fmts;
6022 if (!(count & RDSTART_BIT) &&
6023 (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
6024 strlen(rodata_start_str)))) {
6025 cptr = cptr - BYTES_AHEAD_NUM;
6026 sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
6027 count |= RDSTART_BIT;
6028 }
6029
6030 /* Get ram rodata end address */
6031 raw_fmts_loc = raw_fmts;
6032 if (!(count & RDEND_BIT) &&
6033 (cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
6034 strlen(rodata_end_str)))) {
6035 cptr = cptr - BYTES_AHEAD_NUM;
6036 sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
6037 count |= RDEND_BIT;
6038 }
6039
6040 if (error < (int)read_size) {
6041 /*
6042 * The file position is rewound by GO_BACK_FILE_POS_NUM_BYTES after
6043 * every full read (see below) so that a string split across two
6044 * reads is not missed. Because of that rewind, EOF is detected
6045 * here instead: a read shorter than read_size means the end of the
6046 * file was reached, so stop reading.
6047 */
6048 break;
6049 }
6050 memset(raw_fmts, 0, read_size);
6051 /*
6052 * go back by a predefined number of bytes so that the string and
6053 * address are not missed even if they are split across two reads.
6054 */
6055 dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
6056 }
6057
6058 fail:
6059 if (raw_fmts) {
6060 MFREE(osh, raw_fmts, read_size + 1);
6061 raw_fmts = NULL;
6062 }
6063 if (count == ALL_MAP_VAL) {
6064 return BCME_OK;
6065 }
6066 else {
6067 DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
6068 count));
6069 return BCME_ERROR;
6070 }
6071
6072 } /* dhd_parse_map_file */
6073
6074 #ifdef PCIE_FULL_DONGLE
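/* Validate and unwrap a version-1 info buffer received over the PCIe
 * info ring and hand its logtrace payload to the debug trace handler.
 */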
6075 int
6076 dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
6077 dhd_event_log_t *event_data)
6078 {
6079 uint32 infobuf_version;
6080 info_buf_payload_hdr_t *payload_hdr_ptr;
6081 uint16 payload_hdr_type;
6082 uint16 payload_hdr_length;
6083
6084 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
6085
6086 if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
6087 DHD_ERROR(("%s: infobuf too small for version field\n",
6088 __FUNCTION__));
6089 goto exit;
6090 }
6091 infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
6092 PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
6093 if (infobuf_version != PCIE_INFOBUF_V1) {
6094 DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
6095 __FUNCTION__, infobuf_version));
6096 goto exit;
6097 }
6098
6099 /* Version 1 infobuf has a single type/length (and then value) field */
6100 if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
6101 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
6102 __FUNCTION__));
6103 goto exit;
6104 }
6105 /* Process/parse the common info payload header (type/length) */
6106 payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
6107 payload_hdr_type = ltoh16(payload_hdr_ptr->type);
6108 payload_hdr_length = ltoh16(payload_hdr_ptr->length);
6109 if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
6110 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
6111 __FUNCTION__, payload_hdr_type));
6112 goto exit;
6113 }
6114 PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));
6115
6116 /* Validate that the specified length isn't bigger than the
6117 * provided data.
6118 */
6119 if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
6120 DHD_ERROR(("%s: infobuf logtrace length is bigger"
6121 " than actual buffer data\n", __FUNCTION__));
6122 goto exit;
6123 }
6124 dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
6125 event_data, payload_hdr_length);
6126
6127 return BCME_OK;
6128
6129 exit:
6130 return BCME_ERROR;
6131 } /* dhd_event_logtrace_infobuf_pkt_process */
6132 #endif /* PCIE_FULL_DONGLE */
6133 #endif /* SHOW_LOGTRACE */
6134
6135 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
6136
6137 /* Handle a TDLS event in dhd_common.c by updating the peer table.
6138 */
6139 int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
6140 {
6141 int ret = BCME_OK;
6142 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6143 #pragma GCC diagnostic push
6144 #pragma GCC diagnostic ignored "-Wcast-qual"
6145 #endif // endif
6146 ret = dhd_tdls_update_peer_info(dhd_pub, event);
6147 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
6148 #pragma GCC diagnostic pop
6149 #endif // endif
6150 return ret;
6151 }
6152
6153 int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
6154 {
6155 tdls_peer_node_t *cur = NULL, *prev = NULL;
6156 if (!dhd_pub)
6157 return BCME_ERROR;
6158 cur = dhd_pub->peer_tbl.node;
6159
6160 if ((dhd_pub->peer_tbl.node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
6161 return BCME_ERROR;
6162
6163 while (cur != NULL) {
6164 prev = cur;
6165 cur = cur->next;
6166 MFREE(dhd_pub->osh, prev, sizeof(tdls_peer_node_t));
6167 }
6168 dhd_pub->peer_tbl.tdls_peer_count = 0;
6169 dhd_pub->peer_tbl.node = NULL;
6170 return BCME_OK;
6171 }
6172 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
6173
6174 /* pretty hex print a contiguous buffer
6175 * based on the debug level specified
6176 */
6177 void
6178 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
6179 {
6180 char line[128], *p;
6181 int len = sizeof(line);
6182 int nchar;
6183 uint i;
6184
6185 if (msg && (msg[0] != '\0')) {
6186 if (dbg_level == DHD_ERROR_VAL)
6187 DHD_ERROR(("%s:\n", msg));
6188 else if (dbg_level == DHD_INFO_VAL)
6189 DHD_INFO(("%s:\n", msg));
6190 else if (dbg_level == DHD_TRACE_VAL)
6191 DHD_TRACE(("%s:\n", msg));
6192 }
6193
6194 p = line;
6195 for (i = 0; i < nbytes; i++) {
6196 if (i % 16 == 0) {
6197 nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
6198 p += nchar;
6199 len -= nchar;
6200 }
6201 if (len > 0) {
6202 nchar = snprintf(p, len, "%02x ", buf[i]);
6203 p += nchar;
6204 len -= nchar;
6205 }
6206
6207 if (i % 16 == 15) {
6208 /* flush line */
6209 if (dbg_level == DHD_ERROR_VAL)
6210 DHD_ERROR(("%s:\n", line));
6211 else if (dbg_level == DHD_INFO_VAL)
6212 DHD_INFO(("%s:\n", line));
6213 else if (dbg_level == DHD_TRACE_VAL)
6214 DHD_TRACE(("%s:\n", line));
6215 p = line;
6216 len = sizeof(line);
6217 }
6218 }
6219
6220 /* flush last partial line */
6221 if (p != line) {
6222 if (dbg_level == DHD_ERROR_VAL)
6223 DHD_ERROR(("%s:\n", line));
6224 else if (dbg_level == DHD_INFO_VAL)
6225 DHD_INFO(("%s:\n", line));
6226 else if (dbg_level == DHD_TRACE_VAL)
6227 DHD_TRACE(("%s:\n", line));
6228 }
6229 }
6230
6231 #ifndef OEM_ANDROID
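/* Host-driven throughput test. In TX mode the routine builds tput test
 * packets (optionally with an Ethernet header), CRC-protects the packet
 * body, sends the packets in batches bounded by the available tx
 * buffers, and waits for completions; in RX mode it simply waits for
 * the peer's packets. The measured rate is reported back in
 * tput_data->tput_bps.
 */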
6232 int
6233 dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
6234 {
6235 struct ether_header ether_hdr;
6236 tput_pkt_t tput_pkt;
6237 void *pkt = NULL;
6238 uint8 *pktdata = NULL;
6239 uint32 pktsize = 0;
6240 uint64 total_size = 0;
6241 uint32 *crc = 0;
6242 uint32 pktid = 0;
6243 uint32 total_num_tx_pkts = 0;
6244 int err = 0, err_exit = 0;
6245 uint32 i = 0;
6246 uint64 time_taken = 0;
6247 int max_txbufs = 0;
6248 uint32 n_batches = 0;
6249 uint32 n_remain = 0;
6250 uint8 tput_pkt_hdr_size = 0;
6251 bool batch_cnt = FALSE;
6252 bool tx_stop_pkt = FALSE;
6253
6254 if (tput_data->version != TPUT_TEST_T_VER ||
6255 tput_data->length != TPUT_TEST_T_LEN) {
6256 DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
6257 err_exit = BCME_BADARG;
6258 goto exit_error;
6259 }
6260
6261 if (dhd->tput_data.tput_test_running) {
6262 DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
6263 err_exit = BCME_BUSY;
6264 goto exit_error;
6265 }
6266 #ifdef PCIE_FULL_DONGLE
6267 /*
6268 * 100 bytes to accommodate ether header and tput header. As of today
6269 * both occupy 30 bytes. Rest is reserved.
6270 */
6271 if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
6272 (tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
6273 DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
6274 __FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
6275 (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
6276 err_exit = BCME_BUFTOOLONG;
6277 goto exit_error;
6278 }
6279 #endif // endif
6280 max_txbufs = dhd_get_max_txbufs(dhd);
6281 max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);
6282
6283 if (!(tput_data->num_pkts > 0)) {
6284 DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
6285 __FUNCTION__, tput_data->num_pkts));
6286 err_exit = BCME_ERROR;
6287 goto exit_error;
6288 }
6289
6290 memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
6291 memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
6292 dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
6293 dhd->tput_data.pkts_cmpl = 0;
6294 dhd->tput_start_ts = dhd->tput_stop_ts = 0;
6295
6296 if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
6297 pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
6298 (tput_data->payload_size - 12);
6299 } else {
6300 pktsize = sizeof(tput_pkt_t) +
6301 (tput_data->payload_size - 12);
6302 }
6303
6304 tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
6305 (uint8 *)&tput_pkt.mac_sta);
6306
6307 /* mark the tput test as started */
6308 dhd->tput_data.tput_test_running = TRUE;
6309
6310 if (tput_data->direction == TPUT_DIR_TX) {
6311 /* for ethernet header */
6312 memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
6313 memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
6314 ether_hdr.ether_type = hton16(ETHER_TYPE_IP);
6315
6316 /* fill in the tput pkt */
6317 memset(&tput_pkt, 0, sizeof(tput_pkt));
6318 memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
6319 memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
6320 tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
6321 tput_pkt.num_pkts = hton32(tput_data->num_pkts);
6322
6323 if (tput_data->num_pkts > (uint32)max_txbufs) {
6324 n_batches = tput_data->num_pkts / max_txbufs;
6325 n_remain = tput_data->num_pkts % max_txbufs;
6326 } else {
6327 n_batches = 0;
6328 n_remain = tput_data->num_pkts;
6329 }
6330 DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
6331 __FUNCTION__, tput_data->num_pkts, n_batches, n_remain));
6332
6333 do {
6334 /* reset before every batch */
6335 dhd->batch_tx_pkts_cmpl = 0;
6336 if (n_batches) {
6337 dhd->batch_tx_num_pkts = max_txbufs;
6338 --n_batches;
6339 } else if (n_remain) {
6340 dhd->batch_tx_num_pkts = n_remain;
6341 n_remain = 0;
6342 } else {
6343 DHD_ERROR(("Invalid. This should not hit\n"));
6344 }
6345
6346 dhd->tput_start_ts = OSL_SYSUPTIME_US();
6347 for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
6348 pkt = PKTGET(dhd->osh, pktsize, TRUE);
6349 if (!pkt) {
6350 dhd->tput_data.tput_test_running = FALSE;
6351 DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
6352 __FUNCTION__));
6353 DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
6354 __FUNCTION__, dhd->tput_data.pkts_good,
6355 dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
6356 err_exit = BCME_NOMEM;
6357 goto exit_error;
6358 }
6359 pktdata = PKTDATA(dhd->osh, pkt);
6360 PKTSETLEN(dhd->osh, pkt, pktsize);
6361 memset(pktdata, 0, pktsize);
6362 if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
6363 memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
6364 pktdata += sizeof(ether_hdr);
6365 }
6366 /* send stop pkt as last pkt */
6367 if (tx_stop_pkt) {
6368 tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
6369 tx_stop_pkt = FALSE;
6370 } else
6371 tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
6372 tput_pkt.pkt_id = hton32(pktid++);
6373 tput_pkt.crc32 = 0;
6374 memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
6375 /* compute crc32 over the pkt-id, num-pkts and data fields */
6376 crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
6377 *crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
6378 8 + (tput_data->payload_size - 12),
6379 CRC32_INIT_VALUE));
6380
6381 err = dhd_sendpkt(dhd, 0, pkt);
6382 if (err != BCME_OK) {
6383 DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
6384 __FUNCTION__, pktid, err));
6385 dhd->tput_data.pkts_bad++;
6386 }
6387 total_num_tx_pkts++;
6388 if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
6389 tx_stop_pkt = TRUE;
6390 }
6391 }
6392 DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
6393 if (!dhd_os_tput_test_wait(dhd, NULL,
6394 TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
6395 dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6396 dhd->tput_data.tput_test_running = FALSE;
6397 DHD_ERROR(("%s: TX completion timeout !"
6398 " Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
6399 __FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
6400 err_exit = BCME_ERROR;
6401 goto exit_error;
6402 }
6403 if (dhd->tput_start_ts && dhd->tput_stop_ts &&
6404 (dhd->tput_stop_ts > dhd->tput_start_ts)) {
6405 time_taken += dhd->tput_stop_ts - dhd->tput_start_ts;
6406 } else {
6407 dhd->tput_data.tput_test_running = FALSE;
6408 DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
6409 __FUNCTION__));
6410 err_exit = BCME_ERROR;
6411 goto exit_error;
6412 }
6413 if (n_batches || n_remain) {
6414 batch_cnt = TRUE;
6415 } else {
6416 batch_cnt = FALSE;
6417 }
6418 } while (batch_cnt);
6419 } else {
6420 /* TPUT_DIR_RX */
6421 DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
6422 if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
6423 DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
6424 dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6425 }
6426 }
6427
6428 /* calculate the throughput in bits per sec */
6429 if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
6430 (dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
6431 if (!time_taken) {
6432 time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
6433 }
6434 time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
6435 dhd->tput_data.time_ms = time_taken;
6436 if (time_taken) {
6437 total_size = (pktsize * dhd->tput_data.pkts_cmpl * 8);
6438 dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
6439 /* convert from ms to seconds */
6440 dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * MSEC_PER_SEC;
6441 }
6442 } else {
6443 DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
6444 }
6445 DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
6446 dhd->tput_data.tput_bps, dhd->tput_data.time_ms));
6447 memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));
6448
6449 dhd->tput_data.tput_test_running = FALSE;
6450
6451 err_exit = BCME_OK;
6452
6453 exit_error:
6454 DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
6455 __FUNCTION__, dhd->tput_data.pkts_good,
6456 dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
6457
6458 return err_exit;
6459 }
6460
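/* Per-packet receive hook for the throughput test: time-stamps the
 * first packet, drops packets with mismatched MAC addresses or a bad
 * CRC, counts good packets, and wakes the waiting test thread once the
 * stop packet or the expected packet count is seen.
 */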
6461 void
6462 dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
6463 {
6464 uint8 *pktdata = NULL;
6465 tput_pkt_t *tput_pkt = NULL;
6466 uint32 crc = 0;
6467 uint8 tput_pkt_hdr_size = 0;
6468
6469 pktdata = PKTDATA(dhd->osh, pkt);
6470 if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
6471 pktdata += sizeof(struct ether_header);
6472 tput_pkt = (tput_pkt_t *)pktdata;
6473
6474 /* record the timestamp of the first packet received */
6475 if (dhd->tput_data.pkts_cmpl == 0) {
6476 dhd->tput_start_ts = OSL_SYSUPTIME_US();
6477 }
6478
6479 if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
6480 dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
6481 dhd->tput_data.pkts_cmpl++;
6482 }
6483 /* drop rx packets received beyond the specified # */
6484 if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
6485 return;
6486
6487 DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
6488 ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));
6489
6490 /* discard if mac addr of AP/STA does not match the specified ones */
6491 if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
6492 ETHER_ADDR_LEN) != 0) ||
6493 (memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
6494 ETHER_ADDR_LEN) != 0)) {
6495 dhd->tput_data.pkts_bad++;
6496 DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
6497 __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
6498 return;
6499 }
6500
6501 tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
6502 (uint8 *)&tput_pkt->mac_sta);
6503 pktdata += tput_pkt_hdr_size + 4;
6504 crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
6505 CRC32_INIT_VALUE);
6506 if (crc != ntoh32(tput_pkt->crc32)) {
6507 DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
6508 __FUNCTION__, ntoh32(tput_pkt->pkt_id)));
6509 dhd->tput_data.pkts_bad++;
6510 return;
6511 }
6512
6513 if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
6514 dhd->tput_data.pkts_good++;
6515
6516 /* if we have received the stop packet or all the # of pkts, we're done */
6517 if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
6518 dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
6519 dhd->tput_stop_ts = OSL_SYSUPTIME_US();
6520 dhd_os_tput_test_wake(dhd);
6521 }
6522 }
6523 #endif /* OEM_ANDROID */
6524
6525 #ifdef DUMP_IOCTL_IOV_LIST
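/* Keep a rolling list of the most recent ioctl/iovar requests: when the
 * list already holds IOV_LIST_MAX_LEN entries the oldest entry is freed
 * before the new node is appended.
 */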
6526 void
6527 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
6528 {
6529 dll_t *item;
6530 dhd_iov_li_t *iov_li;
6531 dhd->dump_iovlist_len++;
6532
6533 if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
6534 item = dll_head_p(list_head);
6535 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6536 dll_delete(item);
6537 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6538 dhd->dump_iovlist_len--;
6539 }
6540 dll_append(list_head, node);
6541 }
6542
6543 void
6544 dhd_iov_li_print(dll_t *list_head)
6545 {
6546 dhd_iov_li_t *iov_li;
6547 dll_t *item, *next;
6548 uint8 index = 0;
6549 for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
6550 next = dll_next_p(item);
6551 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6552 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
6553 }
6554 }
6555
6556 void
6557 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
6558 {
6559 dll_t *item;
6560 dhd_iov_li_t *iov_li;
6561 while (!(dll_empty(list_head))) {
6562 item = dll_head_p(list_head);
6563 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
6564 dll_delete(item);
6565 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
6566 }
6567 }
6568 #endif /* DUMP_IOCTL_IOV_LIST */
6569
6570 /* configurations of ecounters to be enabled by default in FW */
6571 static ecounters_cfg_t ecounters_cfg_tbl[] = {
6572 /* Global ecounters */
6573 {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
6574 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
6575 // {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},
6576
6577 /* Slice specific ecounters */
6578 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6579 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
6580 {ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},
6581
6582 /* Interface specific ecounters */
6583 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
6584 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
6585 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
6586 {ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},
6587
6588 /* secondary interface */
6589 };
6590
6591 static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
6592 /* Interface specific event ecounters */
6593 {WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
6594 };
6595
6596 /* Accepts an argument to -s, -g or -f and creates an XTLV */
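/* The output *xtlv is a single WL_ECOUNTERS_XTLV_REPORT_REQ container
 * that wraps an ecounters_stats_types_report_req_t plus one zero-length
 * XTLV identifying the requested stats type; the caller takes ownership
 * of the allocation.
 */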
6597 int
6598 dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
6599 uint16 stats_rep, uint8 **xtlv)
6600 {
6601 uint8 *req_xtlv = NULL;
6602 ecounters_stats_types_report_req_t *req;
6603 bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
6604 ecountersv2_xtlv_list_elt_t temp;
6605 uint16 xtlv_len = 0, total_len = 0;
6606 int rc = BCME_OK;
6607
6608 /* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
6609 temp.id = stats_rep;
6610 temp.len = 0;
6611
6612 /* Hence len/data = 0/NULL */
6613 xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;
6614
6615 /* Total length of the container */
6616 total_len = BCM_XTLV_HDR_SIZE +
6617 OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;
6618
6619 /* Now allocate a structure for the entire request */
6620 if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
6621 rc = BCME_NOMEM;
6622 goto fail;
6623 }
6624
6625 /* container XTLV context */
6626 bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
6627 BCM_XTLV_OPTION_ALIGN32);
6628
6629 /* Fill other XTLVs in the container. Leave space for XTLV headers */
6630 req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
6631 req->flags = type;
6632 if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
6633 req->slice_mask = 0x1 << if_slice_idx;
6634 } else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
6635 req->if_index = if_slice_idx;
6636 }
6637
6638 /* Fill remaining XTLVs */
6639 bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
6640 BCM_XTLV_OPTION_ALIGN32);
6641 if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
6642 DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
6643 rc = BCME_ERROR;
6644 goto fail;
6645 }
6646
6647 /* fill the top level container and get done with the XTLV container */
6648 rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
6649 bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
6650 stats_types_req));
6651
6652 if (rc) {
6653 DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
6654 goto fail;
6655 }
6656
6657 fail:
6658 if (rc && req_xtlv) {
6659 MFREE(dhd->osh, req_xtlv, total_len);
6660 req_xtlv = NULL;
6661 }
6662
6663 /* update the xtlv pointer */
6664 *xtlv = req_xtlv;
6665 return rc;
6666 }
6667
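/* Walk all event log sets in the firmware and build a bitmask of the
 * sets marked as 'preserve' so they can be dumped on demand. Returns
 * BCME_OK if at least one such set was found.
 */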
6668 int
6669 dhd_get_preserve_log_numbers(dhd_pub_t *dhd, uint32 *logset_mask)
6670 {
6671 wl_el_set_type_t logset_type, logset_op;
6672 int ret = BCME_ERROR;
6673 int i = 0, err = 0;
6674
6675 if (!dhd || !logset_mask)
6676 return BCME_BADARG;
6677
6678 *logset_mask = 0;
6679 memset(&logset_type, 0, sizeof(logset_type));
6680 memset(&logset_op, 0, sizeof(logset_op));
6681 logset_type.version = htod16(EVENT_LOG_SET_TYPE_CURRENT_VERSION);
6682 logset_type.len = htod16(sizeof(wl_el_set_type_t));
6683 for (i = 0; i < dhd->event_log_max_sets; i++) {
6684 logset_type.set = i;
6685 err = dhd_iovar(dhd, 0, "event_log_set_type", (char *)&logset_type,
6686 sizeof(logset_type), (char *)&logset_op, sizeof(logset_op), FALSE);
6687 /* the iovar may return 'unsupported' error if a log set number is not present
6688 * in the fw, so we should not return on error !
6689 */
6690 if (err == BCME_OK &&
6691 logset_op.type == EVENT_LOG_SET_TYPE_PRSRV) {
6692 *logset_mask |= 0x01u << i;
6693 ret = BCME_OK;
6694 DHD_ERROR(("[INIT] logset:%d is preserve/chatty\n", i));
6695 }
6696 }
6697
6698 return ret;
6699 }
6700
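/* Ask the firmware to self-configure ecounters via the
 * "ecounters_autoconfig" iovar. BCME_UNSUPPORTED is passed back to the
 * caller (which then falls back to explicit configuration); any other
 * failure is logged but reported as BCME_OK.
 */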
6701 static int
6702 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
6703 {
6704 int rc = BCME_OK;
6705 uint32 buf;
6706 rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
6707
6708 if (rc != BCME_OK) {
6709
6710 if (rc != BCME_UNSUPPORTED) {
6711 DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
6712 rc = BCME_OK;
6713 } else {
6714 DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
6715 }
6716 }
6717
6718 return rc;
6719 }
6720
6721 int
6722 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
6723 {
6724 int rc = BCME_OK;
6725 if (enable) {
6726 if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
6727 if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
6728 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
6729 } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
6730 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
6731 }
6732 }
6733 } else {
6734 if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
6735 DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
6736 } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
6737 DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
6738 }
6739 }
6740 return rc;
6741 }
6742
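/* Build the default ecounters configuration from ecounters_cfg_tbl[]:
 * each table entry is converted into an XTLV container, the containers
 * are concatenated into a single ecounters_config_request_v2_t, and the
 * request is sent with the "ecounters" iovar.
 */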
6743 int
6744 dhd_start_ecounters(dhd_pub_t *dhd)
6745 {
6746 uint8 i = 0;
6747 uint8 *start_ptr;
6748 int rc = BCME_OK;
6749 bcm_xtlv_t *elt;
6750 ecounters_config_request_v2_t *req = NULL;
6751 ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
6752 ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
6753 uint16 total_processed_containers_len = 0;
6754
6755 for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
6756 ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];
6757
6758 if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
6759 MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
6760 DHD_ERROR(("Ecounters v2: No memory to process\n"));
6761 goto fail;
6762 }
6763
6764 rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
6765 ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);
6766
6767 if (rc) {
6768 DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
6769 ecounter_stat->stats_rep, rc));
6770
6771 /* Free allocated memory and go to fail to release any memories allocated
6772 * in previous iterations. Note that list_elt->data gets populated in
6773 * dhd_create_ecounters_params() and gets freed there itself.
6774 */
6775 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6776 list_elt = NULL;
6777 goto fail;
6778 }
6779 elt = (bcm_xtlv_t *) list_elt->data;
6780
6781 /* Put the elements in the order they are processed */
6782 if (processed_containers_list == NULL) {
6783 processed_containers_list = list_elt;
6784 } else {
6785 tail->next = list_elt;
6786 }
6787 tail = list_elt;
6788 /* Size of the XTLV returned */
6789 total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6790 }
6791
6792 /* Now create ecounters config request with totallength */
6793 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
6794 total_processed_containers_len);
6795
6796 if (req == NULL) {
6797 rc = BCME_NOMEM;
6798 goto fail;
6799 }
6800
6801 req->version = ECOUNTERS_VERSION_2;
6802 req->logset = EVENT_LOG_SET_ECOUNTERS;
6803 req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
6804 req->num_reports = ECOUNTERS_NUM_REPORTS;
6805 req->len = total_processed_containers_len +
6806 OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6807
6808 /* Copy config */
6809 start_ptr = req->ecounters_xtlvs;
6810
6811 /* Now go element by element in the list */
6812 while (processed_containers_list) {
6813 list_elt = processed_containers_list;
6814
6815 elt = (bcm_xtlv_t *)list_elt->data;
6816
6817 memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6818 start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6819 processed_containers_list = processed_containers_list->next;
6820
6821 /* Free allocated memories */
6822 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6823 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6824 }
6825
6826 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6827 DHD_ERROR(("failed to start ecounters\n"));
6828 }
6829
6830 fail:
6831 if (req) {
6832 MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
6833 }
6834
6835 /* Now go element by element in the list */
6836 while (processed_containers_list) {
6837 list_elt = processed_containers_list;
6838 elt = (bcm_xtlv_t *)list_elt->data;
6839 processed_containers_list = processed_containers_list->next;
6840
6841 /* Free allocated memories */
6842 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6843 MFREE(dhd->osh, list_elt, sizeof(*list_elt));
6844 }
6845 return rc;
6846 }
6847
6848 int
6849 dhd_stop_ecounters(dhd_pub_t *dhd)
6850 {
6851 int rc = BCME_OK;
6852 ecounters_config_request_v2_t *req;
6853
6854 /* Now create ecounters config request with totallength */
6855 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
6856
6857 if (req == NULL) {
6858 rc = BCME_NOMEM;
6859 goto fail;
6860 }
6861
6862 req->version = ECOUNTERS_VERSION_2;
6863 req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
6864
6865 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
6866 DHD_ERROR(("failed to stop ecounters\n"));
6867 }
6868
6869 fail:
6870 if (req) {
6871 MFREE(dhd->osh, req, sizeof(*req));
6872 }
6873 return rc;
6874 }
6875
6876 /* configured event_id_array for event ecounters */
6877 typedef struct event_id_array {
6878 uint8 event_id;
6879 uint8 str_idx;
6880 } event_id_array_t;
6881
6882 /* get event id array only from event_ecounters_cfg_tbl[] */
6883 static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
6884 {
6885 uint8 i;
6886 uint8 idx = 0;
6887 int32 prev_evt_id = -1;
6888
6889 for (i = 0; i < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); i++) {
6890 if (prev_evt_id != event_ecounters_cfg_tbl[i].event_id) {
6891 if (prev_evt_id >= 0)
6892 idx++;
6893 event_array[idx].event_id = event_ecounters_cfg_tbl[i].event_id;
6894 event_array[idx].str_idx = i;
6895 }
6896 prev_evt_id = event_ecounters_cfg_tbl[i].event_id;
6897 }
6898 return idx;
6899 }
6900
6901 /* Max number of XTLVs that one event id may request: wl_ifstats_xtlv_id entries times 2 interfaces */
6902 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
6903
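/* Build and issue one "event_ecounters" request per configured event
 * id: each request packs the XTLV containers for every table entry with
 * that event id and is sent with EVENT_ECOUNTERS_FLAGS_ADD.
 */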
6904 int
6905 dhd_start_event_ecounters(dhd_pub_t *dhd)
6906 {
6907 uint8 i, j = 0;
6908 uint8 event_id_cnt = 0;
6909 uint16 processed_containers_len = 0;
6910 uint16 max_xtlv_len = 0;
6911 int rc = BCME_OK;
6912 uint8 *ptr;
6913 uint8 *data;
6914 event_id_array_t *id_array;
6915 bcm_xtlv_t *elt = NULL;
6916 event_ecounters_config_request_v2_t *req = NULL;
6917
6918 id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
6919 ARRAYSIZE(event_ecounters_cfg_tbl));
6920
6921 if (id_array == NULL) {
6922 rc = BCME_NOMEM;
6923 goto fail;
6924 }
6925 event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
6926
6927 max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
6928 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
6929 ECNTRS_MAX_XTLV_NUM);
6930
6931 /* Now create ecounters config request with max allowed length */
6932 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
6933 sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len);
6934
6935 if (req == NULL) {
6936 rc = BCME_NOMEM;
6937 goto fail;
6938 }
6939
6940 for (i = 0; i <= event_id_cnt; i++) {
6941 /* req initialization by event id */
6942 req->version = ECOUNTERS_VERSION_2;
6943 req->logset = EVENT_LOG_SET_ECOUNTERS;
6944 req->event_id = id_array[i].event_id;
6945 req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
6946 req->len = 0;
6947 processed_containers_len = 0;
6948
6949 /* Copy config */
6950 ptr = req->ecounters_xtlvs;
6951
6952 for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
6953 event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
6954 if (id_array[i].event_id != event_ecounter_stat->event_id)
6955 break;
6956
6957 rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
6958 event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
6959 &data);
6960
6961 if (rc) {
6962 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
6963 __FUNCTION__, event_ecounter_stat->stats_rep, rc));
6964 goto fail;
6965 }
6966
6967 elt = (bcm_xtlv_t *)data;
6968
6969 memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6970 ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
6971 processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
6972
6973 /* Free allocated memories alloced by dhd_create_ecounters_params */
6974 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
6975
6976 if (processed_containers_len > max_xtlv_len) {
6977 DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
6978 __FUNCTION__));
6979 rc = BCME_BADLEN;
6980 goto fail;
6981 }
6982 }
6983
6984 req->len = processed_containers_len +
6985 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
6986
6987 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
6988 __FUNCTION__, req->version, req->logset, req->event_id,
6989 req->flags, req->len));
6990
6991 rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
6992
6993 if (rc < 0) {
6994 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
6995 req->event_id, rc));
6996 goto fail;
6997 }
6998 }
6999
7000 fail:
7001 /* Free allocated memories */
7002 if (req) {
7003 MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t) + max_xtlv_len);
7004 }
7005 if (id_array) {
7006 MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
7007 ARRAYSIZE(event_ecounters_cfg_tbl));
7008 }
7009
7010 return rc;
7011 }
7012
7013 int
7014 dhd_stop_event_ecounters(dhd_pub_t *dhd)
7015 {
7016 int rc = BCME_OK;
7017 event_ecounters_config_request_v2_t *req;
7018
7019 /* Now create ecounters config request with totallength */
7020 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
7021
7022 if (req == NULL) {
7023 rc = BCME_NOMEM;
7024 goto fail;
7025 }
7026
7027 req->version = ECOUNTERS_VERSION_2;
7028 req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
7029 req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
7030
7031 if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
7032 DHD_ERROR(("failed to stop event_ecounters\n"));
7033 }
7034
7035 fail:
7036 if (req) {
7037 MFREE(dhd->osh, req, sizeof(*req));
7038 }
7039 return rc;
7040 }
7041
7042 #ifdef DHD_LOG_DUMP
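/* Drain a debug ring into a user buffer for the log dump: the ring is
 * suspended, its contents are pulled record by record through the
 * concise debug buffer, a section header is written (with its length
 * patched in afterwards), and the ring is reset and re-activated.
 */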
7043 int
7044 dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
7045 log_dump_section_hdr_t *sec_hdr,
7046 char *text_hdr, int buflen, uint32 sec_type)
7047 {
7048 uint32 rlen = 0;
7049 uint32 data_len = 0;
7050 void *data = NULL;
7051 unsigned long flags = 0;
7052 int ret = 0;
7053 dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
7054 int pos = 0;
7055 int fpos_sechdr = 0;
7056
7057 if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
7058 return BCME_BADARG;
7059 }
7060 /* do not allow further writes to the ring
7061 * till we flush it
7062 */
7063 DHD_DBG_RING_LOCK(ring->lock, flags);
7064 ring->state = RING_SUSPEND;
7065 DHD_DBG_RING_UNLOCK(ring->lock, flags);
7066
7067 if (dhdp->concise_dbg_buf) {
7068 /* re-use concise debug buffer temporarily
7069 * to pull ring data, to write
7070 * record by record to file
7071 */
7072 data_len = CONCISE_DUMP_BUFLEN;
7073 data = dhdp->concise_dbg_buf;
7074 ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
7075 /* write the section header now with zero length,
7076 * once the correct length is found out, update
7077 * it later
7078 */
7079 fpos_sechdr = pos;
7080 sec_hdr->type = sec_type;
7081 sec_hdr->length = 0;
7082 ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
7083 sizeof(*sec_hdr), &pos);
7084 do {
7085 rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
7086 if (rlen > 0) {
7087 /* write the log */
7088 ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
7089 }
7090 DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
7091 } while ((rlen > 0));
7092 /* now update the section header length in the file */
7093 /* Complete ring size is dumped by HAL, hence updating length to ring size */
7094 sec_hdr->length = ring->ring_size;
7095 ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
7096 sizeof(*sec_hdr), &fpos_sechdr);
7097 } else {
7098 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
7099 }
7100 DHD_DBG_RING_LOCK(ring->lock, flags);
7101 ring->state = RING_ACTIVE;
7102 /* Resetting both read and write pointer,
7103 * since all items are read.
7104 */
7105 ring->rp = ring->wp = 0;
7106 DHD_DBG_RING_UNLOCK(ring->lock, flags);
7107
7108 return ret;
7109 }
7110
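/* File-based variant of the ring dump: identical flow to
 * dhd_dump_debug_ring() but writes through dhd_os_write_file_posn() and
 * records the actual number of bytes pulled in the section header.
 */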
7111 int
7112 dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
7113 unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
7114 char *text_hdr, uint32 sec_type)
7115 {
7116 uint32 rlen = 0;
7117 uint32 data_len = 0, total_len = 0;
7118 void *data = NULL;
7119 unsigned long fpos_sechdr = 0;
7120 unsigned long flags = 0;
7121 int ret = 0;
7122 dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
7123
7124 if (!dhdp || !ring || !file || !sec_hdr ||
7125 !file_posn || !text_hdr)
7126 return BCME_BADARG;
7127
7128 /* do not allow further writes to the ring
7129 * till we flush it
7130 */
7131 DHD_DBG_RING_LOCK(ring->lock, flags);
7132 ring->state = RING_SUSPEND;
7133 DHD_DBG_RING_UNLOCK(ring->lock, flags);
7134
7135 if (dhdp->concise_dbg_buf) {
7136 /* re-use concise debug buffer temporarily
7137 * to pull ring data, to write
7138 * record by record to file
7139 */
7140 data_len = CONCISE_DUMP_BUFLEN;
7141 data = dhdp->concise_dbg_buf;
7142 dhd_os_write_file_posn(file, file_posn, text_hdr,
7143 strlen(text_hdr));
7144 /* write the section header now with zero length,
7145 * once the correct length is found out, update
7146 * it later
7147 */
7148 dhd_init_sec_hdr(sec_hdr);
7149 fpos_sechdr = *file_posn;
7150 sec_hdr->type = sec_type;
7151 sec_hdr->length = 0;
7152 dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
7153 sizeof(*sec_hdr));
7154 do {
7155 rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
7156 if (rlen > 0) {
7157 /* write the log */
7158 ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
7159 if (ret < 0) {
7160 DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
7161 DHD_DBG_RING_LOCK(ring->lock, flags);
7162 ring->state = RING_ACTIVE;
7163 DHD_DBG_RING_UNLOCK(ring->lock, flags);
7164 return BCME_ERROR;
7165 }
7166 }
7167 total_len += rlen;
7168 } while (rlen > 0);
7169 /* now update the section header length in the file */
7170 sec_hdr->length = total_len;
7171 dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
7172 } else {
7173 DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
7174 }
7175
7176 DHD_DBG_RING_LOCK(ring->lock, flags);
7177 ring->state = RING_ACTIVE;
7178 /* Reset both read and write pointers,
7179 * since all items have been read.
7180 */
7181 ring->rp = ring->wp = 0;
7182 DHD_DBG_RING_UNLOCK(ring->lock, flags);
7183 return BCME_OK;
7184 }
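/* Illustrative sketch (not an existing call site): dumping one debug ring into
 * an already-open dump file. 'fp' and 'pos' are assumptions standing in for the
 * platform file-write path; the section header is rewritten in place once the
 * dumped length is known.
 *
 *	log_dump_section_hdr_t sec_hdr;
 *	unsigned long pos = 0;		// current write offset within 'fp'
 *	uint32 sec_type = 0;		// caller-chosen log_dump section type
 *
 *	if (dhd_log_dump_ring_to_file(dhdp, ring, fp, &pos, &sec_hdr,
 *		"RING_LOG\n", sec_type) != BCME_OK) {
 *		DHD_ERROR(("ring dump to file failed\n"));
 *	}
 */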
7185
7186 /* logdump cookie */
7187 #define MAX_LOGUDMP_COOKIE_CNT 10u
7188 #define LOGDUMP_COOKIE_STR_LEN 50u
7189 int
7190 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
7191 {
7192 uint32 ring_size;
7193
7194 if (!dhdp || !buf) {
7195 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
7196 return BCME_ERROR;
7197 }
7198
7199 ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
7200 if (buf_size < ring_size) {
7201 DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
7202 ring_size, buf_size));
7203 return BCME_ERROR;
7204 }
7205
7206 dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
7207 LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
7208 DHD_RING_TYPE_FIXED);
7209 if (!dhdp->logdump_cookie) {
7210 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
7211 return BCME_ERROR;
7212 }
7213
7214 return BCME_OK;
7215 }
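/* Sizing sketch (hypothetical attach-time caller): the backing buffer must hold
 * the fixed ring header plus MAX_LOGUDMP_COOKIE_CNT slots of
 * LOGDUMP_COOKIE_STR_LEN bytes each, exactly as checked above.
 *
 *	uint32 sz = dhd_ring_get_hdr_size() +
 *		LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
 *	uint8 *cookie_buf = MALLOCZ(dhdp->osh, sz);
 *
 *	if (cookie_buf && dhd_logdump_cookie_init(dhdp, cookie_buf, sz) != BCME_OK) {
 *		MFREE(dhdp->osh, cookie_buf, sz);
 *	}
 */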
7216
7217 void
7218 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
7219 {
7220 if (!dhdp) {
7221 return;
7222 }
7223 if (dhdp->logdump_cookie) {
7224 dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
7225 }
7226
7227 return;
7228 }
7229
7230 void
7231 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
7232 {
7233 char *ptr;
7234
7235 if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
7236 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
7237 " type = %p, cookie_cfg:%p\n", __FUNCTION__,
7238 dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
7239 return;
7240 }
7241 ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
7242 if (ptr == NULL) {
7243 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
7244 return;
7245 }
7246 scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
7247 return;
7248 }
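/* Example (hypothetical caller, for illustration only): recording why a debug
 * dump was triggered. The cookie string and type label are made up here; any
 * short token that later identifies the trigger will do. If no empty slot can
 * be obtained the save is silently skipped, as coded above.
 *
 *	dhd_logdump_cookie_save(dhdp, "0xdeadbeef", "IOVAR_TIMEOUT");
 */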
7249
7250 int
7251 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
7252 {
7253 char *ptr;
7254
7255 if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
7256 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
7257 "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
7258 dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
7259 return BCME_ERROR;
7260 }
7261 ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
7262 if (ptr == NULL) {
7263 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
7264 return BCME_ERROR;
7265 }
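/* Note: only strlen(ptr) bytes are copied below; callers must size ret_cookie
 * to at least LOGDUMP_COOKIE_STR_LEN and pre-zero it if NUL termination is needed.
 */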
7266 memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
7267 dhd_ring_free_first(dhdp->logdump_cookie);
7268 return BCME_OK;
7269 }
7270
7271 int
7272 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
7273 {
7274 if (!dhdp || !dhdp->logdump_cookie) {
7275 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
7276 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
7277 return 0;
7278 }
7279 return dhd_ring_get_cur_size(dhdp->logdump_cookie);
7280 }
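/* Illustrative drain loop (hypothetical caller): copy out the saved cookies,
 * e.g. while composing a bug report. Each get() pops the oldest entry, which
 * already ends in a newline from the save path above.
 *
 *	char one[LOGDUMP_COOKIE_STR_LEN];
 *
 *	while (dhd_logdump_cookie_count(dhdp) > 0) {
 *		memset(one, 0, sizeof(one));
 *		if (dhd_logdump_cookie_get(dhdp, one, sizeof(one)) != BCME_OK)
 *			break;
 *		DHD_ERROR(("cookie: %s", one));
 *	}
 */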
7281
7282 static inline int
7283 __dhd_log_dump_cookie_to_file(
7284 dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
7285 char *buf, uint32 buf_size)
7286 {
7287
7288 uint32 remain = buf_size;
7289 int ret = BCME_ERROR;
7290 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7291 log_dump_section_hdr_t sec_hdr;
7292 uint32 read_idx;
7293 uint32 write_idx;
7294
7295 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7296 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7297 while (dhd_logdump_cookie_count(dhdp) > 0) {
7298 memset(tmp_buf, 0, sizeof(tmp_buf));
7299 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7300 if (ret != BCME_OK) {
7301 return ret;
7302 }
7303 remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7304 }
7305 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7306 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7307
7308 ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
7309 if (ret < 0) {
7310 DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
7311 return ret;
7312 }
7313 sec_hdr.magic = LOG_DUMP_MAGIC;
7314 sec_hdr.timestamp = local_clock();
7315 sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7316 sec_hdr.length = buf_size - remain;
7317
7318 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
7319 if (ret < 0) {
7320 DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
7321 return ret;
7322 }
7323
7324 ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
7325 if (ret < 0) {
7326 DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
7327 }
7328
7329 return ret;
7330 }
7331
7332 uint32
7333 dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
7334 {
7335 int len = 0;
7336 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7337 log_dump_section_hdr_t sec_hdr;
7338 char *buf = NULL;
7339 int ret = BCME_ERROR;
7340 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7341 uint32 read_idx;
7342 uint32 write_idx;
7343 uint32 remain;
7344
7345 remain = buf_size;
7346
7347 if (!dhdp || !dhdp->logdump_cookie) {
7348 DHD_ERROR(("%s At least one ptr is NULL "
7349 "dhdp = %p cookie %p\n",
7350 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7351 goto exit;
7352 }
7353
7354 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7355 if (!buf) {
7356 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7357 goto exit;
7358 }
7359
7360 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7361 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7362 while (dhd_logdump_cookie_count(dhdp) > 0) {
7363 memset(tmp_buf, 0, sizeof(tmp_buf));
7364 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7365 if (ret != BCME_OK) {
7366 goto exit;
7367 }
7368 remain -= (uint32)strlen(tmp_buf);
7369 }
7370 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7371 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7372 len += strlen(COOKIE_LOG_HDR);
7373 len += sizeof(sec_hdr);
7374 len += (buf_size - remain);
7375 exit:
7376 if (buf)
7377 MFREE(dhdp->osh, buf, buf_size);
7378 return len;
7379 }
7380
7381 int
7382 dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
7383 {
7384 int ret = BCME_ERROR;
7385 char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
7386 log_dump_section_hdr_t sec_hdr;
7387 char *buf = NULL;
7388 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7389 int pos = 0;
7390 uint32 read_idx;
7391 uint32 write_idx;
7392 uint32 remain;
7393
7394 remain = buf_size;
7395
7396 if (!dhdp || !dhdp->logdump_cookie) {
7397 DHD_ERROR(("%s At least one ptr is NULL "
7398 "dhdp = %p cookie %p\n",
7399 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
7400 goto exit;
7401 }
7402
7403 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7404 if (!buf) {
7405 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7406 goto exit;
7407 }
7408
7409 read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
7410 write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
7411 while (dhd_logdump_cookie_count(dhdp) > 0) {
7412 memset(tmp_buf, 0, sizeof(tmp_buf));
7413 ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
7414 if (ret != BCME_OK) {
7415 goto exit;
7416 }
7417 remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
7418 }
7419 dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
7420 dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
7421 ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
7422 sec_hdr.magic = LOG_DUMP_MAGIC;
7423 sec_hdr.timestamp = local_clock();
7424 sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
7425 sec_hdr.length = buf_size - remain;
7426 ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
7427 ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
7428 exit:
7429 if (buf)
7430 MFREE(dhdp->osh, buf, buf_size);
7431 return ret;
7432 }
7433
7434 int
7435 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
7436 {
7437 char *buf;
7438 int ret = BCME_ERROR;
7439 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
7440
7441 if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
7442 DHD_ERROR(("%s At least one ptr is NULL "
7443 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
7444 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
7445 return ret;
7446 }
7447
7448 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
7449 if (!buf) {
7450 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
7451 return ret;
7452 }
7453 ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
7454 MFREE(dhdp->osh, buf, buf_size);
7455
7456 return ret;
7457 }
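/* Note on the two cookie dump paths above: dhd_log_dump_cookie() streams the
 * section into a user-space buffer supplied by the debug HAL, while
 * dhd_log_dump_cookie_to_file() writes through an in-kernel file handle. Both
 * serialise the same layout - COOKIE_LOG_HDR text, a log_dump_section_hdr_t,
 * then the concatenated "type: cookie" strings - and both restore the ring
 * read/write indices afterwards so the cookies remain available for later dumps.
 */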
7458
7459 #endif /* DHD_LOG_DUMP */
7460
7461 #ifdef DHD_LOG_DUMP
7462 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
7463 void
7464 dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
7465 {
7466 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7467 log_dump_type_t *flush_type;
7468 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7469 uint64 current_time_sec;
7470
7471 if (!dhdp) {
7472 DHD_ERROR(("dhdp is NULL !\n"));
7473 return;
7474 }
7475
7476 if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
7477 DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
7478 return;
7479 }
7480
7481 current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7482
7483 DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
7484 __FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
7485 DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7486
7487 if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
7488 DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
7489 __FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
7490 return;
7491 }
7492
7493 clear_debug_dump_time(dhdp->debug_dump_time_str);
7494 #ifdef DHD_PCIE_RUNTIMEPM
7495 /* wake up RPM if SYSDUMP is triggered */
7496 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
7497 #endif /* DHD_PCIE_RUNTIMEPM */
7498 /* */
7499 dhdp->debug_dump_subcmd = subcmd;
7500
7501 dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
7502
7503 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7504 /* flush_type is freed at do_dhd_log_dump function */
7505 flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
7506 if (flush_type) {
7507 *flush_type = DLD_BUF_TYPE_ALL;
7508 dhd_schedule_log_dump(dhdp, flush_type);
7509 } else {
7510 DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
7511 return;
7512 }
7513 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7514
7515 /* Inside dhd_mem_dump, event notification will be sent to HAL and
7516 * from other context DHD pushes memdump, debug_dump and pktlog dump
7517 * to HAL and HAL will write into file
7518 */
7519 #if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
7520 dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
7521 dhd_bus_mem_dump(dhdp);
7522 #endif /* (BCMPCIE || BCMSDIO) && DHD_FW_COREDUMP */
7523
7524 #if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
7525 dhd_schedule_pktlog_dump(dhdp);
7526 #endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
7527 }
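/* Illustrative trigger (hypothetical call site): user space or an internal
 * error path kicks off a full debug dump; calls arriving within
 * DEBUG_DUMP_TRIGGER_INTERVAL_SEC of the previous one are debounced above.
 *
 *	dhd_log_dump_trigger(dhdp, CMD_DEFAULT);
 */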
7528 #endif /* DHD_LOG_DUMP */
7529
7530 #ifdef EWP_EDL
7531 /* For now the EDL ring memory is allocated with DMA_ALLOC_CONSISTENT,
7532 * because on hikey, DMA_MAPping preallocated memory fails with an
7533 * 'out of space in SWIOTLB' error
7534 */
7535 int
7536 dhd_edl_mem_init(dhd_pub_t *dhd)
7537 {
7538 int ret = 0;
7539
7540 memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
7541 ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
7542 if (ret != BCME_OK) {
7543 DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
7544 __FUNCTION__));
7545 return BCME_ERROR;
7546 }
7547 return BCME_OK;
7548 }
7549
7550 /* NOTE: dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
7551 * for EDL is freed in 'dhd_prot_detach_edl_rings', which is called during de-init.
7552 */
7553 void
7554 dhd_edl_mem_deinit(dhd_pub_t *dhd)
7555 {
7556 if (dhd->edl_ring_mem.va != NULL)
7557 dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
7558 }
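/* Typical pairing (sketch, assuming the usual attach flow): the EDL ring buffer
 * is allocated once during attach and the DMA buffer is released by the protocol
 * layer, so an explicit deinit call is normally unnecessary (see the note above).
 *
 *	if (dhd_edl_mem_init(dhdp) != BCME_OK) {
 *		// no EDL ring will be available; attach handling is platform policy
 *	}
 */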
7559
7560 int
7561 dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
7562 void *evt_decode_data)
7563 {
7564 msg_hdr_edl_t *msg = NULL;
7565 cmn_msg_hdr_t *cmn_msg_hdr = NULL;
7566 uint8 *buf = NULL;
7567
7568 if (!data || !dhdp || !evt_decode_data) {
7569 DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
7570 return BCME_ERROR;
7571 }
7572
7573 /* format of data in each work item in the EDL ring:
7574 * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
7575 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
7576 */
7577 cmn_msg_hdr = (cmn_msg_hdr_t *)data;
7578 msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
7579 buf = (uint8 *)msg;
7580 /* validate the fields */
7581 if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
7582 DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
7583 " expected (0x%x)\n", __FUNCTION__,
7584 msg->infobuf_ver, PCIE_INFOBUF_V1));
7585 return BCME_VERSION;
7586 }
7587
7588 /* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
7589 if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
7590 DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
7591 __FUNCTION__));
7592 return BCME_BUFTOOSHORT;
7593 }
7594
7595 if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
7596 DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
7597 __FUNCTION__, ltoh16(msg->pyld_hdr.type)));
7598 return BCME_BADOPTION;
7599 }
7600
7601 if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
7602 DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
7603 " than available buffer size %u\n", __FUNCTION__,
7604 ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
7605 return BCME_BADLEN;
7606 }
7607
7608 /* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
7609 buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
7610 dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
7611 ltoh16(msg->pyld_hdr.length));
7612
7613 /* check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
7614 * copy the event data to the skb and send it up the stack
7615 */
7616 #ifdef BCMPCIE
7617 if (dhdp->logtrace_pkt_sendup) {
7618 DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
7619 (uint32)(ltoh16(msg->pyld_hdr.length) +
7620 sizeof(info_buf_payload_hdr_t) + 4)));
7621 dhd_sendup_info_buf(dhdp, (uint8 *)msg);
7622 }
7623 #endif /* BCMPCIE */
7624
7625 return BCME_OK;
7626 }
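/* Worked example of the work item layout handled above (lengths hypothetical):
 * a 4-byte infobuf_ver, an info_buf_payload_hdr_t of type
 * PCIE_INFOBUF_V1_TYPE_LOGTRACE with length N, then N bytes beginning with a
 * msgtrace_hdr_t. cmn_msg_hdr->request_id carries the total payload length, so
 * the checks above require
 *
 *	sizeof(info_buf_payload_hdr_t) <= request_id
 *	pyld_hdr.length <= request_id
 *
 * before 'buf' is advanced past infobuf_ver and pyld_hdr and handed to
 * dhd_dbg_trace_evnt_handler().
 */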
7627 #endif /* EWP_EDL */
7628
7629 #if defined(SHOW_LOGTRACE)
7630 int
7631 dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
7632 {
7633 void *file = NULL;
7634 int size = 0;
7635 char buf[FW_VER_STR_LEN];
7636 char *str = NULL;
7637 int ret = BCME_OK;
7638
7639 if (!fwpath)
7640 return BCME_BADARG;
7641
7642 file = dhd_os_open_image1(dhdp, fwpath);
7643 if (!file) {
7644 ret = BCME_ERROR;
7645 goto exit;
7646 }
7647 size = dhd_os_get_image_size(file);
7648 if (!size) {
7649 ret = BCME_ERROR;
7650 goto exit;
7651 }
7652
7653 /* seek to the last 'X' bytes in the file */
7654 if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
7655 ret = BCME_ERROR;
7656 goto exit;
7657 }
7658
7659 /* read the last 'X' bytes of the file to a buffer */
7660 memset(buf, 0, FW_VER_STR_LEN);
7661 if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
7662 ret = BCME_ERROR;
7663 goto exit;
7664 }
7665 /* search for 'Version' in the buffer */
7666 str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
7667 if (!str) {
7668 ret = BCME_ERROR;
7669 goto exit;
7670 }
7671 /* walk backwards over printable ascii characters to find the start of the version string */
7672 while (str != buf &&
7673 (*str >= ' ' && *str <= '~')) {
7674 --str;
7675 }
7676 /* undo the final decrement, so that str points
7677 * to the first character of the version string
7678 */
7679 ++str;
7680
7681 if (strlen(str) > (FW_VER_STR_LEN - 1)) {
7682 ret = BCME_BADLEN;
7683 goto exit;
7684 }
7685
7686 DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
7687 /* copy to global variable, so that in case FW load fails, the
7688 * core capture logs will contain FW version read from the file
7689 */
7690 memset(fw_version, 0, FW_VER_STR_LEN);
7691 strlcpy(fw_version, str, FW_VER_STR_LEN);
7692
7693 exit:
7694 if (file)
7695 dhd_os_close_image1(dhdp, file);
7696
7697 return ret;
7698 }
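/* Example use (assumed call site): log the version string embedded at the tail
 * of the firmware image before download, so it is captured even if the download
 * later fails. 'fw_image_path' is a placeholder for the path used by the
 * download path.
 *
 *	(void)dhd_print_fw_ver_from_file(dhdp, fw_image_path);
 */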
7699 #endif // endif
7700
7701 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
7702
7703 /* Ignore compiler warnings due to -Werror=cast-qual */
7704 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
7705 #pragma GCC diagnostic push
7706 #pragma GCC diagnostic ignored "-Wcast-qual"
7707 #endif // endif
7708
7709 static void
7710 copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
7711 {
7712 int remain_len;
7713 int i;
7714 int *cnt;
7715 char *dest;
7716 int bytes_written;
7717 uint32 ioc_dwlen = 0;
7718
7719 if (!dhd || !dhd->hang_info) {
7720 DHD_ERROR(("%s dhd=%p hang_info=%p\n",
7721 __FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
7722 return;
7723 }
7724
7725 cnt = &dhd->hang_info_cnt;
7726 dest = dhd->hang_info;
7727
7728 memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
7729 (*cnt) = 0;
7730
7731 bytes_written = 0;
7732 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7733
7734 get_debug_dump_time(dhd->debug_dump_time_hang_str);
7735 copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);
7736
7737 bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
7738 HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
7739 dhd->debug_dump_time_hang_str,
7740 ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
7741 (*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;
7742
7743 clear_debug_dump_time(dhd->debug_dump_time_hang_str);
7744
7745 /* Access ioc->buf only if ioc->len is at least 4 bytes (one dword) */
7746 ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
7747 if (ioc_dwlen > 0) {
7748 const uint32 *ioc_buf = (const uint32 *)ioc->buf;
7749
7750 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7751 bytes_written += scnprintf(&dest[bytes_written], remain_len,
7752 "%08x", *(uint32 *)(ioc_buf++));
7753 (*cnt)++;
7754 if ((*cnt) >= HANG_FIELD_CNT_MAX) {
7755 return;
7756 }
7757
7758 for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
7759 i++, (*cnt)++) {
7760 remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
7761 bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
7762 HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
7763 }
7764 }
7765
7766 DHD_INFO(("%s hang info len: %d data: %s\n",
7767 __FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
7768 }
7769
7770 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
7771 #pragma GCC diagnostic pop
7772 #endif // endif
7773
7774 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
7775
7776 #if defined(DHD_H2D_LOG_TIME_SYNC)
7777 /*
7778 * Helper function:
7779 * Used for Dongle console message time syncing with Host printk
7780 */
7781 void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
7782 {
7783 uint64 ts;
7784
7785 /*
7786 * local_clock() returns time in nanoseconds;
7787 * the dongle understands only millisecond time.
7788 */
7789 ts = local_clock();
7790 /* Nano seconds to milli seconds */
7791 do_div(ts, 1000000);
7792 if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", ts, WLC_SET_VAR, TRUE, 0)) {
7793 DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
7794 /* Stopping HOST Dongle console time syncing */
7795 dhd->dhd_rte_time_sync_ms = 0;
7796 }
7797 }
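/* Worked example of the conversion above: local_clock() = 5,000,000,123 ns;
 * do_div(ts, 1000000) leaves ts = 5000, i.e. host uptime in milliseconds, which
 * the dongle uses to tag its console output once set via the "rte_timesync" iovar.
 */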
7798 #endif /* DHD_H2D_LOG_TIME_SYNC */
7799
7800 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
7801 int
7802 dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
7803 {
7804 int ret = BCME_OK;
7805 bcm_xtlv_t *pxtlv = NULL;
7806 uint8 mybuf[DHD_IOVAR_BUF_SIZE];
7807 uint16 mybuf_len = sizeof(mybuf);
7808 pxtlv = (bcm_xtlv_t *)mybuf;
7809
7810 ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
7811 &he_enab, BCM_XTLV_OPTION_ALIGN32);
7812
7813 if (ret != BCME_OK) {
7814 /* log the pack error before mapping it to -EINVAL */
7815 DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
7816 return -EINVAL;
7817 }
7818
7819 ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
7820 if (ret < 0) {
7821 DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
7822 __FUNCTION__, he_enab, bcmerrorstr(ret)));
7823 } else {
7824 DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
7825 }
7826
7827 return ret;
7828 }
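/* Example (hypothetical preinit caller): force 802.11ax (HE) off when
 * DISABLE_HE_ENAB is in effect, or apply a customer-configured value:
 *
 *	if (dhd_control_he_enab(dhd, 0) != BCME_OK) {
 *		DHD_ERROR(("failed to disable HE\n"));
 *	}
 */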
7829 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
7830