1 /*
2 * Broadcom Dongle Host Driver (DHD), common DHD core.
3 *
4 * Copyright (C) 2020, Broadcom.
5 *
6 * Unless you and Broadcom execute a separate written software license
7 * agreement governing use of this software, this software is licensed to you
8 * under the terms of the GNU General Public License version 2 (the "GPL"),
9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
10 * following added to such license:
11 *
12 * As a special exception, the copyright holders of this software give you
13 * permission to link this software with independent modules, and to copy and
14 * distribute the resulting executable under terms of your choice, provided that
15 * you also meet, for each linked independent module, the terms and conditions of
16 * the license of that module. An independent module is a module which is not
17 * derived from this software. The special exception does not apply to any
18 * modifications of the software.
19 *
20 *
21 * <<Broadcom-WL-IPTag/Open:>>
22 *
23 * $Id$
24 */
25 #include <typedefs.h>
26 #include <osl.h>
27
28 #include <epivers.h>
29 #include <bcmutils.h>
30 #include <bcmstdlib_s.h>
31
32 #include <bcmendian.h>
33 #include <dngl_stats.h>
34 #include <dhd.h>
35 #include <dhd_ip.h>
36 #include <bcmevent.h>
37 #include <dhdioctl.h>
38 #ifdef DHD_SDTC_ETB_DUMP
39 #include <bcmiov.h>
40 #endif /* DHD_SDTC_ETB_DUMP */
41
42 #ifdef BCMDBG
43 #include <dhd_macdbg.h>
44 #endif /* BCMDBG */
45
46 #ifdef PCIE_FULL_DONGLE
47 #include <bcmmsgbuf.h>
48 #endif /* PCIE_FULL_DONGLE */
49
50 #ifdef SHOW_LOGTRACE
51 #include <event_log.h>
52 #endif /* SHOW_LOGTRACE */
53
54 #ifdef BCMPCIE
55 #include <dhd_flowring.h>
56 #endif
57
58 #include <dhd_bus.h>
59 #include <dhd_proto.h>
60 #include <bcmsdbus.h>
61 #include <dhd_dbg.h>
62 #include <802.1d.h>
63 #include <dhd_debug.h>
64 #include <dhd_dbg_ring.h>
65 #include <dhd_mschdbg.h>
66 #include <msgtrace.h>
67 #include <dhd_config.h>
68 #include <wl_android.h>
69
70 #ifdef WL_CFG80211
71 #include <wl_cfg80211.h>
72 #include <wl_cfgvif.h>
73 #endif
74 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
75 #include <dhd_pno.h>
76 #endif /* (OEM_ANDROID) && (PNO_SUPPORT) */
77 #ifdef RTT_SUPPORT
78 #include <dhd_rtt.h>
79 #endif
80
81 #ifdef DNGL_EVENT_SUPPORT
82 #include <dnglevent.h>
83 #endif
84
85 #ifdef IL_BIGENDIAN
86 #include <bcmendian.h>
87 #define htod32(i) (bcmswap32(i))
88 #define htod16(i) (bcmswap16(i))
89 #define dtoh32(i) (bcmswap32(i))
90 #define dtoh16(i) (bcmswap16(i))
91 #define htodchanspec(i) htod16(i)
92 #define dtohchanspec(i) dtoh16(i)
93 #else
94 #define htod32(i) (i)
95 #define htod16(i) (i)
96 #define dtoh32(i) (i)
97 #define dtoh16(i) (i)
98 #define htodchanspec(i) (i)
99 #define dtohchanspec(i) (i)
#endif /* IL_BIGENDIAN */
101
102 #ifdef PROP_TXSTATUS
103 #include <wlfc_proto.h>
104 #include <dhd_wlfc.h>
105 #endif
106
107 #if defined(__linux__)
108 #include <dhd_linux.h>
109 #endif /* __linux__ */
110
111 #ifdef DHD_WMF
112 #include <dhd_wmf_linux.h>
113 #endif /* DHD_WMF */
114
115 #ifdef DHD_L2_FILTER
116 #include <dhd_l2_filter.h>
117 #endif /* DHD_L2_FILTER */
118
119 #ifdef DHD_PSTA
120 #include <dhd_psta.h>
121 #endif /* DHD_PSTA */
122 #ifdef DHD_TIMESYNC
123 #include <dhd_timesync.h>
124 #endif /* DHD_TIMESYNC */
125
126 #ifdef DHD_WET
127 #include <dhd_wet.h>
128 #endif /* DHD_WET */
129 #if defined(NDIS)
130 #include <siutils.h>
131 #endif
132
133 #ifdef DHD_LOG_DUMP
134 #include <dhd_dbg.h>
135 #ifdef DHD_PKT_LOGGING
136 #include <dhd_pktlog.h>
137 #endif
138 #endif /* DHD_LOG_DUMP */
139
#ifdef DHD_LOG_PRINT_RATE_LIMIT
/* Rate-limit threshold for DHD log prints; 0 disables suppression by default.
 * NOTE(review): exact units/semantics are enforced where this is consumed — confirm at use site.
 */
int log_print_threshold = 0;
#endif /* DHD_LOG_PRINT_RATE_LIMIT */

#ifdef DHD_DEBUGABILITY_LOG_DUMP_RING
/* When the debugability dump ring is built in, route the verbose categories
 * (fw logs, events, packet monitor, iovar memory) to the ring and keep the
 * console level at errors only.
 */
int dbgring_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL | DHD_INFO_VAL
	| DHD_EVENT_VAL | DHD_PKT_MON_VAL | DHD_IOVAR_MEM_VAL;
int dhd_msg_level = DHD_ERROR_VAL;
#else
/* No dump ring: ring level unused (0), console carries errors + fw logs. */
int dbgring_msg_level = 0;
/* For CUSTOMER_HW4/Hikey do not enable DHD_ERROR_MEM_VAL by default */
int dhd_msg_level = DHD_ERROR_VAL | DHD_FWLOG_VAL;
#endif /* DHD_DEBUGABILITY_LOG_DUMP_RING */
153
154 #ifdef NDIS
155 extern uint wl_msg_level;
156 #endif
157
158 #if defined(WL_WLC_SHIM)
159 #include <wl_shim.h>
160 #else
161 #if defined(NDIS)
162 #include <wl_port_if.h>
163 #endif
164 #endif /* WL_WLC_SHIM */
165
166 #ifdef DHD_DEBUG
167 #include <sdiovar.h>
168 #endif /* DHD_DEBUG */
169
170 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
171 #include <linux/pm_runtime.h>
172 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
173
174 #ifdef CSI_SUPPORT
175 #include <dhd_csi.h>
176 #endif /* CSI_SUPPORT */
177
178 #if defined(BTLOG) && !defined(BCMPCIE)
179 #error "BT logging supported only with PCIe"
180 #endif /* defined(BTLOG) && !defined(BCMPCIE) */
181
182 #ifdef SOFTAP
183 char fw_path2[MOD_PARAM_PATHLEN];
184 extern bool softap_enabled;
185 #endif
186 #ifdef PROP_TXSTATUS
187 extern int disable_proptx;
188 #endif /* PROP_TXSTATUS */
189
190 #ifdef REPORT_FATAL_TIMEOUTS
191 #ifdef BCMINTERNAL
192 /*
193 * Internal Builds are used by DVT.
194 * The timeouts are not required for DVT builds, since they use IOVARs like
195 * SROM programming etc, that takes long time. So make the timeout values
196 * as 0. If DVT needs to use this feature they can enable them using IOVAR
197 *
198 * SVT any way uses external builds
199 */
200 #define SCAN_TIMEOUT_DEFAULT 0
201 #define JOIN_TIMEOUT_DEFAULT 0
202 #define BUS_TIMEOUT_DEFAULT 0
203 #define CMD_TIMEOUT_DEFAULT 0
204 #else
205 /* Default timeout value in ms */
206 #ifdef DHD_EFI
207 #define BUS_TIMEOUT_DEFAULT 800 /* 800ms */
208 #define CMD_TIMEOUT_DEFAULT 1500 /* 1.5s */
209 #define SCAN_TIMEOUT_DEFAULT 0
210 #define JOIN_TIMEOUT_DEFAULT 0
211 #else
212 #define BUS_TIMEOUT_DEFAULT 800
213 #define CMD_TIMEOUT_DEFAULT 1200
214 #define SCAN_TIMEOUT_DEFAULT 17000
215 #define JOIN_TIMEOUT_DEFAULT 7500
216 #endif /* DHD_EFI */
217 #endif /* BCMINTERNAL */
218 #endif /* REPORT_FATAL_TIMEOUTS */
219
220 #ifdef SHOW_LOGTRACE
221 #define BYTES_AHEAD_NUM 10 /* address in map file is before these many bytes */
222 #define READ_NUM_BYTES 1000 /* read map file each time this No. of bytes */
223 #define GO_BACK_FILE_POS_NUM_BYTES 100 /* set file pos back to cur pos */
224 static char *ramstart_str = " text_start"; /* string in mapfile has addr ramstart */
225 static char *rodata_start_str = " rodata_start"; /* string in mapfile has addr rodata start */
226 static char *rodata_end_str = " rodata_end"; /* string in mapfile has addr rodata end */
227 #define RAMSTART_BIT 0x01
228 #define RDSTART_BIT 0x02
229 #define RDEND_BIT 0x04
230 #define ALL_MAP_VAL (RAMSTART_BIT | RDSTART_BIT | RDEND_BIT)
231 #endif /* SHOW_LOGTRACE */
232
233 #ifdef SHOW_LOGTRACE
234 #if defined(LINUX) || defined(linux)
235 /* the fw file path is taken from either the module parameter at
236 * insmod time or is defined as a constant of different values
237 * for different platforms
238 */
239 extern char *st_str_file_path;
240 #else
241 static char *st_str_file_path = "rtecdc.bin";
242 #endif /* LINUX */
243 #endif /* SHOW_LOGTRACE */
244
#ifdef EWP_EDL
/* Composite header used on the EWP Enhanced Debug Lane (EDL) path: an
 * info-buffer version word followed by the info-buf payload header and the
 * msgtrace header, laid out back to back.
 */
typedef struct msg_hdr_edl {
	uint32 infobuf_ver;               /* info buffer version (presumably WL_INFO_BUF_VER-style) — confirm against fw */
	info_buf_payload_hdr_t pyld_hdr;  /* payload type/length header */
	msgtrace_hdr_t trace_hdr;         /* message-trace header */
} msg_hdr_edl_t;
#endif /* EWP_EDL */
252
253 #define DHD_TPUT_MAX_TX_PKTS_BATCH 1000
254
255 /* Last connection success/failure status */
256 uint32 dhd_conn_event;
257 uint32 dhd_conn_status;
258 uint32 dhd_conn_reason;
259
260 extern int dhd_iscan_request(void * dhdp, uint16 action);
261 extern void dhd_ind_scan_confirm(void *h, bool status);
262 extern int dhd_iscan_in_progress(void *h);
263 void dhd_iscan_lock(void);
264 void dhd_iscan_unlock(void);
265 extern int dhd_change_mtu(dhd_pub_t *dhd, int new_mtu, int ifidx);
266 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
267 extern int dhd_get_concurrent_capabilites(dhd_pub_t *dhd);
268 #endif
269
270 extern int dhd_socram_dump(struct dhd_bus *bus);
271 extern void dhd_set_packet_filter(dhd_pub_t *dhd);
272
273 #ifdef DNGL_EVENT_SUPPORT
274 static void dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
275 bcm_dngl_event_msg_t *dngl_event, size_t pktlen);
276 static int dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event,
277 size_t pktlen);
278 #endif /* DNGL_EVENT_SUPPORT */
279
280 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
281 static void copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc);
282 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
283
284 #ifdef REPORT_FATAL_TIMEOUTS
285 static void dhd_set_join_error(dhd_pub_t *pub, uint32 mask);
286 #endif /* REPORT_FATAL_TIMEOUTS */
287
288 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
289 #define MAX_IOCTL_SUSPEND_ERROR 10
290 static int ioctl_suspend_error = 0;
291 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
292
293 /* Should ideally read this from target(taken from wlu) */
294 #define MAX_CHUNK_LEN 1408 /* 8 * 8 * 22 */
295
#if defined(OEM_ANDROID)
/* note these variables will be used with wext */
bool ap_cfg_running = FALSE;  /* TRUE while AP configuration is in progress/active */
bool ap_fw_loaded = FALSE;    /* TRUE once an AP-capable firmware has been loaded */
#endif /* OEM_ANDROID */
301
302 #ifdef WLEASYMESH
303 extern int dhd_set_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
304 extern int dhd_get_1905_almac(dhd_pub_t *dhdp, uint8 ifidx, uint8* ea, bool mcast);
305 #endif /* WLEASYMESH */
306
307 #define CHIPID_MISMATCH 8
308
309 #define DHD_VERSION "Dongle Host Driver, version " EPI_VERSION_STR "\n"
310
311 #if defined(DHD_DEBUG) && defined(DHD_COMPILED)
312 const char dhd_version[] = DHD_VERSION DHD_COMPILED " compiled on "
313 __DATE__ " at " __TIME__ "\n\0<TIMESTAMP>";
314 #else
315 const char dhd_version[] = DHD_VERSION;
316 #endif /* DHD_DEBUG && DHD_COMPILED */
317
318 char fw_version[FW_VER_STR_LEN] = "\0";
319 char clm_version[CLM_VER_STR_LEN] = "\0";
320
321 char bus_api_revision[BUS_API_REV_STR_LEN] = "\0";
322
323 void dhd_set_timer(void *bus, uint wdtick);
324
325 #if defined(BCM_ROUTER_DHD)
326 static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
327 trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len);
328 #endif
329
330 static char* ioctl2str(uint32 ioctl);
331
/* IOVar table: identifiers for every DHD-level iovar. Each value here must
 * have a matching entry in dhd_iovars[] below; IOV_VERSION starts at 1 so
 * that 0 can act as an invalid/terminator id. Conditional groups mirror the
 * feature #ifdefs used in the table.
 */
enum {
	IOV_VERSION = 1,
	IOV_WLMSGLEVEL,
	IOV_MSGLEVEL,
	IOV_BCMERRORSTR,
	IOV_BCMERROR,
	IOV_WDTICK,
	IOV_DUMP,
	IOV_CLEARCOUNTS,
	IOV_LOGDUMP,
	IOV_LOGCAL,
	IOV_LOGSTAMP,
	IOV_GPIOOB,
	IOV_IOCTLTIMEOUT,
	IOV_CONS,
	IOV_DCONSOLE_POLL,
#if defined(DHD_DEBUG)
	IOV_DHD_JOIN_TIMEOUT_DBG,
	IOV_SCAN_TIMEOUT,
	IOV_MEM_DEBUG,
#ifdef BCMPCIE
	IOV_FLOW_RING_DEBUG,
#endif /* BCMPCIE */
#endif /* defined(DHD_DEBUG) */
#ifdef PROP_TXSTATUS
	IOV_PROPTXSTATUS_ENABLE,
	IOV_PROPTXSTATUS_MODE,
	IOV_PROPTXSTATUS_OPT,
#ifdef QMONITOR
	IOV_QMON_TIME_THRES,
	IOV_QMON_TIME_PERCENT,
#endif /* QMONITOR */
	IOV_PROPTXSTATUS_MODULE_IGNORE,
	IOV_PROPTXSTATUS_CREDIT_IGNORE,
	IOV_PROPTXSTATUS_TXSTATUS_IGNORE,
	IOV_PROPTXSTATUS_RXPKT_CHK,
#endif /* PROP_TXSTATUS */
	IOV_BUS_TYPE,
	IOV_CHANGEMTU,
	IOV_HOSTREORDER_FLOWS,
#ifdef DHDTCPACK_SUPPRESS
	IOV_TCPACK_SUPPRESS,
#endif /* DHDTCPACK_SUPPRESS */
#ifdef DHD_WMF
	IOV_WMF_BSS_ENAB,
	IOV_WMF_UCAST_IGMP,
	IOV_WMF_MCAST_DATA_SENDUP,
#ifdef WL_IGMP_UCQUERY
	IOV_WMF_UCAST_IGMP_QUERY,
#endif /* WL_IGMP_UCQUERY */
#ifdef DHD_UCAST_UPNP
	IOV_WMF_UCAST_UPNP,
#endif /* DHD_UCAST_UPNP */
	IOV_WMF_PSTA_DISABLE,
#endif /* DHD_WMF */
#if defined(BCM_ROUTER_DHD)
	IOV_TRAFFIC_MGMT_DWM,
#endif /* BCM_ROUTER_DHD */
	IOV_AP_ISOLATE,
#ifdef DHD_L2_FILTER
	IOV_DHCP_UNICAST,
	IOV_BLOCK_PING,
	IOV_PROXY_ARP,
	IOV_GRAT_ARP,
	IOV_BLOCK_TDLS,
#endif /* DHD_L2_FILTER */
	IOV_DHD_IE,
#ifdef DHD_PSTA
	IOV_PSTA,
#endif /* DHD_PSTA */
#ifdef DHD_WET
	IOV_WET,
	IOV_WET_HOST_IPV4,
	IOV_WET_HOST_MAC,
#endif /* DHD_WET */
	IOV_CFG80211_OPMODE,
	IOV_ASSERT_TYPE,
#if defined(NDIS)
	IOV_WAKEIND,
#endif /* NDIS */
#if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
	IOV_LMTEST,
#endif
#ifdef DHD_MCAST_REGEN
	IOV_MCAST_REGEN_BSS_ENABLE,
#endif
#ifdef BCMDBG
	IOV_MACDBG_PD11REGS,
	IOV_MACDBG_REGLIST,
	IOV_MACDBG_PSVMPMEMS,
#endif /* BCMDBG */
#ifdef SHOW_LOGTRACE
	IOV_DUMP_TRACE_LOG,
#endif /* SHOW_LOGTRACE */
#ifdef REPORT_FATAL_TIMEOUTS
	IOV_SCAN_TO,
	IOV_JOIN_TO,
	IOV_CMD_TO,
	IOV_OQS_TO,
#endif /* REPORT_FATAL_TIMEOUTS */
	IOV_DONGLE_TRAP_TYPE,
	IOV_DONGLE_TRAP_INFO,
	IOV_BPADDR,
	IOV_DUMP_DONGLE, /**< dumps core registers and d11 memories */
#if defined(DHD_LOG_DUMP)
#if defined(DHD_EFI)
	IOV_LOG_CAPTURE_ENABLE,
#endif
	IOV_LOG_DUMP,
#endif /* DHD_LOG_DUMP */
#ifdef BTLOG
	IOV_DUMP_BT_LOG,
	IOV_BTLOG,
#endif /* BTLOG */
#ifdef SNAPSHOT_UPLOAD
	IOV_BT_MEM_DUMP,
	IOV_BT_UPLOAD,
#endif /* SNAPSHOT_UPLOAD */
	IOV_TPUT_TEST,
#if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
	IOV_PKT_LATENCY,
#endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
	IOV_DEBUG_BUF_DEST_STAT,
#ifdef DHD_PKTTS
	IOV_PKTTS_ENAB,
	IOV_PKTTS_FLOW,
#endif /* DHD_PKTTS */
#ifdef DHD_DEBUG
	IOV_INDUCE_ERROR,
#endif /* DHD_DEBUG */
#if defined(DHD_EFI)
	IOV_INTR_POLL,
#endif
	IOV_FIS_TRIGGER,
#ifdef WL_IFACE_MGMT_CONF
#ifdef WL_CFG80211
#ifdef WL_NANP2P
	IOV_CONC_DISC,
#endif /* WL_NANP2P */
#ifdef WL_IFACE_MGMT
	IOV_IFACE_POLICY,
#endif /* WL_IFACE_MGMT */
#endif /* WL_CFG80211 */
#endif /* WL_IFACE_MGMT_CONF */
#ifdef RTT_GEOFENCE_CONT
#if defined (RTT_SUPPORT) && defined (WL_NAN)
	IOV_RTT_GEOFENCE_TYPE_OVRD,
#endif /* RTT_SUPPORT && WL_NAN */
#endif /* RTT_GEOFENCE_CONT */
	IOV_FW_VBS,
#ifdef DHD_TX_PROFILE
	IOV_TX_PROFILE_TAG,
	IOV_TX_PROFILE_ENABLE,
	IOV_TX_PROFILE_DUMP,
#endif /* defined(DHD_TX_PROFILE) */
	IOV_CHECK_TRAP_ROT,
#if defined(DHD_AWDL)
	IOV_AWDL_LLC_ENABLE,
#endif
#ifdef WLEASYMESH
	IOV_1905_AL_UCAST,
	IOV_1905_AL_MCAST,
#endif /* WLEASYMESH */
	IOV_LAST
};
498
499 const bcm_iovar_t dhd_iovars[] = {
500 /* name varid flags flags2 type minlen */
501 {"version", IOV_VERSION, 0, 0, IOVT_BUFFER, 0},
502 {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0 },
503 #ifdef DHD_DEBUG
504 {"msglevel", IOV_MSGLEVEL, 0, 0, IOVT_UINT32, 0},
505 {"mem_debug", IOV_MEM_DEBUG, 0, 0, IOVT_BUFFER, 0 },
506 #ifdef BCMPCIE
507 {"flow_ring_debug", IOV_FLOW_RING_DEBUG, 0, 0, IOVT_BUFFER, 0 },
508 #endif /* BCMPCIE */
509 #ifdef NDIS
510 {"wlmsglevel", IOV_WLMSGLEVEL, 0, 0, IOVT_UINT32, 0},
511 #endif /* NDIS */
512 #endif /* DHD_DEBUG */
513 {"bcmerrorstr", IOV_BCMERRORSTR, 0, 0, IOVT_BUFFER, BCME_STRLEN},
514 {"bcmerror", IOV_BCMERROR, 0, 0, IOVT_INT8, 0},
515 {"wdtick", IOV_WDTICK, 0, 0, IOVT_UINT32, 0},
516 {"dump", IOV_DUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN_32K},
517 {"cons", IOV_CONS, 0, 0, IOVT_BUFFER, 0},
518 {"dconpoll", IOV_DCONSOLE_POLL, 0, 0, IOVT_UINT32, 0},
519 {"clearcounts", IOV_CLEARCOUNTS, 0, 0, IOVT_VOID, 0},
520 #ifdef BCMPERFSTATS
521 {"logdump", IOV_LOGDUMP, 0, 0, IOVT_BUFFER, DHD_IOCTL_MAXLEN},
522 {"logcal", IOV_LOGCAL, 0, 0, IOVT_UINT32, 0},
523 {"logstamp", IOV_LOGSTAMP, 0, 0, IOVT_BUFFER, 0},
524 #endif
525 {"gpioob", IOV_GPIOOB, 0, 0, IOVT_UINT32, 0},
526 {"ioctl_timeout", IOV_IOCTLTIMEOUT, 0, 0, IOVT_UINT32, 0},
527 #ifdef PROP_TXSTATUS
528 {"proptx", IOV_PROPTXSTATUS_ENABLE, 0, 0, IOVT_BOOL, 0 },
529 /*
530 set the proptxtstatus operation mode:
531 0 - Do not do any proptxtstatus flow control
532 1 - Use implied credit from a packet status
533 2 - Use explicit credit
534 */
535 {"ptxmode", IOV_PROPTXSTATUS_MODE, 0, 0, IOVT_UINT32, 0 },
536 {"proptx_opt", IOV_PROPTXSTATUS_OPT, 0, 0, IOVT_UINT32, 0 },
537 #ifdef QMONITOR
538 {"qtime_thres", IOV_QMON_TIME_THRES, 0, 0, IOVT_UINT32, 0 },
539 {"qtime_percent", IOV_QMON_TIME_PERCENT, 0, 0, IOVT_UINT32, 0 },
540 #endif /* QMONITOR */
541 {"pmodule_ignore", IOV_PROPTXSTATUS_MODULE_IGNORE, 0, 0, IOVT_BOOL, 0 },
542 {"pcredit_ignore", IOV_PROPTXSTATUS_CREDIT_IGNORE, 0, 0, IOVT_BOOL, 0 },
543 {"ptxstatus_ignore", IOV_PROPTXSTATUS_TXSTATUS_IGNORE, 0, 0, IOVT_BOOL, 0 },
544 {"rxpkt_chk", IOV_PROPTXSTATUS_RXPKT_CHK, 0, 0, IOVT_BOOL, 0 },
545 #endif /* PROP_TXSTATUS */
546 {"bustype", IOV_BUS_TYPE, 0, 0, IOVT_UINT32, 0},
547 {"changemtu", IOV_CHANGEMTU, 0, 0, IOVT_UINT32, 0 },
548 {"host_reorder_flows", IOV_HOSTREORDER_FLOWS, 0, 0, IOVT_BUFFER,
549 (WLHOST_REORDERDATA_MAXFLOWS + 1) },
550 #ifdef DHDTCPACK_SUPPRESS
551 {"tcpack_suppress", IOV_TCPACK_SUPPRESS, 0, 0, IOVT_UINT8, 0 },
552 #endif /* DHDTCPACK_SUPPRESS */
553 #ifdef DHD_WMF
554 {"wmf_bss_enable", IOV_WMF_BSS_ENAB, 0, 0, IOVT_BOOL, 0 },
555 {"wmf_ucast_igmp", IOV_WMF_UCAST_IGMP, 0, 0, IOVT_BOOL, 0 },
556 {"wmf_mcast_data_sendup", IOV_WMF_MCAST_DATA_SENDUP, 0, 0, IOVT_BOOL, 0 },
557 #ifdef WL_IGMP_UCQUERY
558 {"wmf_ucast_igmp_query", IOV_WMF_UCAST_IGMP_QUERY, (0), 0, IOVT_BOOL, 0 },
559 #endif /* WL_IGMP_UCQUERY */
560 #ifdef DHD_UCAST_UPNP
561 {"wmf_ucast_upnp", IOV_WMF_UCAST_UPNP, (0), 0, IOVT_BOOL, 0 },
562 #endif /* DHD_UCAST_UPNP */
563 {"wmf_psta_disable", IOV_WMF_PSTA_DISABLE, (0), 0, IOVT_BOOL, 0 },
564 #endif /* DHD_WMF */
565 #if defined(BCM_ROUTER_DHD)
566 {"trf_mgmt_filters_add", IOV_TRAFFIC_MGMT_DWM, (0), 0, IOVT_BUFFER, 0},
567 #endif /* BCM_ROUTER_DHD */
568 #ifdef DHD_L2_FILTER
569 {"dhcp_unicast", IOV_DHCP_UNICAST, (0), 0, IOVT_BOOL, 0 },
570 #endif /* DHD_L2_FILTER */
571 {"ap_isolate", IOV_AP_ISOLATE, (0), 0, IOVT_BOOL, 0},
572 #ifdef DHD_L2_FILTER
573 {"block_ping", IOV_BLOCK_PING, (0), 0, IOVT_BOOL, 0},
574 {"proxy_arp", IOV_PROXY_ARP, (0), 0, IOVT_BOOL, 0},
575 {"grat_arp", IOV_GRAT_ARP, (0), 0, IOVT_BOOL, 0},
576 {"block_tdls", IOV_BLOCK_TDLS, (0), IOVT_BOOL, 0},
577 #endif /* DHD_L2_FILTER */
578 {"dhd_ie", IOV_DHD_IE, (0), 0, IOVT_BUFFER, 0},
579 #ifdef DHD_PSTA
580 /* PSTA/PSR Mode configuration. 0: DIABLED 1: PSTA 2: PSR */
581 {"psta", IOV_PSTA, 0, 0, IOVT_UINT32, 0},
582 #endif /* DHD PSTA */
583 #ifdef DHD_WET
584 /* WET Mode configuration. 0: DIABLED 1: WET */
585 {"wet", IOV_WET, 0, 0, IOVT_UINT32, 0},
586 {"wet_host_ipv4", IOV_WET_HOST_IPV4, 0, 0, IOVT_UINT32, 0},
587 {"wet_host_mac", IOV_WET_HOST_MAC, 0, 0, IOVT_BUFFER, 0},
588 #endif /* DHD WET */
589 {"op_mode", IOV_CFG80211_OPMODE, 0, 0, IOVT_UINT32, 0 },
590 {"assert_type", IOV_ASSERT_TYPE, (0), 0, IOVT_UINT32, 0},
591 #if defined(NDIS)
592 { "wowl_wakeind", IOV_WAKEIND, 0, 0, IOVT_UINT32, 0 },
593 #endif /* NDIS */
594 #if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
595 {"lmtest", IOV_LMTEST, 0, 0, IOVT_UINT32, 0 },
596 #endif
597 #ifdef DHD_MCAST_REGEN
598 {"mcast_regen_bss_enable", IOV_MCAST_REGEN_BSS_ENABLE, 0, 0, IOVT_BOOL, 0},
599 #endif
600 #ifdef BCMDBG
601 {"pd11regs", IOV_MACDBG_PD11REGS, 0, 0, IOVT_BUFFER, 0},
602 {"mreglist", IOV_MACDBG_REGLIST, 0, 0, IOVT_BUFFER, 0},
603 {"psvmpmems", IOV_MACDBG_PSVMPMEMS, 0, 0, IOVT_BUFFER, 0},
604 #endif /* BCMDBG */
605 #ifdef SHOW_LOGTRACE
606 {"dump_trace_buf", IOV_DUMP_TRACE_LOG, 0, 0, IOVT_BUFFER, sizeof(trace_buf_info_t) },
607 #endif /* SHOW_LOGTRACE */
608 #ifdef REPORT_FATAL_TIMEOUTS
609 {"scan_timeout", IOV_SCAN_TO, 0, 0, IOVT_UINT32, 0 },
610 {"join_timeout", IOV_JOIN_TO, 0, 0, IOVT_UINT32, 0 },
611 {"cmd_timeout", IOV_CMD_TO, 0, 0, IOVT_UINT32, 0 },
612 {"oqs_timeout", IOV_OQS_TO, 0, 0, IOVT_UINT32, 0 },
613 #endif /* REPORT_FATAL_TIMEOUTS */
614 {"trap_type", IOV_DONGLE_TRAP_TYPE, 0, 0, IOVT_UINT32, 0 },
615 {"trap_info", IOV_DONGLE_TRAP_INFO, 0, 0, IOVT_BUFFER, sizeof(trap_t) },
616 #ifdef DHD_DEBUG
617 {"bpaddr", IOV_BPADDR, 0, 0, IOVT_BUFFER, sizeof(sdreg_t) },
618 #endif /* DHD_DEBUG */
619 {"dump_dongle", IOV_DUMP_DONGLE, 0, 0, IOVT_BUFFER,
620 MAX(sizeof(dump_dongle_in_t), sizeof(dump_dongle_out_t)) },
621 #if defined(DHD_LOG_DUMP)
622 #if defined(DHD_EFI)
623 {"log_capture_enable", IOV_LOG_CAPTURE_ENABLE, 0, 0, IOVT_UINT8, 0},
624 #endif
625 {"log_dump", IOV_LOG_DUMP, 0, 0, IOVT_UINT8, 0},
626 #endif /* DHD_LOG_DUMP */
627 #ifdef BTLOG
628 {"dump_bt_log", IOV_DUMP_BT_LOG, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
629 {"btlog", IOV_BTLOG, 0, 0, IOVT_UINT32, 0 },
630 #endif /* BTLOG */
631 #ifdef SNAPSHOT_UPLOAD
632 {"bt_mem_dump", IOV_BT_MEM_DUMP, 0, 0, IOVT_UINT32, 0},
633 {"bt_upload", IOV_BT_UPLOAD, 0, 0, IOVT_BUFFER, sizeof(bt_log_buf_info_t) },
634 #endif /* SNAPSHOT_UPLOAD */
635 {"tput_test", IOV_TPUT_TEST, 0, 0, IOVT_BUFFER, sizeof(tput_test_t)},
636 {"debug_buf_dest_stat", IOV_DEBUG_BUF_DEST_STAT, 0, 0, IOVT_UINT32, 0 },
637 #ifdef DHD_PKTTS
638 {"pktts_enab", IOV_PKTTS_ENAB, (0), 0, IOVT_BOOL, 0 },
639 {"pktts_flow", IOV_PKTTS_FLOW, (0), 0, IOVT_BUFFER, sizeof(tput_test_t) },
640 #endif /* DHD_PKTTS */
641 #if defined(DHD_EFI)
642 {"intr_poll", IOV_INTR_POLL, 0, 0, IOVT_BUFFER, sizeof(intr_poll_t)},
643 #endif
644 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
645 {"pkt_latency", IOV_PKT_LATENCY, 0, 0, IOVT_UINT32, 0 },
646 #endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
647 #if defined(DHD_SSSR_DUMP)
648 {"fis_trigger", IOV_FIS_TRIGGER, 0, 0, IOVT_UINT32, 0},
649 #endif
650 #ifdef DHD_DEBUG
651 {"induce_error", IOV_INDUCE_ERROR, (0), 0, IOVT_UINT16, 0 },
652 #endif /* DHD_DEBUG */
653 #ifdef WL_IFACE_MGMT_CONF
654 #ifdef WL_CFG80211
655 #ifdef WL_NANP2P
656 {"conc_disc", IOV_CONC_DISC, (0), 0, IOVT_UINT16, 0 },
657 #endif /* WL_NANP2P */
658 #ifdef WL_IFACE_MGMT
659 {"if_policy", IOV_IFACE_POLICY, (0), 0, IOVT_BUFFER, sizeof(iface_mgmt_data_t)},
660 #endif /* WL_IFACE_MGMT */
661 #endif /* WL_CFG80211 */
662 #endif /* WL_IFACE_MGMT_CONF */
663 #ifdef RTT_GEOFENCE_CONT
664 #if defined (RTT_SUPPORT) && defined (WL_NAN)
665 {"rtt_geofence_type_ovrd", IOV_RTT_GEOFENCE_TYPE_OVRD, (0), 0, IOVT_BOOL, 0},
666 #endif /* RTT_SUPPORT && WL_NAN */
667 #endif /* RTT_GEOFENCE_CONT */
668 {"fw_verbose", IOV_FW_VBS, 0, 0, IOVT_UINT32, 0},
669 #ifdef DHD_TX_PROFILE
670 {"tx_profile_tag", IOV_TX_PROFILE_TAG, 0, 0, IOVT_BUFFER,
671 sizeof(dhd_tx_profile_protocol_t)},
672 {"tx_profile_enable", IOV_TX_PROFILE_ENABLE, 0, 0, IOVT_BOOL, 0},
673 {"tx_profile_dump", IOV_TX_PROFILE_DUMP, 0, 0, IOVT_UINT32, 0},
674 #endif /* defined(DHD_TX_PROFILE) */
675 {"check_trap_rot", IOV_CHECK_TRAP_ROT, (0), 0, IOVT_BOOL, 0},
676 #if defined(DHD_AWDL)
677 {"awdl_llc_enable", IOV_AWDL_LLC_ENABLE, 0, 0, IOVT_BOOL, 0},
678 #endif
679 /* --- add new iovars *ABOVE* this line --- */
680 #ifdef WLEASYMESH
681 {"1905_al_ucast", IOV_1905_AL_UCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
682 {"1905_al_mcast", IOV_1905_AL_MCAST, 0, 0, IOVT_BUFFER, ETHER_ADDR_LEN},
683 #endif /* WLEASYMESH */
684 {NULL, 0, 0, 0, 0, 0 }
685 };
686
687 #define DHD_IOVAR_BUF_SIZE 128
688
#if defined(LINUX) || defined(linux) || defined(DHD_EFI)
/* Return the firmware download state last recorded in the dhd public struct. */
fw_download_status_t
dhd_fw_download_status(dhd_pub_t * dhd_pub)
{
	return dhd_pub->fw_download_status;
}
#endif /* defined(LINUX) || defined(linux) || defined(DHD_EFI) */
696
/* Check every sticky bus/dongle error flag and report whether any fatal
 * condition is pending. Returns TRUE if at least one error was seen; the
 * flags themselves are NOT cleared here (see dhd_clear_bus_errors()).
 * Side effect: on OEM_ANDROID builds, a firmware trap additionally kicks
 * off the hang-message path toward the framework.
 * (Function name keeps the historical "erros" spelling — it is part of the
 * driver's public interface.)
 */
bool
dhd_query_bus_erros(dhd_pub_t *dhdp)
{
	bool ret = FALSE;

	if (dhdp->dongle_reset) {
		DHD_ERROR_RLMT(("%s: Dongle Reset occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->dongle_trap_occured) {
		DHD_ERROR_RLMT(("%s: FW TRAP has occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
#ifdef OEM_ANDROID
		/* Notify the Android framework so it can recover the interface */
		dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
		dhd_os_send_hang_message(dhdp);
#endif /* OEM_ANDROID */
	}

	if (dhdp->iovar_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous IOVAR, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef PCIE_FULL_DONGLE
	if (dhdp->d3ack_timeout_occured) {
		DHD_ERROR_RLMT(("%s: Resumed on timeout for previous D3ACK, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
	if (dhdp->livelock_occured) {
		DHD_ERROR_RLMT(("%s: LIVELOCK occurred for previous msg, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->pktid_audit_failed) {
		DHD_ERROR_RLMT(("%s: pktid_audit_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* PCIE_FULL_DONGLE */

	if (dhdp->iface_op_failed) {
		DHD_ERROR_RLMT(("%s: iface_op_failed, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_timeout_occurred) {
		DHD_ERROR_RLMT(("%s: scan_timeout_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhdp->scan_busy_occurred) {
		DHD_ERROR_RLMT(("%s: scan_busy_occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

#ifdef DNGL_AXI_ERROR_LOGGING
	if (dhdp->axi_error) {
		DHD_ERROR_RLMT(("%s: AXI error occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif /* DNGL_AXI_ERROR_LOGGING */

#if defined(BCMPCIE)
	/* Bus-level conditions are queried from the bus layer, not flags on dhdp */
	if (dhd_bus_get_linkdown(dhdp)) {
		DHD_ERROR_RLMT(("%s : PCIE Link down occurred, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}

	if (dhd_bus_get_cto(dhdp)) {
		DHD_ERROR_RLMT(("%s : CTO Recovery reported, cannot proceed\n",
			__FUNCTION__));
		ret = TRUE;
	}
#endif

	return ret;
}
785
786 void
dhd_clear_bus_errors(dhd_pub_t * dhdp)787 dhd_clear_bus_errors(dhd_pub_t *dhdp)
788 {
789 if (!dhdp)
790 return;
791
792 dhdp->dongle_reset = FALSE;
793 dhdp->dongle_trap_occured = FALSE;
794 dhdp->iovar_timeout_occured = FALSE;
795 #ifdef PCIE_FULL_DONGLE
796 dhdp->d3ack_timeout_occured = FALSE;
797 dhdp->livelock_occured = FALSE;
798 dhdp->pktid_audit_failed = FALSE;
799 #endif
800 dhdp->iface_op_failed = FALSE;
801 dhdp->scan_timeout_occurred = FALSE;
802 dhdp->scan_busy_occurred = FALSE;
803 #ifdef BT_OVER_PCIE
804 dhdp->dongle_trap_due_to_bt = FALSE;
805 #endif
806 }
807
808 #ifdef DHD_SSSR_DUMP
809
/* SSSR capture enable. This can be overwritten by module parameter defined in
 * dhd_linux.c.
 */
uint sssr_enab = TRUE;

/* FIS dump enable: defaults on only when the build defines DHD_FIS_DUMP. */
#ifdef DHD_FIS_DUMP
uint fis_enab = TRUE;
#else
uint fis_enab = FALSE;
#endif /* DHD_FIS_DUMP */
818
819 int
dhd_sssr_mempool_init(dhd_pub_t * dhd)820 dhd_sssr_mempool_init(dhd_pub_t *dhd)
821 {
822 dhd->sssr_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SSSR_MEMPOOL_SIZE);
823 if (dhd->sssr_mempool == NULL) {
824 DHD_ERROR(("%s: MALLOC of sssr_mempool failed\n",
825 __FUNCTION__));
826 return BCME_ERROR;
827 }
828 return BCME_OK;
829 }
830
831 void
dhd_sssr_mempool_deinit(dhd_pub_t * dhd)832 dhd_sssr_mempool_deinit(dhd_pub_t *dhd)
833 {
834 if (dhd->sssr_mempool) {
835 MFREE(dhd->osh, dhd->sssr_mempool, DHD_SSSR_MEMPOOL_SIZE);
836 dhd->sssr_mempool = NULL;
837 }
838 }
839
840 int
dhd_sssr_reg_info_init(dhd_pub_t * dhd)841 dhd_sssr_reg_info_init(dhd_pub_t *dhd)
842 {
843 dhd->sssr_reg_info = (sssr_reg_info_cmn_t *) MALLOCZ(dhd->osh, sizeof(sssr_reg_info_cmn_t));
844 if (dhd->sssr_reg_info == NULL) {
845 DHD_ERROR(("%s: MALLOC of sssr_reg_info failed\n",
846 __FUNCTION__));
847 return BCME_ERROR;
848 }
849 return BCME_OK;
850 }
851
852 void
dhd_sssr_reg_info_deinit(dhd_pub_t * dhd)853 dhd_sssr_reg_info_deinit(dhd_pub_t *dhd)
854 {
855 if (dhd->sssr_reg_info) {
856 MFREE(dhd->osh, dhd->sssr_reg_info, sizeof(sssr_reg_info_cmn_t));
857 dhd->sssr_reg_info = NULL;
858 }
859 }
860
861 #ifdef DHD_PCIE_REG_ACCESS
/* Log the rev2 SSSR register-info layout (PMU, chipcommon, ARM, PCIe,
 * per-D11-core MAC registers and the DIG save/restore region) via DHD_ERROR.
 * Pure diagnostic dump; no state is modified.
 */
static void
dhd_dump_sssr_reg_info_v2(dhd_pub_t *dhd)
{
	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
	sssr_reg_info_v2_t *sssr_reg_info = (sssr_reg_info_v2_t *)&sssr_reg_info_cmn->rev2;
	int i, j;
	uint8 num_d11cores = dhd_d11_slices_num_get(dhd);
	DHD_ERROR(("pmu_regs\n"));
	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
	DHD_ERROR(("chipcommon_regs\n"));
	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
		sssr_reg_info->chipcommon_regs.base_regs.intmask,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
	DHD_ERROR(("arm_regs\n"));
	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
		" resetctrl=0x%x extrsrcreq=0x%x\n",
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
		sssr_reg_info->arm_regs.wrapper_regs.extrsrcreq));
	DHD_ERROR(("pcie_regs\n"));
	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
		"clockcontrolstatus_val=0x%x extrsrcreq=0x%x\n",
		sssr_reg_info->pcie_regs.base_regs.ltrstate,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->pcie_regs.wrapper_regs.extrsrcreq));

	/* One register block per D11 slice/core */
	for (i = 0; i < num_d11cores; i++) {
		DHD_ERROR(("mac_regs core[%d]\n", i));
		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
			"clockcontrolstatus_val=0x%x\n",
			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
		DHD_ERROR(("resetctrl=0x%x extrsrcreq=0x%x ioctrl=0x%x\n",
			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
			sssr_reg_info->mac_regs[i].wrapper_regs.extrsrcreq,
			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
		}
		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
	}
	DHD_ERROR(("dig_regs\n"));
	DHD_ERROR(("dig_sr_addr=0x%x dig_sr_size=0x%x\n",
		sssr_reg_info->dig_mem_info.dig_sr_addr,
		sssr_reg_info->dig_mem_info.dig_sr_size));
}
921
922 static void
dhd_dump_sssr_reg_info_v3(dhd_pub_t * dhd)923 dhd_dump_sssr_reg_info_v3(dhd_pub_t *dhd)
924 {
925 sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
926 sssr_reg_info_v3_t *sssr_reg_info = (sssr_reg_info_v3_t *)&sssr_reg_info_cmn->rev3;
927 int i;
928
929 dhd_dump_sssr_reg_info_v2(dhd);
930
931 DHD_ERROR(("FIS Enab in fw : %d\n", sssr_reg_info->fis_enab));
932
933 DHD_ERROR(("HWA regs for reset \n"));
934 DHD_ERROR(("clkenable 0x%x, clkgatingenable 0x%x, clkext 0x%x, "
935 "clkctlstatus 0x%x, ioctrl 0x%x, resetctrl 0x%x\n",
936 sssr_reg_info->hwa_regs.base_regs.clkenable,
937 sssr_reg_info->hwa_regs.base_regs.clkgatingenable,
938 sssr_reg_info->hwa_regs.base_regs.clkext,
939 sssr_reg_info->hwa_regs.base_regs.clkctlstatus,
940 sssr_reg_info->hwa_regs.wrapper_regs.ioctrl,
941 sssr_reg_info->hwa_regs.wrapper_regs.resetctrl));
942 DHD_ERROR(("HWA regs value seq for reset \n"));
943 for (i = 0; i < SSSR_HWA_RESET_SEQ_STEPS; i++) {
944 DHD_ERROR(("hwa_resetseq_val[%d] 0x%x", i,
945 sssr_reg_info->hwa_regs.hwa_resetseq_val[i]));
946 }
947 }
948
/* Dump the SSSR (Silicon Save/Restore) register info for layout version 0/1:
 * PMU, chipcommon, ARM, PCIe and VASIP register addresses, followed by the
 * per-D11-core MAC register set and SR sizes.
 */
static void
dhd_dump_sssr_reg_info_v1(dhd_pub_t *dhd)
{
	sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
	sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
	int i, j;
	uint8 num_d11cores = dhd_d11_slices_num_get(dhd);

	DHD_ERROR(("pmu_regs\n"));
	DHD_ERROR(("pmuintmask0=0x%x pmuintmask1=0x%x resreqtimer=0x%x "
		"macresreqtimer=0x%x macresreqtimer1=0x%x\n",
		sssr_reg_info->pmu_regs.base_regs.pmuintmask0,
		sssr_reg_info->pmu_regs.base_regs.pmuintmask1,
		sssr_reg_info->pmu_regs.base_regs.resreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer,
		sssr_reg_info->pmu_regs.base_regs.macresreqtimer1));
	DHD_ERROR(("chipcommon_regs\n"));
	DHD_ERROR(("intmask=0x%x powerctrl=0x%x clockcontrolstatus=0x%x powerctrl_mask=0x%x\n",
		sssr_reg_info->chipcommon_regs.base_regs.intmask,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl,
		sssr_reg_info->chipcommon_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->chipcommon_regs.base_regs.powerctrl_mask));
	DHD_ERROR(("arm_regs\n"));
	DHD_ERROR(("clockcontrolstatus=0x%x clockcontrolstatus_val=0x%x"
		" resetctrl=0x%x itopoobb=0x%x\n",
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->arm_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->arm_regs.wrapper_regs.resetctrl,
		sssr_reg_info->arm_regs.wrapper_regs.itopoobb));
	DHD_ERROR(("pcie_regs\n"));
	DHD_ERROR(("ltrstate=0x%x clockcontrolstatus=0x%x "
		"clockcontrolstatus_val=0x%x itopoobb=0x%x\n",
		sssr_reg_info->pcie_regs.base_regs.ltrstate,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus,
		sssr_reg_info->pcie_regs.base_regs.clockcontrolstatus_val,
		sssr_reg_info->pcie_regs.wrapper_regs.itopoobb));
	DHD_ERROR(("vasip_regs\n"));
	DHD_ERROR(("ioctrl=0x%x vasip_sr_addr=0x%x vasip_sr_size=0x%x\n",
		sssr_reg_info->vasip_regs.wrapper_regs.ioctrl,
		sssr_reg_info->vasip_regs.vasip_sr_addr,
		sssr_reg_info->vasip_regs.vasip_sr_size));

	/* Per-slice MAC core registers and the ioctrl reset sequence values */
	for (i = 0; i < num_d11cores; i++) {
		DHD_ERROR(("mac_regs core[%d]\n", i));
		DHD_ERROR(("xmtaddress=0x%x xmtdata=0x%x clockcontrolstatus=0x%x "
			"clockcontrolstatus_val=0x%x\n",
			sssr_reg_info->mac_regs[i].base_regs.xmtaddress,
			sssr_reg_info->mac_regs[i].base_regs.xmtdata,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus,
			sssr_reg_info->mac_regs[i].base_regs.clockcontrolstatus_val));
		DHD_ERROR(("resetctrl=0x%x itopoobb=0x%x ioctrl=0x%x\n",
			sssr_reg_info->mac_regs[i].wrapper_regs.resetctrl,
			sssr_reg_info->mac_regs[i].wrapper_regs.itopoobb,
			sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl));
		for (j = 0; j < SSSR_D11_RESET_SEQ_STEPS; j++) {
			DHD_ERROR(("ioctrl_resetseq_val[%d] 0x%x\n", j,
				sssr_reg_info->mac_regs[i].wrapper_regs.ioctrl_resetseq_val[j]));
		}
		DHD_ERROR(("sr_size=0x%x\n", sssr_reg_info->mac_regs[i].sr_size));
	}
}
1010
1011 #endif /* DHD_PCIE_REG_ACCESS */
1012
1013 void
dhd_dump_sssr_reg_info(dhd_pub_t * dhd)1014 dhd_dump_sssr_reg_info(dhd_pub_t *dhd)
1015 {
1016 #ifdef DHD_PCIE_REG_ACCESS
1017 sssr_reg_info_cmn_t *sssr_reg_info_cmn = dhd->sssr_reg_info;
1018 sssr_reg_info_v1_t *sssr_reg_info = (sssr_reg_info_v1_t *)&sssr_reg_info_cmn->rev1;
1019
1020 DHD_ERROR(("************** SSSR REG INFO start version:%d ****************\n",
1021 sssr_reg_info->version));
1022 switch (sssr_reg_info->version) {
1023 case SSSR_REG_INFO_VER_3 :
1024 dhd_dump_sssr_reg_info_v3(dhd);
1025 break;
1026 case SSSR_REG_INFO_VER_2 :
1027 dhd_dump_sssr_reg_info_v2(dhd);
1028 break;
1029 default:
1030 dhd_dump_sssr_reg_info_v1(dhd);
1031 break;
1032 }
1033 DHD_ERROR(("************** SSSR REG INFO end ****************\n"));
1034 #endif /* DHD_PCIE_REG_ACCESS */
1035 }
1036
1037 int
dhd_get_sssr_reg_info(dhd_pub_t * dhd)1038 dhd_get_sssr_reg_info(dhd_pub_t *dhd)
1039 {
1040 int ret;
1041 /* get sssr_reg_info from firmware */
1042 ret = dhd_iovar(dhd, 0, "sssr_reg_info", NULL, 0, (char *)dhd->sssr_reg_info,
1043 sizeof(sssr_reg_info_cmn_t), FALSE);
1044 if (ret < 0) {
1045 DHD_ERROR(("%s: sssr_reg_info failed (error=%d)\n",
1046 __FUNCTION__, ret));
1047 return BCME_ERROR;
1048 }
1049
1050 dhd_dump_sssr_reg_info(dhd);
1051 return BCME_OK;
1052 }
1053
/* Compute the total buffer size needed for an SSSR dump based on the
 * version-specific register info: the sum of all per-D11-core SR sizes plus
 * the VASIP or digital-core memory size where applicable.
 *
 * NOTE(review): on an unrecognized version this returns BCME_UNSUPPORTED (a
 * negative value) through a uint32 return type, so callers see a very large
 * size. The caller in dhd_sssr_dump_init() happens to reject that via its
 * mempool-bound check, but confirm before adding new callers.
 */
uint32
dhd_get_sssr_bufsize(dhd_pub_t *dhd)
{
	int i;
	uint32 sssr_bufsize = 0;
	uint8 num_d11cores;

	num_d11cores = dhd_d11_slices_num_get(dhd);

	switch (dhd->sssr_reg_info->rev2.version) {
		case SSSR_REG_INFO_VER_3 :
			/* intentional fall through */
		case SSSR_REG_INFO_VER_2 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
			}
			/* Only trust dig_mem_info if the reported length says it is present */
			if ((dhd->sssr_reg_info->rev2.length >
			 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
			 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
				sssr_bufsize += 0; /* TBD */
			}
			break;
		case SSSR_REG_INFO_VER_1 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
			}
			/* VASIP memory takes precedence; otherwise use the dig core, if present */
			if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
			} else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
				dig_mem_info)) && dhd->sssr_reg_info->rev1.
				dig_mem_info.dig_sr_addr) {
				sssr_bufsize += dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
			}
			break;
		case SSSR_REG_INFO_VER_0 :
			for (i = 0; i < num_d11cores; i++) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
			}
			if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
				sssr_bufsize += dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
			}
			break;
		default :
			DHD_ERROR(("invalid sssr_reg_ver"));
			return BCME_UNSUPPORTED;
	}

#ifdef DHD_SSSR_DUMP_BEFORE_SR
	/* Double the size as different dumps will be saved before and after SR */
	sssr_bufsize = 2 * sssr_bufsize;
#endif /* DHD_SSSR_DUMP_BEFORE_SR */

	return sssr_bufsize;
}
1108
1109 int
dhd_sssr_dump_init(dhd_pub_t * dhd)1110 dhd_sssr_dump_init(dhd_pub_t *dhd)
1111 {
1112 int i;
1113 uint32 sssr_bufsize;
1114 uint32 mempool_used = 0;
1115 uint8 num_d11cores = 0;
1116 bool alloc_sssr = FALSE;
1117 uint32 sr_size = 0;
1118
1119 dhd->sssr_inited = FALSE;
1120 if (!sssr_enab) {
1121 DHD_ERROR(("%s: sssr dump not inited as instructed by mod param\n", __FUNCTION__));
1122 return BCME_OK;
1123 }
1124
1125 /* check if sssr mempool is allocated */
1126 if (dhd->sssr_mempool == NULL) {
1127 DHD_ERROR(("%s: sssr_mempool is not allocated\n",
1128 __FUNCTION__));
1129 return BCME_ERROR;
1130 }
1131
1132 /* check if sssr mempool is allocated */
1133 if (dhd->sssr_reg_info == NULL) {
1134 DHD_ERROR(("%s: sssr_reg_info is not allocated\n",
1135 __FUNCTION__));
1136 return BCME_ERROR;
1137 }
1138
1139 /* Get SSSR reg info */
1140 if (dhd_get_sssr_reg_info(dhd) != BCME_OK) {
1141 DHD_ERROR(("%s: dhd_get_sssr_reg_info failed\n", __FUNCTION__));
1142 printf("DEBUG_SSSr: %s: dhd_get_sssr_reg_info failed\n", __FUNCTION__);
1143 return BCME_ERROR;
1144 }
1145
1146 num_d11cores = dhd_d11_slices_num_get(dhd);
1147 /* Validate structure version and length */
1148 switch (dhd->sssr_reg_info->rev2.version) {
1149 case SSSR_REG_INFO_VER_3 :
1150 if (dhd->sssr_reg_info->rev3.length != sizeof(sssr_reg_info_v3_t)) {
1151 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
1152 "mismatch on rev2\n", __FUNCTION__,
1153 (int)dhd->sssr_reg_info->rev3.length,
1154 (int)sizeof(sssr_reg_info_v3_t)));
1155 return BCME_ERROR;
1156 }
1157 break;
1158 case SSSR_REG_INFO_VER_2 :
1159 if (dhd->sssr_reg_info->rev2.length != sizeof(sssr_reg_info_v2_t)) {
1160 DHD_ERROR(("%s: dhd->sssr_reg_info->rev2.length (%d : %d)"
1161 "mismatch on rev2\n", __FUNCTION__,
1162 (int)dhd->sssr_reg_info->rev2.length,
1163 (int)sizeof(sssr_reg_info_v2_t)));
1164 return BCME_ERROR;
1165 }
1166 break;
1167 case SSSR_REG_INFO_VER_1 :
1168 if (dhd->sssr_reg_info->rev1.length != sizeof(sssr_reg_info_v1_t)) {
1169 DHD_ERROR(("%s: dhd->sssr_reg_info->rev1.length (%d : %d)"
1170 "mismatch on rev1\n", __FUNCTION__,
1171 (int)dhd->sssr_reg_info->rev1.length,
1172 (int)sizeof(sssr_reg_info_v1_t)));
1173 return BCME_ERROR;
1174 }
1175 break;
1176 case SSSR_REG_INFO_VER_0 :
1177 if (dhd->sssr_reg_info->rev0.length != sizeof(sssr_reg_info_v0_t)) {
1178 DHD_ERROR(("%s: dhd->sssr_reg_info->rev0.length (%d : %d)"
1179 "mismatch on rev0\n", __FUNCTION__,
1180 (int)dhd->sssr_reg_info->rev0.length,
1181 (int)sizeof(sssr_reg_info_v0_t)));
1182 return BCME_ERROR;
1183 }
1184 break;
1185 default :
1186 DHD_ERROR(("invalid sssr_reg_ver"));
1187 return BCME_UNSUPPORTED;
1188 }
1189
1190 /* validate fifo size */
1191 sssr_bufsize = dhd_get_sssr_bufsize(dhd);
1192 if (sssr_bufsize > DHD_SSSR_MEMPOOL_SIZE) {
1193 DHD_ERROR(("%s: sssr_bufsize(%d) is greater than sssr_mempool(%d)\n",
1194 __FUNCTION__, (int)sssr_bufsize, DHD_SSSR_MEMPOOL_SIZE));
1195 return BCME_ERROR;
1196 }
1197
1198 /* init all pointers to NULL */
1199 for (i = 0; i < num_d11cores; i++) {
1200 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1201 dhd->sssr_d11_before[i] = NULL;
1202 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1203 dhd->sssr_d11_after[i] = NULL;
1204 }
1205
1206 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1207 dhd->sssr_dig_buf_before = NULL;
1208 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1209 dhd->sssr_dig_buf_after = NULL;
1210
1211 /* Allocate memory */
1212 for (i = 0; i < num_d11cores; i++) {
1213 alloc_sssr = FALSE;
1214 sr_size = 0;
1215
1216 switch (dhd->sssr_reg_info->rev2.version) {
1217 case SSSR_REG_INFO_VER_3 :
1218 /* intentional fall through */
1219 case SSSR_REG_INFO_VER_2 :
1220 if (dhd->sssr_reg_info->rev2.mac_regs[i].sr_size) {
1221 alloc_sssr = TRUE;
1222 sr_size = dhd->sssr_reg_info->rev2.mac_regs[i].sr_size;
1223 }
1224 break;
1225 case SSSR_REG_INFO_VER_1 :
1226 if (dhd->sssr_reg_info->rev1.mac_regs[i].sr_size) {
1227 alloc_sssr = TRUE;
1228 sr_size = dhd->sssr_reg_info->rev1.mac_regs[i].sr_size;
1229 }
1230 break;
1231 case SSSR_REG_INFO_VER_0 :
1232 if (dhd->sssr_reg_info->rev0.mac_regs[i].sr_size) {
1233 alloc_sssr = TRUE;
1234 sr_size = dhd->sssr_reg_info->rev0.mac_regs[i].sr_size;
1235 }
1236 break;
1237 default :
1238 DHD_ERROR(("invalid sssr_reg_ver"));
1239 return BCME_UNSUPPORTED;
1240 }
1241
1242 if (alloc_sssr) {
1243 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1244 dhd->sssr_d11_before[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
1245 mempool_used += sr_size;
1246 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1247
1248 dhd->sssr_d11_after[i] = (uint32 *)(dhd->sssr_mempool + mempool_used);
1249 mempool_used += sr_size;
1250 }
1251 }
1252
1253 /* Allocate dump memory for VASIP (version 0 or 1) or digital core (version 0, 1, or 2) */
1254 alloc_sssr = FALSE;
1255 sr_size = 0;
1256 switch (dhd->sssr_reg_info->rev2.version) {
1257 case SSSR_REG_INFO_VER_3 :
1258 /* intentional fall through */
1259 case SSSR_REG_INFO_VER_2 :
1260 if ((dhd->sssr_reg_info->rev2.length >
1261 OFFSETOF(sssr_reg_info_v2_t, dig_mem_info)) &&
1262 dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_addr) {
1263 alloc_sssr = TRUE;
1264 sr_size = dhd->sssr_reg_info->rev2.dig_mem_info.dig_sr_size;
1265 }
1266 break;
1267 case SSSR_REG_INFO_VER_1 :
1268 if (dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size) {
1269 alloc_sssr = TRUE;
1270 sr_size = dhd->sssr_reg_info->rev1.vasip_regs.vasip_sr_size;
1271 } else if ((dhd->sssr_reg_info->rev1.length > OFFSETOF(sssr_reg_info_v1_t,
1272 dig_mem_info)) && dhd->sssr_reg_info->rev1.
1273 dig_mem_info.dig_sr_addr) {
1274 alloc_sssr = TRUE;
1275 sr_size = dhd->sssr_reg_info->rev1.dig_mem_info.dig_sr_size;
1276 }
1277 break;
1278 case SSSR_REG_INFO_VER_0 :
1279 if (dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size) {
1280 alloc_sssr = TRUE;
1281 sr_size = dhd->sssr_reg_info->rev0.vasip_regs.vasip_sr_size;
1282 }
1283 break;
1284 default :
1285 DHD_ERROR(("invalid sssr_reg_ver"));
1286 return BCME_UNSUPPORTED;
1287 }
1288
1289 if (alloc_sssr) {
1290 dhd->sssr_dig_buf_after = (uint32 *)(dhd->sssr_mempool + mempool_used);
1291 mempool_used += sr_size;
1292
1293 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1294 /* DIG dump before suspend is not applicable. */
1295 dhd->sssr_dig_buf_before = NULL;
1296 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1297 }
1298
1299 dhd->sssr_inited = TRUE;
1300
1301 return BCME_OK;
1302
1303 }
1304
1305 void
dhd_sssr_dump_deinit(dhd_pub_t * dhd)1306 dhd_sssr_dump_deinit(dhd_pub_t *dhd)
1307 {
1308 int i;
1309
1310 dhd->sssr_inited = FALSE;
1311 /* init all pointers to NULL */
1312 for (i = 0; i < MAX_NUM_D11_CORES_WITH_SCAN; i++) {
1313 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1314 dhd->sssr_d11_before[i] = NULL;
1315 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1316 dhd->sssr_d11_after[i] = NULL;
1317 }
1318 #ifdef DHD_SSSR_DUMP_BEFORE_SR
1319 dhd->sssr_dig_buf_before = NULL;
1320 #endif /* DHD_SSSR_DUMP_BEFORE_SR */
1321 dhd->sssr_dig_buf_after = NULL;
1322
1323 return;
1324 }
1325
1326 void
dhd_sssr_print_filepath(dhd_pub_t * dhd,char * path)1327 dhd_sssr_print_filepath(dhd_pub_t *dhd, char *path)
1328 {
1329 bool print_info = FALSE;
1330 int dump_mode;
1331
1332 if (!dhd || !path) {
1333 DHD_ERROR(("%s: dhd or memdump_path is NULL\n",
1334 __FUNCTION__));
1335 return;
1336 }
1337
1338 if (!dhd->sssr_dump_collected) {
1339 /* SSSR dump is not collected */
1340 return;
1341 }
1342
1343 dump_mode = dhd->sssr_dump_mode;
1344
1345 if (bcmstrstr(path, "core_0_before")) {
1346 if (dhd->sssr_d11_outofreset[0] &&
1347 dump_mode == SSSR_DUMP_MODE_SSSR) {
1348 print_info = TRUE;
1349 }
1350 } else if (bcmstrstr(path, "core_0_after")) {
1351 if (dhd->sssr_d11_outofreset[0]) {
1352 print_info = TRUE;
1353 }
1354 } else if (bcmstrstr(path, "core_1_before")) {
1355 if (dhd->sssr_d11_outofreset[1] &&
1356 dump_mode == SSSR_DUMP_MODE_SSSR) {
1357 print_info = TRUE;
1358 }
1359 } else if (bcmstrstr(path, "core_1_after")) {
1360 if (dhd->sssr_d11_outofreset[1]) {
1361 print_info = TRUE;
1362 }
1363 } else if (bcmstrstr(path, "core_2_before")) {
1364 if (dhd->sssr_d11_outofreset[2] &&
1365 dump_mode == SSSR_DUMP_MODE_SSSR) {
1366 print_info = TRUE;
1367 }
1368 } else if (bcmstrstr(path, "core_2_after")) {
1369 if (dhd->sssr_d11_outofreset[2]) {
1370 print_info = TRUE;
1371 }
1372 } else {
1373 print_info = TRUE;
1374 }
1375
1376 if (print_info) {
1377 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
1378 path, FILE_NAME_HAL_TAG));
1379 }
1380 }
1381 #endif /* DHD_SSSR_DUMP */
1382
1383 #ifdef DHD_SDTC_ETB_DUMP
1384 /*
1385 * sdtc: system debug trace controller
1386 * etb: embedded trace buf
1387 */
/* Query the SDTC (system debug trace controller) ETB (embedded trace buffer)
 * address info from firmware via the "sdtc" iovar and cache it in
 * dhd->etb_addr_info. Sets dhd->sdtc_etb_inited only when the full iovar
 * round-trip (version and command-id checks included) succeeds.
 */
void
dhd_sdtc_etb_init(dhd_pub_t *dhd)
{
	bcm_iov_buf_t *iov_req = NULL;
	etb_addr_info_t *p_etb_addr_info = NULL;
	bcm_iov_buf_t *iov_resp = NULL;
	uint8 *buf = NULL;
	int ret = 0;
	uint16 iovlen = 0;
	uint16 version = 0;

	BCM_REFERENCE(p_etb_addr_info);
	dhd->sdtc_etb_inited = FALSE;

	iov_req = MALLOCZ(dhd->osh, WLC_IOCTL_SMLEN);
	if (iov_req == NULL) {
		DHD_ERROR(("%s: Failed to alloc buffer for iovar request\n", __FUNCTION__));
		goto exit;
	}

	buf = MALLOCZ(dhd->osh, WLC_IOCTL_MAXLEN);
	if (buf == NULL) {
		DHD_ERROR(("%s: Failed to alloc buffer for iovar response\n", __FUNCTION__));
		goto exit;
	}

	/* fill header */
	iov_req->version = WL_SDTC_IOV_VERSION;
	iov_req->id = WL_SDTC_CMD_ETB_INFO;
	iov_req->len = sizeof(etb_addr_info_t);
	iovlen = OFFSETOF(bcm_iov_buf_t, data) + iov_req->len;

	ret = dhd_iovar(dhd, 0, "sdtc", (char *)iov_req, iovlen,
		(char *)buf, WLC_IOCTL_MAXLEN, FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s failed to get sdtc etb_info %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* First uint16 of the response is the bcm_iov_buf_t version field */
	version = dtoh16(*(uint16 *)buf);
	/* Check for version */
	if (version != WL_SDTC_IOV_VERSION) {
		DHD_ERROR(("%s WL_SDTC_IOV_VERSION mis match\n", __FUNCTION__));
		goto exit;
	}
	iov_resp = (bcm_iov_buf_t *)buf;
	if (iov_resp->id == iov_req->id) {
		/* Cache the ETB address info the firmware returned */
		p_etb_addr_info = (etb_addr_info_t*)iov_resp->data;
		dhd->etb_addr_info.version = p_etb_addr_info->version;
		dhd->etb_addr_info.len = p_etb_addr_info->len;
		dhd->etb_addr_info.etbinfo_addr = p_etb_addr_info->etbinfo_addr;

		DHD_ERROR(("%s etb_addr_info: ver:%d, len:%d, addr:0x%x\n", __FUNCTION__,
			dhd->etb_addr_info.version, dhd->etb_addr_info.len,
			dhd->etb_addr_info.etbinfo_addr));
	} else {
		DHD_ERROR(("%s Unknown CMD-ID (%d) as response for request ID %d\n",
			__FUNCTION__, iov_resp->id, iov_req->id));
		goto exit;
	}

	/* since all the requirements for SDTC and ETB are met mark the capability as TRUE */
	dhd->sdtc_etb_inited = TRUE;
	DHD_ERROR(("%s sdtc_etb_inited: %d\n", __FUNCTION__, dhd->sdtc_etb_inited));
exit:
	/* Both temporary buffers are freed on every path */
	if (iov_req) {
		MFREE(dhd->osh, iov_req, WLC_IOCTL_SMLEN);
	}
	if (buf) {
		MFREE(dhd->osh, buf, WLC_IOCTL_MAXLEN);
	}
	return;
}
1461
/* Mark SDTC/ETB support uninitialized; the mempool is freed separately by
 * dhd_sdtc_etb_mempool_deinit().
 */
void
dhd_sdtc_etb_deinit(dhd_pub_t *dhd)
{
	dhd->sdtc_etb_inited = FALSE;
}
1467
1468 int
dhd_sdtc_etb_mempool_init(dhd_pub_t * dhd)1469 dhd_sdtc_etb_mempool_init(dhd_pub_t *dhd)
1470 {
1471 dhd->sdtc_etb_mempool = (uint8 *) MALLOCZ(dhd->osh, DHD_SDTC_ETB_MEMPOOL_SIZE);
1472 if (dhd->sdtc_etb_mempool == NULL) {
1473 DHD_ERROR(("%s: MALLOC of sdtc_etb_mempool failed\n",
1474 __FUNCTION__));
1475 return BCME_ERROR;
1476 }
1477 return BCME_OK;
1478 }
1479
1480 void
dhd_sdtc_etb_mempool_deinit(dhd_pub_t * dhd)1481 dhd_sdtc_etb_mempool_deinit(dhd_pub_t *dhd)
1482 {
1483 if (dhd->sdtc_etb_mempool) {
1484 MFREE(dhd->osh, dhd->sdtc_etb_mempool, DHD_SDTC_ETB_MEMPOOL_SIZE);
1485 dhd->sdtc_etb_mempool = NULL;
1486 }
1487 }
1488 #endif /* DHD_SDTC_ETB_DUMP */
1489
1490 #ifdef DHD_FW_COREDUMP
dhd_get_fwdump_buf(dhd_pub_t * dhd_pub,uint32 length)1491 void* dhd_get_fwdump_buf(dhd_pub_t *dhd_pub, uint32 length)
1492 {
1493 if (!dhd_pub->soc_ram) {
1494 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
1495 dhd_pub->soc_ram = (uint8*)DHD_OS_PREALLOC(dhd_pub,
1496 DHD_PREALLOC_MEMDUMP_RAM, length);
1497 #else
1498 dhd_pub->soc_ram = (uint8*) MALLOC(dhd_pub->osh, length);
1499
1500 if ((dhd_pub->soc_ram == NULL) && CAN_SLEEP()) {
1501 DHD_ERROR(("%s: Try to allocate virtual memory for fw crash snap shot.\n",
1502 __FUNCTION__));
1503 dhd_pub->soc_ram = (uint8*) VMALLOC(dhd_pub->osh, length);
1504 }
1505 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
1506 }
1507
1508 if (dhd_pub->soc_ram == NULL) {
1509 DHD_ERROR(("%s: Failed to allocate memory for fw crash snap shot.\n",
1510 __FUNCTION__));
1511 dhd_pub->soc_ram_length = 0;
1512 } else {
1513 memset(dhd_pub->soc_ram, 0, length);
1514 dhd_pub->soc_ram_length = length;
1515 }
1516
1517 /* soc_ram free handled in dhd_{free,clear} */
1518 return dhd_pub->soc_ram;
1519 }
1520 #endif /* DHD_FW_COREDUMP */
1521
1522 /* to NDIS developer, the structure dhd_common is redundant,
1523 * please do NOT merge it back from other branches !!!
1524 */
1525
/* Trigger a SoC RAM dump through the bus layer. On DBUS builds there is no
 * bus-level dump, so this is a successful no-op.
 */
int
dhd_common_socram_dump(dhd_pub_t *dhdp)
{
#ifdef BCMDBUS
	return 0;
#else
	return dhd_socram_dump(dhdp->bus);
#endif /* BCMDBUS */
}
1535
1536 int
dhd_dump(dhd_pub_t * dhdp,char * buf,int buflen)1537 dhd_dump(dhd_pub_t *dhdp, char *buf, int buflen)
1538 {
1539 struct bcmstrbuf b;
1540 struct bcmstrbuf *strbuf = &b;
1541 #ifdef DHD_MEM_STATS
1542 uint64 malloc_mem = 0;
1543 uint64 total_txpath_mem = 0;
1544 uint64 txpath_bkpq_len = 0;
1545 uint64 txpath_bkpq_mem = 0;
1546 uint64 total_dhd_mem = 0;
1547 #endif /* DHD_MEM_STATS */
1548
1549 if (!dhdp || !dhdp->prot || !buf) {
1550 return BCME_ERROR;
1551 }
1552
1553 bcm_binit(strbuf, buf, buflen);
1554
1555 /* Base DHD info */
1556 bcm_bprintf(strbuf, "%s\n", dhd_version);
1557 bcm_bprintf(strbuf, "\n");
1558 bcm_bprintf(strbuf, "pub.up %d pub.txoff %d pub.busstate %d\n",
1559 dhdp->up, dhdp->txoff, dhdp->busstate);
1560 bcm_bprintf(strbuf, "pub.hdrlen %u pub.maxctl %u pub.rxsz %u\n",
1561 dhdp->hdrlen, dhdp->maxctl, dhdp->rxsz);
1562 bcm_bprintf(strbuf, "pub.iswl %d pub.drv_version %ld pub.mac "MACDBG"\n",
1563 dhdp->iswl, dhdp->drv_version, MAC2STRDBG(&dhdp->mac));
1564 bcm_bprintf(strbuf, "pub.bcmerror %d tickcnt %u\n", dhdp->bcmerror, dhdp->tickcnt);
1565
1566 bcm_bprintf(strbuf, "dongle stats:\n");
1567 bcm_bprintf(strbuf, "tx_packets %lu tx_bytes %lu tx_errors %lu tx_dropped %lu\n",
1568 dhdp->dstats.tx_packets, dhdp->dstats.tx_bytes,
1569 dhdp->dstats.tx_errors, dhdp->dstats.tx_dropped);
1570 bcm_bprintf(strbuf, "rx_packets %lu rx_bytes %lu rx_errors %lu rx_dropped %lu\n",
1571 dhdp->dstats.rx_packets, dhdp->dstats.rx_bytes,
1572 dhdp->dstats.rx_errors, dhdp->dstats.rx_dropped);
1573 bcm_bprintf(strbuf, "multicast %lu\n", dhdp->dstats.multicast);
1574
1575 bcm_bprintf(strbuf, "bus stats:\n");
1576 bcm_bprintf(strbuf, "tx_packets %lu tx_dropped %lu tx_multicast %lu tx_errors %lu\n",
1577 dhdp->tx_packets, dhdp->tx_dropped, dhdp->tx_multicast, dhdp->tx_errors);
1578 bcm_bprintf(strbuf, "tx_ctlpkts %lu tx_ctlerrs %lu\n",
1579 dhdp->tx_ctlpkts, dhdp->tx_ctlerrs);
1580 bcm_bprintf(strbuf, "rx_packets %lu rx_multicast %lu rx_errors %lu \n",
1581 dhdp->rx_packets, dhdp->rx_multicast, dhdp->rx_errors);
1582 bcm_bprintf(strbuf, "rx_ctlpkts %lu rx_ctlerrs %lu rx_dropped %lu\n",
1583 dhdp->rx_ctlpkts, dhdp->rx_ctlerrs, dhdp->rx_dropped);
1584 bcm_bprintf(strbuf, "rx_readahead_cnt %lu tx_realloc %lu\n",
1585 dhdp->rx_readahead_cnt, dhdp->tx_realloc);
1586 bcm_bprintf(strbuf, "tx_pktgetfail %lu rx_pktgetfail %lu\n",
1587 dhdp->tx_pktgetfail, dhdp->rx_pktgetfail);
1588 bcm_bprintf(strbuf, "tx_big_packets %lu\n",
1589 dhdp->tx_big_packets);
1590 bcm_bprintf(strbuf, "\n");
1591 #ifdef DMAMAP_STATS
1592 /* Add DMA MAP info */
1593 bcm_bprintf(strbuf, "DMA MAP stats: \n");
1594 bcm_bprintf(strbuf, "txdata: %lu size: %luK, rxdata: %lu size: %luK\n",
1595 dhdp->dma_stats.txdata, KB(dhdp->dma_stats.txdata_sz),
1596 dhdp->dma_stats.rxdata, KB(dhdp->dma_stats.rxdata_sz));
1597 #ifndef IOCTLRESP_USE_CONSTMEM
1598 bcm_bprintf(strbuf, "IOCTL RX: %lu size: %luK ,",
1599 dhdp->dma_stats.ioctl_rx, KB(dhdp->dma_stats.ioctl_rx_sz));
1600 #endif /* !IOCTLRESP_USE_CONSTMEM */
1601 bcm_bprintf(strbuf, "EVENT RX: %lu size: %luK, INFO RX: %lu size: %luK, "
1602 "TSBUF RX: %lu size %luK\n",
1603 dhdp->dma_stats.event_rx, KB(dhdp->dma_stats.event_rx_sz),
1604 dhdp->dma_stats.info_rx, KB(dhdp->dma_stats.info_rx_sz),
1605 dhdp->dma_stats.tsbuf_rx, KB(dhdp->dma_stats.tsbuf_rx_sz));
1606 bcm_bprintf(strbuf, "Total : %luK \n",
1607 KB(dhdp->dma_stats.txdata_sz + dhdp->dma_stats.rxdata_sz +
1608 dhdp->dma_stats.ioctl_rx_sz + dhdp->dma_stats.event_rx_sz +
1609 dhdp->dma_stats.tsbuf_rx_sz));
1610 #endif /* DMAMAP_STATS */
1611 bcm_bprintf(strbuf, "dhd_induce_error : %u\n", dhdp->dhd_induce_error);
1612 /* Add any prot info */
1613 dhd_prot_dump(dhdp, strbuf);
1614 bcm_bprintf(strbuf, "\n");
1615
1616 /* Add any bus info */
1617 dhd_bus_dump(dhdp, strbuf);
1618 #if defined(BCM_ROUTER_DHD) && defined(HNDCTF)
1619 /* Add ctf info */
1620 dhd_ctf_dump(dhdp, strbuf);
1621 #endif /* BCM_ROUTER_DHD && HNDCTF */
1622
1623 #if defined(DHD_LB_STATS)
1624 dhd_lb_stats_dump(dhdp, strbuf);
1625 #endif /* DHD_LB_STATS */
1626
1627 #ifdef DHD_MEM_STATS
1628
1629 malloc_mem = MALLOCED(dhdp->osh);
1630
1631 txpath_bkpq_len = dhd_active_tx_flowring_bkpq_len(dhdp);
1632 /*
1633 * Instead of traversing the entire queue to find the skbs length,
1634 * considering MAX_MTU_SZ as lenth of each skb.
1635 */
1636 txpath_bkpq_mem = (txpath_bkpq_len* MAX_MTU_SZ);
1637 total_txpath_mem = dhdp->txpath_mem + txpath_bkpq_mem;
1638
1639 bcm_bprintf(strbuf, "\nDHD malloc memory_usage: %llubytes %lluKB\n",
1640 malloc_mem, (malloc_mem / 1024));
1641
1642 bcm_bprintf(strbuf, "\nDHD tx-bkpq len: %llu memory_usage: %llubytes %lluKB\n",
1643 txpath_bkpq_len, txpath_bkpq_mem, (txpath_bkpq_mem / 1024));
1644 bcm_bprintf(strbuf, "DHD tx-path memory_usage: %llubytes %lluKB\n",
1645 total_txpath_mem, (total_txpath_mem / 1024));
1646
1647 total_dhd_mem = malloc_mem + total_txpath_mem;
1648 #if defined(DHD_LB_STATS)
1649 total_dhd_mem += dhd_lb_mem_usage(dhdp, strbuf);
1650 #endif /* DHD_LB_STATS */
1651 bcm_bprintf(strbuf, "\nDHD Totoal memory_usage: %llubytes %lluKB \n",
1652 total_dhd_mem, (total_dhd_mem / 1024));
1653 #endif /* DHD_MEM_STATS */
1654 #if defined(DHD_LB_STATS)
1655 bcm_bprintf(strbuf, "\nlb_rxp_stop_thr_hitcnt: %llu lb_rxp_strt_thr_hitcnt: %llu\n",
1656 dhdp->lb_rxp_stop_thr_hitcnt, dhdp->lb_rxp_strt_thr_hitcnt);
1657 bcm_bprintf(strbuf, "\nlb_rxp_napi_sched_cnt: %llu lb_rxp_napi_complete_cnt: %llu\n",
1658 dhdp->lb_rxp_napi_sched_cnt, dhdp->lb_rxp_napi_complete_cnt);
1659 #endif /* DHD_LB_STATS */
1660
1661 #if defined(DHD_MQ) && defined(DHD_MQ_STATS)
1662 dhd_mqstats_dump(dhdp, strbuf);
1663 #endif
1664
1665 #ifdef DHD_WET
1666 if (dhd_get_wet_mode(dhdp)) {
1667 bcm_bprintf(strbuf, "Wet Dump:\n");
1668 dhd_wet_dump(dhdp, strbuf);
1669 }
1670 #endif /* DHD_WET */
1671
1672 DHD_ERROR(("%s bufsize: %d free: %d\n", __FUNCTION__, buflen, strbuf->size));
1673 /* return remaining buffer length */
1674 return (!strbuf->size ? BCME_BUFTOOSHORT : strbuf->size);
1675 }
1676
/* Emit the full dhd_dump() output to the kernel log.
 * NOTE(review): bcm_bprintf_bypass presumably makes bcm_bprintf print
 * directly instead of accumulating into the buffer, which is why a small
 * 512-byte scratch buffer suffices — confirm against bcmutils.
 */
void
dhd_dump_to_kernelog(dhd_pub_t *dhdp)
{
	char buf[512];

	DHD_ERROR(("F/W version: %s\n", fw_version));
	bcm_bprintf_bypass = TRUE;
	dhd_dump(dhdp, buf, sizeof(buf));
	bcm_bprintf_bypass = FALSE;
}
1687
1688 int
dhd_wl_ioctl_cmd(dhd_pub_t * dhd_pub,int cmd,void * arg,int len,uint8 set,int ifidx)1689 dhd_wl_ioctl_cmd(dhd_pub_t *dhd_pub, int cmd, void *arg, int len, uint8 set, int ifidx)
1690 {
1691 wl_ioctl_t ioc;
1692
1693 ioc.cmd = cmd;
1694 ioc.buf = arg;
1695 ioc.len = len;
1696 ioc.set = set;
1697
1698 return dhd_wl_ioctl(dhd_pub, ifidx, &ioc, arg, len);
1699 }
1700
1701 int
dhd_wl_ioctl_get_intiovar(dhd_pub_t * dhd_pub,char * name,uint * pval,int cmd,uint8 set,int ifidx)1702 dhd_wl_ioctl_get_intiovar(dhd_pub_t *dhd_pub, char *name, uint *pval,
1703 int cmd, uint8 set, int ifidx)
1704 {
1705 char iovbuf[WLC_IOCTL_SMLEN];
1706 int ret = -1;
1707
1708 memset(iovbuf, 0, sizeof(iovbuf));
1709 if (bcm_mkiovar(name, NULL, 0, iovbuf, sizeof(iovbuf))) {
1710 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, sizeof(iovbuf), set, ifidx);
1711 if (!ret) {
1712 *pval = ltoh32(*((uint*)iovbuf));
1713 } else {
1714 DHD_ERROR(("%s: get int iovar %s failed, ERR %d\n",
1715 __FUNCTION__, name, ret));
1716 }
1717 } else {
1718 DHD_ERROR(("%s: mkiovar %s failed\n",
1719 __FUNCTION__, name));
1720 }
1721
1722 return ret;
1723 }
1724
1725 int
dhd_wl_ioctl_set_intiovar(dhd_pub_t * dhd_pub,char * name,uint val,int cmd,uint8 set,int ifidx)1726 dhd_wl_ioctl_set_intiovar(dhd_pub_t *dhd_pub, char *name, uint val,
1727 int cmd, uint8 set, int ifidx)
1728 {
1729 char iovbuf[WLC_IOCTL_SMLEN];
1730 int ret = -1;
1731 int lval = htol32(val);
1732 uint len;
1733
1734 len = bcm_mkiovar(name, (char*)&lval, sizeof(lval), iovbuf, sizeof(iovbuf));
1735
1736 if (len) {
1737 ret = dhd_wl_ioctl_cmd(dhd_pub, cmd, iovbuf, len, set, ifidx);
1738 if (ret) {
1739 DHD_ERROR(("%s: set int iovar %s failed, ERR %d\n",
1740 __FUNCTION__, name, ret));
1741 }
1742 } else {
1743 DHD_ERROR(("%s: mkiovar %s failed\n",
1744 __FUNCTION__, name));
1745 }
1746
1747 return ret;
1748 }
1749
/* Mapping of common WLC ioctl codes to human-readable names, used only for
 * ioctl/iovar set logging by ioctl2str(). Terminated by a {0, NULL} entry.
 */
static struct ioctl2str_s {
	uint32 ioctl;
	char *name;
} ioctl2str_array[] = {
	{WLC_UP, "UP"},
	{WLC_DOWN, "DOWN"},
	{WLC_SET_PROMISC, "SET_PROMISC"},
	{WLC_SET_INFRA, "SET_INFRA"},
	{WLC_SET_AUTH, "SET_AUTH"},
	{WLC_SET_SSID, "SET_SSID"},
	{WLC_RESTART, "RESTART"},
	{WLC_SET_CHANNEL, "SET_CHANNEL"},
	{WLC_SET_RATE_PARAMS, "SET_RATE_PARAMS"},
	{WLC_SET_KEY, "SET_KEY"},
	{WLC_SCAN, "SCAN"},
	{WLC_DISASSOC, "DISASSOC"},
	{WLC_REASSOC, "REASSOC"},
	{WLC_SET_COUNTRY, "SET_COUNTRY"},
	{WLC_SET_WAKE, "SET_WAKE"},
	{WLC_SET_SCANSUPPRESS, "SET_SCANSUPPRESS"},
	{WLC_SCB_DEAUTHORIZE, "SCB_DEAUTHORIZE"},
	{WLC_SET_WSEC, "SET_WSEC"},
	{WLC_SET_INTERFERENCE_MODE, "SET_INTERFERENCE_MODE"},
	{WLC_SET_RADAR, "SET_RADAR"},
	{0, NULL}
};
1776
1777 static char *
ioctl2str(uint32 ioctl)1778 ioctl2str(uint32 ioctl)
1779 {
1780 struct ioctl2str_s *p = ioctl2str_array;
1781
1782 while (p->name != NULL) {
1783 if (p->ioctl == ioctl) {
1784 return p->name;
1785 }
1786 p++;
1787 }
1788
1789 return "";
1790 }
1791
1792 /**
1793 * @param ioc IO control struct, members are partially used by this function.
1794 * @param buf [inout] Contains parameters to send to dongle, contains dongle response on return.
1795 * @param len Maximum number of bytes that dongle is allowed to write into 'buf'.
1796 */
1797 int
dhd_wl_ioctl(dhd_pub_t * dhd_pub,int ifidx,wl_ioctl_t * ioc,void * buf,int len)1798 dhd_wl_ioctl(dhd_pub_t *dhd_pub, int ifidx, wl_ioctl_t *ioc, void *buf, int len)
1799 {
1800 int ret = BCME_ERROR;
1801 unsigned long flags;
1802 #ifdef DUMP_IOCTL_IOV_LIST
1803 dhd_iov_li_t *iov_li;
1804 #endif /* DUMP_IOCTL_IOV_LIST */
1805 #ifdef REPORT_FATAL_TIMEOUTS
1806 wl_escan_params_t *eparams;
1807 uint8 *buf_ptr = (uint8 *)buf;
1808 uint16 action = 0;
1809 #endif /* REPORT_FATAL_TIMEOUTS */
1810 int hostsleep_set = 0;
1811 int hostsleep_val = 0;
1812
1813 if (dhd_query_bus_erros(dhd_pub)) {
1814 return -ENODEV;
1815 }
1816
1817 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
1818 DHD_OS_WAKE_LOCK(dhd_pub);
1819 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd_pub->bus)) < 0) {
1820 DHD_RPM(("%s: pm_runtime_get_sync error. \n", __FUNCTION__));
1821 DHD_OS_WAKE_UNLOCK(dhd_pub);
1822 return BCME_ERROR;
1823 }
1824 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
1825
1826 #ifdef KEEPIF_ON_DEVICE_RESET
1827 if (ioc->cmd == WLC_GET_VAR) {
1828 dbus_config_t config;
1829 config.general_param = 0;
1830 if (buf) {
1831 if (!strcmp(buf, "wowl_activate")) {
1832 /* 1 (TRUE) after decreased by 1 */
1833 config.general_param = 2;
1834 } else if (!strcmp(buf, "wowl_clear")) {
1835 /* 0 (FALSE) after decreased by 1 */
1836 config.general_param = 1;
1837 }
1838 }
1839 if (config.general_param) {
1840 config.config_id = DBUS_CONFIG_ID_KEEPIF_ON_DEVRESET;
1841 config.general_param--;
1842 dbus_set_config(dhd_pub->dbus, &config);
1843 }
1844 }
1845 #endif /* KEEPIF_ON_DEVICE_RESET */
1846
1847 if (dhd_os_proto_block(dhd_pub))
1848 {
1849 #ifdef DHD_LOG_DUMP
1850 int slen, val, lval, min_len;
1851 char *msg, tmp[64];
1852
1853 /* WLC_GET_VAR */
1854 if (ioc->cmd == WLC_GET_VAR && buf) {
1855 min_len = MIN(sizeof(tmp) - 1, strlen(buf));
1856 memset(tmp, 0, sizeof(tmp));
1857 bcopy(buf, tmp, min_len);
1858 tmp[min_len] = '\0';
1859 }
1860 #endif /* DHD_LOG_DUMP */
1861
1862 #ifdef DHD_DISCONNECT_TRACE
1863 if (WLC_DISASSOC == ioc->cmd || WLC_DOWN == ioc->cmd ||
1864 WLC_DISASSOC_MYAP == ioc->cmd) {
1865 DHD_ERROR(("IOCTL Disconnect WiFi: %d\n", ioc->cmd));
1866 }
1867 #endif /* HW_DISCONNECT_TRACE */
1868 /* logging of iovars that are send to the dongle, ./dhd msglevel +iovar */
1869 if (ioc->set == TRUE) {
1870 char *pars = (char *)buf; // points at user buffer
1871 if (ioc->cmd == WLC_SET_VAR && buf) {
1872 DHD_DNGL_IOVAR_SET(("iovar:%d: set %s", ifidx, pars));
1873 if (ioc->len > 1 + sizeof(uint32)) {
1874 // skip iovar name:
1875 pars += strnlen(pars, ioc->len - 1 - sizeof(uint32));
1876 pars++; // skip NULL character
1877 }
1878 } else {
1879 DHD_DNGL_IOVAR_SET(("ioctl:%d: set %d %s",
1880 ifidx, ioc->cmd, ioctl2str(ioc->cmd)));
1881 }
1882 if (pars != NULL) {
1883 DHD_DNGL_IOVAR_SET((" 0x%x\n", *(uint32*)pars));
1884 } else {
1885 DHD_DNGL_IOVAR_SET((" NULL\n"));
1886 }
1887 }
1888
1889 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1890 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub)) {
1891 #ifdef DHD_EFI
1892 DHD_INFO(("%s: returning as busstate=%d\n",
1893 __FUNCTION__, dhd_pub->busstate));
1894 #else
1895 DHD_INFO(("%s: returning as busstate=%d\n",
1896 __FUNCTION__, dhd_pub->busstate));
1897 #endif /* DHD_EFI */
1898 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1899 dhd_os_proto_unblock(dhd_pub);
1900 return -ENODEV;
1901 }
1902 DHD_BUS_BUSY_SET_IN_IOVAR(dhd_pub);
1903 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1904
1905 #ifdef DHD_PCIE_RUNTIMEPM
1906 dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_wl_ioctl);
1907 #endif /* DHD_PCIE_RUNTIMEPM */
1908
1909 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
1910 if (DHD_BUS_CHECK_SUSPEND_OR_ANY_SUSPEND_IN_PROGRESS(dhd_pub) ||
1911 dhd_pub->dhd_induce_error == DHD_INDUCE_IOCTL_SUSPEND_ERROR) {
1912 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
1913 __FUNCTION__, dhd_pub->busstate, dhd_pub->dhd_bus_busy_state));
1914 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
1915 ioctl_suspend_error++;
1916 if (ioctl_suspend_error > MAX_IOCTL_SUSPEND_ERROR) {
1917 dhd_pub->hang_reason = HANG_REASON_IOCTL_SUSPEND_ERROR;
1918 dhd_os_send_hang_message(dhd_pub);
1919 ioctl_suspend_error = 0;
1920 }
1921 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
1922 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
1923 dhd_os_busbusy_wake(dhd_pub);
1924 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1925 dhd_os_proto_unblock(dhd_pub);
1926 return -ENODEV;
1927 }
1928 #ifdef DHD_SEND_HANG_IOCTL_SUSPEND_ERROR
1929 ioctl_suspend_error = 0;
1930 #endif /* DHD_SEND_HANG_IOCTL_SUSPEND_ERROR */
1931 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
1932
1933 #if defined(WL_WLC_SHIM)
1934 {
1935 struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
1936
1937 wl_io_pport_t io_pport;
1938 io_pport.dhd_pub = dhd_pub;
1939 io_pport.ifidx = ifidx;
1940
1941 ret = wl_shim_ioctl(shim, ioc, len, &io_pport);
1942 if (ret != BCME_OK) {
1943 DHD_TRACE(("%s: wl_shim_ioctl(%d) ERR %d\n",
1944 __FUNCTION__, ioc->cmd, ret));
1945 }
1946 }
1947 #else
1948 #ifdef DUMP_IOCTL_IOV_LIST
1949 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION && buf) {
1950 if (!(iov_li = MALLOC(dhd_pub->osh, sizeof(*iov_li)))) {
1951 DHD_ERROR(("iovar dump list item allocation Failed\n"));
1952 } else {
1953 iov_li->cmd = ioc->cmd;
1954 if (buf)
1955 bcopy((char *)buf, iov_li->buff, strlen((char *)buf)+1);
1956 dhd_iov_li_append(dhd_pub, &dhd_pub->dump_iovlist_head,
1957 &iov_li->list);
1958 }
1959 }
1960 #endif /* DUMP_IOCTL_IOV_LIST */
1961
1962 #ifdef REPORT_FATAL_TIMEOUTS
1963 /* fill in the sync_id to ensure that the scan timeout is always for the
1964 * current running escan in the FW - the wl app does not fill in an
1965 * incrementing number for sync_id, it only fills in a random number which
1966 * increases the chance of 2 consecutive escans having the same sync id
1967 * This should happen here after dhd_proto_block()
1968 * is called, so that sync_id does not
1969 * get incremented if 2 consecutive escans are fired in quick succession
1970 */
1971 if ((ioc->cmd == WLC_SET_VAR &&
1972 buf != NULL &&
1973 strcmp("escan", buf) == 0)) {
1974 eparams = (wl_escan_params_t *) (buf_ptr + strlen("escan") + 1);
1975 action = dtoh16(eparams->action);
1976 if (action == WL_SCAN_ACTION_START) {
1977 ++dhd_pub->esync_id;
1978 /* sync id of 0 is not used for escan,
1979 * it is used to indicate
1980 * a normal scan timer is running, so as
1981 * to ensure that escan abort event
1982 * does not cancel a normal scan timeout
1983 */
1984 if (dhd_pub->esync_id == 0)
1985 ++dhd_pub->esync_id;
1986 DHD_INFO(("%s:escan sync id set to = %u \n",
1987 __FUNCTION__, dhd_pub->esync_id));
1988 eparams->sync_id = htod16(dhd_pub->esync_id);
1989 }
1990 }
1991 #endif /* REPORT_FATAL_TIMEOUTS */
1992
1993 if (dhd_conf_check_hostsleep(dhd_pub, ioc->cmd, ioc->buf, len,
1994 &hostsleep_set, &hostsleep_val, &ret))
1995 goto exit;
1996 ret = dhd_prot_ioctl(dhd_pub, ifidx, ioc, buf, len);
1997 dhd_conf_get_hostsleep(dhd_pub, hostsleep_set, hostsleep_val, ret);
1998
1999 #ifdef DUMP_IOCTL_IOV_LIST
2000 if (ret == -ETIMEDOUT) {
2001 DHD_ERROR(("Last %d issued commands: Latest one is at bottom.\n",
2002 IOV_LIST_MAX_LEN));
2003 dhd_iov_li_print(&dhd_pub->dump_iovlist_head);
2004 }
2005 #endif /* DUMP_IOCTL_IOV_LIST */
2006 #endif /* defined(WL_WLC_SHIM) */
2007 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
2008 if (ret == -ETIMEDOUT) {
2009 copy_hang_info_ioctl_timeout(dhd_pub, ifidx, ioc);
2010 }
2011 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
2012 #ifdef DHD_LOG_DUMP
2013 if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
2014 buf != NULL) {
2015 if (buf) {
2016 lval = 0;
2017 slen = strlen(buf) + 1;
2018 msg = (char*)buf;
2019 if (len >= slen + sizeof(lval)) {
2020 if (ioc->cmd == WLC_GET_VAR) {
2021 msg = tmp;
2022 lval = *(int*)buf;
2023 } else {
2024 min_len = MIN(ioc->len - slen, sizeof(int));
2025 bcopy((msg + slen), &lval, min_len);
2026 }
2027 if (!strncmp(msg, "cur_etheraddr",
2028 strlen("cur_etheraddr"))) {
2029 lval = 0;
2030 }
2031 }
2032 DHD_IOVAR_MEM((
2033 "%s: cmd: %d, msg: %s val: 0x%x,"
2034 " len: %d, set: %d, txn-id: %d\n",
2035 ioc->cmd == WLC_GET_VAR ?
2036 "WLC_GET_VAR" : "WLC_SET_VAR",
2037 ioc->cmd, msg, lval, ioc->len, ioc->set,
2038 dhd_prot_get_ioctl_trans_id(dhd_pub)));
2039 } else {
2040 DHD_IOVAR_MEM(("%s: cmd: %d, len: %d, set: %d, txn-id: %d\n",
2041 ioc->cmd == WLC_GET_VAR ? "WLC_GET_VAR" : "WLC_SET_VAR",
2042 ioc->cmd, ioc->len, ioc->set,
2043 dhd_prot_get_ioctl_trans_id(dhd_pub)));
2044 }
2045 } else {
2046 slen = ioc->len;
2047 if (buf != NULL && slen != 0) {
2048 if (slen >= 4) {
2049 val = *(int*)buf;
2050 } else if (slen >= 2) {
2051 val = *(short*)buf;
2052 } else {
2053 val = *(char*)buf;
2054 }
2055 /* Do not dump for WLC_GET_MAGIC and WLC_GET_VERSION */
2056 if (ioc->cmd != WLC_GET_MAGIC && ioc->cmd != WLC_GET_VERSION) {
2057 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, val: %d, len: %d, "
2058 "set: %d\n", ioc->cmd, val, ioc->len, ioc->set));
2059 }
2060 } else {
2061 DHD_IOVAR_MEM(("WLC_IOCTL: cmd: %d, buf is NULL\n", ioc->cmd));
2062 }
2063 }
2064 #endif /* DHD_LOG_DUMP */
2065 #if defined(OEM_ANDROID)
2066 if (ret && dhd_pub->up) {
2067 /* Send hang event only if dhd_open() was success */
2068 dhd_os_check_hang(dhd_pub, ifidx, ret);
2069 }
2070
2071 if (ret == -ETIMEDOUT && !dhd_pub->up) {
2072 DHD_ERROR(("%s: 'resumed on timeout' error is "
2073 "occurred before the interface does not"
2074 " bring up\n", __FUNCTION__));
2075 }
2076 #endif /* defined(OEM_ANDROID) */
2077
2078 exit:
2079 DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
2080 DHD_BUS_BUSY_CLEAR_IN_IOVAR(dhd_pub);
2081 dhd_os_busbusy_wake(dhd_pub);
2082 DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
2083
2084 #ifdef REPORT_FATAL_TIMEOUTS
2085 if ((ret == BCME_OK && ioc->cmd == WLC_SET_VAR &&
2086 buf != NULL &&
2087 strcmp("escan", buf) == 0)) {
2088 if (action == WL_SCAN_ACTION_START)
2089 dhd_start_scan_timer(dhd_pub, TRUE);
2090 }
2091 #endif /* REPORT_FATAL_TIMEOUTS */
2092
2093 dhd_os_proto_unblock(dhd_pub);
2094
2095 #ifdef DETAIL_DEBUG_LOG_FOR_IOCTL
2096 if (ret < 0) {
2097 if ((ioc->cmd == WLC_GET_VAR || ioc->cmd == WLC_SET_VAR) &&
2098 buf != NULL) {
2099 if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
2100 DHD_ERROR_MEM(("%s: %s: %s, %s\n",
2101 __FUNCTION__, ioc->cmd == WLC_GET_VAR ?
2102 "WLC_GET_VAR" : "WLC_SET_VAR",
2103 buf? (char *)buf:"NO MESSAGE",
2104 ret == BCME_UNSUPPORTED ? "UNSUPPORTED"
2105 : "NOT ASSOCIATED"));
2106 } else {
2107 DHD_ERROR_MEM(("%s: %s: %s, ret = %d\n",
2108 __FUNCTION__, ioc->cmd == WLC_GET_VAR ?
2109 "WLC_GET_VAR" : "WLC_SET_VAR",
2110 (char *)buf, ret));
2111 }
2112 } else {
2113 if (ret == BCME_UNSUPPORTED || ret == BCME_NOTASSOCIATED) {
2114 DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, %s\n",
2115 __FUNCTION__, ioc->cmd,
2116 ret == BCME_UNSUPPORTED ? "UNSUPPORTED" :
2117 "NOT ASSOCIATED"));
2118 } else {
2119 DHD_ERROR_MEM(("%s: WLC_IOCTL: cmd: %d, ret = %d\n",
2120 __FUNCTION__, ioc->cmd, ret));
2121 }
2122 }
2123 }
2124 #endif /* DETAIL_DEBUG_LOG_FOR_IOCTL */
2125 }
2126
2127 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
2128 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd_pub->bus));
2129 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd_pub->bus));
2130
2131 DHD_OS_WAKE_UNLOCK(dhd_pub);
2132 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
2133
2134 #ifdef WL_MONITOR
2135 /* Intercept monitor ioctl here, add/del monitor if */
2136 if (ret == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
2137 int val = 0;
2138 if (buf != NULL && len != 0) {
2139 if (len >= 4) {
2140 val = *(int*)buf;
2141 } else if (len >= 2) {
2142 val = *(short*)buf;
2143 } else {
2144 val = *(char*)buf;
2145 }
2146 }
2147 dhd_set_monitor(dhd_pub, ifidx, val);
2148 }
2149 #endif /* WL_MONITOR */
2150
2151 return ret;
2152 }
2153
wl_get_port_num(wl_io_pport_t * io_pport)2154 uint wl_get_port_num(wl_io_pport_t *io_pport)
2155 {
2156 return 0;
2157 }
2158
2159 /* Get bssidx from iovar params
2160 * Input: dhd_pub - pointer to dhd_pub_t
2161 * params - IOVAR params
2162 * Output: idx - BSS index
2163 * val - ponter to the IOVAR arguments
2164 */
2165 static int
dhd_iovar_parse_bssidx(dhd_pub_t * dhd_pub,const char * params,uint32 * idx,const char ** val)2166 dhd_iovar_parse_bssidx(dhd_pub_t *dhd_pub, const char *params, uint32 *idx, const char **val)
2167 {
2168 char *prefix = "bsscfg:";
2169 uint32 bssidx;
2170
2171 if (!(strncmp(params, prefix, strlen(prefix)))) {
2172 /* per bss setting should be prefixed with 'bsscfg:' */
2173 const char *p = params + strlen(prefix);
2174
2175 /* Skip Name */
2176 while (*p != '\0')
2177 p++;
2178 /* consider null */
2179 p = p + 1;
2180 bcopy(p, &bssidx, sizeof(uint32));
2181 /* Get corresponding dhd index */
2182 bssidx = dhd_bssidx2idx(dhd_pub, htod32(bssidx));
2183
2184 if (bssidx >= DHD_MAX_IFS) {
2185 DHD_ERROR(("%s Wrong bssidx provided\n", __FUNCTION__));
2186 return BCME_ERROR;
2187 }
2188
2189 /* skip bss idx */
2190 p += sizeof(uint32);
2191 *val = p;
2192 *idx = bssidx;
2193 } else {
2194 DHD_ERROR(("%s: bad parameter for per bss iovar\n", __FUNCTION__));
2195 return BCME_ERROR;
2196 }
2197
2198 return BCME_OK;
2199 }
2200
2201 #if defined(DHD_DEBUG) && defined(BCMDBUS)
2202 /* USB Device console input function */
int dhd_bus_console_in(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	/* Forward the console input line to the dongle via the "cons" iovar. */
	int ret;

	DHD_TRACE(("%s \n", __FUNCTION__));

	ret = dhd_iovar(dhd, 0, "cons", msg, msglen, NULL, 0, TRUE);

	return ret;
}
2210 #endif /* DHD_DEBUG && BCMDBUS */
2211
2212 #ifdef DHD_DEBUG
/*
 * Console "memdebug" command handler: deliberately waste / query / free
 * dongle memory for debug purposes.
 * msg holds a mutable command string: "query", "alloc <kB>" or "free <id>".
 * Successful allocations are tracked on dhd->mw_list_head as
 * <id, handle, size> tuples, so "query" can list them and "free" can
 * release them again by id.
 * Returns 0 on success, BCME_BADARG for any malformed command.
 */
int
dhd_mem_debug(dhd_pub_t *dhd, uchar *msg, uint msglen)
{
	unsigned long int_arg = 0;
	char *p;
	char *end_ptr = NULL;
	dhd_dbg_mwli_t *mw_li;
	dll_t *item, *next;
	/* check if mwalloc, mwquery or mwfree was supplied an argument after a space */
	p = bcmstrstr((char *)msg, " ");
	if (p != NULL) {
		/* space should be converted to null as separation flag for firmware */
		*p = '\0';
		/* store the argument in int_arg */
		int_arg = bcm_strtoul(p+1, &end_ptr, 10);
	}

	if (!p && !strcmp(msg, "query")) {
		/* query the host-side tracking list, no dongle round-trip needed */
		if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
			DHD_ERROR(("memwaste list is empty, call mwalloc < size > to allocate\n"));
		} else {
			for (item = dll_head_p(&dhd->mw_list_head);
				!dll_end(&dhd->mw_list_head, item); item = next) {
				next = dll_next_p(item);
				mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
				DHD_ERROR(("item: <id=%d, size=%d>\n", mw_li->id, mw_li->size));
			}
		}
	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "alloc")) {
		int32 alloc_handle;
		/* convert size into KB and append as integer */
		/* NOTE(review): 32-bit store at p+1 may be unaligned; presumably
		 * acceptable on the supported platforms -- confirm.
		 */
		*((int32 *)(p+1)) = int_arg*1024;
		*(p+1+sizeof(int32)) = '\0';

		/* recalculated length -> 5 bytes for "alloc" + 4 bytes for size +
		 * 1 byte for null character
		 */
		msglen = strlen(msg) + sizeof(int32) + 1;
		/* NOTE(review): passes msglen+1 here while the free path below
		 * passes len without +1 -- verify which length the firmware expects.
		 */
		if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, msglen+1, FALSE, 0) < 0) {
			DHD_ERROR(("IOCTL failed for memdebug alloc\n"));
		}

		/* returned allocated handle from dongle, basically address of the allocated unit */
		alloc_handle = *((int32 *)msg);

		/* add a node in the list with tuple <id, handle, size> */
		if (alloc_handle == 0) {
			DHD_ERROR(("Reuqested size could not be allocated\n"));
		} else if (!(mw_li = MALLOC(dhd->osh, sizeof(*mw_li)))) {
			DHD_ERROR(("mw list item allocation Failed\n"));
		} else {
			mw_li->id = dhd->mw_id++;
			mw_li->handle = alloc_handle;
			mw_li->size = int_arg;
			/* append the node in the list */
			dll_append(&dhd->mw_list_head, &mw_li->list);
		}
	} else if (p && end_ptr && (*end_ptr == '\0') && !strcmp(msg, "free")) {
		/* inform dongle to free wasted chunk */
		int handle = 0;
		int size = 0;
		/* find (and unlink) the tracked node whose id matches the argument */
		for (item = dll_head_p(&dhd->mw_list_head);
			!dll_end(&dhd->mw_list_head, item); item = next) {
			next = dll_next_p(item);
			mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);

			if (mw_li->id == (int)int_arg) {
				handle = mw_li->handle;
				size = mw_li->size;
				dll_delete(item);
				MFREE(dhd->osh, mw_li, sizeof(*mw_li));
				if (dll_empty(dll_head_p(&dhd->mw_list_head))) {
					/* reset the id */
					dhd->mw_id = 0;
				}
			}
		}
		if (handle) {
			int len;
			/* append the free handle and the chunk size in first 8 bytes
			 * after the command and null character
			 */
			*((int32 *)(p+1)) = handle;
			*((int32 *)((p+1)+sizeof(int32))) = size;
			/* append null as terminator */
			*(p+1+2*sizeof(int32)) = '\0';
			/* recalculated length -> 4 bytes for "free" + 8 bytes for handle and size
			 * + 1 byte for null character
			 */
			len = strlen(msg) + 2*sizeof(int32) + 1;
			/* send iovar to free the chunk */
			if (dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, msg, len, FALSE, 0) < 0) {
				DHD_ERROR(("IOCTL failed for memdebug free\n"));
			}
		} else {
			DHD_ERROR(("specified id does not exist\n"));
		}
	} else {
		/* for all the wrong argument formats */
		return BCME_BADARG;
	}
	return 0;
}
2317 extern void
dhd_mw_list_delete(dhd_pub_t * dhd,dll_t * list_head)2318 dhd_mw_list_delete(dhd_pub_t *dhd, dll_t *list_head)
2319 {
2320 dll_t *item;
2321 dhd_dbg_mwli_t *mw_li;
2322 while (!(dll_empty(list_head))) {
2323 item = dll_head_p(list_head);
2324 mw_li = (dhd_dbg_mwli_t *)CONTAINEROF(item, dhd_dbg_mwli_t, list);
2325 dll_delete(item);
2326 MFREE(dhd->osh, mw_li, sizeof(*mw_li));
2327 }
2328 }
2329 #ifdef BCMPCIE
2330 int
dhd_flow_ring_debug(dhd_pub_t * dhd,char * msg,uint msglen)2331 dhd_flow_ring_debug(dhd_pub_t *dhd, char *msg, uint msglen)
2332 {
2333 flow_ring_table_t *flow_ring_table;
2334 char *cmd;
2335 char *end_ptr = NULL;
2336 uint8 prio;
2337 uint16 flowid;
2338 int i;
2339 int ret = 0;
2340 cmd = bcmstrstr(msg, " ");
2341 BCM_REFERENCE(prio);
2342 if (cmd != NULL) {
2343 /* in order to use string operations append null */
2344 *cmd = '\0';
2345 } else {
2346 DHD_ERROR(("missing: create/delete args\n"));
2347 return BCME_ERROR;
2348 }
2349 if (cmd && !strcmp(msg, "create")) {
2350 /* extract <"source address", "destination address", "priority"> */
2351 uint8 sa[ETHER_ADDR_LEN], da[ETHER_ADDR_LEN];
2352 BCM_REFERENCE(sa);
2353 BCM_REFERENCE(da);
2354 msg = msg + strlen("create") + 1;
2355 /* fill ethernet source address */
2356 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2357 sa[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
2358 if (*end_ptr == ':') {
2359 msg = (end_ptr + 1);
2360 } else if (i != 5) {
2361 DHD_ERROR(("not a valid source mac addr\n"));
2362 return BCME_ERROR;
2363 }
2364 }
2365 if (*end_ptr != ' ') {
2366 DHD_ERROR(("missing: destiantion mac id\n"));
2367 return BCME_ERROR;
2368 } else {
2369 /* skip space */
2370 msg = end_ptr + 1;
2371 }
2372 /* fill ethernet destination address */
2373 for (i = 0; i < ETHER_ADDR_LEN; i++) {
2374 da[i] = (uint8)bcm_strtoul(msg, &end_ptr, 16);
2375 if (*end_ptr == ':') {
2376 msg = (end_ptr + 1);
2377 } else if (i != 5) {
2378 DHD_ERROR(("not a valid destination mac addr\n"));
2379 return BCME_ERROR;
2380 }
2381 }
2382 if (*end_ptr != ' ') {
2383 DHD_ERROR(("missing: priority\n"));
2384 return BCME_ERROR;
2385 } else {
2386 msg = end_ptr + 1;
2387 }
2388 /* parse priority */
2389 prio = (uint8)bcm_strtoul(msg, &end_ptr, 10);
2390 if (prio > MAXPRIO) {
2391 DHD_ERROR(("%s: invalid priority. Must be between 0-7 inclusive\n",
2392 __FUNCTION__));
2393 return BCME_ERROR;
2394 }
2395
2396 if (*end_ptr != '\0') {
2397 DHD_ERROR(("msg not truncated with NULL character\n"));
2398 return BCME_ERROR;
2399 }
2400 ret = dhd_flowid_debug_create(dhd, 0, prio, (char *)sa, (char *)da, &flowid);
2401 if (ret != BCME_OK) {
2402 DHD_ERROR(("%s: flowring creation failed ret: %d\n", __FUNCTION__, ret));
2403 return BCME_ERROR;
2404 }
2405 return BCME_OK;
2406
2407 } else if (cmd && !strcmp(msg, "delete")) {
2408 msg = msg + strlen("delete") + 1;
2409 /* parse flowid */
2410 flowid = (uint16)bcm_strtoul(msg, &end_ptr, 10);
2411 if (*end_ptr != '\0') {
2412 DHD_ERROR(("msg not truncated with NULL character\n"));
2413 return BCME_ERROR;
2414 }
2415
2416 /* Find flowid from ifidx 0 since this IOVAR creating flowring with ifidx 0 */
2417 if (dhd_flowid_find_by_ifidx(dhd, 0, flowid) != BCME_OK)
2418 {
2419 DHD_ERROR(("%s : Deleting not created flowid: %u\n", __FUNCTION__, flowid));
2420 return BCME_ERROR;
2421 }
2422
2423 flow_ring_table = (flow_ring_table_t *)dhd->flow_ring_table;
2424 ret = dhd_bus_flow_ring_delete_request(dhd->bus, (void *)&flow_ring_table[flowid]);
2425 if (ret != BCME_OK) {
2426 DHD_ERROR(("%s: flowring deletion failed ret: %d\n", __FUNCTION__, ret));
2427 return BCME_ERROR;
2428 }
2429 return BCME_OK;
2430 }
2431 DHD_ERROR(("%s: neither create nor delete\n", __FUNCTION__));
2432 return BCME_ERROR;
2433 }
2434 #endif /* BCMPCIE */
2435 #endif /* DHD_DEBUG */
2436
2437 static int
dhd_doiovar(dhd_pub_t * dhd_pub,const bcm_iovar_t * vi,uint32 actionid,const char * name,void * params,int plen,void * arg,uint len,int val_size)2438 dhd_doiovar(dhd_pub_t *dhd_pub, const bcm_iovar_t *vi, uint32 actionid, const char *name,
2439 void *params, int plen, void *arg, uint len, int val_size)
2440 {
2441 int bcmerror = 0;
2442 int32 int_val = 0;
2443 uint32 dhd_ver_len, bus_api_rev_len;
2444
2445 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
2446 DHD_TRACE(("%s: actionid = %d; name %s\n", __FUNCTION__, actionid, name));
2447
2448 if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, IOV_ISSET(actionid))) != 0)
2449 goto exit;
2450
2451 if (plen >= (int)sizeof(int_val))
2452 bcopy(params, &int_val, sizeof(int_val));
2453
2454 switch (actionid) {
2455 case IOV_GVAL(IOV_VERSION):
2456 /* Need to have checked buffer length */
2457 dhd_ver_len = sizeof(dhd_version) - 1;
2458 bus_api_rev_len = strlen(bus_api_revision);
2459 if (len > dhd_ver_len + bus_api_rev_len) {
2460 bcmerror = memcpy_s((char *)arg, len, dhd_version, dhd_ver_len);
2461 if (bcmerror != BCME_OK) {
2462 break;
2463 }
2464 bcmerror = memcpy_s((char *)arg + dhd_ver_len, len - dhd_ver_len,
2465 bus_api_revision, bus_api_rev_len);
2466 if (bcmerror != BCME_OK) {
2467 break;
2468 }
2469 *((char *)arg + dhd_ver_len + bus_api_rev_len) = '\0';
2470 }
2471 break;
2472
2473 case IOV_GVAL(IOV_WLMSGLEVEL):
2474 printf("android_msg_level=0x%x\n", android_msg_level);
2475 printf("config_msg_level=0x%x\n", config_msg_level);
2476 #if defined(WL_WIRELESS_EXT)
2477 int_val = (int32)iw_msg_level;
2478 bcopy(&int_val, arg, val_size);
2479 printf("iw_msg_level=0x%x\n", iw_msg_level);
2480 #endif
2481 #ifdef WL_CFG80211
2482 int_val = (int32)wl_dbg_level;
2483 bcopy(&int_val, arg, val_size);
2484 printf("cfg_msg_level=0x%x\n", wl_dbg_level);
2485 #endif
2486 break;
2487
2488 case IOV_SVAL(IOV_WLMSGLEVEL):
2489 if (int_val & DHD_ANDROID_VAL) {
2490 android_msg_level = (uint)(int_val & 0xFFFF);
2491 printf("android_msg_level=0x%x\n", android_msg_level);
2492 }
2493 if (int_val & DHD_CONFIG_VAL) {
2494 config_msg_level = (uint)(int_val & 0xFFFF);
2495 printf("config_msg_level=0x%x\n", config_msg_level);
2496 }
2497 #if defined(WL_WIRELESS_EXT)
2498 if (int_val & DHD_IW_VAL) {
2499 iw_msg_level = (uint)(int_val & 0xFFFF);
2500 printf("iw_msg_level=0x%x\n", iw_msg_level);
2501 }
2502 #endif
2503 #ifdef WL_CFG80211
2504 if (int_val & DHD_CFG_VAL) {
2505 wl_cfg80211_enable_trace((u32)(int_val & 0xFFFF));
2506 }
2507 #endif
2508 break;
2509
2510 case IOV_GVAL(IOV_MSGLEVEL):
2511 int_val = (int32)dhd_msg_level;
2512 bcopy(&int_val, arg, val_size);
2513 break;
2514
2515 case IOV_SVAL(IOV_MSGLEVEL):
2516 dhd_msg_level = int_val;
2517 break;
2518
2519 case IOV_GVAL(IOV_BCMERRORSTR):
2520 bcm_strncpy_s((char *)arg, len, bcmerrorstr(dhd_pub->bcmerror), BCME_STRLEN);
2521 ((char *)arg)[BCME_STRLEN - 1] = 0x00;
2522 break;
2523
2524 case IOV_GVAL(IOV_BCMERROR):
2525 int_val = (int32)dhd_pub->bcmerror;
2526 bcopy(&int_val, arg, val_size);
2527 break;
2528
2529 #ifndef BCMDBUS
2530 case IOV_GVAL(IOV_WDTICK):
2531 int_val = (int32)dhd_watchdog_ms;
2532 bcopy(&int_val, arg, val_size);
2533 break;
2534 #endif /* !BCMDBUS */
2535
2536 case IOV_SVAL(IOV_WDTICK):
2537 if (!dhd_pub->up) {
2538 bcmerror = BCME_NOTUP;
2539 break;
2540 }
2541
2542 dhd_watchdog_ms = (uint)int_val;
2543
2544 dhd_os_wd_timer(dhd_pub, (uint)int_val);
2545 break;
2546
2547 case IOV_GVAL(IOV_DUMP):
2548 if (dhd_dump(dhd_pub, arg, len) <= 0)
2549 bcmerror = BCME_ERROR;
2550 else
2551 bcmerror = BCME_OK;
2552 break;
2553
2554 #ifndef BCMDBUS
2555 case IOV_GVAL(IOV_DCONSOLE_POLL):
2556 int_val = (int32)dhd_pub->dhd_console_ms;
2557 bcopy(&int_val, arg, val_size);
2558 break;
2559
2560 case IOV_SVAL(IOV_DCONSOLE_POLL):
2561 dhd_pub->dhd_console_ms = (uint)int_val;
2562 break;
2563
2564 #if defined(DHD_DEBUG)
2565 case IOV_SVAL(IOV_CONS):
2566 if (len > 0) {
2567 #ifdef CONSOLE_DPC
2568 bcmerror = dhd_bus_txcons(dhd_pub, arg, len - 1);
2569 #else
2570 bcmerror = dhd_bus_console_in(dhd_pub, arg, len - 1);
2571 #endif
2572 }
2573 break;
2574 #endif /* DHD_DEBUG */
2575 #endif /* !BCMDBUS */
2576
2577 case IOV_SVAL(IOV_CLEARCOUNTS):
2578 dhd_pub->tx_packets = dhd_pub->rx_packets = 0;
2579 dhd_pub->tx_errors = dhd_pub->rx_errors = 0;
2580 dhd_pub->tx_ctlpkts = dhd_pub->rx_ctlpkts = 0;
2581 dhd_pub->tx_ctlerrs = dhd_pub->rx_ctlerrs = 0;
2582 dhd_pub->tx_dropped = 0;
2583 dhd_pub->rx_dropped = 0;
2584 dhd_pub->tx_pktgetfail = 0;
2585 dhd_pub->rx_pktgetfail = 0;
2586 dhd_pub->rx_readahead_cnt = 0;
2587 dhd_pub->tx_realloc = 0;
2588 dhd_pub->wd_dpc_sched = 0;
2589 dhd_pub->tx_big_packets = 0;
2590 memset(&dhd_pub->dstats, 0, sizeof(dhd_pub->dstats));
2591 dhd_bus_clearcounts(dhd_pub);
2592 #ifdef PROP_TXSTATUS
2593 /* clear proptxstatus related counters */
2594 dhd_wlfc_clear_counts(dhd_pub);
2595 #endif /* PROP_TXSTATUS */
2596 #if defined(DHD_LB_STATS)
2597 DHD_LB_STATS_RESET(dhd_pub);
2598 #endif /* DHD_LB_STATS */
2599 break;
2600
2601 #ifdef BCMPERFSTATS
2602 case IOV_GVAL(IOV_LOGDUMP): {
2603 bcmdumplog((char*)arg, len);
2604 break;
2605 }
2606
2607 case IOV_SVAL(IOV_LOGCAL): {
2608 bcmlog("Starting OSL_DELAY (%d usecs)", (uint)int_val, 0);
2609 OSL_DELAY((uint)int_val);
2610 bcmlog("Finished OSL_DELAY (%d usecs)", (uint)int_val, 0);
2611 break;
2612 }
2613
2614 case IOV_SVAL(IOV_LOGSTAMP): {
2615 int int_val2;
2616
2617 if (plen >= 2 * sizeof(int)) {
2618 bcopy((char *)params + sizeof(int_val), &int_val2, sizeof(int_val2));
2619 bcmlog("User message %d %d", (uint)int_val, (uint)int_val2);
2620 } else if (plen >= sizeof(int)) {
2621 bcmlog("User message %d", (uint)int_val, 0);
2622 } else {
2623 bcmlog("User message", 0, 0);
2624 }
2625 break;
2626 }
2627 #endif /* BCMPERFSTATS */
2628
2629 case IOV_GVAL(IOV_IOCTLTIMEOUT): {
2630 int_val = (int32)dhd_os_get_ioctl_resp_timeout();
2631 bcopy(&int_val, arg, sizeof(int_val));
2632 break;
2633 }
2634
2635 case IOV_SVAL(IOV_IOCTLTIMEOUT): {
2636 if (int_val <= 0)
2637 bcmerror = BCME_BADARG;
2638 else
2639 dhd_os_set_ioctl_resp_timeout((unsigned int)int_val);
2640 break;
2641 }
2642
2643 #ifdef PROP_TXSTATUS
2644 case IOV_GVAL(IOV_PROPTXSTATUS_ENABLE): {
2645 bool wlfc_enab = FALSE;
2646 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2647 if (bcmerror != BCME_OK)
2648 goto exit;
2649 int_val = wlfc_enab ? 1 : 0;
2650 bcopy(&int_val, arg, val_size);
2651 break;
2652 }
2653 case IOV_SVAL(IOV_PROPTXSTATUS_ENABLE): {
2654 bool wlfc_enab = FALSE;
2655 bcmerror = dhd_wlfc_get_enable(dhd_pub, &wlfc_enab);
2656 if (bcmerror != BCME_OK)
2657 goto exit;
2658
2659 /* wlfc is already set as desired */
2660 if (wlfc_enab == (int_val == 0 ? FALSE : TRUE))
2661 goto exit;
2662
2663 if (int_val == TRUE && disable_proptx) {
2664 disable_proptx = 0;
2665 }
2666
2667 if (int_val == TRUE)
2668 bcmerror = dhd_wlfc_init(dhd_pub);
2669 else
2670 bcmerror = dhd_wlfc_deinit(dhd_pub);
2671
2672 break;
2673 }
2674 case IOV_GVAL(IOV_PROPTXSTATUS_MODE):
2675 bcmerror = dhd_wlfc_get_mode(dhd_pub, &int_val);
2676 if (bcmerror != BCME_OK)
2677 goto exit;
2678 bcopy(&int_val, arg, val_size);
2679 break;
2680
2681 case IOV_SVAL(IOV_PROPTXSTATUS_MODE):
2682 dhd_wlfc_set_mode(dhd_pub, int_val);
2683 break;
2684 #ifdef QMONITOR
2685 case IOV_GVAL(IOV_QMON_TIME_THRES): {
2686 int_val = dhd_qmon_thres(dhd_pub, FALSE, 0);
2687 bcopy(&int_val, arg, val_size);
2688 break;
2689 }
2690
2691 case IOV_SVAL(IOV_QMON_TIME_THRES): {
2692 dhd_qmon_thres(dhd_pub, TRUE, int_val);
2693 break;
2694 }
2695
2696 case IOV_GVAL(IOV_QMON_TIME_PERCENT): {
2697 int_val = dhd_qmon_getpercent(dhd_pub);
2698 bcopy(&int_val, arg, val_size);
2699 break;
2700 }
2701 #endif /* QMONITOR */
2702
2703 case IOV_GVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2704 bcmerror = dhd_wlfc_get_module_ignore(dhd_pub, &int_val);
2705 if (bcmerror != BCME_OK)
2706 goto exit;
2707 bcopy(&int_val, arg, val_size);
2708 break;
2709
2710 case IOV_SVAL(IOV_PROPTXSTATUS_MODULE_IGNORE):
2711 dhd_wlfc_set_module_ignore(dhd_pub, int_val);
2712 break;
2713
2714 case IOV_GVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2715 bcmerror = dhd_wlfc_get_credit_ignore(dhd_pub, &int_val);
2716 if (bcmerror != BCME_OK)
2717 goto exit;
2718 bcopy(&int_val, arg, val_size);
2719 break;
2720
2721 case IOV_SVAL(IOV_PROPTXSTATUS_CREDIT_IGNORE):
2722 dhd_wlfc_set_credit_ignore(dhd_pub, int_val);
2723 break;
2724
2725 case IOV_GVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2726 bcmerror = dhd_wlfc_get_txstatus_ignore(dhd_pub, &int_val);
2727 if (bcmerror != BCME_OK)
2728 goto exit;
2729 bcopy(&int_val, arg, val_size);
2730 break;
2731
2732 case IOV_SVAL(IOV_PROPTXSTATUS_TXSTATUS_IGNORE):
2733 dhd_wlfc_set_txstatus_ignore(dhd_pub, int_val);
2734 break;
2735
2736 case IOV_GVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2737 bcmerror = dhd_wlfc_get_rxpkt_chk(dhd_pub, &int_val);
2738 if (bcmerror != BCME_OK)
2739 goto exit;
2740 bcopy(&int_val, arg, val_size);
2741 break;
2742
2743 case IOV_SVAL(IOV_PROPTXSTATUS_RXPKT_CHK):
2744 dhd_wlfc_set_rxpkt_chk(dhd_pub, int_val);
2745 break;
2746
2747 #endif /* PROP_TXSTATUS */
2748
2749 case IOV_GVAL(IOV_BUS_TYPE):
2750 /* The dhd application queries the driver to check if its usb or sdio. */
2751 #ifdef BCMDBUS
2752 int_val = BUS_TYPE_USB;
2753 #endif
2754 #ifdef BCMSDIO
2755 int_val = BUS_TYPE_SDIO;
2756 #endif
2757 #ifdef PCIE_FULL_DONGLE
2758 int_val = BUS_TYPE_PCIE;
2759 #endif
2760 bcopy(&int_val, arg, val_size);
2761 break;
2762
2763 case IOV_SVAL(IOV_CHANGEMTU):
2764 int_val &= 0xffff;
2765 bcmerror = dhd_change_mtu(dhd_pub, int_val, 0);
2766 break;
2767
2768 case IOV_GVAL(IOV_HOSTREORDER_FLOWS):
2769 {
2770 uint i = 0;
2771 uint8 *ptr = (uint8 *)arg;
2772 uint8 count = 0;
2773
2774 ptr++;
2775 for (i = 0; i < WLHOST_REORDERDATA_MAXFLOWS; i++) {
2776 if (dhd_pub->reorder_bufs[i] != NULL) {
2777 *ptr = dhd_pub->reorder_bufs[i]->flow_id;
2778 ptr++;
2779 count++;
2780 }
2781 }
2782 ptr = (uint8 *)arg;
2783 *ptr = count;
2784 break;
2785 }
2786 #ifdef DHDTCPACK_SUPPRESS
2787 case IOV_GVAL(IOV_TCPACK_SUPPRESS): {
2788 int_val = (uint32)dhd_pub->tcpack_sup_mode;
2789 bcopy(&int_val, arg, val_size);
2790 break;
2791 }
2792 case IOV_SVAL(IOV_TCPACK_SUPPRESS): {
2793 bcmerror = dhd_tcpack_suppress_set(dhd_pub, (uint8)int_val);
2794 break;
2795 }
2796 #endif /* DHDTCPACK_SUPPRESS */
2797 #ifdef DHD_WMF
2798 case IOV_GVAL(IOV_WMF_BSS_ENAB): {
2799 uint32 bssidx;
2800 dhd_wmf_t *wmf;
2801 const char *val;
2802
2803 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2804 DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
2805 bcmerror = BCME_BADARG;
2806 break;
2807 }
2808
2809 wmf = dhd_wmf_conf(dhd_pub, bssidx);
2810 int_val = wmf->wmf_enable ? 1 :0;
2811 bcopy(&int_val, arg, val_size);
2812 break;
2813 }
2814 case IOV_SVAL(IOV_WMF_BSS_ENAB): {
2815 /* Enable/Disable WMF */
2816 uint32 bssidx;
2817 dhd_wmf_t *wmf;
2818 const char *val;
2819
2820 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2821 DHD_ERROR(("%s: wmf_bss_enable: bad parameter\n", __FUNCTION__));
2822 bcmerror = BCME_BADARG;
2823 break;
2824 }
2825
2826 ASSERT(val);
2827 bcopy(val, &int_val, sizeof(uint32));
2828 wmf = dhd_wmf_conf(dhd_pub, bssidx);
2829 if (wmf->wmf_enable == int_val)
2830 break;
2831 if (int_val) {
2832 /* Enable WMF */
2833 if (dhd_wmf_instance_add(dhd_pub, bssidx) != BCME_OK) {
2834 DHD_ERROR(("%s: Error in creating WMF instance\n",
2835 __FUNCTION__));
2836 break;
2837 }
2838 if (dhd_wmf_start(dhd_pub, bssidx) != BCME_OK) {
2839 DHD_ERROR(("%s: Failed to start WMF\n", __FUNCTION__));
2840 break;
2841 }
2842 wmf->wmf_enable = TRUE;
2843 } else {
2844 /* Disable WMF */
2845 wmf->wmf_enable = FALSE;
2846 dhd_wmf_stop(dhd_pub, bssidx);
2847 dhd_wmf_instance_del(dhd_pub, bssidx);
2848 }
2849 break;
2850 }
2851 case IOV_GVAL(IOV_WMF_UCAST_IGMP):
2852 int_val = dhd_pub->wmf_ucast_igmp ? 1 : 0;
2853 bcopy(&int_val, arg, val_size);
2854 break;
2855 case IOV_SVAL(IOV_WMF_UCAST_IGMP):
2856 if (dhd_pub->wmf_ucast_igmp == int_val)
2857 break;
2858
2859 if (int_val >= OFF && int_val <= ON)
2860 dhd_pub->wmf_ucast_igmp = int_val;
2861 else
2862 bcmerror = BCME_RANGE;
2863 break;
2864 case IOV_GVAL(IOV_WMF_MCAST_DATA_SENDUP):
2865 int_val = dhd_wmf_mcast_data_sendup(dhd_pub, 0, FALSE, FALSE);
2866 bcopy(&int_val, arg, val_size);
2867 break;
2868 case IOV_SVAL(IOV_WMF_MCAST_DATA_SENDUP):
2869 dhd_wmf_mcast_data_sendup(dhd_pub, 0, TRUE, int_val);
2870 break;
2871
2872 #ifdef WL_IGMP_UCQUERY
2873 case IOV_GVAL(IOV_WMF_UCAST_IGMP_QUERY):
2874 int_val = dhd_pub->wmf_ucast_igmp_query ? 1 : 0;
2875 bcopy(&int_val, arg, val_size);
2876 break;
2877 case IOV_SVAL(IOV_WMF_UCAST_IGMP_QUERY):
2878 if (dhd_pub->wmf_ucast_igmp_query == int_val)
2879 break;
2880
2881 if (int_val >= OFF && int_val <= ON)
2882 dhd_pub->wmf_ucast_igmp_query = int_val;
2883 else
2884 bcmerror = BCME_RANGE;
2885 break;
2886 #endif /* WL_IGMP_UCQUERY */
2887 #ifdef DHD_UCAST_UPNP
2888 case IOV_GVAL(IOV_WMF_UCAST_UPNP):
2889 int_val = dhd_pub->wmf_ucast_upnp ? 1 : 0;
2890 bcopy(&int_val, arg, val_size);
2891 break;
2892 case IOV_SVAL(IOV_WMF_UCAST_UPNP):
2893 if (dhd_pub->wmf_ucast_upnp == int_val)
2894 break;
2895
2896 if (int_val >= OFF && int_val <= ON)
2897 dhd_pub->wmf_ucast_upnp = int_val;
2898 else
2899 bcmerror = BCME_RANGE;
2900 break;
2901 #endif /* DHD_UCAST_UPNP */
2902
2903 case IOV_GVAL(IOV_WMF_PSTA_DISABLE): {
2904 uint32 bssidx;
2905 const char *val;
2906
2907 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2908 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
2909 bcmerror = BCME_BADARG;
2910 break;
2911 }
2912
2913 int_val = dhd_get_wmf_psta_disable(dhd_pub, bssidx);
2914 bcopy(&int_val, arg, val_size);
2915 break;
2916 }
2917
2918 case IOV_SVAL(IOV_WMF_PSTA_DISABLE): {
2919 uint32 bssidx;
2920 const char *val;
2921
2922 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
2923 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
2924 bcmerror = BCME_BADARG;
2925 break;
2926 }
2927
2928 ASSERT(val);
2929 bcopy(val, &int_val, sizeof(uint32));
2930 dhd_set_wmf_psta_disable(dhd_pub, bssidx, int_val);
2931 break;
2932 }
2933 #endif /* DHD_WMF */
2934
2935 #if defined(BCM_ROUTER_DHD)
2936 case IOV_SVAL(IOV_TRAFFIC_MGMT_DWM): {
2937 trf_mgmt_filter_list_t *trf_mgmt_filter_list =
2938 (trf_mgmt_filter_list_t *)(arg);
2939 bcmerror = traffic_mgmt_add_dwm_filter(dhd_pub, trf_mgmt_filter_list, len);
2940 }
2941 break;
2942 #endif /* BCM_ROUTER_DHD */
2943
2944 #ifdef DHD_L2_FILTER
2945 case IOV_GVAL(IOV_DHCP_UNICAST): {
2946 uint32 bssidx;
2947 const char *val;
2948 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2949 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2950 __FUNCTION__, name));
2951 bcmerror = BCME_BADARG;
2952 break;
2953 }
2954 int_val = dhd_get_dhcp_unicast_status(dhd_pub, bssidx);
2955 memcpy(arg, &int_val, val_size);
2956 break;
2957 }
2958 case IOV_SVAL(IOV_DHCP_UNICAST): {
2959 uint32 bssidx;
2960 const char *val;
2961 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2962 DHD_ERROR(("%s: IOV_DHCP_UNICAST: bad parameterand name = %s\n",
2963 __FUNCTION__, name));
2964 bcmerror = BCME_BADARG;
2965 break;
2966 }
2967 memcpy(&int_val, val, sizeof(int_val));
2968 bcmerror = dhd_set_dhcp_unicast_status(dhd_pub, bssidx, int_val ? 1 : 0);
2969 break;
2970 }
2971 case IOV_GVAL(IOV_BLOCK_PING): {
2972 uint32 bssidx;
2973 const char *val;
2974
2975 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2976 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2977 bcmerror = BCME_BADARG;
2978 break;
2979 }
2980 int_val = dhd_get_block_ping_status(dhd_pub, bssidx);
2981 memcpy(arg, &int_val, val_size);
2982 break;
2983 }
2984 case IOV_SVAL(IOV_BLOCK_PING): {
2985 uint32 bssidx;
2986 const char *val;
2987
2988 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
2989 DHD_ERROR(("%s: IOV_BLOCK_PING: bad parameter\n", __FUNCTION__));
2990 bcmerror = BCME_BADARG;
2991 break;
2992 }
2993 memcpy(&int_val, val, sizeof(int_val));
2994 bcmerror = dhd_set_block_ping_status(dhd_pub, bssidx, int_val ? 1 : 0);
2995 break;
2996 }
2997 case IOV_GVAL(IOV_PROXY_ARP): {
2998 uint32 bssidx;
2999 const char *val;
3000
3001 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3002 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
3003 bcmerror = BCME_BADARG;
3004 break;
3005 }
3006 int_val = dhd_get_parp_status(dhd_pub, bssidx);
3007 bcopy(&int_val, arg, val_size);
3008 break;
3009 }
3010 case IOV_SVAL(IOV_PROXY_ARP): {
3011 uint32 bssidx;
3012 const char *val;
3013
3014 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3015 DHD_ERROR(("%s: IOV_PROXY_ARP: bad parameter\n", __FUNCTION__));
3016 bcmerror = BCME_BADARG;
3017 break;
3018 }
3019 bcopy(val, &int_val, sizeof(int_val));
3020
3021 /* Issue a iovar request to WL to update the proxy arp capability bit
3022 * in the Extended Capability IE of beacons/probe responses.
3023 */
3024 bcmerror = dhd_iovar(dhd_pub, bssidx, "proxy_arp_advertise", val, sizeof(int_val),
3025 NULL, 0, TRUE);
3026 if (bcmerror == BCME_OK) {
3027 dhd_set_parp_status(dhd_pub, bssidx, int_val ? 1 : 0);
3028 }
3029 break;
3030 }
3031 case IOV_GVAL(IOV_GRAT_ARP): {
3032 uint32 bssidx;
3033 const char *val;
3034
3035 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3036 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
3037 bcmerror = BCME_BADARG;
3038 break;
3039 }
3040 int_val = dhd_get_grat_arp_status(dhd_pub, bssidx);
3041 memcpy(arg, &int_val, val_size);
3042 break;
3043 }
3044 case IOV_SVAL(IOV_GRAT_ARP): {
3045 uint32 bssidx;
3046 const char *val;
3047
3048 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3049 DHD_ERROR(("%s: IOV_GRAT_ARP: bad parameter\n", __FUNCTION__));
3050 bcmerror = BCME_BADARG;
3051 break;
3052 }
3053 memcpy(&int_val, val, sizeof(int_val));
3054 bcmerror = dhd_set_grat_arp_status(dhd_pub, bssidx, int_val ? 1 : 0);
3055 break;
3056 }
3057 case IOV_GVAL(IOV_BLOCK_TDLS): {
3058 uint32 bssidx;
3059 const char *val;
3060
3061 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3062 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
3063 bcmerror = BCME_BADARG;
3064 break;
3065 }
3066 int_val = dhd_get_block_tdls_status(dhd_pub, bssidx);
3067 memcpy(arg, &int_val, val_size);
3068 break;
3069 }
3070 case IOV_SVAL(IOV_BLOCK_TDLS): {
3071 uint32 bssidx;
3072 const char *val;
3073
3074 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3075 DHD_ERROR(("%s: IOV_BLOCK_TDLS: bad parameter\n", __FUNCTION__));
3076 bcmerror = BCME_BADARG;
3077 break;
3078 }
3079 memcpy(&int_val, val, sizeof(int_val));
3080 bcmerror = dhd_set_block_tdls_status(dhd_pub, bssidx, int_val ? 1 : 0);
3081 break;
3082 }
3083 #endif /* DHD_L2_FILTER */
3084 case IOV_SVAL(IOV_DHD_IE): {
3085 uint32 bssidx;
3086 const char *val;
3087 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
3088 uint8 ie_type;
3089 bcm_tlv_t *qos_map_ie = NULL;
3090 ie_setbuf_t *ie_getbufp = (ie_setbuf_t *)(arg+4);
3091 ie_type = ie_getbufp->ie_buffer.ie_list[0].ie_data.id;
3092 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
3093
3094 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3095 DHD_ERROR(("%s: dhd ie: bad parameter\n", __FUNCTION__));
3096 bcmerror = BCME_BADARG;
3097 break;
3098 }
3099
3100 #if (defined(BCM_ROUTER_DHD) && defined(QOS_MAP_SET))
3101 qos_map_ie = (bcm_tlv_t *)(&(ie_getbufp->ie_buffer.ie_list[0].ie_data));
3102 if (qos_map_ie != NULL && (ie_type == DOT11_MNG_QOS_MAP_ID)) {
3103 bcmerror = dhd_set_qosmap_up_table(dhd_pub, bssidx, qos_map_ie);
3104 }
3105 #endif /* BCM_ROUTER_DHD && QOS_MAP_SET */
3106 break;
3107 }
3108 case IOV_GVAL(IOV_AP_ISOLATE): {
3109 uint32 bssidx;
3110 const char *val;
3111
3112 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3113 DHD_ERROR(("%s: ap isoalate: bad parameter\n", __FUNCTION__));
3114 bcmerror = BCME_BADARG;
3115 break;
3116 }
3117
3118 int_val = dhd_get_ap_isolate(dhd_pub, bssidx);
3119 bcopy(&int_val, arg, val_size);
3120 break;
3121 }
3122 case IOV_SVAL(IOV_AP_ISOLATE): {
3123 uint32 bssidx;
3124 const char *val;
3125
3126 if (dhd_iovar_parse_bssidx(dhd_pub, name, &bssidx, &val) != BCME_OK) {
3127 DHD_ERROR(("%s: ap isolate: bad parameter\n", __FUNCTION__));
3128 bcmerror = BCME_BADARG;
3129 break;
3130 }
3131
3132 ASSERT(val);
3133 bcopy(val, &int_val, sizeof(uint32));
3134 dhd_set_ap_isolate(dhd_pub, bssidx, int_val);
3135 break;
3136 }
3137 #ifdef DHD_PSTA
3138 case IOV_GVAL(IOV_PSTA): {
3139 int_val = dhd_get_psta_mode(dhd_pub);
3140 bcopy(&int_val, arg, val_size);
3141 break;
3142 }
3143 case IOV_SVAL(IOV_PSTA): {
3144 if (int_val >= DHD_MODE_PSTA_DISABLED && int_val <= DHD_MODE_PSR) {
3145 dhd_set_psta_mode(dhd_pub, int_val);
3146 } else {
3147 bcmerror = BCME_RANGE;
3148 }
3149 break;
3150 }
3151 #endif /* DHD_PSTA */
3152 #ifdef DHD_WET
3153 case IOV_GVAL(IOV_WET):
3154 int_val = dhd_get_wet_mode(dhd_pub);
3155 bcopy(&int_val, arg, val_size);
3156 break;
3157
3158 case IOV_SVAL(IOV_WET):
3159 if (int_val == 0 || int_val == 1) {
3160 dhd_set_wet_mode(dhd_pub, int_val);
3161 /* Delete the WET DB when disabled */
3162 if (!int_val) {
3163 dhd_wet_sta_delete_list(dhd_pub);
3164 }
3165 } else {
3166 bcmerror = BCME_RANGE;
3167 }
3168 break;
3169 case IOV_SVAL(IOV_WET_HOST_IPV4):
3170 dhd_set_wet_host_ipv4(dhd_pub, params, plen);
3171 break;
3172 case IOV_SVAL(IOV_WET_HOST_MAC):
3173 dhd_set_wet_host_mac(dhd_pub, params, plen);
3174 break;
3175 #endif /* DHD_WET */
3176 #ifdef DHD_MCAST_REGEN
3177 case IOV_GVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
3178 uint32 bssidx;
3179 const char *val;
3180
3181 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3182 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
3183 bcmerror = BCME_BADARG;
3184 break;
3185 }
3186
3187 int_val = dhd_get_mcast_regen_bss_enable(dhd_pub, bssidx);
3188 bcopy(&int_val, arg, val_size);
3189 break;
3190 }
3191
3192 case IOV_SVAL(IOV_MCAST_REGEN_BSS_ENABLE): {
3193 uint32 bssidx;
3194 const char *val;
3195
3196 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3197 DHD_ERROR(("%s: mcast_regen_bss_enable: bad parameter\n", __FUNCTION__));
3198 bcmerror = BCME_BADARG;
3199 break;
3200 }
3201
3202 ASSERT(val);
3203 bcopy(val, &int_val, sizeof(uint32));
3204 dhd_set_mcast_regen_bss_enable(dhd_pub, bssidx, int_val);
3205 break;
3206 }
3207 #endif /* DHD_MCAST_REGEN */
3208
3209 case IOV_GVAL(IOV_CFG80211_OPMODE): {
3210 int_val = (int32)dhd_pub->op_mode;
3211 bcopy(&int_val, arg, sizeof(int_val));
3212 break;
3213 }
3214 case IOV_SVAL(IOV_CFG80211_OPMODE): {
3215 if (int_val <= 0)
3216 bcmerror = BCME_BADARG;
3217 else
3218 dhd_pub->op_mode = int_val;
3219 break;
3220 }
3221
3222 case IOV_GVAL(IOV_ASSERT_TYPE):
3223 int_val = g_assert_type;
3224 bcopy(&int_val, arg, val_size);
3225 break;
3226
3227 case IOV_SVAL(IOV_ASSERT_TYPE):
3228 g_assert_type = (uint32)int_val;
3229 break;
3230
3231 #if defined(NDIS)
3232 case IOV_GVAL(IOV_WAKEIND):
3233 dhd_os_wakeind(dhd_pub, &int_val);
3234 bcopy(&int_val, arg, val_size);
3235 break;
3236 #endif /* NDIS */
3237
3238 #if !defined(NDIS) && !defined(BCM_ROUTER_DHD)
3239 case IOV_GVAL(IOV_LMTEST): {
3240 *(uint32 *)arg = (uint32)lmtest;
3241 break;
3242 }
3243
3244 case IOV_SVAL(IOV_LMTEST): {
3245 uint32 val = *(uint32 *)arg;
3246 if (val > 50)
3247 bcmerror = BCME_BADARG;
3248 else {
3249 lmtest = (uint)val;
3250 DHD_ERROR(("%s: lmtest %s\n",
3251 __FUNCTION__, (lmtest == FALSE)? "OFF" : "ON"));
3252 }
3253 break;
3254 }
3255 #endif /* !NDIS && !BCM_ROUTER_DHD */
3256 #ifdef BCMDBG
3257 case IOV_GVAL(IOV_MACDBG_PD11REGS):
3258 bcmerror = dhd_macdbg_pd11regs(dhd_pub, params, plen, arg, len);
3259 break;
3260 case IOV_GVAL(IOV_MACDBG_REGLIST):
3261 bcmerror = dhd_macdbg_reglist(dhd_pub, arg, len);
3262 break;
3263 case IOV_GVAL(IOV_MACDBG_PSVMPMEMS):
3264 bcmerror = dhd_macdbg_psvmpmems(dhd_pub, params, plen, arg, len);
3265 break;
3266 #endif /* BCMDBG */
3267
3268 #ifdef SHOW_LOGTRACE
3269 case IOV_GVAL(IOV_DUMP_TRACE_LOG): {
3270 trace_buf_info_t *trace_buf_info = (trace_buf_info_t *)arg;
3271 dhd_dbg_ring_t *dbg_verbose_ring = NULL;
3272
3273 dbg_verbose_ring = dhd_dbg_get_ring_from_ring_id(dhd_pub, FW_VERBOSE_RING_ID);
3274 if (dbg_verbose_ring == NULL) {
3275 DHD_ERROR(("dbg_verbose_ring is NULL\n"));
3276 bcmerror = BCME_UNSUPPORTED;
3277 break;
3278 }
3279
3280 if (trace_buf_info != NULL) {
3281 bzero(trace_buf_info, sizeof(trace_buf_info_t));
3282 dhd_dbg_read_ring_into_trace_buf(dbg_verbose_ring, trace_buf_info);
3283 } else {
3284 DHD_ERROR(("%s: arg is NULL\n", __FUNCTION__));
3285 bcmerror = BCME_NOMEM;
3286 }
3287 break;
3288 }
3289 #endif /* SHOW_LOGTRACE */
3290 #ifdef BTLOG
3291 case IOV_GVAL(IOV_DUMP_BT_LOG): {
3292 bt_log_buf_info_t *bt_log_buf_info = (bt_log_buf_info_t *)arg;
3293 uint32 rlen;
3294
3295 rlen = dhd_dbg_pull_single_from_ring(dhd_pub, BT_LOG_RING_ID, bt_log_buf_info->buf,
3296 BT_LOG_BUF_MAX_SIZE, TRUE);
3297 bt_log_buf_info->size = rlen;
3298 bt_log_buf_info->availability = BT_LOG_NEXT_BUF_NOT_AVAIL;
3299 if (rlen == 0) {
3300 bt_log_buf_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
3301 } else {
3302 dhd_dbg_ring_status_t ring_status;
3303 dhd_dbg_get_ring_status(dhd_pub, BT_LOG_RING_ID, &ring_status);
3304 if (ring_status.written_bytes != ring_status.read_bytes) {
3305 bt_log_buf_info->availability = BT_LOG_NEXT_BUF_AVAIL;
3306 }
3307 }
3308 break;
3309 }
3310 case IOV_GVAL(IOV_BTLOG):
3311 {
3312 uint32 btlog_val = dhd_pub->bt_logging_enabled ? 1 : 0;
3313 bcopy(&btlog_val, arg, val_size);
3314 }
3315 break;
3316 case IOV_SVAL(IOV_BTLOG):
3317 {
3318 if (dhd_pub->busstate != DHD_BUS_DOWN) {
3319 DHD_ERROR(("%s: Can change only when bus down (before FW download)\n",
3320 __FUNCTION__));
3321 bcmerror = BCME_NOTDOWN;
3322 break;
3323 }
3324 if (int_val)
3325 dhd_pub->bt_logging_enabled = TRUE;
3326 else
3327 dhd_pub->bt_logging_enabled = FALSE;
3328 }
3329 break;
3330
3331 #endif /* BTLOG */
3332 #ifdef SNAPSHOT_UPLOAD
3333 case IOV_SVAL(IOV_BT_MEM_DUMP): {
3334 dhd_prot_send_snapshot_request(dhd_pub, SNAPSHOT_TYPE_BT, int_val);
3335 break;
3336 }
3337 case IOV_GVAL(IOV_BT_UPLOAD): {
3338 int status;
3339 bt_mem_req_t req;
3340 bt_log_buf_info_t *mem_info = (bt_log_buf_info_t *)arg;
3341 uint32 size;
3342 bool is_more;
3343
3344 memcpy(&req, params, sizeof(req));
3345
3346 status = dhd_prot_get_snapshot(dhd_pub, SNAPSHOT_TYPE_BT, req.offset,
3347 req.buf_size, mem_info->buf, &size, &is_more);
3348 if (status == BCME_OK) {
3349 mem_info->size = size;
3350 mem_info->availability = is_more ?
3351 BT_LOG_NEXT_BUF_AVAIL : BT_LOG_NEXT_BUF_NOT_AVAIL;
3352 } else if (status == BCME_NOTREADY) {
3353 mem_info->size = 0;
3354 mem_info->availability = BT_LOG_NOT_READY;
3355 } else {
3356 mem_info->size = 0;
3357 mem_info->availability = BT_LOG_BUF_NOT_AVAILABLE;
3358 }
3359 break;
3360 }
3361 #endif /* SNAPSHOT_UPLOAD */
3362 #ifdef REPORT_FATAL_TIMEOUTS
3363 case IOV_GVAL(IOV_SCAN_TO): {
3364 dhd_get_scan_to_val(dhd_pub, (uint32 *)&int_val);
3365 bcopy(&int_val, arg, val_size);
3366 break;
3367 }
3368 case IOV_SVAL(IOV_SCAN_TO): {
3369 dhd_set_scan_to_val(dhd_pub, (uint32)int_val);
3370 break;
3371 }
3372 case IOV_GVAL(IOV_JOIN_TO): {
3373 dhd_get_join_to_val(dhd_pub, (uint32 *)&int_val);
3374 bcopy(&int_val, arg, val_size);
3375 break;
3376 }
3377 case IOV_SVAL(IOV_JOIN_TO): {
3378 dhd_set_join_to_val(dhd_pub, (uint32)int_val);
3379 break;
3380 }
3381 case IOV_GVAL(IOV_CMD_TO): {
3382 dhd_get_cmd_to_val(dhd_pub, (uint32 *)&int_val);
3383 bcopy(&int_val, arg, val_size);
3384 break;
3385 }
3386 case IOV_SVAL(IOV_CMD_TO): {
3387 dhd_set_cmd_to_val(dhd_pub, (uint32)int_val);
3388 break;
3389 }
3390 case IOV_GVAL(IOV_OQS_TO): {
3391 dhd_get_bus_to_val(dhd_pub, (uint32 *)&int_val);
3392 bcopy(&int_val, arg, val_size);
3393 break;
3394 }
3395 case IOV_SVAL(IOV_OQS_TO): {
3396 dhd_set_bus_to_val(dhd_pub, (uint32)int_val);
3397 break;
3398 }
3399 #endif /* REPORT_FATAL_TIMEOUTS */
3400 case IOV_GVAL(IOV_DONGLE_TRAP_TYPE):
3401 if (dhd_pub->dongle_trap_occured)
3402 int_val = ltoh32(dhd_pub->last_trap_info.type);
3403 else
3404 int_val = 0;
3405 bcopy(&int_val, arg, val_size);
3406 break;
3407
3408 case IOV_GVAL(IOV_DONGLE_TRAP_INFO):
3409 {
3410 struct bcmstrbuf strbuf;
3411 bcm_binit(&strbuf, arg, len);
3412 if (dhd_pub->dongle_trap_occured == FALSE) {
3413 bcm_bprintf(&strbuf, "no trap recorded\n");
3414 break;
3415 }
3416 #ifndef BCMDBUS
3417 dhd_bus_dump_trap_info(dhd_pub->bus, &strbuf);
3418 #endif /* BCMDBUS */
3419 break;
3420 }
3421 #ifdef DHD_DEBUG
3422 #if defined(BCMSDIO) || defined(BCMPCIE)
3423
3424 case IOV_GVAL(IOV_BPADDR):
3425 {
3426 sdreg_t sdreg;
3427 uint32 addr, size;
3428
3429 memcpy(&sdreg, params, sizeof(sdreg));
3430
3431 addr = sdreg.offset;
3432 size = sdreg.func;
3433
3434 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
3435 (uint *)&int_val, TRUE);
3436
3437 memcpy(arg, &int_val, sizeof(int32));
3438
3439 break;
3440 }
3441
3442 case IOV_SVAL(IOV_BPADDR):
3443 {
3444 sdreg_t sdreg;
3445 uint32 addr, size;
3446
3447 memcpy(&sdreg, params, sizeof(sdreg));
3448
3449 addr = sdreg.offset;
3450 size = sdreg.func;
3451
3452 bcmerror = dhd_bus_readwrite_bp_addr(dhd_pub, addr, size,
3453 (uint *)&sdreg.value,
3454 FALSE);
3455
3456 break;
3457 }
3458 #endif /* BCMSDIO || BCMPCIE */
3459 #ifdef BCMPCIE
3460 case IOV_SVAL(IOV_FLOW_RING_DEBUG):
3461 {
3462 bcmerror = dhd_flow_ring_debug(dhd_pub, arg, len);
3463 break;
3464 }
3465 #endif /* BCMPCIE */
3466 case IOV_SVAL(IOV_MEM_DEBUG):
3467 if (len > 0) {
3468 bcmerror = dhd_mem_debug(dhd_pub, arg, len - 1);
3469 }
3470 break;
3471 #endif /* DHD_DEBUG */
3472 #if defined(DHD_LOG_DUMP)
3473 #if defined(DHD_EFI)
3474 case IOV_GVAL(IOV_LOG_CAPTURE_ENABLE):
3475 {
3476 int_val = dhd_pub->log_capture_enable;
3477 bcopy(&int_val, arg, val_size);
3478 break;
3479 }
3480
3481 case IOV_SVAL(IOV_LOG_CAPTURE_ENABLE):
3482 {
3483 dhd_pub->log_capture_enable = (uint8)int_val;
3484 break;
3485 }
3486 #endif /* DHD_EFI */
3487 case IOV_GVAL(IOV_LOG_DUMP):
3488 {
3489 dhd_prot_debug_info_print(dhd_pub);
3490 dhd_log_dump_trigger(dhd_pub, CMD_DEFAULT);
3491 break;
3492 }
3493 #endif /* DHD_LOG_DUMP */
3494
3495 case IOV_GVAL(IOV_TPUT_TEST):
3496 {
3497 tput_test_t *tput_data = NULL;
3498 if (params && plen >= sizeof(tput_test_t)) {
3499 tput_data = (tput_test_t *)params;
3500 bcmerror = dhd_tput_test(dhd_pub, tput_data);
3501 } else {
3502 DHD_ERROR(("%s: tput test - no input params ! \n", __FUNCTION__));
3503 bcmerror = BCME_BADARG;
3504 }
3505 break;
3506 }
3507 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
3508 case IOV_SVAL(IOV_PKT_LATENCY):
3509 dhd_pub->pkt_latency = (uint32)int_val;
3510 break;
3511 case IOV_GVAL(IOV_PKT_LATENCY):
3512 int_val = (int32)dhd_pub->pkt_latency;
3513 bcopy(&int_val, arg, val_size);
3514 break;
3515 #endif /* defined(DHD_AWDL) && defined(AWDL_SLOT_STATS) */
3516 case IOV_GVAL(IOV_DEBUG_BUF_DEST_STAT):
3517 {
3518 if (dhd_pub->debug_buf_dest_support) {
3519 debug_buf_dest_stat_t *debug_buf_dest_stat =
3520 (debug_buf_dest_stat_t *)arg;
3521 memcpy(debug_buf_dest_stat, dhd_pub->debug_buf_dest_stat,
3522 sizeof(dhd_pub->debug_buf_dest_stat));
3523 } else {
3524 bcmerror = BCME_DISABLED;
3525 }
3526 break;
3527 }
3528
3529 #ifdef DHD_PKTTS
3530 case IOV_GVAL(IOV_PKTTS_ENAB): {
3531 int_val = dhd_get_pktts_enab(dhd_pub);
3532 (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3533 break;
3534 }
3535 case IOV_SVAL(IOV_PKTTS_ENAB): {
3536 dhd_set_pktts_enab(dhd_pub, !!int_val);
3537 break;
3538 }
3539
3540 case IOV_GVAL(IOV_PKTTS_FLOW): {
3541 bcmerror = dhd_get_pktts_flow(dhd_pub, arg, len);
3542 break;
3543 }
3544 case IOV_SVAL(IOV_PKTTS_FLOW): {
3545 bcmerror = dhd_set_pktts_flow(dhd_pub, params, plen);
3546 break;
3547 }
3548 #endif /* DHD_PKTTS */
3549
3550 #if defined(DHD_EFI)
3551 case IOV_SVAL(IOV_INTR_POLL):
3552 bcmerror = dhd_intr_poll(dhd_pub->bus, arg, len, TRUE);
3553 break;
3554
3555 case IOV_GVAL(IOV_INTR_POLL):
3556 bcmerror = dhd_intr_poll(dhd_pub->bus, params, plen, FALSE);
3557 break;
3558 #endif /* DHD_EFI */
3559
3560 #if defined(DHD_SSSR_DUMP)
3561 case IOV_GVAL(IOV_FIS_TRIGGER):
3562 bcmerror = dhd_bus_fis_trigger(dhd_pub);
3563
3564 if (bcmerror == BCME_OK) {
3565 bcmerror = dhd_bus_fis_dump(dhd_pub);
3566 }
3567
3568 int_val = bcmerror;
3569 bcopy(&int_val, arg, val_size);
3570 break;
3571 #endif /* defined(DHD_SSSR_DUMP) */
3572
3573 #ifdef DHD_DEBUG
3574 case IOV_SVAL(IOV_INDUCE_ERROR): {
3575 if (int_val >= DHD_INDUCE_ERROR_MAX) {
3576 DHD_ERROR(("%s: Invalid command : %u\n", __FUNCTION__, (uint16)int_val));
3577 } else {
3578 dhd_pub->dhd_induce_error = (uint16)int_val;
3579 #ifdef BCMPCIE
3580 if (dhd_pub->dhd_induce_error == DHD_INDUCE_BH_CBP_HANG) {
3581 dhdpcie_induce_cbp_hang(dhd_pub);
3582 }
3583 #endif /* BCMPCIE */
3584 }
3585 break;
3586 }
3587 #endif /* DHD_DEBUG */
3588 #ifdef WL_IFACE_MGMT_CONF
3589 #ifdef WL_CFG80211
3590 #ifdef WL_NANP2P
3591 case IOV_GVAL(IOV_CONC_DISC): {
3592 int_val = wl_cfg80211_get_iface_conc_disc(
3593 dhd_linux_get_primary_netdev(dhd_pub));
3594 bcopy(&int_val, arg, sizeof(int_val));
3595 break;
3596 }
3597 case IOV_SVAL(IOV_CONC_DISC): {
3598 bcmerror = wl_cfg80211_set_iface_conc_disc(
3599 dhd_linux_get_primary_netdev(dhd_pub), (uint8)int_val);
3600 break;
3601 }
3602 #endif /* WL_NANP2P */
3603 #ifdef WL_IFACE_MGMT
3604 case IOV_GVAL(IOV_IFACE_POLICY): {
3605 int_val = wl_cfg80211_get_iface_policy(
3606 dhd_linux_get_primary_netdev(dhd_pub));
3607 bcopy(&int_val, arg, sizeof(int_val));
3608 break;
3609 }
3610 case IOV_SVAL(IOV_IFACE_POLICY): {
3611 bcmerror = wl_cfg80211_set_iface_policy(
3612 dhd_linux_get_primary_netdev(dhd_pub),
3613 arg, len);
3614 break;
3615 }
3616 #endif /* WL_IFACE_MGMT */
3617 #endif /* WL_CFG80211 */
3618 #endif /* WL_IFACE_MGMT_CONF */
3619 #ifdef RTT_GEOFENCE_CONT
3620 #if defined (RTT_SUPPORT) && defined (WL_NAN)
3621 case IOV_GVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
3622 bool enable = 0;
3623 dhd_rtt_get_geofence_cont_ind(dhd_pub, &enable);
3624 int_val = enable ? 1 : 0;
3625 bcopy(&int_val, arg, val_size);
3626 break;
3627 }
3628 case IOV_SVAL(IOV_RTT_GEOFENCE_TYPE_OVRD): {
3629 bool enable = *(bool *)arg;
3630 dhd_rtt_set_geofence_cont_ind(dhd_pub, enable);
3631 break;
3632 }
3633 #endif /* RTT_SUPPORT && WL_NAN */
3634 #endif /* RTT_GEOFENCE_CONT */
3635 case IOV_GVAL(IOV_FW_VBS): {
3636 *(uint32 *)arg = (uint32)dhd_dbg_get_fwverbose(dhd_pub);
3637 break;
3638 }
3639
3640 case IOV_SVAL(IOV_FW_VBS): {
3641 if (int_val < 0) {
3642 int_val = 0;
3643 }
3644 dhd_dbg_set_fwverbose(dhd_pub, (uint32)int_val);
3645 break;
3646 }
3647
3648 #ifdef DHD_TX_PROFILE
3649 case IOV_SVAL(IOV_TX_PROFILE_TAG):
3650 {
3651 /* note: under the current implementation only one type of packet may be
3652 * tagged per profile
3653 */
3654 const dhd_tx_profile_protocol_t *protocol = NULL;
3655 /* for example, we might have a profile of profile_index 6, but at
3656 * offset 2 from dhd_pub->protocol_filters.
3657 */
3658 uint8 offset;
3659
3660 if (params == NULL) {
3661 bcmerror = BCME_ERROR;
3662 break;
3663 }
3664
3665 protocol = (dhd_tx_profile_protocol_t *)params;
3666
3667 /* validate */
3668 if (protocol->version != DHD_TX_PROFILE_VERSION) {
3669 bcmerror = BCME_VERSION;
3670 break;
3671 }
3672 if (protocol->profile_index > DHD_MAX_PROFILE_INDEX) {
3673 DHD_ERROR(("%s:\tprofile index must be between 0 and %d\n",
3674 __FUNCTION__, DHD_MAX_PROFILE_INDEX));
3675 bcmerror = BCME_RANGE;
3676 break;
3677 }
3678 if (protocol->layer != DHD_TX_PROFILE_DATA_LINK_LAYER && protocol->layer
3679 != DHD_TX_PROFILE_NETWORK_LAYER) {
3680 DHD_ERROR(("%s:\tlayer must be %d or %d\n", __FUNCTION__,
3681 DHD_TX_PROFILE_DATA_LINK_LAYER,
3682 DHD_TX_PROFILE_NETWORK_LAYER));
3683 bcmerror = BCME_BADARG;
3684 break;
3685 }
3686 if (protocol->protocol_number > __UINT16_MAX__) {
3687 DHD_ERROR(("%s:\tprotocol number must be <= %d\n", __FUNCTION__,
3688 __UINT16_MAX__));
3689 bcmerror = BCME_BADLEN;
3690 break;
3691 }
3692
3693 /* find the dhd_tx_profile_protocol_t */
3694 for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
3695 if (dhd_pub->protocol_filters[offset].profile_index ==
3696 protocol->profile_index) {
3697 break;
3698 }
3699 }
3700
3701 if (offset >= DHD_MAX_PROFILES) {
3702 #if DHD_MAX_PROFILES > 1
3703 DHD_ERROR(("%s:\tonly %d profiles supported at present\n",
3704 __FUNCTION__, DHD_MAX_PROFILES));
3705 #else /* DHD_MAX_PROFILES > 1 */
3706 DHD_ERROR(("%s:\tonly %d profile supported at present\n",
3707 __FUNCTION__, DHD_MAX_PROFILES));
3708 DHD_ERROR(("%s:\tthere is a profile of index %d\n", __FUNCTION__,
3709 dhd_pub->protocol_filters->profile_index));
3710 #endif /* DHD_MAX_PROFILES > 1 */
3711 bcmerror = BCME_NOMEM;
3712 break;
3713 }
3714
3715 /* memory already allocated in dhd_attach; just assign the value */
3716 dhd_pub->protocol_filters[offset] = *protocol;
3717
3718 if (offset >= dhd_pub->num_profiles) {
3719 dhd_pub->num_profiles = offset + 1;
3720 }
3721
3722 break;
3723 }
3724
3725 case IOV_SVAL(IOV_TX_PROFILE_ENABLE):
3726 dhd_pub->tx_profile_enab = int_val ? TRUE : FALSE;
3727 break;
3728
3729 case IOV_GVAL(IOV_TX_PROFILE_ENABLE):
3730 int_val = dhd_pub->tx_profile_enab;
3731 bcmerror = memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3732 break;
3733
3734 case IOV_SVAL(IOV_TX_PROFILE_DUMP):
3735 {
3736 const dhd_tx_profile_protocol_t *protocol = NULL;
3737 uint8 offset;
3738 char *format = "%s:\ttx_profile %s: %d\n";
3739
3740 for (offset = 0; offset < dhd_pub->num_profiles; offset++) {
3741 if (dhd_pub->protocol_filters[offset].profile_index == int_val) {
3742 protocol = &(dhd_pub->protocol_filters[offset]);
3743 break;
3744 }
3745 }
3746
3747 if (protocol == NULL) {
3748 DHD_ERROR(("%s:\tno profile with index %d\n", __FUNCTION__,
3749 int_val));
3750 bcmerror = BCME_ERROR;
3751 break;
3752 }
3753
3754 printf(format, __FUNCTION__, "profile_index", protocol->profile_index);
3755 printf(format, __FUNCTION__, "layer", protocol->layer);
3756 printf(format, __FUNCTION__, "protocol_number", protocol->protocol_number);
3757 printf(format, __FUNCTION__, "src_port", protocol->src_port);
3758 printf(format, __FUNCTION__, "dest_port", protocol->dest_port);
3759
3760 break;
3761 }
3762 #endif /* defined(DHD_TX_PROFILE) */
3763
3764 case IOV_GVAL(IOV_CHECK_TRAP_ROT): {
3765 int_val = dhd_pub->check_trap_rot? 1 : 0;
3766 (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3767 break;
3768 }
3769 case IOV_SVAL(IOV_CHECK_TRAP_ROT): {
3770 dhd_pub->check_trap_rot = *(bool *)arg;
3771 break;
3772 }
3773
3774 #if defined(DHD_AWDL)
3775 case IOV_SVAL(IOV_AWDL_LLC_ENABLE): {
3776 bool bval = *(bool *)arg;
3777 if (bval != 0 && bval != 1)
3778 bcmerror = BCME_ERROR;
3779 else
3780 dhd_pub->awdl_llc_enabled = bval;
3781 break;
3782 }
3783 case IOV_GVAL(IOV_AWDL_LLC_ENABLE):
3784 int_val = dhd_pub->awdl_llc_enabled;
3785 (void)memcpy_s(arg, val_size, &int_val, sizeof(int_val));
3786 break;
3787 #endif
3788 #ifdef WLEASYMESH
3789 case IOV_SVAL(IOV_1905_AL_UCAST): {
3790 uint32 bssidx;
3791 const char *val;
3792 uint8 ea[6] = {0};
3793 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3794 DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
3795 bcmerror = BCME_BADARG;
3796 break;
3797 }
3798 bcopy(val, ea, ETHER_ADDR_LEN);
3799 printf("IOV_1905_AL_UCAST:" MACDBG "\n", MAC2STRDBG(ea));
3800 bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, FALSE);
3801 break;
3802 }
3803 case IOV_GVAL(IOV_1905_AL_UCAST): {
3804 uint32 bssidx;
3805 const char *val;
3806 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3807 DHD_ERROR(("%s: 1905_al_ucast: bad parameter\n", __FUNCTION__));
3808 bcmerror = BCME_BADARG;
3809 break;
3810 }
3811
3812 bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, FALSE);
3813 break;
3814 }
3815 case IOV_SVAL(IOV_1905_AL_MCAST): {
3816 uint32 bssidx;
3817 const char *val;
3818 uint8 ea[6] = {0};
3819 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3820 DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
3821 bcmerror = BCME_BADARG;
3822 break;
3823 }
3824 bcopy(val, ea, ETHER_ADDR_LEN);
3825 printf("IOV_1905_AL_MCAST:" MACDBG "\n", MAC2STRDBG(ea));
3826 bcmerror = dhd_set_1905_almac(dhd_pub, bssidx, ea, TRUE);
3827 break;
3828 }
3829 case IOV_GVAL(IOV_1905_AL_MCAST): {
3830 uint32 bssidx;
3831 const char *val;
3832 if (dhd_iovar_parse_bssidx(dhd_pub, (char *)name, &bssidx, &val) != BCME_OK) {
3833 DHD_ERROR(("%s: 1905_al_mcast: bad parameter\n", __FUNCTION__));
3834 bcmerror = BCME_BADARG;
3835 break;
3836 }
3837
3838 bcmerror = dhd_get_1905_almac(dhd_pub, bssidx, arg, TRUE);
3839 break;
3840 }
3841 #endif /* WLEASYMESH */
3842
3843 default:
3844 bcmerror = BCME_UNSUPPORTED;
3845 break;
3846 }
3847
3848 exit:
3849 DHD_TRACE(("%s: actionid %d, bcmerror %d\n", __FUNCTION__, actionid, bcmerror));
3850 return bcmerror;
3851 }
3852
3853 #ifdef BCMDONGLEHOST
3854 /* Store the status of a connection attempt for later retrieval by an iovar */
3855 void
dhd_store_conn_status(uint32 event,uint32 status,uint32 reason)3856 dhd_store_conn_status(uint32 event, uint32 status, uint32 reason)
3857 {
3858 /* Do not overwrite a WLC_E_PRUNE with a WLC_E_SET_SSID
3859 * because an encryption/rsn mismatch results in both events, and
3860 * the important information is in the WLC_E_PRUNE.
3861 */
3862 if (!(event == WLC_E_SET_SSID && status == WLC_E_STATUS_FAIL &&
3863 dhd_conn_event == WLC_E_PRUNE)) {
3864 dhd_conn_event = event;
3865 dhd_conn_status = status;
3866 dhd_conn_reason = reason;
3867 }
3868 }
3869 #else
3870 #error "BCMDONGLEHOST not defined"
3871 #endif /* BCMDONGLEHOST */
3872
/*
 * Enqueue 'pkt' onto multi-precedence queue 'q' at precedence 'prec',
 * evicting an equal/lower-precedence packet when the queue is full.
 * Returns TRUE if 'pkt' was enqueued, FALSE if it was refused (the caller
 * retains ownership of a refused packet).
 */
bool
dhd_prec_enq(dhd_pub_t *dhdp, struct pktq *q, void *pkt, int prec)
{
	void *p;
	int eprec = -1;		/* precedence to evict from */
	bool discard_oldest;

	/* Fast case, precedence queue is not full and we are also not
	 * exceeding total queue length
	 */
	if (!pktqprec_full(q, prec) && !pktq_full(q)) {
		pktq_penq(q, prec, pkt);
		return TRUE;
	}

	/* Determine precedence from which to evict packet, if any */
	if (pktqprec_full(q, prec))
		eprec = prec;
	else if (pktq_full(q)) {
		/* whole queue full: candidate victim is the tail packet found
		 * by pktq_peek_tail(), which also reports its precedence
		 */
		p = pktq_peek_tail(q, &eprec);
		ASSERT(p);
		if (eprec > prec || eprec < 0)
			return FALSE;	/* victim outranks the new packet: refuse it */
	}

	/* Evict if needed */
	if (eprec >= 0) {
		/* Detect queueing to unconfigured precedence */
		ASSERT(!pktqprec_empty(q, eprec));
		/* wme_dp appears to be a per-precedence discard-policy bitmap:
		 * bit set => discard oldest, clear => discard newest
		 * (inferred from the pdeq/pdeq_tail choice below -- confirm)
		 */
		discard_oldest = AC_BITMAP_TST(dhdp->wme_dp, eprec);
		if (eprec == prec && !discard_oldest)
			return FALSE; /* refuse newer (incoming) packet */
		/* Evict packet according to discard policy */
		p = discard_oldest ? pktq_pdeq(q, eprec) : pktq_pdeq_tail(q, eprec);
		ASSERT(p);
#ifdef DHDTCPACK_SUPPRESS
		/* the evicted pkt may still be tracked by TCP-ACK suppression;
		 * check it before freeing and disable suppression on error
		 */
		if (dhd_tcpack_check_xmit(dhdp, p) == BCME_ERROR) {
			DHD_ERROR(("%s %d: tcpack_suppress ERROR!!! Stop using it\n",
				__FUNCTION__, __LINE__));
			dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
		}
#endif /* DHDTCPACK_SUPPRESS */
		PKTFREE(dhdp->osh, p, TRUE);
	}

	/* Enqueue */
	p = pktq_penq(q, prec, pkt);
	ASSERT(p);

	return TRUE;
}
3924
3925 /*
3926 * Functions to drop proper pkts from queue:
3927 * If one pkt in queue is non-fragmented, drop first non-fragmented pkt only
3928 * If all pkts in queue are all fragmented, find and drop one whole set fragmented pkts
3929 * If can't find pkts matching upper 2 cases, drop first pkt anyway
3930 */
bool
dhd_prec_drop_pkts(dhd_pub_t *dhdp, struct pktq *pq, int prec, f_droppkt_t fn)
{
	struct pktq_prec *q = NULL;
	void *p, *prev = NULL, *next = NULL, *first = NULL, *last = NULL, *prev_first = NULL;
	pkt_frag_t frag_info;

	ASSERT(dhdp && pq);
	ASSERT(prec >= 0 && prec < pq->num_prec);

	q = &pq->q[prec];
	p = q->head;

	if (p == NULL)
		return FALSE;

	/* Scan the sub-queue for either the first non-fragmented packet, or a
	 * complete fragment set (FIRST .. LAST).  'first'/'last' delimit the
	 * run of packets to drop; 'prev_first' is the node immediately before
	 * that run (NULL when the run starts at the head), used later to
	 * relink the queue around the removed packets.
	 */
	while (p) {
		frag_info = pkt_frag_info(dhdp->osh, p);
		if (frag_info == DHD_PKT_FRAG_NONE) {
			break;
		} else if (frag_info == DHD_PKT_FRAG_FIRST) {
			if (first) {
				/* No last frag pkt, use prev as last */
				last = prev;
				break;
			} else {
				first = p;
				prev_first = prev;
			}
		} else if (frag_info == DHD_PKT_FRAG_LAST) {
			if (first) {
				last = p;
				break;
			}
		}

		prev = p;
		p = PKTLINK(p);
	}

	if ((p == NULL) || ((frag_info != DHD_PKT_FRAG_NONE) && !(first && last))) {
		/* Not found matching pkts, use oldest */
		prev = NULL;
		p = q->head;
		frag_info = 0;	/* presumably 0 == DHD_PKT_FRAG_NONE; forces single-pkt path below -- confirm */
	}

	if (frag_info == DHD_PKT_FRAG_NONE) {
		/* single-packet drop: the run to remove is just 'p' itself */
		first = last = p;
		prev_first = prev;
	}

	/* Unlink and drop every packet in [first .. last], updating counters */
	p = first;
	while (p) {
		next = PKTLINK(p);
		q->n_pkts--;
		pq->n_pkts_tot--;

#ifdef WL_TXQ_STALL
		q->dequeue_count++;
#endif

		PKTSETLINK(p, NULL);

		/* hand each dropped packet to the caller's callback, if any */
		if (fn)
			fn(dhdp, prec, p, TRUE);

		if (p == last)
			break;

		p = next;
	}

	/* Relink the queue around the removed run and fix up head/tail */
	if (prev_first == NULL) {
		if ((q->head = next) == NULL)
			q->tail = NULL;
	} else {
		PKTSETLINK(prev_first, next);
		if (!next)
			q->tail = prev_first;
	}

	return TRUE;
}
4015
4016 static int
dhd_iovar_op(dhd_pub_t * dhd_pub,const char * name,void * params,int plen,void * arg,uint len,bool set)4017 dhd_iovar_op(dhd_pub_t *dhd_pub, const char *name,
4018 void *params, int plen, void *arg, uint len, bool set)
4019 {
4020 int bcmerror = 0;
4021 uint val_size;
4022 const bcm_iovar_t *vi = NULL;
4023 uint32 actionid;
4024
4025 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4026
4027 ASSERT(name);
4028
4029 /* Get MUST have return space */
4030 ASSERT(set || (arg && len));
4031
4032 /* Set does NOT take qualifiers */
4033 ASSERT(!set || (!params && !plen));
4034
4035 if ((vi = bcm_iovar_lookup(dhd_iovars, name)) == NULL) {
4036 bcmerror = BCME_UNSUPPORTED;
4037 goto exit;
4038 }
4039
4040 DHD_CTL(("%s: %s %s, len %d plen %d\n", __FUNCTION__,
4041 name, (set ? "set" : "get"), len, plen));
4042
4043 /* set up 'params' pointer in case this is a set command so that
4044 * the convenience int and bool code can be common to set and get
4045 */
4046 if (params == NULL) {
4047 params = arg;
4048 plen = len;
4049 }
4050
4051 if (vi->type == IOVT_VOID)
4052 val_size = 0;
4053 else if (vi->type == IOVT_BUFFER)
4054 val_size = len;
4055 else
4056 /* all other types are integer sized */
4057 val_size = sizeof(int);
4058
4059 actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid);
4060
4061 bcmerror = dhd_doiovar(dhd_pub, vi, actionid, name, params, plen, arg, len, val_size);
4062
4063 exit:
4064 return bcmerror;
4065 }
4066
/*
 * Handle a DHD-private ioctl (DHD_GET_MAGIC / DHD_GET_VERSION /
 * DHD_GET_VAR / DHD_SET_VAR).
 *
 * For GET/SET_VAR, 'buf' carries a NUL-terminated iovar name followed by
 * its argument bytes. The iovar is tried against the generic DHD table
 * first (dhd_iovar_op), then the protocol module, then the bus module,
 * and finally the timesync module when DHD_TIMESYNC is built in.
 *
 * Returns a BCME_xxx code, or -ENODEV when the bus is down/suspended and
 * the iovar is not one of the whitelisted bus-control names.
 */
int
dhd_ioctl(dhd_pub_t * dhd_pub, dhd_ioctl_t *ioc, void *buf, uint buflen)
{
	int bcmerror = 0;
	unsigned long flags;

	DHD_TRACE(("%s: Enter\n", __FUNCTION__));

	if (!buf) {
		return BCME_BADARG;
	}

	/* Serialize all DHD iovar handling; released on every exit path below */
	dhd_os_dhdiovar_lock(dhd_pub);
	switch (ioc->cmd) {
		case DHD_GET_MAGIC:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_MAGIC;
			break;

		case DHD_GET_VERSION:
			if (buflen < sizeof(int))
				bcmerror = BCME_BUFTOOSHORT;
			else
				*(int*)buf = DHD_IOCTL_VERSION;
			break;

		case DHD_GET_VAR:
		case DHD_SET_VAR:
			{
				char *arg;
				uint arglen;

				/* Reject most iovars while the bus is down, except
				 * "devreset" (needed to bring the bus back up).
				 */
				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhd_pub) &&
					bcmstricmp((char *)buf, "devreset")) {
					/* In platforms like FC19, the FW download is done via IOCTL
					 * and should not return error for IOCTLs fired before FW
					 * Download is done
					 */
					if (dhd_fw_download_status(dhd_pub) == FW_DOWNLOAD_DONE) {
						DHD_ERROR(("%s: return as fw_download_status=%d\n",
							__FUNCTION__,
							dhd_fw_download_status(dhd_pub)));
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* Mark the bus busy-in-iovar; cleared via unlock_exit
				 * (or early for the devreset-style iovars below).
				 */
				DHD_BUS_BUSY_SET_IN_DHD_IOVAR(dhd_pub);
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

#ifdef DHD_PCIE_RUNTIMEPM
				dhdpcie_runtime_bus_wake(dhd_pub, TRUE, dhd_ioctl);
#endif /* DHD_PCIE_RUNTIMEPM */

				DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
				if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhd_pub)) {
					/* If Suspend/Resume is tested via pcie_suspend IOVAR
					 * then continue to execute the IOVAR, return from here for
					 * other IOVARs, also include pciecfgreg and devreset to go
					 * through.
					 */
#ifdef DHD_EFI
					if (bcmstricmp((char *)buf, "pcie_suspend") &&
						bcmstricmp((char *)buf, "pciecfgreg") &&
						bcmstricmp((char *)buf, "devreset") &&
						bcmstricmp((char *)buf, "sdio_suspend") &&
						bcmstricmp((char *)buf, "control_signal"))
#else
					if (bcmstricmp((char *)buf, "pcie_suspend") &&
						bcmstricmp((char *)buf, "pciecfgreg") &&
						bcmstricmp((char *)buf, "devreset") &&
						bcmstricmp((char *)buf, "sdio_suspend"))
#endif /* DHD_EFI */
					{
						DHD_ERROR(("%s: bus is in suspend(%d)"
							"or suspending(0x%x) state\n",
							__FUNCTION__, dhd_pub->busstate,
							dhd_pub->dhd_bus_busy_state));
						DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
						dhd_os_busbusy_wake(dhd_pub);
						DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
						dhd_os_dhdiovar_unlock(dhd_pub);
						return -ENODEV;
					}
				}
				/* During devreset ioctl, we call dhdpcie_advertise_bus_cleanup,
				 * which will wait for all the busy contexts to get over for
				 * particular time and call ASSERT if timeout happens. As during
				 * devreset ioctal, we made DHD_BUS_BUSY_SET_IN_DHD_IOVAR,
				 * to avoid ASSERT, clear the IOCTL busy state. "devreset" ioctl is
				 * not used in Production platforms but only used in FC19 setups.
				 */
				if (!bcmstricmp((char *)buf, "devreset") ||
#ifdef BCMPCIE
					(dhd_bus_is_multibp_capable(dhd_pub->bus) &&
					!bcmstricmp((char *)buf, "dwnldstate")) ||
#endif /* BCMPCIE */
#if defined(DHD_EFI) && defined (BT_OVER_PCIE)
					!bcmstricmp((char *)buf, "btop_test") ||
					!bcmstricmp((char *)buf, "control_signal") ||
#endif /* DHD_EFI && BT_OVER_PCIE */
					FALSE)
				{
					DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
				}
				DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);

				/* scan past the name to any arguments */
				for (arg = buf, arglen = buflen; *arg && arglen; arg++, arglen--)
					;

				/* name not NUL-terminated within buflen -> malformed request */
				if (arglen == 0 || *arg) {
					bcmerror = BCME_BUFTOOSHORT;
					goto unlock_exit;
				}

				/* account for the NUL terminator */
				arg++, arglen--;
				/* call with the appropriate arguments */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_iovar_op(dhd_pub, buf, arg, arglen,
							buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_iovar_op(dhd_pub, buf, NULL, 0,
							arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* not in generic table, try protocol module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf, arg,
							arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_prot_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

				/* if still not found, try bus module */
				if (ioc->cmd == DHD_GET_VAR) {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							arg, arglen, buf, buflen, IOV_GET);
				} else {
					bcmerror = dhd_bus_iovar_op(dhd_pub, buf,
							NULL, 0, arg, arglen, IOV_SET);
				}
				if (bcmerror != BCME_UNSUPPORTED) {
					goto unlock_exit;
				}

#ifdef DHD_TIMESYNC
				/* check TS module */
				if (ioc->cmd == DHD_GET_VAR)
					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf, arg,
							arglen, buf, buflen, IOV_GET);
				else
					bcmerror = dhd_timesync_iovar_op(dhd_pub->ts, buf,
							NULL, 0, arg, arglen, IOV_SET);
#endif /* DHD_TIMESYNC */
			}
			goto unlock_exit;

		default:
			bcmerror = BCME_UNSUPPORTED;
	}
	/* MAGIC/VERSION/default path: busy flag was never set, plain unlock */
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;

unlock_exit:
	/* GET_VAR/SET_VAR path: clear the busy-in-iovar flag set above and
	 * wake anyone waiting on the bus busy state before releasing the lock.
	 */
	DHD_LINUX_GENERAL_LOCK(dhd_pub, flags);
	DHD_BUS_BUSY_CLEAR_IN_DHD_IOVAR(dhd_pub);
	dhd_os_busbusy_wake(dhd_pub);
	DHD_LINUX_GENERAL_UNLOCK(dhd_pub, flags);
	dhd_os_dhdiovar_unlock(dhd_pub);
	return bcmerror;
}
4250
4251 #ifdef SHOW_EVENTS
4252
4253 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
4254 static void
dhd_update_awdl_stats(dhd_pub_t * dhd_pub,const awdl_aws_event_data_t * aw)4255 dhd_update_awdl_stats(dhd_pub_t *dhd_pub, const awdl_aws_event_data_t *aw)
4256 {
4257 dhd_awdl_stats_t *awdl_stats;
4258 unsigned long lock_flags;
4259
4260 /* since AWDL stats are read on clear to protect against clear,
4261 * lock before update
4262 */
4263 DHD_AWDL_STATS_LOCK(dhd_pub->awdl_stats_lock, lock_flags);
4264 /* Start of AWDL slot */
4265 if (!(aw->flags & AWDL_AW_LAST_EXT)) {
4266 dhd_pub->awdl_tx_status_slot =
4267 ((aw->aw_counter/AWDL_SLOT_MULT) % AWDL_NUM_SLOTS);
4268 awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
4269 awdl_stats->slot_start_time = OSL_SYSUPTIME_US();
4270 awdl_stats->fw_slot_start_time = ntoh32_ua(&aw->fw_time);
4271 awdl_stats->num_slots++;
4272 } else {
4273 /* End of AWDL slot */
4274 awdl_stats = &dhd_pub->awdl_stats[dhd_pub->awdl_tx_status_slot];
4275 if (awdl_stats->slot_start_time) {
4276 awdl_stats->cum_slot_time +=
4277 OSL_SYSUPTIME_US() - awdl_stats->slot_start_time;
4278 /* FW reports time in us in a 32bit number.
4279 * This 32bit number wrap-arround in ~90 minutes.
4280 * Below logic considers wrap-arround too
4281 */
4282 awdl_stats->fw_cum_slot_time +=
4283 ((ntoh32_ua(&aw->fw_time) - awdl_stats->fw_slot_start_time) &
4284 (UINT_MAX));
4285
4286 }
4287 }
4288 DHD_AWDL_STATS_UNLOCK(dhd_pub->awdl_stats_lock, lock_flags);
4289 }
4290 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
4291
4292 static void
wl_show_roam_event(dhd_pub_t * dhd_pub,uint status,uint datalen,const char * event_name,char * eabuf,void * event_data)4293 wl_show_roam_event(dhd_pub_t *dhd_pub, uint status, uint datalen,
4294 const char *event_name, char *eabuf, void *event_data)
4295 {
4296 #ifdef REPORT_FATAL_TIMEOUTS
4297 OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
4298 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
4299 #endif /* REPORT_FATAL_TIMEOUTS */
4300 if (status == WLC_E_STATUS_SUCCESS) {
4301 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4302 } else {
4303 #ifdef REPORT_FATAL_TIMEOUTS
4304 /*
4305 * For secure join if WLC_E_SET_SSID returns with any failure case,
4306 * donot expect WLC_E_PSK_SUP. So clear the mask.
4307 */
4308 dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4309 #endif /* REPORT_FATAL_TIMEOUTS */
4310 if (status == WLC_E_STATUS_FAIL) {
4311 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
4312 } else if (status == WLC_E_STATUS_NO_NETWORKS) {
4313 if (datalen) {
4314 uint8 id = *((uint8 *)event_data);
4315 if (id != DOT11_MNG_PROPR_ID) {
4316 wl_roam_event_t *roam_data =
4317 (wl_roam_event_t *)event_data;
4318 bcm_xtlv_t *tlv = (bcm_xtlv_t *)roam_data->xtlvs;
4319 if (tlv->id == WLC_ROAM_NO_NETWORKS_TLV_ID) {
4320 uint32 *fail_reason = (uint32 *)tlv->data;
4321 switch (*fail_reason) {
4322 case WLC_E_REASON_NO_NETWORKS:
4323 DHD_EVENT(("MACEVENT: %s,"
4324 " no networks found\n",
4325 event_name));
4326 break;
4327 case WLC_E_REASON_NO_NETWORKS_BY_SCORE:
4328 DHD_EVENT(("MACEVENT: %s,"
4329 " no networks found by score\n",
4330 event_name));
4331 break;
4332 default:
4333 DHD_ERROR(("MACEVENT: %s,"
4334 " unknown fail reason 0x%x\n",
4335 event_name,
4336 *fail_reason));
4337 ASSERT(0);
4338 }
4339 } else {
4340 DHD_EVENT(("MACEVENT: %s,"
4341 " no networks found\n",
4342 event_name));
4343 }
4344 } else {
4345 DHD_EVENT(("MACEVENT: %s,"
4346 " no networks found\n",
4347 event_name));
4348 }
4349 } else {
4350 DHD_EVENT(("MACEVENT: %s, no networks found\n",
4351 event_name));
4352 }
4353 } else {
4354 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
4355 event_name, (int)status));
4356 }
4357 }
4358 }
4359
4360 static void
wl_show_roam_cache_update_event(const char * name,uint status,uint reason,uint datalen,void * event_data)4361 wl_show_roam_cache_update_event(const char *name, uint status,
4362 uint reason, uint datalen, void *event_data)
4363 {
4364 wlc_roam_cache_update_event_t *cache_update;
4365 uint16 len_of_tlvs;
4366 void *val_tlv_ptr;
4367 bcm_xtlv_t *val_xtlv;
4368 char ntoa_buf[ETHER_ADDR_STR_LEN];
4369 uint idx;
4370 const char* reason_name = NULL;
4371 const char* status_name = NULL;
4372 static struct {
4373 uint event;
4374 const char *event_name;
4375 } reason_names[] = {
4376 {WLC_E_REASON_INITIAL_ASSOC, "INITIAL ASSOCIATION"},
4377 {WLC_E_REASON_LOW_RSSI, "LOW_RSSI"},
4378 {WLC_E_REASON_DEAUTH, "RECEIVED DEAUTHENTICATION"},
4379 {WLC_E_REASON_DISASSOC, "RECEIVED DISASSOCATION"},
4380 {WLC_E_REASON_BCNS_LOST, "BEACONS LOST"},
4381 {WLC_E_REASON_BETTER_AP, "BETTER AP FOUND"},
4382 {WLC_E_REASON_MINTXRATE, "STUCK AT MIN TX RATE"},
4383 {WLC_E_REASON_BSSTRANS_REQ, "REQUESTED ROAM"},
4384 {WLC_E_REASON_TXFAIL, "TOO MANY TXFAILURES"}
4385 };
4386
4387 static struct {
4388 uint event;
4389 const char *event_name;
4390 } status_names[] = {
4391 {WLC_E_STATUS_SUCCESS, "operation was successful"},
4392 {WLC_E_STATUS_FAIL, "operation failed"},
4393 {WLC_E_STATUS_TIMEOUT, "operation timed out"},
4394 {WLC_E_STATUS_NO_NETWORKS, "failed due to no matching network found"},
4395 {WLC_E_STATUS_ABORT, "operation was aborted"},
4396 {WLC_E_STATUS_NO_ACK, "protocol failure: packet not ack'd"},
4397 {WLC_E_STATUS_UNSOLICITED, "AUTH or ASSOC packet was unsolicited"},
4398 {WLC_E_STATUS_ATTEMPT, "attempt to assoc to an auto auth configuration"},
4399 {WLC_E_STATUS_PARTIAL, "scan results are incomplete"},
4400 {WLC_E_STATUS_NEWSCAN, "scan aborted by another scan"},
4401 {WLC_E_STATUS_NEWASSOC, "scan aborted due to assoc in progress"},
4402 {WLC_E_STATUS_11HQUIET, "802.11h quiet period started"},
4403 {WLC_E_STATUS_SUPPRESS, "user disabled scanning"},
4404 {WLC_E_STATUS_NOCHANS, "no allowable channels to scan"},
4405 {WLC_E_STATUS_CS_ABORT, "abort channel select"},
4406 {WLC_E_STATUS_ERROR, "request failed due to error"},
4407 {WLC_E_STATUS_INVALID, "Invalid status code"}
4408 };
4409
4410 switch (reason) {
4411 case WLC_ROAM_CACHE_UPDATE_NEW_ROAM_CACHE:
4412 DHD_EVENT(("Current roam cache status %d, "
4413 "reason for cache update is new roam cache\n", status));
4414 break;
4415 case WLC_ROAM_CACHE_UPDATE_JOIN:
4416 DHD_EVENT(("Current roam cache status %d, "
4417 "reason for cache update is start of join\n", status));
4418 break;
4419 case WLC_ROAM_CACHE_UPDATE_RSSI_DELTA:
4420 DHD_EVENT(("Current roam cache status %d, "
4421 "reason for cache update is delta in rssi\n", status));
4422 break;
4423 case WLC_ROAM_CACHE_UPDATE_MOTION_RSSI_DELTA:
4424 DHD_EVENT(("Current roam cache status %d, "
4425 "reason for cache update is motion delta in rssi\n", status));
4426 break;
4427 case WLC_ROAM_CACHE_UPDATE_CHANNEL_MISS:
4428 DHD_EVENT(("Current roam cache status %d, "
4429 "reason for cache update is missed channel\n", status));
4430 break;
4431 case WLC_ROAM_CACHE_UPDATE_START_SPLIT_SCAN:
4432 DHD_EVENT(("Current roam cache status %d, "
4433 "reason for cache update is start of split scan\n", status));
4434 break;
4435 case WLC_ROAM_CACHE_UPDATE_START_FULL_SCAN:
4436 DHD_EVENT(("Current roam cache status %d, "
4437 "reason for cache update is start of full scan\n", status));
4438 break;
4439 case WLC_ROAM_CACHE_UPDATE_INIT_ASSOC:
4440 DHD_EVENT(("Current roam cache status %d, "
4441 "reason for cache update is init association\n", status));
4442 break;
4443 case WLC_ROAM_CACHE_UPDATE_FULL_SCAN_FAILED:
4444 DHD_EVENT(("Current roam cache status %d, "
4445 "reason for cache update is failure in full scan\n", status));
4446 break;
4447 case WLC_ROAM_CACHE_UPDATE_NO_AP_FOUND:
4448 DHD_EVENT(("Current roam cache status %d, "
4449 "reason for cache update is empty scan result\n", status));
4450 break;
4451 case WLC_ROAM_CACHE_UPDATE_MISSING_AP:
4452 DHD_EVENT(("Current roam cache status %d, "
4453 "reason for cache update is missed ap\n", status));
4454 break;
4455 default:
4456 DHD_EVENT(("Current roam cache status %d, "
4457 "reason for cache update is unknown %d\n", status, reason));
4458 break;
4459 }
4460
4461 if (datalen < sizeof(wlc_roam_cache_update_event_t)) {
4462 DHD_ERROR(("MACEVENT: %s, missing event data\n", name));
4463 return;
4464 }
4465
4466 cache_update = (wlc_roam_cache_update_event_t *)event_data;
4467 val_tlv_ptr = (void *)cache_update->xtlvs;
4468 len_of_tlvs = datalen - sizeof(wlc_roam_cache_update_event_t);
4469 val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
4470 if (val_xtlv->id != WL_RMC_RPT_CMD_DATA) {
4471 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
4472 name, val_xtlv->id));
4473 return;
4474 }
4475 val_tlv_ptr = (uint8 *)val_tlv_ptr + BCM_XTLV_HDR_SIZE;
4476 len_of_tlvs = val_xtlv->len;
4477
4478 while (len_of_tlvs && len_of_tlvs > BCM_XTLV_HDR_SIZE) {
4479 val_xtlv = (bcm_xtlv_t *)val_tlv_ptr;
4480 switch (val_xtlv->id) {
4481 case WL_RMC_RPT_XTLV_BSS_INFO:
4482 {
4483 rmc_bss_info_v1_t *bss_info = (rmc_bss_info_v1_t *)(val_xtlv->data);
4484 DHD_EVENT(("\t Current BSS INFO:\n"));
4485 DHD_EVENT(("\t\tRSSI: %d\n", bss_info->rssi));
4486 DHD_EVENT(("\t\tNumber of full scans performed "
4487 "on current BSS: %d\n", bss_info->fullscan_count));
4488 for (idx = 0; idx < ARRAYSIZE(reason_names); idx++) {
4489 if (reason_names[idx].event == bss_info->reason) {
4490 reason_name = reason_names[idx].event_name;
4491 }
4492 }
4493 DHD_EVENT(("\t\tReason code for last full scan: %s(%d)\n",
4494 reason_name, bss_info->reason));
4495 DHD_EVENT(("\t\tDelta between current time and "
4496 "last full scan: %d\n", bss_info->time_full_scan));
4497 for (idx = 0; idx < ARRAYSIZE(status_names); idx++) {
4498 if (status_names[idx].event == bss_info->status)
4499 status_name = status_names[idx].event_name;
4500 }
4501 DHD_EVENT(("\t\tLast status code for not roaming: %s(%d)\n",
4502 status_name, bss_info->status));
4503
4504 }
4505 break;
4506 case WL_RMC_RPT_XTLV_CANDIDATE_INFO:
4507 case WL_RMC_RPT_XTLV_USER_CACHE_INFO:
4508 {
4509 rmc_candidate_info_v1_t *candidate_info =
4510 (rmc_candidate_info_v1_t *)(val_xtlv->data);
4511 if (val_xtlv->id == WL_RMC_RPT_XTLV_CANDIDATE_INFO) {
4512 DHD_EVENT(("\t Candidate INFO:\n"));
4513 } else {
4514 DHD_EVENT(("\t User Candidate INFO:\n"));
4515 }
4516 DHD_EVENT(("\t\tBSSID: %s\n",
4517 bcm_ether_ntoa((const struct ether_addr *)
4518 &candidate_info->bssid, ntoa_buf)));
4519 DHD_EVENT(("\t\tRSSI: %d\n", candidate_info->rssi));
4520 DHD_EVENT(("\t\tChannel: %d\n", candidate_info->ctl_channel));
4521 DHD_EVENT(("\t\tDelta between current time and last "
4522 "seen time: %d\n", candidate_info->time_last_seen));
4523 DHD_EVENT(("\t\tBSS load: %d\n", candidate_info->bss_load));
4524 }
4525 break;
4526 default:
4527 DHD_ERROR(("MACEVENT: %s, unexpected xtlv id %d\n",
4528 name, val_xtlv->id));
4529 return;
4530 }
4531 val_tlv_ptr = (uint8 *)val_tlv_ptr + bcm_xtlv_size(val_xtlv,
4532 BCM_XTLV_OPTION_NONE);
4533 len_of_tlvs -= (uint16)bcm_xtlv_size(val_xtlv, BCM_XTLV_OPTION_NONE);
4534 }
4535 }
4536
4537 static void
wl_show_host_event(dhd_pub_t * dhd_pub,wl_event_msg_t * event,void * event_data,void * raw_event_ptr,char * eventmask)4538 wl_show_host_event(dhd_pub_t *dhd_pub, wl_event_msg_t *event, void *event_data,
4539 void *raw_event_ptr, char *eventmask)
4540 {
4541 uint i, status, reason;
4542 bool group = FALSE, flush_txq = FALSE, link = FALSE;
4543 bool host_data = FALSE; /* prints event data after the case when set */
4544 const char *auth_str;
4545 const char *event_name;
4546 const uchar *buf;
4547 char err_msg[256], eabuf[ETHER_ADDR_STR_LEN];
4548 uint event_type, flags, auth_type, datalen;
4549
4550 event_type = ntoh32(event->event_type);
4551 flags = ntoh16(event->flags);
4552 status = ntoh32(event->status);
4553 reason = ntoh32(event->reason);
4554 BCM_REFERENCE(reason);
4555 auth_type = ntoh32(event->auth_type);
4556 datalen = (event_data != NULL) ? ntoh32(event->datalen) : 0;
4557
4558 /* debug dump of event messages */
4559 snprintf(eabuf, sizeof(eabuf), MACDBG, MAC2STRDBG(event->addr.octet));
4560
4561 event_name = bcmevent_get_name(event_type);
4562 BCM_REFERENCE(event_name);
4563
4564 if (flags & WLC_EVENT_MSG_LINK)
4565 link = TRUE;
4566 if (flags & WLC_EVENT_MSG_GROUP)
4567 group = TRUE;
4568 if (flags & WLC_EVENT_MSG_FLUSHTXQ)
4569 flush_txq = TRUE;
4570
4571 switch (event_type) {
4572 case WLC_E_START:
4573 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4574 break;
4575 case WLC_E_DEAUTH:
4576 case WLC_E_DISASSOC:
4577 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4578 #ifdef REPORT_FATAL_TIMEOUTS
4579 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4580 #endif /* REPORT_FATAL_TIMEOUTS */
4581 break;
4582
4583 case WLC_E_ASSOC_IND:
4584 case WLC_E_REASSOC_IND:
4585
4586 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4587 #ifdef REPORT_FATAL_TIMEOUTS
4588 if (status != WLC_E_STATUS_SUCCESS) {
4589 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4590 }
4591 #endif /* REPORT_FATAL_TIMEOUTS */
4592
4593 break;
4594
4595 case WLC_E_ASSOC:
4596 case WLC_E_REASSOC:
4597 if (status == WLC_E_STATUS_SUCCESS) {
4598 DHD_EVENT(("MACEVENT: %s, MAC %s, SUCCESS\n", event_name, eabuf));
4599 } else if (status == WLC_E_STATUS_TIMEOUT) {
4600 DHD_EVENT(("MACEVENT: %s, MAC %s, TIMEOUT\n", event_name, eabuf));
4601 } else if (status == WLC_E_STATUS_FAIL) {
4602 DHD_EVENT(("MACEVENT: %s, MAC %s, FAILURE, status %d reason %d\n",
4603 event_name, eabuf, (int)status, (int)reason));
4604 } else if (status == WLC_E_STATUS_SUPPRESS) {
4605 DHD_EVENT(("MACEVENT: %s, MAC %s, SUPPRESS\n", event_name, eabuf));
4606 } else if (status == WLC_E_STATUS_NO_ACK) {
4607 DHD_EVENT(("MACEVENT: %s, MAC %s, NOACK\n", event_name, eabuf));
4608 } else {
4609 DHD_EVENT(("MACEVENT: %s, MAC %s, unexpected status %d\n",
4610 event_name, eabuf, (int)status));
4611 }
4612 #ifdef REPORT_FATAL_TIMEOUTS
4613 if (status != WLC_E_STATUS_SUCCESS) {
4614 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4615 }
4616 #endif /* REPORT_FATAL_TIMEOUTS */
4617
4618 break;
4619
4620 case WLC_E_DEAUTH_IND:
4621 case WLC_E_DISASSOC_IND:
4622 #ifdef REPORT_FATAL_TIMEOUTS
4623 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4624 #endif /* REPORT_FATAL_TIMEOUTS */
4625 DHD_EVENT(("MACEVENT: %s, MAC %s, reason %d\n", event_name, eabuf, (int)reason));
4626 break;
4627
4628 case WLC_E_AUTH:
4629 case WLC_E_AUTH_IND:
4630 if (auth_type == DOT11_OPEN_SYSTEM)
4631 auth_str = "Open System";
4632 else if (auth_type == DOT11_SHARED_KEY)
4633 auth_str = "Shared Key";
4634 else if (auth_type == DOT11_SAE)
4635 auth_str = "SAE";
4636 else {
4637 snprintf(err_msg, sizeof(err_msg), "AUTH unknown: %d", (int)auth_type);
4638 auth_str = err_msg;
4639 }
4640
4641 if (event_type == WLC_E_AUTH_IND) {
4642 DHD_EVENT(("MACEVENT: %s, MAC %s, %s\n", event_name, eabuf, auth_str));
4643 } else if (status == WLC_E_STATUS_SUCCESS) {
4644 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUCCESS\n",
4645 event_name, eabuf, auth_str));
4646 } else if (status == WLC_E_STATUS_TIMEOUT) {
4647 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, TIMEOUT\n",
4648 event_name, eabuf, auth_str));
4649 } else if (status == WLC_E_STATUS_FAIL) {
4650 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, FAILURE, status %d reason %d\n",
4651 event_name, eabuf, auth_str, (int)status, (int)reason));
4652 } else if (status == WLC_E_STATUS_SUPPRESS) {
4653 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, SUPPRESS\n",
4654 event_name, eabuf, auth_str));
4655 } else if (status == WLC_E_STATUS_NO_ACK) {
4656 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, NOACK\n",
4657 event_name, eabuf, auth_str));
4658 } else {
4659 DHD_EVENT(("MACEVENT: %s, MAC %s, %s, status %d reason %d\n",
4660 event_name, eabuf, auth_str, (int)status, (int)reason));
4661 }
4662 BCM_REFERENCE(auth_str);
4663 #ifdef REPORT_FATAL_TIMEOUTS
4664 if (status != WLC_E_STATUS_SUCCESS) {
4665 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4666 }
4667 #endif /* REPORT_FATAL_TIMEOUTS */
4668
4669 break;
4670
4671 case WLC_E_ROAM:
4672 wl_show_roam_event(dhd_pub, status, datalen,
4673 event_name, eabuf, event_data);
4674 break;
4675 case WLC_E_ROAM_START:
4676 if (datalen >= sizeof(wlc_roam_start_event_t)) {
4677 const wlc_roam_start_event_t *roam_start =
4678 (wlc_roam_start_event_t *)event_data;
4679 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
4680 " reason %d, auth %d, current bss rssi %d\n",
4681 event_name, event_type, eabuf, (int)status, (int)reason,
4682 (int)auth_type, (int)roam_start->rssi));
4683 } else {
4684 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
4685 event_name, event_type, eabuf, (int)status, (int)reason,
4686 (int)auth_type));
4687 }
4688 break;
4689 case WLC_E_ROAM_PREP:
4690 if (datalen >= sizeof(wlc_roam_prep_event_t)) {
4691 const wlc_roam_prep_event_t *roam_prep =
4692 (wlc_roam_prep_event_t *)event_data;
4693 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d,"
4694 " reason %d, auth %d, target bss rssi %d\n",
4695 event_name, event_type, eabuf, (int)status, (int)reason,
4696 (int)auth_type, (int)roam_prep->rssi));
4697 } else {
4698 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
4699 event_name, event_type, eabuf, (int)status, (int)reason,
4700 (int)auth_type));
4701 }
4702 break;
4703 case WLC_E_ROAM_CACHE_UPDATE:
4704 DHD_EVENT(("MACEVENT: %s\n", event_name));
4705 wl_show_roam_cache_update_event(event_name, status,
4706 reason, datalen, event_data);
4707 break;
4708 case WLC_E_JOIN:
4709 case WLC_E_SET_SSID:
4710 #ifdef REPORT_FATAL_TIMEOUTS
4711 OSL_ATOMIC_SET(dhd_pub->osh, &dhd_pub->set_ssid_rcvd, TRUE);
4712 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK);
4713 #endif /* REPORT_FATAL_TIMEOUTS */
4714 if (status == WLC_E_STATUS_SUCCESS) {
4715 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4716 } else {
4717 #ifdef REPORT_FATAL_TIMEOUTS
4718 /*
4719 * For secure join if WLC_E_SET_SSID returns with any failure case,
4720 * donot expect WLC_E_PSK_SUP. So clear the mask.
4721 */
4722 dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4723 #endif /* REPORT_FATAL_TIMEOUTS */
4724 if (status == WLC_E_STATUS_FAIL) {
4725 DHD_EVENT(("MACEVENT: %s, failed status %d\n", event_name, status));
4726 } else if (status == WLC_E_STATUS_NO_NETWORKS) {
4727 DHD_EVENT(("MACEVENT: %s, no networks found\n", event_name));
4728 } else {
4729 DHD_EVENT(("MACEVENT: %s, unexpected status %d\n",
4730 event_name, (int)status));
4731 }
4732 }
4733 break;
4734
4735 case WLC_E_BEACON_RX:
4736 if (status == WLC_E_STATUS_SUCCESS) {
4737 DHD_EVENT(("MACEVENT: %s, SUCCESS\n", event_name));
4738 } else if (status == WLC_E_STATUS_FAIL) {
4739 DHD_EVENT(("MACEVENT: %s, FAIL\n", event_name));
4740 } else {
4741 DHD_EVENT(("MACEVENT: %s, status %d\n", event_name, status));
4742 }
4743 break;
4744
4745 case WLC_E_LINK:
4746 DHD_EVENT(("MACEVENT: %s %s flags:0x%x status:%d reason:%d\n",
4747 event_name, link?"UP":"DOWN", flags, status, reason));
4748 #ifdef PCIE_FULL_DONGLE
4749 #ifdef REPORT_FATAL_TIMEOUTS
4750 {
4751 uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
4752 uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
4753 if ((role == WLC_E_IF_ROLE_STA) && (!link)) {
4754 dhd_clear_join_error(dhd_pub, WLC_SSID_MASK | WLC_WPA_MASK);
4755 }
4756 }
4757 #endif /* PCIE_FULL_DONGLE */
4758 #endif /* REPORT_FATAL_TIMEOUTS */
4759 BCM_REFERENCE(link);
4760 break;
4761
4762 case WLC_E_MIC_ERROR:
4763 DHD_EVENT(("MACEVENT: %s, MAC %s, Group %d, Flush %d\n",
4764 event_name, eabuf, group, flush_txq));
4765 BCM_REFERENCE(group);
4766 BCM_REFERENCE(flush_txq);
4767 break;
4768
4769 case WLC_E_ICV_ERROR:
4770 case WLC_E_UNICAST_DECODE_ERROR:
4771 case WLC_E_MULTICAST_DECODE_ERROR:
4772 DHD_EVENT(("MACEVENT: %s, MAC %s\n",
4773 event_name, eabuf));
4774 break;
4775
4776 case WLC_E_TXFAIL:
4777 DHD_EVENT(("MACEVENT: %s, RA %s status %d\n", event_name, eabuf, status));
4778 break;
4779
4780 case WLC_E_ASSOC_REQ_IE:
4781 case WLC_E_ASSOC_RESP_IE:
4782 case WLC_E_PMKID_CACHE:
4783 DHD_EVENT(("MACEVENT: %s\n", event_name));
4784 break;
4785
4786 case WLC_E_SCAN_COMPLETE:
4787 DHD_EVENT(("MACEVENT: %s\n", event_name));
4788 #ifdef REPORT_FATAL_TIMEOUTS
4789 dhd_stop_scan_timer(dhd_pub, FALSE, 0);
4790 #endif /* REPORT_FATAL_TIMEOUTS */
4791 break;
4792 case WLC_E_RSSI_LQM:
4793 case WLC_E_PFN_NET_FOUND:
4794 case WLC_E_PFN_NET_LOST:
4795 case WLC_E_PFN_SCAN_COMPLETE:
4796 case WLC_E_PFN_SCAN_NONE:
4797 case WLC_E_PFN_SCAN_ALLGONE:
4798 case WLC_E_PFN_GSCAN_FULL_RESULT:
4799 case WLC_E_PFN_SSID_EXT:
4800 DHD_EVENT(("PNOEVENT: %s\n", event_name));
4801 break;
4802
4803 case WLC_E_PFN_SCAN_BACKOFF:
4804 case WLC_E_PFN_BSSID_SCAN_BACKOFF:
4805 DHD_EVENT(("PNOEVENT: %s, status %d, reason %d\n",
4806 event_name, (int)status, (int)reason));
4807 break;
4808
4809 case WLC_E_PSK_SUP:
4810 case WLC_E_PRUNE:
4811 DHD_EVENT(("MACEVENT: %s, status %d, reason %d\n",
4812 event_name, (int)status, (int)reason));
4813 #ifdef REPORT_FATAL_TIMEOUTS
4814 dhd_clear_join_error(dhd_pub, WLC_WPA_MASK);
4815 #endif /* REPORT_FATAL_TIMEOUTS */
4816 break;
4817
4818 #ifdef WIFI_ACT_FRAME
4819 case WLC_E_ACTION_FRAME:
4820 DHD_TRACE(("MACEVENT: %s Bssid %s\n", event_name, eabuf));
4821 break;
4822 case WLC_E_ACTION_FRAME_COMPLETE:
4823 if (datalen >= sizeof(uint32)) {
4824 const uint32 *pktid = event_data;
4825 BCM_REFERENCE(pktid);
4826 DHD_EVENT(("MACEVENT: %s status %d, reason %d, pktid 0x%x\n",
4827 event_name, (int)status, (int)reason, *pktid));
4828 }
4829 break;
4830 #endif /* WIFI_ACT_FRAME */
4831
4832 #ifdef SHOW_LOGTRACE
4833 case WLC_E_TRACE:
4834 {
4835 dhd_dbg_trace_evnt_handler(dhd_pub, event_data, raw_event_ptr, datalen);
4836 break;
4837 }
4838 #endif /* SHOW_LOGTRACE */
4839
4840 case WLC_E_RSSI:
4841 if (datalen >= sizeof(int)) {
4842 DHD_EVENT(("MACEVENT: %s %d\n", event_name, ntoh32(*((int *)event_data))));
4843 }
4844 break;
4845
4846 case WLC_E_SERVICE_FOUND:
4847 case WLC_E_P2PO_ADD_DEVICE:
4848 case WLC_E_P2PO_DEL_DEVICE:
4849 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4850 break;
4851
4852 #ifdef BT_WIFI_HANDOBER
4853 case WLC_E_BT_WIFI_HANDOVER_REQ:
4854 DHD_EVENT(("MACEVENT: %s, MAC %s\n", event_name, eabuf));
4855 break;
4856 #endif
4857 #ifdef DHD_AWDL
4858 case WLC_E_AWDL_AW:
4859 if (datalen >= sizeof(awdl_aws_event_data_t)) {
4860 const awdl_aws_event_data_t *aw =
4861 (awdl_aws_event_data_t *)event_data;
4862 BCM_REFERENCE(aw);
4863 DHD_EVENT(("MACEVENT: %s, MAC %s aw_cnt %u ext_cnt %u flags %u "
4864 "aw_ch %u\n", event_name, eabuf, aw->aw_counter,
4865 aw->aw_ext_count, aw->flags, CHSPEC_CHANNEL(aw->aw_chan)));
4866 host_data = TRUE;
4867
4868 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
4869 dhd_update_awdl_stats(dhd_pub, aw);
4870 /* Store last received aw counter */
4871 dhd_pub->awdl_aw_counter = aw->aw_counter;
4872 #endif /* DHD_AWDL */
4873 }
4874 break;
4875 case WLC_E_AWDL_ROLE:
4876 DHD_EVENT(("MACEVENT: %s, MAC %s ROLE %d\n", event_name, eabuf, (int)status));
4877 break;
4878 case WLC_E_AWDL_EVENT:
4879 DHD_EVENT(("MACEVENT: %s, MAC %s status %d reason %d\n",
4880 event_name, eabuf, (int)status, (int)reason));
4881 if (datalen >= OFFSETOF(awdl_scan_event_data_t, chan_list)) {
4882 const awdl_scan_event_data_t *scan_evt =
4883 (awdl_scan_event_data_t *)event_data;
4884 BCM_REFERENCE(scan_evt);
4885 DHD_EVENT(("scan_usage %d, nscan_chans %d, ncached_chans %d, "
4886 "iscan_flags 0x%x\n", scan_evt->scan_usage,
4887 scan_evt->nscan_chans, scan_evt->ncached_chans,
4888 scan_evt->flags));
4889 host_data = TRUE;
4890 }
4891 break;
4892 #endif /* DHD_AWDL */
4893
4894 case WLC_E_CCA_CHAN_QUAL:
4895 /* I would like to check here that datalen >= sizeof(cca_chan_qual_event_t)
4896 * but since definition of cca_chan_qual_event_t is different
4897 * between blazar and legacy firmware, I will
4898 * check only that datalen is bigger than 0.
4899 */
4900 if (datalen > 0) {
4901 const cca_chan_qual_event_t *cca_event =
4902 (cca_chan_qual_event_t *)event_data;
4903 if ((cca_event->id == WL_CHAN_QUAL_FULLPM_CCA) ||
4904 (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE)) {
4905 const cca_only_chan_qual_event_t *cca_only_event =
4906 (const cca_only_chan_qual_event_t *)cca_event;
4907 BCM_REFERENCE(cca_only_event);
4908 DHD_EVENT((
4909 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4910 " channel 0x%02x\n",
4911 event_name, event_type, eabuf, (int)status,
4912 (int)reason, (int)auth_type, cca_event->chanspec));
4913 DHD_EVENT((
4914 "\tTOTAL (dur %dms me %dms notme %dms interf %dms"
4915 " ts 0x%08x)\n",
4916 cca_only_event->cca_busy_ext.duration,
4917 cca_only_event->cca_busy_ext.congest_ibss,
4918 cca_only_event->cca_busy_ext.congest_obss,
4919 cca_only_event->cca_busy_ext.interference,
4920 cca_only_event->cca_busy_ext.timestamp));
4921 DHD_EVENT((
4922 "\t !PM (dur %dms me %dms notme %dms interf %dms)\n",
4923 cca_only_event->cca_busy_nopm.duration,
4924 cca_only_event->cca_busy_nopm.congest_ibss,
4925 cca_only_event->cca_busy_nopm.congest_obss,
4926 cca_only_event->cca_busy_nopm.interference));
4927 DHD_EVENT((
4928 "\t PM (dur %dms me %dms notme %dms interf %dms)\n",
4929 cca_only_event->cca_busy_pm.duration,
4930 cca_only_event->cca_busy_pm.congest_ibss,
4931 cca_only_event->cca_busy_pm.congest_obss,
4932 cca_only_event->cca_busy_pm.interference));
4933 if (cca_event->id == WL_CHAN_QUAL_FULLPM_CCA_OFDM_DESENSE) {
4934 DHD_EVENT(("\t OFDM desense %d\n",
4935 ((const cca_only_chan_qual_event_v2_t *)
4936 cca_only_event)->ofdm_desense));
4937 }
4938 } else if (cca_event->id == WL_CHAN_QUAL_FULL_CCA) {
4939 DHD_EVENT((
4940 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4941 " channel 0x%02x (dur %dms ibss %dms obss %dms interf %dms"
4942 " ts 0x%08x)\n",
4943 event_name, event_type, eabuf, (int)status,
4944 (int)reason, (int)auth_type, cca_event->chanspec,
4945 cca_event->cca_busy_ext.duration,
4946 cca_event->cca_busy_ext.congest_ibss,
4947 cca_event->cca_busy_ext.congest_obss,
4948 cca_event->cca_busy_ext.interference,
4949 cca_event->cca_busy_ext.timestamp));
4950 } else if (cca_event->id == WL_CHAN_QUAL_CCA) {
4951 DHD_EVENT((
4952 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4953 " channel 0x%02x (dur %dms busy %dms ts 0x%08x)\n",
4954 event_name, event_type, eabuf, (int)status,
4955 (int)reason, (int)auth_type, cca_event->chanspec,
4956 cca_event->cca_busy.duration,
4957 cca_event->cca_busy.congest,
4958 cca_event->cca_busy.timestamp));
4959 } else if ((cca_event->id == WL_CHAN_QUAL_NF) ||
4960 (cca_event->id == WL_CHAN_QUAL_NF_LTE)) {
4961 DHD_EVENT((
4962 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4963 " channel 0x%02x (NF[%d] %ddB)\n",
4964 event_name, event_type, eabuf, (int)status,
4965 (int)reason, (int)auth_type, cca_event->chanspec,
4966 cca_event->id, cca_event->noise));
4967 } else {
4968 DHD_EVENT((
4969 "MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d,"
4970 " channel 0x%02x (unknown ID %d)\n",
4971 event_name, event_type, eabuf, (int)status,
4972 (int)reason, (int)auth_type, cca_event->chanspec,
4973 cca_event->id));
4974 }
4975 }
4976 break;
4977 case WLC_E_ESCAN_RESULT:
4978 if (datalen >= sizeof(wl_escan_result_v2_t)) {
4979 const wl_escan_result_v2_t *escan_result =
4980 (wl_escan_result_v2_t *)event_data;
4981 BCM_REFERENCE(escan_result);
4982 #ifdef OEM_ANDROID
4983 /* Because WLC_E_ESCAN_RESULT event log are being print too many.
4984 * So, DHD_EVENT() changes to be used DHD_TRACE() in HW4 platform.
4985 */
4986 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d \n",
4987 event_name, event_type, eabuf, (int)status));
4988 #else
4989 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d sync-id %u\n",
4990 event_name, event_type, eabuf,
4991 (int)status, dtoh16(escan_result->sync_id)));
4992 #endif /* CUSTOMER_HW4 */
4993 #ifdef REPORT_FATAL_TIMEOUTS
4994 /* a 'partial' status means the escan is still in progress
4995 * any other status implies the escan has either finished or aborted
4996 */
4997 if (status != WLC_E_STATUS_PARTIAL) {
4998 unsigned long timeout_flags = 0;
4999 uint16 syncid = dtoh16(escan_result->sync_id);
5000 /* this is to take care of the specific case where
5001 * escan event returns abort and is processed immediately
5002 * by dhd before the escan iovar has returned. In that case
5003 * if the iovar returns success, then we will be starting a
5004 * timeout even though the escan has already been aborted !
5005 * So the flag below is checked before starting the escan timeout
5006 */
5007 if (dhd_pub->timeout_info) {
5008 DHD_TIMER_LOCK(dhd_pub->timeout_info->scan_timer_lock,
5009 timeout_flags);
5010 if (!dhd_pub->timeout_info->scan_timer_active &&
5011 syncid == dhd_pub->esync_id) {
5012 dhd_pub->timeout_info->escan_aborted = TRUE;
5013 dhd_pub->timeout_info->abort_syncid = syncid;
5014 DHD_TIMER_UNLOCK(
5015 dhd_pub->timeout_info->scan_timer_lock,
5016 timeout_flags);
5017 break;
5018 } else {
5019 dhd_pub->timeout_info->escan_aborted = FALSE;
5020 }
5021 DHD_TIMER_UNLOCK(dhd_pub->timeout_info->scan_timer_lock,
5022 timeout_flags);
5023 }
5024 dhd_stop_scan_timer(dhd_pub, TRUE, dtoh16(escan_result->sync_id));
5025 }
5026 #endif /* REPORT_FATAL_TIMEOUTS */
5027 }
5028 break;
5029 case WLC_E_IF:
5030 if (datalen >= sizeof(struct wl_event_data_if)) {
5031 const struct wl_event_data_if *ifevent =
5032 (struct wl_event_data_if *)event_data;
5033 BCM_REFERENCE(ifevent);
5034
5035 DHD_EVENT(("MACEVENT: %s, opcode:0x%d ifidx:%d role:%d\n",
5036 event_name, ifevent->opcode, ifevent->ifidx, ifevent->role));
5037 }
5038 break;
5039 #ifdef SHOW_LOGTRACE
5040 case WLC_E_MSCH:
5041 {
5042 wl_mschdbg_event_handler(dhd_pub, raw_event_ptr, reason, event_data, datalen);
5043 break;
5044 }
5045 #endif /* SHOW_LOGTRACE */
5046
5047 case WLC_E_PSK_AUTH:
5048 DHD_EVENT(("MACEVENT: %s, RA %s status %d Reason:%d\n",
5049 event_name, eabuf, status, reason));
5050 break;
5051 case WLC_E_AGGR_EVENT:
5052 if (datalen >= sizeof(event_aggr_data_t)) {
5053 const event_aggr_data_t *aggrbuf = event_data;
5054 int j = 0, len = 0;
5055 const uint8 *data = aggrbuf->data;
5056 DHD_EVENT(("MACEVENT: %s, num of events %d total len %d sub events: ",
5057 event_name, aggrbuf->num_events, aggrbuf->len));
5058 for (j = 0; j < aggrbuf->num_events; j++)
5059 {
5060 const wl_event_msg_t * sub_event = (const wl_event_msg_t *)data;
5061 if (len > aggrbuf->len) {
5062 DHD_ERROR(("%s: Aggr events corrupted!",
5063 __FUNCTION__));
5064 break;
5065 }
5066 DHD_EVENT(("\n Event type: %d ", ntoh32(sub_event->event_type)));
5067 len += ALIGN_SIZE((ntoh32(sub_event->datalen) +
5068 sizeof(wl_event_msg_t)), sizeof(uint64));
5069 buf = (const uchar *)(data + sizeof(wl_event_msg_t));
5070 BCM_REFERENCE(buf);
5071 DHD_EVENT((" data (%d) : ", ntoh32(sub_event->datalen)));
5072 for (i = 0; i < ntoh32(sub_event->datalen); i++) {
5073 DHD_EVENT((" 0x%02x ", buf[i]));
5074 }
5075 data = aggrbuf->data + len;
5076 }
5077 DHD_EVENT(("\n"));
5078 }
5079 break;
5080 case WLC_E_PHY_CAL:
5081 {
5082 DHD_EVENT(("MACEVENT: %s, reason:%d\n", event_name, reason));
5083 break;
5084 }
5085 case WLC_E_NAN_CRITICAL:
5086 {
5087 DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
5088 break;
5089 }
5090 case WLC_E_NAN_NON_CRITICAL:
5091 {
5092 DHD_TRACE(("MACEVENT: %s, type:%d\n", event_name, reason));
5093 break;
5094 }
5095 case WLC_E_PROXD:
5096 if (datalen >= sizeof(wl_proxd_event_t)) {
5097 const wl_proxd_event_t *proxd =
5098 (wl_proxd_event_t*)event_data;
5099 DHD_LOG_MEM(("MACEVENT: %s, event:%d, status:%d\n",
5100 event_name, proxd->type, reason));
5101 }
5102 break;
5103 case WLC_E_RPSNOA:
5104 if (datalen >= sizeof(rpsnoa_stats_t)) {
5105 const rpsnoa_stats_t *stat = event_data;
5106 if (datalen == sizeof(*stat)) {
5107 DHD_EVENT(("MACEVENT: %s, band %s, status %d, pps %d\n", event_name,
5108 (stat->band == WLC_BAND_2G) ? "2G":"5G",
5109 stat->state, stat->last_pps));
5110 }
5111 }
5112 break;
5113 case WLC_E_WA_LQM:
5114 if (datalen >= sizeof(wl_event_wa_lqm_t)) {
5115 const wl_event_wa_lqm_t *event_wa_lqm =
5116 (wl_event_wa_lqm_t *)event_data;
5117 const bcm_xtlv_t *subevent;
5118 const wl_event_wa_lqm_basic_t *elqm_basic;
5119
5120 if ((event_wa_lqm->ver != WL_EVENT_WA_LQM_VER) ||
5121 (event_wa_lqm->len < sizeof(wl_event_wa_lqm_t) + BCM_XTLV_HDR_SIZE)) {
5122 DHD_ERROR(("MACEVENT: %s invalid (ver=%d len=%d)\n",
5123 event_name, event_wa_lqm->ver, event_wa_lqm->len));
5124 break;
5125 }
5126
5127 subevent = (const bcm_xtlv_t *)event_wa_lqm->subevent;
5128 if ((subevent->id != WL_EVENT_WA_LQM_BASIC) ||
5129 (subevent->len < sizeof(wl_event_wa_lqm_basic_t))) {
5130 DHD_ERROR(("MACEVENT: %s invalid sub-type (id=%d len=%d)\n",
5131 event_name, subevent->id, subevent->len));
5132 break;
5133 }
5134
5135 elqm_basic = (const wl_event_wa_lqm_basic_t *)subevent->data;
5136 BCM_REFERENCE(elqm_basic);
5137 DHD_EVENT(("MACEVENT: %s (RSSI=%d SNR=%d TxRate=%d RxRate=%d)\n",
5138 event_name, elqm_basic->rssi, elqm_basic->snr,
5139 elqm_basic->tx_rate, elqm_basic->rx_rate));
5140 }
5141 break;
5142
5143 case WLC_E_OBSS_DETECTION:
5144 {
5145 DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
5146 break;
5147 }
5148
5149 case WLC_E_AP_BCN_MUTE:
5150 if (datalen >= sizeof(wlc_bcn_mute_miti_event_data_v1_t)) {
5151 const wlc_bcn_mute_miti_event_data_v1_t
5152 *bcn_mute_miti_evnt_data = event_data;
5153 DHD_EVENT(("MACEVENT: %s, reason :%d uatbtt_count: %d\n",
5154 event_name, reason, bcn_mute_miti_evnt_data->uatbtt_count));
5155 }
5156 break;
5157 #ifdef WL_TWT
5158 case WLC_E_TWT:
5159 DHD_EVENT(("MACEVENT: %s, type:%d\n", event_name, reason));
5160 break;
5161 #endif /* WL_TWT */
5162 default:
5163 DHD_EVENT(("MACEVENT: %s %d, MAC %s, status %d, reason %d, auth %d\n",
5164 event_name, event_type, eabuf, (int)status, (int)reason,
5165 (int)auth_type));
5166 break;
5167 }
5168
5169 /* show any appended data if message level is set to bytes or host_data is set */
5170 if ((DHD_BYTES_ON() || (host_data == TRUE)) && DHD_EVENT_ON() && datalen) {
5171 buf = (uchar *) event_data;
5172 BCM_REFERENCE(buf);
5173 DHD_EVENT((" data (%d) : ", datalen));
5174 for (i = 0; i < datalen; i++) {
5175 DHD_EVENT((" 0x%02x ", buf[i]));
5176 }
5177 DHD_EVENT(("\n"));
5178 }
5179 } /* wl_show_host_event */
5180 #endif /* SHOW_EVENTS */
5181
5182 #ifdef DNGL_EVENT_SUPPORT
5183 /* Check whether packet is a BRCM dngl event pkt. If it is, process event data. */
5184 int
dngl_host_event(dhd_pub_t * dhdp,void * pktdata,bcm_dngl_event_msg_t * dngl_event,size_t pktlen)5185 dngl_host_event(dhd_pub_t *dhdp, void *pktdata, bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
5186 {
5187 bcm_dngl_event_t *pvt_data = (bcm_dngl_event_t *)pktdata;
5188
5189 dngl_host_event_process(dhdp, pvt_data, dngl_event, pktlen);
5190 return BCME_OK;
5191 }
5192
5193 #ifdef PARSE_DONGLE_HOST_EVENT
/* Maps a numeric health-check identifier to its printable name.
 * Tables of these entries are terminated by a {0, NULL} sentinel.
 */
typedef struct hck_id_to_str_s {
	uint32 id;	/* health-check id as reported by the dongle */
	char *name;	/* human-readable name; NULL marks end of table */
} hck_id_to_str_t;
5198
/* WL software ("dongle death") health-check ids; {0, NULL} terminates. */
hck_id_to_str_t hck_sw_id_to_str[] = {
	{WL_HC_DD_PCIE, "WL_HC_DD_PCIE"},
	{WL_HC_DD_RX_DMA_STALL, "WL_HC_DD_RX_DMA_STALL"},
	{WL_HC_DD_RX_STALL, "WL_HC_DD_RX_STALL"},
	{WL_HC_DD_TX_STALL, "WL_HC_DD_TX_STALL"},
	{WL_HC_DD_SCAN_STALL, "WL_HC_DD_SCAN_STALL"},
	{WL_HC_DD_PHY, "WL_HC_DD_PHY"},
	{WL_HC_DD_REINIT, "WL_HC_DD_REINIT"},
	{WL_HC_DD_TXQ_STALL, "WL_HC_DD_TXQ_STALL"},
	{0, NULL}
};
5210
/* PCIe-device health-check indication ids; {0, NULL} terminates. */
hck_id_to_str_t hck_pcie_module_to_str[] = {
	{HEALTH_CHECK_PCIEDEV_INDUCED_IND, "PCIEDEV_INDUCED_IND"},
	{HEALTH_CHECK_PCIEDEV_H2D_DMA_IND, "PCIEDEV_H2D_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_D2H_DMA_IND, "PCIEDEV_D2H_DMA_IND"},
	{HEALTH_CHECK_PCIEDEV_IOCTL_STALL_IND, "PCIEDEV_IOCTL_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_D3ACK_STALL_IND, "PCIEDEV_D3ACK_STALL_IND"},
	{HEALTH_CHECK_PCIEDEV_NODS_IND, "PCIEDEV_NODS_IND"},
	{HEALTH_CHECK_PCIEDEV_LINKSPEED_FALLBACK_IND, "PCIEDEV_LINKSPEED_FALLBACK_IND"},
	{HEALTH_CHECK_PCIEDEV_DSACK_STALL_IND, "PCIEDEV_DSACK_STALL_IND"},
	{0, NULL}
};
5222
/* RX-stall (v2) health-check reason codes; {0, NULL} terminates. */
hck_id_to_str_t hck_rx_stall_v2_to_str[] = {
	{BCM_RX_HC_RESERVED, "BCM_RX_HC_RESERVED"},
	{BCM_RX_HC_UNSPECIFIED, "BCM_RX_HC_UNSPECIFIED"},
	{BCM_RX_HC_UNICAST_DECRYPT_FAIL, "BCM_RX_HC_UNICAST_DECRYPT_FAIL"},
	{BCM_RX_HC_BCMC_DECRYPT_FAIL, "BCM_RX_HC_BCMC_DECRYPT_FAIL"},
	{BCM_RX_HC_UNICAST_REPLAY, "BCM_RX_HC_UNICAST_REPLAY"},
	{BCM_RX_HC_BCMC_REPLAY, "BCM_RX_HC_BCMC_REPLAY"},
	{BCM_RX_HC_AMPDU_DUP, "BCM_RX_HC_AMPDU_DUP"},
	{0, NULL}
};
5233
5234 static void
dhd_print_dongle_hck_id(uint32 id,hck_id_to_str_t * hck)5235 dhd_print_dongle_hck_id(uint32 id, hck_id_to_str_t *hck)
5236 {
5237 while (hck->name != NULL) {
5238 if (hck->id == id) {
5239 DHD_ERROR(("DONGLE_HCK_EVENT: %s\n", hck->name));
5240 return;
5241 }
5242 hck++;
5243 }
5244 }
5245
5246 void
dhd_parse_hck_common_sw_event(bcm_xtlv_t * wl_hc)5247 dhd_parse_hck_common_sw_event(bcm_xtlv_t *wl_hc)
5248 {
5249
5250 wl_rx_hc_info_v2_t *hck_rx_stall_v2;
5251 uint16 id;
5252
5253 id = ltoh16(wl_hc->id);
5254
5255 if (id == WL_HC_DD_RX_STALL_V2) {
5256 /* map the hck_rx_stall_v2 structure to the value of the XTLV */
5257 hck_rx_stall_v2 =
5258 (wl_rx_hc_info_v2_t*)wl_hc;
5259 DHD_ERROR(("type:%d len:%d if_idx:%d ac:%d pkts:%d"
5260 " drop:%d alert_th:%d reason:%d peer_ea:"MACF"\n",
5261 hck_rx_stall_v2->type,
5262 hck_rx_stall_v2->length,
5263 hck_rx_stall_v2->if_idx,
5264 hck_rx_stall_v2->ac,
5265 hck_rx_stall_v2->rx_hc_pkts,
5266 hck_rx_stall_v2->rx_hc_dropped_all,
5267 hck_rx_stall_v2->rx_hc_alert_th,
5268 hck_rx_stall_v2->reason,
5269 ETHER_TO_MACF(hck_rx_stall_v2->peer_ea)));
5270 dhd_print_dongle_hck_id(
5271 ltoh32(hck_rx_stall_v2->reason),
5272 hck_rx_stall_v2_to_str);
5273 } else {
5274 dhd_print_dongle_hck_id(ltoh16(wl_hc->id),
5275 hck_sw_id_to_str);
5276 }
5277
5278 }
5279
5280 #endif /* PARSE_DONGLE_HOST_EVENT */
5281
/*
 * Parse and log one BRCM dongle (DNGL) event.
 *
 * 'event' points at the on-wire event packet, 'dngl_event' at the event
 * message header inside it, and 'pktlen' is the total packet length used for
 * bounds checking. SOCRAM indications (firmware asserts and health-check
 * reports) are decoded and logged; afterwards, when coredump support is
 * compiled in, a SOCRAM dump is scheduled since any dongle host event is
 * treated as a fatal condition.
 */
void
dngl_host_event_process(dhd_pub_t *dhdp, bcm_dngl_event_t *event,
	bcm_dngl_event_msg_t *dngl_event, size_t pktlen)
{
	uint8 *p = (uint8 *)(event + 1);	/* payload begins right after the event header */
	uint16 type = ntoh16_ua((void *)&dngl_event->event_type);
	uint16 datalen = ntoh16_ua((void *)&dngl_event->datalen);
	uint16 version = ntoh16_ua((void *)&dngl_event->version);

	DHD_EVENT(("VERSION:%d, EVENT TYPE:%d, DATALEN:%d\n", version, type, datalen));
	/* Drop events whose claimed payload exceeds what the packet can hold.
	 * NOTE(review): if pktlen < sizeof(bcm_dngl_event_t) - ETHER_TYPE_LEN the
	 * size_t subtraction wraps and the check passes; confirm callers
	 * guarantee a minimum pktlen.
	 */
	if (datalen > (pktlen - sizeof(bcm_dngl_event_t) + ETHER_TYPE_LEN)) {
		return;
	}
	if (version != BCM_DNGL_EVENT_MSG_VERSION) {
		DHD_ERROR(("%s:version mismatch:%d:%d\n", __FUNCTION__,
			version, BCM_DNGL_EVENT_MSG_VERSION));
		return;
	}
	switch (type) {
	case DNGL_E_SOCRAM_IND:
	{
		bcm_dngl_socramind_t *socramind_ptr = (bcm_dngl_socramind_t *)p;
		/* NOTE(review): 32-bit little-endian fields are truncated into
		 * uint16 locals here — presumably the values always fit; verify.
		 */
		uint16 tag = ltoh32(socramind_ptr->tag);
		uint16 taglen = ltoh32(socramind_ptr->length);
		p = (uint8 *)socramind_ptr->value;
		DHD_EVENT(("Tag:%d Len:%d Datalen:%d\n", tag, taglen, datalen));
		switch (tag) {
		case SOCRAM_IND_ASSERT_TAG:
		{
			/*
			 * The payload consists of -
			 * null terminated function name padded till 32 bit boundary +
			 * Line number - (32 bits)
			 * Caller address (32 bits)
			 */
			char *fnname = (char *)p;
			/* Reject payloads too short to hold name + line + caller */
			if (datalen < (ROUNDUP(strlen(fnname) + 1, sizeof(uint32)) +
				sizeof(uint32) * 2)) {
				DHD_ERROR(("Wrong length:%d\n", datalen));
				return;
			}
			DHD_EVENT(("ASSRT Function:%s ", p));
			p += ROUNDUP(strlen(p) + 1, sizeof(uint32));
			DHD_EVENT(("Line:%d ", *(uint32 *)p));
			p += sizeof(uint32);
			DHD_EVENT(("Caller Addr:0x%x\n", *(uint32 *)p));
#ifdef PARSE_DONGLE_HOST_EVENT
			DHD_ERROR(("DONGLE_HCK_EVENT: SOCRAM_IND_ASSERT_TAG\n"));
#endif /* PARSE_DONGLE_HOST_EVENT */
			break;
		}
		case SOCRAM_IND_TAG_HEALTH_CHECK:
		{
			bcm_dngl_healthcheck_t *dngl_hc = (bcm_dngl_healthcheck_t *)p;
			DHD_EVENT(("SOCRAM_IND_HEALTHCHECK_TAG:%d Len:%d datalen:%d\n",
				ltoh32(dngl_hc->top_module_tag),
				ltoh32(dngl_hc->top_module_len),
				datalen));
			if (DHD_EVENT_ON()) {
				prhex("HEALTHCHECK", p, MIN(ltoh32(dngl_hc->top_module_len)
					+ BCM_XTLV_HDR_SIZE, datalen));
			}
#ifdef DHD_LOG_DUMP
			/* Preserve a copy of the health-check TLV for later log dump */
			memset(dhdp->health_chk_event_data, 0, HEALTH_CHK_BUF_SIZE);
			memcpy(dhdp->health_chk_event_data, p,
				MIN(ltoh32(dngl_hc->top_module_len),
				HEALTH_CHK_BUF_SIZE));
#endif /* DHD_LOG_DUMP */
			p = (uint8 *)dngl_hc->value;

			/* Dispatch on which firmware module reported the health check */
			switch (ltoh32(dngl_hc->top_module_tag)) {
			case HEALTH_CHECK_TOP_LEVEL_MODULE_PCIEDEV_RTE:
			{
				bcm_dngl_pcie_hc_t *pcie_hc;
				pcie_hc = (bcm_dngl_pcie_hc_t *)p;
				BCM_REFERENCE(pcie_hc);
				if (ltoh32(dngl_hc->top_module_len) <
					sizeof(bcm_dngl_pcie_hc_t)) {
					DHD_ERROR(("Wrong length:%d\n",
						ltoh32(dngl_hc->top_module_len)));
					return;
				}
				DHD_EVENT(("%d:PCIE HC error:%d flag:0x%x,"
					" control:0x%x\n",
					ltoh32(pcie_hc->version),
					ltoh32(pcie_hc->pcie_err_ind_type),
					ltoh32(pcie_hc->pcie_flag),
					ltoh32(pcie_hc->pcie_control_reg)));
#ifdef PARSE_DONGLE_HOST_EVENT
				dhd_print_dongle_hck_id(
					ltoh32(pcie_hc->pcie_err_ind_type),
					hck_pcie_module_to_str);
#endif /* PARSE_DONGLE_HOST_EVENT */
				break;
			}
#ifdef HCHK_COMMON_SW_EVENT
			case HCHK_SW_ENTITY_WL_PRIMARY:
			case HCHK_SW_ENTITY_WL_SECONDARY:
			{
				bcm_xtlv_t *wl_hc = (bcm_xtlv_t*)p;

				if (ltoh32(dngl_hc->top_module_len) <
					sizeof(bcm_xtlv_t)) {
					DHD_ERROR(("WL SW HC Wrong length:%d\n",
						ltoh32(dngl_hc->top_module_len)));
					return;
				}
				BCM_REFERENCE(wl_hc);
				DHD_EVENT(("WL SW HC type %d len %d\n",
					ltoh16(wl_hc->id), ltoh16(wl_hc->len)));

#ifdef PARSE_DONGLE_HOST_EVENT
				dhd_parse_hck_common_sw_event(wl_hc);
#endif /* PARSE_DONGLE_HOST_EVENT */
				break;

			}
#endif /* HCHK_COMMON_SW_EVENT */
			default:
			{
				DHD_ERROR(("%s:Unknown module TAG:%d\n",
					__FUNCTION__,
					ltoh32(dngl_hc->top_module_tag)));
				break;
			}
			}
			break;
		}
		default:
			DHD_ERROR(("%s:Unknown TAG\n", __FUNCTION__));
			if (p && DHD_EVENT_ON()) {
				prhex("SOCRAMIND", p, taglen);
			}
			break;
		}
		break;
	}
	default:
		DHD_ERROR(("%s:Unknown DNGL Event Type:%d\n", __FUNCTION__, type));
		if (p && DHD_EVENT_ON()) {
			prhex("SOCRAMIND", p, datalen);
		}
		break;
	}
#ifndef BCMDBUS
#ifdef DHD_FW_COREDUMP
	/* Any dongle host event is considered fatal: capture SOCRAM if enabled */
	if (dhdp->memdump_enabled) {
		dhdp->memdump_type = DUMP_TYPE_DONGLE_HOST_EVENT;
		if (
#ifdef GDB_PROXY
			/* Skip the dump while a GDB proxy session owns the chip */
			!dhdp->gdb_proxy_active &&
#endif /* GDB_PROXY */
			dhd_schedule_socram_dump(dhdp)) {
			DHD_ERROR(("%s: socram dump failed\n", __FUNCTION__));
		}
	}
#else
	dhd_dbg_send_urgent_evt(dhdp, p, datalen);
#endif /* DHD_FW_COREDUMP */
#endif /* !BCMDBUS */
}
5443
5444 #endif /* DNGL_EVENT_SUPPORT */
5445
/* Stub for now. Will become real function as soon as shim
 * is being integrated to Android, Linux etc.
 * Currently accepts every event unconditionally and reports success;
 * both parameters are ignored.
 */
#if !defined(NDIS)
int
wl_event_process_default(wl_event_msg_t *event, struct wl_evt_pport *evt_pport)
{
	return BCME_OK;
}
#endif
5456
5457 int
wl_event_process(dhd_pub_t * dhd_pub,int * ifidx,void * pktdata,uint pktlen,void ** data_ptr,void * raw_event)5458 wl_event_process(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata,
5459 uint pktlen, void **data_ptr, void *raw_event)
5460 {
5461 wl_evt_pport_t evt_pport;
5462 wl_event_msg_t event;
5463 bcm_event_msg_u_t evu;
5464 int ret;
5465
5466 /* make sure it is a BRCM event pkt and record event data */
5467 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
5468 if (ret != BCME_OK) {
5469 return ret;
5470 }
5471
5472 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
5473
5474 /* convert event from network order to host order */
5475 wl_event_to_host_order(&event);
5476
5477 /* record event params to evt_pport */
5478 evt_pport.dhd_pub = dhd_pub;
5479 evt_pport.ifidx = ifidx;
5480 evt_pport.pktdata = pktdata;
5481 evt_pport.data_ptr = data_ptr;
5482 evt_pport.raw_event = raw_event;
5483 evt_pport.data_len = pktlen;
5484
5485 #if defined(WL_WLC_SHIM) && defined(WL_WLC_SHIM_EVENTS)
5486 {
5487 struct wl_shim_node *shim = dhd_pub_shim(dhd_pub);
5488 if (shim) {
5489 ret = wl_shim_event_process(shim, &event, &evt_pport);
5490 } else {
5491 /* events can come even before shim is initialized
5492 (when waiting for "wlc_ver" response)
5493 * handle them in a non-shim way.
5494 */
5495 DHD_ERROR(("%s: Events coming before shim initialization!\n",
5496 __FUNCTION__));
5497 ret = wl_event_process_default(&event, &evt_pport);
5498 }
5499 }
5500 #else
5501 ret = wl_event_process_default(&event, &evt_pport);
5502 #endif /* WL_WLC_SHIM && WL_WLC_SHIM_EVENTS */
5503
5504 return ret;
5505 } /* wl_event_process */
5506
5507 /* Check whether packet is a BRCM event pkt. If it is, record event data. */
5508 int
wl_host_event_get_data(void * pktdata,uint pktlen,bcm_event_msg_u_t * evu)5509 wl_host_event_get_data(void *pktdata, uint pktlen, bcm_event_msg_u_t *evu)
5510 {
5511 int ret;
5512
5513 ret = is_wlc_event_frame(pktdata, pktlen, 0, evu);
5514 if (ret != BCME_OK) {
5515 DHD_ERROR(("%s: Invalid event frame, err = %d\n",
5516 __FUNCTION__, ret));
5517 }
5518
5519 return ret;
5520 }
5521
/*
 * Core BRCM host event dispatcher.
 *
 * Validates 'pktdata' as an event frame, separates dongle (DNGL) events from
 * regular WLC events, then performs per-event-type host bookkeeping
 * (interface add/del/change, STA table and flowring maintenance, wlfc,
 * PNO/RTT/CSI handlers) before pushing the event up to external
 * supplicant/authenticator consumers and the event-logging path.
 *
 * Returns BCME_OK when the event was processed as a BRCM event,
 * BCME_ERROR/BCME_NOTFOUND for DNGL or unrecognized subtypes, and other
 * error codes for malformed frames or rejected WLC_E_IF events.
 */
int
wl_process_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
	wl_event_msg_t *event, void **data_ptr, void *raw_event)
{
	bcm_event_t *pvt_data = (bcm_event_t *)pktdata;
	bcm_event_msg_u_t evu;
	uint8 *event_data;
	uint32 type, status, datalen, reason;
	uint16 flags;
	uint evlen;
	int ret;
	uint16 usr_subtype;
#if defined(__linux__)
	dhd_if_t *ifp = NULL;
	BCM_REFERENCE(ifp);
#endif /* __linux__ */

	/* Reject anything that is not a valid BRCM event frame */
	ret = wl_host_event_get_data(pktdata, pktlen, &evu);
	if (ret != BCME_OK) {
		return ret;
	}

	usr_subtype = ntoh16_ua((void *)&pvt_data->bcm_hdr.usr_subtype);
	switch (usr_subtype) {
	case BCMILCP_BCM_SUBTYPE_EVENT:
		/* Regular WLC event: copy it out and point at its payload */
		memcpy(event, &evu.event, sizeof(wl_event_msg_t));
		*data_ptr = &pvt_data[1];
		break;
	case BCMILCP_BCM_SUBTYPE_DNGLEVENT:
#ifdef DNGL_EVENT_SUPPORT
		/* If it is a DNGL event process it first */
		if (dngl_host_event(dhd_pub, pktdata, &evu.dngl_event, pktlen) == BCME_OK) {
			/*
			 * Return error purposely to prevent DNGL event being processed
			 * as BRCM event
			 */
			return BCME_ERROR;
		}
#endif /* DNGL_EVENT_SUPPORT */
		return BCME_NOTFOUND;
	default:
		return BCME_NOTFOUND;
	}

	/* start wl_event_msg process */
	event_data = *data_ptr;
	type = ntoh32_ua((void *)&event->event_type);
	flags = ntoh16_ua((void *)&event->flags);
	status = ntoh32_ua((void *)&event->status);
	reason = ntoh32_ua((void *)&event->reason);
	datalen = ntoh32_ua((void *)&event->datalen);
	evlen = datalen + sizeof(bcm_event_t);

	switch (type) {
#ifdef PROP_TXSTATUS
	case WLC_E_FIFO_CREDIT_MAP:
		dhd_wlfc_enable(dhd_pub);
		dhd_wlfc_FIFOcreditmap_event(dhd_pub, event_data);
		WLFC_DBGMESG(("WLC_E_FIFO_CREDIT_MAP:(AC0,AC1,AC2,AC3),(BC_MC),(OTHER): "
			"(%d,%d,%d,%d),(%d),(%d)\n", event_data[0], event_data[1],
			event_data[2],
			event_data[3], event_data[4], event_data[5]));
		break;

	case WLC_E_BCMC_CREDIT_SUPPORT:
		dhd_wlfc_BCMCCredit_support_event(dhd_pub);
		break;
#ifdef LIMIT_BORROW
	case WLC_E_ALLOW_CREDIT_BORROW:
		dhd_wlfc_disable_credit_borrow_event(dhd_pub, event_data);
		break;
#endif /* LIMIT_BORROW */
#endif /* PROP_TXSTATUS */

	case WLC_E_ULP:
		break;
	case WLC_E_TDLS_PEER_EVENT:
#if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
		{
			dhd_tdls_event_handler(dhd_pub, event);
		}
#endif
		break;

	case WLC_E_IF:
		{
		struct wl_event_data_if *ifevent = (struct wl_event_data_if *)event_data;

		/* Ignore the event if NOIF is set */
		if (ifevent->reserved & WLC_E_IF_FLAGS_BSSCFG_NOIF) {
			DHD_ERROR(("WLC_E_IF: NO_IF set, event Ignored\r\n"));
			return (BCME_UNSUPPORTED);
		}
#ifdef PCIE_FULL_DONGLE
		dhd_update_interface_flow_info(dhd_pub, ifevent->ifidx,
			ifevent->opcode, ifevent->role);
#endif
#ifdef PROP_TXSTATUS
		{
			uint8* ea = pvt_data->eth.ether_dhost;
			/* NOTE(review): missing ',' after the format string below —
			 * this only compiles when WLFC_DBGMESG expands to a no-op;
			 * must be fixed before enabling WLFC debug messages.
			 */
			WLFC_DBGMESG(("WLC_E_IF: idx:%d, action:%s, iftype:%s, ["MACDBG"]\n"
				ifevent->ifidx,
				((ifevent->opcode == WLC_E_IF_ADD) ? "ADD":"DEL"),
				((ifevent->role == 0) ? "STA":"AP "),
				MAC2STRDBG(ea)));
			(void)ea;

			if (ifevent->opcode == WLC_E_IF_CHANGE)
				dhd_wlfc_interface_event(dhd_pub,
					eWLFC_MAC_ENTRY_ACTION_UPDATE,
					ifevent->ifidx, ifevent->role, ea);
			else
				dhd_wlfc_interface_event(dhd_pub,
					((ifevent->opcode == WLC_E_IF_ADD) ?
					eWLFC_MAC_ENTRY_ACTION_ADD : eWLFC_MAC_ENTRY_ACTION_DEL),
					ifevent->ifidx, ifevent->role, ea);

			/* dhd already has created an interface by default, for 0 */
			if (ifevent->ifidx == 0)
				break;
		}
#endif /* PROP_TXSTATUS */

		if (ifevent->ifidx > 0 && ifevent->ifidx < DHD_MAX_IFS) {
			if (ifevent->opcode == WLC_E_IF_ADD) {
				if (dhd_event_ifadd(dhd_pub->info, ifevent, event->ifname,
					event->addr.octet)) {

					DHD_ERROR(("%s: dhd_event_ifadd failed ifidx: %d %s\n",
						__FUNCTION__, ifevent->ifidx, event->ifname));
					return (BCME_ERROR);
				}
			} else if (ifevent->opcode == WLC_E_IF_DEL) {
#ifdef PCIE_FULL_DONGLE
				/* Drop flowrings before tearing the interface down */
				dhd_flow_rings_delete(dhd_pub,
					(uint8)dhd_ifname2idx(dhd_pub->info, event->ifname));
#endif /* PCIE_FULL_DONGLE */
				dhd_event_ifdel(dhd_pub->info, ifevent, event->ifname,
					event->addr.octet);
			} else if (ifevent->opcode == WLC_E_IF_CHANGE) {
#ifdef WL_CFG80211
				dhd_event_ifchange(dhd_pub->info, ifevent, event->ifname,
					event->addr.octet);
#endif /* WL_CFG80211 */
			}
		} else {
#if !defined(PROP_TXSTATUS) && !defined(PCIE_FULL_DONGLE) && defined(WL_CFG80211)
			DHD_INFO(("%s: Invalid ifidx %d for %s\n",
				__FUNCTION__, ifevent->ifidx, event->ifname));
#endif /* !PROP_TXSTATUS && !PCIE_FULL_DONGLE && WL_CFG80211 */
		}
		/* send up the if event: btamp user needs it */
		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
		/* push up to external supp/auth */
		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
		break;
	}

	case WLC_E_NDIS_LINK:
		break;
	case WLC_E_PFN_NET_FOUND:
	case WLC_E_PFN_SCAN_ALLGONE: /* share with WLC_E_PFN_BSSID_NET_LOST */
	case WLC_E_PFN_NET_LOST:
		break;
#if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
	case WLC_E_PFN_BSSID_NET_FOUND:
	case WLC_E_PFN_BEST_BATCHING:
		dhd_pno_event_handler(dhd_pub, event, (void *)event_data);
		break;
#endif /* #if defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
#if defined(RTT_SUPPORT)
	case WLC_E_PROXD:
#ifndef WL_CFG80211
		dhd_rtt_event_handler(dhd_pub, event, (void *)event_data);
#endif /* WL_CFG80211 */
		break;
#endif /* RTT_SUPPORT */
	/* These are what external supplicant/authenticator wants */
	case WLC_E_ASSOC_IND:
	case WLC_E_AUTH_IND:
	case WLC_E_REASSOC_IND:
		dhd_findadd_sta(dhd_pub,
			dhd_ifname2idx(dhd_pub->info, event->ifname),
			&event->addr.octet);
		break;
#if !defined(BCMDBUS) && defined(DHD_FW_COREDUMP)
	case WLC_E_PSM_WATCHDOG:
		DHD_ERROR(("%s: WLC_E_PSM_WATCHDOG event received : \n", __FUNCTION__));
		if (dhd_socram_dump(dhd_pub->bus) != BCME_OK) {
			DHD_ERROR(("%s: socram dump ERROR : \n", __FUNCTION__));
		}
	break;
#endif
#ifdef DHD_WMF
	case WLC_E_PSTA_PRIMARY_INTF_IND:
		dhd_update_psta_interface_for_sta(dhd_pub, event->ifname,
			(void *)(event->addr.octet), (void*) event_data);
		break;
#endif
#ifdef BCM_ROUTER_DHD
	case WLC_E_DPSTA_INTF_IND:
		dhd_update_dpsta_interface_for_sta(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
			event->ifname), (void*) event_data);
		break;
#endif /* BCM_ROUTER_DHD */
#ifdef BCMDBG
	case WLC_E_MACDBG:
		dhd_macdbg_event_handler(dhd_pub, reason, event_data, datalen);
		break;
#endif /* BCMDBG */
	case WLC_E_NATOE_NFCT:
#ifdef WL_NATOE
		DHD_EVENT(("%s: WLC_E_NATOE_NFCT event received \n", __FUNCTION__));
		dhd_natoe_ct_event(dhd_pub, event_data);
#endif /* WL_NATOE */
		break;
	case WLC_E_SLOTTED_BSS_PEER_OP:
		DHD_EVENT(("%s: WLC_E_SLOTTED_BSS_PEER_OP event received for peer: "
			"" MACDBG ", status = %d\n",
			__FUNCTION__, MAC2STRDBG(event->addr.octet), status));
		if (status == WLC_E_STATUS_SLOTTED_PEER_ADD) {
			dhd_findadd_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
				event->ifname), &event->addr.octet);
		} else if (status == WLC_E_STATUS_SLOTTED_PEER_DEL) {
			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
			BCM_REFERENCE(ifindex);
			dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
				event->ifname), &event->addr.octet);
#ifdef PCIE_FULL_DONGLE
			dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
				(char *)&event->addr.octet[0]);
#endif
		} else {
			DHD_ERROR(("%s: WLC_E_SLOTTED_BSS_PEER_OP: Status is not expected = %d\n",
				__FUNCTION__, status));
		}
		break;
#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
	case WLC_E_REASSOC:
		ifp = dhd_get_ifp(dhd_pub, event->ifidx);

		if (!ifp)
			break;

		/* Consider STA role only since roam is disabled on P2P GC.
		 * Drop EAPOL M1 frame only if roam is done to same BSS.
		 */
		if ((status == WLC_E_STATUS_SUCCESS) &&
			IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
			wl_cfg80211_is_event_from_connected_bssid(ifp->net, event, event->ifidx)) {
			ifp->recv_reassoc_evt = TRUE;
		}
		break;
#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
#if defined(CSI_SUPPORT)
	case WLC_E_CSI:
		dhd_csi_event_handler(dhd_pub, event, (void *)event_data);
		break;
#endif /* CSI_SUPPORT */
	case WLC_E_LINK:
#ifdef PCIE_FULL_DONGLE
		if (dhd_update_interface_link_status(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
			event->ifname), (uint8)flags) != BCME_OK) {
			DHD_ERROR(("%s: dhd_update_interface_link_status Failed.\n",
				__FUNCTION__));
			break;
		}
		if (!flags) {
			DHD_ERROR(("%s: Deleting all STA from assoc list and flowrings.\n",
				__FUNCTION__));
			/* Delete all sta and flowrings */
			dhd_del_all_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info, event->ifname));
			dhd_flow_rings_delete(dhd_pub, (uint8)dhd_ifname2idx(dhd_pub->info,
				event->ifname));
		}
		/* fall through */
#endif /* PCIE_FULL_DONGLE */
	case WLC_E_DEAUTH:
	case WLC_E_DEAUTH_IND:
	case WLC_E_DISASSOC:
	case WLC_E_DISASSOC_IND:
#ifdef PCIE_FULL_DONGLE
		if (type != WLC_E_LINK) {
			uint8 ifindex = (uint8)dhd_ifname2idx(dhd_pub->info, event->ifname);
			uint8 role = dhd_flow_rings_ifindex2role(dhd_pub, ifindex);
			uint8 del_sta = TRUE;
#ifdef WL_CFG80211
			/* Keep the STA entry when the event is for a BSSID we are
			 * not connected to (stale/cross-BSS deauth).
			 */
			if (role == WLC_E_IF_ROLE_STA &&
				!wl_cfg80211_is_roam_offload(dhd_idx2net(dhd_pub, ifindex)) &&
				!wl_cfg80211_is_event_from_connected_bssid(
					dhd_idx2net(dhd_pub, ifindex), event, *ifidx)) {
				del_sta = FALSE;
			}
#endif /* WL_CFG80211 */
			DHD_EVENT(("%s: Link event %d, flags %x, status %x, role %d, del_sta %d\n",
				__FUNCTION__, type, flags, status, role, del_sta));

			if (del_sta) {
				DHD_EVENT(("%s: Deleting STA " MACDBG "\n",
					__FUNCTION__, MAC2STRDBG(event->addr.octet)));

				dhd_del_sta(dhd_pub, dhd_ifname2idx(dhd_pub->info,
					event->ifname), &event->addr.octet);
				/* Delete all flowrings for STA and P2P Client */
				if (role == WLC_E_IF_ROLE_STA || role == WLC_E_IF_ROLE_P2P_CLIENT) {
					dhd_flow_rings_delete(dhd_pub, ifindex);
				} else {
					dhd_flow_rings_delete_for_peer(dhd_pub, ifindex,
						(char *)&event->addr.octet[0]);
				}
			}
		}
#endif /* PCIE_FULL_DONGLE */
#ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
		/* fall through */
		ifp = dhd_get_ifp(dhd_pub, event->ifidx);
		if (ifp) {
			ifp->recv_reassoc_evt = FALSE;
			ifp->post_roam_evt = FALSE;
		}
#endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
		/* fall through */
	default:
		*ifidx = dhd_ifname2idx(dhd_pub->info, event->ifname);
#ifdef DHD_UPDATE_INTF_MAC
		if ((WLC_E_LINK==type)&&(WLC_EVENT_MSG_LINK&flags)) {
			dhd_event_ifchange(dhd_pub->info,
				(struct wl_event_data_if *)event,
				event->ifname,
				event->addr.octet);
		}
#endif /* DHD_UPDATE_INTF_MAC */
		/* push up to external supp/auth */
		dhd_event(dhd_pub->info, (char *)pvt_data, evlen, *ifidx);
		DHD_TRACE(("%s: MAC event %d, flags %x, status %x\n",
			__FUNCTION__, type, flags, status));
		BCM_REFERENCE(flags);
		BCM_REFERENCE(status);
		BCM_REFERENCE(reason);

		break;
	}
#if defined(BCM_ROUTER_DHD) || defined(STBAP)
	/* For routers, EAPD will be working on these events.
	 * Overwrite interface name to that event is pushed
	 * to host with its registered interface name
	 */
	memcpy(pvt_data->event.ifname, dhd_ifname(dhd_pub, *ifidx), IFNAMSIZ);
#endif

#ifdef DHD_STATUS_LOGGING
	if (dhd_pub->statlog) {
		dhd_statlog_process_event(dhd_pub, type, *ifidx,
			status, reason, flags);
	}
#endif /* DHD_STATUS_LOGGING */

#ifdef SHOW_EVENTS
	if (DHD_FWLOG_ON() || DHD_EVENT_ON()) {
		wl_show_host_event(dhd_pub, event,
			(void *)event_data, raw_event, dhd_pub->enable_log);
	}
#endif /* SHOW_EVENTS */

	return (BCME_OK);
} /* wl_process_host_event */
5888
5889 int
wl_host_event(dhd_pub_t * dhd_pub,int * ifidx,void * pktdata,uint pktlen,wl_event_msg_t * event,void ** data_ptr,void * raw_event)5890 wl_host_event(dhd_pub_t *dhd_pub, int *ifidx, void *pktdata, uint pktlen,
5891 wl_event_msg_t *event, void **data_ptr, void *raw_event)
5892 {
5893 return wl_process_host_event(dhd_pub, ifidx, pktdata, pktlen, event, data_ptr,
5894 raw_event);
5895 }
5896
/*
 * Hex-dump 'len' bytes of 'pbuf' to the console, 'bytes_per_line' octets
 * per line separated by ':' (0 means everything on one line).
 * Compiled out entirely unless DHD_DEBUG is defined.
 */
void
dhd_print_buf(void *pbuf, int len, int bytes_per_line)
{
#ifdef DHD_DEBUG
	unsigned char *p = pbuf;
	int idx;
	int col = 0;
	int per_line = (bytes_per_line == 0) ? len : bytes_per_line;

	for (idx = 0; idx < len; idx++) {
		printf("%2.2x", p[idx]);
		if (++col == per_line) {
			printf("\n");
			col = 0;
		} else {
			printf(":");
		}
	}
	printf("\n");
#endif /* DHD_DEBUG */
}
#ifndef strtoul
/* Map strtoul onto the BCM utility implementation when libc's is unavailable */
#define strtoul(nptr, endptr, base) bcm_strtoul((nptr), (endptr), (base))
#endif
5924
5925 /* Convert user's input in hex pattern to byte-size mask */
5926 int
wl_pattern_atoh(char * src,char * dst)5927 wl_pattern_atoh(char *src, char *dst)
5928 {
5929 int i;
5930 if (strncmp(src, "0x", 2) != 0 &&
5931 strncmp(src, "0X", 2) != 0) {
5932 DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
5933 return -1;
5934 }
5935 src = src + 2; /* Skip past 0x */
5936 if (strlen(src) % 2 != 0) {
5937 DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
5938 return -1;
5939 }
5940 for (i = 0; *src != '\0'; i++) {
5941 char num[3];
5942 bcm_strncpy_s(num, sizeof(num), src, 2);
5943 num[2] = '\0';
5944 dst[i] = (uint8)strtoul(num, NULL, 16);
5945 src += 2;
5946 }
5947 return i;
5948 }
5949
#if defined(PKT_FILTER_SUPPORT) || defined(DHD_PKT_LOGGING)
/* Bounded variant of wl_pattern_atoh(): converts at most 'len' bytes of the
 * "0x..." hex string into dst. Returns the byte count, or -1 on bad format
 * or when the pattern would overflow 'len'.
 */
int
pattern_atoh_len(char *src, char *dst, int len)
{
	int nbytes = 0;
	char *p;

	if (strncmp(src, "0x", HD_PREFIX_SIZE) != 0 &&
	    strncmp(src, "0X", HD_PREFIX_SIZE) != 0) {
		DHD_ERROR(("Mask invalid format. Needs to start with 0x\n"));
		return -1;
	}
	p = src + HD_PREFIX_SIZE;	/* Skip past 0x */
	if (strlen(p) % HD_BYTE_SIZE != 0) {
		DHD_ERROR(("Mask invalid format. Needs to be of even length\n"));
		return -1;
	}
	while (*p != '\0') {
		char hexbyte[HD_BYTE_SIZE + 1];

		if (nbytes > len - 1) {
			DHD_ERROR(("pattern not in range, idx: %d len: %d\n", nbytes, len));
			return -1;
		}
		bcm_strncpy_s(hexbyte, sizeof(hexbyte), p, HD_BYTE_SIZE);
		hexbyte[HD_BYTE_SIZE] = '\0';
		dst[nbytes++] = (uint8)strtoul(hexbyte, NULL, 16);
		p += HD_BYTE_SIZE;
	}
	return nbytes;
}
#endif /* PKT_FILTER_SUPPORT || DHD_PKT_LOGGING */
5980
5981 #ifdef PKT_FILTER_SUPPORT
/*
 * Enable or disable a previously-added packet filter in the dongle.
 *
 * 'arg' carries the ASCII filter id (first whitespace-delimited token),
 * 'enable' selects enable/disable and 'master_mode' is then written to the
 * "pkt_filter_mode" iovar. Errors are logged, not returned to the caller.
 */
void
dhd_pktfilter_offload_enable(dhd_pub_t * dhd, char *arg, int enable, int master_mode)
{
	char *argv[8];
	int i = 0;
	const char *str;
	int buf_len;
	int str_len;
	char *arg_save = 0, *arg_org = 0;
	int rc;
	char buf[32] = {0};
	wl_pkt_filter_enable_t enable_parm;
	wl_pkt_filter_enable_t * pkt_filterp;

	if (!arg)
		return;

	/* Work on a private copy so bcmstrtok() can insert NULs freely */
	if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
		DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
		goto fail;
	}
	arg_org = arg_save;	/* keep original pointer for MFREE */
	memcpy(arg_save, arg, strlen(arg) + 1);

	argv[i] = bcmstrtok(&arg_save, " ", 0);

	i = 0;
	if (argv[i] == NULL) {
		DHD_ERROR(("No args provided\n"));
		goto fail;
	}

	/* Build "pkt_filter_enable\0<wl_pkt_filter_enable_t>" in buf */
	str = "pkt_filter_enable";
	str_len = strlen(str);
	bcm_strncpy_s(buf, sizeof(buf) - 1, str, sizeof(buf) - 1);
	buf[ sizeof(buf) - 1 ] = '\0';
	buf_len = str_len + 1;

	/* Payload starts just past the iovar name's terminating NUL */
	pkt_filterp = (wl_pkt_filter_enable_t *)(buf + str_len + 1);

	/* Parse packet filter id. */
	enable_parm.id = htod32(strtoul(argv[i], NULL, 0));
	if (dhd_conf_del_pkt_filter(dhd, enable_parm.id))
		goto fail;

	/* Parse enable/disable value. */
	enable_parm.enable = htod32(enable);

	buf_len += sizeof(enable_parm);
	/* memcpy since pkt_filterp may not be suitably aligned inside buf */
	memcpy((char *)pkt_filterp,
	       &enable_parm,
	       sizeof(enable_parm));

	/* Enable/disable the specified filter. */
	rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc) {
		DHD_ERROR(("%s: failed to %s pktfilter %s, retcode = %d\n",
			__FUNCTION__, enable?"enable":"disable", arg, rc));
		/* Re-install the configured filter set and retry once */
		dhd_set_packet_filter(dhd);
		rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
		rc = rc >= 0 ? 0 : rc;
		if (rc) {
			DHD_TRACE_HW4(("%s: 2nd retry failed to add pktfilter %s, retcode = %d\n",
				__FUNCTION__, arg, rc));
		} else {
			DHD_TRACE_HW4(("%s: 2nd retry successfully added pktfilter %s\n",
				__FUNCTION__, arg));
		}
	}
	else
		DHD_TRACE(("%s: successfully %s pktfilter %s\n",
			__FUNCTION__, enable?"enable":"disable", arg));

	/* Control the master mode */
	rc = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_mode",
		master_mode, WLC_SET_VAR, TRUE, 0);
	rc = rc >= 0 ? 0 : rc;
	if (rc)
		DHD_TRACE(("%s: failed to set pkt_filter_mode %d, retcode = %d\n",
			__FUNCTION__, master_mode, rc));

fail:
	if (arg_org)
		MFREE(dhd->osh, arg_org, strlen(arg) + 1);
}
6068
/* Packet filter section: extended filters have named offsets, add table here */
typedef struct {
	char *name;	/* symbolic base name (entries come from WL_PKT_FILTER_BASE_NAMES) */
	uint16 base;	/* numeric base-offset id sent to the firmware */
} wl_pfbase_t;

/* Lookup table mapping user-visible base names to firmware base ids */
static wl_pfbase_t basenames[] = { WL_PKT_FILTER_BASE_NAMES };
6076
6077 static int
wl_pkt_filter_base_parse(char * name)6078 wl_pkt_filter_base_parse(char *name)
6079 {
6080 uint i;
6081 char *bname, *uname;
6082
6083 for (i = 0; i < ARRAYSIZE(basenames); i++) {
6084 bname = basenames[i].name;
6085 for (uname = name; *uname; bname++, uname++) {
6086 if (*bname != bcm_toupper(*uname)) {
6087 break;
6088 }
6089 }
6090 if (!*uname && !*bname) {
6091 break;
6092 }
6093 }
6094
6095 if (i < ARRAYSIZE(basenames)) {
6096 return basenames[i].base;
6097 } else {
6098 return -1;
6099 }
6100 }
6101
6102 void
dhd_pktfilter_offload_set(dhd_pub_t * dhd,char * arg)6103 dhd_pktfilter_offload_set(dhd_pub_t * dhd, char *arg)
6104 {
6105 const char *str;
6106 wl_pkt_filter_t pkt_filter;
6107 wl_pkt_filter_t *pkt_filterp;
6108 int buf_len;
6109 int str_len;
6110 int rc = -1;
6111 uint32 mask_size;
6112 uint32 pattern_size;
6113 char *argv[MAXPKT_ARG] = {0}, * buf = 0;
6114 int i = 0;
6115 char *arg_save = 0, *arg_org = 0;
6116
6117 if (!arg)
6118 return;
6119
6120 if (!(arg_save = MALLOC(dhd->osh, strlen(arg) + 1))) {
6121 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
6122 goto fail;
6123 }
6124
6125 arg_org = arg_save;
6126
6127 if (!(buf = MALLOC(dhd->osh, MAX_PKTFLT_BUF_SIZE))) {
6128 DHD_ERROR(("%s: malloc failed\n", __FUNCTION__));
6129 goto fail;
6130 }
6131
6132 memset(buf, 0, MAX_PKTFLT_BUF_SIZE);
6133 memcpy(arg_save, arg, strlen(arg) + 1);
6134
6135 if (strlen(arg) > MAX_PKTFLT_BUF_SIZE) {
6136 DHD_ERROR(("Not enough buffer %d < %d\n", (int)strlen(arg), (int)sizeof(buf)));
6137 goto fail;
6138 }
6139
6140 argv[i] = bcmstrtok(&arg_save, " ", 0);
6141 while (argv[i++]) {
6142 if (i >= MAXPKT_ARG) {
6143 DHD_ERROR(("Invalid args provided\n"));
6144 goto fail;
6145 }
6146 argv[i] = bcmstrtok(&arg_save, " ", 0);
6147 }
6148
6149 i = 0;
6150 if (argv[i] == NULL) {
6151 DHD_ERROR(("No args provided\n"));
6152 goto fail;
6153 }
6154
6155 str = "pkt_filter_add";
6156 str_len = strlen(str);
6157 bcm_strncpy_s(buf, MAX_PKTFLT_BUF_SIZE, str, str_len);
6158 buf[ str_len ] = '\0';
6159 buf_len = str_len + 1;
6160
6161 pkt_filterp = (wl_pkt_filter_t *) (buf + str_len + 1);
6162
6163 /* Parse packet filter id. */
6164 pkt_filter.id = htod32(strtoul(argv[i], NULL, 0));
6165
6166 if (argv[++i] == NULL) {
6167 DHD_ERROR(("Polarity not provided\n"));
6168 goto fail;
6169 }
6170
6171 /* Parse filter polarity. */
6172 pkt_filter.negate_match = htod32(strtoul(argv[i], NULL, 0));
6173
6174 if (argv[++i] == NULL) {
6175 DHD_ERROR(("Filter type not provided\n"));
6176 goto fail;
6177 }
6178
6179 /* Parse filter type. */
6180 pkt_filter.type = htod32(strtoul(argv[i], NULL, 0));
6181
6182 if ((pkt_filter.type == 0) || (pkt_filter.type == 1)) {
6183 if (argv[++i] == NULL) {
6184 DHD_ERROR(("Offset not provided\n"));
6185 goto fail;
6186 }
6187
6188 /* Parse pattern filter offset. */
6189 pkt_filter.u.pattern.offset = htod32(strtoul(argv[i], NULL, 0));
6190
6191 if (argv[++i] == NULL) {
6192 DHD_ERROR(("Bitmask not provided\n"));
6193 goto fail;
6194 }
6195
6196 /* Parse pattern filter mask. */
6197 rc = wl_pattern_atoh(argv[i],
6198 (char *) pkt_filterp->u.pattern.mask_and_pattern);
6199
6200 if (rc == -1) {
6201 DHD_ERROR(("Rejecting: %s\n", argv[i]));
6202 goto fail;
6203 }
6204 mask_size = htod32(rc);
6205 if (argv[++i] == NULL) {
6206 DHD_ERROR(("Pattern not provided\n"));
6207 goto fail;
6208 }
6209
6210 /* Parse pattern filter pattern. */
6211 rc = wl_pattern_atoh(argv[i],
6212 (char *) &pkt_filterp->u.pattern.mask_and_pattern[rc]);
6213
6214 if (rc == -1) {
6215 DHD_ERROR(("Rejecting: %s\n", argv[i]));
6216 goto fail;
6217 }
6218 pattern_size = htod32(rc);
6219 if (mask_size != pattern_size) {
6220 DHD_ERROR(("Mask and pattern not the same size\n"));
6221 goto fail;
6222 }
6223
6224 pkt_filter.u.pattern.size_bytes = mask_size;
6225 buf_len += WL_PKT_FILTER_FIXED_LEN;
6226 buf_len += (WL_PKT_FILTER_PATTERN_FIXED_LEN + 2 * rc);
6227
6228 /* Keep-alive attributes are set in local variable (keep_alive_pkt), and
6229 * then memcpy'ed into buffer (keep_alive_pktp) since there is no
6230 * guarantee that the buffer is properly aligned.
6231 */
6232 memcpy((char *)pkt_filterp,
6233 &pkt_filter,
6234 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_FIXED_LEN);
6235 } else if ((pkt_filter.type == 2) || (pkt_filter.type == 6)) {
6236 int list_cnt = 0;
6237 char *endptr = NULL;
6238 wl_pkt_filter_pattern_listel_t *pf_el =
6239 (wl_pkt_filter_pattern_listel_t *)&pkt_filterp->u.patlist.patterns[0];
6240
6241 while (argv[++i] != NULL) {
6242 /* Check valid buffer size. */
6243 if ((buf_len + MAX_PKTFLT_FIXED_BUF_SIZE) > MAX_PKTFLT_BUF_SIZE) {
6244 DHD_ERROR(("buffer over length MAX_PKTFLT_FIXED_BUF_SIZE\n"));
6245 goto fail;
6246 }
6247
6248 /* Parse pattern filter base and offset. */
6249 if (bcm_isdigit(*argv[i])) {
6250 /* Numeric base */
6251 rc = strtoul(argv[i], &endptr, 0);
6252 } else {
6253 endptr = strchr(argv[i], ':');
6254 if (endptr) {
6255 *endptr = '\0';
6256 rc = wl_pkt_filter_base_parse(argv[i]);
6257 if (rc == -1) {
6258 printf("Invalid base %s\n", argv[i]);
6259 goto fail;
6260 }
6261 *endptr = ':';
6262 }
6263 }
6264
6265 if (endptr == NULL) {
6266 printf("Invalid [base:]offset format: %s\n", argv[i]);
6267 goto fail;
6268 }
6269
6270 if (*endptr == ':') {
6271 pf_el->base_offs = htod16(rc);
6272 rc = strtoul(endptr + 1, &endptr, 0);
6273 } else {
6274 /* Must have had a numeric offset only */
6275 pf_el->base_offs = htod16(0);
6276 }
6277
6278 if (*endptr) {
6279 printf("Invalid [base:]offset format: %s\n", argv[i]);
6280 goto fail;
6281 }
6282 if (rc > 0x0000FFFF) {
6283 printf("Offset too large\n");
6284 goto fail;
6285 }
6286 pf_el->rel_offs = htod16(rc);
6287
6288 /* Clear match_flag (may be set in parsing which follows) */
6289 pf_el->match_flags = htod16(0);
6290
6291 /* Parse pattern filter mask and pattern directly into ioctl buffer */
6292 if (argv[++i] == NULL) {
6293 printf("Bitmask not provided\n");
6294 goto fail;
6295 }
6296 rc = wl_pattern_atoh(argv[i], (char*)pf_el->mask_and_data);
6297 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
6298 printf("Rejecting: %s\n", argv[i]);
6299 goto fail;
6300 }
6301 mask_size = htod16(rc);
6302
6303 if (argv[++i] == NULL) {
6304 printf("Pattern not provided\n");
6305 goto fail;
6306 }
6307
6308 endptr = argv[i];
6309 if (*endptr == '!') {
6310 pf_el->match_flags =
6311 htod16(WL_PKT_FILTER_MFLAG_NEG);
6312 if (*(++endptr) == '\0') {
6313 printf("Pattern not provided\n");
6314 goto fail;
6315 }
6316 }
6317 rc = wl_pattern_atoh(endptr, (char*)&pf_el->mask_and_data[rc]);
6318 if ((rc == -1) || (rc > MAX_PKTFLT_FIXED_PATTERN_SIZE)) {
6319 printf("Rejecting: %s\n", argv[i]);
6320 goto fail;
6321 }
6322 pattern_size = htod16(rc);
6323
6324 if (mask_size != pattern_size) {
6325 printf("Mask and pattern not the same size\n");
6326 goto fail;
6327 }
6328
6329 pf_el->size_bytes = mask_size;
6330
6331 /* Account for the size of this pattern element */
6332 buf_len += WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc;
6333
6334 /* Move to next element location in ioctl buffer */
6335 pf_el = (wl_pkt_filter_pattern_listel_t*)
6336 ((uint8*)pf_el + WL_PKT_FILTER_PATTERN_LISTEL_FIXED_LEN + 2 * rc);
6337
6338 /* Count list element */
6339 list_cnt++;
6340 }
6341
6342 /* Account for initial fixed size, and copy initial fixed fields */
6343 buf_len += WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN;
6344
6345 if (buf_len > MAX_PKTFLT_BUF_SIZE) {
6346 DHD_ERROR(("buffer over length MAX_PKTFLT_BUF_SIZE\n"));
6347 goto fail;
6348 }
6349
6350 /* Update list count and total size */
6351 pkt_filter.u.patlist.list_cnt = list_cnt;
6352 pkt_filter.u.patlist.PAD1[0] = 0;
6353 pkt_filter.u.patlist.totsize = buf + buf_len - (char*)pkt_filterp;
6354 pkt_filter.u.patlist.totsize -= WL_PKT_FILTER_FIXED_LEN;
6355
6356 memcpy((char *)pkt_filterp, &pkt_filter,
6357 WL_PKT_FILTER_FIXED_LEN + WL_PKT_FILTER_PATTERN_LIST_FIXED_LEN);
6358 } else {
6359 DHD_ERROR(("Invalid filter type %d\n", pkt_filter.type));
6360 goto fail;
6361 }
6362
6363 rc = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
6364 rc = rc >= 0 ? 0 : rc;
6365
6366 if (rc)
6367 DHD_ERROR(("%s: failed to add pktfilter %s, retcode = %d\n",
6368 __FUNCTION__, arg, rc));
6369 else
6370 DHD_TRACE(("%s: successfully added pktfilter %s\n",
6371 __FUNCTION__, arg));
6372
6373 fail:
6374 if (arg_org)
6375 MFREE(dhd->osh, arg_org, strlen(arg) + 1);
6376
6377 if (buf)
6378 MFREE(dhd->osh, buf, MAX_PKTFLT_BUF_SIZE);
6379 }
6380
6381 void
dhd_pktfilter_offload_delete(dhd_pub_t * dhd,int id)6382 dhd_pktfilter_offload_delete(dhd_pub_t *dhd, int id)
6383 {
6384 int ret;
6385
6386 ret = dhd_wl_ioctl_set_intiovar(dhd, "pkt_filter_delete",
6387 id, WLC_SET_VAR, TRUE, 0);
6388 if (ret < 0) {
6389 DHD_ERROR(("%s: Failed to delete filter ID:%d, ret=%d\n",
6390 __FUNCTION__, id, ret));
6391 }
6392 else
6393 DHD_TRACE(("%s: successfully deleted pktfilter %d\n",
6394 __FUNCTION__, id));
6395 }
6396 #endif /* PKT_FILTER_SUPPORT */
6397
6398 /* ========================== */
6399 /* ==== ARP OFFLOAD SUPPORT = */
6400 /* ========================== */
6401 #ifdef ARP_OFFLOAD_SUPPORT
6402 void
dhd_arp_offload_set(dhd_pub_t * dhd,int arp_mode)6403 dhd_arp_offload_set(dhd_pub_t * dhd, int arp_mode)
6404 {
6405 int retcode;
6406
6407 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arp_ol",
6408 arp_mode, WLC_SET_VAR, TRUE, 0);
6409
6410 retcode = retcode >= 0 ? 0 : retcode;
6411 if (retcode) {
6412 DHD_ERROR(("%s: failed to set ARP offload mode to 0x%x, retcode = %d\n",
6413 __FUNCTION__, arp_mode, retcode));
6414 } else {
6415 DHD_ARPOE(("%s: successfully set ARP offload mode to 0x%x\n",
6416 __FUNCTION__, arp_mode));
6417 dhd->arpol_configured = TRUE;
6418 }
6419 }
6420
6421 void
dhd_arp_offload_enable(dhd_pub_t * dhd,int arp_enable)6422 dhd_arp_offload_enable(dhd_pub_t * dhd, int arp_enable)
6423 {
6424 int retcode;
6425
6426 if (!dhd->arpol_configured) {
6427 /* If arpol is not applied, apply it */
6428 dhd_arp_offload_set(dhd, dhd_arp_mode);
6429 }
6430
6431 retcode = dhd_wl_ioctl_set_intiovar(dhd, "arpoe",
6432 arp_enable, WLC_SET_VAR, TRUE, 0);
6433 retcode = retcode >= 0 ? 0 : retcode;
6434 if (retcode)
6435 DHD_ERROR(("%s: failed to enabe ARP offload to %d, retcode = %d\n",
6436 __FUNCTION__, arp_enable, retcode));
6437 else
6438 #ifdef DHD_LOG_DUMP
6439 DHD_LOG_MEM(("%s: successfully enabed ARP offload to %d\n",
6440 __FUNCTION__, arp_enable));
6441 #else
6442 DHD_ARPOE(("%s: successfully enabed ARP offload to %d\n",
6443 __FUNCTION__, arp_enable));
6444 #endif /* DHD_LOG_DUMP */
6445 if (arp_enable) {
6446 uint32 version;
6447 retcode = dhd_wl_ioctl_get_intiovar(dhd, "arp_version",
6448 &version, WLC_GET_VAR, FALSE, 0);
6449 if (retcode) {
6450 DHD_INFO(("%s: fail to get version (maybe version 1:retcode = %d\n",
6451 __FUNCTION__, retcode));
6452 dhd->arp_version = 1;
6453 }
6454 else {
6455 DHD_INFO(("%s: ARP Version= %x\n", __FUNCTION__, version));
6456 dhd->arp_version = version;
6457 }
6458 }
6459 }
6460
6461 /* XXX ANDREY: clear AOE arp_table */
6462 void
dhd_aoe_arp_clr(dhd_pub_t * dhd,int idx)6463 dhd_aoe_arp_clr(dhd_pub_t *dhd, int idx)
6464 {
6465 int ret = 0;
6466
6467 if (dhd == NULL) return;
6468 if (dhd->arp_version == 1)
6469 idx = 0;
6470
6471 ret = dhd_iovar(dhd, idx, "arp_table_clear", NULL, 0, NULL, 0, TRUE);
6472 if (ret < 0)
6473 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
6474 else {
6475 #ifdef DHD_LOG_DUMP
6476 DHD_LOG_MEM(("%s: ARP table clear\n", __FUNCTION__));
6477 #else
6478 DHD_TRACE(("%s: ARP table clear\n", __FUNCTION__));
6479 #endif /* DHD_LOG_DUMP */
6480 }
6481 /* mac address isn't cleared here but it will be cleared after dongle off */
6482 dhd->hmac_updated = 0;
6483 }
6484
6485 /* XXX ANDREY: clear hostip table */
6486 void
dhd_aoe_hostip_clr(dhd_pub_t * dhd,int idx)6487 dhd_aoe_hostip_clr(dhd_pub_t *dhd, int idx)
6488 {
6489 int ret = 0;
6490
6491 if (dhd == NULL) return;
6492 if (dhd->arp_version == 1)
6493 idx = 0;
6494
6495 ret = dhd_iovar(dhd, idx, "arp_hostip_clear", NULL, 0, NULL, 0, TRUE);
6496 if (ret < 0)
6497 DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
6498 else {
6499 #ifdef DHD_LOG_DUMP
6500 DHD_LOG_MEM(("%s: ARP host ip clear\n", __FUNCTION__));
6501 #else
6502 DHD_TRACE(("%s: ARP host ip clear\n", __FUNCTION__));
6503 #endif /* DHD_LOG_DUMP */
6504 }
6505 }
6506
6507 void
dhd_arp_offload_add_ip(dhd_pub_t * dhd,uint32 ipaddr,int idx)6508 dhd_arp_offload_add_ip(dhd_pub_t *dhd, uint32 ipaddr, int idx)
6509 {
6510 int ret;
6511
6512 if (dhd == NULL) return;
6513 if (dhd->arp_version == 1)
6514 idx = 0;
6515
6516 ret = dhd_iovar(dhd, idx, "arp_hostip", (char *)&ipaddr, sizeof(ipaddr),
6517 NULL, 0, TRUE);
6518 if (ret < 0)
6519 DHD_ERROR(("%s: ARP ip addr add failed, ret = %d\n", __FUNCTION__, ret));
6520 else {
6521 /* mac address is updated in the dongle */
6522 dhd->hmac_updated = 1;
6523 #ifdef DHD_LOG_DUMP
6524 DHD_LOG_MEM(("%s: ARP ip addr entry added \n", __FUNCTION__));
6525 #else
6526 DHD_ARPOE(("%s: ARP ip addr entry added \n", __FUNCTION__));
6527 #endif /* DHD_LOG_DUMP */
6528 }
6529 }
6530
6531 int
dhd_arp_get_arp_hostip_table(dhd_pub_t * dhd,void * buf,int buflen,int idx)6532 dhd_arp_get_arp_hostip_table(dhd_pub_t *dhd, void *buf, int buflen, int idx)
6533 {
6534 int ret, i;
6535 uint32 *ptr32 = buf;
6536 bool clr_bottom = FALSE;
6537
6538 if (!buf)
6539 return -1;
6540 if (dhd == NULL) return -1;
6541 if (dhd->arp_version == 1)
6542 idx = 0;
6543
6544 ret = dhd_iovar(dhd, idx, "arp_hostip", NULL, 0, (char *)buf, buflen,
6545 FALSE);
6546 if (ret) {
6547 DHD_ERROR(("%s: ioctl WLC_GET_VAR error %d\n",
6548 __FUNCTION__, ret));
6549
6550 return -1;
6551 }
6552
6553 /* clean up the buf, ascii reminder */
6554 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
6555 if (!clr_bottom) {
6556 if (*ptr32 == 0)
6557 clr_bottom = TRUE;
6558 } else {
6559 *ptr32 = 0;
6560 }
6561 ptr32++;
6562 }
6563
6564 return 0;
6565 }
6566 #endif /* ARP_OFFLOAD_SUPPORT */
6567
6568 /*
6569 * Neighbor Discovery Offload: enable NDO feature
6570 * Called by ipv6 event handler when interface comes up/goes down
6571 */
6572 int
dhd_ndo_enable(dhd_pub_t * dhd,int ndo_enable)6573 dhd_ndo_enable(dhd_pub_t * dhd, int ndo_enable)
6574 {
6575 int retcode;
6576
6577 if (dhd == NULL)
6578 return -1;
6579
6580 #if defined(WL_CFG80211) && defined(WL_NAN)
6581 if (wl_cfgnan_is_dp_active(dhd_linux_get_primary_netdev(dhd))) {
6582 /* If nan dp is active, skip NDO */
6583 DHD_INFO(("Active NAN DP, skip NDO\n"));
6584 return 0;
6585 }
6586 #endif /* WL_CFG80211 && WL_NAN */
6587 #ifdef WL_CFG80211
6588 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
6589 /* NDO disable on STA+SOFTAP mode */
6590 ndo_enable = FALSE;
6591 }
6592 #endif /* WL_CFG80211 */
6593 retcode = dhd_wl_ioctl_set_intiovar(dhd, "ndoe",
6594 ndo_enable, WLC_SET_VAR, TRUE, 0);
6595 if (retcode)
6596 DHD_ERROR(("%s: failed to enabe ndo to %d, retcode = %d\n",
6597 __FUNCTION__, ndo_enable, retcode));
6598 else
6599 DHD_TRACE(("%s: successfully enabed ndo offload to %d\n",
6600 __FUNCTION__, ndo_enable));
6601
6602 return retcode;
6603 }
6604
6605 /*
6606 * Neighbor Discover Offload: enable NDO feature
6607 * Called by ipv6 event handler when interface comes up
6608 */
6609 int
dhd_ndo_add_ip(dhd_pub_t * dhd,char * ipv6addr,int idx)6610 dhd_ndo_add_ip(dhd_pub_t *dhd, char* ipv6addr, int idx)
6611 {
6612 int iov_len = 0;
6613 char iovbuf[DHD_IOVAR_BUF_SIZE];
6614 int retcode;
6615
6616 if (dhd == NULL)
6617 return -1;
6618
6619 iov_len = bcm_mkiovar("nd_hostip", (char *)ipv6addr,
6620 IPV6_ADDR_LEN, iovbuf, sizeof(iovbuf));
6621 if (!iov_len) {
6622 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6623 __FUNCTION__, sizeof(iovbuf)));
6624 return -1;
6625 }
6626 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6627
6628 if (retcode)
6629 DHD_ERROR(("%s: ndo ip addr add failed, retcode = %d\n",
6630 __FUNCTION__, retcode));
6631 else
6632 DHD_TRACE(("%s: ndo ipaddr entry added \n",
6633 __FUNCTION__));
6634
6635 return retcode;
6636 }
6637
6638 /*
6639 * Neighbor Discover Offload: enable NDO feature
6640 * Called by ipv6 event handler when interface goes down
6641 */
6642 int
dhd_ndo_remove_ip(dhd_pub_t * dhd,int idx)6643 dhd_ndo_remove_ip(dhd_pub_t *dhd, int idx)
6644 {
6645 int iov_len = 0;
6646 char iovbuf[DHD_IOVAR_BUF_SIZE];
6647 int retcode;
6648
6649 if (dhd == NULL)
6650 return -1;
6651
6652 iov_len = bcm_mkiovar("nd_hostip_clear", NULL,
6653 0, iovbuf, sizeof(iovbuf));
6654 if (!iov_len) {
6655 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6656 __FUNCTION__, sizeof(iovbuf)));
6657 return -1;
6658 }
6659 retcode = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6660
6661 if (retcode)
6662 DHD_ERROR(("%s: ndo ip addr remove failed, retcode = %d\n",
6663 __FUNCTION__, retcode));
6664 else
6665 DHD_TRACE(("%s: ndo ipaddr entry removed \n",
6666 __FUNCTION__));
6667
6668 return retcode;
6669 }
/* Enhanced ND offload */
/*
 * Query the "nd_hostip" iovar version supported by the firmware.
 * Returns 0 when the versioned iovar is unsupported (legacy firmware).
 *
 * NOTE(review): on NULL dhdp or mkiovar failure this returns BCME_ERROR
 * (-1) through a uint16 return type, i.e. 0xFFFF — callers should not
 * treat that as a real version; confirm whether returning 0 was intended.
 */
uint16
dhd_ndo_get_version(dhd_pub_t *dhdp)
{
	char iovbuf[DHD_IOVAR_BUF_SIZE];
	wl_nd_hostip_t ndo_get_ver;
	int iov_len;
	int retcode;
	uint16 ver = 0;

	if (dhdp == NULL) {
		return BCME_ERROR;
	}

	memset(&iovbuf, 0, sizeof(iovbuf));
	/* build the fixed-header request asking for the iovar version */
	ndo_get_ver.version = htod16(WL_ND_HOSTIP_IOV_VER);
	ndo_get_ver.op_type = htod16(WL_ND_HOSTIP_OP_VER);
	ndo_get_ver.length = htod32(WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16));
	ndo_get_ver.u.version = 0;
	iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_get_ver,
		WL_ND_HOSTIP_FIXED_LEN + sizeof(uint16), iovbuf, sizeof(iovbuf));

	if (!iov_len) {
		DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
			__FUNCTION__, sizeof(iovbuf)));
		return BCME_ERROR;
	}

	retcode = dhd_wl_ioctl_cmd(dhdp, WLC_GET_VAR, iovbuf, iov_len, FALSE, 0);

	if (retcode) {
		DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
		/* ver iovar not supported. NDO version is 0 */
		ver = 0;
	} else {
		wl_nd_hostip_t *ndo_ver_ret = (wl_nd_hostip_t *)iovbuf;

		/* accept the reply only when the header echoes the request */
		if ((dtoh16(ndo_ver_ret->version) == WL_ND_HOSTIP_IOV_VER) &&
			(dtoh16(ndo_ver_ret->op_type) == WL_ND_HOSTIP_OP_VER) &&
			(dtoh32(ndo_ver_ret->length) == WL_ND_HOSTIP_FIXED_LEN
			+ sizeof(uint16))) {
			/* nd_hostip iovar version */
			ver = dtoh16(ndo_ver_ret->u.version);
		}

		DHD_TRACE(("%s: successfully get version: %d\n", __FUNCTION__, ver));
	}

	return ver;
}
6720
6721 int
dhd_ndo_add_ip_with_type(dhd_pub_t * dhdp,char * ipv6addr,uint8 type,int idx)6722 dhd_ndo_add_ip_with_type(dhd_pub_t *dhdp, char *ipv6addr, uint8 type, int idx)
6723 {
6724 char iovbuf[DHD_IOVAR_BUF_SIZE];
6725 wl_nd_hostip_t ndo_add_addr;
6726 int iov_len;
6727 int retcode;
6728
6729 if (dhdp == NULL || ipv6addr == 0) {
6730 return BCME_ERROR;
6731 }
6732
6733 /* wl_nd_hostip_t fixed param */
6734 ndo_add_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6735 ndo_add_addr.op_type = htod16(WL_ND_HOSTIP_OP_ADD);
6736 ndo_add_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
6737 /* wl_nd_host_ip_addr_t param for add */
6738 memcpy(&ndo_add_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
6739 ndo_add_addr.u.host_ip.type = type;
6740
6741 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_add_addr,
6742 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
6743 if (!iov_len) {
6744 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6745 __FUNCTION__, sizeof(iovbuf)));
6746 return BCME_ERROR;
6747 }
6748
6749 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6750 if (retcode) {
6751 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6752 #ifdef NDO_CONFIG_SUPPORT
6753 if (retcode == BCME_NORESOURCE) {
6754 /* number of host ip addr exceeds FW capacity, Deactivate ND offload */
6755 DHD_INFO(("%s: Host IP count exceed device capacity,"
6756 "ND offload deactivated\n", __FUNCTION__));
6757 dhdp->ndo_host_ip_overflow = TRUE;
6758 dhd_ndo_enable(dhdp, FALSE);
6759 }
6760 #endif /* NDO_CONFIG_SUPPORT */
6761 } else {
6762 DHD_TRACE(("%s: successfully added: %d\n", __FUNCTION__, retcode));
6763 }
6764
6765 return retcode;
6766 }
6767
6768 int
dhd_ndo_remove_ip_by_addr(dhd_pub_t * dhdp,char * ipv6addr,int idx)6769 dhd_ndo_remove_ip_by_addr(dhd_pub_t *dhdp, char *ipv6addr, int idx)
6770 {
6771 char iovbuf[DHD_IOVAR_BUF_SIZE];
6772 wl_nd_hostip_t ndo_del_addr;
6773 int iov_len;
6774 int retcode;
6775
6776 if (dhdp == NULL || ipv6addr == 0) {
6777 return BCME_ERROR;
6778 }
6779
6780 /* wl_nd_hostip_t fixed param */
6781 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6782 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL);
6783 ndo_del_addr.length = htod32(WL_ND_HOSTIP_WITH_ADDR_LEN);
6784 /* wl_nd_host_ip_addr_t param for del */
6785 memcpy(&ndo_del_addr.u.host_ip.ip_addr, ipv6addr, IPV6_ADDR_LEN);
6786 ndo_del_addr.u.host_ip.type = 0; /* don't care */
6787
6788 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr,
6789 WL_ND_HOSTIP_WITH_ADDR_LEN, iovbuf, sizeof(iovbuf));
6790
6791 if (!iov_len) {
6792 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6793 __FUNCTION__, sizeof(iovbuf)));
6794 return BCME_ERROR;
6795 }
6796
6797 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6798 if (retcode) {
6799 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6800 } else {
6801 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
6802 }
6803
6804 return retcode;
6805 }
6806
6807 int
dhd_ndo_remove_ip_by_type(dhd_pub_t * dhdp,uint8 type,int idx)6808 dhd_ndo_remove_ip_by_type(dhd_pub_t *dhdp, uint8 type, int idx)
6809 {
6810 char iovbuf[DHD_IOVAR_BUF_SIZE];
6811 wl_nd_hostip_t ndo_del_addr;
6812 int iov_len;
6813 int retcode;
6814
6815 if (dhdp == NULL) {
6816 return BCME_ERROR;
6817 }
6818
6819 /* wl_nd_hostip_t fixed param */
6820 ndo_del_addr.version = htod16(WL_ND_HOSTIP_IOV_VER);
6821 if (type == WL_ND_IPV6_ADDR_TYPE_UNICAST) {
6822 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_UC);
6823 } else if (type == WL_ND_IPV6_ADDR_TYPE_ANYCAST) {
6824 ndo_del_addr.op_type = htod16(WL_ND_HOSTIP_OP_DEL_AC);
6825 } else {
6826 return BCME_BADARG;
6827 }
6828 ndo_del_addr.length = htod32(WL_ND_HOSTIP_FIXED_LEN);
6829
6830 iov_len = bcm_mkiovar("nd_hostip", (char *)&ndo_del_addr, WL_ND_HOSTIP_FIXED_LEN,
6831 iovbuf, sizeof(iovbuf));
6832
6833 if (!iov_len) {
6834 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6835 __FUNCTION__, sizeof(iovbuf)));
6836 return BCME_ERROR;
6837 }
6838
6839 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, idx);
6840 if (retcode) {
6841 DHD_ERROR(("%s: failed, retcode = %d\n", __FUNCTION__, retcode));
6842 } else {
6843 DHD_TRACE(("%s: successfully removed: %d\n", __FUNCTION__, retcode));
6844 }
6845
6846 return retcode;
6847 }
6848
6849 int
dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t * dhdp,int enable)6850 dhd_ndo_unsolicited_na_filter_enable(dhd_pub_t *dhdp, int enable)
6851 {
6852 char iovbuf[DHD_IOVAR_BUF_SIZE];
6853 int iov_len;
6854 int retcode;
6855
6856 if (dhdp == NULL) {
6857 return BCME_ERROR;
6858 }
6859
6860 iov_len = bcm_mkiovar("nd_unsolicited_na_filter", (char *)&enable, sizeof(int),
6861 iovbuf, sizeof(iovbuf));
6862
6863 if (!iov_len) {
6864 DHD_ERROR(("%s: Insufficient iovar buffer size %zu \n",
6865 __FUNCTION__, sizeof(iovbuf)));
6866 return BCME_ERROR;
6867 }
6868
6869 retcode = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, iovbuf, iov_len, TRUE, 0);
6870 if (retcode)
6871 DHD_ERROR(("%s: failed to enable Unsolicited NA filter to %d, retcode = %d\n",
6872 __FUNCTION__, enable, retcode));
6873 else {
6874 DHD_TRACE(("%s: successfully enabled Unsolicited NA filter to %d\n",
6875 __FUNCTION__, enable));
6876 }
6877
6878 return retcode;
6879 }
6880 #ifdef SIMPLE_ISCAN
6881
uint iscan_thread_id = 0;	/* id of the thread driving iscan (0 = none) */
iscan_buf_t * iscan_chain = 0;	/* head of the singly-linked iscan result-buffer list */
6884
6885 iscan_buf_t *
dhd_iscan_allocate_buf(dhd_pub_t * dhd,iscan_buf_t ** iscanbuf)6886 dhd_iscan_allocate_buf(dhd_pub_t *dhd, iscan_buf_t **iscanbuf)
6887 {
6888 iscan_buf_t *iscanbuf_alloc = 0;
6889 iscan_buf_t *iscanbuf_head;
6890
6891 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
6892 dhd_iscan_lock();
6893
6894 iscanbuf_alloc = (iscan_buf_t*)MALLOC(dhd->osh, sizeof(iscan_buf_t));
6895 if (iscanbuf_alloc == NULL)
6896 goto fail;
6897
6898 iscanbuf_alloc->next = NULL;
6899 iscanbuf_head = *iscanbuf;
6900
6901 DHD_ISCAN(("%s: addr of allocated node = 0x%X"
6902 "addr of iscanbuf_head = 0x%X dhd = 0x%X\n",
6903 __FUNCTION__, iscanbuf_alloc, iscanbuf_head, dhd));
6904
6905 if (iscanbuf_head == NULL) {
6906 *iscanbuf = iscanbuf_alloc;
6907 DHD_ISCAN(("%s: Head is allocated\n", __FUNCTION__));
6908 goto fail;
6909 }
6910
6911 while (iscanbuf_head->next)
6912 iscanbuf_head = iscanbuf_head->next;
6913
6914 iscanbuf_head->next = iscanbuf_alloc;
6915
6916 fail:
6917 dhd_iscan_unlock();
6918 return iscanbuf_alloc;
6919 }
6920
6921 void
dhd_iscan_free_buf(void * dhdp,iscan_buf_t * iscan_delete)6922 dhd_iscan_free_buf(void *dhdp, iscan_buf_t *iscan_delete)
6923 {
6924 iscan_buf_t *iscanbuf_free = 0;
6925 iscan_buf_t *iscanbuf_prv = 0;
6926 iscan_buf_t *iscanbuf_cur;
6927 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
6928 DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
6929
6930 dhd_iscan_lock();
6931
6932 iscanbuf_cur = iscan_chain;
6933
6934 /* If iscan_delete is null then delete the entire
6935 * chain or else delete specific one provided
6936 */
6937 if (!iscan_delete) {
6938 while (iscanbuf_cur) {
6939 iscanbuf_free = iscanbuf_cur;
6940 iscanbuf_cur = iscanbuf_cur->next;
6941 iscanbuf_free->next = 0;
6942 MFREE(dhd->osh, iscanbuf_free, sizeof(iscan_buf_t));
6943 }
6944 iscan_chain = 0;
6945 } else {
6946 while (iscanbuf_cur) {
6947 if (iscanbuf_cur == iscan_delete)
6948 break;
6949 iscanbuf_prv = iscanbuf_cur;
6950 iscanbuf_cur = iscanbuf_cur->next;
6951 }
6952 if (iscanbuf_prv)
6953 iscanbuf_prv->next = iscan_delete->next;
6954
6955 iscan_delete->next = 0;
6956 MFREE(dhd->osh, iscan_delete, sizeof(iscan_buf_t));
6957
6958 if (!iscanbuf_prv)
6959 iscan_chain = 0;
6960 }
6961 dhd_iscan_unlock();
6962 }
6963
/* Return the head of the global iscan result buffer chain (may be NULL). */
iscan_buf_t *
dhd_iscan_result_buf(void)
{
	return iscan_chain;
}
6969
6970 int
dhd_iscan_issue_request(void * dhdp,wl_iscan_params_t * pParams,uint32 size)6971 dhd_iscan_issue_request(void * dhdp, wl_iscan_params_t *pParams, uint32 size)
6972 {
6973 int rc = -1;
6974 dhd_pub_t *dhd = dhd_bus_pub(dhdp);
6975 char *buf;
6976 char iovar[] = "iscan";
6977 uint32 allocSize = 0;
6978 wl_ioctl_t ioctl;
6979 int len;
6980
6981 if (pParams) {
6982 allocSize = (size + strlen(iovar) + 1);
6983 if ((allocSize < size) || (allocSize < strlen(iovar)))
6984 {
6985 DHD_ERROR(("%s: overflow - allocation size too large %d < %d + %d!\n",
6986 __FUNCTION__, allocSize, size, strlen(iovar)));
6987 goto cleanUp;
6988 }
6989 buf = MALLOC(dhd->osh, allocSize);
6990
6991 if (buf == NULL)
6992 {
6993 DHD_ERROR(("%s: malloc of size %d failed!\n", __FUNCTION__, allocSize));
6994 goto cleanUp;
6995 }
6996 ioctl.cmd = WLC_SET_VAR;
6997 len = bcm_mkiovar(iovar, (char *)pParams, size, buf, allocSize);
6998 if (len == 0) {
6999 rc = BCME_BUFTOOSHORT;
7000 goto cleanUp;
7001 }
7002 rc = dhd_wl_ioctl(dhd, 0, &ioctl, buf, len);
7003 }
7004
7005 cleanUp:
7006 if (buf) {
7007 MFREE(dhd->osh, buf, allocSize);
7008 }
7009
7010 return rc;
7011 }
7012
/* Fetch one batch of iscan results from the dongle into a freshly
 * allocated node appended to the global iscan_chain.  *scan_count is
 * set to the number of BSS entries in this batch.  Returns the dongle's
 * iscan status (WL_SCAN_RESULTS_*) or a negative BCME_* error.
 */
static int
dhd_iscan_get_partial_result(void *dhdp, uint *scan_count)
{
	wl_iscan_results_t *list_buf;
	wl_iscan_results_t list;
	wl_scan_results_t *results;
	iscan_buf_t *iscan_cur;
	int status = -1;
	dhd_pub_t *dhd = dhd_bus_pub(dhdp);
	int rc;
	wl_ioctl_t ioctl;
	int len;

	DHD_ISCAN(("%s: Enter\n", __FUNCTION__));

	/* Append a new result node to the global chain; on failure abort
	 * the scan and tear the chain down.
	 */
	iscan_cur = dhd_iscan_allocate_buf(dhd, &iscan_chain);
	if (!iscan_cur) {
		DHD_ERROR(("%s: Failed to allocate node\n", __FUNCTION__));
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		goto fail;
	}

	dhd_iscan_lock();

	/* Initialize the fixed result header before issuing the query */
	memset(iscan_cur->iscan_buf, 0, WLC_IW_ISCAN_MAXLEN);
	list_buf = (wl_iscan_results_t*)iscan_cur->iscan_buf;
	results = &list_buf->results;
	results->buflen = WL_ISCAN_RESULTS_FIXED_SIZE;
	results->version = 0;
	results->count = 0;

	memset(&list, 0, sizeof(list));
	list.results.buflen = htod32(WLC_IW_ISCAN_MAXLEN);
	len = bcm_mkiovar("iscanresults", (char *)&list, WL_ISCAN_RESULTS_FIXED_SIZE,
		iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);
	if (len == 0) {
		dhd_iscan_free_buf(dhdp, 0);
		dhd_iscan_request(dhdp, WL_SCAN_ACTION_ABORT);
		dhd_ind_scan_confirm(dhdp, FALSE);
		status = BCME_BUFTOOSHORT;
		goto fail;
	}
	ioctl.cmd = WLC_GET_VAR;
	ioctl.set = FALSE;
	/* NOTE(review): rc from this ioctl is never checked; on failure the
	 * status below is decoded from a partially-zeroed buffer -- confirm
	 * this is intentional.
	 */
	rc = dhd_wl_ioctl(dhd, 0, &ioctl, iscan_cur->iscan_buf, WLC_IW_ISCAN_MAXLEN);

	/* Convert dongle (little-endian) fields to host order */
	results->buflen = dtoh32(results->buflen);
	results->version = dtoh32(results->version);
	*scan_count = results->count = dtoh32(results->count);
	status = dtoh32(list_buf->status);
	DHD_ISCAN(("%s: Got %d resuls status = (%x)\n", __FUNCTION__, results->count, status));

	dhd_iscan_unlock();

	if (!(*scan_count)) {
		/* TODO: race condition when FLUSH already called */
		dhd_iscan_free_buf(dhdp, 0);
	}
fail:
	return status;
}
7076
7077 #ifdef NDIS
/* XXXX The following code has some OS dependencies.
 * Clean up by moving the OS-dependent parts into the
 * per-port code so that the iscan logic here can be
 * leveraged across all OSes.
 */
7083 NDIS_EVENT iscan_event;
7084 HANDLE tHandle;
7085 NDIS_SPIN_LOCK dhd_iscan_queue_lock;
7086
/* Serialize access to the global iscan_chain (NDIS spin lock). */
void
dhd_iscan_lock(void)
{
	NdisAcquireSpinLock(&dhd_iscan_queue_lock);
}
7092
/* Release the iscan_chain lock taken by dhd_iscan_lock(). */
void
dhd_iscan_unlock(void)
{
	NdisReleaseSpinLock(&dhd_iscan_queue_lock);
}
7098
/* Wake the iscan worker thread so it pulls the next partial results. */
void
dhd_iscan_notify(void)
{
	DHD_ISCAN(("%s: Entered\n", __FUNCTION__));
	NdisSetEvent(&iscan_event);
}
7105
/* iscan worker thread body: blocks on iscan_event, drains partial
 * results and drives the scan state machine.  Runs until the thread is
 * terminated externally (the loop itself never exits).
 */
static void
dhd_iscan_func(void *h)
{
	int status;
	uint scan_count;
	dhd_pub_t *dhd = dhd_bus_pub(h);

	/* Read the priority from registry */
	/* NOTE(review): the priority is hard-coded to 128 here rather than
	 * actually read from the registry as the comment above suggests.
	 */
	CeSetThreadPriority(GetCurrentThread(), 128);
	DHD_ISCAN(("%s: thread created\n", __FUNCTION__));

	while (TRUE) {
		NdisWaitEvent(&iscan_event, 0); /* wait forever */
		NdisResetEvent(&iscan_event); /* reset the event */
		DHD_ISCAN(("%s: thread scheduled\n", __FUNCTION__));

		status = dhd_iscan_get_partial_result(h, &scan_count);

		if (status == WL_SCAN_RESULTS_PARTIAL) {
			/* more results pending: ask the dongle to continue */
			dhd_iscan_request(h, WL_SCAN_ACTION_CONTINUE);
		} else if (status == WL_SCAN_RESULTS_SUCCESS) {
			if (dhd_iscan_in_progress(h)) {
				dhd_ind_scan_confirm(h, TRUE);
			}
		} else if (status == WL_SCAN_RESULTS_ABORTED ||
			status == WL_SCAN_RESULTS_NO_MEM) {
			dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
			dhd_ind_scan_confirm(h, FALSE);
		} else {
			/* unknown status: abort the scan defensively */
			dhd_iscan_request(h, WL_SCAN_ACTION_ABORT);
			dhd_ind_scan_confirm(h, FALSE);
		}
	}
}
7140
/* Create the iscan event, lock and worker thread.
 * Returns NDIS_STATUS_SUCCESS, or NDIS_STATUS_FAILURE when the thread
 * could not be created.
 */
int
dhd_iscan_attach(void *dhdp)
{
	/* NOTE(review): 0x%x with a pointer truncates on 64-bit hosts;
	 * debug output only.
	 */
	DHD_ISCAN(("%s: dhdp = 0x%x\n", __FUNCTION__, dhdp));

	NdisInitializeEvent(&iscan_event);
	NdisResetEvent(&iscan_event);
	NdisAllocateSpinLock(&dhd_iscan_queue_lock);

	/* XXX - should move to ndishared sublayer */
	tHandle = CreateThread(NULL,
		0,
		(LPTHREAD_START_ROUTINE)dhd_iscan_func,
		(void *)dhdp,
		0,
		&iscan_thread_id);

	if (!iscan_thread_id)
		return NDIS_STATUS_FAILURE;

	return NDIS_STATUS_SUCCESS;
}
7163
/* Tear down the event/lock/thread created by dhd_iscan_attach().
 * Safe to call when the worker thread was never started.
 */
void
dhd_iscan_deattach(void *dhdp)
{
	if (iscan_thread_id)
	{
		NdisFreeEvent(&iscan_event);
		NdisFreeSpinLock(&dhd_iscan_queue_lock);
		CloseHandle(tHandle);
		iscan_thread_id = 0;
	}
}
7175 #endif /* NDIS */
7176 #endif /* SIMPLE_ISCAN */
7177
7178 /*
7179 * returns = TRUE if associated, FALSE if not associated
7180 */
dhd_is_associated(dhd_pub_t * dhd,uint8 ifidx,int * retval)7181 bool dhd_is_associated(dhd_pub_t *dhd, uint8 ifidx, int *retval)
7182 {
7183 char bssid[6], zbuf[6];
7184 int ret = -1;
7185
7186 bzero(bssid, 6);
7187 bzero(zbuf, 6);
7188
7189 ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BSSID, (char *)&bssid,
7190 ETHER_ADDR_LEN, FALSE, ifidx);
7191 /* XXX:AS!!! res can be: -17(BCME_NOTASSOCIATED),-22(BCME_NORESOURCE), and 0(OK)
7192 OK - doesn't mean associated yet, the returned bssid
7193 still needs to be checked for non zero array
7194 */
7195 DHD_TRACE((" %s WLC_GET_BSSID ioctl res = %d\n", __FUNCTION__, ret));
7196
7197 if (ret == BCME_NOTASSOCIATED) {
7198 DHD_ERROR(("%s: WLC_GET_BSSID, NOT ASSOCIATED\n", __FUNCTION__));
7199 }
7200
7201 if (retval)
7202 *retval = ret;
7203
7204 if (ret < 0)
7205 return FALSE;
7206
7207 if ((memcmp(bssid, zbuf, ETHER_ADDR_LEN) == 0)) {
7208 DHD_TRACE(("%s: WLC_GET_BSSID ioctl returned zero bssid\n", __FUNCTION__));
7209 return FALSE;
7210 }
7211 return TRUE;
7212 }
7213
7214 /* Function to estimate possible DTIM_SKIP value */
7215 #if defined(OEM_ANDROID) && defined(BCMPCIE)
/* Estimate the bcn_li_dtim value (listen interval expressed in DTIM
 * counts) to program while suspended, from the associated AP's beacon
 * interval and DTIM period; both are also returned via dtim_period /
 * bcn_interval.  Falls back to 1 (no skipping) when not associated or
 * when the queries fail, and to 0 when DTIM-skipping in suspend is
 * explicitly disabled.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd, int *dtim_period, int *bcn_interval)
{
	int bcn_li_dtim = 1; /* default: no dtim skip */
	int ret = -1;
	int allowed_skip_dtim_cnt = 0;

	/* user explicitly disabled DTIM skipping during suspend */
	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		return bcn_li_dtim;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* caller must supply both out-parameters to get a computed value */
	if (dtim_period == NULL || bcn_interval == NULL)
		return bcn_li_dtim;

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		bcn_interval, sizeof(*bcn_interval), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* read associated AP dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		dtim_period, sizeof(*dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		return bcn_li_dtim;
	}

	/* if not associated just return */
	if (*dtim_period == 0) {
		return bcn_li_dtim;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the skip count as far as the firmware limit allows */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval)));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (*dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, *dtim_period, CUSTOM_LISTEN_INTERVAL));
			return bcn_li_dtim;
		}

		/* clamp so the total skipped time stays within the allowed window */
		if (((*dtim_period) * (*bcn_interval) * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / ((*dtim_period) * (*bcn_interval));
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * (*dtim_period)) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / *dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* per-platform config override wins over the computed value */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, *bcn_interval, bcn_li_dtim, *dtim_period, CUSTOM_LISTEN_INTERVAL));

	return bcn_li_dtim;
}
7297 #else /* OEM_ANDROID && BCMPCIE */
/* Non-PCIE variant: estimate the bcn_li_dtim value (listen interval in
 * DTIM counts) to program while suspended, from the associated AP's
 * beacon interval and DTIM period (queried locally rather than returned
 * to the caller).  Falls back to 1 (no skipping) when not associated or
 * when the queries fail, and to 0 when DTIM-skipping in suspend is
 * explicitly disabled.
 */
int
dhd_get_suspend_bcn_li_dtim(dhd_pub_t *dhd)
{
	int bcn_li_dtim = 1; /* default: no dtim skip */
	int ret = -1;
	int dtim_period = 0;
	int ap_beacon = 0;
	int allowed_skip_dtim_cnt = 0;

	/* user explicitly disabled DTIM skipping during suspend */
	if (dhd->disable_dtim_in_suspend) {
		DHD_ERROR(("%s Disable bcn_li_dtim in suspend\n", __FUNCTION__));
		bcn_li_dtim = 0;
		goto exit;
	}

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc ret %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated AP beacon interval */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_BCNPRD,
		&ap_beacon, sizeof(ap_beacon), FALSE, 0)) < 0) {
		DHD_ERROR(("%s get beacon failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* read associated ap's dtim setup */
	if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_GET_DTIMPRD,
		&dtim_period, sizeof(dtim_period), FALSE, 0)) < 0) {
		DHD_ERROR(("%s failed code %d\n", __FUNCTION__, ret));
		goto exit;
	}

	/* if not associated just exit */
	if (dtim_period == 0) {
		goto exit;
	}

	if (dhd->max_dtim_enable) {
		/* stretch the skip count as far as the firmware limit allows */
		bcn_li_dtim =
			(int) (MAX_DTIM_ALLOWED_INTERVAL / (ap_beacon * dtim_period));
		if (bcn_li_dtim == 0) {
			bcn_li_dtim = 1;
		}
	} else {
		/* attempt to use platform defined dtim skip interval */
		bcn_li_dtim = dhd->suspend_bcn_li_dtim;

		/* check if sta listen interval fits into AP dtim */
		if (dtim_period > CUSTOM_LISTEN_INTERVAL) {
			/* AP DTIM too big for our Listen Interval : no dtim skipping */
			bcn_li_dtim = NO_DTIM_SKIP;
			DHD_ERROR(("%s DTIM=%d > Listen=%d : too big ...\n",
				__FUNCTION__, dtim_period, CUSTOM_LISTEN_INTERVAL));
			goto exit;
		}

		/* clamp so the total skipped time stays within the allowed window */
		if ((dtim_period * ap_beacon * bcn_li_dtim) > MAX_DTIM_ALLOWED_INTERVAL) {
			allowed_skip_dtim_cnt =
				MAX_DTIM_ALLOWED_INTERVAL / (dtim_period * ap_beacon);
			bcn_li_dtim =
				(allowed_skip_dtim_cnt != 0) ? allowed_skip_dtim_cnt : NO_DTIM_SKIP;
		}

		if ((bcn_li_dtim * dtim_period) > CUSTOM_LISTEN_INTERVAL) {
			/* Round up dtim_skip to fit into STAs Listen Interval */
			bcn_li_dtim = (int)(CUSTOM_LISTEN_INTERVAL / dtim_period);
			DHD_TRACE(("%s agjust dtim_skip as %d\n", __FUNCTION__, bcn_li_dtim));
		}
	}

	/* per-platform config override wins over the computed value */
	if (dhd->conf->suspend_bcn_li_dtim >= 0)
		bcn_li_dtim = dhd->conf->suspend_bcn_li_dtim;
	DHD_ERROR(("%s beacon=%d bcn_li_dtim=%d DTIM=%d Listen=%d\n",
		__FUNCTION__, ap_beacon, bcn_li_dtim, dtim_period, CUSTOM_LISTEN_INTERVAL));

exit:
	return bcn_li_dtim;
}
7379 #endif /* OEM_ANDROID && BCMPCIE */
7380
7381 #ifdef CONFIG_SILENT_ROAM
/* Enable/disable silent-roam monitor mode in the firmware via the
 * "sroam" iovar (read-modify-write of wlc_sroam_t).  Silently returns
 * BCME_OK when not associated, when enabling in AP/P2P mode, or when
 * the sroam feature is turned off; BCME_NOMEM/BCME_VERSION/ioctl errors
 * otherwise.
 */
int
dhd_sroam_set_mon(dhd_pub_t *dhd, bool set)
{
	int ret = BCME_OK;
	wlc_sroam_t *psroam;
	wlc_sroam_info_t *sroam;
	uint sroamlen = sizeof(*sroam) + SROAM_HDRLEN;

	/* Check if associated */
	if (dhd_is_associated(dhd, 0, NULL) == FALSE) {
		DHD_TRACE(("%s NOT assoc\n", __FUNCTION__));
		return ret;
	}

	/* sroam monitoring is meaningless for AP / P2P roles */
	if (set && (dhd->op_mode &
		(DHD_FLAG_HOSTAP_MODE | DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE))) {
		DHD_INFO((" Failed to set sroam %d, op_mode 0x%04x\n", set, dhd->op_mode));
		return ret;
	}

	if (!dhd->sroam_turn_on) {
		DHD_INFO((" Failed to set sroam %d, sroam turn %d\n", set, dhd->sroam_turn_on));
		return ret;
	}
	psroam = (wlc_sroam_t *)MALLOCZ(dhd->osh, sroamlen);
	if (!psroam) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		return BCME_NOMEM;
	}

	/* read current sroam config so only sroam_on is modified */
	ret = dhd_iovar(dhd, 0, "sroam", NULL, 0, (char *)psroam, sroamlen, FALSE);
	if (ret < 0) {
		DHD_ERROR(("%s Failed to Get sroam %d\n", __FUNCTION__, ret));
		goto done;
	}

	if (psroam->ver != WLC_SILENT_ROAM_CUR_VER) {
		ret = BCME_VERSION;
		goto done;
	}

	sroam = (wlc_sroam_info_t *)psroam->data;
	sroam->sroam_on = set;
	DHD_INFO((" Silent roam monitor mode %s\n", set ? "On" : "Off"));

	/* write the updated config back to the firmware */
	ret = dhd_iovar(dhd, 0, "sroam", (char *)psroam, sroamlen, NULL, 0, TRUE);
	if (ret < 0) {
		DHD_ERROR(("%s Failed to Set sroam %d\n", __FUNCTION__, ret));
	}

done:
	if (psroam) {
		MFREE(dhd->osh, psroam, sroamlen);
	}

	return ret;
}
7439 #endif /* CONFIG_SILENT_ROAM */
7440
7441 /* Check if the mode supports STA MODE */
dhd_support_sta_mode(dhd_pub_t * dhd)7442 bool dhd_support_sta_mode(dhd_pub_t *dhd)
7443 {
7444
7445 #ifdef WL_CFG80211
7446 if (!(dhd->op_mode & DHD_FLAG_STA_MODE))
7447 return FALSE;
7448 else
7449 #endif /* WL_CFG80211 */
7450 return TRUE;
7451 }
7452
7453 #if defined(KEEP_ALIVE)
dhd_keep_alive_onoff(dhd_pub_t * dhd)7454 int dhd_keep_alive_onoff(dhd_pub_t *dhd)
7455 {
7456 char buf[32] = {0};
7457 const char *str;
7458 wl_mkeep_alive_pkt_t mkeep_alive_pkt = {0, 0, 0, 0, 0, {0}};
7459 wl_mkeep_alive_pkt_t *mkeep_alive_pktp;
7460 int buf_len;
7461 int str_len;
7462 int res = -1;
7463
7464 if (!dhd_support_sta_mode(dhd))
7465 return res;
7466
7467 DHD_TRACE(("%s execution\n", __FUNCTION__));
7468
7469 str = "mkeep_alive";
7470 str_len = strlen(str);
7471 strlcpy(buf, str, sizeof(buf));
7472 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (buf + str_len + 1);
7473 mkeep_alive_pkt.period_msec = dhd->conf->keep_alive_period;
7474 buf_len = str_len + 1;
7475 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
7476 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
7477 /* Setup keep alive zero for null packet generation */
7478 mkeep_alive_pkt.keep_alive_id = 0;
7479 mkeep_alive_pkt.len_bytes = 0;
7480 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
7481 bzero(mkeep_alive_pkt.data, sizeof(mkeep_alive_pkt.data));
7482 /* Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
7483 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
7484 * guarantee that the buffer is properly aligned.
7485 */
7486 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
7487
7488 res = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, buf, buf_len, TRUE, 0);
7489
7490 return res;
7491 }
7492 #endif /* defined(KEEP_ALIVE) */
7493 #if defined(OEM_ANDROID)
7494 #define CSCAN_TLV_TYPE_SSID_IE 'S'
7495 /*
7496 * SSIDs list parsing from cscan tlv list
7497 */
/* Parse CSCAN_TLV_TYPE_SSID_IE entries from *list_str into ssid[] (at
 * most 'max').  Each entry is 'S', one length byte, then SSID bytes; a
 * zero length means a broadcast (wildcard) entry.  On return *list_str
 * and *bytes_left are advanced past the consumed entries.  Returns the
 * number of SSIDs parsed, or BCME_BADARG on malformed input.
 *
 * NOTE(review): unlike the other TLV parsers below, bytes_left is not
 * NULL-checked here -- confirm all callers pass a valid pointer.
 */
int
wl_parse_ssid_list_tlv(char** list_str, wlc_ssid_ext_t* ssid, int max, int *bytes_left)
{
	char* str;
	int idx = 0;
	uint8 len;

	if ((list_str == NULL) || (*list_str == NULL) || (*bytes_left < 0)) {
		DHD_ERROR(("%s error paramters\n", __FUNCTION__));
		return BCME_BADARG;
	}
	str = *list_str;
	while (*bytes_left > 0) {
		/* a different type byte ends the SSID section; hand the
		 * cursor back to the caller
		 */
		if (str[0] != CSCAN_TLV_TYPE_SSID_IE) {
			*list_str = str;
			DHD_TRACE(("nssid=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
			return idx;
		}

		if (idx >= max) {
			DHD_ERROR(("%s number of SSIDs more than %d\n", __FUNCTION__, idx));
			return BCME_BADARG;
		}

		/* Get proper CSCAN_TLV_TYPE_SSID_IE */
		*bytes_left -= 1;
		if (*bytes_left == 0) {
			DHD_ERROR(("%s no length field.\n", __FUNCTION__));
			return BCME_BADARG;
		}
		str += 1;
		ssid[idx].rssi_thresh = 0;
		ssid[idx].flags = 0;
		len = str[0];
		if (len == 0) {
			/* Broadcast SSID */
			ssid[idx].SSID_len = 0;
			memset((char*)ssid[idx].SSID, 0x0, DOT11_MAX_SSID_LEN);
			*bytes_left -= 1;
			str += 1;

			DHD_TRACE(("BROADCAST SCAN left=%d\n", *bytes_left));
		} else if (len <= DOT11_MAX_SSID_LEN) {
			/* Get proper SSID size */
			ssid[idx].SSID_len = len;
			*bytes_left -= 1;
			/* Get SSID; reject entries whose declared length
			 * exceeds the remaining input
			 */
			if (ssid[idx].SSID_len > *bytes_left) {
				DHD_ERROR(("%s out of memory range len=%d but left=%d\n",
					__FUNCTION__, ssid[idx].SSID_len, *bytes_left));
				return BCME_BADARG;
			}
			str += 1;
			memcpy((char*)ssid[idx].SSID, str, ssid[idx].SSID_len);

			*bytes_left -= ssid[idx].SSID_len;
			str += ssid[idx].SSID_len;
			/* directed (hidden-capable) probe for explicit SSIDs */
			ssid[idx].hidden = TRUE;

			DHD_TRACE(("%s :size=%d left=%d\n",
				(char*)ssid[idx].SSID, ssid[idx].SSID_len, *bytes_left));
		} else {
			DHD_ERROR(("### SSID size more than %d\n", str[0]));
			return BCME_BADARG;
		}
		idx++;
	}

	*list_str = str;
	return idx;
}
7569
7570 #if defined(WL_WIRELESS_EXT)
7571 /* Android ComboSCAN support */
7572
7573 /*
7574 * data parsing from ComboScan tlv list
7575 */
7576 int
wl_iw_parse_data_tlv(char ** list_str,void * dst,int dst_size,const char token,int input_size,int * bytes_left)7577 wl_iw_parse_data_tlv(char** list_str, void *dst, int dst_size, const char token,
7578 int input_size, int *bytes_left)
7579 {
7580 char* str;
7581 uint16 short_temp;
7582 uint32 int_temp;
7583
7584 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
7585 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
7586 return -1;
7587 }
7588 str = *list_str;
7589
7590 /* Clean all dest bytes */
7591 memset(dst, 0, dst_size);
7592 if (*bytes_left > 0) {
7593
7594 if (str[0] != token) {
7595 DHD_TRACE(("%s NOT Type=%d get=%d left_parse=%d \n",
7596 __FUNCTION__, token, str[0], *bytes_left));
7597 return -1;
7598 }
7599
7600 *bytes_left -= 1;
7601 str += 1;
7602
7603 if (input_size == 1) {
7604 memcpy(dst, str, input_size);
7605 }
7606 else if (input_size == 2) {
7607 memcpy(dst, (char *)htod16(memcpy(&short_temp, str, input_size)),
7608 input_size);
7609 }
7610 else if (input_size == 4) {
7611 memcpy(dst, (char *)htod32(memcpy(&int_temp, str, input_size)),
7612 input_size);
7613 }
7614
7615 *bytes_left -= input_size;
7616 str += input_size;
7617 *list_str = str;
7618 return 1;
7619 }
7620 return 1;
7621 }
7622
7623 /*
7624 * channel list parsing from cscan tlv list
7625 */
7626 int
wl_iw_parse_channel_list_tlv(char ** list_str,uint16 * channel_list,int channel_num,int * bytes_left)7627 wl_iw_parse_channel_list_tlv(char** list_str, uint16* channel_list,
7628 int channel_num, int *bytes_left)
7629 {
7630 char* str;
7631 int idx = 0;
7632
7633 if ((list_str == NULL) || (*list_str == NULL) ||(bytes_left == NULL) || (*bytes_left < 0)) {
7634 DHD_ERROR(("%s error paramters\n", __FUNCTION__));
7635 return -1;
7636 }
7637 str = *list_str;
7638
7639 while (*bytes_left > 0) {
7640
7641 if (str[0] != CSCAN_TLV_TYPE_CHANNEL_IE) {
7642 *list_str = str;
7643 DHD_TRACE(("End channel=%d left_parse=%d %d\n", idx, *bytes_left, str[0]));
7644 return idx;
7645 }
7646 /* Get proper CSCAN_TLV_TYPE_CHANNEL_IE */
7647 *bytes_left -= 1;
7648 str += 1;
7649
7650 if (str[0] == 0) {
7651 /* All channels */
7652 channel_list[idx] = 0x0;
7653 }
7654 else {
7655 channel_list[idx] = (uint16)str[0];
7656 DHD_TRACE(("%s channel=%d \n", __FUNCTION__, channel_list[idx]));
7657 }
7658 *bytes_left -= 1;
7659 str += 1;
7660
7661 if (idx++ > 255) {
7662 DHD_ERROR(("%s Too many channels \n", __FUNCTION__));
7663 return -1;
7664 }
7665 }
7666
7667 *list_str = str;
7668 return idx;
7669 }
7670
7671 /* Parse a comma-separated list from list_str into ssid array, starting
7672 * at index idx. Max specifies size of the ssid array. Parses ssids
7673 * and returns updated idx; if idx >= max not all fit, the excess have
7674 * not been copied. Returns -1 on empty string, or on ssid too long.
7675 */
7676 int
wl_iw_parse_ssid_list(char ** list_str,wlc_ssid_t * ssid,int idx,int max)7677 wl_iw_parse_ssid_list(char** list_str, wlc_ssid_t* ssid, int idx, int max)
7678 {
7679 char* str, *ptr;
7680
7681 if ((list_str == NULL) || (*list_str == NULL))
7682 return -1;
7683
7684 for (str = *list_str; str != NULL; str = ptr) {
7685
7686 /* check for next TAG */
7687 if (!strncmp(str, GET_CHANNEL, strlen(GET_CHANNEL))) {
7688 *list_str = str + strlen(GET_CHANNEL);
7689 return idx;
7690 }
7691
7692 if ((ptr = strchr(str, ',')) != NULL) {
7693 *ptr++ = '\0';
7694 }
7695
7696 if (strlen(str) > DOT11_MAX_SSID_LEN) {
7697 DHD_ERROR(("ssid <%s> exceeds %d\n", str, DOT11_MAX_SSID_LEN));
7698 return -1;
7699 }
7700
7701 if (strlen(str) == 0)
7702 ssid[idx].SSID_len = 0;
7703
7704 if (idx < max) {
7705 bzero(ssid[idx].SSID, sizeof(ssid[idx].SSID));
7706 strlcpy((char*)ssid[idx].SSID, str, sizeof(ssid[idx].SSID));
7707 ssid[idx].SSID_len = sizeof(ssid[idx].SSID);
7708 }
7709 idx++;
7710 }
7711 return idx;
7712 }
7713
7714 /*
7715 * Parse channel list from iwpriv CSCAN
7716 */
7717 int
wl_iw_parse_channel_list(char ** list_str,uint16 * channel_list,int channel_num)7718 wl_iw_parse_channel_list(char** list_str, uint16* channel_list, int channel_num)
7719 {
7720 int num;
7721 int val;
7722 char* str;
7723 char* endptr = NULL;
7724
7725 if ((list_str == NULL)||(*list_str == NULL))
7726 return -1;
7727
7728 str = *list_str;
7729 num = 0;
7730 while (strncmp(str, GET_NPROBE, strlen(GET_NPROBE))) {
7731 val = (int)strtoul(str, &endptr, 0);
7732 if (endptr == str) {
7733 printf("could not parse channel number starting at"
7734 " substring \"%s\" in list:\n%s\n",
7735 str, *list_str);
7736 return -1;
7737 }
7738 str = endptr + strspn(endptr, " ,");
7739
7740 if (num == channel_num) {
7741 DHD_ERROR(("too many channels (more than %d) in channel list:\n%s\n",
7742 channel_num, *list_str));
7743 return -1;
7744 }
7745
7746 channel_list[num++] = (uint16)val;
7747 }
7748 *list_str = str;
7749 return num;
7750 }
7751 #endif
7752 #endif /* defined(OEM_ANDROID) */
7753
7754 #if defined(BCM_ROUTER_DHD)
traffic_mgmt_add_dwm_filter(dhd_pub_t * dhd,trf_mgmt_filter_list_t * trf_mgmt_filter_list,int len)7755 static int traffic_mgmt_add_dwm_filter(dhd_pub_t *dhd,
7756 trf_mgmt_filter_list_t * trf_mgmt_filter_list, int len)
7757 {
7758 int ret = 0;
7759 uint32 i;
7760 trf_mgmt_filter_t *trf_mgmt_filter;
7761 uint8 dwm_tbl_entry;
7762 uint32 dscp = 0;
7763 uint16 dwm_filter_enabled = 0;
7764
7765 /* Check parameter length is adequate */
7766 if (len < (OFFSETOF(trf_mgmt_filter_list_t, filter) +
7767 trf_mgmt_filter_list->num_filters * sizeof(trf_mgmt_filter_t))) {
7768 ret = BCME_BUFTOOSHORT;
7769 return ret;
7770 }
7771
7772 bzero(&dhd->dhd_tm_dwm_tbl, sizeof(dhd_trf_mgmt_dwm_tbl_t));
7773
7774 for (i = 0; i < trf_mgmt_filter_list->num_filters; i++) {
7775 trf_mgmt_filter = &trf_mgmt_filter_list->filter[i];
7776
7777 dwm_filter_enabled = (trf_mgmt_filter->flags & TRF_FILTER_DWM);
7778
7779 if (dwm_filter_enabled) {
7780 dscp = trf_mgmt_filter->dscp;
7781 if (dscp >= DHD_DWM_TBL_SIZE) {
7782 ret = BCME_BADARG;
7783 return ret;
7784 }
7785 }
7786
7787 dhd->dhd_tm_dwm_tbl.dhd_dwm_enabled = 1;
7788 /* set WMM AC bits */
7789 dwm_tbl_entry = (uint8) trf_mgmt_filter->priority;
7790 DHD_TRF_MGMT_DWM_SET_FILTER(dwm_tbl_entry);
7791
7792 /* set favored bits */
7793 if (trf_mgmt_filter->flags & TRF_FILTER_FAVORED)
7794 DHD_TRF_MGMT_DWM_SET_FAVORED(dwm_tbl_entry);
7795
7796 dhd->dhd_tm_dwm_tbl.dhd_dwm_tbl[dscp] = dwm_tbl_entry;
7797 }
7798 return ret;
7799 }
7800 #endif /* BCM_ROUTER_DHD */
7801
7802 #ifdef DHD_LINUX_STD_FW_API
dhd_get_download_buffer(dhd_pub_t * dhd,char * file_path,download_type_t component,char ** buffer,int * length)7803 int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
7804 char ** buffer, int *length)
7805 {
7806 int ret = BCME_ERROR;
7807 const struct firmware *fw = NULL;
7808 #ifdef SUPPORT_OTA_UPDATE
7809 uint8 *buf = NULL;
7810 int len = 0;
7811 ota_update_info_t *ota_info = &dhd->ota_update_info;
7812 #endif /* SUPPORT_OTA_UPDATE */
7813
7814 #ifdef SUPPORT_OTA_UPDATE
7815 if (component == CLM_BLOB) {
7816 if (ota_info->clm_len) {
7817 DHD_ERROR(("Using OTA CLM_BLOB\n"));
7818 buf = ota_info->clm_buf;
7819 len = ota_info->clm_len;
7820 }
7821 }
7822 else if (component == NVRAM) {
7823 if (ota_info->nvram_len) {
7824 DHD_ERROR(("Using OTA NVRAM.\n"));
7825 buf = ota_info->nvram_buf;
7826 len = ota_info->nvram_len;
7827 }
7828 }
7829 #endif /* SUPPORT_OTA_UPDATE */
7830
7831 #ifdef SUPPORT_OTA_UPDATE
7832 if (len) {
7833 *buffer = (char *)buf;
7834 *length = len;
7835 }
7836 else
7837 #endif /* SUPPORT_OTA_UPDATE */
7838 {
7839 if (file_path) {
7840 ret = dhd_os_get_img_fwreq(&fw, file_path);
7841 if (ret < 0) {
7842 DHD_ERROR(("dhd_os_get_img(Request Firmware API) error : %d\n",
7843 ret));
7844 goto err;
7845 } else {
7846 if ((fw->size <= 0 || fw->size > *length)) {
7847 DHD_ERROR(("fw->size = %zu, *length = %d\n", fw->size, *length));
7848 *length = fw->size;
7849 goto err;
7850 }
7851 *buffer = VMALLOCZ(dhd->osh, fw->size);
7852 if (*buffer == NULL) {
7853 DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
7854 __FUNCTION__, (int)fw->size));
7855 ret = BCME_NOMEM;
7856 goto err;
7857 }
7858 *length = fw->size;
7859 ret = memcpy_s(*buffer, fw->size, fw->data, fw->size);
7860 if (ret != BCME_OK) {
7861 DHD_ERROR(("%s: memcpy_s failed, err : %d\n",
7862 __FUNCTION__, ret));
7863 goto err;
7864 }
7865 ret = BCME_OK;
7866 }
7867 }
7868 }
7869 err:
7870 if (fw) {
7871 dhd_os_close_img_fwreq(fw);
7872 }
7873 return ret;
7874 }
7875
7876 #else
7877
/* Given filename and download type, returns a buffer pointer and length
 * for download to f/w. Type can be FW or NVRAM.
 *
 * On entry *length is the maximum size the caller can accept; on success
 * it is set to the actual image size and *buffer points to a MALLOCZ'ed
 * buffer.  With CACHE_FW_IMAGES the buffer may instead come from (and be
 * retained by) the per-component cache.
 */
int dhd_get_download_buffer(dhd_pub_t *dhd, char *file_path, download_type_t component,
	char ** buffer, int *length)

{
	int ret = BCME_ERROR;
	int len = 0;
	int file_len;
	void *image = NULL;
	uint8 *buf = NULL;

	/* Point to cache if available. */
#ifdef CACHE_FW_IMAGES
	if (component == FW) {
		if (dhd->cached_fw_length) {
			len = dhd->cached_fw_length;
			buf = dhd->cached_fw;
		}
	} else if (component == NVRAM) {
		if (dhd->cached_nvram_length) {
			len = dhd->cached_nvram_length;
			buf = dhd->cached_nvram;
		}
	} else if (component == CLM_BLOB) {
		if (dhd->cached_clm_length) {
			len = dhd->cached_clm_length;
			buf = dhd->cached_clm;
		}
	} else if (component == TXCAP_BLOB) {
		if (dhd->cached_txcap_length) {
			len = dhd->cached_txcap_length;
			buf = dhd->cached_txcap;
		}
	} else {
		DHD_ERROR(("%s: Invalid component arg %d\n",
			__FUNCTION__, component));
		ret = BCME_BADARG;
		return ret;
	}
#endif /* CACHE_FW_IMAGES */
	/* No Valid cache found on this call */
	if (!len) {
		file_len = *length;
		*length = 0;

		if (file_path) {
			image = dhd_os_open_image1(dhd, file_path);
			if (image == NULL) {
				printf("%s: Open image file failed %s\n", __FUNCTION__, file_path);
				goto err;
			}
		}

		buf = MALLOCZ(dhd->osh, file_len);
		if (buf == NULL) {
			DHD_ERROR(("%s: Failed to allocate memory %d bytes\n",
				__FUNCTION__, file_len));
			goto err;
		}

		/* Download image */
#if defined(BCMEMBEDIMAGE) && defined(DHD_EFI)
		/* EFI build with no file: fall back to the embedded NVRAM blob */
		if (!image) {
			memcpy(buf, nvram_arr, sizeof(nvram_arr));
			len = sizeof(nvram_arr);
		} else {
			len = dhd_os_get_image_block((char *)buf, file_len, image);
			if ((len <= 0 || len > file_len)) {
				MFREE(dhd->osh, buf, file_len);
				goto err;
			}
		}
#else
		/* NOTE(review): when file_path is NULL, image is NULL here and is
		 * passed straight to dhd_os_get_image_block -- confirm the OS
		 * layer tolerates a NULL image handle on this path.
		 */
		len = dhd_os_get_image_block((char *)buf, file_len, image);
		if ((len <= 0 || len > file_len)) {
			MFREE(dhd->osh, buf, file_len);
			goto err;
		}
#endif /* DHD_EFI */
	}

	ret = BCME_OK;
	*length = len;
	*buffer = (char *)buf;

	/* Cache if first call. */
#ifdef CACHE_FW_IMAGES
	if (component == FW) {
		if (!dhd->cached_fw_length) {
			dhd->cached_fw = buf;
			dhd->cached_fw_length = len;
		}
	} else if (component == NVRAM) {
		if (!dhd->cached_nvram_length) {
			dhd->cached_nvram = buf;
			dhd->cached_nvram_length = len;
		}
	} else if (component == CLM_BLOB) {
		if (!dhd->cached_clm_length) {
			dhd->cached_clm = buf;
			dhd->cached_clm_length = len;
		}
	} else if (component == TXCAP_BLOB) {
		if (!dhd->cached_txcap_length) {
			dhd->cached_txcap = buf;
			dhd->cached_txcap_length = len;
		}
	}
#endif /* CACHE_FW_IMAGES */

err:
	if (image)
		dhd_os_close_image1(dhd, image);

	return ret;
}
7997 #endif /* DHD_LINUX_STD_FW_API */
7998
/* Wrap dload_buf (which already reserves wl_dload_data_t header space)
 * and push it to the dongle through the given iovar.  'flag' carries the
 * DL_BEGIN/DL_END chunk flags, 'dload_type' identifies the blob type.
 * Returns the ioctl result or BCME_BUFTOOSHORT.
 */
int
dhd_download_2_dongle(dhd_pub_t *dhd, char *iovar, uint16 flag, uint16 dload_type,
	unsigned char *dload_buf, int len)
{
	struct wl_dload_data *dload_ptr = (struct wl_dload_data *)dload_buf;
	int err = 0;
	int dload_data_offset;
	/* NOTE(review): static buffer makes this function non-reentrant;
	 * confirm all callers run serialized on a single download path.
	 */
	static char iovar_buf[WLC_IOCTL_MEDLEN];
	int iovar_len;

	memset(iovar_buf, 0, sizeof(iovar_buf));

	/* fill in the dload header in place at the front of dload_buf */
	dload_data_offset = OFFSETOF(wl_dload_data_t, data);
	dload_ptr->flag = (DLOAD_HANDLER_VER << DLOAD_FLAG_VER_SHIFT) | flag;
	dload_ptr->dload_type = dload_type;
	dload_ptr->len = htod32(len - dload_data_offset);
	dload_ptr->crc = 0;
	/* firmware expects 8-byte aligned transfer lengths */
	len = ROUNDUP(len, 8);

	iovar_len = bcm_mkiovar(iovar, (char *)dload_buf,
		(uint)len, iovar_buf, sizeof(iovar_buf));
	if (iovar_len == 0) {
		DHD_ERROR(("%s: insufficient buffer space passed to bcm_mkiovar for '%s' \n",
			__FUNCTION__, iovar));
		return BCME_BUFTOOSHORT;
	}

	err = dhd_wl_ioctl_cmd(dhd, WLC_SET_VAR, iovar_buf,
		iovar_len, IOV_SET, 0);

	return err;
}
8031
8032 int
dhd_download_blob(dhd_pub_t * dhd,unsigned char * buf,uint32 len,char * iovar)8033 dhd_download_blob(dhd_pub_t *dhd, unsigned char *buf,
8034 uint32 len, char *iovar)
8035
8036 {
8037 int chunk_len;
8038 #if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
8039 int cumulative_len = 0;
8040 #endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */
8041 int size2alloc;
8042 unsigned char *new_buf;
8043 int err = 0, data_offset;
8044 uint16 dl_flag = DL_BEGIN;
8045
8046 data_offset = OFFSETOF(wl_dload_data_t, data);
8047 size2alloc = data_offset + MAX_CHUNK_LEN;
8048 size2alloc = ROUNDUP(size2alloc, 8);
8049
8050 if ((new_buf = (unsigned char *)MALLOCZ(dhd->osh, size2alloc)) != NULL) {
8051 do {
8052 #if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
8053 if (len >= MAX_CHUNK_LEN)
8054 chunk_len = MAX_CHUNK_LEN;
8055 else
8056 chunk_len = len;
8057
8058 memcpy(new_buf + data_offset, buf + cumulative_len, chunk_len);
8059 cumulative_len += chunk_len;
8060 #else
8061 chunk_len = dhd_os_get_image_block((char *)(new_buf + data_offset),
8062 MAX_CHUNK_LEN, buf);
8063 if (chunk_len < 0) {
8064 DHD_ERROR(("%s: dhd_os_get_image_block failed (%d)\n",
8065 __FUNCTION__, chunk_len));
8066 err = BCME_ERROR;
8067 goto exit;
8068 }
8069 #endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */
8070 if (len - chunk_len == 0)
8071 dl_flag |= DL_END;
8072
8073 err = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
8074 new_buf, data_offset + chunk_len);
8075
8076 dl_flag &= ~DL_BEGIN;
8077
8078 len = len - chunk_len;
8079 } while ((len > 0) && (err == 0));
8080 #if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
8081 MFREE(dhd->osh, new_buf, size2alloc);
8082 #endif /* !LINUX && !linux */
8083 } else {
8084 err = BCME_NOMEM;
8085 }
8086 #if (defined(LINUX) || defined(linux)) && !defined(DHD_LINUX_STD_FW_API)
8087 exit:
8088 if (new_buf) {
8089 MFREE(dhd->osh, new_buf, size2alloc);
8090 }
8091 #endif /* LINUX || linux */
8092 return err;
8093 }
8094
8095 #if defined(CACHE_FW_IMAGES)
/**
 * Download a blob to the dongle using the cached-image path
 * (CACHE_FW_IMAGES builds): the file is fetched once through
 * dhd_get_download_buffer() and then chunked to the firmware.
 *
 * @param dhd       dhd public context
 * @param file_path path of the blob file on the host filesystem
 * @param len       maximum number of bytes to read/download
 * @param iovar     "clmload" or "txcapload"; anything else is rejected
 * @return BCME_OK on success, BCME_xx error code otherwise
 *
 * NOTE: memblock is intentionally not freed here — under CACHE_FW_IMAGES
 * the buffer is owned by the dhd image cache (see dhd_free_download_buffer,
 * which is a no-op in these builds).
 */
int
dhd_download_blob_cached(dhd_pub_t *dhd, char *file_path,
	uint32 len, char *iovar)
{
	int ret = BCME_ERROR;
	uint chunk_len, size2alloc, data_offset, file_offset;
	unsigned char *pay_load, *dnld_buf;
	char *memblock;
	uint16 dl_flag = DL_BEGIN;
	download_type_t dl_type;

	data_offset = OFFSETOF(wl_dload_data_t, data);
	size2alloc = data_offset + MAX_CHUNK_LEN;
	/* dhd_download_2_dongle() sends lengths rounded up to 8 bytes */
	size2alloc = ROUNDUP(size2alloc, 8);
	file_offset = 0;

	if ((dnld_buf = MALLOCZ(dhd->osh, size2alloc)) == NULL) {
		ret = BCME_NOMEM;
		goto exit;
	}
	/* Payload is written after the wl_dload_data_t header */
	pay_load = (dnld_buf + data_offset);

	/* Map the iovar name to the cache component type */
	if (!memcmp("clmload", iovar, strlen("clmload"))) {
		dl_type = CLM_BLOB;
	} else if (!memcmp("txcapload", iovar, strlen("txcapload"))) {
		dl_type = TXCAP_BLOB;
	} else {
		DHD_ERROR(("%s Invalid iovar :%s \n", __FUNCTION__, iovar));
		ret = BCME_BADARG;
		goto exit;
	}

	/* On success 'len' is updated to the actual image length */
	ret = dhd_get_download_buffer(dhd, file_path, dl_type, &memblock, (int *)&len);
	if (ret != BCME_OK) {
		DHD_ERROR(("%s: error getting buffer for %s, %s \n", __FUNCTION__,
			file_path, bcmerrorstr(ret)));
		goto exit;
	}

	/* Chunk the image; first chunk carries DL_BEGIN, last carries DL_END */
	do {
		chunk_len = MIN(len, MAX_CHUNK_LEN);
		memcpy(pay_load, memblock + file_offset, chunk_len);
		if (len - chunk_len == 0) {
			dl_flag |= DL_END;
		}

		/* NOTE(review): DL_TYPE_CLM is passed even when iovar is
		 * "txcapload" — presumably the dongle handler ignores this
		 * field for txcap; confirm against firmware dload handling.
		 */
		ret = dhd_download_2_dongle(dhd, iovar, dl_flag, DL_TYPE_CLM,
			dnld_buf, data_offset + chunk_len);

		dl_flag &= ~DL_BEGIN;
		len = len - chunk_len;
		file_offset += chunk_len;
	} while ((len > 0) && (ret == 0));

exit:
	if (dnld_buf) {
		MFREE(dhd->osh, dnld_buf, size2alloc);
	}

	return ret;
}
8157
8158 int
dhd_apply_default_txcap(dhd_pub_t * dhd,char * path)8159 dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
8160 {
8161 int ret = BCME_ERROR;
8162 ret = dhd_download_blob_cached(dhd, path, MAX_TXCAP_BUF_SIZE, "txcapload");
8163 if (ret) {
8164 DHD_ERROR(("%s: error downloading blob: %s \n", __FUNCTION__, bcmerrorstr(ret)));
8165 }
8166 return ret;
8167 }
8168
/**
 * Download the default CLM blob to the dongle (CACHE_FW_IMAGES variant)
 * and verify that the firmware ends up with a valid country code.
 *
 * If a blob file is present it is downloaded via the cached path; whether
 * or not a file was found, the "country" iovar is then queried and
 * initialization fails if the country code is still the null country.
 *
 * @param dhd      dhd public context
 * @param clm_path path from module parameter; empty string selects the
 *                 built-in default VENDOR_PATH CONFIG_BCMDHD_CLM_PATH
 * @return BCME_OK on success, BCME_xx error code otherwise
 */
int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	char *clm_blob_path;
	int len;
	unsigned char *imgbuf = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	wl_country_t *cspec;

	/* Pick the blob path: module parameter wins over the built-in default */
	if (clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
	}

	/* If CLM blob file is found on the filesystem, download the file.
	 * After CLM file download or If the blob file is not present,
	 * validate the country code before proceeding with the initialization.
	 * If country code is not valid, fail the initialization.
	 */

	/* Open only to learn the image size; the cached download path below
	 * re-reads the file itself.
	 */
	imgbuf = dhd_os_open_image((char *)clm_blob_path);
	if (imgbuf == NULL) {
		/* No blob file: fall through to the country-code verification
		 * at 'exit' via the final checks below? No — err is BCME_OK and
		 * we jump straight out. NOTE(review): this skips the country
		 * verification when the blob is absent — confirm intended.
		 */
		goto exit;
	}

	len = dhd_os_get_image_size(imgbuf);

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && imgbuf) {
		/* Query the current country to warn if CLM is already present */
		len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
		if (len == 0) {
			err = BCME_BUFTOOSHORT;
			goto exit;
		}
		err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
		if (err) {
			DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
			goto exit;
		}

		cspec = (wl_country_t *)iovbuf;
		if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) != 0) {
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
		}

		/* Found blob file. Download the file */
		DHD_ERROR(("clm file download from %s \n", clm_blob_path));
		/* Close the probe handle before the cached path reopens it */
		if (imgbuf) {
			dhd_os_close_image(imgbuf);
			imgbuf = NULL;
		}
		err = dhd_download_blob_cached(dhd, clm_blob_path, MAX_CLM_BUF_SIZE, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			if (!dhd_bus_skip_clm(dhd)) {
				/* Retrieve clmload_status and print */
				len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf,
					sizeof(iovbuf));
				if (len == 0) {
					err = BCME_BUFTOOSHORT;
					goto exit;
				}
				err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf,
					sizeof(iovbuf), FALSE, 0);
				if (err) {
					DHD_ERROR(("%s: clmload_status get failed err=%d \n",
						__FUNCTION__, err));
				} else {
					DHD_ERROR(("%s: clmload_status: %d \n",
						__FUNCTION__, *((int *)iovbuf)));
					if (*((int *)iovbuf) == CHIPID_MISMATCH) {
						DHD_ERROR(("Chip ID mismatch error \n"));
					}
				}
				/* Download failure is fatal regardless of status query */
				err = BCME_ERROR;
				goto exit;
			}
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, imgbuf));
#ifdef DHD_USE_CLMINFO_PARSER
		/* Parser builds require a valid blob; treat absence as fatal */
		err = BCME_ERROR;
		goto exit;
#endif /* DHD_USE_CLMINFO_PARSER */
	}

	/* Verify country code */
	len = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
	if (len == 0) {
		err = BCME_BUFTOOSHORT;
		goto exit;
	}
	err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
	if (err) {
		DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
		goto exit;
	}

	cspec = (wl_country_t *)iovbuf;
	if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = BCME_ERROR;
	}
exit:

	if (imgbuf) {
		dhd_os_close_image(imgbuf);
	}

	return err;
}
8291 #else
8292
8293 int
dhd_apply_default_txcap(dhd_pub_t * dhd,char * path)8294 dhd_apply_default_txcap(dhd_pub_t *dhd, char *path)
8295 {
8296 return 0;
8297 }
8298
8299 int
dhd_check_current_clm_data(dhd_pub_t * dhd)8300 dhd_check_current_clm_data(dhd_pub_t *dhd)
8301 {
8302 char iovbuf[WLC_IOCTL_SMLEN];
8303 wl_country_t *cspec;
8304 int err = BCME_OK;
8305
8306 memset(iovbuf, 0, sizeof(iovbuf));
8307 err = bcm_mkiovar("country", NULL, 0, iovbuf, sizeof(iovbuf));
8308 if (err == 0) {
8309 err = BCME_BUFTOOSHORT;
8310 DHD_ERROR(("%s: bcm_mkiovar failed.", __FUNCTION__));
8311 return err;
8312 }
8313 err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
8314 if (err) {
8315 DHD_ERROR(("%s: country code get failed\n", __FUNCTION__));
8316 return err;
8317 }
8318 cspec = (wl_country_t *)iovbuf;
8319 if ((strncmp(cspec->ccode, WL_CCODE_NULL_COUNTRY, WLC_CNTRY_BUF_SZ)) == 0) {
8320 DHD_ERROR(("%s: ----- This FW is not included CLM data -----\n",
8321 __FUNCTION__));
8322 return FALSE;
8323 }
8324 DHD_ERROR(("%s: ----- This FW is included CLM data -----\n",
8325 __FUNCTION__));
8326 return TRUE;
8327 }
8328
/**
 * Download the default CLM blob to the dongle (non-cached variant) and
 * verify that the firmware ends up with a valid country code.
 *
 * The blob source differs per build: either a memory buffer obtained via
 * dhd_get_download_buffer() (non-Linux / DHD_LINUX_STD_FW_API) or an image
 * handle from dhd_os_open_image1() (other Linux builds).
 *
 * @param dhd      dhd public context
 * @param clm_path path from module parameter; NULL/empty selects the
 *                 built-in default path or DHD_CLM_NAME
 * @return BCME_OK on success, BCME_xx error code otherwise
 */
int
dhd_apply_default_clm(dhd_pub_t *dhd, char *clm_path)
{
	char *clm_blob_path;
	int len = 0, memblock_len = 0;
	char *memblock = NULL;
	int err = BCME_OK;
	char iovbuf[WLC_IOCTL_SMLEN];
	int status = FALSE;

	/* Pick the blob path: module parameter wins over the built-in default */
	if (clm_path && clm_path[0] != '\0') {
		if (strlen(clm_path) > MOD_PARAM_PATHLEN) {
			DHD_ERROR(("clm path exceeds max len\n"));
			return BCME_ERROR;
		}
		clm_blob_path = clm_path;
		DHD_TRACE(("clm path from module param:%s\n", clm_path));
	} else {
#ifdef DHD_LINUX_STD_FW_API
		clm_blob_path = DHD_CLM_NAME;
#else
		clm_blob_path = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
#endif /* DHD_LINUX_STD_FW_API */
	}

	/* If CLM blob file is found on the filesystem, download the file.
	 * After CLM file download or If the blob file is not present,
	 * validate the country code before proceeding with the initialization.
	 * If country code is not valid, fail the initialization.
	 */
#if (!defined(LINUX) && !defined(linux)) || defined(DHD_LINUX_STD_FW_API)
	/* 'len' is in/out: capped at MAX_CLM_BUF_SIZE, set to actual size */
	len = MAX_CLM_BUF_SIZE;
	dhd_get_download_buffer(dhd, clm_blob_path, CLM_BLOB, &memblock, &len);
#ifdef DHD_LINUX_STD_FW_API
	memblock_len = len;
#else
	memblock_len = MAX_CLM_BUF_SIZE;
#endif /* DHD_LINUX_STD_FW_API */
#else
	/* 'memblock' here is an image handle, not a byte buffer */
	memblock = dhd_os_open_image1(dhd, (char *)clm_blob_path);
	len = dhd_os_get_image_size(memblock);
	BCM_REFERENCE(memblock_len);
#endif /* !LINUX && !linux || DHD_LINUX_STD_FW_API */

#if defined(LINUX) || defined(linux)
	if (memblock == NULL) {
		printf("%s: Ignore clm file %s\n", __FUNCTION__, clm_path);
#if defined(DHD_BLOB_EXISTENCE_CHECK)
		if (dhd->is_blob) {
			/* Blob-based firmware requires the file; fail */
			err = BCME_ERROR;
		} else {
			/* No file, but firmware may already embed CLM data */
			status = dhd_check_current_clm_data(dhd);
			if (status == TRUE) {
				err = BCME_OK;
			} else {
				/* FALSE or a BCME_xx error from the query */
				err = status;
			}
		}
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		goto exit;
	}
#endif /* LINUX || linux */

	if ((len > 0) && (len < MAX_CLM_BUF_SIZE) && memblock) {
		status = dhd_check_current_clm_data(dhd);
		if (status == TRUE) {
#if defined(DHD_BLOB_EXISTENCE_CHECK)
			/* Outside MFG mode, existing CLM data means we skip the
			 * download (and fail for blob-based firmware).
			 */
			if (dhd->op_mode != DHD_FLAG_MFG_MODE) {
				if (dhd->is_blob) {
					err = BCME_ERROR;
				}
				goto exit;
			}
#else
			DHD_ERROR(("%s: CLM already exist in F/W, "
				"new CLM data will be added to the end of existing CLM data!\n",
				__FUNCTION__));
#endif /* DHD_BLOB_EXISTENCE_CHECK */
		} else if (status != FALSE) {
			/* Propagate query errors (negative BCME codes) */
			err = status;
			goto exit;
		}

		/* Found blob file. Download the file */
		DHD_TRACE(("clm file download from %s \n", clm_blob_path));
		err = dhd_download_blob(dhd, (unsigned char*)memblock, len, "clmload");
		if (err) {
			DHD_ERROR(("%s: CLM download failed err=%d\n", __FUNCTION__, err));
			/* Retrieve clmload_status and print */
			memset(iovbuf, 0, sizeof(iovbuf));
			len = bcm_mkiovar("clmload_status", NULL, 0, iovbuf, sizeof(iovbuf));
			if (len == 0) {
				err = BCME_BUFTOOSHORT;
				goto exit;
			}
			err = dhd_wl_ioctl_cmd(dhd, WLC_GET_VAR, iovbuf, sizeof(iovbuf), FALSE, 0);
			if (err) {
				DHD_ERROR(("%s: clmload_status get failed err=%d \n",
					__FUNCTION__, err));
			} else {
				DHD_ERROR(("%s: clmload_status: %d \n",
					__FUNCTION__, *((int *)iovbuf)));
				if (*((int *)iovbuf) == CHIPID_MISMATCH) {
					DHD_ERROR(("Chip ID mismatch error \n"));
				}
			}
			/* Download failure is fatal regardless of status query */
			err = BCME_ERROR;
			goto exit;
		} else {
			DHD_INFO(("%s: CLM download succeeded \n", __FUNCTION__));
		}
	} else {
		DHD_INFO(("Skipping the clm download. len:%d memblk:%p \n", len, memblock));
	}

	/* Verify country code */
	status = dhd_check_current_clm_data(dhd);

	if (status != TRUE) {
		/* Country code not initialized or CLM download not proper */
		DHD_ERROR(("country code not initialized\n"));
		err = status;
	}
exit:

	if (memblock) {
#if (defined(LINUX) || defined(linux)) && !defined(DHD_LINUX_STD_FW_API)
		dhd_os_close_image1(dhd, memblock);
#else
		dhd_free_download_buffer(dhd, memblock, memblock_len);
#endif /* LINUX || linux */
	}

	return err;
}
8464 #endif /* defined(CACHE_FW_IMAGES) */
8465
/**
 * Release a buffer obtained from dhd_get_download_buffer().
 *
 * Under CACHE_FW_IMAGES the buffer is retained by the image cache for
 * later reuse, so this function intentionally frees nothing. The ifdef is
 * restructured as if/else so the free calls are not unreachable code and
 * the parameters are referenced in the cached build (avoids compiler
 * warnings); runtime behavior is unchanged.
 *
 * @param dhd    dhd public context (owner of the osl handle)
 * @param buffer buffer to free; ignored in cached builds
 * @param length allocation size originally returned with the buffer
 */
void dhd_free_download_buffer(dhd_pub_t *dhd, void *buffer, int length)
{
#ifdef CACHE_FW_IMAGES
	/* Ownership stays with the cache; nothing to free here */
	BCM_REFERENCE(dhd);
	BCM_REFERENCE(buffer);
	BCM_REFERENCE(length);
#else
#if defined(DHD_LINUX_STD_FW_API)
	VMFREE(dhd->osh, buffer, length);
#else
	MFREE(dhd->osh, buffer, length);
#endif /* DHD_LINUX_STD_FW_API */
#endif /* CACHE_FW_IMAGES */
}
8477
8478 #ifdef REPORT_FATAL_TIMEOUTS
8479 void
init_dhd_timeouts(dhd_pub_t * pub)8480 init_dhd_timeouts(dhd_pub_t *pub)
8481 {
8482 pub->timeout_info = MALLOC(pub->osh, sizeof(timeout_info_t));
8483 if (pub->timeout_info == NULL) {
8484 DHD_ERROR(("%s: Failed to alloc timeout_info\n", __FUNCTION__));
8485 } else {
8486 DHD_INFO(("Initializing dhd_timeouts\n"));
8487 pub->timeout_info->scan_timer_lock = osl_spin_lock_init(pub->osh);
8488 pub->timeout_info->join_timer_lock = osl_spin_lock_init(pub->osh);
8489 pub->timeout_info->bus_timer_lock = osl_spin_lock_init(pub->osh);
8490 pub->timeout_info->cmd_timer_lock = osl_spin_lock_init(pub->osh);
8491 pub->timeout_info->scan_timeout_val = SCAN_TIMEOUT_DEFAULT;
8492 pub->timeout_info->join_timeout_val = JOIN_TIMEOUT_DEFAULT;
8493 pub->timeout_info->cmd_timeout_val = CMD_TIMEOUT_DEFAULT;
8494 pub->timeout_info->bus_timeout_val = BUS_TIMEOUT_DEFAULT;
8495 pub->timeout_info->scan_timer_active = FALSE;
8496 pub->timeout_info->join_timer_active = FALSE;
8497 pub->timeout_info->cmd_timer_active = FALSE;
8498 pub->timeout_info->bus_timer_active = FALSE;
8499 pub->timeout_info->cmd_join_error = FALSE;
8500 pub->timeout_info->cmd_request_id = 0;
8501 OSL_ATOMIC_SET(pub->osh, &pub->set_ssid_rcvd, FALSE);
8502 }
8503 }
8504
/**
 * Tear down pub->timeout_info: stop every fatal-timeout timer, then destroy
 * the per-timer spinlocks and free the structure.
 *
 * Timers must be stopped before the locks are destroyed and the structure
 * freed — a live timer callback after free would crash on rmmod.
 */
void
deinit_dhd_timeouts(dhd_pub_t *pub)
{
	/* stop the join, scan bus, cmd timers
	 * as failing to do so may cause a kernel panic if
	 * an rmmod is done
	 */
	if (!pub->timeout_info) {
		DHD_ERROR(("%s timeout_info pointer is NULL\n", __FUNCTION__));
		ASSERT(0);
		return;
	}
	if (dhd_stop_scan_timer(pub, FALSE, 0)) {
		DHD_ERROR(("%s dhd_stop_scan_timer failed\n", __FUNCTION__));
		ASSERT(0);
	}
	if (dhd_stop_bus_timer(pub)) {
		DHD_ERROR(("%s dhd_stop_bus_timer failed\n", __FUNCTION__));
		ASSERT(0);
	}
	if (dhd_stop_cmd_timer(pub)) {
		DHD_ERROR(("%s dhd_stop_cmd_timer failed\n", __FUNCTION__));
		ASSERT(0);
	}
	if (dhd_stop_join_timer(pub)) {
		DHD_ERROR(("%s dhd_stop_join_timer failed\n", __FUNCTION__));
		ASSERT(0);
	}

	/* All timers stopped; safe to destroy the locks and free */
	osl_spin_lock_deinit(pub->osh, pub->timeout_info->scan_timer_lock);
	osl_spin_lock_deinit(pub->osh, pub->timeout_info->join_timer_lock);
	osl_spin_lock_deinit(pub->osh, pub->timeout_info->bus_timer_lock);
	osl_spin_lock_deinit(pub->osh, pub->timeout_info->cmd_timer_lock);
	MFREE(pub->osh, pub->timeout_info, sizeof(timeout_info_t));
}
8540
/**
 * Timer callback fired when an ioctl command did not complete within
 * cmd_timeout_val ms: wake the waiting ioctl path with an error and, if no
 * other bus error is pending, ask the firmware to trap for post-mortem.
 *
 * Note the lock is released before dhd_stop_cmd_timer(), which re-acquires
 * the same (non-recursive) lock.
 */
static void
dhd_cmd_timeout(void *ctx)
{
	dhd_pub_t *pub = (dhd_pub_t *)ctx;
	unsigned long flags;

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ASSERT(0);
		return;
	}

	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
	/* Re-check under the lock in case the timer was cancelled meanwhile */
	if (pub->timeout_info && pub->timeout_info->cmd_timer_active) {
		DHD_ERROR(("\nERROR COMMAND TIMEOUT TO:%d\n", pub->timeout_info->cmd_timeout_val));
		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
#ifdef PCIE_OOB
		/* Assert device_wake so that UART_Rx is available */
		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
			ASSERT(0);
		}
#endif /* PCIE_OOB */
		if (dhd_stop_cmd_timer(pub)) {
			DHD_ERROR(("%s: dhd_stop_cmd_timer() failed\n", __FUNCTION__));
			ASSERT(0);
		}
		/* Unblock whoever is waiting on the ioctl response */
		dhd_wakeup_ioctl_event(pub, IOCTL_RETURN_ON_ERROR);
		if (!dhd_query_bus_erros(pub))
			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_COMMAND_TO);
	} else {
		DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
	}
}
8575
/**
 * Arm the ioctl command timeout timer (fires dhd_cmd_timeout after
 * cmd_timeout_val ms). A value of 0 disables the timeout. Starting while
 * the timer is already active is treated as a caller bug.
 *
 * @return BCME_OK on success, BCME_ERROR otherwise
 */
int
dhd_start_cmd_timer(dhd_pub_t *pub)
{
	int ret = BCME_OK;
	unsigned long flags = 0;
	uint32 cmd_to_ms;

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ret = BCME_ERROR;
		ASSERT(0);
		/* Cannot touch the lock if timeout_info is gone */
		goto exit_null;
	}
	DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
	cmd_to_ms = pub->timeout_info->cmd_timeout_val;

	if (pub->timeout_info->cmd_timeout_val == 0) {
		/* Disable Command timer timeout */
		DHD_INFO(("DHD: Command Timeout Disabled\n"));
		goto exit;
	}
	if (pub->timeout_info->cmd_timer_active) {
		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
		ret = BCME_ERROR;
		ASSERT(0);
	} else {
		/* One-shot timer: 0 repeat interval */
		pub->timeout_info->cmd_timer = osl_timer_init(pub->osh,
			"cmd_timer", dhd_cmd_timeout, pub);
		osl_timer_update(pub->osh, pub->timeout_info->cmd_timer,
			cmd_to_ms, 0);
		pub->timeout_info->cmd_timer_active = TRUE;
	}
	if (ret == BCME_OK) {
		DHD_INFO(("%s Cmd Timer started\n", __FUNCTION__));
	}
exit:
	DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
exit_null:
	return ret;
}
8616
8617 int
dhd_stop_cmd_timer(dhd_pub_t * pub)8618 dhd_stop_cmd_timer(dhd_pub_t *pub)
8619 {
8620 int ret = BCME_OK;
8621 unsigned long flags = 0;
8622
8623 if (!pub) {
8624 DHD_ERROR(("DHD: pub NULL\n"));
8625 ASSERT(0);
8626 return BCME_ERROR;
8627 }
8628
8629 if (!pub->timeout_info) {
8630 DHD_ERROR(("DHD: timeout_info NULL\n"));
8631 ret = BCME_ERROR;
8632 ASSERT(0);
8633 goto exit;
8634 }
8635 DHD_TIMER_LOCK(pub->timeout_info->cmd_timer_lock, flags);
8636
8637 if (pub->timeout_info->cmd_timer_active) {
8638 osl_timer_del(pub->osh, pub->timeout_info->cmd_timer);
8639 pub->timeout_info->cmd_timer_active = FALSE;
8640 }
8641 else {
8642 DHD_INFO(("DHD: CMD timer is not active\n"));
8643 }
8644 if (ret == BCME_OK) {
8645 DHD_INFO(("%s Cmd Timer Stopped\n", __FUNCTION__));
8646 }
8647 DHD_TIMER_UNLOCK(pub->timeout_info->cmd_timer_lock, flags);
8648 exit:
8649 return ret;
8650 }
8651
8652 static int
__dhd_stop_join_timer(dhd_pub_t * pub)8653 __dhd_stop_join_timer(dhd_pub_t *pub)
8654 {
8655 int ret = BCME_OK;
8656 if (!pub) {
8657 DHD_ERROR(("DHD: pub NULL\n"));
8658 ASSERT(0);
8659 return BCME_ERROR;
8660 }
8661 if (!pub->timeout_info) {
8662 DHD_ERROR(("DHD: timeout_info NULL\n"));
8663 ASSERT(0);
8664 return BCME_ERROR;
8665 }
8666
8667 if (pub->timeout_info->join_timer_active) {
8668 osl_timer_del(pub->osh, pub->timeout_info->join_timer);
8669 pub->timeout_info->join_timer_active = FALSE;
8670 DHD_INFO(("%s join timer stopped\n", __FUNCTION__));
8671 } else {
8672 DHD_INFO(("%s join timer is not active\n", __FUNCTION__));
8673 }
8674
8675 return ret;
8676 }
8677
/**
 * Timer callback fired when a join (association) did not complete within
 * join_timeout_val ms. If any completion bits (SSID/WPA masks) are still
 * outstanding in cmd_join_error, request a firmware trap for post-mortem
 * (unless another bus error is already pending).
 */
static void
dhd_join_timeout(void *ctx)
{
	dhd_pub_t *pub = (dhd_pub_t *)ctx;
	unsigned long flags;

	if (!pub->timeout_info) {
		DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
		ASSERT(0);
		return;
	}

	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
	/* Re-check under the lock in case the timer was cancelled meanwhile */
	if (pub->timeout_info->join_timer_active) {
		/* __dhd_stop_join_timer expects the lock held, as here */
		if (__dhd_stop_join_timer(pub)) {
			DHD_ERROR(("%s: __dhd_stop_join_timer() failed\n", __FUNCTION__));
			ASSERT(0);
		}
		/* Non-zero cmd_join_error means some join step never completed */
		if (pub->timeout_info->cmd_join_error) {
			DHD_ERROR(("\n%s ERROR JOIN TIMEOUT TO:%d:0x%x\n", __FUNCTION__,
				pub->timeout_info->join_timeout_val,
				pub->timeout_info->cmd_join_error));
			if (!dhd_query_bus_erros(pub)) {
				dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_JOIN_TO);
			}
			pub->timeout_info->cmd_join_error = 0;
		}
	}
	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
}
8708
8709 int
dhd_start_join_timer(dhd_pub_t * pub)8710 dhd_start_join_timer(dhd_pub_t *pub)
8711 {
8712 int ret = BCME_OK;
8713 unsigned long flags = 0;
8714 uint32 join_to_ms;
8715
8716 if (!pub->timeout_info) {
8717 DHD_ERROR(("%s DHD: timeout_info NULL\n", __FUNCTION__));
8718 ret = BCME_ERROR;
8719 ASSERT(0);
8720 goto exit;
8721 }
8722
8723 join_to_ms = pub->timeout_info->join_timeout_val;
8724 DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8725 if (pub->timeout_info->join_timer_active) {
8726 DHD_ERROR(("%s: stopping active timer\n", __FUNCTION__));
8727 __dhd_stop_join_timer(pub);
8728 }
8729 if (pub->timeout_info->join_timeout_val == 0) {
8730 /* Disable Join timer timeout */
8731 DHD_INFO(("%s DHD: join timeout disabled\n", __FUNCTION__));
8732 } else {
8733 pub->timeout_info->join_timer = osl_timer_init(pub->osh,
8734 "join_timer", dhd_join_timeout, pub);
8735 osl_timer_update(pub->osh, pub->timeout_info->join_timer, join_to_ms, 0);
8736 pub->timeout_info->join_timer_active = TRUE;
8737 pub->timeout_info->cmd_join_error = 0;
8738 dhd_set_join_error(pub, WLC_SSID_MASK);
8739 if (pub->secure_join) {
8740 dhd_set_join_error(pub, WLC_WPA_MASK);
8741 }
8742 DHD_ERROR(("%s: join timer started 0x%x\n", __FUNCTION__,
8743 pub->timeout_info->cmd_join_error));
8744 }
8745 DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8746 exit:
8747 return ret;
8748 }
8749
8750 int
dhd_stop_join_timer(dhd_pub_t * pub)8751 dhd_stop_join_timer(dhd_pub_t *pub)
8752 {
8753 int ret = BCME_OK;
8754 unsigned long flags;
8755
8756 if (!pub) {
8757 DHD_ERROR(("%s DHD: pub NULL\n", __FUNCTION__));
8758 ASSERT(0);
8759 return BCME_ERROR;
8760 }
8761
8762 DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
8763 ret = __dhd_stop_join_timer(pub);
8764 DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
8765 return ret;
8766 }
8767
8768 static void
dhd_set_join_error(dhd_pub_t * pub,uint32 mask)8769 dhd_set_join_error(dhd_pub_t *pub, uint32 mask)
8770 {
8771 DHD_INFO(("Setting join Error %d\n", mask));
8772 if (pub->timeout_info) {
8773 pub->timeout_info->cmd_join_error |= mask;
8774 }
8775 }
8776
/**
 * Clear bits from the outstanding-join-step mask as join stages complete.
 * Once every expected bit (SSID, and WPA for secure joins) is cleared, the
 * join timeout timer is cancelled.
 */
void
dhd_clear_join_error(dhd_pub_t *pub, uint32 mask)
{
	unsigned long flags;

	DHD_INFO(("%s clear join error %d\n", __FUNCTION__, mask));
	if (!(pub->timeout_info)) {
		return;
	}

	DHD_TIMER_LOCK(pub->timeout_info->join_timer_lock, flags);
	pub->timeout_info->cmd_join_error &= ~mask;
	/* If both WLC_SSID_MASK, WLC_WPA_MASK are received cancel the timer */
	if (!(pub->timeout_info->cmd_join_error)) {
		/* Lock is held here, as __dhd_stop_join_timer requires */
		if (__dhd_stop_join_timer(pub)) {
			DHD_ERROR(("%s: dhd_stop_join_timer failed\n", __FUNCTION__));
			ASSERT(0);
		}
	}
	DHD_TIMER_UNLOCK(pub->timeout_info->join_timer_lock, flags);
}
8798
/**
 * Timer callback fired when a scan did not complete within
 * scan_timeout_val ms: cancel the timer and, if no other bus error is
 * pending, ask the firmware to trap for post-mortem.
 *
 * Note the lock is released before dhd_stop_scan_timer(), which
 * re-acquires the same (non-recursive) lock.
 */
static void
dhd_scan_timeout(void *ctx)
{
	dhd_pub_t *pub = (dhd_pub_t *)ctx;
	unsigned long flags;

	if (!pub) {
		DHD_ERROR(("DHD: pub NULL\n"));
		ASSERT(0);
		return;
	}

	if (pub->timeout_info == NULL) {
		DHD_ERROR(("timeout_info pointer is NULL\n"));
		ASSERT(0);
		return;
	}
	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
	/* Re-check under the lock in case the timer was cancelled meanwhile */
	if (pub->timeout_info->scan_timer_active) {
		DHD_ERROR(("\nERROR SCAN TIMEOUT TO:%d\n", pub->timeout_info->scan_timeout_val));
		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
		dhd_stop_scan_timer(pub, FALSE, 0);
		if (!dhd_query_bus_erros(pub))
			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_SCAN_TO);
	} else {
		DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
	}
}
8827
/**
 * Arm the scan timeout timer. A scan_timeout_val of 0 disables the
 * timeout; an already-running timer is cancelled and re-armed.
 *
 * @param pub      dhd public context
 * @param is_escan TRUE for escan: the current esync_id is recorded so a
 *                 later stop can be matched to this scan; also, if this
 *                 escan was already aborted (matching abort_syncid), the
 *                 timer is not started at all.
 * @return BCME_OK on success, BCME_ERROR otherwise
 */
int
dhd_start_scan_timer(dhd_pub_t *pub, bool is_escan)
{
	int ret = BCME_OK;
	unsigned long flags = 0;
	uint32 scan_to_ms;

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ret = BCME_ERROR;
		ASSERT(0);
		/* Cannot touch the lock if timeout_info is gone */
		goto exit_null;
	}
	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);
	scan_to_ms = pub->timeout_info->scan_timeout_val;

	if (is_escan) {
		/* An abort may have arrived before this start; the syncid
		 * match ensures the abort belongs to this very escan.
		 */
		if (pub->timeout_info->escan_aborted &&
			pub->esync_id == pub->timeout_info->abort_syncid) {
			pub->timeout_info->escan_aborted = FALSE;
			DHD_INFO(("%s: escan already aborted, do not start timer \n",
				__FUNCTION__));
			goto exit;
		}
		pub->timeout_info->escan_syncid = pub->esync_id;
	} else {
		pub->timeout_info->escan_syncid = 0;
	}

	if (pub->timeout_info->scan_timer_active) {
		/* cancel any earlier running timer */
		DHD_INFO(("%s:Timer already active, stopping it.\n", __FUNCTION__));
		osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
		pub->timeout_info->scan_timer_active = FALSE;
	}

	if (pub->timeout_info->scan_timeout_val == 0) {
		/* Disable Scan timer timeout */
		DHD_INFO(("DHD: Scan Timeout Disabled\n"));
	} else {
		/* One-shot timer: 0 repeat interval */
		pub->timeout_info->scan_timer = osl_timer_init(pub->osh, "scan_timer",
			dhd_scan_timeout, pub);
		pub->timeout_info->scan_timer_active = TRUE;
		osl_timer_update(pub->osh, pub->timeout_info->scan_timer, scan_to_ms, 0);
		DHD_INFO(("%s Scan Timer started\n", __FUNCTION__));
	}

exit:
	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);
exit_null:
	return ret;
}
8880
/**
 * Cancel the scan timeout timer.
 *
 * @param pub      dhd public context
 * @param is_escan TRUE when stopping on behalf of an escan completion; the
 *                 timer is only cancelled if sync_id matches the escan the
 *                 timer was started for (stale completions are ignored)
 * @param sync_id  escan sync id of the completing scan (ignored otherwise)
 * @return BCME_OK on success, BCME_ERROR on bad state
 */
int
dhd_stop_scan_timer(dhd_pub_t *pub, bool is_escan, uint16 sync_id)
{
	int ret = BCME_OK;
	unsigned long flags = 0;

	if (!pub) {
		DHD_ERROR(("DHD: pub NULL\n"));
		ASSERT(0);
		return BCME_ERROR;
	}

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ret = BCME_ERROR;
		ASSERT(0);
		goto exit_null;
	}

	DHD_TIMER_LOCK(pub->timeout_info->scan_timer_lock, flags);

	if (pub->timeout_info->scan_timer_active) {
		if (is_escan) {
			/* Only the matching escan may cancel the timer */
			if (sync_id == pub->timeout_info->escan_syncid) {
				osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
				pub->timeout_info->scan_timer_active = FALSE;
				DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
			}
		} else {
			osl_timer_del(pub->osh, pub->timeout_info->scan_timer);
			pub->timeout_info->scan_timer_active = FALSE;
			DHD_INFO(("%s Scan Timer Stopped\n", __FUNCTION__));
		}

	} else {
		DHD_INFO(("DHD: SCAN timer is not active\n"));
	}

	DHD_TIMER_UNLOCK(pub->timeout_info->scan_timer_lock, flags);

exit_null:
	return ret;
}
8924
/**
 * Timer callback fired when a bus transaction did not complete within
 * bus_timeout_val ms: stop the timer, request a firmware trap if no other
 * bus error is pending, and (PCIe) dump iovar state for diagnosis.
 *
 * Note the lock is released before dhd_stop_bus_timer(), which re-acquires
 * the same (non-recursive) lock.
 */
static void
dhd_bus_timeout(void *ctx)
{
	dhd_pub_t *pub = (dhd_pub_t *)ctx;
	unsigned long flags;

	if (pub->timeout_info == NULL) {
		DHD_ERROR(("timeout_info pointer is NULL\n"));
		ASSERT(0);
		return;
	}

	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
	/* Re-check under the lock in case the timer was cancelled meanwhile */
	if (pub->timeout_info && pub->timeout_info->bus_timer_active) {
		DHD_ERROR(("\nERROR BUS TIMEOUT TO:%d\n", pub->timeout_info->bus_timeout_val));
		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
#ifdef PCIE_OOB
		/* Assert device_wake so that UART_Rx is available */
		if (dhd_bus_set_device_wake(pub->bus, TRUE)) {
			DHD_ERROR(("%s: dhd_bus_set_device_wake() failed\n", __FUNCTION__));
			ASSERT(0);
		}
#endif /* PCIE_OOB */
		if (dhd_stop_bus_timer(pub)) {
			DHD_ERROR(("%s: dhd_stop_bus_timer() failed\n", __FUNCTION__));
			ASSERT(0);
		}
		if (!dhd_query_bus_erros(pub)) {
			dhd_send_trap_to_fw_for_timeout(pub, DHD_REASON_OQS_TO);
		}
#ifdef BCMPCIE
		dhd_msgbuf_iovar_timeout_dump(pub);
#endif /* BCMPCIE */
	} else {
		DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
	}
}
8962
/**
 * Arm the bus timeout timer (fires dhd_bus_timeout after bus_timeout_val
 * ms). A value of 0 disables the timeout. Starting while already active
 * is treated as a caller bug.
 *
 * @return BCME_OK on success, BCME_ERROR otherwise
 */
int
dhd_start_bus_timer(dhd_pub_t *pub)
{
	int ret = BCME_OK;
	unsigned long flags = 0;
	uint32 bus_to_ms;

	if (!pub->timeout_info) {
		DHD_ERROR(("DHD: timeout_info NULL\n"));
		ret = BCME_ERROR;
		ASSERT(0);
		/* Cannot touch the lock if timeout_info is gone */
		goto exit_null;
	}
	DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
	bus_to_ms = pub->timeout_info->bus_timeout_val;

	if (pub->timeout_info->bus_timeout_val == 0) {
		/* Disable Bus timer timeout */
		DHD_INFO(("DHD: Bus Timeout Disabled\n"));
		goto exit;
	}
	if (pub->timeout_info->bus_timer_active) {
		DHD_ERROR(("%s:Timer already active\n", __FUNCTION__));
		ret = BCME_ERROR;
		ASSERT(0);
	} else {
		/* One-shot timer: 0 repeat interval */
		pub->timeout_info->bus_timer = osl_timer_init(pub->osh,
			"bus_timer", dhd_bus_timeout, pub);
		pub->timeout_info->bus_timer_active = TRUE;
		osl_timer_update(pub->osh, pub->timeout_info->bus_timer, bus_to_ms, 0);
	}
	if (ret == BCME_OK) {
		DHD_INFO(("%s: BUS Timer started\n", __FUNCTION__));
	}
exit:
	DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
exit_null:
	return ret;
}
9002
9003 int
dhd_stop_bus_timer(dhd_pub_t * pub)9004 dhd_stop_bus_timer(dhd_pub_t *pub)
9005 {
9006 int ret = BCME_OK;
9007 unsigned long flags;
9008
9009 if (!pub) {
9010 DHD_ERROR(("DHD: pub NULL\n"));
9011 ASSERT(0);
9012 return BCME_ERROR;
9013 }
9014
9015 if (!pub->timeout_info) {
9016 DHD_ERROR(("DHD: timeout_info NULL\n"));
9017 ret = BCME_ERROR;
9018 ASSERT(0);
9019 goto exit;
9020 }
9021
9022 DHD_TIMER_LOCK(pub->timeout_info->bus_timer_lock, flags);
9023
9024 if (pub->timeout_info->bus_timer_active) {
9025 osl_timer_del(pub->osh, pub->timeout_info->bus_timer);
9026 pub->timeout_info->bus_timer_active = FALSE;
9027 }
9028 else {
9029 DHD_INFO(("DHD: BUS timer is not active\n"));
9030 }
9031 if (ret == BCME_OK) {
9032 DHD_INFO(("%s: Bus Timer Stopped\n", __FUNCTION__));
9033 }
9034 DHD_TIMER_UNLOCK(pub->timeout_info->bus_timer_lock, flags);
9035 exit:
9036 return ret;
9037 }
9038
9039 int
dhd_set_request_id(dhd_pub_t * pub,uint16 id,uint32 cmd)9040 dhd_set_request_id(dhd_pub_t *pub, uint16 id, uint32 cmd)
9041 {
9042 DHD_INFO(("%s: id:%d\n", __FUNCTION__, id));
9043 if (pub->timeout_info) {
9044 pub->timeout_info->cmd_request_id = id;
9045 pub->timeout_info->cmd = cmd;
9046 return BCME_OK;
9047 } else {
9048 return BCME_ERROR;
9049 }
9050 }
9051
9052 uint16
dhd_get_request_id(dhd_pub_t * pub)9053 dhd_get_request_id(dhd_pub_t *pub)
9054 {
9055 if (pub->timeout_info) {
9056 return (pub->timeout_info->cmd_request_id);
9057 } else {
9058 return 0;
9059 }
9060 }
9061
9062 void
dhd_get_scan_to_val(dhd_pub_t * pub,uint32 * to_val)9063 dhd_get_scan_to_val(dhd_pub_t *pub, uint32 *to_val)
9064 {
9065 if (pub->timeout_info) {
9066 *to_val = pub->timeout_info->scan_timeout_val;
9067 } else {
9068 *to_val = 0;
9069 }
9070 }
9071
9072 void
dhd_set_scan_to_val(dhd_pub_t * pub,uint32 to_val)9073 dhd_set_scan_to_val(dhd_pub_t *pub, uint32 to_val)
9074 {
9075 if (pub->timeout_info) {
9076 DHD_INFO(("Setting scan TO val:%d\n", to_val));
9077 pub->timeout_info->scan_timeout_val = to_val;
9078 }
9079 }
9080
9081 void
dhd_get_join_to_val(dhd_pub_t * pub,uint32 * to_val)9082 dhd_get_join_to_val(dhd_pub_t *pub, uint32 *to_val)
9083 {
9084 if (pub->timeout_info) {
9085 *to_val = pub->timeout_info->join_timeout_val;
9086 } else {
9087 *to_val = 0;
9088 }
9089 }
9090
9091 void
dhd_set_join_to_val(dhd_pub_t * pub,uint32 to_val)9092 dhd_set_join_to_val(dhd_pub_t *pub, uint32 to_val)
9093 {
9094 if (pub->timeout_info) {
9095 DHD_INFO(("Setting join TO val:%d\n", to_val));
9096 pub->timeout_info->join_timeout_val = to_val;
9097 }
9098 }
9099
9100 void
dhd_get_cmd_to_val(dhd_pub_t * pub,uint32 * to_val)9101 dhd_get_cmd_to_val(dhd_pub_t *pub, uint32 *to_val)
9102 {
9103 if (pub->timeout_info) {
9104 *to_val = pub->timeout_info->cmd_timeout_val;
9105 } else {
9106 *to_val = 0;
9107 }
9108 }
9109
9110 void
dhd_set_cmd_to_val(dhd_pub_t * pub,uint32 to_val)9111 dhd_set_cmd_to_val(dhd_pub_t *pub, uint32 to_val)
9112 {
9113 if (pub->timeout_info) {
9114 DHD_INFO(("Setting cmd TO val:%d\n", to_val));
9115 pub->timeout_info->cmd_timeout_val = to_val;
9116 }
9117 }
9118
9119 void
dhd_get_bus_to_val(dhd_pub_t * pub,uint32 * to_val)9120 dhd_get_bus_to_val(dhd_pub_t *pub, uint32 *to_val)
9121 {
9122 if (pub->timeout_info) {
9123 *to_val = pub->timeout_info->bus_timeout_val;
9124 } else {
9125 *to_val = 0;
9126 }
9127 }
9128
9129 void
dhd_set_bus_to_val(dhd_pub_t * pub,uint32 to_val)9130 dhd_set_bus_to_val(dhd_pub_t *pub, uint32 to_val)
9131 {
9132 if (pub->timeout_info) {
9133 DHD_INFO(("Setting bus TO val:%d\n", to_val));
9134 pub->timeout_info->bus_timeout_val = to_val;
9135 }
9136 }
9137 #endif /* REPORT_FATAL_TIMEOUTS */
9138
9139 #ifdef SHOW_LOGTRACE
/**
 * dhd_parse_logstrs_file - parse logstrs.bin and build the format-string table
 * used to decode firmware event-log traces.
 *
 * logstrs.bin layout is detected from a trailer at the end of the buffer:
 *  - trailer magic present, version 1: header without FW id matching;
 *  - trailer magic present, version 2: header includes a firmware id that
 *    must match the id string embedded at the tail of the firmware binary
 *    (read via request_firmware or the DHD file API, depending on
 *    DHD_LINUX_STD_FW_API);
 *  - no magic: legacy RAM-only format (lognums array followed by strings).
 *
 * @osh:          OSL handle used for the fmts table allocation.
 * @raw_fmts:     caller-owned buffer holding the whole logstrs.bin contents;
 *                retained in event_log->raw_fmts (ownership stays with caller).
 * @logstrs_size: size in bytes of raw_fmts.
 * @event_log:    output; fmts/raw_fmts/num_fmts are filled on success.
 *                An existing event_log->fmts allocation is reused.
 *
 * Returns BCME_OK on success, BCME_DECERR when the v2 FW id check could not
 * be completed or failed, BCME_ERROR on any other parse/alloc failure.
 */
int
dhd_parse_logstrs_file(osl_t *osh, char *raw_fmts, int logstrs_size,
		dhd_event_log_t *event_log)
{
	uint32 *lognums = NULL;
	char *logstrs = NULL;
	logstr_trailer_t *trailer = NULL;
	int ram_index = 0;
	char **fmts = NULL;
	int num_fmts = 0;
	bool match_fail = TRUE;
	int32 i = 0;
	uint8 *pfw_id = NULL;
	uint32 fwid = 0;
#ifdef DHD_LINUX_STD_FW_API
	int err = 0;
	const struct firmware *fw = NULL;
#else
	void *file = NULL;
	int file_len = 0;
#endif /* DHD_LINUX_STD_FW_API */
	char fwid_str[FWID_STR_LEN];
	uint32 hdr_logstrs_size = 0;

	/* Read last three words in the logstrs.bin file */
	trailer = (logstr_trailer_t *) (raw_fmts + logstrs_size -
		sizeof(logstr_trailer_t));

	if (trailer->log_magic == LOGSTRS_MAGIC) {
		/*
		 * logstrs.bin has a header.
		 */
		if (trailer->version == 1) {
			logstr_header_v1_t *hdr_v1 = (logstr_header_v1_t *) (raw_fmts +
					logstrs_size - sizeof(logstr_header_v1_t));
			DHD_INFO(("%s: logstr header version = %u\n",
					__FUNCTION__, hdr_v1->version));
			num_fmts = hdr_v1->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr_v1->ram_lognums_offset -
				hdr_v1->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr_v1->rom_lognums_offset];
			logstrs = (char *) &raw_fmts[hdr_v1->rom_logstrs_offset];
			hdr_logstrs_size = hdr_v1->logstrs_size;
		} else if (trailer->version == 2) {
			logstr_header_t *hdr = (logstr_header_t *) (raw_fmts + logstrs_size -
					sizeof(logstr_header_t));
			DHD_INFO(("%s: logstr header version = %u; flags = %x\n",
					__FUNCTION__, hdr->version, hdr->flags));

			/* For ver. 2 of the header, need to match fwid of
			 * both logstrs.bin and fw bin
			 */

#ifdef DHD_LINUX_STD_FW_API
			/* The FW id is kept as an ASCII string in the last
			 * (sizeof(fwid_str) - 1) bytes of the firmware image.
			 */
			err = dhd_os_get_img_fwreq(&fw, st_str_file_path);
			if (err < 0) {
				DHD_ERROR(("dhd_os_get_img(Request Firmware API) error : %d\n",
					err));
				goto error;
			}
			memset(fwid_str, 0, sizeof(fwid_str));
			err = memcpy_s(fwid_str, (sizeof(fwid_str) - 1),
				&(fw->data[fw->size - (sizeof(fwid_str) - 1)]),
				(sizeof(fwid_str) - 1));
			if (err) {
				DHD_ERROR(("%s: failed to copy raw_fmts, err=%d\n",
					__FUNCTION__, err));
				goto error;
			}
#else
			/* read the FWID from fw bin */
			file = dhd_os_open_image1(NULL, st_str_file_path);
			if (!file) {
				DHD_ERROR(("%s: cannot open fw file !\n", __FUNCTION__));
				goto error;
			}
			file_len = dhd_os_get_image_size(file);
			if (file_len <= 0) {
				DHD_ERROR(("%s: bad fw file length !\n", __FUNCTION__));
				goto error;
			}
			/* fwid is at the end of fw bin in string format */
			if (dhd_os_seek_file(file, file_len - (sizeof(fwid_str) - 1)) < 0) {
				DHD_ERROR(("%s: can't seek file \n", __FUNCTION__));
				goto error;
			}

			memset(fwid_str, 0, sizeof(fwid_str));
			if (dhd_os_get_image_block(fwid_str, sizeof(fwid_str) - 1, file) <= 0) {
				DHD_ERROR(("%s: read fw file failed !\n", __FUNCTION__));
				goto error;
			}
#endif /* DHD_LINUX_STD_FW_API */
			/* Locate the FW id marker; two marker spellings are accepted. */
			pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_1, strlen(FWID_STR_1));
			if (!pfw_id) {
				pfw_id = (uint8 *)bcmstrnstr(fwid_str, sizeof(fwid_str) - 1,
					FWID_STR_2, strlen(FWID_STR_2));
				if (!pfw_id) {
					DHD_ERROR(("%s: could not find id in FW bin!\n",
						__FUNCTION__));
					goto error;
				}
			}
			/* search for the '-' in the fw id str, after which the
			 * actual 4 byte fw id is present
			 */
			while (pfw_id && *pfw_id != '-') {
				++pfw_id;
			}
			++pfw_id;
			fwid = bcm_strtoul((char *)pfw_id, NULL, 16);

			/* check if fw id in logstrs.bin matches the fw one */
			if (hdr->fw_id != fwid) {
				DHD_ERROR(("%s: logstr id does not match FW!"
					"logstrs_fwid:0x%x, rtecdc_fwid:0x%x\n",
					__FUNCTION__, hdr->fw_id, fwid));
				goto error;
			}

			match_fail = FALSE;
			num_fmts = hdr->rom_logstrs_offset / sizeof(uint32);
			ram_index = (hdr->ram_lognums_offset -
				hdr->rom_lognums_offset) / sizeof(uint32);
			lognums = (uint32 *) &raw_fmts[hdr->rom_lognums_offset];
			logstrs = (char *) &raw_fmts[hdr->rom_logstrs_offset];
			hdr_logstrs_size = hdr->logstrs_size;

			/* NOTE: this error label only applies to the v2 path above;
			 * failures before match_fail is cleared fall through here and
			 * return BCME_DECERR after releasing the fw image/file handle.
			 */
error:
#ifdef DHD_LINUX_STD_FW_API
			if (fw) {
				dhd_os_close_img_fwreq(fw);
			}
#else
			if (file) {
				dhd_os_close_image1(NULL, file);
			}
#endif /* DHD_LINUX_STD_FW_API */
			if (match_fail) {
				return BCME_DECERR;
			}
		} else {
			DHD_ERROR(("%s: Invalid logstr version %u\n", __FUNCTION__,
					trailer->version));
			return BCME_ERROR;
		}
		/* Sanity: the size recorded in the header must match the buffer. */
		if (logstrs_size != hdr_logstrs_size) {
			DHD_ERROR(("%s: bad logstrs_size %d\n", __FUNCTION__, hdr_logstrs_size));
			return BCME_ERROR;
		}
	} else {
		/*
		 * Legacy logstrs.bin format without header.
		 */
		num_fmts = *((uint32 *) (raw_fmts)) / sizeof(uint32);

		/* Legacy RAM-only logstrs.bin format:
		 *	- RAM 'lognums' section
		 *	- RAM 'logstrs' section.
		 *
		 * 'lognums' is an array of indexes for the strings in the
		 * 'logstrs' section. The first uint32 is an index to the
		 * start of 'logstrs'. Therefore, if this index is divided
		 * by 'sizeof(uint32)' it provides the number of logstr
		 *	entries.
		 */
		ram_index = 0;
		lognums = (uint32 *) raw_fmts;
		logstrs = (char *) &raw_fmts[num_fmts << 2];
	}
	if (num_fmts) {
		if (event_log->fmts != NULL) {
			fmts = event_log->fmts;	/* reuse existing malloced fmts */
		} else {
			fmts = MALLOC(osh, num_fmts  * sizeof(char *));
		}
	}
	if (fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate fmts memory\n", __FUNCTION__));
		return BCME_ERROR;
	}
	event_log->fmts_size = num_fmts  * sizeof(char *);

	for (i = 0; i < num_fmts; i++) {
		/* ROM lognums index into logstrs using 'rom_logstrs_offset' as a base
		* (they are 0-indexed relative to 'rom_logstrs_offset').
		*
		* RAM lognums are already indexed to point to the correct RAM logstrs (they
		* are 0-indexed relative to the start of the logstrs.bin file).
		*/
		if (i == ram_index) {
			logstrs = raw_fmts;
		}
		fmts[i] = &logstrs[lognums[i]];
	}
	event_log->fmts = fmts;
	event_log->raw_fmts_size = logstrs_size;
	event_log->raw_fmts = raw_fmts;
	event_log->num_fmts = num_fmts;
	return BCME_OK;
} /* dhd_parse_logstrs_file */
9342
9343 #ifdef DHD_LINUX_STD_FW_API
/**
 * dhd_parse_map_file - extract ramstart, rodata_start and rodata_end
 * addresses from the firmware map file (request_firmware variant).
 *
 * Scans the in-memory map image in READ_NUM_BYTES chunks, overlapping
 * consecutive chunks by GO_BACK_FILE_POS_NUM_BYTES so a marker string
 * split across a chunk boundary is still found. Each marker line has the
 * form "<hex addr> <c> <marker>", so the address is parsed from
 * BYTES_AHEAD_NUM characters before the marker.
 *
 * @osh:          OSL handle for the scratch buffer.
 * @ptr:          a (struct firmware *) holding the whole map file.
 * @ramstart:     out, address of text_start; 0 if not found.
 * @rodata_start: out, address of rodata_start; 0 if not found.
 * @rodata_end:   out, address of rodata_end; 0 if not found.
 *
 * Returns BCME_OK when all three markers were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *ptr, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end)
{
	char *raw_fmts = NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES, offset = 0;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;
	uint32 size = 0;

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;
	size = (uint32)(((struct firmware *)ptr)->size);

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, READ_NUM_BYTES + 1);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map file */
	while (count != ALL_MAP_VAL)
	{
		/* Bound check for size before doing memcpy() */
		if ((offset + read_size) > size) {
			read_size = size - offset;
		}

		error = memcpy_s(raw_fmts, read_size,
			(((char *)((struct firmware *)ptr)->data) + offset), read_size);
		if (error) {
			DHD_ERROR(("%s: failed to copy raw_fmts, err=%d\n",
				__FUNCTION__, error));
			goto fail;
		}
		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if ((offset + read_size) >= size) {
			break;
		}

		memset(raw_fmts, 0, read_size);
		/* Overlap chunks so a marker split across the boundary is caught. */
		offset += (read_size - GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		/* BUGFIX: free with the size that was actually allocated.
		 * read_size may have been shrunk above for the final partial
		 * chunk, so "read_size + 1" could under-report the allocation
		 * size to MFREE; always pass READ_NUM_BYTES + 1.
		 */
		MFREE(osh, raw_fmts, READ_NUM_BYTES + 1);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
			count));
		return BCME_ERROR;
	}
} /* dhd_parse_map_file */
9437 #else
/**
 * dhd_parse_map_file - extract ramstart, rodata_start and rodata_end
 * addresses from the firmware map file (DHD file-API variant).
 *
 * Reads the map file in READ_NUM_BYTES chunks, seeking back by
 * GO_BACK_FILE_POS_NUM_BYTES between chunks so a marker string split
 * across a chunk boundary is still found. Each marker line has the form
 * "<hex addr> <c> <marker>", so the address is parsed starting
 * BYTES_AHEAD_NUM characters before the located marker.
 *
 * @osh:          OSL handle for the scratch buffer.
 * @file:         open map-file handle (dhd_os_* file API).
 * @ramstart:     out, address of text_start; 0 if not found.
 * @rodata_start: out, address of rodata_start; 0 if not found.
 * @rodata_end:   out, address of rodata_end; 0 if not found.
 *
 * Returns BCME_OK when all three markers were found, BCME_ERROR otherwise.
 */
int dhd_parse_map_file(osl_t *osh, void *file, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end)
{
	char *raw_fmts = NULL, *raw_fmts_loc = NULL;
	uint32 read_size = READ_NUM_BYTES;
	int error = 0;
	char * cptr = NULL;
	char c;
	uint8 count = 0;

	*ramstart = 0;
	*rodata_start = 0;
	*rodata_end = 0;

	/* Allocate 1 byte more than read_size to terminate it with NULL */
	raw_fmts = MALLOCZ(osh, read_size + 1);
	if (raw_fmts == NULL) {
		DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
		goto fail;
	}

	/* read ram start, rodata_start and rodata_end values from map file */
	while (count != ALL_MAP_VAL)
	{
		error = dhd_os_read_file(file, raw_fmts, read_size);
		if (error < 0) {
			DHD_ERROR(("%s: map file read failed err:%d \n", __FUNCTION__,
				error));
			goto fail;
		}

		/* End raw_fmts with NULL as strstr expects NULL terminated strings */
		raw_fmts[read_size] = '\0';

		/* Get ramstart address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RAMSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, ramstart_str,
			strlen(ramstart_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c text_start", ramstart, &c);
			count |= RAMSTART_BIT;
		}

		/* Get ram rodata start address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDSTART_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_start_str,
			strlen(rodata_start_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_start", rodata_start, &c);
			count |= RDSTART_BIT;
		}

		/* Get ram rodata end address */
		raw_fmts_loc = raw_fmts;
		if (!(count & RDEND_BIT) &&
			(cptr = bcmstrnstr(raw_fmts_loc, read_size, rodata_end_str,
			strlen(rodata_end_str)))) {
			cptr = cptr - BYTES_AHEAD_NUM;
			sscanf(cptr, "%x %c rodata_end", rodata_end, &c);
			count |= RDEND_BIT;
		}

		if (error < (int)read_size) {
			/*
			* since we reset file pos back to earlier pos by
			* GO_BACK_FILE_POS_NUM_BYTES bytes we won't reach EOF.
			* The reason for this is if string is spreaded across
			* bytes, the read function should not miss it.
			* So if ret value is less than read_size, reached EOF don't read further
			*/
			break;
		}
		memset(raw_fmts, 0, read_size);
		/*
		* go back to predefined NUM of bytes so that we won't miss
		* the string and addr even if it comes as splited in next read.
		*/
		dhd_os_seek_file(file, -GO_BACK_FILE_POS_NUM_BYTES);
	}

fail:
	if (raw_fmts) {
		MFREE(osh, raw_fmts, read_size + 1);
		raw_fmts = NULL;
	}
	if (count == ALL_MAP_VAL) {
		return BCME_OK;
	}
	else {
		DHD_ERROR(("%s: readmap error 0X%x \n", __FUNCTION__,
			count));
		return BCME_ERROR;
	}

} /* dhd_parse_map_file */
9535 #endif /* DHD_LINUX_STD_FW_API */
9536
9537 #ifdef PCIE_FULL_DONGLE
/**
 * dhd_event_logtrace_infobuf_pkt_process - validate and dispatch a PCIe
 * info-buffer packet carrying firmware logtrace data.
 *
 * The packet layout is: a uint32 version word (must be PCIE_INFOBUF_V1),
 * followed by a type/length header (type must be
 * PCIE_INFOBUF_V1_TYPE_LOGTRACE), followed by the logtrace payload. Each
 * validated field is PKTPULLed off the packet before the next is read,
 * so the payload handed to dhd_dbg_trace_evnt_handler() starts at the
 * current PKTDATA.
 *
 * @dhdp:       dhd public context (osh used for packet accessors).
 * @pktbuf:     the received packet; consumed in place via PKTPULL
 *              (caller retains ownership of the buffer itself).
 * @event_data: event-log decode state passed through to the handler.
 *
 * Returns BCME_OK when the payload was dispatched, BCME_ERROR on any
 * validation failure (too short, wrong version/type, bad length).
 */
int
dhd_event_logtrace_infobuf_pkt_process(dhd_pub_t *dhdp, void *pktbuf,
		dhd_event_log_t *event_data)
{
	uint32 infobuf_version;
	info_buf_payload_hdr_t *payload_hdr_ptr;
	uint16 payload_hdr_type;
	uint16 payload_hdr_length;

	DHD_TRACE(("%s:Enter\n", __FUNCTION__));

	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(uint32)) {
		DHD_ERROR(("%s: infobuf too small for version field\n",
			__FUNCTION__));
		goto exit;
	}
	infobuf_version = *((uint32 *)PKTDATA(dhdp->osh, pktbuf));
	PKTPULL(dhdp->osh, pktbuf, sizeof(uint32));
	if (infobuf_version != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: infobuf version %d is not PCIE_INFOBUF_V1\n",
			__FUNCTION__, infobuf_version));
		goto exit;
	}

	/* Version 1 infobuf has a single type/length (and then value) field */
	if (PKTLEN(dhdp->osh, pktbuf) < sizeof(info_buf_payload_hdr_t)) {
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		goto exit;
	}
	/* Process/parse the common info payload header (type/length) */
	payload_hdr_ptr = (info_buf_payload_hdr_t *)PKTDATA(dhdp->osh, pktbuf);
	payload_hdr_type = ltoh16(payload_hdr_ptr->type);
	payload_hdr_length = ltoh16(payload_hdr_ptr->length);
	if (payload_hdr_type != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, payload_hdr_type));
		goto exit;
	}
	PKTPULL(dhdp->osh, pktbuf, sizeof(info_buf_payload_hdr_t));

	/* Validate that the specified length isn't bigger than the
	 * provided data.
	 */
	if (payload_hdr_length > PKTLEN(dhdp->osh, pktbuf)) {
		DHD_ERROR(("%s: infobuf logtrace length is bigger"
			" than actual buffer data\n", __FUNCTION__));
		goto exit;
	}
	dhd_dbg_trace_evnt_handler(dhdp, PKTDATA(dhdp->osh, pktbuf),
		event_data, payload_hdr_length);

	return BCME_OK;

exit:
	return BCME_ERROR;
} /* dhd_event_logtrace_infobuf_pkt_process */
9595 #endif /* PCIE_FULL_DONGLE */
9596 #endif /* SHOW_LOGTRACE */
9597
9598 #ifdef BTLOG
9599 int
dhd_bt_log_pkt_process(dhd_pub_t * dhdp,void * pktbuf)9600 dhd_bt_log_pkt_process(dhd_pub_t *dhdp, void *pktbuf)
9601 {
9602 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
9603
9604 dhd_dbg_bt_log_handler(dhdp,
9605 PKTDATA(dhdp->osh, pktbuf), PKTLEN(dhdp->osh, pktbuf));
9606
9607 return BCME_OK;
9608 }
9609 #endif /* BTLOG */
9610
9611 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
9612
9613 /* To handle the TDLS event in the dhd_common.c
9614 */
/**
 * dhd_tdls_event_handler - update the TDLS peer table from a TDLS event.
 * Returns the result of dhd_tdls_update_peer_info().
 */
int dhd_tdls_event_handler(dhd_pub_t *dhd_pub, wl_event_msg_t *event)
{
	int err = BCME_OK;

	GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST()
	err = dhd_tdls_update_peer_info(dhd_pub, event);
	GCC_DIAGNOSTIC_POP()

	return err;
}
9625
/**
 * dhd_free_tdls_peer_list - free every node in the TDLS peer table and
 * reset its head and count. Returns BCME_ERROR if dhd_pub is NULL or the
 * list is already empty with a zero count, BCME_OK otherwise.
 */
int dhd_free_tdls_peer_list(dhd_pub_t *dhd_pub)
{
	tdls_peer_node_t *node = NULL, *tmp = NULL;

	if (!dhd_pub)
		return BCME_ERROR;

	node = dhd_pub->peer_tbl.node;

	if ((node == NULL) && !dhd_pub->peer_tbl.tdls_peer_count)
		return BCME_ERROR;

	/* Walk the singly-linked list, freeing each node after advancing. */
	while (node != NULL) {
		tmp = node;
		node = node->next;
		MFREE(dhd_pub->osh, tmp, sizeof(tdls_peer_node_t));
	}
	dhd_pub->peer_tbl.node = NULL;
	dhd_pub->peer_tbl.tdls_peer_count = 0;
	return BCME_OK;
}
9645 #endif /* #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE) */
9646
9647 /* pretty hex print a contiguous buffer
9648 * based on the debug level specified
9649 */
9650 void
dhd_prhex(const char * msg,volatile uchar * buf,uint nbytes,uint8 dbg_level)9651 dhd_prhex(const char *msg, volatile uchar *buf, uint nbytes, uint8 dbg_level)
9652 {
9653 char line[128], *p;
9654 int len = sizeof(line);
9655 int nchar;
9656 uint i;
9657
9658 if (msg && (msg[0] != '\0')) {
9659 if (dbg_level == DHD_ERROR_VAL)
9660 DHD_ERROR(("%s:\n", msg));
9661 else if (dbg_level == DHD_INFO_VAL)
9662 DHD_INFO(("%s:\n", msg));
9663 else if (dbg_level == DHD_TRACE_VAL)
9664 DHD_TRACE(("%s:\n", msg));
9665 }
9666
9667 p = line;
9668 for (i = 0; i < nbytes; i++) {
9669 if (i % 16 == 0) {
9670 nchar = snprintf(p, len, " %04x: ", i); /* line prefix */
9671 p += nchar;
9672 len -= nchar;
9673 }
9674 if (len > 0) {
9675 nchar = snprintf(p, len, "%02x ", buf[i]);
9676 p += nchar;
9677 len -= nchar;
9678 }
9679
9680 if (i % 16 == 15) {
9681 /* flush line */
9682 if (dbg_level == DHD_ERROR_VAL)
9683 DHD_ERROR(("%s:\n", line));
9684 else if (dbg_level == DHD_INFO_VAL)
9685 DHD_INFO(("%s:\n", line));
9686 else if (dbg_level == DHD_TRACE_VAL)
9687 DHD_TRACE(("%s:\n", line));
9688 p = line;
9689 len = sizeof(line);
9690 }
9691 }
9692
9693 /* flush last partial line */
9694 if (p != line) {
9695 if (dbg_level == DHD_ERROR_VAL)
9696 DHD_ERROR(("%s:\n", line));
9697 else if (dbg_level == DHD_INFO_VAL)
9698 DHD_INFO(("%s:\n", line));
9699 else if (dbg_level == DHD_TRACE_VAL)
9700 DHD_TRACE(("%s:\n", line));
9701 }
9702 }
9703
/**
 * dhd_tput_test - run a host<->dongle throughput test.
 *
 * TX direction: builds tput packets (optional ethernet header, tput header,
 * CRC32 over pkt-id/num-pkts/payload) and sends them in batches bounded by
 * the available tx buffers, waiting for completions between batches; a
 * final STOP-type packet terminates the run. RX direction: simply waits for
 * the receive path (dhd_tput_test_rx) to signal completion. On success the
 * measured throughput (bits/sec) and elapsed time are written back into
 * *tput_data.
 *
 * @dhd:       dhd public context.
 * @tput_data: in/out test parameters and results; version/length are
 *             validated against TPUT_TEST_T_VER/LEN.
 *
 * Returns BCME_OK on success; BCME_BADARG, BCME_BUSY, BCME_BUFTOOLONG,
 * BCME_NOMEM or BCME_ERROR on the corresponding failure.
 */
int
dhd_tput_test(dhd_pub_t *dhd, tput_test_t *tput_data)
{
	struct ether_header ether_hdr;
	tput_pkt_t tput_pkt;
	void *pkt = NULL;
	uint8 *pktdata = NULL;
	uint32 pktsize = 0;
	uint64 total_size = 0;
	uint32 *crc = 0;
	uint32 pktid = 0;
	uint32 total_num_tx_pkts = 0;
	int err = 0, err_exit = 0;
	uint32 i = 0;
	uint64 time_taken = 0;
	int max_txbufs = 0;
	uint32 n_batches = 0;
	uint32 n_remain = 0;
	uint8 tput_pkt_hdr_size = 0;
	bool batch_cnt = FALSE;
	bool tx_stop_pkt = FALSE;

#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
	uint32 cur_intr_poll_period = 0;
	cur_intr_poll_period = dhd_os_get_intr_poll_period();
	/* before running tput_test, set interrupt poll period to a lesser value */
	dhd_os_set_intr_poll_period(dhd->bus, INTR_POLL_PERIOD_CRITICAL);
#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */

	if (tput_data->version != TPUT_TEST_T_VER ||
		tput_data->length != TPUT_TEST_T_LEN) {
		DHD_ERROR(("%s: wrong structure ver/len! \n", __FUNCTION__));
		err_exit = BCME_BADARG;
		goto exit_error;
	}

	if (dhd->tput_data.tput_test_running) {
		DHD_ERROR(("%s: tput test already running ! \n", __FUNCTION__));
		err_exit = BCME_BUSY;
		goto exit_error;
	}
#ifdef PCIE_FULL_DONGLE
	/*
	 * 100 bytes to accommodate ether header and tput header. As of today
	 * both occupy 30 bytes. Rest is reserved.
	 */
	if ((tput_data->payload_size > TPUT_TEST_MAX_PAYLOAD) ||
		(tput_data->payload_size > (DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100))) {
		DHD_ERROR(("%s: payload size is too large! max_payload=%u rx_bufpost_size=%u\n",
			__FUNCTION__, TPUT_TEST_MAX_PAYLOAD,
			(DHD_FLOWRING_RX_BUFPOST_PKTSZ - 100)));
		err_exit = BCME_BUFTOOLONG;
		goto exit_error;
	}
#endif
	/* Cap each TX batch to the available buffers. */
	max_txbufs = dhd_get_max_txbufs(dhd);
	max_txbufs = MIN(max_txbufs, DHD_TPUT_MAX_TX_PKTS_BATCH);

	if (!(tput_data->num_pkts > 0)) {
		DHD_ERROR(("%s: invalid num_pkts: %d to tx\n",
			__FUNCTION__, tput_data->num_pkts));
		err_exit = BCME_ERROR;
		goto exit_error;
	}

	/* Copy the requested parameters in and reset per-run counters. */
	memset(&dhd->tput_data, 0, sizeof(dhd->tput_data));
	memcpy(&dhd->tput_data, tput_data, sizeof(*tput_data));
	dhd->tput_data.pkts_bad = dhd->tput_data.pkts_good = 0;
	dhd->tput_data.pkts_cmpl = 0;
	dhd->tput_start_ts = dhd->tput_stop_ts = 0;

	/* payload_size includes 12 bytes already accounted for in tput_pkt_t */
	if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
		pktsize = sizeof(ether_hdr) + sizeof(tput_pkt_t) +
			(tput_data->payload_size - 12);
	} else {
		pktsize = sizeof(tput_pkt_t) +
			(tput_data->payload_size - 12);
	}

	/* Header length = bytes from mac_sta up to (not including) crc32. */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt.crc32 -
		(uint8 *)&tput_pkt.mac_sta);

	/* mark the tput test as started */
	dhd->tput_data.tput_test_running = TRUE;

	if (tput_data->direction == TPUT_DIR_TX) {
		/* for ethernet header */
		memcpy(ether_hdr.ether_shost, tput_data->mac_sta, ETHER_ADDR_LEN);
		memcpy(ether_hdr.ether_dhost, tput_data->mac_ap, ETHER_ADDR_LEN);
		ether_hdr.ether_type = hton16(ETHER_TYPE_IP);

		/* fill in the tput pkt */
		memset(&tput_pkt, 0, sizeof(tput_pkt));
		memcpy(tput_pkt.mac_ap, tput_data->mac_ap, ETHER_ADDR_LEN);
		memcpy(tput_pkt.mac_sta, tput_data->mac_sta, ETHER_ADDR_LEN);
		tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
		tput_pkt.num_pkts = hton32(tput_data->num_pkts);

		/* Split the total into full batches plus a remainder batch. */
		if (tput_data->num_pkts > (uint32)max_txbufs) {
			n_batches = tput_data->num_pkts / max_txbufs;
			n_remain = tput_data->num_pkts % max_txbufs;
		} else {
			n_batches = 0;
			n_remain = tput_data->num_pkts;
		}
		DHD_ERROR(("%s: num_pkts: %u n_batches: %u n_remain: %u\n",
			__FUNCTION__, tput_data->num_pkts, n_batches, n_remain));

		do {
			/* reset before every batch */
			dhd->batch_tx_pkts_cmpl = 0;
			if (n_batches) {
				dhd->batch_tx_num_pkts = max_txbufs;
				--n_batches;
			} else if (n_remain) {
				dhd->batch_tx_num_pkts = n_remain;
				n_remain = 0;
			} else {
				DHD_ERROR(("Invalid. This should not hit\n"));
			}

			dhd->tput_start_ts = OSL_SYSUPTIME_US();
			/* The extra iteration while tx_stop_pkt is set sends the
			 * STOP packet after the last data packet of the run.
			 */
			for (i = 0; (i < dhd->batch_tx_num_pkts) || (tx_stop_pkt); ++i) {
				pkt = PKTGET(dhd->osh, pktsize, TRUE);
				if (!pkt) {
					dhd->tput_data.tput_test_running = FALSE;
					DHD_ERROR(("%s: PKTGET fails ! Not enough Tx buffers\n",
						__FUNCTION__));
					DHD_ERROR(("%s: pkts_good:%u; pkts_bad:%u; pkts_cmpl:%u\n",
						__FUNCTION__, dhd->tput_data.pkts_good,
						dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
					err_exit = BCME_NOMEM;
					goto exit_error;
				}
				pktdata = PKTDATA(dhd->osh, pkt);
				PKTSETLEN(dhd->osh, pkt, pktsize);
				memset(pktdata, 0, pktsize);
				if (tput_data->flags & TPUT_TEST_USE_ETHERNET_HDR) {
					memcpy(pktdata, &ether_hdr, sizeof(ether_hdr));
					pktdata += sizeof(ether_hdr);
				}
				/* send stop pkt as last pkt */
				if (tx_stop_pkt) {
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_STOP);
					tx_stop_pkt = FALSE;
				} else
					tput_pkt.pkt_type = hton16(TPUT_PKT_TYPE_NORMAL);
				tput_pkt.pkt_id = hton32(pktid++);
				tput_pkt.crc32 = 0;
				memcpy(pktdata, &tput_pkt, sizeof(tput_pkt));
				/* compute crc32 over the pkt-id, num-pkts and data fields */
				crc = (uint32 *)(pktdata + tput_pkt_hdr_size);
				*crc = hton32(hndcrc32(pktdata + tput_pkt_hdr_size + 4,
					8 + (tput_data->payload_size - 12),
					CRC32_INIT_VALUE));

				err = dhd_sendpkt(dhd, 0, pkt);
				if (err != BCME_OK) {
					DHD_INFO(("%s: send pkt (id = %u) fails (err = %d) ! \n",
						__FUNCTION__, pktid, err));
					dhd->tput_data.pkts_bad++;
				}
				total_num_tx_pkts++;
				if ((total_num_tx_pkts == tput_data->num_pkts) && (!tx_stop_pkt)) {
					tx_stop_pkt = TRUE;
				}
			}
			DHD_INFO(("%s: TX done, wait for completion...\n", __FUNCTION__));
			if (!dhd_os_tput_test_wait(dhd, NULL,
					TPUT_TEST_WAIT_TIMEOUT_DEFAULT)) {
				dhd->tput_stop_ts = OSL_SYSUPTIME_US();
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: TX completion timeout !"
					" Total Tx pkts (including STOP) = %u; pkts cmpl = %u; \n",
					__FUNCTION__, total_num_tx_pkts, dhd->batch_tx_pkts_cmpl));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			/* NOTE(review): time_taken is latched on the first completed
			 * batch only; subsequent batches keep the first measurement.
			 */
			if ((dhd->tput_start_ts && dhd->tput_stop_ts &&
				(dhd->tput_stop_ts > dhd->tput_start_ts)) || (time_taken)) {
				if (!time_taken) {
					time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
				}
			} else {
				dhd->tput_data.tput_test_running = FALSE;
				DHD_ERROR(("%s: bad timestamp while cal tx batch time\n",
					__FUNCTION__));
				err_exit = BCME_ERROR;
				goto exit_error;
			}
			if (n_batches || n_remain) {
				batch_cnt = TRUE;
			} else {
				batch_cnt = FALSE;
			}
		} while (batch_cnt);
	} else {
		/* TPUT_DIR_RX */
		DHD_INFO(("%s: waiting for RX completion... \n", __FUNCTION__));
		if (!dhd_os_tput_test_wait(dhd, NULL, tput_data->timeout_ms)) {
			DHD_ERROR(("%s: RX completion timeout ! \n", __FUNCTION__));
			dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		}
	}

	/* calculate the throughput in bits per sec */
	if (dhd->tput_start_ts && dhd->tput_stop_ts &&
			(dhd->tput_stop_ts > dhd->tput_start_ts)) {
		time_taken = dhd->tput_stop_ts - dhd->tput_start_ts;
		time_taken = DIV_U64_BY_U32(time_taken, MSEC_PER_SEC); /* convert to ms */
		dhd->tput_data.time_ms = time_taken;
		if (time_taken) {
			total_size = pktsize * dhd->tput_data.pkts_cmpl * 8;
			dhd->tput_data.tput_bps = DIV_U64_BY_U64(total_size, time_taken);
			/* convert from ms to seconds */
			dhd->tput_data.tput_bps = dhd->tput_data.tput_bps * 1000;
		}
	} else {
		DHD_ERROR(("%s: bad timestamp !\n", __FUNCTION__));
	}
	DHD_INFO(("%s: DONE. tput = %llu bps, time = %llu ms\n", __FUNCTION__,
		dhd->tput_data.tput_bps, dhd->tput_data.time_ms));

	/* Report results back to the caller's structure. */
	memcpy(tput_data, &dhd->tput_data, sizeof(dhd->tput_data));

	dhd->tput_data.tput_test_running = FALSE;

	err_exit = BCME_OK;

exit_error:
	DHD_ERROR(("%s: pkts_good = %u; pkts_bad = %u; pkts_cmpl = %u\n",
		__FUNCTION__, dhd->tput_data.pkts_good,
		dhd->tput_data.pkts_bad, dhd->tput_data.pkts_cmpl));
#if defined(DHD_EFI) && defined(DHD_INTR_POLL_PERIOD_DYNAMIC)
	/* restore interrupt poll period to the previous existing value */
	dhd_os_set_intr_poll_period(dhd->bus, cur_intr_poll_period);
#endif /* DHD_EFI && DHD_INTR_POLL_PERIOD_DYNAMIC */

	return err_exit;
}
9944
/**
 * dhd_tput_test_rx - per-packet receive hook for the throughput test.
 *
 * Validates each received tput packet (AP/STA MAC addresses, CRC32 over the
 * pkt-id/num-pkts/payload region), maintains the good/bad/completed
 * counters, latches start/stop timestamps, and wakes the waiting
 * dhd_tput_test() thread when the STOP packet or the expected packet count
 * arrives. Packets beyond the expected count are dropped.
 *
 * @dhd: dhd public context holding dhd->tput_data state for the active run.
 * @pkt: received packet buffer (not freed here; caller owns it).
 */
void
dhd_tput_test_rx(dhd_pub_t *dhd, void *pkt)
{
	uint8 *pktdata = NULL;
	tput_pkt_t *tput_pkt = NULL;
	uint32 crc = 0;
	uint8 tput_pkt_hdr_size = 0;

	pktdata = PKTDATA(dhd->osh, pkt);
	/* Skip over the ethernet header if the test was configured with one. */
	if (dhd->tput_data.flags & TPUT_TEST_USE_ETHERNET_HDR)
		pktdata += sizeof(struct ether_header);
	tput_pkt = (tput_pkt_t *)pktdata;

	/* record the timestamp of the first packet received */
	if (dhd->tput_data.pkts_cmpl == 0) {
		dhd->tput_start_ts = OSL_SYSUPTIME_US();
	}

	/* STOP packets do not count towards the completed total. */
	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP &&
		dhd->tput_data.pkts_cmpl <= dhd->tput_data.num_pkts) {
		dhd->tput_data.pkts_cmpl++;
	}
	/* drop rx packets received beyond the specified # */
	if (dhd->tput_data.pkts_cmpl > dhd->tput_data.num_pkts)
		return;

	DHD_TRACE(("%s: Rx tput test pkt, id = %u ; type = %u\n", __FUNCTION__,
		ntoh32(tput_pkt->pkt_id), ntoh16(tput_pkt->pkt_type)));

	/* discard if mac addr of AP/STA does not match the specified ones */
	if ((memcmp(tput_pkt->mac_ap, dhd->tput_data.mac_ap,
			ETHER_ADDR_LEN) != 0) ||
			(memcmp(tput_pkt->mac_sta, dhd->tput_data.mac_sta,
			ETHER_ADDR_LEN) != 0)) {
		dhd->tput_data.pkts_bad++;
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad AP/STA mac !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		return;
	}

	/* CRC covers pkt_id + num_pkts (8 bytes) plus the payload; the
	 * "+ 4" skips the crc32 field itself. Mirrors the TX-side layout.
	 */
	tput_pkt_hdr_size = (uint8)((uint8 *)&tput_pkt->crc32 -
		(uint8 *)&tput_pkt->mac_sta);
	pktdata += tput_pkt_hdr_size + 4;
	crc = hndcrc32(pktdata, 8 + (dhd->tput_data.payload_size - 12),
		CRC32_INIT_VALUE);
	if (crc != ntoh32(tput_pkt->crc32)) {
		DHD_INFO(("%s: dropping tput pkt with id %u due to bad CRC !\n",
			__FUNCTION__, ntoh32(tput_pkt->pkt_id)));
		dhd->tput_data.pkts_bad++;
		return;
	}

	if (ntoh16(tput_pkt->pkt_type) != TPUT_PKT_TYPE_STOP)
		dhd->tput_data.pkts_good++;

	/* if we have received the stop packet or all the # of pkts, we're done */
	if (ntoh16(tput_pkt->pkt_type) == TPUT_PKT_TYPE_STOP ||
			dhd->tput_data.pkts_cmpl == dhd->tput_data.num_pkts) {
		dhd->tput_stop_ts = OSL_SYSUPTIME_US();
		dhd_os_tput_test_wake(dhd);
	}
}
10007
10008 #ifdef DUMP_IOCTL_IOV_LIST
10009 void
dhd_iov_li_append(dhd_pub_t * dhd,dll_t * list_head,dll_t * node)10010 dhd_iov_li_append(dhd_pub_t *dhd, dll_t *list_head, dll_t *node)
10011 {
10012 dll_t *item;
10013 dhd_iov_li_t *iov_li;
10014 dhd->dump_iovlist_len++;
10015
10016 if (dhd->dump_iovlist_len == IOV_LIST_MAX_LEN+1) {
10017 item = dll_head_p(list_head);
10018 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10019 dll_delete(item);
10020 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
10021 dhd->dump_iovlist_len--;
10022 }
10023 dll_append(list_head, node);
10024 }
10025
10026 void
dhd_iov_li_print(dll_t * list_head)10027 dhd_iov_li_print(dll_t *list_head)
10028 {
10029 dhd_iov_li_t *iov_li;
10030 dll_t *item, *next;
10031 uint8 index = 0;
10032 for (item = dll_head_p(list_head); !dll_end(list_head, item); item = next) {
10033 next = dll_next_p(item);
10034 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10035 DHD_ERROR(("%d:cmd_name = %s, cmd = %d.\n", ++index, iov_li->buff, iov_li->cmd));
10036 }
10037 }
10038
10039 void
dhd_iov_li_delete(dhd_pub_t * dhd,dll_t * list_head)10040 dhd_iov_li_delete(dhd_pub_t *dhd, dll_t *list_head)
10041 {
10042 dll_t *item;
10043 dhd_iov_li_t *iov_li;
10044 while (!(dll_empty(list_head))) {
10045 item = dll_head_p(list_head);
10046 iov_li = (dhd_iov_li_t *)CONTAINEROF(item, dhd_iov_li_t, list);
10047 dll_delete(item);
10048 MFREE(dhd->osh, iov_li, sizeof(*iov_li));
10049 }
10050 }
10051 #endif /* DUMP_IOCTL_IOV_LIST */
10052
10053 #ifdef EWP_EDL
10054 /* For now we are allocating memory for EDL ring using DMA_ALLOC_CONSISTENT
10055 * The reason being that, in hikey, if we try to DMA_MAP prealloced memory
10056 * it is failing with an 'out of space in SWIOTLB' error
10057 */
10058 int
dhd_edl_mem_init(dhd_pub_t * dhd)10059 dhd_edl_mem_init(dhd_pub_t *dhd)
10060 {
10061 int ret = 0;
10062
10063 memset(&dhd->edl_ring_mem, 0, sizeof(dhd->edl_ring_mem));
10064 ret = dhd_dma_buf_alloc(dhd, &dhd->edl_ring_mem, DHD_EDL_RING_SIZE);
10065 if (ret != BCME_OK) {
10066 DHD_ERROR(("%s: alloc of edl_ring_mem failed\n",
10067 __FUNCTION__));
10068 return BCME_ERROR;
10069 }
10070 return BCME_OK;
10071 }
10072
10073 /*
10074 * NOTE:- that dhd_edl_mem_deinit need NOT be called explicitly, because the dma_buf
10075 * for EDL is freed during 'dhd_prot_detach_edl_rings' which is called during de-init.
10076 */
10077 void
dhd_edl_mem_deinit(dhd_pub_t * dhd)10078 dhd_edl_mem_deinit(dhd_pub_t *dhd)
10079 {
10080 if (dhd->edl_ring_mem.va != NULL)
10081 dhd_dma_buf_free(dhd, &dhd->edl_ring_mem);
10082 }
10083
/*
 * Validate one EDL ring work item and hand its log-trace payload to
 * dhd_dbg_trace_evnt_handler().
 *
 * Parameters:
 *   dhdp            - driver public context
 *   data            - start of the work item (cmn_msg_hdr_t followed by payload)
 *   evt_decode_data - opaque decode context passed through to the handler
 *
 * Returns BCME_OK on success, or a BCME_ error identifying which
 * validation step failed (bad args, infobuf version, payload header
 * type, or length).
 */
int
dhd_event_logtrace_process_edl(dhd_pub_t *dhdp, uint8 *data,
	void *evt_decode_data)
{
	msg_hdr_edl_t *msg = NULL;
	cmn_msg_hdr_t *cmn_msg_hdr = NULL;
	uint8 *buf = NULL;

	if (!data || !dhdp || !evt_decode_data) {
		DHD_ERROR(("%s: invalid args ! \n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* format of data in each work item in the EDL ring:
	 * |cmn_msg_hdr_t |payload (var len)|cmn_msg_hdr_t|
	 * payload = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>|
	 */
	cmn_msg_hdr = (cmn_msg_hdr_t *)data;
	msg = (msg_hdr_edl_t *)(data + sizeof(cmn_msg_hdr_t));
	buf = (uint8 *)msg;
	/* validate the fields */
	if (ltoh32(msg->infobuf_ver) != PCIE_INFOBUF_V1) {
		DHD_ERROR(("%s: Skipping msg with invalid infobuf ver (0x%x)"
			" expected (0x%x)\n", __FUNCTION__,
			msg->infobuf_ver, PCIE_INFOBUF_V1));
		return BCME_VERSION;
	}

	/* in EDL, the request_id field of cmn_msg_hdr is overloaded to carry payload length */
	if (sizeof(info_buf_payload_hdr_t) > cmn_msg_hdr->request_id) {
		/* NOTE(review): BCME_BUFTOOLONG is returned for a too-SMALL
		 * buffer here - confirm whether BCME_BUFTOOSHORT was intended
		 * (callers may depend on the current code, so left as-is).
		 */
		DHD_ERROR(("%s: infobuf too small for v1 type/length fields\n",
			__FUNCTION__));
		return BCME_BUFTOOLONG;
	}

	if (ltoh16(msg->pyld_hdr.type) != PCIE_INFOBUF_V1_TYPE_LOGTRACE) {
		DHD_ERROR(("%s: payload_hdr_type %d is not V1_TYPE_LOGTRACE\n",
			__FUNCTION__, ltoh16(msg->pyld_hdr.type)));
		return BCME_BADOPTION;
	}

	/* payload length claimed by the header must fit in the work item */
	if (ltoh16(msg->pyld_hdr.length) > cmn_msg_hdr->request_id) {
		DHD_ERROR(("%s: infobuf logtrace length %u is bigger"
			" than available buffer size %u\n", __FUNCTION__,
			ltoh16(msg->pyld_hdr.length), cmn_msg_hdr->request_id));
		return BCME_BADLEN;
	}

	/* dhd_dbg_trace_evnt_handler expects the data to start from msgtrace_hdr_t */
	buf += sizeof(msg->infobuf_ver) + sizeof(msg->pyld_hdr);
	dhd_dbg_trace_evnt_handler(dhdp, buf, evt_decode_data,
		ltoh16(msg->pyld_hdr.length));

	/*
	 * check 'dhdp->logtrace_pkt_sendup' and if true alloc an skb
	 * copy the event data to the skb and send it up the stack
	 */
	if (dhdp->logtrace_pkt_sendup) {
		DHD_INFO(("%s: send up event log, len %u bytes\n", __FUNCTION__,
			(uint32)(ltoh16(msg->pyld_hdr.length) +
			sizeof(info_buf_payload_hdr_t) + 4)));
		dhd_sendup_info_buf(dhdp, (uint8 *)msg);
	}

	return BCME_OK;
}
10150 #endif /* EWP_EDL */
10151
10152 #ifdef DHD_LOG_DUMP
10153 #define DEBUG_DUMP_TRIGGER_INTERVAL_SEC 4
/*
 * Trigger a debug ("SYSDUMP") log dump for the given subcmd.
 *
 * Rate-limited: a request arriving within
 * DEBUG_DUMP_TRIGGER_INTERVAL_SEC seconds of the previous trigger is
 * skipped. Depending on build flags this also schedules the log-dump
 * work item, a firmware memory dump, and the pktlog dump.
 */
void
dhd_log_dump_trigger(dhd_pub_t *dhdp, int subcmd)
{
#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	log_dump_type_t *flush_type;
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
	uint64 current_time_sec;

	if (!dhdp) {
		DHD_ERROR(("dhdp is NULL !\n"));
		return;
	}

	if (subcmd >= CMD_MAX || subcmd < CMD_DEFAULT) {
		DHD_ERROR(("%s : Invalid subcmd \n", __FUNCTION__));
		return;
	}

	current_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

	DHD_ERROR(("%s: current_time_sec=%lld debug_dump_time_sec=%lld interval=%d\n",
		__FUNCTION__, current_time_sec, dhdp->debug_dump_time_sec,
		DEBUG_DUMP_TRIGGER_INTERVAL_SEC));

	/* rate-limit: skip if the previous dump was triggered too recently */
	if ((current_time_sec - dhdp->debug_dump_time_sec) < DEBUG_DUMP_TRIGGER_INTERVAL_SEC) {
		DHD_ERROR(("%s : Last debug dump triggered(%lld) within %d seconds, so SKIP\n",
			__FUNCTION__, dhdp->debug_dump_time_sec, DEBUG_DUMP_TRIGGER_INTERVAL_SEC));
		return;
	}

	clear_debug_dump_time(dhdp->debug_dump_time_str);
#ifdef DHD_PCIE_RUNTIMEPM
	/* wake up RPM if SYSDUMP is triggered */
	dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
#endif /* DHD_PCIE_RUNTIMEPM */

	dhdp->debug_dump_subcmd = subcmd;

	/* record trigger time for the next rate-limit check above */
	dhdp->debug_dump_time_sec = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);

#if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	/* flush_type is freed at do_dhd_log_dump function */
	flush_type = MALLOCZ(dhdp->osh, sizeof(log_dump_type_t));
	if (flush_type) {
		*flush_type = DLD_BUF_TYPE_ALL;
		dhd_schedule_log_dump(dhdp, flush_type);
	} else {
		DHD_ERROR(("%s Fail to malloc flush_type\n", __FUNCTION__));
		return;
	}
#endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */

	/* Inside dhd_mem_dump, event notification will be sent to HAL and
	 * from other context DHD pushes memdump, debug_dump and pktlog dump
	 * to HAL and HAL will write into file
	 */
#if (defined(BCMPCIE) || defined(BCMSDIO)) && defined(DHD_FW_COREDUMP)
	dhdp->memdump_type = DUMP_TYPE_BY_SYSDUMP;
	dhd_bus_mem_dump(dhdp);
#endif /* BCMPCIE && DHD_FW_COREDUMP */

#if defined(DHD_PKT_LOGGING) && defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	dhd_schedule_pktlog_dump(dhdp);
#endif /* DHD_PKT_LOGGING && DHD_DUMP_FILE_WRITE_FROM_KERNEL */
}
10220 #endif /* DHD_LOG_DUMP */
10221
10222 #if (defined(LINUX) || defined(DHD_EFI)) && defined(SHOW_LOGTRACE)
10223 int
dhd_print_fw_ver_from_file(dhd_pub_t * dhdp,char * fwpath)10224 dhd_print_fw_ver_from_file(dhd_pub_t *dhdp, char *fwpath)
10225 {
10226 void *file = NULL;
10227 int size = 0;
10228 char buf[FW_VER_STR_LEN];
10229 char *str = NULL;
10230 int ret = BCME_OK;
10231
10232 if (!fwpath)
10233 return BCME_BADARG;
10234
10235 file = dhd_os_open_image1(dhdp, fwpath);
10236 if (!file) {
10237 ret = BCME_ERROR;
10238 goto exit;
10239 }
10240 size = dhd_os_get_image_size(file);
10241 if (!size) {
10242 ret = BCME_ERROR;
10243 goto exit;
10244 }
10245
10246 /* seek to the last 'X' bytes in the file */
10247 if (dhd_os_seek_file(file, size - FW_VER_STR_LEN) != BCME_OK) {
10248 ret = BCME_ERROR;
10249 goto exit;
10250 }
10251
10252 /* read the last 'X' bytes of the file to a buffer */
10253 memset(buf, 0, FW_VER_STR_LEN);
10254 if (dhd_os_get_image_block(buf, FW_VER_STR_LEN - 1, file) < 0) {
10255 ret = BCME_ERROR;
10256 goto exit;
10257 }
10258 /* search for 'Version' in the buffer */
10259 str = bcmstrnstr(buf, FW_VER_STR_LEN, FW_VER_STR, strlen(FW_VER_STR));
10260 if (!str) {
10261 ret = BCME_ERROR;
10262 goto exit;
10263 }
10264 /* go back in the buffer to the last ascii character */
10265 while (str != buf &&
10266 (*str >= ' ' && *str <= '~')) {
10267 --str;
10268 }
10269 /* reverse the final decrement, so that str is pointing
10270 * to the first ascii character in the buffer
10271 */
10272 ++str;
10273
10274 if (strlen(str) > (FW_VER_STR_LEN - 1)) {
10275 ret = BCME_BADLEN;
10276 goto exit;
10277 }
10278
10279 DHD_ERROR(("FW version in file '%s': %s\n", fwpath, str));
10280 /* copy to global variable, so that in case FW load fails, the
10281 * core capture logs will contain FW version read from the file
10282 */
10283 memset(fw_version, 0, FW_VER_STR_LEN);
10284 strlcpy(fw_version, str, FW_VER_STR_LEN);
10285
10286 exit:
10287 if (file)
10288 dhd_os_close_image1(dhdp, file);
10289
10290 return ret;
10291 }
10292 #endif /* LINUX || DHD_EFI */
10293
10294 #if defined(DHD_AWDL) && defined(AWDL_SLOT_STATS)
10295 void
dhd_clear_awdl_stats(dhd_pub_t * dhd)10296 dhd_clear_awdl_stats(dhd_pub_t *dhd)
10297 {
10298 unsigned long flags;
10299 /*
10300 * Since event path(ex: WLC_E_AWDL_AW) and bus path(tx status process) update
10301 * the AWDL data acquire lock before clearing the AWDL stats.
10302 */
10303 DHD_AWDL_STATS_LOCK(dhd->awdl_stats_lock, flags);
10304 memset(dhd->awdl_stats, 0, sizeof(dhd->awdl_stats));
10305 DHD_AWDL_STATS_UNLOCK(dhd->awdl_stats_lock, flags);
10306 }
10307 #endif /* DHD_AWDL && AWDL_SLOT_STATS */
10308
10309 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
10310
/*
 * Populate dhd->hang_info with a space-separated hang report for an
 * IOCTL response timeout: reason code, version, timestamp, ifidx and
 * the wl_ioctl_t fields, followed by the ioctl payload dumped as
 * HANG_RAW_DEL-separated 32-bit hex words. dhd->hang_info_cnt tracks
 * the number of fields written.
 */
static void
copy_hang_info_ioctl_timeout(dhd_pub_t *dhd, int ifidx, wl_ioctl_t *ioc)
{
	int remain_len;
	int i;
	int *cnt;
	char *dest;
	int bytes_written;
	uint32 ioc_dwlen = 0;

	if (!dhd || !dhd->hang_info) {
		DHD_ERROR(("%s dhd=%p hang_info=%p\n",
			__FUNCTION__, dhd, (dhd ? dhd->hang_info : NULL)));
		return;
	}

	cnt = &dhd->hang_info_cnt;
	dest = dhd->hang_info;

	memset(dest, 0, VENDOR_SEND_HANG_EXT_INFO_LEN);
	(*cnt) = 0;

	bytes_written = 0;
	remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;

	get_debug_dump_time(dhd->debug_dump_time_hang_str);
	copy_debug_dump_time(dhd->debug_dump_time_str, dhd->debug_dump_time_hang_str);

	/* fixed header fields of the hang report */
	bytes_written += scnprintf(&dest[bytes_written], remain_len, "%d %d %s %d %d %d %d %d %d ",
		HANG_REASON_IOCTL_RESP_TIMEOUT, VENDOR_SEND_HANG_EXT_INFO_VER,
		dhd->debug_dump_time_hang_str,
		ifidx, ioc->cmd, ioc->len, ioc->set, ioc->used, ioc->needed);
	(*cnt) = HANG_FIELD_IOCTL_RESP_TIMEOUT_CNT;

	clear_debug_dump_time(dhd->debug_dump_time_hang_str);

	/* Access ioc->buf only if the ioc->len is more than 4 bytes */
	ioc_dwlen = (uint32)(ioc->len / sizeof(uint32));
	if (ioc_dwlen > 0) {
		const uint32 *ioc_buf = (const uint32 *)ioc->buf;

		/* first payload word carries no leading delimiter */
		remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
		GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
		bytes_written += scnprintf(&dest[bytes_written], remain_len,
			"%08x", *(uint32 *)(ioc_buf++));
		GCC_DIAGNOSTIC_POP();
		(*cnt)++;
		if ((*cnt) >= HANG_FIELD_CNT_MAX) {
			return;
		}

		/* NOTE(review): the loop guard uses '<= HANG_FIELD_CNT_MAX'
		 * while the early-return above uses '>=', so the count can
		 * reach HANG_FIELD_CNT_MAX + 1 -- confirm whether '<' was
		 * intended before changing (parsers may rely on it).
		 */
		for (i = 1; i < ioc_dwlen && *cnt <= HANG_FIELD_CNT_MAX;
			i++, (*cnt)++) {
			remain_len = VENDOR_SEND_HANG_EXT_INFO_LEN - bytes_written;
			GCC_DIAGNOSTIC_PUSH_SUPPRESS_CAST();
			bytes_written += scnprintf(&dest[bytes_written], remain_len, "%c%08x",
				HANG_RAW_DEL, *(uint32 *)(ioc_buf++));
			GCC_DIAGNOSTIC_POP();
		}
	}

	DHD_INFO(("%s hang info len: %d data: %s\n",
		__FUNCTION__, (int)strlen(dhd->hang_info), dhd->hang_info));
}
10375
10376 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
10377
10378 #if defined(DHD_H2D_LOG_TIME_SYNC)
10379 /*
10380 * Helper function:
10381 * Used for Dongle console message time syncing with Host printk
10382 */
/*
 * Push the current host clock (in milliseconds) to the dongle via the
 * "rte_timesync" iovar so dongle console messages can be aligned with
 * host printk timestamps. On failure, periodic syncing is disabled by
 * zeroing dhd_rte_time_sync_ms.
 */
void dhd_h2d_log_time_sync(dhd_pub_t *dhd)
{
	uint64 now_ms;

	/* local_clock() is in nanoseconds; the dongle expects milliseconds */
	now_ms = local_clock();
	do_div(now_ms, 1000000);

	if (dhd_wl_ioctl_set_intiovar(dhd, "rte_timesync", now_ms, WLC_SET_VAR, TRUE, 0)) {
		DHD_ERROR(("%s rte_timesync **** FAILED ****\n", __FUNCTION__));
		/* Stopping HOST Dongle console time syncing */
		dhd->dhd_rte_time_sync_ms = 0;
	}
}
10400 #endif /* DHD_H2D_LOG_TIME_SYNC */
10401
10402 #if defined(LINUX) || defined(linux)
/* Configurations of ecounters to be enabled by default in FW.
 * Consumed by dhd_start_ecounters(): each row becomes one XTLV request
 * of {scope flag, slice-or-interface index, stats report type}.
 */
static ecounters_cfg_t ecounters_cfg_tbl[] = {
	/* Global ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_BUS_PCIE},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_TX_AMPDU_STATS},
	// {ECOUNTERS_STATS_TYPES_FLAG_GLOBAL, 0x0, WL_IFSTATS_XTLV_RX_AMPDU_STATS},

	/* Slice specific ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x0, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_SLICESTATS_XTLV_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_SLICE, 0x1, WL_IFSTATS_XTLV_WL_SLICE_BTCOEX},

	/* Interface specific ecounters */
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_MGT_CNT},

	/* secondary interface */
	/* XXX REMOVE for temporal, will be enabled after decision
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_IF_PERIODIC_STATE},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_GENERIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_INFRA_SPECIFIC},
	{ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x1, WL_IFSTATS_XTLV_MGT_CNT},
	*/
};
10429
/* XXX: Same event id shall be defined in consecutive order in the below table
 * (dhd_start_event_ecounters() groups consecutive rows sharing an event_id
 * into one request).
 */
static event_ecounters_cfg_t event_ecounters_cfg_tbl[] = {
	/* Interface specific event ecounters */
	{WLC_E_DEAUTH_IND, ECOUNTERS_STATS_TYPES_FLAG_IFACE, 0x0, WL_IFSTATS_XTLV_IF_EVENT_STATS},
};
10435
10436 /* Accepts an argument to -s, -g or -f and creates an XTLV */
/*
 * Build a WL_ECOUNTERS_XTLV_REPORT_REQ XTLV container wrapping a single
 * ecounters_stats_types_report_req_t that requests one stats type
 * ('stats_rep') for the given scope ('type' = global/slice/iface with
 * 'if_slice_idx' selecting the slice bit or interface index).
 *
 * On success (*xtlv) points at a MALLOCZ'd buffer the CALLER must free
 * (as done in dhd_start_ecounters); on failure (*xtlv) is NULL and a
 * BCME_ error is returned.
 */
int
dhd_create_ecounters_params(dhd_pub_t *dhd, uint16 type, uint16 if_slice_idx,
	uint16 stats_rep, uint8 **xtlv)
{
	uint8 *req_xtlv = NULL;
	ecounters_stats_types_report_req_t *req;
	bcm_xtlvbuf_t xtlvbuf, container_xtlvbuf;
	ecountersv2_xtlv_list_elt_t temp;
	uint16 xtlv_len = 0, total_len = 0;
	int rc = BCME_OK;

	/* fill in the stat type XTLV. For now there is no explicit TLV for the stat type. */
	temp.id = stats_rep;
	temp.len = 0;

	/* Hence len/data = 0/NULL */
	xtlv_len += temp.len + BCM_XTLV_HDR_SIZE;

	/* Total length of the container */
	total_len = BCM_XTLV_HDR_SIZE +
		OFFSETOF(ecounters_stats_types_report_req_t, stats_types_req) + xtlv_len;

	/* Now allocate a structure for the entire request */
	if ((req_xtlv = (uint8 *)MALLOCZ(dhd->osh, total_len)) == NULL) {
		rc = BCME_NOMEM;
		goto fail;
	}

	/* container XTLV context */
	bcm_xtlv_buf_init(&container_xtlvbuf, (uint8 *)req_xtlv, total_len,
		BCM_XTLV_OPTION_ALIGN32);

	/* Fill other XTLVs in the container. Leave space for XTLV headers */
	req = (ecounters_stats_types_report_req_t *)(req_xtlv + BCM_XTLV_HDR_SIZE);
	req->flags = type;
	/* slice requests carry a slice bitmask; iface requests carry the if index */
	if (type == ECOUNTERS_STATS_TYPES_FLAG_SLICE) {
		req->slice_mask = 0x1 << if_slice_idx;
	} else if (type == ECOUNTERS_STATS_TYPES_FLAG_IFACE) {
		req->if_index = if_slice_idx;
	}

	/* Fill remaining XTLVs */
	bcm_xtlv_buf_init(&xtlvbuf, (uint8*) req->stats_types_req, xtlv_len,
		BCM_XTLV_OPTION_ALIGN32);
	if (bcm_xtlv_put_data(&xtlvbuf, temp.id, NULL, temp.len)) {
		DHD_ERROR(("Error creating XTLV for requested stats type = %d\n", temp.id));
		rc = BCME_ERROR;
		goto fail;
	}

	/* fill the top level container and get done with the XTLV container */
	rc = bcm_xtlv_put_data(&container_xtlvbuf, WL_ECOUNTERS_XTLV_REPORT_REQ, NULL,
		bcm_xtlv_buf_len(&xtlvbuf) + OFFSETOF(ecounters_stats_types_report_req_t,
		stats_types_req));

	if (rc) {
		DHD_ERROR(("Error creating parent XTLV for type = %d\n", req->flags));
		goto fail;
	}

fail:
	/* on failure, free the buffer here and hand back NULL */
	if (rc && req_xtlv) {
		MFREE(dhd->osh, req_xtlv, total_len);
		req_xtlv = NULL;
	}

	/* update the xtlv pointer */
	*xtlv = req_xtlv;
	return rc;
}
10507
10508 static int
dhd_ecounter_autoconfig(dhd_pub_t * dhd)10509 dhd_ecounter_autoconfig(dhd_pub_t *dhd)
10510 {
10511 int rc = BCME_OK;
10512 uint32 buf;
10513 rc = dhd_iovar(dhd, 0, "ecounters_autoconfig", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
10514
10515 if (rc != BCME_OK) {
10516
10517 if (rc != BCME_UNSUPPORTED) {
10518 rc = BCME_OK;
10519 DHD_ERROR(("%s Ecounter autoconfig in fw failed : %d\n", __FUNCTION__, rc));
10520 } else {
10521 DHD_ERROR(("%s Ecounter autoconfig in FW not supported\n", __FUNCTION__));
10522 }
10523 }
10524
10525 return rc;
10526 }
10527
10528 int
dhd_ecounter_configure(dhd_pub_t * dhd,bool enable)10529 dhd_ecounter_configure(dhd_pub_t *dhd, bool enable)
10530 {
10531 int rc = BCME_OK;
10532 if (enable) {
10533 if (dhd_ecounter_autoconfig(dhd) != BCME_OK) {
10534 if ((rc = dhd_start_ecounters(dhd)) != BCME_OK) {
10535 DHD_ERROR(("%s Ecounters start failed\n", __FUNCTION__));
10536 } else if ((rc = dhd_start_event_ecounters(dhd)) != BCME_OK) {
10537 DHD_ERROR(("%s Event_Ecounters start failed\n", __FUNCTION__));
10538 }
10539 }
10540 } else {
10541 if ((rc = dhd_stop_ecounters(dhd)) != BCME_OK) {
10542 DHD_ERROR(("%s Ecounters stop failed\n", __FUNCTION__));
10543 } else if ((rc = dhd_stop_event_ecounters(dhd)) != BCME_OK) {
10544 DHD_ERROR(("%s Event_Ecounters stop failed\n", __FUNCTION__));
10545 }
10546 }
10547 return rc;
10548 }
10549
/*
 * Host-driven ecounters start: builds one XTLV container per row of
 * ecounters_cfg_tbl[] (via dhd_create_ecounters_params), collects them
 * on a temporary singly-linked list, concatenates them into a single
 * ecounters_config_request_v2_t, and sends it with the "ecounters"
 * iovar. All intermediate allocations are released on both the success
 * and failure paths. Returns BCME_OK or a BCME_ error.
 */
int
dhd_start_ecounters(dhd_pub_t *dhd)
{
	uint8 i = 0;
	uint8 *start_ptr;
	int rc = BCME_OK;
	bcm_xtlv_t *elt;
	ecounters_config_request_v2_t *req = NULL;
	ecountersv2_processed_xtlv_list_elt *list_elt, *tail = NULL;
	ecountersv2_processed_xtlv_list_elt *processed_containers_list = NULL;
	uint16 total_processed_containers_len = 0;

	for (i = 0; i < ARRAYSIZE(ecounters_cfg_tbl); i++) {
		ecounters_cfg_t *ecounter_stat = &ecounters_cfg_tbl[i];

		if ((list_elt = (ecountersv2_processed_xtlv_list_elt *)
			MALLOCZ(dhd->osh, sizeof(*list_elt))) == NULL) {
			DHD_ERROR(("Ecounters v2: No memory to process\n"));
			goto fail;
		}

		rc = dhd_create_ecounters_params(dhd, ecounter_stat->type,
			ecounter_stat->if_slice_idx, ecounter_stat->stats_rep, &list_elt->data);

		if (rc) {
			DHD_ERROR(("Ecounters v2: Could not process: stat: %d return code: %d\n",
				ecounter_stat->stats_rep, rc));

			/* Free allocated memory and go to fail to release any memories allocated
			 * in previous iterations. Note that list_elt->data gets populated in
			 * dhd_create_ecounters_params() and gets freed there itself.
			 */
			MFREE(dhd->osh, list_elt, sizeof(*list_elt));
			list_elt = NULL;
			goto fail;
		}
		elt = (bcm_xtlv_t *) list_elt->data;

		/* Put the elements in the order they are processed */
		if (processed_containers_list == NULL) {
			processed_containers_list = list_elt;
		} else {
			tail->next = list_elt;
		}
		tail = list_elt;
		/* Size of the XTLV returned */
		total_processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
	}

	/* Now create ecounters config request with totallength */
	req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req) +
		total_processed_containers_len);

	if (req == NULL) {
		rc = BCME_NOMEM;
		goto fail;
	}

	req->version = ECOUNTERS_VERSION_2;
	req->logset = EVENT_LOG_SET_ECOUNTERS;
	req->reporting_period = ECOUNTERS_DEFAULT_PERIOD;
	req->num_reports = ECOUNTERS_NUM_REPORTS;
	req->len = total_processed_containers_len +
		OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);

	/* Copy config */
	start_ptr = req->ecounters_xtlvs;

	/* Now go element by element in the list: copy each container into the
	 * request and free the list node as it is consumed, leaving
	 * processed_containers_list empty for the fail-path loop below.
	 */
	while (processed_containers_list) {
		list_elt = processed_containers_list;

		elt = (bcm_xtlv_t *)list_elt->data;

		memcpy(start_ptr, list_elt->data, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
		start_ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
		processed_containers_list = processed_containers_list->next;

		/* Free allocated memories */
		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
	}

	if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
		DHD_ERROR(("failed to start ecounters\n"));
	}

fail:
	if (req) {
		MFREE(dhd->osh, req, sizeof(*req) + total_processed_containers_len);
	}

	/* Now go element by element in the list: drains whatever remains when
	 * an iteration above bailed out early (no-op on the success path).
	 */
	while (processed_containers_list) {
		list_elt = processed_containers_list;
		elt = (bcm_xtlv_t *)list_elt->data;
		processed_containers_list = processed_containers_list->next;

		/* Free allocated memories */
		MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
		MFREE(dhd->osh, list_elt, sizeof(*list_elt));
	}
	return rc;
}
10654
10655 int
dhd_stop_ecounters(dhd_pub_t * dhd)10656 dhd_stop_ecounters(dhd_pub_t *dhd)
10657 {
10658 int rc = BCME_OK;
10659 ecounters_config_request_v2_t *req;
10660
10661 /* Now create ecounters config request with totallength */
10662 req = (ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
10663
10664 if (req == NULL) {
10665 rc = BCME_NOMEM;
10666 goto fail;
10667 }
10668
10669 req->version = ECOUNTERS_VERSION_2;
10670 req->len = OFFSETOF(ecounters_config_request_v2_t, ecounters_xtlvs);
10671
10672 if ((rc = dhd_iovar(dhd, 0, "ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
10673 DHD_ERROR(("failed to stop ecounters\n"));
10674 }
10675
10676 fail:
10677 if (req) {
10678 MFREE(dhd->osh, req, sizeof(*req));
10679 }
10680 return rc;
10681 }
10682
10683 /* configured event_id_array for event ecounters */
typedef struct event_id_array {
	uint8 event_id;	/* WLC_E_ event id this entry describes */
	uint8 str_idx;	/* index of the first event_ecounters_cfg_tbl[] row for this id */
} event_id_array_t;
10688
10689 /* get event id array only from event_ecounters_cfg_tbl[] */
/* Collect the distinct event ids (and the table index where each id's
 * run of rows begins) from event_ecounters_cfg_tbl[], which lists equal
 * event ids consecutively. Returns the index of the LAST filled slot,
 * i.e. distinct-id count minus one.
 */
static inline int __dhd_event_ecounters_get_event_id_array(event_id_array_t *event_array)
{
	uint8 pos;
	uint8 slot = 0;
	int32 last_id = -1;

	for (pos = 0; pos < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); pos++) {
		int32 cur_id = event_ecounters_cfg_tbl[pos].event_id;

		if (cur_id != last_id) {
			/* advance to a fresh slot for every id after the first */
			if (last_id >= 0) {
				slot++;
			}
			event_array[slot].event_id = event_ecounters_cfg_tbl[pos].event_id;
			event_array[slot].str_idx = pos;
		}
		last_id = cur_id;
	}
	return slot;
}
10707
10708 /* One event id has limit xtlv num to request based on wl_ifstats_xtlv_id * 2 interface */
10709 #define ECNTRS_MAX_XTLV_NUM (31 * 2)
10710
10711 int
dhd_start_event_ecounters(dhd_pub_t * dhd)10712 dhd_start_event_ecounters(dhd_pub_t *dhd)
10713 {
10714 uint8 i, j = 0;
10715 uint8 event_id_cnt = 0;
10716 uint16 processed_containers_len = 0;
10717 uint16 max_xtlv_len = 0;
10718 int rc = BCME_OK;
10719 uint8 *ptr;
10720 uint8 *data;
10721 event_id_array_t *id_array;
10722 bcm_xtlv_t *elt = NULL;
10723 event_ecounters_config_request_v2_t *req = NULL;
10724
10725 /* XXX: the size of id_array is limited by the size of event_ecounters_cfg_tbl */
10726 id_array = (event_id_array_t *)MALLOCZ(dhd->osh, sizeof(event_id_array_t) *
10727 ARRAYSIZE(event_ecounters_cfg_tbl));
10728
10729 if (id_array == NULL) {
10730 rc = BCME_NOMEM;
10731 goto fail;
10732 }
10733 event_id_cnt = __dhd_event_ecounters_get_event_id_array(id_array);
10734
10735 max_xtlv_len = ((BCM_XTLV_HDR_SIZE +
10736 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs)) *
10737 ECNTRS_MAX_XTLV_NUM);
10738
10739 /* Now create ecounters config request with max allowed length */
10740 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh,
10741 sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
10742
10743 if (req == NULL) {
10744 rc = BCME_NOMEM;
10745 goto fail;
10746 }
10747
10748 for (i = 0; i <= event_id_cnt; i++) {
10749 /* req initialization by event id */
10750 req->version = ECOUNTERS_VERSION_2;
10751 req->logset = EVENT_LOG_SET_ECOUNTERS;
10752 req->event_id = id_array[i].event_id;
10753 req->flags = EVENT_ECOUNTERS_FLAGS_ADD;
10754 req->len = 0;
10755 processed_containers_len = 0;
10756
10757 /* Copy config */
10758 ptr = req->ecounters_xtlvs;
10759
10760 for (j = id_array[i].str_idx; j < (uint8)ARRAYSIZE(event_ecounters_cfg_tbl); j++) {
10761 event_ecounters_cfg_t *event_ecounter_stat = &event_ecounters_cfg_tbl[j];
10762 if (id_array[i].event_id != event_ecounter_stat->event_id)
10763 break;
10764
10765 rc = dhd_create_ecounters_params(dhd, event_ecounter_stat->type,
10766 event_ecounter_stat->if_slice_idx, event_ecounter_stat->stats_rep,
10767 &data);
10768
10769 if (rc) {
10770 DHD_ERROR(("%s: Could not process: stat: %d return code: %d\n",
10771 __FUNCTION__, event_ecounter_stat->stats_rep, rc));
10772 goto fail;
10773 }
10774
10775 elt = (bcm_xtlv_t *)data;
10776
10777 memcpy(ptr, elt, BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10778 ptr += (size_t)(BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE);
10779 processed_containers_len += BCM_XTLV_LEN(elt) + BCM_XTLV_HDR_SIZE;
10780
10781 /* Free allocated memories alloced by dhd_create_ecounters_params */
10782 MFREE(dhd->osh, elt, elt->len + BCM_XTLV_HDR_SIZE);
10783
10784 if (processed_containers_len > max_xtlv_len) {
10785 DHD_ERROR(("%s XTLV NUM IS OVERFLOWED THAN ALLOWED!!\n",
10786 __FUNCTION__));
10787 rc = BCME_BADLEN;
10788 goto fail;
10789 }
10790 }
10791
10792 req->len = processed_containers_len +
10793 OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
10794
10795 DHD_INFO(("%s req version %d logset %d event_id %d flags %d len %d\n",
10796 __FUNCTION__, req->version, req->logset, req->event_id,
10797 req->flags, req->len));
10798
10799 rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE);
10800
10801 if (rc < 0) {
10802 DHD_ERROR(("failed to start event_ecounters(event id %d) with rc %d\n",
10803 req->event_id, rc));
10804 goto fail;
10805 }
10806 }
10807
10808 fail:
10809 /* Free allocated memories */
10810 if (req) {
10811 MFREE(dhd->osh, req, sizeof(event_ecounters_config_request_v2_t *) + max_xtlv_len);
10812 }
10813 if (id_array) {
10814 MFREE(dhd->osh, id_array, sizeof(event_id_array_t) *
10815 ARRAYSIZE(event_ecounters_cfg_tbl));
10816 }
10817
10818 return rc;
10819 }
10820
10821 int
dhd_stop_event_ecounters(dhd_pub_t * dhd)10822 dhd_stop_event_ecounters(dhd_pub_t *dhd)
10823 {
10824 int rc = BCME_OK;
10825 event_ecounters_config_request_v2_t *req;
10826
10827 /* Now create ecounters config request with totallength */
10828 req = (event_ecounters_config_request_v2_t *)MALLOCZ(dhd->osh, sizeof(*req));
10829
10830 if (req == NULL) {
10831 rc = BCME_NOMEM;
10832 goto fail;
10833 }
10834
10835 req->version = ECOUNTERS_VERSION_2;
10836 req->flags = EVENT_ECOUNTERS_FLAGS_DEL_ALL;
10837 req->len = OFFSETOF(event_ecounters_config_request_v2_t, ecounters_xtlvs);
10838
10839 if ((rc = dhd_iovar(dhd, 0, "event_ecounters", (char *)req, req->len, NULL, 0, TRUE)) < 0) {
10840 DHD_ERROR(("failed to stop event_ecounters\n"));
10841 }
10842
10843 fail:
10844 if (req) {
10845 MFREE(dhd->osh, req, sizeof(*req));
10846 }
10847 return rc;
10848 }
10849 #ifdef DHD_LOG_DUMP
/*
 * Flush one debug ring into a user buffer: writes 'text_hdr', then a
 * log_dump_section_hdr_t, then every record pulled from the ring via
 * dhd_dbg_ring_pull_single(), using dhdp->concise_dbg_buf as scratch.
 * The ring is held in RING_SUSPEND for the duration and its read/write
 * pointers are reset afterwards. 'buflen' is currently unused.
 * Returns the last dhd_export_debug_data() result, or BCME_BADARG.
 */
int
dhd_dump_debug_ring(dhd_pub_t *dhdp, void *ring_ptr, const void *user_buf,
	log_dump_section_hdr_t *sec_hdr,
	char *text_hdr, int buflen, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0;
	void *data = NULL;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;
	int pos = 0;
	int fpos_sechdr = 0;

	if (!dhdp || !ring || !user_buf || !sec_hdr || !text_hdr) {
		return BCME_BADARG;
	}
	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		ret = dhd_export_debug_data(text_hdr, NULL, user_buf, strlen(text_hdr), &pos);
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		fpos_sechdr = pos;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &pos);
		/* drain the ring record by record until it is empty */
		do {
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_export_debug_data(data, NULL, user_buf, rlen, &pos);
			}
			DHD_DBGIF(("%s: rlen : %d\n", __FUNCTION__, rlen));
		} while ((rlen > 0));
		/* now update the section header length in the file */
		/* Complete ring size is dumped by HAL, hence updating length to ring size */
		sec_hdr->length = ring->ring_size;
		ret = dhd_export_debug_data((char *)sec_hdr, NULL, user_buf,
			sizeof(*sec_hdr), &fpos_sechdr);
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}
	/* resume the ring and reset its pointers */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	return ret;
}
10917
/**
 * Drain a debug ring and append its contents to a dump file.
 *
 * Layout written at *file_posn: text header string, a log_dump_section_hdr_t
 * (length patched in after the drain), then the raw ring records.
 * The ring is suspended for the duration of the drain and its read/write
 * pointers are reset afterwards, so all buffered records are consumed.
 *
 * @param dhdp      driver public context (supplies concise_dbg_buf scratch)
 * @param ring_ptr  dhd_dbg_ring_t being dumped
 * @param file      destination file handle for dhd_os_write_file_posn()
 * @param file_posn in/out file offset, advanced by everything written
 * @param sec_hdr   caller-provided section header template (re-initialised here)
 * @param text_hdr  NUL-terminated section title written before the header
 * @param sec_type  section type tag stored in sec_hdr->type
 * @return BCME_OK on success, BCME_BADARG/BCME_ERROR on failure
 */
int
dhd_log_dump_ring_to_file(dhd_pub_t *dhdp, void *ring_ptr, void *file,
	unsigned long *file_posn, log_dump_section_hdr_t *sec_hdr,
	char *text_hdr, uint32 sec_type)
{
	uint32 rlen = 0;
	uint32 data_len = 0, total_len = 0;
	void *data = NULL;
	unsigned long fpos_sechdr = 0;
	unsigned long flags = 0;
	int ret = 0;
	dhd_dbg_ring_t *ring = (dhd_dbg_ring_t *)ring_ptr;

	if (!dhdp || !ring || !file || !sec_hdr ||
		!file_posn || !text_hdr)
		return BCME_BADARG;

	/* do not allow further writes to the ring
	 * till we flush it
	 */
	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_SUSPEND;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);

	if (dhdp->concise_dbg_buf) {
		/* re-use concise debug buffer temporarily
		 * to pull ring data, to write
		 * record by record to file
		 */
		data_len = CONCISE_DUMP_BUFLEN;
		data = dhdp->concise_dbg_buf;
		dhd_os_write_file_posn(file, file_posn, text_hdr,
			strlen(text_hdr));
		/* write the section header now with zero length,
		 * once the correct length is found out, update
		 * it later
		 */
		dhd_init_sec_hdr(sec_hdr);
		/* remember where the placeholder header landed so it can be
		 * rewritten once total_len is known
		 */
		fpos_sechdr = *file_posn;
		sec_hdr->type = sec_type;
		sec_hdr->length = 0;
		dhd_os_write_file_posn(file, file_posn, (char *)sec_hdr,
			sizeof(*sec_hdr));
		do {
			/* pull up to one scratch-buffer's worth per iteration */
			rlen = dhd_dbg_ring_pull_single(ring, data, data_len, TRUE);
			if (rlen > 0) {
				/* write the log */
				ret = dhd_os_write_file_posn(file, file_posn, data, rlen);
				if (ret < 0) {
					DHD_ERROR(("%s: write file error !\n", __FUNCTION__));
					/* re-enable the ring before bailing out so
					 * logging is not left permanently suspended
					 */
					DHD_DBG_RING_LOCK(ring->lock, flags);
					ring->state = RING_ACTIVE;
					DHD_DBG_RING_UNLOCK(ring->lock, flags);
					return BCME_ERROR;
				}
			}
			total_len += rlen;
		} while (rlen > 0);
		/* now update the section header length in the file */
		sec_hdr->length = total_len;
		dhd_os_write_file_posn(file, &fpos_sechdr, (char *)sec_hdr, sizeof(*sec_hdr));
	} else {
		DHD_ERROR(("%s: No concise buffer available !\n", __FUNCTION__));
	}

	DHD_DBG_RING_LOCK(ring->lock, flags);
	ring->state = RING_ACTIVE;
	/* Resetting both read and write pointer,
	 * since all items are read.
	 */
	ring->rp = ring->wp = 0;
	DHD_DBG_RING_UNLOCK(ring->lock, flags);
	return BCME_OK;
}
10992
10993 /* logdump cookie */
10994 #define MAX_LOGUDMP_COOKIE_CNT 10u
10995 #define LOGDUMP_COOKIE_STR_LEN 50u
10996 int
dhd_logdump_cookie_init(dhd_pub_t * dhdp,uint8 * buf,uint32 buf_size)10997 dhd_logdump_cookie_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size)
10998 {
10999 uint32 ring_size;
11000
11001 if (!dhdp || !buf) {
11002 DHD_ERROR(("INVALID PTR: dhdp:%p buf:%p\n", dhdp, buf));
11003 return BCME_ERROR;
11004 }
11005
11006 ring_size = dhd_ring_get_hdr_size() + LOGDUMP_COOKIE_STR_LEN * MAX_LOGUDMP_COOKIE_CNT;
11007 if (buf_size < ring_size) {
11008 DHD_ERROR(("BUF SIZE IS TO SHORT: req:%d buf_size:%d\n",
11009 ring_size, buf_size));
11010 return BCME_ERROR;
11011 }
11012
11013 dhdp->logdump_cookie = dhd_ring_init(dhdp, buf, buf_size,
11014 LOGDUMP_COOKIE_STR_LEN, MAX_LOGUDMP_COOKIE_CNT,
11015 DHD_RING_TYPE_FIXED);
11016 if (!dhdp->logdump_cookie) {
11017 DHD_ERROR(("FAIL TO INIT COOKIE RING\n"));
11018 return BCME_ERROR;
11019 }
11020
11021 return BCME_OK;
11022 }
11023
11024 void
dhd_logdump_cookie_deinit(dhd_pub_t * dhdp)11025 dhd_logdump_cookie_deinit(dhd_pub_t *dhdp)
11026 {
11027 if (!dhdp) {
11028 return;
11029 }
11030 if (dhdp->logdump_cookie) {
11031 dhd_ring_deinit(dhdp, dhdp->logdump_cookie);
11032 }
11033
11034 return;
11035 }
11036
11037 #ifdef DHD_TX_PROFILE
11038 int
dhd_tx_profile_detach(dhd_pub_t * dhdp)11039 dhd_tx_profile_detach(dhd_pub_t *dhdp)
11040 {
11041 int result = BCME_ERROR;
11042
11043 if (dhdp != NULL && dhdp->protocol_filters != NULL) {
11044 MFREE(dhdp->osh, dhdp->protocol_filters, DHD_MAX_PROFILES *
11045 sizeof(*(dhdp->protocol_filters)));
11046 dhdp->protocol_filters = NULL;
11047
11048 result = BCME_OK;
11049 }
11050
11051 return result;
11052 }
11053
11054 int
dhd_tx_profile_attach(dhd_pub_t * dhdp)11055 dhd_tx_profile_attach(dhd_pub_t *dhdp)
11056 {
11057 int result = BCME_ERROR;
11058
11059 if (dhdp != NULL) {
11060 dhdp->protocol_filters = (dhd_tx_profile_protocol_t*)MALLOCZ(dhdp->osh,
11061 DHD_MAX_PROFILES * sizeof(*(dhdp->protocol_filters)));
11062
11063 if (dhdp->protocol_filters != NULL) {
11064 result = BCME_OK;
11065 }
11066 }
11067
11068 if (result != BCME_OK) {
11069 DHD_ERROR(("%s:\tMALLOC of tx profile protocol filters failed\n",
11070 __FUNCTION__));
11071 }
11072
11073 return result;
11074 }
11075 #endif /* defined(DHD_TX_PROFILE) */
11076
11077 void
dhd_logdump_cookie_save(dhd_pub_t * dhdp,char * cookie,char * type)11078 dhd_logdump_cookie_save(dhd_pub_t *dhdp, char *cookie, char *type)
11079 {
11080 char *ptr;
11081
11082 if (!dhdp || !cookie || !type || !dhdp->logdump_cookie) {
11083 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p"
11084 " type = %p, cookie_cfg:%p\n", __FUNCTION__,
11085 dhdp, cookie, type, dhdp?dhdp->logdump_cookie: NULL));
11086 return;
11087 }
11088 ptr = (char *)dhd_ring_get_empty(dhdp->logdump_cookie);
11089 if (ptr == NULL) {
11090 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
11091 return;
11092 }
11093 scnprintf(ptr, LOGDUMP_COOKIE_STR_LEN, "%s: %s\n", type, cookie);
11094 return;
11095 }
11096
11097 int
dhd_logdump_cookie_get(dhd_pub_t * dhdp,char * ret_cookie,uint32 buf_size)11098 dhd_logdump_cookie_get(dhd_pub_t *dhdp, char *ret_cookie, uint32 buf_size)
11099 {
11100 char *ptr;
11101
11102 if (!dhdp || !ret_cookie || !dhdp->logdump_cookie) {
11103 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p"
11104 "cookie=%p cookie_cfg:%p\n", __FUNCTION__,
11105 dhdp, ret_cookie, dhdp?dhdp->logdump_cookie: NULL));
11106 return BCME_ERROR;
11107 }
11108 ptr = (char *)dhd_ring_get_first(dhdp->logdump_cookie);
11109 if (ptr == NULL) {
11110 DHD_ERROR(("%s : Skip to save due to locking\n", __FUNCTION__));
11111 return BCME_ERROR;
11112 }
11113 memcpy(ret_cookie, ptr, MIN(buf_size, strlen(ptr)));
11114 dhd_ring_free_first(dhdp->logdump_cookie);
11115 return BCME_OK;
11116 }
11117
11118 int
dhd_logdump_cookie_count(dhd_pub_t * dhdp)11119 dhd_logdump_cookie_count(dhd_pub_t *dhdp)
11120 {
11121 if (!dhdp || !dhdp->logdump_cookie) {
11122 DHD_ERROR(("%s: At least one buffer ptr is NULL dhdp=%p cookie=%p\n",
11123 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie: NULL));
11124 return 0;
11125 }
11126 return dhd_ring_get_cur_size(dhdp->logdump_cookie);
11127 }
11128
/**
 * Serialise all cookie-ring entries into @buf, then export the text header,
 * a section header, and the cookie data via dhd_export_debug_data().
 *
 * The ring's read/write indices are snapshotted before the drain and
 * restored afterwards, so reading the cookies does not consume them.
 *
 * @param dhdp     driver public context (cookie ring must be initialised)
 * @param fp       file handle for export (may be NULL when user_buf is used)
 * @param user_buf user-space destination (may be NULL when fp is used)
 * @param f_pos    in/out export offset
 * @param buf      caller-allocated staging buffer for the cookie text
 * @param buf_size size of @buf in bytes
 * @return >= 0 on success, negative BCME_/write error on failure
 */
static inline int
__dhd_log_dump_cookie_to_file(
	dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos,
	char *buf, uint32 buf_size)
{

	uint32 remain = buf_size;
	int ret = BCME_ERROR;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	uint32 read_idx;
	uint32 write_idx;

	/* snapshot ring indices: the get-loop below consumes entries, and the
	 * indices are put back afterwards so the ring contents survive the dump
	 */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			return ret;
		}
		/* append at the current fill point; remain tracks free space */
		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
	}
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);

	ret = dhd_export_debug_data(COOKIE_LOG_HDR, fp, user_buf, strlen(COOKIE_LOG_HDR), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie hdr\n", __FUNCTION__));
		return ret;
	}
	sec_hdr.magic = LOG_DUMP_MAGIC;
	sec_hdr.timestamp = local_clock();
	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
	/* bytes actually formatted into buf */
	sec_hdr.length = buf_size - remain;

	ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for section hdr\n", __FUNCTION__));
		return ret;
	}

	ret = dhd_export_debug_data(buf, fp, user_buf, sec_hdr.length, f_pos);
	if (ret < 0) {
		DHD_ERROR(("%s : Write file Error for cookie data\n", __FUNCTION__));
	}

	return ret;
}
11178
/**
 * Compute the number of bytes the cookie section would occupy in a dump:
 * text header + section header + total length of all cookie strings.
 *
 * Non-destructive: the ring indices are snapshotted and restored around the
 * measuring drain. Returns 0 on any failure.
 */
uint32
dhd_log_dump_cookie_len(dhd_pub_t *dhdp)
{
	int len = 0;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	char *buf = NULL;
	int ret = BCME_ERROR;
	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
	uint32 read_idx;
	uint32 write_idx;
	uint32 remain;

	remain = buf_size;

	if (!dhdp || !dhdp->logdump_cookie) {
		DHD_ERROR(("%s At least one ptr is NULL "
			"dhdp = %p cookie %p\n",
			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
		goto exit;
	}

	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
	if (!buf) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		goto exit;
	}

	/* snapshot ring indices so the measuring drain below is non-destructive */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			goto exit;
		}
		remain -= (uint32)strlen(tmp_buf);
	}
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
	len += strlen(COOKIE_LOG_HDR);
	len += sizeof(sec_hdr);
	/* (buf_size - remain) == total bytes of cookie text counted above */
	len += (buf_size - remain);
exit:
	if (buf)
		MFREE(dhdp->osh, buf, buf_size);
	return len;
}
11227
/**
 * Export the cookie section (text header + section header + cookie strings)
 * to a user buffer via dhd_export_debug_data().
 *
 * Non-destructive with respect to the ring: indices are snapshotted before
 * the drain and restored afterwards.
 *
 * @param dhdp     driver public context (cookie ring must be initialised)
 * @param user_buf user-space destination buffer
 * @return result of the last export call, or BCME_ERROR on setup failure
 */
int
dhd_log_dump_cookie(dhd_pub_t *dhdp, const void *user_buf)
{
	int ret = BCME_ERROR;
	char tmp_buf[LOGDUMP_COOKIE_STR_LEN];
	log_dump_section_hdr_t sec_hdr;
	char *buf = NULL;
	uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
	int pos = 0;
	uint32 read_idx;
	uint32 write_idx;
	uint32 remain;

	remain = buf_size;

	if (!dhdp || !dhdp->logdump_cookie) {
		DHD_ERROR(("%s At least one ptr is NULL "
			"dhdp = %p cookie %p\n",
			__FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL));
		goto exit;
	}

	buf = (char *)MALLOCZ(dhdp->osh, buf_size);
	if (!buf) {
		DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
		goto exit;
	}

	/* snapshot and later restore ring indices: the drain must not consume */
	read_idx = dhd_ring_get_read_idx(dhdp->logdump_cookie);
	write_idx = dhd_ring_get_write_idx(dhdp->logdump_cookie);
	while (dhd_logdump_cookie_count(dhdp) > 0) {
		memset(tmp_buf, 0, sizeof(tmp_buf));
		ret = dhd_logdump_cookie_get(dhdp, tmp_buf, LOGDUMP_COOKIE_STR_LEN);
		if (ret != BCME_OK) {
			goto exit;
		}
		remain -= scnprintf(&buf[buf_size - remain], remain, "%s", tmp_buf);
	}
	dhd_ring_set_read_idx(dhdp->logdump_cookie, read_idx);
	dhd_ring_set_write_idx(dhdp->logdump_cookie, write_idx);
	ret = dhd_export_debug_data(COOKIE_LOG_HDR, NULL, user_buf, strlen(COOKIE_LOG_HDR), &pos);
	sec_hdr.magic = LOG_DUMP_MAGIC;
	sec_hdr.timestamp = local_clock();
	sec_hdr.type = LOG_DUMP_SECTION_COOKIE;
	/* bytes of cookie text actually formatted into buf */
	sec_hdr.length = buf_size - remain;
	ret = dhd_export_debug_data((char *)&sec_hdr, NULL, user_buf, sizeof(sec_hdr), &pos);
	ret = dhd_export_debug_data(buf, NULL, user_buf, sec_hdr.length, &pos);
exit:
	if (buf)
		MFREE(dhdp->osh, buf, buf_size);
	return ret;
}
11280
11281 int
dhd_log_dump_cookie_to_file(dhd_pub_t * dhdp,void * fp,const void * user_buf,unsigned long * f_pos)11282 dhd_log_dump_cookie_to_file(dhd_pub_t *dhdp, void *fp, const void *user_buf, unsigned long *f_pos)
11283 {
11284 char *buf;
11285 int ret = BCME_ERROR;
11286 uint32 buf_size = MAX_LOGUDMP_COOKIE_CNT * LOGDUMP_COOKIE_STR_LEN;
11287
11288 if (!dhdp || !dhdp->logdump_cookie || (!fp && !user_buf) || !f_pos) {
11289 DHD_ERROR(("%s At least one ptr is NULL "
11290 "dhdp = %p cookie %p fp = %p f_pos = %p\n",
11291 __FUNCTION__, dhdp, dhdp?dhdp->logdump_cookie:NULL, fp, f_pos));
11292 return ret;
11293 }
11294
11295 buf = (char *)MALLOCZ(dhdp->osh, buf_size);
11296 if (!buf) {
11297 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11298 return ret;
11299 }
11300 ret = __dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, f_pos, buf, buf_size);
11301 MFREE(dhdp->osh, buf, buf_size);
11302
11303 return ret;
11304 }
11305 #endif /* DHD_LOG_DUMP */
11306 #endif /* LINUX || linux */
11307
11308 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
11309 int
dhd_control_he_enab(dhd_pub_t * dhd,uint8 he_enab)11310 dhd_control_he_enab(dhd_pub_t * dhd, uint8 he_enab)
11311 {
11312 int ret = BCME_OK;
11313 bcm_xtlv_t *pxtlv = NULL;
11314 uint8 mybuf[DHD_IOVAR_BUF_SIZE];
11315 uint16 mybuf_len = sizeof(mybuf);
11316 pxtlv = (bcm_xtlv_t *)mybuf;
11317
11318 ret = bcm_pack_xtlv_entry((uint8**)&pxtlv, &mybuf_len, WL_HE_CMD_ENAB, sizeof(he_enab),
11319 &he_enab, BCM_XTLV_OPTION_ALIGN32);
11320
11321 if (ret != BCME_OK) {
11322 ret = -EINVAL;
11323 DHD_ERROR(("%s failed to pack he enab, err: %s\n", __FUNCTION__, bcmerrorstr(ret)));
11324 return ret;
11325 }
11326
11327 ret = dhd_iovar(dhd, 0, "he", (char *)&mybuf, sizeof(mybuf), NULL, 0, TRUE);
11328 if (ret < 0) {
11329 DHD_ERROR(("%s he_enab (%d) set failed, err: %s\n",
11330 __FUNCTION__, he_enab, bcmerrorstr(ret)));
11331 } else {
11332 DHD_ERROR(("%s he_enab (%d) set successed\n", __FUNCTION__, he_enab));
11333 }
11334
11335 return ret;
11336 }
11337 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
11338
11339 #ifdef CONFIG_ROAM_RSSI_LIMIT
11340 int
dhd_roam_rssi_limit_get(dhd_pub_t * dhd,int * lmt2g,int * lmt5g)11341 dhd_roam_rssi_limit_get(dhd_pub_t *dhd, int *lmt2g, int *lmt5g)
11342 {
11343 wlc_roam_rssi_limit_t *plmt;
11344 wlc_roam_rssi_lmt_info_v1_t *pinfo;
11345 int ret = BCME_OK;
11346 int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
11347
11348 plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
11349 if (!plmt) {
11350 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11351 return BCME_NOMEM;
11352 }
11353
11354 /* Get roam rssi limit */
11355 ret = dhd_iovar(dhd, 0, "roam_rssi_limit", NULL, 0, (char *)plmt, plmt_len, FALSE);
11356 if (ret < 0) {
11357 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
11358 goto done;
11359 }
11360
11361 if (plmt->ver != WLC_ROAM_RSSI_LMT_VER_1) {
11362 ret = BCME_VERSION;
11363 goto done;
11364 }
11365
11366 pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
11367 *lmt2g = (int)pinfo->rssi_limit_2g;
11368 *lmt5g = (int)pinfo->rssi_limit_5g;
11369
11370 done:
11371 if (plmt) {
11372 MFREE(dhd->osh, plmt, plmt_len);
11373 }
11374 return ret;
11375 }
11376
11377 int
dhd_roam_rssi_limit_set(dhd_pub_t * dhd,int lmt2g,int lmt5g)11378 dhd_roam_rssi_limit_set(dhd_pub_t *dhd, int lmt2g, int lmt5g)
11379 {
11380 wlc_roam_rssi_limit_t *plmt;
11381 wlc_roam_rssi_lmt_info_v1_t *pinfo;
11382 int ret = BCME_OK;
11383 int plmt_len = sizeof(*pinfo) + ROAMRSSI_HDRLEN;
11384
11385 /* Sanity check RSSI limit Value */
11386 if ((lmt2g < ROAMRSSI_2G_MIN) || (lmt2g > ROAMRSSI_2G_MAX)) {
11387 DHD_ERROR(("%s Not In Range 2G ROAM RSSI Limit\n", __FUNCTION__));
11388 return BCME_RANGE;
11389 }
11390 if ((lmt2g < ROAMRSSI_5G_MIN) || (lmt2g > ROAMRSSI_5G_MAX)) {
11391 DHD_ERROR(("%s Not In Range 5G ROAM RSSI Limit\n", __FUNCTION__));
11392 return BCME_RANGE;
11393 }
11394
11395 plmt = (wlc_roam_rssi_limit_t *)MALLOCZ(dhd->osh, plmt_len);
11396 if (!plmt) {
11397 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11398 return BCME_NOMEM;
11399 }
11400 plmt->ver = WLC_ROAM_RSSI_LMT_VER_1;
11401 plmt->len = sizeof(*pinfo);
11402 pinfo = (wlc_roam_rssi_lmt_info_v1_t *)plmt->data;
11403 pinfo->rssi_limit_2g = (int16)lmt2g;
11404 pinfo->rssi_limit_5g = (int16)lmt5g;
11405
11406 /* Set roam rssi limit */
11407 ret = dhd_iovar(dhd, 0, "roam_rssi_limit", (char *)plmt, plmt_len, NULL, 0, TRUE);
11408 if (ret < 0) {
11409 DHD_ERROR(("%s Failed to Get roam_rssi_limit %d\n", __FUNCTION__, ret));
11410 goto done;
11411 }
11412 done:
11413 if (plmt) {
11414 MFREE(dhd->osh, plmt, plmt_len);
11415 }
11416 return ret;
11417 }
11418 #endif /* CONFIG_ROAM_RSSI_LIMIT */
11419
11420 #ifdef CONFIG_ROAM_MIN_DELTA
11421 int
dhd_roam_min_delta_get(dhd_pub_t * dhd,uint32 * dt2g,uint32 * dt5g)11422 dhd_roam_min_delta_get(dhd_pub_t *dhd, uint32 *dt2g, uint32 *dt5g)
11423 {
11424 wlc_roam_min_delta_t *pmin_delta;
11425 wlc_roam_min_delta_info_v1_t *pmin_delta_info;
11426 int ret = BCME_OK;
11427 int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
11428
11429 pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
11430 if (!pmin_delta) {
11431 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11432 return BCME_NOMEM;
11433 }
11434
11435 /* Get Minimum ROAM score delta */
11436 ret = dhd_iovar(dhd, 0, "roam_min_delta", NULL, 0, (char *)pmin_delta, plen, FALSE);
11437 if (ret < 0) {
11438 DHD_ERROR(("%s Failed to Get roam_min_delta %d\n", __FUNCTION__, ret));
11439 goto done;
11440 }
11441
11442 if (pmin_delta->ver != WLC_ROAM_MIN_DELTA_VER_1) {
11443 ret = BCME_VERSION;
11444 goto done;
11445 }
11446
11447 pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
11448 *dt2g = (uint32)pmin_delta_info->roam_min_delta_2g;
11449 *dt5g = (uint32)pmin_delta_info->roam_min_delta_5g;
11450
11451 done:
11452 if (pmin_delta) {
11453 MFREE(dhd->osh, pmin_delta, plen);
11454 }
11455 return ret;
11456 }
11457
11458 int
dhd_roam_min_delta_set(dhd_pub_t * dhd,uint32 dt2g,uint32 dt5g)11459 dhd_roam_min_delta_set(dhd_pub_t *dhd, uint32 dt2g, uint32 dt5g)
11460 {
11461 wlc_roam_min_delta_t *pmin_delta;
11462 wlc_roam_min_delta_info_v1_t *pmin_delta_info;
11463 int ret = BCME_OK;
11464 int plen = sizeof(*pmin_delta_info) + ROAM_MIN_DELTA_HDRLEN;
11465
11466 /* Sanity check Minimum ROAM score delta */
11467 if ((dt2g > ROAM_MIN_DELTA_MAX) || (dt5g > ROAM_MIN_DELTA_MAX)) {
11468 DHD_ERROR(("%s Not In Range Minimum ROAM score delta, 2G: %d, 5G: %d\n",
11469 __FUNCTION__, dt2g, dt5g));
11470 return BCME_RANGE;
11471 }
11472
11473 pmin_delta = (wlc_roam_min_delta_t *)MALLOCZ(dhd->osh, plen);
11474 if (!pmin_delta) {
11475 DHD_ERROR(("%s Fail to malloc buffer\n", __FUNCTION__));
11476 return BCME_NOMEM;
11477 }
11478 pmin_delta->ver = WLC_ROAM_MIN_DELTA_VER_1;
11479 pmin_delta->len = sizeof(*pmin_delta_info);
11480 pmin_delta_info = (wlc_roam_min_delta_info_v1_t *)pmin_delta->data;
11481 pmin_delta_info->roam_min_delta_2g = (uint32)dt2g;
11482 pmin_delta_info->roam_min_delta_5g = (uint32)dt5g;
11483
11484 /* Set Minimum ROAM score delta */
11485 ret = dhd_iovar(dhd, 0, "roam_min_delta", (char *)pmin_delta, plen, NULL, 0, TRUE);
11486 if (ret < 0) {
11487 DHD_ERROR(("%s Failed to Set roam_min_delta %d\n", __FUNCTION__, ret));
11488 goto done;
11489 }
11490 done:
11491 if (pmin_delta) {
11492 MFREE(dhd->osh, pmin_delta, plen);
11493 }
11494 return ret;
11495 }
11496 #endif /* CONFIG_ROAM_MIN_DELTA */
11497
11498 #ifdef HOST_SFH_LLC
11499 #define SSTLOOKUP(proto) (((proto) == 0x80f3) || ((proto) == 0x8137))
11500 /** Convert Ethernet to 802.3 per 802.1H (use bridge-tunnel if type in SST)
11501 * Note:- This function will overwrite the ethernet header in the pkt
11502 * with a 802.3 ethernet + LLC/SNAP header by utilising the headroom
11503 * in the packet. The pkt data pointer should be pointing to the
11504 * start of the packet (at the ethernet header) when the function is called.
11505 * The pkt data pointer will be pointing to the
11506 * start of the new 802.3 header if the function returns successfully
11507 *
11508 *
11509 * Original Ethernet (header length = 14):
11510 * ----------------------------------------------------------------------------------------
11511 * | | DA | SA | T | Data... |
11512 * ----------------------------------------------------------------------------------------
11513 * 6 6 2
11514 *
11515 * Conversion to 802.3 (header length = 22):
11516 * (LLC includes ether_type in last 2 bytes):
11517 * ----------------------------------------------------------------------------------------
11518 * | | DA | SA | L | LLC/SNAP | T | Data... |
11519 * ----------------------------------------------------------------------------------------
11520 * 6 6 2 6 2
11521 */
/**
 * Rewrite an Ethernet-II frame in place as 802.3 + LLC/SNAP (802.1H).
 *
 * Requires DOT11_LLC_SNAP_HDR_LEN bytes of packet headroom; the packet
 * data pointer is pushed back by that amount. Note that `eh` continues to
 * point at the ORIGINAL header location inside the packet, which is why it
 * can still be read after PKTPUSH (PKTPUSH only moves the data pointer,
 * not the payload bytes).
 *
 * @return BCME_OK, or BCME_BUFTOOSHORT if there is not enough headroom
 */
int
BCMFASTPATH(dhd_ether_to_8023_hdr)(osl_t *osh, struct ether_header *eh, void *p)
{
	struct ether_header *neh;
	struct dot11_llc_snap_header *lsh;
	uint16 plen, ether_type;

	if (PKTHEADROOM(osh, p) < DOT11_LLC_SNAP_HDR_LEN) {
		DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
		ASSERT(0);
		return BCME_BUFTOOSHORT;
	}

	/* capture the ether type before the header is overwritten below */
	ether_type = ntoh16(eh->ether_type);
	neh = (struct ether_header *)PKTPUSH(osh, p, DOT11_LLC_SNAP_HDR_LEN);

	/* 802.3 MAC header */
	eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
	eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
	/* 802.3 length field = everything after the new 14-byte MAC header */
	plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
	neh->ether_type = hton16(plen);

	/* 802.2 LLC header */
	lsh = (struct dot11_llc_snap_header *)&neh[1];
	lsh->dsap = 0xaa;
	lsh->ssap = 0xaa;
	lsh->ctl = 0x03;

	/* 802.2 SNAP header Use RFC1042 or bridge-tunnel if type in SST per 802.1H */
	lsh->oui[0] = 0x00;
	lsh->oui[1] = 0x00;
	if (SSTLOOKUP(ether_type))
		lsh->oui[2] = 0xf8;
	else
		lsh->oui[2] = 0x00;
	lsh->type = hton16(ether_type);

	return BCME_OK;
}
11561
11562 /** Convert 802.3+LLC to ethernet
11563 * Note:- This function will overwrite the 802.3+LLC hdr in the pkt
11564 * with an ethernet header. The pkt data pointer should be pointing to the
11565 * start of the packet (at the 802.3 header) when the function is called.
11566 * The pkt data pointer will be pointing to the
11567 * start of the ethernet header if the function returns successfully
11568 */
/**
 * Rewrite an 802.3 + LLC/SNAP frame in place as Ethernet-II.
 *
 * @param osh    osl handle
 * @param eh8023 copy of the frame's 802.3 MAC header; its ether_type is
 *               expected to hold an 802.3 LENGTH (< ETHER_TYPE_MIN), and is
 *               patched here with the real type from the SNAP header
 * @param p      the packet; data pointer must be at the 802.3 header and is
 *               pulled forward by DOT11_LLC_SNAP_HDR_LEN on success
 * @return BCME_OK, or BCME_BADARG on NULL args / non-length ether_type
 */
int
BCMFASTPATH(dhd_8023_llc_to_ether_hdr)(osl_t *osh, struct ether_header *eh8023, void *p)
{
	struct dot11_llc_snap_header *lsh = NULL;
	uint16 ether_type = 0;
	uint8 *pdata = NULL;

	if (!p || !eh8023)
		return BCME_BADARG;

	pdata = PKTDATA(osh, p);
	ether_type = ntoh16(eh8023->ether_type);
	/* ether type in 802.3 hdr for sfh llc host insertion case
	 * contains length, replace it with actual ether type at the
	 * end of the LLC hdr
	 */
	if (ether_type < ETHER_TYPE_MIN) {
		/* 802.2 LLC header */
		lsh = (struct dot11_llc_snap_header *)(pdata + sizeof(*eh8023));
		/* lsh->type is already network order; copied verbatim */
		eh8023->ether_type = lsh->type;
		/* strip the SNAP header, then lay the 14-byte ethernet
		 * header down at the new front of the packet
		 */
		pdata = PKTPULL(osh, p, DOT11_LLC_SNAP_HDR_LEN);
		memcpy_s(pdata, sizeof(*eh8023), eh8023, sizeof(*eh8023));
	} else {
		DHD_ERROR_RLMT(("ethertype 0x%x is not a length !\n", ether_type));
		return BCME_BADARG;
	}

	return BCME_OK;
}
11598 #endif /* HOST_SFH_LLC */
11599
11600 #ifdef DHD_AWDL
11601
11602 #define AWDL_MIN_EXTENSION_DEFAULT 0x3u
11603 #define AWDL_PRESENCE_MODE_DEFAULT 0x4u
11604 #define AWDL_FLAGS_DEFAULT 0x0000u
11605 #define AWDL_PID 0x0800u
11606 #define AWDL_USERDATA_SIZE 6u
11607 /** Convert Ethernet to 802.3 + AWDL LLC SNAP header
11608 * Note:- This function will overwrite the ethernet header in the pkt 'p'
11609 * with a 802.3 ethernet + AWDL LLC/SNAP header by utilising the headroom
11610 * in the packet. The pkt data pointer should be pointing to the
11611 * start of the packet (at the ethernet header) when the function is called.
11612 * The pkt data pointer will be pointing to the
11613 * start of the new 802.3 header if the function returns successfully
11614 */
/**
 * Rewrite an Ethernet-II frame in place as 802.3 + AWDL LLC/SNAP.
 *
 * Needs 2 * DOT11_LLC_SNAP_HDR_LEN bytes of headroom: one SNAP header plus
 * 8 bytes of AWDL user data (minext, presence mode, seq, flags, type).
 * `eh` still points at the original header location after PKTPUSH, so its
 * fields remain readable while the new header is built in front of it.
 *
 * @return BCME_OK, or BCME_BUFTOOSHORT if headroom is insufficient
 */
int
BCMFASTPATH(dhd_ether_to_awdl_llc_hdr)(struct dhd_pub *dhd, struct ether_header *eh, void *p)
{
	osl_t *osh = dhd->osh;
	struct ether_header *neh;
	struct dot11_llc_snap_header *lsh;
	uint16 plen, ether_type;
	uint8 *awdl_data = NULL;
	uint16 *seq = NULL;
	uint16 *flags = NULL;
	uint16 *type = NULL;

	if (PKTHEADROOM(osh, p) < (2 * DOT11_LLC_SNAP_HDR_LEN)) {
		DHD_ERROR(("%s: FATAL! not enough pkt headroom !\n", __FUNCTION__));
		ASSERT(0);
		return BCME_BUFTOOSHORT;
	}

	ether_type = ntoh16(eh->ether_type);
	neh = (struct ether_header *)PKTPUSH(osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);

	/* 802.3 MAC header */
	eacopy((char*)eh->ether_dhost, (char*)neh->ether_dhost);
	eacopy((char*)eh->ether_shost, (char*)neh->ether_shost);
	plen = (uint16)PKTLEN(osh, p) - ETHER_HDR_LEN;
	neh->ether_type = hton16(plen);

	/* 802.2 LLC header */
	lsh = (struct dot11_llc_snap_header *)&neh[1];
	lsh->dsap = 0xaa;
	lsh->ssap = 0xaa;
	lsh->ctl = 0x03;

	/* 802.2 SNAP header */
	lsh->oui[0] = 0x00;
	lsh->oui[1] = 0x17;
	lsh->oui[2] = 0xf2;
	lsh->type = hton16(AWDL_PID);

	/* AWDL upper layer data */
	awdl_data = (uint8 *)&lsh[1];

	awdl_data[0] = dhd->awdl_minext;
	awdl_data[1] = dhd->awdl_presmode;

	/* NOTE(review): seq is stored in HOST byte order while flags/type use
	 * hton16 — presumably intentional for the firmware consumer; confirm.
	 * The uint16 stores below assume 2-byte alignment of awdl_data — TODO
	 * confirm this holds on platforms that fault on unaligned access.
	 */
	seq = (uint16 *)&awdl_data[2];
	*seq = dhd->awdl_seq++;

	flags = (uint16 *)&awdl_data[4];
	*flags = hton16(AWDL_FLAGS_DEFAULT);

	type = (uint16 *)&awdl_data[6];
	*type = hton16(ether_type);

	return BCME_OK;
}
11671
11672 /** Convert 802.3 + AWDL LLC SNAP header to ethernet header
11673 * Note:- This function will overwrite the existing
11674 * 802.3 ethernet + AWDL LLC/SNAP header in the packet 'p'
11675 * with a 14 byte ethernet header
11676 * The pkt data pointer should be pointing to the
11677 * start of the packet (at the 802.3 header) when the function is called.
11678 * The pkt data pointer will be pointing to the
11679 * start of the new ethernet header if the function returns successfully
11680 */
11681 int
dhd_awdl_llc_to_eth_hdr(struct dhd_pub * dhd,struct ether_header * eh,void * p)11682 dhd_awdl_llc_to_eth_hdr(struct dhd_pub *dhd, struct ether_header *eh, void *p)
11683 {
11684 uint16 *ethertype = NULL;
11685 uint8 *ptr = NULL;
11686
11687 if (!eh || !p || !dhd)
11688 return BCME_BADARG;
11689
11690 ptr = PKTDATA(dhd->osh, p);
11691
11692 /* copy ether type instead of length from the
11693 * end of the awdl llc header to the ethernet header
11694 */
11695 ptr += sizeof(*eh) + DOT11_LLC_SNAP_HDR_LEN + AWDL_USERDATA_SIZE;
11696 ethertype = (uint16 *)ptr;
11697 eh->ether_type = *ethertype;
11698
11699 /* overwrite awdl llc header with ethernet header */
11700 PKTPULL(dhd->osh, p, 2 * DOT11_LLC_SNAP_HDR_LEN);
11701 ptr = PKTDATA(dhd->osh, p);
11702 memcpy_s(ptr, sizeof(*eh), eh, sizeof(*eh));
11703 return BCME_OK;
11704 }
11705 #endif /* DHD_AWDL */
11706
11707 int
dhd_iovar(dhd_pub_t * pub,int ifidx,char * name,char * param_buf,uint param_len,char * res_buf,uint res_len,bool set)11708 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
11709 uint res_len, bool set)
11710 {
11711 char *buf = NULL;
11712 uint input_len;
11713 wl_ioctl_t ioc;
11714 int ret;
11715
11716 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
11717 return BCME_BADARG;
11718
11719 input_len = strlen(name) + 1 + param_len;
11720
11721 /* WAR to fix GET iovar returning buf too short error
11722 * If param len is 0 for get iovar, increment input_len by sizeof(int)
11723 * to avoid the length check error in fw
11724 */
11725 if (!set && !param_len) {
11726 input_len += sizeof(int);
11727 }
11728 if (input_len > WLC_IOCTL_MAXLEN)
11729 return BCME_BADARG;
11730
11731 buf = NULL;
11732 if (set) {
11733 if (res_buf || res_len != 0) {
11734 DHD_ERROR(("%s: SET wrong arguemnet\n", __FUNCTION__));
11735 ret = BCME_BADARG;
11736 goto exit;
11737 }
11738 buf = MALLOCZ(pub->osh, input_len);
11739 if (!buf) {
11740 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11741 ret = BCME_NOMEM;
11742 goto exit;
11743 }
11744 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11745 if (!ret) {
11746 ret = BCME_NOMEM;
11747 goto exit;
11748 }
11749
11750 ioc.cmd = WLC_SET_VAR;
11751 ioc.buf = buf;
11752 ioc.len = input_len;
11753 ioc.set = set;
11754
11755 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11756 } else {
11757 if (!res_buf || !res_len) {
11758 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
11759 ret = BCME_BADARG;
11760 goto exit;
11761 }
11762
11763 if (res_len < input_len) {
11764 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
11765 res_len, input_len));
11766 buf = MALLOCZ(pub->osh, input_len);
11767 if (!buf) {
11768 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11769 ret = BCME_NOMEM;
11770 goto exit;
11771 }
11772 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11773 if (!ret) {
11774 ret = BCME_NOMEM;
11775 goto exit;
11776 }
11777
11778 ioc.cmd = WLC_GET_VAR;
11779 ioc.buf = buf;
11780 ioc.len = input_len;
11781 ioc.set = set;
11782
11783 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11784
11785 if (ret == BCME_OK) {
11786 memcpy(res_buf, buf, res_len);
11787 }
11788 } else {
11789 memset(res_buf, 0, res_len);
11790 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
11791 if (!ret) {
11792 ret = BCME_NOMEM;
11793 goto exit;
11794 }
11795
11796 ioc.cmd = WLC_GET_VAR;
11797 ioc.buf = res_buf;
11798 ioc.len = res_len;
11799 ioc.set = set;
11800
11801 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11802 }
11803 }
11804 exit:
11805 if (buf) {
11806 MFREE(pub->osh, buf, input_len);
11807 }
11808 return ret;
11809 }
11810