/*
 * Broadcom Dongle Host Driver (DHD), Linux-specific network interface
 * Basically selected code segments from usb-cdc.c and usb-rndis.c
 *
 * Portions of this code are copyright (c) 2022 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id: dhd_linux.c 702611 2017-06-02 06:40:15Z $
 */

#include <typedefs.h>
#include <linuxver.h>
#include <osl.h>
#include <bcmstdlib_s.h>
#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#endif /* PCIE_FULL_DONGLE */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/random.h>
#include <linux/spinlock.h>
#include <linux/ethtool.h>
#include <linux/fcntl.h>
#include <linux/fs.h>
#include <linux/ip.h>
#include <linux/reboot.h>
#include <linux/notifier.h>
#include <linux/irq.h>
#include <net/addrconf.h>
#ifdef ENABLE_ADAPTIVE_SCHED
#include <linux/cpufreq.h>
#endif /* ENABLE_ADAPTIVE_SCHED */
#include <linux/rtc.h>
#include <linux/namei.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>
#include <dhd_linux_priv.h>

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
#include <uapi/linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */

#include <epivers.h>
#include <bcmutils.h>
#include <bcmendian.h>
#include <bcmdevs.h>
#include <bcmiov.h>

#include <ethernet.h>
#include <bcmevent.h>
#include <vlan.h>
#include <802.3.h>

#include <dhd_linux_wq.h>
#include <dhd.h>
#include <dhd_linux.h>
#include <dhd_linux_pktdump.h>
#ifdef DHD_WET
#include <dhd_wet.h>
#endif /* DHD_WET */
#ifdef PCIE_FULL_DONGLE
#include <dhd_flowring.h>
#endif // endif
#include <dhd_bus.h>
#include <dhd_proto.h>
#include <dhd_dbg.h>
#include <dhd_dbg_ring.h>
#include <dhd_debug.h>
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
#include <linux/wakelock.h>
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
#if defined(WL_CFG80211)
#include <wl_cfg80211.h>
#ifdef WL_BAM
#include <wl_bam.h>
#endif /* WL_BAM */
#endif /* WL_CFG80211 */
#ifdef PNO_SUPPORT
#include <dhd_pno.h>
#endif // endif
#ifdef RTT_SUPPORT
#include <dhd_rtt.h>
#endif // endif

#if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
	defined(CONFIG_SOC_EXYNOS9820)
#include <linux/exynos-pci-ctrl.h>
#endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */

#ifdef DHD_L2_FILTER
#include <bcmicmp.h>
#include <bcm_l2_filter.h>
#include <dhd_l2_filter.h>
#endif /* DHD_L2_FILTER */

#ifdef DHD_PSTA
#include <dhd_psta.h>
#endif /* DHD_PSTA */

#ifdef AMPDU_VO_ENABLE
#include <802.1d.h>
#endif /* AMPDU_VO_ENABLE */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0))
#include <uapi/linux/sched/types.h>
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 12, 0) */

#if defined(DHDTCPACK_SUPPRESS) || defined(DHDTCPSYNC_FLOOD_BLK)
#include <dhd_ip.h>
#endif /* DHDTCPACK_SUPPRESS || DHDTCPSYNC_FLOOD_BLK */
#include <dhd_daemon.h>
#ifdef DHD_PKT_LOGGING
#include <dhd_pktlog.h>
#endif /* DHD_PKT_LOGGING */
#ifdef DHD_4WAYM4_FAIL_DISCONNECT
#include <eapol.h>
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */

#ifdef DHD_BANDSTEER
#include <dhd_bandsteer.h>
#endif /* DHD_BANDSTEER */
#ifdef DHD_DEBUG_PAGEALLOC
typedef void (*page_corrupt_cb_t)(void *handle, void *addr_corrupt, size_t len);
void dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len);
extern void register_page_corrupt_cb(page_corrupt_cb_t cb, void* handle);
#endif /* DHD_DEBUG_PAGEALLOC */

#define IP_PROT_RESERVED 0xFF

#ifdef DHD_4WAYM4_FAIL_DISCONNECT
static void dhd_m4_state_handler(struct work_struct * work);
#endif /* DHD_4WAYM4_FAIL_DISCONNECT */

#ifdef DHDTCPSYNC_FLOOD_BLK
static void dhd_blk_tsfl_handler(struct work_struct * work);
#endif /* DHDTCPSYNC_FLOOD_BLK */

#ifdef WL_NATOE
#include <dhd_linux_nfct.h>
#endif /* WL_NATOE */

#if defined(OEM_ANDROID) && defined(SOFTAP)
extern bool ap_cfg_running;
extern bool ap_fw_loaded;
#endif // endif

#ifdef FIX_CPU_MIN_CLOCK
#include <linux/pm_qos.h>
#endif /* FIX_CPU_MIN_CLOCK */

#ifdef SET_RANDOM_MAC_SOFTAP
#ifndef CONFIG_DHD_SET_RANDOM_MAC_VAL
#define CONFIG_DHD_SET_RANDOM_MAC_VAL 0x001A11
#endif // endif
static u32 vendor_oui = CONFIG_DHD_SET_RANDOM_MAC_VAL;
#endif /* SET_RANDOM_MAC_SOFTAP */

#ifdef ENABLE_ADAPTIVE_SCHED
#define DEFAULT_CPUFREQ_THRESH 1000000 /* threshold frequency : 1000000 = 1GHz */
#ifndef CUSTOM_CPUFREQ_THRESH
#define CUSTOM_CPUFREQ_THRESH DEFAULT_CPUFREQ_THRESH
#endif /* CUSTOM_CPUFREQ_THRESH */
#endif /* ENABLE_ADAPTIVE_SCHED */

/* enable HOSTIP cache update from the host side when an eth0:N is up */
#define AOE_IP_ALIAS_SUPPORT 1

#ifdef PROP_TXSTATUS
#include <wlfc_proto.h>
#include <dhd_wlfc.h>
#endif // endif

#if defined(OEM_ANDROID)
#include <wl_android.h>
#endif // endif

/* Maximum STA per radio */
#define DHD_MAX_STA 32

#ifdef DHD_EVENT_LOG_FILTER
#include <dhd_event_log_filter.h>
#endif /* DHD_EVENT_LOG_FILTER */

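/*
 * Map an 802.1D priority to a WME access category in two steps:
 * prio2fifo[] maps the priority to a hardware FIFO, and wme_fifo2ac[]
 * maps that FIFO back to an access category. For example, priority 7
 * maps to FIFO 3, which maps to AC 3.
 */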
const uint8 wme_fifo2ac[] = { 0, 1, 2, 3, 1, 1 };
const uint8 prio2fifo[8] = { 1, 0, 0, 1, 2, 2, 3, 3 };
#define WME_PRIO2AC(prio) wme_fifo2ac[prio2fifo[(prio)]]

#ifdef ARP_OFFLOAD_SUPPORT
void aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx);
static int dhd_inetaddr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inetaddr_notifier = {
	.notifier_call = dhd_inetaddr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in kernel notifier link list (with 'next' pointing to itself)
 */
static bool dhd_inetaddr_notifier_registered = FALSE;
#endif /* ARP_OFFLOAD_SUPPORT */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
int dhd_inet6addr_notifier_call(struct notifier_block *this,
	unsigned long event, void *ptr);
static struct notifier_block dhd_inet6addr_notifier = {
	.notifier_call = dhd_inet6addr_notifier_call
};
/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in kernel notifier link list (with 'next' pointing to itself)
 */
static bool dhd_inet6addr_notifier_registered = FALSE;
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */

#if defined(CONFIG_PM_SLEEP)
#include <linux/suspend.h>
volatile bool dhd_mmc_suspend = FALSE;
DECLARE_WAIT_QUEUE_HEAD(dhd_dpc_wait);
#endif /* defined(CONFIG_PM_SLEEP) */

#if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID)
extern void dhd_enable_oob_intr(struct dhd_bus *bus, bool enable);
#endif /* defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) */
#if defined(OEM_ANDROID)
static void dhd_hang_process(struct work_struct *work_data);
#endif /* OEM_ANDROID */
MODULE_LICENSE("GPL and additional rights");
MODULE_IMPORT_NS(VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver);

#ifdef CONFIG_BCM_DETECT_CONSECUTIVE_HANG
#define MAX_CONSECUTIVE_HANG_COUNTS 5
#endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */

#include <dhd_bus.h>

#ifdef DHD_ULP
#include <dhd_ulp.h>
#endif /* DHD_ULP */

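/*
 * Bus receive buffer size for a frame on interface 'net': MTU plus
 * link-layer header plus any bus/dongle header (pub.hdrlen). With
 * PROP_TXSTATUS, 128 extra bytes are reserved, presumably as headroom
 * for the wlfc signaling data carried with the frame.
 */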
#ifndef PROP_TXSTATUS
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen)
#else
#define DBUS_RX_BUFFER_SIZE_DHD(net) (net->mtu + net->hard_header_len + dhd->pub.hdrlen + 128)
#endif // endif

#ifdef PROP_TXSTATUS
extern bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx);
extern void dhd_wlfc_plat_init(void *dhd);
extern void dhd_wlfc_plat_deinit(void *dhd);
#endif /* PROP_TXSTATUS */
#ifdef USE_DYNAMIC_F2_BLKSIZE
extern uint sd_f2_blocksize;
extern int dhdsdio_func_blocksize(dhd_pub_t *dhd, int function_num, int block_size);
#endif /* USE_DYNAMIC_F2_BLKSIZE */

/* Linux wireless extension support */
#if defined(WL_WIRELESS_EXT)
#include <wl_iw.h>
extern wl_iw_extra_params_t g_wl_iw_params;
#endif /* defined(WL_WIRELESS_EXT) */

#ifdef CONFIG_PARTIALSUSPEND_SLP
#include <linux/partialsuspend_slp.h>
#define CONFIG_HAS_EARLYSUSPEND
#define DHD_USE_EARLYSUSPEND
#define register_early_suspend register_pre_suspend
#define unregister_early_suspend unregister_pre_suspend
#define early_suspend pre_suspend
#define EARLY_SUSPEND_LEVEL_BLANK_SCREEN 50
#else
#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
#include <linux/earlysuspend.h>
#endif /* defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND) */
#endif /* CONFIG_PARTIALSUSPEND_SLP */

#if defined(OEM_ANDROID) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0))
#include <linux/nl80211.h>
#endif /* OEM_ANDROID && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 11, 0)) */

#if defined(PKT_FILTER_SUPPORT) && defined(APF)
static int __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
	u8* program, uint32 program_len);
static int __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
	uint32 mode, uint32 enable);
static int __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id);
#endif /* PKT_FILTER_SUPPORT && APF */

#if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
static int dhd_wait_for_file_dump(dhd_pub_t *dhdp);
#endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */

#if defined(ARGOS_NOTIFY_CB)
/* ARGOS notifier data */
static struct notifier_block argos_wifi; /* STA */
static struct notifier_block argos_p2p; /* P2P */
argos_rps_ctrl argos_rps_ctrl_data;
#endif // endif

#ifdef DHD_FW_COREDUMP
static void dhd_mem_dump(void *dhd_info, void *event_info, u8 event);
#endif /* DHD_FW_COREDUMP */

#ifdef DHD_LOG_DUMP

struct dhd_log_dump_buf g_dld_buf[DLD_BUFFER_NUM];

/* Only the headers for the log dump ring buffers are stored in this array.
 * Headers for sections like 'dhd dump' and 'ext trap' are not included,
 * because those sections are not log ring buffers.
 */
dld_hdr_t dld_hdrs[DLD_BUFFER_NUM] = {
	{GENERAL_LOG_HDR, LOG_DUMP_SECTION_GENERAL},
	{PRESERVE_LOG_HDR, LOG_DUMP_SECTION_PRESERVE},
	{SPECIAL_LOG_HDR, LOG_DUMP_SECTION_SPECIAL}
};

static int dld_buf_size[DLD_BUFFER_NUM] = {
	LOG_DUMP_GENERAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_GENERAL */
	LOG_DUMP_PRESERVE_MAX_BUFSIZE,	/* DLD_BUF_TYPE_PRESERVE */
	LOG_DUMP_SPECIAL_MAX_BUFSIZE,	/* DLD_BUF_TYPE_SPECIAL */
};

static void dhd_log_dump_init(dhd_pub_t *dhd);
static void dhd_log_dump_deinit(dhd_pub_t *dhd);
static void dhd_log_dump(void *handle, void *event_info, u8 event);
static int do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type);
static int dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type);
static void dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size);
void dhd_get_debug_dump_len(void *handle, struct sk_buff *skb, void *event_info, u8 event);
void cfgvendor_log_dump_len(dhd_pub_t *dhdp, log_dump_type_t *type, struct sk_buff *skb);
static void dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size);
static void dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type);
#endif /* DHD_LOG_DUMP */

#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
#include <linux/workqueue.h>
#include <linux/pm_runtime.h>
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */

#ifdef DHD_DEBUG_UART
#include <linux/kmod.h>
#define DHD_DEBUG_UART_EXEC_PATH "/system/bin/wldu"
static void dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event);
static void dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd);
#endif /* DHD_DEBUG_UART */

static int dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused);
static struct notifier_block dhd_reboot_notifier = {
	.notifier_call = dhd_reboot_callback,
	.priority = 1,
};

#ifdef OEM_ANDROID
#ifdef BCMPCIE
static int is_reboot = 0;
#endif /* BCMPCIE */
#endif /* OEM_ANDROID */

dhd_pub_t *g_dhd_pub = NULL;

#if defined(BT_OVER_SDIO)
#include "dhd_bt_interface.h"
#endif /* defined (BT_OVER_SDIO) */

#ifdef WL_STATIC_IF
bool dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev);
#endif /* WL_STATIC_IF */

atomic_t exit_in_progress = ATOMIC_INIT(0);

static void dhd_process_daemon_msg(struct sk_buff *skb);
static void dhd_destroy_to_notifier_skt(void);
static int dhd_create_to_notifier_skt(void);
static struct sock *nl_to_event_sk = NULL;
int sender_pid = 0;

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0))
struct netlink_kernel_cfg dhd_netlink_cfg = {
	.groups = 1,
	.input = dhd_process_daemon_msg,
};
#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 6, 0)) */

#if defined(BT_OVER_SDIO)
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = TRUE;
#else
/* Flag to indicate if driver is initialized */
uint dhd_driver_init_done = FALSE;
#endif // endif
/* Flag to indicate if we should download firmware on driver load */
uint dhd_download_fw_on_driverload = TRUE;

/* Definitions to provide path to the firmware and nvram
 * example nvram_path[MOD_PARAM_PATHLEN]="/projects/wlan/nvram.txt"
 */
char firmware_path[MOD_PARAM_PATHLEN];
char nvram_path[MOD_PARAM_PATHLEN];
char clm_path[MOD_PARAM_PATHLEN] = "/vendor/etc/firmware/cyfmac4373-sdio.clm_blob";
#ifdef DHD_UCODE_DOWNLOAD
char ucode_path[MOD_PARAM_PATHLEN];
#endif /* DHD_UCODE_DOWNLOAD */

module_param_string(clm_path, clm_path, MOD_PARAM_PATHLEN, 0660);

/* backup buffer for firmware and nvram path */
char fw_bak_path[MOD_PARAM_PATHLEN];
char nv_bak_path[MOD_PARAM_PATHLEN];

/* information string to keep firmware, chip, and chip revision info visible in the log */
char info_string[MOD_PARAM_INFOLEN];
module_param_string(info_string, info_string, MOD_PARAM_INFOLEN, 0444);
int op_mode = 0;
int disable_proptx = 0;
module_param(op_mode, int, 0644);
#if defined(OEM_ANDROID)
extern int wl_control_wl_start(struct net_device *dev);
#if defined(BCMLXSDMMC)
struct semaphore dhd_registration_sem;
#endif /* BCMLXSDMMC */
#endif /* defined(OEM_ANDROID) */

#ifdef DHD_LOG_DUMP
int logdump_max_filesize = LOG_DUMP_MAX_FILESIZE;
module_param(logdump_max_filesize, int, 0644);
int logdump_max_bufsize = LOG_DUMP_GENERAL_MAX_BUFSIZE;
module_param(logdump_max_bufsize, int, 0644);
int logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
int logdump_periodic_flush = FALSE;
module_param(logdump_periodic_flush, int, 0644);
#ifdef EWP_ECNTRS_LOGGING
int logdump_ecntr_enable = TRUE;
#else
int logdump_ecntr_enable = FALSE;
#endif /* EWP_ECNTRS_LOGGING */
module_param(logdump_ecntr_enable, int, 0644);
#ifdef EWP_RTT_LOGGING
int logdump_rtt_enable = TRUE;
#else
int logdump_rtt_enable = FALSE;
#endif /* EWP_RTT_LOGGING */
module_param(logdump_rtt_enable, int, 0644);
#endif /* DHD_LOG_DUMP */
#ifdef EWP_EDL
int host_edl_support = TRUE;
module_param(host_edl_support, int, 0644);
#endif // endif

/* deferred handlers */
static void dhd_ifadd_event_handler(void *handle, void *event_info, u8 event);
static void dhd_ifdel_event_handler(void *handle, void *event_info, u8 event);
#ifndef DHD_DIRECT_SET_MAC
static void dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event);
#endif // endif
static void dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event);
#ifdef WL_NATOE
static void dhd_natoe_ct_event_hanlder(void *handle, void *event_info, u8 event);
static void dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event);
#endif /* WL_NATOE */

#if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
static void dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event);
#endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
#ifdef WL_CFG80211
extern void dhd_netdev_free(struct net_device *ndev);
#endif /* WL_CFG80211 */
static dhd_if_t * dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev);

#if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
/* update rx_pkt_chainable state of dhd interface */
static void dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx);
#endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */

/* Error bits */
module_param(dhd_msg_level, int, 0);

#ifdef ARP_OFFLOAD_SUPPORT
/* ARP offload enable */
uint dhd_arp_enable = TRUE;
module_param(dhd_arp_enable, uint, 0);

/* ARP offload agent mode : Enable ARP Host Auto-Reply and ARP Peer Auto-Reply */

#ifdef ENABLE_ARP_SNOOP_MODE
uint dhd_arp_mode = (ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_SNOOP | ARP_OL_HOST_AUTO_REPLY |
	ARP_OL_UPDATE_HOST_CACHE);
#else
uint dhd_arp_mode = ARP_OL_AGENT | ARP_OL_PEER_AUTO_REPLY | ARP_OL_UPDATE_HOST_CACHE;
#endif /* ENABLE_ARP_SNOOP_MODE */

module_param(dhd_arp_mode, uint, 0);
#endif /* ARP_OFFLOAD_SUPPORT */

/* Disable Prop tx */
module_param(disable_proptx, int, 0644);
/* load firmware and/or nvram values from the filesystem */
module_param_string(firmware_path, firmware_path, MOD_PARAM_PATHLEN, 0660);
module_param_string(nvram_path, nvram_path, MOD_PARAM_PATHLEN, 0660);
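/* Example (illustrative paths only, assuming the module is named dhd.ko):
 *   insmod dhd.ko firmware_path=/vendor/firmware/fw_bcmdhd.bin \
 *       nvram_path=/vendor/firmware/nvram.txt
 */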
#ifdef DHD_UCODE_DOWNLOAD
module_param_string(ucode_path, ucode_path, MOD_PARAM_PATHLEN, 0660);
#endif /* DHD_UCODE_DOWNLOAD */

/* wl event forwarding */
#ifdef WL_EVENT_ENAB
uint wl_event_enable = true;
#else
uint wl_event_enable = false;
#endif /* WL_EVENT_ENAB */
module_param(wl_event_enable, uint, 0660);

/* logtrace packet sendup */
#ifdef LOGTRACE_PKT_SENDUP
uint logtrace_pkt_sendup = true;
#else
uint logtrace_pkt_sendup = false;
#endif /* LOGTRACE_PKT_SENDUP */
module_param(logtrace_pkt_sendup, uint, 0660);

/* Watchdog interval */
/* extend watchdog expiration to 2 seconds when DPC is running */
#define WATCHDOG_EXTEND_INTERVAL (2000)

uint dhd_watchdog_ms = CUSTOM_DHD_WATCHDOG_MS;
module_param(dhd_watchdog_ms, uint, 0);

#ifdef DHD_PCIE_RUNTIMEPM
uint dhd_runtimepm_ms = CUSTOM_DHD_RUNTIME_MS;
#endif /* DHD_PCIE_RUNTIMEPM */
#if defined(DHD_DEBUG)
/* Console poll interval */
#if defined(OEM_ANDROID)
uint dhd_console_ms = 0;
#else
uint dhd_console_ms = 250;
#endif /* OEM_ANDROID */
module_param(dhd_console_ms, uint, 0644);
#else
uint dhd_console_ms = 0;
#endif /* DHD_DEBUG */

uint dhd_slpauto = TRUE;
module_param(dhd_slpauto, uint, 0);

#ifdef PKT_FILTER_SUPPORT
/* Global Pkt filter enable control */
uint dhd_pkt_filter_enable = TRUE;
module_param(dhd_pkt_filter_enable, uint, 0);
#endif // endif

/* Pkt filter init setup */
uint dhd_pkt_filter_init = 0;
module_param(dhd_pkt_filter_init, uint, 0);

/* Pkt filter mode control */
#ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
uint dhd_master_mode = FALSE;
#else
uint dhd_master_mode = TRUE;
#endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
module_param(dhd_master_mode, uint, 0);

int dhd_watchdog_prio = 0;
module_param(dhd_watchdog_prio, int, 0);

/* DPC thread priority */
int dhd_dpc_prio = CUSTOM_DPC_PRIO_SETTING;
module_param(dhd_dpc_prio, int, 0);

/* RX frame thread priority */
int dhd_rxf_prio = CUSTOM_RXF_PRIO_SETTING;
module_param(dhd_rxf_prio, int, 0);

#if !defined(BCMDHDUSB)
extern int dhd_dongle_ramsize;
module_param(dhd_dongle_ramsize, int, 0);
#endif /* BCMDHDUSB */

#ifdef WL_CFG80211
int passive_channel_skip = 0;
module_param(passive_channel_skip, int, (S_IRUSR|S_IWUSR));
#endif /* WL_CFG80211 */

#ifdef DHD_MSI_SUPPORT
uint enable_msi = TRUE;
module_param(enable_msi, uint, 0);
#endif /* DHD_MSI_SUPPORT */

#ifdef DHD_SSSR_DUMP
int dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len);
extern uint support_sssr_dump;
module_param(support_sssr_dump, uint, 0);
#endif /* DHD_SSSR_DUMP */

/* Keep track of number of instances */
static int dhd_found = 0;
static int instance_base = 0; /* Starting instance number */
module_param(instance_base, int, 0644);

/* Takes value of LL of OTP param customvar2=0xKKLLMMNN.
 * LL is module variant
 */
uint32 hw_module_variant = 0;
module_param(hw_module_variant, uint, 0644);

#if defined(DHD_LB_RXP)
static int dhd_napi_weight = 32;
module_param(dhd_napi_weight, int, 0644);
#endif /* DHD_LB_RXP */

#ifdef PCIE_FULL_DONGLE
extern int h2d_max_txpost;
module_param(h2d_max_txpost, int, 0644);

extern uint dma_ring_indices;
module_param(dma_ring_indices, uint, 0644);

extern bool h2d_phase;
module_param(h2d_phase, bool, 0644);
extern bool force_trap_bad_h2d_phase;
module_param(force_trap_bad_h2d_phase, bool, 0644);
#endif /* PCIE_FULL_DONGLE */

#ifdef FORCE_TPOWERON
/*
 * On Fire's reference platform, coming out of L1.2,
 * there is a constant delay of 45us between CLKREQ# and stable REFCLK.
 * Due to this delay, with tPowerOn < 50us,
 * there is a chance of the refclk sense triggering on noise.
 *
 * 0x29, when written to L1SSControl2, translates to 50us.
 */
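/* Per the PCIe L1 PM substates spec, L1SSControl2 bits [7:3] hold the
 * T_PowerOn value and bits [1:0] the scale; 0x29 encodes value 5 with a
 * 10us scale, i.e. 5 * 10us = 50us.
 */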
#define FORCE_TPOWERON_50US 0x29
uint32 tpoweron_scale = FORCE_TPOWERON_50US; /* default 50us */
module_param(tpoweron_scale, uint, 0644);
#endif /* FORCE_TPOWERON */

#ifdef SHOW_LOGTRACE
#if defined(CUSTOMER_HW4_DEBUG)
static char *logstrs_path = PLATFORM_PATH"logstrs.bin";
char *st_str_file_path = PLATFORM_PATH"rtecdc.bin";
static char *map_file_path = PLATFORM_PATH"rtecdc.map";
static char *rom_st_str_file_path = PLATFORM_PATH"roml.bin";
static char *rom_map_file_path = PLATFORM_PATH"roml.map";
#elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
static char *logstrs_path = "/data/misc/wifi/logstrs.bin";
char *st_str_file_path = "/data/misc/wifi/rtecdc.bin";
static char *map_file_path = "/data/misc/wifi/rtecdc.map";
static char *rom_st_str_file_path = "/data/misc/wifi/roml.bin";
static char *rom_map_file_path = "/data/misc/wifi/roml.map";
#elif defined(OEM_ANDROID) /* For Brix KK Live Image */
static char *logstrs_path = "/installmedia/logstrs.bin";
char *st_str_file_path = "/installmedia/rtecdc.bin";
static char *map_file_path = "/installmedia/rtecdc.map";
static char *rom_st_str_file_path = "/installmedia/roml.bin";
static char *rom_map_file_path = "/installmedia/roml.map";
#else /* For Linux platforms */
static char *logstrs_path = "/root/logstrs.bin";
char *st_str_file_path = "/root/rtecdc.bin";
static char *map_file_path = "/root/rtecdc.map";
static char *rom_st_str_file_path = "/root/roml.bin";
static char *rom_map_file_path = "/root/roml.map";
#endif /* CUSTOMER_HW4_DEBUG || CUSTOMER_HW2 || BOARD_HIKEY */
static char *ram_file_str = "rtecdc";
static char *rom_file_str = "roml";

module_param(logstrs_path, charp, S_IRUGO);
module_param(st_str_file_path, charp, S_IRUGO);
module_param(map_file_path, charp, S_IRUGO);
module_param(rom_st_str_file_path, charp, S_IRUGO);
module_param(rom_map_file_path, charp, S_IRUGO);

static int dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp);
static int dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
	uint32 *rodata_end);
static int dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file,
	char *map_file);
#endif /* SHOW_LOGTRACE */

#ifdef BCMSDIO
#define DHD_IF_ROLE(pub, idx) ((pub)->info->iflist[idx]->role)
#define DHD_IF_ROLE_AP(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_AP)
#define DHD_IF_ROLE_STA(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_STA)
#define DHD_IF_ROLE_P2PGO(pub, idx) (DHD_IF_ROLE(pub, idx) == WLC_E_IF_ROLE_P2P_GO)

void dhd_set_role(dhd_pub_t *dhdp, int role, int bssidx)
{
	int ifidx = dhd_bssidx2idx(dhdp, bssidx);
	DHD_TRACE(("dhd_set_role ifidx %d role %d\n", ifidx, role));
	dhdp->info->iflist[ifidx]->role = role;
}
#endif /* BCMSDIO */

#ifdef USE_WFA_CERT_CONF
int g_frameburst = 1;
#endif /* USE_WFA_CERT_CONF */

static int dhd_get_pend_8021x_cnt(dhd_info_t *dhd);

/* DHD Perimeter lock only used in router with bypass forwarding. */
#define DHD_PERIM_RADIO_INIT() do { /* noop */ } while (0)
#define DHD_PERIM_LOCK_TRY(unit, flag) do { /* noop */ } while (0)
#define DHD_PERIM_UNLOCK_TRY(unit, flag) do { /* noop */ } while (0)

#define DHD_IF_STA_LIST_LOCK_INIT(ifp) spin_lock_init(&(ifp)->sta_list_lock)
#define DHD_IF_STA_LIST_LOCK(ifp, flags) \
	spin_lock_irqsave(&(ifp)->sta_list_lock, (flags))
#define DHD_IF_STA_LIST_UNLOCK(ifp, flags) \
	spin_unlock_irqrestore(&(ifp)->sta_list_lock, (flags))

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head * dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp,
	struct list_head *snapshot_list);
static void dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list);
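/* The UCFORWARD lock/unlock pair copies the STA list into a private
 * snapshot, so callers can iterate (and transmit) without holding the
 * sta_list spinlock.
 */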
#define DHD_IF_WMF_UCFORWARD_LOCK(dhd, ifp, slist) ({ dhd_sta_list_snapshot(dhd, ifp, slist); })
#define DHD_IF_WMF_UCFORWARD_UNLOCK(dhd, slist) ({ dhd_sta_list_snapshot_free(dhd, slist); })
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

/* Control fw roaming */
#ifdef BCMCCX
uint dhd_roam_disable = 0;
#else
#ifdef OEM_ANDROID
uint dhd_roam_disable = 0;
#else
uint dhd_roam_disable = 1;
#endif // endif
#endif /* BCMCCX */

#ifdef BCMDBGFS
extern void dhd_dbgfs_init(dhd_pub_t *dhdp);
extern void dhd_dbgfs_remove(void);
#endif // endif

static uint pcie_txs_metadata_enable = 0; /* Enable TX status metadata report */
module_param(pcie_txs_metadata_enable, int, 0);

/* Control radio state */
uint dhd_radio_up = 1;

/* Network interface name */
char iface_name[IFNAMSIZ] = {'\0'};
module_param_string(iface_name, iface_name, IFNAMSIZ, 0);

#ifdef WL_VIF_SUPPORT
/* Virtual interface name */
char vif_name[IFNAMSIZ] = "wlan";
module_param_string(vif_name, vif_name, IFNAMSIZ, 0);

int vif_num = 0;
module_param(vif_num, int, 0);
#endif /* WL_VIF_SUPPORT */

/* The following are specific to the SDIO dongle */

/* IOCTL response timeout */
int dhd_ioctl_timeout_msec = IOCTL_RESP_TIMEOUT;

/* DS Exit response timeout */
int ds_exit_timeout_msec = DS_EXIT_TIMEOUT;

/* Idle timeout for backplane clock */
int dhd_idletime = DHD_IDLETIME_TICKS;
module_param(dhd_idletime, int, 0);

/* Use polling */
uint dhd_poll = FALSE;
module_param(dhd_poll, uint, 0);

/* Use interrupts */
uint dhd_intr = TRUE;
module_param(dhd_intr, uint, 0);

/* SDIO Drive Strength (in milliamps) */
uint dhd_sdiod_drive_strength = 6;
module_param(dhd_sdiod_drive_strength, uint, 0);

#ifdef BCMSDIO
/* Tx/Rx bounds */
extern uint dhd_txbound;
extern uint dhd_rxbound;
module_param(dhd_txbound, uint, 0);
module_param(dhd_rxbound, uint, 0);

/* Deferred transmits */
extern uint dhd_deferred_tx;
module_param(dhd_deferred_tx, uint, 0);

#endif /* BCMSDIO */

#ifdef SDTEST
/* Echo packet generator (pkts/s) */
uint dhd_pktgen = 0;
module_param(dhd_pktgen, uint, 0);

/* Echo packet len (0 => sawtooth, max 2040) */
uint dhd_pktgen_len = 0;
module_param(dhd_pktgen_len, uint, 0);
#endif /* SDTEST */

#if defined(BCMSUP_4WAY_HANDSHAKE)
/* Use in dongle supplicant for 4-way handshake */
#if defined(WLFBT) || defined(WL_ENABLE_IDSUP)
/* Enable idsup by default (if supported in fw) */
uint dhd_use_idsup = 1;
#else
uint dhd_use_idsup = 0;
#endif /* WLFBT || WL_ENABLE_IDSUP */
module_param(dhd_use_idsup, uint, 0);
#endif /* BCMSUP_4WAY_HANDSHAKE */

#if (defined(OEM_ANDROID) && !defined(BCMQT))
/* Allow delayed firmware download for debug purpose */
int allow_delay_fwdl = FALSE;
#else
int allow_delay_fwdl = TRUE;
#endif // endif
module_param(allow_delay_fwdl, int, 0);

#ifdef ECOUNTER_PERIODIC_DISABLE
uint enable_ecounter = FALSE;
#else
uint enable_ecounter = TRUE;
#endif // endif
module_param(enable_ecounter, uint, 0);

/* TCM verification flag */
uint dhd_tcm_test_enable = FALSE;
module_param(dhd_tcm_test_enable, uint, 0644);

/* WAR to avoid system hang during FW trap */
#ifdef DHD_FW_COREDUMP
uint disable_bug_on = FALSE;
module_param(disable_bug_on, uint, 0);
#endif /* DHD_FW_COREDUMP */

extern char dhd_version[];
extern char fw_version[];
extern char clm_version[];

int dhd_net_bus_devreset(struct net_device *dev, uint8 flag);
static void dhd_net_if_lock_local(dhd_info_t *dhd);
static void dhd_net_if_unlock_local(dhd_info_t *dhd);
static void dhd_suspend_lock(dhd_pub_t *dhdp);
static void dhd_suspend_unlock(dhd_pub_t *dhdp);

#ifdef DHD_MONITOR_INTERFACE
/* Monitor interface */
int dhd_monitor_init(void *dhd_pub);
int dhd_monitor_uninit(void);
#endif /* DHD_MONITOR_INTERFACE */

#ifdef DHD_PM_CONTROL_FROM_FILE
bool g_pm_control;
#ifdef DHD_EXPORT_CNTL_FILE
int pmmode_val;
#endif /* DHD_EXPORT_CNTL_FILE */
void sec_control_pm(dhd_pub_t *dhd, uint *);
#endif /* DHD_PM_CONTROL_FROM_FILE */

#if defined(WL_WIRELESS_EXT)
struct iw_statistics *dhd_get_wireless_stats(struct net_device *dev);
#endif /* defined(WL_WIRELESS_EXT) */

static void dhd_dpc(ulong data);
/* forward decl */
extern int dhd_wait_pend8021x(struct net_device *dev);
void dhd_os_wd_timer_extend(void *bus, bool extend);

#ifdef TOE
#ifndef BDC
#error TOE requires BDC
#endif /* !BDC */
static int dhd_toe_get(dhd_info_t *dhd, int idx, uint32 *toe_ol);
static int dhd_toe_set(dhd_info_t *dhd, int idx, uint32 toe_ol);
#endif /* TOE */

static int dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
	wl_event_msg_t *event_ptr, void **data_ptr);

#if defined(CONFIG_PM_SLEEP)
static int dhd_pm_callback(struct notifier_block *nfb, unsigned long action, void *ignored)
{
	int ret = NOTIFY_DONE;
	bool suspend = FALSE;

#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	dhd_info_t *dhdinfo = (dhd_info_t*)container_of(nfb, struct dhd_info, pm_notifier);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif

	BCM_REFERENCE(dhdinfo);
	BCM_REFERENCE(suspend);

	switch (action) {
	case PM_HIBERNATION_PREPARE:
	case PM_SUSPEND_PREPARE:
		suspend = TRUE;
		break;

	case PM_POST_HIBERNATION:
	case PM_POST_SUSPEND:
		suspend = FALSE;
		break;
	}

#if defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS)
	if (suspend) {
		DHD_OS_WAKE_LOCK_WAIVE(&dhdinfo->pub);
		dhd_wlfc_suspend(&dhdinfo->pub);
		DHD_OS_WAKE_LOCK_RESTORE(&dhdinfo->pub);
	} else {
		dhd_wlfc_resume(&dhdinfo->pub);
	}
#endif /* defined(SUPPORT_P2P_GO_PS) && defined(PROP_TXSTATUS) */

	dhd_mmc_suspend = suspend;
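	/* Publish the new suspend state to other CPUs (e.g. threads that
	 * poll dhd_mmc_suspend) before returning.
	 */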
	smp_mb();

	return ret;
}

/* to make sure we won't register the same notifier twice, otherwise a loop is likely to be
 * created in kernel notifier link list (with 'next' pointing to itself)
 */
static bool dhd_pm_notifier_registered = FALSE;

extern int register_pm_notifier(struct notifier_block *nb);
extern int unregister_pm_notifier(struct notifier_block *nb);
#endif /* CONFIG_PM_SLEEP */

/* Request scheduling of the bus rx frame */
static void dhd_sched_rxf(dhd_pub_t *dhdp, void *skb);
static void dhd_os_rxflock(dhd_pub_t *pub);
static void dhd_os_rxfunlock(dhd_pub_t *pub);

#if defined(DHD_H2D_LOG_TIME_SYNC)
static void
dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event);
#endif /* DHD_H2D_LOG_TIME_SYNC */

/** priv_link is the link between netdev and the dhdif and dhd_info structs. */
typedef struct dhd_dev_priv {
	dhd_info_t * dhd; /* cached pointer to dhd_info in netdevice priv */
	dhd_if_t * ifp; /* cached pointer to dhd_if in netdevice priv */
	int ifidx; /* interface index */
	void * lkup;
} dhd_dev_priv_t;

#define DHD_DEV_PRIV_SIZE (sizeof(dhd_dev_priv_t))
#define DHD_DEV_PRIV(dev) ((dhd_dev_priv_t *)DEV_PRIV(dev))
#define DHD_DEV_INFO(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->dhd)
#define DHD_DEV_IFP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifp)
#define DHD_DEV_IFIDX(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->ifidx)
#define DHD_DEV_LKUP(dev) (((dhd_dev_priv_t *)DEV_PRIV(dev))->lkup)

#if defined(DHD_OF_SUPPORT)
extern int dhd_wlan_init(void);
#endif /* defined(DHD_OF_SUPPORT) */
/** Clear the dhd net_device's private structure. */
static inline void
dhd_dev_priv_clear(struct net_device * dev)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = (dhd_info_t *)NULL;
	dev_priv->ifp = (dhd_if_t *)NULL;
	dev_priv->ifidx = DHD_BAD_IF;
	dev_priv->lkup = (void *)NULL;
}

/** Setup the dhd net_device's private structure. */
static inline void
dhd_dev_priv_save(struct net_device * dev, dhd_info_t * dhd, dhd_if_t * ifp,
	int ifidx)
{
	dhd_dev_priv_t * dev_priv;
	ASSERT(dev != (struct net_device *)NULL);
	dev_priv = DHD_DEV_PRIV(dev);
	dev_priv->dhd = dhd;
	dev_priv->ifp = ifp;
	dev_priv->ifidx = ifidx;
}

/* Return interface pointer */
struct dhd_if * dhd_get_ifp(dhd_pub_t *dhdp, uint32 ifidx)
{
	ASSERT(ifidx < DHD_MAX_IFS);

	if (!dhdp || !dhdp->info || ifidx >= DHD_MAX_IFS)
		return NULL;

	return dhdp->info->iflist[ifidx];
}

/** Dummy objects are defined with state representing bad|down.
 * Performance gains come from reducing branch conditionals and load
 * shadows, and from better instruction parallelism, dual issue, and use
 * of larger pipelines.
 * Use DHD_XXX_NULL instead of (dhd_xxx_t *)NULL, whenever an object pointer
 * is accessed via the dhd_sta_t.
 */

/* Dummy dhd_info object */
dhd_info_t dhd_info_null = {
	.pub = {
		.info = &dhd_info_null,
#ifdef DHDTCPACK_SUPPRESS
		.tcpack_sup_mode = TCPACK_SUP_REPLACE,
#endif /* DHDTCPACK_SUPPRESS */
		.up = FALSE,
		.busstate = DHD_BUS_DOWN
	}
};
#define DHD_INFO_NULL (&dhd_info_null)
#define DHD_PUB_NULL (&dhd_info_null.pub)

/* Dummy netdevice object */
struct net_device dhd_net_dev_null = {
	.reg_state = NETREG_UNREGISTERED
};
#define DHD_NET_DEV_NULL (&dhd_net_dev_null)

/* Dummy dhd_if object */
dhd_if_t dhd_if_null = {
#ifdef WMF
	.wmf = { .wmf_enable = TRUE },
#endif // endif
	.info = DHD_INFO_NULL,
	.net = DHD_NET_DEV_NULL,
	.idx = DHD_BAD_IF
};
#define DHD_IF_NULL (&dhd_if_null)

#define DHD_STA_NULL ((dhd_sta_t *)NULL)

/** Interface STA list management. */

/** Alloc/Free a dhd_sta object from the dhd instances' sta_pool. */
static void dhd_sta_free(dhd_pub_t *pub, dhd_sta_t *sta);
static dhd_sta_t * dhd_sta_alloc(dhd_pub_t * dhdp);

/* Delete a dhd_sta or flush all dhd_sta in an interface's sta_list. */
static void dhd_if_del_sta_list(dhd_if_t * ifp);
static void dhd_if_flush_sta(dhd_if_t * ifp);

/* Construct/Destruct a sta pool. */
static int dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta);
static void dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta);
/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta);

/** Reset a dhd_sta object and free into the dhd pool. */
static void
dhd_sta_free(dhd_pub_t * dhdp, dhd_sta_t * sta)
{
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif // endif

	ASSERT((sta != DHD_STA_NULL) && (sta->idx != ID16_INVALID));

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

#ifdef PCIE_FULL_DONGLE
	/*
	 * Flush and free all packets in all flowring's queues belonging to sta.
	 * Packets in flow ring will be flushed later.
	 */
	for (prio = 0; prio < (int)NUMPRIO; prio++) {
		uint16 flowid = sta->flowid[prio];

		if (flowid != FLOWID_INVALID) {
			unsigned long flags;
			flow_ring_node_t * flow_ring_node;

#ifdef DHDTCPACK_SUPPRESS
			/* Clean tcp_ack_info_tbl in order to prevent access to flushed pkt,
			 * when there is a newly coming packet from network stack.
			 */
			dhd_tcpack_info_tbl_clean(dhdp);
#endif /* DHDTCPACK_SUPPRESS */

			flow_ring_node = dhd_flow_ring_node(dhdp, flowid);
			if (flow_ring_node) {
				flow_queue_t *queue = &flow_ring_node->queue;

				DHD_FLOWRING_LOCK(flow_ring_node->lock, flags);
				flow_ring_node->status = FLOW_RING_STATUS_STA_FREEING;

				if (!DHD_FLOW_QUEUE_EMPTY(queue)) {
					void * pkt;
					while ((pkt = dhd_flow_queue_dequeue(dhdp, queue)) !=
						NULL) {
						PKTFREE(dhdp->osh, pkt, TRUE);
					}
				}

				DHD_FLOWRING_UNLOCK(flow_ring_node->lock, flags);
				ASSERT(DHD_FLOW_QUEUE_EMPTY(queue));
			}
		}

		sta->flowid[prio] = FLOWID_INVALID;
	}
#endif /* PCIE_FULL_DONGLE */

	id16_map_free(dhdp->staid_allocator, sta->idx);
	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);
	sta->ifp = DHD_IF_NULL; /* dummy dhd_if object */
	sta->ifidx = DHD_BAD_IF;
	bzero(sta->ea.octet, ETHER_ADDR_LEN);
	INIT_LIST_HEAD(&sta->list);
	sta->idx = ID16_INVALID; /* implying free */
}

/** Allocate a dhd_sta object from the dhd pool. */
static dhd_sta_t *
dhd_sta_alloc(dhd_pub_t * dhdp)
{
	uint16 idx;
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;

	ASSERT((dhdp->staid_allocator != NULL) && (dhdp->sta_pool != NULL));

	idx = id16_map_alloc(dhdp->staid_allocator);
	if (idx == ID16_INVALID) {
		DHD_ERROR(("%s: cannot get free staid\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	sta_pool = (dhd_sta_pool_t *)(dhdp->sta_pool);
	sta = &sta_pool[idx];

	ASSERT((sta->idx == ID16_INVALID) &&
		(sta->ifp == DHD_IF_NULL) && (sta->ifidx == DHD_BAD_IF));

	DHD_CUMM_CTR_INIT(&sta->cumm_ctr);

	sta->idx = idx; /* implying allocated */

	return sta;
}

/** Delete all STAs in an interface's STA list. */
static void
dhd_if_del_sta_list(dhd_if_t *ifp)
{
	dhd_sta_t *sta, *next;
	unsigned long flags;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Router/GMAC3: Flush all station entries in the forwarder's WOFA database. */
static void
dhd_if_flush_sta(dhd_if_t * ifp)
{
}

/** Construct a pool of dhd_sta_t objects to be used by interfaces. */
static int
dhd_sta_pool_init(dhd_pub_t *dhdp, int max_sta)
{
	int idx, sta_pool_memsz;
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif /* PCIE_FULL_DONGLE */
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void * staid_allocator;

	ASSERT(dhdp != (dhd_pub_t *)NULL);
	ASSERT((dhdp->staid_allocator == NULL) && (dhdp->sta_pool == NULL));

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	staid_allocator = id16_map_init(dhdp->osh, max_sta, 1);
	if (staid_allocator == NULL) {
		DHD_ERROR(("%s: sta id allocator init failure\n", __FUNCTION__));
		return BCME_ERROR;
	}

	/* Pre allocate a pool of dhd_sta objects (one extra). */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t)); /* skip idx 0 */
	sta_pool = (dhd_sta_pool_t *)MALLOC(dhdp->osh, sta_pool_memsz);
	if (sta_pool == NULL) {
		DHD_ERROR(("%s: sta table alloc failure\n", __FUNCTION__));
		id16_map_fini(dhdp->osh, staid_allocator);
		return BCME_ERROR;
	}

	dhdp->sta_pool = sta_pool;
	dhdp->staid_allocator = staid_allocator;

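	/* Two-pass setup: first claim every id so each entry is marked as
	 * allocated, then release each entry through dhd_sta_free() so the
	 * pool ends up fully initialized with all ids back in the allocator.
	 */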
	/* Initialize all sta(s) for the pre-allocated free pool. */
	bzero((uchar *)sta_pool, sta_pool_memsz);
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}

	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
#ifdef PCIE_FULL_DONGLE
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_sta_free(dhdp, sta);
	}

	return BCME_OK;
}

/** Destruct the pool of dhd_sta_t objects.
 * Caller must ensure that no STA objects are currently associated with an if.
 */
static void
dhd_sta_pool_fini(dhd_pub_t *dhdp, int max_sta)
{
	dhd_sta_pool_t * sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;

	if (sta_pool) {
		int idx;
		int sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
		for (idx = 1; idx <= max_sta; idx++) {
			ASSERT(sta_pool[idx].ifp == DHD_IF_NULL);
			ASSERT(sta_pool[idx].idx == ID16_INVALID);
		}
		MFREE(dhdp->osh, dhdp->sta_pool, sta_pool_memsz);
		dhdp->sta_pool = NULL;
	}

	id16_map_fini(dhdp->osh, dhdp->staid_allocator);
	dhdp->staid_allocator = NULL;
}

/* Clear the pool of dhd_sta_t objects for built-in type driver */
static void
dhd_sta_pool_clear(dhd_pub_t *dhdp, int max_sta)
{
	int idx, sta_pool_memsz;
#ifdef PCIE_FULL_DONGLE
	int prio;
#endif /* PCIE_FULL_DONGLE */
	dhd_sta_t * sta;
	dhd_sta_pool_t * sta_pool;
	void *staid_allocator;

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	sta_pool = (dhd_sta_pool_t *)dhdp->sta_pool;
	staid_allocator = dhdp->staid_allocator;

	if (!sta_pool) {
		DHD_ERROR(("%s: sta_pool is NULL\n", __FUNCTION__));
		return;
	}

	if (!staid_allocator) {
		DHD_ERROR(("%s: staid_allocator is NULL\n", __FUNCTION__));
		return;
	}

	/* clear free pool */
	sta_pool_memsz = ((max_sta + 1) * sizeof(dhd_sta_t));
	bzero((uchar *)sta_pool, sta_pool_memsz);

	/* dhd_sta objects per radio are managed in a table. id#0 reserved. */
	id16_map_clear(staid_allocator, max_sta, 1);

	/* Initialize all sta(s) for the pre-allocated free pool. */
	for (idx = max_sta; idx >= 1; idx--) { /* skip sta_pool[0] */
		sta = &sta_pool[idx];
		sta->idx = id16_map_alloc(staid_allocator);
		ASSERT(sta->idx <= max_sta);
	}
	/* Now place them into the pre-allocated free pool. */
	for (idx = 1; idx <= max_sta; idx++) {
		sta = &sta_pool[idx];
#ifdef PCIE_FULL_DONGLE
		for (prio = 0; prio < (int)NUMPRIO; prio++) {
			sta->flowid[prio] = FLOWID_INVALID; /* Flow rings do not exist */
		}
#endif /* PCIE_FULL_DONGLE */
		dhd_sta_free(dhdp, sta);
	}
}

/** Find STA with MAC address ea in an interface's STA list. */
dhd_sta_t *
dhd_find_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry(sta, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_INFO(("%s: Found STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG((char *)ea)));
			DHD_IF_STA_LIST_UNLOCK(ifp, flags);
			return sta;
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return DHD_STA_NULL;
}

/** Add STA into the interface's STA list. */
dhd_sta_t *
dhd_add_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return DHD_STA_NULL;

	if (!memcmp(ifp->net->dev_addr, ea, ETHER_ADDR_LEN)) {
		DHD_ERROR(("%s: Serious FAILURE, receive own MAC %pM !!\n", __FUNCTION__, ea));
		return DHD_STA_NULL;
	}

	sta = dhd_sta_alloc((dhd_pub_t *)pub);
	if (sta == DHD_STA_NULL) {
		DHD_ERROR(("%s: Alloc failed\n", __FUNCTION__));
		return DHD_STA_NULL;
	}

	memcpy(sta->ea.octet, ea, ETHER_ADDR_LEN);

	/* link the sta and the dhd interface */
	sta->ifp = ifp;
	sta->ifidx = ifidx;
	INIT_LIST_HEAD(&sta->list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_add_tail(&sta->list, &ifp->sta_list);

	DHD_ERROR(("%s: Adding STA " MACDBG "\n",
		__FUNCTION__, MAC2STRDBG((char *)ea)));

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return sta;
}

/** Delete all STAs from the interface's STA list. */
void
dhd_del_all_sta(void *pub, int ifidx)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {

		list_del(&sta->list);
		dhd_sta_free(&ifp->info->pub, sta);
#ifdef DHD_L2_FILTER
		if (ifp->parp_enable) {
			/* clear Proxy ARP cache of specific Ethernet Address */
			bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh,
				ifp->phnd_arp_table, FALSE,
				sta->ea.octet, FALSE, ((dhd_pub_t*)pub)->tickcnt);
		}
#endif /* DHD_L2_FILTER */
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return;
}

/** Delete STA from the interface's STA list. */
void
dhd_del_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta, *next;
	dhd_if_t *ifp;
	unsigned long flags;

	ASSERT(ea != NULL);
	ifp = dhd_get_ifp((dhd_pub_t *)pub, ifidx);
	if (ifp == NULL)
		return;

	DHD_IF_STA_LIST_LOCK(ifp, flags);
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wcast-qual"
#endif // endif
	list_for_each_entry_safe(sta, next, &ifp->sta_list, list) {
		if (!memcmp(sta->ea.octet, ea, ETHER_ADDR_LEN)) {
			DHD_ERROR(("%s: Deleting STA " MACDBG "\n",
				__FUNCTION__, MAC2STRDBG(sta->ea.octet)));
			list_del(&sta->list);
			dhd_sta_free(&ifp->info->pub, sta);
		}
	}
#if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
#pragma GCC diagnostic pop
#endif // endif
	DHD_IF_STA_LIST_UNLOCK(ifp, flags);
#ifdef DHD_L2_FILTER
	if (ifp->parp_enable) {
		/* clear Proxy ARP cache of specific Ethernet Address */
		bcm_l2_filter_arp_table_update(((dhd_pub_t*)pub)->osh, ifp->phnd_arp_table, FALSE,
			ea, FALSE, ((dhd_pub_t*)pub)->tickcnt);
	}
#endif /* DHD_L2_FILTER */
	return;
}

/** Add STA if it doesn't exist. Not reentrant. */
dhd_sta_t*
dhd_findadd_sta(void *pub, int ifidx, void *ea)
{
	dhd_sta_t *sta;

	sta = dhd_find_sta(pub, ifidx, ea);

	if (!sta) {
		/* Add entry */
		sta = dhd_add_sta(pub, ifidx, ea);
	}

	return sta;
}

#if defined(DHD_IGMP_UCQUERY) || defined(DHD_UCAST_UPNP)
static struct list_head *
dhd_sta_list_snapshot(dhd_info_t *dhd, dhd_if_t *ifp, struct list_head *snapshot_list)
{
	unsigned long flags;
	dhd_sta_t *sta, *snapshot;

	INIT_LIST_HEAD(snapshot_list);

	DHD_IF_STA_LIST_LOCK(ifp, flags);

	list_for_each_entry(sta, &ifp->sta_list, list) {
		/* allocate one and add to snapshot */
		snapshot = (dhd_sta_t *)MALLOC(dhd->pub.osh, sizeof(dhd_sta_t));
		if (snapshot == NULL) {
			DHD_ERROR(("%s: Cannot allocate memory\n", __FUNCTION__));
			continue;
		}

		memcpy(snapshot->ea.octet, sta->ea.octet, ETHER_ADDR_LEN);

		INIT_LIST_HEAD(&snapshot->list);
		list_add_tail(&snapshot->list, snapshot_list);
	}

	DHD_IF_STA_LIST_UNLOCK(ifp, flags);

	return snapshot_list;
}

static void
dhd_sta_list_snapshot_free(dhd_info_t *dhd, struct list_head *snapshot_list)
{
	dhd_sta_t *sta, *next;

	list_for_each_entry_safe(sta, next, snapshot_list, list) {
		list_del(&sta->list);
		MFREE(dhd->pub.osh, sta, sizeof(dhd_sta_t));
	}
}
#endif /* DHD_IGMP_UCQUERY || DHD_UCAST_UPNP */

#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
void
dhd_axi_error_dispatch(dhd_pub_t *dhdp)
{
	dhd_info_t *dhd = dhdp->info;
	schedule_work(&dhd->axi_error_dispatcher_work);
}

static void dhd_axi_error_dispatcher_fn(struct work_struct * work)
{
	struct dhd_info *dhd =
		container_of(work, struct dhd_info, axi_error_dispatcher_work);
	dhd_axi_error(&dhd->pub);
}
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */

/** Returns the dhd iflist index corresponding to the bssidx provided by apps */
int dhd_bssidx2idx(dhd_pub_t *dhdp, uint32 bssidx)
{
	dhd_if_t *ifp;
	dhd_info_t *dhd = dhdp->info;
	int i;

	ASSERT(bssidx < DHD_MAX_IFS);
	ASSERT(dhdp);

	for (i = 0; i < DHD_MAX_IFS; i++) {
		ifp = dhd->iflist[i];
		if (ifp && (ifp->bssidx == bssidx)) {
			DHD_TRACE(("Index manipulated for %s from %d to %d\n",
				ifp->name, bssidx, i));
			break;
		}
	}
	return i;
}

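/* The rxf path stages received skbs in dhdp->skbbuf[], a circular buffer
 * with a producer index (store_idx) and a consumer index (sent_idx). Both
 * indices wrap using a power-of-two mask, so MAXSKBPEND must be a power
 * of two.
 */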
dhd_rxf_enqueue(dhd_pub_t * dhdp,void * skb)1595 static inline int dhd_rxf_enqueue(dhd_pub_t *dhdp, void* skb)
1596 {
1597 uint32 store_idx;
1598 uint32 sent_idx;
1599
1600 if (!skb) {
1601 DHD_ERROR(("dhd_rxf_enqueue: NULL skb!!!\n"));
1602 return BCME_ERROR;
1603 }
1604
1605 dhd_os_rxflock(dhdp);
1606 store_idx = dhdp->store_idx;
1607 sent_idx = dhdp->sent_idx;
1608 if (dhdp->skbbuf[store_idx] != NULL) {
1609 /* Make sure the previous packets are processed */
1610 dhd_os_rxfunlock(dhdp);
1611 DHD_ERROR(("dhd_rxf_enqueue: pktbuf not consumed %p, store idx %d sent idx %d\n",
1612 skb, store_idx, sent_idx));
1613 /* removed msleep here, should use wait_event_timeout if we
1614 * want to give rx frame thread a chance to run
1615 */
1616 #if defined(WAIT_DEQUEUE)
1617 OSL_SLEEP(1);
1618 #endif // endif
1619 return BCME_ERROR;
1620 }
1621 DHD_TRACE(("dhd_rxf_enqueue: Store SKB %p. idx %d -> %d\n",
1622 skb, store_idx, (store_idx + 1) & (MAXSKBPEND - 1)));
1623 dhdp->skbbuf[store_idx] = skb;
1624 dhdp->store_idx = (store_idx + 1) & (MAXSKBPEND - 1);
1625 dhd_os_rxfunlock(dhdp);
1626
1627 return BCME_OK;
1628 }
1629
dhd_rxf_dequeue(dhd_pub_t * dhdp)1630 static inline void* dhd_rxf_dequeue(dhd_pub_t *dhdp)
1631 {
1632 uint32 store_idx;
1633 uint32 sent_idx;
1634 void *skb;
1635
1636 dhd_os_rxflock(dhdp);
1637
1638 store_idx = dhdp->store_idx;
1639 sent_idx = dhdp->sent_idx;
1640 skb = dhdp->skbbuf[sent_idx];
1641
1642 if (skb == NULL) {
1643 dhd_os_rxfunlock(dhdp);
1644 DHD_ERROR(("dhd_rxf_dequeue: Dequeued packet is NULL, store idx %d sent idx %d\n",
1645 store_idx, sent_idx));
1646 return NULL;
1647 }
1648
1649 dhdp->skbbuf[sent_idx] = NULL;
1650 dhdp->sent_idx = (sent_idx + 1) & (MAXSKBPEND - 1);
1651
1652 DHD_TRACE(("dhd_rxf_dequeue: netif_rx_ni(%p), sent idx %d\n",
1653 skb, sent_idx));
1654
1655 dhd_os_rxfunlock(dhdp);
1656
1657 return skb;
1658 }
1659
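/* Pre/post-processing of the module CID and MAC address: the pre pass
 * (prepost == TRUE) reads the CIS, validates the module CID/MAC and applies
 * any MAC address override from file; the post pass writes the MAC back and
 * releases the cached CIS data.
 */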
1660 int dhd_process_cid_mac(dhd_pub_t *dhdp, bool prepost)
1661 {
1662 if (prepost) { /* pre process */
1663 dhd_read_cis(dhdp);
1664 dhd_check_module_cid(dhdp);
1665 dhd_check_module_mac(dhdp);
1666 dhd_set_macaddr_from_file(dhdp);
1667 } else { /* post process */
1668 dhd_write_macaddr(&dhdp->mac);
1669 dhd_clear_cis(dhdp);
1670 }
1671
1672 return 0;
1673 }
1674
1675 #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT) && defined(DHD_FW_COREDUMP)
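/* Notify the debug HAL (if started) that a firmware dump is pending and
 * block, bounded by dhd_os_busbusy_wait_bitmask(), until the HAL clears
 * DHD_BUS_BUSY_IN_HALDUMP; the bit is also cleared here in case the wait
 * times out.
 */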
1676 static int dhd_wait_for_file_dump(dhd_pub_t *dhdp)
1677 {
1678 struct net_device *primary_ndev;
1679 struct bcm_cfg80211 *cfg;
1680 unsigned long flags = 0;
1681 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
1682
1683 if (!primary_ndev) {
1684 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
1685 return BCME_ERROR;
1686 }
1687 cfg = wl_get_cfg(primary_ndev);
1688
1689 if (!cfg) {
1690 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
1691 return BCME_ERROR;
1692 }
1693
1694 DHD_GENERAL_LOCK(dhdp, flags);
1695 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
1696 DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
1697 dhd_os_busbusy_wake(dhdp);
1698 DHD_GENERAL_UNLOCK(dhdp, flags);
1699 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
1700 return BCME_ERROR;
1701 }
1702 DHD_BUS_BUSY_SET_IN_HALDUMP(dhdp);
1703 DHD_GENERAL_UNLOCK(dhdp, flags);
1704
1705 DHD_OS_WAKE_LOCK(dhdp);
1706 /* Check whether the HAL has started and only then send the event; if not, clear the dump state here */
1707 if (wl_cfg80211_is_hal_started(cfg)) {
1708 int timeleft = 0;
1709
1710 DHD_ERROR(("[DUMP] %s: HAL started. send urgent event\n", __FUNCTION__));
1711 dhd_dbg_send_urgent_evt(dhdp, NULL, 0);
1712
1713 DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
1714 __FUNCTION__, dhdp->dhd_bus_busy_state));
1715 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
1716 &dhdp->dhd_bus_busy_state, DHD_BUS_BUSY_IN_HALDUMP, 0);
1717 if ((dhdp->dhd_bus_busy_state & DHD_BUS_BUSY_IN_HALDUMP) != 0) {
1718 DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
1719 __FUNCTION__, dhdp->dhd_bus_busy_state));
1720 }
1721 } else {
1722 DHD_ERROR(("[DUMP] %s: HAL Not started. skip urgent event\n", __FUNCTION__));
1723 }
1724 DHD_OS_WAKE_UNLOCK(dhdp);
1725 /* In case of dhd_os_busbusy_wait_bitmask() timeout,
1726 * hal dump bit will not be cleared. Hence clearing it here.
1727 */
1728 DHD_GENERAL_LOCK(dhdp, flags);
1729 DHD_BUS_BUSY_CLEAR_IN_HALDUMP(dhdp);
1730 dhd_os_busbusy_wake(dhdp);
1731 DHD_GENERAL_UNLOCK(dhdp, flags);
1732
1733 return BCME_OK;
1734 }
1735 #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT && DHD_FW_COREDUMP */
1736
1737 #ifdef PKT_FILTER_SUPPORT
1738 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
1739 static bool
1740 _turn_on_arp_filter(dhd_pub_t *dhd, int op_mode_param)
1741 {
1742 bool _apply = FALSE;
1743 /* In case of IBSS mode, apply arp pkt filter */
1744 if (op_mode_param & DHD_FLAG_IBSS_MODE) {
1745 _apply = TRUE;
1746 goto exit;
1747 }
1748 /* In case of P2P GO or GC, apply pkt filter to pass arp pkt to host */
1749 if (op_mode_param & (DHD_FLAG_P2P_GC_MODE | DHD_FLAG_P2P_GO_MODE)) {
1750 _apply = TRUE;
1751 goto exit;
1752 }
1753
1754 exit:
1755 return _apply;
1756 }
1757 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1758
1759 void
1760 dhd_set_packet_filter(dhd_pub_t *dhd)
1761 {
1762 int i;
1763
1764 DHD_TRACE(("%s: enter\n", __FUNCTION__));
1765 if (dhd_pkt_filter_enable) {
1766 for (i = 0; i < dhd->pktfilter_count; i++) {
1767 dhd_pktfilter_offload_set(dhd, dhd->pktfilter[i]);
1768 }
1769 }
1770 }
1771
1772 void
1773 dhd_enable_packet_filter(int value, dhd_pub_t *dhd)
1774 {
1775 int i;
1776
1777 DHD_ERROR(("%s: enter, value = %d\n", __FUNCTION__, value));
1778 if ((dhd->op_mode & DHD_FLAG_HOSTAP_MODE) && value) {
1779 DHD_ERROR(("%s: DHD_FLAG_HOSTAP_MODE\n", __FUNCTION__));
1780 return;
1781 }
1782 /* 1 - Enable packet filter, only allow unicast packet to send up */
1783 /* 0 - Disable packet filter */
1784 if (dhd_pkt_filter_enable && (!value ||
1785 (dhd_support_sta_mode(dhd) && !dhd->dhcp_in_progress)))
1786 {
1787 for (i = 0; i < dhd->pktfilter_count; i++) {
1788 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
1789 if (value && (i == DHD_ARP_FILTER_NUM) &&
1790 !_turn_on_arp_filter(dhd, dhd->op_mode)) {
1791 DHD_TRACE(("Do not turn on ARP white list pkt filter:"
1792 "val %d, cnt %d, op_mode 0x%x\n",
1793 value, i, dhd->op_mode));
1794 continue;
1795 }
1796 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
1797 dhd_pktfilter_offload_enable(dhd, dhd->pktfilter[i],
1798 value, dhd_master_mode);
1799 }
1800 }
1801 }
1802
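/* Each filter string below is in the wl utility's pkt_filter_add form, whose
 * fields (as commonly documented) are: <id> <polarity> <type> <offset>
 * <bitmask> <pattern>. For example, filter 105 matches a 16-bit EtherType of
 * 0x0806 (ARP) at byte offset 12.
 */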
1803 int
1804 dhd_packet_filter_add_remove(dhd_pub_t *dhdp, int add_remove, int num)
1805 {
1806 char *filterp = NULL;
1807 int filter_id = 0;
1808
1809 switch (num) {
1810 case DHD_BROADCAST_FILTER_NUM:
1811 filterp = "101 0 0 0 0xFFFFFFFFFFFF 0xFFFFFFFFFFFF";
1812 filter_id = 101;
1813 break;
1814 case DHD_MULTICAST4_FILTER_NUM:
1815 filter_id = 102;
1816 if (FW_SUPPORTED((dhdp), pf6)) {
1817 if (dhdp->pktfilter[num] != NULL) {
1818 dhd_pktfilter_offload_delete(dhdp, filter_id);
1819 dhdp->pktfilter[num] = NULL;
1820 }
1821 if (!add_remove) {
1822 filterp = DISCARD_IPV4_MCAST;
1823 add_remove = 1;
1824 break;
1825 }
1826 }
1827 filterp = "102 0 0 0 0xFFFFFF 0x01005E";
1828 break;
1829 case DHD_MULTICAST6_FILTER_NUM:
1830 filter_id = 103;
1831 if (FW_SUPPORTED((dhdp), pf6)) {
1832 if (dhdp->pktfilter[num] != NULL) {
1833 dhd_pktfilter_offload_delete(dhdp, filter_id);
1834 dhdp->pktfilter[num] = NULL;
1835 }
1836 if (!add_remove) {
1837 filterp = DISCARD_IPV6_MCAST;
1838 add_remove = 1;
1839 break;
1840 }
1841 }
1842 filterp = "103 0 0 0 0xFFFF 0x3333";
1843 break;
1844 case DHD_MDNS_FILTER_NUM:
1845 filterp = "104 0 0 0 0xFFFFFFFFFFFF 0x01005E0000FB";
1846 filter_id = 104;
1847 break;
1848 case DHD_ARP_FILTER_NUM:
1849 filterp = "105 0 0 12 0xFFFF 0x0806";
1850 filter_id = 105;
1851 break;
1852 case DHD_BROADCAST_ARP_FILTER_NUM:
1853 filterp = "106 0 0 0 0xFFFFFFFFFFFF0000000000000806"
1854 " 0xFFFFFFFFFFFF0000000000000806";
1855 filter_id = 106;
1856 break;
1857 default:
1858 return -EINVAL;
1859 }
1860
1861 /* Add filter */
1862 if (add_remove) {
1863 dhdp->pktfilter[num] = filterp;
1864 dhd_pktfilter_offload_set(dhdp, dhdp->pktfilter[num]);
1865 } else { /* Delete filter */
1866 if (dhdp->pktfilter[num]) {
1867 dhd_pktfilter_offload_delete(dhdp, filter_id);
1868 dhdp->pktfilter[num] = NULL;
1869 }
1870 }
1871
1872 return 0;
1873 }
1874 #endif /* PKT_FILTER_SUPPORT */
1875
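/* Apply (value != 0) or remove (value == 0) the extra power-save tuning for
 * kernel suspend: force PM_MAX, enable packet filters and ARP offload, raise
 * bcn_li_dtim, and adjust the roam/beacon-timeout related iovars. The resume
 * path restores each setting to its pre-suspend value.
 */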
1876 static int dhd_set_suspend(int value, dhd_pub_t *dhd)
1877 {
1878 #ifndef SUPPORT_PM2_ONLY
1879 int power_mode = PM_MAX;
1880 #endif /* SUPPORT_PM2_ONLY */
1881 /* wl_pkt_filter_enable_t enable_parm; */
1882 int bcn_li_dtim = 0; /* Default bcn_li_dtim in resume mode is 0 */
1883 int ret = 0;
1884 #ifdef DHD_USE_EARLYSUSPEND
1885 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
1886 int bcn_timeout = 0;
1887 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
1888 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
1889 int roam_time_thresh = 0; /* (ms) */
1890 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
1891 #ifndef ENABLE_FW_ROAM_SUSPEND
1892 uint roamvar = 1;
1893 #endif /* ENABLE_FW_ROAM_SUSPEND */
1894 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
1895 int bcn_li_bcn = 1;
1896 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
1897 uint nd_ra_filter = 0;
1898 #ifdef ENABLE_IPMCAST_FILTER
1899 int ipmcast_l2filter;
1900 #endif /* ENABLE_IPMCAST_FILTER */
1901 #ifdef CUSTOM_EVENT_PM_WAKE
1902 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
1903 #endif /* CUSTOM_EVENT_PM_WAKE */
1904 #endif /* DHD_USE_EARLYSUSPEND */
1905 #ifdef PASS_ALL_MCAST_PKTS
1906 struct dhd_info *dhdinfo;
1907 uint32 allmulti;
1908 uint i;
1909 #endif /* PASS_ALL_MCAST_PKTS */
1910 #ifdef DYNAMIC_SWOOB_DURATION
1911 #ifndef CUSTOM_INTR_WIDTH
1912 #define CUSTOM_INTR_WIDTH 100
1913 int intr_width = 0;
1914 #endif /* CUSTOM_INTR_WIDTH */
1915 #endif /* DYNAMIC_SWOOB_DURATION */
1916
1917 #if defined(OEM_ANDROID) && defined(BCMPCIE)
1918 int lpas = 0;
1919 int dtim_period = 0;
1920 int bcn_interval = 0;
1921 int bcn_to_dly = 0;
1922 #if defined(CUSTOM_BCN_TIMEOUT_IN_SUSPEND) && defined(DHD_USE_EARLYSUSPEND)
1923 bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
1924 #else
1925 int bcn_timeout = CUSTOM_BCN_TIMEOUT_SETTING;
1926 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND && DHD_USE_EARLYSUSPEND */
1927 #endif /* OEM_ANDROID && BCMPCIE */
1928
1929 if (!dhd)
1930 return -ENODEV;
1931
1932 #ifdef PASS_ALL_MCAST_PKTS
1933 dhdinfo = dhd->info;
1934 #endif /* PASS_ALL_MCAST_PKTS */
1935
1936 DHD_TRACE(("%s: enter, value = %d in_suspend=%d\n",
1937 __FUNCTION__, value, dhd->in_suspend));
1938
1939 dhd_suspend_lock(dhd);
1940
1941 #ifdef CUSTOM_SET_CPUCORE
1942 DHD_TRACE(("%s set cpucore(suspend%d)\n", __FUNCTION__, value));
1943 /* set specific cpucore */
1944 dhd_set_cpucore(dhd, TRUE);
1945 #endif /* CUSTOM_SET_CPUCORE */
1946 if (dhd->up) {
1947 if (value && dhd->in_suspend) {
1948 #ifdef PKT_FILTER_SUPPORT
1949 dhd->early_suspended = 1;
1950 #endif // endif
1951 /* Kernel suspended */
1952 DHD_ERROR(("%s: force extra Suspend setting \n", __FUNCTION__));
1953
1954 #ifndef SUPPORT_PM2_ONLY
1955 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
1956 sizeof(power_mode), TRUE, 0);
1957 #endif /* SUPPORT_PM2_ONLY */
1958
1959 #ifdef PKT_FILTER_SUPPORT
1960 /* Enable packet filter,
1961 * only allow unicast packet to send up
1962 */
1963 dhd_enable_packet_filter(1, dhd);
1964 #ifdef APF
1965 dhd_dev_apf_enable_filter(dhd_linux_get_primary_netdev(dhd));
1966 #endif /* APF */
1967 #endif /* PKT_FILTER_SUPPORT */
1968 #ifdef ARP_OFFLOAD_SUPPORT
1969 dhd_arp_offload_enable(dhd, TRUE);
1970 #endif /* ARP_OFFLOAD_SUPPORT */
1971
1972 #ifdef PASS_ALL_MCAST_PKTS
1973 allmulti = 0;
1974 for (i = 0; i < DHD_MAX_IFS; i++) {
1975 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
1976 ret = dhd_iovar(dhd, i, "allmulti",
1977 (char *)&allmulti,
1978 sizeof(allmulti),
1979 NULL, 0, TRUE);
1980 if (ret < 0) {
1981 DHD_ERROR(("%s allmulti failed %d\n",
1982 __FUNCTION__, ret));
1983 }
1984 }
1985 }
1986 #endif /* PASS_ALL_MCAST_PKTS */
1987
1988 /* If DTIM skip is set up as default, force it to wake
1989 * each third DTIM for better power savings. Note that
1990 * one side effect is a chance to miss BC/MC packet.
1991 */
1992 #ifdef WLTDLS
1993 /* Do not set bcn_li_dtim in WFD mode */
1994 if (dhd->tdls_mode) {
1995 bcn_li_dtim = 0;
1996 } else
1997 #endif /* WLTDLS */
1998 #if defined(OEM_ANDROID) && defined(BCMPCIE)
1999 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd, &dtim_period,
2000 &bcn_interval);
2001 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2002 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2003 if (ret < 0) {
2004 DHD_ERROR(("%s bcn_li_dtim failed %d\n",
2005 __FUNCTION__, ret));
2006 }
2007 if ((bcn_li_dtim * dtim_period * bcn_interval) >=
2008 MIN_DTIM_FOR_ROAM_THRES_EXTEND) {
2009 /*
2010 * Increase max roaming threshold from 2 secs to 8 secs
2011 * the real roam threshold is MIN(max_roam_threshold,
2012 * bcn_timeout/2)
2013 */
2014 lpas = 1;
2015 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas),
2016 NULL, 0, TRUE);
2017 if (ret < 0) {
2018 DHD_ERROR(("%s lpas failed %d\n", __FUNCTION__,
2019 ret));
2020 }
2021 bcn_to_dly = 1;
2022 /*
2023 * if bcn_to_dly is 1, the real roam threshold is
2024 * MIN(max_roam_threshold, bcn_timeout -1);
2025 * notify link down event after roaming procedure complete
2026 * if we hit bcn_timeout while we are in roaming progress.
2027 */
2028 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2029 sizeof(bcn_to_dly), NULL, 0, TRUE);
2030 if (ret < 0) {
2031 DHD_ERROR(("%s bcn_to_dly failed %d\n",
2032 __FUNCTION__, ret));
2033 }
2034 /* Increase beacon timeout to 6 secs or use bigger one */
2035 bcn_timeout = max(bcn_timeout, BCN_TIMEOUT_IN_SUSPEND);
2036 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2037 sizeof(bcn_timeout), NULL, 0, TRUE);
2038 if (ret < 0) {
2039 DHD_ERROR(("%s bcn_timeout failed %d\n",
2040 __FUNCTION__, ret));
2041 }
2042 }
2043 #else
2044 bcn_li_dtim = dhd_get_suspend_bcn_li_dtim(dhd);
2045 if (dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2046 sizeof(bcn_li_dtim), NULL, 0, TRUE) < 0)
2047 DHD_ERROR(("%s: set dtim failed\n", __FUNCTION__));
2048 #endif /* OEM_ANDROID && BCMPCIE */
2049 #ifdef WL_CFG80211
2050 /* Disable cfg80211 feature events during suspend */
2051 ret = wl_cfg80211_config_suspend_events(
2052 dhd_linux_get_primary_netdev(dhd), FALSE);
2053 if (ret < 0) {
2054 DHD_ERROR(("failed to disable events (%d)\n", ret));
2055 }
2056 #endif /* WL_CFG80211 */
2057 #ifdef DHD_USE_EARLYSUSPEND
2058 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2059 bcn_timeout = CUSTOM_BCN_TIMEOUT_IN_SUSPEND;
2060 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2061 sizeof(bcn_timeout), NULL, 0, TRUE);
2062 if (ret < 0) {
2063 DHD_ERROR(("%s bcn_timeout failed %d\n", __FUNCTION__,
2064 ret));
2065 }
2066 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2067 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2068 roam_time_thresh = CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND;
2069 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2070 (char *)&roam_time_thresh,
2071 sizeof(roam_time_thresh), NULL, 0, TRUE);
2072 if (ret < 0) {
2073 DHD_ERROR(("%s roam_time_thresh failed %d\n",
2074 __FUNCTION__, ret));
2075 }
2076 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2077 #ifndef ENABLE_FW_ROAM_SUSPEND
2078 /* Disable firmware roaming during suspend */
2079 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2080 sizeof(roamvar), NULL, 0, TRUE);
2081 if (ret < 0) {
2082 DHD_ERROR(("%s roam_off failed %d\n",
2083 __FUNCTION__, ret));
2084 }
2085 #endif /* ENABLE_FW_ROAM_SUSPEND */
2086 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2087 if (bcn_li_dtim) {
2088 bcn_li_bcn = 0;
2089 }
2090 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2091 sizeof(bcn_li_bcn), NULL, 0, TRUE);
2092 if (ret < 0) {
2093 DHD_ERROR(("%s bcn_li_bcn failed %d\n", __FUNCTION__, ret));
2094 }
2095 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2096 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
2097 ret = wl_android_bcnrecv_suspend(dhd_linux_get_primary_netdev(dhd));
2098 if (ret != BCME_OK) {
2099 DHD_ERROR(("failed to stop beacon recv event on"
2100 " suspend state (%d)\n", ret));
2101 }
2102 #endif /* WL_CFG80211 && WL_BCNRECV */
2103 #ifdef NDO_CONFIG_SUPPORT
2104 if (dhd->ndo_enable) {
2105 if (!dhd->ndo_host_ip_overflow) {
2106 /* enable ND offload on suspend */
2107 ret = dhd_ndo_enable(dhd, TRUE);
2108 if (ret < 0) {
2109 DHD_ERROR(("%s: failed to enable NDO\n",
2110 __FUNCTION__));
2111 }
2112 } else {
2113 DHD_INFO(("%s: NDO disabled on suspend due to"
2114 "HW capacity\n", __FUNCTION__));
2115 }
2116 }
2117 #endif /* NDO_CONFIG_SUPPORT */
2118 #ifndef APF
2119 if (FW_SUPPORTED(dhd, ndoe)) {
2120 #else
2121 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
2122 #endif /* APF */
2123 /* enable IPv6 RA filter in firmware during suspend */
2124 nd_ra_filter = 1;
2125 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2126 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
2127 NULL, 0, TRUE);
2128 if (ret < 0)
2129 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2130 ret));
2131 }
2132 dhd_os_suppress_logging(dhd, TRUE);
2133 #ifdef ENABLE_IPMCAST_FILTER
2134 ipmcast_l2filter = 1;
2135 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2136 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2137 NULL, 0, TRUE);
2138 if (ret < 0) {
2139 DHD_ERROR(("failed to set ipmcast_l2filter (%d)\n", ret));
2140 }
2141 #endif /* ENABLE_IPMCAST_FILTER */
2142 #ifdef DYNAMIC_SWOOB_DURATION
2143 intr_width = CUSTOM_INTR_WIDTH;
2144 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2145 sizeof(intr_width), NULL, 0, TRUE);
2146 if (ret < 0) {
2147 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2148 }
2149 #endif /* DYNAMIC_SWOOB_DURATION */
2150 #ifdef CUSTOM_EVENT_PM_WAKE
2151 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE * 4;
2152 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2153 (char *)&pm_awake_thresh,
2154 sizeof(pm_awake_thresh), NULL, 0, TRUE);
2155 if (ret < 0) {
2156 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2157 __FUNCTION__, ret));
2158 }
2159 #endif /* CUSTOM_EVENT_PM_WAKE */
2160 #ifdef CONFIG_SILENT_ROAM
2161 if (!dhd->sroamed) {
2162 ret = dhd_sroam_set_mon(dhd, TRUE);
2163 if (ret < 0) {
2164 DHD_ERROR(("%s set sroam failed %d\n",
2165 __FUNCTION__, ret));
2166 }
2167 }
2168 dhd->sroamed = FALSE;
2169 #endif /* CONFIG_SILENT_ROAM */
2170 #endif /* DHD_USE_EARLYSUSPEND */
2171 } else {
2172 #ifdef PKT_FILTER_SUPPORT
2173 dhd->early_suspended = 0;
2174 #endif // endif
2175 /* Kernel resumed */
2176 DHD_ERROR(("%s: Remove extra suspend setting \n", __FUNCTION__));
2177 #ifdef DYNAMIC_SWOOB_DURATION
2178 intr_width = 0;
2179 ret = dhd_iovar(dhd, 0, "bus:intr_width", (char *)&intr_width,
2180 sizeof(intr_width), NULL, 0, TRUE);
2181 if (ret < 0) {
2182 DHD_ERROR(("failed to set intr_width (%d)\n", ret));
2183 }
2184 #endif /* DYNAMIC_SWOOB_DURATION */
2185 #ifndef SUPPORT_PM2_ONLY
2186 power_mode = PM_FAST;
2187 dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode,
2188 sizeof(power_mode), TRUE, 0);
2189 #endif /* SUPPORT_PM2_ONLY */
2190 #if defined(WL_CFG80211) && defined(WL_BCNRECV)
2191 ret = wl_android_bcnrecv_resume(dhd_linux_get_primary_netdev(dhd));
2192 if (ret != BCME_OK) {
2193 DHD_ERROR(("failed to resume beacon recv state (%d)\n",
2194 ret));
2195 }
2196 #endif /* WL_CFG80211 && WL_BCNRECV */
2197 #ifdef ARP_OFFLOAD_SUPPORT
2198 dhd_arp_offload_enable(dhd, FALSE);
2199 #endif /* ARP_OFFLOAD_SUPPORT */
2200 #ifdef PKT_FILTER_SUPPORT
2201 /* disable pkt filter */
2202 dhd_enable_packet_filter(0, dhd);
2203 #ifdef APF
2204 dhd_dev_apf_disable_filter(dhd_linux_get_primary_netdev(dhd));
2205 #endif /* APF */
2206 #endif /* PKT_FILTER_SUPPORT */
2207 #ifdef PASS_ALL_MCAST_PKTS
2208 allmulti = 1;
2209 for (i = 0; i < DHD_MAX_IFS; i++) {
2210 if (dhdinfo->iflist[i] && dhdinfo->iflist[i]->net) {
2211 ret = dhd_iovar(dhd, i, "allmulti",
2212 (char *)&allmulti,
2213 sizeof(allmulti), NULL, 0, TRUE);
2214 if (ret < 0) {
2215 DHD_ERROR(("%s: allmulti failed:%d\n",
2216 __FUNCTION__, ret));
2217 }
2218 }
2219 }
2220 #endif /* PASS_ALL_MCAST_PKTS */
2221 #if defined(OEM_ANDROID) && defined(BCMPCIE)
2222 /* restore pre-suspend setting */
2223 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2224 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2225 if (ret < 0) {
2226 DHD_ERROR(("%s:bcn_li_ditm failed:%d\n",
2227 __FUNCTION__, ret));
2228 }
2229 ret = dhd_iovar(dhd, 0, "lpas", (char *)&lpas, sizeof(lpas), NULL,
2230 0, TRUE);
2231 if (ret < 0) {
2232 DHD_ERROR(("%s:lpas failed:%d\n", __FUNCTION__, ret));
2233 }
2234 ret = dhd_iovar(dhd, 0, "bcn_to_dly", (char *)&bcn_to_dly,
2235 sizeof(bcn_to_dly), NULL, 0, TRUE);
2236 if (ret < 0) {
2237 DHD_ERROR(("%s:bcn_to_dly failed:%d\n", __FUNCTION__, ret));
2238 }
2239 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2240 sizeof(bcn_timeout), NULL, 0, TRUE);
2241 if (ret < 0) {
2242 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
2243 __FUNCTION__, ret));
2244 }
2245 #else
2246 /* restore pre-suspend setting for dtim_skip */
2247 ret = dhd_iovar(dhd, 0, "bcn_li_dtim", (char *)&bcn_li_dtim,
2248 sizeof(bcn_li_dtim), NULL, 0, TRUE);
2249 if (ret < 0) {
2250 DHD_ERROR(("%s:bcn_li_ditm fail:%d\n", __FUNCTION__, ret));
2251 }
2252 #endif /* OEM_ANDROID && BCMPCIE */
2253 #ifdef DHD_USE_EARLYSUSPEND
2254 #ifdef CUSTOM_BCN_TIMEOUT_IN_SUSPEND
2255 bcn_timeout = CUSTOM_BCN_TIMEOUT;
2256 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout,
2257 sizeof(bcn_timeout), NULL, 0, TRUE);
2258 if (ret < 0) {
2259 DHD_ERROR(("%s:bcn_timeout failed:%d\n",
2260 __FUNCTION__, ret));
2261 }
2262 #endif /* CUSTOM_BCN_TIMEOUT_IN_SUSPEND */
2263 #ifdef CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND
2264 roam_time_thresh = 2000;
2265 ret = dhd_iovar(dhd, 0, "roam_time_thresh",
2266 (char *)&roam_time_thresh,
2267 sizeof(roam_time_thresh), NULL, 0, TRUE);
2268 if (ret < 0) {
2269 DHD_ERROR(("%s:roam_time_thresh failed:%d\n",
2270 __FUNCTION__, ret));
2271 }
2272
2273 #endif /* CUSTOM_ROAM_TIME_THRESH_IN_SUSPEND */
2274 #ifndef ENABLE_FW_ROAM_SUSPEND
2275 roamvar = dhd_roam_disable;
2276 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar,
2277 sizeof(roamvar), NULL, 0, TRUE);
2278 if (ret < 0) {
2279 DHD_ERROR(("%s: roam_off fail:%d\n", __FUNCTION__, ret));
2280 }
2281 #endif /* ENABLE_FW_ROAM_SUSPEND */
2282 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
2283 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn,
2284 sizeof(bcn_li_bcn), NULL, 0, TRUE);
2285 if (ret < 0) {
2286 DHD_ERROR(("%s: bcn_li_bcn failed:%d\n",
2287 __FUNCTION__, ret));
2288 }
2289 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
2290 #ifdef NDO_CONFIG_SUPPORT
2291 if (dhd->ndo_enable) {
2292 /* Disable ND offload on resume */
2293 ret = dhd_ndo_enable(dhd, FALSE);
2294 if (ret < 0) {
2295 DHD_ERROR(("%s: failed to disable NDO\n",
2296 __FUNCTION__));
2297 }
2298 }
2299 #endif /* NDO_CONFIG_SUPPORT */
2300 #ifndef APF
2301 if (FW_SUPPORTED(dhd, ndoe)) {
2302 #else
2303 if (FW_SUPPORTED(dhd, ndoe) && !FW_SUPPORTED(dhd, apf)) {
2304 #endif /* APF */
2305 /* disable IPv6 RA filter in firmware during suspend */
2306 nd_ra_filter = 0;
2307 ret = dhd_iovar(dhd, 0, "nd_ra_filter_enable",
2308 (char *)&nd_ra_filter, sizeof(nd_ra_filter),
2309 NULL, 0, TRUE);
2310 if (ret < 0) {
2311 DHD_ERROR(("failed to set nd_ra_filter (%d)\n",
2312 ret));
2313 }
2314 }
2315 dhd_os_suppress_logging(dhd, FALSE);
2316 #ifdef ENABLE_IPMCAST_FILTER
2317 ipmcast_l2filter = 0;
2318 ret = dhd_iovar(dhd, 0, "ipmcast_l2filter",
2319 (char *)&ipmcast_l2filter, sizeof(ipmcast_l2filter),
2320 NULL, 0, TRUE);
2321 if (ret < 0) {
2322 DHD_ERROR(("failed to clear ipmcast_l2filter ret:%d", ret));
2323 }
2324 #endif /* ENABLE_IPMCAST_FILTER */
2325 #ifdef CUSTOM_EVENT_PM_WAKE
2326 ret = dhd_iovar(dhd, 0, "const_awake_thresh",
2327 (char *)&pm_awake_thresh,
2328 sizeof(pm_awake_thresh), NULL, 0, TRUE);
2329 if (ret < 0) {
2330 DHD_ERROR(("%s set const_awake_thresh failed %d\n",
2331 __FUNCTION__, ret));
2332 }
2333 #endif /* CUSTOM_EVENT_PM_WAKE */
2334 #ifdef CONFIG_SILENT_ROAM
2335 ret = dhd_sroam_set_mon(dhd, FALSE);
2336 if (ret < 0) {
2337 DHD_ERROR(("%s set sroam failed %d\n", __FUNCTION__, ret));
2338 }
2339 #endif /* CONFIG_SILENT_ROAM */
2340 #endif /* DHD_USE_EARLYSUSPEND */
2341 #ifdef WL_CFG80211
2342 /* Enable cfg80211 feature events during resume */
2343 ret = wl_cfg80211_config_suspend_events(
2344 dhd_linux_get_primary_netdev(dhd), TRUE);
2345 if (ret < 0) {
2346 DHD_ERROR(("failed to enable events (%d)\n", ret));
2347 }
2348 #endif /* WL_CFG80211 */
2349 #ifdef DHD_LB_IRQSET
2350 dhd_irq_set_affinity(dhd, dhd->info->cpumask_primary);
2351 #endif /* DHD_LB_IRQSET */
2352 }
2353 }
2354 dhd_suspend_unlock(dhd);
2355
2356 return 0;
2357 }
2358
2359 static int dhd_suspend_resume_helper(struct dhd_info *dhd, int val, int force)
2360 {
2361 dhd_pub_t *dhdp = &dhd->pub;
2362 int ret = 0;
2363
2364 DHD_OS_WAKE_LOCK(dhdp);
2365 DHD_PERIM_LOCK(dhdp);
2366
2367 /* Set flag when early suspend was called */
2368 dhdp->in_suspend = val;
2369 if ((force || !dhdp->suspend_disable_flag) &&
2370 dhd_support_sta_mode(dhdp))
2371 {
2372 ret = dhd_set_suspend(val, dhdp);
2373 }
2374
2375 DHD_PERIM_UNLOCK(dhdp);
2376 DHD_OS_WAKE_UNLOCK(dhdp);
2377 return ret;
2378 }
2379
2380 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
2381 static void dhd_early_suspend(struct early_suspend *h)
2382 {
2383 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2384 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2385
2386 if (dhd)
2387 dhd_suspend_resume_helper(dhd, 1, 0);
2388 }
2389
2390 static void dhd_late_resume(struct early_suspend *h)
2391 {
2392 struct dhd_info *dhd = container_of(h, struct dhd_info, early_suspend);
2393 DHD_TRACE_HW4(("%s: enter\n", __FUNCTION__));
2394
2395 if (dhd)
2396 dhd_suspend_resume_helper(dhd, 0, 0);
2397 }
2398 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
2399
2400 /*
2401 * Generalized timeout mechanism. Uses spin sleep with exponential back-off until
2402 * the sleep time reaches one jiffy, then switches over to task delay. Usage:
2403 *
2404 * dhd_timeout_start(&tmo, usec);
2405 * while (!dhd_timeout_expired(&tmo))
2406 * if (poll_something())
2407 * break;
2408 * if (dhd_timeout_expired(&tmo))
2409 * fatal();
2410 */
2411
2412 void
2413 dhd_timeout_start(dhd_timeout_t *tmo, uint usec)
2414 {
2415 tmo->limit = usec;
2416 tmo->increment = 0;
2417 tmo->elapsed = 0;
2418 tmo->tick = jiffies_to_usecs(1);
2419 }
2420
2421 int
2422 dhd_timeout_expired(dhd_timeout_t *tmo)
2423 {
2424 /* Does nothing on the first call */
2425 if (tmo->increment == 0) {
2426 tmo->increment = 1;
2427 return 0;
2428 }
2429
2430 if (tmo->elapsed >= tmo->limit)
2431 return 1;
2432
2433 /* Add the delay that's about to take place */
2434 tmo->elapsed += tmo->increment;
2435
2436 if ((!CAN_SLEEP()) || tmo->increment < tmo->tick) {
2437 OSL_DELAY(tmo->increment);
2438 tmo->increment *= 2;
2439 if (tmo->increment > tmo->tick)
2440 tmo->increment = tmo->tick;
2441 } else {
2442 /*
2443 * OSL_SLEEP() corresponds to usleep_range(). In a non-atomic
2444 * context where the exact wakeup time is flexible, usleep_range()
2445 * is preferable to udelay(): it improves responsiveness and
2446 * reduces power consumption.
2447 */
2448 OSL_SLEEP(jiffies_to_msecs(1));
2449 }
2450
2451 return 0;
2452 }
2453
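/* Map a net_device back to its index in the dhd iflist; returns DHD_BAD_IF
 * when the device does not belong to this dhd instance.
 */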
2454 int
2455 dhd_net2idx(dhd_info_t *dhd, struct net_device *net)
2456 {
2457 int i = 0;
2458
2459 if (!dhd) {
2460 DHD_ERROR(("%s : DHD_BAD_IF return\n", __FUNCTION__));
2461 return DHD_BAD_IF;
2462 }
2463
2464 while (i < DHD_MAX_IFS) {
2465 if (dhd->iflist[i] && dhd->iflist[i]->net && (dhd->iflist[i]->net == net))
2466 return i;
2467 i++;
2468 }
2469
2470 return DHD_BAD_IF;
2471 }
2472
2473 struct net_device * dhd_idx2net(void *pub, int ifidx)
2474 {
2475 struct dhd_pub *dhd_pub = (struct dhd_pub *)pub;
2476 struct dhd_info *dhd_info;
2477
2478 if (!dhd_pub || ifidx < 0 || ifidx >= DHD_MAX_IFS)
2479 return NULL;
2480 dhd_info = dhd_pub->info;
2481 if (dhd_info && dhd_info->iflist[ifidx])
2482 return dhd_info->iflist[ifidx]->net;
2483 return NULL;
2484 }
2485
2486 int
2487 dhd_ifname2idx(dhd_info_t *dhd, char *name)
2488 {
2489 int i = DHD_MAX_IFS;
2490
2491 ASSERT(dhd);
2492
2493 if (name == NULL || *name == '\0')
2494 return 0;
2495
2496 while (--i > 0)
2497 if (dhd->iflist[i] && !strncmp(dhd->iflist[i]->dngl_name, name, IFNAMSIZ))
2498 break;
2499
2500 DHD_TRACE(("%s: return idx %d for \"%s\"\n", __FUNCTION__, i, name));
2501
2502 return i; /* default - the primary interface */
2503 }
2504
2505 char *
2506 dhd_ifname(dhd_pub_t *dhdp, int ifidx)
2507 {
2508 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2509
2510 ASSERT(dhd);
2511
2512 if (ifidx < 0 || ifidx >= DHD_MAX_IFS) {
2513 DHD_ERROR(("%s: ifidx %d out of range\n", __FUNCTION__, ifidx));
2514 return "<if_bad>";
2515 }
2516
2517 if (dhd->iflist[ifidx] == NULL) {
2518 DHD_ERROR(("%s: null i/f %d\n", __FUNCTION__, ifidx));
2519 return "<if_null>";
2520 }
2521
2522 if (dhd->iflist[ifidx]->net)
2523 return dhd->iflist[ifidx]->net->name;
2524
2525 return "<if_none>";
2526 }
2527
2528 uint8 *
2529 dhd_bssidx2bssid(dhd_pub_t *dhdp, int idx)
2530 {
2531 int i;
2532 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
2533
2534 ASSERT(dhd);
2535 for (i = 0; i < DHD_MAX_IFS; i++)
2536 if (dhd->iflist[i] && dhd->iflist[i]->bssidx == idx)
2537 return dhd->iflist[i]->mac_addr;
2538
2539 return NULL;
2540 }
2541
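/* Push the current multicast configuration to the dongle in three steps:
 * build and set the "mcast_list" iovar buffer (name, 32-bit count, then the
 * packed list of MAC addresses), then set "allmulti", and finally mirror the
 * IFF_PROMISC flag via WLC_SET_PROMISC.
 */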
2542 static void
2543 _dhd_set_multicast_list(dhd_info_t *dhd, int ifidx)
2544 {
2545 struct net_device *dev;
2546 struct netdev_hw_addr *ha;
2547 uint32 allmulti, cnt;
2548
2549 wl_ioctl_t ioc;
2550 char *buf, *bufp;
2551 uint buflen;
2552 int ret;
2553
2554 #ifdef MCAST_LIST_ACCUMULATION
2555 int i;
2556 uint32 cnt_iface[DHD_MAX_IFS];
2557 cnt = 0;
2558 allmulti = 0;
2559
2560 for (i = 0; i < DHD_MAX_IFS; i++) {
2561 if (dhd->iflist[i]) {
2562 dev = dhd->iflist[i]->net;
2563 if (!dev)
2564 continue;
2565 netif_addr_lock_bh(dev);
2566 cnt_iface[i] = netdev_mc_count(dev);
2567 cnt += cnt_iface[i];
2568 netif_addr_unlock_bh(dev);
2569
2570 /* Determine initial value of allmulti flag */
2571 allmulti |= (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2572 }
2573 }
2574 #else /* !MCAST_LIST_ACCUMULATION */
2575 if (!dhd->iflist[ifidx]) {
2576 DHD_ERROR(("%s : dhd->iflist[%d] was NULL\n", __FUNCTION__, ifidx));
2577 return;
2578 }
2579 dev = dhd->iflist[ifidx]->net;
2580 if (!dev)
2581 return;
2582 netif_addr_lock_bh(dev);
2583 cnt = netdev_mc_count(dev);
2584 netif_addr_unlock_bh(dev);
2585
2586 /* Determine initial value of allmulti flag */
2587 allmulti = (dev->flags & IFF_ALLMULTI) ? TRUE : FALSE;
2588 #endif /* MCAST_LIST_ACCUMULATION */
2589
2590 #ifdef PASS_ALL_MCAST_PKTS
2591 #ifdef PKT_FILTER_SUPPORT
2592 if (!dhd->pub.early_suspended)
2593 #endif /* PKT_FILTER_SUPPORT */
2594 allmulti = TRUE;
2595 #endif /* PASS_ALL_MCAST_PKTS */
2596
2597 /* Send down the multicast list first. */
2598
2599 buflen = sizeof("mcast_list") + sizeof(cnt) + (cnt * ETHER_ADDR_LEN);
2600 if (!(bufp = buf = MALLOC(dhd->pub.osh, buflen))) {
2601 DHD_ERROR(("%s: out of memory for mcast_list, cnt %d\n",
2602 dhd_ifname(&dhd->pub, ifidx), cnt));
2603 return;
2604 }
2605
2606 strncpy(bufp, "mcast_list", buflen - 1);
2607 bufp[buflen - 1] = '\0';
2608 bufp += strlen("mcast_list") + 1;
2609
2610 cnt = htol32(cnt);
2611 memcpy(bufp, &cnt, sizeof(cnt));
2612 bufp += sizeof(cnt);
2613
2614 #ifdef MCAST_LIST_ACCUMULATION
2615 for (i = 0; i < DHD_MAX_IFS; i++) {
2616 if (dhd->iflist[i]) {
2617 DHD_TRACE(("_dhd_set_multicast_list: ifidx %d\n", i));
2618 dev = dhd->iflist[i]->net;
2619
2620 netif_addr_lock_bh(dev);
2621 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2622 #pragma GCC diagnostic push
2623 #pragma GCC diagnostic ignored "-Wcast-qual"
2624 #endif // endif
2625 netdev_for_each_mc_addr(ha, dev) {
2626 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2627 #pragma GCC diagnostic pop
2628 #endif // endif
2629 if (!cnt_iface[i])
2630 break;
2631 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2632 bufp += ETHER_ADDR_LEN;
2633 DHD_TRACE(("_dhd_set_multicast_list: cnt "
2634 "%d " MACDBG "\n",
2635 cnt_iface[i], MAC2STRDBG(ha->addr)));
2636 cnt_iface[i]--;
2637 }
2638 netif_addr_unlock_bh(dev);
2639 }
2640 }
2641 #else /* !MCAST_LIST_ACCUMULATION */
2642 netif_addr_lock_bh(dev);
2643 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2644 #pragma GCC diagnostic push
2645 #pragma GCC diagnostic ignored "-Wcast-qual"
2646 #endif // endif
2647 netdev_for_each_mc_addr(ha, dev) {
2648 if (!cnt)
2649 break;
2650 memcpy(bufp, ha->addr, ETHER_ADDR_LEN);
2651 bufp += ETHER_ADDR_LEN;
2652 cnt--;
2653 }
2654 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
2655 #pragma GCC diagnostic pop
2656 #endif // endif
2657 netif_addr_unlock_bh(dev);
2658 #endif /* MCAST_LIST_ACCUMULATION */
2659
2660 memset(&ioc, 0, sizeof(ioc));
2661 ioc.cmd = WLC_SET_VAR;
2662 ioc.buf = buf;
2663 ioc.len = buflen;
2664 ioc.set = TRUE;
2665
2666 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2667 if (ret < 0) {
2668 DHD_ERROR(("%s: set mcast_list failed, cnt %d\n",
2669 dhd_ifname(&dhd->pub, ifidx), cnt));
2670 allmulti = cnt ? TRUE : allmulti;
2671 }
2672
2673 MFREE(dhd->pub.osh, buf, buflen);
2674
2675 /* Now send the allmulti setting. This is based on the setting in the
2676 * net_device flags, but might be modified above to be turned on if we
2677 * were trying to set some addresses and dongle rejected it...
2678 */
2679
2680 allmulti = htol32(allmulti);
2681 ret = dhd_iovar(&dhd->pub, ifidx, "allmulti", (char *)&allmulti,
2682 sizeof(allmulti), NULL, 0, TRUE);
2683 if (ret < 0) {
2684 DHD_ERROR(("%s: set allmulti %d failed\n",
2685 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2686 }
2687
2688 /* Finally, pick up the PROMISC flag as well, like the NIC driver does */
2689
2690 #ifdef MCAST_LIST_ACCUMULATION
2691 allmulti = 0;
2692 for (i = 0; i < DHD_MAX_IFS; i++) {
2693 if (dhd->iflist[i]) {
2694 dev = dhd->iflist[i]->net;
2695 allmulti |= (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2696 }
2697 }
2698 #else
2699 allmulti = (dev->flags & IFF_PROMISC) ? TRUE : FALSE;
2700 #endif /* MCAST_LIST_ACCUMULATION */
2701
2702 allmulti = htol32(allmulti);
2703
2704 memset(&ioc, 0, sizeof(ioc));
2705 ioc.cmd = WLC_SET_PROMISC;
2706 ioc.buf = &allmulti;
2707 ioc.len = sizeof(allmulti);
2708 ioc.set = TRUE;
2709
2710 ret = dhd_wl_ioctl(&dhd->pub, ifidx, &ioc, ioc.buf, ioc.len);
2711 if (ret < 0) {
2712 DHD_ERROR(("%s: set promisc %d failed\n",
2713 dhd_ifname(&dhd->pub, ifidx), ltoh32(allmulti)));
2714 }
2715 }
2716
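/* Program the dongle MAC via the "cur_etheraddr" iovar and, on success,
 * mirror the new address into the net_device and, for the primary interface,
 * into dhd->pub.mac.
 */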
2717 int
2718 _dhd_set_mac_address(dhd_info_t *dhd, int ifidx, uint8 *addr)
2719 {
2720 int ret;
2721
2722 ret = dhd_iovar(&dhd->pub, ifidx, "cur_etheraddr", (char *)addr,
2723 ETHER_ADDR_LEN, NULL, 0, TRUE);
2724 if (ret < 0) {
2725 DHD_ERROR(("%s: set cur_etheraddr failed\n", dhd_ifname(&dhd->pub, ifidx)));
2726 } else {
2727 memcpy(dhd->iflist[ifidx]->net->dev_addr, addr, ETHER_ADDR_LEN);
2728 if (ifidx == 0)
2729 memcpy(dhd->pub.mac.octet, addr, ETHER_ADDR_LEN);
2730 }
2731
2732 return ret;
2733 }
2734
2735 #ifdef SOFTAP
2736 extern struct net_device *ap_net_dev;
2737 extern tsk_ctl_t ap_eth_ctl; /* ap netdev helper thread ctl */
2738 #endif // endif
2739
2740 #ifdef DHD_PSTA
2741 /* Get psta/psr configuration */
2742 int dhd_get_psta_mode(dhd_pub_t *dhdp)
2743 {
2744 dhd_info_t *dhd = dhdp->info;
2745 return (int)dhd->psta_mode;
2746 }
2747 /* Set psta/psr configuration */
2748 int dhd_set_psta_mode(dhd_pub_t *dhdp, uint32 val)
2749 {
2750 dhd_info_t *dhd = dhdp->info;
2751 dhd->psta_mode = val;
2752 return 0;
2753 }
2754 #endif /* DHD_PSTA */
2755
2756 #if (defined(DHD_WET) || defined(DHD_MCAST_REGEN) || defined(DHD_L2_FILTER))
2757 static void
2758 dhd_update_rx_pkt_chainable_state(dhd_pub_t* dhdp, uint32 idx)
2759 {
2760 dhd_info_t *dhd = dhdp->info;
2761 dhd_if_t *ifp;
2762
2763 ASSERT(idx < DHD_MAX_IFS);
2764
2765 ifp = dhd->iflist[idx];
2766
2767 if (
2768 #ifdef DHD_L2_FILTER
2769 (ifp->block_ping) ||
2770 #endif // endif
2771 #ifdef DHD_WET
2772 (dhd->wet_mode) ||
2773 #endif // endif
2774 #ifdef DHD_MCAST_REGEN
2775 (ifp->mcast_regen_bss_enable) ||
2776 #endif // endif
2777 FALSE) {
2778 ifp->rx_pkt_chainable = FALSE;
2779 }
2780 }
2781 #endif /* DHD_WET || DHD_MCAST_REGEN || DHD_L2_FILTER */
2782
2783 #ifdef DHD_WET
2784 /* Get wet configuration */
2785 int dhd_get_wet_mode(dhd_pub_t *dhdp)
2786 {
2787 dhd_info_t *dhd = dhdp->info;
2788 return (int)dhd->wet_mode;
2789 }
2790
2791 /* Set wet configuration */
2792 int dhd_set_wet_mode(dhd_pub_t *dhdp, uint32 val)
2793 {
2794 dhd_info_t *dhd = dhdp->info;
2795 dhd->wet_mode = val;
2796 dhd_update_rx_pkt_chainable_state(dhdp, 0);
2797 return 0;
2798 }
2799 #endif /* DHD_WET */
2800
2801 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2802 int32 dhd_role_to_nl80211_iftype(int32 role)
2803 {
2804 switch (role) {
2805 case WLC_E_IF_ROLE_STA:
2806 return NL80211_IFTYPE_STATION;
2807 case WLC_E_IF_ROLE_AP:
2808 return NL80211_IFTYPE_AP;
2809 case WLC_E_IF_ROLE_WDS:
2810 return NL80211_IFTYPE_WDS;
2811 case WLC_E_IF_ROLE_P2P_GO:
2812 return NL80211_IFTYPE_P2P_GO;
2813 case WLC_E_IF_ROLE_P2P_CLIENT:
2814 return NL80211_IFTYPE_P2P_CLIENT;
2815 case WLC_E_IF_ROLE_IBSS:
2816 case WLC_E_IF_ROLE_NAN:
2817 return NL80211_IFTYPE_ADHOC;
2818 default:
2819 return NL80211_IFTYPE_UNSPECIFIED;
2820 }
2821 }
2822 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2823
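/* Deferred-work handler for interface-add events from the dongle. It runs in
 * process context off the deferred work queue, so net_device creation and
 * registration (which may sleep) are safe here; the queued event structure is
 * freed on exit.
 */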
2824 static void
2825 dhd_ifadd_event_handler(void *handle, void *event_info, u8 event)
2826 {
2827 dhd_info_t *dhd = handle;
2828 dhd_if_event_t *if_event = event_info;
2829 int ifidx, bssidx;
2830 int ret;
2831 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2832 struct wl_if_event_info info;
2833 #else
2834 struct net_device *ndev;
2835 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2836
2837 BCM_REFERENCE(ret);
2838 if (event != DHD_WQ_WORK_IF_ADD) {
2839 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2840 return;
2841 }
2842
2843 if (!dhd) {
2844 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2845 return;
2846 }
2847
2848 if (!if_event) {
2849 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2850 return;
2851 }
2852
2853 dhd_net_if_lock_local(dhd);
2854 DHD_OS_WAKE_LOCK(&dhd->pub);
2855 DHD_PERIM_LOCK(&dhd->pub);
2856
2857 ifidx = if_event->event.ifidx;
2858 bssidx = if_event->event.bssidx;
2859 DHD_TRACE(("%s: registering if with ifidx %d\n", __FUNCTION__, ifidx));
2860
2861 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2862 if (if_event->event.ifidx > 0) {
2863 u8 *mac_addr;
2864 bzero(&info, sizeof(info));
2865 info.ifidx = ifidx;
2866 info.bssidx = bssidx;
2867 info.role = if_event->event.role;
2868 strncpy(info.name, if_event->name, IFNAMSIZ);
2869 if (is_valid_ether_addr(if_event->mac)) {
2870 mac_addr = if_event->mac;
2871 } else {
2872 mac_addr = NULL;
2873 }
2874
2875 /* Do the post interface create ops */
2876 if (wl_cfg80211_post_ifcreate(dhd->pub.info->iflist[0]->net,
2877 &info, mac_addr, NULL, true) == NULL) {
2878 DHD_ERROR(("Post ifcreate ops failed. Returning \n"));
2879 goto done;
2880 }
2881 }
2882 #else
2883 /* This path is for the non-Android case. The interface name on the host
2884 * and in the event message are the same; the name from the event message
2885 * is used to create the dongle interface list on the host. */
2886 ndev = dhd_allocate_if(&dhd->pub, ifidx, if_event->name,
2887 if_event->mac, bssidx, TRUE, if_event->name);
2888 if (!ndev) {
2889 DHD_ERROR(("%s: net device alloc failed \n", __FUNCTION__));
2890 goto done;
2891 }
2892
2893 DHD_PERIM_UNLOCK(&dhd->pub);
2894 ret = dhd_register_if(&dhd->pub, ifidx, TRUE);
2895 DHD_PERIM_LOCK(&dhd->pub);
2896 if (ret != BCME_OK) {
2897 DHD_ERROR(("%s: dhd_register_if failed\n", __FUNCTION__));
2898 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2899 goto done;
2900 }
2901 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2902
2903 #ifndef PCIE_FULL_DONGLE
2904 /* Turn on AP isolation in the firmware for interfaces operating in AP mode */
2905 if (FW_SUPPORTED((&dhd->pub), ap) && (if_event->event.role != WLC_E_IF_ROLE_STA)) {
2906 uint32 var_int = 1;
2907 ret = dhd_iovar(&dhd->pub, ifidx, "ap_isolate", (char *)&var_int, sizeof(var_int),
2908 NULL, 0, TRUE);
2909 if (ret != BCME_OK) {
2910 DHD_ERROR(("%s: Failed to set ap_isolate to dongle\n", __FUNCTION__));
2911 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2912 }
2913 }
2914 #endif /* PCIE_FULL_DONGLE */
2915
2916 done:
2917 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2918
2919 DHD_PERIM_UNLOCK(&dhd->pub);
2920 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2921 dhd_net_if_unlock_local(dhd);
2922 }
2923
2924 static void
2925 dhd_ifdel_event_handler(void *handle, void *event_info, u8 event)
2926 {
2927 dhd_info_t *dhd = handle;
2928 int ifidx;
2929 dhd_if_event_t *if_event = event_info;
2930
2931 if (event != DHD_WQ_WORK_IF_DEL) {
2932 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2933 return;
2934 }
2935
2936 if (!dhd) {
2937 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2938 return;
2939 }
2940
2941 if (!if_event) {
2942 DHD_ERROR(("%s: event data is null \n", __FUNCTION__));
2943 return;
2944 }
2945
2946 dhd_net_if_lock_local(dhd);
2947 DHD_OS_WAKE_LOCK(&dhd->pub);
2948 DHD_PERIM_LOCK(&dhd->pub);
2949
2950 ifidx = if_event->event.ifidx;
2951 DHD_TRACE(("Removing interface with idx %d\n", ifidx));
2952
2953 DHD_PERIM_UNLOCK(&dhd->pub);
2954 if (!dhd->pub.info->iflist[ifidx]) {
2955 /* No matching netdev found */
2956 DHD_ERROR(("Netdev not found! Do nothing.\n"));
2957 goto done;
2958 }
2959 #if defined(WL_CFG80211) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0))
2960 if (if_event->event.ifidx > 0) {
2961 /* Do the post interface del ops */
2962 if (wl_cfg80211_post_ifdel(dhd->pub.info->iflist[ifidx]->net,
2963 true, if_event->event.ifidx) != 0) {
2964 DHD_TRACE(("Post ifdel ops failed. Returning \n"));
2965 goto done;
2966 }
2967 }
2968 #else
2969 /* For non-cfg80211 drivers */
2970 dhd_remove_if(&dhd->pub, ifidx, TRUE);
2971 #endif /* WL_CFG80211 && LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0) */
2972
2973 done:
2974 DHD_PERIM_LOCK(&dhd->pub);
2975 MFREE(dhd->pub.osh, if_event, sizeof(dhd_if_event_t));
2976 DHD_PERIM_UNLOCK(&dhd->pub);
2977 DHD_OS_WAKE_UNLOCK(&dhd->pub);
2978 dhd_net_if_unlock_local(dhd);
2979 }
2980
2981 #ifndef DHD_DIRECT_SET_MAC
2982 static void
2983 dhd_set_mac_addr_handler(void *handle, void *event_info, u8 event)
2984 {
2985 dhd_info_t *dhd = handle;
2986 dhd_if_t *ifp = event_info;
2987
2988 if (event != DHD_WQ_WORK_SET_MAC) {
2989 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
2990 }
2991
2992 if (!dhd) {
2993 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
2994 return;
2995 }
2996
2997 dhd_net_if_lock_local(dhd);
2998 DHD_OS_WAKE_LOCK(&dhd->pub);
2999 DHD_PERIM_LOCK(&dhd->pub);
3000
3001 #ifdef SOFTAP
3002 {
3003 unsigned long flags;
3004 bool in_ap = FALSE;
3005 DHD_GENERAL_LOCK(&dhd->pub, flags);
3006 in_ap = (ap_net_dev != NULL);
3007 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3008
3009 if (in_ap) {
3010 DHD_ERROR(("attempt to set MAC for %s in AP Mode, blocked. \n",
3011 ifp->net->name));
3012 goto done;
3013 }
3014 }
3015 #endif /* SOFTAP */
3016
3017 if (ifp == NULL || !dhd->pub.up) {
3018 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3019 goto done;
3020 }
3021
3022 DHD_ERROR(("%s: MACID is overwritten\n", __FUNCTION__));
3023 ifp->set_macaddress = FALSE;
3024 if (_dhd_set_mac_address(dhd, ifp->idx, ifp->mac_addr) == 0)
3025 DHD_INFO(("%s: MACID is overwritten\n", __FUNCTION__));
3026 else
3027 DHD_ERROR(("%s: _dhd_set_mac_address() failed\n", __FUNCTION__));
3028
3029 done:
3030 DHD_PERIM_UNLOCK(&dhd->pub);
3031 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3032 dhd_net_if_unlock_local(dhd);
3033 }
3034 #endif /* DHD_DIRECT_SET_MAC */
3035
3036 static void
3037 dhd_set_mcast_list_handler(void *handle, void *event_info, u8 event)
3038 {
3039 dhd_info_t *dhd = handle;
3040 int ifidx = (int)((long int)event_info);
3041 dhd_if_t *ifp = NULL;
3042
3043 if (event != DHD_WQ_WORK_SET_MCAST_LIST) {
3044 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
3045 return;
3046 }
3047
3048 if (!dhd) {
3049 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
3050 return;
3051 }
3052
3053 dhd_net_if_lock_local(dhd);
3054 DHD_OS_WAKE_LOCK(&dhd->pub);
3055 DHD_PERIM_LOCK(&dhd->pub);
3056
3057 ifp = dhd->iflist[ifidx];
3058
3059 if (ifp == NULL || !dhd->pub.up) {
3060 DHD_ERROR(("%s: interface info not available/down \n", __FUNCTION__));
3061 goto done;
3062 }
3063
3064 #ifdef SOFTAP
3065 {
3066 bool in_ap = FALSE;
3067 unsigned long flags;
3068 DHD_GENERAL_LOCK(&dhd->pub, flags);
3069 in_ap = (ap_net_dev != NULL);
3070 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3071
3072 if (in_ap) {
3073 DHD_ERROR(("set MULTICAST list for %s in AP Mode, blocked. \n",
3074 ifp->net->name));
3075 ifp->set_multicast = FALSE;
3076 goto done;
3077 }
3078 }
3079 #endif /* SOFTAP */
3080
3081 ifidx = ifp->idx;
3082
3083 #ifdef MCAST_LIST_ACCUMULATION
3084 ifidx = 0;
3085 #endif /* MCAST_LIST_ACCUMULATION */
3086
3087 _dhd_set_multicast_list(dhd, ifidx);
3088 DHD_INFO(("%s: set multicast list for if %d\n", __FUNCTION__, ifidx));
3089
3090 done:
3091 DHD_PERIM_UNLOCK(&dhd->pub);
3092 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3093 dhd_net_if_unlock_local(dhd);
3094 }
3095
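/* net_device_ops .ndo_set_mac_address hook: records the requested address on
 * the interface and either applies it immediately (DHD_DIRECT_SET_MAC) or
 * defers the firmware update to the low-priority work queue.
 */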
3096 static int
3097 dhd_set_mac_address(struct net_device *dev, void *addr)
3098 {
3099 int ret = 0;
3100
3101 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3102 struct sockaddr *sa = (struct sockaddr *)addr;
3103 int ifidx;
3104 dhd_if_t *dhdif;
3105
3106 ifidx = dhd_net2idx(dhd, dev);
3107 if (ifidx == DHD_BAD_IF)
3108 return -1;
3109
3110 dhdif = dhd->iflist[ifidx];
3111
3112 dhd_net_if_lock_local(dhd);
3113 memcpy(dhdif->mac_addr, sa->sa_data, ETHER_ADDR_LEN);
3114 dhdif->set_macaddress = TRUE;
3115 dhd_net_if_unlock_local(dhd);
3116 #ifdef DHD_DIRECT_SET_MAC
3117 /* The new MAC address must be applied in this calling context */
3118 ret = _dhd_set_mac_address(dhd, ifidx, dhdif->mac_addr);
3119 dhdif->set_macaddress = FALSE;
3120 #else
3121 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)dhdif, DHD_WQ_WORK_SET_MAC,
3122 dhd_set_mac_addr_handler, DHD_WQ_WORK_PRIORITY_LOW);
3123 #endif // endif
3124 return ret;
3125 }
3126
3127 static void
3128 dhd_set_multicast_list(struct net_device *dev)
3129 {
3130 dhd_info_t *dhd = DHD_DEV_INFO(dev);
3131 int ifidx;
3132
3133 ifidx = dhd_net2idx(dhd, dev);
3134 if (ifidx == DHD_BAD_IF)
3135 return;
3136
3137 dhd->iflist[ifidx]->set_multicast = TRUE;
3138 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)((long int)ifidx),
3139 DHD_WQ_WORK_SET_MCAST_LIST, dhd_set_mcast_list_handler, DHD_WQ_WORK_PRIORITY_LOW);
3140 }
3141
3142 #ifdef DHD_UCODE_DOWNLOAD
3143 /* Get ucode path */
3144 char *
3145 dhd_get_ucode_path(dhd_pub_t *dhdp)
3146 {
3147 dhd_info_t *dhd = dhdp->info;
3148 return dhd->uc_path;
3149 }
3150 #endif /* DHD_UCODE_DOWNLOAD */
3151
3152 #ifdef PROP_TXSTATUS
3153 int
3154 dhd_os_wlfc_block(dhd_pub_t *pub)
3155 {
3156 dhd_info_t *di = (dhd_info_t *)(pub->info);
3157 ASSERT(di != NULL);
3158 spin_lock_bh(&di->wlfc_spinlock);
3159 return 1;
3160 }
3161
3162 int
3163 dhd_os_wlfc_unblock(dhd_pub_t *pub)
3164 {
3165 dhd_info_t *di = (dhd_info_t *)(pub->info);
3166
3167 ASSERT(di != NULL);
3168 spin_unlock_bh(&di->wlfc_spinlock);
3169 return 1;
3170 }
3171
3172 #endif /* PROP_TXSTATUS */
3173
3174 /* This routine does not support the packet chain feature. It is currently
3175 * tested only with the proxy ARP feature.
3176 */
3177 int dhd_sendup(dhd_pub_t *dhdp, int ifidx, void *p)
3178 {
3179 struct sk_buff *skb;
3180 void *skbhead = NULL;
3181 void *skbprev = NULL;
3182 dhd_if_t *ifp;
3183 ASSERT(!PKTISCHAINED(p));
3184 skb = PKTTONATIVE(dhdp->osh, p);
3185
3186 ifp = dhdp->info->iflist[ifidx];
3187 skb->dev = ifp->net;
3188
3189 skb->protocol = eth_type_trans(skb, skb->dev);
3190
3191 if (in_interrupt()) {
3192 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3193 __FUNCTION__, __LINE__);
3194 netif_rx(skb);
3195 } else {
3196 if (dhdp->info->rxthread_enabled) {
3197 if (!skbhead) {
3198 skbhead = skb;
3199 } else {
3200 PKTSETNEXT(dhdp->osh, skbprev, skb);
3201 }
3202 skbprev = skb;
3203 } else {
3204 /* If the receive is not processed inside an ISR,
3205 * the softirqd must be woken explicitly to service
3206 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
3207 * by netif_rx_ni(), but in earlier kernels, we need
3208 * to do it manually.
3209 */
3210 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
3211 __FUNCTION__, __LINE__);
3212 netif_rx_ni(skb);
3213 }
3214 }
3215
3216 if (dhdp->info->rxthread_enabled && skbhead)
3217 dhd_sched_rxf(dhdp, skbhead);
3218
3219 return BCME_OK;
3220 }
3221
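/* Core transmit path, called once the bus is known to be up: validates
 * length, updates multicast/802.1x accounting, assigns the 802.1d priority
 * (and a flow ring on PCIe full-dongle), optionally routes through
 * proptxstatus, and finally hands the packet to the bus layer. The packet is
 * consumed on every path, including errors.
 */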
3222 int BCMFASTPATH
3223 __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3224 {
3225 int ret = BCME_OK;
3226 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
3227 struct ether_header *eh = NULL;
3228 bool pkt_ether_type_802_1x = FALSE;
3229 uint8 pkt_flow_prio;
3230
3231 #if defined(DHD_L2_FILTER)
3232 dhd_if_t *ifp = dhd_get_ifp(dhdp, ifidx);
3233 #endif // endif
3234
3235 /* Reject if down */
3236 if (!dhdp->up || (dhdp->busstate == DHD_BUS_DOWN)) {
3237 /* free the packet here since the caller won't */
3238 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3239 return -ENODEV;
3240 }
3241
3242 #ifdef PCIE_FULL_DONGLE
3243 if (dhdp->busstate == DHD_BUS_SUSPEND) {
3244 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3245 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3246 return NETDEV_TX_BUSY;
3247 }
3248 #endif /* PCIE_FULL_DONGLE */
3249
3250 /* Reject if pktlen > MAX_MTU_SZ */
3251 if (PKTLEN(dhdp->osh, pktbuf) > MAX_MTU_SZ) {
3252 /* free the packet here since the caller won't */
3253 dhdp->tx_big_packets++;
3254 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3255 return BCME_ERROR;
3256 }
3257
3258 #ifdef DHD_L2_FILTER
3259 /* If dhcp_unicast is enabled, we need to convert broadcast
3260 * DHCP ACK/REPLY packets to unicast. */
3261 if (ifp->dhcp_unicast) {
3262 uint8* mac_addr;
3263 uint8* ehptr = NULL;
3264 int ret;
3265 ret = bcm_l2_filter_get_mac_addr_dhcp_pkt(dhdp->osh, pktbuf, ifidx, &mac_addr);
3266 if (ret == BCME_OK) {
3267 /* if given mac address having valid entry in sta list
3268 * copy the given mac address, and return with BCME_OK
3269 */
3270 if (dhd_find_sta(dhdp, ifidx, mac_addr)) {
3271 ehptr = PKTDATA(dhdp->osh, pktbuf);
3272 bcopy(mac_addr, ehptr + ETHER_DEST_OFFSET, ETHER_ADDR_LEN);
3273 }
3274 }
3275 }
3276
3277 if (ifp->grat_arp && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3278 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
3279 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3280 return BCME_ERROR;
3281 }
3282 }
3283
3284 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
3285 ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, TRUE);
3286
3287 /* Drop the packets if l2 filter has processed it already
3288 * otherwise continue with the normal path
3289 */
3290 if (ret == BCME_OK) {
3291 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3292 return BCME_ERROR;
3293 }
3294 }
3295 #endif /* DHD_L2_FILTER */
3296 /* Update multicast statistic */
3297 if (PKTLEN(dhdp->osh, pktbuf) >= ETHER_HDR_LEN) {
3298 uint8 *pktdata = (uint8 *)PKTDATA(dhdp->osh, pktbuf);
3299 eh = (struct ether_header *)pktdata;
3300
3301 if (ETHER_ISMULTI(eh->ether_dhost))
3302 dhdp->tx_multicast++;
3303 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3304 #ifdef DHD_LOSSLESS_ROAMING
3305 uint8 prio = (uint8)PKTPRIO(pktbuf);
3306
3307 /* back up 802.1x's priority */
3308 dhdp->prio_8021x = prio;
3309 #endif /* DHD_LOSSLESS_ROAMING */
3310 pkt_ether_type_802_1x = TRUE;
3311 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_TRANSMIT_REQUESTED);
3312 atomic_inc(&dhd->pend_8021x_cnt);
3313 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
3314 wl_handle_wps_states(dhd_idx2net(dhdp, ifidx),
3315 pktdata, PKTLEN(dhdp->osh, pktbuf), TRUE);
3316 #endif /* WL_CFG80211 && WL_WPS_SYNC */
3317 }
3318 dhd_dump_pkt(dhdp, ifidx, pktdata,
3319 (uint32)PKTLEN(dhdp->osh, pktbuf), TRUE, NULL, NULL);
3320 } else {
3321 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3322 return BCME_ERROR;
3323 }
3324
3325 {
3326 /* Look into the packet and update the packet priority */
3327 #ifndef PKTPRIO_OVERRIDE
3328 if (PKTPRIO(pktbuf) == 0)
3329 #endif /* !PKTPRIO_OVERRIDE */
3330 {
3331 #if defined(QOS_MAP_SET)
3332 pktsetprio_qms(pktbuf, wl_get_up_table(dhdp, ifidx), FALSE);
3333 #else
3334 pktsetprio(pktbuf, FALSE);
3335 #endif /* QOS_MAP_SET */
3336 }
3337 #ifndef PKTPRIO_OVERRIDE
3338 else {
3339 /* Some protocols like OZMO use priority values from 256..263.
3340 * these are magic values to indicate a specific 802.1d priority.
3341 * make sure that priority field is in range of 0..7
3342 */
3343 PKTSETPRIO(pktbuf, PKTPRIO(pktbuf) & 0x7);
3344 }
3345 #endif /* !PKTPRIO_OVERRIDE */
3346 }
3347
3348 BCM_REFERENCE(pkt_ether_type_802_1x);
3349 BCM_REFERENCE(pkt_flow_prio);
3350
3351 #ifdef SUPPORT_SET_TID
3352 dhd_set_tid_based_on_uid(dhdp, pktbuf);
3353 #endif /* SUPPORT_SET_TID */
3354
3355 #ifdef PCIE_FULL_DONGLE
3356 /*
3357 * Look up the per-interface hash table for a matching flow ring. If one is
3358 * not available, allocate a unique flowid and add a flow ring entry.
3359 * The found or newly created flowid is placed into the pktbuf's tag.
3360 */
3361
3362 #ifdef DHD_LOSSLESS_ROAMING
3363 /* For LLR override and use flowring with prio 7 for 802.1x packets */
3364 if (pkt_ether_type_802_1x) {
3365 pkt_flow_prio = PRIO_8021D_NC;
3366 } else
3367 #endif /* DHD_LOSSLESS_ROAMING */
3368 {
3369 pkt_flow_prio = dhdp->flow_prio_map[(PKTPRIO(pktbuf))];
3370 }
3371
3372 ret = dhd_flowid_update(dhdp, ifidx, pkt_flow_prio, pktbuf);
3373 if (ret != BCME_OK) {
3374 if (ntoh16(eh->ether_type) == ETHER_TYPE_802_1X) {
3375 atomic_dec(&dhd->pend_8021x_cnt);
3376 }
3377 PKTCFREE(dhd->pub.osh, pktbuf, TRUE);
3378 return ret;
3379 }
3380 #endif /* PCIE_FULL_DONGLE */
3381
3382 #ifdef PROP_TXSTATUS
3383 if (dhd_wlfc_is_supported(dhdp)) {
3384 /* store the interface ID */
3385 DHD_PKTTAG_SETIF(PKTTAG(pktbuf), ifidx);
3386
3387 /* store destination MAC in the tag as well */
3388 DHD_PKTTAG_SETDSTN(PKTTAG(pktbuf), eh->ether_dhost);
3389
3390 /* decide which FIFO this packet belongs to */
3391 if (ETHER_ISMULTI(eh->ether_dhost))
3392 /* one additional queue index (highest AC + 1) is used for bc/mc queue */
3393 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), AC_COUNT);
3394 else
3395 DHD_PKTTAG_SETFIFO(PKTTAG(pktbuf), WME_PRIO2AC(PKTPRIO(pktbuf)));
3396 } else
3397 #endif /* PROP_TXSTATUS */
3398 {
3399 /* If the protocol uses a data header, apply it */
3400 dhd_prot_hdrpush(dhdp, ifidx, pktbuf);
3401 }
3402
3403 /* Use bus module to send data frame */
3404 #ifdef PROP_TXSTATUS
3405 {
3406 if (dhd_wlfc_commit_packets(dhdp, (f_commitpkt_t)dhd_bus_txdata,
3407 dhdp->bus, pktbuf, TRUE) == WLFC_UNSUPPORTED) {
3408 /* non-proptxstatus way */
3409 #ifdef BCMPCIE
3410 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3411 #else
3412 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3413 #endif /* BCMPCIE */
3414 }
3415 }
3416 #else
3417 #ifdef BCMPCIE
3418 ret = dhd_bus_txdata(dhdp->bus, pktbuf, (uint8)ifidx);
3419 #else
3420 ret = dhd_bus_txdata(dhdp->bus, pktbuf);
3421 #endif /* BCMPCIE */
3422 #endif /* PROP_TXSTATUS */
3423
3424 return ret;
3425 }
3426
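/* Wrapper around __dhd_sendpkt() that brackets the send with bus-busy
 * accounting, rejects packets while the interface is being deleted or the
 * bus is down/suspended, and (with runtime PM) wakes the PCIe bus first.
 */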
3427 int BCMFASTPATH
3428 dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
3429 {
3430 int ret = 0;
3431 unsigned long flags;
3432 dhd_if_t *ifp;
3433
3434 DHD_GENERAL_LOCK(dhdp, flags);
3435 ifp = dhd_get_ifp(dhdp, ifidx);
3436 if (!ifp || ifp->del_in_progress) {
3437 DHD_ERROR(("%s: ifp:%p del_in_progress:%d\n",
3438 __FUNCTION__, ifp, ifp ? ifp->del_in_progress : 0));
3439 DHD_GENERAL_UNLOCK(dhdp, flags);
3440 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3441 return -ENODEV;
3442 }
3443 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
3444 DHD_ERROR(("%s: returning as busstate=%d\n",
3445 __FUNCTION__, dhdp->busstate));
3446 DHD_GENERAL_UNLOCK(dhdp, flags);
3447 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3448 return -ENODEV;
3449 }
3450 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3451 DHD_BUS_BUSY_SET_IN_SEND_PKT(dhdp);
3452 DHD_GENERAL_UNLOCK(dhdp, flags);
3453
3454 #ifdef DHD_PCIE_RUNTIMEPM
3455 if (dhdpcie_runtime_bus_wake(dhdp, FALSE, __builtin_return_address(0))) {
3456 DHD_ERROR(("%s : pcie is still in suspend state!!\n", __FUNCTION__));
3457 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3458 ret = -EBUSY;
3459 goto exit;
3460 }
3461 #endif /* DHD_PCIE_RUNTIMEPM */
3462
3463 DHD_GENERAL_LOCK(dhdp, flags);
3464 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
3465 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3466 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
3467 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3468 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3469 dhd_os_tx_completion_wake(dhdp);
3470 dhd_os_busbusy_wake(dhdp);
3471 DHD_GENERAL_UNLOCK(dhdp, flags);
3472 PKTCFREE(dhdp->osh, pktbuf, TRUE);
3473 return -ENODEV;
3474 }
3475 DHD_GENERAL_UNLOCK(dhdp, flags);
3476
3477 ret = __dhd_sendpkt(dhdp, ifidx, pktbuf);
3478
3479 #ifdef DHD_PCIE_RUNTIMEPM
3480 exit:
3481 #endif // endif
3482 DHD_GENERAL_LOCK(dhdp, flags);
3483 DHD_BUS_BUSY_CLEAR_IN_SEND_PKT(dhdp);
3484 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_SEND_PKT);
3485 dhd_os_tx_completion_wake(dhdp);
3486 dhd_os_busbusy_wake(dhdp);
3487 DHD_GENERAL_UNLOCK(dhdp, flags);
3488 return ret;
3489 }
3490
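/* .ndo_start_xmit entry point: returns NETDEV_TX_BUSY (stopping the queue)
 * while the bus is down, suspending, or suspended; otherwise it converts the
 * skb to a driver packet and pushes it down the transmit path.
 */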
3491 #ifndef DHD_MONITOR_INTERFACE
3492 static
3493 #endif /* DHD_MONITOR_INTERFACE */
3494 #ifdef CFI_CHECK
3495 netdev_tx_t BCMFASTPATH
3496 #else /* CFI_CHECK */
3497 int BCMFASTPATH
3498 #endif /* CFI_CHECK */
3499 dhd_start_xmit(struct sk_buff *skb, struct net_device *net)
3500 {
3501 int ret;
3502 uint datalen;
3503 void *pktbuf;
3504 dhd_info_t *dhd = DHD_DEV_INFO(net);
3505 dhd_if_t *ifp = NULL;
3506 int ifidx;
3507 unsigned long flags;
3508 uint8 htsfdlystat_sz = 0;
3509
3510 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3511
3512 if (dhd_query_bus_erros(&dhd->pub)) {
3513 #ifdef CFI_CHECK
3514 return NETDEV_TX_BUSY;
3515 #else
3516 return -ENODEV;
3517 #endif /* CFI_CHECK */
3518 }
3519
3520 DHD_GENERAL_LOCK(&dhd->pub, flags);
3521 DHD_BUS_BUSY_SET_IN_TX(&dhd->pub);
3522 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3523
3524 #ifdef DHD_PCIE_RUNTIMEPM
3525 if (dhdpcie_runtime_bus_wake(&dhd->pub, FALSE, dhd_start_xmit)) {
		/* To avoid packet loss, return NETDEV_TX_BUSY until runtime resume is
		 * done, and stop the network queue temporarily until then.
		 */
3528 DHD_GENERAL_LOCK(&dhd->pub, flags);
3529 if (!dhdpcie_is_resume_done(&dhd->pub)) {
3530 dhd_bus_stop_queue(dhd->pub.bus);
3531 }
3532 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3533 dhd_os_busbusy_wake(&dhd->pub);
3534 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3535 return NETDEV_TX_BUSY;
3536 }
3537 #endif /* DHD_PCIE_RUNTIMEPM */
3538
3539 DHD_GENERAL_LOCK(&dhd->pub, flags);
3540 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3541 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state!!\n",
3542 __FUNCTION__, dhd->pub.busstate, dhd->pub.dhd_bus_busy_state));
3543 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3544 #ifdef PCIE_FULL_DONGLE
3545 /* Stop tx queues if suspend is in progress */
3546 if (DHD_BUS_CHECK_ANY_SUSPEND_IN_PROGRESS(&dhd->pub)) {
3547 dhd_bus_stop_queue(dhd->pub.bus);
3548 }
3549 #endif /* PCIE_FULL_DONGLE */
3550 dhd_os_busbusy_wake(&dhd->pub);
3551 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3552 return NETDEV_TX_BUSY;
3553 }
3554
3555 DHD_OS_WAKE_LOCK(&dhd->pub);
3556 DHD_PERIM_LOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3557
3558 #if defined(DHD_HANG_SEND_UP_TEST)
3559 if (dhd->pub.req_hang_type == HANG_REASON_BUS_DOWN) {
3560 DHD_ERROR(("%s: making DHD_BUS_DOWN\n", __FUNCTION__));
3561 dhd->pub.busstate = DHD_BUS_DOWN;
3562 }
3563 #endif /* DHD_HANG_SEND_UP_TEST */
3564
3565 /* Reject if down */
3566 if (dhd->pub.hang_was_sent || DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub)) {
3567 DHD_ERROR(("%s: xmit rejected pub.up=%d busstate=%d \n",
3568 __FUNCTION__, dhd->pub.up, dhd->pub.busstate));
3569 netif_stop_queue(net);
3570 #if defined(OEM_ANDROID)
3571 /* Send Event when bus down detected during data session */
3572 if (dhd->pub.up && !dhd->pub.hang_was_sent) {
3573 DHD_ERROR(("%s: Event HANG sent up\n", __FUNCTION__));
3574 dhd->pub.hang_reason = HANG_REASON_BUS_DOWN;
3575 net_os_send_hang_message(net);
3576 }
3577 #endif /* OEM_ANDROID */
3578 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3579 dhd_os_busbusy_wake(&dhd->pub);
3580 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3581 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3582 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3583 return NETDEV_TX_BUSY;
3584 }
3585
3586 ifp = DHD_DEV_IFP(net);
3587 ifidx = DHD_DEV_IFIDX(net);
3588 if (!ifp || (ifidx == DHD_BAD_IF) ||
3589 ifp->del_in_progress) {
3590 DHD_ERROR(("%s: ifidx %d ifp:%p del_in_progress:%d\n",
3591 __FUNCTION__, ifidx, ifp, (ifp ? ifp->del_in_progress : 0)));
3592 netif_stop_queue(net);
3593 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3594 dhd_os_busbusy_wake(&dhd->pub);
3595 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3596 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3597 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3598 return NETDEV_TX_BUSY;
3599 }
3600
3601 DHD_IF_SET_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
3602 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3603
3604 ASSERT(ifidx == dhd_net2idx(dhd, net));
3605 ASSERT((ifp != NULL) && ((ifidx < DHD_MAX_IFS) && (ifp == dhd->iflist[ifidx])));
3606
3607 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3608
3609 /* re-align socket buffer if "skb->data" is odd address */
3610 if (((unsigned long)(skb->data)) & 0x1) {
3611 unsigned char *data = skb->data;
3612 uint32 length = skb->len;
3613 PKTPUSH(dhd->pub.osh, skb, 1);
3614 memmove(skb->data, data, length);
3615 PKTSETLEN(dhd->pub.osh, skb, length);
3616 }
3617
3618 datalen = PKTLEN(dhd->pub.osh, skb);
3619
3620 /* Make sure there's enough room for any header */
3621 if (skb_headroom(skb) < dhd->pub.hdrlen + htsfdlystat_sz) {
3622 struct sk_buff *skb2;
3623
3624 DHD_INFO(("%s: insufficient headroom\n",
3625 dhd_ifname(&dhd->pub, ifidx)));
3626 dhd->pub.tx_realloc++;
3627
3628 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3629 skb2 = skb_realloc_headroom(skb, dhd->pub.hdrlen + htsfdlystat_sz);
3630
3631 dev_kfree_skb(skb);
3632 if ((skb = skb2) == NULL) {
3633 DHD_ERROR(("%s: skb_realloc_headroom failed\n",
3634 dhd_ifname(&dhd->pub, ifidx)));
3635 ret = -ENOMEM;
3636 goto done;
3637 }
3638 bcm_object_trace_opr(skb, BCM_OBJDBG_ADD_PKT, __FUNCTION__, __LINE__);
3639 }
3640
3641 /* Convert to packet */
3642 if (!(pktbuf = PKTFRMNATIVE(dhd->pub.osh, skb))) {
3643 DHD_ERROR(("%s: PKTFRMNATIVE failed\n",
3644 dhd_ifname(&dhd->pub, ifidx)));
3645 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE, __FUNCTION__, __LINE__);
3646 dev_kfree_skb_any(skb);
3647 ret = -ENOMEM;
3648 goto done;
3649 }
3650
3651 #ifdef DHD_WET
	/* wet related packet proto manipulation should be done in DHD
	 * since dongle doesn't have complete payload
	 */
3655 if (WET_ENABLED(&dhd->pub) &&
3656 (dhd_wet_send_proc(dhd->pub.wet_info, pktbuf, &pktbuf) < 0)) {
3657 DHD_INFO(("%s:%s: wet send proc failed\n",
3658 __FUNCTION__, dhd_ifname(&dhd->pub, ifidx)));
3659 PKTFREE(dhd->pub.osh, pktbuf, FALSE);
3660 ret = -EFAULT;
3661 goto done;
3662 }
3663 #endif /* DHD_WET */
3664
3665 #ifdef DHD_PSTA
3666 /* PSR related packet proto manipulation should be done in DHD
3667 * since dongle doesn't have complete payload
3668 */
3669 if (PSR_ENABLED(&dhd->pub) &&
3670 (dhd_psta_proc(&dhd->pub, ifidx, &pktbuf, TRUE) < 0)) {
3671
3672 DHD_ERROR(("%s:%s: psta send proc failed\n", __FUNCTION__,
3673 dhd_ifname(&dhd->pub, ifidx)));
3674 }
3675 #endif /* DHD_PSTA */
3676 #ifdef CONFIG_ARCH_MSM
3677 #if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
3678 if (skb->sk) {
3679 sk_pacing_shift_update(skb->sk, 8);
3680 }
3681 #endif /* LINUX_VERSION_CODE >= 4.16.0 */
3682 #endif /* CONFIG_ARCH_MSM */
3683 #ifdef DHDTCPSYNC_FLOOD_BLK
3684 if (dhd_tcpdata_get_flag(&dhd->pub, pktbuf) == FLAG_SYNCACK) {
		ifp->tsyncack_txed++;
3686 }
3687 #endif /* DHDTCPSYNC_FLOOD_BLK */
3688
3689 #ifdef DHDTCPACK_SUPPRESS
3690 if (dhd->pub.tcpack_sup_mode == TCPACK_SUP_HOLD) {
		/* If this packet has been held or got freed, just return */
3692 if (dhd_tcpack_hold(&dhd->pub, pktbuf, ifidx)) {
3693 ret = 0;
3694 goto done;
3695 }
3696 } else {
3697 /* If this packet has replaced another packet and got freed, just return */
3698 if (dhd_tcpack_suppress(&dhd->pub, pktbuf)) {
3699 ret = 0;
3700 goto done;
3701 }
3702 }
3703 #endif /* DHDTCPACK_SUPPRESS */
3704
	/*
	 * If Load Balancing is enabled, queue the packet;
	 * else send it directly from here.
	 */
3709 #if defined(DHD_LB_TXP)
3710 ret = dhd_lb_sendpkt(dhd, net, ifidx, pktbuf);
3711 #else
3712 ret = __dhd_sendpkt(&dhd->pub, ifidx, pktbuf);
3713 #endif // endif
3714
3715 done:
3716 if (ret) {
3717 ifp->stats.tx_dropped++;
3718 dhd->pub.tx_dropped++;
3719 } else {
3720 #ifdef PROP_TXSTATUS
		/* tx_packets counter can be counted only when wlfc is disabled */
3722 if (!dhd_wlfc_is_supported(&dhd->pub))
3723 #endif // endif
3724 {
3725 dhd->pub.tx_packets++;
3726 ifp->stats.tx_packets++;
3727 ifp->stats.tx_bytes += datalen;
3728 }
3729 }
3730
3731 DHD_GENERAL_LOCK(&dhd->pub, flags);
3732 DHD_BUS_BUSY_CLEAR_IN_TX(&dhd->pub);
3733 DHD_IF_CLR_TX_ACTIVE(ifp, DHD_TX_START_XMIT);
3734 dhd_os_tx_completion_wake(&dhd->pub);
3735 dhd_os_busbusy_wake(&dhd->pub);
3736 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
3737 DHD_PERIM_UNLOCK_TRY(DHD_FWDER_UNIT(dhd), lock_taken);
3738 DHD_OS_WAKE_UNLOCK(&dhd->pub);
3739 /* Return ok: we always eat the packet */
3740 return NETDEV_TX_OK;
3741 }
3742
3743 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
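/* Workqueue handler that wakes the PCIe bus on the RX path: it takes a
 * runtime PM reference purely to resume the bus, then immediately allows
 * autosuspend again, and finally frees the work item.
 */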
3744 void dhd_rx_wq_wakeup(struct work_struct *ptr)
3745 {
3746 struct dhd_rx_tx_work *work;
3747 struct dhd_pub * pub;
3748
3749 work = container_of(ptr, struct dhd_rx_tx_work, work);
3750
3751 pub = work->pub;
3752
3753 DHD_RPM(("%s: ENTER. \n", __FUNCTION__));
3754
3755 if (atomic_read(&pub->block_bus) || pub->busstate == DHD_BUS_DOWN) {
3756 return;
3757 }
3758
3759 DHD_OS_WAKE_LOCK(pub);
3760 if (pm_runtime_get_sync(dhd_bus_to_dev(pub->bus)) >= 0) {
3761
3762 // do nothing but wakeup the bus.
3763 pm_runtime_mark_last_busy(dhd_bus_to_dev(pub->bus));
3764 pm_runtime_put_autosuspend(dhd_bus_to_dev(pub->bus));
3765 }
3766 DHD_OS_WAKE_UNLOCK(pub);
3767 kfree(work);
3768 }
3769
void dhd_start_xmit_wq_adapter(struct work_struct *ptr)
{
	struct dhd_rx_tx_work *work;
	int ret = 0;
	dhd_info_t *dhd;
	struct dhd_bus *bus;
	struct net_device *net;

	work = container_of(ptr, struct dhd_rx_tx_work, work);

	/* Keep a local copy of net: the work item is freed below and must not
	 * be dereferenced afterwards.
	 */
	net = work->net;
	dhd = DHD_DEV_INFO(net);

	bus = dhd->pub.bus;

	if (atomic_read(&dhd->pub.block_bus)) {
		kfree_skb(work->skb);
		kfree(work);
		dhd_netif_start_queue(bus);
		return;
	}

	if (pm_runtime_get_sync(dhd_bus_to_dev(bus)) >= 0) {
		ret = dhd_start_xmit(work->skb, net);
		pm_runtime_mark_last_busy(dhd_bus_to_dev(bus));
		pm_runtime_put_autosuspend(dhd_bus_to_dev(bus));
	}
	kfree(work);
	dhd_netif_start_queue(bus);

	if (ret)
		netdev_err(net,
			"error: dhd_start_xmit():%d\n", ret);
}
3802
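/* Transmit entry point used with native runtime PM: if the bus is suspended,
 * stop the queue and defer the skb to a workqueue that resumes the bus
 * first; if the bus is in the DATA state, transmit inline; otherwise fail.
 */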
3803 #ifdef CFI_CHECK
3804 netdev_tx_t BCMFASTPATH
3805 #else
3806 int BCMFASTPATH
3807 #endif /* CFI_CHECK */
3808 dhd_start_xmit_wrapper(struct sk_buff *skb, struct net_device *net)
3809 {
3810 struct dhd_rx_tx_work *start_xmit_work;
3811 int ret;
3812 dhd_info_t *dhd = DHD_DEV_INFO(net);
3813
3814 if (dhd->pub.busstate == DHD_BUS_SUSPEND) {
3815 DHD_RPM(("%s: wakeup the bus using workqueue.\n", __FUNCTION__));
3816
3817 dhd_netif_stop_queue(dhd->pub.bus);
3818
3819 start_xmit_work = (struct dhd_rx_tx_work*)
3820 kmalloc(sizeof(*start_xmit_work), GFP_ATOMIC);
3821
3822 if (!start_xmit_work) {
3823 netdev_err(net,
3824 "error: failed to alloc start_xmit_work\n");
3825 #ifdef CFI_CHECK
3826 ret = NETDEV_TX_BUSY;
3827 #else
3828 ret = -ENOMEM;
3829 #endif /* CFI_CHECK */
3830 goto exit;
3831 }
3832
3833 INIT_WORK(&start_xmit_work->work, dhd_start_xmit_wq_adapter);
3834 start_xmit_work->skb = skb;
3835 start_xmit_work->net = net;
3836 queue_work(dhd->tx_wq, &start_xmit_work->work);
3837 #ifdef CFI_CHECK
3838 ret = NETDEV_TX_OK;
3839 #else
3840 ret = NET_XMIT_SUCCESS;
3841 #endif /* CFI_CHECK */
3842
3843 } else if (dhd->pub.busstate == DHD_BUS_DATA) {
3844 ret = dhd_start_xmit(skb, net);
3845 } else {
3846 /* when bus is down */
3847 #ifdef CFI_CHECK
3848 ret = NETDEV_TX_BUSY;
3849 #else
3850 ret = -ENODEV;
3851 #endif /* CFI_CHECK */
3852 }
3853
3854 exit:
3855 return ret;
3856 }
3857
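/* Queue dhd_rx_wq_wakeup() on the RX workqueue; the allocation is
 * GFP_ATOMIC so this can be called from atomic context.
 */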
3858 void
3859 dhd_bus_wakeup_work(dhd_pub_t *dhdp)
3860 {
3861 struct dhd_rx_tx_work *rx_work;
3862 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3863
3864 rx_work = kmalloc(sizeof(*rx_work), GFP_ATOMIC);
3865 if (!rx_work) {
3866 DHD_ERROR(("%s: start_rx_work alloc error. \n", __FUNCTION__));
3867 return;
3868 }
3869
3870 INIT_WORK(&rx_work->work, dhd_rx_wq_wakeup);
3871 rx_work->pub = dhdp;
3872 queue_work(dhd->rx_wq, &rx_work->work);
3873
3874 }
3875 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
3876
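/* Apply tx flow control from the dongle to one net device: ON stops the
 * netif queue, OFF wakes it; a request that matches the current txoff
 * state is only logged.
 */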
3877 static void
3878 __dhd_txflowcontrol(dhd_pub_t *dhdp, struct net_device *net, bool state)
{
	if ((state == ON) && (dhdp->txoff == FALSE)) {
		netif_stop_queue(net);
		dhd_prot_update_pktid_txq_stop_cnt(dhdp);
	} else if (state == ON) {
		DHD_ERROR(("%s: Netif queue has already been stopped\n", __FUNCTION__));
	}

	if ((state == OFF) && (dhdp->txoff == TRUE)) {
		netif_wake_queue(net);
		dhd_prot_update_pktid_txq_start_cnt(dhdp);
	} else if (state == OFF) {
		DHD_ERROR(("%s: Netif queue has already been started\n", __FUNCTION__));
	}
}
3894
3895 void
3896 dhd_txflowcontrol(dhd_pub_t *dhdp, int ifidx, bool state)
3897 {
3898 struct net_device *net;
3899 dhd_info_t *dhd = dhdp->info;
3900 int i;
3901
3902 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
3903
3904 ASSERT(dhd);
3905
3906 #ifdef DHD_LOSSLESS_ROAMING
3907 /* block flowcontrol during roaming */
3908 if ((dhdp->dequeue_prec_map == 1 << PRIO_8021D_NC) && state == ON) {
3909 return;
3910 }
3911 #endif // endif
3912
3913 if (ifidx == ALL_INTERFACES) {
3914 for (i = 0; i < DHD_MAX_IFS; i++) {
3915 if (dhd->iflist[i]) {
3916 net = dhd->iflist[i]->net;
3917 __dhd_txflowcontrol(dhdp, net, state);
3918 }
3919 }
3920 } else {
3921 if (dhd->iflist[ifidx]) {
3922 net = dhd->iflist[ifidx]->net;
3923 __dhd_txflowcontrol(dhdp, net, state);
3924 }
3925 }
3926 dhdp->txoff = state;
3927 }
3928
3929 #ifdef DHD_MCAST_REGEN
3930 /*
3931 * Description: This function is called to do the reverse translation
3932 *
3933 * Input eh - pointer to the ethernet header
3934 */
3935 int32
3936 dhd_mcast_reverse_translation(struct ether_header *eh)
3937 {
	uint8 *iph;
	uint32 dest_ip;

	/* Only IP packets are handled */
	if (eh->ether_type != hton16(ETHER_TYPE_IP))
		return BCME_ERROR;

	iph = (uint8 *)eh + ETHER_HDR_LEN;

	/* Non-IPv4 multicast packets are not handled */
	if (IP_VER(iph) != IP_VER_4)
		return BCME_ERROR;

	/* Read the destination address only after the header checks above */
	dest_ip = ntoh32(*((uint32 *)(iph + IPV4_DEST_IP_OFFSET)));
3951
3952 /*
3953 * The packet has a multicast IP and unicast MAC. That means
3954 * we have to do the reverse translation
3955 */
3956 if (IPV4_ISMULTI(dest_ip) && !ETHER_ISMULTI(&eh->ether_dhost)) {
3957 ETHER_FILL_MCAST_ADDR_FROM_IP(eh->ether_dhost, dest_ip);
3958 return BCME_OK;
3959 }
3960
3961 return BCME_ERROR;
3962 }
#endif /* DHD_MCAST_REGEN */
3964
3965 #ifdef SHOW_LOGTRACE
3966 static void
3967 dhd_netif_rx_ni(struct sk_buff * skb)
3968 {
	/* Do not call netif_receive_skb as this workqueue scheduler is not
	 * from NAPI. Also, as we are not in interrupt context, do not call
	 * netif_rx; instead call netif_rx_ni (for kernel >= 2.6), which does
	 * netif_rx, disables irqs, raises the NET_RX softirq and re-enables
	 * interrupts.
	 */
3975 netif_rx_ni(skb);
3976 }
3977
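/* Parse a WLC_E_TRACE event packet and pass its payload to the debug
 * trace handler; the caller keeps ownership of the skb.
 */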
3978 static int
3979 dhd_event_logtrace_pkt_process(dhd_pub_t *dhdp, struct sk_buff * skb)
3980 {
3981 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
3982 int ret = BCME_OK;
3983 uint datalen;
3984 bcm_event_msg_u_t evu;
3985 void *data = NULL;
3986 void *pktdata = NULL;
3987 bcm_event_t *pvt_data;
3988 uint pktlen;
3989
3990 DHD_TRACE(("%s:Enter\n", __FUNCTION__));
3991
3992 /* In dhd_rx_frame, header is stripped using skb_pull
3993 * of size ETH_HLEN, so adjust pktlen accordingly
3994 */
3995 pktlen = skb->len + ETH_HLEN;
3996
3997 pktdata = (void *)skb_mac_header(skb);
3998 ret = wl_host_event_get_data(pktdata, pktlen, &evu);
3999
4000 if (ret != BCME_OK) {
4001 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
4002 __FUNCTION__, ret));
4003 goto exit;
4004 }
4005
4006 datalen = ntoh32(evu.event.datalen);
4007
4008 pvt_data = (bcm_event_t *)pktdata;
4009 data = &pvt_data[1];
4010
4011 dhd_dbg_trace_evnt_handler(dhdp, data, &dhd->event_data, datalen);
4012
4013 exit:
4014 return ret;
4015 }
4016
/*
 * dhd_event_logtrace_process_items processes each skb from
 * evt_trace_queue. Returns TRUE if more packets remain to be
 * processed, else FALSE.
 */
4023
4024 static int
4025 dhd_event_logtrace_process_items(dhd_info_t *dhd)
4026 {
4027 dhd_pub_t *dhdp;
4028 struct sk_buff *skb;
4029 uint32 qlen;
4030 uint32 process_len;
4031
4032 if (!dhd) {
4033 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
4034 return 0;
4035 }
4036
4037 dhdp = &dhd->pub;
4038
4039 if (!dhdp) {
4040 DHD_ERROR(("%s: dhd pub is null \n", __FUNCTION__));
4041 return 0;
4042 }
4043
4044 qlen = skb_queue_len(&dhd->evt_trace_queue);
4045 process_len = MIN(qlen, DHD_EVENT_LOGTRACE_BOUND);
4046
4047 /* Run while loop till bound is reached or skb queue is empty */
4048 while (process_len--) {
4049 int ifid = 0;
4050 skb = skb_dequeue(&dhd->evt_trace_queue);
4051 if (skb == NULL) {
			DHD_ERROR(("%s: skb is NULL, which is not a valid case\n",
4053 __FUNCTION__));
4054 break;
4055 }
4056 BCM_REFERENCE(ifid);
4057 #ifdef PCIE_FULL_DONGLE
4058 /* Check if pkt is from INFO ring or WLC_E_TRACE */
4059 ifid = DHD_PKTTAG_IFID((dhd_pkttag_fr_t *)PKTTAG(skb));
4060 if (ifid == DHD_DUMMY_INFO_IF) {
4061 /* Process logtrace from info rings */
4062 dhd_event_logtrace_infobuf_pkt_process(dhdp, skb, &dhd->event_data);
4063 } else
4064 #endif /* PCIE_FULL_DONGLE */
4065 {
4066 /* Processing WLC_E_TRACE case OR non PCIE PCIE_FULL_DONGLE case */
4067 dhd_event_logtrace_pkt_process(dhdp, skb);
4068 }
4069
4070 /* Dummy sleep so that scheduler kicks in after processing any logprints */
4071 OSL_SLEEP(0);
4072
4073 /* Send packet up if logtrace_pkt_sendup is TRUE */
4074 if (dhdp->logtrace_pkt_sendup) {
4075 #ifdef DHD_USE_STATIC_CTRLBUF
4076 /* If bufs are allocated via static buf pool
4077 * and logtrace_pkt_sendup enabled, make a copy,
4078 * free the local one and send the copy up.
4079 */
4080 void *npkt = PKTDUP(dhdp->osh, skb);
4081 /* Clone event and send it up */
4082 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4083 if (npkt) {
4084 skb = npkt;
4085 } else {
4086 DHD_ERROR(("skb clone failed. dropping logtrace pkt.\n"));
4087 /* Packet is already freed, go to next packet */
4088 continue;
4089 }
4090 #endif /* DHD_USE_STATIC_CTRLBUF */
4091 #ifdef PCIE_FULL_DONGLE
			/* For infobuf packets, since ifid is DHD_DUMMY_INFO_IF,
			 * assign skb->dev to the primary interface's net device
			 * before sending the skb up to the network layer.
			 */
4096 if (ifid == DHD_DUMMY_INFO_IF) {
4097 skb = PKTTONATIVE(dhdp->osh, skb);
4098 skb->dev = dhd->iflist[0]->net;
4099 }
4100 #endif /* PCIE_FULL_DONGLE */
4101 /* Send pkt UP */
4102 dhd_netif_rx_ni(skb);
4103 } else {
4104 /* Don't send up. Free up the packet. */
4105 #ifdef DHD_USE_STATIC_CTRLBUF
4106 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4107 #else
4108 PKTFREE(dhdp->osh, skb, FALSE);
4109 #endif /* DHD_USE_STATIC_CTRLBUF */
4110 }
4111 }
4112
4113 /* Reschedule if more packets to be processed */
4114 return (qlen >= DHD_EVENT_LOGTRACE_BOUND);
4115 }
4116
4117 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
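/* Kthread that drains queued logtrace packets (or EDL completions when the
 * dongle supports EDL) each time its binary semaphore is raised, yielding
 * between bounded batches so other tasks can run.
 */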
4118 static int
4119 dhd_logtrace_thread(void *data)
4120 {
4121 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
4122 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
4123 dhd_pub_t *dhdp = (dhd_pub_t *)&dhd->pub;
4124 int ret;
4125
4126 while (1) {
4127 dhdp->logtrace_thr_ts.entry_time = OSL_LOCALTIME_NS();
4128 if (!binary_sema_down(tsk)) {
4129 dhdp->logtrace_thr_ts.sem_down_time = OSL_LOCALTIME_NS();
4130 SMP_RD_BARRIER_DEPENDS();
4131 if (dhd->pub.dongle_reset == FALSE) {
4132 do {
4133 /* Check terminated before processing the items */
4134 if (tsk->terminated) {
4135 DHD_ERROR(("%s: task terminated\n", __FUNCTION__));
4136 goto exit;
4137 }
4138 #ifdef EWP_EDL
4139 /* check if EDL is being used */
4140 if (dhd->pub.dongle_edl_support) {
4141 ret = dhd_prot_process_edl_complete(&dhd->pub,
4142 &dhd->event_data);
4143 } else {
4144 ret = dhd_event_logtrace_process_items(dhd);
4145 }
4146 #else
4147 ret = dhd_event_logtrace_process_items(dhd);
4148 #endif /* EWP_EDL */
4149 /* if ret > 0, bound has reached so to be fair to other
4150 * processes need to yield the scheduler.
4151 * The comment above yield()'s definition says:
4152 * If you want to use yield() to wait for something,
4153 * use wait_event().
4154 * If you want to use yield() to be 'nice' for others,
4155 * use cond_resched().
4156 * If you still want to use yield(), do not!
4157 */
4158 if (ret > 0) {
4159 cond_resched();
4160 OSL_SLEEP(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS);
4161 } else if (ret < 0) {
4162 DHD_ERROR(("%s: ERROR should not reach here\n",
4163 __FUNCTION__));
4164 }
4165 } while (ret > 0);
4166 }
4167 if (tsk->flush_ind) {
4168 DHD_ERROR(("%s: flushed\n", __FUNCTION__));
4169 dhdp->logtrace_thr_ts.flush_time = OSL_LOCALTIME_NS();
4170 tsk->flush_ind = 0;
4171 complete(&tsk->flushed);
4172 }
4173 } else {
			DHD_ERROR(("%s: unexpected break\n", __FUNCTION__));
4175 dhdp->logtrace_thr_ts.unexpected_break_time = OSL_LOCALTIME_NS();
4176 break;
4177 }
4178 }
exit:
	/* Record the completion timestamp before complete_and_exit(),
	 * which never returns.
	 */
	dhdp->logtrace_thr_ts.complete_time = OSL_LOCALTIME_NS();
	complete_and_exit(&tsk->completed, 0);
}
4183 #else
4184 static void
4185 dhd_event_logtrace_process(struct work_struct * work)
4186 {
4187 int ret = 0;
4188 /* Ignore compiler warnings due to -Werror=cast-qual */
4189 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4190 #pragma GCC diagnostic push
4191 #pragma GCC diagnostic ignored "-Wcast-qual"
4192 #endif // endif
4193 struct delayed_work *dw = to_delayed_work(work);
4194 struct dhd_info *dhd =
4195 container_of(dw, struct dhd_info, event_log_dispatcher_work);
4196 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
4197 #pragma GCC diagnostic pop
4198 #endif // endif
4199 #ifdef EWP_EDL
4200 if (dhd->pub.dongle_edl_support) {
4201 ret = dhd_prot_process_edl_complete(&dhd->pub, &dhd->event_data);
4202 } else {
4203 ret = dhd_event_logtrace_process_items(dhd);
4204 }
4205 #else
4206 ret = dhd_event_logtrace_process_items(dhd);
4207 #endif /* EWP_EDL */
4208
4209 if (ret > 0) {
4210 schedule_delayed_work(&(dhd)->event_log_dispatcher_work,
4211 msecs_to_jiffies(DHD_EVENT_LOGTRACE_RESCHEDULE_DELAY_MS));
4212 }
4213
4214 return;
4215 }
4216 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4217
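/* Kick logtrace processing: raise the kthread's binary semaphore, or
 * schedule the delayed work when the workqueue implementation is used.
 */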
4218 void
4219 dhd_schedule_logtrace(void *dhd_info)
4220 {
4221 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
4222
4223 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4224 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4225 binary_sema_up(&dhd->thr_logtrace_ctl);
4226 } else {
4227 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4228 dhd->thr_logtrace_ctl.thr_pid));
4229 }
4230 #else
4231 schedule_delayed_work(&dhd->event_log_dispatcher_work, 0);
4232 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4233 return;
4234 }
4235
4236 void
4237 dhd_cancel_logtrace_process_sync(dhd_info_t *dhd)
4238 {
4239 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4240 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4241 PROC_STOP_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4242 } else {
4243 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4244 dhd->thr_logtrace_ctl.thr_pid));
4245 }
4246 #else
4247 cancel_delayed_work_sync(&dhd->event_log_dispatcher_work);
4248 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4249 }
4250
4251 void
4252 dhd_flush_logtrace_process(dhd_info_t *dhd)
4253 {
4254 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4255 if (dhd->thr_logtrace_ctl.thr_pid >= 0) {
4256 PROC_FLUSH_USING_BINARY_SEMA(&dhd->thr_logtrace_ctl);
4257 } else {
4258 DHD_ERROR(("%s: thr_logtrace_ctl(%ld) not inited\n", __FUNCTION__,
4259 dhd->thr_logtrace_ctl.thr_pid));
4260 }
4261 #else
4262 flush_delayed_work(&dhd->event_log_dispatcher_work);
4263 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4264 }
4265
4266 int
4267 dhd_init_logtrace_process(dhd_info_t *dhd)
4268 {
4269 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4270 dhd->thr_logtrace_ctl.thr_pid = DHD_PID_KT_INVALID;
4271 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl, 0, "dhd_logtrace_thread");
4272 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4273 DHD_ERROR(("%s: init logtrace process failed\n", __FUNCTION__));
4274 return BCME_ERROR;
4275 } else {
		DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
4277 dhd->thr_logtrace_ctl.thr_pid));
4278 }
4279 #else
4280 INIT_DELAYED_WORK(&dhd->event_log_dispatcher_work, dhd_event_logtrace_process);
4281 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4282 return BCME_OK;
4283 }
4284
4285 int
4286 dhd_reinit_logtrace_process(dhd_info_t *dhd)
4287 {
4288 #ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
4289 /* Re-init only if PROC_STOP from dhd_stop was called
4290 * which can be checked via thr_pid
4291 */
4292 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4293 PROC_START(dhd_logtrace_thread, dhd, &dhd->thr_logtrace_ctl,
4294 0, "dhd_logtrace_thread");
4295 if (dhd->thr_logtrace_ctl.thr_pid < 0) {
4296 DHD_ERROR(("%s: reinit logtrace process failed\n", __FUNCTION__));
4297 return BCME_ERROR;
4298 } else {
			DHD_ERROR(("%s: thr_logtrace_ctl(%ld) succeeded\n", __FUNCTION__,
4300 dhd->thr_logtrace_ctl.thr_pid));
4301 }
4302 }
4303 #else
	/* No need to re-init for WQ as cancel_delayed_work_sync will
	 * not delete the WQ
	 */
4307 #endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
4308 return BCME_OK;
4309 }
4310
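/* Tag the packet with its ifidx (PCIe full dongle only), queue it on
 * evt_trace_queue and kick the logtrace processing context.
 */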
4311 void
4312 dhd_event_logtrace_enqueue(dhd_pub_t *dhdp, int ifidx, void *pktbuf)
4313 {
4314 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4315
4316 #ifdef PCIE_FULL_DONGLE
4317 /* Add ifidx in the PKTTAG */
4318 DHD_PKTTAG_SET_IFID((dhd_pkttag_fr_t *)PKTTAG(pktbuf), ifidx);
4319 #endif /* PCIE_FULL_DONGLE */
4320 skb_queue_tail(&dhd->evt_trace_queue, pktbuf);
4321
4322 dhd_schedule_logtrace(dhd);
4323 }
4324
4325 void
4326 dhd_event_logtrace_flush_queue(dhd_pub_t *dhdp)
4327 {
4328 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4329 struct sk_buff *skb;
4330
4331 while ((skb = skb_dequeue(&dhd->evt_trace_queue)) != NULL) {
4332 #ifdef DHD_USE_STATIC_CTRLBUF
4333 PKTFREE_STATIC(dhdp->osh, skb, FALSE);
4334 #else
4335 PKTFREE(dhdp->osh, skb, FALSE);
4336 #endif /* DHD_USE_STATIC_CTRLBUF */
4337 }
4338 }
4339
4340 #ifdef BCMPCIE
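/* Copy an info-buffer message into a freshly allocated packet, attach it
 * to the primary interface's net device and send it up the stack.
 */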
4341 void
4342 dhd_sendup_info_buf(dhd_pub_t *dhdp, uint8 *msg)
4343 {
4344 struct sk_buff *skb = NULL;
4345 uint32 pktsize = 0;
4346 void *pkt = NULL;
4347 info_buf_payload_hdr_t *infobuf = NULL;
4348 dhd_info_t *dhd = dhdp->info;
4349 uint8 *pktdata = NULL;
4350
4351 if (!msg)
4352 return;
4353
4354 /* msg = |infobuf_ver(u32)|info_buf_payload_hdr_t|msgtrace_hdr_t|<var len data>| */
4355 infobuf = (info_buf_payload_hdr_t *)(msg + sizeof(uint32));
4356 pktsize = (uint32)(ltoh16(infobuf->length) + sizeof(info_buf_payload_hdr_t) +
4357 sizeof(uint32));
4358 pkt = PKTGET(dhdp->osh, pktsize, FALSE);
4359 if (!pkt) {
		DHD_ERROR(("%s: skb alloc failed! Not sending event log up.\n", __FUNCTION__));
4361 } else {
4362 PKTSETLEN(dhdp->osh, pkt, pktsize);
4363 pktdata = PKTDATA(dhdp->osh, pkt);
4364 memcpy(pktdata, msg, pktsize);
4365 /* For infobuf packets assign skb->dev with
4366 * Primary interface n/w device
4367 */
4368 skb = PKTTONATIVE(dhdp->osh, pkt);
4369 skb->dev = dhd->iflist[0]->net;
4370 /* Send pkt UP */
4371 dhd_netif_rx_ni(skb);
4372 }
4373 }
4374 #endif /* BCMPCIE */
4375 #endif /* SHOW_LOGTRACE */
4376
4377 /** Called when a frame is received by the dongle on interface 'ifidx' */
4378 void
4379 dhd_rx_frame(dhd_pub_t *dhdp, int ifidx, void *pktbuf, int numpkt, uint8 chan)
4380 {
4381 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
4382 struct sk_buff *skb;
4383 uchar *eth;
4384 uint len;
4385 void *data, *pnext = NULL;
4386 int i;
4387 dhd_if_t *ifp;
4388 wl_event_msg_t event;
4389 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
4390 int tout_rx = 0;
4391 int tout_ctrl = 0;
4392 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
4393 void *skbhead = NULL;
4394 void *skbprev = NULL;
4395 uint16 protocol;
4396 unsigned char *dump_data;
4397 #ifdef DHD_MCAST_REGEN
4398 uint8 interface_role;
4399 if_flow_lkup_t *if_flow_lkup;
4400 unsigned long flags;
4401 #endif // endif
4402 #ifdef DHD_WAKE_STATUS
4403 int pkt_wake = 0;
4404 wake_counts_t *wcp = NULL;
4405 #endif /* DHD_WAKE_STATUS */
4406
4407 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
4408 BCM_REFERENCE(dump_data);
4409
4410 for (i = 0; pktbuf && i < numpkt; i++, pktbuf = pnext) {
4411 struct ether_header *eh;
4412
4413 pnext = PKTNEXT(dhdp->osh, pktbuf);
4414 PKTSETNEXT(dhdp->osh, pktbuf, NULL);
4415
		/* info ring "debug" data, which is not an 802.3 frame, is sent/hacked with a
4417 * special ifidx of DHD_DUMMY_INFO_IF. This is just internal to dhd to get the data
4418 * from dhd_msgbuf.c:dhd_prot_infobuf_cmplt_process() to here (dhd_rx_frame).
4419 */
4420 if (ifidx == DHD_DUMMY_INFO_IF) {
			/* Event msg printing is called from dhd_rx_frame, which runs in
			 * tasklet context in case of PCIe FD; for other buses this will be
			 * in DPC context. If we get a bunch of events from the dongle,
			 * printing all of them from tasklet/DPC context, in the data path,
			 * is costly. Also, in newer dongle SW (4359, 4355 onwards) console
			 * prints too come as events with type WLC_E_TRACE.
			 * We'll print these console logs from the workqueue context by
			 * enqueuing the SKB here; dequeuing is done in the workqueue, and
			 * the SKB is freed only if logtrace_pkt_sendup is TRUE.
			 */
4431 #ifdef SHOW_LOGTRACE
4432 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
4433 #else /* !SHOW_LOGTRACE */
4434 /* If SHOW_LOGTRACE not defined and ifidx is DHD_DUMMY_INFO_IF,
4435 * free the PKT here itself
4436 */
4437 #ifdef DHD_USE_STATIC_CTRLBUF
4438 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4439 #else
4440 PKTFREE(dhdp->osh, pktbuf, FALSE);
4441 #endif /* DHD_USE_STATIC_CTRLBUF */
4442 #endif /* SHOW_LOGTRACE */
4443 continue;
4444 }
4445 #ifdef DHD_WAKE_STATUS
4446 pkt_wake = dhd_bus_get_bus_wake(dhdp);
4447 wcp = dhd_bus_get_wakecount(dhdp);
4448 if (wcp == NULL) {
4449 /* If wakeinfo count buffer is null do not update wake count values */
4450 pkt_wake = 0;
4451 }
4452 #endif /* DHD_WAKE_STATUS */
4453
4454 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4455
4456 if (ifidx >= DHD_MAX_IFS) {
4457 DHD_ERROR(("%s: ifidx(%d) Out of bound. drop packet\n",
4458 __FUNCTION__, ifidx));
4459 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4460 #ifdef DHD_USE_STATIC_CTRLBUF
4461 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4462 #else
4463 PKTFREE(dhdp->osh, pktbuf, FALSE);
4464 #endif /* DHD_USE_STATIC_CTRLBUF */
4465 } else {
4466 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4467 }
4468 continue;
4469 }
4470
4471 ifp = dhd->iflist[ifidx];
4472 if (ifp == NULL) {
4473 DHD_ERROR(("%s: ifp is NULL. drop packet\n",
4474 __FUNCTION__));
4475 if (ntoh16(eh->ether_type) == ETHER_TYPE_BRCM) {
4476 #ifdef DHD_USE_STATIC_CTRLBUF
4477 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4478 #else
4479 PKTFREE(dhdp->osh, pktbuf, FALSE);
4480 #endif /* DHD_USE_STATIC_CTRLBUF */
4481 } else {
4482 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4483 }
4484 continue;
4485 }
4486
4487 /* Dropping only data packets before registering net device to avoid kernel panic */
4488 #ifndef PROP_TXSTATUS_VSDB
4489 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED) &&
4490 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4491 #else
4492 if ((!ifp->net || ifp->net->reg_state != NETREG_REGISTERED || !dhd->pub.up) &&
4493 (ntoh16(eh->ether_type) != ETHER_TYPE_BRCM))
4494 #endif /* PROP_TXSTATUS_VSDB */
4495 {
4496 DHD_ERROR(("%s: net device is NOT registered yet. drop packet\n",
4497 __FUNCTION__));
4498 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4499 continue;
4500 }
4501
4502 #ifdef PROP_TXSTATUS
4503 if (dhd_wlfc_is_header_only_pkt(dhdp, pktbuf)) {
			/* WLFC may send a header-only packet when there is an
			 * urgent message but no packet to piggy-back on
			 */
4508 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4509 continue;
4510 }
4511 #endif // endif
4512 #ifdef DHD_L2_FILTER
4513 /* If block_ping is enabled drop the ping packet */
4514 if (ifp->block_ping) {
4515 if (bcm_l2_filter_block_ping(dhdp->osh, pktbuf) == BCME_OK) {
4516 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4517 continue;
4518 }
4519 }
4520 if (ifp->grat_arp && DHD_IF_ROLE_STA(dhdp, ifidx)) {
4521 if (bcm_l2_filter_gratuitous_arp(dhdp->osh, pktbuf) == BCME_OK) {
4522 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4523 continue;
4524 }
4525 }
4526 if (ifp->parp_enable && DHD_IF_ROLE_AP(dhdp, ifidx)) {
4527 int ret = dhd_l2_filter_pkt_handle(dhdp, ifidx, pktbuf, FALSE);
4528
4529 /* Drop the packets if l2 filter has processed it already
4530 * otherwise continue with the normal path
4531 */
4532 if (ret == BCME_OK) {
4533 PKTCFREE(dhdp->osh, pktbuf, TRUE);
4534 continue;
4535 }
4536 }
4537 if (ifp->block_tdls) {
4538 if (bcm_l2_filter_block_tdls(dhdp->osh, pktbuf) == BCME_OK) {
4539 PKTCFREE(dhdp->osh, pktbuf, FALSE);
4540 continue;
4541 }
4542 }
4543 #endif /* DHD_L2_FILTER */
4544
4545 #ifdef DHD_MCAST_REGEN
4546 DHD_FLOWID_LOCK(dhdp->flowid_lock, flags);
4547 if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
4548 ASSERT(if_flow_lkup);
4549
4550 interface_role = if_flow_lkup[ifidx].role;
4551 DHD_FLOWID_UNLOCK(dhdp->flowid_lock, flags);
4552
4553 if (ifp->mcast_regen_bss_enable && (interface_role != WLC_E_IF_ROLE_WDS) &&
4554 !DHD_IF_ROLE_AP(dhdp, ifidx) &&
4555 ETHER_ISUCAST(eh->ether_dhost)) {
4556 if (dhd_mcast_reverse_translation(eh) == BCME_OK) {
4557 #ifdef DHD_PSTA
4558 /* Change bsscfg to primary bsscfg for unicast-multicast packets */
4559 if ((dhd_get_psta_mode(dhdp) == DHD_MODE_PSTA) ||
4560 (dhd_get_psta_mode(dhdp) == DHD_MODE_PSR)) {
4561 if (ifidx != 0) {
4562 /* Let the primary in PSTA interface handle this
4563 * frame after unicast to Multicast conversion
4564 */
4565 ifp = dhd_get_ifp(dhdp, 0);
4566 ASSERT(ifp);
4567 }
4568 }
#endif /* DHD_PSTA */
			}
		}
#endif /* DHD_MCAST_REGEN */
4573
4574 #ifdef DHDTCPSYNC_FLOOD_BLK
4575 if (dhd_tcpdata_get_flag(dhdp, pktbuf) == FLAG_SYNC) {
4576 int delta_sec;
4577 int delta_sync;
4578 int sync_per_sec;
4579 u64 curr_time = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
			ifp->tsync_rcvd++;
4581 delta_sync = ifp->tsync_rcvd - ifp->tsyncack_txed;
4582 delta_sec = curr_time - ifp->last_sync;
4583 if (delta_sec > 1) {
4584 sync_per_sec = delta_sync/delta_sec;
4585 if (sync_per_sec > TCP_SYNC_FLOOD_LIMIT) {
4586 schedule_work(&ifp->blk_tsfl_work);
4587 DHD_ERROR(("ifx %d TCP SYNC Flood attack suspected! "
						"sync received %d pkt/sec \n",
4589 ifidx, sync_per_sec));
4590 }
4591 dhd_reset_tcpsync_info_by_ifp(ifp);
4592 }
4593
4594 }
4595 #endif /* DHDTCPSYNC_FLOOD_BLK */
4596
4597 #ifdef DHDTCPACK_SUPPRESS
4598 dhd_tcpdata_info_get(dhdp, pktbuf);
4599 #endif // endif
4600 skb = PKTTONATIVE(dhdp->osh, pktbuf);
4601
4602 ASSERT(ifp);
4603 skb->dev = ifp->net;
4604 #ifdef DHD_WET
4605 /* wet related packet proto manipulation should be done in DHD
4606 * since dongle doesn't have complete payload
4607 */
4608 if (WET_ENABLED(&dhd->pub) && (dhd_wet_recv_proc(dhd->pub.wet_info,
4609 pktbuf) < 0)) {
4610 DHD_INFO(("%s:%s: wet recv proc failed\n",
4611 __FUNCTION__, dhd_ifname(dhdp, ifidx)));
4612 }
4613 #endif /* DHD_WET */
4614
4615 #ifdef DHD_PSTA
4616 if (PSR_ENABLED(dhdp) &&
4617 (dhd_psta_proc(dhdp, ifidx, &pktbuf, FALSE) < 0)) {
4618 DHD_ERROR(("%s:%s: psta recv proc failed\n", __FUNCTION__,
4619 dhd_ifname(dhdp, ifidx)));
4620 }
4621 #endif /* DHD_PSTA */
4622
4623 DHD_TRACE(("\nAp isolate in dhd is %d\n", ifp->ap_isolate));
4624 if (ifidx >= 0 && dhdp != NULL && dhdp->info != NULL &&
4625 dhdp->info->iflist[ifidx] != NULL) {
4626 if ((DHD_IF_ROLE_AP(dhdp, ifidx) || DHD_IF_ROLE_P2PGO(dhdp, ifidx)) &&
4627 (!ifp->ap_isolate)) {
4628 DHD_TRACE(("%s: MACADDR: " MACDBG " ifidx %d\n",
4629 __FUNCTION__,
4630 MAC2STRDBG(dhdp->info->iflist[ifidx]->mac_addr),
4631 ifidx));
4632 DHD_TRACE(("%s: DEST: " MACDBG " ifidx %d\n",
4633 __FUNCTION__, MAC2STRDBG(eh->ether_dhost), ifidx));
4634 eh = (struct ether_header *)PKTDATA(dhdp->osh, pktbuf);
4635 if (ETHER_ISUCAST(eh->ether_dhost)) {
4636 if (dhd_find_sta(dhdp, ifidx, (void *)eh->ether_dhost)) {
						DHD_TRACE(("\nPacket not for us, send down\n"));
4638 dhd_sendpkt(dhdp, ifidx, pktbuf);
4639 continue;
4640 }
4641 } else {
4642 void *npktbuf = PKTDUP(dhdp->osh, pktbuf);
4643 if (npktbuf) {
						DHD_TRACE(("\ncalling bcmc dhd_sendpkt "
							"and send dup up\n"));
4646 dhd_sendpkt(dhdp, ifidx, npktbuf);
4647 }
4648 }
4649 }
4650 }
4651
4652 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
4653 if (IS_STA_IFACE(ndev_to_wdev(ifp->net)) &&
4654 (ifp->recv_reassoc_evt == TRUE) && (ifp->post_roam_evt == FALSE) &&
4655 (dhd_is_4way_msg((char *)(skb->data)) == EAPOL_4WAY_M1)) {
4656 DHD_ERROR(("%s: Reassoc is in progress. "
4657 "Drop EAPOL M1 frame\n", __FUNCTION__));
4658 PKTFREE(dhdp->osh, pktbuf, FALSE);
4659 continue;
4660 }
4661 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
		/* Get the protocol, maintain skb around eth_type_trans().
		 * The main reason for this hack is the limitation of Linux 2.4,
		 * where 'eth_type_trans' uses 'net->hard_header_len' rather than
		 * ETH_HLEN to perform the skb_pull inside. To avoid copying the
		 * packet coming from the network stack to add BDC, hardware
		 * header etc., during network interface registration we set
		 * 'net->hard_header_len' to ETH_HLEN plus the extra space
		 * required for BDC, hardware header etc., and not just ETH_HLEN.
		 */
4671 eth = skb->data;
4672 len = skb->len;
4673 dump_data = skb->data;
4674 protocol = (skb->data[12] << 8) | skb->data[13];
4675
4676 if (protocol == ETHER_TYPE_802_1X) {
4677 DBG_EVENT_LOG(dhdp, WIFI_EVENT_DRIVER_EAPOL_FRAME_RECEIVED);
4678 #if defined(WL_CFG80211) && defined(WL_WPS_SYNC)
4679 wl_handle_wps_states(ifp->net, dump_data, len, FALSE);
4680 #endif /* WL_CFG80211 && WL_WPS_SYNC */
4681 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
4682 if (dhd_is_4way_msg((uint8 *)(skb->data)) == EAPOL_4WAY_M3) {
4683 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M3_RXED);
4684 }
4685 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
4686 }
4687 dhd_rx_pkt_dump(dhdp, ifidx, dump_data, len);
4688 dhd_dump_pkt(dhdp, ifidx, dump_data, len, FALSE, NULL, NULL);
4689
4690 #if defined(DHD_WAKE_STATUS) && defined(DHD_WAKEPKT_DUMP)
4691 if (pkt_wake) {
4692 prhex("[wakepkt_dump]", (char*)dump_data, MIN(len, 32));
4693 }
4694 #endif /* DHD_WAKE_STATUS && DHD_WAKEPKT_DUMP */
4695
4696 skb->protocol = eth_type_trans(skb, skb->dev);
4697
4698 if (skb->pkt_type == PACKET_MULTICAST) {
4699 dhd->pub.rx_multicast++;
4700 ifp->stats.multicast++;
4701 }
4702
4703 skb->data = eth;
4704 skb->len = len;
4705
4706 DHD_DBG_PKT_MON_RX(dhdp, skb);
4707 #ifdef DHD_PKT_LOGGING
4708 DHD_PKTLOG_RX(dhdp, skb);
4709 #endif /* DHD_PKT_LOGGING */
4710 /* Strip header, count, deliver upward */
4711 skb_pull(skb, ETH_HLEN);
4712
4713 /* Process special event packets and then discard them */
4714 memset(&event, 0, sizeof(event));
4715
4716 if (ntoh16(skb->protocol) == ETHER_TYPE_BRCM) {
4717 bcm_event_msg_u_t evu;
4718 int ret_event, event_type;
4719 void *pkt_data = skb_mac_header(skb);
4720
4721 ret_event = wl_host_event_get_data(pkt_data, len, &evu);
4722
4723 if (ret_event != BCME_OK) {
4724 DHD_ERROR(("%s: wl_host_event_get_data err = %d\n",
4725 __FUNCTION__, ret_event));
4726 #ifdef DHD_USE_STATIC_CTRLBUF
4727 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4728 #else
4729 PKTFREE(dhdp->osh, pktbuf, FALSE);
4730 #endif // endif
4731 continue;
4732 }
4733
4734 memcpy(&event, &evu.event, sizeof(wl_event_msg_t));
4735 event_type = ntoh32_ua((void *)&event.event_type);
4736 #ifdef SHOW_LOGTRACE
			/* Event msg printing is called from dhd_rx_frame, which runs in
			 * tasklet context in case of PCIe FD; for other buses this will be
			 * in DPC context. If we get a bunch of events from the dongle,
			 * printing all of them from tasklet/DPC context, in the data path,
			 * is costly. Also, in newer dongle SW (4359, 4355 onwards) console
			 * prints too come as events with type WLC_E_TRACE.
			 * We'll print these console logs from the workqueue context by
			 * enqueuing the SKB here; dequeuing is done in the workqueue, and
			 * the SKB is freed only if logtrace_pkt_sendup is true.
			 */
4747 if (event_type == WLC_E_TRACE) {
4748 DHD_TRACE(("%s: WLC_E_TRACE\n", __FUNCTION__));
4749 dhd_event_logtrace_enqueue(dhdp, ifidx, pktbuf);
4750 continue;
4751 }
4752 #endif /* SHOW_LOGTRACE */
4753
4754 ret_event = dhd_wl_host_event(dhd, ifidx, pkt_data, len, &event, &data);
4755
4756 wl_event_to_host_order(&event);
4757 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
4758 if (!tout_ctrl)
4759 tout_ctrl = DHD_PACKET_TIMEOUT_MS;
4760 #endif /* (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX)) */
4761
4762 #if (defined(OEM_ANDROID) && defined(PNO_SUPPORT))
4763 if (event_type == WLC_E_PFN_NET_FOUND) {
				/* enforce custom wake lock to guarantee that the kernel is not suspended */
4765 tout_ctrl = CUSTOM_PNO_EVENT_LOCK_xTIME * DHD_PACKET_TIMEOUT_MS;
4766 }
4767 #endif /* PNO_SUPPORT */
4768 if (numpkt != 1) {
4769 DHD_TRACE(("%s: Got BRCM event packet in a chained packet.\n",
4770 __FUNCTION__));
4771 }
4772
4773 #ifdef DHD_WAKE_STATUS
4774 if (unlikely(pkt_wake)) {
4775 #ifdef DHD_WAKE_EVENT_STATUS
4776 if (event.event_type < WLC_E_LAST) {
4777 wcp->rc_event[event.event_type]++;
4778 wcp->rcwake++;
4779 pkt_wake = 0;
4780 }
4781 #endif /* DHD_WAKE_EVENT_STATUS */
4782 }
4783 #endif /* DHD_WAKE_STATUS */
4784
			/* For the delete virtual interface event, wl_host_event returns a
			 * positive i/f index; do not proceed, just free the pkt.
			 */
4788 if ((event_type == WLC_E_IF) && (ret_event > 0)) {
4789 DHD_ERROR(("%s: interface is deleted. Free event packet\n",
4790 __FUNCTION__));
4791 #ifdef DHD_USE_STATIC_CTRLBUF
4792 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4793 #else
4794 PKTFREE(dhdp->osh, pktbuf, FALSE);
4795 #endif // endif
4796 continue;
4797 }
4798
			/*
			 * For event packets, there is a possibility of ifidx getting
			 * modified. Thus update the ifp once again.
			 */
4804 ASSERT(ifidx < DHD_MAX_IFS && dhd->iflist[ifidx]);
4805 ifp = dhd->iflist[ifidx];
4806 #ifndef PROP_TXSTATUS_VSDB
4807 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED)))
4808 #else
4809 if (!(ifp && ifp->net && (ifp->net->reg_state == NETREG_REGISTERED) &&
4810 dhd->pub.up))
4811 #endif /* PROP_TXSTATUS_VSDB */
4812 {
4813 DHD_ERROR(("%s: net device is NOT registered. drop event packet\n",
4814 __FUNCTION__));
4815 #ifdef DHD_USE_STATIC_CTRLBUF
4816 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4817 #else
4818 PKTFREE(dhdp->osh, pktbuf, FALSE);
4819 #endif // endif
4820 continue;
4821 }
4822
4823 if (dhdp->wl_event_enabled) {
4824 #ifdef DHD_USE_STATIC_CTRLBUF
4825 /* If event bufs are allocated via static buf pool
4826 * and wl events are enabled, make a copy, free the
4827 * local one and send the copy up.
4828 */
4829 void *npkt = PKTDUP(dhdp->osh, skb);
4830 /* Clone event and send it up */
4831 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4832 if (npkt) {
4833 skb = npkt;
4834 } else {
4835 DHD_ERROR(("skb clone failed. dropping event.\n"));
4836 continue;
4837 }
4838 #endif /* DHD_USE_STATIC_CTRLBUF */
4839 } else {
			/* If event forwarding is not explicitly enabled, drop events */
4841 #ifdef DHD_USE_STATIC_CTRLBUF
4842 PKTFREE_STATIC(dhdp->osh, pktbuf, FALSE);
4843 #else
4844 PKTFREE(dhdp->osh, pktbuf, FALSE);
4845 #endif /* DHD_USE_STATIC_CTRLBUF */
4846 continue;
4847 }
4848 } else {
4849 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
4850 tout_rx = DHD_PACKET_TIMEOUT_MS;
4851 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
4852
4853 #ifdef PROP_TXSTATUS
4854 dhd_wlfc_save_rxpath_ac_time(dhdp, (uint8)PKTPRIO(skb));
4855 #endif /* PROP_TXSTATUS */
4856
4857 #ifdef DHD_WAKE_STATUS
4858 if (unlikely(pkt_wake)) {
4859 wcp->rxwake++;
4860 #ifdef DHD_WAKE_RX_STATUS
4861 #define ETHER_ICMP6_HEADER 20
4862 #define ETHER_IPV6_SADDR (ETHER_ICMP6_HEADER + 2)
4863 #define ETHER_IPV6_DAADR (ETHER_IPV6_SADDR + IPV6_ADDR_LEN)
4864 #define ETHER_ICMPV6_TYPE (ETHER_IPV6_DAADR + IPV6_ADDR_LEN)
4865
4866 if (ntoh16(skb->protocol) == ETHER_TYPE_ARP) /* ARP */
4867 wcp->rx_arp++;
4868 if (dump_data[0] == 0xFF) { /* Broadcast */
4869 wcp->rx_bcast++;
4870 } else if (dump_data[0] & 0x01) { /* Multicast */
4871 wcp->rx_mcast++;
4872 if (ntoh16(skb->protocol) == ETHER_TYPE_IPV6) {
4873 wcp->rx_multi_ipv6++;
4874 if ((skb->len > ETHER_ICMP6_HEADER) &&
4875 (dump_data[ETHER_ICMP6_HEADER] == IPPROTO_ICMPV6)) {
4876 wcp->rx_icmpv6++;
4877 if (skb->len > ETHER_ICMPV6_TYPE) {
4878 switch (dump_data[ETHER_ICMPV6_TYPE]) {
4879 case NDISC_ROUTER_ADVERTISEMENT:
4880 wcp->rx_icmpv6_ra++;
4881 break;
4882 case NDISC_NEIGHBOUR_ADVERTISEMENT:
4883 wcp->rx_icmpv6_na++;
4884 break;
4885 case NDISC_NEIGHBOUR_SOLICITATION:
4886 wcp->rx_icmpv6_ns++;
4887 break;
4888 }
4889 }
4890 }
4891 } else if (dump_data[2] == 0x5E) {
4892 wcp->rx_multi_ipv4++;
4893 } else {
4894 wcp->rx_multi_other++;
4895 }
4896 } else { /* Unicast */
4897 wcp->rx_ucast++;
4898 }
4899 #undef ETHER_ICMP6_HEADER
4900 #undef ETHER_IPV6_SADDR
4901 #undef ETHER_IPV6_DAADR
4902 #undef ETHER_ICMPV6_TYPE
4903 #endif /* DHD_WAKE_RX_STATUS */
4904 pkt_wake = 0;
4905 }
4906 #endif /* DHD_WAKE_STATUS */
4907 }
4908
4909 #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
4910 ifp->net->last_rx = jiffies;
4911 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0) */
4912
4913 if (ntoh16(skb->protocol) != ETHER_TYPE_BRCM) {
4914 dhdp->dstats.rx_bytes += skb->len;
4915 dhdp->rx_packets++; /* Local count */
4916 ifp->stats.rx_bytes += skb->len;
4917 ifp->stats.rx_packets++;
4918 }
4919
4920 if (in_interrupt()) {
4921 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4922 __FUNCTION__, __LINE__);
4923 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4924 #if defined(DHD_LB_RXP)
4925 netif_receive_skb(skb);
4926 #else /* !defined(DHD_LB_RXP) */
4927 netif_rx(skb);
4928 #endif /* !defined(DHD_LB_RXP) */
4929 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4930 } else {
4931 if (dhd->rxthread_enabled) {
4932 if (!skbhead)
4933 skbhead = skb;
4934 else
4935 PKTSETNEXT(dhdp->osh, skbprev, skb);
4936 skbprev = skb;
4937 } else {
4938
4939 /* If the receive is not processed inside an ISR,
4940 * the softirqd must be woken explicitly to service
4941 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
4942 * by netif_rx_ni(), but in earlier kernels, we need
4943 * to do it manually.
4944 */
4945 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
4946 __FUNCTION__, __LINE__);
4947
4948 #if defined(ARGOS_NOTIFY_CB)
4949 argos_register_notifier_deinit();
4950 #endif // endif
4951 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
4952 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
4953 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
4954 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4955 #if defined(DHD_LB_RXP)
4956 netif_receive_skb(skb);
4957 #else /* !defined(DHD_LB_RXP) */
4958 netif_rx_ni(skb);
4959 #endif /* defined(DHD_LB_RXP) */
4960 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
4961 }
4962 }
4963 }
4964
4965 if (dhd->rxthread_enabled && skbhead)
4966 dhd_sched_rxf(dhdp, skbhead);
4967
4968 #if (defined(OEM_ANDROID) || defined(OEM_EMBEDDED_LINUX))
4969 DHD_OS_WAKE_LOCK_RX_TIMEOUT_ENABLE(dhdp, tout_rx);
4970 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(dhdp, tout_ctrl);
4971 #endif /* OEM_ANDROID || OEM_EMBEDDED_LINUX */
4972 }
4973
4974 void
4975 dhd_event(struct dhd_info *dhd, char *evpkt, int evlen, int ifidx)
4976 {
4977 /* Linux version has nothing to do */
4978 return;
4979 }
4980
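/* Per-packet tx completion: pulls the protocol header, decrements the
 * pending 802.1X count for EAPOL frames and, under proptxstatus, updates
 * the per-interface tx counters.
 */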
4981 void
4982 dhd_txcomplete(dhd_pub_t *dhdp, void *txp, bool success)
4983 {
4984 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
4985 struct ether_header *eh;
4986 uint16 type;
4987
4988 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
4989
4990 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
4991 type = ntoh16(eh->ether_type);
4992
4993 if (type == ETHER_TYPE_802_1X) {
4994 atomic_dec(&dhd->pend_8021x_cnt);
4995 }
4996
4997 #ifdef PROP_TXSTATUS
4998 if (dhdp->wlfc_state && (dhdp->proptxstatus_mode != WLFC_FCMODE_NONE)) {
4999 dhd_if_t *ifp = dhd->iflist[DHD_PKTTAG_IF(PKTTAG(txp))];
5000 uint datalen = PKTLEN(dhd->pub.osh, txp);
5001 if (ifp != NULL) {
5002 if (success) {
5003 dhd->pub.tx_packets++;
5004 ifp->stats.tx_packets++;
5005 ifp->stats.tx_bytes += datalen;
5006 } else {
5007 ifp->stats.tx_dropped++;
5008 }
5009 }
5010 }
5011 #endif // endif
5012 }
5013
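/* net_device_ops get_stats handler: refresh the dongle counters through
 * the protocol layer when the device is up and return the per-interface
 * stats; zeroed stats are returned on error.
 */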
5014 static struct net_device_stats *
5015 dhd_get_stats(struct net_device *net)
5016 {
5017 dhd_info_t *dhd = DHD_DEV_INFO(net);
5018 dhd_if_t *ifp;
5019
5020 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5021
5022 if (!dhd) {
5023 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
5024 goto error;
5025 }
5026
5027 ifp = dhd_get_ifp_by_ndev(&dhd->pub, net);
5028 if (!ifp) {
5029 /* return empty stats */
5030 DHD_ERROR(("%s: BAD_IF\n", __FUNCTION__));
5031 goto error;
5032 }
5033
5034 if (dhd->pub.up) {
5035 /* Use the protocol to get dongle stats */
5036 dhd_prot_dstats(&dhd->pub);
5037 }
5038 return &ifp->stats;
5039
5040 error:
5041 memset(&net->stats, 0, sizeof(net->stats));
5042 return &net->stats;
5043 }
5044
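/* Watchdog kthread: each semaphore tick runs the bus watchdog, counts the
 * tick and re-arms the watchdog timer, compensating for the time already
 * spent in this pass.
 */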
5045 static int
5046 dhd_watchdog_thread(void *data)
5047 {
5048 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5049 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5050 /* This thread doesn't need any user-level access,
5051 * so get rid of all our resources
5052 */
5053 if (dhd_watchdog_prio > 0) {
5054 struct sched_param param;
5055 param.sched_priority = (dhd_watchdog_prio < MAX_RT_PRIO)?
5056 dhd_watchdog_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
5058 }
5059
5060 while (1) {
5061 if (down_interruptible (&tsk->sema) == 0) {
5062 unsigned long flags;
5063 unsigned long jiffies_at_start = jiffies;
5064 unsigned long time_lapse;
5065 #ifdef BCMPCIE
5066 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5067 #endif /* BCMPCIE */
5068
5069 SMP_RD_BARRIER_DEPENDS();
5070 if (tsk->terminated) {
5071 #ifdef BCMPCIE
5072 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5073 #endif /* BCMPCIE */
5074 break;
5075 }
5076
5077 if (dhd->pub.dongle_reset == FALSE) {
5078 DHD_TIMER(("%s:\n", __FUNCTION__));
5079 dhd_bus_watchdog(&dhd->pub);
5080
5081 DHD_GENERAL_LOCK(&dhd->pub, flags);
5082 /* Count the tick for reference */
5083 dhd->pub.tickcnt++;
5084 #ifdef DHD_L2_FILTER
5085 dhd_l2_filter_watchdog(&dhd->pub);
5086 #endif /* DHD_L2_FILTER */
5087 time_lapse = jiffies - jiffies_at_start;
5088
5089 /* Reschedule the watchdog */
5090 if (dhd->wd_timer_valid) {
5091 mod_timer(&dhd->timer,
5092 jiffies +
5093 msecs_to_jiffies(dhd_watchdog_ms) -
5094 min(msecs_to_jiffies(dhd_watchdog_ms), time_lapse));
5095 }
5096 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5097 }
5098 #ifdef BCMPCIE
5099 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5100 #endif /* BCMPCIE */
5101 } else {
5102 break;
5103 }
5104 }
5105
5106 complete_and_exit(&tsk->completed, 0);
5107 }
5108
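/* Watchdog timer callback: wake the watchdog thread when one exists,
 * otherwise run the bus watchdog inline and re-arm the timer.
 */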
5109 static void dhd_watchdog(ulong data)
5110 {
5111 dhd_info_t *dhd = (dhd_info_t *)data;
5112 unsigned long flags;
5113
5114 if (dhd->pub.dongle_reset) {
5115 return;
5116 }
5117
5118 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
5119 up(&dhd->thr_wdt_ctl.sema);
5120 return;
5121 }
5122
5123 #ifdef BCMPCIE
5124 DHD_OS_WD_WAKE_LOCK(&dhd->pub);
5125 #endif /* BCMPCIE */
5126 /* Call the bus module watchdog */
5127 dhd_bus_watchdog(&dhd->pub);
5128
5129 DHD_GENERAL_LOCK(&dhd->pub, flags);
5130 /* Count the tick for reference */
5131 dhd->pub.tickcnt++;
5132
5133 #ifdef DHD_L2_FILTER
5134 dhd_l2_filter_watchdog(&dhd->pub);
5135 #endif /* DHD_L2_FILTER */
5136 /* Reschedule the watchdog */
5137 if (dhd->wd_timer_valid)
5138 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
5139 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5140 #ifdef BCMPCIE
5141 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
5142 #endif /* BCMPCIE */
5143 }
5144
5145 #ifdef DHD_PCIE_RUNTIMEPM
5146 static int
5147 dhd_rpm_state_thread(void *data)
5148 {
5149 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5150 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5151
5152 while (1) {
5153 if (down_interruptible (&tsk->sema) == 0) {
5154 unsigned long flags;
5155 unsigned long jiffies_at_start = jiffies;
5156 unsigned long time_lapse;
5157
5158 SMP_RD_BARRIER_DEPENDS();
5159 if (tsk->terminated) {
5160 break;
5161 }
5162
5163 if (dhd->pub.dongle_reset == FALSE) {
5164 DHD_TIMER(("%s:\n", __FUNCTION__));
5165 if (dhd->pub.up) {
5166 dhd_runtimepm_state(&dhd->pub);
5167 }
5168
5169 DHD_GENERAL_LOCK(&dhd->pub, flags);
5170 time_lapse = jiffies - jiffies_at_start;
5171
5172 /* Reschedule the watchdog */
5173 if (dhd->rpm_timer_valid) {
5174 mod_timer(&dhd->rpm_timer,
5175 jiffies +
5176 msecs_to_jiffies(dhd_runtimepm_ms) -
5177 min(msecs_to_jiffies(dhd_runtimepm_ms),
5178 time_lapse));
5179 }
5180 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
5181 }
5182 } else {
5183 break;
5184 }
5185 }
5186
5187 complete_and_exit(&tsk->completed, 0);
5188 }
5189
5190 static void dhd_runtimepm(ulong data)
5191 {
5192 dhd_info_t *dhd = (dhd_info_t *)data;
5193
5194 if (dhd->pub.dongle_reset) {
5195 return;
5196 }
5197
5198 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
5199 up(&dhd->thr_rpm_ctl.sema);
5200 return;
5201 }
5202 }
5203
5204 void dhd_runtime_pm_disable(dhd_pub_t *dhdp)
5205 {
5206 dhd_os_runtimepm_timer(dhdp, 0);
5207 dhdpcie_runtime_bus_wake(dhdp, CAN_SLEEP(), __builtin_return_address(0));
5208 }
5209
5210 void dhd_runtime_pm_enable(dhd_pub_t *dhdp)
5211 {
5212 /* Enable Runtime PM except for MFG Mode */
5213 if (!(dhdp->op_mode & DHD_FLAG_MFG_MODE)) {
5214 if (dhd_get_idletime(dhdp)) {
5215 dhd_os_runtimepm_timer(dhdp, dhd_runtimepm_ms);
5216 }
5217 }
5218 }
5219
5220 #endif /* DHD_PCIE_RUNTIMEPM */
5221
5222 #ifdef ENABLE_ADAPTIVE_SCHED
5223 static void
5224 dhd_sched_policy(int prio)
5225 {
5226 struct sched_param param;
5227 if (cpufreq_quick_get(0) <= CUSTOM_CPUFREQ_THRESH) {
5228 param.sched_priority = 0;
		setScheduler(current, SCHED_NORMAL, &param);
5230 } else {
5231 if (get_scheduler_policy(current) != SCHED_FIFO) {
5232 param.sched_priority = (prio < MAX_RT_PRIO)? prio : (MAX_RT_PRIO-1);
			setScheduler(current, SCHED_FIFO, &param);
5234 }
5235 }
5236 }
5237 #endif /* ENABLE_ADAPTIVE_SCHED */
5238 #ifdef DEBUG_CPU_FREQ
5239 static int dhd_cpufreq_notifier(struct notifier_block *nb, unsigned long val, void *data)
5240 {
5241 dhd_info_t *dhd = container_of(nb, struct dhd_info, freq_trans);
5242 struct cpufreq_freqs *freq = data;
5243 if (dhd) {
5244 if (!dhd->new_freq)
5245 goto exit;
5246 if (val == CPUFREQ_POSTCHANGE) {
5247 DHD_ERROR(("cpu freq is changed to %u kHZ on CPU %d\n",
5248 freq->new, freq->cpu));
5249 *per_cpu_ptr(dhd->new_freq, freq->cpu) = freq->new;
5250 }
5251 }
5252 exit:
5253 return 0;
5254 }
5255 #endif /* DEBUG_CPU_FREQ */
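/* DPC kthread: on each semaphore tick it runs dhd_bus_dpc() until no work
 * remains, extending the watchdog timer for the duration; if the bus is
 * down it performs a clean bus stop instead.
 */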
5256 static int
5257 dhd_dpc_thread(void *data)
5258 {
5259 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5260 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5261
5262 /* This thread doesn't need any user-level access,
5263 * so get rid of all our resources
5264 */
5265 if (dhd_dpc_prio > 0)
5266 {
5267 struct sched_param param;
5268 param.sched_priority = (dhd_dpc_prio < MAX_RT_PRIO)?dhd_dpc_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
5270 }
5271
5272 #ifdef CUSTOM_DPC_CPUCORE
5273 set_cpus_allowed_ptr(current, cpumask_of(CUSTOM_DPC_CPUCORE));
5274 #endif // endif
5275 #ifdef CUSTOM_SET_CPUCORE
5276 dhd->pub.current_dpc = current;
5277 #endif /* CUSTOM_SET_CPUCORE */
5278 /* Run until signal received */
5279 while (1) {
5280 if (!binary_sema_down(tsk)) {
5281 #ifdef ENABLE_ADAPTIVE_SCHED
5282 dhd_sched_policy(dhd_dpc_prio);
5283 #endif /* ENABLE_ADAPTIVE_SCHED */
5284 SMP_RD_BARRIER_DEPENDS();
5285 if (tsk->terminated) {
5286 break;
5287 }
5288
5289 /* Call bus dpc unless it indicated down (then clean stop) */
5290 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5291 #ifdef DEBUG_DPC_THREAD_WATCHDOG
5292 int resched_cnt = 0;
5293 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
5294 dhd_os_wd_timer_extend(&dhd->pub, TRUE);
5295 while (dhd_bus_dpc(dhd->pub.bus)) {
5296 /* process all data */
5297 #ifdef DEBUG_DPC_THREAD_WATCHDOG
5298 resched_cnt++;
5299 if (resched_cnt > MAX_RESCHED_CNT) {
						DHD_INFO(("%s Calling msleep to "
							"let other processes run.\n",
							__FUNCTION__));
5303 dhd->pub.dhd_bug_on = true;
5304 resched_cnt = 0;
5305 OSL_SLEEP(1);
5306 }
5307 #endif /* DEBUG_DPC_THREAD_WATCHDOG */
5308 }
5309 dhd_os_wd_timer_extend(&dhd->pub, FALSE);
5310 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5311 } else {
5312 if (dhd->pub.up)
5313 dhd_bus_stop(dhd->pub.bus, TRUE);
5314 DHD_OS_WAKE_UNLOCK(&dhd->pub);
5315 }
5316 } else {
5317 break;
5318 }
5319 }
5320 complete_and_exit(&tsk->completed, 0);
5321 }
5322
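/* RX frame kthread: dequeues chained skbs from the rxf queue, unlinks
 * each one and hands it to netif_rx_ni().
 */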
5323 static int
5324 dhd_rxf_thread(void *data)
5325 {
5326 tsk_ctl_t *tsk = (tsk_ctl_t *)data;
5327 dhd_info_t *dhd = (dhd_info_t *)tsk->parent;
5328 #if defined(WAIT_DEQUEUE)
5329 #define RXF_WATCHDOG_TIME 250 /* BARK_TIME(1000) / */
5330 ulong watchdogTime = OSL_SYSUPTIME(); /* msec */
5331 #endif // endif
5332 dhd_pub_t *pub = &dhd->pub;
5333
5334 /* This thread doesn't need any user-level access,
5335 * so get rid of all our resources
5336 */
5337 if (dhd_rxf_prio > 0)
5338 {
5339 struct sched_param param;
5340 param.sched_priority = (dhd_rxf_prio < MAX_RT_PRIO)?dhd_rxf_prio:(MAX_RT_PRIO-1);
		setScheduler(current, SCHED_FIFO, &param);
5342 }
5343
5344 #ifdef CUSTOM_SET_CPUCORE
5345 dhd->pub.current_rxf = current;
5346 #endif /* CUSTOM_SET_CPUCORE */
5347 /* Run until signal received */
5348 while (1) {
5349 if (down_interruptible(&tsk->sema) == 0) {
5350 void *skb;
5351 #ifdef ENABLE_ADAPTIVE_SCHED
5352 dhd_sched_policy(dhd_rxf_prio);
5353 #endif /* ENABLE_ADAPTIVE_SCHED */
5354
5355 SMP_RD_BARRIER_DEPENDS();
5356
5357 if (tsk->terminated) {
5358 break;
5359 }
5360 skb = dhd_rxf_dequeue(pub);
5361
5362 if (skb == NULL) {
5363 continue;
5364 }
5365 while (skb) {
5366 void *skbnext = PKTNEXT(pub->osh, skb);
5367 PKTSETNEXT(pub->osh, skb, NULL);
5368 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
5369 __FUNCTION__, __LINE__);
5370 netif_rx_ni(skb);
5371 skb = skbnext;
5372 }
5373 #if defined(WAIT_DEQUEUE)
5374 if (OSL_SYSUPTIME() - watchdogTime > RXF_WATCHDOG_TIME) {
5375 OSL_SLEEP(1);
5376 watchdogTime = OSL_SYSUPTIME();
5377 }
5378 #endif // endif
5379
5380 DHD_OS_WAKE_UNLOCK(pub);
5381 } else {
5382 break;
5383 }
5384 }
5385 complete_and_exit(&tsk->completed, 0);
5386 }
5387
5388 #ifdef BCMPCIE
5389 void dhd_dpc_enable(dhd_pub_t *dhdp)
5390 {
5391 #if defined(DHD_LB_RXP) || defined(DHD_LB_TXP)
5392 dhd_info_t *dhd;
5393
5394 if (!dhdp || !dhdp->info)
5395 return;
5396 dhd = dhdp->info;
5397 #endif /* DHD_LB_RXP || DHD_LB_TXP */
5398
5399 #ifdef DHD_LB_RXP
5400 __skb_queue_head_init(&dhd->rx_pend_queue);
5401 #endif /* DHD_LB_RXP */
5402
5403 #ifdef DHD_LB_TXP
5404 skb_queue_head_init(&dhd->tx_pend_queue);
5405 #endif /* DHD_LB_TXP */
5406 }
5407 #endif /* BCMPCIE */
5408
5409 #ifdef BCMPCIE
5410 void
5411 dhd_dpc_kill(dhd_pub_t *dhdp)
5412 {
5413 dhd_info_t *dhd;
5414
5415 if (!dhdp) {
5416 return;
5417 }
5418
5419 dhd = dhdp->info;
5420
5421 if (!dhd) {
5422 return;
5423 }
5424
5425 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5426 tasklet_kill(&dhd->tasklet);
5427 DHD_ERROR(("%s: tasklet disabled\n", __FUNCTION__));
5428 }
5429
5430 #ifdef DHD_LB
5431 #ifdef DHD_LB_RXP
5432 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
5433 __skb_queue_purge(&dhd->rx_pend_queue);
5434 #endif /* DHD_LB_RXP */
5435 #ifdef DHD_LB_TXP
5436 cancel_work_sync(&dhd->tx_dispatcher_work);
5437 skb_queue_purge(&dhd->tx_pend_queue);
5438 #endif /* DHD_LB_TXP */
5439
5440 /* Kill the Load Balancing Tasklets */
5441 #if defined(DHD_LB_TXC)
5442 tasklet_kill(&dhd->tx_compl_tasklet);
5443 #endif /* DHD_LB_TXC */
5444 #if defined(DHD_LB_RXC)
5445 tasklet_kill(&dhd->rx_compl_tasklet);
5446 #endif /* DHD_LB_RXC */
5447 #if defined(DHD_LB_TXP)
5448 tasklet_kill(&dhd->tx_tasklet);
5449 #endif /* DHD_LB_TXP */
5450 #endif /* DHD_LB */
5451 }
5452
5453 void
5454 dhd_dpc_tasklet_kill(dhd_pub_t *dhdp)
5455 {
5456 dhd_info_t *dhd;
5457
5458 if (!dhdp) {
5459 return;
5460 }
5461
5462 dhd = dhdp->info;
5463
5464 if (!dhd) {
5465 return;
5466 }
5467
5468 if (dhd->thr_dpc_ctl.thr_pid < 0) {
5469 tasklet_kill(&dhd->tasklet);
5470 }
5471 }
5472 #endif /* BCMPCIE */
5473
5474 static void
5475 dhd_dpc(ulong data)
5476 {
5477 dhd_info_t *dhd;
5478
5479 dhd = (dhd_info_t *)data;
5480
5481 /* This tasklet is scheduled from dhd_sched_dpc() [dhd_linux.c] below;
5482 * the caller is expected to hold the wake lock.
5483 * The tasklet itself is initialized in dhd_attach()
5484 */
5485 /* Call bus dpc unless it indicated down (then clean stop) */
5486 if (dhd->pub.busstate != DHD_BUS_DOWN) {
5487 #if defined(DHD_LB_STATS) && defined(PCIE_FULL_DONGLE)
5488 DHD_LB_STATS_INCR(dhd->dhd_dpc_cnt);
5489 #endif /* DHD_LB_STATS && PCIE_FULL_DONGLE */
5490 if (dhd_bus_dpc(dhd->pub.bus)) {
5491 tasklet_schedule(&dhd->tasklet);
5492 }
5493 } else {
5494 dhd_bus_stop(dhd->pub.bus, TRUE);
5495 }
5496 }
5497
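/*
 * Schedule deferred bus processing: if the DPC kthread exists
 * (thr_pid >= 0), wake it through the binary semaphore while holding a
 * wake lock that the thread releases when done; otherwise fall back to
 * scheduling the dhd_dpc tasklet.
 */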
5498 void
5499 dhd_sched_dpc(dhd_pub_t *dhdp)
5500 {
5501 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5502
5503 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
5504 DHD_OS_WAKE_LOCK(dhdp);
5505 /* If the semaphore cannot be raised,
5506 * release the wake lock here
5507 */
5508 if (!binary_sema_up(&dhd->thr_dpc_ctl)) {
5509 DHD_OS_WAKE_UNLOCK(dhdp);
5510 }
5511 return;
5512 } else {
5513 dhd_bus_set_dpc_sched_time(dhdp);
5514 tasklet_schedule(&dhd->tasklet);
5515 }
5516 }
5517
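/* Enqueue an rx packet for the rxf thread and wake it. dhd_rxf_enqueue()
 * is retried until it succeeds, and the wake lock taken here is released
 * by the rxf thread once the packet has been processed.
 */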
5518 static void
5519 dhd_sched_rxf(dhd_pub_t *dhdp, void *skb)
5520 {
5521 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5522
5523 DHD_OS_WAKE_LOCK(dhdp);
5524
5525 DHD_TRACE(("dhd_sched_rxf: Enter\n"));
5526 do {
5527 if (dhd_rxf_enqueue(dhdp, skb) == BCME_OK)
5528 break;
5529 } while (1);
5530 if (dhd->thr_rxf_ctl.thr_pid >= 0) {
5531 up(&dhd->thr_rxf_ctl.sema);
5532 }
5533 return;
5534 }
5535
5536 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
5537 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
5538
5539 #ifdef TOE
5540 /* Retrieve current toe component enables, which are kept as a bitmap in toe_ol iovar */
5541 static int
5542 dhd_toe_get(dhd_info_t *dhd, int ifidx, uint32 *toe_ol)
5543 {
5544 char buf[32];
5545 int ret;
5546
5547 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
5548
5549 if (ret < 0) {
5550 if (ret == -EIO) {
5551 DHD_ERROR(("%s: toe not supported by device\n", dhd_ifname(&dhd->pub,
5552 ifidx)));
5553 return -EOPNOTSUPP;
5554 }
5555
5556 DHD_INFO(("%s: could not get toe_ol: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5557 return ret;
5558 }
5559
5560 memcpy(toe_ol, buf, sizeof(uint32));
5561 return 0;
5562 }
5563
5564 /* Set current toe component enables in toe_ol iovar, and set toe global enable iovar */
5565 static int
5566 dhd_toe_set(dhd_info_t *dhd, int ifidx, uint32 toe_ol)
5567 {
5568 int toe, ret;
5569
5570 /* Set toe_ol as requested */
5571 ret = dhd_iovar(&dhd->pub, ifidx, "toe_ol", (char *)&toe_ol, sizeof(toe_ol), NULL, 0, TRUE);
5572 if (ret < 0) {
5573 DHD_ERROR(("%s: could not set toe_ol: ret=%d\n",
5574 dhd_ifname(&dhd->pub, ifidx), ret));
5575 return ret;
5576 }
5577
5578 /* Enable toe globally only if any components are enabled. */
5579 toe = (toe_ol != 0);
5580 ret = dhd_iovar(&dhd->pub, ifidx, "toe", (char *)&toe, sizeof(toe), NULL, 0, TRUE);
5581 if (ret < 0) {
5582 DHD_ERROR(("%s: could not set toe: ret=%d\n", dhd_ifname(&dhd->pub, ifidx), ret));
5583 return ret;
5584 }
5585
5586 return 0;
5587 }
5588 #endif /* TOE */
5589
5590 #if defined(WL_CFG80211) && defined(NUM_SCB_MAX_PROBE)
5591 void dhd_set_scb_probe(dhd_pub_t *dhd)
5592 {
5593 wl_scb_probe_t scb_probe;
5594 char iovbuf[WL_EVENTING_MASK_LEN + sizeof(wl_scb_probe_t)];
5595 int ret;
5596
5597 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
5598 return;
5599 }
5600
5601 ret = dhd_iovar(dhd, 0, "scb_probe", NULL, 0, iovbuf, sizeof(iovbuf), FALSE);
5602 if (ret < 0) {
5603 DHD_ERROR(("%s: GET max_scb_probe failed\n", __FUNCTION__));
return;
5604 }
5605
5606 memcpy(&scb_probe, iovbuf, sizeof(wl_scb_probe_t));
5607
5608 scb_probe.scb_max_probe = NUM_SCB_MAX_PROBE;
5609
5610 ret = dhd_iovar(dhd, 0, "scb_probe", (char *)&scb_probe, sizeof(wl_scb_probe_t), NULL, 0,
5611 TRUE);
5612 if (ret < 0) {
5613 DHD_ERROR(("%s: max_scb_probe setting failed\n", __FUNCTION__));
5614 return;
5615 }
5616 }
5617 #endif /* WL_CFG80211 && NUM_SCB_MAX_PROBE */
5618
5619 static void
5620 dhd_ethtool_get_drvinfo(struct net_device *net, struct ethtool_drvinfo *info)
5621 {
5622 dhd_info_t *dhd = DHD_DEV_INFO(net);
5623
5624 snprintf(info->driver, sizeof(info->driver), "wl");
5625 snprintf(info->version, sizeof(info->version), "%lu", dhd->pub.drv_version);
5626 }
5627
5628 struct ethtool_ops dhd_ethtool_ops = {
5629 .get_drvinfo = dhd_ethtool_get_drvinfo
5630 };
5631
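/*
 * Legacy SIOCETHTOOL handler. Every ethtool request begins with a u32 cmd
 * word in user memory; ETHTOOL_GDRVINFO implements a small handshake where
 * a requested driver name of "?dhd" makes the driver identify itself, and
 * the TOE cases translate RX/TX checksum get/set into toe_ol iovars.
 */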
5632 static int
5633 dhd_ethtool(dhd_info_t *dhd, void *uaddr)
5634 {
5635 struct ethtool_drvinfo info;
5636 char drvname[sizeof(info.driver)];
5637 uint32 cmd;
5638 #ifdef TOE
5639 struct ethtool_value edata;
5640 uint32 toe_cmpnt, csum_dir;
5641 int ret;
5642 #endif // endif
5643
5644 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
5645
5646 /* all ethtool calls start with a cmd word */
5647 if (copy_from_user(&cmd, uaddr, sizeof (uint32)))
5648 return -EFAULT;
5649
5650 switch (cmd) {
5651 case ETHTOOL_GDRVINFO:
5652 /* Copy out any request driver name */
5653 if (copy_from_user(&info, uaddr, sizeof(info)))
5654 return -EFAULT;
5655 strncpy(drvname, info.driver, sizeof(info.driver));
5656 drvname[sizeof(info.driver)-1] = '\0';
5657
5658 /* clear struct for return */
5659 memset(&info, 0, sizeof(info));
5660 info.cmd = cmd;
5661
5662 /* if dhd requested, identify ourselves */
5663 if (strcmp(drvname, "?dhd") == 0) {
5664 snprintf(info.driver, sizeof(info.driver), "dhd");
5665 strncpy(info.version, EPI_VERSION_STR, sizeof(info.version) - 1);
5666 info.version[sizeof(info.version) - 1] = '\0';
5667 }
5668
5669 /* otherwise, require dongle to be up */
5670 else if (!dhd->pub.up) {
5671 DHD_ERROR(("%s: dongle is not up\n", __FUNCTION__));
5672 return -ENODEV;
5673 }
5674
5675 /* finally, report dongle driver type */
5676 else if (dhd->pub.iswl)
5677 snprintf(info.driver, sizeof(info.driver), "wl");
5678 else
5679 snprintf(info.driver, sizeof(info.driver), "xx");
5680
5681 snprintf(info.version, sizeof(info.version), "%lu", dhd->pub.drv_version);
5682 if (copy_to_user(uaddr, &info, sizeof(info)))
5683 return -EFAULT;
5684 DHD_CTL(("%s: given %*s, returning %s\n", __FUNCTION__,
5685 (int)sizeof(drvname), drvname, info.driver));
5686 break;
5687
5688 #ifdef TOE
5689 /* Get toe offload components from dongle */
5690 case ETHTOOL_GRXCSUM:
5691 case ETHTOOL_GTXCSUM:
5692 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5693 return ret;
5694
5695 csum_dir = (cmd == ETHTOOL_GTXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5696
5697 edata.cmd = cmd;
5698 edata.data = (toe_cmpnt & csum_dir) ? 1 : 0;
5699
5700 if (copy_to_user(uaddr, &edata, sizeof(edata)))
5701 return -EFAULT;
5702 break;
5703
5704 /* Set toe offload components in dongle */
5705 case ETHTOOL_SRXCSUM:
5706 case ETHTOOL_STXCSUM:
5707 if (copy_from_user(&edata, uaddr, sizeof(edata)))
5708 return -EFAULT;
5709
5710 /* Read the current settings, update and write back */
5711 if ((ret = dhd_toe_get(dhd, 0, &toe_cmpnt)) < 0)
5712 return ret;
5713
5714 csum_dir = (cmd == ETHTOOL_STXCSUM) ? TOE_TX_CSUM_OL : TOE_RX_CSUM_OL;
5715
5716 if (edata.data != 0)
5717 toe_cmpnt |= csum_dir;
5718 else
5719 toe_cmpnt &= ~csum_dir;
5720
5721 if ((ret = dhd_toe_set(dhd, 0, toe_cmpnt)) < 0)
5722 return ret;
5723
5724 /* If setting TX checksum mode, tell Linux the new mode */
5725 if (cmd == ETHTOOL_STXCSUM) {
5726 if (edata.data)
5727 dhd->iflist[0]->net->features |= NETIF_F_IP_CSUM;
5728 else
5729 dhd->iflist[0]->net->features &= ~NETIF_F_IP_CSUM;
5730 }
5731
5732 break;
5733 #endif /* TOE */
5734
5735 default:
5736 return -EOPNOTSUPP;
5737 }
5738
5739 return 0;
5740 }
5741
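/*
 * Decide whether an error indicates a hung dongle. A timeout error
 * (-ETIMEDOUT/-EREMOTEIO), or a bus that went down without an explicit
 * dongle reset, sends a HANG event up to the framework, with hang_reason
 * classifying trap, D3 ACK timeout and ioctl-response timeout cases.
 */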
5742 static bool dhd_check_hang(struct net_device *net, dhd_pub_t *dhdp, int error)
5743 {
5744 #if defined(OEM_ANDROID)
5745 if (!dhdp) {
5746 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
5747 return FALSE;
5748 }
5749
5750 if (!dhdp->up)
5751 return FALSE;
5752
5753 #if !defined(BCMPCIE)
5754 if (dhdp->info->thr_dpc_ctl.thr_pid < 0) {
5755 DHD_ERROR(("%s : skipped due to negative pid - unloading?\n", __FUNCTION__));
5756 return FALSE;
5757 }
5758 #endif // endif
5759
5760 if ((error == -ETIMEDOUT) || (error == -EREMOTEIO) ||
5761 ((dhdp->busstate == DHD_BUS_DOWN) && (!dhdp->dongle_reset))) {
5762 #ifdef BCMPCIE
5763 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d d3acke=%d e=%d s=%d\n",
5764 __FUNCTION__, dhdp->rxcnt_timeout, dhdp->txcnt_timeout,
5765 dhdp->d3ackcnt_timeout, error, dhdp->busstate));
5766 #else
5767 DHD_ERROR(("%s: Event HANG send up due to re=%d te=%d e=%d s=%d\n", __FUNCTION__,
5768 dhdp->rxcnt_timeout, dhdp->txcnt_timeout, error, dhdp->busstate));
5769 #endif /* BCMPCIE */
5770 if (dhdp->hang_reason == 0) {
5771 if (dhdp->dongle_trap_occured) {
5772 dhdp->hang_reason = HANG_REASON_DONGLE_TRAP;
5773 #ifdef BCMPCIE
5774 } else if (dhdp->d3ackcnt_timeout) {
5775 dhdp->hang_reason = dhdp->is_sched_error ?
5776 HANG_REASON_D3_ACK_TIMEOUT_SCHED_ERROR :
5777 HANG_REASON_D3_ACK_TIMEOUT;
5778 #endif /* BCMPCIE */
5779 } else {
5780 dhdp->hang_reason = dhdp->is_sched_error ?
5781 HANG_REASON_IOCTL_RESP_TIMEOUT_SCHED_ERROR :
5782 HANG_REASON_IOCTL_RESP_TIMEOUT;
5783 }
5784 }
5785 net_os_send_hang_message(net);
5786 return TRUE;
5787 }
5788 #endif /* OEM_ANDROID */
5789 return FALSE;
5790 }
5791
5792 #ifdef WL_MONITOR
5793 bool
5794 dhd_monitor_enabled(dhd_pub_t *dhd, int ifidx)
5795 {
5796 return (dhd->info->monitor_type != 0);
5797 }
5798
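/*
 * Deliver a monitor-mode rx completion to the radiotap interface. AMSDU
 * subframes arrive flagged FIRST/INTER/LAST and are reassembled into
 * dhd->monitor_skb by copying payloads back to back; only the non-AMSDU
 * and LAST cases fall through to inject the completed frame into the stack.
 */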
5799 void
5800 dhd_rx_mon_pkt(dhd_pub_t *dhdp, host_rxbuf_cmpl_t* msg, void *pkt, int ifidx)
5801 {
5802 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
5803 {
5804 uint8 amsdu_flag = (msg->flags & BCMPCIE_PKT_FLAGS_MONITOR_MASK) >>
5805 BCMPCIE_PKT_FLAGS_MONITOR_SHIFT;
5806 switch (amsdu_flag) {
5807 case BCMPCIE_PKT_FLAGS_MONITOR_NO_AMSDU:
5808 default:
5809 if (!dhd->monitor_skb) {
5810 if ((dhd->monitor_skb = PKTTONATIVE(dhdp->osh, pkt))
5811 == NULL)
5812 return;
5813 }
5814 if (dhd->monitor_type && dhd->monitor_dev)
5815 dhd->monitor_skb->dev = dhd->monitor_dev;
5816 else {
5817 PKTFREE(dhdp->osh, pkt, FALSE);
5818 dhd->monitor_skb = NULL;
5819 return;
5820 }
5821 dhd->monitor_skb->protocol =
5822 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5823 dhd->monitor_len = 0;
5824 break;
5825
5826 case BCMPCIE_PKT_FLAGS_MONITOR_FIRST_PKT:
5827 if (!dhd->monitor_skb) {
5828 if ((dhd->monitor_skb = dev_alloc_skb(MAX_MON_PKT_SIZE))
5829 == NULL)
5830 return;
5831 dhd->monitor_len = 0;
5832 }
5833 if (dhd->monitor_type && dhd->monitor_dev)
5834 dhd->monitor_skb->dev = dhd->monitor_dev;
5835 else {
5836 PKTFREE(dhdp->osh, pkt, FALSE);
5837 dev_kfree_skb(dhd->monitor_skb);
dhd->monitor_skb = NULL;
5838 return;
5839 }
5840 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb),
5841 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5842 dhd->monitor_len = PKTLEN(dhdp->osh, pkt);
5843 PKTFREE(dhdp->osh, pkt, FALSE);
5844 return;
5845
5846 case BCMPCIE_PKT_FLAGS_MONITOR_INTER_PKT:
5847 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5848 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5849 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5850 PKTFREE(dhdp->osh, pkt, FALSE);
5851 return;
5852
5853 case BCMPCIE_PKT_FLAGS_MONITOR_LAST_PKT:
5854 memcpy(PKTDATA(dhdp->osh, dhd->monitor_skb) + dhd->monitor_len,
5855 PKTDATA(dhdp->osh, pkt), PKTLEN(dhdp->osh, pkt));
5856 dhd->monitor_len += PKTLEN(dhdp->osh, pkt);
5857 PKTFREE(dhdp->osh, pkt, FALSE);
5858 skb_put(dhd->monitor_skb, dhd->monitor_len);
5859 dhd->monitor_skb->protocol =
5860 eth_type_trans(dhd->monitor_skb, dhd->monitor_skb->dev);
5861 dhd->monitor_len = 0;
5862 break;
5863 }
5864 }
5865
5866 if (in_interrupt()) {
5867 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
5868 __FUNCTION__, __LINE__);
5869 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5870 netif_rx(dhd->monitor_skb);
5871 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5872 } else {
5873 /* If the receive is not processed inside an ISR,
5874 * the ksoftirqd thread must be woken explicitly to service
5875 * the NET_RX_SOFTIRQ. In 2.6 kernels, this is handled
5876 * by netif_rx_ni(), but in earlier kernels, we need
5877 * to do it manually.
5878 */
5879 bcm_object_trace_opr(dhd->monitor_skb, BCM_OBJDBG_REMOVE,
5880 __FUNCTION__, __LINE__);
5881
5882 DHD_PERIM_UNLOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5883 netif_rx_ni(dhd->monitor_skb);
5884 DHD_PERIM_LOCK_ALL((dhd->fwder_unit % FWDER_MAX_UNIT));
5885 }
5886
5887 dhd->monitor_skb = NULL;
5888 }
5889
5890 typedef struct dhd_mon_dev_priv {
5891 struct net_device_stats stats;
5892 } dhd_mon_dev_priv_t;
5893
5894 #define DHD_MON_DEV_PRIV_SIZE (sizeof(dhd_mon_dev_priv_t))
5895 #define DHD_MON_DEV_PRIV(dev) ((dhd_mon_dev_priv_t *)DEV_PRIV(dev))
5896 #define DHD_MON_DEV_STATS(dev) (((dhd_mon_dev_priv_t *)DEV_PRIV(dev))->stats)
5897
5898 #ifdef CFI_CHECK
5899 static netdev_tx_t
5900 #else
5901 static int
5902 #endif /* CFI_CHECK */
5903 dhd_monitor_start(struct sk_buff *skb, struct net_device *dev)
5904 {
5905 PKTFREE(NULL, skb, FALSE);
5906 #ifdef CFI_CHECK
5907 return NETDEV_TX_OK;
5908 #else
5909 return 0;
5910 #endif /* CFI_CHECK */
5911 }
5912
5913 static int
5914 dhd_monitor_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
5915 {
5916 return 0;
5917 }
5918
5919 static struct net_device_stats*
5920 dhd_monitor_get_stats(struct net_device *dev)
5921 {
5922 return &DHD_MON_DEV_STATS(dev);
5923 }
5924
5925 static const struct net_device_ops netdev_monitor_ops =
5926 {
5927 .ndo_start_xmit = dhd_monitor_start,
5928 .ndo_get_stats = dhd_monitor_get_stats,
5929 .ndo_do_ioctl = dhd_monitor_ioctl
5930 };
5931
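/*
 * Create the "radiotap<unit>" monitor net_device. Its type is set to
 * ARPHRD_IEEE80211_RADIOTAP so sniffers parse the radiotap header, and if
 * the firmware supports monitor mode, scanning is suppressed (and PCIe
 * runtime PM disabled) for as long as the interface exists.
 */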
5932 static void
5933 dhd_add_monitor_if(dhd_info_t *dhd)
5934 {
5935 struct net_device *dev;
5936 char *devname;
5937 uint32 scan_suppress = FALSE;
5938 int ret = BCME_OK;
5939
5940 if (!dhd) {
5941 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
5942 return;
5943 }
5944
5945 if (dhd->monitor_dev) {
5946 DHD_ERROR(("%s: monitor i/f already exists", __FUNCTION__));
5947 return;
5948 }
5949
5950 dev = alloc_etherdev(DHD_MON_DEV_PRIV_SIZE);
5951 if (!dev) {
5952 DHD_ERROR(("%s: alloc wlif failed\n", __FUNCTION__));
5953 return;
5954 }
5955
5956 devname = "radiotap";
5957
5958 snprintf(dev->name, sizeof(dev->name), "%s%u", devname, dhd->unit);
5959
5960 #ifndef ARPHRD_IEEE80211_PRISM /* From Linux 2.4.18 */
5961 #define ARPHRD_IEEE80211_PRISM 802
5962 #endif // endif
5963
5964 #ifndef ARPHRD_IEEE80211_RADIOTAP
5965 #define ARPHRD_IEEE80211_RADIOTAP 803 /* IEEE 802.11 + radiotap header */
5966 #endif /* ARPHRD_IEEE80211_RADIOTAP */
5967
5968 dev->type = ARPHRD_IEEE80211_RADIOTAP;
5969
5970 dev->netdev_ops = &netdev_monitor_ops;
5971
5972 if (register_netdevice(dev)) {
5973 DHD_ERROR(("%s, register_netdev failed for %s\n",
5974 __FUNCTION__, dev->name));
5975 free_netdev(dev);
5976 return;
5977 }
5978
5979 if (FW_SUPPORTED((&dhd->pub), monitor)) {
5980 #ifdef DHD_PCIE_RUNTIMEPM
5981 /* Disable RuntimePM in monitor mode */
5982 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
5983 DHD_ERROR(("%s : disable runtime PM in monitor mode\n", __FUNCTION__));
5984 #endif /* DHD_PCIE_RUNTIMEPM */
5985 scan_suppress = TRUE;
5986 /* Set the SCAN SUPPRESS Flag in the firmware to disable scan in Monitor mode */
5987 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
5988 sizeof(scan_suppress), NULL, 0, TRUE);
5989 if (ret < 0) {
5990 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
5991 }
5992 }
5993
5994 dhd->monitor_dev = dev;
5995 }
5996
5997 static void
5998 dhd_del_monitor_if(dhd_info_t *dhd)
5999 {
6000 int ret = BCME_OK;
6001 uint32 scan_suppress = FALSE;
6002
6003 if (!dhd) {
6004 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6005 return;
6006 }
6007
6008 if (!dhd->monitor_dev) {
6009 DHD_ERROR(("%s: monitor i/f doesn't exist", __FUNCTION__));
6010 return;
6011 }
6012
6013 if (FW_SUPPORTED((&dhd->pub), monitor)) {
6014 #ifdef DHD_PCIE_RUNTIMEPM
6015 /* Enable RuntimePM */
6016 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
6017 DHD_ERROR(("%s : enabled runtime PM\n", __FUNCTION__));
6018 #endif /* DHD_PCIE_RUNTIMEPM */
6019 scan_suppress = FALSE;
6020 /* Unset the SCAN SUPPRESS Flag in the firmware to enable scan */
6021 ret = dhd_iovar(&dhd->pub, 0, "scansuppress", (char *)&scan_suppress,
6022 sizeof(scan_suppress), NULL, 0, TRUE);
6023 if (ret < 0) {
6024 DHD_ERROR(("%s: scansuppress set failed, ret=%d\n", __FUNCTION__, ret));
6025 }
6026 }
6027
6028 if (dhd->monitor_dev) {
6029 if (dhd->monitor_dev->reg_state == NETREG_UNINITIALIZED) {
6030 free_netdev(dhd->monitor_dev);
6031 } else {
6032 unregister_netdevice(dhd->monitor_dev);
6033 }
6034 dhd->monitor_dev = NULL;
6035 }
6036 }
6037
6038 static void
6039 dhd_set_monitor(dhd_pub_t *pub, int ifidx, int val)
6040 {
6041 dhd_info_t *dhd = pub->info;
6042
6043 DHD_TRACE(("%s: val %d\n", __FUNCTION__, val));
6044
6045 dhd_net_if_lock_local(dhd);
6046 if (!val) {
6047 /* Delete monitor */
6048 dhd_del_monitor_if(dhd);
6049 } else {
6050 /* Add monitor */
6051 dhd_add_monitor_if(dhd);
6052 }
6053 dhd->monitor_type = val;
6054 dhd_net_if_unlock_local(dhd);
6055 }
6056 #endif /* WL_MONITOR */
6057
6058 #if defined(DHD_H2D_LOG_TIME_SYNC)
6059 /*
6060 * Helper function:
6061 * Used for RTE console message time syncing with Host printk
6062 */
6063 void dhd_h2d_log_time_sync_deferred_wq_schedule(dhd_pub_t *dhdp)
6064 {
6065 dhd_info_t *info = dhdp->info;
6066
6067 /* Ideally the "state" should be always TRUE */
6068 dhd_deferred_schedule_work(info->dhd_deferred_wq, NULL,
6069 DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH,
6070 dhd_deferred_work_rte_log_time_sync,
6071 DHD_WQ_WORK_PRIORITY_LOW);
6072 }
6073
6074 void
6075 dhd_deferred_work_rte_log_time_sync(void *handle, void *event_info, u8 event)
6076 {
6077 dhd_info_t *dhd_info = handle;
6078 dhd_pub_t *dhd;
6079
6080 if (event != DHD_WQ_WORK_H2D_CONSOLE_TIME_STAMP_MATCH) {
6081 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
6082 return;
6083 }
6084
6085 if (!dhd_info) {
6086 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
6087 return;
6088 }
6089
6090 dhd = &dhd_info->pub;
6091
6092 /*
6093 * Function to send IOVAR for console timesyncing
6094 * between Host and Dongle.
6095 * If the IOVAR fails,
6096 * 1. dhd_rte_time_sync_ms is set to 0 and
6097 * 2. HOST Dongle console time sync will *not* happen.
6098 */
6099 dhd_h2d_log_time_sync(dhd);
6100 }
6101 #endif /* DHD_H2D_LOG_TIME_SYNC */
6102
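/*
 * Common ioctl dispatch for the ioctl entry point and private commands.
 * Requests tagged DHD_IOCTL_MAGIC are handled locally by dhd_ioctl();
 * everything else is a WL ioctl that requires the dongle to be up (the bus
 * is started on demand when delayed firmware download is allowed), with
 * key-set and disassoc commands serialized against pending 802.1X frames
 * before being forwarded through dhd_wl_ioctl().
 */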
6103 int dhd_ioctl_process(dhd_pub_t *pub, int ifidx, dhd_ioctl_t *ioc, void *data_buf)
6104 {
6105 int bcmerror = BCME_OK;
6106 int buflen = 0;
6107 struct net_device *net;
6108
6109 net = dhd_idx2net(pub, ifidx);
6110 if (!net) {
6111 bcmerror = BCME_BADARG;
6112 /*
6113 * A bad netdev pointer means the DHD can't communicate
6114 * with higher layers, so just return from here
6115 */
6116 return bcmerror;
6117 }
6118
6119 /* check for local dhd ioctl and handle it */
6120 if (ioc->driver == DHD_IOCTL_MAGIC) {
6121 /* This is a DHD IOVAR, truncate buflen to DHD_IOCTL_MAXLEN */
6122 if (data_buf)
6123 buflen = MIN(ioc->len, DHD_IOCTL_MAXLEN);
6124 bcmerror = dhd_ioctl((void *)pub, ioc, data_buf, buflen);
6125 if (bcmerror)
6126 pub->bcmerror = bcmerror;
6127 goto done;
6128 }
6129
6130 /* This is a WL IOVAR, truncate buflen to WLC_IOCTL_MAXLEN */
6131 if (data_buf)
6132 buflen = MIN(ioc->len, WLC_IOCTL_MAXLEN);
6133
6134 /* send to dongle (must be up, and wl). */
6135 if (pub->busstate == DHD_BUS_DOWN || pub->busstate == DHD_BUS_LOAD) {
6136 if ((!pub->dongle_trap_occured) && allow_delay_fwdl) {
6137 int ret;
6138 if (atomic_read(&exit_in_progress)) {
6139 DHD_ERROR(("%s module exit in progress\n", __func__));
6140 bcmerror = BCME_DONGLE_DOWN;
6141 goto done;
6142 }
6143 ret = dhd_bus_start(pub);
6144 if (ret != 0) {
6145 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
6146 bcmerror = BCME_DONGLE_DOWN;
6147 goto done;
6148 }
6149 } else {
6150 bcmerror = BCME_DONGLE_DOWN;
6151 goto done;
6152 }
6153 }
6154
6155 if (!pub->iswl) {
6156 bcmerror = BCME_DONGLE_DOWN;
6157 goto done;
6158 }
6159
6160 /*
6161 * Flush the TX queue if required for proper message serialization:
6162 * Intercept WLC_SET_KEY IOCTL - serialize M4 send and set key IOCTL to
6163 * prevent M4 encryption and
6164 * intercept WLC_DISASSOC IOCTL - serialize WPS-DONE and WLC_DISASSOC IOCTL to
6165 * prevent disassoc frame being sent before WPS-DONE frame.
6166 */
6167 if (ioc->cmd == WLC_SET_KEY ||
6168 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6169 strncmp("wsec_key", data_buf, 9) == 0) ||
6170 (ioc->cmd == WLC_SET_VAR && data_buf != NULL &&
6171 strncmp("bsscfg:wsec_key", data_buf, 15) == 0) ||
6172 ioc->cmd == WLC_DISASSOC)
6173 dhd_wait_pend8021x(net);
6174
6175 if ((ioc->cmd == WLC_SET_VAR || ioc->cmd == WLC_GET_VAR) &&
6176 data_buf != NULL && strncmp("rpc_", data_buf, 4) == 0) {
6177 bcmerror = BCME_UNSUPPORTED;
6178 goto done;
6179 }
6180
6181 bcmerror = dhd_wl_ioctl(pub, ifidx, (wl_ioctl_t *)ioc, data_buf, buflen);
6182
6183 #ifdef WL_MONITOR
6184 /* Intercept monitor ioctl here, add/del monitor if */
6185 if (bcmerror == BCME_OK && ioc->cmd == WLC_SET_MONITOR) {
6186 int val = 0;
6187 if (data_buf != NULL && buflen != 0) {
6188 if (buflen >= 4) {
6189 val = *(int*)data_buf;
6190 } else if (buflen >= 2) {
6191 val = *(short*)data_buf;
6192 } else {
6193 val = *(char*)data_buf;
6194 }
6195 }
6196 dhd_set_monitor(pub, ifidx, val);
6197 }
6198 #endif /* WL_MONITOR */
6199
6200 done:
6201 #if defined(OEM_ANDROID)
6202 dhd_check_hang(net, pub, bcmerror);
6203 #endif /* OEM_ANDROID */
6204
6205 return bcmerror;
6206 }
6207
6208 /**
6209 * Called by the OS (optionally via a wrapper function).
6210 * @param net Linux per dongle instance
6211 * @param ifr Linux request structure
6212 * @param cmd e.g. SIOCETHTOOL
6213 */
6214 static int
6215 dhd_ioctl_entry(struct net_device *net, struct ifreq *ifr, int cmd)
6216 {
6217 dhd_info_t *dhd = DHD_DEV_INFO(net);
6218 dhd_ioctl_t ioc;
6219 int bcmerror = 0;
6220 int ifidx;
6221 int ret;
6222 void *local_buf = NULL; /**< buffer in kernel space */
6223 void __user *ioc_buf_user = NULL; /**< buffer in user space */
6224 u16 buflen = 0;
6225
6226 #ifdef ENABLE_INSMOD_NO_FW_LOAD
6227 allow_delay_fwdl = 1;
6228 #endif /* ENABLE_INSMOD_NO_FW_LOAD */
6229 if (atomic_read(&exit_in_progress)) {
6230 DHD_ERROR(("%s module exit in progress\n", __func__));
6231 bcmerror = BCME_DONGLE_DOWN;
6232 return OSL_ERROR(bcmerror);
6233 }
6234
6235 DHD_OS_WAKE_LOCK(&dhd->pub);
6236 DHD_PERIM_LOCK(&dhd->pub);
6237
6238 #if defined(OEM_ANDROID)
6239 #ifndef ENABLE_INSMOD_NO_FW_LOAD
6240 /* Interface up check for built-in type */
6241 if (!dhd_download_fw_on_driverload && dhd->pub.up == FALSE) {
6242 DHD_TRACE(("%s: Interface is down \n", __FUNCTION__));
6243 DHD_PERIM_UNLOCK(&dhd->pub);
6244 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6245 return OSL_ERROR(BCME_NOTUP);
6246 }
6247 #endif /* ENABLE_INSMOD_NO_FW_LOAD */
6248 #endif /* (OEM_ANDROID) */
6249
6250 ifidx = dhd_net2idx(dhd, net);
6251 DHD_TRACE(("%s: ifidx %d, cmd 0x%04x\n", __FUNCTION__, ifidx, cmd));
6252
6253 #if defined(WL_STATIC_IF)
6254 /* skip for static ndev when it is down */
6255 if (dhd_is_static_ndev(&dhd->pub, net) && !(net->flags & IFF_UP)) {
6256 DHD_PERIM_UNLOCK(&dhd->pub);
6257 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6258 return -1;
6259 }
6260 #endif /* WL_STATIC_IF */
6261
6262 if (ifidx == DHD_BAD_IF) {
6263 DHD_ERROR(("%s: BAD IF\n", __FUNCTION__));
6264 DHD_PERIM_UNLOCK(&dhd->pub);
6265 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6266 return -1;
6267 }
6268
6269 #if defined(WL_WIRELESS_EXT)
6270 /* linux wireless extensions */
6271 if ((cmd >= SIOCIWFIRST) && (cmd <= SIOCIWLAST)) {
6272 /* may recurse, do NOT lock */
6273 ret = wl_iw_ioctl(net, ifr, cmd);
6274 DHD_PERIM_UNLOCK(&dhd->pub);
6275 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6276 return ret;
6277 }
6278 #endif /* defined(WL_WIRELESS_EXT) */
6279
6280 if (cmd == SIOCETHTOOL) {
6281 ret = dhd_ethtool(dhd, (void*)ifr->ifr_data);
6282 DHD_PERIM_UNLOCK(&dhd->pub);
6283 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6284 return ret;
6285 }
6286
6287 #if defined(OEM_ANDROID)
6288 if (cmd == SIOCDEVPRIVATE+1) {
6289 ret = wl_android_priv_cmd(net, ifr);
6290 dhd_check_hang(net, &dhd->pub, ret);
6291 DHD_PERIM_UNLOCK(&dhd->pub);
6292 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6293 return ret;
6294 }
6295
6296 #endif /* OEM_ANDROID */
6297
6298 if (cmd != SIOCDEVPRIVATE) {
6299 DHD_PERIM_UNLOCK(&dhd->pub);
6300 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6301 return -EOPNOTSUPP;
6302 }
6303
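/* The userspace buffer is laid out as a wl_ioctl_t immediately followed
 * by the extra dhd_ioctl_t fields: copy the common part first, then the
 * trailing 'driver' word that distinguishes dhd ioctls from wl ioctls.
 */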
6304 memset(&ioc, 0, sizeof(ioc));
6305
6306 {
6307 /* Copy the ioc control structure part of ioctl request */
6308 if (copy_from_user(&ioc, ifr->ifr_data, sizeof(wl_ioctl_t))) {
6309 bcmerror = BCME_BADADDR;
6310 goto done;
6311 }
6312
6313 /* To differentiate between wl and dhd ioctls, read 4 more bytes */
6314 if ((copy_from_user(&ioc.driver, (char *)ifr->ifr_data + sizeof(wl_ioctl_t),
6315 sizeof(uint)) != 0)) {
6316 bcmerror = BCME_BADADDR;
6317 goto done;
6318 }
6319 }
6320
6321 if (!capable(CAP_NET_ADMIN)) {
6322 bcmerror = BCME_EPERM;
6323 goto done;
6324 }
6325
6326 /* Take backup of ioc.buf and restore later */
6327 ioc_buf_user = ioc.buf;
6328
6329 if (ioc.len > 0) {
6330 buflen = MIN(ioc.len, DHD_IOCTL_MAXLEN);
6331 if (!(local_buf = MALLOC(dhd->pub.osh, buflen+1))) {
6332 bcmerror = BCME_NOMEM;
6333 goto done;
6334 }
6335
6336 DHD_PERIM_UNLOCK(&dhd->pub);
6337 if (copy_from_user(local_buf, ioc.buf, buflen)) {
6338 DHD_PERIM_LOCK(&dhd->pub);
6339 bcmerror = BCME_BADADDR;
6340 goto done;
6341 }
6342 DHD_PERIM_LOCK(&dhd->pub);
6343
6344 *((char *)local_buf + buflen) = '\0';
6345
6346 /* On some platforms, accessing the userspace memory behind
6347 * ioc.buf causes a kernel panic, so make ioc.buf point at the
6348 * kernel-space copy in local_buf instead
6349 */
6350 ioc.buf = local_buf;
6351 }
6352
6353 #if defined(OEM_ANDROID)
6354 /* Skip all the non DHD iovars (wl iovars) after f/w hang */
6355 if (ioc.driver != DHD_IOCTL_MAGIC && dhd->pub.hang_was_sent) {
6356 DHD_TRACE(("%s: HANG was sent up earlier\n", __FUNCTION__));
6357 DHD_OS_WAKE_LOCK_CTRL_TIMEOUT_ENABLE(&dhd->pub, DHD_EVENT_TIMEOUT_MS);
6358 bcmerror = BCME_DONGLE_DOWN;
6359 goto done;
6360 }
6361 #endif /* OEM_ANDROID */
6362
6363 bcmerror = dhd_ioctl_process(&dhd->pub, ifidx, &ioc, local_buf);
6364
6365 /* Restore back userspace pointer to ioc.buf */
6366 ioc.buf = ioc_buf_user;
6367
6368 if (!bcmerror && buflen && local_buf && ioc.buf) {
6369 DHD_PERIM_UNLOCK(&dhd->pub);
6370 if (copy_to_user(ioc.buf, local_buf, buflen))
6371 bcmerror = -EFAULT;
6372 DHD_PERIM_LOCK(&dhd->pub);
6373 }
6374
6375 done:
6376 if (local_buf)
6377 MFREE(dhd->pub.osh, local_buf, buflen+1);
6378
6379 DHD_PERIM_UNLOCK(&dhd->pub);
6380 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6381
6382 return OSL_ERROR(bcmerror);
6383 }
6384
6385 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
6386 /* Flag to indicate whether we distinguish the power-off policy when
6387 * the user sets the menu "Keep Wi-Fi on during sleep" to "Never"
6388 */
6389 int trigger_deep_sleep = 0;
6390 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
6391
6392 #ifdef FIX_CPU_MIN_CLOCK
6393 static int dhd_init_cpufreq_fix(dhd_info_t *dhd)
6394 {
6395 if (dhd) {
6396 #if defined(OEM_ANDROID)
6397 mutex_init(&dhd->cpufreq_fix);
6398 #endif // endif
6399 dhd->cpufreq_fix_status = FALSE;
6400 }
6401 return 0;
6402 }
6403
6404 static void dhd_fix_cpu_freq(dhd_info_t *dhd)
6405 {
6406 #if defined(OEM_ANDROID)
6407 mutex_lock(&dhd->cpufreq_fix);
6408 #endif // endif
6409 if (dhd && !dhd->cpufreq_fix_status) {
6410 pm_qos_add_request(&dhd->dhd_cpu_qos, PM_QOS_CPU_FREQ_MIN, 300000);
6411 #ifdef FIX_BUS_MIN_CLOCK
6412 pm_qos_add_request(&dhd->dhd_bus_qos, PM_QOS_BUS_THROUGHPUT, 400000);
6413 #endif /* FIX_BUS_MIN_CLOCK */
6414 DHD_ERROR(("pm_qos_add_requests called\n"));
6415
6416 dhd->cpufreq_fix_status = TRUE;
6417 }
6418 #if defined(OEM_ANDROID)
6419 mutex_unlock(&dhd->cpufreq_fix);
6420 #endif // endif
6421 }
6422
6423 static void dhd_rollback_cpu_freq(dhd_info_t *dhd)
6424 {
6425 #if defined(OEM_ANDROID)
6426 mutex_lock(&dhd->cpufreq_fix);
6427 #endif // endif
6428 if (dhd && dhd->cpufreq_fix_status != TRUE) {
6429 #if defined(OEM_ANDROID)
6430 mutex_unlock(&dhd->cpufreq_fix);
6431 #endif // endif
6432 return;
6433 }
6434
6435 pm_qos_remove_request(&dhd->dhd_cpu_qos);
6436 #ifdef FIX_BUS_MIN_CLOCK
6437 pm_qos_remove_request(&dhd->dhd_bus_qos);
6438 #endif /* FIX_BUS_MIN_CLOCK */
6439 DHD_ERROR(("pm_qos_add_requests called\n"));
6440
6441 dhd->cpufreq_fix_status = FALSE;
6442 #if defined(OEM_ANDROID)
6443 mutex_unlock(&dhd->cpufreq_fix);
6444 #endif // endif
6445 }
6446 #endif /* FIX_CPU_MIN_CLOCK */
6447
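/*
 * With native PCIe runtime PM, each ioctl is bracketed by a runtime-PM
 * get/put on the bus device, so the link is resumed for the duration of
 * the call and allowed to autosuspend afterwards.
 */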
6448 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
6449 static int
6450 dhd_ioctl_entry_wrapper(struct net_device *net, struct ifreq *ifr, int cmd)
6451 {
6452 int error;
6453 dhd_info_t *dhd = DHD_DEV_INFO(net);
6454
6455 if (atomic_read(&dhd->pub.block_bus))
6456 return -EHOSTDOWN;
6457
6458 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
6459 return BCME_ERROR;
6460
6461 error = dhd_ioctl_entry(net, ifr, cmd);
6462
6463 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
6464 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
6465
6466 return error;
6467 }
6468 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
6469
6470 #if defined(BT_OVER_SDIO)
6471
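/*
 * When BT shares the SDIO bus with WLAN, bus ownership is reference
 * counted under bus_user_lock: the first dhd_bus_get() powers WL_REG and
 * downloads firmware, and the last dhd_bus_put() resets the dongle and
 * powers it back off.
 */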
6472 void
6473 dhdsdio_bus_usr_cnt_inc(dhd_pub_t *dhdp)
6474 {
6475 dhdp->info->bus_user_count++;
6476 }
6477
6478 void
6479 dhdsdio_bus_usr_cnt_dec(dhd_pub_t *dhdp)
6480 {
6481 dhdp->info->bus_user_count--;
6482 }
6483
6484 /* Return values:
6485 * Success: Returns 0
6486 * Failure: Returns -1 or an errno code
6487 */
6488 int
6489 dhd_bus_get(wlan_bt_handle_t handle, bus_owner_t owner)
6490 {
6491 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6492 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6493 int ret = 0;
6494
6495 mutex_lock(&dhd->bus_user_lock);
6496 ++dhd->bus_user_count;
6497 if (dhd->bus_user_count < 0) {
6498 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6499 ret = -1;
6500 goto exit;
6501 }
6502
6503 if (dhd->bus_user_count == 1) {
6504
6505 dhd->pub.hang_was_sent = 0;
6506
6507 /* First user, turn on WL_REG, start the bus */
6508 DHD_ERROR(("%s(): First user Turn On WL_REG & start the bus", __FUNCTION__));
6509
6510 if (!wifi_platform_set_power(dhd->adapter, TRUE, WIFI_TURNON_DELAY)) {
6511 /* Enable F1 */
6512 ret = dhd_bus_resume(dhdp, 0);
6513 if (ret) {
6514 DHD_ERROR(("%s(): Failed to enable F1, err=%d\n",
6515 __FUNCTION__, ret));
6516 goto exit;
6517 }
6518 }
6519
6520 dhd_update_fw_nv_path(dhd);
6521 /* update firmware and nvram path to sdio bus */
6522 dhd_bus_update_fw_nv_path(dhd->pub.bus,
6523 dhd->fw_path, dhd->nv_path);
6524 /* download the firmware, Enable F2 */
6525 /* TODO: Should be done only in case of FW switch */
6526 ret = dhd_bus_devreset(dhdp, FALSE);
6527 dhd_bus_resume(dhdp, 1);
6528 if (!ret) {
6529 if (dhd_sync_with_dongle(&dhd->pub) < 0) {
6530 DHD_ERROR(("%s(): Sync with dongle failed!!\n", __FUNCTION__));
6531 ret = -EFAULT;
6532 }
6533 } else {
6534 DHD_ERROR(("%s(): Failed to download, err=%d\n", __FUNCTION__, ret));
6535 }
6536 } else {
6537 DHD_ERROR(("%s(): BUS is already acquired, just increase the count %d \r\n",
6538 __FUNCTION__, dhd->bus_user_count));
6539 }
6540 exit:
6541 mutex_unlock(&dhd->bus_user_lock);
6542 return ret;
6543 }
6544 EXPORT_SYMBOL(dhd_bus_get);
6545
6546 /* Return values:
6547 * Success: Returns 0
6548 * Failure: Returns -1 or an errno code
6549 */
6550 int
6551 dhd_bus_put(wlan_bt_handle_t handle, bus_owner_t owner)
6552 {
6553 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6554 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
6555 int ret = 0;
6556 BCM_REFERENCE(owner);
6557
6558 mutex_lock(&dhd->bus_user_lock);
6559 --dhd->bus_user_count;
6560 if (dhd->bus_user_count < 0) {
6561 DHD_ERROR(("%s(): bus_user_count is negative, which is invalid\n", __FUNCTION__));
6562 dhd->bus_user_count = 0;
6563 ret = -1;
6564 goto exit;
6565 }
6566
6567 if (dhd->bus_user_count == 0) {
6568 /* Last user, stop the bus and turn Off WL_REG */
6569 DHD_ERROR(("%s(): There are no owners left Trunf Off WL_REG & stop the bus \r\n",
6570 __FUNCTION__));
6571 #ifdef PROP_TXSTATUS
6572 if (dhd->pub.wlfc_enabled) {
6573 dhd_wlfc_deinit(&dhd->pub);
6574 }
6575 #endif /* PROP_TXSTATUS */
6576 #ifdef PNO_SUPPORT
6577 if (dhd->pub.pno_state) {
6578 dhd_pno_deinit(&dhd->pub);
6579 }
6580 #endif /* PNO_SUPPORT */
6581 #ifdef RTT_SUPPORT
6582 if (dhd->pub.rtt_state) {
6583 dhd_rtt_deinit(&dhd->pub);
6584 }
6585 #endif /* RTT_SUPPORT */
6586 ret = dhd_bus_devreset(dhdp, TRUE);
6587 if (!ret) {
6588 dhd_bus_suspend(dhdp);
6589 wifi_platform_set_power(dhd->adapter, FALSE, WIFI_TURNOFF_DELAY);
6590 }
6591 } else {
6592 DHD_ERROR(("%s(): Other owners using bus, decrease the count %d \r\n",
6593 __FUNCTION__, dhd->bus_user_count));
6594 }
6595 exit:
6596 mutex_unlock(&dhd->bus_user_lock);
6597 return ret;
6598 }
6599 EXPORT_SYMBOL(dhd_bus_put);
6600
6601 int
6602 dhd_net_bus_get(struct net_device *dev)
6603 {
6604 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6605 return dhd_bus_get(&dhd->pub, WLAN_MODULE);
6606 }
6607
6608 int
6609 dhd_net_bus_put(struct net_device *dev)
6610 {
6611 dhd_info_t *dhd = DHD_DEV_INFO(dev);
6612 return dhd_bus_put(&dhd->pub, WLAN_MODULE);
6613 }
6614
6615 /*
6616 * Function to enable the Bus Clock
6617 * Returns BCME_OK on success and BCME_xxx on failure
6618 *
6619 * This function is not callable from non-sleepable context
6620 */
6621 int dhd_bus_clk_enable(wlan_bt_handle_t handle, bus_owner_t owner)
6622 {
6623 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6624
6625 int ret;
6626
6627 dhd_os_sdlock(dhdp);
6628 /*
6629 * The third argument is TRUE, which means we expect the
6630 * function to wait until the clocks are actually
6631 * available
6632 */
6633 ret = __dhdsdio_clk_enable(dhdp->bus, owner, TRUE);
6634 dhd_os_sdunlock(dhdp);
6635
6636 return ret;
6637 }
6638 EXPORT_SYMBOL(dhd_bus_clk_enable);
6639
6640 /*
6641 * Function to disable the Bus Clock
6642 * Returns BCME_OK on success and BCME_xxx on failure
6643 *
6644 * This function is not callable from non-sleepable context
6645 */
6646 int dhd_bus_clk_disable(wlan_bt_handle_t handle, bus_owner_t owner)
6647 {
6648 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6649
6650 int ret;
6651
6652 dhd_os_sdlock(dhdp);
6653 /*
6654 * The third argument is TRUE, which means we expect the
6655 * function to wait until the clocks are actually
6656 * disabled
6657 */
6658 ret = __dhdsdio_clk_disable(dhdp->bus, owner, TRUE);
6659 dhd_os_sdunlock(dhdp);
6660
6661 return ret;
6662 }
6663 EXPORT_SYMBOL(dhd_bus_clk_disable);
6664
6665 /*
6666 * Function to reset bt_use_count counter to zero.
6667 *
6668 * This function is not callable from non-sleepable context
6669 */
6670 void dhd_bus_reset_bt_use_count(wlan_bt_handle_t handle)
6671 {
6672 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6673
6674 /* take the lock and reset bt use count */
6675 dhd_os_sdlock(dhdp);
6676 dhdsdio_reset_bt_use_count(dhdp->bus);
6677 dhd_os_sdunlock(dhdp);
6678 }
6679 EXPORT_SYMBOL(dhd_bus_reset_bt_use_count);
6680
6681 void dhd_bus_retry_hang_recovery(wlan_bt_handle_t handle)
6682 {
6683 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
6684 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
6685
6686 dhdp->hang_was_sent = 0;
6687
6688 dhd_os_send_hang_message(&dhd->pub);
6689 }
6690 EXPORT_SYMBOL(dhd_bus_retry_hang_recovery);
6691
6692 #endif /* BT_OVER_SDIO */
6693
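/*
 * ndo_stop (and internal) handler for the primary interface: flushes
 * associated stations, tears down cfg80211 state and virtual interfaces,
 * releases logtrace buffers and stops the protocol layer. Unless a static
 * or virtual interface is still operational (skip_reset), Wi-Fi is powered
 * off for builds that download firmware at interface-up time.
 */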
6694 static int
6695 dhd_stop(struct net_device *net)
6696 {
6697 int ifidx = 0;
6698 bool skip_reset = false;
6699 #if defined(WL_CFG80211)
6700 unsigned long flags = 0;
6701 #ifdef WL_STATIC_IF
6702 struct bcm_cfg80211 *cfg = wl_get_cfg(net);
6703 #endif /* WL_STATIC_IF */
6704 #endif /* WL_CFG80211 */
6705 dhd_info_t *dhd = DHD_DEV_INFO(net);
6706 DHD_OS_WAKE_LOCK(&dhd->pub);
6707 DHD_PERIM_LOCK(&dhd->pub);
6708 DHD_TRACE(("%s: Enter %p\n", __FUNCTION__, net));
6709 dhd->pub.rxcnt_timeout = 0;
6710 dhd->pub.txcnt_timeout = 0;
6711
6712 #ifdef BCMPCIE
6713 dhd->pub.d3ackcnt_timeout = 0;
6714 #endif /* BCMPCIE */
6715
6716 mutex_lock(&dhd->pub.ndev_op_sync);
6717
6718 if (dhd->pub.up == 0) {
6719 goto exit;
6720 }
6721 #if defined(DHD_HANG_SEND_UP_TEST)
6722 if (dhd->pub.req_hang_type) {
6723 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
6724 __FUNCTION__, dhd->pub.req_hang_type));
6725 dhd->pub.req_hang_type = 0;
6726 }
6727 #endif /* DHD_HANG_SEND_UP_TEST */
6728
6729 dhd_if_flush_sta(DHD_DEV_IFP(net));
6730
6731 #ifdef FIX_CPU_MIN_CLOCK
6732 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE)
6733 dhd_rollback_cpu_freq(dhd);
6734 #endif /* FIX_CPU_MIN_CLOCK */
6735
6736 ifidx = dhd_net2idx(dhd, net);
6737 BCM_REFERENCE(ifidx);
6738
6739 DHD_ERROR(("%s: ######### dhd_stop called for ifidx=%d #########\n", __FUNCTION__, ifidx));
6740
6741 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
6742 /* If static if is operational, don't reset the chip */
6743 if (static_if_ndev_get_state(cfg, net) == NDEV_STATE_FW_IF_CREATED) {
6744 DHD_ERROR(("static if operational. skip chip reset.\n"));
6745 skip_reset = true;
6746 wl_cfg80211_sta_ifdown(net);
6747 goto exit;
6748 }
6749 #endif /* WL_STATIC_IF && WL_CFG80211 */
6750
6751 #if defined(WL_VIF_SUPPORT)
6752 if (vif_num > 0) {
6753 DHD_ERROR(("virtual if operational. skip chip reset.\n"));
6754 skip_reset = true;
6755 wl_cfg80211_sta_ifdown(net);
6756 goto exit;
6757 }
6758 #endif /* WL_VIF_SUPPORT */
6759
6760 DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
6761 #ifdef WL_CFG80211
6762
6763 /* Disable Runtime PM before interface down */
6764 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
6765
6766 spin_lock_irqsave(&dhd->pub.up_lock, flags);
6767 dhd->pub.up = 0;
6768 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
6769 #else
6770 dhd->pub.up = 0;
6771 #endif /* WL_CFG80211 */
6772
6773 #ifdef WL_CFG80211
6774 if (ifidx == 0) {
6775 dhd_if_t *ifp;
6776 wl_cfg80211_down(net);
6777
6778 ifp = dhd->iflist[0];
6779 /*
6780 * For CFG80211: Clean up all the left over virtual interfaces
6781 * when the primary Interface is brought down. [ifconfig wlan0 down]
6782 */
6783 if (!dhd_download_fw_on_driverload) {
6784 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_OFF), ifidx, 0);
6785 if ((dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) &&
6786 (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211)) {
6787 int i;
6788 #ifdef WL_CFG80211_P2P_DEV_IF
6789 wl_cfg80211_del_p2p_wdev(net);
6790 #endif /* WL_CFG80211_P2P_DEV_IF */
6791 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
6792 dhd_cleanup_m4_state_work(&dhd->pub, ifidx);
6793 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
6794 #ifdef DHD_PKTDUMP_ROAM
6795 dhd_dump_pkt_clear(&dhd->pub);
6796 #endif /* DHD_PKTDUMP_ROAM */
6797
6798 dhd_net_if_lock_local(dhd);
6799 for (i = 1; i < DHD_MAX_IFS; i++)
6800 dhd_remove_if(&dhd->pub, i, FALSE);
6801
6802 if (ifp && ifp->net) {
6803 dhd_if_del_sta_list(ifp);
6804 }
6805 #ifdef ARP_OFFLOAD_SUPPORT
6806 if (dhd_inetaddr_notifier_registered) {
6807 dhd_inetaddr_notifier_registered = FALSE;
6808 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
6809 }
6810 #endif /* ARP_OFFLOAD_SUPPORT */
6811 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
6812 if (dhd_inet6addr_notifier_registered) {
6813 dhd_inet6addr_notifier_registered = FALSE;
6814 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
6815 }
6816 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
6817 dhd_net_if_unlock_local(dhd);
6818 }
6819 cancel_work_sync(dhd->dhd_deferred_wq);
6820
6821 #ifdef SHOW_LOGTRACE
6822 /* Wait till event logs work/kthread finishes */
6823 dhd_cancel_logtrace_process_sync(dhd);
6824 #endif /* SHOW_LOGTRACE */
6825
6826 #if defined(DHD_LB_RXP)
6827 __skb_queue_purge(&dhd->rx_pend_queue);
6828 #endif /* DHD_LB_RXP */
6829
6830 #if defined(DHD_LB_TXP)
6831 skb_queue_purge(&dhd->tx_pend_queue);
6832 #endif /* DHD_LB_TXP */
6833 }
6834
6835 #if defined(ARGOS_NOTIFY_CB)
6836 argos_register_notifier_deinit();
6837 #endif // endif
6838 #ifdef DHDTCPACK_SUPPRESS
6839 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
6840 #endif /* DHDTCPACK_SUPPRESS */
6841 #if defined(DHD_LB_RXP)
6842 if (ifp && ifp->net == dhd->rx_napi_netdev) {
6843 DHD_INFO(("%s napi<%p> disabled ifp->net<%p,%s>\n",
6844 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
6845 skb_queue_purge(&dhd->rx_napi_queue);
6846 napi_disable(&dhd->rx_napi_struct);
6847 netif_napi_del(&dhd->rx_napi_struct);
6848 dhd->rx_napi_netdev = NULL;
6849 }
6850 #endif /* DHD_LB_RXP */
6851 }
6852 #endif /* WL_CFG80211 */
6853
6854 DHD_SSSR_DUMP_DEINIT(&dhd->pub);
6855
6856 #ifdef PROP_TXSTATUS
6857 dhd_wlfc_cleanup(&dhd->pub, NULL, 0);
6858 #endif // endif
6859 #ifdef SHOW_LOGTRACE
6860 if (!dhd_download_fw_on_driverload) {
6861 /* Release the skbs from queue for WLC_E_TRACE event */
6862 dhd_event_logtrace_flush_queue(&dhd->pub);
6863 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
6864 if (dhd->event_data.fmts) {
6865 MFREE(dhd->pub.osh, dhd->event_data.fmts,
6866 dhd->event_data.fmts_size);
6867 dhd->event_data.fmts = NULL;
6868 }
6869 if (dhd->event_data.raw_fmts) {
6870 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
6871 dhd->event_data.raw_fmts_size);
6872 dhd->event_data.raw_fmts = NULL;
6873 }
6874 if (dhd->event_data.raw_sstr) {
6875 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
6876 dhd->event_data.raw_sstr_size);
6877 dhd->event_data.raw_sstr = NULL;
6878 }
6879 if (dhd->event_data.rom_raw_sstr) {
6880 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
6881 dhd->event_data.rom_raw_sstr_size);
6882 dhd->event_data.rom_raw_sstr = NULL;
6883 }
6884 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
6885 }
6886 }
6887 #endif /* SHOW_LOGTRACE */
6888 #ifdef APF
6889 dhd_dev_apf_delete_filter(net);
6890 #endif /* APF */
6891
6892 /* Stop the protocol module */
6893 dhd_prot_stop(&dhd->pub);
6894
6895 OLD_MOD_DEC_USE_COUNT;
6896 exit:
6897 if (skip_reset == false) {
6898 #if defined(WL_CFG80211) && defined(OEM_ANDROID)
6899 if (ifidx == 0 && !dhd_download_fw_on_driverload) {
6900 #if defined(BT_OVER_SDIO)
6901 dhd_bus_put(&dhd->pub, WLAN_MODULE);
6902 wl_android_set_wifi_on_flag(FALSE);
6903 #else
6904 wl_android_wifi_off(net, TRUE);
6905 #endif /* BT_OVER_SDIO */
6906 }
6907 #ifdef SUPPORT_DEEP_SLEEP
6908 else {
6909 /* CSP#505233: Flag to indicate whether we distinguish
6910 * the power-off policy when the user sets the menu
6911 * "Keep Wi-Fi on during sleep" to "Never"
6912 */
6913 if (trigger_deep_sleep) {
6914 dhd_deepsleep(net, 1);
6915 trigger_deep_sleep = 0;
6916 }
6917 }
6918 #endif /* SUPPORT_DEEP_SLEEP */
6919 #endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
6920 dhd->pub.hang_was_sent = 0;
6921 dhd->pub.hang_was_pending = 0;
6922
6923 /* Clear country spec for built-in type driver */
6924 if (!dhd_download_fw_on_driverload) {
6925 dhd->pub.dhd_cspec.country_abbrev[0] = 0x00;
6926 dhd->pub.dhd_cspec.rev = 0;
6927 dhd->pub.dhd_cspec.ccode[0] = 0x00;
6928 }
6929
6930 #ifdef BCMDBGFS
6931 dhd_dbgfs_remove();
6932 #endif // endif
6933 }
6934
6935 DHD_PERIM_UNLOCK(&dhd->pub);
6936 DHD_OS_WAKE_UNLOCK(&dhd->pub);
6937
6938 /* Destroy wakelock */
6939 if (!dhd_download_fw_on_driverload &&
6940 (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) &&
6941 (skip_reset == false)) {
6942 DHD_OS_WAKE_LOCK_DESTROY(dhd);
6943 dhd->dhd_state &= ~DHD_ATTACH_STATE_WAKELOCKS_INIT;
6944 }
6945
6946 mutex_unlock(&dhd->pub.ndev_op_sync);
6947 return 0;
6948 }
6949
6950 #if defined(OEM_ANDROID) && defined(WL_CFG80211) && (defined(USE_INITIAL_2G_SCAN) || \
6951 defined(USE_INITIAL_SHORT_DWELL_TIME))
6952 extern bool g_first_broadcast_scan;
6953 #endif /* OEM_ANDROID && WL_CFG80211 && (USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME) */
6954
6955 #ifdef WL11U
6956 static int dhd_interworking_enable(dhd_pub_t *dhd)
6957 {
6958 uint32 enable = true;
6959 int ret = BCME_OK;
6960
6961 ret = dhd_iovar(dhd, 0, "interworking", (char *)&enable, sizeof(enable), NULL, 0, TRUE);
6962 if (ret < 0) {
6963 DHD_ERROR(("%s: enableing interworking failed, ret=%d\n", __FUNCTION__, ret));
6964 }
6965
6966 return ret;
6967 }
6968 #endif /* WL11U */
6969
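/*
 * ndo_open (and internal) handler: resets error/hang bookkeeping, powers
 * the chip on and starts the bus if needed, restores the TOE checksum
 * features and NAPI state, brings up cfg80211 and re-registers the
 * inetaddr/inet6addr notifiers. On any failure dhd_stop() unwinds.
 */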
6970 static int
6971 dhd_open(struct net_device *net)
6972 {
6973 dhd_info_t *dhd = DHD_DEV_INFO(net);
6974 #ifdef TOE
6975 uint32 toe_ol;
6976 #endif // endif
6977 int ifidx;
6978 int32 ret = 0;
6979
6980 #if defined(PREVENT_REOPEN_DURING_HANG)
6981 /* WAR : to prevent calling dhd_open abnormally in quick succession after hang event */
6982 if (dhd->pub.hang_was_sent == 1) {
6983 DHD_ERROR(("%s: HANG was sent up earlier\n", __FUNCTION__));
6984 /* Force to bring down WLAN interface in case dhd_stop() is not called
6985 * from the upper layer when HANG event is triggered.
6986 */
6987 if (!dhd_download_fw_on_driverload && dhd->pub.up == 1) {
6988 DHD_ERROR(("%s: WLAN interface is not brought down\n", __FUNCTION__));
6989 dhd_stop(net);
6990 } else {
6991 return -1;
6992 }
6993 }
6994 #endif /* PREVENT_REOPEN_DURING_HANG */
6995
6996 mutex_lock(&dhd->pub.ndev_op_sync);
6997
6998 if (dhd->pub.up == 1) {
6999 /* already up */
7000 DHD_ERROR(("Primary net_device is already up \n"));
7001 mutex_unlock(&dhd->pub.ndev_op_sync);
7002 return BCME_OK;
7003 }
7004
7005 if (!dhd_download_fw_on_driverload) {
7006 if (!dhd_driver_init_done) {
7007 DHD_ERROR(("%s: WLAN driver is not initialized\n", __FUNCTION__));
7008 mutex_unlock(&dhd->pub.ndev_op_sync);
7009 return -1;
7010 }
7011 /* Init wakelock */
7012 if (!(dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
7013 DHD_OS_WAKE_LOCK_INIT(dhd);
7014 dhd->dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
7015 }
7016
7017 #ifdef SHOW_LOGTRACE
7018 skb_queue_head_init(&dhd->evt_trace_queue);
7019
7020 if (!(dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT)) {
7021 ret = dhd_init_logstrs_array(dhd->pub.osh, &dhd->event_data);
7022 if (ret == BCME_OK) {
7023 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7024 st_str_file_path, map_file_path);
7025 dhd_init_static_strs_array(dhd->pub.osh, &dhd->event_data,
7026 rom_st_str_file_path, rom_map_file_path);
7027 dhd->dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
7028 }
7029 }
7030 #endif /* SHOW_LOGTRACE */
7031 }
7032
7033 #if defined(MULTIPLE_SUPPLICANT)
7034 #if defined(OEM_ANDROID) && defined(BCMSDIO)
7035 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
7036 DHD_ERROR(("%s : dhd_open: call dev open before insmod complete!\n", __FUNCTION__));
7037 }
7038 mutex_lock(&_dhd_sdio_mutex_lock_);
7039 #endif // endif
7040 #endif /* MULTIPLE_SUPPLICANT */
7041
7042 DHD_OS_WAKE_LOCK(&dhd->pub);
7043 DHD_PERIM_LOCK(&dhd->pub);
7044 dhd->pub.dongle_trap_occured = 0;
7045 dhd->pub.hang_was_sent = 0;
7046 dhd->pub.hang_was_pending = 0;
7047 dhd->pub.hang_reason = 0;
7048 dhd->pub.iovar_timeout_occured = 0;
7049 #ifdef PCIE_FULL_DONGLE
7050 dhd->pub.d3ack_timeout_occured = 0;
7051 dhd->pub.livelock_occured = 0;
7052 dhd->pub.pktid_audit_failed = 0;
7053 #endif /* PCIE_FULL_DONGLE */
7054 dhd->pub.iface_op_failed = 0;
7055 dhd->pub.scan_timeout_occurred = 0;
7056 dhd->pub.scan_busy_occurred = 0;
7057 dhd->pub.smmu_fault_occurred = 0;
7058
7059 #ifdef DHD_LOSSLESS_ROAMING
7060 dhd->pub.dequeue_prec_map = ALLPRIO;
7061 #endif // endif
7062
7063 #if defined(OEM_ANDROID) && !defined(WL_CFG80211)
7064 /*
7065 * Force a start if ifconfig up gets called before the START command.
7066 * We keep WEXT's wl_control_wl_start to provide backward compatibility;
7067 * this should be removed in the future
7068 */
7069 ret = wl_control_wl_start(net);
7070 if (ret != 0) {
7071 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7072 ret = -1;
7073 goto exit;
7074 }
7075
7076 #endif /* defined(OEM_ANDROID) && !defined(WL_CFG80211) */
7077
7078 ifidx = dhd_net2idx(dhd, net);
7079 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
7080
7081 if (ifidx < 0) {
7082 DHD_ERROR(("%s: Error: called with invalid IF\n", __FUNCTION__));
7083 ret = -1;
7084 goto exit;
7085 }
7086
7087 if (!dhd->iflist[ifidx]) {
7088 DHD_ERROR(("%s: Error: called when IF already deleted\n", __FUNCTION__));
7089 ret = -1;
7090 goto exit;
7091 }
7092
7093 if (ifidx == 0) {
7094 atomic_set(&dhd->pend_8021x_cnt, 0);
7095 #if defined(WL_CFG80211) && defined(OEM_ANDROID)
7096 if (!dhd_download_fw_on_driverload) {
7097 DHD_ERROR(("\n%s\n", dhd_version));
7098 DHD_STATLOG_CTRL(&dhd->pub, ST(WLAN_POWER_ON), ifidx, 0);
7099 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
7100 g_first_broadcast_scan = TRUE;
7101 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
7102 #ifdef SHOW_LOGTRACE
7103 /* dhd_cancel_logtrace_process_sync is called in dhd_stop
7104 * for built-in models. Need to start logtrace kthread before
7105 * calling wifi on, because once wifi is on, EDL will be in action
7106 * any moment, and if kthread is not active, FW event logs will
7107 * not be available
7108 */
7109 if (dhd_reinit_logtrace_process(dhd) != BCME_OK) {
7110 goto exit;
7111 }
7112 #endif /* SHOW_LOGTRACE */
7113 #if defined(BT_OVER_SDIO)
7114 ret = dhd_bus_get(&dhd->pub, WLAN_MODULE);
7115 wl_android_set_wifi_on_flag(TRUE);
7116 #else
7117 ret = wl_android_wifi_on(net);
7118 #endif /* BT_OVER_SDIO */
7119 if (ret != 0) {
7120 DHD_ERROR(("%s : wl_android_wifi_on failed (%d)\n",
7121 __FUNCTION__, ret));
7122 ret = -1;
7123 goto exit;
7124 }
7125 }
7126 #ifdef SUPPORT_DEEP_SLEEP
7127 else {
7128 /* Flag to indicate whether we distinguish
7129 * the power-off policy when the user sets the menu
7130 * "Keep Wi-Fi on during sleep" to "Never"
7131 */
7132 if (trigger_deep_sleep) {
7133 #if defined(USE_INITIAL_2G_SCAN) || defined(USE_INITIAL_SHORT_DWELL_TIME)
7134 g_first_broadcast_scan = TRUE;
7135 #endif /* USE_INITIAL_2G_SCAN || USE_INITIAL_SHORT_DWELL_TIME */
7136 dhd_deepsleep(net, 0);
7137 trigger_deep_sleep = 0;
7138 }
7139 }
7140 #endif /* SUPPORT_DEEP_SLEEP */
7141 #ifdef FIX_CPU_MIN_CLOCK
7142 if (dhd_get_fw_mode(dhd) == DHD_FLAG_HOSTAP_MODE) {
7143 dhd_init_cpufreq_fix(dhd);
7144 dhd_fix_cpu_freq(dhd);
7145 }
7146 #endif /* FIX_CPU_MIN_CLOCK */
7147 #endif /* defined(WL_CFG80211) && defined(OEM_ANDROID) */
7148
7149 if (dhd->pub.busstate != DHD_BUS_DATA) {
7150
7151 /* try to bring up bus */
7152 DHD_PERIM_UNLOCK(&dhd->pub);
7153
7154 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
7155 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) >= 0) {
7156 ret = dhd_bus_start(&dhd->pub);
7157 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
7158 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
7159 }
7160 #else
7161 ret = dhd_bus_start(&dhd->pub);
7162 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
7163
7164 DHD_PERIM_LOCK(&dhd->pub);
7165 if (ret) {
7166 DHD_ERROR(("%s: failed with code %d\n", __FUNCTION__, ret));
7167 ret = -1;
7168 goto exit;
7169 }
7170
7171 }
7172
7173 #ifdef BT_OVER_SDIO
7174 if (dhd->pub.is_bt_recovery_required) {
7175 DHD_ERROR(("%s: Send Hang Notification 2 to BT\n", __FUNCTION__));
7176 bcmsdh_btsdio_process_dhd_hang_notification(TRUE);
7177 }
7178 dhd->pub.is_bt_recovery_required = FALSE;
7179 #endif // endif
7180
7181 /* dhd_sync_with_dongle has been called in dhd_bus_start or wl_android_wifi_on */
7182 memcpy(net->dev_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
7183
7184 #ifdef TOE
7185 /* Get current TOE mode from dongle */
7186 if (dhd_toe_get(dhd, ifidx, &toe_ol) >= 0 && (toe_ol & TOE_TX_CSUM_OL) != 0) {
7187 dhd->iflist[ifidx]->net->features |= NETIF_F_IP_CSUM;
7188 } else {
7189 dhd->iflist[ifidx]->net->features &= ~NETIF_F_IP_CSUM;
7190 }
7191 #endif /* TOE */
7192
7193 #if defined(DHD_LB_RXP)
7194 __skb_queue_head_init(&dhd->rx_pend_queue);
7195 if (dhd->rx_napi_netdev == NULL) {
7196 dhd->rx_napi_netdev = dhd->iflist[ifidx]->net;
7197 memset(&dhd->rx_napi_struct, 0, sizeof(struct napi_struct));
7198 netif_napi_add(dhd->rx_napi_netdev, &dhd->rx_napi_struct,
7199 dhd_napi_poll, dhd_napi_weight);
7200 DHD_INFO(("%s napi<%p> enabled ifp->net<%p,%s>\n",
7201 __FUNCTION__, &dhd->rx_napi_struct, net, net->name));
7202 napi_enable(&dhd->rx_napi_struct);
7203 DHD_INFO(("%s load balance init rx_napi_struct\n", __FUNCTION__));
7204 skb_queue_head_init(&dhd->rx_napi_queue);
7205 } /* rx_napi_netdev == NULL */
7206 #endif /* DHD_LB_RXP */
7207
7208 #if defined(DHD_LB_TXP)
7209 /* Use the variant that uses locks */
7210 skb_queue_head_init(&dhd->tx_pend_queue);
7211 #endif /* DHD_LB_TXP */
7212
7213 #if defined(WL_CFG80211)
7214 if (unlikely(wl_cfg80211_up(net))) {
7215 DHD_ERROR(("%s: failed to bring up cfg80211\n", __FUNCTION__));
7216 ret = -1;
7217 goto exit;
7218 }
7219 if (!dhd_download_fw_on_driverload) {
7220 #ifdef ARP_OFFLOAD_SUPPORT
7221 dhd->pend_ipaddr = 0;
7222 if (!dhd_inetaddr_notifier_registered) {
7223 dhd_inetaddr_notifier_registered = TRUE;
7224 register_inetaddr_notifier(&dhd_inetaddr_notifier);
7225 }
7226 #endif /* ARP_OFFLOAD_SUPPORT */
7227 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
7228 if (!dhd_inet6addr_notifier_registered) {
7229 dhd_inet6addr_notifier_registered = TRUE;
7230 register_inet6addr_notifier(&dhd_inet6addr_notifier);
7231 }
7232 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
7233 }
7234
7235 #if defined(DHD_CONTROL_PCIE_ASPM_WIFI_TURNON)
7236 dhd_bus_aspm_enable_rc_ep(dhd->pub.bus, TRUE);
7237 #endif /* DHD_CONTROL_PCIE_ASPM_WIFI_TURNON */
7238 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
7239 dhd_irq_set_affinity(&dhd->pub, cpumask_of(0));
7240 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
7241 #ifdef DHD_LB_IRQSET
7242 dhd_irq_set_affinity(&dhd->pub, dhd->cpumask_primary);
7243 #endif /* DHD_LB_IRQSET */
7244 #if defined(ARGOS_NOTIFY_CB)
7245 argos_register_notifier_init(net);
7246 #endif // endif
7247 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
7248 /* TCP ACK suppression starts out disabled regardless of SET_RPS_CPUS */
7249 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
7250 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
7254 #if defined(NUM_SCB_MAX_PROBE)
7255 dhd_set_scb_probe(&dhd->pub);
7256 #endif /* NUM_SCB_MAX_PROBE */
7257 #endif /* WL_CFG80211 */
7258 }
7259
7260 dhd->pub.up = 1;
7261
7262 if (wl_event_enable) {
7263 /* For wl utility to receive events */
7264 dhd->pub.wl_event_enabled = true;
7265 } else {
7266 dhd->pub.wl_event_enabled = false;
7267 }
7268
7269 if (logtrace_pkt_sendup) {
7270 /* For any daemon to receive logtrace */
7271 dhd->pub.logtrace_pkt_sendup = true;
7272 } else {
7273 dhd->pub.logtrace_pkt_sendup = false;
7274 }
7275
7276 OLD_MOD_INC_USE_COUNT;
7277
7278 #ifdef BCMDBGFS
7279 dhd_dbgfs_init(&dhd->pub);
7280 #endif // endif
7281
7282 exit:
7283 mutex_unlock(&dhd->pub.ndev_op_sync);
7284 if (ret) {
7285 dhd_stop(net);
7286 }
7287
7288 DHD_PERIM_UNLOCK(&dhd->pub);
7289 DHD_OS_WAKE_UNLOCK(&dhd->pub);
7290
7291 #if defined(MULTIPLE_SUPPLICANT)
7292 #if defined(OEM_ANDROID) && defined(BCMSDIO)
7293 mutex_unlock(&_dhd_sdio_mutex_lock_);
7294 #endif // endif
7295 #endif /* MULTIPLE_SUPPLICANT */
7296
7297 return ret;
7298 }
7299
/*
 * ndo_open handler for the primary ndev
 */
7303 static int
7304 dhd_pri_open(struct net_device *net)
7305 {
7306 s32 ret;
7307
7308 ret = dhd_open(net);
7309 if (unlikely(ret)) {
7310 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
7311 return ret;
7312 }
7313
7314 /* Allow transmit calls */
7315 netif_start_queue(net);
7316 DHD_ERROR(("[%s] tx queue started\n", net->name));
7317 return ret;
7318 }
7319
7320 /*
7321 * ndo_stop handler for primary ndev
7322 */
7323 static int
7324 dhd_pri_stop(struct net_device *net)
7325 {
7326 s32 ret;
7327
7328 /* stop tx queue */
7329 netif_stop_queue(net);
7330 DHD_ERROR(("[%s] tx queue stopped\n", net->name));
7331
7332 ret = dhd_stop(net);
7333 if (unlikely(ret)) {
7334 DHD_ERROR(("dhd_stop failed: %d\n", ret));
7335 return ret;
7336 }
7337
7338 return ret;
7339 }
7340
7341 #if defined(WL_STATIC_IF) && defined(WL_CFG80211)
7342 /*
7343 * For static I/Fs, the firmware interface init
7344 * is done from the IFF_UP context.
7345 */
7346 static int
7347 dhd_static_if_open(struct net_device *net)
7348 {
7349 s32 ret = 0;
7350 struct bcm_cfg80211 *cfg;
7351 struct net_device *primary_netdev = NULL;
7352
7353 cfg = wl_get_cfg(net);
7354 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
7355
7356 if (!is_static_iface(cfg, net)) {
		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
7358 ret = BCME_OK;
7359 goto done;
7360 }
7361
7362 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
7363 /* Ensure fw is initialized. If it is already initialized,
7364 * dhd_open will return success.
7365 */
7366 ret = dhd_open(primary_netdev);
7367 if (unlikely(ret)) {
7368 DHD_ERROR(("Failed to open primary dev ret %d\n", ret));
7369 goto done;
7370 }
7371
7372 ret = wl_cfg80211_static_if_open(net);
7373 if (!ret) {
7374 /* Allow transmit calls */
7375 netif_start_queue(net);
7376 }
7377 done:
7378 return ret;
7379 }
7380
7381 static int
7382 dhd_static_if_stop(struct net_device *net)
7383 {
7384 struct bcm_cfg80211 *cfg;
7385 struct net_device *primary_netdev = NULL;
7386 int ret = BCME_OK;
7387 dhd_info_t *dhd = DHD_DEV_INFO(net);
7388
7389 DHD_INFO(("[%s][STATIC_IF] Enter \n", net->name));
7390
7391 /* Ensure queue is disabled */
7392 netif_tx_disable(net);
7393
7394 cfg = wl_get_cfg(net);
7395 if (!is_static_iface(cfg, net)) {
		DHD_TRACE(("non-static interface (%s), do nothing\n", net->name));
7397 return BCME_OK;
7398 }
7399
7400 ret = wl_cfg80211_static_if_close(net);
7401
7402 if (dhd->pub.up == 0) {
7403 /* If fw is down, return */
7404 DHD_ERROR(("fw down\n"));
7405 return BCME_OK;
7406 }
	/* If the primary (STA) interface is not operational, invoke dhd_stop
	 * from this context.
	 */
7410 primary_netdev = bcmcfg_to_prmry_ndev(cfg);
7411 if (!(primary_netdev->flags & IFF_UP)) {
7412 ret = dhd_stop(primary_netdev);
7413 } else {
7414 DHD_ERROR(("Skipped dhd_stop, as sta is operational\n"));
7415 }
7416
7417 return ret;
7418 }
#endif /* WL_STATIC_IF && WL_CFG80211 */
7420
7421 int dhd_do_driver_init(struct net_device *net)
7422 {
7423 dhd_info_t *dhd = NULL;
7424
7425 if (!net) {
7426 DHD_ERROR(("Primary Interface not initialized \n"));
7427 return -EINVAL;
7428 }
7429
7430 #ifdef MULTIPLE_SUPPLICANT
7431 #if defined(OEM_ANDROID) && defined(BCMSDIO)
7432 if (mutex_is_locked(&_dhd_sdio_mutex_lock_) != 0) {
7433 DHD_ERROR(("%s : dhdsdio_probe is already running!\n", __FUNCTION__));
7434 return 0;
7435 }
7436 #endif /* OEM_ANDROID & BCMSDIO */
7437 #endif /* MULTIPLE_SUPPLICANT */
7438
7440 dhd = DHD_DEV_INFO(net);
7441
	/* If the driver is already initialized, do nothing */
7444 if (dhd->pub.busstate == DHD_BUS_DATA) {
		DHD_TRACE(("Driver already initialized. Nothing to do\n"));
7446 return 0;
7447 }
7448
7449 if (dhd_open(net) < 0) {
7450 DHD_ERROR(("Driver Init Failed \n"));
7451 return -1;
7452 }
7453
7454 return 0;
7455 }
7456
7457 int
7458 dhd_event_ifadd(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7459 {
7460
7461 #ifdef WL_CFG80211
7462 if (wl_cfg80211_notify_ifadd(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7463 ifevent->ifidx, name, mac, ifevent->bssidx, ifevent->role) == BCME_OK)
7464 return BCME_OK;
7465 #endif // endif
7466
7467 /* handle IF event caused by wl commands, SoftAP, WEXT and
7468 * anything else. This has to be done asynchronously otherwise
7469 * DPC will be blocked (and iovars will timeout as DPC has no chance
7470 * to read the response back)
7471 */
7472 if (ifevent->ifidx > 0) {
7473 dhd_if_event_t *if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7474 if (if_event == NULL) {
7475 DHD_ERROR(("dhd_event_ifadd: Failed MALLOC, malloced %d bytes",
7476 MALLOCED(dhdinfo->pub.osh)));
7477 return BCME_NOMEM;
7478 }
7479
7480 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7481 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7482 strncpy(if_event->name, name, IFNAMSIZ);
7483 if_event->name[IFNAMSIZ - 1] = '\0';
7484 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event,
7485 DHD_WQ_WORK_IF_ADD, dhd_ifadd_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7486 }
7487
7488 return BCME_OK;
7489 }
7490
7491 int
7492 dhd_event_ifdel(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7493 {
7494 dhd_if_event_t *if_event;
7495
7496 #ifdef WL_CFG80211
7497 if (wl_cfg80211_notify_ifdel(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7498 ifevent->ifidx, name, mac, ifevent->bssidx) == BCME_OK)
7499 return BCME_OK;
7500 #endif /* WL_CFG80211 */
7501
7502 /* handle IF event caused by wl commands, SoftAP, WEXT and
7503 * anything else
7504 */
7505 if_event = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_event_t));
7506 if (if_event == NULL) {
7507 DHD_ERROR(("dhd_event_ifdel: malloc failed for if_event, malloced %d bytes",
7508 MALLOCED(dhdinfo->pub.osh)));
7509 return BCME_NOMEM;
7510 }
7511 memcpy(&if_event->event, ifevent, sizeof(if_event->event));
7512 memcpy(if_event->mac, mac, ETHER_ADDR_LEN);
7513 strncpy(if_event->name, name, IFNAMSIZ);
7514 if_event->name[IFNAMSIZ - 1] = '\0';
7515 dhd_deferred_schedule_work(dhdinfo->dhd_deferred_wq, (void *)if_event, DHD_WQ_WORK_IF_DEL,
7516 dhd_ifdel_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7517
7518 return BCME_OK;
7519 }
7520
7521 int
7522 dhd_event_ifchange(dhd_info_t *dhdinfo, wl_event_data_if_t *ifevent, char *name, uint8 *mac)
7523 {
7524 #ifdef WL_CFG80211
7525 wl_cfg80211_notify_ifchange(dhd_linux_get_primary_netdev(&dhdinfo->pub),
7526 ifevent->ifidx, name, mac, ifevent->bssidx);
7527 #endif /* WL_CFG80211 */
7528 return BCME_OK;
7529 }
7530
7531 #ifdef WL_NATOE
/* Handler to update natoe info and bind new subscriptions if there is a change in config */
static void
dhd_natoe_ct_event_handler(void *handle, void *event_info, u8 event)
7535 {
	dhd_info_t *dhd = handle;
	wl_event_data_natoe_t *natoe = event_info;
	dhd_nfct_info_t *nfct;

	if (event != DHD_WQ_WORK_NATOE_EVENT) {
		DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
		return;
	}

	if (!dhd) {
		DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
		return;
	}
	/* Dereference dhd only after the NULL check above */
	nfct = dhd->pub.nfct;
7549 if (natoe->natoe_active && natoe->sta_ip && natoe->start_port && natoe->end_port &&
7550 (natoe->start_port < natoe->end_port)) {
7551 /* Rebind subscriptions to start receiving notifications from groups */
7552 if (dhd_ct_nl_bind(nfct, nfct->subscriptions) < 0) {
7553 dhd_ct_close(nfct);
7554 }
7555 dhd_ct_send_dump_req(nfct);
7556 } else if (!natoe->natoe_active) {
7557 /* Rebind subscriptions to stop receiving notifications from groups */
7558 if (dhd_ct_nl_bind(nfct, CT_NULL_SUBSCRIPTION) < 0) {
7559 dhd_ct_close(nfct);
7560 }
7561 }
7562 }
7563
/* When a NATOE enable/disable event is received, we have to bind with new NL subscriptions.
 * Schedule a workq to switch out of tasklet context, as the bind call may sleep in the handler.
 */
7567 int
7568 dhd_natoe_ct_event(dhd_pub_t *dhd, char *data)
7569 {
7570 wl_event_data_natoe_t *event_data = (wl_event_data_natoe_t *)data;
7571
7572 if (dhd->nfct) {
7573 wl_event_data_natoe_t *natoe = dhd->nfct->natoe_info;
7574 uint8 prev_enable = natoe->natoe_active;
7575
7576 spin_lock_bh(&dhd->nfct_lock);
7577 memcpy(natoe, event_data, sizeof(*event_data));
7578 spin_unlock_bh(&dhd->nfct_lock);
7579
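		/* natoe points into dhd->nfct (not a private copy), so it
		 * remains valid when the deferred handler runs; schedule work
		 * only on an actual enable/disable transition.
		 */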
7580 if (prev_enable != event_data->natoe_active) {
7581 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
7582 (void *)natoe, DHD_WQ_WORK_NATOE_EVENT,
				dhd_natoe_ct_event_handler, DHD_WQ_WORK_PRIORITY_LOW);
7584 }
7585 return BCME_OK;
7586 }
7587 DHD_ERROR(("%s ERROR NFCT is not enabled \n", __FUNCTION__));
7588 return BCME_ERROR;
7589 }
7590
7591 /* Handler to send natoe ioctl to dongle */
7592 static void
7593 dhd_natoe_ct_ioctl_handler(void *handle, void *event_info, uint8 event)
7594 {
7595 dhd_info_t *dhd = handle;
7596 dhd_ct_ioc_t *ct_ioc = event_info;
7597
7598 if (event != DHD_WQ_WORK_NATOE_IOCTL) {
7599 DHD_ERROR(("%s: unexpected event \n", __FUNCTION__));
7600 return;
7601 }
7602
7603 if (!dhd) {
7604 DHD_ERROR(("%s: dhd info not available \n", __FUNCTION__));
7605 return;
7606 }
7607
7608 if (dhd_natoe_prep_send_exception_port_ioctl(&dhd->pub, ct_ioc) < 0) {
7609 DHD_ERROR(("%s: Error in sending NATOE IOCTL \n", __FUNCTION__));
7610 }
7611 }
7612
7613 /* When Netlink message contains port collision info, the info must be sent to dongle FW
7614 * For that we have to switch context from softirq/tasklet by scheduling workq for natoe_ct ioctl
7615 */
7616 void
7617 dhd_natoe_ct_ioctl_schedule_work(dhd_pub_t *dhd, dhd_ct_ioc_t *ioc)
7618 {
7619
7620 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq, (void *)ioc,
7621 DHD_WQ_WORK_NATOE_IOCTL, dhd_natoe_ct_ioctl_handler,
7622 DHD_WQ_WORK_PRIORITY_HIGH);
7623 }
7624 #endif /* WL_NATOE */
7625
7626 /* This API maps ndev to ifp inclusive of static IFs */
7627 static dhd_if_t *
7628 dhd_get_ifp_by_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
7629 {
7630 dhd_if_t *ifp = NULL;
7631 #ifdef WL_STATIC_IF
7632 u32 ifidx = (DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1);
7633 #else
7634 u32 ifidx = (DHD_MAX_IFS - 1);
7635 #endif /* WL_STATIC_IF */
7636
7637 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
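	/* Walk from the highest slot downwards so that the static-interface
	 * slots (indices DHD_MAX_IFS .. DHD_MAX_IFS + DHD_MAX_STATIC_IFS - 1)
	 * are covered in the same pass as the regular ones.
	 */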
7638 do {
7639 ifp = dhdinfo->iflist[ifidx];
7640 if (ifp && (ifp->net == ndev)) {
7641 DHD_TRACE(("match found for %s. ifidx:%d\n",
7642 ndev->name, ifidx));
7643 return ifp;
7644 }
7645 } while (ifidx--);
7646
7647 DHD_ERROR(("no entry found for %s\n", ndev->name));
7648 return NULL;
7649 }
7650
7651 bool
7652 dhd_is_static_ndev(dhd_pub_t *dhdp, struct net_device *ndev)
7653 {
7654 dhd_if_t *ifp = NULL;
7655
7656 if (!dhdp || !ndev) {
7657 DHD_ERROR(("wrong input\n"));
7658 ASSERT(0);
7659 return false;
7660 }
7661
7662 ifp = dhd_get_ifp_by_ndev(dhdp, ndev);
7663 return (ifp && (ifp->static_if == true));
7664 }
7665
7666 #ifdef WL_STATIC_IF
/* In some cases, while registering an I/F, the actual ifidx, bssidx and
 * dngl_name are not yet known (e.g. the static i/f case). This function
 * allows updating them once they are known.
 */
7671 s32
7672 dhd_update_iflist_info(dhd_pub_t *dhdp, struct net_device *ndev, int ifidx,
7673 uint8 *mac, uint8 bssidx, const char *dngl_name, int if_state)
7674 {
7675 dhd_info_t *dhdinfo = (dhd_info_t *)dhdp->info;
7676 dhd_if_t *ifp, *ifp_new;
7677 s32 cur_idx;
7678 dhd_dev_priv_t * dev_priv;
7679
7680 DHD_TRACE(("[STATIC_IF] update ifinfo for state:%d ifidx:%d\n",
7681 if_state, ifidx));
7682
7683 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
7684
7685 if ((ifp = dhd_get_ifp_by_ndev(dhdp, ndev)) == NULL) {
7686 return -ENODEV;
7687 }
7688 cur_idx = ifp->idx;
7689
7690 if (if_state == NDEV_STATE_OS_IF_CREATED) {
7691 /* mark static if */
7692 ifp->static_if = TRUE;
7693 return BCME_OK;
7694 }
7695
7696 ifp_new = dhdinfo->iflist[ifidx];
7697 if (ifp_new && (ifp_new != ifp)) {
7698 /* There should be only one entry for a given ifidx. */
7699 DHD_ERROR(("ifp ptr already present for ifidx:%d\n", ifidx));
7700 ASSERT(0);
7701 dhdp->hang_reason = HANG_REASON_IFACE_ADD_FAILURE;
7702 net_os_send_hang_message(ifp->net);
7703 return -EINVAL;
7704 }
7705
7706 /* For static if delete case, cleanup the if before ifidx update */
7707 if ((if_state == NDEV_STATE_FW_IF_DELETED) ||
7708 (if_state == NDEV_STATE_FW_IF_FAILED)) {
7709 dhd_cleanup_if(ifp->net);
7710 dev_priv = DHD_DEV_PRIV(ndev);
7711 dev_priv->ifidx = ifidx;
7712 }
7713
7714 /* update the iflist ifidx slot with cached info */
7715 dhdinfo->iflist[ifidx] = ifp;
7716 dhdinfo->iflist[cur_idx] = NULL;
7717
7718 /* update the values */
7719 ifp->idx = ifidx;
7720 ifp->bssidx = bssidx;
7721
7722 if (if_state == NDEV_STATE_FW_IF_CREATED) {
7723 dhd_dev_priv_save(ndev, dhdinfo, ifp, ifidx);
7724 /* initialize the dongle provided if name */
7725 if (dngl_name) {
7726 strlcpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7727 } else if (ndev->name[0] != '\0') {
7728 strlcpy(ifp->dngl_name, ndev->name, IFNAMSIZ);
7729 }
7730 if (mac != NULL) {
7731 (void)memcpy_s(&ifp->mac_addr, ETHER_ADDR_LEN, mac, ETHER_ADDR_LEN);
7732 }
7733 }
7734 DHD_INFO(("[STATIC_IF] ifp ptr updated for ifidx:%d curidx:%d if_state:%d\n",
7735 ifidx, cur_idx, if_state));
7736 return BCME_OK;
7737 }
7738 #endif /* WL_STATIC_IF */
7739
7740 /* unregister and free the existing net_device interface (if any) in iflist and
7741 * allocate a new one. the slot is reused. this function does NOT register the
7742 * new interface to linux kernel. dhd_register_if does the job
7743 */
7744 struct net_device*
7745 dhd_allocate_if(dhd_pub_t *dhdpub, int ifidx, const char *name,
7746 uint8 *mac, uint8 bssidx, bool need_rtnl_lock, const char *dngl_name)
7747 {
7748 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7749 dhd_if_t *ifp;
7750
7751 ASSERT(dhdinfo && (ifidx < (DHD_MAX_IFS + DHD_MAX_STATIC_IFS)));
7752
7753 ifp = dhdinfo->iflist[ifidx];
7754
7755 if (ifp != NULL) {
7756 if (ifp->net != NULL) {
7757 DHD_ERROR(("%s: free existing IF %s ifidx:%d \n",
7758 __FUNCTION__, ifp->net->name, ifidx));
7759
7760 if (ifidx == 0) {
7761 /* For primary ifidx (0), there shouldn't be
7762 * any netdev present already.
7763 */
7764 DHD_ERROR(("Primary ifidx populated already\n"));
7765 ASSERT(0);
7766 return NULL;
7767 }
7768
7769 dhd_dev_priv_clear(ifp->net); /* clear net_device private */
7770
7771 /* in unregister_netdev case, the interface gets freed by net->destructor
7772 * (which is set to free_netdev)
7773 */
7774 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
7775 free_netdev(ifp->net);
7776 } else {
7777 netif_stop_queue(ifp->net);
7778 if (need_rtnl_lock)
7779 unregister_netdev(ifp->net);
7780 else
7781 unregister_netdevice(ifp->net);
7782 }
7783 ifp->net = NULL;
7784 }
7785 } else {
7786 ifp = MALLOC(dhdinfo->pub.osh, sizeof(dhd_if_t));
7787 if (ifp == NULL) {
7788 DHD_ERROR(("%s: OOM - dhd_if_t(%zu)\n", __FUNCTION__, sizeof(dhd_if_t)));
7789 return NULL;
7790 }
7791 }
7792
7793 memset(ifp, 0, sizeof(dhd_if_t));
7794 ifp->info = dhdinfo;
7795 ifp->idx = ifidx;
7796 ifp->bssidx = bssidx;
7797 #ifdef DHD_MCAST_REGEN
7798 ifp->mcast_regen_bss_enable = FALSE;
7799 #endif // endif
	/* rx_pkt_chainable is set to TRUE at alloc time */
7801 ifp->rx_pkt_chainable = TRUE;
7802
7803 if (mac != NULL)
7804 memcpy(&ifp->mac_addr, mac, ETHER_ADDR_LEN);
7805
7806 /* Allocate etherdev, including space for private structure */
7807 ifp->net = alloc_etherdev(DHD_DEV_PRIV_SIZE);
7808 if (ifp->net == NULL) {
		DHD_ERROR(("%s: OOM - alloc_etherdev(%zu)\n", __FUNCTION__,
			(size_t)DHD_DEV_PRIV_SIZE));
7810 goto fail;
7811 }
7812
7813 /* Setup the dhd interface's netdevice private structure. */
7814 dhd_dev_priv_save(ifp->net, dhdinfo, ifp, ifidx);
7815
7816 if (name && name[0]) {
7817 strncpy(ifp->net->name, name, IFNAMSIZ);
7818 ifp->net->name[IFNAMSIZ - 1] = '\0';
7819 }
7820
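	/* net_device::destructor was replaced by priv_destructor in newer
	 * kernels; hide the field-name difference behind a single macro.
	 */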
7821 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 11, 9))
7822 #define IFP_NET_DESTRUCTOR ifp->net->priv_destructor
7823 #else
7824 #define IFP_NET_DESTRUCTOR ifp->net->destructor
7825 #endif // endif
7826
7827 #ifdef WL_CFG80211
7828 if (ifidx == 0) {
7829 IFP_NET_DESTRUCTOR = free_netdev;
7830 } else {
7831 IFP_NET_DESTRUCTOR = dhd_netdev_free;
7832 }
7833 #else
7834 IFP_NET_DESTRUCTOR = free_netdev;
7835 #endif /* WL_CFG80211 */
7836 strncpy(ifp->name, ifp->net->name, IFNAMSIZ);
7837 ifp->name[IFNAMSIZ - 1] = '\0';
7838 dhdinfo->iflist[ifidx] = ifp;
7839
7840 /* initialize the dongle provided if name */
7841 if (dngl_name) {
7842 strncpy(ifp->dngl_name, dngl_name, IFNAMSIZ);
7843 } else if (name) {
7844 strncpy(ifp->dngl_name, name, IFNAMSIZ);
7845 }
7846
7847 /* Initialize STA info list */
7848 INIT_LIST_HEAD(&ifp->sta_list);
7849 DHD_IF_STA_LIST_LOCK_INIT(ifp);
7850
7851 #ifdef DHD_L2_FILTER
7852 ifp->phnd_arp_table = init_l2_filter_arp_table(dhdpub->osh);
7853 ifp->parp_allnode = TRUE;
7854 #endif /* DHD_L2_FILTER */
7855
7856 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
7857
7858 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
7859 INIT_DELAYED_WORK(&ifp->m4state_work, dhd_m4_state_handler);
7860 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
7861
7862 #ifdef DHD_POST_EAPOL_M1_AFTER_ROAM_EVT
7863 ifp->recv_reassoc_evt = FALSE;
7864 ifp->post_roam_evt = FALSE;
7865 #endif /* DHD_POST_EAPOL_M1_AFTER_ROAM_EVT */
7866
7867 #ifdef DHDTCPSYNC_FLOOD_BLK
7868 INIT_WORK(&ifp->blk_tsfl_work, dhd_blk_tsfl_handler);
7869 dhd_reset_tcpsync_info_by_ifp(ifp);
7870 #endif /* DHDTCPSYNC_FLOOD_BLK */
7871
7872 return ifp->net;
7873
7874 fail:
7875 if (ifp != NULL) {
7876 if (ifp->net != NULL) {
7877 #if defined(DHD_LB_RXP) && defined(PCIE_FULL_DONGLE)
7878 if (ifp->net == dhdinfo->rx_napi_netdev) {
7879 napi_disable(&dhdinfo->rx_napi_struct);
7880 netif_napi_del(&dhdinfo->rx_napi_struct);
7881 skb_queue_purge(&dhdinfo->rx_napi_queue);
7882 dhdinfo->rx_napi_netdev = NULL;
7883 }
7884 #endif /* DHD_LB_RXP && PCIE_FULL_DONGLE */
7885 dhd_dev_priv_clear(ifp->net);
7886 free_netdev(ifp->net);
7887 ifp->net = NULL;
7888 }
7889 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
7890 ifp = NULL;
7891 }
7892
7893 dhdinfo->iflist[ifidx] = NULL;
7894 return NULL;
7895 }
7896
7897 static void
7898 dhd_cleanup_ifp(dhd_pub_t *dhdp, dhd_if_t *ifp)
7899 {
7900 #ifdef PCIE_FULL_DONGLE
7901 s32 ifidx = 0;
7902 if_flow_lkup_t *if_flow_lkup = (if_flow_lkup_t *)dhdp->if_flow_lkup;
7903 #endif /* PCIE_FULL_DONGLE */
7904
7905 if (ifp != NULL) {
7906 if ((ifp->idx < 0) || (ifp->idx >= DHD_MAX_IFS)) {
7907 DHD_ERROR(("Wrong idx:%d \n", ifp->idx));
7908 ASSERT(0);
7909 return;
7910 }
7911 #ifdef DHD_L2_FILTER
		bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
			NULL, FALSE, dhdp->tickcnt);
		deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
7915 ifp->phnd_arp_table = NULL;
7916 #endif /* DHD_L2_FILTER */
7917
7918 dhd_if_del_sta_list(ifp);
7919 #ifdef PCIE_FULL_DONGLE
7920 /* Delete flowrings of virtual interface */
7921 ifidx = ifp->idx;
7922 if ((ifidx != 0) && (if_flow_lkup[ifidx].role != WLC_E_IF_ROLE_AP)) {
7923 dhd_flow_rings_delete(dhdp, ifidx);
7924 }
7925 #endif /* PCIE_FULL_DONGLE */
7926 }
7927 }
7928
7929 void
7930 dhd_cleanup_if(struct net_device *net)
7931 {
7932 dhd_info_t *dhdinfo = DHD_DEV_INFO(net);
7933 dhd_pub_t *dhdp = &dhdinfo->pub;
7934 dhd_if_t *ifp;
7935
7936 if (!(ifp = dhd_get_ifp_by_ndev(dhdp, net)) ||
7937 (ifp->idx >= DHD_MAX_IFS)) {
7938 DHD_ERROR(("Wrong ifidx: %p, %d\n", ifp, ifp ? ifp->idx : -1));
7939 ASSERT(0);
7940 return;
7941 }
7942
7943 dhd_cleanup_ifp(dhdp, ifp);
7944 }
7945
/* Unregister and free the net_device interface associated with the indexed
 * slot, also free the slot memory and set the slot pointer to NULL.
 */
#define DHD_TX_COMPLETION_TIMEOUT	5000	/* ms */
7950 int
7951 dhd_remove_if(dhd_pub_t *dhdpub, int ifidx, bool need_rtnl_lock)
7952 {
7953 dhd_info_t *dhdinfo = (dhd_info_t *)dhdpub->info;
7954 dhd_if_t *ifp;
7955 unsigned long flags;
7956 long timeout;
7957
7958 ifp = dhdinfo->iflist[ifidx];
7959
7960 if (ifp != NULL) {
7961 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
7962 cancel_delayed_work_sync(&ifp->m4state_work);
7963 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
7964
7965 #ifdef DHDTCPSYNC_FLOOD_BLK
7966 cancel_work_sync(&ifp->blk_tsfl_work);
7967 #endif /* DHDTCPSYNC_FLOOD_BLK */
7968
7969 #ifdef WL_STATIC_IF
7970 /* static IF will be handled in detach */
7971 if (ifp->static_if) {
7972 DHD_TRACE(("Skip del iface for static interface\n"));
7973 return BCME_OK;
7974 }
7975 #endif /* WL_STATIC_IF */
7976 if (ifp->net != NULL) {
7977 DHD_ERROR(("deleting interface '%s' idx %d\n", ifp->net->name, ifp->idx));
7978
7979 DHD_GENERAL_LOCK(dhdpub, flags);
7980 ifp->del_in_progress = true;
7981 DHD_GENERAL_UNLOCK(dhdpub, flags);
7982
7983 /* If TX is in progress, hold the if del */
7984 if (DHD_IF_IS_TX_ACTIVE(ifp)) {
				DHD_INFO(("TX in progress. Wait for it to complete.\n"));
7986 timeout = wait_event_timeout(dhdpub->tx_completion_wait,
7987 ((ifp->tx_paths_active & DHD_TX_CONTEXT_MASK) == 0),
7988 msecs_to_jiffies(DHD_TX_COMPLETION_TIMEOUT));
7989 if (!timeout) {
7990 /* Tx completion timeout. Attempt proceeding ahead */
7991 DHD_ERROR(("Tx completion timed out!\n"));
7992 ASSERT(0);
7993 }
7994 } else {
7995 DHD_TRACE(("No outstanding TX!\n"));
7996 }
7997 dhdinfo->iflist[ifidx] = NULL;
7998 /* in unregister_netdev case, the interface gets freed by net->destructor
7999 * (which is set to free_netdev)
8000 */
8001 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
8002 free_netdev(ifp->net);
8003 } else {
8004 netif_tx_disable(ifp->net);
8005
8006 #if defined(SET_RPS_CPUS)
8007 custom_rps_map_clear(ifp->net->_rx);
8008 #endif /* SET_RPS_CPUS */
8009 #if defined(SET_RPS_CPUS)
8010 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
8011 dhd_tcpack_suppress_set(dhdpub, TCPACK_SUP_OFF);
8012 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
8013 #endif // endif
8014 if (need_rtnl_lock)
8015 unregister_netdev(ifp->net);
8016 else
8017 unregister_netdevice(ifp->net);
8018 }
8019 ifp->net = NULL;
8020 DHD_GENERAL_LOCK(dhdpub, flags);
8021 ifp->del_in_progress = false;
8022 DHD_GENERAL_UNLOCK(dhdpub, flags);
8023 }
8024 dhd_cleanup_ifp(dhdpub, ifp);
8025 DHD_CUMM_CTR_INIT(&ifp->cumm_ctr);
8026
8027 MFREE(dhdinfo->pub.osh, ifp, sizeof(*ifp));
8028 ifp = NULL;
8029 }
8030
8031 return BCME_OK;
8032 }
8033
8034 static struct net_device_ops dhd_ops_pri = {
8035 .ndo_open = dhd_pri_open,
8036 .ndo_stop = dhd_pri_stop,
8037 .ndo_get_stats = dhd_get_stats,
8038 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8039 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8040 .ndo_start_xmit = dhd_start_xmit_wrapper,
8041 #else
8042 .ndo_do_ioctl = dhd_ioctl_entry,
8043 .ndo_start_xmit = dhd_start_xmit,
8044 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8045 .ndo_set_mac_address = dhd_set_mac_address,
8046 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8047 .ndo_set_rx_mode = dhd_set_multicast_list,
8048 #else
8049 .ndo_set_multicast_list = dhd_set_multicast_list,
8050 #endif // endif
8051 };
8052
8053 static struct net_device_ops dhd_ops_virt = {
8054 #if defined(WL_CFG80211) && defined(WL_STATIC_IF)
8055 .ndo_open = dhd_static_if_open,
8056 .ndo_stop = dhd_static_if_stop,
8057 #endif // endif
8058 .ndo_get_stats = dhd_get_stats,
8059 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8060 .ndo_do_ioctl = dhd_ioctl_entry_wrapper,
8061 .ndo_start_xmit = dhd_start_xmit_wrapper,
8062 #else
8063 .ndo_do_ioctl = dhd_ioctl_entry,
8064 .ndo_start_xmit = dhd_start_xmit,
8065 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8066 .ndo_set_mac_address = dhd_set_mac_address,
8067 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0))
8068 .ndo_set_rx_mode = dhd_set_multicast_list,
8069 #else
8070 .ndo_set_multicast_list = dhd_set_multicast_list,
8071 #endif // endif
8072 };
8073
8074 int
8075 dhd_os_write_file_posn(void *fp, unsigned long *posn, void *buf,
8076 unsigned long buflen)
8077 {
8078 loff_t wr_posn = *posn;
8079
8080 if (!fp || !buf || buflen == 0)
8081 return -1;
8082
8083 if (vfs_write((struct file *)fp, buf, buflen, &wr_posn) < 0)
8084 return -1;
8085
8086 *posn = wr_posn;
8087 return 0;
8088 }
8089
8090 #ifdef SHOW_LOGTRACE
8091 int
8092 dhd_os_read_file(void *file, char *buf, uint32 size)
8093 {
8094 struct file *filep = (struct file *)file;
8095
8096 if (!file || !buf)
8097 return -1;
8098
8099 return vfs_read(filep, buf, size, &filep->f_pos);
8100 }
8101
8102 int
8103 dhd_os_seek_file(void *file, int64 offset)
8104 {
8105 struct file *filep = (struct file *)file;
8106 if (!file)
8107 return -1;
8108
	/* offset can be negative */
8110 filep->f_pos = filep->f_pos + offset;
8111
8112 return 0;
8113 }
8114
8115 static int
8116 dhd_init_logstrs_array(osl_t *osh, dhd_event_log_t *temp)
8117 {
8118 struct file *filep = NULL;
8119 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
8120 struct kstat stat;
8121 int error = 0;
8122 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */
8123 #if defined(KERNEL_DS) && defined(USER_DS)
8124 mm_segment_t fs;
8125 #endif /* KERNEL_DS && USER_DS */
8126 char *raw_fmts = NULL;
8127 int logstrs_size = 0;
8128
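	/* On kernels that still have the addr_limit concept, temporarily switch
	 * to KERNEL_DS so filp_open()/vfs_read() accept kernel-space buffers;
	 * the KERNEL_DS/USER_DS guards make this a no-op on kernels where
	 * set_fs() has been removed.
	 */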
8129 #if defined(KERNEL_DS) && defined(USER_DS)
8130 fs = get_fs();
8131 set_fs(KERNEL_DS);
8132 #endif /* KERNEL_DS && USER_DS */
8133
8134 filep = filp_open(logstrs_path, O_RDONLY, 0);
8135
8136 if (IS_ERR(filep)) {
8137 DHD_ERROR_NO_HW4(("%s: Failed to open the file %s \n", __FUNCTION__, logstrs_path));
8138 goto fail;
8139 }
8140 #if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0))
8141 error = vfs_stat(logstrs_path, &stat);
8142 if (error) {
8143 DHD_ERROR_NO_HW4(("%s: Failed to stat file %s \n", __FUNCTION__, logstrs_path));
8144 goto fail;
8145 }
8146 logstrs_size = (int) stat.size;
8147 #else
8148 logstrs_size = i_size_read(file_inode(filep));
8149 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(5, 10, 0) */
8150 if (logstrs_size == 0) {
8151 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
8152 goto fail1;
8153 }
8154
8155 raw_fmts = MALLOC(osh, logstrs_size);
8156 if (raw_fmts == NULL) {
8157 DHD_ERROR(("%s: Failed to allocate memory \n", __FUNCTION__));
8158 goto fail;
8159 }
8160
8161 if (vfs_read(filep, raw_fmts, logstrs_size, &filep->f_pos) != logstrs_size) {
8162 DHD_ERROR_NO_HW4(("%s: Failed to read file %s\n", __FUNCTION__, logstrs_path));
8163 goto fail;
8164 }
8165
8166 if (dhd_parse_logstrs_file(osh, raw_fmts, logstrs_size, temp)
8167 == BCME_OK) {
8168 filp_close(filep, NULL);
8169 #if defined(KERNEL_DS) && defined(USER_DS)
8170 set_fs(fs);
8171 #endif /* KERNEL_DS && USER_DS */
8172 return BCME_OK;
8173 }
8174
8175 fail:
8176 if (raw_fmts) {
8177 MFREE(osh, raw_fmts, logstrs_size);
8178 raw_fmts = NULL;
8179 }
8180
8181 fail1:
8182 if (!IS_ERR(filep))
8183 filp_close(filep, NULL);
8184
8185 #if defined(KERNEL_DS) && defined(USER_DS)
8186 set_fs(fs);
8187 #endif /* KERNEL_DS && USER_DS */
8188
8189 temp->fmts = NULL;
8190 return BCME_ERROR;
8191 }
8192
8193 static int
8194 dhd_read_map(osl_t *osh, char *fname, uint32 *ramstart, uint32 *rodata_start,
8195 uint32 *rodata_end)
8196 {
8197 struct file *filep = NULL;
8198 #if defined(KERNEL_DS) && defined(USER_DS)
8199 mm_segment_t fs;
8200 #endif /* KERNEL_DS && USER_DS */
8201
8202 int err = BCME_ERROR;
8203
8204 if (fname == NULL) {
8205 DHD_ERROR(("%s: ERROR fname is NULL \n", __FUNCTION__));
8206 return BCME_ERROR;
8207 }
8208
8209 #if defined(KERNEL_DS) && defined(USER_DS)
8210 fs = get_fs();
8211 set_fs(KERNEL_DS);
8212 #endif /* KERNEL_DS && USER_DS */
8213
8214 filep = filp_open(fname, O_RDONLY, 0);
8215 if (IS_ERR(filep)) {
8216 DHD_ERROR_NO_HW4(("%s: Failed to open %s \n", __FUNCTION__, fname));
8217 goto fail;
8218 }
8219
8220 if ((err = dhd_parse_map_file(osh, filep, ramstart,
8221 rodata_start, rodata_end)) < 0)
8222 goto fail;
8223
8224 fail:
8225 if (!IS_ERR(filep))
8226 filp_close(filep, NULL);
8227
8228 #if defined(KERNEL_DS) && defined(USER_DS)
8229 set_fs(fs);
8230 #endif /* KERNEL_DS && USER_DS */
8231
8232 return err;
8233 }
8234
8235 static int
8236 dhd_init_static_strs_array(osl_t *osh, dhd_event_log_t *temp, char *str_file, char *map_file)
8237 {
8238 struct file *filep = NULL;
8239 #if defined(KERNEL_DS) && defined(USER_DS)
8240 mm_segment_t fs;
8241 #endif /* KERNEL_DS && USER_DS */
8242 char *raw_fmts = NULL;
8243 uint32 logstrs_size = 0;
8244 int error = 0;
8245 uint32 ramstart = 0;
8246 uint32 rodata_start = 0;
8247 uint32 rodata_end = 0;
8248 uint32 logfilebase = 0;
8249
8250 error = dhd_read_map(osh, map_file, &ramstart, &rodata_start, &rodata_end);
8251 if (error != BCME_OK) {
8252 DHD_ERROR(("readmap Error!! \n"));
		/* don't attempt event log parsing when the map file could not be read */
8254 if (strstr(str_file, ram_file_str) != NULL) {
8255 temp->raw_sstr = NULL;
8256 } else if (strstr(str_file, rom_file_str) != NULL) {
8257 temp->rom_raw_sstr = NULL;
8258 }
8259 return error;
8260 }
8261 DHD_ERROR(("ramstart: 0x%x, rodata_start: 0x%x, rodata_end:0x%x\n",
8262 ramstart, rodata_start, rodata_end));
8263
8264 #if defined(KERNEL_DS) && defined(USER_DS)
8265 fs = get_fs();
8266 set_fs(KERNEL_DS);
8267 #endif /* KERNEL_DS && USER_DS */
8268
8269 filep = filp_open(str_file, O_RDONLY, 0);
8270 if (IS_ERR(filep)) {
8271 DHD_ERROR(("%s: Failed to open the file %s \n", __FUNCTION__, str_file));
8272 goto fail;
8273 }
8274
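	/* rodata_start/rodata_end/ramstart are dongle RAM addresses taken from
	 * the map file; subtracting ramstart converts the rodata window into a
	 * byte offset within the firmware image file.
	 */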
	/* Full file size is huge. Just read the required part. */
	logstrs_size = rodata_end - rodata_start;
	logfilebase = rodata_start - ramstart;
8280
8281 if (logstrs_size == 0) {
8282 DHD_ERROR(("%s: return as logstrs_size is 0\n", __FUNCTION__));
8283 goto fail1;
8284 }
8285
8286 raw_fmts = MALLOC(osh, logstrs_size);
8287 if (raw_fmts == NULL) {
8288 DHD_ERROR(("%s: Failed to allocate raw_fmts memory \n", __FUNCTION__));
8289 goto fail;
8290 }
8291
	error = generic_file_llseek(filep, logfilebase, SEEK_SET);
	if (error < 0) {
		DHD_ERROR(("%s: %s llseek failed %d \n", __FUNCTION__, str_file, error));
		goto fail;
	}
8299
8300 error = vfs_read(filep, raw_fmts, logstrs_size, (&filep->f_pos));
8301 if (error != logstrs_size) {
8302 DHD_ERROR(("%s: %s read failed %d \n", __FUNCTION__, str_file, error));
8303 goto fail;
8304 }
8305
8306 if (strstr(str_file, ram_file_str) != NULL) {
8307 temp->raw_sstr = raw_fmts;
8308 temp->raw_sstr_size = logstrs_size;
8309 temp->rodata_start = rodata_start;
8310 temp->rodata_end = rodata_end;
8311 } else if (strstr(str_file, rom_file_str) != NULL) {
8312 temp->rom_raw_sstr = raw_fmts;
8313 temp->rom_raw_sstr_size = logstrs_size;
8314 temp->rom_rodata_start = rodata_start;
8315 temp->rom_rodata_end = rodata_end;
8316 }
8317
8318 filp_close(filep, NULL);
8319 #if defined(KERNEL_DS) && defined(USER_DS)
8320 set_fs(fs);
8321 #endif /* KERNEL_DS && USER_DS */
8322
8323 return BCME_OK;
8324
8325 fail:
8326 if (raw_fmts) {
8327 MFREE(osh, raw_fmts, logstrs_size);
8328 raw_fmts = NULL;
8329 }
8330
8331 fail1:
8332 if (!IS_ERR(filep))
8333 filp_close(filep, NULL);
8334
8335 #if defined(KERNEL_DS) && defined(USER_DS)
8336 set_fs(fs);
8337 #endif /* KERNEL_DS && USER_DS */
8338
8339 if (strstr(str_file, ram_file_str) != NULL) {
8340 temp->raw_sstr = NULL;
8341 } else if (strstr(str_file, rom_file_str) != NULL) {
8342 temp->rom_raw_sstr = NULL;
8343 }
8344
8345 return error;
8346 } /* dhd_init_static_strs_array */
8347
8348 #endif /* SHOW_LOGTRACE */
8349
8350 #ifdef DHD_ERPOM
8351 uint enable_erpom = 0;
module_param(enable_erpom, uint, 0);
8353
8354 int
8355 dhd_wlan_power_off_handler(void *handler, unsigned char reason)
8356 {
8357 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
8358 bool dongle_isolation = dhdp->dongle_isolation;
8359
8360 DHD_ERROR(("%s: WLAN DHD cleanup reason: %d\n", __FUNCTION__, reason));
8361
8362 if ((reason == BY_BT_DUE_TO_BT) || (reason == BY_BT_DUE_TO_WLAN)) {
8363 #if defined(DHD_FW_COREDUMP)
8364 /* save core dump to a file */
8365 if (dhdp->memdump_enabled) {
8366 #ifdef DHD_SSSR_DUMP
8367 dhdp->collect_sssr = TRUE;
8368 #endif /* DHD_SSSR_DUMP */
8369 dhdp->memdump_type = DUMP_TYPE_DUE_TO_BT;
8370 dhd_bus_mem_dump(dhdp);
8371 }
8372 #endif /* DHD_FW_COREDUMP */
8373 }
8374
8375 /* pause data on all the interfaces */
8376 dhd_bus_stop_queue(dhdp->bus);
8377
8378 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
8379 dhdp->dongle_isolation = TRUE;
8380 dhd_bus_devreset(dhdp, 1); /* DHD structure cleanup */
8381 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8382 return 0;
8383 }
8384
8385 int
8386 dhd_wlan_power_on_handler(void *handler, unsigned char reason)
8387 {
8388 dhd_pub_t *dhdp = (dhd_pub_t *)handler;
8389 bool dongle_isolation = dhdp->dongle_isolation;
8390
8391 DHD_ERROR(("%s: WLAN DHD re-init reason: %d\n", __FUNCTION__, reason));
8392 /* Devreset function will perform FLR again, to avoid it set dongle_isolation */
8393 dhdp->dongle_isolation = TRUE;
8394 dhd_bus_devreset(dhdp, 0); /* DHD structure re-init */
8395 dhdp->dongle_isolation = dongle_isolation; /* Restore the old value */
8396 /* resume data on all the interfaces */
8397 dhd_bus_start_queue(dhdp->bus);
	return 0;
}
8401
8402 #endif /* DHD_ERPOM */
8403
8404 /** Called once for each hardware (dongle) instance that this DHD manages */
8405 dhd_pub_t *
8406 dhd_attach(osl_t *osh, struct dhd_bus *bus, uint bus_hdrlen)
8407 {
8408 dhd_info_t *dhd = NULL;
8409 struct net_device *net = NULL;
8410 char if_name[IFNAMSIZ] = {'\0'};
8411 uint32 bus_type = -1;
8412 uint32 bus_num = -1;
8413 uint32 slot_num = -1;
8414 #ifdef SHOW_LOGTRACE
8415 int ret;
8416 #endif /* SHOW_LOGTRACE */
8417 #ifdef DHD_ERPOM
8418 pom_func_handler_t *pom_handler;
8419 #endif /* DHD_ERPOM */
8420 wifi_adapter_info_t *adapter = NULL;
8421
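	/* dhd_state accumulates a DHD_ATTACH_STATE_* bit as each attach step
	 * completes; on failure the accumulated bits tell dhd_detach() which
	 * steps need to be unwound.
	 */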
8422 dhd_attach_states_t dhd_state = DHD_ATTACH_STATE_INIT;
8423 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
8424
8425 #ifdef PCIE_FULL_DONGLE
8426 ASSERT(sizeof(dhd_pkttag_fd_t) <= OSL_PKTTAG_SZ);
8427 ASSERT(sizeof(dhd_pkttag_fr_t) <= OSL_PKTTAG_SZ);
8428 #endif /* PCIE_FULL_DONGLE */
8429
8430 /* will implement get_ids for DBUS later */
8431 #if defined(BCMSDIO)
8432 dhd_bus_get_ids(bus, &bus_type, &bus_num, &slot_num);
8433 #endif // endif
8434 adapter = dhd_wifi_platform_get_adapter(bus_type, bus_num, slot_num);
8435
8436 /* Allocate primary dhd_info */
8437 dhd = wifi_platform_prealloc(adapter, DHD_PREALLOC_DHD_INFO, sizeof(dhd_info_t));
8438 if (dhd == NULL) {
8439 dhd = MALLOC(osh, sizeof(dhd_info_t));
8440 if (dhd == NULL) {
8441 DHD_ERROR(("%s: OOM - alloc dhd_info\n", __FUNCTION__));
8442 goto dhd_null_flag;
8443 }
8444 }
8445 memset(dhd, 0, sizeof(dhd_info_t));
8446 dhd_state |= DHD_ATTACH_STATE_DHD_ALLOC;
8447
8448 #ifdef SHOW_LOGTRACE
8449 /* Create ring proc entries */
8450 dhd_dbg_ring_proc_create(&dhd->pub);
8451
8452 if (dhd_init_logtrace_process(dhd) != BCME_OK) {
8453 goto fail;
8454 }
8455 #endif /* SHOW_LOGTRACE */
8456
8457 dhd->unit = dhd_found + instance_base; /* do not increment dhd_found, yet */
8458
8459 dhd->pub.osh = osh;
8460 #ifdef DUMP_IOCTL_IOV_LIST
8461 dll_init(&(dhd->pub.dump_iovlist_head));
8462 #endif /* DUMP_IOCTL_IOV_LIST */
8463 dhd->pub.dhd_console_ms = dhd_console_ms; /* assigns default value */
8464 dhd->adapter = adapter;
8465 #ifdef BT_OVER_SDIO
8466 dhd->pub.is_bt_recovery_required = FALSE;
8467 mutex_init(&dhd->bus_user_lock);
8468 #endif /* BT_OVER_SDIO */
8469
8470 g_dhd_pub = &dhd->pub;
8471
8472 #ifdef DHD_DEBUG
8473 dll_init(&(dhd->pub.mw_list_head));
8474 #endif /* DHD_DEBUG */
8475
8476 #ifdef GET_CUSTOM_MAC_ENABLE
8477 wifi_platform_get_mac_addr(dhd->adapter, dhd->pub.mac.octet);
8478 #endif /* GET_CUSTOM_MAC_ENABLE */
8479 #ifdef CUSTOM_FORCE_NODFS_FLAG
8480 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
8481 dhd->pub.force_country_change = TRUE;
8482 #endif /* CUSTOM_FORCE_NODFS_FLAG */
8483 #ifdef CUSTOM_COUNTRY_CODE
8484 get_customized_country_code(dhd->adapter,
8485 dhd->pub.dhd_cspec.country_abbrev, &dhd->pub.dhd_cspec,
8486 dhd->pub.dhd_cflags);
8487 #endif /* CUSTOM_COUNTRY_CODE */
8488 dhd->thr_dpc_ctl.thr_pid = DHD_PID_KT_TL_INVALID;
8489 dhd->thr_wdt_ctl.thr_pid = DHD_PID_KT_INVALID;
8490 #ifdef DHD_WET
8491 dhd->pub.wet_info = dhd_get_wet_info(&dhd->pub);
8492 #endif /* DHD_WET */
8493 /* Initialize thread based operation and lock */
8494 sema_init(&dhd->sdsem, 1);
8495
	/* Some DHD modules (e.g. cfg80211) configure the operation mode based on
	 * the firmware name. This is indeed a hack, but we have to make it work
	 * properly until we have a better solution.
	 */
8500 dhd_update_fw_nv_path(dhd);
8501 dhd->pub.pcie_txs_metadata_enable = pcie_txs_metadata_enable;
8502
8503 /* Link to info module */
8504 dhd->pub.info = dhd;
8505
8506 /* Link to bus module */
8507 dhd->pub.bus = bus;
8508 dhd->pub.hdrlen = bus_hdrlen;
8509 dhd->pub.txoff = FALSE;
8510
8511 /* Set network interface name if it was provided as module parameter */
8512 if (iface_name[0]) {
8513 int len;
8514 char ch;
8515 strncpy(if_name, iface_name, IFNAMSIZ);
8516 if_name[IFNAMSIZ - 1] = 0;
8517 len = strlen(if_name);
8518 ch = if_name[len - 1];
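		/* If the name does not already end in a digit, append "%d" so
		 * register_netdev() assigns a unit number (e.g. "wlan" -> "wlan0").
		 */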
8519 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
8520 strncat(if_name, "%d", 2);
8521 }
8522
8523 /* Passing NULL to dngl_name to ensure host gets if_name in dngl_name member */
8524 net = dhd_allocate_if(&dhd->pub, 0, if_name, NULL, 0, TRUE, NULL);
8525 if (net == NULL) {
8526 goto fail;
8527 }
8528 mutex_init(&dhd->pub.ndev_op_sync);
8529
8530 dhd_state |= DHD_ATTACH_STATE_ADD_IF;
8531 #ifdef DHD_L2_FILTER
8532 /* initialize the l2_filter_cnt */
8533 dhd->pub.l2_filter_cnt = 0;
8534 #endif // endif
8535 net->netdev_ops = NULL;
8536
8537 mutex_init(&dhd->dhd_iovar_mutex);
8538 sema_init(&dhd->proto_sem, 1);
8539 #ifdef DHD_ULP
8540 if (!(dhd_ulp_init(osh, &dhd->pub)))
8541 goto fail;
8542 #endif /* DHD_ULP */
8543
8544 #if defined(DHD_HANG_SEND_UP_TEST)
8545 dhd->pub.req_hang_type = 0;
8546 #endif /* DHD_HANG_SEND_UP_TEST */
8547
8548 #ifdef PROP_TXSTATUS
8549 spin_lock_init(&dhd->wlfc_spinlock);
8550
8551 dhd->pub.skip_fc = dhd_wlfc_skip_fc;
8552 dhd->pub.plat_init = dhd_wlfc_plat_init;
8553 dhd->pub.plat_deinit = dhd_wlfc_plat_deinit;
8554
8555 #ifdef DHD_WLFC_THREAD
8556 init_waitqueue_head(&dhd->pub.wlfc_wqhead);
8557 dhd->pub.wlfc_thread = kthread_create(dhd_wlfc_transfer_packets, &dhd->pub, "wlfc-thread");
8558 if (IS_ERR(dhd->pub.wlfc_thread)) {
8559 DHD_ERROR(("create wlfc thread failed\n"));
8560 goto fail;
8561 } else {
8562 wake_up_process(dhd->pub.wlfc_thread);
8563 }
8564 #endif /* DHD_WLFC_THREAD */
8565 #endif /* PROP_TXSTATUS */
8566
8567 /* Initialize other structure content */
8568 init_waitqueue_head(&dhd->ioctl_resp_wait);
8569 init_waitqueue_head(&dhd->d3ack_wait);
8570 init_waitqueue_head(&dhd->ctrl_wait);
8571 init_waitqueue_head(&dhd->dhd_bus_busy_state_wait);
8572 init_waitqueue_head(&dhd->dmaxfer_wait);
8573 init_waitqueue_head(&dhd->pub.tx_completion_wait);
8574 dhd->pub.dhd_bus_busy_state = 0;
8575 /* Initialize the spinlocks */
8576 spin_lock_init(&dhd->sdlock);
8577 spin_lock_init(&dhd->txqlock);
8578 spin_lock_init(&dhd->dhd_lock);
8579 spin_lock_init(&dhd->rxf_lock);
8580 #ifdef WLTDLS
8581 spin_lock_init(&dhd->pub.tdls_lock);
8582 #endif /* WLTDLS */
8583 #if defined(RXFRAME_THREAD)
8584 dhd->rxthread_enabled = TRUE;
8585 #endif /* defined(RXFRAME_THREAD) */
8586
8587 #ifdef DHDTCPACK_SUPPRESS
8588 spin_lock_init(&dhd->tcpack_lock);
8589 #endif /* DHDTCPACK_SUPPRESS */
8590
8591 /* Initialize Wakelock stuff */
8592 spin_lock_init(&dhd->wakelock_spinlock);
8593 spin_lock_init(&dhd->wakelock_evt_spinlock);
8594 DHD_OS_WAKE_LOCK_INIT(dhd);
8595 dhd->wakelock_counter = 0;
8596 /* wakelocks prevent a system from going into a low power state */
8597 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
8598 wake_lock_init(&dhd->wl_wdwake, WAKE_LOCK_SUSPEND, "wlan_wd_wake");
8599 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
8600
8601 #if defined(OEM_ANDROID)
8602 mutex_init(&dhd->dhd_net_if_mutex);
8603 mutex_init(&dhd->dhd_suspend_mutex);
8604 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
8605 mutex_init(&dhd->dhd_apf_mutex);
8606 #endif /* PKT_FILTER_SUPPORT && APF */
8607 #endif /* defined(OEM_ANDROID) */
8608 dhd_state |= DHD_ATTACH_STATE_WAKELOCKS_INIT;
8609
8610 /* Attach and link in the protocol */
8611 if (dhd_prot_attach(&dhd->pub) != 0) {
8612 DHD_ERROR(("dhd_prot_attach failed\n"));
8613 goto fail;
8614 }
8615 dhd_state |= DHD_ATTACH_STATE_PROT_ATTACH;
8616
8617 #ifdef WL_CFG80211
8618 spin_lock_init(&dhd->pub.up_lock);
8619 /* Attach and link in the cfg80211 */
8620 if (unlikely(wl_cfg80211_attach(net, &dhd->pub))) {
8621 DHD_ERROR(("wl_cfg80211_attach failed\n"));
8622 goto fail;
8623 }
8624
8625 #ifdef DHD_MONITOR_INTERFACE
8626 dhd_monitor_init(&dhd->pub);
8627 #endif /* DHD_MONITOR_INTERFACE */
8628 dhd_state |= DHD_ATTACH_STATE_CFG80211;
8629 #endif /* WL_CFG80211 */
8630
8631 #if defined(WL_WIRELESS_EXT)
8632 /* Attach and link in the iw */
8633 if (!(dhd_state & DHD_ATTACH_STATE_CFG80211)) {
8634 if (wl_iw_attach(net, (void *)&dhd->pub) != 0) {
8635 DHD_ERROR(("wl_iw_attach failed\n"));
8636 goto fail;
8637 }
8638 dhd_state |= DHD_ATTACH_STATE_WL_ATTACH;
8639 }
8640 #endif /* defined(WL_WIRELESS_EXT) */
8641
8642 #ifdef SHOW_LOGTRACE
8643 ret = dhd_init_logstrs_array(osh, &dhd->event_data);
8644 if (ret == BCME_OK) {
8645 dhd_init_static_strs_array(osh, &dhd->event_data, st_str_file_path, map_file_path);
8646 dhd_init_static_strs_array(osh, &dhd->event_data, rom_st_str_file_path,
8647 rom_map_file_path);
8648 dhd_state |= DHD_ATTACH_LOGTRACE_INIT;
8649 }
8650 #endif /* SHOW_LOGTRACE */
8651
8652 #ifdef DEBUGABILITY
8653 /* attach debug if support */
8654 if (dhd_os_dbg_attach(&dhd->pub)) {
8655 DHD_ERROR(("%s debug module attach failed\n", __FUNCTION__));
8656 goto fail;
8657 }
8658 #if defined(SHOW_LOGTRACE) && defined(DBG_RING_LOG_INIT_DEFAULT)
8659 /* enable verbose ring to support dump_trace_buf */
8660 dhd_os_start_logging(&dhd->pub, FW_VERBOSE_RING_NAME, 3, 0, 0, 0);
#endif /* SHOW_LOGTRACE && DBG_RING_LOG_INIT_DEFAULT */
8662
8663 #ifdef DBG_PKT_MON
8664 dhd->pub.dbg->pkt_mon_lock = dhd_os_spin_lock_init(dhd->pub.osh);
8665 #ifdef DBG_PKT_MON_INIT_DEFAULT
8666 dhd_os_dbg_attach_pkt_monitor(&dhd->pub);
8667 #endif /* DBG_PKT_MON_INIT_DEFAULT */
8668 #endif /* DBG_PKT_MON */
8669 #endif /* DEBUGABILITY */
8670
8671 #ifdef DHD_STATUS_LOGGING
8672 dhd->pub.statlog = dhd_attach_statlog(&dhd->pub, MAX_STATLOG_ITEM,
8673 MAX_STATLOG_REQ_ITEM, STATLOG_LOGBUF_LEN);
8674 if (dhd->pub.statlog == NULL) {
8675 DHD_ERROR(("%s: alloc statlog failed\n", __FUNCTION__));
8676 }
8677 #endif /* DHD_STATUS_LOGGING */
8678
8679 #ifdef DHD_LOG_DUMP
8680 dhd_log_dump_init(&dhd->pub);
8681 #endif /* DHD_LOG_DUMP */
8682 #ifdef DHD_PKTDUMP_ROAM
8683 dhd_dump_pkt_init(&dhd->pub);
8684 #endif /* DHD_PKTDUMP_ROAM */
8685 #ifdef DHD_PKT_LOGGING
8686 dhd_os_attach_pktlog(&dhd->pub);
8687 #endif /* DHD_PKT_LOGGING */
8688
8689 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
8690 dhd->pub.hang_info = MALLOCZ(osh, VENDOR_SEND_HANG_EXT_INFO_LEN);
8691 if (dhd->pub.hang_info == NULL) {
8692 DHD_ERROR(("%s: alloc hang_info failed\n", __FUNCTION__));
8693 }
8694 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
8695 if (dhd_sta_pool_init(&dhd->pub, DHD_MAX_STA) != BCME_OK) {
		DHD_ERROR(("%s: Failed to initialize pool of %u STAs\n", __FUNCTION__, DHD_MAX_STA));
8697 goto fail;
8698 }
8699
8700 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
8701 dhd->tx_wq = alloc_workqueue("bcmdhd-tx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8702 if (!dhd->tx_wq) {
8703 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-tx-wq) failed\n", __FUNCTION__));
8704 goto fail;
8705 }
8706 dhd->rx_wq = alloc_workqueue("bcmdhd-rx-wq", WQ_HIGHPRI | WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
8707 if (!dhd->rx_wq) {
8708 DHD_ERROR(("%s: alloc_workqueue(bcmdhd-rx-wq) failed\n", __FUNCTION__));
8709 destroy_workqueue(dhd->tx_wq);
8710 dhd->tx_wq = NULL;
8711 goto fail;
8712 }
8713 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
8714
8715 /* Set up the watchdog timer */
8716 init_timer_compat(&dhd->timer, dhd_watchdog, dhd);
8717 dhd->default_wd_interval = dhd_watchdog_ms;
8718
8719 if (dhd_watchdog_prio >= 0) {
8720 /* Initialize watchdog thread */
8721 PROC_START(dhd_watchdog_thread, dhd, &dhd->thr_wdt_ctl, 0, "dhd_watchdog_thread");
8722 if (dhd->thr_wdt_ctl.thr_pid < 0) {
8723 goto fail;
8724 }
8725
8726 } else {
8727 dhd->thr_wdt_ctl.thr_pid = -1;
8728 }
8729
8730 #ifdef DHD_PCIE_RUNTIMEPM
	/* Set up the runtime PM Idlecount timer */
8732 init_timer_compat(&dhd->rpm_timer, dhd_runtimepm, dhd);
8733 dhd->rpm_timer_valid = FALSE;
8734
8735 dhd->thr_rpm_ctl.thr_pid = DHD_PID_KT_INVALID;
8736 PROC_START(dhd_rpm_state_thread, dhd, &dhd->thr_rpm_ctl, 0, "dhd_rpm_state_thread");
8737 if (dhd->thr_rpm_ctl.thr_pid < 0) {
8738 goto fail;
8739 }
8740 #endif /* DHD_PCIE_RUNTIMEPM */
8741
8742 #ifdef SHOW_LOGTRACE
8743 skb_queue_head_init(&dhd->evt_trace_queue);
8744 #endif /* SHOW_LOGTRACE */
8745
8746 /* Set up the bottom half handler */
8747 if (dhd_dpc_prio >= 0) {
8748 /* Initialize DPC thread */
8749 PROC_START(dhd_dpc_thread, dhd, &dhd->thr_dpc_ctl, 0, "dhd_dpc");
8750 if (dhd->thr_dpc_ctl.thr_pid < 0) {
8751 goto fail;
8752 }
8753 } else {
8754 /* use tasklet for dpc */
8755 tasklet_init(&dhd->tasklet, dhd_dpc, (ulong)dhd);
8756 dhd->thr_dpc_ctl.thr_pid = -1;
8757 }
8758
8759 if (dhd->rxthread_enabled) {
8760 bzero(&dhd->pub.skbbuf[0], sizeof(void *) * MAXSKBPEND);
8761 /* Initialize RXF thread */
8762 PROC_START(dhd_rxf_thread, dhd, &dhd->thr_rxf_ctl, 0, "dhd_rxf");
8763 if (dhd->thr_rxf_ctl.thr_pid < 0) {
8764 goto fail;
8765 }
8766 }
8767
8768 dhd_state |= DHD_ATTACH_STATE_THREADS_CREATED;
8769
8770 #if defined(CONFIG_PM_SLEEP)
8771 if (!dhd_pm_notifier_registered) {
8772 dhd_pm_notifier_registered = TRUE;
8773 dhd->pm_notifier.notifier_call = dhd_pm_callback;
8774 dhd->pm_notifier.priority = 10;
8775 register_pm_notifier(&dhd->pm_notifier);
8776 }
8777
8778 #endif /* CONFIG_PM_SLEEP */
8779
8780 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
8781 dhd->early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN + 20;
8782 dhd->early_suspend.suspend = dhd_early_suspend;
8783 dhd->early_suspend.resume = dhd_late_resume;
8784 register_early_suspend(&dhd->early_suspend);
8785 dhd_state |= DHD_ATTACH_STATE_EARLYSUSPEND_DONE;
8786 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
8787
8788 #ifdef ARP_OFFLOAD_SUPPORT
8789 dhd->pend_ipaddr = 0;
8790 if (!dhd_inetaddr_notifier_registered) {
8791 dhd_inetaddr_notifier_registered = TRUE;
8792 register_inetaddr_notifier(&dhd_inetaddr_notifier);
8793 }
8794 #endif /* ARP_OFFLOAD_SUPPORT */
8795
8796 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
8797 if (!dhd_inet6addr_notifier_registered) {
8798 dhd_inet6addr_notifier_registered = TRUE;
8799 register_inet6addr_notifier(&dhd_inet6addr_notifier);
8800 }
8801 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
8802 dhd->dhd_deferred_wq = dhd_deferred_work_init((void *)dhd);
8803 #if defined(OEM_ANDROID)
8804 INIT_WORK(&dhd->dhd_hang_process_work, dhd_hang_process);
#endif /* OEM_ANDROID */
8806 #ifdef DEBUG_CPU_FREQ
8807 dhd->new_freq = alloc_percpu(int);
8808 dhd->freq_trans.notifier_call = dhd_cpufreq_notifier;
8809 cpufreq_register_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
8810 #endif // endif
8811 #ifdef DHDTCPACK_SUPPRESS
8812 #ifdef BCMSDIO
8813 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_DELAYTX);
8814 #elif defined(BCMPCIE)
8815 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_HOLD);
8816 #else
8817 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
8818 #endif /* BCMSDIO */
8819 #endif /* DHDTCPACK_SUPPRESS */
8820
8823
8824 #ifdef DHD_DEBUG_PAGEALLOC
8825 register_page_corrupt_cb(dhd_page_corrupt_cb, &dhd->pub);
8826 #endif /* DHD_DEBUG_PAGEALLOC */
8827
8828 #if defined(DHD_LB)
8829
8830 dhd_lb_set_default_cpus(dhd);
8831 DHD_LB_STATS_INIT(&dhd->pub);
8832
8833 /* Initialize the CPU Masks */
8834 if (dhd_cpumasks_init(dhd) == 0) {
8835 /* Now we have the current CPU maps, run through candidacy */
8836 dhd_select_cpu_candidacy(dhd);
8837
8838 /* Register the call backs to CPU Hotplug sub-system */
8839 dhd_register_cpuhp_callback(dhd);
8840
8841 } else {
		/*
		 * We are unable to initialize the CPU masks, so the candidacy
		 * algorithm won't run; Load Balancing will still be honoured
		 * based on the CPUs statically allocated for a given job at
		 * init time.
		 */
		dhd->cpu_notifier.notifier_call = NULL;
		DHD_ERROR(("%s(): dhd_cpumasks_init failed; CPUs for jobs will be static\n",
			__FUNCTION__));
8850 }
8851
8852 #ifdef DHD_LB_TXP
8853 #ifdef DHD_LB_TXP_DEFAULT_ENAB
	/* Turn the feature ON by default */
	atomic_set(&dhd->lb_txp_active, 1);
#else
	/* Turn the feature OFF by default */
	atomic_set(&dhd->lb_txp_active, 0);
8859 #endif /* DHD_LB_TXP_DEFAULT_ENAB */
8860 #endif /* DHD_LB_TXP */
8861
8862 #ifdef DHD_LB_RXP
	/* Turn the feature ON by default */
8864 atomic_set(&dhd->lb_rxp_active, 1);
8865 #endif /* DHD_LB_RXP */
8866
8867 /* Initialize the Load Balancing Tasklets and Napi object */
8868 #if defined(DHD_LB_TXC)
8869 tasklet_init(&dhd->tx_compl_tasklet,
8870 dhd_lb_tx_compl_handler, (ulong)(&dhd->pub));
8871 INIT_WORK(&dhd->tx_compl_dispatcher_work, dhd_tx_compl_dispatcher_fn);
8872 DHD_INFO(("%s load balance init tx_compl_tasklet\n", __FUNCTION__));
8873 #endif /* DHD_LB_TXC */
8874 #if defined(DHD_LB_RXC)
8875 tasklet_init(&dhd->rx_compl_tasklet,
8876 dhd_lb_rx_compl_handler, (ulong)(&dhd->pub));
8877 INIT_WORK(&dhd->rx_compl_dispatcher_work, dhd_rx_compl_dispatcher_fn);
8878 DHD_INFO(("%s load balance init rx_compl_tasklet\n", __FUNCTION__));
8879 #endif /* DHD_LB_RXC */
8880
8881 #if defined(DHD_LB_RXP)
8882 __skb_queue_head_init(&dhd->rx_pend_queue);
8883 skb_queue_head_init(&dhd->rx_napi_queue);
8884 /* Initialize the work that dispatches NAPI job to a given core */
8885 INIT_WORK(&dhd->rx_napi_dispatcher_work, dhd_rx_napi_dispatcher_fn);
8886 DHD_INFO(("%s load balance init rx_napi_queue\n", __FUNCTION__));
8887 #endif /* DHD_LB_RXP */
8888
8889 #if defined(DHD_LB_TXP)
	/* Initialize the work that dispatches a TX job to a given core */
	INIT_WORK(&dhd->tx_dispatcher_work, dhd_tx_dispatcher_work);
	skb_queue_head_init(&dhd->tx_pend_queue);
	/* Initialize the TX load-balancing tasklet */
	tasklet_init(&dhd->tx_tasklet,
		dhd_lb_tx_handler, (ulong)(dhd));
8895 DHD_INFO(("%s load balance init tx_pend_queue\n", __FUNCTION__));
8896 #endif /* DHD_LB_TXP */
8897
8898 dhd_state |= DHD_ATTACH_STATE_LB_ATTACH_DONE;
8899 #endif /* DHD_LB */
8900
8901 #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
8902 INIT_WORK(&dhd->axi_error_dispatcher_work, dhd_axi_error_dispatcher_fn);
8903 #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
8904
8905 #if defined(BCMPCIE)
8906 dhd->pub.extended_trap_data = MALLOCZ(osh, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
8907 if (dhd->pub.extended_trap_data == NULL) {
8908 DHD_ERROR(("%s: Failed to alloc extended_trap_data\n", __FUNCTION__));
8909 }
8910 #ifdef DNGL_AXI_ERROR_LOGGING
8911 dhd->pub.axi_err_dump = MALLOCZ(osh, sizeof(dhd_axi_error_dump_t));
8912 if (dhd->pub.axi_err_dump == NULL) {
8913 DHD_ERROR(("%s: Failed to alloc axi_err_dump\n", __FUNCTION__));
8914 }
8915 #endif /* DNGL_AXI_ERROR_LOGGING */
#endif /* BCMPCIE */
8917
8918 DHD_SSSR_MEMPOOL_INIT(&dhd->pub);
8919
8920 #ifdef EWP_EDL
8921 if (host_edl_support) {
8922 if (DHD_EDL_MEM_INIT(&dhd->pub) != BCME_OK) {
8923 host_edl_support = FALSE;
8924 }
8925 }
8926 #endif /* EWP_EDL */
8927
8928 (void)dhd_sysfs_init(dhd);
8929
8930 #ifdef WL_NATOE
8931 /* Open Netlink socket for NF_CONNTRACK notifications */
8932 dhd->pub.nfct = dhd_ct_open(&dhd->pub, NFNL_SUBSYS_CTNETLINK | NFNL_SUBSYS_CTNETLINK_EXP,
8933 CT_ALL);
8934 #endif /* WL_NATOE */
8935
8936 dhd_state |= DHD_ATTACH_STATE_DONE;
8937 dhd->dhd_state = dhd_state;
8938
8939 dhd_found++;
8940
8941 #ifdef DHD_DUMP_MNGR
8942 dhd->pub.dump_file_manage =
8943 (dhd_dump_file_manage_t *)MALLOCZ(dhd->pub.osh, sizeof(dhd_dump_file_manage_t));
8944 if (unlikely(!dhd->pub.dump_file_manage)) {
8945 DHD_ERROR(("%s(): could not allocate memory for - "
8946 "dhd_dump_file_manage_t\n", __FUNCTION__));
8947 }
8948 #endif /* DHD_DUMP_MNGR */
8949 #ifdef DHD_FW_COREDUMP
8950 /* Set memdump default values */
8951 #ifdef CUSTOMER_HW4_DEBUG
8952 dhd->pub.memdump_enabled = DUMP_DISABLED;
8953 #elif defined(OEM_ANDROID)
8954 dhd->pub.memdump_enabled = DUMP_MEMFILE_BUGON;
8955 #else
8956 dhd->pub.memdump_enabled = DUMP_MEMFILE;
8957 #endif /* CUSTOMER_HW4_DEBUG */
8958 /* Check the memdump capability */
8959 dhd_get_memdump_info(&dhd->pub);
8960 #endif /* DHD_FW_COREDUMP */
8961
8962 #ifdef DHD_ERPOM
8963 if (enable_erpom) {
8964 pom_handler = &dhd->pub.pom_wlan_handler;
8965 pom_handler->func_id = WLAN_FUNC_ID;
8966 pom_handler->handler = (void *)g_dhd_pub;
8967 pom_handler->power_off = dhd_wlan_power_off_handler;
8968 pom_handler->power_on = dhd_wlan_power_on_handler;
8969
8970 dhd->pub.pom_func_register = NULL;
8971 dhd->pub.pom_func_deregister = NULL;
8972 dhd->pub.pom_toggle_reg_on = NULL;
8973
8974 dhd->pub.pom_func_register = symbol_get(pom_func_register);
8975 dhd->pub.pom_func_deregister = symbol_get(pom_func_deregister);
8976 dhd->pub.pom_toggle_reg_on = symbol_get(pom_toggle_reg_on);
8977
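		/* symbol_get() pinned the POM module; the matching symbol_put()
		 * calls below release that reference while the raw function
		 * pointers are kept, on the expectation that POM stays loaded
		 * for the lifetime of this driver.
		 */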
8978 symbol_put(pom_func_register);
8979 symbol_put(pom_func_deregister);
8980 symbol_put(pom_toggle_reg_on);
8981
8982 if (!dhd->pub.pom_func_register ||
8983 !dhd->pub.pom_func_deregister ||
8984 !dhd->pub.pom_toggle_reg_on) {
8985 DHD_ERROR(("%s, enable_erpom enabled through module parameter but "
8986 "POM is not loaded\n", __FUNCTION__));
8987 ASSERT(0);
8988 goto fail;
8989 }
8990 dhd->pub.pom_func_register(pom_handler);
8991 dhd->pub.enable_erpom = TRUE;
8992
8993 }
8994 #endif /* DHD_ERPOM */
8995 return &dhd->pub;
8996
8997 fail:
8998 if (dhd_state >= DHD_ATTACH_STATE_DHD_ALLOC) {
8999 DHD_TRACE(("%s: Calling dhd_detach dhd_state 0x%x &dhd->pub %p\n",
9000 __FUNCTION__, dhd_state, &dhd->pub));
9001 dhd->dhd_state = dhd_state;
9002 dhd_detach(&dhd->pub);
9003 dhd_free(&dhd->pub);
9004 }
9005
9006 dhd_null_flag:
9007 return NULL;
9008 }
9009
9010 int dhd_get_fw_mode(dhd_info_t *dhdinfo)
9011 {
9012 if (strstr(dhdinfo->fw_path, "_apsta") != NULL)
9013 return DHD_FLAG_HOSTAP_MODE;
9014 if (strstr(dhdinfo->fw_path, "_p2p") != NULL)
9015 return DHD_FLAG_P2P_MODE;
9016 if (strstr(dhdinfo->fw_path, "_ibss") != NULL)
9017 return DHD_FLAG_IBSS_MODE;
9018 if (strstr(dhdinfo->fw_path, "_mfg") != NULL)
9019 return DHD_FLAG_MFG_MODE;
9020
9021 return DHD_FLAG_STA_MODE;
9022 }
9023
9024 int dhd_bus_get_fw_mode(dhd_pub_t *dhdp)
9025 {
9026 return dhd_get_fw_mode(dhdp->info);
9027 }
9028
9029 extern char * nvram_get(const char *name);
9030
9031
9032 int cis_chipvendor = 0;
9033 unsigned short cis_device = 0;
9034 char fw_path[1024] = {0};
9035 char nv_path[1024] = {0};
9036
9037 #define DEFAULT_BCMDHD_FW_PATH "/vendor/etc/firmware/"
9038 #define DEFAULT_BCMDHD_NVRAM_PATH "/vendor/etc/firmware/"
9039
9040 #define FW_CYW43364 "fw_cyw43364.bin"
9041 #define FW_CYW43438 "fw_cyw43438.bin"
9042 #define FW_CYW43455 "fw_cyw43455.bin"
9043 #define FW_CYW4354 "fw_cyw4354.bin"
9044 #define FW_CYW4373 "cyfmac4373-sdio.bin"
9045
9046 #define NVRAM_CYW43364 "nvram_azw432.txt"
9047 #define NVRAM_CYW43438 "nvram_azw372.txt"
9048 #define NVRAM_CYW43455 "nvram_azw256.txt"
9049 #define NVRAM_CYW4354 "nvram_azw235.txt"
9050 #define NVRAM_CYW4373 "brcmfmac4373-sdio.txt"
9051
9052 bool dhd_update_fw_nv_path(dhd_info_t *dhdinfo)
9053 {
9054 int fw_len;
9055 int nv_len;
9056 const char *fw = NULL;
9057 const char *nv = NULL;
9058 #ifdef DHD_UCODE_DOWNLOAD
9059 int uc_len;
9060 const char *uc = NULL;
9061 #endif /* DHD_UCODE_DOWNLOAD */
9062 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9063 int fw_path_len = sizeof(dhdinfo->fw_path);
9064 int nv_path_len = sizeof(dhdinfo->nv_path);
9065
	/* Update the firmware and nvram paths. A path may come from adapter info
	 * or from a module parameter. The path from adapter info is used for
	 * initialization only (as it won't change).
	 *
	 * The firmware_path/nvram_path module parameters may be changed by the
	 * system at run time. When one changes we need to copy it to
	 * dhdinfo->fw_path. An Android private command may also change
	 * dhdinfo->fw_path. As such we need to clear the path info in the module
	 * parameter after it is copied. We won't update the path until the module
	 * parameter is changed again (first character is not '\0').
	 */
9075
9076 /* set default firmware and nvram path for built-in type driver */
9077 if (!dhd_download_fw_on_driverload) {
9078 #ifdef CONFIG_BCMDHD_FW_PATH
9079 fw = VENDOR_PATH CONFIG_BCMDHD_FW_PATH;
9080 #endif /* CONFIG_BCMDHD_FW_PATH */
9081 #ifdef CONFIG_BCMDHD_NVRAM_PATH
9082 nv = VENDOR_PATH CONFIG_BCMDHD_NVRAM_PATH;
9083 #endif /* CONFIG_BCMDHD_NVRAM_PATH */
9084 }
9085
9086 /* check if we need to initialize the path */
9087 if (dhdinfo->fw_path[0] == '\0') {
9088 if (adapter && adapter->fw_path && adapter->fw_path[0] != '\0')
9089 fw = adapter->fw_path;
9090 }
9091 if (dhdinfo->nv_path[0] == '\0') {
9092 if (adapter && adapter->nv_path && adapter->nv_path[0] != '\0')
9093 nv = adapter->nv_path;
9094 }
9095
9096 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9097 *
9098 * TODO: need a solution for multi-chip, can't use the same firmware for all chips
9099 */
9100 if (firmware_path[0] != '\0')
9101 fw = firmware_path;
9102
9103 if (nvram_path[0] != '\0')
9104 nv = nvram_path;
9105
9106
9107 if (cis_device == BCM43455_CHIP_ID && cis_chipvendor == 0x81) {
9108 sprintf(fw_path, "%s%s", DEFAULT_BCMDHD_FW_PATH, FW_CYW43455);
9109 sprintf(nv_path, "%s%s", DEFAULT_BCMDHD_NVRAM_PATH, NVRAM_CYW43455);
9110 DHD_ERROR(("Adding CYW43455 firmware and NVRAM path by CIS\n"
9111 "\tfirmware path: %s\n"
9112 "\tNVRAM path: %s\n", fw_path, nv_path));
9113
9114 fw = fw_path;
9115 nv = nv_path;
9116 } else if (cis_device == BCM43430_CHIP_ID) {
9117 if (cis_chipvendor == 0x81) {
9118 sprintf(fw_path, "%s%s", DEFAULT_BCMDHD_FW_PATH, FW_CYW43438);
9119 sprintf(nv_path, "%s%s", DEFAULT_BCMDHD_NVRAM_PATH, NVRAM_CYW43438);
9120 DHD_ERROR(("Adding CYW43438 firmware and NVRAM path by CIS\n"
9121 "\tfirmware path: %s\n"
9122 "\tNVRAM path: %s\n", fw_path, nv_path));
9123 fw = fw_path;
9124 nv = nv_path;
9125 } else {
9126 sprintf(fw_path, "%s%s", DEFAULT_BCMDHD_FW_PATH, FW_CYW43364);
9127 sprintf(nv_path, "%s%s", DEFAULT_BCMDHD_NVRAM_PATH, NVRAM_CYW43364);
9128 DHD_ERROR(("Adding CYW43364 firmware and NVRAM path by CIS\n"
9129 "\tfirmware path: %s\n"
9130 "\tNVRAM path: %s\n", fw_path, nv_path));
9131 fw = fw_path;
9132 nv = nv_path;
9133 }
9134 } else if (cis_device == BCM4354_CHIP_ID) {
9135 sprintf(fw_path, "%s%s", DEFAULT_BCMDHD_FW_PATH, FW_CYW4354);
9136 sprintf(nv_path, "%s%s", DEFAULT_BCMDHD_NVRAM_PATH, NVRAM_CYW4354);
9137 DHD_ERROR(("Adding CYW4354 firmware and NVRAM path by CIS\n"
9138 "\tfirmware path: %s\n"
9139 "\tNVRAM path: %s\n", fw_path, nv_path));
9140 fw = fw_path;
9141 nv = nv_path;
9142 } else if (cis_device == BCM4373_CHIP_ID) {
9144 sprintf(fw_path, "%s%s", DEFAULT_BCMDHD_FW_PATH, FW_CYW4373);
9145 sprintf(nv_path, "%s%s", DEFAULT_BCMDHD_NVRAM_PATH, NVRAM_CYW4373);
9146 DHD_ERROR(("Adding CYW4373 firmware and NVRAM path by CIS\n"
9147 "\tfirmware path: %s\n"
9148 "\tNVRAM path: %s\n", fw_path, nv_path));
9149 fw = fw_path;
9150 nv = nv_path;
9151 }
9152
9153 #ifdef DHD_UCODE_DOWNLOAD
9154 if (ucode_path[0] != '\0')
9155 uc = ucode_path;
9156 #endif /* DHD_UCODE_DOWNLOAD */
9157
9158 if (fw && fw[0] != '\0') {
9159 fw_len = strlen(fw);
9160 if (fw_len >= fw_path_len) {
9161 DHD_ERROR(("fw path len exceeds max len of dhdinfo->fw_path\n"));
9162 return FALSE;
9163 }
9164 strncpy(dhdinfo->fw_path, fw, fw_path_len);
9165 if (dhdinfo->fw_path[fw_len-1] == '\n')
9166 dhdinfo->fw_path[fw_len-1] = '\0';
9167 }
9168 if (nv && nv[0] != '\0') {
9169 nv_len = strlen(nv);
9170 if (nv_len >= nv_path_len) {
9171 DHD_ERROR(("nvram path len exceeds max len of dhdinfo->nv_path\n"));
9172 return FALSE;
9173 }
9174 memset(dhdinfo->nv_path, 0, nv_path_len);
9175 strncpy(dhdinfo->nv_path, nv, nv_path_len);
9176 dhdinfo->nv_path[nv_len] = '\0';
9177 #ifdef DHD_USE_SINGLE_NVRAM_FILE
9178 /* Remove "_net" or "_mfg" tag from current nvram path */
9179 {
9180 char *nvram_tag = "nvram_";
9181 char *ext_tag = ".txt";
9182 char *sp_nvram = strnstr(dhdinfo->nv_path, nvram_tag, nv_path_len);
9183 bool valid_buf = sp_nvram && ((uint32)(sp_nvram + strlen(nvram_tag) +
9184 strlen(ext_tag) - dhdinfo->nv_path) <= nv_path_len);
9185 if (valid_buf) {
9186 char *sp = sp_nvram + strlen(nvram_tag) - 1;
9187 uint32 padding_size = (uint32)(dhdinfo->nv_path +
9188 nv_path_len - sp);
9189 memset(sp, 0, padding_size);
9190 strncat(dhdinfo->nv_path, ext_tag, strlen(ext_tag));
9191 nv_len = strlen(dhdinfo->nv_path);
9192 DHD_INFO(("%s: new nvram path = %s\n",
9193 __FUNCTION__, dhdinfo->nv_path));
9194 } else if (sp_nvram) {
9195 DHD_ERROR(("%s: buffer space for nvram path is not enough\n",
9196 __FUNCTION__));
9197 return FALSE;
9198 } else {
9199 DHD_ERROR(("%s: Couldn't find the nvram tag. current"
9200 " nvram path = %s\n", __FUNCTION__, dhdinfo->nv_path));
9201 }
9202 }
9203 #endif /* DHD_USE_SINGLE_NVRAM_FILE */
9204 if (dhdinfo->nv_path[nv_len-1] == '\n')
9205 dhdinfo->nv_path[nv_len-1] = '\0';
9206 }
9207 #ifdef DHD_UCODE_DOWNLOAD
9208 if (uc && uc[0] != '\0') {
9209 uc_len = strlen(uc);
9210 if (uc_len >= sizeof(dhdinfo->uc_path)) {
9211 DHD_ERROR(("uc path len exceeds max len of dhdinfo->uc_path\n"));
9212 return FALSE;
9213 }
9214 strncpy(dhdinfo->uc_path, uc, sizeof(dhdinfo->uc_path));
9215 if (dhdinfo->uc_path[uc_len-1] == '\n')
9216 dhdinfo->uc_path[uc_len-1] = '\0';
9217 }
9218 #endif /* DHD_UCODE_DOWNLOAD */
9219
9220 /* clear the path in module parameter */
9221 if (dhd_download_fw_on_driverload) {
9222 firmware_path[0] = '\0';
9223 nvram_path[0] = '\0';
9224 }
9225 #ifdef DHD_UCODE_DOWNLOAD
9226 ucode_path[0] = '\0';
9227 DHD_ERROR(("ucode path: %s\n", dhdinfo->uc_path));
9228 #endif /* DHD_UCODE_DOWNLOAD */
9229
9230 /* fw_path and nv_path are not mandatory for BCMEMBEDIMAGE */
9231 if (dhdinfo->fw_path[0] == '\0') {
9232 DHD_ERROR(("firmware path not found\n"));
9233 return FALSE;
9234 }
9235 if (dhdinfo->nv_path[0] == '\0') {
9236 DHD_ERROR(("nvram path not found\n"));
9237 return FALSE;
9238 }
9239
9240 return TRUE;
9241 }
9242
9243 #if defined(BT_OVER_SDIO)
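/*
 * Resolve dhdinfo->btfw_path the same way as the WLAN firmware path:
 * Kconfig default, then adapter info, then the btfw_path module parameter,
 * which is cleared after it is consumed.
 */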
9244 extern bool dhd_update_btfw_path(dhd_info_t *dhdinfo, char* btfw_path)
9245 {
9246 int fw_len;
9247 const char *fw = NULL;
9248 wifi_adapter_info_t *adapter = dhdinfo->adapter;
9249
9250 /* Update the bt firmware path. The path may come from adapter info or from a
9251 * module parameter; the path from adapter info is used for initialization only
9252 * (as it won't change).
9253 *
9254 * The btfw_path module parameter may be changed by the system at run time. When it
9255 * changes we copy it to dhdinfo->btfw_path (an Android private command may also
9256 * change dhdinfo->btfw_path), then clear the module parameter. The path is not
9257 * updated again until the parameter changes (first character not '\0').
9258 */
9259
9260 /* set default bt firmware path for built-in type driver */
9261 if (!dhd_download_fw_on_driverload) {
9262 #ifdef CONFIG_BCMDHD_BTFW_PATH
9263 fw = CONFIG_BCMDHD_BTFW_PATH;
9264 #endif /* CONFIG_BCMDHD_BTFW_PATH */
9265 }
9266
9267 /* check if we need to initialize the path */
9268 if (dhdinfo->btfw_path[0] == '\0') {
9269 if (adapter && adapter->btfw_path && adapter->btfw_path[0] != '\0')
9270 fw = adapter->btfw_path;
9271 }
9272
9273 /* Use module parameter if it is valid, EVEN IF the path has not been initialized
9274 */
9275 if (btfw_path[0] != '\0')
9276 fw = btfw_path;
9277
9278 if (fw && fw[0] != '\0') {
9279 fw_len = strlen(fw);
9280 if (fw_len >= sizeof(dhdinfo->btfw_path)) {
9281 DHD_ERROR(("fw path len exceeds max len of dhdinfo->btfw_path\n"));
9282 return FALSE;
9283 }
9284 strncpy(dhdinfo->btfw_path, fw, sizeof(dhdinfo->btfw_path));
9285 if (dhdinfo->btfw_path[fw_len-1] == '\n')
9286 dhdinfo->btfw_path[fw_len-1] = '\0';
9287 }
9288
9289 /* clear the path in module parameter */
9290 btfw_path[0] = '\0';
9291
9292 if (dhdinfo->btfw_path[0] == '\0') {
9293 DHD_ERROR(("bt firmware path not found\n"));
9294 return FALSE;
9295 }
9296
9297 return TRUE;
9298 }
9299 #endif /* defined (BT_OVER_SDIO) */
9300
9301 #ifdef CUSTOMER_HW4_DEBUG
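/*
 * Cross-check the chip ID probed from the bus against the chip the driver
 * was compiled for, allowing a few known compatible ID pairs (e.g. 4350/4354).
 */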
9302 bool dhd_validate_chipid(dhd_pub_t *dhdp)
9303 {
9304 uint chipid = dhd_bus_chip_id(dhdp);
9305 uint config_chipid;
9306
9307 #ifdef BCM4375_CHIP
9308 config_chipid = BCM4375_CHIP_ID;
9309 #elif defined(BCM4361_CHIP)
9310 config_chipid = BCM4361_CHIP_ID;
9311 #elif defined(BCM4359_CHIP)
9312 config_chipid = BCM4359_CHIP_ID;
9313 #elif defined(BCM4358_CHIP)
9314 config_chipid = BCM4358_CHIP_ID;
9315 #elif defined(BCM4354_CHIP)
9316 config_chipid = BCM4354_CHIP_ID;
9317 #elif defined(BCM4339_CHIP)
9318 config_chipid = BCM4339_CHIP_ID;
9319 #elif defined(BCM4335_CHIP)
9320 config_chipid = BCM4335_CHIP_ID;
9321 #elif defined(BCM43430_CHIP)
9322 config_chipid = BCM43430_CHIP_ID;
9323 #elif defined(BCM43018_CHIP)
9324 config_chipid = BCM43018_CHIP_ID;
9325 #elif defined(BCM43455_CHIP)
9326 config_chipid = BCM4345_CHIP_ID;
9327 #elif defined(BCM43454_CHIP)
9328 config_chipid = BCM43454_CHIP_ID;
9329 #elif defined(BCM43012_CHIP_)
9330 config_chipid = BCM43012_CHIP_ID;
9331 #else
9332 DHD_ERROR(("%s: Unknown chip id, if you use new chipset,"
9333 " please add CONFIG_BCMXXXX into the Kernel and"
9334 " BCMXXXX_CHIP definition into the DHD driver\n",
9335 __FUNCTION__));
9336 config_chipid = 0;
9337
9338 return FALSE;
9339 #endif /* BCM4375_CHIP */
9340
9341 #if defined(BCM4354_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
9342 if (chipid == BCM4350_CHIP_ID && config_chipid == BCM4354_CHIP_ID) {
9343 return TRUE;
9344 }
9345 #endif /* BCM4354_CHIP && SUPPORT_MULTIPLE_REVISION */
9346 #if defined(BCM4358_CHIP) && defined(SUPPORT_MULTIPLE_REVISION)
9347 if (chipid == BCM43569_CHIP_ID && config_chipid == BCM4358_CHIP_ID) {
9348 return TRUE;
9349 }
9350 #endif /* BCM4358_CHIP && SUPPORT_MULTIPLE_REVISION */
9351 #if defined(BCM4359_CHIP)
9352 if (chipid == BCM4355_CHIP_ID && config_chipid == BCM4359_CHIP_ID) {
9353 return TRUE;
9354 }
9355 #endif /* BCM4359_CHIP */
9356 #if defined(BCM4361_CHIP)
9357 if (chipid == BCM4347_CHIP_ID && config_chipid == BCM4361_CHIP_ID) {
9358 return TRUE;
9359 }
9360 #endif /* BCM4361_CHIP */
9361
9362 return config_chipid == chipid;
9363 }
9364 #endif /* CUSTOMER_HW4_DEBUG */
9365
9366 #if defined(BT_OVER_SDIO)
9367 wlan_bt_handle_t dhd_bt_get_pub_hndl(void)
9368 {
9369 DHD_ERROR(("%s: g_dhd_pub %p\n", __FUNCTION__, g_dhd_pub));
9370 /* assuming that dhd_pub_t type pointer is available from a global variable */
9371 return (wlan_bt_handle_t) g_dhd_pub;
9372 } EXPORT_SYMBOL(dhd_bt_get_pub_hndl);
9373
9374 int dhd_download_btfw(wlan_bt_handle_t handle, char* btfw_path)
9375 {
9376 int ret = -1;
9377 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
9378 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9379
9380 /* Download BT firmware image to the dongle */
9381 if (dhd->pub.busstate == DHD_BUS_DATA && dhd_update_btfw_path(dhd, btfw_path)) {
9382 DHD_INFO(("%s: download btfw from: %s\n", __FUNCTION__, dhd->btfw_path));
9383 ret = dhd_bus_download_btfw(dhd->pub.bus, dhd->pub.osh, dhd->btfw_path);
9384 if (ret < 0) {
9385 DHD_ERROR(("%s: failed to download btfw from: %s\n",
9386 __FUNCTION__, dhd->btfw_path));
9387 return ret;
9388 }
9389 }
9390 return ret;
9391 } EXPORT_SYMBOL(dhd_download_btfw);
9392 #endif /* defined (BT_OVER_SDIO) */
9393
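/*
 * Bring the dongle up: download firmware/nvram if the bus is down, start the
 * watchdog timer, initialize the bus and (for PCIe full dongle) the flow
 * rings, run protocol init, and finally sync host state with the dongle.
 */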
9394 int
9395 dhd_bus_start(dhd_pub_t *dhdp)
9396 {
9397 int ret = -1;
9398 dhd_info_t *dhd = (dhd_info_t*)dhdp->info;
9399 unsigned long flags;
9400
9401 #if defined(DHD_DEBUG) && defined(BCMSDIO)
9402 int fw_download_start = 0, fw_download_end = 0, f2_sync_start = 0, f2_sync_end = 0;
9403 #endif /* DHD_DEBUG && BCMSDIO */
9404 ASSERT(dhd);
9405
9406 DHD_TRACE(("Enter %s:\n", __FUNCTION__));
9407 dhdp->dongle_trap_occured = 0;
9408 #ifdef DHD_SSSR_DUMP
9409 /* Flag to indicate sssr dump is collected */
9410 dhdp->sssr_dump_collected = 0;
9411 #endif /* DHD_SSSR_DUMP */
9412 dhdp->iovar_timeout_occured = 0;
9413 #ifdef PCIE_FULL_DONGLE
9414 dhdp->d3ack_timeout_occured = 0;
9415 dhdp->livelock_occured = 0;
9416 dhdp->pktid_audit_failed = 0;
9417 #endif /* PCIE_FULL_DONGLE */
9418 dhd->pub.iface_op_failed = 0;
9419 dhd->pub.scan_timeout_occurred = 0;
9420 dhd->pub.scan_busy_occurred = 0;
9421 /* Clear induced error during initialization */
9422 dhd->pub.dhd_induce_error = DHD_INDUCE_ERROR_CLEAR;
9423
9424 /* set default value for now. Will be updated again in dhd_preinit_ioctls()
9425 * after querying FW
9426 */
9427 dhdp->event_log_max_sets = NUM_EVENT_LOG_SETS;
9428 dhdp->event_log_max_sets_queried = FALSE;
9429 dhdp->smmu_fault_occurred = 0;
9430 #ifdef DNGL_AXI_ERROR_LOGGING
9431 dhdp->axi_error = FALSE;
9432 #endif /* DNGL_AXI_ERROR_LOGGING */
9433
9434 DHD_PERIM_LOCK(dhdp);
9435 /* try to download image and nvram to the dongle */
9436 if (dhd->pub.busstate == DHD_BUS_DOWN && dhd_update_fw_nv_path(dhd)) {
9437 /* Indicate FW download has not completed yet */
9438 dhd->pub.fw_download_status = FW_DOWNLOAD_IN_PROGRESS;
9439 DHD_INFO(("%s download fw %s, nv %s\n", __FUNCTION__, dhd->fw_path, dhd->nv_path));
9440 #if defined(DHD_DEBUG) && defined(BCMSDIO)
9441 fw_download_start = OSL_SYSUPTIME();
9442 #endif /* DHD_DEBUG && BCMSDIO */
9443 ret = dhd_bus_download_firmware(dhd->pub.bus, dhd->pub.osh,
9444 dhd->fw_path, dhd->nv_path);
9445 #if defined(DHD_DEBUG) && defined(BCMSDIO)
9446 fw_download_end = OSL_SYSUPTIME();
9447 #endif /* DHD_DEBUG && BCMSDIO */
9448 if (ret < 0) {
9449 DHD_ERROR(("%s: failed to download firmware %s\n",
9450 __FUNCTION__, dhd->fw_path));
9451 DHD_PERIM_UNLOCK(dhdp);
9452 return ret;
9453 }
9454 /* Indicate FW Download has succeeded */
9455 dhd->pub.fw_download_status = FW_DOWNLOAD_DONE;
9456 }
9457 if (dhd->pub.busstate != DHD_BUS_LOAD) {
9458 DHD_PERIM_UNLOCK(dhdp);
9459 return -ENETDOWN;
9460 }
9461
9462 #ifdef BCMSDIO
9463 dhd_os_sdlock(dhdp);
9464 #endif /* BCMSDIO */
9465
9466 /* Start the watchdog timer */
9467 dhd->pub.tickcnt = 0;
9468 dhd_os_wd_timer(&dhd->pub, dhd_watchdog_ms);
9469
9470 /* Bring up the bus */
9471 if ((ret = dhd_bus_init(&dhd->pub, FALSE)) != 0) {
9472
9473 DHD_ERROR(("%s, dhd_bus_init failed %d\n", __FUNCTION__, ret));
9474 #ifdef BCMSDIO
9475 dhd_os_sdunlock(dhdp);
9476 #endif /* BCMSDIO */
9477 DHD_PERIM_UNLOCK(dhdp);
9478 return ret;
9479 }
9480
9481 DHD_ENABLE_RUNTIME_PM(&dhd->pub);
9482
9483 #ifdef DHD_ULP
9484 dhd_ulp_set_ulp_state(dhdp, DHD_ULP_DISABLED);
9485 #endif /* DHD_ULP */
9486 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
9487 /* Host registration for OOB interrupt */
9488 if (dhd_bus_oob_intr_register(dhdp)) {
9489 /* deactivate timer and wait for the handler to finish */
9490 #if !defined(BCMPCIE_OOB_HOST_WAKE)
9491 DHD_GENERAL_LOCK(&dhd->pub, flags);
9492 dhd->wd_timer_valid = FALSE;
9493 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9494 del_timer_sync(&dhd->timer);
9495
9496 #endif /* !BCMPCIE_OOB_HOST_WAKE */
9497 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9498 DHD_PERIM_UNLOCK(dhdp);
9499 DHD_ERROR(("%s Host failed to register for OOB\n", __FUNCTION__));
9500 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9501 return -ENODEV;
9502 }
9503
9504 #if defined(BCMPCIE_OOB_HOST_WAKE)
9505 dhd_bus_oob_intr_set(dhdp, TRUE);
9506 #else
9507 /* Enable oob at firmware */
9508 dhd_enable_oob_intr(dhd->pub.bus, TRUE);
9509 #endif /* BCMPCIE_OOB_HOST_WAKE */
9510 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
9511 #ifdef PCIE_FULL_DONGLE
9512 {
9513 /* max_h2d_rings includes H2D common rings */
9514 uint32 max_h2d_rings = dhd_bus_max_h2d_queues(dhd->pub.bus);
9515
9516 DHD_ERROR(("%s: Initializing %u h2drings\n", __FUNCTION__,
9517 max_h2d_rings));
9518 if ((ret = dhd_flow_rings_init(&dhd->pub, max_h2d_rings)) != BCME_OK) {
9519 #ifdef BCMSDIO
9520 dhd_os_sdunlock(dhdp);
9521 #endif /* BCMSDIO */
9522 DHD_PERIM_UNLOCK(dhdp);
9523 return ret;
9524 }
9525 }
9526 #endif /* PCIE_FULL_DONGLE */
9527
9528 /* Do protocol initialization necessary for IOCTL/IOVAR */
9529 ret = dhd_prot_init(&dhd->pub);
9530 if (unlikely(ret != BCME_OK)) {
9531 DHD_PERIM_UNLOCK(dhdp);
9532 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9533 return ret;
9534 }
9535
9536 /* If bus is not ready, can't come up */
9537 if (dhd->pub.busstate != DHD_BUS_DATA) {
9538 DHD_GENERAL_LOCK(&dhd->pub, flags);
9539 dhd->wd_timer_valid = FALSE;
9540 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9541 del_timer_sync(&dhd->timer);
9542 DHD_ERROR(("%s failed bus is not ready\n", __FUNCTION__));
9543 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
9544 #ifdef BCMSDIO
9545 dhd_os_sdunlock(dhdp);
9546 #endif /* BCMSDIO */
9547 DHD_PERIM_UNLOCK(dhdp);
9548 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9549 return -ENODEV;
9550 }
9551
9552 #ifdef BCMSDIO
9553 dhd_os_sdunlock(dhdp);
9554 #endif /* BCMSDIO */
9555
9556 /* Bus is ready, query any dongle information */
9557 #if defined(DHD_DEBUG) && defined(BCMSDIO)
9558 f2_sync_start = OSL_SYSUPTIME();
9559 #endif /* DHD_DEBUG && BCMSDIO */
9560 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0) {
9561 DHD_GENERAL_LOCK(&dhd->pub, flags);
9562 dhd->wd_timer_valid = FALSE;
9563 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
9564 del_timer_sync(&dhd->timer);
9565 DHD_ERROR(("%s failed to sync with dongle\n", __FUNCTION__));
9566 DHD_OS_WD_WAKE_UNLOCK(&dhd->pub);
9567 DHD_PERIM_UNLOCK(dhdp);
9568 return ret;
9569 }
9570
9571 #if defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
9572 defined(CONFIG_SOC_EXYNOS9820)
9573 DHD_ERROR(("%s: Enable L1ss EP side\n", __FUNCTION__));
9574 exynos_pcie_l1ss_ctrl(1, PCIE_L1SS_CTRL_WIFI);
9575 #endif /* CONFIG_SOC_EXYNOS8895 || CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
9576
9577 #if defined(DHD_DEBUG) && defined(BCMSDIO)
9578 f2_sync_end = OSL_SYSUPTIME();
9579 DHD_ERROR(("Time taken for FW download and F2 ready is: %d msec\n",
9580 (fw_download_end - fw_download_start) + (f2_sync_end - f2_sync_start)));
9581 #endif /* DHD_DEBUG && BCMSDIO */
9582
9583 #ifdef ARP_OFFLOAD_SUPPORT
9584 if (dhd->pend_ipaddr) {
9585 #ifdef AOE_IP_ALIAS_SUPPORT
9586 aoe_update_host_ipv4_table(&dhd->pub, dhd->pend_ipaddr, TRUE, 0);
9587 #endif /* AOE_IP_ALIAS_SUPPORT */
9588 dhd->pend_ipaddr = 0;
9589 }
9590 #endif /* ARP_OFFLOAD_SUPPORT */
9591
9592 DHD_PERIM_UNLOCK(dhdp);
9593
9594 return 0;
9595 }
9596 #ifdef WLTDLS
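/*
 * Enable/disable TDLS in the firmware and, when auto mode is requested,
 * program the idle-time, RSSI and packet-count thresholds that trigger
 * automatic TDLS link setup/teardown.
 */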
9597 int _dhd_tdls_enable(dhd_pub_t *dhd, bool tdls_on, bool auto_on, struct ether_addr *mac)
9598 {
9599 uint32 tdls = tdls_on;
9600 int ret = 0;
9601 uint32 tdls_auto_op = 0;
9602 uint32 tdls_idle_time = CUSTOM_TDLS_IDLE_MODE_SETTING;
9603 int32 tdls_rssi_high = CUSTOM_TDLS_RSSI_THRESHOLD_HIGH;
9604 int32 tdls_rssi_low = CUSTOM_TDLS_RSSI_THRESHOLD_LOW;
9605 uint32 tdls_pktcnt_high = CUSTOM_TDLS_PCKTCNT_THRESHOLD_HIGH;
9606 uint32 tdls_pktcnt_low = CUSTOM_TDLS_PCKTCNT_THRESHOLD_LOW;
9607
9608 BCM_REFERENCE(mac);
9609 if (!FW_SUPPORTED(dhd, tdls))
9610 return BCME_ERROR;
9611
9612 if (dhd->tdls_enable == tdls_on)
9613 goto auto_mode;
9614 ret = dhd_iovar(dhd, 0, "tdls_enable", (char *)&tdls, sizeof(tdls), NULL, 0, TRUE);
9615 if (ret < 0) {
9616 DHD_ERROR(("%s: tdls %d failed %d\n", __FUNCTION__, tdls, ret));
9617 goto exit;
9618 }
9619 dhd->tdls_enable = tdls_on;
9620 auto_mode:
9621
9622 tdls_auto_op = auto_on;
9623 ret = dhd_iovar(dhd, 0, "tdls_auto_op", (char *)&tdls_auto_op, sizeof(tdls_auto_op), NULL,
9624 0, TRUE);
9625 if (ret < 0) {
9626 DHD_ERROR(("%s: tdls_auto_op failed %d\n", __FUNCTION__, ret));
9627 goto exit;
9628 }
9629
9630 if (tdls_auto_op) {
9631 ret = dhd_iovar(dhd, 0, "tdls_idle_time", (char *)&tdls_idle_time,
9632 sizeof(tdls_idle_time), NULL, 0, TRUE);
9633 if (ret < 0) {
9634 DHD_ERROR(("%s: tdls_idle_time failed %d\n", __FUNCTION__, ret));
9635 goto exit;
9636 }
9637 ret = dhd_iovar(dhd, 0, "tdls_rssi_high", (char *)&tdls_rssi_high,
9638 sizeof(tdls_rssi_high), NULL, 0, TRUE);
9639 if (ret < 0) {
9640 DHD_ERROR(("%s: tdls_rssi_high failed %d\n", __FUNCTION__, ret));
9641 goto exit;
9642 }
9643 ret = dhd_iovar(dhd, 0, "tdls_rssi_low", (char *)&tdls_rssi_low,
9644 sizeof(tdls_rssi_low), NULL, 0, TRUE);
9645 if (ret < 0) {
9646 DHD_ERROR(("%s: tdls_rssi_low failed %d\n", __FUNCTION__, ret));
9647 goto exit;
9648 }
9649 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_high", (char *)&tdls_pktcnt_high,
9650 sizeof(tdls_pktcnt_high), NULL, 0, TRUE);
9651 if (ret < 0) {
9652 DHD_ERROR(("%s: tdls_trigger_pktcnt_high failed %d\n", __FUNCTION__, ret));
9653 goto exit;
9654 }
9655 ret = dhd_iovar(dhd, 0, "tdls_trigger_pktcnt_low", (char *)&tdls_pktcnt_low,
9656 sizeof(tdls_pktcnt_low), NULL, 0, TRUE);
9657 if (ret < 0) {
9658 DHD_ERROR(("%s: tdls_trigger_pktcnt_low failed %d\n", __FUNCTION__, ret));
9659 goto exit;
9660 }
9661 }
9662
9663 exit:
9664 return ret;
9665 }
9666 int dhd_tdls_enable(struct net_device *dev, bool tdls_on, bool auto_on, struct ether_addr *mac)
9667 {
9668 dhd_info_t *dhd = DHD_DEV_INFO(dev);
9669 int ret = 0;
9670 if (dhd)
9671 ret = _dhd_tdls_enable(&dhd->pub, tdls_on, auto_on, mac);
9672 else
9673 ret = BCME_ERROR;
9674 return ret;
9675 }
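/*
 * Switch between WFD and non-WFD TDLS operation: TDLS is cycled off and back
 * on around the tdls_wfd_mode change, with auto mode enabled only for
 * non-WFD operation (and only when ENABLE_TDLS_AUTO_MODE is set).
 */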
9676 int
9677 dhd_tdls_set_mode(dhd_pub_t *dhd, bool wfd_mode)
9678 {
9679 int ret = 0;
9680 bool auto_on = false;
9681 uint32 mode = wfd_mode;
9682
9683 #ifdef ENABLE_TDLS_AUTO_MODE
9684 if (wfd_mode) {
9685 auto_on = false;
9686 } else {
9687 auto_on = true;
9688 }
9689 #else
9690 auto_on = false;
9691 #endif /* ENABLE_TDLS_AUTO_MODE */
9692 ret = _dhd_tdls_enable(dhd, false, auto_on, NULL);
9693 if (ret < 0) {
9694 DHD_ERROR(("Disable tdls_auto_op failed. %d\n", ret));
9695 return ret;
9696 }
9697
9698 ret = dhd_iovar(dhd, 0, "tdls_wfd_mode", (char *)&mode, sizeof(mode), NULL, 0, TRUE);
9699 if ((ret < 0) && (ret != BCME_UNSUPPORTED)) {
9700 DHD_ERROR(("%s: tdls_wfd_mode faile_wfd_mode %d\n", __FUNCTION__, ret));
9701 return ret;
9702 }
9703
9704 ret = _dhd_tdls_enable(dhd, true, auto_on, NULL);
9705 if (ret < 0) {
9706 DHD_ERROR(("enable tdls_auto_op failed. %d\n", ret));
9707 return ret;
9708 }
9709
9710 dhd->tdls_mode = mode;
9711 return ret;
9712 }
9713 #ifdef PCIE_FULL_DONGLE
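/*
 * Maintain the host-side TDLS peer table from firmware connect/disconnect
 * events; on disconnect the peer's flow rings are deleted before the table
 * entry is freed.
 */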
9714 int dhd_tdls_update_peer_info(dhd_pub_t *dhdp, wl_event_msg_t *event)
9715 {
9716 dhd_pub_t *dhd_pub = dhdp;
9717 tdls_peer_node_t *cur = dhd_pub->peer_tbl.node;
9718 tdls_peer_node_t *new = NULL, *prev = NULL;
9719 int ifindex = dhd_ifname2idx(dhd_pub->info, event->ifname);
9720 uint8 *da = (uint8 *)&event->addr.octet[0];
9721 bool connect = FALSE;
9722 uint32 reason = ntoh32(event->reason);
9723 unsigned long flags;
9724
9725 /* No handling needed for peer discovered reason */
9726 if (reason == WLC_E_TDLS_PEER_DISCOVERED) {
9727 return BCME_ERROR;
9728 }
9729 if (reason == WLC_E_TDLS_PEER_CONNECTED)
9730 connect = TRUE;
9731 else if (reason == WLC_E_TDLS_PEER_DISCONNECTED)
9732 connect = FALSE;
9733 else
9734 {
9735 DHD_ERROR(("%s: TDLS Event reason is unknown\n", __FUNCTION__));
9736 return BCME_ERROR;
9737 }
9738 if (ifindex == DHD_BAD_IF)
9739 return BCME_ERROR;
9740
9741 if (connect) {
9742 while (cur != NULL) {
9743 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9744 DHD_ERROR(("%s: TDLS Peer exist already %d\n",
9745 __FUNCTION__, __LINE__));
9746 return BCME_ERROR;
9747 }
9748 cur = cur->next;
9749 }
9750
9751 new = MALLOC(dhd_pub->osh, sizeof(tdls_peer_node_t));
9752 if (new == NULL) {
9753 DHD_ERROR(("%s: Failed to allocate memory\n", __FUNCTION__));
9754 return BCME_ERROR;
9755 }
9756 memcpy(new->addr, da, ETHER_ADDR_LEN);
9757 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9758 new->next = dhd_pub->peer_tbl.node;
9759 dhd_pub->peer_tbl.node = new;
9760 dhd_pub->peer_tbl.tdls_peer_count++;
9761 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9762
9763 } else {
9764 while (cur != NULL) {
9765 if (!memcmp(da, cur->addr, ETHER_ADDR_LEN)) {
9766 dhd_flow_rings_delete_for_peer(dhd_pub, (uint8)ifindex, da);
9767 DHD_TDLS_LOCK(&dhdp->tdls_lock, flags);
9768 if (prev)
9769 prev->next = cur->next;
9770 else
9771 dhd_pub->peer_tbl.node = cur->next;
9772 MFREE(dhd_pub->osh, cur, sizeof(tdls_peer_node_t));
9773 dhd_pub->peer_tbl.tdls_peer_count--;
9774 DHD_TDLS_UNLOCK(&dhdp->tdls_lock, flags);
9775 return BCME_OK;
9776 }
9777 prev = cur;
9778 cur = cur->next;
9779 }
9780 DHD_ERROR(("%s: TDLS Peer Entry Not found\n", __FUNCTION__));
9781 }
9782 return BCME_OK;
9783 }
9784 #endif /* PCIE_FULL_DONGLE */
9785 #endif // endif
9786
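/* TRUE when op_mode indicates single- or multi-channel concurrent operation. */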
9787 bool dhd_is_concurrent_mode(dhd_pub_t *dhd)
9788 {
9789 if (!dhd)
9790 return FALSE;
9791
9792 if (dhd->op_mode & DHD_FLAG_CONCURR_MULTI_CHAN_MODE)
9793 return TRUE;
9794 else if ((dhd->op_mode & DHD_FLAG_CONCURR_SINGLE_CHAN_MODE) ==
9795 DHD_FLAG_CONCURR_SINGLE_CHAN_MODE)
9796 return TRUE;
9797 else
9798 return FALSE;
9799 }
9800 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
9801 /* Since the Android Jelly Bean release, concurrent mode is enabled by default and the
9802 * firmware name is fw_bcmdhd.bin. So we need to determine whether P2P is enabled in the
9803 * STA firmware and accordingly enable concurrent mode (apply P2P settings). SoftAP
9804 * firmware is still named fw_bcmdhd_apsta.
9805 */
9806 uint32
9807 dhd_get_concurrent_capabilites(dhd_pub_t *dhd)
9808 {
9809 int32 ret = 0;
9810 char buf[WLC_IOCTL_SMLEN];
9811 bool mchan_supported = FALSE;
9812 /* if dhd->op_mode is already set to HOSTAP or manufacturing
9813 * test mode, use that mode as-is
9814 */
9815 if (dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))
9816 return 0;
9817 if (FW_SUPPORTED(dhd, vsdb)) {
9818 mchan_supported = TRUE;
9819 }
9820 if (!FW_SUPPORTED(dhd, p2p)) {
9821 DHD_TRACE(("Chip does not support p2p\n"));
9822 return 0;
9823 } else {
9824 /* Chip supports p2p, but verify that it is actually implemented in the firmware */
9825 memset(buf, 0, sizeof(buf));
9826 ret = dhd_iovar(dhd, 0, "p2p", NULL, 0, (char *)&buf,
9827 sizeof(buf), FALSE);
9828 if (ret < 0) {
9829 DHD_ERROR(("%s: Get P2P failed (error=%d)\n", __FUNCTION__, ret));
9830 return 0;
9831 } else {
9832 if (buf[0] == 1) {
9833 /* By default, chip supports single chan concurrency,
9834 * now lets check for mchan
9835 */
9836 ret = DHD_FLAG_CONCURR_SINGLE_CHAN_MODE;
9837 if (mchan_supported)
9838 ret |= DHD_FLAG_CONCURR_MULTI_CHAN_MODE;
9839 if (FW_SUPPORTED(dhd, rsdb)) {
9840 ret |= DHD_FLAG_RSDB_MODE;
9841 }
9842 #ifdef WL_SUPPORT_MULTIP2P
9843 if (FW_SUPPORTED(dhd, mp2p)) {
9844 ret |= DHD_FLAG_MP2P_MODE;
9845 }
9846 #endif /* WL_SUPPORT_MULTIP2P */
9847 #if defined(WL_ENABLE_P2P_IF) || defined(WL_CFG80211_P2P_DEV_IF)
9848 return ret;
9849 #else
9850 return 0;
9851 #endif /* WL_ENABLE_P2P_IF || WL_CFG80211_P2P_DEV_IF */
9852 }
9853 }
9854 }
9855 return 0;
9856 }
9857 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
9858
9859 #ifdef WLAIBSS
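/*
 * Configure advanced IBSS: enable the aibss feature (treated as optional if
 * the firmware reports BCME_UNSUPPORTED), optionally enable IBSS power save
 * and ATIM, and set beacon-flood and coalescing parameters.
 */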
9860 int
9861 dhd_preinit_aibss_ioctls(dhd_pub_t *dhd, char *iov_buf_smlen)
9862 {
9863 int ret = BCME_OK;
9864 aibss_bcn_force_config_t bcn_config;
9865 uint32 aibss;
9866 #ifdef WLAIBSS_PS
9867 uint32 aibss_ps;
9868 s32 atim;
9869 #endif /* WLAIBSS_PS */
9870 int ibss_coalesce;
9871
9872 aibss = 1;
9873 ret = dhd_iovar(dhd, 0, "aibss", (char *)&aibss, sizeof(aibss), NULL, 0, TRUE);
9874 if (ret < 0) {
9875 if (ret == BCME_UNSUPPORTED) {
9876 DHD_ERROR(("%s aibss is not supported\n",
9877 __FUNCTION__));
9878 return BCME_OK;
9879 } else {
9880 DHD_ERROR(("%s Set aibss to %d failed %d\n",
9881 __FUNCTION__, aibss, ret));
9882 return ret;
9883 }
9884 }
9885
9886 #ifdef WLAIBSS_PS
9887 aibss_ps = 1;
9888 ret = dhd_iovar(dhd, 0, "aibss_ps", (char *)&aibss_ps, sizeof(aibss_ps), NULL, 0, TRUE);
9889 if (ret < 0) {
9890 DHD_ERROR(("%s Set aibss PS to %d failed %d\n",
9891 __FUNCTION__, aibss, ret));
9892 return ret;
9893 }
9894
9895 atim = 10;
9896 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ATIM,
9897 (char *)&atim, sizeof(atim), TRUE, 0)) < 0) {
9898 DHD_ERROR(("%s Enable custom IBSS ATIM mode failed %d\n",
9899 __FUNCTION__, ret));
9900 return ret;
9901 }
9902 #endif /* WLAIBSS_PS */
9903
9904 memset(&bcn_config, 0, sizeof(bcn_config));
9905 bcn_config.initial_min_bcn_dur = AIBSS_INITIAL_MIN_BCN_DUR;
9906 bcn_config.min_bcn_dur = AIBSS_MIN_BCN_DUR;
9907 bcn_config.bcn_flood_dur = AIBSS_BCN_FLOOD_DUR;
9908 bcn_config.version = AIBSS_BCN_FORCE_CONFIG_VER_0;
9909 bcn_config.len = sizeof(bcn_config);
9910
9911 ret = dhd_iovar(dhd, 0, "aibss_bcn_force_config", (char *)&bcn_config,
9912 sizeof(aibss_bcn_force_config_t), NULL, 0, TRUE);
9913 if (ret < 0) {
9914 DHD_ERROR(("%s Set aibss_bcn_force_config to %d, %d, %d failed %d\n",
9915 __FUNCTION__, AIBSS_INITIAL_MIN_BCN_DUR, AIBSS_MIN_BCN_DUR,
9916 AIBSS_BCN_FLOOD_DUR, ret));
9917 return ret;
9918 }
9919
9920 ibss_coalesce = IBSS_COALESCE_DEFAULT;
9921 ret = dhd_iovar(dhd, 0, "ibss_coalesce_allowed", (char *)&ibss_coalesce,
9922 sizeof(ibss_coalesce), NULL, 0, TRUE);
9923 if (ret < 0) {
9924 DHD_ERROR(("%s Set ibss_coalesce_allowed failed %d\n",
9925 __FUNCTION__, ret));
9926 return ret;
9927 }
9928
9929 dhd->op_mode |= DHD_FLAG_IBSS_MODE;
9930 return BCME_OK;
9931 }
9932 #endif /* WLAIBSS */
9933
9934 #if defined(WLADPS) || defined(WLADPS_PRIVATE_CMD)
9935 #ifdef WL_BAM
9936 static int
9937 dhd_check_adps_bad_ap(dhd_pub_t *dhd)
9938 {
9939 struct net_device *ndev;
9940 struct bcm_cfg80211 *cfg;
9941 struct wl_profile *profile;
9942 struct ether_addr bssid;
9943
9944 if (!dhd_is_associated(dhd, 0, NULL)) {
9945 DHD_ERROR(("%s - not associated\n", __FUNCTION__));
9946 return BCME_OK;
9947 }
9948
9949 ndev = dhd_linux_get_primary_netdev(dhd);
9950 if (!ndev) {
9951 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
9952 return -ENODEV;
9953 }
9954
9955 cfg = wl_get_cfg(ndev);
9956 if (!cfg) {
9957 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
9958 return -EINVAL;
9959 }
9960
9961 profile = wl_get_profile_by_netdev(cfg, ndev);
9962 memcpy(bssid.octet, profile->bssid, ETHER_ADDR_LEN);
9963 if (wl_adps_bad_ap_check(cfg, &bssid)) {
9964 if (wl_adps_enabled(cfg, ndev)) {
9965 wl_adps_set_suspend(cfg, ndev, ADPS_SUSPEND);
9966 }
9967 }
9968
9969 return BCME_OK;
9970 }
9971 #endif /* WL_BAM */
9972
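/*
 * Toggle ADPS per band via the "adps" iovar, packing a wl_adps_params_v1_t
 * into a bcm_iov_buf_t; BCME_UNSUPPORTED from the firmware is not treated
 * as an error.
 */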
9973 int
9974 dhd_enable_adps(dhd_pub_t *dhd, uint8 on)
9975 {
9976 int i;
9977 int len;
9978 int ret = BCME_OK;
9979
9980 bcm_iov_buf_t *iov_buf = NULL;
9981 wl_adps_params_v1_t *data = NULL;
9982
9983 len = OFFSETOF(bcm_iov_buf_t, data) + sizeof(*data);
9984 iov_buf = MALLOC(dhd->osh, len);
9985 if (iov_buf == NULL) {
9986 DHD_ERROR(("%s - failed to allocate %d bytes for iov_buf\n", __FUNCTION__, len));
9987 ret = BCME_NOMEM;
9988 goto exit;
9989 }
9990
9991 iov_buf->version = WL_ADPS_IOV_VER;
9992 iov_buf->len = sizeof(*data);
9993 iov_buf->id = WL_ADPS_IOV_MODE;
9994
9995 data = (wl_adps_params_v1_t *)iov_buf->data;
9996 data->version = ADPS_SUB_IOV_VERSION_1;
9997 data->length = sizeof(*data);
9998 data->mode = on;
9999
10000 for (i = 1; i <= MAX_BANDS; i++) {
10001 data->band = i;
10002 ret = dhd_iovar(dhd, 0, "adps", (char *)iov_buf, len, NULL, 0, TRUE);
10003 if (ret < 0) {
10004 if (ret == BCME_UNSUPPORTED) {
10005 DHD_ERROR(("%s adps is not supported\n", __FUNCTION__));
10006 ret = BCME_OK;
10007 goto exit;
10008 }
10009 else {
10010 DHD_ERROR(("%s fail to set adps %s for band %d (%d)\n",
10011 __FUNCTION__, on ? "On" : "Off", i, ret));
10012 goto exit;
10013 }
10014 }
10015 }
10016
10017 #ifdef WL_BAM
10018 if (on) {
10019 dhd_check_adps_bad_ap(dhd);
10020 }
10021 #endif /* WL_BAM */
10022
10023 exit:
10024 if (iov_buf) {
10025 MFREE(dhd->osh, iov_buf, len);
10026 iov_buf = NULL;
10027 }
10028 return ret;
10029 }
10030 #endif /* WLADPS || WLADPS_PRIVATE_CMD */
10031
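/*
 * One-time dongle configuration after firmware download: query version and
 * capabilities, pick op_mode, program the MAC address, country code, roaming,
 * power-save and the long tail of feature iovars guarded by the build flags
 * below.
 */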
10032 int
10033 dhd_preinit_ioctls(dhd_pub_t *dhd)
10034 {
10035 int ret = 0;
10036 char eventmask[WL_EVENTING_MASK_LEN];
10037 char iovbuf[WL_EVENTING_MASK_LEN + 12]; /* Room for "event_msgs" + '\0' + bitvec */
10038 uint32 buf_key_b4_m4 = 1;
10039 uint8 msglen;
10040 eventmsgs_ext_t *eventmask_msg = NULL;
10041 uint32 event_log_max_sets = 0;
10042 char* iov_buf = NULL;
10043 int ret2 = 0;
10044 uint32 wnm_cap = 0;
10045 #if defined(BCMSUP_4WAY_HANDSHAKE)
10046 uint32 sup_wpa = 1;
10047 #endif /* BCMSUP_4WAY_HANDSHAKE */
10048 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10049 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10050 uint32 ampdu_ba_wsize = 0;
10051 #endif /* CUSTOM_AMPDU_BA_WSIZE ||(WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10052 #if defined(CUSTOM_AMPDU_MPDU)
10053 int32 ampdu_mpdu = 0;
10054 #endif // endif
10055 #if defined(CUSTOM_AMPDU_RELEASE)
10056 int32 ampdu_release = 0;
10057 #endif // endif
10058 #if defined(CUSTOM_AMSDU_AGGSF)
10059 int32 amsdu_aggsf = 0;
10060 #endif // endif
10061
10062 #if defined(BCMSDIO)
10063 #ifdef PROP_TXSTATUS
10064 int wlfc_enable = TRUE;
10065 #ifndef DISABLE_11N
10066 uint32 hostreorder = 1;
10067 uint chipid = 0;
10068 #endif /* DISABLE_11N */
10069 #endif /* PROP_TXSTATUS */
10070 #endif // endif
10071 #ifndef PCIE_FULL_DONGLE
10072 uint32 wl_ap_isolate;
10073 #endif /* PCIE_FULL_DONGLE */
10074 uint32 frameburst = CUSTOM_FRAMEBURST_SET;
10075 uint wnm_bsstrans_resp = 0;
10076 #ifdef SUPPORT_SET_CAC
10077 uint32 cac = 1;
10078 #endif /* SUPPORT_SET_CAC */
10079 #ifdef DHD_BUS_MEM_ACCESS
10080 uint32 enable_memuse = 1;
10081 #endif /* DHD_BUS_MEM_ACCESS */
10082
10083 #ifdef OEM_ANDROID
10084 #ifdef DHD_ENABLE_LPC
10085 uint32 lpc = 1;
10086 #endif /* DHD_ENABLE_LPC */
10087 uint power_mode = PM_FAST;
10088 #if defined(BCMSDIO)
10089 uint32 dongle_align = DHD_SDALIGN;
10090 uint32 glom = CUSTOM_GLOM_SETTING;
10091 #endif /* defined(BCMSDIO) */
10092 #if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && defined(USE_WL_CREDALL)
10093 uint32 credall = 1;
10094 #endif // endif
10095 uint bcn_timeout = CUSTOM_BCN_TIMEOUT;
10096 uint scancache_enab = TRUE;
10097 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
10098 uint32 bcn_li_bcn = 1;
10099 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
10100 uint retry_max = CUSTOM_ASSOC_RETRY_MAX;
10101 #if defined(ARP_OFFLOAD_SUPPORT)
10102 int arpoe = 0;
10103 #endif // endif
10104 int scan_assoc_time = DHD_SCAN_ASSOC_ACTIVE_TIME;
10105 int scan_unassoc_time = DHD_SCAN_UNASSOC_ACTIVE_TIME;
10106 int scan_passive_time = DHD_SCAN_PASSIVE_TIME;
10107 char buf[WLC_IOCTL_SMLEN];
10108 char *ptr;
10109 uint32 listen_interval = CUSTOM_LISTEN_INTERVAL; /* Default Listen Interval in Beacons */
10110 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
10111 wl_el_tag_params_t *el_tag = NULL;
10112 #endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
10113 #ifdef ROAM_ENABLE
10114 uint roamvar = 0;
10115 int roam_trigger[2] = {CUSTOM_ROAM_TRIGGER_SETTING, WLC_BAND_ALL};
10116 int roam_scan_period[2] = {10, WLC_BAND_ALL};
10117 int roam_delta[2] = {CUSTOM_ROAM_DELTA_SETTING, WLC_BAND_ALL};
10118 #ifdef ROAM_AP_ENV_DETECTION
10119 int roam_env_mode = AP_ENV_INDETERMINATE;
10120 #endif /* ROAM_AP_ENV_DETECTION */
10121 #ifdef FULL_ROAMING_SCAN_PERIOD_60_SEC
10122 int roam_fullscan_period = 60;
10123 #else /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10124 int roam_fullscan_period = 120;
10125 #endif /* FULL_ROAMING_SCAN_PERIOD_60_SEC */
10126 #ifdef DISABLE_BCNLOSS_ROAM
10127 uint roam_bcnloss_off = 1;
10128 #endif /* DISABLE_BCNLOSS_ROAM */
10129 #else
10130 #ifdef DISABLE_BUILTIN_ROAM
10131 uint roamvar = 1;
10132 #endif /* DISABLE_BUILTIN_ROAM */
10133 #endif /* ROAM_ENABLE */
10134
10135 #if defined(SOFTAP)
10136 uint dtim = 1;
10137 #endif // endif
10138 #if (defined(AP) && !defined(WLP2P)) || (!defined(AP) && defined(WL_CFG80211))
10139 struct ether_addr p2p_ea;
10140 #endif // endif
10141 #ifdef BCMCCX
10142 uint32 ccx = 1;
10143 #endif // endif
10144 #ifdef SOFTAP_UAPSD_OFF
10145 uint32 wme_apsd = 0;
10146 #endif /* SOFTAP_UAPSD_OFF */
10147 #if (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC)
10148 uint32 apsta = 1; /* Enable APSTA mode */
10149 #elif defined(SOFTAP_AND_GC)
10150 uint32 apsta = 0;
10151 int ap_mode = 1;
10152 #endif /* (defined(AP) || defined(WLP2P)) && !defined(SOFTAP_AND_GC) */
10153 #ifdef GET_CUSTOM_MAC_ENABLE
10154 struct ether_addr ea_addr;
10155 #endif /* GET_CUSTOM_MAC_ENABLE */
10156 #ifdef OKC_SUPPORT
10157 uint32 okc = 1;
10158 #endif // endif
10159
10160 #ifdef DISABLE_11N
10161 uint32 nmode = 0;
10162 #endif /* DISABLE_11N */
10163
10164 #ifdef USE_WL_TXBF
10165 uint32 txbf = 1;
10166 #endif /* USE_WL_TXBF */
10167 #ifdef DISABLE_TXBFR
10168 uint32 txbf_bfr_cap = 0;
10169 #endif /* DISABLE_TXBFR */
10170 #ifdef AMPDU_VO_ENABLE
10171 struct ampdu_tid_control tid;
10172 #endif // endif
10173 #if defined(PROP_TXSTATUS)
10174 #ifdef USE_WFA_CERT_CONF
10175 uint32 proptx = 0;
10176 #endif /* USE_WFA_CERT_CONF */
10177 #endif /* PROP_TXSTATUS */
10178 #ifdef DHD_SET_FW_HIGHSPEED
10179 uint32 ack_ratio = 250;
10180 uint32 ack_ratio_depth = 64;
10181 #endif /* DHD_SET_FW_HIGHSPEED */
10182 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
10183 uint32 vht_features = 0; /* init to 0, will be set based on each support */
10184 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
10185 #ifdef DISABLE_11N_PROPRIETARY_RATES
10186 uint32 ht_features = 0;
10187 #endif /* DISABLE_11N_PROPRIETARY_RATES */
10188 #ifdef CUSTOM_PSPRETEND_THR
10189 uint32 pspretend_thr = CUSTOM_PSPRETEND_THR;
10190 #endif // endif
10191 #ifdef CUSTOM_EVENT_PM_WAKE
10192 uint32 pm_awake_thresh = CUSTOM_EVENT_PM_WAKE;
10193 #endif /* CUSTOM_EVENT_PM_WAKE */
10194 #ifdef DISABLE_PRUNED_SCAN
10195 uint32 scan_features = 0;
10196 #endif /* DISABLE_PRUNED_SCAN */
10197 #ifdef BCMPCIE_OOB_HOST_WAKE
10198 uint32 hostwake_oob = 0;
10199 #endif /* BCMPCIE_OOB_HOST_WAKE */
10200 #ifdef EVENT_LOG_RATE_HC
10201 /* threshold number of lines per second */
10202 #define EVENT_LOG_RATE_HC_THRESHOLD 1000
10203 uint32 event_log_rate_hc = EVENT_LOG_RATE_HC_THRESHOLD;
10204 #endif /* EVENT_LOG_RATE_HC */
10205 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
10206 uint32 btmdelta = WBTEXT_BTMDELTA;
10207 #endif /* WBTEXT && WBTEXT_BTMDELTA */
10208 wl_wlc_version_t wlc_ver;
10209
10210 #ifdef PKT_FILTER_SUPPORT
10211 dhd_pkt_filter_enable = TRUE;
10212 #ifdef APF
10213 dhd->apf_set = FALSE;
10214 #endif /* APF */
10215 #endif /* PKT_FILTER_SUPPORT */
10216 dhd->suspend_bcn_li_dtim = CUSTOM_SUSPEND_BCN_LI_DTIM;
10217 #ifdef ENABLE_MAX_DTIM_IN_SUSPEND
10218 dhd->max_dtim_enable = TRUE;
10219 #else
10220 dhd->max_dtim_enable = FALSE;
10221 #endif /* ENABLE_MAX_DTIM_IN_SUSPEND */
10222 dhd->disable_dtim_in_suspend = FALSE;
10223 #ifdef CUSTOM_SET_OCLOFF
10224 dhd->ocl_off = FALSE;
10225 #endif /* CUSTOM_SET_OCLOFF */
10226 #ifdef SUPPORT_SET_TID
10227 dhd->tid_mode = SET_TID_OFF;
10228 dhd->target_uid = 0;
10229 dhd->target_tid = 0;
10230 #endif /* SUPPORT_SET_TID */
10231 DHD_TRACE(("Enter %s\n", __FUNCTION__));
10232 dhd->op_mode = 0;
10233
10234 #if defined(CUSTOM_COUNTRY_CODE) && (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY))
10235 /* clear AP flags */
10236 dhd->dhd_cflags &= ~WLAN_PLAT_AP_FLAG;
10237 #endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
10238
10239 #ifdef CUSTOMER_HW4_DEBUG
10240 if (!dhd_validate_chipid(dhd)) {
10241 DHD_ERROR(("%s: CONFIG_BCMXXX and CHIP ID(%x) is mismatched\n",
10242 __FUNCTION__, dhd_bus_chip_id(dhd)));
10243 #ifndef SUPPORT_MULTIPLE_CHIPS
10244 ret = BCME_BADARG;
10245 goto done;
10246 #endif /* !SUPPORT_MULTIPLE_CHIPS */
10247 }
10248 #endif /* CUSTOMER_HW4_DEBUG */
10249
10250 /* query for 'ver' to get version info from firmware */
10251 memset(buf, 0, sizeof(buf));
10252 ptr = buf;
10253 ret = dhd_iovar(dhd, 0, "ver", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
10254 if (ret < 0)
10255 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
10256 else {
10257 bcmstrtok(&ptr, "\n", 0);
10258 /* Print fw version info */
10259 DHD_ERROR(("Firmware version = %s\n", buf));
10260 strncpy(fw_version, buf, FW_VER_STR_LEN);
10261 fw_version[FW_VER_STR_LEN-1] = '\0';
10262 #if defined(BCMSDIO) || defined(BCMPCIE) || defined(BCMSPI)
10263 dhd_set_version_info(dhd, buf);
10264 #endif /* BCMSDIO || BCMPCIE || BCMSPI */
10265 }
10266
10267 #ifdef BOARD_HIKEY
10268 /* Set op_mode as MFG_MODE if WLTEST is present in "wl ver" */
10269 if (strstr(fw_version, "WLTEST") != NULL) {
10270 DHD_ERROR(("%s: wl ver has WLTEST, setting op_mode as DHD_FLAG_MFG_MODE\n",
10271 __FUNCTION__));
10272 op_mode = DHD_FLAG_MFG_MODE;
10273 }
10274 #endif /* BOARD_HIKEY */
10275
10276 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10277 (op_mode == DHD_FLAG_MFG_MODE)) {
10278 dhd->op_mode = DHD_FLAG_MFG_MODE;
10279 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
10280 /* disable runtimePM by default in MFG mode. */
10281 pm_runtime_disable(dhd_bus_to_dev(dhd->bus));
10282 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
10283 #ifdef DHD_PCIE_RUNTIMEPM
10284 /* Disable RuntimePM in mfg mode */
10285 DHD_DISABLE_RUNTIME_PM(dhd);
10286 DHD_ERROR(("%s : Disable RuntimePM in Manufactring Firmware\n", __FUNCTION__));
10287 #endif /* DHD_PCIE_RUNTIME_PM */
10288 /* Check and adjust IOCTL response timeout for Manufactring firmware */
10289 dhd_os_set_ioctl_resp_timeout(MFG_IOCTL_RESP_TIMEOUT);
10290 DHD_ERROR(("%s : Set IOCTL response time for Manufactring Firmware\n",
10291 __FUNCTION__));
10292 } else {
10293 dhd_os_set_ioctl_resp_timeout(IOCTL_RESP_TIMEOUT);
10294 DHD_INFO(("%s : Set IOCTL response time.\n", __FUNCTION__));
10295 }
10296 #ifdef BCMPCIE_OOB_HOST_WAKE
10297 ret = dhd_iovar(dhd, 0, "bus:hostwake_oob", NULL, 0, (char *)&hostwake_oob,
10298 sizeof(hostwake_oob), FALSE);
10299 if (ret < 0) {
10300 DHD_ERROR(("%s: hostwake_oob IOVAR not present, proceed\n", __FUNCTION__));
10301 } else {
10302 if (hostwake_oob == 0) {
10303 DHD_ERROR(("%s: hostwake_oob is not enabled in the NVRAM, STOP\n",
10304 __FUNCTION__));
10305 ret = BCME_UNSUPPORTED;
10306 goto done;
10307 } else {
10308 DHD_ERROR(("%s: hostwake_oob enabled\n", __FUNCTION__));
10309 }
10310 }
10311 #endif /* BCMPCIE_OOB_HOST_WAKE */
10312
10313 #ifdef DNGL_AXI_ERROR_LOGGING
10314 ret = dhd_iovar(dhd, 0, "axierror_logbuf_addr", NULL, 0, (char *)&dhd->axierror_logbuf_addr,
10315 sizeof(dhd->axierror_logbuf_addr), FALSE);
10316 if (ret < 0) {
10317 DHD_ERROR(("%s: axierror_logbuf_addr IOVAR not present, proceed\n", __FUNCTION__));
10318 dhd->axierror_logbuf_addr = 0;
10319 } else {
10320 DHD_ERROR(("%s: axierror_logbuf_addr : 0x%x\n", __FUNCTION__,
10321 dhd->axierror_logbuf_addr));
10322 }
10323 #endif /* DNGL_AXI_ERROR_LOGGING */
10324
10325 #ifdef EVENT_LOG_RATE_HC
10326 ret = dhd_iovar(dhd, 0, "event_log_rate_hc", (char *)&event_log_rate_hc,
10327 sizeof(event_log_rate_hc), NULL, 0, TRUE);
10328 if (ret < 0) {
10329 DHD_ERROR(("%s event_log_rate_hc set failed %d\n", __FUNCTION__, ret));
10330 } else {
10331 DHD_ERROR(("%s event_log_rate_hc set with threshold:%d\n", __FUNCTION__,
10332 event_log_rate_hc));
10333 }
10334 #endif /* EVENT_LOG_RATE_HC */
10335
10336 #ifdef GET_CUSTOM_MAC_ENABLE
10337 ret = wifi_platform_get_mac_addr(dhd->info->adapter, ea_addr.octet);
10338 if (!ret) {
10339 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&ea_addr, ETHER_ADDR_LEN, NULL, 0,
10340 TRUE);
10341 if (ret < 0) {
10342 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
10343 ret = BCME_NOTUP;
10344 goto done;
10345 }
10346 memcpy(dhd->mac.octet, ea_addr.octet, ETHER_ADDR_LEN);
10347 } else {
10348 #endif /* GET_CUSTOM_MAC_ENABLE */
10349 /* Get the default device MAC address directly from firmware */
10350 ret = dhd_iovar(dhd, 0, "cur_etheraddr", NULL, 0, (char *)&buf, sizeof(buf), FALSE);
10351 if (ret < 0) {
10352 DHD_ERROR(("%s: can't get MAC address , error=%d\n", __FUNCTION__, ret));
10353 ret = BCME_NOTUP;
10354 goto done;
10355 }
10356 /* Update public MAC address after reading from Firmware */
10357 memcpy(dhd->mac.octet, buf, ETHER_ADDR_LEN);
10358
10359 #ifdef GET_CUSTOM_MAC_ENABLE
10360 }
10361 #endif /* GET_CUSTOM_MAC_ENABLE */
10362
10363 if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
10364 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
10365 goto done;
10366 }
10367
10368 /* get capabilities from firmware */
10369 {
10370 uint32 cap_buf_size = sizeof(dhd->fw_capabilities);
10371 memset(dhd->fw_capabilities, 0, cap_buf_size);
10372 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, (cap_buf_size - 1),
10373 FALSE);
10374 if (ret < 0) {
10375 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
10376 __FUNCTION__, ret));
10377 return 0;
10378 }
10379
10380 memmove(&dhd->fw_capabilities[1], dhd->fw_capabilities, (cap_buf_size - 1));
10381 dhd->fw_capabilities[0] = ' ';
10382 dhd->fw_capabilities[cap_buf_size - 2] = ' ';
10383 dhd->fw_capabilities[cap_buf_size - 1] = '\0';
10384 }
10385
10386 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_HOSTAP_MODE) ||
10387 (op_mode == DHD_FLAG_HOSTAP_MODE)) {
10388 #ifdef SET_RANDOM_MAC_SOFTAP
10389 uint rand_mac;
10390 #endif /* SET_RANDOM_MAC_SOFTAP */
10391 dhd->op_mode = DHD_FLAG_HOSTAP_MODE;
10392 #if defined(ARP_OFFLOAD_SUPPORT)
10393 arpoe = 0;
10394 #endif // endif
10395 #ifdef PKT_FILTER_SUPPORT
10396 dhd_pkt_filter_enable = FALSE;
10397 #endif // endif
10398 #ifdef SET_RANDOM_MAC_SOFTAP
10399 SRANDOM32((uint)jiffies);
10400 rand_mac = RANDOM32();
10401 iovbuf[0] = (unsigned char)(vendor_oui >> 16) | 0x02; /* local admin bit */
10402 iovbuf[1] = (unsigned char)(vendor_oui >> 8);
10403 iovbuf[2] = (unsigned char)vendor_oui;
10404 iovbuf[3] = (unsigned char)(rand_mac & 0x0F) | 0xF0;
10405 iovbuf[4] = (unsigned char)(rand_mac >> 8);
10406 iovbuf[5] = (unsigned char)(rand_mac >> 16);
10407
10408 ret = dhd_iovar(dhd, 0, "cur_etheraddr", (char *)&iovbuf, ETHER_ADDR_LEN, NULL, 0,
10409 TRUE);
10410 if (ret < 0) {
10411 DHD_ERROR(("%s: can't set MAC address , error=%d\n", __FUNCTION__, ret));
10412 } else
10413 memcpy(dhd->mac.octet, iovbuf, ETHER_ADDR_LEN);
10414 #endif /* SET_RANDOM_MAC_SOFTAP */
10415 #ifdef USE_DYNAMIC_F2_BLKSIZE
10416 dhdsdio_func_blocksize(dhd, 2, sd_f2_blocksize);
10417 #endif /* USE_DYNAMIC_F2_BLKSIZE */
10418 #ifdef SOFTAP_UAPSD_OFF
10419 ret = dhd_iovar(dhd, 0, "wme_apsd", (char *)&wme_apsd, sizeof(wme_apsd), NULL, 0,
10420 TRUE);
10421 if (ret < 0) {
10422 DHD_ERROR(("%s: set wme_apsd 0 fail (error=%d)\n",
10423 __FUNCTION__, ret));
10424 }
10425 #endif /* SOFTAP_UAPSD_OFF */
10426 #if defined(CUSTOM_COUNTRY_CODE) && (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY))
10427 /* set AP flag for specific country code of SOFTAP */
10428 dhd->dhd_cflags |= WLAN_PLAT_AP_FLAG | WLAN_PLAT_NODFS_FLAG;
10429 #endif /* CUSTOM_COUNTRY_CODE && (CUSTOMER_HW2 || BOARD_HIKEY) */
10430 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_MFG_MODE) ||
10431 (op_mode == DHD_FLAG_MFG_MODE)) {
10432 #if defined(ARP_OFFLOAD_SUPPORT)
10433 arpoe = 0;
10434 #endif /* ARP_OFFLOAD_SUPPORT */
10435 #ifdef PKT_FILTER_SUPPORT
10436 dhd_pkt_filter_enable = FALSE;
10437 #endif /* PKT_FILTER_SUPPORT */
10438 dhd->op_mode = DHD_FLAG_MFG_MODE;
10439 #ifdef USE_DYNAMIC_F2_BLKSIZE
10440 dhdsdio_func_blocksize(dhd, 2, DYNAMIC_F2_BLKSIZE_FOR_NONLEGACY);
10441 #endif /* USE_DYNAMIC_F2_BLKSIZE */
10442 #ifndef CUSTOM_SET_ANTNPM
10443 #ifndef IGUANA_LEGACY_CHIPS
10444 if (FW_SUPPORTED(dhd, rsdb)) {
10445 wl_config_t rsdb_mode;
10446 memset(&rsdb_mode, 0, sizeof(rsdb_mode));
10447 ret = dhd_iovar(dhd, 0, "rsdb_mode", (char *)&rsdb_mode, sizeof(rsdb_mode),
10448 NULL, 0, TRUE);
10449 if (ret < 0) {
10450 DHD_ERROR(("%s Disable rsdb_mode is failed ret= %d\n",
10451 __FUNCTION__, ret));
10452 }
10453 }
10454 #endif /* IGUANA_LEGACY_CHIPS */
10455 #endif /* !CUSTOM_SET_ANTNPM */
10456 } else {
10457 uint32 concurrent_mode = 0;
10458 if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_P2P_MODE) ||
10459 (op_mode == DHD_FLAG_P2P_MODE)) {
10460 #if defined(ARP_OFFLOAD_SUPPORT)
10461 arpoe = 0;
10462 #endif // endif
10463 #ifdef PKT_FILTER_SUPPORT
10464 dhd_pkt_filter_enable = FALSE;
10465 #endif // endif
10466 dhd->op_mode = DHD_FLAG_P2P_MODE;
10467 } else if ((!op_mode && dhd_get_fw_mode(dhd->info) == DHD_FLAG_IBSS_MODE) ||
10468 (op_mode == DHD_FLAG_IBSS_MODE)) {
10469 dhd->op_mode = DHD_FLAG_IBSS_MODE;
10470 } else
10471 dhd->op_mode = DHD_FLAG_STA_MODE;
10472 #if defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P)
10473 if (dhd->op_mode != DHD_FLAG_IBSS_MODE &&
10474 (concurrent_mode = dhd_get_concurrent_capabilites(dhd))) {
10475 #if defined(ARP_OFFLOAD_SUPPORT)
10476 arpoe = 1;
10477 #endif // endif
10478 dhd->op_mode |= concurrent_mode;
10479 }
10480
10481 /* Check if we are enabling p2p */
10482 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
10483 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0,
10484 TRUE);
10485 if (ret < 0)
10486 DHD_ERROR(("%s APSTA for P2P failed ret= %d\n", __FUNCTION__, ret));
10487
10488 #if defined(SOFTAP_AND_GC)
10489 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_AP,
10490 (char *)&ap_mode, sizeof(ap_mode), TRUE, 0)) < 0) {
10491 DHD_ERROR(("%s WLC_SET_AP failed %d\n", __FUNCTION__, ret));
10492 }
10493 #endif // endif
10494 memcpy(&p2p_ea, &dhd->mac, ETHER_ADDR_LEN);
10495 ETHER_SET_LOCALADDR(&p2p_ea);
10496 ret = dhd_iovar(dhd, 0, "p2p_da_override", (char *)&p2p_ea, sizeof(p2p_ea),
10497 NULL, 0, TRUE);
10498 if (ret < 0)
10499 DHD_ERROR(("%s p2p_da_override ret= %d\n", __FUNCTION__, ret));
10500 else
10501 DHD_INFO(("dhd_preinit_ioctls: p2p_da_override succeeded\n"));
10502 }
10503 #else
10504 (void)concurrent_mode;
10505 #endif /* defined(OEM_ANDROID) && !defined(AP) && defined(WLP2P) */
10506 }
10507
10508 #ifdef DISABLE_PRUNED_SCAN
10509 if (FW_SUPPORTED(dhd, rsdb)) {
10510 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
10511 sizeof(scan_features), iovbuf, sizeof(iovbuf), FALSE);
10512 if (ret < 0) {
10513 DHD_ERROR(("%s get scan_features is failed ret=%d\n",
10514 __FUNCTION__, ret));
10515 } else {
10516 memcpy(&scan_features, iovbuf, 4);
10517 scan_features &= ~RSDB_SCAN_DOWNGRADED_CH_PRUNE_ROAM;
10518 ret = dhd_iovar(dhd, 0, "scan_features", (char *)&scan_features,
10519 sizeof(scan_features), NULL, 0, TRUE);
10520 if (ret < 0) {
10521 DHD_ERROR(("%s set scan_features is failed ret=%d\n",
10522 __FUNCTION__, ret));
10523 }
10524 }
10525 }
10526 #endif /* DISABLE_PRUNED_SCAN */
10527
10528 DHD_ERROR(("Firmware up: op_mode=0x%04x, MAC="MACDBG"\n",
10529 dhd->op_mode, MAC2STRDBG(dhd->mac.octet)));
10530 #if defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
10531 #if defined(DHD_BLOB_EXISTENCE_CHECK)
10532 if (!dhd->is_blob)
10533 #endif /* DHD_BLOB_EXISTENCE_CHECK */
10534 {
10535 /* get a ccode and revision for the country code */
10536 #if defined(CUSTOM_COUNTRY_CODE)
10537 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10538 &dhd->dhd_cspec, dhd->dhd_cflags);
10539 #else
10540 get_customized_country_code(dhd->info->adapter, dhd->dhd_cspec.country_abbrev,
10541 &dhd->dhd_cspec);
10542 #endif /* CUSTOM_COUNTRY_CODE */
10543 }
10544 #endif /* CUSTOMER_HW2 || BOARD_HIKEY */
10545
10546 #if defined(RXFRAME_THREAD) && defined(RXTHREAD_ONLYSTA)
10547 if (dhd->op_mode == DHD_FLAG_HOSTAP_MODE)
10548 dhd->info->rxthread_enabled = FALSE;
10549 else
10550 dhd->info->rxthread_enabled = TRUE;
10551 #endif // endif
10552 /* Set Country code */
10553 if (dhd->dhd_cspec.ccode[0] != 0) {
10554 ret = dhd_iovar(dhd, 0, "country", (char *)&dhd->dhd_cspec, sizeof(wl_country_t),
10555 NULL, 0, TRUE);
10556 if (ret < 0)
10557 DHD_ERROR(("%s: country code setting failed\n", __FUNCTION__));
10558 }
10559
10560 /* Set Listen Interval */
10561 ret = dhd_iovar(dhd, 0, "assoc_listen", (char *)&listen_interval, sizeof(listen_interval),
10562 NULL, 0, TRUE);
10563 if (ret < 0)
10564 DHD_ERROR(("%s assoc_listen failed %d\n", __FUNCTION__, ret));
10565
10566 #if defined(ROAM_ENABLE) || defined(DISABLE_BUILTIN_ROAM)
10567 #ifdef USE_WFA_CERT_CONF
10568 if (sec_get_param_wfa_cert(dhd, SET_PARAM_ROAMOFF, &roamvar) == BCME_OK) {
10569 DHD_ERROR(("%s: read roam_off param =%d\n", __FUNCTION__, roamvar));
10570 }
10571 #endif /* USE_WFA_CERT_CONF */
10572 /* Disable built-in roaming to allow the external supplicant to take care of roaming */
10573 ret = dhd_iovar(dhd, 0, "roam_off", (char *)&roamvar, sizeof(roamvar), NULL, 0, TRUE);
10574 #endif /* ROAM_ENABLE || DISABLE_BUILTIN_ROAM */
10575 #if defined(ROAM_ENABLE)
10576 #ifdef DISABLE_BCNLOSS_ROAM
10577 ret = dhd_iovar(dhd, 0, "roam_bcnloss_off", (char *)&roam_bcnloss_off,
10578 sizeof(roam_bcnloss_off), NULL, 0, TRUE);
10579 #endif /* DISABLE_BCNLOSS_ROAM */
10580 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_TRIGGER, roam_trigger,
10581 sizeof(roam_trigger), TRUE, 0)) < 0)
10582 DHD_ERROR(("%s: roam trigger set failed %d\n", __FUNCTION__, ret));
10583 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_SCAN_PERIOD, roam_scan_period,
10584 sizeof(roam_scan_period), TRUE, 0)) < 0)
10585 DHD_ERROR(("%s: roam scan period set failed %d\n", __FUNCTION__, ret));
10586 if ((dhd_wl_ioctl_cmd(dhd, WLC_SET_ROAM_DELTA, roam_delta,
10587 sizeof(roam_delta), TRUE, 0)) < 0)
10588 DHD_ERROR(("%s: roam delta set failed %d\n", __FUNCTION__, ret));
10589 ret = dhd_iovar(dhd, 0, "fullroamperiod", (char *)&roam_fullscan_period,
10590 sizeof(roam_fullscan_period), NULL, 0, TRUE);
10591 if (ret < 0)
10592 DHD_ERROR(("%s: roam fullscan period set failed %d\n", __FUNCTION__, ret));
10593 #ifdef ROAM_AP_ENV_DETECTION
10594 if (roam_trigger[0] == WL_AUTO_ROAM_TRIGGER) {
10595 if (dhd_iovar(dhd, 0, "roam_env_detection", (char *)&roam_env_mode,
10596 sizeof(roam_env_mode), NULL, 0, TRUE) == BCME_OK)
10597 dhd->roam_env_detection = TRUE;
10598 else
10599 dhd->roam_env_detection = FALSE;
10600 }
10601 #endif /* ROAM_AP_ENV_DETECTION */
10602 #endif /* ROAM_ENABLE */
10603
10604 #ifdef CUSTOM_EVENT_PM_WAKE
10605 ret = dhd_iovar(dhd, 0, "const_awake_thresh", (char *)&pm_awake_thresh,
10606 sizeof(pm_awake_thresh), NULL, 0, TRUE);
10607 if (ret < 0) {
10608 DHD_ERROR(("%s set const_awake_thresh failed %d\n", __FUNCTION__, ret));
10609 }
10610 #endif /* CUSTOM_EVENT_PM_WAKE */
10611 #ifdef OKC_SUPPORT
10612 ret = dhd_iovar(dhd, 0, "okc_enable", (char *)&okc, sizeof(okc), NULL, 0, TRUE);
10613 #endif // endif
10614 #ifdef BCMCCX
10615 ret = dhd_iovar(dhd, 0, "ccx_enable", (char *)&ccx, sizeof(ccx), NULL, 0, TRUE);
10616 #endif /* BCMCCX */
10617
10618 #ifdef WLTDLS
10619 dhd->tdls_enable = FALSE;
10620 dhd_tdls_set_mode(dhd, false);
10621 #endif /* WLTDLS */
10622
10623 #ifdef DHD_ENABLE_LPC
10624 /* Set lpc 1 */
10625 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
10626 if (ret < 0) {
10627 DHD_ERROR(("%s Set lpc failed %d\n", __FUNCTION__, ret));
10628
10629 if (ret == BCME_NOTDOWN) {
10630 uint wl_down = 1;
10631 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
10632 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
10633 DHD_ERROR(("%s lpc fail WL_DOWN : %d, lpc = %d\n", __FUNCTION__, ret, lpc));
10634
10635 ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc), NULL, 0, TRUE);
10636 DHD_ERROR(("%s Set lpc ret --> %d\n", __FUNCTION__, ret));
10637 }
10638 }
10639 #endif /* DHD_ENABLE_LPC */
10640
10641 #ifdef WLADPS
10642 if (dhd->op_mode & DHD_FLAG_STA_MODE) {
10643 if ((ret = dhd_enable_adps(dhd, ADPS_ENABLE)) != BCME_OK) {
10644 DHD_ERROR(("%s dhd_enable_adps failed %d\n",
10645 __FUNCTION__, ret));
10646 }
10647 }
10648 #endif /* WLADPS */
10649
10650 #ifdef DHD_PM_CONTROL_FROM_FILE
10651 sec_control_pm(dhd, &power_mode);
10652 #else
10653 #ifndef H2_BRING_UP
10654 /* Set PowerSave mode */
10655 (void) dhd_wl_ioctl_cmd(dhd, WLC_SET_PM, (char *)&power_mode, sizeof(power_mode), TRUE, 0);
10656 #endif // endif
10657 #endif /* DHD_PM_CONTROL_FROM_FILE */
10658
10659 #if defined(BCMSDIO)
10660 /* Match Host and Dongle rx alignment */
10661 ret = dhd_iovar(dhd, 0, "bus:txglomalign", (char *)&dongle_align, sizeof(dongle_align),
10662 NULL, 0, TRUE);
10663
10664 #if (defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)) && defined(USE_WL_CREDALL)
10665 /* enable credall to reduce the chance of running out of bus credits. */
10666 ret = dhd_iovar(dhd, 0, "bus:credall", (char *)&credall, sizeof(credall), NULL, 0, TRUE);
10667 #endif // endif
10668
10669 #ifdef USE_WFA_CERT_CONF
10670 if (sec_get_param_wfa_cert(dhd, SET_PARAM_BUS_TXGLOM_MODE, &glom) == BCME_OK) {
10671 DHD_ERROR(("%s, read txglom param =%d\n", __FUNCTION__, glom));
10672 }
10673 #endif /* USE_WFA_CERT_CONF */
10674 if (glom != DEFAULT_GLOM_VALUE) {
10675 DHD_INFO(("%s set glom=0x%X\n", __FUNCTION__, glom));
10676 ret = dhd_iovar(dhd, 0, "bus:txglom", (char *)&glom, sizeof(glom), NULL, 0, TRUE);
10677 }
10678 #endif /* defined(BCMSDIO) */
10679
10680 /* Setup timeout if Beacons are lost and roam is off to report link down */
10681 ret = dhd_iovar(dhd, 0, "bcn_timeout", (char *)&bcn_timeout, sizeof(bcn_timeout), NULL, 0,
10682 TRUE);
10683
10684 /* Setup assoc_retry_max count to reconnect target AP in dongle */
10685 ret = dhd_iovar(dhd, 0, "assoc_retry_max", (char *)&retry_max, sizeof(retry_max), NULL, 0,
10686 TRUE);
10687
10688 #if defined(AP) && !defined(WLP2P)
10689 ret = dhd_iovar(dhd, 0, "apsta", (char *)&apsta, sizeof(apsta), NULL, 0, TRUE);
10690
10691 #endif /* defined(AP) && !defined(WLP2P) */
10692
10693 #ifdef MIMO_ANT_SETTING
10694 dhd_sel_ant_from_file(dhd);
10695 #endif /* MIMO_ANT_SETTING */
10696
10697 #if defined(OEM_ANDROID) && defined(SOFTAP)
10698 if (ap_fw_loaded == TRUE) {
10699 dhd_wl_ioctl_cmd(dhd, WLC_SET_DTIMPRD, (char *)&dtim, sizeof(dtim), TRUE, 0);
10700 }
10701 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
10702
10703 #if defined(KEEP_ALIVE)
10704 {
10705 /* Set Keep Alive : be sure to use FW with -keepalive */
10706 int res;
10707
10708 #if defined(OEM_ANDROID) && defined(SOFTAP)
10709 if (ap_fw_loaded == FALSE)
10710 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
10711 if (!(dhd->op_mode &
10712 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
10713 if ((res = dhd_keep_alive_onoff(dhd)) < 0)
DHD_ERROR(("%s set keepalive failed %d\n",
10715 __FUNCTION__, res));
10716 }
10717 }
10718 #endif /* defined(KEEP_ALIVE) */
10719
10720 #ifdef USE_WL_TXBF
10721 ret = dhd_iovar(dhd, 0, "txbf", (char *)&txbf, sizeof(txbf), NULL, 0, TRUE);
10722 if (ret < 0)
10723 DHD_ERROR(("%s Set txbf failed %d\n", __FUNCTION__, ret));
10724
10725 #endif /* USE_WL_TXBF */
10726
10727 ret = dhd_iovar(dhd, 0, "scancache", (char *)&scancache_enab, sizeof(scancache_enab), NULL,
10728 0, TRUE);
10729 if (ret < 0) {
10730 DHD_ERROR(("%s Set scancache failed %d\n", __FUNCTION__, ret));
10731 }
10732
10733 #else /* OEM_ANDROID */
10734
10735 if ((ret = dhd_apply_default_clm(dhd, clm_path)) < 0) {
10736 DHD_ERROR(("%s: CLM set failed. Abort initialization.\n", __FUNCTION__));
10737 goto done;
10738 }
10739
10740 #if defined(KEEP_ALIVE)
10741 if (!(dhd->op_mode &
10742 (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE))) {
10743 if ((ret = dhd_keep_alive_onoff(dhd)) < 0)
DHD_ERROR(("%s set keepalive failed %d\n",
10745 __FUNCTION__, ret));
10746 }
10747 #endif // endif
10748
/* Get capabilities from firmware */
10750 memset(dhd->fw_capabilities, 0, sizeof(dhd->fw_capabilities));
10751 ret = dhd_iovar(dhd, 0, "cap", NULL, 0, dhd->fw_capabilities, sizeof(dhd->fw_capabilities),
10752 FALSE);
10753 if (ret < 0) {
10754 DHD_ERROR(("%s: Get Capability failed (error=%d)\n",
10755 __FUNCTION__, ret));
10756 goto done;
10757 }
10758 #endif /* OEM_ANDROID */
10759
10760 ret = dhd_iovar(dhd, 0, "event_log_max_sets", NULL, 0, (char *)&event_log_max_sets,
10761 sizeof(event_log_max_sets), FALSE);
10762 if (ret == BCME_OK) {
10763 dhd->event_log_max_sets = event_log_max_sets;
10764 } else {
10765 dhd->event_log_max_sets = NUM_EVENT_LOG_SETS;
10766 }
/* Make sure max_sets is written first (with a write memory barrier) and
 * only then sets_queried; readers parsing the log sets in reverse order
 * rely on this ordering.
 */
10770 OSL_SMP_WMB();
10771 dhd->event_log_max_sets_queried = TRUE;
10772 DHD_ERROR(("%s: event_log_max_sets: %d ret: %d\n",
10773 __FUNCTION__, dhd->event_log_max_sets, ret));
10774 #ifdef DHD_BUS_MEM_ACCESS
10775 ret = dhd_iovar(dhd, 0, "enable_memuse", (char *)&enable_memuse,
10776 sizeof(enable_memuse), iovbuf, sizeof(iovbuf), FALSE);
10777 if (ret < 0) {
10778 DHD_ERROR(("%s: enable_memuse is failed ret=%d\n",
10779 __FUNCTION__, ret));
10780 } else {
10781 DHD_ERROR(("%s: enable_memuse = %d\n",
10782 __FUNCTION__, enable_memuse));
10783 }
10784 #endif /* DHD_BUS_MEM_ACCESS */
10785
10786 #ifdef DISABLE_TXBFR
10787 ret = dhd_iovar(dhd, 0, "txbf_bfr_cap", (char *)&txbf_bfr_cap, sizeof(txbf_bfr_cap), NULL,
10788 0, TRUE);
10789 if (ret < 0) {
10790 DHD_ERROR(("%s Clear txbf_bfr_cap failed %d\n", __FUNCTION__, ret));
10791 }
10792 #endif /* DISABLE_TXBFR */
10793
10794 #ifdef USE_WFA_CERT_CONF
10795 #ifdef USE_WL_FRAMEBURST
10796 if (sec_get_param_wfa_cert(dhd, SET_PARAM_FRAMEBURST, &frameburst) == BCME_OK) {
10797 DHD_ERROR(("%s, read frameburst param=%d\n", __FUNCTION__, frameburst));
10798 }
10799 #endif /* USE_WL_FRAMEBURST */
10800 g_frameburst = frameburst;
10801 #endif /* USE_WFA_CERT_CONF */
10802 #ifdef DISABLE_WL_FRAMEBURST_SOFTAP
/* Disable framebursting for SoftAP */
10804 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE) {
10805 frameburst = 0;
10806 }
10807 #endif /* DISABLE_WL_FRAMEBURST_SOFTAP */
10808 /* Set frameburst to value */
10809 if ((ret = dhd_wl_ioctl_cmd(dhd, WLC_SET_FAKEFRAG, (char *)&frameburst,
10810 sizeof(frameburst), TRUE, 0)) < 0) {
10811 DHD_INFO(("%s frameburst not supported %d\n", __FUNCTION__, ret));
10812 }
10813 #ifdef DHD_SET_FW_HIGHSPEED
10814 /* Set ack_ratio */
10815 ret = dhd_iovar(dhd, 0, "ack_ratio", (char *)&ack_ratio, sizeof(ack_ratio), NULL, 0, TRUE);
10816 if (ret < 0) {
10817 DHD_ERROR(("%s Set ack_ratio failed %d\n", __FUNCTION__, ret));
10818 }
10819
10820 /* Set ack_ratio_depth */
10821 ret = dhd_iovar(dhd, 0, "ack_ratio_depth", (char *)&ack_ratio_depth,
10822 sizeof(ack_ratio_depth), NULL, 0, TRUE);
10823 if (ret < 0) {
10824 DHD_ERROR(("%s Set ack_ratio_depth failed %d\n", __FUNCTION__, ret));
10825 }
10826 #endif /* DHD_SET_FW_HIGHSPEED */
10827
10828 iov_buf = (char*)MALLOC(dhd->osh, WLC_IOCTL_SMLEN);
10829 if (iov_buf == NULL) {
10830 DHD_ERROR(("failed to allocate %d bytes for iov_buf\n", WLC_IOCTL_SMLEN));
10831 ret = BCME_NOMEM;
10832 goto done;
10833 }
10834
10835 #ifdef WLAIBSS
10836 /* Apply AIBSS configurations */
10837 if ((ret = dhd_preinit_aibss_ioctls(dhd, iov_buf)) != BCME_OK) {
10838 DHD_ERROR(("%s dhd_preinit_aibss_ioctls failed %d\n",
10839 __FUNCTION__, ret));
10840 goto done;
10841 }
10842 #endif /* WLAIBSS */
10843
10844 #if defined(CUSTOM_AMPDU_BA_WSIZE) || (defined(WLAIBSS) && \
10845 defined(CUSTOM_IBSS_AMPDU_BA_WSIZE))
10846 /* Set ampdu ba wsize to 64 or 16 */
10847 #ifdef CUSTOM_AMPDU_BA_WSIZE
10848 ampdu_ba_wsize = CUSTOM_AMPDU_BA_WSIZE;
10849 #endif // endif
10850 #if defined(WLAIBSS) && defined(CUSTOM_IBSS_AMPDU_BA_WSIZE)
10851 if (dhd->op_mode == DHD_FLAG_IBSS_MODE)
10852 ampdu_ba_wsize = CUSTOM_IBSS_AMPDU_BA_WSIZE;
10853 #endif /* WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE */
10854 if (ampdu_ba_wsize != 0) {
ret = dhd_iovar(dhd, 0, "ampdu_ba_wsize", (char *)&ampdu_ba_wsize,
10856 sizeof(ampdu_ba_wsize), NULL, 0, TRUE);
10857 if (ret < 0) {
10858 DHD_ERROR(("%s Set ampdu_ba_wsize to %d failed %d\n",
10859 __FUNCTION__, ampdu_ba_wsize, ret));
10860 }
10861 }
10862 #endif /* CUSTOM_AMPDU_BA_WSIZE || (WLAIBSS && CUSTOM_IBSS_AMPDU_BA_WSIZE) */
10863
10864 #if defined(CUSTOM_AMPDU_MPDU)
10865 ampdu_mpdu = CUSTOM_AMPDU_MPDU;
10866 if (ampdu_mpdu != 0 && (ampdu_mpdu <= ampdu_ba_wsize)) {
ret = dhd_iovar(dhd, 0, "ampdu_mpdu", (char *)&ampdu_mpdu, sizeof(ampdu_mpdu),
10868 NULL, 0, TRUE);
10869 if (ret < 0) {
10870 DHD_ERROR(("%s Set ampdu_mpdu to %d failed %d\n",
10871 __FUNCTION__, CUSTOM_AMPDU_MPDU, ret));
10872 }
10873 }
10874 #endif /* CUSTOM_AMPDU_MPDU */
10875
10876 #if defined(CUSTOM_AMPDU_RELEASE)
10877 ampdu_release = CUSTOM_AMPDU_RELEASE;
10878 if (ampdu_release != 0 && (ampdu_release <= ampdu_ba_wsize)) {
ret = dhd_iovar(dhd, 0, "ampdu_release", (char *)&ampdu_release,
10880 sizeof(ampdu_release), NULL, 0, TRUE);
10881 if (ret < 0) {
10882 DHD_ERROR(("%s Set ampdu_release to %d failed %d\n",
10883 __FUNCTION__, CUSTOM_AMPDU_RELEASE, ret));
10884 }
10885 }
10886 #endif /* CUSTOM_AMPDU_RELEASE */
10887
10888 #if defined(CUSTOM_AMSDU_AGGSF)
10889 amsdu_aggsf = CUSTOM_AMSDU_AGGSF;
10890 if (amsdu_aggsf != 0) {
10891 ret = dhd_iovar(dhd, 0, "amsdu_aggsf", (char *)&amsdu_aggsf, sizeof(amsdu_aggsf),
10892 NULL, 0, TRUE);
10893 if (ret < 0) {
10894 DHD_ERROR(("%s Set amsdu_aggsf to %d failed %d\n",
10895 __FUNCTION__, CUSTOM_AMSDU_AGGSF, ret));
10896 }
10897 }
10898 #endif /* CUSTOM_AMSDU_AGGSF */
10899
10900 #if defined(BCMSUP_4WAY_HANDSHAKE)
10901 /* Read 4-way handshake requirements */
10902 if (dhd_use_idsup == 1) {
10903 ret = dhd_iovar(dhd, 0, "sup_wpa", (char *)&sup_wpa, sizeof(sup_wpa),
10904 (char *)&iovbuf, sizeof(iovbuf), FALSE);
10905 /* sup_wpa iovar returns NOTREADY status on some platforms using modularized
10906 * in-dongle supplicant.
10907 */
10908 if (ret >= 0 || ret == BCME_NOTREADY)
10909 dhd->fw_4way_handshake = TRUE;
10910 DHD_TRACE(("4-way handshake mode is: %d\n", dhd->fw_4way_handshake));
10911 }
10912 #endif /* BCMSUP_4WAY_HANDSHAKE */
10913 #if defined(SUPPORT_2G_VHT) || defined(SUPPORT_5G_1024QAM_VHT)
10914 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
10915 NULL, 0, FALSE);
10916 if (ret < 0) {
10917 DHD_ERROR(("%s vht_features get failed %d\n", __FUNCTION__, ret));
10918 vht_features = 0;
10919 } else {
10920 #ifdef SUPPORT_2G_VHT
10921 vht_features |= 0x3; /* 2G support */
10922 #endif /* SUPPORT_2G_VHT */
10923 #ifdef SUPPORT_5G_1024QAM_VHT
10924 vht_features |= 0x6; /* 5G 1024 QAM support */
10925 #endif /* SUPPORT_5G_1024QAM_VHT */
10926 }
10927 if (vht_features) {
10928 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features, sizeof(vht_features),
10929 NULL, 0, TRUE);
10930 if (ret < 0) {
10931 DHD_ERROR(("%s vht_features set failed %d\n", __FUNCTION__, ret));
10932
10933 if (ret == BCME_NOTDOWN) {
10934 uint wl_down = 1;
10935 ret = dhd_wl_ioctl_cmd(dhd, WLC_DOWN,
10936 (char *)&wl_down, sizeof(wl_down), TRUE, 0);
10937 DHD_ERROR(("%s vht_features fail WL_DOWN : %d,"
10938 " vht_features = 0x%x\n",
10939 __FUNCTION__, ret, vht_features));
10940
10941 ret = dhd_iovar(dhd, 0, "vht_features", (char *)&vht_features,
10942 sizeof(vht_features), NULL, 0, TRUE);
10943
10944 DHD_ERROR(("%s vht_features set. ret --> %d\n", __FUNCTION__, ret));
10945 }
10946 }
10947 }
10948 #endif /* SUPPORT_2G_VHT || SUPPORT_5G_1024QAM_VHT */
10949 #ifdef DISABLE_11N_PROPRIETARY_RATES
10950 ret = dhd_iovar(dhd, 0, "ht_features", (char *)&ht_features, sizeof(ht_features), NULL, 0,
10951 TRUE);
10952 if (ret < 0) {
10953 DHD_ERROR(("%s ht_features set failed %d\n", __FUNCTION__, ret));
10954 }
10955 #endif /* DISABLE_11N_PROPRIETARY_RATES */
10956 #if defined(DISABLE_HE_ENAB) || defined(CUSTOM_CONTROL_HE_ENAB)
10957 #if defined(DISABLE_HE_ENAB)
10958 control_he_enab = 0;
10959 #endif /* DISABLE_HE_ENAB */
10960 dhd_control_he_enab(dhd, control_he_enab);
10961 #endif /* DISABLE_HE_ENAB || CUSTOM_CONTROL_HE_ENAB */
10962
10963 #ifdef CUSTOM_PSPRETEND_THR
/* Set the pspretend threshold for HostAPD */
10965 ret = dhd_iovar(dhd, 0, "pspretend_threshold", (char *)&pspretend_thr,
10966 sizeof(pspretend_thr), NULL, 0, TRUE);
10967 if (ret < 0) {
10968 DHD_ERROR(("%s pspretend_threshold for HostAPD failed %d\n",
10969 __FUNCTION__, ret));
10970 }
10971 #endif // endif
10972
10973 ret = dhd_iovar(dhd, 0, "buf_key_b4_m4", (char *)&buf_key_b4_m4, sizeof(buf_key_b4_m4),
10974 NULL, 0, TRUE);
10975 if (ret < 0) {
10976 DHD_ERROR(("%s buf_key_b4_m4 set failed %d\n", __FUNCTION__, ret));
10977 }
10978 #ifdef SUPPORT_SET_CAC
10979 ret = dhd_iovar(dhd, 0, "cac", (char *)&cac, sizeof(cac), NULL, 0, TRUE);
10980 if (ret < 0) {
10981 DHD_ERROR(("%s Failed to set cac to %d, %d\n", __FUNCTION__, cac, ret));
10982 }
10983 #endif /* SUPPORT_SET_CAC */
10984 #ifdef DHD_ULP
10985 /* Get the required details from dongle during preinit ioctl */
10986 dhd_ulp_preinit(dhd);
10987 #endif /* DHD_ULP */
10988
10989 /* Read event_msgs mask */
10990 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, iovbuf,
10991 sizeof(iovbuf), FALSE);
10992 if (ret < 0) {
10993 DHD_ERROR(("%s read Event mask failed %d\n", __FUNCTION__, ret));
10994 goto done;
10995 }
10996 bcopy(iovbuf, eventmask, WL_EVENTING_MASK_LEN);
10997
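/*
 * The legacy event mask is a WL_EVENTING_MASK_LEN-byte (128-bit) bitmap
 * with one bit per WLC_E_* event number. It was read back just above, is
 * modified in place with setbit()/clrbit() below, and is written to the
 * firmware again via the "event_msgs" iovar; events numbered beyond this
 * mask are handled through "event_msgs_ext" further down.
 */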
10998 /* Setup event_msgs */
10999 setbit(eventmask, WLC_E_SET_SSID);
11000 setbit(eventmask, WLC_E_PRUNE);
11001 setbit(eventmask, WLC_E_AUTH);
11002 setbit(eventmask, WLC_E_AUTH_IND);
11003 setbit(eventmask, WLC_E_ASSOC);
11004 setbit(eventmask, WLC_E_REASSOC);
11005 setbit(eventmask, WLC_E_REASSOC_IND);
11006 if (!(dhd->op_mode & DHD_FLAG_IBSS_MODE))
11007 setbit(eventmask, WLC_E_DEAUTH);
11008 setbit(eventmask, WLC_E_DEAUTH_IND);
11009 setbit(eventmask, WLC_E_DISASSOC_IND);
11010 setbit(eventmask, WLC_E_DISASSOC);
11011 setbit(eventmask, WLC_E_JOIN);
11012 setbit(eventmask, WLC_E_START);
11013 setbit(eventmask, WLC_E_ASSOC_IND);
11014 setbit(eventmask, WLC_E_PSK_SUP);
11015 setbit(eventmask, WLC_E_LINK);
11016 setbit(eventmask, WLC_E_MIC_ERROR);
11017 setbit(eventmask, WLC_E_ASSOC_REQ_IE);
11018 setbit(eventmask, WLC_E_ASSOC_RESP_IE);
11019 #ifdef LIMIT_BORROW
11020 setbit(eventmask, WLC_E_ALLOW_CREDIT_BORROW);
11021 #endif // endif
11022 #ifndef WL_CFG80211
11023 setbit(eventmask, WLC_E_PMKID_CACHE);
11024 setbit(eventmask, WLC_E_TXFAIL);
11025 #endif // endif
11026 setbit(eventmask, WLC_E_JOIN_START);
11027 setbit(eventmask, WLC_E_SCAN_COMPLETE);
11028 #ifdef DHD_DEBUG
11029 setbit(eventmask, WLC_E_SCAN_CONFIRM_IND);
11030 #endif // endif
11031 #ifdef PNO_SUPPORT
11032 setbit(eventmask, WLC_E_PFN_NET_FOUND);
11033 setbit(eventmask, WLC_E_PFN_BEST_BATCHING);
11034 setbit(eventmask, WLC_E_PFN_BSSID_NET_FOUND);
11035 setbit(eventmask, WLC_E_PFN_BSSID_NET_LOST);
11036 #endif /* PNO_SUPPORT */
11037 /* enable dongle roaming event */
11038 #ifdef WL_CFG80211
11039 #if !defined(ROAM_EVT_DISABLE)
11040 setbit(eventmask, WLC_E_ROAM);
11041 #endif /* !ROAM_EVT_DISABLE */
11042 setbit(eventmask, WLC_E_BSSID);
11043 #endif /* WL_CFG80211 */
11044 #ifdef BCMCCX
11045 setbit(eventmask, WLC_E_ADDTS_IND);
11046 setbit(eventmask, WLC_E_DELTS_IND);
11047 #endif /* BCMCCX */
11048 #ifdef WLTDLS
11049 setbit(eventmask, WLC_E_TDLS_PEER_EVENT);
11050 #endif /* WLTDLS */
11051 #ifdef RTT_SUPPORT
11052 setbit(eventmask, WLC_E_PROXD);
11053 #endif /* RTT_SUPPORT */
11054 #if !defined(WL_CFG80211) && !defined(OEM_ANDROID)
11055 setbit(eventmask, WLC_E_ESCAN_RESULT);
11056 #endif // endif
11057 #ifdef WL_CFG80211
11058 setbit(eventmask, WLC_E_ESCAN_RESULT);
11059 setbit(eventmask, WLC_E_AP_STARTED);
11060 setbit(eventmask, WLC_E_ACTION_FRAME_RX);
11061 if (dhd->op_mode & DHD_FLAG_P2P_MODE) {
11062 setbit(eventmask, WLC_E_P2P_DISC_LISTEN_COMPLETE);
11063 }
11064 #endif /* WL_CFG80211 */
11065 #ifdef WLAIBSS
11066 setbit(eventmask, WLC_E_AIBSS_TXFAIL);
11067 #endif /* WLAIBSS */
11068
11069 #if defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE)
11070 if (dhd_logtrace_from_file(dhd)) {
11071 setbit(eventmask, WLC_E_TRACE);
11072 } else {
11073 clrbit(eventmask, WLC_E_TRACE);
11074 }
11075 #elif defined(SHOW_LOGTRACE)
11076 setbit(eventmask, WLC_E_TRACE);
11077 #else
11078 clrbit(eventmask, WLC_E_TRACE);
11079 #endif /* defined(SHOW_LOGTRACE) && defined(LOGTRACE_FROM_FILE) */
11080
11081 setbit(eventmask, WLC_E_CSA_COMPLETE_IND);
11082 #ifdef CUSTOM_EVENT_PM_WAKE
11083 setbit(eventmask, WLC_E_EXCESS_PM_WAKE_EVENT);
11084 #endif /* CUSTOM_EVENT_PM_WAKE */
11085 #ifdef DHD_LOSSLESS_ROAMING
11086 setbit(eventmask, WLC_E_ROAM_PREP);
11087 #endif // endif
11088 /* nan events */
11089 setbit(eventmask, WLC_E_NAN);
11090 #if defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING)
11091 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11092 #endif /* defined(PCIE_FULL_DONGLE) && defined(DHD_LOSSLESS_ROAMING) */
11093
11094 #if defined(BCMPCIE) && defined(EAPOL_PKT_PRIO)
11095 dhd_update_flow_prio_map(dhd, DHD_FLOW_PRIO_LLR_MAP);
11096 #endif /* defined(BCMPCIE) && defined(EAPOL_PKT_PRIO) */
11097
11098 /* Write updated Event mask */
11099 ret = dhd_iovar(dhd, 0, "event_msgs", eventmask, WL_EVENTING_MASK_LEN, NULL, 0, TRUE);
11100 if (ret < 0) {
11101 DHD_ERROR(("%s Set Event mask failed %d\n", __FUNCTION__, ret));
11102 goto done;
11103 }
11104
/* Build the event mask ext message iovar for events numbered above 128 */
11106 msglen = ROUNDUP(WLC_E_LAST, NBBY)/NBBY + EVENTMSGS_EXT_STRUCT_SIZE;
11107 eventmask_msg = (eventmsgs_ext_t*)MALLOC(dhd->osh, msglen);
11108 if (eventmask_msg == NULL) {
11109 DHD_ERROR(("failed to allocate %d bytes for event_msg_ext\n", msglen));
11110 ret = BCME_NOMEM;
11111 goto done;
11112 }
11113 bzero(eventmask_msg, msglen);
11114 eventmask_msg->ver = EVENTMSGS_VER;
11115 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11116
11117 /* Read event_msgs_ext mask */
11118 ret2 = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, iov_buf,
11119 WLC_IOCTL_SMLEN, FALSE);
11120
if (ret2 == 0) { /* event_msgs_ext is supported */
11122 bcopy(iov_buf, eventmask_msg, msglen);
11123 #ifdef RSSI_MONITOR_SUPPORT
11124 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11125 #endif /* RSSI_MONITOR_SUPPORT */
11126 #ifdef GSCAN_SUPPORT
11127 setbit(eventmask_msg->mask, WLC_E_PFN_GSCAN_FULL_RESULT);
11128 setbit(eventmask_msg->mask, WLC_E_PFN_SCAN_COMPLETE);
11129 setbit(eventmask_msg->mask, WLC_E_PFN_SSID_EXT);
11130 setbit(eventmask_msg->mask, WLC_E_ROAM_EXP_EVENT);
11131 #endif /* GSCAN_SUPPORT */
11132 setbit(eventmask_msg->mask, WLC_E_RSSI_LQM);
11133 #ifdef BT_WIFI_HANDOVER
11134 setbit(eventmask_msg->mask, WLC_E_BT_WIFI_HANDOVER_REQ);
11135 #endif /* BT_WIFI_HANDOVER */
11136 #ifdef DBG_PKT_MON
11137 setbit(eventmask_msg->mask, WLC_E_ROAM_PREP);
11138 #endif /* DBG_PKT_MON */
11139 #ifdef DHD_ULP
11140 setbit(eventmask_msg->mask, WLC_E_ULP);
11141 #endif // endif
11142 #ifdef WL_NATOE
11143 setbit(eventmask_msg->mask, WLC_E_NATOE_NFCT);
11144 #endif /* WL_NATOE */
11145 #ifdef WL_NAN
11146 setbit(eventmask_msg->mask, WLC_E_SLOTTED_BSS_PEER_OP);
11147 #endif /* WL_NAN */
11148 #ifdef WL_MBO
11149 setbit(eventmask_msg->mask, WLC_E_MBO);
11150 #endif /* WL_MBO */
11151 #ifdef WL_BCNRECV
11152 setbit(eventmask_msg->mask, WLC_E_BCNRECV_ABORTED);
11153 #endif /* WL_BCNRECV */
11154 #ifdef WL_CAC_TS
11155 setbit(eventmask_msg->mask, WLC_E_ADDTS_IND);
11156 setbit(eventmask_msg->mask, WLC_E_DELTS_IND);
11157 #endif /* WL_CAC_TS */
11158 #ifdef WL_CHAN_UTIL
11159 setbit(eventmask_msg->mask, WLC_E_BSS_LOAD);
11160 #endif /* WL_CHAN_UTIL */
11161 #ifdef WL_SAE
11162 setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_REQ);
11163 setbit(eventmask_msg->mask, WLC_E_EXT_AUTH_FRAME_RX);
11164 setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_TXSTATUS);
11165 setbit(eventmask_msg->mask, WLC_E_MGMT_FRAME_OFF_CHAN_COMPLETE);
11166 #endif /* WL_SAE */
11167 #ifndef CONFIG_SOC_S5E5515
11168 setbit(eventmask_msg->mask, WLC_E_IND_DOS_STATUS);
11169 #endif // endif
11170 #ifdef ENABLE_HOGSQS
11171 setbit(eventmask_msg->mask, WLC_E_LDF_HOGGER);
11172 #endif /* ENABLE_HOGSQS */
11173
11174 /* over temp event */
11175 setbit(eventmask_msg->mask, WLC_E_OVERTEMP);
11176
11177 /* Write updated Event mask */
11178 eventmask_msg->ver = EVENTMSGS_VER;
11179 eventmask_msg->command = EVENTMSGS_SET_MASK;
11180 eventmask_msg->len = ROUNDUP(WLC_E_LAST, NBBY)/NBBY;
11181 ret = dhd_iovar(dhd, 0, "event_msgs_ext", (char *)eventmask_msg, msglen, NULL, 0,
11182 TRUE);
11183 if (ret < 0) {
11184 DHD_ERROR(("%s write event mask ext failed %d\n", __FUNCTION__, ret));
11185 goto done;
11186 }
11187 } else if (ret2 == BCME_UNSUPPORTED || ret2 == BCME_VERSION) {
11188 /* Skip for BCME_UNSUPPORTED or BCME_VERSION */
DHD_ERROR(("%s event_msgs_ext not supported or version mismatch %d\n",
11190 __FUNCTION__, ret2));
11191 } else {
11192 DHD_ERROR(("%s read event mask ext failed %d\n", __FUNCTION__, ret2));
11193 ret = ret2;
11194 goto done;
11195 }
11196
11197 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11198 /* Enabling event log trace for EAP events */
11199 el_tag = (wl_el_tag_params_t *)MALLOC(dhd->osh, sizeof(wl_el_tag_params_t));
11200 if (el_tag == NULL) {
DHD_ERROR(("failed to allocate %d bytes for el_tag\n",
11202 (int)sizeof(wl_el_tag_params_t)));
11203 ret = BCME_NOMEM;
11204 goto done;
11205 }
11206 el_tag->tag = EVENT_LOG_TAG_4WAYHANDSHAKE;
11207 el_tag->set = 1;
11208 el_tag->flags = EVENT_LOG_TAG_FLAG_LOG;
11209 ret = dhd_iovar(dhd, 0, "event_log_tag_control", (char *)el_tag, sizeof(*el_tag), NULL, 0,
11210 TRUE);
#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
11212
11213 #ifdef OEM_ANDROID
11214 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_CHANNEL_TIME, (char *)&scan_assoc_time,
11215 sizeof(scan_assoc_time), TRUE, 0);
11216 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_UNASSOC_TIME, (char *)&scan_unassoc_time,
11217 sizeof(scan_unassoc_time), TRUE, 0);
11218 dhd_wl_ioctl_cmd(dhd, WLC_SET_SCAN_PASSIVE_TIME, (char *)&scan_passive_time,
11219 sizeof(scan_passive_time), TRUE, 0);
11220
11221 #ifdef ARP_OFFLOAD_SUPPORT
11222 /* Set and enable ARP offload feature for STA only */
11223 #if defined(OEM_ANDROID) && defined(SOFTAP)
11224 if (arpoe && !ap_fw_loaded) {
11225 #else
11226 if (arpoe) {
11227 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
11228 dhd_arp_offload_enable(dhd, TRUE);
11229 dhd_arp_offload_set(dhd, dhd_arp_mode);
11230 } else {
11231 dhd_arp_offload_enable(dhd, FALSE);
11232 dhd_arp_offload_set(dhd, 0);
11233 }
11234 dhd_arp_enable = arpoe;
11235 #endif /* ARP_OFFLOAD_SUPPORT */
11236
11237 #ifdef PKT_FILTER_SUPPORT
/* Set up default definitions for pktfilter; enabled in suspend */
11239 dhd->pktfilter_count = 6;
11240 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = NULL;
11241 if (!FW_SUPPORTED(dhd, pf6)) {
11242 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = NULL;
11243 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11244 } else {
11245 /* Immediately pkt filter TYPE 6 Discard IPv4/IPv6 Multicast Packet */
11246 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = DISCARD_IPV4_MCAST;
11247 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = DISCARD_IPV6_MCAST;
11248 }
11249 /* apply APP pktfilter */
11250 dhd->pktfilter[DHD_ARP_FILTER_NUM] = "105 0 0 12 0xFFFF 0x0806";
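/*
 * These filter strings use the dhd pkt_filter format,
 * "<id> <polarity> <type> <offset> <bitmask> <pattern>"; the ARP filter
 * above, for example, matches ethertype 0x0806 (ARP) under mask 0xFFFF
 * at byte offset 12 of the frame.
 */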
11251
11252 #ifdef BLOCK_IPV6_PACKET
11253 /* Setup filter to allow only IPv4 unicast frames */
11254 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 "
11255 HEX_PREF_STR UNI_FILTER_STR ZERO_ADDR_STR ETHER_TYPE_STR IPV6_FILTER_STR
11256 " "
11257 HEX_PREF_STR ZERO_ADDR_STR ZERO_ADDR_STR ETHER_TYPE_STR ZERO_TYPE_STR;
11258 #else
11259 /* Setup filter to allow only unicast */
11260 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0x01 0x00";
11261 #endif /* BLOCK_IPV6_PACKET */
11262
11263 #ifdef PASS_IPV4_SUSPEND
11264 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = "104 0 0 0 0xFFFFFF 0x01005E";
11265 #else
/* Add a filter to pass multicast DNS packets rather than filtering them out as broadcast */
11267 dhd->pktfilter[DHD_MDNS_FILTER_NUM] = NULL;
11268 #endif /* PASS_IPV4_SUSPEND */
11269 if (FW_SUPPORTED(dhd, pf6)) {
/* Immediately pkt filter TYPE 6 Discard broadcast IP packets */
dhd->pktfilter[DHD_IP4BCAST_DROP_FILTER_NUM] = DISCARD_IPV4_BCAST;
/* Immediately pkt filter TYPE 6 Discard Cisco STP packets */
dhd->pktfilter[DHD_LLC_STP_DROP_FILTER_NUM] = DISCARD_LLC_STP;
/* Immediately pkt filter TYPE 6 Discard Cisco XID protocol */
11275 dhd->pktfilter[DHD_LLC_XID_DROP_FILTER_NUM] = DISCARD_LLC_XID;
11276 dhd->pktfilter_count = 10;
11277 }
11278
11279 #ifdef GAN_LITE_NAT_KEEPALIVE_FILTER
11280 dhd->pktfilter_count = 4;
11281 /* Setup filter to block broadcast and NAT Keepalive packets */
11282 /* discard all broadcast packets */
11283 dhd->pktfilter[DHD_UNICAST_FILTER_NUM] = "100 0 0 0 0xffffff 0xffffff";
11284 /* discard NAT Keepalive packets */
11285 dhd->pktfilter[DHD_BROADCAST_FILTER_NUM] = "102 0 0 36 0xffffffff 0x11940009";
11286 /* discard NAT Keepalive packets */
11287 dhd->pktfilter[DHD_MULTICAST4_FILTER_NUM] = "104 0 0 38 0xffffffff 0x11940009";
11288 dhd->pktfilter[DHD_MULTICAST6_FILTER_NUM] = NULL;
11289 #endif /* GAN_LITE_NAT_KEEPALIVE_FILTER */
11290
11291 #if defined(SOFTAP)
11292 if (ap_fw_loaded) {
11293 dhd_enable_packet_filter(0, dhd);
11294 }
11295 #endif /* defined(SOFTAP) */
11296 dhd_set_packet_filter(dhd);
11297 #endif /* PKT_FILTER_SUPPORT */
11298 #ifdef DISABLE_11N
11299 ret = dhd_iovar(dhd, 0, "nmode", (char *)&nmode, sizeof(nmode), NULL, 0, TRUE);
11300 if (ret < 0)
11301 DHD_ERROR(("%s wl nmode 0 failed %d\n", __FUNCTION__, ret));
11302 #endif /* DISABLE_11N */
11303
11304 #ifdef ENABLE_BCN_LI_BCN_WAKEUP
11305 ret = dhd_iovar(dhd, 0, "bcn_li_bcn", (char *)&bcn_li_bcn, sizeof(bcn_li_bcn), NULL, 0,
11306 TRUE);
11307 #endif /* ENABLE_BCN_LI_BCN_WAKEUP */
11308 #ifdef AMPDU_VO_ENABLE
11309 tid.tid = PRIO_8021D_VO; /* Enable TID(6) for voice */
11310 tid.enable = TRUE;
11311 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11312
tid.tid = PRIO_8021D_NC; /* Enable TID(7) for network control */
11314 tid.enable = TRUE;
11315 ret = dhd_iovar(dhd, 0, "ampdu_tid", (char *)&tid, sizeof(tid), NULL, 0, TRUE);
11316 #endif // endif
11317 /* query for 'clmver' to get clm version info from firmware */
11318 memset(buf, 0, sizeof(buf));
11319 ret = dhd_iovar(dhd, 0, "clmver", NULL, 0, buf, sizeof(buf), FALSE);
11320 if (ret < 0)
11321 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11322 else {
11323 char *ver_temp_buf = NULL;
11324
11325 if ((ver_temp_buf = bcmstrstr(buf, "Data:")) == NULL) {
11326 DHD_ERROR(("Couldn't find \"Data:\"\n"));
11327 } else {
11328 ptr = (ver_temp_buf + strlen("Data:"));
11329 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", 0)) == NULL) {
DHD_ERROR(("Couldn't find newline character\n"));
11331 } else {
11332 memset(clm_version, 0, CLM_VER_STR_LEN);
11333 strncpy(clm_version, ver_temp_buf,
11334 MIN(strlen(ver_temp_buf) + 1, CLM_VER_STR_LEN - 1));
11335 DHD_INFO(("CLM version = %s\n", clm_version));
11336 }
11337 }
11338
11339 #if defined(CUSTOMER_HW4_DEBUG)
11340 if ((ver_temp_buf = bcmstrstr(ptr, "Customization:")) == NULL) {
11341 DHD_ERROR(("Couldn't find \"Customization:\"\n"));
11342 } else {
11343 char tokenlim;
11344 ptr = (ver_temp_buf + strlen("Customization:"));
11345 if ((ver_temp_buf = bcmstrtok(&ptr, "(\n", &tokenlim)) == NULL) {
DHD_ERROR(("Couldn't find project blob version"
" or newline character\n"));
11348 } else if (tokenlim == '(') {
11349 snprintf(clm_version,
11350 CLM_VER_STR_LEN - 1, "%s, Blob ver = Major : %s minor : ",
11351 clm_version, ver_temp_buf);
11352 DHD_INFO(("[INFO]CLM/Blob version = %s\n", clm_version));
11353 if ((ver_temp_buf = bcmstrtok(&ptr, "\n", &tokenlim)) == NULL) {
DHD_ERROR(("Couldn't find newline character\n"));
11355 } else {
11356 snprintf(clm_version,
11357 strlen(clm_version) + strlen(ver_temp_buf),
11358 "%s%s", clm_version, ver_temp_buf);
11359 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n",
11360 clm_version));
11361
11362 }
11363 } else if (tokenlim == '\n') {
11364 snprintf(clm_version,
11365 strlen(clm_version) + strlen(", Blob ver = Major : ") + 1,
11366 "%s, Blob ver = Major : ", clm_version);
11367 snprintf(clm_version,
11368 strlen(clm_version) + strlen(ver_temp_buf) + 1,
11369 "%s%s", clm_version, ver_temp_buf);
11370 DHD_INFO(("[INFO]CLM/Blob/project version = %s\n", clm_version));
11371 }
11372 }
11373 #endif /* CUSTOMER_HW4_DEBUG */
11374 if (strlen(clm_version)) {
11375 DHD_ERROR(("CLM version = %s\n", clm_version));
11376 } else {
11377 DHD_ERROR(("Couldn't find CLM version!\n"));
11378 }
11379 }
11380
11381 #ifdef WRITE_WLANINFO
11382 sec_save_wlinfo(fw_version, EPI_VERSION_STR, dhd->info->nv_path, clm_version);
11383 #endif /* WRITE_WLANINFO */
11384
11385 /* query for 'wlc_ver' to get version info from firmware */
11386 memset(&wlc_ver, 0, sizeof(wl_wlc_version_t));
11387 ret = dhd_iovar(dhd, 0, "wlc_ver", NULL, 0, (char *)&wlc_ver,
11388 sizeof(wl_wlc_version_t), FALSE);
11389 if (ret < 0)
11390 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11391 else {
11392 dhd->wlc_ver_major = wlc_ver.wlc_ver_major;
11393 dhd->wlc_ver_minor = wlc_ver.wlc_ver_minor;
11394 }
11395 #endif /* defined(OEM_ANDROID) */
11396 #ifdef GEN_SOFTAP_INFO_FILE
11397 sec_save_softap_info();
11398 #endif /* GEN_SOFTAP_INFO_FILE */
11399
11400 #if defined(BCMSDIO) && !defined(BCMSPI)
11401 dhd_txglom_enable(dhd, TRUE);
11402 #endif /* BCMSDIO && !BCMSPI */
11403
11404 #if defined(BCMSDIO)
11405 #ifdef PROP_TXSTATUS
11406 if (disable_proptx ||
11407 #ifdef PROP_TXSTATUS_VSDB
/* Enable WLFC only if the firmware is VSDB when in STA mode */
11409 (!FW_SUPPORTED(dhd, ap)) ||
11410 #endif /* PROP_TXSTATUS_VSDB */
11411 FALSE) {
11412 wlfc_enable = FALSE;
11413 }
11414
11415 #if defined(PROP_TXSTATUS)
11416 #ifdef USE_WFA_CERT_CONF
11417 if (sec_get_param_wfa_cert(dhd, SET_PARAM_PROPTX, &proptx) == BCME_OK) {
11418 DHD_ERROR(("%s , read proptx param=%d\n", __FUNCTION__, proptx));
11419 wlfc_enable = proptx;
11420 }
11421 #endif /* USE_WFA_CERT_CONF */
11422 #endif /* PROP_TXSTATUS */
11423
11424 #ifndef DISABLE_11N
11425 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder, sizeof(hostreorder),
11426 NULL, 0, TRUE);
11427 chipid = dhd_bus_chip_id(dhd);
11428 if (ret2 < 0) {
11429 DHD_ERROR(("%s wl ampdu_hostreorder failed %d\n", __FUNCTION__, ret2));
11430 if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
11431 ret = ret2;
11432
11433 if (ret == BCME_NOTDOWN) {
11434 uint wl_down = 1;
11435 ret2 = dhd_wl_ioctl_cmd(dhd, WLC_DOWN, (char *)&wl_down,
11436 sizeof(wl_down), TRUE, 0);
11437 DHD_ERROR(("%s ampdu_hostreorder fail WL_DOWN : %d, hostreorder :%d\n",
11438 __FUNCTION__, ret2, hostreorder));
11439
11440 ret2 = dhd_iovar(dhd, 0, "ampdu_hostreorder", (char *)&hostreorder,
11441 sizeof(hostreorder), NULL, 0, TRUE);
11442 DHD_ERROR(("%s wl ampdu_hostreorder. ret --> %d\n", __FUNCTION__, ret2));
11443 if (ret2 != BCME_UNSUPPORTED && chipid != BCM4373_CHIP_ID)
11444 ret = ret2;
11445 }
11446 if (ret2 != BCME_OK)
11447 hostreorder = 0;
11448 }
11449 #endif /* DISABLE_11N */
11450
11451 if (wlfc_enable)
11452 dhd_wlfc_init(dhd);
11453 #ifndef DISABLE_11N
11454 else if (hostreorder)
11455 dhd_wlfc_hostreorder_init(dhd);
11456 #endif /* DISABLE_11N */
11457
11458 #endif /* PROP_TXSTATUS */
#endif /* BCMSDIO */
11460 #ifndef PCIE_FULL_DONGLE
/* Without the full-dongle flow model, DHD needs all packets sent up to handle intra-BSS forwarding on the host */
11462 if (FW_SUPPORTED(dhd, ap)) {
11463 wl_ap_isolate = AP_ISOLATE_SENDUP_ALL;
11464 ret = dhd_iovar(dhd, 0, "ap_isolate", (char *)&wl_ap_isolate, sizeof(wl_ap_isolate),
11465 NULL, 0, TRUE);
11466 if (ret < 0)
11467 DHD_ERROR(("%s failed %d\n", __FUNCTION__, ret));
11468 }
11469 #endif /* PCIE_FULL_DONGLE */
11470 #ifdef PNO_SUPPORT
11471 if (!dhd->pno_state) {
11472 dhd_pno_init(dhd);
11473 }
11474 #endif // endif
11475 #ifdef RTT_SUPPORT
11476 if (!dhd->rtt_state) {
11477 ret = dhd_rtt_init(dhd);
11478 if (ret < 0) {
11479 DHD_ERROR(("%s failed to initialize RTT\n", __FUNCTION__));
11480 }
11481 }
11482 #endif // endif
11483 #ifdef FILTER_IE
11484 /* Failure to configure filter IE is not a fatal error, ignore it. */
11485 if (!(dhd->op_mode & (DHD_FLAG_HOSTAP_MODE | DHD_FLAG_MFG_MODE)))
11486 dhd_read_from_file(dhd);
11487 #endif /* FILTER_IE */
11488 #ifdef WL11U
11489 dhd_interworking_enable(dhd);
11490 #endif /* WL11U */
11491
11492 #ifdef NDO_CONFIG_SUPPORT
11493 dhd->ndo_enable = FALSE;
11494 dhd->ndo_host_ip_overflow = FALSE;
11495 dhd->ndo_max_host_ip = NDO_MAX_HOST_IP_ENTRIES;
11496 #endif /* NDO_CONFIG_SUPPORT */
11497
11498 /* ND offload version supported */
11499 dhd->ndo_version = dhd_ndo_get_version(dhd);
11500 if (dhd->ndo_version > 0) {
11501 DHD_INFO(("%s: ndo version %d\n", __FUNCTION__, dhd->ndo_version));
11502
11503 #ifdef NDO_CONFIG_SUPPORT
11504 /* enable Unsolicited NA filter */
11505 ret = dhd_ndo_unsolicited_na_filter_enable(dhd, 1);
11506 if (ret < 0) {
11507 DHD_ERROR(("%s failed to enable Unsolicited NA filter\n", __FUNCTION__));
11508 }
11509 #endif /* NDO_CONFIG_SUPPORT */
11510 }
11511
/* Check whether the dongle supports wbtext (product policy) */
11513 dhd->wbtext_support = FALSE;
11514 if (dhd_wl_ioctl_get_intiovar(dhd, "wnm_bsstrans_resp", &wnm_bsstrans_resp,
11515 WLC_GET_VAR, FALSE, 0) != BCME_OK) {
11516 DHD_ERROR(("failed to get wnm_bsstrans_resp\n"));
11517 }
11518 dhd->wbtext_policy = wnm_bsstrans_resp;
11519 if (dhd->wbtext_policy == WL_BSSTRANS_POLICY_PRODUCT_WBTEXT) {
11520 dhd->wbtext_support = TRUE;
11521 }
11522 #ifndef WBTEXT
11523 /* driver can turn off wbtext feature through makefile */
11524 if (dhd->wbtext_support) {
11525 if (dhd_wl_ioctl_set_intiovar(dhd, "wnm_bsstrans_resp",
11526 WL_BSSTRANS_POLICY_ROAM_ALWAYS,
11527 WLC_SET_VAR, FALSE, 0) != BCME_OK) {
11528 DHD_ERROR(("failed to disable WBTEXT\n"));
11529 }
11530 }
11531 #endif /* !WBTEXT */
11532
11533 #ifdef DHD_NON_DMA_M2M_CORRUPTION
11534 /* check pcie non dma loopback */
11535 if (dhd->op_mode == DHD_FLAG_MFG_MODE &&
11536 (dhd_bus_dmaxfer_lpbk(dhd, M2M_NON_DMA_LPBK) < 0)) {
11537 goto done;
11538 }
11539 #endif /* DHD_NON_DMA_M2M_CORRUPTION */
11540
11541 /* WNM capabilities */
11542 wnm_cap = 0
11543 #ifdef WL11U
11544 | WL_WNM_BSSTRANS | WL_WNM_NOTIF
11545 #endif // endif
11546 #ifdef WBTEXT
11547 | WL_WNM_BSSTRANS | WL_WNM_MAXIDLE
11548 #endif // endif
11549 ;
11550 #if defined(WL_MBO) && defined(WL_OCE)
11551 if (FW_SUPPORTED(dhd, estm)) {
11552 wnm_cap |= WL_WNM_ESTM;
11553 }
11554 #endif /* WL_MBO && WL_OCE */
11555 if (dhd_iovar(dhd, 0, "wnm", (char *)&wnm_cap, sizeof(wnm_cap), NULL, 0, TRUE) < 0) {
11556 DHD_ERROR(("failed to set WNM capabilities\n"));
11557 }
11558
11559 if (FW_SUPPORTED(dhd, ecounters) && enable_ecounter) {
11560 dhd_ecounter_configure(dhd, TRUE);
11561 }
11562
11563 /* store the preserve log set numbers */
11564 if (dhd_get_preserve_log_numbers(dhd, &dhd->logset_prsrv_mask)
11565 != BCME_OK) {
11566 DHD_ERROR(("%s: Failed to get preserve log # !\n", __FUNCTION__));
11567 }
11568
11569 #if defined(WBTEXT) && defined(WBTEXT_BTMDELTA)
11570 if (dhd_iovar(dhd, 0, "wnm_btmdelta", (char *)&btmdelta, sizeof(btmdelta),
11571 NULL, 0, TRUE) < 0) {
11572 DHD_ERROR(("failed to set BTM delta\n"));
11573 }
11574 #endif /* WBTEXT && WBTEXT_BTMDELTA */
11575
11576 #ifdef WL_MONITOR
11577 if (FW_SUPPORTED(dhd, monitor)) {
11578 dhd->monitor_enable = TRUE;
11579 DHD_ERROR(("%s: Monitor mode is enabled in FW cap\n", __FUNCTION__));
11580 } else {
11581 dhd->monitor_enable = FALSE;
11582 DHD_ERROR(("%s: Monitor mode is not enabled in FW cap\n", __FUNCTION__));
11583 }
11584 #endif /* WL_MONITOR */
11585
11586 #ifdef CONFIG_SILENT_ROAM
11587 dhd->sroam_turn_on = TRUE;
11588 dhd->sroamed = FALSE;
11589 #endif /* CONFIG_SILENT_ROAM */
11590
11591 done:
11592
11593 if (eventmask_msg) {
11594 MFREE(dhd->osh, eventmask_msg, msglen);
11595 eventmask_msg = NULL;
11596 }
11597 if (iov_buf) {
11598 MFREE(dhd->osh, iov_buf, WLC_IOCTL_SMLEN);
11599 iov_buf = NULL;
11600 }
11601 #if defined(DHD_8021X_DUMP) && defined(SHOW_LOGTRACE)
11602 if (el_tag) {
11603 MFREE(dhd->osh, el_tag, sizeof(wl_el_tag_params_t));
11604 el_tag = NULL;
11605 }
#endif /* DHD_8021X_DUMP && SHOW_LOGTRACE */
11607 return ret;
11608 }
11609
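/*
 * dhd_iovar() - format a named iovar and issue it as WLC_SET_VAR or
 * WLC_GET_VAR through dhd_wl_ioctl().
 *
 * For a SET (set == TRUE), param_buf/param_len carry the value and
 * res_buf/res_len must be NULL/0; for a GET (set == FALSE), the result
 * is returned in res_buf, which must be non-NULL with res_len > 0.
 * Returns BCME_OK or a negative BCME_* error code.
 *
 * A minimal SET sketch (using the "lpc" iovar seen in preinit above):
 *
 *	uint lpc = 1;
 *	ret = dhd_iovar(dhd, 0, "lpc", (char *)&lpc, sizeof(lpc),
 *		NULL, 0, TRUE);
 */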
11610 int
11611 dhd_iovar(dhd_pub_t *pub, int ifidx, char *name, char *param_buf, uint param_len, char *res_buf,
11612 uint res_len, int set)
11613 {
11614 char *buf = NULL;
11615 int input_len;
11616 wl_ioctl_t ioc;
11617 int ret;
11618
11619 if (res_len > WLC_IOCTL_MAXLEN || param_len > WLC_IOCTL_MAXLEN)
11620 return BCME_BADARG;
11621
11622 input_len = strlen(name) + 1 + param_len;
11623 if (input_len > WLC_IOCTL_MAXLEN)
11624 return BCME_BADARG;
11625
11626 buf = NULL;
11627 if (set) {
11628 if (res_buf || res_len != 0) {
DHD_ERROR(("%s: SET wrong argument\n", __FUNCTION__));
11630 ret = BCME_BADARG;
11631 goto exit;
11632 }
11633 buf = MALLOCZ(pub->osh, input_len);
11634 if (!buf) {
11635 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11636 ret = BCME_NOMEM;
11637 goto exit;
11638 }
11639 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11640 if (!ret) {
11641 ret = BCME_NOMEM;
11642 goto exit;
11643 }
11644
11645 ioc.cmd = WLC_SET_VAR;
11646 ioc.buf = buf;
11647 ioc.len = input_len;
11648 ioc.set = set;
11649
11650 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11651 } else {
11652 if (!res_buf || !res_len) {
11653 DHD_ERROR(("%s: GET failed. resp_buf NULL or length 0.\n", __FUNCTION__));
11654 ret = BCME_BADARG;
11655 goto exit;
11656 }
11657
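/*
 * If the caller's response buffer is too small to hold the formatted
 * request ("name" + params), stage the request in a temporary buffer
 * and copy back only res_len bytes of the response; otherwise the
 * request is formatted directly into res_buf.
 */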
11658 if (res_len < input_len) {
11659 DHD_INFO(("%s: res_len(%d) < input_len(%d)\n", __FUNCTION__,
11660 res_len, input_len));
11661 buf = MALLOCZ(pub->osh, input_len);
11662 if (!buf) {
11663 DHD_ERROR(("%s: mem alloc failed\n", __FUNCTION__));
11664 ret = BCME_NOMEM;
11665 goto exit;
11666 }
11667 ret = bcm_mkiovar(name, param_buf, param_len, buf, input_len);
11668 if (!ret) {
11669 ret = BCME_NOMEM;
11670 goto exit;
11671 }
11672
11673 ioc.cmd = WLC_GET_VAR;
11674 ioc.buf = buf;
11675 ioc.len = input_len;
11676 ioc.set = set;
11677
11678 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11679
11680 if (ret == BCME_OK) {
11681 memcpy(res_buf, buf, res_len);
11682 }
11683 } else {
11684 memset(res_buf, 0, res_len);
11685 ret = bcm_mkiovar(name, param_buf, param_len, res_buf, res_len);
11686 if (!ret) {
11687 ret = BCME_NOMEM;
11688 goto exit;
11689 }
11690
11691 ioc.cmd = WLC_GET_VAR;
11692 ioc.buf = res_buf;
11693 ioc.len = res_len;
11694 ioc.set = set;
11695
11696 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11697 }
11698 }
11699 exit:
11700 if (buf) {
11701 MFREE(pub->osh, buf, input_len);
11702 buf = NULL;
11703 }
11704 return ret;
11705 }
11706
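/*
 * dhd_getiovar() - GET-only helper that formats the request directly in
 * the caller-supplied buffer (*resptr, at least resp_len bytes, capped
 * at WLC_IOCTL_MAXLEN) and issues WLC_GET_VAR; the response is returned
 * in place in the same buffer.
 */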
11707 int
11708 dhd_getiovar(dhd_pub_t *pub, int ifidx, char *name, char *cmd_buf,
11709 uint cmd_len, char **resptr, uint resp_len)
11710 {
11711 int len = resp_len;
11712 int ret;
11713 char *buf = *resptr;
11714 wl_ioctl_t ioc;
11715 if (resp_len > WLC_IOCTL_MAXLEN)
11716 return BCME_BADARG;
11717
11718 memset(buf, 0, resp_len);
11719
11720 ret = bcm_mkiovar(name, cmd_buf, cmd_len, buf, len);
11721 if (ret == 0) {
11722 return BCME_BUFTOOSHORT;
11723 }
11724
11725 memset(&ioc, 0, sizeof(ioc));
11726
11727 ioc.cmd = WLC_GET_VAR;
11728 ioc.buf = buf;
11729 ioc.len = len;
11730 ioc.set = 0;
11731
11732 ret = dhd_wl_ioctl(pub, ifidx, &ioc, ioc.buf, ioc.len);
11733
11734 return ret;
11735 }
11736
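/*
 * dhd_change_mtu() - validate and apply a new MTU on interface ifidx.
 * The interface must be administratively down, and the value must fall
 * within the [DHD_MIN_MTU, DHD_MAX_MTU] bounds defined below.
 */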
11737 int dhd_change_mtu(dhd_pub_t *dhdp, int new_mtu, int ifidx)
11738 {
11739 struct dhd_info *dhd = dhdp->info;
11740 struct net_device *dev = NULL;
11741
11742 ASSERT(dhd && dhd->iflist[ifidx]);
11743 dev = dhd->iflist[ifidx]->net;
11744 ASSERT(dev);
11745
11746 if (netif_running(dev)) {
DHD_ERROR(("%s: Must be down to change its MTU\n", dev->name));
11748 return BCME_NOTDOWN;
11749 }
11750
11751 #define DHD_MIN_MTU 1500
11752 #define DHD_MAX_MTU 1752
11753
11754 if ((new_mtu < DHD_MIN_MTU) || (new_mtu > DHD_MAX_MTU)) {
11755 DHD_ERROR(("%s: MTU size %d is invalid.\n", __FUNCTION__, new_mtu));
11756 return BCME_BADARG;
11757 }
11758
11759 dev->mtu = new_mtu;
11760 return 0;
11761 }
11762
11763 #ifdef ARP_OFFLOAD_SUPPORT
11764 /* add or remove AOE host ip(s) (up to 8 IPs on the interface) */
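/*
 * The dongle-side table is not edited in place: the current host_ip
 * table is snapshotted, cleared in the dongle, the add/remove is applied
 * to the local copy, and the surviving entries are written back one at
 * a time.
 */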
11765 void
11766 aoe_update_host_ipv4_table(dhd_pub_t *dhd_pub, u32 ipa, bool add, int idx)
11767 {
11768 u32 ipv4_buf[MAX_IPV4_ENTRIES]; /* temp save for AOE host_ip table */
11769 int i;
11770 int ret;
11771
11772 bzero(ipv4_buf, sizeof(ipv4_buf));
11773
11774 /* display what we've got */
11775 ret = dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
11776 DHD_ARPOE(("%s: hostip table read from Dongle:\n", __FUNCTION__));
11777 #ifdef AOE_DBG
11778 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
11779 #endif // endif
/* Now that the host_ip table is saved, clear it in the dongle AOE */
11781 dhd_aoe_hostip_clr(dhd_pub, idx);
11782
11783 if (ret) {
11784 DHD_ERROR(("%s failed\n", __FUNCTION__));
11785 return;
11786 }
11787
11788 for (i = 0; i < MAX_IPV4_ENTRIES; i++) {
11789 if (add && (ipv4_buf[i] == 0)) {
11790 ipv4_buf[i] = ipa;
11791 add = FALSE; /* added ipa to local table */
11792 DHD_ARPOE(("%s: Saved new IP in temp arp_hostip[%d]\n",
11793 __FUNCTION__, i));
11794 } else if (ipv4_buf[i] == ipa) {
11795 ipv4_buf[i] = 0;
11796 DHD_ARPOE(("%s: removed IP:%x from temp table %d\n",
11797 __FUNCTION__, ipa, i));
11798 }
11799
11800 if (ipv4_buf[i] != 0) {
11801 /* add back host_ip entries from our local cache */
11802 dhd_arp_offload_add_ip(dhd_pub, ipv4_buf[i], idx);
11803 DHD_ARPOE(("%s: added IP:%x to dongle arp_hostip[%d]\n\n",
11804 __FUNCTION__, ipv4_buf[i], i));
11805 }
11806 }
11807 #ifdef AOE_DBG
11808 /* see the resulting hostip table */
11809 dhd_arp_get_arp_hostip_table(dhd_pub, ipv4_buf, sizeof(ipv4_buf), idx);
11810 DHD_ARPOE(("%s: read back arp_hostip table:\n", __FUNCTION__));
11811 dhd_print_buf(ipv4_buf, 32, 4); /* max 8 IPs 4b each */
11812 #endif // endif
11813 }
11814
11815 /*
11816 * Notification mechanism from kernel to our driver. This function is called by the Linux kernel
11817 * whenever there is an event related to an IP address.
11818 * ptr : kernel provided pointer to IP address that has changed
11819 */
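/*
 * This callback is wrapped in a notifier_block elsewhere in this file
 * and registered with register_inetaddr_notifier(); dhd_detach() below
 * performs the matching unregister_inetaddr_notifier() call.
 */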
11820 static int dhd_inetaddr_notifier_call(struct notifier_block *this,
11821 unsigned long event,
11822 void *ptr)
11823 {
11824 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
11825
11826 dhd_info_t *dhd;
11827 dhd_pub_t *dhd_pub;
11828 int idx;
11829
11830 if (!dhd_arp_enable)
11831 return NOTIFY_DONE;
11832 if (!ifa || !(ifa->ifa_dev->dev))
11833 return NOTIFY_DONE;
11834
/* Filter notifications meant for non-Broadcom devices */
11836 if ((ifa->ifa_dev->dev->netdev_ops != &dhd_ops_pri) &&
11837 (ifa->ifa_dev->dev->netdev_ops != &dhd_ops_virt)) {
11838 #if defined(WL_ENABLE_P2P_IF)
11839 if (!wl_cfgp2p_is_ifops(ifa->ifa_dev->dev->netdev_ops))
11840 #endif /* WL_ENABLE_P2P_IF */
11841 return NOTIFY_DONE;
11842 }
11843
11844 dhd = DHD_DEV_INFO(ifa->ifa_dev->dev);
11845 if (!dhd)
11846 return NOTIFY_DONE;
11847
11848 dhd_pub = &dhd->pub;
11849
11850 if (dhd_pub->arp_version == 1) {
11851 idx = 0;
11852 } else {
11853 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
11854 if (dhd->iflist[idx] && dhd->iflist[idx]->net == ifa->ifa_dev->dev)
11855 break;
11856 }
11857 if (idx < DHD_MAX_IFS)
11858 DHD_TRACE(("ifidx : %p %s %d\n", dhd->iflist[idx]->net,
11859 dhd->iflist[idx]->name, dhd->iflist[idx]->idx));
11860 else {
DHD_ERROR(("Cannot find ifidx for (%s), set to 0\n", ifa->ifa_label));
11862 idx = 0;
11863 }
11864 }
11865
11866 switch (event) {
11867 case NETDEV_UP:
11868 DHD_ARPOE(("%s: [%s] Up IP: 0x%x\n",
11869 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
11870
11871 /*
* Skip if the bus is not in a state to transport the IOVAR,
* or the dongle is not ready.
11874 */
11875 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(&dhd->pub) ||
11876 dhd->pub.busstate == DHD_BUS_LOAD) {
11877 DHD_ERROR(("%s: bus not ready, exit NETDEV_UP : %d\n",
11878 __FUNCTION__, dhd->pub.busstate));
11879 if (dhd->pend_ipaddr) {
11880 DHD_ERROR(("%s: overwrite pending ipaddr: 0x%x\n",
11881 __FUNCTION__, dhd->pend_ipaddr));
11882 }
11883 dhd->pend_ipaddr = ifa->ifa_address;
11884 break;
11885 }
11886
11887 #ifdef AOE_IP_ALIAS_SUPPORT
11888 DHD_ARPOE(("%s:add aliased IP to AOE hostip cache\n",
11889 __FUNCTION__));
11890 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, TRUE, idx);
11891 #endif /* AOE_IP_ALIAS_SUPPORT */
11892 break;
11893
11894 case NETDEV_DOWN:
11895 DHD_ARPOE(("%s: [%s] Down IP: 0x%x\n",
11896 __FUNCTION__, ifa->ifa_label, ifa->ifa_address));
11897 dhd->pend_ipaddr = 0;
11898 #ifdef AOE_IP_ALIAS_SUPPORT
11899 DHD_ARPOE(("%s:interface is down, AOE clr all for this if\n",
11900 __FUNCTION__));
11901 if ((dhd_pub->op_mode & DHD_FLAG_HOSTAP_MODE) ||
11902 (ifa->ifa_dev->dev != dhd_linux_get_primary_netdev(dhd_pub))) {
11903 aoe_update_host_ipv4_table(dhd_pub, ifa->ifa_address, FALSE, idx);
11904 } else
11905 #endif /* AOE_IP_ALIAS_SUPPORT */
11906 {
11907 dhd_aoe_hostip_clr(&dhd->pub, idx);
11908 dhd_aoe_arp_clr(&dhd->pub, idx);
11909 }
11910 break;
11911
11912 default:
DHD_ARPOE(("%s: do nothing for [%s] Event: %lu\n",
__FUNCTION__, ifa->ifa_label, event));
11915 break;
11916 }
11917 return NOTIFY_DONE;
11918 }
11919 #endif /* ARP_OFFLOAD_SUPPORT */
11920
11921 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
/* Neighbor Discovery Offload: deferred handler */
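/*
 * Runs on the DHD deferred-work queue: the inet6 notifier below may be
 * called in atomic context, so the firmware IOVAR calls are pushed to
 * this handler. event_data is the kzalloc'd ipv6_work_info_t from the
 * notifier and is freed before returning.
 */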
11923 static void
11924 dhd_inet6_work_handler(void *dhd_info, void *event_data, u8 event)
11925 {
11926 struct ipv6_work_info_t *ndo_work = (struct ipv6_work_info_t *)event_data;
11927 dhd_info_t *dhd = (dhd_info_t *)dhd_info;
11928 dhd_pub_t *dhdp;
11929 int ret;
11930
11931 if (!dhd) {
11932 DHD_ERROR(("%s: invalid dhd_info\n", __FUNCTION__));
11933 goto done;
11934 }
11935 dhdp = &dhd->pub;
11936
11937 if (event != DHD_WQ_WORK_IPV6_NDO) {
11938 DHD_ERROR(("%s: unexpected event\n", __FUNCTION__));
11939 goto done;
11940 }
11941
11942 if (!ndo_work) {
11943 DHD_ERROR(("%s: ipv6 work info is not initialized\n", __FUNCTION__));
11944 return;
11945 }
11946
11947 switch (ndo_work->event) {
11948 case NETDEV_UP:
11949 #ifndef NDO_CONFIG_SUPPORT
DHD_TRACE(("%s: Enable NDO\n", __FUNCTION__));
11951 ret = dhd_ndo_enable(dhdp, TRUE);
11952 if (ret < 0) {
11953 DHD_ERROR(("%s: Enabling NDO Failed %d\n", __FUNCTION__, ret));
11954 }
11955 #endif /* !NDO_CONFIG_SUPPORT */
11956 DHD_TRACE(("%s: Add a host ip for NDO\n", __FUNCTION__));
11957 if (dhdp->ndo_version > 0) {
11958 /* inet6 addr notifier called only for unicast address */
11959 ret = dhd_ndo_add_ip_with_type(dhdp, &ndo_work->ipv6_addr[0],
11960 WL_ND_IPV6_ADDR_TYPE_UNICAST, ndo_work->if_idx);
11961 } else {
11962 ret = dhd_ndo_add_ip(dhdp, &ndo_work->ipv6_addr[0],
11963 ndo_work->if_idx);
11964 }
11965 if (ret < 0) {
11966 DHD_ERROR(("%s: Adding a host ip for NDO failed %d\n",
11967 __FUNCTION__, ret));
11968 }
11969 break;
11970 case NETDEV_DOWN:
11971 if (dhdp->ndo_version > 0) {
11972 DHD_TRACE(("%s: Remove a host ip for NDO\n", __FUNCTION__));
11973 ret = dhd_ndo_remove_ip_by_addr(dhdp,
11974 &ndo_work->ipv6_addr[0], ndo_work->if_idx);
11975 } else {
11976 DHD_TRACE(("%s: Clear host ip table for NDO \n", __FUNCTION__));
11977 ret = dhd_ndo_remove_ip(dhdp, ndo_work->if_idx);
11978 }
11979 if (ret < 0) {
11980 DHD_ERROR(("%s: Removing host ip for NDO failed %d\n",
11981 __FUNCTION__, ret));
11982 goto done;
11983 }
11984 #ifdef NDO_CONFIG_SUPPORT
11985 if (dhdp->ndo_host_ip_overflow) {
11986 ret = dhd_dev_ndo_update_inet6addr(
11987 dhd_idx2net(dhdp, ndo_work->if_idx));
11988 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
11989 DHD_ERROR(("%s: Updating host ip for NDO failed %d\n",
11990 __FUNCTION__, ret));
11991 goto done;
11992 }
11993 }
11994 #else /* !NDO_CONFIG_SUPPORT */
DHD_TRACE(("%s: Disable NDO\n", __FUNCTION__));
11996 ret = dhd_ndo_enable(dhdp, FALSE);
11997 if (ret < 0) {
11998 DHD_ERROR(("%s: disabling NDO Failed %d\n", __FUNCTION__, ret));
11999 goto done;
12000 }
12001 #endif /* NDO_CONFIG_SUPPORT */
12002 break;
12003
12004 default:
DHD_ERROR(("%s: unknown notifier event\n", __FUNCTION__));
12006 break;
12007 }
12008 done:
12009
/* Free ndo_work, allocated while scheduling the work */
12011 if (ndo_work) {
12012 kfree(ndo_work);
12013 }
12014
12015 return;
} /* dhd_inet6_work_handler */
12017
12018 /*
12019 * Neighbor Discovery Offload: Called when an interface
12020 * is assigned with ipv6 address.
12021 * Handles only primary interface
12022 */
12023 int dhd_inet6addr_notifier_call(struct notifier_block *this, unsigned long event, void *ptr)
12024 {
12025 dhd_info_t *dhd;
12026 dhd_pub_t *dhdp;
12027 struct inet6_ifaddr *inet6_ifa = ptr;
12028 struct ipv6_work_info_t *ndo_info;
12029 int idx;
12030
/* Filter notifications meant for non-Broadcom devices */
12032 if (inet6_ifa->idev->dev->netdev_ops != &dhd_ops_pri) {
12033 return NOTIFY_DONE;
12034 }
12035
12036 dhd = DHD_DEV_INFO(inet6_ifa->idev->dev);
12037 if (!dhd) {
12038 return NOTIFY_DONE;
12039 }
12040 dhdp = &dhd->pub;
12041
12042 /* Supports only primary interface */
12043 idx = dhd_net2idx(dhd, inet6_ifa->idev->dev);
12044 if (idx != 0) {
12045 return NOTIFY_DONE;
12046 }
12047
12048 /* FW capability */
12049 if (!FW_SUPPORTED(dhdp, ndoe)) {
12050 return NOTIFY_DONE;
12051 }
12052
12053 ndo_info = (struct ipv6_work_info_t *)kzalloc(sizeof(struct ipv6_work_info_t), GFP_ATOMIC);
12054 if (!ndo_info) {
12055 DHD_ERROR(("%s: ipv6 work alloc failed\n", __FUNCTION__));
12056 return NOTIFY_DONE;
12057 }
12058
12059 /* fill up ndo_info */
12060 ndo_info->event = event;
12061 ndo_info->if_idx = idx;
12062 memcpy(ndo_info->ipv6_addr, &inet6_ifa->addr, IPV6_ADDR_LEN);
12063
/* Defer the work to a thread as it may block the kernel */
12065 dhd_deferred_schedule_work(dhd->dhd_deferred_wq, (void *)ndo_info, DHD_WQ_WORK_IPV6_NDO,
12066 dhd_inet6_work_handler, DHD_WQ_WORK_PRIORITY_LOW);
12067 return NOTIFY_DONE;
12068 }
12069 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12070
12071 /* Network attach to be invoked from the bus probe handlers */
12072 int
12073 dhd_attach_net(dhd_pub_t *dhdp, bool need_rtnl_lock)
12074 {
12075 struct net_device *primary_ndev;
12076 BCM_REFERENCE(primary_ndev);
12077
12078 /* Register primary net device */
12079 if (dhd_register_if(dhdp, 0, need_rtnl_lock) != 0) {
12080 return BCME_ERROR;
12081 }
12082
12083 #if defined(WL_CFG80211)
12084 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
12085 if (wl_cfg80211_net_attach(primary_ndev) < 0) {
12086 /* fail the init */
12087 dhd_remove_if(dhdp, 0, TRUE);
12088 return BCME_ERROR;
12089 }
12090 #endif /* WL_CFG80211 */
12091 return BCME_OK;
12092 }
12093
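/*
 * dhd_register_if() - wire up the net_device ops, MAC address, ethtool
 * (and wireless-extension) handlers for interface ifidx, then register
 * the device with the kernel: register_netdev() when the caller does not
 * already hold the RTNL lock, register_netdevice() when it does.
 */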
12094 int
12095 dhd_register_if(dhd_pub_t *dhdp, int ifidx, bool need_rtnl_lock)
12096 {
12097 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12098 dhd_if_t *ifp;
12099 struct net_device *net = NULL;
12100 int err = 0;
12101 uint8 temp_addr[ETHER_ADDR_LEN] = { 0x00, 0x90, 0x4c, 0x11, 0x22, 0x33 };
12102
12103 DHD_TRACE(("%s: ifidx %d\n", __FUNCTION__, ifidx));
12104
12105 if (dhd == NULL || dhd->iflist[ifidx] == NULL) {
12106 DHD_ERROR(("%s: Invalid Interface\n", __FUNCTION__));
12107 return BCME_ERROR;
12108 }
12109
12110 ASSERT(dhd && dhd->iflist[ifidx]);
12111 ifp = dhd->iflist[ifidx];
12112 net = ifp->net;
12113 ASSERT(net && (ifp->idx == ifidx));
12114
12115 ASSERT(!net->netdev_ops);
12116 net->netdev_ops = &dhd_ops_virt;
12117
12118 /* Ok, link into the network layer... */
12119 if (ifidx == 0) {
12120 /*
12121 * device functions for the primary interface only
12122 */
12123 net->netdev_ops = &dhd_ops_pri;
12124 if (!ETHER_ISNULLADDR(dhd->pub.mac.octet))
12125 memcpy(temp_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12126 memcpy(dhd->iflist[0]->mac_addr, dhd->pub.mac.octet, ETHER_ADDR_LEN);
12127 } else {
12128 /*
12129 * We have to use the primary MAC for virtual interfaces
12130 */
12131 memcpy(temp_addr, ifp->mac_addr, ETHER_ADDR_LEN);
12132 #if defined(OEM_ANDROID)
12133 /*
12134 * Android sets the locally administered bit to indicate that this is a
12135 * portable hotspot. This will not work in simultaneous AP/STA mode,
* nor with P2P. Need to set the dongle's MAC address, and then use that.
12137 */
12138 if (!memcmp(temp_addr, dhd->iflist[0]->mac_addr,
12139 ETHER_ADDR_LEN)) {
12140 DHD_ERROR(("%s interface [%s]: set locally administered bit in MAC\n",
12141 __func__, net->name));
12142 temp_addr[0] |= 0x02;
12143 memcpy(dhd->iflist[ifidx]->mac_addr, temp_addr, ETHER_ADDR_LEN);
12144 }
12145 #endif /* defined(OEM_ANDROID) */
12146 }
12147
12148 net->hard_header_len = ETH_HLEN + dhd->pub.hdrlen;
12149 net->ethtool_ops = &dhd_ethtool_ops;
12150
12151 #if defined(WL_WIRELESS_EXT)
12152 #if WIRELESS_EXT < 19
12153 net->get_wireless_stats = dhd_get_wireless_stats;
12154 #endif /* WIRELESS_EXT < 19 */
12155 #if WIRELESS_EXT > 12
12156 net->wireless_handlers = &wl_iw_handler_def;
12157 #endif /* WIRELESS_EXT > 12 */
12158 #endif /* defined(WL_WIRELESS_EXT) */
12159
12160 dhd->pub.rxsz = DBUS_RX_BUFFER_SIZE_DHD(net);
12161
12162 memcpy(net->dev_addr, temp_addr, ETHER_ADDR_LEN);
12163
12164 if (ifidx == 0)
12165 printf("%s\n", dhd_version);
12166
12167 if (need_rtnl_lock)
12168 err = register_netdev(net);
12169 else
12170 err = register_netdevice(net);
12171
12172 if (err != 0) {
12173 DHD_ERROR(("couldn't register the net device [%s], err %d\n", net->name, err));
12174 goto fail;
12175 }
12176
12177 printf("Register interface [%s] MAC: "MACDBG"\n\n", net->name,
12178 #if defined(CUSTOMER_HW4_DEBUG)
12179 MAC2STRDBG(dhd->pub.mac.octet));
12180 #else
12181 MAC2STRDBG(net->dev_addr));
12182 #endif /* CUSTOMER_HW4_DEBUG */
12183
12184 #if defined(OEM_ANDROID) && defined(SOFTAP) && defined(WL_WIRELESS_EXT) && \
12185 !defined(WL_CFG80211)
12186 wl_iw_iscan_set_scan_broadcast_prep(net, 1);
12187 #endif // endif
12188
12189 #if defined(OEM_ANDROID) && (defined(BCMPCIE) || defined(BCMLXSDMMC))
12190 if (ifidx == 0) {
12191 #ifdef BCMLXSDMMC
12192 up(&dhd_registration_sem);
12193 #endif /* BCMLXSDMMC */
12194 #ifndef ENABLE_INSMOD_NO_FW_LOAD
12195 if (!dhd_download_fw_on_driverload) {
12196 #ifdef WL_CFG80211
12197 wl_terminate_event_handler(net);
12198 #endif /* WL_CFG80211 */
12199 #if defined(DHD_LB_RXP)
12200 __skb_queue_purge(&dhd->rx_pend_queue);
12201 #endif /* DHD_LB_RXP */
12202
12203 #if defined(DHD_LB_TXP)
12204 skb_queue_purge(&dhd->tx_pend_queue);
12205 #endif /* DHD_LB_TXP */
12206
12207 #ifdef SHOW_LOGTRACE
12208 /* Release the skbs from queue for WLC_E_TRACE event */
12209 dhd_event_logtrace_flush_queue(dhdp);
12210 #endif /* SHOW_LOGTRACE */
12211
12212 #if defined(BCMPCIE) && defined(DHDTCPACK_SUPPRESS)
12213 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
12214 #endif /* BCMPCIE && DHDTCPACK_SUPPRESS */
12215 dhd_net_bus_devreset(net, TRUE);
12216 #ifdef BCMLXSDMMC
12217 dhd_net_bus_suspend(net);
12218 #endif /* BCMLXSDMMC */
12219 wifi_platform_set_power(dhdp->info->adapter, FALSE, WIFI_TURNOFF_DELAY);
12220 #if defined(BT_OVER_SDIO)
12221 dhd->bus_user_count--;
12222 #endif /* BT_OVER_SDIO */
12223 }
12224 #endif /* ENABLE_INSMOD_NO_FW_LOAD */
12225 }
12226 #endif /* OEM_ANDROID && (BCMPCIE || BCMLXSDMMC) */
12227 return 0;
12228
12229 fail:
12230 net->netdev_ops = NULL;
12231 return err;
12232 }
12233
12234 #ifdef WL_VIF_SUPPORT
12235 #define MAX_VIF_NUM 8
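/*
 * dhd_register_vif() - create up to MAX_VIF_NUM additional STA
 * interfaces through cfg80211, named from the vif_name module parameter;
 * when that name does not already end in a digit, a "%d" suffix is
 * appended so the kernel auto-numbers the interfaces.
 */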
12236 int
12237 dhd_register_vif(dhd_pub_t *dhdp)
12238 {
12239 dhd_info_t *dhd = (dhd_info_t *)dhdp->info;
12240 dhd_if_t *ifp;
12241 struct net_device *net;
12242 int err = BCME_OK, i;
12243 char viface_name[IFNAMSIZ] = {'\0'};
12244 ifp = dhd->iflist[0];
12245 net = ifp->net;
12246 if (vif_num && vif_num > MAX_VIF_NUM)
12247 vif_num = MAX_VIF_NUM;
12248 /* Set virtual interface name if it was provided as module parameter */
12249 if (vif_name[0]) {
12250 int len;
12251 char ch;
12252 strncpy(viface_name, vif_name, IFNAMSIZ);
12253 viface_name[IFNAMSIZ - 1] = 0;
12254 len = strlen(viface_name);
12255 ch = viface_name[len - 1];
12256 if ((ch > '9' || ch < '0') && (len < IFNAMSIZ - 2))
12257 strcat(viface_name, "%d");
12258 } else {
12259 DHD_ERROR(("%s check vif_name\n", __FUNCTION__));
12260 return BCME_BADOPTION;
12261 }
12262
12263 DHD_INFO(("%s Virtual interface [%s]:\n", __FUNCTION__, viface_name));
12264 rtnl_lock();
12265 for (i = 0; i < vif_num; i++) {
12266 if (wl_cfg80211_add_if(wl_get_cfg(net), net, WL_IF_TYPE_STA, viface_name, NULL)
12267 == NULL) {
12268 DHD_ERROR(("%s error Virtual interface [%s], i:%d\n", __FUNCTION__,
12269 viface_name, i));
12270 break;
12271 }
12272 }
12273 rtnl_unlock();
12274 return err;
12275 }
12276 #endif /* WL_VIF_SUPPORT */
12277 void
12278 dhd_bus_detach(dhd_pub_t *dhdp)
12279 {
12280 dhd_info_t *dhd;
12281
12282 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12283
12284 if (dhdp) {
12285 dhd = (dhd_info_t *)dhdp->info;
12286 if (dhd) {
12287
12288 /*
12289 * In case of Android cfg80211 driver, the bus is down in dhd_stop,
* calling stop again will cause SD read/write errors.
12291 */
12292 if (dhd->pub.busstate != DHD_BUS_DOWN) {
12293 /* Stop the protocol module */
12294 dhd_prot_stop(&dhd->pub);
12295
12296 /* Stop the bus module */
12297 dhd_bus_stop(dhd->pub.bus, TRUE);
12298 }
12299
12300 #if defined(OOB_INTR_ONLY) || defined(BCMSPI_ANDROID) || defined(BCMPCIE_OOB_HOST_WAKE)
12301 dhd_bus_oob_intr_unregister(dhdp);
12302 #endif /* OOB_INTR_ONLY || BCMSPI_ANDROID || BCMPCIE_OOB_HOST_WAKE */
12303 }
12304 }
12305 }
12306
12307 void dhd_detach(dhd_pub_t *dhdp)
12308 {
12309 dhd_info_t *dhd;
12310 unsigned long flags;
12311 int timer_valid = FALSE;
12312 struct net_device *dev = NULL;
12313 #ifdef WL_CFG80211
12314 struct bcm_cfg80211 *cfg = NULL;
12315 #endif // endif
12316 if (!dhdp)
12317 return;
12318
12319 dhd = (dhd_info_t *)dhdp->info;
12320 if (!dhd)
12321 return;
12322
12323 if (dhd->iflist[0])
12324 dev = dhd->iflist[0]->net;
12325
12326 if (dev) {
12327 rtnl_lock();
12328 if (dev->flags & IFF_UP) {
12329 /* If IFF_UP is still up, it indicates that
12330 * "ifconfig wlan0 down" hasn't been called.
12331 * So invoke dev_close explicitly here to
12332 * bring down the interface.
12333 */
12334 DHD_TRACE(("IFF_UP flag is up. Enforcing dev_close from detach \n"));
12335 dev_close(dev);
12336 }
12337 rtnl_unlock();
12338 }
12339
12340 DHD_TRACE(("%s: Enter state 0x%x\n", __FUNCTION__, dhd->dhd_state));
12341
12342 DHD_ERROR(("%s: making dhdpub up FALSE\n", __FUNCTION__));
12343 dhd->pub.up = 0;
12344 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
12345 /* Give sufficient time for threads to start running in case
12346 * dhd_attach() has failed
12347 */
12348 OSL_SLEEP(100);
12349 }
12350 #ifdef DHD_WET
12351 dhd_free_wet_info(&dhd->pub, dhd->pub.wet_info);
12352 #endif /* DHD_WET */
12353 #if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
12354 #endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
12355
12356 #ifdef PROP_TXSTATUS
12357 #ifdef DHD_WLFC_THREAD
12358 if (dhd->pub.wlfc_thread) {
12359 kthread_stop(dhd->pub.wlfc_thread);
12360 dhdp->wlfc_thread_go = TRUE;
12361 wake_up_interruptible(&dhdp->wlfc_wqhead);
12362 }
12363 dhd->pub.wlfc_thread = NULL;
12364 #endif /* DHD_WLFC_THREAD */
12365 #endif /* PROP_TXSTATUS */
12366
12367 #ifdef WL_CFG80211
12368 if (dev)
12369 wl_cfg80211_down(dev);
12370 #endif /* WL_CFG80211 */
12371
12372 if (dhd->dhd_state & DHD_ATTACH_STATE_PROT_ATTACH) {
12373
12374 #if defined(OEM_ANDROID) || !defined(BCMSDIO)
12375 dhd_bus_detach(dhdp);
12376 #endif /* OEM_ANDROID || !BCMSDIO */
12377 #ifdef OEM_ANDROID
12378 #ifdef BCMPCIE
12379 if (is_reboot == SYS_RESTART) {
12380 extern bcmdhd_wifi_platdata_t *dhd_wifi_platdata;
12381 if (dhd_wifi_platdata && !dhdp->dongle_reset) {
12382 dhdpcie_bus_clock_stop(dhdp->bus);
12383 wifi_platform_set_power(dhd_wifi_platdata->adapters,
12384 FALSE, WIFI_TURNOFF_DELAY);
12385 }
12386 }
12387 #endif /* BCMPCIE */
12388 #endif /* OEM_ANDROID */
12389 #ifndef PCIE_FULL_DONGLE
12390 #if defined(OEM_ANDROID) || !defined(BCMSDIO)
12391 if (dhdp->prot)
12392 dhd_prot_detach(dhdp);
12393 #endif /* OEM_ANDROID || !BCMSDIO */
12394 #endif /* !PCIE_FULL_DONGLE */
12395 }
12396
12397 #ifdef ARP_OFFLOAD_SUPPORT
12398 if (dhd_inetaddr_notifier_registered) {
12399 dhd_inetaddr_notifier_registered = FALSE;
12400 unregister_inetaddr_notifier(&dhd_inetaddr_notifier);
12401 }
12402 #endif /* ARP_OFFLOAD_SUPPORT */
12403 #if defined(CONFIG_IPV6) && defined(IPV6_NDO_SUPPORT)
12404 if (dhd_inet6addr_notifier_registered) {
12405 dhd_inet6addr_notifier_registered = FALSE;
12406 unregister_inet6addr_notifier(&dhd_inet6addr_notifier);
12407 }
12408 #endif /* CONFIG_IPV6 && IPV6_NDO_SUPPORT */
12409 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
12410 if (dhd->dhd_state & DHD_ATTACH_STATE_EARLYSUSPEND_DONE) {
12411 if (dhd->early_suspend.suspend)
12412 unregister_early_suspend(&dhd->early_suspend);
12413 }
12414 #endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */
12415
12416 #if defined(WL_WIRELESS_EXT)
12417 if (dhd->dhd_state & DHD_ATTACH_STATE_WL_ATTACH) {
/* Detach and unlink in the iw */
12419 wl_iw_detach();
12420 }
12421 #endif /* defined(WL_WIRELESS_EXT) */
12422
12423 #ifdef DHD_ULP
12424 dhd_ulp_deinit(dhd->pub.osh, dhdp);
12425 #endif /* DHD_ULP */
12426
12427 /* delete all interfaces, start with virtual */
12428 if (dhd->dhd_state & DHD_ATTACH_STATE_ADD_IF) {
12429 int i = 1;
12430 dhd_if_t *ifp;
12431
12432 /* Cleanup virtual interfaces */
12433 dhd_net_if_lock_local(dhd);
12434 for (i = 1; i < DHD_MAX_IFS; i++) {
12435 if (dhd->iflist[i]) {
12436 dhd_remove_if(&dhd->pub, i, TRUE);
12437 }
12438 }
12439 dhd_net_if_unlock_local(dhd);
12440
12441 /* delete primary interface 0 */
12442 ifp = dhd->iflist[0];
12443 if (ifp && ifp->net) {
12444
12445 #ifdef WL_CFG80211
12446 cfg = wl_get_cfg(ifp->net);
12447 #endif // endif
12448 /* in unregister_netdev case, the interface gets freed by net->destructor
12449 * (which is set to free_netdev)
12450 */
12451 if (ifp->net->reg_state == NETREG_UNINITIALIZED) {
12452 free_netdev(ifp->net);
12453 } else {
12454 #if defined(ARGOS_NOTIFY_CB)
12455 argos_register_notifier_deinit();
12456 #endif // endif
12457 #ifdef SET_RPS_CPUS
12458 custom_rps_map_clear(ifp->net->_rx);
12459 #endif /* SET_RPS_CPUS */
12460 netif_tx_disable(ifp->net);
12461 unregister_netdev(ifp->net);
12462 }
12463 #ifdef PCIE_FULL_DONGLE
12464 ifp->net = DHD_NET_DEV_NULL;
12465 #else
12466 ifp->net = NULL;
12467 #endif /* PCIE_FULL_DONGLE */
12468 #if defined(BCMSDIO) && !defined(OEM_ANDROID)
12469 dhd_bus_detach(dhdp);
12470
12471 if (dhdp->prot)
12472 dhd_prot_detach(dhdp);
12473 #endif /* BCMSDIO && !OEM_ANDROID */
12474
12475 #ifdef DHD_L2_FILTER
12476 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE,
12477 NULL, FALSE, dhdp->tickcnt);
12478 deinit_l2_filter_arp_table(dhdp->osh, ifp->phnd_arp_table);
12479 ifp->phnd_arp_table = NULL;
12480 #endif /* DHD_L2_FILTER */
12481
12482 dhd_if_del_sta_list(ifp);
12483
12484 MFREE(dhd->pub.osh, ifp, sizeof(*ifp));
12485 dhd->iflist[0] = NULL;
12486 }
12487 }
12488
12489 /* Clear the watchdog timer */
12490 DHD_GENERAL_LOCK(&dhd->pub, flags);
12491 timer_valid = dhd->wd_timer_valid;
12492 dhd->wd_timer_valid = FALSE;
12493 DHD_GENERAL_UNLOCK(&dhd->pub, flags);
12494 if (timer_valid)
12495 del_timer_sync(&dhd->timer);
12496 DHD_DISABLE_RUNTIME_PM(&dhd->pub);
12497
12498 if (dhd->dhd_state & DHD_ATTACH_STATE_THREADS_CREATED) {
12499 #ifdef DHD_PCIE_RUNTIMEPM
12500 if (dhd->thr_rpm_ctl.thr_pid >= 0) {
12501 PROC_STOP(&dhd->thr_rpm_ctl);
12502 }
12503 #endif /* DHD_PCIE_RUNTIMEPM */
12504 if (dhd->thr_wdt_ctl.thr_pid >= 0) {
12505 PROC_STOP(&dhd->thr_wdt_ctl);
12506 }
12507
12508 if (dhd->rxthread_enabled && dhd->thr_rxf_ctl.thr_pid >= 0) {
12509 PROC_STOP(&dhd->thr_rxf_ctl);
12510 }
12511
12512 if (dhd->thr_dpc_ctl.thr_pid >= 0) {
12513 PROC_STOP(&dhd->thr_dpc_ctl);
} else {
tasklet_kill(&dhd->tasklet);
}
12518 }
12519
12520 #ifdef WL_NATOE
12521 if (dhd->pub.nfct) {
12522 dhd_ct_close(dhd->pub.nfct);
12523 }
12524 #endif /* WL_NATOE */
12525
12526 #ifdef DHD_LB
12527 if (dhd->dhd_state & DHD_ATTACH_STATE_LB_ATTACH_DONE) {
12528 /* Clear the flag first to avoid calling the cpu notifier */
12529 dhd->dhd_state &= ~DHD_ATTACH_STATE_LB_ATTACH_DONE;
12530
12531 /* Kill the Load Balancing Tasklets */
12532 #ifdef DHD_LB_RXP
12533 cancel_work_sync(&dhd->rx_napi_dispatcher_work);
12534 __skb_queue_purge(&dhd->rx_pend_queue);
12535 #endif /* DHD_LB_RXP */
12536 #ifdef DHD_LB_TXP
12537 cancel_work_sync(&dhd->tx_dispatcher_work);
12538 tasklet_kill(&dhd->tx_tasklet);
12539 __skb_queue_purge(&dhd->tx_pend_queue);
12540 #endif /* DHD_LB_TXP */
12541 #ifdef DHD_LB_TXC
12542 cancel_work_sync(&dhd->tx_compl_dispatcher_work);
12543 tasklet_kill(&dhd->tx_compl_tasklet);
12544 #endif /* DHD_LB_TXC */
12545 #ifdef DHD_LB_RXC
12546 tasklet_kill(&dhd->rx_compl_tasklet);
12547 #endif /* DHD_LB_RXC */
12548
12549 /* Unregister from CPU Hotplug framework */
12550 dhd_unregister_cpuhp_callback(dhd);
12551
12552 dhd_cpumasks_deinit(dhd);
12553 DHD_LB_STATS_DEINIT(&dhd->pub);
12554 }
12555 #endif /* DHD_LB */
12556
12557 #if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
12558 cancel_work_sync(&dhd->axi_error_dispatcher_work);
12559 #endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
12560
12561 DHD_SSSR_MEMPOOL_DEINIT(&dhd->pub);
12562
12563 #ifdef WL_CFG80211
12564 if (dhd->dhd_state & DHD_ATTACH_STATE_CFG80211) {
12565 if (!cfg) {
12566 DHD_ERROR(("cfg NULL!\n"));
12567 ASSERT(0);
12568 } else {
12569 wl_cfg80211_detach(cfg);
12570 #ifdef DHD_MONITOR_INTERFACE
12571 dhd_monitor_uninit();
12572 #endif /* DHD_MONITOR_INTERFACE */
12573 }
12574 }
12575 #endif /* WL_CFG80211 */
12576
12577 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
12578 destroy_workqueue(dhd->tx_wq);
12579 dhd->tx_wq = NULL;
12580 destroy_workqueue(dhd->rx_wq);
12581 dhd->rx_wq = NULL;
12582 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
12583 #ifdef DEBUGABILITY
12584 if (dhdp->dbg) {
12585 #ifdef DBG_PKT_MON
12586 dhd_os_dbg_detach_pkt_monitor(dhdp);
12587 dhd_os_spin_lock_deinit(dhd->pub.osh, dhd->pub.dbg->pkt_mon_lock);
12588 #endif /* DBG_PKT_MON */
12589 }
12590 #endif /* DEBUGABILITY */
12591 if (dhdp->dbg) {
12592 dhd_os_dbg_detach(dhdp);
12593 }
12594 #ifdef DHD_PKT_LOGGING
12595 dhd_os_detach_pktlog(dhdp);
12596 #endif /* DHD_PKT_LOGGING */
12597 #ifdef DHD_STATUS_LOGGING
12598 dhd_detach_statlog(dhdp);
12599 #endif /* DHD_STATUS_LOGGING */
12600 #ifdef DHD_PKTDUMP_ROAM
12601 dhd_dump_pkt_deinit(dhdp);
12602 #endif /* DHD_PKTDUMP_ROAM */
12603 #ifdef WL_CFGVENDOR_SEND_HANG_EVENT
12604 if (dhd->pub.hang_info) {
12605 MFREE(dhd->pub.osh, dhd->pub.hang_info, VENDOR_SEND_HANG_EXT_INFO_LEN);
12606 }
12607 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT */
12608 #ifdef SHOW_LOGTRACE
12609 /* Release the skbs from queue for WLC_E_TRACE event */
12610 dhd_event_logtrace_flush_queue(dhdp);
12611
12612 /* Wait till event logtrace context finishes */
12613 dhd_cancel_logtrace_process_sync(dhd);
12614
12615 /* Remove ring proc entries */
12616 dhd_dbg_ring_proc_destroy(&dhd->pub);
12617
12618 if (dhd->dhd_state & DHD_ATTACH_LOGTRACE_INIT) {
12619 if (dhd->event_data.fmts) {
12620 MFREE(dhd->pub.osh, dhd->event_data.fmts,
12621 dhd->event_data.fmts_size);
12622 dhd->event_data.fmts = NULL;
12623 }
12624 if (dhd->event_data.raw_fmts) {
12625 MFREE(dhd->pub.osh, dhd->event_data.raw_fmts,
12626 dhd->event_data.raw_fmts_size);
12627 dhd->event_data.raw_fmts = NULL;
12628 }
12629 if (dhd->event_data.raw_sstr) {
12630 MFREE(dhd->pub.osh, dhd->event_data.raw_sstr,
12631 dhd->event_data.raw_sstr_size);
12632 dhd->event_data.raw_sstr = NULL;
12633 }
12634 if (dhd->event_data.rom_raw_sstr) {
12635 MFREE(dhd->pub.osh, dhd->event_data.rom_raw_sstr,
12636 dhd->event_data.rom_raw_sstr_size);
12637 dhd->event_data.rom_raw_sstr = NULL;
12638 }
12639 dhd->dhd_state &= ~DHD_ATTACH_LOGTRACE_INIT;
12640 }
12641 #endif /* SHOW_LOGTRACE */
12642 #ifdef PNO_SUPPORT
12643 if (dhdp->pno_state)
12644 dhd_pno_deinit(dhdp);
12645 #endif // endif
12646 #ifdef RTT_SUPPORT
12647 if (dhdp->rtt_state) {
12648 dhd_rtt_deinit(dhdp);
12649 }
12650 #endif // endif
12651 #if defined(CONFIG_PM_SLEEP)
12652 if (dhd_pm_notifier_registered) {
12653 unregister_pm_notifier(&dhd->pm_notifier);
12654 dhd_pm_notifier_registered = FALSE;
12655 }
12656 #endif /* CONFIG_PM_SLEEP */
12657
12658 #ifdef DEBUG_CPU_FREQ
12659 if (dhd->new_freq)
12660 free_percpu(dhd->new_freq);
12661 dhd->new_freq = NULL;
12662 cpufreq_unregister_notifier(&dhd->freq_trans, CPUFREQ_TRANSITION_NOTIFIER);
12663 #endif // endif
12664 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
12665 dhd->wakelock_wd_counter = 0;
12666 wake_lock_destroy(&dhd->wl_wdwake);
12667 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
12668 if (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) {
12669 DHD_TRACE(("wd wakelock count:%d\n", dhd->wakelock_wd_counter));
12670 DHD_OS_WAKE_LOCK_DESTROY(dhd);
12671 }
12672
12673 #ifdef DHDTCPACK_SUPPRESS
12674 /* This will free all MEM allocated for TCPACK SUPPRESS */
12675 dhd_tcpack_suppress_set(&dhd->pub, TCPACK_SUP_OFF);
12676 #endif /* DHDTCPACK_SUPPRESS */
12677
12678 #ifdef PCIE_FULL_DONGLE
12679 dhd_flow_rings_deinit(dhdp);
12680 if (dhdp->prot)
12681 dhd_prot_detach(dhdp);
12682 #endif // endif
12683
12684 #if defined(WLTDLS) && defined(PCIE_FULL_DONGLE)
12685 dhd_free_tdls_peer_list(dhdp);
12686 #endif // endif
12687
12688 #ifdef DUMP_IOCTL_IOV_LIST
12689 dhd_iov_li_delete(dhdp, &(dhdp->dump_iovlist_head));
12690 #endif /* DUMP_IOCTL_IOV_LIST */
12691 #ifdef DHD_DEBUG
/* delete the memory-waste feature list */
12693 dhd_mw_list_delete(dhdp, &(dhdp->mw_list_head));
12694 #endif /* DHD_DEBUG */
12695 #ifdef WL_MONITOR
12696 dhd_del_monitor_if(dhd);
12697 #endif /* WL_MONITOR */
12698
12699 #ifdef DHD_ERPOM
12700 if (dhdp->enable_erpom) {
12701 dhdp->pom_func_deregister(&dhdp->pom_wlan_handler);
12702 }
12703 #endif /* DHD_ERPOM */
12704
12705 cancel_work_sync(&dhd->dhd_hang_process_work);
12706
/* Prefer adding de-init code above this comment unless necessary.
 * The idea is to cancel the work queue, sysfs entries and flags at the end.
 */
12710 dhd_deferred_work_deinit(dhd->dhd_deferred_wq);
12711 dhd->dhd_deferred_wq = NULL;
12712
12713 /* log dump related buffers should be freed after wq is purged */
12714 #ifdef DHD_LOG_DUMP
12715 dhd_log_dump_deinit(&dhd->pub);
12716 #endif /* DHD_LOG_DUMP */
12717 #if defined(BCMPCIE)
if (dhdp->extended_trap_data) {
12720 MFREE(dhdp->osh, dhdp->extended_trap_data, BCMPCIE_EXT_TRAP_DATA_MAXLEN);
12721 dhdp->extended_trap_data = NULL;
12722 }
12723 #ifdef DNGL_AXI_ERROR_LOGGING
if (dhdp->axi_err_dump) {
12726 MFREE(dhdp->osh, dhdp->axi_err_dump, sizeof(dhd_axi_error_dump_t));
12727 dhdp->axi_err_dump = NULL;
12728 }
12729 #endif /* DNGL_AXI_ERROR_LOGGING */
12730 #endif /* BCMPCIE */
12731
12732 #ifdef DHD_DUMP_MNGR
12733 if (dhd->pub.dump_file_manage) {
12734 MFREE(dhd->pub.osh, dhd->pub.dump_file_manage,
12735 sizeof(dhd_dump_file_manage_t));
12736 }
12737 #endif /* DHD_DUMP_MNGR */
12738 dhd_sysfs_exit(dhd);
12739 dhd->pub.fw_download_status = FW_UNLOADED;
12740
12741 #if defined(BT_OVER_SDIO)
12742 mutex_destroy(&dhd->bus_user_lock);
12743 #endif /* BT_OVER_SDIO */
12744
12745 } /* dhd_detach */
12746
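/*
 * Release everything still owned by the public dhd structure. Each AMPDU
 * reorder buffer was allocated as one block: the reorder_info header plus
 * (max_idx + 1) packet pointer slots, so the same size must be recomputed
 * here for MFREE. For example, with max_idx = 63 on a 64-bit host this is
 * sizeof(struct reorder_info) + 64 * 8 bytes.
 */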
12747 void
12748 dhd_free(dhd_pub_t *dhdp)
12749 {
12750 dhd_info_t *dhd;
12751 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12752
12753 if (dhdp) {
12754 int i;
12755 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12756 if (dhdp->reorder_bufs[i]) {
12757 reorder_info_t *ptr;
12758 uint32 buf_size = sizeof(struct reorder_info);
12759
12760 ptr = dhdp->reorder_bufs[i];
12761
12762 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12763 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
12764 i, ptr->max_idx, buf_size));
12765
12766 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12767 dhdp->reorder_bufs[i] = NULL;
12768 }
12769 }
12770
12771 dhd_sta_pool_fini(dhdp, DHD_MAX_STA);
12772
12773 dhd = (dhd_info_t *)dhdp->info;
12774 if (dhdp->soc_ram) {
12775 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
12776 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12777 #else
12778 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12779 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
12780 dhdp->soc_ram = NULL;
12781 }
12782 if (dhd != NULL) {
12783
12784 /* If pointer is allocated by dhd_os_prealloc then avoid MFREE */
12785 if (dhd != (dhd_info_t *)dhd_os_prealloc(dhdp,
12786 DHD_PREALLOC_DHD_INFO, 0, FALSE))
12787 MFREE(dhd->pub.osh, dhd, sizeof(*dhd));
12788 dhd = NULL;
12789 }
12790 }
12791 }
12792
12793 void
12794 dhd_clear(dhd_pub_t *dhdp)
12795 {
12796 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12797
12798 if (dhdp) {
12799 int i;
12800 #ifdef DHDTCPACK_SUPPRESS
12801 /* Clean up timer/data structure for any remaining/pending packet or timer. */
12802 dhd_tcpack_info_tbl_clean(dhdp);
12803 #endif /* DHDTCPACK_SUPPRESS */
12804 for (i = 0; i < ARRAYSIZE(dhdp->reorder_bufs); i++) {
12805 if (dhdp->reorder_bufs[i]) {
12806 reorder_info_t *ptr;
12807 uint32 buf_size = sizeof(struct reorder_info);
12808
12809 ptr = dhdp->reorder_bufs[i];
12810
12811 buf_size += ((ptr->max_idx + 1) * sizeof(void*));
12812 DHD_REORDER(("free flow id buf %d, maxidx is %d, buf_size %d\n",
12813 i, ptr->max_idx, buf_size));
12814
12815 MFREE(dhdp->osh, dhdp->reorder_bufs[i], buf_size);
12816 dhdp->reorder_bufs[i] = NULL;
12817 }
12818 }
12819
12820 dhd_sta_pool_clear(dhdp, DHD_MAX_STA);
12821
12822 if (dhdp->soc_ram) {
12823 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
12824 DHD_OS_PREFREE(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
12825 #else
12826 MFREE(dhdp->osh, dhdp->soc_ram, dhdp->soc_ram_length);
12827 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
12828 dhdp->soc_ram = NULL;
12829 }
12830 }
12831 }
12832
12833 static void
12834 dhd_module_cleanup(void)
12835 {
12836 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
12837
12838 dhd_bus_unregister();
12839
12840 #if defined(OEM_ANDROID)
12841 wl_android_exit();
12842 #endif /* OEM_ANDROID */
12843
12844 dhd_wifi_platform_unregister_drv();
12845 }
12846
12847 static void __exit
12848 dhd_module_exit(void)
12849 {
12850 atomic_set(&exit_in_progress, 1);
12851 dhd_module_cleanup();
12852 unregister_reboot_notifier(&dhd_reboot_notifier);
12853 dhd_destroy_to_notifier_skt();
12854 }
12855
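/*
 * Module entry point. The platform driver registration is retried up to
 * POWERUP_MAX_RETRY + 1 times; the firmware/NVRAM module-parameter paths
 * are backed up first and restored before each retry, since a failed
 * probe may have modified them.
 */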
12856 static int __init
12857 dhd_module_init(void)
12858 {
12859 int err;
12860 int retry = POWERUP_MAX_RETRY;
12861
12862 DHD_ERROR(("%s in\n", __FUNCTION__));
12863
12864 DHD_PERIM_RADIO_INIT();
12865
12866 if (firmware_path[0] != '\0') {
12867 strncpy(fw_bak_path, firmware_path, MOD_PARAM_PATHLEN);
12868 fw_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
12869 }
12870
12871 if (nvram_path[0] != '\0') {
12872 strncpy(nv_bak_path, nvram_path, MOD_PARAM_PATHLEN);
12873 nv_bak_path[MOD_PARAM_PATHLEN-1] = '\0';
12874 }
12875
12876 do {
12877 err = dhd_wifi_platform_register_drv();
12878 if (!err) {
12879 register_reboot_notifier(&dhd_reboot_notifier);
12880 break;
12881 } else {
12882 DHD_ERROR(("%s: Failed to load the driver, try cnt %d\n",
12883 __FUNCTION__, retry));
12884 strncpy(firmware_path, fw_bak_path, MOD_PARAM_PATHLEN);
12885 firmware_path[MOD_PARAM_PATHLEN-1] = '\0';
12886 strncpy(nvram_path, nv_bak_path, MOD_PARAM_PATHLEN);
12887 nvram_path[MOD_PARAM_PATHLEN-1] = '\0';
12888 }
12889 } while (retry--);
12890
12891 dhd_create_to_notifier_skt();
12892
12893 if (err) {
DHD_ERROR(("%s: Failed to load driver; max retries reached\n", __FUNCTION__));
12895 } else {
12896 if (!dhd_download_fw_on_driverload) {
12897 dhd_driver_init_done = TRUE;
12898 }
12899 }
12900
12901 DHD_ERROR(("%s out\n", __FUNCTION__));
12902
12903 return err;
12904 }
12905
12906 static int
12907 dhd_reboot_callback(struct notifier_block *this, unsigned long code, void *unused)
12908 {
12909 DHD_TRACE(("%s: code = %ld\n", __FUNCTION__, code));
12910 if (code == SYS_RESTART) {
12911 #ifdef OEM_ANDROID
12912 #ifdef BCMPCIE
12913 is_reboot = code;
12914 #endif /* BCMPCIE */
12915 #else
12916 dhd_module_cleanup();
12917 #endif /* OEM_ANDROID */
12918 }
12919 return NOTIFY_DONE;
12920 }
12921
12922 #if defined(CONFIG_DEFERRED_INITCALLS) && !defined(EXYNOS_PCIE_MODULE_PATCH)
12923 #if defined(CONFIG_MACH_UNIVERSAL7420) || defined(CONFIG_SOC_EXYNOS8890) || \
12924 defined(CONFIG_ARCH_MSM8996) || defined(CONFIG_ARCH_MSM8998) || \
12925 defined(CONFIG_SOC_EXYNOS8895) || defined(CONFIG_SOC_EXYNOS9810) || \
12926 defined(CONFIG_ARCH_SDM845) || defined(CONFIG_SOC_EXYNOS9820) || \
12927 defined(CONFIG_ARCH_SM8150)
12928 deferred_module_init_sync(dhd_module_init);
12929 #else
12930 deferred_module_init(dhd_module_init);
12931 #endif /* CONFIG_MACH_UNIVERSAL7420 || CONFIG_SOC_EXYNOS8890 ||
12932 * CONFIG_ARCH_MSM8996 || CONFIG_ARCH_MSM8998 || CONFIG_SOC_EXYNOS8895
12933 * CONFIG_SOC_EXYNOS9810 || CONFIG_ARCH_SDM845 || CONFIG_SOC_EXYNOS9820
12934 * CONFIG_ARCH_SM8150
12935 */
12936 #elif defined(USE_LATE_INITCALL_SYNC)
12937 late_initcall_sync(dhd_module_init);
12938 #else
12939 late_initcall(dhd_module_init);
12940 #endif /* USE_LATE_INITCALL_SYNC */
12941
12942 module_exit(dhd_module_exit);
12943
12944 /*
12945 * OS specific functions required to implement DHD driver in OS independent way
12946 */
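/*
 * dhd_os_proto_block()/dhd_os_proto_unblock() serialize protocol-layer
 * transactions with a semaphore, dropping the perimeter lock while
 * blocked. A typical (illustrative) caller pattern:
 *
 *   if (dhd_os_proto_block(pub)) {
 *           ... issue control transaction to the dongle ...
 *           dhd_os_proto_unblock(pub);
 *   }
 *
 * Both return 1 on success and 0 if the dhd_info backpointer is gone.
 */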
12947 int
12948 dhd_os_proto_block(dhd_pub_t *pub)
12949 {
12950 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12951
12952 if (dhd) {
12953 DHD_PERIM_UNLOCK(pub);
12954
12955 down(&dhd->proto_sem);
12956
12957 DHD_PERIM_LOCK(pub);
12958 return 1;
12959 }
12960
12961 return 0;
12962 }
12963
12964 int
12965 dhd_os_proto_unblock(dhd_pub_t *pub)
12966 {
12967 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12968
12969 if (dhd) {
12970 up(&dhd->proto_sem);
12971 return 1;
12972 }
12973
12974 return 0;
12975 }
12976
12977 void
12978 dhd_os_dhdiovar_lock(dhd_pub_t *pub)
12979 {
12980 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12981
12982 if (dhd) {
12983 mutex_lock(&dhd->dhd_iovar_mutex);
12984 }
12985 }
12986
12987 void
12988 dhd_os_dhdiovar_unlock(dhd_pub_t *pub)
12989 {
12990 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
12991
12992 if (dhd) {
12993 mutex_unlock(&dhd->dhd_iovar_mutex);
12994 }
12995 }
12996
12997 void
12998 dhd_os_logdump_lock(dhd_pub_t *pub)
12999 {
13000 dhd_info_t *dhd = NULL;
13001
13002 if (!pub)
13003 return;
13004
13005 dhd = (dhd_info_t *)(pub->info);
13006
13007 if (dhd) {
13008 mutex_lock(&dhd->logdump_lock);
13009 }
13010 }
13011
13012 void
13013 dhd_os_logdump_unlock(dhd_pub_t *pub)
13014 {
13015 dhd_info_t *dhd = NULL;
13016
13017 if (!pub)
13018 return;
13019
13020 dhd = (dhd_info_t *)(pub->info);
13021
13022 if (dhd) {
13023 mutex_unlock(&dhd->logdump_lock);
13024 }
13025 }
13026
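/*
 * The debug-ring lock callbacks take a void *lock (a struct mutex) and
 * mimic a spinlock-style save/restore API; since a mutex has no IRQ
 * flags, dhd_os_dbgring_lock() always returns 0 and the flags argument
 * to dhd_os_dbgring_unlock() is ignored.
 */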
13027 unsigned long
13028 dhd_os_dbgring_lock(void *lock)
13029 {
13030 if (!lock)
13031 return 0;
13032
13033 mutex_lock((struct mutex *)lock);
13034
13035 return 0;
13036 }
13037
13038 void
13039 dhd_os_dbgring_unlock(void *lock, unsigned long flags)
13040 {
13041 BCM_REFERENCE(flags);
13042
13043 if (!lock)
13044 return;
13045
13046 mutex_unlock((struct mutex *)lock);
13047 }
13048
13049 unsigned int
13050 dhd_os_get_ioctl_resp_timeout(void)
13051 {
13052 return ((unsigned int)dhd_ioctl_timeout_msec);
13053 }
13054
13055 void
13056 dhd_os_set_ioctl_resp_timeout(unsigned int timeout_msec)
13057 {
13058 dhd_ioctl_timeout_msec = (int)timeout_msec;
13059 }
13060
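/*
 * The *_wait/*_wake pairs below wrap wait_event_timeout() around various
 * completion conditions. Each wait returns the wait_event_timeout()
 * result: 0 on timeout with the condition still false, nonzero once the
 * condition became true. Illustrative use (the condition field name is
 * an assumption; the caller supplies the actual variable):
 *
 *   resp_received = 0;
 *   ... post ioctl to dongle ...
 *   if (dhd_os_ioctl_resp_wait(pub, &resp_received) == 0)
 *           ... handle ioctl timeout ...
 */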
13061 int
13062 dhd_os_ioctl_resp_wait(dhd_pub_t *pub, uint *condition)
13063 {
13064 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13065 int timeout;
13066
/* Convert timeout in milliseconds to jiffies */
13068 timeout = msecs_to_jiffies(dhd_ioctl_timeout_msec);
13069
13070 DHD_PERIM_UNLOCK(pub);
13071
13072 timeout = wait_event_timeout(dhd->ioctl_resp_wait, (*condition), timeout);
13073
13074 DHD_PERIM_LOCK(pub);
13075
13076 return timeout;
13077 }
13078
13079 int
13080 dhd_os_ioctl_resp_wake(dhd_pub_t *pub)
13081 {
13082 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13083
13084 wake_up(&dhd->ioctl_resp_wait);
13085 return 0;
13086 }
13087
13088 int
13089 dhd_os_d3ack_wait(dhd_pub_t *pub, uint *condition)
13090 {
13091 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13092 int timeout;
13093
/* Convert timeout in milliseconds to jiffies */
13095 timeout = msecs_to_jiffies(D3_ACK_RESP_TIMEOUT);
13096
13097 DHD_PERIM_UNLOCK(pub);
13098
13099 timeout = wait_event_timeout(dhd->d3ack_wait, (*condition), timeout);
13100
13101 DHD_PERIM_LOCK(pub);
13102
13103 return timeout;
13104 }
13105
13106 int
13107 dhd_os_d3ack_wake(dhd_pub_t *pub)
13108 {
13109 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13110
13111 wake_up(&dhd->d3ack_wait);
13112 return 0;
13113 }
13114
13115 int
13116 dhd_os_busbusy_wait_negation(dhd_pub_t *pub, uint *condition)
13117 {
13118 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13119 int timeout;
13120
/* Wait for bus usage contexts to gracefully exit within some timeout value.
 * Set the timeout slightly higher than dhd_ioctl_timeout_msec
 * so that IOCTL timeouts are not affected.
 */
/* Convert timeout in milliseconds to jiffies */
13126 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13127
13128 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, !(*condition), timeout);
13129
13130 return timeout;
13131 }
13132
13133 /*
13134 * Wait until the condition *var == condition is met.
13135 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13136 * Returns 1 if the @condition evaluated to true
13137 */
13138 int
13139 dhd_os_busbusy_wait_condition(dhd_pub_t *pub, uint *var, uint condition)
13140 {
13141 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13142 int timeout;
13143
/* Convert timeout in milliseconds to jiffies */
13145 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13146
13147 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait, (*var == condition), timeout);
13148
13149 return timeout;
13150 }
13151
13152 /*
13153 * Wait until the '(*var & bitmask) == condition' is met.
13154 * Returns 0 if the @condition evaluated to false after the timeout elapsed
13155 * Returns 1 if the @condition evaluated to true
13156 */
13157 int
13158 dhd_os_busbusy_wait_bitmask(dhd_pub_t *pub, uint *var,
13159 uint bitmask, uint condition)
13160 {
13161 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13162 int timeout;
13163
/* Convert timeout in milliseconds to jiffies */
13165 timeout = msecs_to_jiffies(DHD_BUS_BUSY_TIMEOUT);
13166
13167 timeout = wait_event_timeout(dhd->dhd_bus_busy_state_wait,
13168 ((*var & bitmask) == condition), timeout);
13169
13170 return timeout;
13171 }
13172
13173 int
13174 dhd_os_dmaxfer_wait(dhd_pub_t *pub, uint *condition)
13175 {
13176 int ret = 0;
13177 dhd_info_t * dhd = (dhd_info_t *)(pub->info);
13178 int timeout;
13179
13180 timeout = msecs_to_jiffies(IOCTL_DMAXFER_TIMEOUT);
13181
13182 DHD_PERIM_UNLOCK(pub);
13183 ret = wait_event_timeout(dhd->dmaxfer_wait, (*condition), timeout);
13184 DHD_PERIM_LOCK(pub);
13185
return ret;
}
13189
13190 int
13191 dhd_os_dmaxfer_wake(dhd_pub_t *pub)
13192 {
13193 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
13194
13195 wake_up(&dhd->dmaxfer_wait);
13196 return 0;
13197 }
13198
13199 void
13200 dhd_os_tx_completion_wake(dhd_pub_t *dhd)
13201 {
/* Call wmb() to make sure the event value is updated before waking up the waiter */
13203 OSL_SMP_WMB();
13204 wake_up(&dhd->tx_completion_wait);
13205 }
13206
13207 /* Fix compilation error for FC11 */
13208 INLINE int
13209 dhd_os_busbusy_wake(dhd_pub_t *pub)
13210 {
13211 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
/* Call wmb() to make sure the event value is updated before waking up the waiter */
13213 OSL_SMP_WMB();
13214 wake_up(&dhd->dhd_bus_busy_state_wait);
13215 return 0;
13216 }
13217
13218 void
13219 dhd_os_wd_timer_extend(void *bus, bool extend)
13220 {
13221 dhd_pub_t *pub = bus;
13222 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13223
13224 if (extend)
13225 dhd_os_wd_timer(bus, WATCHDOG_EXTEND_INTERVAL);
13226 else
13227 dhd_os_wd_timer(bus, dhd->default_wd_interval);
13228 }
13229
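/*
 * Arm or disarm the dongle watchdog timer. wdtick semantics: a nonzero
 * value (re)arms the timer (and on SDIO takes the watchdog wakelock);
 * zero stops the timer and, on SDIO, releases the wakelock. Nothing is
 * armed while the bus is still down.
 */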
13230 void
13231 dhd_os_wd_timer(void *bus, uint wdtick)
13232 {
13233 dhd_pub_t *pub = bus;
13234 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13235 unsigned long flags;
13236
13237 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13238
13239 if (!dhd) {
13240 DHD_ERROR(("%s: dhd NULL\n", __FUNCTION__));
13241 return;
13242 }
13243
13244 DHD_GENERAL_LOCK(pub, flags);
13245
13246 /* don't start the wd until fw is loaded */
13247 if (pub->busstate == DHD_BUS_DOWN) {
13248 DHD_GENERAL_UNLOCK(pub, flags);
13249 #ifdef BCMSDIO
13250 if (!wdtick) {
13251 DHD_OS_WD_WAKE_UNLOCK(pub);
13252 }
13253 #endif /* BCMSDIO */
13254 return;
13255 }
13256
13257 /* Totally stop the timer */
13258 if (!wdtick && dhd->wd_timer_valid == TRUE) {
13259 dhd->wd_timer_valid = FALSE;
13260 DHD_GENERAL_UNLOCK(pub, flags);
13261 del_timer_sync(&dhd->timer);
13262 #ifdef BCMSDIO
13263 DHD_OS_WD_WAKE_UNLOCK(pub);
13264 #endif /* BCMSDIO */
13265 return;
13266 }
13267
13268 if (wdtick) {
13269 #ifdef BCMSDIO
13270 DHD_OS_WD_WAKE_LOCK(pub);
13271 dhd_watchdog_ms = (uint)wdtick;
13272 #endif /* BCMSDIO */
/* Re-arm the timer with the last watchdog period */
13274 mod_timer(&dhd->timer, jiffies + msecs_to_jiffies(dhd_watchdog_ms));
13275 dhd->wd_timer_valid = TRUE;
13276 }
13277 DHD_GENERAL_UNLOCK(pub, flags);
13278 }
13279
13280 #ifdef DHD_PCIE_RUNTIMEPM
13281 void
13282 dhd_os_runtimepm_timer(void *bus, uint tick)
13283 {
13284 dhd_pub_t *pub = bus;
13285 dhd_info_t *dhd = (dhd_info_t *)pub->info;
13286 unsigned long flags;
13287
13288 DHD_TRACE(("%s: Enter\n", __FUNCTION__));
13289
13290 if (!dhd) {
13291 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
13292 return;
13293 }
13294
13295 DHD_GENERAL_LOCK(pub, flags);
13296
13297 /* don't start the RPM until fw is loaded */
13298 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(pub)) {
13299 DHD_GENERAL_UNLOCK(pub, flags);
13300 return;
13301 }
13302
13303 /* If tick is non-zero, the request is to start the timer */
13304 if (tick) {
/* Start the timer only if it's not already running */
13306 if (dhd->rpm_timer_valid == FALSE) {
13307 mod_timer(&dhd->rpm_timer, jiffies + msecs_to_jiffies(dhd_runtimepm_ms));
13308 dhd->rpm_timer_valid = TRUE;
13309 DHD_ERROR(("DHD Runtime PM Enabled \n"));
13310 }
13311 } else {
/* tick is zero, so we have to stop the timer */
/* Stop the timer only if it's running; otherwise there is nothing to do */
13314 if (dhd->rpm_timer_valid == TRUE) {
13315 dhd->rpm_timer_valid = FALSE;
13316 DHD_GENERAL_UNLOCK(pub, flags);
13317 del_timer_sync(&dhd->rpm_timer);
13318 DHD_ERROR(("DHD Runtime PM Disabled \n"));
13319 /* we have already released the lock, so just go to exit */
13320 goto exit;
13321 }
13322 }
13323
13324 DHD_GENERAL_UNLOCK(pub, flags);
13325 exit:
return;
}
13329
13330 #endif /* DHD_PCIE_RUNTIMEPM */
13331
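/*
 * Firmware/NVRAM image access helpers. dhd_os_open_image1() returns an
 * opaque handle (a struct file *) or NULL; dhd_os_get_image_block()
 * reads sequential blocks from it. A minimal (illustrative) download
 * loop, assuming a caller-provided buf[] of size len and int n:
 *
 *   void *img = dhd_os_open_image1(pub, fw_path);
 *   if (img) {
 *           while ((n = dhd_os_get_image_block(buf, len, img)) > 0)
 *                   ... write n bytes to the dongle ...
 *           dhd_os_close_image1(pub, img);
 *   }
 */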
13332 void *
13333 dhd_os_open_image1(dhd_pub_t *pub, char *filename)
13334 {
13335 struct file *fp;
13336 int size;
13337
13338 fp = filp_open(filename, O_RDONLY, 0);
13339 /*
13340 * 2.6.11 (FC4) supports filp_open() but later revs don't?
13341 * Alternative:
13342 * fp = open_namei(AT_FDCWD, filename, O_RD, 0);
13343 * ???
13344 */
13345 if (IS_ERR(fp)) {
13346 fp = NULL;
13347 goto err;
13348 }
13349
13350 if (!S_ISREG(file_inode(fp)->i_mode)) {
13351 DHD_ERROR(("%s: %s is not regular file\n", __FUNCTION__, filename));
13352 fp = NULL;
13353 goto err;
13354 }
13355
13356 size = i_size_read(file_inode(fp));
13357 if (size <= 0) {
13358 DHD_ERROR(("%s: %s file size invalid %d\n", __FUNCTION__, filename, size));
13359 fp = NULL;
13360 goto err;
13361 }
13362
13363 DHD_ERROR(("%s: %s (%d bytes) open success\n", __FUNCTION__, filename, size));
13364
13365 err:
13366 return fp;
13367 }
13368
13369 int
13370 dhd_os_get_image_block(char *buf, int len, void *image)
13371 {
13372 struct file *fp = (struct file *)image;
13373 int rdlen;
13374 int size;
13375
13376 if (!image) {
13377 return 0;
13378 }
13379
13380 size = i_size_read(file_inode(fp));
13381 rdlen = compat_kernel_read(fp, fp->f_pos, buf, MIN(len, size));
13382
13383 if (len >= size && size != rdlen) {
13384 return -EIO;
13385 }
13386
13387 if (rdlen > 0) {
13388 fp->f_pos += rdlen;
13389 }
13390
13391 return rdlen;
13392 }
13393
13394 #if defined(BT_OVER_SDIO)
13395 int
13396 dhd_os_gets_image(dhd_pub_t *pub, char *str, int len, void *image)
13397 {
13398 struct file *fp = (struct file *)image;
13399 int rd_len;
13400 uint str_len = 0;
13401 char *str_end = NULL;
13402
13403 if (!image)
13404 return 0;
13405
13406 rd_len = compat_kernel_read(fp, fp->f_pos, str, len);
13407 str_end = strnchr(str, len, '\n');
13408 if (str_end == NULL) {
13409 goto err;
13410 }
13411 str_len = (uint)(str_end - str);
13412
13413 /* Advance file pointer past the string length */
13414 fp->f_pos += str_len + 1;
13415 bzero(str_end, rd_len - str_len);
13416
13417 err:
13418 return str_len;
13419 }
13420 #endif /* defined (BT_OVER_SDIO) */
13421
13422 int
13423 dhd_os_get_image_size(void *image)
13424 {
13425 struct file *fp = (struct file *)image;
13426 int size;
13427 if (!image) {
13428 return 0;
13429 }
13430
13431 size = i_size_read(file_inode(fp));
13432
13433 return size;
13434 }
13435
13436 void
13437 dhd_os_close_image1(dhd_pub_t *pub, void *image)
13438 {
13439 if (image) {
13440 filp_close((struct file *)image, NULL);
13441 }
13442 }
13443
13444 void
13445 dhd_os_sdlock(dhd_pub_t *pub)
13446 {
13447 dhd_info_t *dhd;
13448
13449 dhd = (dhd_info_t *)(pub->info);
13450
13451 if (dhd_dpc_prio >= 0)
13452 down(&dhd->sdsem);
13453 else
13454 spin_lock_bh(&dhd->sdlock);
13455 }
13456
13457 void
13458 dhd_os_sdunlock(dhd_pub_t *pub)
13459 {
13460 dhd_info_t *dhd;
13461
13462 dhd = (dhd_info_t *)(pub->info);
13463
13464 if (dhd_dpc_prio >= 0)
13465 up(&dhd->sdsem);
13466 else
13467 spin_unlock_bh(&dhd->sdlock);
13468 }
13469
13470 void
13471 dhd_os_sdlock_txq(dhd_pub_t *pub)
13472 {
13473 dhd_info_t *dhd;
13474
13475 dhd = (dhd_info_t *)(pub->info);
13476 spin_lock_bh(&dhd->txqlock);
13477 }
13478
13479 void
13480 dhd_os_sdunlock_txq(dhd_pub_t *pub)
13481 {
13482 dhd_info_t *dhd;
13483
13484 dhd = (dhd_info_t *)(pub->info);
13485 spin_unlock_bh(&dhd->txqlock);
13486 }
13487
13488 void
13489 dhd_os_sdlock_rxq(dhd_pub_t *pub)
13490 {
13491 }
13492
13493 void
13494 dhd_os_sdunlock_rxq(dhd_pub_t *pub)
13495 {
13496 }
13497
13498 static void
13499 dhd_os_rxflock(dhd_pub_t *pub)
13500 {
13501 dhd_info_t *dhd;
13502
13503 dhd = (dhd_info_t *)(pub->info);
13504 spin_lock_bh(&dhd->rxf_lock);
13505
13506 }
13507
13508 static void
13509 dhd_os_rxfunlock(dhd_pub_t *pub)
13510 {
13511 dhd_info_t *dhd;
13512
13513 dhd = (dhd_info_t *)(pub->info);
13514 spin_unlock_bh(&dhd->rxf_lock);
13515 }
13516
13517 #ifdef DHDTCPACK_SUPPRESS
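/*
 * TCP ACK suppression lock. On SDIO builds the lock is only taken from
 * softirq/process context, so spin_lock_bh() suffices and the returned
 * flags value is unused; on other buses it may be taken from IRQ context,
 * hence the spin_lock_irqsave() variant and the flags round-trip.
 */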
13518 unsigned long
13519 dhd_os_tcpacklock(dhd_pub_t *pub)
13520 {
13521 dhd_info_t *dhd;
13522 unsigned long flags = 0;
13523
13524 dhd = (dhd_info_t *)(pub->info);
13525
13526 if (dhd) {
13527 #ifdef BCMSDIO
13528 spin_lock_bh(&dhd->tcpack_lock);
13529 #else
13530 spin_lock_irqsave(&dhd->tcpack_lock, flags);
13531 #endif /* BCMSDIO */
13532 }
13533
13534 return flags;
13535 }
13536
13537 void
13538 dhd_os_tcpackunlock(dhd_pub_t *pub, unsigned long flags)
13539 {
13540 dhd_info_t *dhd;
13541
13542 #ifdef BCMSDIO
13543 BCM_REFERENCE(flags);
13544 #endif /* BCMSDIO */
13545
13546 dhd = (dhd_info_t *)(pub->info);
13547
13548 if (dhd) {
13549 #ifdef BCMSDIO
13550 spin_unlock_bh(&dhd->tcpack_lock);
13551 #else
13552 spin_unlock_irqrestore(&dhd->tcpack_lock, flags);
13553 #endif /* BCMSDIO */
13554 }
13555 }
13556 #endif /* DHDTCPACK_SUPPRESS */
13557
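/*
 * Preallocated-memory helper: first try the platform's static prealloc
 * pool for the given section, and only fall back to kmalloc() when the
 * pool has no buffer and the caller allows it. GFP_ATOMIC is used when
 * sleeping is not permitted.
 */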
13558 uint8* dhd_os_prealloc(dhd_pub_t *dhdpub, int section, uint size, bool kmalloc_if_fail)
13559 {
13560 uint8* buf;
gfp_t flags = CAN_SLEEP() ? GFP_KERNEL : GFP_ATOMIC;
13562
13563 buf = (uint8*)wifi_platform_prealloc(dhdpub->info->adapter, section, size);
13564 if (buf == NULL && kmalloc_if_fail)
13565 buf = kmalloc(size, flags);
13566
13567 return buf;
13568 }
13569
13570 void dhd_os_prefree(dhd_pub_t *dhdpub, void *addr, uint size)
13571 {
13572 }
13573
13574 #if defined(WL_WIRELESS_EXT)
13575 struct iw_statistics *
13576 dhd_get_wireless_stats(struct net_device *dev)
13577 {
13578 int res = 0;
13579 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13580
13581 if (!dhd->pub.up) {
13582 return NULL;
13583 }
13584
13585 res = wl_iw_get_wireless_stats(dev, &dhd->iw.wstats);
13586
13587 if (res == 0)
13588 return &dhd->iw.wstats;
13589 else
13590 return NULL;
13591 }
13592 #endif /* defined(WL_WIRELESS_EXT) */
13593
13594 static int
13595 dhd_wl_host_event(dhd_info_t *dhd, int ifidx, void *pktdata, uint16 pktlen,
13596 wl_event_msg_t *event, void **data)
13597 {
13598 int bcmerror = 0;
13599 #ifdef WL_CFG80211
13600 unsigned long flags = 0;
13601 #endif /* WL_CFG80211 */
13602 ASSERT(dhd != NULL);
13603
13604 #ifdef SHOW_LOGTRACE
13605 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13606 &dhd->event_data);
13607 #else
13608 bcmerror = wl_process_host_event(&dhd->pub, &ifidx, pktdata, pktlen, event, data,
13609 NULL);
13610 #endif /* SHOW_LOGTRACE */
13611 if (unlikely(bcmerror != BCME_OK)) {
13612 return bcmerror;
13613 }
13614
13615 if (ntoh32(event->event_type) == WLC_E_IF) {
/* WLC_E_IF event types are consumed by wl_process_host_event.
 * For ifadd/del ops, the netdev ptr may not be valid at this
 * point, so return before invoking cfg80211/wext handlers.
 */
13620 return BCME_OK;
13621 }
13622
13623 #if defined(WL_WIRELESS_EXT)
13624 if (event->bsscfgidx == 0) {
13625 /*
13626 * Wireless ext is on primary interface only
13627 */
13628 ASSERT(dhd->iflist[ifidx] != NULL);
13629 ASSERT(dhd->iflist[ifidx]->net != NULL);
13630
13631 if (dhd->iflist[ifidx]->net) {
13632 wl_iw_event(dhd->iflist[ifidx]->net, event, *data);
13633 }
13634 }
13635 #endif /* defined(WL_WIRELESS_EXT) */
13636
13637 #ifdef WL_CFG80211
13638 if (dhd->iflist[ifidx]->net) {
13639 spin_lock_irqsave(&dhd->pub.up_lock, flags);
13640 if (dhd->pub.up) {
13641 wl_cfg80211_event(dhd->iflist[ifidx]->net, event, *data);
13642 }
13643 spin_unlock_irqrestore(&dhd->pub.up_lock, flags);
13644 }
13645 #endif /* defined(WL_CFG80211) */
13646
13647 return (bcmerror);
13648 }
13649
13650 /* send up locally generated event */
13651 void
13652 dhd_sendup_event(dhd_pub_t *dhdp, wl_event_msg_t *event, void *data)
13653 {
13654 switch (ntoh32(event->event_type)) {
13655 /* Handle error case or further events here */
13656 default:
13657 break;
13658 }
13659 }
13660
13661 #ifdef LOG_INTO_TCPDUMP
13662 void
13663 dhd_sendup_log(dhd_pub_t *dhdp, void *data, int data_len)
13664 {
13665 struct sk_buff *p, *skb;
13666 uint32 pktlen;
13667 int len;
13668 dhd_if_t *ifp;
13669 dhd_info_t *dhd;
13670 uchar *skb_data;
13671 int ifidx = 0;
13672 struct ether_header eth;
13673
13674 pktlen = sizeof(eth) + data_len;
13675 dhd = dhdp->info;
13676
13677 if ((p = PKTGET(dhdp->osh, pktlen, FALSE))) {
13678 ASSERT(ISALIGNED((uintptr)PKTDATA(dhdp->osh, p), sizeof(uint32)));
13679
bcopy(&dhdp->mac, &eth.ether_dhost, ETHER_ADDR_LEN);
bcopy(&dhdp->mac, &eth.ether_shost, ETHER_ADDR_LEN);
ETHER_TOGGLE_LOCALADDR(&eth.ether_shost);
eth.ether_type = hton16(ETHER_TYPE_BRCM);

bcopy((void *)&eth, PKTDATA(dhdp->osh, p), sizeof(eth));
13686 bcopy(data, PKTDATA(dhdp->osh, p) + sizeof(eth), data_len);
13687 skb = PKTTONATIVE(dhdp->osh, p);
13688 skb_data = skb->data;
13689 len = skb->len;
13690
13691 ifidx = dhd_ifname2idx(dhd, "wlan0");
13692 ifp = dhd->iflist[ifidx];
13693 if (ifp == NULL)
13694 ifp = dhd->iflist[0];
13695
13696 ASSERT(ifp);
13697 skb->dev = ifp->net;
13698 skb->protocol = eth_type_trans(skb, skb->dev);
13699 skb->data = skb_data;
13700 skb->len = len;
13701
13702 /* Strip header, count, deliver upward */
13703 skb_pull(skb, ETH_HLEN);
13704
13705 bcm_object_trace_opr(skb, BCM_OBJDBG_REMOVE,
13706 __FUNCTION__, __LINE__);
13707 /* Send the packet */
13708 if (in_interrupt()) {
13709 netif_rx(skb);
13710 } else {
13711 netif_rx_ni(skb);
13712 }
13713 } else {
13714 /* Could not allocate a sk_buf */
13715 DHD_ERROR(("%s: unable to alloc sk_buf", __FUNCTION__));
13716 }
13717 }
13718 #endif /* LOG_INTO_TCPDUMP */
13719
13720 void dhd_wait_for_event(dhd_pub_t *dhd, bool *lockvar)
13721 {
13722 #if defined(BCMSDIO)
13723 struct dhd_info *dhdinfo = dhd->info;
13724
13725 int timeout = msecs_to_jiffies(IOCTL_RESP_TIMEOUT);
13726
13727 dhd_os_sdunlock(dhd);
13728 wait_event_timeout(dhdinfo->ctrl_wait, (*lockvar == FALSE), timeout);
13729 dhd_os_sdlock(dhd);
13730 #endif /* defined(BCMSDIO) */
13731 return;
} /* dhd_wait_for_event */
13733
13734 void dhd_wait_event_wakeup(dhd_pub_t *dhd)
13735 {
13736 #if defined(BCMSDIO)
13737 struct dhd_info *dhdinfo = dhd->info;
13738 if (waitqueue_active(&dhdinfo->ctrl_wait))
13739 wake_up(&dhdinfo->ctrl_wait);
13740 #endif // endif
13741 return;
13742 }
13743
13744 #if defined(BCMSDIO) || defined(BCMPCIE)
13745 int
13746 dhd_net_bus_devreset(struct net_device *dev, uint8 flag)
13747 {
13748 int ret;
13749
13750 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13751
13752 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13753 if (pm_runtime_get_sync(dhd_bus_to_dev(dhd->pub.bus)) < 0)
13754 return BCME_ERROR;
13755 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13756
13757 if (flag == TRUE) {
13758 /* Issue wl down command before resetting the chip */
13759 if (dhd_wl_ioctl_cmd(&dhd->pub, WLC_DOWN, NULL, 0, TRUE, 0) < 0) {
13760 DHD_TRACE(("%s: wl down failed\n", __FUNCTION__));
13761 }
13762 #ifdef PROP_TXSTATUS
13763 if (dhd->pub.wlfc_enabled) {
13764 dhd_wlfc_deinit(&dhd->pub);
13765 }
13766 #endif /* PROP_TXSTATUS */
13767 #ifdef PNO_SUPPORT
13768 if (dhd->pub.pno_state) {
13769 dhd_pno_deinit(&dhd->pub);
13770 }
13771 #endif // endif
13772 #ifdef RTT_SUPPORT
13773 if (dhd->pub.rtt_state) {
13774 dhd_rtt_deinit(&dhd->pub);
13775 }
13776 #endif /* RTT_SUPPORT */
13777
13778 #if defined(DBG_PKT_MON) && !defined(DBG_PKT_MON_INIT_DEFAULT)
13779 dhd_os_dbg_detach_pkt_monitor(&dhd->pub);
13780 #endif /* DBG_PKT_MON */
13781 }
13782
13783 #ifdef BCMSDIO
13784 if (!flag) {
13785 dhd_update_fw_nv_path(dhd);
13786 /* update firmware and nvram path to sdio bus */
13787 dhd_bus_update_fw_nv_path(dhd->pub.bus,
13788 dhd->fw_path, dhd->nv_path);
13789 }
13790 #endif /* BCMSDIO */
13791
13792 ret = dhd_bus_devreset(&dhd->pub, flag);
13793
13794 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
13795 pm_runtime_mark_last_busy(dhd_bus_to_dev(dhd->pub.bus));
13796 pm_runtime_put_autosuspend(dhd_bus_to_dev(dhd->pub.bus));
13797 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
13798
13799 if (flag) {
13800 /* Clear some flags for recovery logic */
13801 dhd->pub.dongle_trap_occured = 0;
13802 dhd->pub.iovar_timeout_occured = 0;
13803 #ifdef PCIE_FULL_DONGLE
13804 dhd->pub.d3ack_timeout_occured = 0;
13805 dhd->pub.livelock_occured = 0;
13806 dhd->pub.pktid_audit_failed = 0;
13807 #endif /* PCIE_FULL_DONGLE */
13808 dhd->pub.iface_op_failed = 0;
13809 dhd->pub.scan_timeout_occurred = 0;
13810 dhd->pub.scan_busy_occurred = 0;
13811 dhd->pub.smmu_fault_occurred = 0;
13812 }
13813
13814 if (ret) {
13815 DHD_ERROR(("%s: dhd_bus_devreset: %d\n", __FUNCTION__, ret));
13816 }
13817
13818 return ret;
13819 }
13820
13821 #ifdef BCMSDIO
13822 int
13823 dhd_net_bus_suspend(struct net_device *dev)
13824 {
13825 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13826 return dhd_bus_suspend(&dhd->pub);
13827 }
13828
13829 int
13830 dhd_net_bus_resume(struct net_device *dev, uint8 stage)
13831 {
13832 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13833 return dhd_bus_resume(&dhd->pub, stage);
13834 }
13835
13836 #endif /* BCMSDIO */
13837 #endif /* BCMSDIO || BCMPCIE */
13838
13839 int net_os_set_suspend_disable(struct net_device *dev, int val)
13840 {
13841 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13842 int ret = 0;
13843
13844 if (dhd) {
13845 ret = dhd->pub.suspend_disable_flag;
13846 dhd->pub.suspend_disable_flag = val;
13847 }
13848 return ret;
13849 }
13850
13851 int net_os_set_suspend(struct net_device *dev, int val, int force)
13852 {
13853 int ret = 0;
13854 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13855
13856 if (dhd) {
13857 #ifdef CONFIG_MACH_UNIVERSAL7420
13858 #endif /* CONFIG_MACH_UNIVERSAL7420 */
13859 #if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
13860 ret = dhd_set_suspend(val, &dhd->pub);
13861 #else
13862 ret = dhd_suspend_resume_helper(dhd, val, force);
13863 #endif // endif
13864 #ifdef WL_CFG80211
13865 wl_cfg80211_update_power_mode(dev);
13866 #endif // endif
13867 }
13868 return ret;
13869 }
13870
13871 int net_os_set_suspend_bcn_li_dtim(struct net_device *dev, int val)
13872 {
13873 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13874
13875 if (dhd) {
13876 DHD_ERROR(("%s: Set bcn_li_dtim in suspend %d\n",
13877 __FUNCTION__, val));
13878 dhd->pub.suspend_bcn_li_dtim = val;
13879 }
13880
13881 return 0;
13882 }
13883
13884 int net_os_set_max_dtim_enable(struct net_device *dev, int val)
13885 {
13886 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13887
13888 if (dhd) {
13889 DHD_ERROR(("%s: use MAX bcn_li_dtim in suspend %s\n",
13890 __FUNCTION__, (val ? "Enable" : "Disable")));
13891 if (val) {
13892 dhd->pub.max_dtim_enable = TRUE;
13893 } else {
13894 dhd->pub.max_dtim_enable = FALSE;
13895 }
13896 } else {
13897 return -1;
13898 }
13899
13900 return 0;
13901 }
13902
13903 #ifdef DISABLE_DTIM_IN_SUSPEND
13904 int net_os_set_disable_dtim_in_suspend(struct net_device *dev, int val)
13905 {
13906 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13907
13908 if (dhd) {
13909 DHD_ERROR(("%s: Disable bcn_li_dtim in suspend %s\n",
13910 __FUNCTION__, (val ? "Enable" : "Disable")));
13911 if (val) {
13912 dhd->pub.disable_dtim_in_suspend = TRUE;
13913 } else {
13914 dhd->pub.disable_dtim_in_suspend = FALSE;
13915 }
13916 } else {
13917 return -1;
13918 }
13919
13920 return 0;
13921 }
13922 #endif /* DISABLE_DTIM_IN_SUSPEND */
13923
13924 #ifdef PKT_FILTER_SUPPORT
13925 int net_os_rxfilter_add_remove(struct net_device *dev, int add_remove, int num)
13926 {
13927 int ret = 0;
13928
13929 #ifndef GAN_LITE_NAT_KEEPALIVE_FILTER
13930 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13931
13932 DHD_ERROR(("%s: add_remove = %d, num = %d\n", __FUNCTION__, add_remove, num));
13933 if (!dhd || (num == DHD_UNICAST_FILTER_NUM)) {
13934 return 0;
13935 }
13936
13937 #ifdef BLOCK_IPV6_PACKET
/* customer wants to drop all IPv6 packets */
13939 if (num == DHD_MULTICAST6_FILTER_NUM) {
13940 return 0;
13941 }
13942 #endif /* BLOCK_IPV6_PACKET */
13943
13944 if (num >= dhd->pub.pktfilter_count) {
13945 return -EINVAL;
13946 }
13947
13948 ret = dhd_packet_filter_add_remove(&dhd->pub, add_remove, num);
13949 #endif /* !GAN_LITE_NAT_KEEPALIVE_FILTER */
13950
13951 return ret;
13952 }
13953
int dhd_os_enable_packet_filter(dhd_pub_t *dhdp, int val)
{
13957 int ret = 0;
13958
/* Packet filtering is set only if we are still in early-suspend and
 * need either to turn it ON or turn it OFF.
 * It can always be turned OFF in early-suspend, but it is turned
 * back ON only if suspend_disable_flag was not set.
 */
13964 if (dhdp && dhdp->up) {
13965 if (dhdp->in_suspend) {
if (!val || !dhdp->suspend_disable_flag)
13967 dhd_enable_packet_filter(val, dhdp);
13968 }
13969 }
13970 return ret;
13971 }
13972
/* function to enable/disable the packet filter for a network device */
13974 int net_os_enable_packet_filter(struct net_device *dev, int val)
13975 {
13976 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13977
13978 DHD_ERROR(("%s: val = %d\n", __FUNCTION__, val));
13979 return dhd_os_enable_packet_filter(&dhd->pub, val);
13980 }
13981 #endif /* PKT_FILTER_SUPPORT */
13982
13983 int
13984 dhd_dev_init_ioctl(struct net_device *dev)
13985 {
13986 dhd_info_t *dhd = DHD_DEV_INFO(dev);
13987 int ret;
13988
13989 if ((ret = dhd_sync_with_dongle(&dhd->pub)) < 0)
13990 goto done;
13991
13992 done:
13993 return ret;
13994 }
13995
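/*
 * Build the Android wifi-HAL feature bitmask from the firmware capability
 * string and compile-time options. Each FW_SUPPORTED() check maps one
 * firmware capability to a WIFI_FEATURE_* bit, e.g. "p2p" in the cap
 * string sets WIFI_FEATURE_P2P.
 */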
13996 int
13997 dhd_dev_get_feature_set(struct net_device *dev)
13998 {
13999 dhd_info_t *ptr = *(dhd_info_t **)netdev_priv(dev);
14000 dhd_pub_t *dhd = (&ptr->pub);
14001 int feature_set = 0;
14002
14003 if (FW_SUPPORTED(dhd, sta)) {
14004 #if defined(OEM_ANDROID)
14005 feature_set |= WIFI_FEATURE_SET_LATENCY_MODE;
14006 feature_set |= WIFI_FEATURE_SET_TX_POWER_LIMIT;
14007 #endif /* OEM_ANDROID */
14008 feature_set |= WIFI_FEATURE_INFRA;
14009 }
14010 if (FW_SUPPORTED(dhd, dualband))
14011 feature_set |= WIFI_FEATURE_INFRA_5G;
14012 if (FW_SUPPORTED(dhd, p2p)) {
14013 feature_set |= WIFI_FEATURE_P2P;
14014 #if defined(OEM_ANDROID)
14015 feature_set |= WIFI_FEATURE_P2P_RAND_MAC;
14016 #endif /* OEM_ANDROID */
14017 }
14018 if (dhd->op_mode & DHD_FLAG_HOSTAP_MODE)
14019 feature_set |= WIFI_FEATURE_SOFT_AP;
14020 if (FW_SUPPORTED(dhd, tdls))
14021 feature_set |= WIFI_FEATURE_TDLS;
14022 if (FW_SUPPORTED(dhd, vsdb))
14023 feature_set |= WIFI_FEATURE_TDLS_OFFCHANNEL;
14024 if (FW_SUPPORTED(dhd, nan)) {
14025 feature_set |= WIFI_FEATURE_NAN;
/* NAN is essential for d2d rtt */
14027 if (FW_SUPPORTED(dhd, rttd2d))
14028 feature_set |= WIFI_FEATURE_D2D_RTT;
14029 }
14030 #ifdef RTT_SUPPORT
14031 feature_set |= WIFI_FEATURE_D2D_RTT;
14032 feature_set |= WIFI_FEATURE_D2AP_RTT;
14033 #endif /* RTT_SUPPORT */
14034 #ifdef LINKSTAT_SUPPORT
14035 feature_set |= WIFI_FEATURE_LINKSTAT;
14036 #endif /* LINKSTAT_SUPPORT */
14037
14038 #if defined(PNO_SUPPORT) && !defined(DISABLE_ANDROID_PNO)
14039 if (dhd_is_pno_supported(dhd)) {
14040 feature_set |= WIFI_FEATURE_PNO;
14041 #ifdef GSCAN_SUPPORT
14042 feature_set |= WIFI_FEATURE_GSCAN;
14043 feature_set |= WIFI_FEATURE_HAL_EPNO;
14044 #endif /* GSCAN_SUPPORT */
14045 }
14046 #endif /* PNO_SUPPORT && !DISABLE_ANDROID_PNO */
14047 #ifdef RSSI_MONITOR_SUPPORT
14048 if (FW_SUPPORTED(dhd, rssi_mon)) {
14049 feature_set |= WIFI_FEATURE_RSSI_MONITOR;
14050 }
14051 #endif /* RSSI_MONITOR_SUPPORT */
14052 #ifdef WL11U
14053 feature_set |= WIFI_FEATURE_HOTSPOT;
14054 #endif /* WL11U */
14055 #ifdef NDO_CONFIG_SUPPORT
14056 if (FW_SUPPORTED(dhd, ndoe))
14057 feature_set |= WIFI_FEATURE_CONFIG_NDO;
14058 #endif /* NDO_CONFIG_SUPPORT */
14059 #ifdef KEEP_ALIVE
14060 feature_set |= WIFI_FEATURE_MKEEP_ALIVE;
14061 #endif /* KEEP_ALIVE */
14062 #ifdef SUPPORT_RANDOM_MAC_SCAN
14063 feature_set |= WIFI_FEATURE_SCAN_RAND;
14064 #endif /* SUPPORT_RANDOM_MAC_SCAN */
14065 #ifdef FILTER_IE
14066 if (FW_SUPPORTED(dhd, fie)) {
14067 feature_set |= WIFI_FEATURE_FILTER_IE;
14068 }
14069 #endif /* FILTER_IE */
14070 #ifdef ROAMEXP_SUPPORT
14071 /* Check if the Android O roam feature is supported by FW */
14072 if (!(BCME_UNSUPPORTED == dhd_dev_set_whitelist_ssid(dev, NULL, 0, true))) {
14073 feature_set |= WIFI_FEATURE_CONTROL_ROAMING;
14074 }
14075 #endif /* ROAMEXP_SUPPORT */
14076 return feature_set;
14077 }
14078
14079 int
14080 dhd_dev_get_feature_set_matrix(struct net_device *dev, int num)
14081 {
14082 int feature_set_full;
14083 int ret = 0;
14084
14085 feature_set_full = dhd_dev_get_feature_set(dev);
14086
/* Common feature set for all interfaces */
14088 ret = (feature_set_full & WIFI_FEATURE_INFRA) |
14089 (feature_set_full & WIFI_FEATURE_INFRA_5G) |
14090 (feature_set_full & WIFI_FEATURE_D2D_RTT) |
14091 (feature_set_full & WIFI_FEATURE_D2AP_RTT) |
14092 (feature_set_full & WIFI_FEATURE_RSSI_MONITOR) |
14093 (feature_set_full & WIFI_FEATURE_EPR);
14094
14095 /* Specific feature group for each interface */
14096 switch (num) {
14097 case 0:
14098 ret |= (feature_set_full & WIFI_FEATURE_P2P) |
14099 /* Not supported yet */
14100 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14101 (feature_set_full & WIFI_FEATURE_TDLS) |
14102 (feature_set_full & WIFI_FEATURE_PNO) |
14103 (feature_set_full & WIFI_FEATURE_HAL_EPNO) |
14104 (feature_set_full & WIFI_FEATURE_BATCH_SCAN) |
14105 (feature_set_full & WIFI_FEATURE_GSCAN) |
14106 (feature_set_full & WIFI_FEATURE_HOTSPOT) |
14107 (feature_set_full & WIFI_FEATURE_ADDITIONAL_STA);
14108 break;
14109
14110 case 1:
14111 ret |= (feature_set_full & WIFI_FEATURE_P2P);
14112 /* Not yet verified NAN with P2P */
14113 /* (feature_set_full & WIFI_FEATURE_NAN) | */
14114 break;
14115
14116 case 2:
14117 ret |= (feature_set_full & WIFI_FEATURE_NAN) |
14118 (feature_set_full & WIFI_FEATURE_TDLS) |
14119 (feature_set_full & WIFI_FEATURE_TDLS_OFFCHANNEL);
14120 break;
14121
14122 default:
14123 ret = WIFI_FEATURE_INVALID;
DHD_ERROR(("%s: index %d out of range for feature set\n", __FUNCTION__, num));
14125 break;
14126 }
14127
14128 return ret;
14129 }
14130 #ifdef CUSTOM_FORCE_NODFS_FLAG
14131 int
14132 dhd_dev_set_nodfs(struct net_device *dev, u32 nodfs)
14133 {
14134 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14135
14136 if (nodfs) {
14137 if (dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG) {
14138 return 0;
14139 }
14140 dhd->pub.dhd_cflags |= WLAN_PLAT_NODFS_FLAG;
14141 } else {
14142 if (!(dhd->pub.dhd_cflags & WLAN_PLAT_NODFS_FLAG)) {
14143 return 0;
14144 }
14145 dhd->pub.dhd_cflags &= ~WLAN_PLAT_NODFS_FLAG;
14146 }
14147 dhd->pub.force_country_change = TRUE;
14148 return 0;
14149 }
14150 #endif /* CUSTOM_FORCE_NODFS_FLAG */
14151 #ifdef NDO_CONFIG_SUPPORT
14152 int
14153 dhd_dev_ndo_cfg(struct net_device *dev, u8 enable)
14154 {
14155 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14156 dhd_pub_t *dhdp = &dhd->pub;
14157 int ret = 0;
14158
14159 if (enable) {
14160 /* enable ND offload feature (will be enabled in FW on suspend) */
14161 dhdp->ndo_enable = TRUE;
14162
14163 /* Update changes of anycast address & DAD failed address */
14164 ret = dhd_dev_ndo_update_inet6addr(dev);
14165 if ((ret < 0) && (ret != BCME_NORESOURCE)) {
14166 DHD_ERROR(("%s: failed to update host ip addr: %d\n", __FUNCTION__, ret));
14167 return ret;
14168 }
14169 } else {
14170 /* disable ND offload feature */
14171 dhdp->ndo_enable = FALSE;
14172
14173 /* disable ND offload in FW */
14174 ret = dhd_ndo_enable(dhdp, FALSE);
14175 if (ret < 0) {
14176 DHD_ERROR(("%s: failed to disable NDO: %d\n", __FUNCTION__, ret));
14177 }
14178 }
14179 return ret;
14180 }
14181
/* #pragma used as a WAR to fix a build failure:
 * ignore dropping of the 'const' qualifier in the 'list_entry' macro.
 * This pragma disables the warning only for the following function.
 */
14186 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
14187 #pragma GCC diagnostic push
14188 #pragma GCC diagnostic ignored "-Wcast-qual"
#endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
14190 static int
14191 dhd_dev_ndo_get_valid_inet6addr_count(struct inet6_dev *inet6)
14192 {
14193 struct inet6_ifaddr *ifa;
14194 struct ifacaddr6 *acaddr = NULL;
14195 int addr_count = 0;
14196
14197 /* lock */
14198 read_lock_bh(&inet6->lock);
14199
14200 /* Count valid unicast address */
14201 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14202 if ((ifa->flags & IFA_F_DADFAILED) == 0) {
14203 addr_count++;
14204 }
14205 }
14206
14207 /* Count anycast address */
14208 acaddr = inet6->ac_list;
14209 while (acaddr) {
14210 addr_count++;
14211 acaddr = acaddr->aca_next;
14212 }
14213
14214 /* unlock */
14215 read_unlock_bh(&inet6->lock);
14216
14217 return addr_count;
14218 }
14219
14220 int
14221 dhd_dev_ndo_update_inet6addr(struct net_device *dev)
14222 {
14223 dhd_info_t *dhd;
14224 dhd_pub_t *dhdp;
14225 struct inet6_dev *inet6;
14226 struct inet6_ifaddr *ifa;
14227 struct ifacaddr6 *acaddr = NULL;
14228 struct in6_addr *ipv6_addr = NULL;
14229 int cnt, i;
14230 int ret = BCME_OK;
14231
/*
 * This function evaluates the host IP addresses in struct inet6_dev:
 * unicast addresses in inet6_dev->addr_list and
 * anycast addresses in inet6_dev->ac_list.
 * While walking inet6_dev, read_lock_bh() is required to prevent
 * access to a NULL (freed) pointer.
 */
14239
14240 if (dev) {
14241 inet6 = dev->ip6_ptr;
14242 if (!inet6) {
14243 DHD_ERROR(("%s: Invalid inet6_dev\n", __FUNCTION__));
14244 return BCME_ERROR;
14245 }
14246
14247 dhd = DHD_DEV_INFO(dev);
14248 if (!dhd) {
14249 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
14250 return BCME_ERROR;
14251 }
14252 dhdp = &dhd->pub;
14253
14254 if (dhd_net2idx(dhd, dev) != 0) {
14255 DHD_ERROR(("%s: Not primary interface\n", __FUNCTION__));
14256 return BCME_ERROR;
14257 }
14258 } else {
14259 DHD_ERROR(("%s: Invalid net_device\n", __FUNCTION__));
14260 return BCME_ERROR;
14261 }
14262
14263 /* Check host IP overflow */
14264 cnt = dhd_dev_ndo_get_valid_inet6addr_count(inet6);
14265 if (cnt > dhdp->ndo_max_host_ip) {
14266 if (!dhdp->ndo_host_ip_overflow) {
14267 dhdp->ndo_host_ip_overflow = TRUE;
14268 /* Disable ND offload in FW */
14269 DHD_INFO(("%s: Host IP overflow, disable NDO\n", __FUNCTION__));
14270 ret = dhd_ndo_enable(dhdp, FALSE);
14271 }
14272
14273 return ret;
14274 }
14275
/*
 * Allocate an IPv6 address buffer to store addresses to be added/removed.
 * The driver needs to lock inet6_dev while accessing the structure, but
 * cannot issue an ioctl while inet6_dev is locked since that requires
 * scheduling; hence, copy the addresses to the buffer and do the ioctl
 * after unlocking.
 */
14282 ipv6_addr = (struct in6_addr *)MALLOC(dhdp->osh,
14283 sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14284 if (!ipv6_addr) {
14285 DHD_ERROR(("%s: failed to alloc ipv6 addr buffer\n", __FUNCTION__));
14286 return BCME_NOMEM;
14287 }
14288
14289 /* Find DAD failed unicast address to be removed */
14290 cnt = 0;
14291 read_lock_bh(&inet6->lock);
14292 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14293 /* DAD failed unicast address */
14294 if ((ifa->flags & IFA_F_DADFAILED) &&
14295 (cnt < dhdp->ndo_max_host_ip)) {
14296 memcpy(&ipv6_addr[cnt], &ifa->addr, sizeof(struct in6_addr));
14297 cnt++;
14298 }
14299 }
14300 read_unlock_bh(&inet6->lock);
14301
14302 /* Remove DAD failed unicast address */
14303 for (i = 0; i < cnt; i++) {
14304 DHD_INFO(("%s: Remove DAD failed addr\n", __FUNCTION__));
14305 ret = dhd_ndo_remove_ip_by_addr(dhdp, (char *)&ipv6_addr[i], 0);
14306 if (ret < 0) {
14307 goto done;
14308 }
14309 }
14310
14311 /* Remove all anycast address */
14312 ret = dhd_ndo_remove_ip_by_type(dhdp, WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14313 if (ret < 0) {
14314 goto done;
14315 }
14316
14317 /*
14318 * if ND offload was disabled due to host ip overflow,
14319 * attempt to add valid unicast address.
14320 */
14321 if (dhdp->ndo_host_ip_overflow) {
14322 /* Find valid unicast address */
14323 cnt = 0;
14324 read_lock_bh(&inet6->lock);
14325 list_for_each_entry(ifa, &inet6->addr_list, if_list) {
14326 /* valid unicast address */
14327 if (!(ifa->flags & IFA_F_DADFAILED) &&
14328 (cnt < dhdp->ndo_max_host_ip)) {
14329 memcpy(&ipv6_addr[cnt], &ifa->addr,
14330 sizeof(struct in6_addr));
14331 cnt++;
14332 }
14333 }
14334 read_unlock_bh(&inet6->lock);
14335
14336 /* Add valid unicast address */
14337 for (i = 0; i < cnt; i++) {
14338 ret = dhd_ndo_add_ip_with_type(dhdp,
14339 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_UNICAST, 0);
14340 if (ret < 0) {
14341 goto done;
14342 }
14343 }
14344 }
14345
14346 /* Find anycast address */
14347 cnt = 0;
14348 read_lock_bh(&inet6->lock);
14349 acaddr = inet6->ac_list;
14350 while (acaddr) {
14351 if (cnt < dhdp->ndo_max_host_ip) {
14352 memcpy(&ipv6_addr[cnt], &acaddr->aca_addr, sizeof(struct in6_addr));
14353 cnt++;
14354 }
14355 acaddr = acaddr->aca_next;
14356 }
14357 read_unlock_bh(&inet6->lock);
14358
14359 /* Add anycast address */
14360 for (i = 0; i < cnt; i++) {
14361 ret = dhd_ndo_add_ip_with_type(dhdp,
14362 (char *)&ipv6_addr[i], WL_ND_IPV6_ADDR_TYPE_ANYCAST, 0);
14363 if (ret < 0) {
14364 goto done;
14365 }
14366 }
14367
/* Now all host IP addresses have been added successfully */
14369 if (dhdp->ndo_host_ip_overflow) {
14370 dhdp->ndo_host_ip_overflow = FALSE;
14371 if (dhdp->in_suspend) {
			/* driver is in (early) suspend state; re-enable ND offload in FW */
14373 DHD_INFO(("%s: enable NDO\n", __FUNCTION__));
14374 ret = dhd_ndo_enable(dhdp, TRUE);
14375 }
14376 }
14377
14378 done:
14379 if (ipv6_addr) {
14380 MFREE(dhdp->osh, ipv6_addr, sizeof(struct in6_addr) * dhdp->ndo_max_host_ip);
14381 }
14382
14383 return ret;
14384 }
14385 #if __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)
14386 #pragma GCC diagnostic pop
14387 #endif /* __GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6) */
14388 #endif /* NDO_CONFIG_SUPPORT */
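
/*
 * Illustrative sketch (not part of the driver build): the pattern used in the
 * NDO code above -- snapshot addresses into a private buffer under
 * read_lock_bh(), then drop the lock before issuing firmware ioctls, because
 * ioctls may sleep. The guard macro, function name and consume() callback
 * below are hypothetical.
 */
#ifdef DHD_NDO_PATTERN_EXAMPLE
static int
example_snapshot_then_ioctl(struct inet6_dev *inet6, struct in6_addr *buf,
	int max, int (*consume)(struct in6_addr *addr))
{
	struct inet6_ifaddr *ifa;
	int cnt = 0, i, err = 0;

	/* Phase 1: copy under the lock; no sleeping calls allowed here */
	read_lock_bh(&inet6->lock);
	list_for_each_entry(ifa, &inet6->addr_list, if_list) {
		if (cnt < max) {
			memcpy(&buf[cnt], &ifa->addr, sizeof(struct in6_addr));
			cnt++;
		}
	}
	read_unlock_bh(&inet6->lock);

	/* Phase 2: potentially sleeping ioctl work, done lock-free */
	for (i = 0; i < cnt && err >= 0; i++) {
		err = consume(&buf[i]);
	}
	return err;
}
#endif /* DHD_NDO_PATTERN_EXAMPLE */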
14389
14390 #ifdef PNO_SUPPORT
14391 /* Linux wrapper to call common dhd_pno_stop_for_ssid */
14392 int
14393 dhd_dev_pno_stop_for_ssid(struct net_device *dev)
14394 {
14395 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14396
14397 return (dhd_pno_stop_for_ssid(&dhd->pub));
14398 }
14399 /* Linux wrapper to call common dhd_pno_set_for_ssid */
14400 int
14401 dhd_dev_pno_set_for_ssid(struct net_device *dev, wlc_ssid_ext_t* ssids_local, int nssid,
14402 uint16 scan_fr, int pno_repeat, int pno_freq_expo_max, uint16 *channel_list, int nchan)
14403 {
14404 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14405
14406 return (dhd_pno_set_for_ssid(&dhd->pub, ssids_local, nssid, scan_fr,
14407 pno_repeat, pno_freq_expo_max, channel_list, nchan));
14408 }
14409
14410 /* Linux wrapper to call common dhd_pno_enable */
14411 int
14412 dhd_dev_pno_enable(struct net_device *dev, int enable)
14413 {
14414 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14415
14416 return (dhd_pno_enable(&dhd->pub, enable));
14417 }
14418
14419 /* Linux wrapper to call common dhd_pno_set_for_hotlist */
14420 int
14421 dhd_dev_pno_set_for_hotlist(struct net_device *dev, wl_pfn_bssid_t *p_pfn_bssid,
14422 struct dhd_pno_hotlist_params *hotlist_params)
14423 {
14424 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14425 return (dhd_pno_set_for_hotlist(&dhd->pub, p_pfn_bssid, hotlist_params));
14426 }
/* Linux wrapper to call common dhd_pno_stop_for_batch */
14428 int
14429 dhd_dev_pno_stop_for_batch(struct net_device *dev)
14430 {
14431 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14432 return (dhd_pno_stop_for_batch(&dhd->pub));
14433 }
/* Linux wrapper to call common dhd_pno_set_for_batch */
14435 int
14436 dhd_dev_pno_set_for_batch(struct net_device *dev, struct dhd_pno_batch_params *batch_params)
14437 {
14438 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14439 return (dhd_pno_set_for_batch(&dhd->pub, batch_params));
14440 }
/* Linux wrapper to call common dhd_pno_get_for_batch */
14442 int
14443 dhd_dev_pno_get_for_batch(struct net_device *dev, char *buf, int bufsize)
14444 {
14445 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14446 return (dhd_pno_get_for_batch(&dhd->pub, buf, bufsize, PNO_STATUS_NORMAL));
14447 }
14448 #endif /* PNO_SUPPORT */
14449
14450 #if defined(OEM_ANDROID) && defined(PNO_SUPPORT)
14451 #ifdef GSCAN_SUPPORT
14452 bool
14453 dhd_dev_is_legacy_pno_enabled(struct net_device *dev)
14454 {
14455 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14456
14457 return (dhd_is_legacy_pno_enabled(&dhd->pub));
14458 }
14459
14460 int
14461 dhd_dev_set_epno(struct net_device *dev)
14462 {
14463 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14464 if (!dhd) {
14465 return BCME_ERROR;
14466 }
14467 return dhd_pno_set_epno(&dhd->pub);
14468 }
14469 int
14470 dhd_dev_flush_fw_epno(struct net_device *dev)
14471 {
14472 dhd_info_t *dhd = DHD_DEV_INFO(dev);
14473 if (!dhd) {
14474 return BCME_ERROR;
14475 }
14476 return dhd_pno_flush_fw_epno(&dhd->pub);
14477 }
14478
14479 /* Linux wrapper to call common dhd_pno_set_cfg_gscan */
14480 int
14481 dhd_dev_pno_set_cfg_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
14482 void *buf, bool flush)
14483 {
14484 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14485
14486 return (dhd_pno_set_cfg_gscan(&dhd->pub, type, buf, flush));
14487 }
14488
14489 /* Linux wrapper to call common dhd_wait_batch_results_complete */
14490 int
14491 dhd_dev_wait_batch_results_complete(struct net_device *dev)
14492 {
14493 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14494
14495 return (dhd_wait_batch_results_complete(&dhd->pub));
14496 }
14497
14498 /* Linux wrapper to call common dhd_pno_lock_batch_results */
14499 int
14500 dhd_dev_pno_lock_access_batch_results(struct net_device *dev)
14501 {
14502 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14503
14504 return (dhd_pno_lock_batch_results(&dhd->pub));
14505 }
14506 /* Linux wrapper to call common dhd_pno_unlock_batch_results */
14507 void
14508 dhd_dev_pno_unlock_access_batch_results(struct net_device *dev)
14509 {
14510 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14511
14512 return (dhd_pno_unlock_batch_results(&dhd->pub));
14513 }
14514
14515 /* Linux wrapper to call common dhd_pno_initiate_gscan_request */
14516 int
14517 dhd_dev_pno_run_gscan(struct net_device *dev, bool run, bool flush)
14518 {
14519 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14520
14521 return (dhd_pno_initiate_gscan_request(&dhd->pub, run, flush));
14522 }
14523
14524 /* Linux wrapper to call common dhd_pno_enable_full_scan_result */
14525 int
14526 dhd_dev_pno_enable_full_scan_result(struct net_device *dev, bool real_time_flag)
14527 {
14528 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14529
14530 return (dhd_pno_enable_full_scan_result(&dhd->pub, real_time_flag));
14531 }
14532
14533 /* Linux wrapper to call common dhd_handle_hotlist_scan_evt */
14534 void *
14535 dhd_dev_hotlist_scan_event(struct net_device *dev,
14536 const void *data, int *send_evt_bytes, hotlist_type_t type, u32 *buf_len)
14537 {
14538 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14539
14540 return (dhd_handle_hotlist_scan_evt(&dhd->pub, data, send_evt_bytes, type, buf_len));
14541 }
14542
14543 /* Linux wrapper to call common dhd_process_full_gscan_result */
14544 void *
14545 dhd_dev_process_full_gscan_result(struct net_device *dev,
14546 const void *data, uint32 len, int *send_evt_bytes)
14547 {
14548 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14549
14550 return (dhd_process_full_gscan_result(&dhd->pub, data, len, send_evt_bytes));
14551 }
14552
14553 void
14554 dhd_dev_gscan_hotlist_cache_cleanup(struct net_device *dev, hotlist_type_t type)
14555 {
14556 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14557
14558 dhd_gscan_hotlist_cache_cleanup(&dhd->pub, type);
14559
14560 return;
14561 }
14562
14563 int
14564 dhd_dev_gscan_batch_cache_cleanup(struct net_device *dev)
14565 {
14566 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14567
14568 return (dhd_gscan_batch_cache_cleanup(&dhd->pub));
14569 }
14570
14571 /* Linux wrapper to call common dhd_retreive_batch_scan_results */
14572 int
14573 dhd_dev_retrieve_batch_scan(struct net_device *dev)
14574 {
14575 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14576
14577 return (dhd_retreive_batch_scan_results(&dhd->pub));
14578 }
14579 /* Linux wrapper to call common dhd_pno_process_epno_result */
14580 void * dhd_dev_process_epno_result(struct net_device *dev,
14581 const void *data, uint32 event, int *send_evt_bytes)
14582 {
14583 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14584
14585 return (dhd_pno_process_epno_result(&dhd->pub, data, event, send_evt_bytes));
14586 }
14587
14588 int
14589 dhd_dev_set_lazy_roam_cfg(struct net_device *dev,
14590 wlc_roam_exp_params_t *roam_param)
14591 {
14592 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14593 wl_roam_exp_cfg_t roam_exp_cfg;
14594 int err;
14595
14596 if (!roam_param) {
14597 return BCME_BADARG;
14598 }
14599
14600 DHD_INFO(("a_band_boost_thr %d a_band_penalty_thr %d\n",
14601 roam_param->a_band_boost_threshold, roam_param->a_band_penalty_threshold));
14602 DHD_INFO(("a_band_boost_factor %d a_band_penalty_factor %d cur_bssid_boost %d\n",
14603 roam_param->a_band_boost_factor, roam_param->a_band_penalty_factor,
14604 roam_param->cur_bssid_boost));
14605 DHD_INFO(("alert_roam_trigger_thr %d a_band_max_boost %d\n",
14606 roam_param->alert_roam_trigger_threshold, roam_param->a_band_max_boost));
14607
14608 memcpy(&roam_exp_cfg.params, roam_param, sizeof(*roam_param));
14609 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
14610 roam_exp_cfg.flags = ROAM_EXP_CFG_PRESENT;
14611 if (dhd->pub.lazy_roam_enable) {
14612 roam_exp_cfg.flags |= ROAM_EXP_ENABLE_FLAG;
14613 }
14614 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14615 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
14616 TRUE);
14617 if (err < 0) {
14618 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
14619 }
14620 return err;
14621 }
14622
14623 int
14624 dhd_dev_lazy_roam_enable(struct net_device *dev, uint32 enable)
14625 {
14626 int err;
14627 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14628 wl_roam_exp_cfg_t roam_exp_cfg;
14629
14630 memset(&roam_exp_cfg, 0, sizeof(roam_exp_cfg));
14631 roam_exp_cfg.version = ROAM_EXP_CFG_VERSION;
14632 if (enable) {
14633 roam_exp_cfg.flags = ROAM_EXP_ENABLE_FLAG;
14634 }
14635
14636 err = dhd_iovar(&dhd->pub, 0, "roam_exp_params",
14637 (char *)&roam_exp_cfg, sizeof(roam_exp_cfg), NULL, 0,
14638 TRUE);
14639 if (err < 0) {
14640 DHD_ERROR(("%s : Failed to execute roam_exp_params %d\n", __FUNCTION__, err));
14641 } else {
14642 dhd->pub.lazy_roam_enable = (enable != 0);
14643 }
14644 return err;
14645 }
14646 int
14647 dhd_dev_set_lazy_roam_bssid_pref(struct net_device *dev,
14648 wl_bssid_pref_cfg_t *bssid_pref, uint32 flush)
14649 {
14650 int err;
14651 uint len;
14652 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14653
14654 bssid_pref->version = BSSID_PREF_LIST_VERSION;
14655 /* By default programming bssid pref flushes out old values */
14656 bssid_pref->flags = (flush && !bssid_pref->count) ? ROAM_EXP_CLEAR_BSSID_PREF: 0;
14657 len = sizeof(wl_bssid_pref_cfg_t);
14658 if (bssid_pref->count) {
14659 len += (bssid_pref->count - 1) * sizeof(wl_bssid_pref_list_t);
14660 }
14661 err = dhd_iovar(&dhd->pub, 0, "roam_exp_bssid_pref",
14662 (char *)bssid_pref, len, NULL, 0, TRUE);
14663 if (err != BCME_OK) {
14664 DHD_ERROR(("%s : Failed to execute roam_exp_bssid_pref %d\n", __FUNCTION__, err));
14665 }
14666 return err;
14667 }
14668 #endif /* GSCAN_SUPPORT */
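
/*
 * Illustrative sketch (not part of the driver build): sizing a
 * variable-length IOVAR buffer when the wire struct declares a one-element
 * trailing array, as dhd_dev_set_lazy_roam_bssid_pref() does above. The
 * guard macro, struct and function names here are hypothetical.
 */
#ifdef DHD_IOV_LEN_EXAMPLE
struct example_entry {
	uint8 bssid[6];
	int8 rssi_modifier;
};
struct example_cfg {
	uint16 version;
	uint16 count;
	struct example_entry list[1];	/* variable length, one declared */
};

static uint
example_cfg_len(uint16 count)
{
	/* sizeof() already accounts for the one declared trailing entry */
	uint len = sizeof(struct example_cfg);
	if (count) {
		len += (count - 1) * sizeof(struct example_entry);
	}
	return len;
}
#endif /* DHD_IOV_LEN_EXAMPLE */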
14669 #if defined(GSCAN_SUPPORT) || defined(ROAMEXP_SUPPORT)
14670 int
14671 dhd_dev_set_blacklist_bssid(struct net_device *dev, maclist_t *blacklist,
14672 uint32 len, uint32 flush)
14673 {
14674 int err;
14675 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14676 int macmode;
14677
14678 if (blacklist) {
14679 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACLIST, (char *)blacklist,
14680 len, TRUE, 0);
14681 if (err != BCME_OK) {
14682 DHD_ERROR(("%s : WLC_SET_MACLIST failed %d\n", __FUNCTION__, err));
14683 return err;
14684 }
14685 }
14686 /* By default programming blacklist flushes out old values */
14687 macmode = (flush && !blacklist) ? WLC_MACMODE_DISABLED : WLC_MACMODE_DENY;
14688 err = dhd_wl_ioctl_cmd(&(dhd->pub), WLC_SET_MACMODE, (char *)&macmode,
14689 sizeof(macmode), TRUE, 0);
14690 if (err != BCME_OK) {
14691 DHD_ERROR(("%s : WLC_SET_MACMODE failed %d\n", __FUNCTION__, err));
14692 }
14693 return err;
14694 }
14695 int
14696 dhd_dev_set_whitelist_ssid(struct net_device *dev, wl_ssid_whitelist_t *ssid_whitelist,
14697 uint32 len, uint32 flush)
14698 {
14699 int err;
14700 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14701 wl_ssid_whitelist_t whitelist_ssid_flush;
14702
14703 if (!ssid_whitelist) {
14704 if (flush) {
14705 ssid_whitelist = &whitelist_ssid_flush;
14706 ssid_whitelist->ssid_count = 0;
14707 } else {
14708 DHD_ERROR(("%s : Nothing to do here\n", __FUNCTION__));
14709 return BCME_BADARG;
14710 }
14711 }
14712 ssid_whitelist->version = SSID_WHITELIST_VERSION;
14713 ssid_whitelist->flags = flush ? ROAM_EXP_CLEAR_SSID_WHITELIST : 0;
14714 err = dhd_iovar(&dhd->pub, 0, "roam_exp_ssid_whitelist", (char *)ssid_whitelist, len, NULL,
14715 0, TRUE);
14716 if (err != BCME_OK) {
		DHD_ERROR(("%s : Failed to execute roam_exp_ssid_whitelist %d\n", __FUNCTION__, err));
14718 }
14719 return err;
14720 }
14721 #endif /* GSCAN_SUPPORT || ROAMEXP_SUPPORT */
14722
14723 #if defined(GSCAN_SUPPORT) || defined(DHD_GET_VALID_CHANNELS)
14724 /* Linux wrapper to call common dhd_pno_get_gscan */
14725 void *
14726 dhd_dev_pno_get_gscan(struct net_device *dev, dhd_pno_gscan_cmd_cfg_t type,
14727 void *info, uint32 *len)
14728 {
14729 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14730
14731 return (dhd_pno_get_gscan(&dhd->pub, type, info, len));
14732 }
14733 #endif /* GSCAN_SUPPORT || DHD_GET_VALID_CHANNELS */
14734 #endif /* defined(OEM_ANDROID) && defined(PNO_SUPPORT) */
14735
14736 #ifdef RSSI_MONITOR_SUPPORT
14737 int
14738 dhd_dev_set_rssi_monitor_cfg(struct net_device *dev, int start,
14739 int8 max_rssi, int8 min_rssi)
14740 {
14741 int err;
14742 wl_rssi_monitor_cfg_t rssi_monitor;
14743 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14744
14745 rssi_monitor.version = RSSI_MONITOR_VERSION;
14746 rssi_monitor.max_rssi = max_rssi;
14747 rssi_monitor.min_rssi = min_rssi;
14748 rssi_monitor.flags = start ? 0: RSSI_MONITOR_STOP;
14749 err = dhd_iovar(&dhd->pub, 0, "rssi_monitor", (char *)&rssi_monitor, sizeof(rssi_monitor),
14750 NULL, 0, TRUE);
14751 if (err < 0 && err != BCME_UNSUPPORTED) {
14752 DHD_ERROR(("%s : Failed to execute rssi_monitor %d\n", __FUNCTION__, err));
14753 }
14754 return err;
14755 }
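
/*
 * Illustrative usage sketch (not part of the driver build): arming the
 * firmware RSSI monitor through the wrapper above. The exact threshold
 * semantics are firmware-defined; this only shows the call shape. The guard
 * macro and function name are hypothetical.
 */
#ifdef DHD_RSSI_MONITOR_EXAMPLE
static int
example_rssi_monitor_start(struct net_device *dev)
{
	/* monitor the -80..-40 dBm window; start=1 arms, start=0 stops */
	return dhd_dev_set_rssi_monitor_cfg(dev, 1, -40, -80);
}
#endif /* DHD_RSSI_MONITOR_EXAMPLE */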
14756 #endif /* RSSI_MONITOR_SUPPORT */
14757
14758 #ifdef DHDTCPACK_SUPPRESS
14759 int
14760 dhd_dev_set_tcpack_sup_mode_cfg(struct net_device *dev, uint8 enable)
14761 {
14762 int err;
14763 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14764
14765 err = dhd_tcpack_suppress_set(&dhd->pub, enable);
14766 if (err != BCME_OK) {
14767 DHD_ERROR(("%s : Failed to set tcpack_suppress mode: %d\n", __FUNCTION__, err));
14768 }
14769 return err;
14770 }
14771 #endif /* DHDTCPACK_SUPPRESS */
14772
14773 int
14774 dhd_dev_cfg_rand_mac_oui(struct net_device *dev, uint8 *oui)
14775 {
14776 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14777 dhd_pub_t *dhdp = &dhd->pub;
14778
14779 if (!dhdp || !oui) {
14780 DHD_ERROR(("NULL POINTER : %s\n",
14781 __FUNCTION__));
14782 return BCME_ERROR;
14783 }
14784 if (ETHER_ISMULTI(oui)) {
14785 DHD_ERROR(("Expected unicast OUI\n"));
14786 return BCME_ERROR;
14787 } else {
14788 uint8 *rand_mac_oui = dhdp->rand_mac_oui;
14789 memcpy(rand_mac_oui, oui, DOT11_OUI_LEN);
14790 DHD_ERROR(("Random MAC OUI to be used - "MACOUIDBG"\n",
14791 MACOUI2STRDBG(rand_mac_oui)));
14792 }
14793 return BCME_OK;
14794 }
14795
14796 int
14797 dhd_set_rand_mac_oui(dhd_pub_t *dhd)
14798 {
14799 int err;
14800 wl_pfn_macaddr_cfg_t wl_cfg;
14801 uint8 *rand_mac_oui = dhd->rand_mac_oui;
14802
14803 memset(&wl_cfg.macaddr, 0, ETHER_ADDR_LEN);
14804 memcpy(&wl_cfg.macaddr, rand_mac_oui, DOT11_OUI_LEN);
14805 wl_cfg.version = WL_PFN_MACADDR_CFG_VER;
14806 if (ETHER_ISNULLADDR(&wl_cfg.macaddr)) {
14807 wl_cfg.flags = 0;
14808 } else {
14809 wl_cfg.flags = (WL_PFN_MAC_OUI_ONLY_MASK | WL_PFN_SET_MAC_UNASSOC_MASK);
14810 }
14811
14812 DHD_ERROR(("Setting rand mac oui to FW - "MACOUIDBG"\n",
14813 MACOUI2STRDBG(rand_mac_oui)));
14814
14815 err = dhd_iovar(dhd, 0, "pfn_macaddr", (char *)&wl_cfg, sizeof(wl_cfg), NULL, 0, TRUE);
14816 if (err < 0) {
14817 DHD_ERROR(("%s : failed to execute pfn_macaddr %d\n", __FUNCTION__, err));
14818 }
14819 return err;
14820 }
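
/*
 * Illustrative usage sketch (not part of the driver build): cache a locally
 * administered OUI with dhd_dev_cfg_rand_mac_oui(), then push it to firmware
 * with dhd_set_rand_mac_oui(). Setting 0x02 in the first octet marks the
 * generated addresses as locally administered while keeping them unicast.
 * The guard macro and function name are hypothetical.
 */
#ifdef DHD_RAND_OUI_EXAMPLE
static int
example_enable_scan_mac_randomization(struct net_device *dev, dhd_pub_t *dhdp)
{
	uint8 oui[DOT11_OUI_LEN] = { 0x02, 0x00, 0x00 };
	int err;

	err = dhd_dev_cfg_rand_mac_oui(dev, oui);
	if (err == BCME_OK) {
		err = dhd_set_rand_mac_oui(dhdp);
	}
	return err;
}
#endif /* DHD_RAND_OUI_EXAMPLE */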
14821
14822 #if defined(RTT_SUPPORT) && defined(WL_CFG80211)
/* Linux wrapper to call common dhd_rtt_set_cfg */
14824 int
14825 dhd_dev_rtt_set_cfg(struct net_device *dev, void *buf)
14826 {
14827 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14828
14829 return (dhd_rtt_set_cfg(&dhd->pub, buf));
14830 }
14831
14832 int
14833 dhd_dev_rtt_cancel_cfg(struct net_device *dev, struct ether_addr *mac_list, int mac_cnt)
14834 {
14835 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14836
14837 return (dhd_rtt_stop(&dhd->pub, mac_list, mac_cnt));
14838 }
14839
14840 int
14841 dhd_dev_rtt_register_noti_callback(struct net_device *dev, void *ctx, dhd_rtt_compl_noti_fn noti_fn)
14842 {
14843 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14844
14845 return (dhd_rtt_register_noti_callback(&dhd->pub, ctx, noti_fn));
14846 }
14847
14848 int
14849 dhd_dev_rtt_unregister_noti_callback(struct net_device *dev, dhd_rtt_compl_noti_fn noti_fn)
14850 {
14851 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14852
14853 return (dhd_rtt_unregister_noti_callback(&dhd->pub, noti_fn));
14854 }
14855
14856 int
14857 dhd_dev_rtt_capability(struct net_device *dev, rtt_capabilities_t *capa)
14858 {
14859 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14860
14861 return (dhd_rtt_capability(&dhd->pub, capa));
14862 }
14863
14864 int
14865 dhd_dev_rtt_avail_channel(struct net_device *dev, wifi_channel_info *channel_info)
14866 {
14867 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14868 return (dhd_rtt_avail_channel(&dhd->pub, channel_info));
14869 }
14870
14871 int
14872 dhd_dev_rtt_enable_responder(struct net_device *dev, wifi_channel_info *channel_info)
14873 {
14874 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14875 return (dhd_rtt_enable_responder(&dhd->pub, channel_info));
14876 }
14877
14878 int dhd_dev_rtt_cancel_responder(struct net_device *dev)
14879 {
14880 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
14881 return (dhd_rtt_cancel_responder(&dhd->pub));
14882 }
14883
#endif /* RTT_SUPPORT && WL_CFG80211 */
14885
14886 #ifdef KEEP_ALIVE
14887 #define KA_TEMP_BUF_SIZE 512
14888 #define KA_FRAME_SIZE 300
14889
14890 int
14891 dhd_dev_start_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id, uint8 *ip_pkt,
14892 uint16 ip_pkt_len, uint8* src_mac, uint8* dst_mac, uint32 period_msec)
14893 {
14894 const int ETHERTYPE_LEN = 2;
14895 char *pbuf = NULL;
14896 const char *str;
14897 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
14898 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
14899 int buf_len = 0;
14900 int str_len = 0;
14901 int res = BCME_ERROR;
14902 int len_bytes = 0;
14903 int i = 0;
14904
	/* Ether frame buffer must hold both the max IP pkt (256 bytes) and the ether header */
14906 char *pmac_frame = NULL;
14907 char *pmac_frame_begin = NULL;
14908
14909 /*
14910 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
14911 * dongle shall reject a mkeep_alive request.
14912 */
14913 if (!dhd_support_sta_mode(dhd_pub))
14914 return res;
14915
14916 DHD_TRACE(("%s execution\n", __FUNCTION__));
14917
14918 if ((pbuf = MALLOCZ(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
14919 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
14920 res = BCME_NOMEM;
14921 return res;
14922 }
14923
14924 if ((pmac_frame = MALLOCZ(dhd_pub->osh, KA_FRAME_SIZE)) == NULL) {
14925 DHD_ERROR(("failed to allocate mac_frame with size %d\n", KA_FRAME_SIZE));
14926 res = BCME_NOMEM;
14927 goto exit;
14928 }
14929 pmac_frame_begin = pmac_frame;
14930
14931 /*
14932 * Get current mkeep-alive status.
14933 */
14934 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id, sizeof(mkeep_alive_id), pbuf,
14935 KA_TEMP_BUF_SIZE, FALSE);
14936 if (res < 0) {
14937 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
14938 goto exit;
14939 } else {
		/* Check whether the requested ID is already occupied */
14941 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
		if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
14943 DHD_ERROR(("%s: Get mkeep_alive failed, ID %u is in use.\n",
14944 __FUNCTION__, mkeep_alive_id));
14945
14946 /* Current occupied ID info */
14947 DHD_ERROR(("%s: mkeep_alive\n", __FUNCTION__));
14948 DHD_ERROR((" Id : %d\n"
14949 " Period: %d msec\n"
14950 " Length: %d\n"
14951 " Packet: 0x",
14952 mkeep_alive_pktp->keep_alive_id,
14953 dtoh32(mkeep_alive_pktp->period_msec),
14954 dtoh16(mkeep_alive_pktp->len_bytes)));
14955
14956 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
14957 DHD_ERROR(("%02x", mkeep_alive_pktp->data[i]));
14958 }
14959 DHD_ERROR(("\n"));
14960
14961 res = BCME_NOTFOUND;
14962 goto exit;
14963 }
14964 }
14965
14966 /* Request the specified ID */
14967 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
14968 memset(pbuf, 0, KA_TEMP_BUF_SIZE);
14969 str = "mkeep_alive";
14970 str_len = strlen(str);
14971 strncpy(pbuf, str, str_len);
14972 pbuf[str_len] = '\0';
14973
14974 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) (pbuf + str_len + 1);
14975 mkeep_alive_pkt.period_msec = htod32(period_msec);
14976 buf_len = str_len + 1;
14977 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
14978 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
14979
14980 /* ID assigned */
14981 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
14982
14983 buf_len += WL_MKEEP_ALIVE_FIXED_LEN;
14984
14985 /*
14986 * Build up Ethernet Frame
14987 */
14988
14989 /* Mapping dest mac addr */
14990 memcpy(pmac_frame, dst_mac, ETHER_ADDR_LEN);
14991 pmac_frame += ETHER_ADDR_LEN;
14992
14993 /* Mapping src mac addr */
14994 memcpy(pmac_frame, src_mac, ETHER_ADDR_LEN);
14995 pmac_frame += ETHER_ADDR_LEN;
14996
14997 /* Mapping Ethernet type (ETHERTYPE_IP: 0x0800) */
14998 *(pmac_frame++) = 0x08;
14999 *(pmac_frame++) = 0x00;
15000
15001 /* Mapping IP pkt */
15002 memcpy(pmac_frame, ip_pkt, ip_pkt_len);
15003 pmac_frame += ip_pkt_len;
15004
	/*
	 * Length of the ether frame
	 * = dst mac + src mac + ether type + ip pkt len
	 */
15009 len_bytes = ETHER_ADDR_LEN*2 + ETHERTYPE_LEN + ip_pkt_len;
15010 memcpy(mkeep_alive_pktp->data, pmac_frame_begin, len_bytes);
15011 buf_len += len_bytes;
15012 mkeep_alive_pkt.len_bytes = htod16(len_bytes);
15013
15014 /*
15015 * Keep-alive attributes are set in local variable (mkeep_alive_pkt), and
15016 * then memcpy'ed into buffer (mkeep_alive_pktp) since there is no
15017 * guarantee that the buffer is properly aligned.
15018 */
15019 memcpy((char *)mkeep_alive_pktp, &mkeep_alive_pkt, WL_MKEEP_ALIVE_FIXED_LEN);
15020
15021 res = dhd_wl_ioctl_cmd(dhd_pub, WLC_SET_VAR, pbuf, buf_len, TRUE, 0);
15022 exit:
15023 if (pmac_frame_begin) {
15024 MFREE(dhd_pub->osh, pmac_frame_begin, KA_FRAME_SIZE);
15025 pmac_frame_begin = NULL;
15026 }
15027 if (pbuf) {
15028 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15029 pbuf = NULL;
15030 }
15031 return res;
15032 }
15033
15034 int
15035 dhd_dev_stop_mkeep_alive(dhd_pub_t *dhd_pub, uint8 mkeep_alive_id)
15036 {
15037 char *pbuf = NULL;
15038 wl_mkeep_alive_pkt_t mkeep_alive_pkt;
15039 wl_mkeep_alive_pkt_t *mkeep_alive_pktp = NULL;
15040 int res = BCME_ERROR;
15041 int i = 0;
15042
15043 /*
15044 * The mkeep_alive packet is for STA interface only; if the bss is configured as AP,
15045 * dongle shall reject a mkeep_alive request.
15046 */
15047 if (!dhd_support_sta_mode(dhd_pub))
15048 return res;
15049
15050 DHD_TRACE(("%s execution\n", __FUNCTION__));
15051
15052 /*
15053 * Get current mkeep-alive status. Skip ID 0 which is being used for NULL pkt.
15054 */
15055 if ((pbuf = MALLOC(dhd_pub->osh, KA_TEMP_BUF_SIZE)) == NULL) {
15056 DHD_ERROR(("failed to allocate buf with size %d\n", KA_TEMP_BUF_SIZE));
15057 return res;
15058 }
15059
15060 res = dhd_iovar(dhd_pub, 0, "mkeep_alive", &mkeep_alive_id,
15061 sizeof(mkeep_alive_id), pbuf, KA_TEMP_BUF_SIZE, FALSE);
15062 if (res < 0) {
15063 DHD_ERROR(("%s: Get mkeep_alive failed (error=%d)\n", __FUNCTION__, res));
15064 goto exit;
15065 } else {
15066 /* Check occupied ID */
15067 mkeep_alive_pktp = (wl_mkeep_alive_pkt_t *) pbuf;
15068 DHD_INFO(("%s: mkeep_alive\n", __FUNCTION__));
15069 DHD_INFO((" Id : %d\n"
15070 " Period: %d msec\n"
15071 " Length: %d\n"
15072 " Packet: 0x",
15073 mkeep_alive_pktp->keep_alive_id,
15074 dtoh32(mkeep_alive_pktp->period_msec),
15075 dtoh16(mkeep_alive_pktp->len_bytes)));
15076
15077 for (i = 0; i < mkeep_alive_pktp->len_bytes; i++) {
15078 DHD_INFO(("%02x", mkeep_alive_pktp->data[i]));
15079 }
15080 DHD_INFO(("\n"));
15081 }
15082
15083 /* Make it stop if available */
	if (dtoh32(mkeep_alive_pktp->period_msec) != 0) {
15085 DHD_INFO(("stop mkeep_alive on ID %d\n", mkeep_alive_id));
15086 memset(&mkeep_alive_pkt, 0, sizeof(wl_mkeep_alive_pkt_t));
15087
15088 mkeep_alive_pkt.period_msec = 0;
15089 mkeep_alive_pkt.version = htod16(WL_MKEEP_ALIVE_VERSION);
15090 mkeep_alive_pkt.length = htod16(WL_MKEEP_ALIVE_FIXED_LEN);
15091 mkeep_alive_pkt.keep_alive_id = mkeep_alive_id;
15092
15093 res = dhd_iovar(dhd_pub, 0, "mkeep_alive",
15094 (char *)&mkeep_alive_pkt,
15095 WL_MKEEP_ALIVE_FIXED_LEN, NULL, 0, TRUE);
15096 } else {
15097 DHD_ERROR(("%s: ID %u does not exist.\n", __FUNCTION__, mkeep_alive_id));
15098 res = BCME_NOTFOUND;
15099 }
15100 exit:
15101 if (pbuf) {
15102 MFREE(dhd_pub->osh, pbuf, KA_TEMP_BUF_SIZE);
15103 pbuf = NULL;
15104 }
15105 return res;
15106 }
15107 #endif /* KEEP_ALIVE */
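
/*
 * Illustrative sketch (not part of the driver build): the keep-alive payload
 * assembled in dhd_dev_start_mkeep_alive() above is a plain Ethernet II
 * frame -- dst MAC, src MAC, 0x0800 ethertype, then the caller's IP packet.
 * The guard macro and helper name are hypothetical.
 */
#ifdef DHD_KA_FRAME_EXAMPLE
static int
example_build_eth_frame(uint8 *frame, const uint8 *dst, const uint8 *src,
	const uint8 *ip_pkt, uint16 ip_pkt_len)
{
	uint8 *p = frame;

	memcpy(p, dst, ETHER_ADDR_LEN);
	p += ETHER_ADDR_LEN;
	memcpy(p, src, ETHER_ADDR_LEN);
	p += ETHER_ADDR_LEN;
	*p++ = 0x08;	/* ETHERTYPE_IP, network byte order */
	*p++ = 0x00;
	memcpy(p, ip_pkt, ip_pkt_len);

	/* total frame length: 2 MAC addresses + 2-byte ethertype + payload */
	return (ETHER_ADDR_LEN * 2) + 2 + ip_pkt_len;
}
#endif /* DHD_KA_FRAME_EXAMPLE */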
15108
15109 #if defined(PKT_FILTER_SUPPORT) && defined(APF)
15110 static void _dhd_apf_lock_local(dhd_info_t *dhd)
15111 {
15112 if (dhd) {
15113 mutex_lock(&dhd->dhd_apf_mutex);
15114 }
15115 }
15116
15117 static void _dhd_apf_unlock_local(dhd_info_t *dhd)
15118 {
15119 if (dhd) {
15120 mutex_unlock(&dhd->dhd_apf_mutex);
15121 }
15122 }
15123
15124 static int
15125 __dhd_apf_add_filter(struct net_device *ndev, uint32 filter_id,
15126 u8* program, uint32 program_len)
15127 {
15128 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15129 dhd_pub_t *dhdp = &dhd->pub;
15130 wl_pkt_filter_t * pkt_filterp;
15131 wl_apf_program_t *apf_program;
15132 char *buf;
15133 u32 cmd_len, buf_len;
15134 int ifidx, ret;
15135 char cmd[] = "pkt_filter_add";
15136
15137 ifidx = dhd_net2idx(dhd, ndev);
15138 if (ifidx == DHD_BAD_IF) {
15139 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15140 return -ENODEV;
15141 }
15142
15143 cmd_len = sizeof(cmd);
15144
	/* Reject the request if program_len exceeds the maximum supported
	 * size or if the program pointer is NULL.
	 */
15148 if ((program_len > WL_APF_PROGRAM_MAX_SIZE) || (program == NULL)) {
15149 DHD_ERROR(("%s Invalid program_len: %d, program: %pK\n",
15150 __FUNCTION__, program_len, program));
15151 return -EINVAL;
15152 }
15153 buf_len = cmd_len + WL_PKT_FILTER_FIXED_LEN +
15154 WL_APF_PROGRAM_FIXED_LEN + program_len;
15155
15156 buf = MALLOCZ(dhdp->osh, buf_len);
15157 if (unlikely(!buf)) {
15158 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15159 return -ENOMEM;
15160 }
15161
15162 memcpy(buf, cmd, cmd_len);
15163
15164 pkt_filterp = (wl_pkt_filter_t *) (buf + cmd_len);
15165 pkt_filterp->id = htod32(filter_id);
15166 pkt_filterp->negate_match = htod32(FALSE);
15167 pkt_filterp->type = htod32(WL_PKT_FILTER_TYPE_APF_MATCH);
15168
15169 apf_program = &pkt_filterp->u.apf_program;
15170 apf_program->version = htod16(WL_APF_INTERNAL_VERSION);
15171 apf_program->instr_len = htod16(program_len);
15172 memcpy(apf_program->instrs, program, program_len);
15173
15174 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15175 if (unlikely(ret)) {
15176 DHD_ERROR(("%s: failed to add APF filter, id=%d, ret=%d\n",
15177 __FUNCTION__, filter_id, ret));
15178 }
15179
15180 if (buf) {
15181 MFREE(dhdp->osh, buf, buf_len);
15182 }
15183 return ret;
15184 }
15185
15186 static int
15187 __dhd_apf_config_filter(struct net_device *ndev, uint32 filter_id,
15188 uint32 mode, uint32 enable)
15189 {
15190 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15191 dhd_pub_t *dhdp = &dhd->pub;
15192 wl_pkt_filter_enable_t * pkt_filterp;
15193 char *buf;
15194 u32 cmd_len, buf_len;
15195 int ifidx, ret;
15196 char cmd[] = "pkt_filter_enable";
15197
15198 ifidx = dhd_net2idx(dhd, ndev);
15199 if (ifidx == DHD_BAD_IF) {
15200 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15201 return -ENODEV;
15202 }
15203
15204 cmd_len = sizeof(cmd);
15205 buf_len = cmd_len + sizeof(*pkt_filterp);
15206
15207 buf = MALLOCZ(dhdp->osh, buf_len);
15208 if (unlikely(!buf)) {
15209 DHD_ERROR(("%s: MALLOC failure, %d bytes\n", __FUNCTION__, buf_len));
15210 return -ENOMEM;
15211 }
15212
15213 memcpy(buf, cmd, cmd_len);
15214
15215 pkt_filterp = (wl_pkt_filter_enable_t *) (buf + cmd_len);
15216 pkt_filterp->id = htod32(filter_id);
15217 pkt_filterp->enable = htod32(enable);
15218
15219 ret = dhd_wl_ioctl_cmd(dhdp, WLC_SET_VAR, buf, buf_len, TRUE, ifidx);
15220 if (unlikely(ret)) {
15221 DHD_ERROR(("%s: failed to enable APF filter, id=%d, ret=%d\n",
15222 __FUNCTION__, filter_id, ret));
15223 goto exit;
15224 }
15225
15226 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_mode", dhd_master_mode,
15227 WLC_SET_VAR, TRUE, ifidx);
15228 if (unlikely(ret)) {
15229 DHD_ERROR(("%s: failed to set APF filter mode, id=%d, ret=%d\n",
15230 __FUNCTION__, filter_id, ret));
15231 }
15232
15233 exit:
15234 if (buf) {
15235 MFREE(dhdp->osh, buf, buf_len);
15236 }
15237 return ret;
15238 }
15239
15240 static int
15241 __dhd_apf_delete_filter(struct net_device *ndev, uint32 filter_id)
15242 {
15243 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15244 dhd_pub_t *dhdp = &dhd->pub;
15245 int ifidx, ret;
15246
15247 ifidx = dhd_net2idx(dhd, ndev);
15248 if (ifidx == DHD_BAD_IF) {
15249 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15250 return -ENODEV;
15251 }
15252
15253 ret = dhd_wl_ioctl_set_intiovar(dhdp, "pkt_filter_delete",
15254 htod32(filter_id), WLC_SET_VAR, TRUE, ifidx);
15255 if (unlikely(ret)) {
15256 DHD_ERROR(("%s: failed to delete APF filter, id=%d, ret=%d\n",
15257 __FUNCTION__, filter_id, ret));
15258 }
15259
15260 return ret;
15261 }
15262
15263 void dhd_apf_lock(struct net_device *dev)
15264 {
15265 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15266 _dhd_apf_lock_local(dhd);
15267 }
15268
15269 void dhd_apf_unlock(struct net_device *dev)
15270 {
15271 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15272 _dhd_apf_unlock_local(dhd);
15273 }
15274
15275 int
15276 dhd_dev_apf_get_version(struct net_device *ndev, uint32 *version)
15277 {
15278 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15279 dhd_pub_t *dhdp = &dhd->pub;
15280 int ifidx, ret;
15281
15282 if (!FW_SUPPORTED(dhdp, apf)) {
15283 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15284
15285 /*
15286 * Notify Android framework that APF is not supported by setting
15287 * version as zero.
15288 */
15289 *version = 0;
15290 return BCME_OK;
15291 }
15292
15293 ifidx = dhd_net2idx(dhd, ndev);
15294 if (ifidx == DHD_BAD_IF) {
15295 DHD_ERROR(("%s: bad ifidx\n", __FUNCTION__));
15296 return -ENODEV;
15297 }
15298
15299 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_ver", version,
15300 WLC_GET_VAR, FALSE, ifidx);
15301 if (unlikely(ret)) {
15302 DHD_ERROR(("%s: failed to get APF version, ret=%d\n",
15303 __FUNCTION__, ret));
15304 }
15305
15306 return ret;
15307 }
15308
15309 int
15310 dhd_dev_apf_get_max_len(struct net_device *ndev, uint32 *max_len)
15311 {
15312 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
15313 dhd_pub_t *dhdp = &dhd->pub;
15314 int ifidx, ret;
15315
15316 if (!FW_SUPPORTED(dhdp, apf)) {
15317 DHD_ERROR(("%s: firmware doesn't support APF\n", __FUNCTION__));
15318 *max_len = 0;
15319 return BCME_OK;
15320 }
15321
15322 ifidx = dhd_net2idx(dhd, ndev);
15323 if (ifidx == DHD_BAD_IF) {
15324 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
15325 return -ENODEV;
15326 }
15327
15328 ret = dhd_wl_ioctl_get_intiovar(dhdp, "apf_size_limit", max_len,
15329 WLC_GET_VAR, FALSE, ifidx);
15330 if (unlikely(ret)) {
15331 DHD_ERROR(("%s: failed to get APF size limit, ret=%d\n",
15332 __FUNCTION__, ret));
15333 }
15334
15335 return ret;
15336 }
15337
15338 int
15339 dhd_dev_apf_add_filter(struct net_device *ndev, u8* program,
15340 uint32 program_len)
15341 {
15342 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15343 dhd_pub_t *dhdp = &dhd->pub;
15344 int ret;
15345
15346 DHD_APF_LOCK(ndev);
15347
15348 /* delete, if filter already exists */
15349 if (dhdp->apf_set) {
15350 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15351 if (unlikely(ret)) {
15352 goto exit;
15353 }
15354 dhdp->apf_set = FALSE;
15355 }
15356
15357 ret = __dhd_apf_add_filter(ndev, PKT_FILTER_APF_ID, program, program_len);
15358 if (ret) {
15359 goto exit;
15360 }
15361 dhdp->apf_set = TRUE;
15362
15363 if (dhdp->in_suspend && dhdp->apf_set && !(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
		/* Driver is still in (early) suspend state, re-enable the APF filter */
15365 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15366 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
15367 }
15368 exit:
15369 DHD_APF_UNLOCK(ndev);
15370
15371 return ret;
15372 }
15373
15374 int
15375 dhd_dev_apf_enable_filter(struct net_device *ndev)
15376 {
15377 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15378 dhd_pub_t *dhdp = &dhd->pub;
15379 int ret = 0;
15380 bool nan_dp_active = false;
15381
15382 DHD_APF_LOCK(ndev);
15383 #ifdef WL_NAN
15384 nan_dp_active = wl_cfgnan_is_dp_active(ndev);
15385 #endif /* WL_NAN */
15386 if (dhdp->apf_set && (!(dhdp->op_mode & DHD_FLAG_HOSTAP_MODE) &&
15387 !nan_dp_active)) {
15388 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15389 PKT_FILTER_MODE_FORWARD_ON_MATCH, TRUE);
15390 }
15391
15392 DHD_APF_UNLOCK(ndev);
15393
15394 return ret;
15395 }
15396
15397 int
15398 dhd_dev_apf_disable_filter(struct net_device *ndev)
15399 {
15400 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15401 dhd_pub_t *dhdp = &dhd->pub;
15402 int ret = 0;
15403
15404 DHD_APF_LOCK(ndev);
15405
15406 if (dhdp->apf_set) {
15407 ret = __dhd_apf_config_filter(ndev, PKT_FILTER_APF_ID,
15408 PKT_FILTER_MODE_FORWARD_ON_MATCH, FALSE);
15409 }
15410
15411 DHD_APF_UNLOCK(ndev);
15412
15413 return ret;
15414 }
15415
15416 int
15417 dhd_dev_apf_delete_filter(struct net_device *ndev)
15418 {
15419 dhd_info_t *dhd = DHD_DEV_INFO(ndev);
15420 dhd_pub_t *dhdp = &dhd->pub;
15421 int ret = 0;
15422
15423 DHD_APF_LOCK(ndev);
15424
15425 if (dhdp->apf_set) {
15426 ret = __dhd_apf_delete_filter(ndev, PKT_FILTER_APF_ID);
15427 if (!ret) {
15428 dhdp->apf_set = FALSE;
15429 }
15430 }
15431
15432 DHD_APF_UNLOCK(ndev);
15433
15434 return ret;
15435 }
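
/*
 * Illustrative usage sketch (not part of the driver build): the APF entry
 * points above are expected to be driven in this order -- query the version
 * (zero means no firmware support), check the size limit, then install the
 * program. The guard macro and function name are hypothetical.
 */
#ifdef DHD_APF_USAGE_EXAMPLE
static int
example_apf_install(struct net_device *ndev, u8 *program, uint32 program_len)
{
	uint32 ver = 0, max_len = 0;
	int ret;

	ret = dhd_dev_apf_get_version(ndev, &ver);
	if (ret || ver == 0) {
		return ret;	/* zero version: firmware has no APF */
	}
	ret = dhd_dev_apf_get_max_len(ndev, &max_len);
	if (ret) {
		return ret;
	}
	if (program_len > max_len) {
		return -EINVAL;
	}
	return dhd_dev_apf_add_filter(ndev, program, program_len);
}
#endif /* DHD_APF_USAGE_EXAMPLE */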
15436 #endif /* PKT_FILTER_SUPPORT && APF */
15437
15438 #if defined(OEM_ANDROID)
15439 static void dhd_hang_process(struct work_struct *work_data)
15440 {
15441 struct net_device *dev;
15442 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
15443 struct net_device *ndev;
15444 uint8 i = 0;
15445 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
15446 /* Ignore compiler warnings due to -Werror=cast-qual */
15447 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
15448 #pragma GCC diagnostic push
15449 #pragma GCC diagnostic ignored "-Wcast-qual"
15450 #endif // endif
15451 struct dhd_info *dhd =
15452 container_of(work_data, dhd_info_t, dhd_hang_process_work);
15453 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
15454 #pragma GCC diagnostic pop
15455 #endif // endif
15456
15457 dev = dhd->iflist[0]->net;
15458
15459 if (dev) {
15460 #if defined(WL_WIRELESS_EXT)
15461 wl_iw_send_priv_event(dev, "HANG");
15462 #endif // endif
15463 #if defined(WL_CFG80211)
15464 wl_cfg80211_hang(dev, WLAN_REASON_UNSPECIFIED);
15465 #endif // endif
15466 }
15467 #ifdef IFACE_HANG_FORCE_DEV_CLOSE
	/*
	 * For HW2, dev_close is needed so the upper layer can recover
	 * after a hang. For Interposer, skip dev_close so that dhd
	 * iovars can still be used to take a socram dump after the
	 * crash; also skip for HW4, where the hang event is handled
	 * differently.
	 */
15475
15476 rtnl_lock();
15477 for (i = 0; i < DHD_MAX_IFS; i++) {
15478 ndev = dhd->iflist[i] ? dhd->iflist[i]->net : NULL;
15479 if (ndev && (ndev->flags & IFF_UP)) {
15480 DHD_ERROR(("ndev->name : %s dev close\n",
15481 ndev->name));
15482 dev_close(ndev);
15483 }
15484 }
15485 rtnl_unlock();
15486 #endif /* IFACE_HANG_FORCE_DEV_CLOSE */
15487 }
15488
15489 #ifdef EXYNOS_PCIE_LINKDOWN_RECOVERY
15490 extern dhd_pub_t *link_recovery;
15491 void dhd_host_recover_link(void)
15492 {
15493 DHD_ERROR(("****** %s ******\n", __FUNCTION__));
15494 link_recovery->hang_reason = HANG_REASON_PCIE_LINK_DOWN_RC_DETECT;
15495 dhd_bus_set_linkdown(link_recovery, TRUE);
15496 dhd_os_send_hang_message(link_recovery);
15497 }
15498 EXPORT_SYMBOL(dhd_host_recover_link);
15499 #endif /* EXYNOS_PCIE_LINKDOWN_RECOVERY */
15500
15501 int dhd_os_send_hang_message(dhd_pub_t *dhdp)
15502 {
15503 int ret = 0;
15504 #ifdef WL_CFG80211
15505 struct net_device *primary_ndev;
15506 struct bcm_cfg80211 *cfg;
15507 #ifdef DHD_FILE_DUMP_EVENT
15508 dhd_info_t *dhd_info = NULL;
15509 #endif /* DHD_FILE_DUMP_EVENT */
15510 #endif /* WL_CFG80211 */
15511
15512 if (!dhdp) {
15513 DHD_ERROR(("%s: dhdp is null\n", __FUNCTION__));
15514 return -EINVAL;
15515 }
15516
15517 #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
15518 dhd_info = (dhd_info_t *)dhdp->info;
15519
15520 if (dhd_info->scheduled_memdump) {
15521 DHD_ERROR_RLMT(("[DUMP]:%s, memdump in progress. return\n", __FUNCTION__));
15522 dhdp->hang_was_pending = 1;
15523 return BCME_OK;
15524 }
15525 #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
15526
15527 #ifdef WL_CFG80211
15528 primary_ndev = dhd_linux_get_primary_netdev(dhdp);
15529 if (!primary_ndev) {
15530 DHD_ERROR(("%s: Cannot find primary netdev\n", __FUNCTION__));
15531 return -ENODEV;
15532 }
15533 cfg = wl_get_cfg(primary_ndev);
15534 if (!cfg) {
15535 DHD_ERROR(("%s: Cannot find cfg\n", __FUNCTION__));
15536 return -EINVAL;
15537 }
15538
15539 /* Skip sending HANG event to framework if driver is not ready */
15540 if (!wl_get_drv_status(cfg, READY, primary_ndev)) {
15541 DHD_ERROR(("%s: device is not ready\n", __FUNCTION__));
15542 return -ENODEV;
15543 }
15544 #endif /* WL_CFG80211 */
15545
15546 #if defined(DHD_HANG_SEND_UP_TEST)
15547 if (dhdp->req_hang_type) {
15548 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
15549 __FUNCTION__, dhdp->req_hang_type));
15550 dhdp->req_hang_type = 0;
15551 }
15552 #endif /* DHD_HANG_SEND_UP_TEST */
15553
15554 if (!dhdp->hang_was_sent) {
15555 #if defined(CONFIG_BCM_DETECT_CONSECUTIVE_HANG)
15556 dhdp->hang_counts++;
15557 if (dhdp->hang_counts >= MAX_CONSECUTIVE_HANG_COUNTS) {
15558 DHD_ERROR(("%s, Consecutive hang from Dongle :%u\n",
15559 __func__, dhdp->hang_counts));
15560 BUG_ON(1);
15561 }
15562 #endif /* CONFIG_BCM_DETECT_CONSECUTIVE_HANG */
15563 #ifdef DHD_DEBUG_UART
		/* If the PCIe link is broken, execute the debug UART application
		 * to gather ramdump data from the dongle via UART
		 */
15567 if (!dhdp->info->duart_execute) {
15568 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
15569 (void *)dhdp, DHD_WQ_WORK_DEBUG_UART_DUMP,
15570 dhd_debug_uart_exec_rd, DHD_WQ_WORK_PRIORITY_HIGH);
15571 }
15572 #endif /* DHD_DEBUG_UART */
15573 dhdp->hang_was_sent = 1;
15574 #ifdef BT_OVER_SDIO
15575 dhdp->is_bt_recovery_required = TRUE;
15576 #endif // endif
15577 schedule_work(&dhdp->info->dhd_hang_process_work);
15578
15579 }
15580 return ret;
15581 }
15582
15583 int net_os_send_hang_message(struct net_device *dev)
15584 {
15585 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15586 int ret = 0;
15587
15588 if (dhd) {
15589 /* Report FW problem when enabled */
15590 if (dhd->pub.hang_report) {
15591 #ifdef BT_OVER_SDIO
15592 if (netif_running(dev)) {
15593 #endif /* BT_OVER_SDIO */
15594 ret = dhd_os_send_hang_message(&dhd->pub);
15595 #ifdef BT_OVER_SDIO
15596 }
15597 DHD_ERROR(("%s: HANG -> Reset BT\n", __FUNCTION__));
15598 bcmsdh_btsdio_process_dhd_hang_notification(!netif_running(dev));
15599 #endif /* BT_OVER_SDIO */
15600 } else {
15601 DHD_ERROR(("%s: FW HANG ignored (for testing purpose) and not sent up\n",
15602 __FUNCTION__));
15603 }
15604 }
15605 return ret;
15606 }
15607
15608 int net_os_send_hang_message_reason(struct net_device *dev, const char *string_num)
15609 {
15610 dhd_info_t *dhd = NULL;
15611 dhd_pub_t *dhdp = NULL;
15612 int reason;
15613
15614 dhd = DHD_DEV_INFO(dev);
15615 if (dhd) {
15616 dhdp = &dhd->pub;
15617 }
15618
15619 if (!dhd || !dhdp) {
15620 return 0;
15621 }
15622
15623 reason = bcm_strtoul(string_num, NULL, 0);
15624 DHD_INFO(("%s: Enter, reason=0x%x\n", __FUNCTION__, reason));
15625
15626 if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
15627 reason = 0;
15628 }
15629
15630 dhdp->hang_reason = reason;
15631
15632 return net_os_send_hang_message(dev);
15633 }
15634 #endif /* OEM_ANDROID */
15635
15636 int dhd_net_wifi_platform_set_power(struct net_device *dev, bool on, unsigned long delay_msec)
15637 {
15638 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15639 return wifi_platform_set_power(dhd->adapter, on, delay_msec);
15640 }
15641
15642 bool dhd_force_country_change(struct net_device *dev)
15643 {
15644 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15645
15646 if (dhd && dhd->pub.up)
15647 return dhd->pub.force_country_change;
15648 return FALSE;
15649 }
15650
15651 void dhd_get_customized_country_code(struct net_device *dev, char *country_iso_code,
15652 wl_country_t *cspec)
15653 {
15654 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15655 #if defined(DHD_BLOB_EXISTENCE_CHECK)
15656 if (!dhd->pub.is_blob)
15657 #endif /* DHD_BLOB_EXISTENCE_CHECK */
15658 {
15659 #if defined(CUSTOM_COUNTRY_CODE)
15660 get_customized_country_code(dhd->adapter, country_iso_code, cspec,
15661 dhd->pub.dhd_cflags);
15662 #else
15663 get_customized_country_code(dhd->adapter, country_iso_code, cspec);
15664 #endif /* CUSTOM_COUNTRY_CODE */
15665 }
15666 #if defined(DHD_BLOB_EXISTENCE_CHECK) && !defined(CUSTOM_COUNTRY_CODE)
15667 else {
15668 /* Replace the ccode to XZ if ccode is undefined country */
15669 if (strncmp(country_iso_code, "", WLC_CNTRY_BUF_SZ) == 0) {
15670 strlcpy(country_iso_code, "XZ", WLC_CNTRY_BUF_SZ);
15671 strlcpy(cspec->country_abbrev, country_iso_code, WLC_CNTRY_BUF_SZ);
15672 strlcpy(cspec->ccode, country_iso_code, WLC_CNTRY_BUF_SZ);
15673 DHD_ERROR(("%s: ccode change to %s\n", __FUNCTION__, country_iso_code));
15674 }
15675 }
15676 #endif /* DHD_BLOB_EXISTENCE_CHECK && !CUSTOM_COUNTRY_CODE */
15677
15678 BCM_REFERENCE(dhd);
15679 }
15680 void dhd_bus_country_set(struct net_device *dev, wl_country_t *cspec, bool notify)
15681 {
15682 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15683 #ifdef WL_CFG80211
15684 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
15685 #endif // endif
15686
15687 if (dhd && dhd->pub.up) {
15688 dhd->pub.force_country_change = FALSE;
15689 memcpy(&dhd->pub.dhd_cspec, cspec, sizeof(wl_country_t));
15690 #ifdef WL_CFG80211
15691 wl_update_wiphybands(cfg, notify);
15692 #endif // endif
15693 }
15694 }
15695
15696 void dhd_bus_band_set(struct net_device *dev, uint band)
15697 {
15698 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15699 #ifdef WL_CFG80211
15700 struct bcm_cfg80211 *cfg = wl_get_cfg(dev);
15701 #endif // endif
15702 if (dhd && dhd->pub.up) {
15703 #ifdef WL_CFG80211
15704 wl_update_wiphybands(cfg, true);
15705 #endif // endif
15706 }
15707 }
15708
15709 int dhd_net_set_fw_path(struct net_device *dev, char *fw)
15710 {
15711 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15712
15713 if (!fw || fw[0] == '\0')
15714 return -EINVAL;
15715
15716 strncpy(dhd->fw_path, fw, sizeof(dhd->fw_path) - 1);
15717 dhd->fw_path[sizeof(dhd->fw_path)-1] = '\0';
15718
15719 #if defined(OEM_ANDROID) && defined(SOFTAP)
15720 if (strstr(fw, "apsta") != NULL) {
15721 DHD_INFO(("GOT APSTA FIRMWARE\n"));
15722 ap_fw_loaded = TRUE;
15723 } else {
15724 DHD_INFO(("GOT STA FIRMWARE\n"));
15725 ap_fw_loaded = FALSE;
15726 }
15727 #endif /* defined(OEM_ANDROID) && defined(SOFTAP) */
15728 return 0;
15729 }
15730
15731 void dhd_net_if_lock(struct net_device *dev)
15732 {
15733 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15734 dhd_net_if_lock_local(dhd);
15735 }
15736
15737 void dhd_net_if_unlock(struct net_device *dev)
15738 {
15739 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15740 dhd_net_if_unlock_local(dhd);
15741 }
15742
15743 static void dhd_net_if_lock_local(dhd_info_t *dhd)
15744 {
15745 #if defined(OEM_ANDROID)
15746 if (dhd)
15747 mutex_lock(&dhd->dhd_net_if_mutex);
15748 #endif // endif
15749 }
15750
15751 static void dhd_net_if_unlock_local(dhd_info_t *dhd)
15752 {
15753 #if defined(OEM_ANDROID)
15754 if (dhd)
15755 mutex_unlock(&dhd->dhd_net_if_mutex);
15756 #endif // endif
15757 }
15758
15759 static void dhd_suspend_lock(dhd_pub_t *pub)
15760 {
15761 #if defined(OEM_ANDROID)
15762 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15763 if (dhd)
15764 mutex_lock(&dhd->dhd_suspend_mutex);
15765 #endif // endif
15766 }
15767
15768 static void dhd_suspend_unlock(dhd_pub_t *pub)
15769 {
15770 #if defined(OEM_ANDROID)
15771 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15772 if (dhd)
15773 mutex_unlock(&dhd->dhd_suspend_mutex);
15774 #endif // endif
15775 }
15776
15777 unsigned long dhd_os_general_spin_lock(dhd_pub_t *pub)
15778 {
15779 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15780 unsigned long flags = 0;
15781
15782 if (dhd)
15783 spin_lock_irqsave(&dhd->dhd_lock, flags);
15784
15785 return flags;
15786 }
15787
15788 void dhd_os_general_spin_unlock(dhd_pub_t *pub, unsigned long flags)
15789 {
15790 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
15791
15792 if (dhd)
15793 spin_unlock_irqrestore(&dhd->dhd_lock, flags);
15794 }
15795
15796 /* Linux specific multipurpose spinlock API */
15797 void *
15798 dhd_os_spin_lock_init(osl_t *osh)
15799 {
	/*
	 * Add 4 bytes since sizeof(spinlock_t) could be 0 if CONFIG_SMP and
	 * CONFIG_DEBUG_SPINLOCK are not defined, which results in kernel
	 * asserts in internal builds.
	 */
15803 spinlock_t * lock = MALLOC(osh, sizeof(spinlock_t) + 4);
15804 if (lock)
15805 spin_lock_init(lock);
15806 return ((void *)lock);
15807 }
15808 void
15809 dhd_os_spin_lock_deinit(osl_t *osh, void *lock)
15810 {
15811 if (lock)
15812 MFREE(osh, lock, sizeof(spinlock_t) + 4);
15813 }
15814 unsigned long
15815 dhd_os_spin_lock(void *lock)
15816 {
15817 unsigned long flags = 0;
15818
15819 if (lock)
15820 spin_lock_irqsave((spinlock_t *)lock, flags);
15821
15822 return flags;
15823 }
15824 void
15825 dhd_os_spin_unlock(void *lock, unsigned long flags)
15826 {
15827 if (lock)
15828 spin_unlock_irqrestore((spinlock_t *)lock, flags);
15829 }
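
/*
 * Illustrative usage sketch (not part of the driver build): lifecycle of the
 * multipurpose spinlock API above. The critical section must not sleep,
 * since interrupts are disabled between lock and unlock. The guard macro and
 * function name are hypothetical.
 */
#ifdef DHD_SPINLOCK_USAGE_EXAMPLE
static void
example_spinlock_usage(osl_t *osh)
{
	void *lock = dhd_os_spin_lock_init(osh);
	unsigned long flags;

	if (!lock) {
		return;
	}
	flags = dhd_os_spin_lock(lock);
	/* ... critical section: no sleeping calls here ... */
	dhd_os_spin_unlock(lock, flags);
	dhd_os_spin_lock_deinit(osh, lock);
}
#endif /* DHD_SPINLOCK_USAGE_EXAMPLE */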
15830
15831 void *
15832 dhd_os_dbgring_lock_init(osl_t *osh)
15833 {
15834 struct mutex *mtx = NULL;
15835
15836 mtx = MALLOCZ(osh, sizeof(*mtx));
15837 if (mtx)
15838 mutex_init(mtx);
15839
15840 return mtx;
15841 }
15842
15843 void
15844 dhd_os_dbgring_lock_deinit(osl_t *osh, void *mtx)
15845 {
15846 if (mtx) {
15847 mutex_destroy(mtx);
15848 MFREE(osh, mtx, sizeof(struct mutex));
15849 }
15850 }
15851
15852 static int
15853 dhd_get_pend_8021x_cnt(dhd_info_t *dhd)
15854 {
15855 return (atomic_read(&dhd->pend_8021x_cnt));
15856 }
15857
15858 #define MAX_WAIT_FOR_8021X_TX 100
15859
15860 int
15861 dhd_wait_pend8021x(struct net_device *dev)
15862 {
15863 dhd_info_t *dhd = DHD_DEV_INFO(dev);
15864 int timeout = msecs_to_jiffies(10);
15865 int ntimes = MAX_WAIT_FOR_8021X_TX;
15866 int pend = dhd_get_pend_8021x_cnt(dhd);
15867
15868 while (ntimes && pend) {
15869 if (pend) {
15870 set_current_state(TASK_INTERRUPTIBLE);
15871 DHD_PERIM_UNLOCK(&dhd->pub);
15872 schedule_timeout(timeout);
15873 DHD_PERIM_LOCK(&dhd->pub);
15874 set_current_state(TASK_RUNNING);
15875 ntimes--;
15876 }
15877 pend = dhd_get_pend_8021x_cnt(dhd);
15878 }
	if (ntimes == 0) {
15881 atomic_set(&dhd->pend_8021x_cnt, 0);
15882 DHD_ERROR(("%s: TIMEOUT\n", __FUNCTION__));
15883 }
15884 return pend;
15885 }
15886
15887 #if defined(DHD_DEBUG)
15888 int write_file(const char * file_name, uint32 flags, uint8 *buf, int size)
15889 {
15890 int ret = 0;
15891 struct file *fp = NULL;
15892 loff_t pos = 0;
15893 /* change to KERNEL_DS address limit */
15894 #if defined(KERNEL_DS) && defined(USER_DS)
15895 mm_segment_t old_fs;
15896 old_fs = get_fs();
15897 set_fs(KERNEL_DS);
15898 #endif /* KERNEL_DS && USER_DS */
15899 /* open file to write */
15900 fp = filp_open(file_name, flags, 0664);
15901 if (IS_ERR(fp)) {
15902 DHD_ERROR(("open file error, err = %ld\n", PTR_ERR(fp)));
15903 goto exit;
15904 }
15905
15906 /* Write buf to file */
15907 ret = vfs_write(fp, buf, size, &pos);
15908 if (ret < 0) {
15909 DHD_ERROR(("write file error, err = %d\n", ret));
15910 goto exit;
15911 }
15912
15913 /* Sync file from filesystem to physical media */
15914 ret = vfs_fsync(fp, 0);
15915 if (ret < 0) {
15916 DHD_ERROR(("sync file error, error = %d\n", ret));
15917 goto exit;
15918 }
15919 ret = BCME_OK;
15920
15921 exit:
15922 /* close file before return */
15923 if (!IS_ERR(fp))
15924 filp_close(fp, current->files);
15925
15926 /* restore previous address limit */
15927 #if defined(KERNEL_DS) && defined(USER_DS)
15928 set_fs(old_fs);
15929 #endif /* KERNEL_DS && USER_DS */
15930 return ret;
15931 }
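
/*
 * Illustrative usage sketch (not part of the driver build): write_file()
 * above takes standard O_* open flags; O_TRUNC rewrites an existing file,
 * O_APPEND appends. The guard macro, path and function name are hypothetical.
 */
#ifdef DHD_WRITE_FILE_EXAMPLE
static int
example_dump_buffer(uint8 *buf, int size)
{
	return write_file("/tmp/dhd_example.bin",
		O_CREAT | O_WRONLY | O_TRUNC, buf, size);
}
#endif /* DHD_WRITE_FILE_EXAMPLE */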
15932 #endif // endif
15933
15934 #ifdef DHD_DEBUG
15935 static void
15936 dhd_convert_memdump_type_to_str(uint32 type, char *buf, int substr_type)
15937 {
15938 char *type_str = NULL;
15939
15940 switch (type) {
15941 case DUMP_TYPE_RESUMED_ON_TIMEOUT:
15942 type_str = "resumed_on_timeout";
15943 break;
15944 case DUMP_TYPE_D3_ACK_TIMEOUT:
15945 type_str = "D3_ACK_timeout";
15946 break;
15947 case DUMP_TYPE_DONGLE_TRAP:
15948 type_str = "Dongle_Trap";
15949 break;
15950 case DUMP_TYPE_MEMORY_CORRUPTION:
15951 type_str = "Memory_Corruption";
15952 break;
15953 case DUMP_TYPE_PKTID_AUDIT_FAILURE:
15954 type_str = "PKTID_AUDIT_Fail";
15955 break;
15956 case DUMP_TYPE_PKTID_INVALID:
15957 type_str = "PKTID_INVALID";
15958 break;
15959 case DUMP_TYPE_SCAN_TIMEOUT:
15960 type_str = "SCAN_timeout";
15961 break;
15962 case DUMP_TYPE_SCAN_BUSY:
15963 type_str = "SCAN_Busy";
15964 break;
15965 case DUMP_TYPE_BY_SYSDUMP:
15966 if (substr_type == CMD_UNWANTED) {
15967 type_str = "BY_SYSDUMP_FORUSER_unwanted";
15968 } else if (substr_type == CMD_DISCONNECTED) {
15969 type_str = "BY_SYSDUMP_FORUSER_disconnected";
15970 } else {
15971 type_str = "BY_SYSDUMP_FORUSER";
15972 }
15973 break;
15974 case DUMP_TYPE_BY_LIVELOCK:
15975 type_str = "BY_LIVELOCK";
15976 break;
15977 case DUMP_TYPE_AP_LINKUP_FAILURE:
15978 type_str = "BY_AP_LINK_FAILURE";
15979 break;
15980 case DUMP_TYPE_AP_ABNORMAL_ACCESS:
15981 type_str = "INVALID_ACCESS";
15982 break;
15983 case DUMP_TYPE_RESUMED_ON_TIMEOUT_RX:
15984 type_str = "ERROR_RX_TIMED_OUT";
15985 break;
15986 case DUMP_TYPE_RESUMED_ON_TIMEOUT_TX:
15987 type_str = "ERROR_TX_TIMED_OUT";
15988 break;
15989 case DUMP_TYPE_CFG_VENDOR_TRIGGERED:
15990 type_str = "CFG_VENDOR_TRIGGERED";
15991 break;
15992 case DUMP_TYPE_RESUMED_ON_INVALID_RING_RDWR:
15993 type_str = "BY_INVALID_RING_RDWR";
15994 break;
15995 case DUMP_TYPE_IFACE_OP_FAILURE:
15996 type_str = "BY_IFACE_OP_FAILURE";
15997 break;
15998 case DUMP_TYPE_TRANS_ID_MISMATCH:
15999 type_str = "BY_TRANS_ID_MISMATCH";
16000 break;
16001 #ifdef DEBUG_DNGL_INIT_FAIL
16002 case DUMP_TYPE_DONGLE_INIT_FAILURE:
16003 type_str = "DONGLE_INIT_FAIL";
16004 break;
16005 #endif /* DEBUG_DNGL_INIT_FAIL */
16006 #ifdef SUPPORT_LINKDOWN_RECOVERY
16007 case DUMP_TYPE_READ_SHM_FAIL:
16008 type_str = "READ_SHM_FAIL";
16009 break;
16010 #endif /* SUPPORT_LINKDOWN_RECOVERY */
16011 case DUMP_TYPE_DONGLE_HOST_EVENT:
16012 type_str = "BY_DONGLE_HOST_EVENT";
16013 break;
16014 case DUMP_TYPE_SMMU_FAULT:
16015 type_str = "SMMU_FAULT";
16016 break;
16017 case DUMP_TYPE_BY_USER:
16018 type_str = "BY_USER";
16019 break;
16020 #ifdef DHD_ERPOM
16021 case DUMP_TYPE_DUE_TO_BT:
16022 type_str = "DUE_TO_BT";
16023 break;
16024 #endif /* DHD_ERPOM */
16025 case DUMP_TYPE_LOGSET_BEYOND_RANGE:
16026 type_str = "LOGSET_BEYOND_RANGE";
16027 break;
16028 case DUMP_TYPE_CTO_RECOVERY:
16029 type_str = "CTO_RECOVERY";
16030 break;
16031 case DUMP_TYPE_SEQUENTIAL_PRIVCMD_ERROR:
16032 type_str = "SEQUENTIAL_PRIVCMD_ERROR";
16033 break;
16034 case DUMP_TYPE_PROXD_TIMEOUT:
16035 type_str = "PROXD_TIMEOUT";
16036 break;
16037 case DUMP_TYPE_PKTID_POOL_DEPLETED:
16038 type_str = "PKTID_POOL_DEPLETED";
16039 break;
16040 default:
16041 type_str = "Unknown_type";
16042 break;
16043 }
16044
16045 strncpy(buf, type_str, strlen(type_str));
16046 buf[strlen(type_str)] = 0;
16047 }
16048
16049 void
16050 dhd_get_memdump_filename(struct net_device *ndev, char *memdump_path, int len, char *fname)
16051 {
16052 char memdump_type[32];
16053 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(ndev);
16054 dhd_pub_t *dhdp = &dhd->pub;
16055
16056 /* Init file name */
16057 memset(memdump_path, 0, len);
16058 memset(memdump_type, 0, sizeof(memdump_type));
16059 dhd_convert_memdump_type_to_str(dhdp->memdump_type, memdump_type, dhdp->debug_dump_subcmd);
16060 clear_debug_dump_time(dhdp->debug_dump_time_str);
16061 get_debug_dump_time(dhdp->debug_dump_time_str);
	/* All customer variants use the same dump path format */
	snprintf(memdump_path, len, "%s%s_%s_" "%s",
		DHD_COMMON_DUMP_PATH, fname, memdump_type, dhdp->debug_dump_time_str);
16078 if (strstr(fname, "sssr_dump")) {
16079 DHD_SSSR_PRINT_FILEPATH(dhdp, memdump_path);
16080 } else {
16081 DHD_ERROR(("%s: file_path = %s%s\n", __FUNCTION__,
16082 memdump_path, FILE_NAME_HAL_TAG));
16083 }
16084 }
16085
16086 int
16087 write_dump_to_file(dhd_pub_t *dhd, uint8 *buf, int size, char *fname)
16088 {
16089 int ret = 0;
16090 char memdump_path[128];
16091 char memdump_type[32];
16092 uint32 file_mode;
16093
16094 /* Init file name */
16095 memset(memdump_path, 0, sizeof(memdump_path));
16096 memset(memdump_type, 0, sizeof(memdump_type));
16097 dhd_convert_memdump_type_to_str(dhd->memdump_type, memdump_type, dhd->debug_dump_subcmd);
16098 clear_debug_dump_time(dhd->debug_dump_time_str);
16099 get_debug_dump_time(dhd->debug_dump_time_str);
16100 #ifdef CUSTOMER_HW4_DEBUG
16101 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16102 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16103 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16104 #elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
16105 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16106 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16107 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16108 #elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
16109 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16110 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16111 file_mode = O_CREAT | O_WRONLY;
16112 #elif defined(OEM_ANDROID)
16113 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16114 "/root/", fname, memdump_type, dhd->debug_dump_time_str);
16115 /* Extra flags O_DIRECT and O_SYNC are required for Brix Android, as we are
16116 * calling BUG_ON immediately after collecting the socram dump.
16117 * So the file write operation should directly write the contents into the
16118 * file instead of caching it. O_TRUNC flag ensures that file will be re-written
16119 * instead of appending.
16120 */
16121 file_mode = O_CREAT | O_WRONLY | O_SYNC;
16122 {
16123 struct file *fp = filp_open(memdump_path, file_mode, 0664);
		/* Check if it is a live Brix image; if the path cannot be opened, fall back to /tmp/ */
16125 if (IS_ERR(fp)) {
16126 DHD_ERROR(("open file %s, try /tmp/\n", memdump_path));
16127 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16128 "/tmp/", fname, memdump_type, dhd->debug_dump_time_str);
16129 } else {
16130 filp_close(fp, NULL);
16131 }
16132 }
16133 #else
16134 snprintf(memdump_path, sizeof(memdump_path), "%s%s_%s_" "%s",
16135 DHD_COMMON_DUMP_PATH, fname, memdump_type, dhd->debug_dump_time_str);
16136 file_mode = O_CREAT | O_WRONLY;
16137 #endif /* CUSTOMER_HW4_DEBUG */
16138
16139 /* print SOCRAM dump file path */
16140 DHD_ERROR(("%s: file_path = %s\n", __FUNCTION__, memdump_path));
16141
16142 #ifdef DHD_LOG_DUMP
16143 dhd_print_buf_addr(dhd, "write_dump_to_file", buf, size);
16144 #endif /* DHD_LOG_DUMP */
16145
16146 /* Write file */
16147 ret = write_file(memdump_path, file_mode, buf, size);
16148
16149 #ifdef DHD_DUMP_MNGR
16150 if (ret == BCME_OK) {
16151 dhd_dump_file_manage_enqueue(dhd, memdump_path, fname);
16152 }
16153 #endif /* DHD_DUMP_MNGR */
16154
16155 return ret;
16156 }
16157 #endif /* DHD_DEBUG */
16158
16159 int dhd_os_wake_lock_timeout(dhd_pub_t *pub)
16160 {
16161 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16162 unsigned long flags;
16163 int ret = 0;
16164
16165 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16166 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16167 ret = dhd->wakelock_rx_timeout_enable > dhd->wakelock_ctrl_timeout_enable ?
16168 dhd->wakelock_rx_timeout_enable : dhd->wakelock_ctrl_timeout_enable;
16169 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16170 if (dhd->wakelock_rx_timeout_enable)
16171 wake_lock_timeout(&dhd->wl_rxwake,
16172 msecs_to_jiffies(dhd->wakelock_rx_timeout_enable));
16173 if (dhd->wakelock_ctrl_timeout_enable)
16174 wake_lock_timeout(&dhd->wl_ctrlwake,
16175 msecs_to_jiffies(dhd->wakelock_ctrl_timeout_enable));
16176 #endif // endif
16177 dhd->wakelock_rx_timeout_enable = 0;
16178 dhd->wakelock_ctrl_timeout_enable = 0;
16179 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16180 }
16181 return ret;
16182 }
16183
16184 int net_os_wake_lock_timeout(struct net_device *dev)
16185 {
16186 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16187 int ret = 0;
16188
16189 if (dhd)
16190 ret = dhd_os_wake_lock_timeout(&dhd->pub);
16191 return ret;
16192 }
16193
16194 int dhd_os_wake_lock_rx_timeout_enable(dhd_pub_t *pub, int val)
16195 {
16196 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16197 unsigned long flags;
16198
16199 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16200 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16201 if (val > dhd->wakelock_rx_timeout_enable)
16202 dhd->wakelock_rx_timeout_enable = val;
16203 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16204 }
16205 return 0;
16206 }
16207
16208 int dhd_os_wake_lock_ctrl_timeout_enable(dhd_pub_t *pub, int val)
16209 {
16210 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16211 unsigned long flags;
16212
16213 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16214 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16215 if (val > dhd->wakelock_ctrl_timeout_enable)
16216 dhd->wakelock_ctrl_timeout_enable = val;
16217 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16218 }
16219 return 0;
16220 }
16221
16222 int dhd_os_wake_lock_ctrl_timeout_cancel(dhd_pub_t *pub)
16223 {
16224 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16225 unsigned long flags;
16226
16227 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16228 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16229 dhd->wakelock_ctrl_timeout_enable = 0;
16230 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16231 if (wake_lock_active(&dhd->wl_ctrlwake))
16232 wake_unlock(&dhd->wl_ctrlwake);
16233 #endif // endif
16234 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16235 }
16236 return 0;
16237 }
16238
16239 int net_os_wake_lock_rx_timeout_enable(struct net_device *dev, int val)
16240 {
16241 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16242 int ret = 0;
16243
16244 if (dhd)
16245 ret = dhd_os_wake_lock_rx_timeout_enable(&dhd->pub, val);
16246 return ret;
16247 }
16248
16249 int net_os_wake_lock_ctrl_timeout_enable(struct net_device *dev, int val)
16250 {
16251 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16252 int ret = 0;
16253
16254 if (dhd)
16255 ret = dhd_os_wake_lock_ctrl_timeout_enable(&dhd->pub, val);
16256 return ret;
16257 }
16258
16259 #if defined(DHD_TRACE_WAKE_LOCK)
16260 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16261 #include <linux/hashtable.h>
16262 #else
16263 #include <linux/hash.h>
16264 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16265
16266 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16267 /* Define 2^5 = 32 bucket size hash table */
16268 DEFINE_HASHTABLE(wklock_history, 5);
16269 #else
16270 /* Define 2^5 = 32 bucket size hash table */
16271 struct hlist_head wklock_history[32] = { [0 ... 31] = HLIST_HEAD_INIT };
16272 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16273
16274 atomic_t trace_wklock_onoff;
16275 typedef enum dhd_wklock_type {
16276 DHD_WAKE_LOCK,
16277 DHD_WAKE_UNLOCK,
16278 DHD_WAIVE_LOCK,
16279 DHD_RESTORE_LOCK
16280 } dhd_wklock_t;
16281
16282 struct wk_trace_record {
	unsigned long addr;		/* caller's return address */
	dhd_wklock_t lock_type;		/* type of wakelock operation recorded */
	unsigned long long counter;	/* hit count, or a snapshot of
					 * wakelock_counter for waive/restore
					 */
16286 struct hlist_node wklock_node; /* hash node */
16287 };
16288
16289 static struct wk_trace_record *find_wklock_entry(unsigned long addr)
16290 {
16291 struct wk_trace_record *wklock_info;
16292 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16293 hash_for_each_possible(wklock_history, wklock_info, wklock_node, addr)
16294 #else
16295 struct hlist_node *entry;
16296 int index = hash_long(addr, ilog2(ARRAY_SIZE(wklock_history)));
16297 hlist_for_each_entry(wklock_info, entry, &wklock_history[index], wklock_node)
16298 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16299 {
16300 if (wklock_info->addr == addr) {
16301 return wklock_info;
16302 }
16303 }
16304 return NULL;
16305 }
16306
16307 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
#define HASH_ADD(hashtable, node, key) \
	do { \
		hash_add(hashtable, node, key); \
	} while (0)
#else
#define HASH_ADD(hashtable, node, key) \
	do { \
		int index = hash_long(key, ilog2(ARRAY_SIZE(hashtable))); \
		hlist_add_head(node, &hashtable[index]); \
	} while (0)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16319
#define STORE_WKLOCK_RECORD(wklock_type) \
	do { \
		struct wk_trace_record *wklock_info = NULL; \
		unsigned long func_addr = (unsigned long)__builtin_return_address(0); \
		wklock_info = find_wklock_entry(func_addr); \
		if (wklock_info) { \
			if (wklock_type == DHD_WAIVE_LOCK || wklock_type == DHD_RESTORE_LOCK) { \
				wklock_info->counter = dhd->wakelock_counter; \
			} else { \
				wklock_info->counter++; \
			} \
		} else { \
			wklock_info = kzalloc(sizeof(*wklock_info), GFP_ATOMIC); \
			if (!wklock_info) { \
				printk(KERN_ERR "Can't allocate wk_trace_record\n"); \
			} else { \
				wklock_info->addr = func_addr; \
				wklock_info->lock_type = wklock_type; \
				if (wklock_type == DHD_WAIVE_LOCK || \
						wklock_type == DHD_RESTORE_LOCK) { \
					wklock_info->counter = dhd->wakelock_counter; \
				} else { \
					wklock_info->counter++; \
				} \
				HASH_ADD(wklock_history, &wklock_info->wklock_node, func_addr); \
			} \
		} \
	} while (0)
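/*
 * Usage sketch: STORE_WKLOCK_RECORD expects a local dhd_info_t pointer named
 * "dhd" to be in scope and wakelock_spinlock to be held; kzalloc is called
 * with GFP_ATOMIC, so the allocation is safe in that context.
 *
 *	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
 *	STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
 *	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
 */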
16348
16349 static inline void dhd_wk_lock_rec_dump(void)
16350 {
16351 int bkt;
16352 struct wk_trace_record *wklock_info;
16353
16354 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16355 hash_for_each(wklock_history, bkt, wklock_info, wklock_node)
16356 #else
16357 struct hlist_node *entry = NULL;
16358 int max_index = ARRAY_SIZE(wklock_history);
16359 for (bkt = 0; bkt < max_index; bkt++)
16360 hlist_for_each_entry(wklock_info, entry, &wklock_history[bkt], wklock_node)
16361 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16362 {
16363 switch (wklock_info->lock_type) {
16364 case DHD_WAKE_LOCK:
16365 printk("wakelock lock : %pS lock_counter : %llu \n",
16366 (void *)wklock_info->addr, wklock_info->counter);
16367 break;
16368 case DHD_WAKE_UNLOCK:
16369 printk("wakelock unlock : %pS, unlock_counter : %llu \n",
16370 (void *)wklock_info->addr, wklock_info->counter);
16371 break;
16372 case DHD_WAIVE_LOCK:
16373 printk("wakelock waive : %pS before_waive : %llu \n",
16374 (void *)wklock_info->addr, wklock_info->counter);
16375 break;
16376 case DHD_RESTORE_LOCK:
16377 printk("wakelock restore : %pS, after_waive : %llu \n",
16378 (void *)wklock_info->addr, wklock_info->counter);
16379 break;
16380 }
16381 }
16382 }
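/*
 * Example record emitted by dhd_wk_lock_rec_dump() for a DHD_WAKE_LOCK entry
 * (symbol and count are illustrative):
 *
 *	wakelock lock : dhd_os_wake_lock+0x48/0x120 lock_counter : 12
 */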
16383
16384 static void dhd_wk_lock_trace_init(struct dhd_info *dhd)
16385 {
16386 unsigned long flags;
16387 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
16388 int i;
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
16390
16391 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16392 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16393 hash_init(wklock_history);
16394 #else
16395 for (i = 0; i < ARRAY_SIZE(wklock_history); i++)
16396 INIT_HLIST_HEAD(&wklock_history[i]);
16397 #endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16398 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16399 atomic_set(&trace_wklock_onoff, 1);
16400 }
16401
16402 static void dhd_wk_lock_trace_deinit(struct dhd_info *dhd)
16403 {
16404 int bkt;
16405 struct wk_trace_record *wklock_info;
16406 struct hlist_node *tmp;
16407 unsigned long flags;
16408 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 7, 0))
16409 struct hlist_node *entry = NULL;
16410 int max_index = ARRAY_SIZE(wklock_history);
#endif /* KERNEL_VER < KERNEL_VERSION(3, 7, 0) */
16412
16413 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16414 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16415 hash_for_each_safe(wklock_history, bkt, tmp, wklock_info, wklock_node)
16416 #else
16417 for (bkt = 0; bkt < max_index; bkt++)
16418 hlist_for_each_entry_safe(wklock_info, entry, tmp,
16419 &wklock_history[bkt], wklock_node)
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16421 {
16422 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
16423 hash_del(&wklock_info->wklock_node);
16424 #else
16425 hlist_del_init(&wklock_info->wklock_node);
#endif /* KERNEL_VER >= KERNEL_VERSION(3, 7, 0) */
16427 kfree(wklock_info);
16428 }
16429 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16430 }
16431
16432 void dhd_wk_lock_stats_dump(dhd_pub_t *dhdp)
16433 {
16434 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
16435 unsigned long flags;
16436
	printk(KERN_ERR "DHD Printing wl_wake Lock/Unlock Record\n");
	spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
	dhd_wk_lock_rec_dump();
	spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
}
16443 #else
16444 #define STORE_WKLOCK_RECORD(wklock_type)
#endif /* DHD_TRACE_WAKE_LOCK */
16446
16447 int dhd_os_wake_lock(dhd_pub_t *pub)
16448 {
16449 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16450 unsigned long flags;
16451 int ret = 0;
16452
16453 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16454 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16455 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
16456 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16457 wake_lock(&dhd->wl_wifi);
16458 #elif defined(BCMSDIO)
16459 dhd_bus_dev_pm_stay_awake(pub);
16460 #endif // endif
16461 }
16462 #ifdef DHD_TRACE_WAKE_LOCK
16463 if (atomic_read(&trace_wklock_onoff)) {
16464 STORE_WKLOCK_RECORD(DHD_WAKE_LOCK);
16465 }
16466 #endif /* DHD_TRACE_WAKE_LOCK */
16467 dhd->wakelock_counter++;
16468 ret = dhd->wakelock_counter;
16469 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16470 }
16471
16472 return ret;
16473 }
16474
16475 void dhd_event_wake_lock(dhd_pub_t *pub)
16476 {
16477 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16478
16479 if (dhd) {
16480 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16481 wake_lock(&dhd->wl_evtwake);
16482 #elif defined(BCMSDIO)
16483 dhd_bus_dev_pm_stay_awake(pub);
16484 #endif // endif
16485 }
16486 }
16487
16488 void
16489 dhd_pm_wake_lock_timeout(dhd_pub_t *pub, int val)
16490 {
16491 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16492 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16493
16494 if (dhd) {
16495 wake_lock_timeout(&dhd->wl_pmwake, msecs_to_jiffies(val));
16496 }
16497 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16498 }
16499
16500 void
16501 dhd_txfl_wake_lock_timeout(dhd_pub_t *pub, int val)
16502 {
16503 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16504 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16505
16506 if (dhd) {
16507 wake_lock_timeout(&dhd->wl_txflwake, msecs_to_jiffies(val));
16508 }
16509 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16510 }
16511
16512 int net_os_wake_lock(struct net_device *dev)
16513 {
16514 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16515 int ret = 0;
16516
16517 if (dhd)
16518 ret = dhd_os_wake_lock(&dhd->pub);
16519 return ret;
16520 }
16521
16522 int dhd_os_wake_unlock(dhd_pub_t *pub)
16523 {
16524 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16525 unsigned long flags;
16526 int ret = 0;
16527
16528 dhd_os_wake_lock_timeout(pub);
16529 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16530 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16531
16532 if (dhd->wakelock_counter > 0) {
16533 dhd->wakelock_counter--;
16534 #ifdef DHD_TRACE_WAKE_LOCK
16535 if (atomic_read(&trace_wklock_onoff)) {
16536 STORE_WKLOCK_RECORD(DHD_WAKE_UNLOCK);
16537 }
16538 #endif /* DHD_TRACE_WAKE_LOCK */
16539 if (dhd->wakelock_counter == 0 && !dhd->waive_wakelock) {
16540 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16541 wake_unlock(&dhd->wl_wifi);
16542 #elif defined(BCMSDIO)
16543 dhd_bus_dev_pm_relax(pub);
16544 #endif // endif
16545 }
16546 ret = dhd->wakelock_counter;
16547 }
16548 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16549 }
16550 return ret;
16551 }
16552
16553 void dhd_event_wake_unlock(dhd_pub_t *pub)
16554 {
16555 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16556
16557 if (dhd) {
16558 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16559 wake_unlock(&dhd->wl_evtwake);
16560 #elif defined(BCMSDIO)
16561 dhd_bus_dev_pm_relax(pub);
16562 #endif // endif
16563 }
16564 }
16565
16566 void dhd_pm_wake_unlock(dhd_pub_t *pub)
16567 {
16568 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16569 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16570
16571 if (dhd) {
16572 /* if wl_pmwake is active, unlock it */
16573 if (wake_lock_active(&dhd->wl_pmwake)) {
16574 wake_unlock(&dhd->wl_pmwake);
16575 }
16576 }
16577 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16578 }
16579
16580 void dhd_txfl_wake_unlock(dhd_pub_t *pub)
16581 {
16582 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16583 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16584
16585 if (dhd) {
16586 /* if wl_txflwake is active, unlock it */
16587 if (wake_lock_active(&dhd->wl_txflwake)) {
16588 wake_unlock(&dhd->wl_txflwake);
16589 }
16590 }
16591 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16592 }
16593
16594 int dhd_os_check_wakelock(dhd_pub_t *pub)
16595 {
16596 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
16597 dhd_info_t *dhd;
16598
16599 if (!pub)
16600 return 0;
16601 dhd = (dhd_info_t *)(pub->info);
16602 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */
16603
16604 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16605 /* Indicate to the SD Host to avoid going to suspend if internal locks are up */
16606 if (dhd && (wake_lock_active(&dhd->wl_wifi) ||
16607 (wake_lock_active(&dhd->wl_wdwake))))
16608 return 1;
16609 #elif defined(BCMSDIO)
16610 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub))
16611 return 1;
16612 #endif // endif
16613 return 0;
16614 }
16615
16616 int
16617 dhd_os_check_wakelock_all(dhd_pub_t *pub)
16618 {
16619 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK) || defined(BCMSDIO)
16620 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16621 int l1, l2, l3, l4, l7, l8, l9;
16622 int l5 = 0, l6 = 0;
16623 int c, lock_active;
16624 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16625 dhd_info_t *dhd;
16626
16627 if (!pub) {
16628 return 0;
16629 }
16630 dhd = (dhd_info_t *)(pub->info);
16631 if (!dhd) {
16632 return 0;
16633 }
16634 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK || BCMSDIO */
16635
16636 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16637 c = dhd->wakelock_counter;
16638 l1 = wake_lock_active(&dhd->wl_wifi);
16639 l2 = wake_lock_active(&dhd->wl_wdwake);
16640 l3 = wake_lock_active(&dhd->wl_rxwake);
16641 l4 = wake_lock_active(&dhd->wl_ctrlwake);
16642 l7 = wake_lock_active(&dhd->wl_evtwake);
16643 #ifdef BCMPCIE_OOB_HOST_WAKE
16644 l5 = wake_lock_active(&dhd->wl_intrwake);
16645 #endif /* BCMPCIE_OOB_HOST_WAKE */
16646 #ifdef DHD_USE_SCAN_WAKELOCK
16647 l6 = wake_lock_active(&dhd->wl_scanwake);
16648 #endif /* DHD_USE_SCAN_WAKELOCK */
16649 l8 = wake_lock_active(&dhd->wl_pmwake);
16650 l9 = wake_lock_active(&dhd->wl_txflwake);
16651 lock_active = (l1 || l2 || l3 || l4 || l5 || l6 || l7 || l8 || l9);
16652
16653 /* Indicate to the Host to avoid going to suspend if internal locks are up */
16654 if (lock_active) {
16655 DHD_ERROR(("%s wakelock c-%d wl-%d wd-%d rx-%d "
16656 "ctl-%d intr-%d scan-%d evt-%d, pm-%d, txfl-%d\n",
16657 __FUNCTION__, c, l1, l2, l3, l4, l5, l6, l7, l8, l9));
16658 return 1;
16659 }
16660 #elif defined(BCMSDIO)
16661 if (dhd && (dhd->wakelock_counter > 0) && dhd_bus_dev_pm_enabled(pub)) {
16662 return 1;
16663 }
16664 #endif /* defined(BCMSDIO) */
16665 return 0;
16666 }
16667
16668 int net_os_wake_unlock(struct net_device *dev)
16669 {
16670 dhd_info_t *dhd = DHD_DEV_INFO(dev);
16671 int ret = 0;
16672
16673 if (dhd)
16674 ret = dhd_os_wake_unlock(&dhd->pub);
16675 return ret;
16676 }
16677
16678 int dhd_os_wd_wake_lock(dhd_pub_t *pub)
16679 {
16680 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16681 unsigned long flags;
16682 int ret = 0;
16683
16684 if (dhd) {
16685 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16686 if (dhd->wakelock_wd_counter == 0 && !dhd->waive_wakelock) {
16687 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
			/* first watchdog wakelock reference: acquire the wakelock now */
16689 wake_lock(&dhd->wl_wdwake);
16690 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16691 }
16692 dhd->wakelock_wd_counter++;
16693 ret = dhd->wakelock_wd_counter;
16694 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16695 }
16696 return ret;
16697 }
16698
16699 int dhd_os_wd_wake_unlock(dhd_pub_t *pub)
16700 {
16701 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16702 unsigned long flags;
16703 int ret = 0;
16704
16705 if (dhd) {
16706 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16707 if (dhd->wakelock_wd_counter > 0) {
16708 dhd->wakelock_wd_counter = 0;
16709 if (!dhd->waive_wakelock) {
16710 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16711 wake_unlock(&dhd->wl_wdwake);
16712 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16713 }
16714 }
16715 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16716 }
16717 return ret;
16718 }
16719
16720 #ifdef BCMPCIE_OOB_HOST_WAKE
16721 void
16722 dhd_os_oob_irq_wake_lock_timeout(dhd_pub_t *pub, int val)
16723 {
16724 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16725 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16726
16727 if (dhd) {
16728 wake_lock_timeout(&dhd->wl_intrwake, msecs_to_jiffies(val));
16729 }
16730 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16731 }
16732
16733 void
16734 dhd_os_oob_irq_wake_unlock(dhd_pub_t *pub)
16735 {
16736 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16737 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16738
16739 if (dhd) {
16740 /* if wl_intrwake is active, unlock it */
16741 if (wake_lock_active(&dhd->wl_intrwake)) {
16742 wake_unlock(&dhd->wl_intrwake);
16743 }
16744 }
16745 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16746 }
16747 #endif /* BCMPCIE_OOB_HOST_WAKE */
16748
16749 #ifdef DHD_USE_SCAN_WAKELOCK
16750 void
16751 dhd_os_scan_wake_lock_timeout(dhd_pub_t *pub, int val)
16752 {
16753 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16754 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16755
16756 if (dhd) {
16757 wake_lock_timeout(&dhd->wl_scanwake, msecs_to_jiffies(val));
16758 }
16759 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16760 }
16761
16762 void
16763 dhd_os_scan_wake_unlock(dhd_pub_t *pub)
16764 {
16765 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16766 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16767
16768 if (dhd) {
16769 /* if wl_scanwake is active, unlock it */
16770 if (wake_lock_active(&dhd->wl_scanwake)) {
16771 wake_unlock(&dhd->wl_scanwake);
16772 }
16773 }
16774 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16775 }
16776 #endif /* DHD_USE_SCAN_WAKELOCK */
16777
/* Waive wakelocks for operations such as IOVARs in the suspend path. Must be
 * paired with a call to dhd_os_wake_lock_restore(). Returns the current
 * watchdog wakelock counter.
 */
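/*
 * Typical pairing (sketch; the surrounding suspend handler is hypothetical):
 *
 *	dhd_os_wake_lock_waive(pub);
 *	... issue IOVARs; intermediate lock/unlock calls only adjust the
 *	    counter while waive_wakelock is set ...
 *	dhd_os_wake_lock_restore(pub);
 */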
16781 int dhd_os_wake_lock_waive(dhd_pub_t *pub)
16782 {
16783 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16784 unsigned long flags;
16785 int ret = 0;
16786
16787 if (dhd && (dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT)) {
16788 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16789
		/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
16791 if (dhd->waive_wakelock == FALSE) {
16792 #ifdef DHD_TRACE_WAKE_LOCK
16793 if (atomic_read(&trace_wklock_onoff)) {
16794 STORE_WKLOCK_RECORD(DHD_WAIVE_LOCK);
16795 }
16796 #endif /* DHD_TRACE_WAKE_LOCK */
16797 /* record current lock status */
16798 dhd->wakelock_before_waive = dhd->wakelock_counter;
16799 dhd->waive_wakelock = TRUE;
16800 }
16801 ret = dhd->wakelock_wd_counter;
16802 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16803 }
16804 return ret;
16805 }
16806
16807 int dhd_os_wake_lock_restore(dhd_pub_t *pub)
16808 {
16809 dhd_info_t *dhd = (dhd_info_t *)(pub->info);
16810 unsigned long flags;
16811 int ret = 0;
16812
16813 if (!dhd)
16814 return 0;
16815 if ((dhd->dhd_state & DHD_ATTACH_STATE_WAKELOCKS_INIT) == 0)
16816 return 0;
16817
16818 spin_lock_irqsave(&dhd->wakelock_spinlock, flags);
16819
	/* dhd_os_wake_lock_waive/dhd_os_wake_lock_restore must be paired */
16821 if (!dhd->waive_wakelock)
16822 goto exit;
16823
16824 dhd->waive_wakelock = FALSE;
	/* If somebody acquired the wakelock between dhd_os_wake_lock_waive and
	 * dhd_os_wake_lock_restore, make up for it here by calling wake_lock or
	 * pm_stay_awake; likewise, if somebody released the lock in between,
	 * compensate by calling wake_unlock or pm_relax.
	 */
16829 #ifdef DHD_TRACE_WAKE_LOCK
16830 if (atomic_read(&trace_wklock_onoff)) {
16831 STORE_WKLOCK_RECORD(DHD_RESTORE_LOCK);
16832 }
16833 #endif /* DHD_TRACE_WAKE_LOCK */
16834
16835 if (dhd->wakelock_before_waive == 0 && dhd->wakelock_counter > 0) {
16836 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16837 wake_lock(&dhd->wl_wifi);
16838 #elif defined(BCMSDIO)
16839 dhd_bus_dev_pm_stay_awake(&dhd->pub);
16840 #endif // endif
16841 } else if (dhd->wakelock_before_waive > 0 && dhd->wakelock_counter == 0) {
16842 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16843 wake_unlock(&dhd->wl_wifi);
16844 #elif defined(BCMSDIO)
16845 dhd_bus_dev_pm_relax(&dhd->pub);
16846 #endif // endif
16847 }
16848 dhd->wakelock_before_waive = 0;
16849 exit:
16850 ret = dhd->wakelock_wd_counter;
16851 spin_unlock_irqrestore(&dhd->wakelock_spinlock, flags);
16852 return ret;
16853 }
16854
16855 void dhd_os_wake_lock_init(struct dhd_info *dhd)
16856 {
16857 DHD_TRACE(("%s: initialize wake_lock_counters\n", __FUNCTION__));
16858 dhd->wakelock_counter = 0;
16859 dhd->wakelock_rx_timeout_enable = 0;
16860 dhd->wakelock_ctrl_timeout_enable = 0;
16861 /* wakelocks prevent a system from going into a low power state */
16862 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16863 wake_lock_init(&dhd->wl_wifi, WAKE_LOCK_SUSPEND, "wlan_wake");
16864 wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
16865 wake_lock_init(&dhd->wl_ctrlwake, WAKE_LOCK_SUSPEND, "wlan_ctrl_wake");
16866 wake_lock_init(&dhd->wl_evtwake, WAKE_LOCK_SUSPEND, "wlan_evt_wake");
16867 wake_lock_init(&dhd->wl_pmwake, WAKE_LOCK_SUSPEND, "wlan_pm_wake");
16868 wake_lock_init(&dhd->wl_txflwake, WAKE_LOCK_SUSPEND, "wlan_txfl_wake");
16869 #ifdef BCMPCIE_OOB_HOST_WAKE
16870 wake_lock_init(&dhd->wl_intrwake, WAKE_LOCK_SUSPEND, "wlan_oob_irq_wake");
16871 #endif /* BCMPCIE_OOB_HOST_WAKE */
16872 #ifdef DHD_USE_SCAN_WAKELOCK
16873 wake_lock_init(&dhd->wl_scanwake, WAKE_LOCK_SUSPEND, "wlan_scan_wake");
16874 #endif /* DHD_USE_SCAN_WAKELOCK */
16875 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16876 #ifdef DHD_TRACE_WAKE_LOCK
16877 dhd_wk_lock_trace_init(dhd);
16878 #endif /* DHD_TRACE_WAKE_LOCK */
16879 }
16880
16881 void dhd_os_wake_lock_destroy(struct dhd_info *dhd)
16882 {
16883 DHD_TRACE(("%s: deinit wake_lock_counters\n", __FUNCTION__));
16884 #if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
16885 dhd->wakelock_counter = 0;
16886 dhd->wakelock_rx_timeout_enable = 0;
16887 dhd->wakelock_ctrl_timeout_enable = 0;
16888 wake_lock_destroy(&dhd->wl_wifi);
16889 wake_lock_destroy(&dhd->wl_rxwake);
16890 wake_lock_destroy(&dhd->wl_ctrlwake);
16891 wake_lock_destroy(&dhd->wl_evtwake);
16892 wake_lock_destroy(&dhd->wl_pmwake);
16893 wake_lock_destroy(&dhd->wl_txflwake);
16894 #ifdef BCMPCIE_OOB_HOST_WAKE
16895 wake_lock_destroy(&dhd->wl_intrwake);
16896 #endif /* BCMPCIE_OOB_HOST_WAKE */
16897 #ifdef DHD_USE_SCAN_WAKELOCK
16898 wake_lock_destroy(&dhd->wl_scanwake);
16899 #endif /* DHD_USE_SCAN_WAKELOCK */
16900 #ifdef DHD_TRACE_WAKE_LOCK
16901 dhd_wk_lock_trace_deinit(dhd);
16902 #endif /* DHD_TRACE_WAKE_LOCK */
16903 #endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */
16904 }
16905
16906 bool dhd_os_check_if_up(dhd_pub_t *pub)
16907 {
16908 if (!pub)
16909 return FALSE;
16910 return pub->up;
16911 }
16912
16913 #if defined(BCMSDIO) || defined(BCMPCIE)
/* Collect the firmware version, chip ID and chip revision into info_string */
16915 void dhd_set_version_info(dhd_pub_t *dhdp, char *fw)
16916 {
16917 int i;
16918
	i = snprintf(info_string, sizeof(info_string),
		" Driver: %s\n Firmware: %s ", EPI_VERSION_STR, fw);

	if (!dhdp)
		return;

	/* snprintf reports the would-be length; clamp before appending */
	if ((i < 0) || (i >= (int)sizeof(info_string)))
		return;

	i = snprintf(&info_string[i], sizeof(info_string) - i,
		"\n Chip: %x Rev %x Pkg %x", dhd_bus_chip_id(dhdp),
		dhd_bus_chiprev_id(dhdp), dhd_bus_chippkg_id(dhdp));
16928 }
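/*
 * The resulting info_string has the form (values filled in at runtime):
 *
 *	" Driver: <EPI_VERSION_STR>\n Firmware: <fw> \n Chip: <id> Rev <rev> Pkg <pkg>"
 */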
16929 #endif /* BCMSDIO || BCMPCIE */
16930 int dhd_ioctl_entry_local(struct net_device *net, wl_ioctl_t *ioc, int cmd)
16931 {
16932 int ifidx;
16933 int ret = 0;
16934 dhd_info_t *dhd = NULL;
16935
16936 if (!net || !DEV_PRIV(net)) {
16937 DHD_ERROR(("%s invalid parameter net %p dev_priv %p\n",
16938 __FUNCTION__, net, DEV_PRIV(net)));
16939 return -EINVAL;
16940 }
16941
16942 dhd = DHD_DEV_INFO(net);
16943 if (!dhd)
16944 return -EINVAL;
16945
16946 ifidx = dhd_net2idx(dhd, net);
16947 if (ifidx == DHD_BAD_IF) {
16948 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
16949 return -ENODEV;
16950 }
16951
16952 DHD_OS_WAKE_LOCK(&dhd->pub);
16953 DHD_PERIM_LOCK(&dhd->pub);
16954
16955 ret = dhd_wl_ioctl(&dhd->pub, ifidx, ioc, ioc->buf, ioc->len);
16956 dhd_check_hang(net, &dhd->pub, ret);
16957
16958 DHD_PERIM_UNLOCK(&dhd->pub);
16959 DHD_OS_WAKE_UNLOCK(&dhd->pub);
16960
16961 return ret;
16962 }
16963
16964 bool dhd_os_check_hang(dhd_pub_t *dhdp, int ifidx, int ret)
16965 {
16966 struct net_device *net;
16967
16968 net = dhd_idx2net(dhdp, ifidx);
	if (!net) {
		DHD_ERROR(("%s : Invalid index : %d\n", __FUNCTION__, ifidx));
		return FALSE;
	}
16973
16974 return dhd_check_hang(net, dhdp, ret);
16975 }
16976
16977 /* Return instance */
16978 int dhd_get_instance(dhd_pub_t *dhdp)
16979 {
16980 return dhdp->info->unit;
16981 }
16982
16983 #if defined(WL_CFG80211) && defined(SUPPORT_DEEP_SLEEP)
16984 #define MAX_TRY_CNT 5 /* Number of tries to disable deepsleep */
16985 int dhd_deepsleep(struct net_device *dev, int flag)
16986 {
16987 char iovbuf[20];
16988 uint powervar = 0;
16989 dhd_info_t *dhd;
16990 dhd_pub_t *dhdp;
16991 int cnt = 0;
16992 int ret = 0;
16993
16994 dhd = DHD_DEV_INFO(dev);
16995 dhdp = &dhd->pub;
16996
16997 switch (flag) {
16998 case 1 : /* Deepsleep on */
16999 DHD_ERROR(("[WiFi] Deepsleep On\n"));
17000 /* give some time to sysioc_work before deepsleep */
17001 OSL_SLEEP(200);
17002 #ifdef PKT_FILTER_SUPPORT
17003 /* disable pkt filter */
17004 dhd_enable_packet_filter(0, dhdp);
17005 #endif /* PKT_FILTER_SUPPORT */
17006 /* Disable MPC */
17007 powervar = 0;
17008 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17009 0, TRUE);
17010
17011 /* Enable Deepsleep */
17012 powervar = 1;
17013 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar, sizeof(powervar),
17014 NULL, 0, TRUE);
17015 break;
17016
17017 case 0: /* Deepsleep Off */
17018 DHD_ERROR(("[WiFi] Deepsleep Off\n"));
17019
17020 /* Disable Deepsleep */
17021 for (cnt = 0; cnt < MAX_TRY_CNT; cnt++) {
17022 powervar = 0;
17023 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17024 sizeof(powervar), NULL, 0, TRUE);
17025
17026 ret = dhd_iovar(dhdp, 0, "deepsleep", (char *)&powervar,
17027 sizeof(powervar), iovbuf, sizeof(iovbuf), FALSE);
17028 if (ret < 0) {
17029 DHD_ERROR(("the error of dhd deepsleep status"
17030 " ret value :%d\n", ret));
17031 } else {
17032 if (!(*(int *)iovbuf)) {
17033 DHD_ERROR(("deepsleep mode is 0,"
17034 " count: %d\n", cnt));
17035 break;
17036 }
17037 }
17038 }
17039
17040 /* Enable MPC */
17041 powervar = 1;
17042 ret = dhd_iovar(dhdp, 0, "mpc", (char *)&powervar, sizeof(powervar), NULL,
17043 0, TRUE);
17044 break;
17045 }
17046
17047 return 0;
17048 }
17049 #endif /* WL_CFG80211 && SUPPORT_DEEP_SLEEP */
17050
17051 #ifdef PROP_TXSTATUS
17052
17053 void dhd_wlfc_plat_init(void *dhd)
17054 {
17055 #ifdef USE_DYNAMIC_F2_BLKSIZE
17056 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
17057 #endif /* USE_DYNAMIC_F2_BLKSIZE */
17058 return;
17059 }
17060
17061 void dhd_wlfc_plat_deinit(void *dhd)
17062 {
17063 #ifdef USE_DYNAMIC_F2_BLKSIZE
17064 dhdsdio_func_blocksize((dhd_pub_t *)dhd, 2, sd_f2_blocksize);
17065 #endif /* USE_DYNAMIC_F2_BLKSIZE */
17066 return;
17067 }
17068
17069 bool dhd_wlfc_skip_fc(void * dhdp, uint8 idx)
17070 {
17071 #ifdef SKIP_WLFC_ON_CONCURRENT
17072
17073 #ifdef WL_CFG80211
17074 struct net_device * net = dhd_idx2net((dhd_pub_t *)dhdp, idx);
17075 if (net)
17076 /* enable flow control in vsdb mode */
17077 return !(wl_cfg80211_is_concurrent_mode(net));
17078 #else
17079 return TRUE; /* skip flow control */
17080 #endif /* WL_CFG80211 */
17081
17082 #else
17083 return FALSE;
17084 #endif /* SKIP_WLFC_ON_CONCURRENT */
17085 return FALSE;
17086 }
17087 #endif /* PROP_TXSTATUS */
17088
17089 #ifdef BCMDBGFS
17090 #include <linux/debugfs.h>
17091
17092 typedef struct dhd_dbgfs {
17093 struct dentry *debugfs_dir;
17094 struct dentry *debugfs_mem;
17095 dhd_pub_t *dhdp;
17096 uint32 size;
17097 } dhd_dbgfs_t;
17098
17099 dhd_dbgfs_t g_dbgfs;
17100
17101 extern uint32 dhd_readregl(void *bp, uint32 addr);
17102 extern uint32 dhd_writeregl(void *bp, uint32 addr, uint32 data);
17103
17104 static int
17105 dhd_dbg_state_open(struct inode *inode, struct file *file)
17106 {
17107 file->private_data = inode->i_private;
17108 return 0;
17109 }
17110
17111 static ssize_t
17112 dhd_dbg_state_read(struct file *file, char __user *ubuf,
17113 size_t count, loff_t *ppos)
17114 {
17115 ssize_t rval;
17116 uint32 tmp;
17117 loff_t pos = *ppos;
17118 size_t ret;
17119
17120 if (pos < 0)
17121 return -EINVAL;
17122 if (pos >= g_dbgfs.size || !count)
17123 return 0;
17124 if (count > g_dbgfs.size - pos)
17125 count = g_dbgfs.size - pos;
17126
17127 /* Basically enforce aligned 4 byte reads. It's up to the user to work out the details */
	tmp = dhd_readregl(g_dbgfs.dhdp->bus, pos & (~3));
17129
17130 ret = copy_to_user(ubuf, &tmp, 4);
17131 if (ret == count)
17132 return -EFAULT;
17133
17134 count -= ret;
17135 *ppos = pos + count;
17136 rval = count;
17137
17138 return rval;
17139 }
17140
17141 static ssize_t
17142 dhd_debugfs_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos)
17143 {
17144 loff_t pos = *ppos;
17145 size_t ret;
17146 uint32 buf;
17147
17148 if (pos < 0)
17149 return -EINVAL;
17150 if (pos >= g_dbgfs.size || !count)
17151 return 0;
17152 if (count > g_dbgfs.size - pos)
17153 count = g_dbgfs.size - pos;
17154
17155 ret = copy_from_user(&buf, ubuf, sizeof(uint32));
17156 if (ret == count)
17157 return -EFAULT;
17158
17159 /* Basically enforce aligned 4 byte writes. It's up to the user to work out the details */
	dhd_writeregl(g_dbgfs.dhdp->bus, pos & (~3), buf);
17161
17162 return count;
17163 }
17164
17165 loff_t
17166 dhd_debugfs_lseek(struct file *file, loff_t off, int whence)
17167 {
17168 loff_t pos = -1;
17169
17170 switch (whence) {
17171 case 0:
17172 pos = off;
17173 break;
17174 case 1:
17175 pos = file->f_pos + off;
17176 break;
17177 case 2:
17178 pos = g_dbgfs.size - off;
17179 }
17180 return (pos < 0 || pos > g_dbgfs.size) ? -EINVAL : (file->f_pos = pos);
17181 }
17182
17183 static const struct file_operations dhd_dbg_state_ops = {
17184 .read = dhd_dbg_state_read,
17185 .write = dhd_debugfs_write,
17186 .open = dhd_dbg_state_open,
17187 .llseek = dhd_debugfs_lseek
17188 };
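/*
 * Userspace access sketch (assumes debugfs is mounted at the default
 * /sys/kernel/debug): read the 32-bit register at backplane offset 0x1000.
 * The handlers above force 4-byte alignment on both reads and writes.
 *
 *	dd if=/sys/kernel/debug/dhd/mem bs=4 count=1 skip=$((0x1000 / 4)) | xxd
 */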
17189
17190 static void dhd_dbgfs_create(void)
17191 {
17192 if (g_dbgfs.debugfs_dir) {
17193 g_dbgfs.debugfs_mem = debugfs_create_file("mem", 0644, g_dbgfs.debugfs_dir,
17194 NULL, &dhd_dbg_state_ops);
17195 }
17196 }
17197
17198 void dhd_dbgfs_init(dhd_pub_t *dhdp)
17199 {
17200 g_dbgfs.dhdp = dhdp;
	g_dbgfs.size = 0x20000000; /* allow access to the registers of various cores */
17202
17203 g_dbgfs.debugfs_dir = debugfs_create_dir("dhd", 0);
17204 if (IS_ERR(g_dbgfs.debugfs_dir)) {
17205 g_dbgfs.debugfs_dir = NULL;
17206 return;
17207 }
17208
17209 dhd_dbgfs_create();
17210
17211 return;
17212 }
17213
17214 void dhd_dbgfs_remove(void)
17215 {
17216 debugfs_remove(g_dbgfs.debugfs_mem);
17217 debugfs_remove(g_dbgfs.debugfs_dir);
17218
17219 bzero((unsigned char *) &g_dbgfs, sizeof(g_dbgfs));
17220 }
17221 #endif /* BCMDBGFS */
17222
17223 #ifdef CUSTOM_SET_CPUCORE
17224 void dhd_set_cpucore(dhd_pub_t *dhd, int set)
17225 {
17226 int e_dpc = 0, e_rxf = 0, retry_set = 0;
17227
17228 if (!(dhd->chan_isvht80)) {
17229 DHD_ERROR(("%s: chan_status(%d) cpucore!!!\n", __FUNCTION__, dhd->chan_isvht80));
17230 return;
17231 }
17232
17233 if (DPC_CPUCORE) {
17234 do {
17235 if (set == TRUE) {
17236 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17237 cpumask_of(DPC_CPUCORE));
17238 } else {
17239 e_dpc = set_cpus_allowed_ptr(dhd->current_dpc,
17240 cpumask_of(PRIMARY_CPUCORE));
17241 }
17242 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17243 DHD_ERROR(("%s: dpc(%d) invalid cpu!\n", __FUNCTION__, e_dpc));
17244 return;
17245 }
17246 if (e_dpc < 0)
17247 OSL_SLEEP(1);
17248 } while (e_dpc < 0);
17249 }
17250 if (RXF_CPUCORE) {
17251 do {
17252 if (set == TRUE) {
17253 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17254 cpumask_of(RXF_CPUCORE));
17255 } else {
17256 e_rxf = set_cpus_allowed_ptr(dhd->current_rxf,
17257 cpumask_of(PRIMARY_CPUCORE));
17258 }
17259 if (retry_set++ > MAX_RETRY_SET_CPUCORE) {
17260 DHD_ERROR(("%s: rxf(%d) invalid cpu!\n", __FUNCTION__, e_rxf));
17261 return;
17262 }
17263 if (e_rxf < 0)
17264 OSL_SLEEP(1);
17265 } while (e_rxf < 0);
17266 }
17267 #ifdef DHD_OF_SUPPORT
17268 interrupt_set_cpucore(set, DPC_CPUCORE, PRIMARY_CPUCORE);
17269 #endif /* DHD_OF_SUPPORT */
17270 DHD_TRACE(("%s: set(%d) cpucore success!\n", __FUNCTION__, set));
17271
17272 return;
17273 }
17274 #endif /* CUSTOM_SET_CPUCORE */
17275
17276 #ifdef DHD_MCAST_REGEN
17277 /* Get interface specific ap_isolate configuration */
17278 int dhd_get_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx)
17279 {
17280 dhd_info_t *dhd = dhdp->info;
17281 dhd_if_t *ifp;
17282
17283 ASSERT(idx < DHD_MAX_IFS);
17284
17285 ifp = dhd->iflist[idx];
17286
17287 return ifp->mcast_regen_bss_enable;
17288 }
17289
17290 /* Set interface specific mcast_regen configuration */
17291 int dhd_set_mcast_regen_bss_enable(dhd_pub_t *dhdp, uint32 idx, int val)
17292 {
17293 dhd_info_t *dhd = dhdp->info;
17294 dhd_if_t *ifp;
17295
17296 ASSERT(idx < DHD_MAX_IFS);
17297
17298 ifp = dhd->iflist[idx];
17299
17300 ifp->mcast_regen_bss_enable = val;
17301
17302 /* Disable rx_pkt_chain feature for interface, if mcast_regen feature
17303 * is enabled
17304 */
17305 dhd_update_rx_pkt_chainable_state(dhdp, idx);
17306 return BCME_OK;
17307 }
17308 #endif /* DHD_MCAST_REGEN */
17309
17310 /* Get interface specific ap_isolate configuration */
17311 int dhd_get_ap_isolate(dhd_pub_t *dhdp, uint32 idx)
17312 {
17313 dhd_info_t *dhd = dhdp->info;
17314 dhd_if_t *ifp;
17315
17316 ASSERT(idx < DHD_MAX_IFS);
17317
17318 ifp = dhd->iflist[idx];
17319
17320 return ifp->ap_isolate;
17321 }
17322
17323 /* Set interface specific ap_isolate configuration */
17324 int dhd_set_ap_isolate(dhd_pub_t *dhdp, uint32 idx, int val)
17325 {
17326 dhd_info_t *dhd = dhdp->info;
17327 dhd_if_t *ifp;
17328
17329 ASSERT(idx < DHD_MAX_IFS);
17330
17331 ifp = dhd->iflist[idx];
17332
17333 if (ifp)
17334 ifp->ap_isolate = val;
17335
17336 return 0;
17337 }
17338
17339 #ifdef DHD_RND_DEBUG
17340
17341 #ifdef CUSTOMER_HW4_DEBUG
17342 #define RNDINFO PLATFORM_PATH".rnd"
17343 #elif defined(CUSTOMER_HW2) || defined(BOARD_HIKEY)
17344 #define RNDINFO "/data/misc/wifi/.rnd"
17345 #elif defined(OEM_ANDROID) && (defined(BOARD_PANDA) || defined(__ARM_ARCH_7A__))
17346 #define RNDINFO "/data/misc/wifi/.rnd"
17347 #elif defined(OEM_ANDROID)
17348 #define RNDINFO_LIVE "/installmedia/.rnd"
17349 #define RNDINFO_INST "/data/.rnd"
17350 #define RNDINFO RNDINFO_LIVE
17351 #else /* FC19 and Others */
17352 #define RNDINFO "/root/.rnd"
17353 #endif /* CUSTOMER_HW4_DEBUG */
17354
17355 #define RND_IN RNDINFO".in"
17356 #define RND_OUT RNDINFO".out"
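/*
 * Token pasting above yields paths such as "/data/misc/wifi/.rnd.in" and
 * "/data/misc/wifi/.rnd.out"; the exact prefix depends on the platform
 * macros selected above.
 */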
17357
17358 int
17359 dhd_get_rnd_info(dhd_pub_t *dhd)
17360 {
17361 struct file *fp = NULL;
17362 int ret = BCME_ERROR;
17363 char *filepath = RND_IN;
17364 uint32 file_mode = O_RDONLY;
17365 #if defined(KERNEL_DS) && defined(USER_DS)
17366 mm_segment_t old_fs;
17367 #endif /* KERNEL_DS && USER_DS */
17368 loff_t pos = 0;
17369
	/* Read the saved RND (entropy) info from the file */
17371 fp = filp_open(filepath, file_mode, 0);
17372 if (IS_ERR(fp)) {
17373 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17374 #if defined(CONFIG_X86) && defined(OEM_ANDROID)
17375 /* Check if it is Live Brix Image */
17376 if (bcmstrstr(filepath, RNDINFO_LIVE)) {
17377 goto err1;
17378 }
17379 /* Try if it is Installed Brix Image */
17380 filepath = RNDINFO_INST".in";
17381 DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
17382 fp = filp_open(filepath, file_mode, 0);
17383 if (IS_ERR(fp)) {
17384 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17385 goto err1;
17386 }
17387 #else /* Non Brix Android platform */
17388 goto err1;
17389 #endif /* CONFIG_X86 && OEM_ANDROID */
17390 }
17391
17392 #if defined(KERNEL_DS) && defined(USER_DS)
17393 old_fs = get_fs();
17394 set_fs(KERNEL_DS);
17395 #endif /* KERNEL_DS && USER_DS */
17396
	/* Read the RND length, then the buffer itself */
17398 ret = vfs_read(fp, (char *)&dhd->rnd_len, sizeof(dhd->rnd_len), &pos);
17399 if (ret < 0) {
17400 DHD_ERROR(("%s: rnd_len read error, ret=%d\n", __FUNCTION__, ret));
17401 goto err2;
17402 }
17403
17404 dhd->rnd_buf = MALLOCZ(dhd->osh, dhd->rnd_len);
17405 if (!dhd->rnd_buf) {
17406 DHD_ERROR(("%s: MALLOC failed\n", __FUNCTION__));
17407 goto err2;
17408 }
17409
17410 ret = vfs_read(fp, (char *)dhd->rnd_buf, dhd->rnd_len, &pos);
17411 if (ret < 0) {
17412 DHD_ERROR(("%s: rnd_buf read error, ret=%d\n", __FUNCTION__, ret));
17413 goto err3;
17414 }
17415
17416 #if defined(KERNEL_DS) && defined(USER_DS)
17417 set_fs(old_fs);
17418 #endif /* KERNEL_DS && USER_DS */
17419 filp_close(fp, NULL);
17420
17421 DHD_ERROR(("%s: RND read from %s\n", __FUNCTION__, filepath));
17422 return BCME_OK;
17423
17424 err3:
17425 MFREE(dhd->osh, dhd->rnd_buf, dhd->rnd_len);
17426 dhd->rnd_buf = NULL;
17427 err2:
17428 #if defined(KERNEL_DS) && defined(USER_DS)
17429 set_fs(old_fs);
17430 #endif /* KERNEL_DS && USER_DS */
17431 filp_close(fp, NULL);
17432 err1:
17433 return BCME_ERROR;
17434 }
17435
17436 int
17437 dhd_dump_rnd_info(dhd_pub_t *dhd, uint8 *rnd_buf, uint32 rnd_len)
17438 {
17439 struct file *fp = NULL;
17440 int ret = BCME_OK;
17441 char *filepath = RND_OUT;
17442 uint32 file_mode = O_CREAT | O_WRONLY | O_SYNC;
17443 #if defined(KERNEL_DS) && defined(USER_DS)
17444 mm_segment_t old_fs;
17445 #endif /* KERNEL_DS && USER_DS */
17446 loff_t pos = 0;
17447
	/* Open the RND info file for writing */
17449 fp = filp_open(filepath, file_mode, 0664);
17450 if (IS_ERR(fp)) {
17451 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17452 #if defined(CONFIG_X86) && defined(OEM_ANDROID)
17453 /* Check if it is Live Brix Image */
17454 if (bcmstrstr(filepath, RNDINFO_LIVE)) {
17455 goto err1;
17456 }
17457 /* Try if it is Installed Brix Image */
17458 filepath = RNDINFO_INST".out";
17459 DHD_ERROR(("%s: Try File [%s]\n", __FUNCTION__, filepath));
17460 fp = filp_open(filepath, file_mode, 0664);
17461 if (IS_ERR(fp)) {
17462 DHD_ERROR(("%s: File [%s] doesn't exist\n", __FUNCTION__, filepath));
17463 goto err1;
17464 }
17465 #else /* Non Brix Android platform */
17466 goto err1;
17467 #endif /* CONFIG_X86 && OEM_ANDROID */
17468 }
17469
17470 #if defined(KERNEL_DS) && defined(USER_DS)
17471 old_fs = get_fs();
17472 set_fs(KERNEL_DS);
17473 #endif /* KERNEL_DS && USER_DS */
17474
	/* Write the RND length, then the buffer itself */
17476 ret = vfs_write(fp, (char *)&rnd_len, sizeof(rnd_len), &pos);
17477 if (ret < 0) {
17478 DHD_ERROR(("%s: rnd_len write error, ret=%d\n", __FUNCTION__, ret));
17479 goto err2;
17480 }
17481
17482 ret = vfs_write(fp, (char *)rnd_buf, rnd_len, &pos);
17483 if (ret < 0) {
17484 DHD_ERROR(("%s: rnd_buf write error, ret=%d\n", __FUNCTION__, ret));
17485 goto err2;
17486 }
17487
17488 #if defined(KERNEL_DS) && defined(USER_DS)
17489 set_fs(old_fs);
17490 #endif /* KERNEL_DS && USER_DS */
17491 filp_close(fp, NULL);
17492 DHD_ERROR(("%s: RND written to %s\n", __FUNCTION__, filepath));
17493 return BCME_OK;
17494
17495 err2:
17496 #if defined(KERNEL_DS) && defined(USER_DS)
17497 set_fs(old_fs);
17498 #endif /* KERNEL_DS && USER_DS */
17499 filp_close(fp, NULL);
17500 err1:
17501 return BCME_ERROR;
17502
17503 }
17504 #endif /* DHD_RND_DEBUG */
17505
17506 #ifdef DHD_FW_COREDUMP
17507 void dhd_schedule_memdump(dhd_pub_t *dhdp, uint8 *buf, uint32 size)
17508 {
17509 unsigned long flags = 0;
17510 dhd_dump_t *dump = NULL;
17511 dhd_info_t *dhd_info = NULL;
17512 #if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
17513 log_dump_type_t type = DLD_BUF_TYPE_ALL;
17514 #endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
17515
17516 dhd_info = (dhd_info_t *)dhdp->info;
17517 dump = (dhd_dump_t *)MALLOC(dhdp->osh, sizeof(dhd_dump_t));
17518 if (dump == NULL) {
17519 DHD_ERROR(("%s: dhd dump memory allocation failed\n", __FUNCTION__));
17520 return;
17521 }
17522 dump->buf = buf;
17523 dump->bufsize = size;
17524 #ifdef BCMPCIE
17525 dhd_get_hscb_info(dhdp, (void*)(&dump->hscb_buf),
17526 (uint32 *)(&dump->hscb_bufsize));
#else /* BCMPCIE */
	dump->hscb_buf = NULL;
	dump->hscb_bufsize = 0;
#endif /* BCMPCIE */
17530
17531 #ifdef DHD_LOG_DUMP
17532 dhd_print_buf_addr(dhdp, "memdump", buf, size);
17533 #if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
	/* Print out buffer information */
17535 dhd_log_dump_buf_addr(dhdp, &type);
17536 #endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
17537 #endif /* DHD_LOG_DUMP */
17538
17539 if (dhdp->memdump_enabled == DUMP_MEMONLY && (!disable_bug_on)) {
17540 BUG_ON(1);
17541 }
17542
17543 #if defined(DEBUG_DNGL_INIT_FAIL) || defined(DHD_ERPOM) || \
17544 defined(DNGL_AXI_ERROR_LOGGING)
17545 if (
17546 #if defined(DEBUG_DNGL_INIT_FAIL)
17547 (dhdp->memdump_type == DUMP_TYPE_DONGLE_INIT_FAILURE) ||
17548 #endif /* DEBUG_DNGL_INIT_FAIL */
17549 #ifdef DHD_ERPOM
17550 (dhdp->memdump_type == DUMP_TYPE_DUE_TO_BT) ||
17551 #endif /* DHD_ERPOM */
17552 #ifdef DNGL_AXI_ERROR_LOGGING
17553 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT) ||
17554 #endif /* DNGL_AXI_ERROR_LOGGING */
17555 FALSE)
17556 {
17557 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
17558 log_dump_type_t *flush_type = NULL;
17559 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
17560 dhd_info->scheduled_memdump = FALSE;
17561 dhd_mem_dump((void *)dhdp->info, (void *)dump, 0);
17562 #if defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL) && defined(DHD_LOG_DUMP)
17563 /* for dongle init fail cases, 'dhd_mem_dump' does
17564 * not call 'dhd_log_dump', so call it here.
17565 */
17566 flush_type = MALLOCZ(dhdp->osh,
17567 sizeof(log_dump_type_t));
17568 if (flush_type) {
17569 *flush_type = DLD_BUF_TYPE_ALL;
17570 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
17571 dhd_log_dump(dhdp->info, flush_type, 0);
17572 }
17573 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL && DHD_LOG_DUMP */
17574 return;
17575 }
17576 #endif /* DEBUG_DNGL_INIT_FAIL || DHD_ERPOM || DNGL_AXI_ERROR_LOGGING */
17577
17578 dhd_info->scheduled_memdump = TRUE;
17579 /* bus busy bit for mem dump will be cleared in mem dump
17580 * work item context, after mem dump file is written
17581 */
17582 DHD_GENERAL_LOCK(dhdp, flags);
17583 DHD_BUS_BUSY_SET_IN_MEMDUMP(dhdp);
17584 DHD_GENERAL_UNLOCK(dhdp, flags);
17585 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
17586 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, (void *)dump,
17587 DHD_WQ_WORK_SOC_RAM_DUMP, dhd_mem_dump, DHD_WQ_WORK_PRIORITY_HIGH);
17588 }
17589 static void
17590 dhd_mem_dump(void *handle, void *event_info, u8 event)
17591 {
17592 dhd_info_t *dhd = handle;
17593 dhd_pub_t *dhdp = NULL;
17594 unsigned long flags = 0;
17595 int ret = 0;
17596 dhd_dump_t *dump = NULL;
17597
17598 DHD_ERROR(("%s: ENTER, memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
17599
17600 if (!dhd) {
17601 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17602 return;
17603 }
17604
17605 dhdp = &dhd->pub;
17606 if (!dhdp) {
17607 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
17608 return;
17609 }
17610
17611 DHD_GENERAL_LOCK(dhdp, flags);
17612 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
17613 DHD_GENERAL_UNLOCK(dhdp, flags);
17614 DHD_ERROR(("%s: bus is down! can't collect mem dump. \n", __FUNCTION__));
17615 ret = -ENODEV;
17616 goto exit;
17617 }
17618 DHD_GENERAL_UNLOCK(dhdp, flags);
17619
17620 #ifdef DHD_SSSR_DUMP
17621 if (dhdp->sssr_inited && dhdp->collect_sssr) {
17622 dhdpcie_sssr_dump(dhdp);
17623 }
17624 dhdp->collect_sssr = FALSE;
17625 #endif /* DHD_SSSR_DUMP */
17626 #if defined(WL_CFG80211) && defined(DHD_FILE_DUMP_EVENT)
17627 dhd_wait_for_file_dump(dhdp);
17628 #endif /* WL_CFG80211 && DHD_FILE_DUMP_EVENT */
17629
17630 dump = (dhd_dump_t *)event_info;
17631 if (!dump) {
17632 DHD_ERROR(("%s: dump is NULL\n", __FUNCTION__));
17633 ret = -EINVAL;
17634 goto exit;
17635 }
17636
	/*
	 * If the kernel does not have file write access enabled,
	 * skip writing dumps to files; the dumps will instead be
	 * pushed to the HAL layer, which writes them into files.
	 */
17643 #ifdef DHD_DUMP_FILE_WRITE_FROM_KERNEL
17644
17645 if (write_dump_to_file(&dhd->pub, dump->buf, dump->bufsize, "mem_dump")) {
17646 DHD_ERROR(("%s: writing SoC_RAM dump to the file failed\n", __FUNCTION__));
17647 #ifdef DHD_DEBUG_UART
17648 dhd->pub.memdump_success = FALSE;
17649 #endif /* DHD_DEBUG_UART */
17650 }
17651
	/* Call dhd_log_dump directly for debug_dump collection from the mem_dump
	 * work queue context; there is no need to schedule another work item for
	 * the log dump. For a user-initiated DEBUG_DUMP wpa_cli command
	 * (DUMP_TYPE_BY_SYSDUMP) the cfg layer schedules the log_dump work queue
	 * itself, and that path is left undisturbed. If 'dhd_mem_dump' is called
	 * directly, debug_dump is not collected, since the caller may be in a
	 * non-sleepable context.
	 */
17659 #ifdef DHD_LOG_DUMP
17660 if (dhd->scheduled_memdump &&
17661 dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP) {
17662 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
17663 sizeof(log_dump_type_t));
17664 if (flush_type) {
17665 *flush_type = DLD_BUF_TYPE_ALL;
17666 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
17667 dhd_log_dump(dhd, flush_type, 0);
17668 }
17669 }
17670 #endif /* DHD_LOG_DUMP */
17671
17672 #ifdef DHD_PKT_LOGGING
17673 copy_debug_dump_time(dhdp->debug_dump_time_pktlog_str, dhdp->debug_dump_time_str);
17674 #endif /* DHD_PKT_LOGGING */
17675 clear_debug_dump_time(dhdp->debug_dump_time_str);
17676
	/* Before calling BUG_ON, wait for the other logs to be dumped. We cannot
	 * wait when dhd_mem_dump is called directly, as the caller may not be in
	 * a sleepable context.
	 */
17681 if (dhd->scheduled_memdump) {
17682 uint bitmask = 0;
17683 int timeleft = 0;
17684 #ifdef DHD_SSSR_DUMP
17685 bitmask |= DHD_BUS_BUSY_IN_SSSRDUMP;
17686 #endif // endif
17687 if (bitmask != 0) {
17688 DHD_ERROR(("%s: wait to clear dhd_bus_busy_state: 0x%x\n",
17689 __FUNCTION__, dhdp->dhd_bus_busy_state));
17690 timeleft = dhd_os_busbusy_wait_bitmask(dhdp,
17691 &dhdp->dhd_bus_busy_state, bitmask, 0);
17692 if ((timeleft == 0) || (timeleft == 1)) {
17693 DHD_ERROR(("%s: Timed out dhd_bus_busy_state=0x%x\n",
17694 __FUNCTION__, dhdp->dhd_bus_busy_state));
17695 }
17696 }
17697 }
17698
17699 if (dump->hscb_buf && dump->hscb_bufsize) {
17700 DHD_ERROR(("%s: write HSCB dump... \n", __FUNCTION__));
17701 if (write_dump_to_file(&dhd->pub, dump->hscb_buf,
17702 dump->hscb_bufsize, "mem_dump_hscb")) {
17703 DHD_ERROR(("%s: writing HSCB dump to the file failed\n", __FUNCTION__));
17704 #ifdef DHD_DEBUG_UART
17705 dhd->pub.memdump_success = FALSE;
17706 #endif /* DHD_DEBUG_UART */
17707 }
17708 }
17709 #endif /* DHD_DUMP_FILE_WRITE_FROM_KERNEL */
17710
17711 DHD_ERROR(("%s: memdump type %u\n", __FUNCTION__, dhd->pub.memdump_type));
17712 if (dhd->pub.memdump_enabled == DUMP_MEMFILE_BUGON &&
17713 #ifdef DHD_LOG_DUMP
17714 dhd->pub.memdump_type != DUMP_TYPE_BY_SYSDUMP &&
17715 #endif /* DHD_LOG_DUMP */
17716 dhd->pub.memdump_type != DUMP_TYPE_BY_USER &&
17717 #ifdef DHD_DEBUG_UART
17718 dhd->pub.memdump_success == TRUE &&
17719 #endif /* DHD_DEBUG_UART */
17720 #ifdef DNGL_EVENT_SUPPORT
17721 dhd->pub.memdump_type != DUMP_TYPE_DONGLE_HOST_EVENT &&
17722 #endif /* DNGL_EVENT_SUPPORT */
17723 dhd->pub.memdump_type != DUMP_TYPE_CFG_VENDOR_TRIGGERED) {
17724
17725 #ifdef SHOW_LOGTRACE
17726 /* Wait till logtrace context is flushed */
17727 dhd_flush_logtrace_process(dhd);
17728 #endif /* SHOW_LOGTRACE */
17729
17730 DHD_ERROR(("%s: call BUG_ON \n", __FUNCTION__));
17731 if (!disable_bug_on) {
17732 BUG_ON(1);
17733 }
17734 }
17735 DHD_ERROR(("%s: No BUG ON, memdump type %u \n", __FUNCTION__, dhd->pub.memdump_type));
17736
17737 exit:
17738 if (dump) {
17739 MFREE(dhd->pub.osh, dump, sizeof(dhd_dump_t));
17740 }
17741 DHD_GENERAL_LOCK(dhdp, flags);
17742 DHD_BUS_BUSY_CLEAR_IN_MEMDUMP(&dhd->pub);
17743 dhd_os_busbusy_wake(dhdp);
17744 DHD_GENERAL_UNLOCK(dhdp, flags);
17745 dhd->scheduled_memdump = FALSE;
17746 #ifdef OEM_ANDROID
17747 if (dhdp->hang_was_pending) {
17748 DHD_ERROR(("%s: Send pending HANG event...\n", __FUNCTION__));
17749 dhd_os_send_hang_message(dhdp);
17750 dhdp->hang_was_pending = 0;
17751 }
17752 #endif /* OEM_ANDROID */
17753 DHD_ERROR(("%s: EXIT %d\n", __FUNCTION__, ret));
17754 return;
17755 }
17756 #endif /* DHD_FW_COREDUMP */
17757
17758 #ifdef DHD_SSSR_DUMP
17759 int
17760 dhd_sssr_dump_dig_buf_before(void *dev, const void *user_buf, uint32 len)
17761 {
17762 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
17763 dhd_pub_t *dhdp = &dhd_info->pub;
17764 int pos = 0, ret = BCME_ERROR;
17765 uint dig_buf_size = 0;
17766
17767 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17768 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17769 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17770 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17771 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
17772 }
17773
17774 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17775 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_before,
17776 NULL, user_buf, dig_buf_size, &pos);
17777 }
17778 return ret;
17779 }
17780
17781 int
17782 dhd_sssr_dump_dig_buf_after(void *dev, const void *user_buf, uint32 len)
17783 {
17784 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
17785 dhd_pub_t *dhdp = &dhd_info->pub;
17786 int pos = 0, ret = BCME_ERROR;
17787 uint dig_buf_size = 0;
17788
17789 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17790 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17791 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17792 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17793 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
17794 }
17795
17796 if (dhdp->sssr_dig_buf_after) {
17797 ret = dhd_export_debug_data((char *)dhdp->sssr_dig_buf_after,
17798 NULL, user_buf, dig_buf_size, &pos);
17799 }
17800 return ret;
17801 }
17802
17803 int
17804 dhd_sssr_dump_d11_buf_before(void *dev, const void *user_buf, uint32 len, int core)
17805 {
17806 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
17807 dhd_pub_t *dhdp = &dhd_info->pub;
17808 int pos = 0, ret = BCME_ERROR;
17809
17810 if (dhdp->sssr_d11_before[core] &&
17811 dhdp->sssr_d11_outofreset[core] &&
17812 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17813 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_before[core],
17814 NULL, user_buf, len, &pos);
17815 }
17816 return ret;
17817 }
17818
17819 int
17820 dhd_sssr_dump_d11_buf_after(void *dev, const void *user_buf, uint32 len, int core)
17821 {
17822 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
17823 dhd_pub_t *dhdp = &dhd_info->pub;
17824 int pos = 0, ret = BCME_ERROR;
17825
17826 if (dhdp->sssr_d11_after[core] &&
17827 dhdp->sssr_d11_outofreset[core]) {
17828 ret = dhd_export_debug_data((char *)dhdp->sssr_d11_after[core],
17829 NULL, user_buf, len, &pos);
17830 }
17831 return ret;
17832 }
17833
17834 static void
17835 dhd_sssr_dump_to_file(dhd_info_t* dhdinfo)
17836 {
17837 dhd_info_t *dhd = dhdinfo;
17838 dhd_pub_t *dhdp;
17839 int i;
17840 char before_sr_dump[128];
17841 char after_sr_dump[128];
17842 unsigned long flags = 0;
17843 uint dig_buf_size = 0;
17844
17845 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
17846
17847 if (!dhd) {
17848 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
17849 return;
17850 }
17851
17852 dhdp = &dhd->pub;
17853
17854 DHD_GENERAL_LOCK(dhdp, flags);
17855 DHD_BUS_BUSY_SET_IN_SSSRDUMP(dhdp);
17856 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
17857 DHD_GENERAL_UNLOCK(dhdp, flags);
17858 DHD_ERROR(("%s: bus is down! can't collect sssr dump. \n", __FUNCTION__));
17859 goto exit;
17860 }
17861 DHD_GENERAL_UNLOCK(dhdp, flags);
17862
17863 for (i = 0; i < MAX_NUM_D11CORES; i++) {
17864 /* Init file name */
17865 memset(before_sr_dump, 0, sizeof(before_sr_dump));
17866 memset(after_sr_dump, 0, sizeof(after_sr_dump));
17867
17868 snprintf(before_sr_dump, sizeof(before_sr_dump), "%s_%d_%s",
17869 "sssr_dump_core", i, "before_SR");
17870 snprintf(after_sr_dump, sizeof(after_sr_dump), "%s_%d_%s",
17871 "sssr_dump_core", i, "after_SR");
17872
17873 if (dhdp->sssr_d11_before[i] && dhdp->sssr_d11_outofreset[i] &&
17874 (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17875 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_before[i],
17876 dhdp->sssr_reg_info.mac_regs[i].sr_size, before_sr_dump)) {
17877 DHD_ERROR(("%s: writing SSSR MAIN dump before to the file failed\n",
17878 __FUNCTION__));
17879 }
17880 }
17881 if (dhdp->sssr_d11_after[i] && dhdp->sssr_d11_outofreset[i]) {
17882 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_d11_after[i],
17883 dhdp->sssr_reg_info.mac_regs[i].sr_size, after_sr_dump)) {
17884 DHD_ERROR(("%s: writing SSSR AUX dump after to the file failed\n",
17885 __FUNCTION__));
17886 }
17887 }
17888 }
17889
17890 if (dhdp->sssr_reg_info.vasip_regs.vasip_sr_size) {
17891 dig_buf_size = dhdp->sssr_reg_info.vasip_regs.vasip_sr_size;
17892 } else if ((dhdp->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
17893 dhdp->sssr_reg_info.dig_mem_info.dig_sr_size) {
17894 dig_buf_size = dhdp->sssr_reg_info.dig_mem_info.dig_sr_size;
17895 }
17896
17897 if (dhdp->sssr_dig_buf_before && (dhdp->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
17898 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_before,
17899 dig_buf_size, "sssr_dump_dig_before_SR")) {
17900 DHD_ERROR(("%s: writing SSSR Dig dump before to the file failed\n",
17901 __FUNCTION__));
17902 }
17903 }
17904
17905 if (dhdp->sssr_dig_buf_after) {
17906 if (write_dump_to_file(dhdp, (uint8 *)dhdp->sssr_dig_buf_after,
17907 dig_buf_size, "sssr_dump_dig_after_SR")) {
17908 DHD_ERROR(("%s: writing SSSR Dig VASIP dump after to the file failed\n",
17909 __FUNCTION__));
17910 }
17911 }
17912
17913 exit:
17914 DHD_GENERAL_LOCK(dhdp, flags);
17915 DHD_BUS_BUSY_CLEAR_IN_SSSRDUMP(dhdp);
17916 dhd_os_busbusy_wake(dhdp);
17917 DHD_GENERAL_UNLOCK(dhdp, flags);
17918 }
17919
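/*
 * Entry point for writing an SSSR dump. Records the requested dump mode and,
 * only when the kernel is allowed to write dump files directly
 * (DHD_DUMP_FILE_WRITE_FROM_KERNEL), writes the dump synchronously in the
 * caller's context; otherwise the dumps are left for the HAL to collect.
 */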
17920 void
17921 dhd_write_sssr_dump(dhd_pub_t *dhdp, uint32 dump_mode)
17922 {
17923 dhdp->sssr_dump_mode = dump_mode;
17924
17925 /*
17926 * If kernel does not have file write access enabled
17927 * then skip writing dumps to files.
17928 * The dumps will be pushed to HAL layer which will
17929 * write into files
17930 */
17931 #if !defined(DHD_DUMP_FILE_WRITE_FROM_KERNEL)
17932 return;
17933 #endif /* !DHD_DUMP_FILE_WRITE_FROM_KERNEL */
17934
	/*
	 * Call path: dhd_mem_dump -> dhd_sssr_dump -> dhd_write_sssr_dump
	 * Without a workqueue -
	 * DUMP_TYPE_DONGLE_INIT_FAILURE/DUMP_TYPE_DUE_TO_BT/DUMP_TYPE_SMMU_FAULT
	 * : these are called from their own handlers, not from interrupt context.
	 * With a workqueue - for all other DUMP_TYPEs, dhd_mem_dump is called
	 * from a workqueue. Thus there is no need to defer the SSSR dump to a
	 * workqueue here.
	 */
17943 DHD_ERROR(("%s: writing sssr dump to file... \n", __FUNCTION__));
17944 dhd_sssr_dump_to_file(dhdp->info);
17945
17946 }
17947 #endif /* DHD_SSSR_DUMP */
17948
17949 #ifdef DHD_LOG_DUMP
17950 static void
17951 dhd_log_dump(void *handle, void *event_info, u8 event)
17952 {
17953 dhd_info_t *dhd = handle;
17954 log_dump_type_t *type = (log_dump_type_t *)event_info;
17955
17956 if (!dhd || !type) {
		DHD_ERROR(("%s: dhd or type is NULL\n", __FUNCTION__));
17958 return;
17959 }
17960
17961 #ifdef WL_CFG80211
17962 /* flush the fw side logs */
17963 wl_flush_fw_log_buffer(dhd_linux_get_primary_netdev(&dhd->pub),
17964 FW_LOGSET_MASK_ALL);
17965 #endif // endif
	/* There are currently three possible contexts from which
	 * a log dump can be scheduled:
	 * 1. TRAP, 2. supplicant DEBUG_DUMP private driver command,
	 * 3. HEALTH CHECK event.
	 * The concise debug info buffer is a shared resource,
	 * and in case a trap is one of the contexts, both of the
	 * scheduled work queues need to run because trap data is
	 * essential for debugging. Hence a mutex lock is acquired
	 * before calling do_dhd_log_dump().
	 */
17976 DHD_ERROR(("%s: calling log dump.. \n", __FUNCTION__));
17977 dhd_os_logdump_lock(&dhd->pub);
17978 DHD_OS_WAKE_LOCK(&dhd->pub);
17979 if (do_dhd_log_dump(&dhd->pub, type) != BCME_OK) {
17980 DHD_ERROR(("%s: writing debug dump to the file failed\n", __FUNCTION__));
17981 }
17982 DHD_OS_WAKE_UNLOCK(&dhd->pub);
17983 dhd_os_logdump_unlock(&dhd->pub);
17984 }
17985
17986 void dhd_schedule_log_dump(dhd_pub_t *dhdp, void *type)
17987 {
17988 DHD_ERROR(("%s: scheduling log dump.. \n", __FUNCTION__));
17989 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
17990 type, DHD_WQ_WORK_DHD_LOG_DUMP,
17991 dhd_log_dump, DHD_WQ_WORK_PRIORITY_HIGH);
17992 }
17993
17994 static void
17995 dhd_print_buf_addr(dhd_pub_t *dhdp, char *name, void *buf, unsigned int size)
17996 {
17997 if ((dhdp->memdump_enabled == DUMP_MEMONLY) ||
17998 (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON) ||
17999 (dhdp->memdump_type == DUMP_TYPE_SMMU_FAULT)) {
18000 #if defined(CONFIG_ARM64)
18001 DHD_ERROR(("-------- %s: buf(va)=%llx, buf(pa)=%llx, bufsize=%d\n",
18002 name, (uint64)buf, (uint64)__virt_to_phys((ulong)buf), size));
18003 #elif defined(__ARM_ARCH_7A__)
18004 DHD_ERROR(("-------- %s: buf(va)=%x, buf(pa)=%x, bufsize=%d\n",
18005 name, (uint32)buf, (uint32)__virt_to_phys((ulong)buf), size));
#endif /* CONFIG_ARM64 || __ARM_ARCH_7A__ */
18007 }
18008 }
18009
18010 static void
18011 dhd_log_dump_buf_addr(dhd_pub_t *dhdp, log_dump_type_t *type)
18012 {
18013 int i;
18014 unsigned long wr_size = 0;
18015 struct dhd_log_dump_buf *dld_buf = &g_dld_buf[0];
18016 size_t log_size = 0;
18017 char buf_name[DHD_PRINT_BUF_NAME_LEN];
18018 dhd_dbg_ring_t *ring = NULL;
18019
18020 BCM_REFERENCE(ring);
18021
18022 for (i = 0; i < DLD_BUFFER_NUM; i++) {
18023 dld_buf = &g_dld_buf[i];
18024 log_size = (unsigned long)dld_buf->max -
18025 (unsigned long)dld_buf->buffer;
18026 if (dld_buf->wraparound) {
18027 wr_size = log_size;
18028 } else {
18029 wr_size = (unsigned long)dld_buf->present -
18030 (unsigned long)dld_buf->front;
18031 }
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d]", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf, dld_buf_size[i]);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] buffer", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->buffer, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] present", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->present, wr_size);
		scnprintf(buf_name, sizeof(buf_name), "dld_buf[%d] front", i);
		dhd_print_buf_addr(dhdp, buf_name, dld_buf->front, wr_size);
18040 }
18041
18042 #ifdef EWP_ECNTRS_LOGGING
18043 /* periodic flushing of ecounters is NOT supported */
18044 if (*type == DLD_BUF_TYPE_ALL &&
18045 logdump_ecntr_enable &&
18046 dhdp->ecntr_dbg_ring) {
18047
18048 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18049 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring", ring, LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18050 dhd_print_buf_addr(dhdp, "ecntr_dbg_ring ring_buf", ring->ring_buf,
18051 LOG_DUMP_ECNTRS_MAX_BUFSIZE);
18052 }
18053 #endif /* EWP_ECNTRS_LOGGING */
18054
18055 #ifdef DHD_STATUS_LOGGING
18056 if (dhdp->statlog) {
18057 dhd_print_buf_addr(dhdp, "statlog_logbuf", dhd_statlog_get_logbuf(dhdp),
18058 dhd_statlog_get_logbuf_len(dhdp));
18059 }
18060 #endif /* DHD_STATUS_LOGGING */
18061
18062 #ifdef EWP_RTT_LOGGING
	/* periodic flushing of rtt logs is NOT supported */
18064 if (*type == DLD_BUF_TYPE_ALL &&
18065 logdump_rtt_enable &&
18066 dhdp->rtt_dbg_ring) {
18067
18068 ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
18069 dhd_print_buf_addr(dhdp, "rtt_dbg_ring", ring, LOG_DUMP_RTT_MAX_BUFSIZE);
18070 dhd_print_buf_addr(dhdp, "rtt_dbg_ring ring_buf", ring->ring_buf,
18071 LOG_DUMP_RTT_MAX_BUFSIZE);
18072 }
18073 #endif /* EWP_RTT_LOGGING */
18074
18075 #ifdef BCMPCIE
18076 if (dhdp->dongle_trap_occured && dhdp->extended_trap_data) {
18077 dhd_print_buf_addr(dhdp, "extended_trap_data", dhdp->extended_trap_data,
18078 BCMPCIE_EXT_TRAP_DATA_MAXLEN);
18079 }
18080 #endif /* BCMPCIE */
18081
18082 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18083 /* if health check event was received */
18084 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18085 dhd_print_buf_addr(dhdp, "health_chk_event_data", dhdp->health_chk_event_data,
18086 HEALTH_CHK_BUF_SIZE);
18087 }
18088 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18089
18090 /* append the concise debug information */
18091 if (dhdp->concise_dbg_buf) {
18092 dhd_print_buf_addr(dhdp, "concise_dbg_buf", dhdp->concise_dbg_buf,
18093 CONCISE_DUMP_BUFLEN);
18094 }
18095 }
18096
18097 #ifdef CUSTOMER_HW4_DEBUG
18098 static void
18099 dhd_log_dump_print_to_kmsg(char *bufptr, unsigned long len)
18100 {
18101 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE + 1];
18102 char *end = NULL;
18103 unsigned long plen = 0;
18104
18105 if (!bufptr || !len)
18106 return;
18107
18108 memset(tmp_buf, 0, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18109 end = bufptr + len;
18110 while (bufptr < end) {
18111 if ((bufptr + DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) < end) {
18112 memcpy(tmp_buf, bufptr, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE);
18113 tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = '\0';
18114 printf("%s", tmp_buf);
18115 bufptr += DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE;
18116 } else {
18117 plen = (unsigned long)end - (unsigned long)bufptr;
18118 memcpy(tmp_buf, bufptr, plen);
18119 tmp_buf[plen] = '\0';
18120 printf("%s", tmp_buf);
18121 bufptr += plen;
18122 }
18123 }
18124 }
18125
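/*
 * Print the last 'tail_len' bytes of a log dump buffer to the kernel log.
 * Three cases are handled: the tail lies entirely within the valid data,
 * the buffer has wrapped around (two flush regions are printed), or the
 * buffer holds less data than the requested tail length.
 */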
18126 static void
18127 dhd_log_dump_print_tail(dhd_pub_t *dhdp,
18128 struct dhd_log_dump_buf *dld_buf,
18129 uint tail_len)
18130 {
18131 char *flush_ptr1 = NULL, *flush_ptr2 = NULL;
18132 unsigned long len_flush1 = 0, len_flush2 = 0;
18133 unsigned long flags = 0;
18134
	/* need to hold the lock before accessing the 'present' and 'front' ptrs */
18136 spin_lock_irqsave(&dld_buf->lock, flags);
18137 flush_ptr1 = dld_buf->present - tail_len;
18138 if (flush_ptr1 >= dld_buf->front) {
18139 /* tail content is within the buffer */
18140 flush_ptr2 = NULL;
18141 len_flush1 = tail_len;
18142 } else if (dld_buf->wraparound) {
18143 /* tail content spans the buffer length i.e, wrap around */
18144 flush_ptr1 = dld_buf->front;
18145 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)flush_ptr1;
18146 len_flush2 = (unsigned long)tail_len - len_flush1;
18147 flush_ptr2 = (char *)((unsigned long)dld_buf->max -
18148 (unsigned long)len_flush2);
18149 } else {
		/* amount of logs in the buffer is less than the tail size */
18151 flush_ptr1 = dld_buf->front;
18152 flush_ptr2 = NULL;
18153 len_flush1 = (unsigned long)dld_buf->present - (unsigned long)dld_buf->front;
18154 }
18155 spin_unlock_irqrestore(&dld_buf->lock, flags);
18156
18157 printf("\n================= LOG_DUMP tail =================\n");
18158 if (flush_ptr2) {
18159 dhd_log_dump_print_to_kmsg(flush_ptr2, len_flush2);
18160 }
18161 dhd_log_dump_print_to_kmsg(flush_ptr1, len_flush1);
18162 printf("\n===================================================\n");
18163 }
18164 #endif /* CUSTOMER_HW4_DEBUG */
18165
18166 #ifdef DHD_SSSR_DUMP
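/*
 * Fill 'arr_len' with the length of each available SSSR buffer (per-core
 * before/after and the dig buffer) so that the caller (typically the HAL,
 * via dhd_nla_put_sssr_dump_len()) can size its receive buffers beforehand.
 */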
18167 int
18168 dhdpcie_sssr_dump_get_before_after_len(dhd_pub_t *dhd, uint32 *arr_len)
18169 {
18170 int i = 0;
18171
18172 DHD_ERROR(("%s\n", __FUNCTION__));
18173
18174 /* core 0 */
18175 i = 0;
18176 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
18177 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
18178 arr_len[SSSR_C0_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18179 DHD_ERROR(("%s: arr_len[SSSR_C0_D11_BEFORE] : %d\n", __FUNCTION__,
18180 arr_len[SSSR_C0_D11_BEFORE]));
18181 #ifdef DHD_LOG_DUMP
18182 dhd_print_buf_addr(dhd, "SSSR_C0_D11_BEFORE",
18183 dhd->sssr_d11_before[i], arr_len[SSSR_C0_D11_BEFORE]);
18184 #endif /* DHD_LOG_DUMP */
18185 }
18186 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
18187 arr_len[SSSR_C0_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18188 DHD_ERROR(("%s: arr_len[SSSR_C0_D11_AFTER] : %d\n", __FUNCTION__,
18189 arr_len[SSSR_C0_D11_AFTER]));
18190 #ifdef DHD_LOG_DUMP
18191 dhd_print_buf_addr(dhd, "SSSR_C0_D11_AFTER",
18192 dhd->sssr_d11_after[i], arr_len[SSSR_C0_D11_AFTER]);
18193 #endif /* DHD_LOG_DUMP */
18194 }
18195
18196 /* core 1 */
18197 i = 1;
18198 if (dhd->sssr_d11_before[i] && dhd->sssr_d11_outofreset[i] &&
18199 (dhd->sssr_dump_mode == SSSR_DUMP_MODE_SSSR)) {
18200 arr_len[SSSR_C1_D11_BEFORE] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18201 DHD_ERROR(("%s: arr_len[SSSR_C1_D11_BEFORE] : %d\n", __FUNCTION__,
18202 arr_len[SSSR_C1_D11_BEFORE]));
18203 #ifdef DHD_LOG_DUMP
18204 dhd_print_buf_addr(dhd, "SSSR_C1_D11_BEFORE",
18205 dhd->sssr_d11_before[i], arr_len[SSSR_C1_D11_BEFORE]);
18206 #endif /* DHD_LOG_DUMP */
18207 }
18208 if (dhd->sssr_d11_after[i] && dhd->sssr_d11_outofreset[i]) {
18209 arr_len[SSSR_C1_D11_AFTER] = (dhd->sssr_reg_info.mac_regs[i].sr_size);
18210 DHD_ERROR(("%s: arr_len[SSSR_C1_D11_AFTER] : %d\n", __FUNCTION__,
18211 arr_len[SSSR_C1_D11_AFTER]));
18212 #ifdef DHD_LOG_DUMP
18213 dhd_print_buf_addr(dhd, "SSSR_C1_D11_AFTER",
18214 dhd->sssr_d11_after[i], arr_len[SSSR_C1_D11_AFTER]);
18215 #endif /* DHD_LOG_DUMP */
18216 }
18217
18218 if (dhd->sssr_reg_info.vasip_regs.vasip_sr_size) {
18219 arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
18220 arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.vasip_regs.vasip_sr_size);
18221 DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
18222 arr_len[SSSR_DIG_BEFORE]));
18223 DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
18224 arr_len[SSSR_DIG_AFTER]));
18225 #ifdef DHD_LOG_DUMP
18226 if (dhd->sssr_dig_buf_before) {
18227 dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
18228 dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
18229 }
18230 if (dhd->sssr_dig_buf_after) {
18231 dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
18232 dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
18233 }
18234 #endif /* DHD_LOG_DUMP */
18235 } else if ((dhd->sssr_reg_info.length > OFFSETOF(sssr_reg_info_v1_t, dig_mem_info)) &&
18236 dhd->sssr_reg_info.dig_mem_info.dig_sr_addr) {
18237 arr_len[SSSR_DIG_BEFORE] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
18238 arr_len[SSSR_DIG_AFTER] = (dhd->sssr_reg_info.dig_mem_info.dig_sr_size);
18239 DHD_ERROR(("%s: arr_len[SSSR_DIG_BEFORE] : %d\n", __FUNCTION__,
18240 arr_len[SSSR_DIG_BEFORE]));
18241 DHD_ERROR(("%s: arr_len[SSSR_DIG_AFTER] : %d\n", __FUNCTION__,
18242 arr_len[SSSR_DIG_AFTER]));
18243 #ifdef DHD_LOG_DUMP
18244 if (dhd->sssr_dig_buf_before) {
18245 dhd_print_buf_addr(dhd, "SSSR_DIG_BEFORE",
18246 dhd->sssr_dig_buf_before, arr_len[SSSR_DIG_BEFORE]);
18247 }
18248 if (dhd->sssr_dig_buf_after) {
18249 dhd_print_buf_addr(dhd, "SSSR_DIG_AFTER",
18250 dhd->sssr_dig_buf_after, arr_len[SSSR_DIG_AFTER]);
18251 }
18252 #endif /* DHD_LOG_DUMP */
18253 }
18254 return BCME_OK;
18255 }
18256
18257 void
18258 dhd_nla_put_sssr_dump_len(void *ndev, uint32 *arr_len)
18259 {
18260 dhd_info_t *dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18261 dhd_pub_t *dhdp = &dhd_info->pub;
18262
18263 if (dhdp->sssr_dump_collected) {
18264 dhdpcie_sssr_dump_get_before_after_len(dhdp, arr_len);
18265 }
18266 }
18267 #endif /* DHD_SSSR_DUMP */
18268
18269 uint32
dhd_get_time_str_len(void)
18271 {
18272 char *ts = NULL, time_str[128];
18273
18274 ts = dhd_log_dump_get_timestamp();
18275 snprintf(time_str, sizeof(time_str),
18276 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
18277 return strlen(time_str);
18278 }
18279
18280 #ifdef BCMPCIE
18281 uint32
18282 dhd_get_ext_trap_len(void *ndev, dhd_pub_t *dhdp)
18283 {
18284 int length = 0;
18285 log_dump_section_hdr_t sec_hdr;
18286 dhd_info_t *dhd_info;
18287
18288 if (ndev) {
18289 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18290 dhdp = &dhd_info->pub;
18291 }
18292
18293 if (!dhdp)
18294 return length;
18295
18296 if (dhdp->extended_trap_data) {
18297 length = (strlen(EXT_TRAP_LOG_HDR)
18298 + sizeof(sec_hdr) + BCMPCIE_EXT_TRAP_DATA_MAXLEN);
18299 }
18300 return length;
18301 }
18302 #endif /* BCMPCIE */
18303
18304 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18305 uint32
18306 dhd_get_health_chk_len(void *ndev, dhd_pub_t *dhdp)
18307 {
18308 int length = 0;
18309 log_dump_section_hdr_t sec_hdr;
18310 dhd_info_t *dhd_info;
18311
18312 if (ndev) {
18313 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18314 dhdp = &dhd_info->pub;
18315 }
18316
18317 if (!dhdp)
18318 return length;
18319
18320 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18321 length = (strlen(HEALTH_CHK_LOG_HDR)
18322 + sizeof(sec_hdr) + HEALTH_CHK_BUF_SIZE);
18323 }
18324 return length;
18325 }
18326 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18327
18328 uint32
18329 dhd_get_dhd_dump_len(void *ndev, dhd_pub_t *dhdp)
18330 {
18331 int length = 0;
18332 log_dump_section_hdr_t sec_hdr;
18333 dhd_info_t *dhd_info;
	int remain_len = 0;
18335
18336 if (ndev) {
18337 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18338 dhdp = &dhd_info->pub;
18339 }
18340
18341 if (!dhdp)
18342 return length;
18343
18344 if (dhdp->concise_dbg_buf) {
18345 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18346 if (remain_len <= 0) {
18347 DHD_ERROR(("%s: error getting concise debug info !\n",
18348 __FUNCTION__));
18349 return length;
18350 }
18351 length = (strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr) +
18352 (CONCISE_DUMP_BUFLEN - remain_len));
18353 }
18354 return length;
18355 }
18356
18357 uint32
18358 dhd_get_cookie_log_len(void *ndev, dhd_pub_t *dhdp)
18359 {
18360 int length = 0;
18361 dhd_info_t *dhd_info;
18362
18363 if (ndev) {
18364 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18365 dhdp = &dhd_info->pub;
18366 }
18367
18368 if (!dhdp)
18369 return length;
18370
18371 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18372 length = dhd_log_dump_cookie_len(dhdp);
18373 }
	return length;
}
18377
18378 #ifdef DHD_DUMP_PCIE_RINGS
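/*
 * Worst-case length of the flowring dump section: the header string, the
 * concise ring summary, the section header, and the maximum size of every
 * H2D/D2H message ring.
 */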
18379 uint32
18380 dhd_get_flowring_len(void *ndev, dhd_pub_t *dhdp)
18381 {
18382 int length = 0;
18383 log_dump_section_hdr_t sec_hdr;
18384 dhd_info_t *dhd_info;
18385 uint16 h2d_flowrings_total;
	int remain_len = 0;
18387
18388 if (ndev) {
18389 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18390 dhdp = &dhd_info->pub;
18391 }
18392
18393 if (!dhdp)
18394 return length;
18395
18396 if (dhdp->concise_dbg_buf) {
18397 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18398 if (remain_len <= 0) {
18399 DHD_ERROR(("%s: error getting concise debug info !\n",
18400 __FUNCTION__));
18401 return length;
18402 }
18403 }
18404
18405 length += strlen(FLOWRING_DUMP_HDR);
18406 length += CONCISE_DUMP_BUFLEN - remain_len;
18407 length += sizeof(sec_hdr);
18408 h2d_flowrings_total = dhd_get_max_flow_rings(dhdp);
18409 length += ((H2DRING_TXPOST_ITEMSIZE
18410 * H2DRING_TXPOST_MAX_ITEM * h2d_flowrings_total)
18411 + (D2HRING_TXCMPLT_ITEMSIZE * D2HRING_TXCMPLT_MAX_ITEM)
18412 + (H2DRING_RXPOST_ITEMSIZE * H2DRING_RXPOST_MAX_ITEM)
18413 + (D2HRING_RXCMPLT_ITEMSIZE * D2HRING_RXCMPLT_MAX_ITEM)
18414 + (H2DRING_CTRL_SUB_ITEMSIZE * H2DRING_CTRL_SUB_MAX_ITEM)
18415 + (D2HRING_CTRL_CMPLT_ITEMSIZE * D2HRING_CTRL_CMPLT_MAX_ITEM)
18416 #ifdef EWP_EDL
18417 + (D2HRING_EDL_HDR_SIZE * D2HRING_EDL_MAX_ITEM));
18418 #else
18419 + (H2DRING_INFO_BUFPOST_ITEMSIZE * H2DRING_DYNAMIC_INFO_MAX_ITEM)
18420 + (D2HRING_INFO_BUFCMPLT_ITEMSIZE * D2HRING_DYNAMIC_INFO_MAX_ITEM));
18421 #endif /* EWP_EDL */
18422 return length;
18423 }
18424 #endif /* DHD_DUMP_PCIE_RINGS */
18425
18426 #ifdef EWP_ECNTRS_LOGGING
18427 uint32
18428 dhd_get_ecntrs_len(void *ndev, dhd_pub_t *dhdp)
18429 {
18430 dhd_info_t *dhd_info;
18431 log_dump_section_hdr_t sec_hdr;
18432 int length = 0;
18433 dhd_dbg_ring_t *ring;
18434
18435 if (ndev) {
18436 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18437 dhdp = &dhd_info->pub;
18438 }
18439
18440 if (!dhdp)
18441 return length;
18442
18443 if (logdump_ecntr_enable && dhdp->ecntr_dbg_ring) {
18444 ring = (dhd_dbg_ring_t *)dhdp->ecntr_dbg_ring;
18445 length = ring->ring_size + strlen(ECNTRS_LOG_HDR) + sizeof(sec_hdr);
18446 }
18447 return length;
18448 }
18449 #endif /* EWP_ECNTRS_LOGGING */
18450
18451 #ifdef EWP_RTT_LOGGING
18452 uint32
18453 dhd_get_rtt_len(void *ndev, dhd_pub_t *dhdp)
18454 {
18455 dhd_info_t *dhd_info;
18456 log_dump_section_hdr_t sec_hdr;
18457 int length = 0;
18458 dhd_dbg_ring_t *ring;
18459
18460 if (ndev) {
18461 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18462 dhdp = &dhd_info->pub;
18463 }
18464
18465 if (!dhdp)
18466 return length;
18467
18468 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
18469 ring = (dhd_dbg_ring_t *)dhdp->rtt_dbg_ring;
18470 length = ring->ring_size + strlen(RTT_LOG_HDR) + sizeof(sec_hdr);
18471 }
18472 return length;
18473 }
18474 #endif /* EWP_RTT_LOGGING */
18475
18476 int
18477 dhd_get_dld_log_dump(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18478 void *fp, uint32 len, int type, void *pos)
18479 {
18480 int ret = BCME_OK;
18481 struct dhd_log_dump_buf *dld_buf;
18482 log_dump_section_hdr_t sec_hdr;
18483 dhd_info_t *dhd_info;
18484
18485 dld_buf = &g_dld_buf[type];
18486
18487 if (dev) {
18488 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18489 dhdp = &dhd_info->pub;
18490 } else if (!dhdp) {
18491 return BCME_ERROR;
18492 }
18493
18494 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
18495
18496 dhd_init_sec_hdr(&sec_hdr);
18497
18498 /* write the section header first */
18499 ret = dhd_export_debug_data(dld_hdrs[type].hdr_str, fp, user_buf,
18500 strlen(dld_hdrs[type].hdr_str), pos);
18501 if (ret < 0)
18502 goto exit;
18503 len -= (uint32)strlen(dld_hdrs[type].hdr_str);
18504 len -= (uint32)sizeof(sec_hdr);
18505 sec_hdr.type = dld_hdrs[type].sec_type;
18506 sec_hdr.length = len;
18507 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
18508 if (ret < 0)
18509 goto exit;
18510 ret = dhd_export_debug_data(dld_buf->buffer, fp, user_buf, len, pos);
18511 if (ret < 0)
18512 goto exit;
18513
18514 exit:
18515 return ret;
18516 }
18517
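/*
 * Flush all pending firmware log/event work so the subsequent dump captures
 * up-to-date data: pull preserve logs from the extended trap data on a trap,
 * drain the logtrace/EDL work items (blocking), and, on CUSTOMER_HW4 builds,
 * print the preserve buffer tail to the console.
 */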
18518 static int
18519 dhd_log_flush(dhd_pub_t *dhdp, log_dump_type_t *type)
18520 {
18521 unsigned long flags = 0;
18522 #ifdef EWP_EDL
18523 int i = 0;
18524 #endif /* EWP_EDL */
18525 dhd_info_t *dhd_info = NULL;
18526
	/* If dhdp is NULL, it is extremely unlikely that a log dump will be
	 * scheduled, so not freeing 'type' here is OK; even if we wanted to
	 * free 'type' we could not do so, since 'dhdp->osh' is unavailable
	 * when dhdp is NULL.
	 */
18532 if (!dhdp || !type) {
18533 if (dhdp) {
18534 DHD_GENERAL_LOCK(dhdp, flags);
18535 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
18536 dhd_os_busbusy_wake(dhdp);
18537 DHD_GENERAL_UNLOCK(dhdp, flags);
18538 }
18539 return BCME_ERROR;
18540 }
18541
18542 dhd_info = (dhd_info_t *)dhdp->info;
	/* in case of a trap, get the preserve logs from ETD */
18544 #if defined(BCMPCIE) && defined(EWP_ETD_PRSRV_LOGS)
18545 if (dhdp->dongle_trap_occured &&
18546 dhdp->extended_trap_data) {
18547 dhdpcie_get_etd_preserve_logs(dhdp, (uint8 *)dhdp->extended_trap_data,
18548 &dhd_info->event_data);
18549 }
#endif /* BCMPCIE && EWP_ETD_PRSRV_LOGS */
18551
18552 /* flush the event work items to get any fw events/logs
18553 * flush_work is a blocking call
18554 */
18555 #ifdef EWP_EDL
18556 if (dhd_info->pub.dongle_edl_support) {
18557 /* wait till existing edl items are processed */
18558 dhd_flush_logtrace_process(dhd_info);
		/* dhd_flush_logtrace_process will ensure that the work items in the
		 * EDL ring from rd to wr are processed. But if wr has wrapped
		 * around, only the work items from rd to the ring end are processed.
		 * So, to ensure that the work items at the beginning of the ring
		 * are also processed in the wrap-around case, call it twice more.
		 */
18566 for (i = 0; i < 2; i++) {
18567 /* blocks till the edl items are processed */
18568 dhd_flush_logtrace_process(dhd_info);
18569 }
18570 } else {
18571 dhd_flush_logtrace_process(dhd_info);
18572 }
18573 #else
18574 dhd_flush_logtrace_process(dhd_info);
18575 #endif /* EWP_EDL */
18576
18577 #ifdef CUSTOMER_HW4_DEBUG
	/* Print the last 'x' KB of the preserve buffer data to the kmsg console;
	 * this is to address cases where debug_dump is not
	 * available for debugging.
	 */
18582 dhd_log_dump_print_tail(dhdp,
18583 &g_dld_buf[DLD_BUF_TYPE_PRESERVE], logdump_prsrv_tailsize);
18584 #endif /* CUSTOMER_HW4_DEBUG */
18585 return BCME_OK;
18586 }
18587
18588 int
18589 dhd_get_debug_dump_file_name(void *dev, dhd_pub_t *dhdp, char *dump_path, int size)
18590 {
18591 dhd_info_t *dhd_info;
18592
18593 if (dev) {
18594 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18595 dhdp = &dhd_info->pub;
18596 }
18597
18598 if (!dhdp)
18599 return BCME_ERROR;
18600
18601 memset(dump_path, 0, size);
18602
18603 switch (dhdp->debug_dump_subcmd) {
18604 case CMD_UNWANTED:
18605 snprintf(dump_path, size, "%s",
18606 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18607 DHD_DUMP_SUBSTR_UNWANTED);
18608 break;
18609 case CMD_DISCONNECTED:
18610 snprintf(dump_path, size, "%s",
18611 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE
18612 DHD_DUMP_SUBSTR_DISCONNECTED);
18613 break;
18614 default:
18615 snprintf(dump_path, size, "%s",
18616 DHD_COMMON_DUMP_PATH DHD_DEBUG_DUMP_TYPE);
18617 }
18618
18619 if (!dhdp->logdump_periodic_flush) {
18620 get_debug_dump_time(dhdp->debug_dump_time_str);
18621 snprintf(dump_path + strlen(dump_path),
18622 size - strlen(dump_path),
18623 "_%s", dhdp->debug_dump_time_str);
18624 }
18625 return BCME_OK;
18626 }
18627
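/*
 * Return the number of bytes needed to dump one log buffer type: the valid
 * payload (the whole buffer if it has wrapped, else front..present) plus the
 * section header and its header string.
 */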
18628 uint32
18629 dhd_get_dld_len(int log_type)
18630 {
18631 unsigned long wr_size = 0;
18632 unsigned long buf_size = 0;
18633 unsigned long flags = 0;
18634 struct dhd_log_dump_buf *dld_buf;
18635 log_dump_section_hdr_t sec_hdr;
18636
18637 /* calculate the length of the log */
18638 dld_buf = &g_dld_buf[log_type];
18639 buf_size = (unsigned long)dld_buf->max -
18640 (unsigned long)dld_buf->buffer;
18641
18642 if (dld_buf->wraparound) {
18643 wr_size = buf_size;
18644 } else {
18645 /* need to hold the lock before accessing 'present' and 'remain' ptrs */
18646 spin_lock_irqsave(&dld_buf->lock, flags);
18647 wr_size = (unsigned long)dld_buf->present -
18648 (unsigned long)dld_buf->front;
18649 spin_unlock_irqrestore(&dld_buf->lock, flags);
18650 }
18651 return (wr_size + sizeof(sec_hdr) + strlen(dld_hdrs[log_type].hdr_str));
18652 }
18653
18654 static void
18655 dhd_get_time_str(dhd_pub_t *dhdp, char *time_str, int size)
18656 {
18657 char *ts = NULL;
18658 memset(time_str, 0, size);
18659 ts = dhd_log_dump_get_timestamp();
18660 snprintf(time_str, size,
18661 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
18662 }
18663
18664 int
18665 dhd_print_time_str(const void *user_buf, void *fp, uint32 len, void *pos)
18666 {
18667 char *ts = NULL;
18668 int ret = 0;
18669 char time_str[128];
18670
18671 memset_s(time_str, sizeof(time_str), 0, sizeof(time_str));
18672 ts = dhd_log_dump_get_timestamp();
18673 snprintf(time_str, sizeof(time_str),
18674 "\n\n ========== LOG DUMP TAKEN AT : %s =========\n", ts);
18675
18676 /* write the timestamp hdr to the file first */
18677 ret = dhd_export_debug_data(time_str, fp, user_buf, strlen(time_str), pos);
18678 if (ret < 0) {
18679 DHD_ERROR(("write file error, err = %d\n", ret));
18680 }
18681 return ret;
18682 }
18683
18684 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT)
18685 int
18686 dhd_print_health_chk_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18687 void *fp, uint32 len, void *pos)
18688 {
18689 int ret = BCME_OK;
18690 log_dump_section_hdr_t sec_hdr;
18691 dhd_info_t *dhd_info;
18692
18693 if (dev) {
18694 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18695 dhdp = &dhd_info->pub;
18696 }
18697
18698 if (!dhdp)
18699 return BCME_ERROR;
18700
18701 dhd_init_sec_hdr(&sec_hdr);
18702
18703 if (dhdp->memdump_type == DUMP_TYPE_DONGLE_HOST_EVENT) {
18704 /* write the section header first */
18705 ret = dhd_export_debug_data(HEALTH_CHK_LOG_HDR, fp, user_buf,
18706 strlen(HEALTH_CHK_LOG_HDR), pos);
18707 if (ret < 0)
18708 goto exit;
18709
18710 len -= (uint32)strlen(HEALTH_CHK_LOG_HDR);
18711 sec_hdr.type = LOG_DUMP_SECTION_HEALTH_CHK;
18712 sec_hdr.length = HEALTH_CHK_BUF_SIZE;
18713 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
18714 if (ret < 0)
18715 goto exit;
18716
18717 len -= (uint32)sizeof(sec_hdr);
18718 /* write the log */
18719 ret = dhd_export_debug_data((char *)dhdp->health_chk_event_data, fp,
18720 user_buf, len, pos);
18721 if (ret < 0)
18722 goto exit;
18723 }
18724 exit:
18725 return ret;
18726 }
18727 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT */
18728
18729 #ifdef BCMPCIE
18730 int
18731 dhd_print_ext_trap_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18732 void *fp, uint32 len, void *pos)
18733 {
18734 int ret = BCME_OK;
18735 log_dump_section_hdr_t sec_hdr;
18736 dhd_info_t *dhd_info;
18737
18738 if (dev) {
18739 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18740 dhdp = &dhd_info->pub;
18741 }
18742
18743 if (!dhdp)
18744 return BCME_ERROR;
18745
18746 dhd_init_sec_hdr(&sec_hdr);
18747
18748 /* append extended trap data to the file in case of traps */
18749 if (dhdp->dongle_trap_occured &&
18750 dhdp->extended_trap_data) {
18751 /* write the section header first */
18752 ret = dhd_export_debug_data(EXT_TRAP_LOG_HDR, fp, user_buf,
18753 strlen(EXT_TRAP_LOG_HDR), pos);
18754 if (ret < 0)
18755 goto exit;
18756
18757 len -= (uint32)strlen(EXT_TRAP_LOG_HDR);
18758 sec_hdr.type = LOG_DUMP_SECTION_EXT_TRAP;
18759 sec_hdr.length = BCMPCIE_EXT_TRAP_DATA_MAXLEN;
18760 ret = dhd_export_debug_data((uint8 *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
18761 if (ret < 0)
18762 goto exit;
18763
18764 len -= (uint32)sizeof(sec_hdr);
18765 /* write the log */
18766 ret = dhd_export_debug_data((uint8 *)dhdp->extended_trap_data, fp,
18767 user_buf, len, pos);
18768 if (ret < 0)
18769 goto exit;
18770 }
18771 exit:
18772 return ret;
18773 }
18774 #endif /* BCMPCIE */
18775
18776 int
18777 dhd_print_dump_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18778 void *fp, uint32 len, void *pos)
18779 {
18780 int ret = BCME_OK;
18781 log_dump_section_hdr_t sec_hdr;
18782 dhd_info_t *dhd_info;
18783
18784 if (dev) {
18785 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18786 dhdp = &dhd_info->pub;
18787 }
18788
18789 if (!dhdp)
18790 return BCME_ERROR;
18791
18792 dhd_init_sec_hdr(&sec_hdr);
18793
18794 ret = dhd_export_debug_data(DHD_DUMP_LOG_HDR, fp, user_buf, strlen(DHD_DUMP_LOG_HDR), pos);
18795 if (ret < 0)
18796 goto exit;
18797
18798 len -= (uint32)strlen(DHD_DUMP_LOG_HDR);
18799 sec_hdr.type = LOG_DUMP_SECTION_DHD_DUMP;
18800 sec_hdr.length = len;
18801 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
18802 if (ret < 0)
18803 goto exit;
18804
18805 len -= (uint32)sizeof(sec_hdr);
18806
18807 if (dhdp->concise_dbg_buf) {
18808 dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18809 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf, len, pos);
18810 if (ret < 0)
18811 goto exit;
18812 }
18813
18814 exit:
18815 return ret;
18816 }
18817
18818 int
18819 dhd_print_cookie_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18820 void *fp, uint32 len, void *pos)
18821 {
18822 int ret = BCME_OK;
18823 dhd_info_t *dhd_info;
18824
18825 if (dev) {
18826 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18827 dhdp = &dhd_info->pub;
18828 }
18829
18830 if (!dhdp)
18831 return BCME_ERROR;
18832
18833 if (dhdp->logdump_cookie && dhd_logdump_cookie_count(dhdp) > 0) {
18834 ret = dhd_log_dump_cookie_to_file(dhdp, fp, user_buf, (unsigned long *)pos);
18835 }
18836 return ret;
18837 }
18838
18839 #ifdef DHD_DUMP_PCIE_RINGS
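/*
 * Write the flowring dump section: the header string, the concise ring
 * summary, the section header, and then the raw H2D/D2H ring contents.
 */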
18840 int
18841 dhd_print_flowring_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18842 void *fp, uint32 len, void *pos)
18843 {
18844 log_dump_section_hdr_t sec_hdr;
18845 int ret = BCME_OK;
18846 uint32 remain_len = 0;
18847 dhd_info_t *dhd_info;
18848
18849 if (dev) {
18850 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18851 dhdp = &dhd_info->pub;
18852 }
18853
18854 if (!dhdp)
18855 return BCME_ERROR;
18856
18857 dhd_init_sec_hdr(&sec_hdr);
18858
18859 remain_len = dhd_dump(dhdp, (char *)dhdp->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
18860 memset(dhdp->concise_dbg_buf, 0, CONCISE_DUMP_BUFLEN);
18861
18862 /* write the section header first */
18863 ret = dhd_export_debug_data(FLOWRING_DUMP_HDR, fp, user_buf,
18864 strlen(FLOWRING_DUMP_HDR), pos);
18865 if (ret < 0)
18866 goto exit;
18867
18868 /* Write the ring summary */
18869 ret = dhd_export_debug_data(dhdp->concise_dbg_buf, fp, user_buf,
18870 (CONCISE_DUMP_BUFLEN - remain_len), pos);
18871 if (ret < 0)
18872 goto exit;
18873
18874 sec_hdr.type = LOG_DUMP_SECTION_FLOWRING;
18875 sec_hdr.length = len;
18876 ret = dhd_export_debug_data((char *)&sec_hdr, fp, user_buf, sizeof(sec_hdr), pos);
18877 if (ret < 0)
18878 goto exit;
18879
18880 /* write the log */
18881 ret = dhd_d2h_h2d_ring_dump(dhdp, fp, user_buf, (unsigned long *)pos, TRUE);
18882 if (ret < 0)
18883 goto exit;
18884
18885 exit:
18886 return ret;
18887 }
18888 #endif /* DHD_DUMP_PCIE_RINGS */
18889
18890 #ifdef EWP_ECNTRS_LOGGING
18891 int
18892 dhd_print_ecntrs_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18893 void *fp, uint32 len, void *pos)
18894 {
18895 log_dump_section_hdr_t sec_hdr;
18896 int ret = BCME_OK;
18897 dhd_info_t *dhd_info;
18898
18899 if (dev) {
18900 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18901 dhdp = &dhd_info->pub;
18902 }
18903
18904 if (!dhdp)
18905 return BCME_ERROR;
18906
18907 dhd_init_sec_hdr(&sec_hdr);
18908
18909 if (logdump_ecntr_enable &&
18910 dhdp->ecntr_dbg_ring) {
18911 sec_hdr.type = LOG_DUMP_SECTION_ECNTRS;
18912 ret = dhd_dump_debug_ring(dhdp, dhdp->ecntr_dbg_ring,
18913 user_buf, &sec_hdr, ECNTRS_LOG_HDR, len, LOG_DUMP_SECTION_ECNTRS);
18914 }
	return ret;
}
18918 #endif /* EWP_ECNTRS_LOGGING */
18919
18920 #ifdef EWP_RTT_LOGGING
18921 int
18922 dhd_print_rtt_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18923 void *fp, uint32 len, void *pos)
18924 {
18925 log_dump_section_hdr_t sec_hdr;
18926 int ret = BCME_OK;
18927 dhd_info_t *dhd_info;
18928
18929 if (dev) {
18930 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18931 dhdp = &dhd_info->pub;
18932 }
18933
18934 if (!dhdp)
18935 return BCME_ERROR;
18936
18937 dhd_init_sec_hdr(&sec_hdr);
18938
18939 if (logdump_rtt_enable && dhdp->rtt_dbg_ring) {
18940 ret = dhd_dump_debug_ring(dhdp, dhdp->rtt_dbg_ring,
18941 user_buf, &sec_hdr, RTT_LOG_HDR, len, LOG_DUMP_SECTION_RTT);
18942 }
	return ret;
}
18946 #endif /* EWP_RTT_LOGGING */
18947
18948 #ifdef DHD_STATUS_LOGGING
18949 int
18950 dhd_print_status_log_data(void *dev, dhd_pub_t *dhdp, const void *user_buf,
18951 void *fp, uint32 len, void *pos)
18952 {
18953 dhd_info_t *dhd_info;
18954
18955 if (dev) {
18956 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)dev);
18957 dhdp = &dhd_info->pub;
18958 }
18959
18960 if (!dhdp) {
18961 return BCME_ERROR;
18962 }
18963
18964 return dhd_statlog_write_logdump(dhdp, user_buf, fp, len, pos);
18965 }
18966
18967 uint32
18968 dhd_get_status_log_len(void *ndev, dhd_pub_t *dhdp)
18969 {
18970 dhd_info_t *dhd_info;
18971 uint32 length = 0;
18972
18973 if (ndev) {
18974 dhd_info = *(dhd_info_t **)netdev_priv((struct net_device *)ndev);
18975 dhdp = &dhd_info->pub;
18976 }
18977
18978 if (dhdp) {
18979 length = dhd_statlog_get_logbuf_len(dhdp);
18980 }
18981
18982 return length;
18983 }
18984 #endif /* DHD_STATUS_LOGGING */
18985
18986 void
18987 dhd_init_sec_hdr(log_dump_section_hdr_t *sec_hdr)
18988 {
18989 /* prep the section header */
18990 memset(sec_hdr, 0, sizeof(*sec_hdr));
18991 sec_hdr->magic = LOG_DUMP_MAGIC;
18992 sec_hdr->timestamp = local_clock();
18993 }
18994
18995 /* Must hold 'dhd_os_logdump_lock' before calling this function ! */
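/* The overall sequence is: mark the bus busy, flush pending FW logs, open
 * the dump file (treated as a ring buffer when periodic flush is enabled),
 * then append each section in turn: timestamp, DLD buffers, ecounters,
 * status log, RTT, extended trap data, health check data, concise DHD dump,
 * cookies and flowrings, as compiled in.
 */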
18996 static int
18997 do_dhd_log_dump(dhd_pub_t *dhdp, log_dump_type_t *type)
18998 {
18999 int ret = 0, i = 0;
19000 struct file *fp = NULL;
19001 #if defined(KERNEL_DS) && defined(USER_DS)
19002 mm_segment_t old_fs;
19003 #endif /* KERNEL_DS && USER_DS */
19004 loff_t pos = 0;
19005 char dump_path[128];
19006 uint32 file_mode;
19007 unsigned long flags = 0;
19008 size_t log_size = 0;
19009 size_t fspace_remain = 0;
19010 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
19011 int isize = 0;
19012 #else
19013 struct kstat stat;
19014 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */
19015 char time_str[128];
19016 unsigned int len = 0;
19017 log_dump_section_hdr_t sec_hdr;
19018
19019 DHD_ERROR(("%s: ENTER \n", __FUNCTION__));
19020
19021 DHD_GENERAL_LOCK(dhdp, flags);
19022 if (DHD_BUS_CHECK_DOWN_OR_DOWN_IN_PROGRESS(dhdp)) {
19023 DHD_GENERAL_UNLOCK(dhdp, flags);
19024 DHD_ERROR(("%s: bus is down! can't collect log dump. \n", __FUNCTION__));
19025 goto exit1;
19026 }
19027 DHD_BUS_BUSY_SET_IN_LOGDUMP(dhdp);
19028 DHD_GENERAL_UNLOCK(dhdp, flags);
19029
19030 if ((ret = dhd_log_flush(dhdp, type)) < 0) {
19031 goto exit1;
19032 }
19033 /* change to KERNEL_DS address limit */
19034 #if defined(KERNEL_DS) && defined(USER_DS)
19035 old_fs = get_fs();
19036 set_fs(KERNEL_DS);
19037 #endif /* KERNEL_DS && USER_DS */
19038 dhd_get_debug_dump_file_name(NULL, dhdp, dump_path, sizeof(dump_path));
19039
19040 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
19041 DHD_ERROR(("DHD version: %s\n", dhd_version));
19042 DHD_ERROR(("F/W version: %s\n", fw_version));
19043
19044 dhd_log_dump_buf_addr(dhdp, type);
19045
	dhd_get_time_str(dhdp, time_str, sizeof(time_str));
19047
19048 /* if this is the first time after dhd is loaded,
19049 * or, if periodic flush is disabled, clear the log file
19050 */
19051 if (!dhdp->logdump_periodic_flush || dhdp->last_file_posn == 0)
19052 file_mode = O_CREAT | O_WRONLY | O_SYNC | O_TRUNC;
19053 else
19054 file_mode = O_CREAT | O_RDWR | O_SYNC;
19055
19056 fp = filp_open(dump_path, file_mode, 0664);
19057 if (IS_ERR(fp)) {
		/* If this is an Android-installed image, retry under the '/root' directory */
#if defined(CONFIG_X86) && defined(OEM_ANDROID)
		DHD_ERROR(("%s: file open error on installed Android image, trying /root...\n",
			__FUNCTION__));
		snprintf(dump_path, sizeof(dump_path), "/root/" DHD_DEBUG_DUMP_TYPE);
19063 if (!dhdp->logdump_periodic_flush) {
19064 snprintf(dump_path + strlen(dump_path),
19065 sizeof(dump_path) - strlen(dump_path),
19066 "_%s", dhdp->debug_dump_time_str);
19067 }
19068 fp = filp_open(dump_path, file_mode, 0664);
19069 if (IS_ERR(fp)) {
19070 ret = PTR_ERR(fp);
19071 DHD_ERROR(("open file error, err = %d\n", ret));
19072 goto exit2;
19073 }
19074 DHD_ERROR(("debug_dump_path = %s\n", dump_path));
19075 #else
19076 ret = PTR_ERR(fp);
19077 DHD_ERROR(("open file error, err = %d\n", ret));
19078 goto exit2;
19079 #endif /* CONFIG_X86 && OEM_ANDROID */
19080 }
19081
19082 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0))
19083 isize = i_size_read(file_inode(fp));
19084
	/* if someone else has changed the file */
19086 if (dhdp->last_file_posn != 0 &&
19087 isize < dhdp->last_file_posn) {
19088 dhdp->last_file_posn = 0;
19089 }
19090 #else
19091 ret = vfs_stat(dump_path, &stat);
19092 if (ret < 0) {
19093 DHD_ERROR(("file stat error, err = %d\n", ret));
19094 goto exit2;
19095 }
19096
	/* if someone else has changed the file */
19098 if (dhdp->last_file_posn != 0 &&
19099 stat.size < dhdp->last_file_posn) {
19100 dhdp->last_file_posn = 0;
19101 }
19102 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(5, 10, 0) */
19103 if (dhdp->logdump_periodic_flush) {
19104 log_size = strlen(time_str) + strlen(DHD_DUMP_LOG_HDR) + sizeof(sec_hdr);
19105 /* calculate the amount of space required to dump all logs */
19106 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
19107 if (*type != DLD_BUF_TYPE_ALL && i != *type)
19108 continue;
19109
19110 if (g_dld_buf[i].wraparound) {
19111 log_size += (unsigned long)g_dld_buf[i].max
19112 - (unsigned long)g_dld_buf[i].buffer;
19113 } else {
19114 spin_lock_irqsave(&g_dld_buf[i].lock, flags);
19115 log_size += (unsigned long)g_dld_buf[i].present -
19116 (unsigned long)g_dld_buf[i].front;
19117 spin_unlock_irqrestore(&g_dld_buf[i].lock, flags);
19118 }
19119 log_size += strlen(dld_hdrs[i].hdr_str) + sizeof(sec_hdr);
19120
19121 if (*type != DLD_BUF_TYPE_ALL && i == *type)
19122 break;
19123 }
19124
19125 ret = generic_file_llseek(fp, dhdp->last_file_posn, SEEK_CUR);
19126 if (ret < 0) {
19127 DHD_ERROR(("file seek last posn error ! err = %d \n", ret));
19128 goto exit2;
19129 }
19130 pos = fp->f_pos;
19131
19132 /* if the max file size is reached, wrap around to beginning of the file
19133 * we're treating the file as a large ring buffer
19134 */
19135 fspace_remain = logdump_max_filesize - pos;
19136 if (log_size > fspace_remain) {
19137 fp->f_pos -= pos;
19138 pos = fp->f_pos;
19139 }
19140 }
19141
19142 dhd_print_time_str(0, fp, len, &pos);
19143
19144 for (i = 0; i < DLD_BUFFER_NUM; ++i) {
19145
19146 if (*type != DLD_BUF_TYPE_ALL && i != *type)
19147 continue;
19148
19149 len = dhd_get_dld_len(i);
19150 dhd_get_dld_log_dump(NULL, dhdp, 0, fp, len, i, &pos);
19151 if (*type != DLD_BUF_TYPE_ALL)
19152 break;
19153 }
19154
19155 #ifdef EWP_ECNTRS_LOGGING
19156 /* periodic flushing of ecounters is NOT supported */
19157 if (*type == DLD_BUF_TYPE_ALL &&
19158 logdump_ecntr_enable &&
19159 dhdp->ecntr_dbg_ring) {
19160 dhd_log_dump_ring_to_file(dhdp, dhdp->ecntr_dbg_ring,
19161 fp, (unsigned long *)&pos,
19162 &sec_hdr, ECNTRS_LOG_HDR, LOG_DUMP_SECTION_ECNTRS);
19163 }
19164 #endif /* EWP_ECNTRS_LOGGING */
19165
19166 #ifdef DHD_STATUS_LOGGING
19167 if (dhdp->statlog) {
19168 /* write the statlog */
19169 len = dhd_get_status_log_len(NULL, dhdp);
19170 if (len) {
19171 if (dhd_print_status_log_data(NULL, dhdp, 0, fp,
19172 len, &pos) < 0) {
19173 goto exit2;
19174 }
19175 }
19176 }
19177 #endif /* DHD_STATUS_LOGGING */
19178
19179 #ifdef EWP_RTT_LOGGING
	/* periodic flushing of rtt logs is NOT supported */
19181 if (*type == DLD_BUF_TYPE_ALL &&
19182 logdump_rtt_enable &&
19183 dhdp->rtt_dbg_ring) {
19184 dhd_log_dump_ring_to_file(dhdp, dhdp->rtt_dbg_ring,
19185 fp, (unsigned long *)&pos,
19186 &sec_hdr, RTT_LOG_HDR, LOG_DUMP_SECTION_RTT);
19187 }
19188 #endif /* EWP_RTT_LOGGING */
19189
19190 #ifdef BCMPCIE
19191 len = dhd_get_ext_trap_len(NULL, dhdp);
19192 if (len) {
19193 if (dhd_print_ext_trap_data(NULL, dhdp, 0, fp, len, &pos) < 0)
19194 goto exit2;
19195 }
19196 #endif /* BCMPCIE */
19197
19198 #if defined(DHD_FW_COREDUMP) && defined(DNGL_EVENT_SUPPORT) && defined(BCMPCIE)
19199 len = dhd_get_health_chk_len(NULL, dhdp);
19200 if (len) {
		if (dhd_print_health_chk_data(NULL, dhdp, 0, fp, len, &pos) < 0)
19202 goto exit2;
19203 }
19204 #endif /* DHD_FW_COREDUMP && DNGL_EVENT_SUPPORT && BCMPCIE */
19205
19206 len = dhd_get_dhd_dump_len(NULL, dhdp);
19207 if (len) {
19208 if (dhd_print_dump_data(NULL, dhdp, 0, fp, len, &pos) < 0)
19209 goto exit2;
19210 }
19211
19212 len = dhd_get_cookie_log_len(NULL, dhdp);
19213 if (len) {
19214 if (dhd_print_cookie_data(NULL, dhdp, 0, fp, len, &pos) < 0)
19215 goto exit2;
19216 }
19217
19218 #ifdef DHD_DUMP_PCIE_RINGS
19219 len = dhd_get_flowring_len(NULL, dhdp);
19220 if (len) {
19221 if (dhd_print_flowring_data(NULL, dhdp, 0, fp, len, &pos) < 0)
19222 goto exit2;
19223 }
19224 #endif // endif
19225
19226 if (dhdp->logdump_periodic_flush) {
19227 /* store the last position written to in the file for future use */
19228 dhdp->last_file_posn = pos;
19229 }
19230
19231 exit2:
19232 if (!IS_ERR(fp) && fp != NULL) {
19233 filp_close(fp, NULL);
19234 DHD_ERROR(("%s: Finished writing log dump to file - '%s' \n",
19235 __FUNCTION__, dump_path));
19236 }
19237 #if defined(KERNEL_DS) && defined(USER_DS)
19238 set_fs(old_fs);
19239 #endif /* KERNEL_DS && USER_DS */
19240 exit1:
19241 if (type) {
19242 MFREE(dhdp->osh, type, sizeof(*type));
19243 }
19244 DHD_GENERAL_LOCK(dhdp, flags);
19245 DHD_BUS_BUSY_CLEAR_IN_LOGDUMP(dhdp);
19246 dhd_os_busbusy_wake(dhdp);
19247 DHD_GENERAL_UNLOCK(dhdp, flags);
19248
19249 #ifdef DHD_DUMP_MNGR
19250 if (ret >= 0) {
19251 dhd_dump_file_manage_enqueue(dhdp, dump_path, DHD_DEBUG_DUMP_TYPE);
19252 }
19253 #endif /* DHD_DUMP_MNGR */
19254
19255 return (ret < 0) ? BCME_ERROR : BCME_OK;
19256 }
19257 #endif /* DHD_LOG_DUMP */
19258
19259 /* This function writes data to the file pointed by fp, OR
19260 * copies data to the user buffer sent by upper layer(HAL).
19261 */
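/* Note: 'pos' is interpreted according to the destination: as a loff_t file
 * offset when writing through 'fp', and as an int byte offset into
 * 'user_buf' otherwise. In both cases it is advanced past the data written.
 */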
19262 int
19263 dhd_export_debug_data(void *mem_buf, void *fp, const void *user_buf, int buf_len, void *pos)
19264 {
19265 int ret = BCME_OK;
19266
19267 if (fp) {
19268 ret = vfs_write(fp, mem_buf, buf_len, (loff_t *)pos);
19269 if (ret < 0) {
19270 DHD_ERROR(("write file error, err = %d\n", ret));
19271 goto exit;
19272 }
	} else {
		ret = copy_to_user((void *)((uintptr_t)user_buf + (*(int *)pos)),
			mem_buf, buf_len);
		if (ret) {
			DHD_ERROR(("failed to copy into user buffer : %d\n", ret));
			goto exit;
		}
19282 (*(int *)pos) += buf_len;
19283 }
19284 exit:
19285 return ret;
19286 }
19287
19288 /*
19289 * This call is to get the memdump size so that,
19290 * halutil can alloc that much buffer in user space.
19291 */
19292 int
19293 dhd_os_socram_dump(struct net_device *dev, uint32 *dump_size)
19294 {
19295 int ret = BCME_OK;
19296 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19297 dhd_pub_t *dhdp = &dhd->pub;
19298
19299 if (dhdp->busstate == DHD_BUS_DOWN) {
19300 DHD_ERROR(("%s: bus is down\n", __FUNCTION__));
19301 return BCME_ERROR;
19302 }
19303
19304 if (DHD_BUS_CHECK_SUSPEND_OR_SUSPEND_IN_PROGRESS(dhdp)) {
19305 DHD_ERROR(("%s: bus is in suspend(%d) or suspending(0x%x) state, so skip\n",
19306 __FUNCTION__, dhdp->busstate, dhdp->dhd_bus_busy_state));
19307 return BCME_ERROR;
19308 }
19309 #ifdef DHD_PCIE_RUNTIMEPM
19310 dhdpcie_runtime_bus_wake(dhdp, TRUE, __builtin_return_address(0));
19311 #endif /* DHD_PCIE_RUNTIMEPM */
19312 ret = dhd_common_socram_dump(dhdp);
19313 if (ret == BCME_OK) {
19314 *dump_size = dhdp->soc_ram_length;
19315 }
19316 return ret;
19317 }
19318
19319 /*
 * This is to get the actual memdump after getting the memdump size
19321 */
19322 int
19323 dhd_os_get_socram_dump(struct net_device *dev, char **buf, uint32 *size)
19324 {
19325 int ret = BCME_OK;
19326 int orig_len = 0;
19327 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19328 dhd_pub_t *dhdp = &dhd->pub;
19329 if (buf == NULL)
19330 return BCME_ERROR;
19331 orig_len = *size;
19332 if (dhdp->soc_ram) {
19333 if (orig_len >= dhdp->soc_ram_length) {
19334 *buf = dhdp->soc_ram;
19335 *size = dhdp->soc_ram_length;
19336 } else {
19337 ret = BCME_BUFTOOSHORT;
			DHD_ERROR(("the buffer is too short to hold the memory dump;"
				" %d bytes are required\n", dhdp->soc_ram_length));
19340 }
19341 } else {
		DHD_ERROR(("SOC RAM dump is not ready\n"));
19343 ret = BCME_NOTREADY;
19344 }
19345 return ret;
19346 }
19347
19348 int
19349 dhd_os_get_version(struct net_device *dev, bool dhd_ver, char **buf, uint32 size)
19350 {
19351 char *fw_str;
19352
19353 if (size == 0)
19354 return BCME_BADARG;
19355
19356 fw_str = strstr(info_string, "Firmware: ");
19357 if (fw_str == NULL) {
19358 return BCME_ERROR;
19359 }
19360
19361 memset(*buf, 0, size);
19362 if (dhd_ver) {
19363 strncpy(*buf, dhd_version, size - 1);
19364 } else {
19365 strncpy(*buf, fw_str, size - 1);
19366 }
19367 return BCME_OK;
19368 }
19369
19370 #ifdef DHD_PKT_LOGGING
19371 int
19372 dhd_os_get_pktlog_dump(void *dev, const void *user_buf, uint32 len)
19373 {
19374 int ret = BCME_OK;
19375 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19376 dhd_pub_t *dhdp = &dhd->pub;
19377 if (user_buf == NULL) {
19378 DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
19379 return BCME_ERROR;
19380 }
19381
19382 ret = dhd_pktlog_dump_write_memory(dhdp, user_buf, len);
19383 if (ret < 0) {
19384 DHD_ERROR(("%s(): fail to dump pktlog, err = %d\n", __FUNCTION__, ret));
19385 return ret;
19386 }
19387 return ret;
19388 }
19389
19390 uint32
19391 dhd_os_get_pktlog_dump_size(struct net_device *dev)
19392 {
19393 uint32 size = 0;
19394 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19395 dhd_pub_t *dhdp = &dhd->pub;
19396
19397 size = dhd_pktlog_get_dump_length(dhdp);
19398 if (size == 0) {
		DHD_ERROR(("%s(): failed to get pktlog size, size = %u\n", __FUNCTION__, size));
19400 }
19401 return size;
19402 }
19403
19404 void
19405 dhd_os_get_pktlogdump_filename(struct net_device *dev, char *dump_path, int len)
19406 {
19407 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19408 dhd_pub_t *dhdp = &dhd->pub;
19409
19410 dhd_pktlog_get_filename(dhdp, dump_path, len);
19411 }
19412 #endif /* DHD_PKT_LOGGING */
19413 #ifdef DNGL_AXI_ERROR_LOGGING
19414 int
19415 dhd_os_get_axi_error_dump(void *dev, const void *user_buf, uint32 len)
19416 {
19417 int ret = BCME_OK;
19418 dhd_info_t *dhd = *(dhd_info_t **)netdev_priv(dev);
19419 dhd_pub_t *dhdp = &dhd->pub;
19420 loff_t pos = 0;
19421 if (user_buf == NULL) {
19422 DHD_ERROR(("%s(): user buffer is NULL\n", __FUNCTION__));
19423 return BCME_ERROR;
19424 }
19425
19426 ret = dhd_export_debug_data((char *)dhdp->axi_err_dump,
19427 NULL, user_buf, sizeof(dhd_axi_error_dump_t), &pos);
19428
19429 if (ret < 0) {
		DHD_ERROR(("%s(): failed to dump axi error data, err = %d\n", __FUNCTION__, ret));
19431 return ret;
19432 }
19433 return ret;
19434 }
19435
19436 int
19437 dhd_os_get_axi_error_dump_size(struct net_device *dev)
19438 {
	/* sizeof() is always positive, so no error check is needed here */
	int size = sizeof(dhd_axi_error_dump_t);
19445 return size;
19446 }
19447
19448 void
19449 dhd_os_get_axi_error_filename(struct net_device *dev, char *dump_path, int len)
19450 {
19451 snprintf(dump_path, len, "%s",
19452 DHD_COMMON_DUMP_PATH DHD_DUMP_AXI_ERROR_FILENAME);
19453 }
19454 #endif /* DNGL_AXI_ERROR_LOGGING */
19455
19456 bool dhd_sta_associated(dhd_pub_t *dhdp, uint32 bssidx, uint8 *mac)
19457 {
19458 return dhd_find_sta(dhdp, bssidx, mac) ? TRUE : FALSE;
19459 }
19460
19461 #ifdef DHD_L2_FILTER
19462 arp_table_t*
19463 dhd_get_ifp_arp_table_handle(dhd_pub_t *dhdp, uint32 bssidx)
19464 {
19465 dhd_info_t *dhd = dhdp->info;
19466 dhd_if_t *ifp;
19467
19468 ASSERT(bssidx < DHD_MAX_IFS);
19469
19470 ifp = dhd->iflist[bssidx];
19471 return ifp->phnd_arp_table;
19472 }
19473
19474 int dhd_get_parp_status(dhd_pub_t *dhdp, uint32 idx)
19475 {
19476 dhd_info_t *dhd = dhdp->info;
19477 dhd_if_t *ifp;
19478
19479 ASSERT(idx < DHD_MAX_IFS);
19480
19481 ifp = dhd->iflist[idx];
19482
19483 if (ifp)
19484 return ifp->parp_enable;
19485 else
19486 return FALSE;
19487 }
19488
19489 /* Set interface specific proxy arp configuration */
19490 int dhd_set_parp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19491 {
19492 dhd_info_t *dhd = dhdp->info;
19493 dhd_if_t *ifp;
19494 ASSERT(idx < DHD_MAX_IFS);
19495 ifp = dhd->iflist[idx];
19496
19497 if (!ifp)
19498 return BCME_ERROR;
19499
19500 /* At present all 3 variables are being
19501 * handled at once
19502 */
19503 ifp->parp_enable = val;
19504 ifp->parp_discard = val;
19505 ifp->parp_allnode = val;
19506
19507 /* Flush ARP entries when disabled */
19508 if (val == FALSE) {
19509 bcm_l2_filter_arp_table_update(dhdp->osh, ifp->phnd_arp_table, TRUE, NULL,
19510 FALSE, dhdp->tickcnt);
19511 }
19512 return BCME_OK;
19513 }
19514
19515 bool dhd_parp_discard_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19516 {
19517 dhd_info_t *dhd = dhdp->info;
19518 dhd_if_t *ifp;
19519
19520 ASSERT(idx < DHD_MAX_IFS);
19521
19522 ifp = dhd->iflist[idx];
19523
19524 ASSERT(ifp);
19525 return ifp->parp_discard;
19526 }
19527
19528 bool
19529 dhd_parp_allnode_is_enabled(dhd_pub_t *dhdp, uint32 idx)
19530 {
19531 dhd_info_t *dhd = dhdp->info;
19532 dhd_if_t *ifp;
19533
19534 ASSERT(idx < DHD_MAX_IFS);
19535
19536 ifp = dhd->iflist[idx];
19537
19538 ASSERT(ifp);
19539
19540 return ifp->parp_allnode;
19541 }
19542
19543 int dhd_get_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx)
19544 {
19545 dhd_info_t *dhd = dhdp->info;
19546 dhd_if_t *ifp;
19547
19548 ASSERT(idx < DHD_MAX_IFS);
19549
19550 ifp = dhd->iflist[idx];
19551
19552 ASSERT(ifp);
19553
19554 return ifp->dhcp_unicast;
19555 }
19556
19557 int dhd_set_dhcp_unicast_status(dhd_pub_t *dhdp, uint32 idx, int val)
19558 {
19559 dhd_info_t *dhd = dhdp->info;
19560 dhd_if_t *ifp;
19561 ASSERT(idx < DHD_MAX_IFS);
19562 ifp = dhd->iflist[idx];
19563
19564 ASSERT(ifp);
19565
19566 ifp->dhcp_unicast = val;
19567 return BCME_OK;
19568 }
19569
19570 int dhd_get_block_ping_status(dhd_pub_t *dhdp, uint32 idx)
19571 {
19572 dhd_info_t *dhd = dhdp->info;
19573 dhd_if_t *ifp;
19574
19575 ASSERT(idx < DHD_MAX_IFS);
19576
19577 ifp = dhd->iflist[idx];
19578
19579 ASSERT(ifp);
19580
19581 return ifp->block_ping;
19582 }
19583
19584 int dhd_set_block_ping_status(dhd_pub_t *dhdp, uint32 idx, int val)
19585 {
19586 dhd_info_t *dhd = dhdp->info;
19587 dhd_if_t *ifp;
19588 ASSERT(idx < DHD_MAX_IFS);
19589 ifp = dhd->iflist[idx];
19590
19591 ASSERT(ifp);
19592
19593 ifp->block_ping = val;
19594 /* Disable rx_pkt_chain feature for interface if block_ping option is
19595 * enabled
19596 */
19597 dhd_update_rx_pkt_chainable_state(dhdp, idx);
19598 return BCME_OK;
19599 }
19600
19601 int dhd_get_grat_arp_status(dhd_pub_t *dhdp, uint32 idx)
19602 {
19603 dhd_info_t *dhd = dhdp->info;
19604 dhd_if_t *ifp;
19605
19606 ASSERT(idx < DHD_MAX_IFS);
19607
19608 ifp = dhd->iflist[idx];
19609
19610 ASSERT(ifp);
19611
19612 return ifp->grat_arp;
19613 }
19614
19615 int dhd_set_grat_arp_status(dhd_pub_t *dhdp, uint32 idx, int val)
19616 {
19617 dhd_info_t *dhd = dhdp->info;
19618 dhd_if_t *ifp;
19619 ASSERT(idx < DHD_MAX_IFS);
19620 ifp = dhd->iflist[idx];
19621
19622 ASSERT(ifp);
19623
19624 ifp->grat_arp = val;
19625
19626 return BCME_OK;
19627 }
19628
19629 int dhd_get_block_tdls_status(dhd_pub_t *dhdp, uint32 idx)
19630 {
19631 dhd_info_t *dhd = dhdp->info;
19632 dhd_if_t *ifp;
19633
19634 ASSERT(idx < DHD_MAX_IFS);
19635
19636 ifp = dhd->iflist[idx];
19637
19638 ASSERT(ifp);
19639
19640 return ifp->block_tdls;
19641 }
19642
19643 int dhd_set_block_tdls_status(dhd_pub_t *dhdp, uint32 idx, int val)
19644 {
19645 dhd_info_t *dhd = dhdp->info;
19646 dhd_if_t *ifp;
19647 ASSERT(idx < DHD_MAX_IFS);
19648 ifp = dhd->iflist[idx];
19649
19650 ASSERT(ifp);
19651
19652 ifp->block_tdls = val;
19653
19654 return BCME_OK;
19655 }
19656 #endif /* DHD_L2_FILTER */
19657
19658 #if defined(SET_RPS_CPUS)
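/*
 * Enable or disable RPS (Receive Packet Steering) for the given interface.
 * The CPU mask is chosen by interface type: IBSS, infrastructure BSS or P2P.
 */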
19659 int dhd_rps_cpus_enable(struct net_device *net, int enable)
19660 {
19661 dhd_info_t *dhd = DHD_DEV_INFO(net);
19662 dhd_if_t *ifp;
19663 int ifidx;
	char *RPS_CPU_SETBUF;
19665
19666 ifidx = dhd_net2idx(dhd, net);
19667 if (ifidx == DHD_BAD_IF) {
19668 DHD_ERROR(("%s bad ifidx\n", __FUNCTION__));
19669 return -ENODEV;
19670 }
19671
19672 if (ifidx == PRIMARY_INF) {
19673 if (dhd->pub.op_mode == DHD_FLAG_IBSS_MODE) {
19674 DHD_INFO(("%s : set for IBSS.\n", __FUNCTION__));
19675 RPS_CPU_SETBUF = RPS_CPUS_MASK_IBSS;
19676 } else {
19677 DHD_INFO(("%s : set for BSS.\n", __FUNCTION__));
19678 RPS_CPU_SETBUF = RPS_CPUS_MASK;
19679 }
19680 } else if (ifidx == VIRTUAL_INF) {
19681 DHD_INFO(("%s : set for P2P.\n", __FUNCTION__));
19682 RPS_CPU_SETBUF = RPS_CPUS_MASK_P2P;
19683 } else {
19684 DHD_ERROR(("%s : Invalid index : %d.\n", __FUNCTION__, ifidx));
19685 return -EINVAL;
19686 }
19687
19688 ifp = dhd->iflist[ifidx];
19689 if (ifp) {
19690 if (enable) {
19691 DHD_INFO(("%s : set rps_cpus as [%s]\n", __FUNCTION__, RPS_CPU_SETBUF));
19692 custom_rps_map_set(ifp->net->_rx, RPS_CPU_SETBUF, strlen(RPS_CPU_SETBUF));
19693 } else {
19694 custom_rps_map_clear(ifp->net->_rx);
19695 }
19696 } else {
19697 DHD_ERROR(("%s : ifp is NULL!!\n", __FUNCTION__));
19698 return -ENODEV;
19699 }
19700 return BCME_OK;
19701 }
19702
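/*
 * Parse a hex CPU bitmap string (e.g. "6" selects CPUs 1 and 2) into an
 * rps_map and install it on the rx queue under RCU; the old map, if any, is
 * freed via kfree_rcu(). Returns the number of mapped CPUs on success or a
 * negative value on failure.
 */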
19703 int custom_rps_map_set(struct netdev_rx_queue *queue, char *buf, size_t len)
19704 {
19705 struct rps_map *old_map, *map;
19706 cpumask_var_t mask;
19707 int err, cpu, i;
19708 static DEFINE_SPINLOCK(rps_map_lock);
19709
19710 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19711
19712 if (!alloc_cpumask_var(&mask, GFP_KERNEL)) {
19713 DHD_ERROR(("%s : alloc_cpumask_var fail.\n", __FUNCTION__));
19714 return -ENOMEM;
19715 }
19716
19717 err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
19718 if (err) {
19719 free_cpumask_var(mask);
19720 DHD_ERROR(("%s : bitmap_parse fail.\n", __FUNCTION__));
19721 return err;
19722 }
19723
19724 map = kzalloc(max_t(unsigned int,
19725 RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
19726 GFP_KERNEL);
19727 if (!map) {
19728 free_cpumask_var(mask);
19729 DHD_ERROR(("%s : map malloc fail.\n", __FUNCTION__));
19730 return -ENOMEM;
19731 }
19732
19733 i = 0;
19734 for_each_cpu(cpu, mask) {
19735 map->cpus[i++] = cpu;
19736 }
19737
19738 if (i) {
19739 map->len = i;
19740 } else {
19741 kfree(map);
19742 map = NULL;
19743 free_cpumask_var(mask);
19744 DHD_ERROR(("%s : mapping cpu fail.\n", __FUNCTION__));
19745 return -1;
19746 }
19747
19748 spin_lock(&rps_map_lock);
19749 old_map = rcu_dereference_protected(queue->rps_map,
19750 lockdep_is_held(&rps_map_lock));
19751 rcu_assign_pointer(queue->rps_map, map);
19752 spin_unlock(&rps_map_lock);
19753
19754 if (map) {
19755 static_key_slow_inc(&rps_needed);
19756 }
19757 if (old_map) {
19758 kfree_rcu(old_map, rcu);
19759 static_key_slow_dec(&rps_needed);
19760 }
19761 free_cpumask_var(mask);
19762
	DHD_INFO(("%s : Done. mapped cpu count : %d\n", __FUNCTION__, map->len));
19764 return map->len;
19765 }
19766
19767 void custom_rps_map_clear(struct netdev_rx_queue *queue)
19768 {
19769 struct rps_map *map;
19770
19771 DHD_INFO(("%s : Entered.\n", __FUNCTION__));
19772
19773 map = rcu_dereference_protected(queue->rps_map, 1);
19774 if (map) {
19775 RCU_INIT_POINTER(queue->rps_map, NULL);
19776 kfree_rcu(map, rcu);
19777 DHD_INFO(("%s : rps_cpus map clear.\n", __FUNCTION__));
19778 }
19779 }
19780 #endif // endif
19781
19782 #if defined(ARGOS_NOTIFY_CB)
19783
19784 static int argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19785 unsigned long speed, void *v);
19786 static int argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19787 unsigned long speed, void *v);
19788
19789 int
19790 argos_register_notifier_init(struct net_device *net)
19791 {
19792 int ret = 0;
19793
19794 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19795 argos_rps_ctrl_data.wlan_primary_netdev = net;
19796 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19797
19798 if (argos_wifi.notifier_call == NULL) {
19799 argos_wifi.notifier_call = argos_status_notifier_wifi_cb;
19800 ret = sec_argos_register_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19801 if (ret < 0) {
19802 DHD_ERROR(("DHD:Failed to register WIFI notifier, ret=%d\n", ret));
19803 goto exit;
19804 }
19805 }
19806
19807 if (argos_p2p.notifier_call == NULL) {
19808 argos_p2p.notifier_call = argos_status_notifier_p2p_cb;
19809 ret = sec_argos_register_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19810 if (ret < 0) {
19811 DHD_ERROR(("DHD:Failed to register P2P notifier, ret=%d\n", ret));
19812 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19813 goto exit;
19814 }
19815 }
19816
19817 return 0;
19818
19819 exit:
19820 if (argos_wifi.notifier_call) {
19821 argos_wifi.notifier_call = NULL;
19822 }
19823
19824 if (argos_p2p.notifier_call) {
19825 argos_p2p.notifier_call = NULL;
19826 }
19827
19828 return ret;
19829 }
19830
19831 int
19832 argos_register_notifier_deinit(void)
19833 {
19834 DHD_INFO(("DHD: %s: \n", __FUNCTION__));
19835
19836 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19837 DHD_ERROR(("DHD: primary_net_dev is null %s: \n", __FUNCTION__));
19838 return -1;
19839 }
19840 #ifndef DHD_LB
19841 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19842 #endif /* !DHD_LB */
19843
19844 if (argos_p2p.notifier_call) {
19845 sec_argos_unregister_notifier(&argos_p2p, ARGOS_P2P_TABLE_LABEL);
19846 argos_p2p.notifier_call = NULL;
19847 }
19848
19849 if (argos_wifi.notifier_call) {
19850 sec_argos_unregister_notifier(&argos_wifi, ARGOS_WIFI_TABLE_LABEL);
19851 argos_wifi.notifier_call = NULL;
19852 }
19853
19854 argos_rps_ctrl_data.wlan_primary_netdev = NULL;
19855 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19856
19857 return 0;
19858 }
19859
19860 int
19861 argos_status_notifier_wifi_cb(struct notifier_block *notifier,
19862 unsigned long speed, void *v)
19863 {
19864 dhd_info_t *dhd;
19865 dhd_pub_t *dhdp;
19866 #if defined(ARGOS_NOTIFY_CB)
19867 unsigned int pcie_irq = 0;
19868 #endif /* ARGOS_NOTIFY_CB */
19869 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19870
19871 if (argos_rps_ctrl_data.wlan_primary_netdev == NULL) {
19872 goto exit;
19873 }
19874
19875 dhd = DHD_DEV_INFO(argos_rps_ctrl_data.wlan_primary_netdev);
19876 if (dhd == NULL) {
19877 goto exit;
19878 }
19879
19880 dhdp = &dhd->pub;
19881 if (dhdp == NULL || !dhdp->up) {
19882 goto exit;
19883 }
19884 /* Check if reported TPut value is more than threshold value */
19885 if (speed > RPS_TPUT_THRESHOLD) {
19886 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 0) {
/* There is no need to configure rps_cpus
* if Load Balancing is enabled
*/
19890 #ifndef DHD_LB
19891 int err = 0;
19892
19893 if (cpu_online(RPS_CPUS_WLAN_CORE_ID)) {
19894 err = custom_rps_map_set(
19895 argos_rps_ctrl_data.wlan_primary_netdev->_rx,
19896 RPS_CPUS_MASK, strlen(RPS_CPUS_MASK));
19897 } else {
19898 DHD_ERROR(("DHD: %s: RPS_Set fail,"
19899 " Core=%d Offline\n", __FUNCTION__,
19900 RPS_CPUS_WLAN_CORE_ID));
19901 err = -1;
19902 }
19903
19904 if (err < 0) {
19905 DHD_ERROR(("DHD: %s: Failed to RPS_CPUs. "
19906 "speed=%ld, error=%d\n",
19907 __FUNCTION__, speed, err));
19908 } else {
19909 #endif /* !DHD_LB */
19910 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
19911 if (dhdp->tcpack_sup_mode != TCPACK_SUP_HOLD) {
19912 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_ON(%d)\n",
19913 __FUNCTION__, TCPACK_SUP_HOLD));
19914 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_HOLD);
19915 }
19916 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19917 argos_rps_ctrl_data.argos_rps_cpus_enabled = 1;
19918 #ifndef DHD_LB
19919 DHD_ERROR(("DHD: %s: Set RPS_CPUs, speed=%ld\n",
19920 __FUNCTION__, speed));
19921 }
19922 #endif /* !DHD_LB */
19923 }
19924 } else {
19925 if (argos_rps_ctrl_data.argos_rps_cpus_enabled == 1) {
19926 #if (defined(DHDTCPACK_SUPPRESS) && defined(BCMPCIE))
19927 if (dhdp->tcpack_sup_mode != TCPACK_SUP_OFF) {
19928 DHD_ERROR(("%s : set ack suppress. TCPACK_SUP_OFF\n",
19929 __FUNCTION__));
19930 dhd_tcpack_suppress_set(dhdp, TCPACK_SUP_OFF);
19931 }
19932 #endif /* DHDTCPACK_SUPPRESS && BCMPCIE */
19933 #ifndef DHD_LB
/* There is no need to configure rps_cpus
* if Load Balancing is enabled
*/
19937 custom_rps_map_clear(argos_rps_ctrl_data.wlan_primary_netdev->_rx);
19938 DHD_ERROR(("DHD: %s: Clear RPS_CPUs, speed=%ld\n", __FUNCTION__, speed));
19939 OSL_SLEEP(DELAY_TO_CLEAR_RPS_CPUS);
19940 #endif /* !DHD_LB */
19941 argos_rps_ctrl_data.argos_rps_cpus_enabled = 0;
19942 }
19943 }
19944
19945 exit:
19946 return NOTIFY_OK;
19947 }
19948
19949 int
19950 argos_status_notifier_p2p_cb(struct notifier_block *notifier,
19951 unsigned long speed, void *v)
19952 {
19953 DHD_INFO(("DHD: %s: speed=%ld\n", __FUNCTION__, speed));
19954 return argos_status_notifier_wifi_cb(notifier, speed, v);
19955 }
19956 #endif // endif
19957
19958 #ifdef DHD_DEBUG_PAGEALLOC
19959
19960 void
19961 dhd_page_corrupt_cb(void *handle, void *addr_corrupt, size_t len)
19962 {
19963 dhd_pub_t *dhdp = (dhd_pub_t *)handle;
19964
19965 DHD_ERROR(("%s: Got dhd_page_corrupt_cb 0x%p %d\n",
19966 __FUNCTION__, addr_corrupt, (uint32)len));
19967
19968 DHD_OS_WAKE_LOCK(dhdp);
19969 prhex("Page Corruption:", addr_corrupt, len);
19970 dhd_dump_to_kernelog(dhdp);
19971 #if defined(BCMPCIE) && defined(DHD_FW_COREDUMP)
19972 /* Load the dongle side dump to host memory and then BUG_ON() */
19973 dhdp->memdump_enabled = DUMP_MEMONLY;
19974 dhdp->memdump_type = DUMP_TYPE_MEMORY_CORRUPTION;
19975 dhd_bus_mem_dump(dhdp);
19976 #endif /* BCMPCIE && DHD_FW_COREDUMP */
19977 DHD_OS_WAKE_UNLOCK(dhdp);
19978 }
19979 EXPORT_SYMBOL(dhd_page_corrupt_cb);
19980 #endif /* DHD_DEBUG_PAGEALLOC */
19981
19982 #if defined(BCMPCIE) && defined(DHD_PKTID_AUDIT_ENABLED)
19983 void
19984 dhd_pktid_error_handler(dhd_pub_t *dhdp)
19985 {
19986 DHD_ERROR(("%s: Got Pkt Id Audit failure \n", __FUNCTION__));
19987 DHD_OS_WAKE_LOCK(dhdp);
19988 dhd_dump_to_kernelog(dhdp);
19989 #ifdef DHD_FW_COREDUMP
19990 /* Load the dongle side dump to host memory */
19991 if (dhdp->memdump_enabled == DUMP_DISABLED) {
19992 dhdp->memdump_enabled = DUMP_MEMFILE;
19993 }
19994 dhdp->memdump_type = DUMP_TYPE_PKTID_AUDIT_FAILURE;
19995 dhd_bus_mem_dump(dhdp);
19996 #endif /* DHD_FW_COREDUMP */
19997 #ifdef OEM_ANDROID
19998 dhdp->hang_reason = HANG_REASON_PCIE_PKTID_ERROR;
19999 dhd_os_check_hang(dhdp, 0, -EREMOTEIO);
20000 #endif /* OEM_ANDROID */
20001 DHD_OS_WAKE_UNLOCK(dhdp);
20002 }
20003 #endif /* BCMPCIE && DHD_PKTID_AUDIT_ENABLED */
20004
20005 struct net_device *
20006 dhd_linux_get_primary_netdev(dhd_pub_t *dhdp)
20007 {
20008 dhd_info_t *dhd = dhdp->info;
20009
20010 if (dhd->iflist[0] && dhd->iflist[0]->net)
20011 return dhd->iflist[0]->net;
20012 else
20013 return NULL;
20014 }
20015
20016 fw_download_status_t
20017 dhd_fw_download_status(dhd_pub_t * dhd_pub)
20018 {
20019 return dhd_pub->fw_download_status;
20020 }
20021
20022 static int
20023 dhd_create_to_notifier_skt(void)
20024 {
20025 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0))
/* From kernel 3.7 onwards this API accepts only 3 arguments. */
/* Kernel version 3.6 is a special case which accepts 4 arguments. */
20028 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, &dhd_netlink_cfg);
20029 #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 0))
20030 /* Kernel version 3.5 and below use this old API format */
20031 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, 0,
20032 dhd_process_daemon_msg, NULL, THIS_MODULE);
20033 #else
20034 nl_to_event_sk = netlink_kernel_create(&init_net, BCM_NL_USER, THIS_MODULE,
20035 &dhd_netlink_cfg);
20036 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 7, 0)) */
20037 if (!nl_to_event_sk)
20038 {
20039 printf("Error creating socket.\n");
20040 return -1;
20041 }
20042 DHD_INFO(("nl_to socket created successfully...\n"));
20043 return 0;
20044 }
20045
20046 void
20047 dhd_destroy_to_notifier_skt(void)
20048 {
20049 DHD_INFO(("Destroying nl_to socket\n"));
20050 netlink_kernel_release(nl_to_event_sk);
20051 }
20052
20053 static void
20054 dhd_recv_msg_from_daemon(struct sk_buff *skb)
20055 {
20056 struct nlmsghdr *nlh;
20057 bcm_to_info_t *cmd;
20058
20059 nlh = (struct nlmsghdr *)skb->data;
20060 cmd = (bcm_to_info_t *)nlmsg_data(nlh);
20061 if ((cmd->magic == BCM_TO_MAGIC) && (cmd->reason == REASON_DAEMON_STARTED)) {
20062 sender_pid = ((struct nlmsghdr *)(skb->data))->nlmsg_pid;
20063 DHD_INFO(("DHD Daemon Started\n"));
20064 }
20065 }
20066
20067 int
20068 dhd_send_msg_to_daemon(struct sk_buff *skb, void *data, int size)
20069 {
20070 struct nlmsghdr *nlh;
20071 struct sk_buff *skb_out;
20072 int ret = BCME_ERROR;
20073
20074 BCM_REFERENCE(skb);
20075 if (sender_pid == 0) {
20076 DHD_INFO(("Invalid PID 0\n"));
20077 skb_out = NULL;
20078 goto err;
20079 }
20080
20081 if ((skb_out = nlmsg_new(size, 0)) == NULL) {
20082 DHD_ERROR(("%s: skb alloc failed\n", __FUNCTION__));
20083 ret = BCME_NOMEM;
20084 goto err;
20085 }
20086 nlh = nlmsg_put(skb_out, 0, 0, NLMSG_DONE, size, 0);
20087 if (nlh == NULL) {
20088 DHD_ERROR(("%s: nlmsg_put failed\n", __FUNCTION__));
20089 goto err;
20090 }
20091 NETLINK_CB(skb_out).dst_group = 0; /* Unicast */
20092 (void)memcpy_s(nlmsg_data(nlh), size, (char *)data, size);
20093
20094 if ((ret = nlmsg_unicast(nl_to_event_sk, skb_out, sender_pid)) < 0) {
20095 DHD_ERROR(("Error sending message, ret:%d\n", ret));
/* skb_out is already freed inside nlmsg_unicast() on error */
/* explicitly set skb_out to NULL to avoid a double free */
20098 skb_out = NULL;
20099 goto err;
20100 }
20101 return BCME_OK;
20102 err:
20103 if (skb_out) {
20104 nlmsg_free(skb_out);
20105 }
20106 return ret;
20107 }
20108
20109 static void
20110 dhd_process_daemon_msg(struct sk_buff *skb)
20111 {
20112 bcm_to_info_t to_info;
20113
20114 to_info.magic = BCM_TO_MAGIC;
20115 to_info.reason = REASON_DAEMON_STARTED;
20116 to_info.trap = NO_TRAP;
20117
20118 dhd_recv_msg_from_daemon(skb);
20119 dhd_send_msg_to_daemon(skb, &to_info, sizeof(to_info));
20120 }
20121
20122 #ifdef DHD_LOG_DUMP
20123 bool
20124 dhd_log_dump_ecntr_enabled(void)
20125 {
20126 return (bool)logdump_ecntr_enable;
20127 }
20128
20129 bool
20130 dhd_log_dump_rtt_enabled(void)
20131 {
20132 return (bool)logdump_rtt_enable;
20133 }
20134
20135 void
20136 dhd_log_dump_init(dhd_pub_t *dhd)
20137 {
20138 struct dhd_log_dump_buf *dld_buf, *dld_buf_special;
20139 int i = 0;
20140 uint8 *prealloc_buf = NULL, *bufptr = NULL;
20141 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20142 int prealloc_idx = DHD_PREALLOC_DHD_LOG_DUMP_BUF;
20143 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20144 int ret;
20145 dhd_dbg_ring_t *ring = NULL;
20146 unsigned long flags = 0;
20147 dhd_info_t *dhd_info = dhd->info;
20148 void *cookie_buf = NULL;
20149
20150 BCM_REFERENCE(ret);
20151 BCM_REFERENCE(ring);
20152 BCM_REFERENCE(flags);
20153
20154 /* sanity check */
20155 if (logdump_prsrv_tailsize <= 0 ||
20156 logdump_prsrv_tailsize > DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE) {
20157 logdump_prsrv_tailsize = DHD_LOG_DUMP_MAX_TAIL_FLUSH_SIZE;
20158 }
20159 /* now adjust the preserve log flush size based on the
20160 * kernel printk log buffer size
20161 */
20162 #ifdef CONFIG_LOG_BUF_SHIFT
20163 DHD_ERROR(("%s: kernel log buf size = %uKB; logdump_prsrv_tailsize = %uKB;"
20164 " limit prsrv tail size to = %uKB\n",
20165 __FUNCTION__, (1 << CONFIG_LOG_BUF_SHIFT)/1024,
20166 logdump_prsrv_tailsize/1024, LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE/1024));
20167
20168 if (logdump_prsrv_tailsize > LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE) {
20169 logdump_prsrv_tailsize = LOG_DUMP_KERNEL_TAIL_FLUSH_SIZE;
20170 }
20171 #else
20172 DHD_ERROR(("%s: logdump_prsrv_tailsize = %uKB \n",
20173 __FUNCTION__, logdump_prsrv_tailsize/1024);
20174 #endif /* CONFIG_LOG_BUF_SHIFT */
20175
20176 mutex_init(&dhd_info->logdump_lock);
20177
20178 /* initialize log dump buf structures */
20179 memset(g_dld_buf, 0, sizeof(struct dhd_log_dump_buf) * DLD_BUFFER_NUM);
20180
20181 /* set the log dump buffer size based on the module_param */
20182 if (logdump_max_bufsize > LOG_DUMP_GENERAL_MAX_BUFSIZE ||
20183 logdump_max_bufsize <= 0)
20184 dld_buf_size[DLD_BUF_TYPE_GENERAL] = LOG_DUMP_GENERAL_MAX_BUFSIZE;
20185 else
20186 dld_buf_size[DLD_BUF_TYPE_GENERAL] = logdump_max_bufsize;
20187
20188 /* pre-alloc the memory for the log buffers & 'special' buffer */
20189 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20190 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20191 DHD_ERROR(("%s : Try to allocate memory total(%d) special(%d)\n",
20192 __FUNCTION__, LOG_DUMP_TOTAL_BUFSIZE, LOG_DUMP_SPECIAL_MAX_BUFSIZE));
20193 prealloc_buf = DHD_OS_PREALLOC(dhd, prealloc_idx++, LOG_DUMP_TOTAL_BUFSIZE);
20194 dld_buf_special->buffer = DHD_OS_PREALLOC(dhd, prealloc_idx++,
20195 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20196 #else
20197 prealloc_buf = VMALLOCZ(dhd->osh, LOG_DUMP_TOTAL_BUFSIZE);
20198 dld_buf_special->buffer = VMALLOCZ(dhd->osh, dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20199 #endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20200 if (!prealloc_buf) {
20201 DHD_ERROR(("Failed to pre-allocate memory for log buffers !\n"));
20202 goto fail;
20203 }
20204 if (!dld_buf_special->buffer) {
20205 DHD_ERROR(("Failed to pre-allocate memory for special buffer !\n"));
20206 goto fail;
20207 }
20208
20209 bufptr = prealloc_buf;
20210 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20211 dld_buf = &g_dld_buf[i];
20212 dld_buf->dhd_pub = dhd;
20213 spin_lock_init(&dld_buf->lock);
20214 dld_buf->wraparound = 0;
20215 if (i != DLD_BUF_TYPE_SPECIAL) {
20216 dld_buf->buffer = bufptr;
20217 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20218 bufptr = (uint8 *)dld_buf->max;
20219 } else {
20220 dld_buf->max = (unsigned long)dld_buf->buffer + dld_buf_size[i];
20221 }
20222 dld_buf->present = dld_buf->front = dld_buf->buffer;
20223 dld_buf->remain = dld_buf_size[i];
20224 dld_buf->enable = 1;
20225 }
20226
20227 #ifdef EWP_ECNTRS_LOGGING
20228 /* now use the rest of the pre-alloc'd memory for filter and ecounter log */
20229 dhd->ecntr_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20230 if (!dhd->ecntr_dbg_ring)
20231 goto fail;
20232
20233 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20234 ret = dhd_dbg_ring_init(dhd, ring, ECNTR_RING_ID,
20235 ECNTR_RING_NAME, LOG_DUMP_ECNTRS_MAX_BUFSIZE,
20236 bufptr, TRUE);
20237 if (ret != BCME_OK) {
20238 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20239 __FUNCTION__));
20240 goto fail;
20241 }
20242 DHD_DBG_RING_LOCK(ring->lock, flags);
20243 ring->state = RING_ACTIVE;
20244 ring->threshold = 0;
20245 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20246
20247 bufptr += LOG_DUMP_ECNTRS_MAX_BUFSIZE;
20248 #endif /* EWP_ECNTRS_LOGGING */
20249
20250 #ifdef EWP_RTT_LOGGING
/* now use the rest of the pre-alloc'd memory for the rtt log */
20252 dhd->rtt_dbg_ring = MALLOCZ(dhd->osh, sizeof(dhd_dbg_ring_t));
20253 if (!dhd->rtt_dbg_ring)
20254 goto fail;
20255
20256 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20257 ret = dhd_dbg_ring_init(dhd, ring, RTT_RING_ID,
20258 RTT_RING_NAME, LOG_DUMP_RTT_MAX_BUFSIZE,
20259 bufptr, TRUE);
20260 if (ret != BCME_OK) {
20261 DHD_ERROR(("%s: unable to init ecntr ring !\n",
20262 __FUNCTION__));
20263 goto fail;
20264 }
20265 DHD_DBG_RING_LOCK(ring->lock, flags);
20266 ring->state = RING_ACTIVE;
20267 ring->threshold = 0;
20268 DHD_DBG_RING_UNLOCK(ring->lock, flags);
20269
20270 bufptr += LOG_DUMP_RTT_MAX_BUFSIZE;
20271 #endif /* EWP_RTT_LOGGING */
20272
/* The concise buffer is used as an intermediate buffer for two purposes:
* a) to hold ecounters records temporarily before
* writing them to file
* b) to store dhd dump data before putting it to file
* It should have a size equal to
* MAX(largest possible ecntr record, 'dhd dump' data size)
*/
20280 dhd->concise_dbg_buf = MALLOC(dhd->osh, CONCISE_DUMP_BUFLEN);
20281 if (!dhd->concise_dbg_buf) {
20282 DHD_ERROR(("%s: unable to alloc mem for concise debug info !\n",
20283 __FUNCTION__));
20284 goto fail;
20285 }
20286
20287 #if defined(DHD_EVENT_LOG_FILTER)
20288 ret = dhd_event_log_filter_init(dhd,
20289 bufptr,
20290 LOG_DUMP_FILTER_MAX_BUFSIZE);
20291 if (ret != BCME_OK) {
20292 goto fail;
20293 }
20294 #endif /* DHD_EVENT_LOG_FILTER */
20295
20296 cookie_buf = MALLOC(dhd->osh, LOG_DUMP_COOKIE_BUFSIZE);
20297 if (!cookie_buf) {
20298 DHD_ERROR(("%s: unable to alloc mem for logdump cookie buffer\n",
20299 __FUNCTION__));
20300 goto fail;
20301 }
20302 ret = dhd_logdump_cookie_init(dhd, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20303 if (ret != BCME_OK) {
20304 MFREE(dhd->osh, cookie_buf, LOG_DUMP_COOKIE_BUFSIZE);
20305 goto fail;
20306 }
20307 return;
20308
20309 fail:
20310
20311 if (dhd->logdump_cookie) {
20312 dhd_logdump_cookie_deinit(dhd);
20313 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20314 dhd->logdump_cookie = NULL;
20315 }
20316 #if defined(DHD_EVENT_LOG_FILTER)
20317 if (dhd->event_log_filter) {
20318 dhd_event_log_filter_deinit(dhd);
20319 }
20320 #endif /* DHD_EVENT_LOG_FILTER */
20321
20322 if (dhd->concise_dbg_buf) {
20323 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20324 }
20325
20326 #ifdef EWP_ECNTRS_LOGGING
20327 if (dhd->ecntr_dbg_ring) {
20328 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20329 dhd_dbg_ring_deinit(dhd, ring);
20330 ring->ring_buf = NULL;
20331 ring->ring_size = 0;
20332 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20333 dhd->ecntr_dbg_ring = NULL;
20334 }
20335 #endif /* EWP_ECNTRS_LOGGING */
20336
20337 #ifdef EWP_RTT_LOGGING
20338 if (dhd->rtt_dbg_ring) {
20339 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20340 dhd_dbg_ring_deinit(dhd, ring);
20341 ring->ring_buf = NULL;
20342 ring->ring_size = 0;
20343 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20344 dhd->rtt_dbg_ring = NULL;
20345 }
20346 #endif /* EWP_RTT_LOGGING */
20347
20348 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20349 if (prealloc_buf) {
20350 DHD_OS_PREFREE(dhd, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20351 }
20352 if (dld_buf_special->buffer) {
20353 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20354 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20355 }
20356 #else
20357 if (prealloc_buf) {
20358 VMFREE(dhd->osh, prealloc_buf, LOG_DUMP_TOTAL_BUFSIZE);
20359 }
20360 if (dld_buf_special->buffer) {
20361 VMFREE(dhd->osh, dld_buf_special->buffer,
20362 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20363 }
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20365 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20366 dld_buf = &g_dld_buf[i];
20367 dld_buf->enable = 0;
20368 dld_buf->buffer = NULL;
20369 }
20370
20371 mutex_destroy(&dhd_info->logdump_lock);
20372 }
20373
20374 void
20375 dhd_log_dump_deinit(dhd_pub_t *dhd)
20376 {
20377 struct dhd_log_dump_buf *dld_buf = NULL, *dld_buf_special = NULL;
20378 int i = 0;
20379 dhd_info_t *dhd_info = dhd->info;
20380 dhd_dbg_ring_t *ring = NULL;
20381
20382 BCM_REFERENCE(ring);
20383
20384 if (dhd->concise_dbg_buf) {
20385 MFREE(dhd->osh, dhd->concise_dbg_buf, CONCISE_DUMP_BUFLEN);
20386 dhd->concise_dbg_buf = NULL;
20387 }
20388
20389 if (dhd->logdump_cookie) {
20390 dhd_logdump_cookie_deinit(dhd);
20391 MFREE(dhd->osh, dhd->logdump_cookie, LOG_DUMP_COOKIE_BUFSIZE);
20392 dhd->logdump_cookie = NULL;
20393 }
20394
20395 #if defined(DHD_EVENT_LOG_FILTER)
20396 if (dhd->event_log_filter) {
20397 dhd_event_log_filter_deinit(dhd);
20398 }
20399 #endif /* DHD_EVENT_LOG_FILTER */
20400
20401 #ifdef EWP_ECNTRS_LOGGING
20402 if (dhd->ecntr_dbg_ring) {
20403 ring = (dhd_dbg_ring_t *)dhd->ecntr_dbg_ring;
20404 dhd_dbg_ring_deinit(dhd, ring);
20405 ring->ring_buf = NULL;
20406 ring->ring_size = 0;
20407 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20408 dhd->ecntr_dbg_ring = NULL;
20409 }
20410 #endif /* EWP_ECNTRS_LOGGING */
20411
20412 #ifdef EWP_RTT_LOGGING
20413 if (dhd->rtt_dbg_ring) {
20414 ring = (dhd_dbg_ring_t *)dhd->rtt_dbg_ring;
20415 dhd_dbg_ring_deinit(dhd, ring);
20416 ring->ring_buf = NULL;
20417 ring->ring_size = 0;
20418 MFREE(dhd->osh, ring, sizeof(dhd_dbg_ring_t));
20419 dhd->rtt_dbg_ring = NULL;
20420 }
20421 #endif /* EWP_RTT_LOGGING */
20422
20423 /* 'general' buffer points to start of the pre-alloc'd memory */
20424 dld_buf = &g_dld_buf[DLD_BUF_TYPE_GENERAL];
20425 dld_buf_special = &g_dld_buf[DLD_BUF_TYPE_SPECIAL];
20426 #if defined(CONFIG_DHD_USE_STATIC_BUF) && defined(DHD_USE_STATIC_MEMDUMP)
20427 if (dld_buf->buffer) {
20428 DHD_OS_PREFREE(dhd, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20429 }
20430 if (dld_buf_special->buffer) {
20431 DHD_OS_PREFREE(dhd, dld_buf_special->buffer,
20432 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20433 }
20434 #else
20435 if (dld_buf->buffer) {
20436 VMFREE(dhd->osh, dld_buf->buffer, LOG_DUMP_TOTAL_BUFSIZE);
20437 }
20438 if (dld_buf_special->buffer) {
20439 VMFREE(dhd->osh, dld_buf_special->buffer,
20440 dld_buf_size[DLD_BUF_TYPE_SPECIAL]);
20441 }
#endif /* CONFIG_DHD_USE_STATIC_BUF && DHD_USE_STATIC_MEMDUMP */
20443 for (i = 0; i < DLD_BUFFER_NUM; i++) {
20444 dld_buf = &g_dld_buf[i];
20445 dld_buf->enable = 0;
20446 dld_buf->buffer = NULL;
20447 }
20448
20449 mutex_destroy(&dhd_info->logdump_lock);
20450 }
20451
20452 void
20453 dhd_log_dump_write(int type, char *binary_data,
20454 int binary_len, const char *fmt, ...)
20455 {
20456 int len = 0;
20457 char tmp_buf[DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE] = {0, };
20458 va_list args;
20459 unsigned long flags = 0;
20460 struct dhd_log_dump_buf *dld_buf = NULL;
20461 bool flush_log = FALSE;
20462
20463 if (type < 0 || type >= DLD_BUFFER_NUM) {
20464 DHD_INFO(("%s: Unknown DHD_LOG_DUMP_BUF_TYPE(%d).\n",
20465 __FUNCTION__, type));
20466 return;
20467 }
20468
20469 dld_buf = &g_dld_buf[type];
20470
20471 if (dld_buf->enable != 1) {
20472 return;
20473 }
20474
20475 va_start(args, fmt);
20476 len = vsnprintf(tmp_buf, DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE, fmt, args);
/* A non-C99-compliant vsnprintf returns -1 on truncation,
* while a C99-compliant one returns the untruncated length,
* i.e. len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE
*/
20480 va_end(args);
20481 if (len < 0) {
20482 return;
20483 }
20484
20485 if (len >= DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE) {
20486 len = DHD_LOG_DUMP_MAX_TEMP_BUFFER_SIZE - 1;
20487 tmp_buf[len] = '\0';
20488 }
20489
/* enter a critical section to eliminate race conditions */
20491 spin_lock_irqsave(&dld_buf->lock, flags);
20492 if (dld_buf->remain < len) {
20493 dld_buf->wraparound = 1;
20494 dld_buf->present = dld_buf->front;
20495 dld_buf->remain = dld_buf_size[type];
20496 /* if wrap around happens, flush the ring buffer to the file */
20497 flush_log = TRUE;
20498 }
20499
20500 memcpy(dld_buf->present, tmp_buf, len);
20501 dld_buf->remain -= len;
20502 dld_buf->present += len;
20503 spin_unlock_irqrestore(&dld_buf->lock, flags);
20504
/* sanity check against writing past the end of the buffer */
20506 ASSERT((unsigned long)dld_buf->present <= dld_buf->max);
20507
20508 if (dld_buf->dhd_pub) {
20509 dhd_pub_t *dhdp = (dhd_pub_t *)dld_buf->dhd_pub;
20510 dhdp->logdump_periodic_flush =
20511 logdump_periodic_flush;
20512 if (logdump_periodic_flush && flush_log) {
20513 log_dump_type_t *flush_type = MALLOCZ(dhdp->osh,
20514 sizeof(log_dump_type_t));
20515 if (flush_type) {
20516 *flush_type = type;
20517 dhd_schedule_log_dump(dld_buf->dhd_pub, flush_type);
20518 }
20519 }
20520 }
20521 }
20522
20523 char*
20524 dhd_log_dump_get_timestamp(void)
20525 {
20526 static char buf[16];
20527 u64 ts_nsec;
20528 unsigned long rem_nsec;
20529
20530 ts_nsec = local_clock();
20531 rem_nsec = DIV_AND_MOD_U64_BY_U32(ts_nsec, NSEC_PER_SEC);
20532 snprintf(buf, sizeof(buf), "%5lu.%06lu",
20533 (unsigned long)ts_nsec, rem_nsec / NSEC_PER_USEC);
20534
20535 return buf;
20536 }
20537 #endif /* DHD_LOG_DUMP */
20538
20539 #ifdef DHD_PCIE_NATIVE_RUNTIMEPM
20540 void
20541 dhd_flush_rx_tx_wq(dhd_pub_t *dhdp)
20542 {
20543 dhd_info_t * dhd;
20544
20545 if (dhdp) {
20546 dhd = dhdp->info;
20547 if (dhd) {
20548 flush_workqueue(dhd->tx_wq);
20549 flush_workqueue(dhd->rx_wq);
20550 }
20551 }
20552
20553 return;
20554 }
20555 #endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
20556
20557 #ifdef DHD_DEBUG_UART
20558 bool
20559 dhd_debug_uart_is_running(struct net_device *dev)
20560 {
20561 dhd_info_t *dhd = DHD_DEV_INFO(dev);
20562
20563 if (dhd->duart_execute) {
20564 return TRUE;
20565 }
20566
20567 return FALSE;
20568 }
20569
20570 static void
20571 dhd_debug_uart_exec_rd(void *handle, void *event_info, u8 event)
20572 {
20573 dhd_pub_t *dhdp = handle;
20574 dhd_debug_uart_exec(dhdp, "rd");
20575 }
20576
20577 static void
20578 dhd_debug_uart_exec(dhd_pub_t *dhdp, char *cmd)
20579 {
20580 int ret;
20581
20582 char *argv[] = {DHD_DEBUG_UART_EXEC_PATH, cmd, NULL};
20583 char *envp[] = {"HOME=/", "TERM=linux", "PATH=/sbin:/system/bin", NULL};
20584
20585 #ifdef DHD_FW_COREDUMP
20586 if (dhdp->memdump_enabled == DUMP_MEMFILE_BUGON)
20587 #endif // endif
20588 {
20589 if (dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_RC_DETECT ||
20590 dhdp->hang_reason == HANG_REASON_PCIE_LINK_DOWN_EP_DETECT ||
20591 #ifdef DHD_FW_COREDUMP
20592 dhdp->memdump_success == FALSE ||
20593 #endif // endif
20594 FALSE) {
20595 dhdp->info->duart_execute = TRUE;
20596 DHD_ERROR(("DHD: %s - execute %s %s\n",
20597 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd));
20598 ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
20599 DHD_ERROR(("DHD: %s - %s %s ret = %d\n",
20600 __FUNCTION__, DHD_DEBUG_UART_EXEC_PATH, cmd, ret));
20601 dhdp->info->duart_execute = FALSE;
20602
20603 #ifdef DHD_LOG_DUMP
20604 if (dhdp->memdump_type != DUMP_TYPE_BY_SYSDUMP)
20605 #endif // endif
20606 {
20607 BUG_ON(1);
20608 }
20609 }
20610 }
20611 }
20612 #endif /* DHD_DEBUG_UART */
20613
20614 #if defined(DHD_BLOB_EXISTENCE_CHECK)
20615 void
20616 dhd_set_blob_support(dhd_pub_t *dhdp, char *fw_path)
20617 {
20618 struct file *fp;
20619 char *filepath = VENDOR_PATH CONFIG_BCMDHD_CLM_PATH;
20620 fp = filp_open(filepath, O_RDONLY, 0);
20621 if (IS_ERR(fp)) {
20622 DHD_ERROR(("%s: ----- blob file doesn't exist (%s) -----\n", __FUNCTION__,
20623 filepath));
20624 dhdp->is_blob = FALSE;
20625 } else {
20626 DHD_ERROR(("%s: ----- blob file exists (%s)-----\n", __FUNCTION__, filepath));
20627 dhdp->is_blob = TRUE;
20628 #if defined(CONCATE_BLOB)
20629 strncat(fw_path, "_blob", strlen("_blob"));
20630 #else
20631 BCM_REFERENCE(fw_path);
#endif /* CONCATE_BLOB */
20633 filp_close(fp, NULL);
20634 }
20635 }
20636 #endif /* DHD_BLOB_EXISTENCE_CHECK */
20637
20638 #if defined(PCIE_FULL_DONGLE)
20639 /** test / loopback */
20640 void
20641 dmaxfer_free_dmaaddr_handler(void *handle, void *event_info, u8 event)
20642 {
20643 dmaxref_mem_map_t *dmmap = (dmaxref_mem_map_t *)event_info;
20644 dhd_info_t *dhd_info = (dhd_info_t *)handle;
20645
20646 if (event != DHD_WQ_WORK_DMA_LB_MEM_REL) {
20647 DHD_ERROR(("%s: Unexpected event \n", __FUNCTION__));
20648 return;
20649 }
20650 if (dhd_info == NULL) {
20651 DHD_ERROR(("%s: Invalid dhd_info\n", __FUNCTION__));
20652 return;
20653 }
20654 if (dmmap == NULL) {
20655 DHD_ERROR(("%s: dmmap is null\n", __FUNCTION__));
20656 return;
20657 }
20658 dmaxfer_free_prev_dmaaddr(&dhd_info->pub, dmmap);
20659 }
20660
20661 void
20662 dhd_schedule_dmaxfer_free(dhd_pub_t *dhdp, dmaxref_mem_map_t *dmmap)
20663 {
20664 dhd_info_t *dhd_info = dhdp->info;
20665
20666 dhd_deferred_schedule_work(dhd_info->dhd_deferred_wq, (void *)dmmap,
20667 DHD_WQ_WORK_DMA_LB_MEM_REL, dmaxfer_free_dmaaddr_handler, DHD_WQ_WORK_PRIORITY_LOW);
20668 }
20669 #endif /* PCIE_FULL_DONGLE */
20670 /* ---------------------------- End of sysfs implementation ------------------------------------- */
20671
20672 #ifdef SET_PCIE_IRQ_CPU_CORE
20673 void
20674 dhd_set_irq_cpucore(dhd_pub_t *dhdp, int affinity_cmd)
20675 {
20676 unsigned int pcie_irq = 0;
20677
20678 if (!dhdp) {
20679 DHD_ERROR(("%s : dhd is NULL\n", __FUNCTION__));
20680 return;
20681 }
20682
20683 if (!dhdp->bus) {
20684 DHD_ERROR(("%s : dhd->bus is NULL\n", __FUNCTION__));
20685 return;
20686 }
20687
20688 DHD_ERROR(("Enter %s, PCIe affinity cmd=0x%x\n", __FUNCTION__, affinity_cmd));
20689
20690 if (dhdpcie_get_pcieirq(dhdp->bus, &pcie_irq)) {
20691 DHD_ERROR(("%s : Can't get interrupt number\n", __FUNCTION__));
20692 return;
20693 }
20694
20695 /*
20696 irq_set_affinity() assign dedicated CPU core PCIe interrupt
20697 If dedicated CPU core is not on-line,
20698 PCIe interrupt scheduled on CPU core 0
20699 */
20700 switch (affinity_cmd) {
20701 case PCIE_IRQ_AFFINITY_OFF:
20702 break;
20703 case PCIE_IRQ_AFFINITY_BIG_CORE_ANY:
20704 #if defined(CONFIG_ARCH_SM8150)
20705 irq_set_affinity_hint(pcie_irq, dhdp->info->cpumask_primary);
20706 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20707 #else /* Exynos and Others */
20708 irq_set_affinity(pcie_irq, dhdp->info->cpumask_primary);
20709 #endif /* CONFIG_ARCH_SM8150 */
20710 break;
20711 #if defined(CONFIG_SOC_EXYNOS9810) || defined(CONFIG_SOC_EXYNOS9820)
20712 case PCIE_IRQ_AFFINITY_BIG_CORE_EXYNOS:
20713 DHD_ERROR(("%s, PCIe IRQ:%u set Core %d\n",
20714 __FUNCTION__, pcie_irq, PCIE_IRQ_CPU_CORE));
20715 irq_set_affinity(pcie_irq, cpumask_of(PCIE_IRQ_CPU_CORE));
20716 break;
20717 #endif /* CONFIG_SOC_EXYNOS9810 || CONFIG_SOC_EXYNOS9820 */
20718 default:
20719 DHD_ERROR(("%s, Unknown PCIe affinity cmd=0x%x\n",
20720 __FUNCTION__, affinity_cmd));
20721 }
20722 }
20723 #endif /* SET_PCIE_IRQ_CPU_CORE */
20724
20725 int
20726 dhd_write_file(const char *filepath, char *buf, int buf_len)
20727 {
20728 struct file *fp = NULL;
20729 int ret = 0;
20730 /* change to KERNEL_DS address limit */
20731 #if defined(KERNEL_DS) && defined(USER_DS)
20732 mm_segment_t old_fs;
20733 old_fs = get_fs();
20734 set_fs(KERNEL_DS);
20735 #endif /* KERNEL_DS && USER_DS */
20736
20737 /* File is always created. */
20738 fp = filp_open(filepath, O_RDWR | O_CREAT, 0664);
20739 if (IS_ERR(fp)) {
20740 DHD_ERROR(("%s: Couldn't open file '%s' err %ld\n",
20741 __FUNCTION__, filepath, PTR_ERR(fp)));
20742 ret = BCME_ERROR;
20743 } else {
20744 if (fp->f_mode & FMODE_WRITE) {
20745 ret = vfs_write(fp, buf, buf_len, &fp->f_pos);
20746 if (ret < 0) {
20747 DHD_ERROR(("%s: Couldn't write file '%s'\n",
20748 __FUNCTION__, filepath));
20749 ret = BCME_ERROR;
20750 } else {
20751 ret = BCME_OK;
20752 }
20753 }
20754 filp_close(fp, NULL);
20755 }
20756
20757 /* restore previous address limit */
20758 #if defined(KERNEL_DS) && defined(USER_DS)
20759 set_fs(old_fs);
20760 #endif /* KERNEL_DS && USER_DS */
20761
20762 return ret;
20763 }
20764
20765 int
20766 dhd_read_file(const char *filepath, char *buf, int buf_len)
20767 {
20768 struct file *fp = NULL;
20769 int ret;
20770 /* change to KERNEL_DS address limit */
20771 #if defined(KERNEL_DS) && defined(USER_DS)
20772 mm_segment_t old_fs;
20773 old_fs = get_fs();
20774 set_fs(KERNEL_DS);
20775 #endif /* KERNEL_DS && USER_DS */
20776 fp = filp_open(filepath, O_RDONLY, 0);
20777 if (IS_ERR(fp)) {
20778 #if defined(KERNEL_DS) && defined(USER_DS)
20779 set_fs(old_fs);
20780 #endif /* KERNEL_DS && USER_DS */
20781 DHD_ERROR(("%s: File %s doesn't exist\n", __FUNCTION__, filepath));
20782 return BCME_ERROR;
20783 }
20784
20785 ret = compat_kernel_read(fp, 0, buf, buf_len);
20786 filp_close(fp, NULL);
20787
20788 /* restore previous address limit */
20789 #if defined(KERNEL_DS) && defined(USER_DS)
20790 set_fs(old_fs);
20791 #endif /* KERNEL_DS && USER_DS */
20792
/* compat_kernel_read() returns the number of bytes read */
if (ret > 0) {
/* read succeeded; normalize to 0 for success */
20796 ret = 0;
20797 } else {
20798 DHD_ERROR(("%s: Couldn't read the file %s, ret=%d\n",
20799 __FUNCTION__, filepath, ret));
20800 ret = BCME_ERROR;
20801 }
20802
20803 return ret;
20804 }
20805
20806 int
20807 dhd_write_file_and_check(const char *filepath, char *buf, int buf_len)
20808 {
20809 int ret;
20810
20811 ret = dhd_write_file(filepath, buf, buf_len);
20812 if (ret < 0) {
20813 return ret;
20814 }
20815
20816 /* Read the file again and check if the file size is not zero */
20817 memset(buf, 0, buf_len);
20818 ret = dhd_read_file(filepath, buf, buf_len);
20819
20820 return ret;
20821 }
20822
20823 #ifdef DHD_BANDSTEER
20824 /*
20825 * Function return true only if there exactly two GO interfaces
20826 * TODO: Make it flexible to have AP + AP
20827 */
20828 s32
20829 dhd_bandsteer_get_ifaces(void *pub, void *ifaces)
20830 {
20831 dhd_if_t *iflist; /* For supporting multiple interfaces */
20832 uint8 idx;
20833 uint8 ap_idx_count = 0;
20834 dhd_pub_t *dhd = (dhd_pub_t *) pub;
20835 dhd_bandsteer_iface_info_t *bsd_ifp = (dhd_bandsteer_iface_info_t *)ifaces;
20836
20837 DHD_INFO(("%s: entered\n", __FUNCTION__));
20838 for (idx = 0; idx < DHD_MAX_IFS; idx++) {
20839 iflist = dhd->info->iflist[idx];
20840 if (iflist == NULL) {
20841 continue;
20842 }
20843
20844 if (iflist->net != NULL) {
20845 if (iflist->net->ieee80211_ptr != NULL) {
20846 if (
20847 (iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_P2P_GO) ||
20848 (iflist->net->ieee80211_ptr->iftype == NL80211_IFTYPE_AP)) {
20849 ap_idx_count++;
20850 if (ap_idx_count > 2) {
20851 continue;
20852 }
20853 bsd_ifp->ndev = iflist->net;
20854 bsd_ifp->bssidx = iflist->bssidx;
20855 bsd_ifp++;
20856 }
20857 }
20858 }
20859 }
20860 if (ap_idx_count == 2) {
20861 return BCME_OK;
20862 } else {
20863 return BCME_ERROR;
20864 }
20865 }
20866
20867 void
20868 dhd_bandsteer_schedule_work_on_timeout(dhd_bandsteer_mac_entry_t *dhd_bandsteer_mac)
20869 {
20870 dhd_bandsteer_context_t *dhd_bandsteer_cntx = dhd_bandsteer_mac->dhd_bandsteer_cntx;
20871 dhd_pub_t *dhd = (dhd_pub_t *) dhd_bandsteer_cntx->dhd_pub;
20872
20873 dhd_deferred_schedule_work(dhd->info->dhd_deferred_wq,
20874 (void *)dhd_bandsteer_mac, DHD_WQ_WORK_BANDSTEER_STEP_MOVE,
20875 dhd_bandsteer_workqueue_wrapper, DHD_WQ_WORK_PRIORITY_LOW);
20876 }
20877 #endif /* DHD_BANDSTEER */
20878
20879 #ifdef FILTER_IE
20880 int dhd_read_from_file(dhd_pub_t *dhd)
20881 {
20882 int ret = 0, nread = 0;
20883 void *fd;
20884 uint8 *buf;
20885 NULL_CHECK(dhd, "dhd is NULL", ret);
20886
20887 buf = MALLOCZ(dhd->osh, FILE_BLOCK_READ_SIZE);
20888 if (!buf) {
20889 DHD_ERROR(("error: failed to alllocate buf.\n"));
20890 return BCME_NOMEM;
20891 }
20892
20893 /* open file to read */
20894 fd = dhd_os_open_image1(dhd, FILTER_IE_PATH);
20895 if (!fd) {
20896 DHD_ERROR(("error: failed to open %s\n", FILTER_IE_PATH));
20897 ret = BCME_EPERM;
20898 goto exit;
20899 }
20900 nread = dhd_os_get_image_block(buf, (FILE_BLOCK_READ_SIZE - 1), fd);
20901 if (nread > 0) {
20902 buf[nread] = '\0';
20903 if ((ret = dhd_parse_filter_ie(dhd, buf)) < 0) {
20904 DHD_ERROR(("error: failed to parse filter ie\n"));
20905 }
20906 } else {
20907 DHD_ERROR(("error: zero length file.failed to read\n"));
20908 ret = BCME_ERROR;
20909 }
20910 dhd_os_close_image1(dhd, fd);
20911 exit:
20912 if (buf) {
20913 MFREE(dhd->osh, buf, FILE_BLOCK_READ_SIZE);
20914 buf = NULL;
20915 }
20916 return ret;
20917 }
20918
20919 int dhd_get_filter_ie_count(dhd_pub_t *dhdp, uint8* buf)
20920 {
20921 uint8* pstr = buf;
20922 int element_count = 0;
20923
20924 if (buf == NULL) {
20925 return BCME_ERROR;
20926 }
20927
20928 while (*pstr != '\0') {
20929 if (*pstr == '\n') {
20930 element_count++;
20931 }
20932 pstr++;
20933 }
20934 /*
20935 * New line character must not be present after last line.
20936 * To count last line
20937 */
20938 element_count++;
20939
20940 return element_count;
20941 }
20942
20943 int dhd_parse_oui(dhd_pub_t *dhd, uint8 *inbuf, uint8 *oui, int len)
20944 {
20945 uint8 i, j, msb, lsb, oui_len = 0;
20946 /*
20947 * OUI can vary from 3 bytes to 5 bytes.
20948 * While reading from file as ascii input it can
20949 * take maximum size of 14 bytes and minumum size of
20950 * 8 bytes including ":"
20951 * Example 5byte OUI <AB:DE:BE:CD:FA>
20952 * Example 3byte OUI <AB:DC:EF>
20953 */
20954
20955 if ((inbuf == NULL) || (len < 8) || (len > 14)) {
20956 DHD_ERROR(("error: failed to parse OUI \n"));
20957 return BCME_ERROR;
20958 }
20959
20960 for (j = 0, i = 0; i < len; i += 3, ++j) {
20961 if (!bcm_isxdigit(inbuf[i]) || !bcm_isxdigit(inbuf[i + 1])) {
20962 DHD_ERROR(("error: invalid OUI format \n"));
20963 return BCME_ERROR;
20964 }
20965 msb = inbuf[i] > '9' ? bcm_toupper(inbuf[i]) - 'A' + 10 : inbuf[i] - '0';
20966 lsb = inbuf[i + 1] > '9' ? bcm_toupper(inbuf[i + 1]) -
20967 'A' + 10 : inbuf[i + 1] - '0';
20968 oui[j] = (msb << 4) | lsb;
20969 }
/* Size of the OUI; it can be 3, 4 or 5 bytes */
20971 oui_len = j;
20972
20973 return oui_len;
20974 }
20975
20976 int dhd_check_valid_ie(dhd_pub_t *dhdp, uint8* buf, int len)
20977 {
20978 int i = 0;
20979
20980 while (i < len) {
20981 if (!bcm_isdigit(buf[i])) {
20982 DHD_ERROR(("error: non digit value found in filter_ie \n"));
20983 return BCME_ERROR;
20984 }
20985 i++;
20986 }
20987 if (bcm_atoi((char*)buf) > 255) {
20988 DHD_ERROR(("error: element id cannot be greater than 255 \n"));
20989 return BCME_ERROR;
20990 }
20991
20992 return BCME_OK;
20993 }
20994
20995 int dhd_parse_filter_ie(dhd_pub_t *dhd, uint8 *buf)
20996 {
20997 int element_count = 0, i = 0, oui_size = 0, ret = 0;
20998 uint16 bufsize, buf_space_left, id = 0, len = 0;
20999 uint16 filter_iovsize, all_tlvsize;
21000 wl_filter_ie_tlv_t *p_ie_tlv = NULL;
21001 wl_filter_ie_iov_v1_t *p_filter_iov = (wl_filter_ie_iov_v1_t *) NULL;
21002 char *token = NULL, *ele_token = NULL, *oui_token = NULL, *type = NULL;
21003 uint8 data[20];
21004
21005 element_count = dhd_get_filter_ie_count(dhd, buf);
21006 DHD_INFO(("total element count %d \n", element_count));
21007 /* Calculate the whole buffer size */
21008 filter_iovsize = sizeof(wl_filter_ie_iov_v1_t) + FILTER_IE_BUFSZ;
21009 p_filter_iov = MALLOCZ(dhd->osh, filter_iovsize);
21010
21011 if (p_filter_iov == NULL) {
21012 DHD_ERROR(("error: failed to allocate %d bytes of memory\n", filter_iovsize));
21013 return BCME_ERROR;
21014 }
21015
21016 /* setup filter iovar header */
21017 p_filter_iov->version = WL_FILTER_IE_VERSION;
21018 p_filter_iov->len = filter_iovsize;
21019 p_filter_iov->fixed_length = p_filter_iov->len - FILTER_IE_BUFSZ;
21020 p_filter_iov->pktflag = FC_PROBE_REQ;
21021 p_filter_iov->option = WL_FILTER_IE_CHECK_SUB_OPTION;
21022 /* setup TLVs */
21023 bufsize = filter_iovsize - WL_FILTER_IE_IOV_HDR_SIZE; /* adjust available size for TLVs */
21024 p_ie_tlv = (wl_filter_ie_tlv_t *)&p_filter_iov->tlvs[0];
21025 buf_space_left = bufsize;
21026
21027 while ((i < element_count) && (buf != NULL)) {
21028 len = 0;
21029 /* token contains one line of input data */
21030 token = bcmstrtok((char**)&buf, "\n", NULL);
21031 if (token == NULL) {
21032 break;
21033 }
21034 if ((ele_token = bcmstrstr(token, ",")) == NULL) {
21035 /* only element id is present */
21036 if (dhd_check_valid_ie(dhd, token, strlen(token)) == BCME_ERROR) {
21037 DHD_ERROR(("error: Invalid element id \n"));
21038 ret = BCME_ERROR;
21039 goto exit;
21040 }
21041 id = bcm_atoi((char*)token);
21042 data[len++] = WL_FILTER_IE_SET;
21043 } else {
21044 /* oui is present */
21045 ele_token = bcmstrtok(&token, ",", NULL);
21046 if ((ele_token == NULL) || (dhd_check_valid_ie(dhd, ele_token,
21047 strlen(ele_token)) == BCME_ERROR)) {
21048 DHD_ERROR(("error: Invalid element id \n"));
21049 ret = BCME_ERROR;
21050 goto exit;
21051 }
21052 id = bcm_atoi((char*)ele_token);
21053 data[len++] = WL_FILTER_IE_SET;
21054 if ((oui_token = bcmstrstr(token, ",")) == NULL) {
21055 oui_size = dhd_parse_oui(dhd, token, &(data[len]), strlen(token));
21056 if (oui_size == BCME_ERROR) {
21057 DHD_ERROR(("error: Invalid OUI \n"));
21058 ret = BCME_ERROR;
21059 goto exit;
21060 }
21061 len += oui_size;
21062 } else {
21063 /* type is present */
21064 oui_token = bcmstrtok(&token, ",", NULL);
21065 if ((oui_token == NULL) || ((oui_size =
21066 dhd_parse_oui(dhd, oui_token,
21067 &(data[len]), strlen(oui_token))) == BCME_ERROR)) {
21068 DHD_ERROR(("error: Invalid OUI \n"));
21069 ret = BCME_ERROR;
21070 goto exit;
21071 }
21072 len += oui_size;
21073 if ((type = bcmstrstr(token, ",")) == NULL) {
21074 if (dhd_check_valid_ie(dhd, token,
21075 strlen(token)) == BCME_ERROR) {
21076 DHD_ERROR(("error: Invalid type \n"));
21077 ret = BCME_ERROR;
21078 goto exit;
21079 }
21080 data[len++] = bcm_atoi((char*)token);
21081 } else {
21082 /* subtype is present */
21083 type = bcmstrtok(&token, ",", NULL);
21084 if ((type == NULL) || (dhd_check_valid_ie(dhd, type,
21085 strlen(type)) == BCME_ERROR)) {
21086 DHD_ERROR(("error: Invalid type \n"));
21087 ret = BCME_ERROR;
21088 goto exit;
21089 }
21090 data[len++] = bcm_atoi((char*)type);
21091 /* subtype is last element */
21092 if ((token == NULL) || (*token == '\0') ||
21093 (dhd_check_valid_ie(dhd, token,
21094 strlen(token)) == BCME_ERROR)) {
21095 DHD_ERROR(("error: Invalid subtype \n"));
21096 ret = BCME_ERROR;
21097 goto exit;
21098 }
21099 data[len++] = bcm_atoi((char*)token);
21100 }
21101 }
21102 }
21103 ret = bcm_pack_xtlv_entry((uint8 **)&p_ie_tlv,
21104 &buf_space_left, id, len, data, BCM_XTLV_OPTION_ALIGN32);
21105 if (ret != BCME_OK) {
21106 DHD_ERROR(("%s : bcm_pack_xtlv_entry() failed ,"
21107 "status=%d\n", __FUNCTION__, ret));
21108 goto exit;
21109 }
21110 i++;
21111 }
21112 if (i == 0) {
21113 /* file is empty or first line is blank */
21114 DHD_ERROR(("error: filter_ie file is empty or first line is blank \n"));
21115 ret = BCME_ERROR;
21116 goto exit;
21117 }
21118 /* update the iov header, set len to include all TLVs + header */
21119 all_tlvsize = (bufsize - buf_space_left);
21120 p_filter_iov->len = htol16(all_tlvsize + WL_FILTER_IE_IOV_HDR_SIZE);
21121 ret = dhd_iovar(dhd, 0, "filter_ie", (void *)p_filter_iov,
21122 p_filter_iov->len, NULL, 0, TRUE);
21123 if (ret != BCME_OK) {
21124 DHD_ERROR(("error: IOVAR failed, status=%d\n", ret));
21125 }
21126 exit:
21127 /* clean up */
21128 if (p_filter_iov) {
21129 MFREE(dhd->osh, p_filter_iov, filter_iovsize);
21130 p_filter_iov = NULL;
21131 }
21132 return ret;
21133 }
21134 #endif /* FILTER_IE */
21135 #ifdef DHD_WAKE_STATUS
21136 wake_counts_t*
21137 dhd_get_wakecount(dhd_pub_t *dhdp)
21138 {
21139 return dhd_bus_get_wakecount(dhdp);
21140 }
21141 #endif /* DHD_WAKE_STATUS */
21142
21143 int
21144 dhd_get_random_bytes(uint8 *buf, uint len)
21145 {
21146 #ifdef BCMPCIE
21147 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
21148 int rndlen = get_random_bytes_arch(buf, len);
21149 if (rndlen != len) {
21150 bzero(buf, len);
21151 get_random_bytes(buf, len);
21152 }
21153 #else
21154 get_random_bytes_arch(buf, len);
21155 #endif // endif
21156 #endif /* BCMPCIE */
21157 return BCME_OK;
21158 }
21159
21160 #if defined(DHD_HANG_SEND_UP_TEST)
21161 void
21162 dhd_make_hang_with_reason(struct net_device *dev, const char *string_num)
21163 {
21164 dhd_info_t *dhd = NULL;
21165 dhd_pub_t *dhdp = NULL;
21166 uint reason = HANG_REASON_MAX;
21167 uint32 fw_test_code = 0;
21168 dhd = DHD_DEV_INFO(dev);
21169
21170 if (dhd) {
21171 dhdp = &dhd->pub;
21172 }
21173
21174 if (!dhd || !dhdp) {
21175 return;
21176 }
21177
21178 reason = (uint) bcm_strtoul(string_num, NULL, 0);
21179 DHD_ERROR(("Enter %s, reason=0x%x\n", __FUNCTION__, reason));
21180
21181 if (reason == 0) {
21182 if (dhdp->req_hang_type) {
21183 DHD_ERROR(("%s, Clear HANG test request 0x%x\n",
21184 __FUNCTION__, dhdp->req_hang_type));
21185 dhdp->req_hang_type = 0;
21186 return;
21187 } else {
21188 DHD_ERROR(("%s, No requested HANG test\n", __FUNCTION__));
21189 return;
21190 }
21191 } else if ((reason <= HANG_REASON_MASK) || (reason >= HANG_REASON_MAX)) {
21192 DHD_ERROR(("Invalid HANG request, reason 0x%x\n", reason));
21193 return;
21194 }
21195
21196 if (dhdp->req_hang_type != 0) {
21197 DHD_ERROR(("Already HANG requested for test\n"));
21198 return;
21199 }
21200
21201 switch (reason) {
21202 case HANG_REASON_IOCTL_RESP_TIMEOUT:
21203 DHD_ERROR(("Make HANG!!!: IOCTL response timeout(0x%x)\n", reason));
21204 dhdp->req_hang_type = reason;
fw_test_code = 102; /* resumed on timeout */
21206 (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
21207 WLC_SET_VAR, TRUE, 0);
21208 break;
21209 case HANG_REASON_DONGLE_TRAP:
21210 DHD_ERROR(("Make HANG!!!: Dongle trap (0x%x)\n", reason));
21211 dhdp->req_hang_type = reason;
21212 fw_test_code = 99; /* dongle trap */
21213 (void) dhd_wl_ioctl_set_intiovar(dhdp, "bus:disconnect", fw_test_code,
21214 WLC_SET_VAR, TRUE, 0);
21215 break;
21216 case HANG_REASON_D3_ACK_TIMEOUT:
21217 DHD_ERROR(("Make HANG!!!: D3 ACK timeout (0x%x)\n", reason));
21218 dhdp->req_hang_type = reason;
21219 break;
21220 case HANG_REASON_BUS_DOWN:
21221 DHD_ERROR(("Make HANG!!!: BUS down(0x%x)\n", reason));
21222 dhdp->req_hang_type = reason;
21223 break;
21224 case HANG_REASON_PCIE_LINK_DOWN_RC_DETECT:
21225 case HANG_REASON_PCIE_LINK_DOWN_EP_DETECT:
21226 case HANG_REASON_MSGBUF_LIVELOCK:
21227 dhdp->req_hang_type = 0;
21228 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
21229 break;
21230 case HANG_REASON_IFACE_DEL_FAILURE:
21231 dhdp->req_hang_type = 0;
21232 DHD_ERROR(("Does not support requested HANG(0x%x)\n", reason));
21233 break;
21234 case HANG_REASON_HT_AVAIL_ERROR:
21235 dhdp->req_hang_type = 0;
21236 DHD_ERROR(("PCIe does not support requested HANG(0x%x)\n", reason));
21237 break;
21238 case HANG_REASON_PCIE_RC_LINK_UP_FAIL:
21239 DHD_ERROR(("Make HANG!!!:Link Up(0x%x)\n", reason));
21240 dhdp->req_hang_type = reason;
21241 break;
21242 default:
21243 dhdp->req_hang_type = 0;
21244 DHD_ERROR(("Unknown HANG request (0x%x)\n", reason));
21245 break;
21246 }
21247 }
21248 #endif /* DHD_HANG_SEND_UP_TEST */
21249
21250 #ifdef DHD_ERPOM
21251 static void
21252 dhd_error_recovery(void *handle, void *event_info, u8 event)
21253 {
21254 dhd_info_t *dhd = handle;
21255 dhd_pub_t *dhdp;
21256 int ret = 0;
21257
21258 if (!dhd) {
21259 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21260 return;
21261 }
21262
21263 dhdp = &dhd->pub;
21264
21265 if (!(dhd->dhd_state & DHD_ATTACH_STATE_DONE)) {
21266 DHD_ERROR(("%s: init not completed, cannot initiate recovery\n",
21267 __FUNCTION__));
21268 return;
21269 }
21270
21271 ret = dhd_bus_perform_flr_with_quiesce(dhdp, dhdp->bus, FALSE);
21272 if (ret != BCME_DNGL_DEVRESET) {
21273 DHD_ERROR(("%s: dhd_bus_perform_flr_with_quiesce failed with ret: %d,"
21274 "toggle REG_ON\n", __FUNCTION__, ret));
21275 /* toggle REG_ON */
21276 dhdp->pom_toggle_reg_on(WLAN_FUNC_ID, BY_WLAN_DUE_TO_WLAN);
21277 return;
21278 }
21279 }
21280
21281 void
21282 dhd_schedule_reset(dhd_pub_t *dhdp)
21283 {
21284 if (dhdp->enable_erpom) {
21285 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq, NULL,
21286 DHD_WQ_WORK_ERROR_RECOVERY, dhd_error_recovery, DHD_WQ_WORK_PRIORITY_HIGH);
21287 }
21288 }
21289 #endif /* DHD_ERPOM */
21290
21291 #ifdef DHD_PKT_LOGGING
21292 void
21293 dhd_pktlog_dump(void *handle, void *event_info, u8 event)
21294 {
21295 dhd_info_t *dhd = handle;
21296
21297 if (!dhd) {
21298 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
21299 return;
21300 }
21301
21302 if (dhd_pktlog_dump_write_file(&dhd->pub)) {
21303 DHD_ERROR(("%s: writing pktlog dump file failed\n", __FUNCTION__));
21304 return;
21305 }
21306 }
21307
21308 void
21309 dhd_schedule_pktlog_dump(dhd_pub_t *dhdp)
21310 {
21311 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21312 (void*)NULL, DHD_WQ_WORK_PKTLOG_DUMP,
21313 dhd_pktlog_dump, DHD_WQ_WORK_PRIORITY_HIGH);
21314 }
21315 #endif /* DHD_PKT_LOGGING */
21316
21317 #ifdef BIGDATA_SOFTAP
21318 void dhd_schedule_gather_ap_stadata(void *bcm_cfg, void *ndev, const wl_event_msg_t *e)
21319 {
21320 struct bcm_cfg80211 *cfg;
21321 dhd_pub_t *dhdp;
21322 ap_sta_wq_data_t *p_wq_data;
21323
21324 if (!bcm_cfg || !ndev || !e) {
21325 WL_ERR(("bcm_cfg=%p ndev=%p e=%p\n", bcm_cfg, ndev, e));
21326 return;
21327 }
21328
21329 cfg = (struct bcm_cfg80211 *)bcm_cfg;
21330 dhdp = (dhd_pub_t *)cfg->pub;
21331
21332 if (!dhdp || !cfg->ap_sta_info) {
21333 WL_ERR(("dhdp=%p ap_sta_info=%p\n", dhdp, cfg->ap_sta_info));
21334 return;
21335 }
21336
21337 p_wq_data = (ap_sta_wq_data_t *)MALLOCZ(dhdp->osh, sizeof(ap_sta_wq_data_t));
21338 if (unlikely(!p_wq_data)) {
21339 DHD_ERROR(("%s(): could not allocate memory for - "
21340 "ap_sta_wq_data_t\n", __FUNCTION__));
21341 return;
21342 }
21343
21344 mutex_lock(&cfg->ap_sta_info->wq_data_sync);
21345
21346 memcpy(&p_wq_data->e, e, sizeof(wl_event_msg_t));
21347 p_wq_data->dhdp = dhdp;
21348 p_wq_data->bcm_cfg = cfg;
21349 p_wq_data->ndev = (struct net_device *)ndev;
21350
21351 mutex_unlock(&cfg->ap_sta_info->wq_data_sync);
21352
21353 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
21354 p_wq_data, DHD_WQ_WORK_GET_BIGDATA_AP,
21355 wl_gather_ap_stadata, DHD_WQ_WORK_PRIORITY_HIGH);
21356
21357 }
21358 #endif /* BIGDATA_SOFTAP */
21359
21360 void
21361 get_debug_dump_time(char *str)
21362 {
21363 struct timespec64 curtime;
21364 unsigned long long local_time;
21365
21366 struct rtc_time tm;
21367
21368 if (!strlen(str)) {
21369 ktime_get_real_ts64(&curtime);
21370 local_time = (u64)(curtime.tv_sec -
21371 (sys_tz.tz_minuteswest * DHD_LOG_DUMP_TS_MULTIPLIER_VALUE));
21372 rtc_time_to_tm(local_time, &tm);
21373
21374 snprintf(str, DEBUG_DUMP_TIME_BUF_LEN, DHD_LOG_DUMP_TS_FMT_YYMMDDHHMMSSMSMS,
21375 tm.tm_year - 100, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min,
21376 tm.tm_sec, (int)(curtime.tv_nsec/NSEC_PER_MSEC));
21377 }
21378 }
21379
21380 void
21381 clear_debug_dump_time(char *str)
21382 {
21383 memset(str, 0, DEBUG_DUMP_TIME_BUF_LEN);
21384 }
21385 #if defined(WL_CFGVENDOR_SEND_HANG_EVENT) || defined(DHD_PKT_LOGGING)
21386 void
21387 copy_debug_dump_time(char *dest, char *src)
21388 {
21389 memcpy(dest, src, DEBUG_DUMP_TIME_BUF_LEN);
21390 }
21391 #endif /* WL_CFGVENDOR_SEND_HANG_EVENT || DHD_PKT_LOGGING */
21392
21393 void
21394 dhd_print_tasklet_status(dhd_pub_t *dhd)
21395 {
21396 dhd_info_t *dhdinfo;
21397
21398 if (!dhd) {
21399 DHD_ERROR(("%s : DHD is null\n", __FUNCTION__));
21400 return;
21401 }
21402
21403 dhdinfo = dhd->info;
21404
21405 if (!dhdinfo) {
21406 DHD_ERROR(("%s : DHD INFO is null \n", __FUNCTION__));
21407 return;
21408 }
21409
21410 DHD_ERROR(("DHD Tasklet status : 0x%lx\n", dhdinfo->tasklet.state));
21411 }
21412
21413 /*
21414 * DHD RING
21415 */
21416 #define DHD_RING_ERR_INTERNAL(fmt, ...) DHD_ERROR(("EWPF-" fmt, ##__VA_ARGS__))
21417 #define DHD_RING_TRACE_INTERNAL(fmt, ...) DHD_INFO(("EWPF-" fmt, ##__VA_ARGS__))
21418
21419 #define DHD_RING_ERR(x) DHD_RING_ERR_INTERNAL x
21420 #define DHD_RING_TRACE(x) DHD_RING_TRACE_INTERNAL x
21421
21422 #define DHD_RING_MAGIC 0x20170910
21423 #define DHD_RING_IDX_INVALID 0xffffffff
21424
21425 #define DHD_RING_SYNC_LOCK_INIT(osh) dhd_os_spin_lock_init(osh)
21426 #define DHD_RING_SYNC_LOCK_DEINIT(osh, lock) dhd_os_spin_lock_deinit(osh, lock)
21427 #define DHD_RING_SYNC_LOCK(lock, flags) (flags) = dhd_os_spin_lock(lock)
21428 #define DHD_RING_SYNC_UNLOCK(lock, flags) dhd_os_spin_unlock(lock, flags)
21429
21430 typedef struct {
21431 uint32 elem_size;
21432 uint32 elem_cnt;
21433 uint32 write_idx; /* next write index, -1 : not started */
uint32 read_idx; /* next read index, -1 : not started */
21435
21436 /* protected elements during serialization */
int lock_idx; /* start index of the locked region; elements there will not be overwritten */
int lock_count; /* number of locked elements, from lock_idx */
21439
21440 /* saved data elements */
21441 void *elem;
21442 } dhd_fixed_ring_info_t;
21443
21444 typedef struct {
21445 uint32 elem_size;
21446 uint32 elem_cnt;
21447 uint32 idx; /* -1 : not started */
21448 uint32 rsvd; /* reserved for future use */
21449
21450 /* protected elements during serialization */
21451 atomic_t ring_locked;
/* set when the ring wraps and overwrites old entries */
21453 uint32 ring_overwrited;
21454
21455 /* saved data elements */
21456 void *elem;
21457 } dhd_singleidx_ring_info_t;
21458
21459 typedef struct {
21460 uint32 magic;
21461 uint32 type;
21462 void *ring_sync; /* spinlock for sync */
21463 union {
21464 dhd_fixed_ring_info_t fixed;
21465 dhd_singleidx_ring_info_t single;
21466 };
21467 } dhd_ring_info_t;
21468
21469 uint32
21470 dhd_ring_get_hdr_size(void)
21471 {
21472 return sizeof(dhd_ring_info_t);
21473 }
21474
21475 void *
21476 dhd_ring_init(dhd_pub_t *dhdp, uint8 *buf, uint32 buf_size, uint32 elem_size,
21477 uint32 elem_cnt, uint32 type)
21478 {
21479 dhd_ring_info_t *ret_ring;
21480
21481 if (!buf) {
21482 DHD_RING_ERR(("NO RING BUFFER\n"));
21483 return NULL;
21484 }
21485
21486 if (buf_size < dhd_ring_get_hdr_size() + elem_size * elem_cnt) {
21487 DHD_RING_ERR(("RING SIZE IS TOO SMALL\n"));
21488 return NULL;
21489 }
21490
21491 if (type != DHD_RING_TYPE_FIXED && type != DHD_RING_TYPE_SINGLE_IDX) {
21492 DHD_RING_ERR(("UNSUPPORTED RING TYPE\n"));
21493 return NULL;
21494 }
21495
21496 ret_ring = (dhd_ring_info_t *)buf;
21497 ret_ring->type = type;
21498 ret_ring->ring_sync = DHD_RING_SYNC_LOCK_INIT(dhdp->osh);
21499 ret_ring->magic = DHD_RING_MAGIC;
21500
21501 if (type == DHD_RING_TYPE_FIXED) {
21502 ret_ring->fixed.read_idx = DHD_RING_IDX_INVALID;
21503 ret_ring->fixed.write_idx = DHD_RING_IDX_INVALID;
21504 ret_ring->fixed.lock_idx = DHD_RING_IDX_INVALID;
21505 ret_ring->fixed.elem = buf + sizeof(dhd_ring_info_t);
21506 ret_ring->fixed.elem_size = elem_size;
21507 ret_ring->fixed.elem_cnt = elem_cnt;
21508 } else {
21509 ret_ring->single.idx = DHD_RING_IDX_INVALID;
21510 atomic_set(&ret_ring->single.ring_locked, 0);
21511 ret_ring->single.ring_overwrited = 0;
21512 ret_ring->single.rsvd = 0;
21513 ret_ring->single.elem = buf + sizeof(dhd_ring_info_t);
21514 ret_ring->single.elem_size = elem_size;
21515 ret_ring->single.elem_cnt = elem_cnt;
21516 }
21517
21518 return ret_ring;
21519 }
21520
21521 void
21522 dhd_ring_deinit(dhd_pub_t *dhdp, void *_ring)
21523 {
21524 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21525 if (!ring) {
21526 return;
21527 }
21528
21529 if (ring->magic != DHD_RING_MAGIC) {
21530 return;
21531 }
21532
21533 if (ring->type != DHD_RING_TYPE_FIXED &&
21534 ring->type != DHD_RING_TYPE_SINGLE_IDX) {
21535 return;
21536 }
21537
21538 DHD_RING_SYNC_LOCK_DEINIT(dhdp->osh, ring->ring_sync);
21539 ring->ring_sync = NULL;
21540 if (ring->type == DHD_RING_TYPE_FIXED) {
21541 dhd_fixed_ring_info_t *fixed = &ring->fixed;
21542 memset(fixed->elem, 0, fixed->elem_size * fixed->elem_cnt);
21543 fixed->elem_size = fixed->elem_cnt = 0;
21544 } else {
21545 dhd_singleidx_ring_info_t *single = &ring->single;
21546 memset(single->elem, 0, single->elem_size * single->elem_cnt);
21547 single->elem_size = single->elem_cnt = 0;
21548 }
21549 ring->type = 0;
21550 ring->magic = 0;
21551 }
21552
21553 static inline uint32
21554 __dhd_ring_ptr2idx(void *ring, void *ptr, char *sig, uint32 type)
21555 {
21556 uint32 diff;
21557 uint32 ret_idx = (uint32)DHD_RING_IDX_INVALID;
21558 uint32 elem_size, elem_cnt;
21559 void *elem;
21560
21561 if (type == DHD_RING_TYPE_FIXED) {
21562 dhd_fixed_ring_info_t *fixed = (dhd_fixed_ring_info_t *)ring;
21563 elem_size = fixed->elem_size;
21564 elem_cnt = fixed->elem_cnt;
21565 elem = fixed->elem;
21566 } else if (type == DHD_RING_TYPE_SINGLE_IDX) {
21567 dhd_singleidx_ring_info_t *single = (dhd_singleidx_ring_info_t *)ring;
21568 elem_size = single->elem_size;
21569 elem_cnt = single->elem_cnt;
21570 elem = single->elem;
21571 } else {
21572 DHD_RING_ERR(("UNSUPPORTED RING TYPE %d\n", type));
21573 return ret_idx;
21574 }
21575
21576 if (ptr < elem) {
21577 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
21578 return ret_idx;
21579 }
21580 diff = (uint32)((uint8 *)ptr - (uint8 *)elem);
21581 if (diff % elem_size != 0) {
21582 DHD_RING_ERR(("INVALID POINTER %s:%p, ring->elem:%p\n", sig, ptr, elem));
21583 return ret_idx;
21584 }
21585 ret_idx = diff / elem_size;
21586 if (ret_idx >= elem_cnt) {
21587 DHD_RING_ERR(("INVALID POINTER max:%d cur:%d\n", elem_cnt, ret_idx));
21588 }
21589 return ret_idx;
21590 }
21591
21592 /* Sub functions for fixed ring */
21593 /* get counts between two indexes of ring buffer (internal only) */
21594 static inline int
21595 __dhd_fixed_ring_get_count(dhd_fixed_ring_info_t *ring, int start, int end)
21596 {
21597 if (start == DHD_RING_IDX_INVALID || end == DHD_RING_IDX_INVALID) {
21598 return 0;
21599 }
21600
21601 return (ring->elem_cnt + end - start) % ring->elem_cnt + 1;
21602 }
21603
21604 static inline int
21605 __dhd_fixed_ring_get_cur_size(dhd_fixed_ring_info_t *ring)
21606 {
21607 return __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21608 }
21609
21610 static inline void *
21611 __dhd_fixed_ring_get_first(dhd_fixed_ring_info_t *ring)
21612 {
21613 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21614 return NULL;
21615 }
21616 return (uint8 *)ring->elem + (ring->elem_size * ring->read_idx);
21617 }
21618
21619 static inline void
21620 __dhd_fixed_ring_free_first(dhd_fixed_ring_info_t *ring)
21621 {
21622 uint32 next_idx;
21623
21624 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21625 DHD_RING_ERR(("EMPTY RING\n"));
21626 return;
21627 }
21628
21629 next_idx = (ring->read_idx + 1) % ring->elem_cnt;
21630 if (ring->read_idx == ring->write_idx) {
21631 /* Become empty */
21632 ring->read_idx = ring->write_idx = DHD_RING_IDX_INVALID;
21633 return;
21634 }
21635
21636 ring->read_idx = next_idx;
21637 return;
21638 }
21639
21640 static inline void *
21641 __dhd_fixed_ring_get_last(dhd_fixed_ring_info_t *ring)
21642 {
21643 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21644 return NULL;
21645 }
21646 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21647 }
21648
21649 static inline void *
21650 __dhd_fixed_ring_get_empty(dhd_fixed_ring_info_t *ring)
21651 {
21652 uint32 tmp_idx;
21653
21654 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21655 ring->read_idx = ring->write_idx = 0;
21656 return (uint8 *)ring->elem;
21657 }
21658
	/* check that the next index is not locked */
21660 tmp_idx = (ring->write_idx + 1) % ring->elem_cnt;
21661 if (ring->lock_idx == tmp_idx) {
21662 return NULL;
21663 }
21664
21665 ring->write_idx = tmp_idx;
	if (ring->write_idx == ring->read_idx) {
		/* ring is full; drop the oldest element */
		ring->read_idx = (ring->read_idx + 1) % ring->elem_cnt;
	}
21671 return (uint8 *)ring->elem + (ring->elem_size * ring->write_idx);
21672 }
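
/*
 * Note on the write path above: once write_idx wraps onto read_idx the
 * oldest unread element is silently dropped, but a locked slot
 * (lock_idx) is never recycled; the writer gets NULL instead and is
 * expected to retry after the lock is released.
 */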
21673
21674 static inline void *
21675 __dhd_fixed_ring_get_next(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
21676 {
21677 uint32 cur_idx;
21678
21679 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21680 DHD_RING_ERR(("EMPTY RING\n"));
21681 return NULL;
21682 }
21683
21684 cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
21685 if (cur_idx >= ring->elem_cnt) {
21686 return NULL;
21687 }
21688
21689 if (cur_idx == ring->write_idx) {
21690 /* no more new record */
21691 return NULL;
21692 }
21693
21694 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21695 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21696 }
21697
21698 static inline void *
21699 __dhd_fixed_ring_get_prev(dhd_fixed_ring_info_t *ring, void *prev, uint32 type)
21700 {
21701 uint32 cur_idx;
21702
21703 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21704 DHD_RING_ERR(("EMPTY RING\n"));
21705 return NULL;
21706 }
21707 cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
21708 if (cur_idx >= ring->elem_cnt) {
21709 return NULL;
21710 }
21711 if (cur_idx == ring->read_idx) {
		/* no older record */
21713 return NULL;
21714 }
21715
21716 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21717 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21718 }
21719
21720 static inline void
21721 __dhd_fixed_ring_lock(dhd_fixed_ring_info_t *ring, void *first_ptr, void *last_ptr, uint32 type)
21722 {
21723 uint32 first_idx;
21724 uint32 last_idx;
21725 uint32 ring_filled_cnt;
21726 uint32 tmp_cnt;
21727
21728 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21729 DHD_RING_ERR(("EMPTY RING\n"));
21730 return;
21731 }
21732
21733 if (first_ptr) {
21734 first_idx = __dhd_ring_ptr2idx(ring, first_ptr, "LCK FIRST", type);
21735 if (first_idx >= ring->elem_cnt) {
21736 return;
21737 }
21738 } else {
21739 first_idx = ring->read_idx;
21740 }
21741
21742 if (last_ptr) {
21743 last_idx = __dhd_ring_ptr2idx(ring, last_ptr, "LCK LAST", type);
21744 if (last_idx >= ring->elem_cnt) {
21745 return;
21746 }
21747 } else {
21748 last_idx = ring->write_idx;
21749 }
21750
21751 ring_filled_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, ring->write_idx);
21752 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, first_idx);
21753 if (tmp_cnt > ring_filled_cnt) {
21754 DHD_RING_ERR(("LOCK FIRST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21755 ring->write_idx, ring->read_idx, first_idx));
21756 return;
21757 }
21758
21759 tmp_cnt = __dhd_fixed_ring_get_count(ring, ring->read_idx, last_idx);
21760 if (tmp_cnt > ring_filled_cnt) {
21761 DHD_RING_ERR(("LOCK LAST IS TO EMPTY ELEM: write: %d read: %d cur:%d\n",
21762 ring->write_idx, ring->read_idx, last_idx));
21763 return;
21764 }
21765
21766 ring->lock_idx = first_idx;
21767 ring->lock_count = __dhd_fixed_ring_get_count(ring, first_idx, last_idx);
21768 return;
21769 }
21770
21771 static inline void
21772 __dhd_fixed_ring_lock_free(dhd_fixed_ring_info_t *ring)
21773 {
21774 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21775 DHD_RING_ERR(("EMPTY RING\n"));
21776 return;
21777 }
21778
21779 ring->lock_idx = DHD_RING_IDX_INVALID;
21780 ring->lock_count = 0;
21781 return;
}

static inline void *
21784 __dhd_fixed_ring_lock_get_first(dhd_fixed_ring_info_t *ring)
21785 {
21786 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21787 DHD_RING_ERR(("EMPTY RING\n"));
21788 return NULL;
21789 }
21790 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21791 DHD_RING_ERR(("NO LOCK POINT\n"));
21792 return NULL;
21793 }
21794 return (uint8 *)ring->elem + ring->elem_size * ring->lock_idx;
21795 }
21796
21797 static inline void *
21798 __dhd_fixed_ring_lock_get_last(dhd_fixed_ring_info_t *ring)
21799 {
21800 int lock_last_idx;
21801 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21802 DHD_RING_ERR(("EMPTY RING\n"));
21803 return NULL;
21804 }
21805 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21806 DHD_RING_ERR(("NO LOCK POINT\n"));
21807 return NULL;
21808 }
21809
21810 lock_last_idx = (ring->lock_idx + ring->lock_count - 1) % ring->elem_cnt;
21811 return (uint8 *)ring->elem + ring->elem_size * lock_last_idx;
21812 }
21813
21814 static inline int
21815 __dhd_fixed_ring_lock_get_count(dhd_fixed_ring_info_t *ring)
21816 {
21817 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21818 DHD_RING_ERR(("EMPTY RING\n"));
21819 return BCME_ERROR;
21820 }
21821 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21822 DHD_RING_ERR(("NO LOCK POINT\n"));
21823 return BCME_ERROR;
21824 }
21825 return ring->lock_count;
21826 }
21827
21828 static inline void
21829 __dhd_fixed_ring_lock_free_first(dhd_fixed_ring_info_t *ring)
21830 {
21831 if (ring->read_idx == DHD_RING_IDX_INVALID) {
21832 DHD_RING_ERR(("EMPTY RING\n"));
21833 return;
21834 }
21835 if (ring->lock_idx == DHD_RING_IDX_INVALID) {
21836 DHD_RING_ERR(("NO LOCK POINT\n"));
21837 return;
21838 }
21839
21840 ring->lock_count--;
21841 if (ring->lock_count <= 0) {
21842 ring->lock_idx = DHD_RING_IDX_INVALID;
21843 } else {
21844 ring->lock_idx = (ring->lock_idx + 1) % ring->elem_cnt;
21845 }
21846 return;
21847 }
21848
21849 static inline void
21850 __dhd_fixed_ring_set_read_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
21851 {
21852 ring->read_idx = idx;
21853 }
21854
21855 static inline void
21856 __dhd_fixed_ring_set_write_idx(dhd_fixed_ring_info_t *ring, uint32 idx)
21857 {
21858 ring->write_idx = idx;
21859 }
21860
21861 static inline uint32
21862 __dhd_fixed_ring_get_read_idx(dhd_fixed_ring_info_t *ring)
21863 {
21864 return ring->read_idx;
21865 }
21866
21867 static inline uint32
21868 __dhd_fixed_ring_get_write_idx(dhd_fixed_ring_info_t *ring)
21869 {
21870 return ring->write_idx;
21871 }
21872
21873 /* Sub functions for single index ring */
21874 static inline void *
21875 __dhd_singleidx_ring_get_first(dhd_singleidx_ring_info_t *ring)
21876 {
21877 uint32 tmp_idx = 0;
21878
21879 if (ring->idx == DHD_RING_IDX_INVALID) {
21880 return NULL;
21881 }
21882
21883 if (ring->ring_overwrited) {
21884 tmp_idx = (ring->idx + 1) % ring->elem_cnt;
21885 }
21886
21887 return (uint8 *)ring->elem + (ring->elem_size * tmp_idx);
21888 }
21889
21890 static inline void *
21891 __dhd_singleidx_ring_get_last(dhd_singleidx_ring_info_t *ring)
21892 {
21893 if (ring->idx == DHD_RING_IDX_INVALID) {
21894 return NULL;
21895 }
21896
21897 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
21898 }
21899
21900 static inline void *
21901 __dhd_singleidx_ring_get_empty(dhd_singleidx_ring_info_t *ring)
21902 {
21903 if (ring->idx == DHD_RING_IDX_INVALID) {
21904 ring->idx = 0;
21905 return (uint8 *)ring->elem;
21906 }
21907
	/* do not advance while the whole-ring lock is held */
21909 if (atomic_read(&ring->ring_locked)) {
21910 return NULL;
21911 }
21912
21913 /* check the index rollover */
21914 if (!ring->ring_overwrited && ring->idx == (ring->elem_cnt - 1)) {
21915 ring->ring_overwrited = 1;
21916 }
21917
21918 ring->idx = (ring->idx + 1) % ring->elem_cnt;
21919
21920 return (uint8 *)ring->elem + (ring->elem_size * ring->idx);
21921 }
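
/*
 * Example: with elem_cnt == 4 the writer fills slots 0..3; when idx
 * reaches elem_cnt - 1 the ring_overwrited flag latches, telling
 * readers that iteration must start at the slot after idx (the oldest
 * entry) rather than at slot 0.
 */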
21922
21923 static inline void *
21924 __dhd_singleidx_ring_get_next(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
21925 {
21926 uint32 cur_idx;
21927
21928 if (ring->idx == DHD_RING_IDX_INVALID) {
21929 DHD_RING_ERR(("EMPTY RING\n"));
21930 return NULL;
21931 }
21932
21933 cur_idx = __dhd_ring_ptr2idx(ring, prev, "NEXT", type);
21934 if (cur_idx >= ring->elem_cnt) {
21935 return NULL;
21936 }
21937
21938 if (cur_idx == ring->idx) {
21939 /* no more new record */
21940 return NULL;
21941 }
21942
21943 cur_idx = (cur_idx + 1) % ring->elem_cnt;
21944
21945 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21946 }
21947
21948 static inline void *
21949 __dhd_singleidx_ring_get_prev(dhd_singleidx_ring_info_t *ring, void *prev, uint32 type)
21950 {
21951 uint32 cur_idx;
21952
21953 if (ring->idx == DHD_RING_IDX_INVALID) {
21954 DHD_RING_ERR(("EMPTY RING\n"));
21955 return NULL;
21956 }
21957 cur_idx = __dhd_ring_ptr2idx(ring, prev, "PREV", type);
21958 if (cur_idx >= ring->elem_cnt) {
21959 return NULL;
21960 }
21961
21962 if (!ring->ring_overwrited && cur_idx == 0) {
		/* no older record */
21964 return NULL;
21965 }
21966
21967 cur_idx = (cur_idx + ring->elem_cnt - 1) % ring->elem_cnt;
21968 if (ring->ring_overwrited && cur_idx == ring->idx) {
		/* no older record */
21970 return NULL;
21971 }
21972
21973 return (uint8 *)ring->elem + ring->elem_size * cur_idx;
21974 }
21975
21976 static inline void
21977 __dhd_singleidx_ring_whole_lock(dhd_singleidx_ring_info_t *ring)
21978 {
21979 if (!atomic_read(&ring->ring_locked)) {
21980 atomic_set(&ring->ring_locked, 1);
21981 }
21982 }
21983
21984 static inline void
21985 __dhd_singleidx_ring_whole_unlock(dhd_singleidx_ring_info_t *ring)
21986 {
21987 if (atomic_read(&ring->ring_locked)) {
21988 atomic_set(&ring->ring_locked, 0);
21989 }
21990 }
21991
/* Get first element: oldest element */
21993 void *
21994 dhd_ring_get_first(void *_ring)
21995 {
21996 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
21997 void *ret = NULL;
21998 unsigned long flags;
21999
22000 if (!ring || ring->magic != DHD_RING_MAGIC) {
22001 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22002 return NULL;
22003 }
22004
22005 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22006 if (ring->type == DHD_RING_TYPE_FIXED) {
22007 ret = __dhd_fixed_ring_get_first(&ring->fixed);
22008 }
22009 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22010 ret = __dhd_singleidx_ring_get_first(&ring->single);
22011 }
22012 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22013 return ret;
22014 }
22015
/* Free first element: oldest element */
22017 void
22018 dhd_ring_free_first(void *_ring)
22019 {
22020 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22021 unsigned long flags;
22022
22023 if (!ring || ring->magic != DHD_RING_MAGIC) {
22024 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22025 return;
22026 }
22027
22028 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22029 if (ring->type == DHD_RING_TYPE_FIXED) {
22030 __dhd_fixed_ring_free_first(&ring->fixed);
22031 }
22032 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22033 }
22034
22035 void
22036 dhd_ring_set_read_idx(void *_ring, uint32 read_idx)
22037 {
22038 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22039 unsigned long flags;
22040
22041 if (!ring || ring->magic != DHD_RING_MAGIC) {
22042 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22043 return;
22044 }
22045
22046 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22047 if (ring->type == DHD_RING_TYPE_FIXED) {
22048 __dhd_fixed_ring_set_read_idx(&ring->fixed, read_idx);
22049 }
22050 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22051 }
22052
22053 void
22054 dhd_ring_set_write_idx(void *_ring, uint32 write_idx)
22055 {
22056 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22057 unsigned long flags;
22058
22059 if (!ring || ring->magic != DHD_RING_MAGIC) {
22060 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22061 return;
22062 }
22063
22064 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22065 if (ring->type == DHD_RING_TYPE_FIXED) {
22066 __dhd_fixed_ring_set_write_idx(&ring->fixed, write_idx);
22067 }
22068 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22069 }
22070
22071 uint32
22072 dhd_ring_get_read_idx(void *_ring)
22073 {
22074 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22075 uint32 read_idx = DHD_RING_IDX_INVALID;
22076 unsigned long flags;
22077
22078 if (!ring || ring->magic != DHD_RING_MAGIC) {
22079 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22080 return read_idx;
22081 }
22082
22083 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22084 if (ring->type == DHD_RING_TYPE_FIXED) {
22085 read_idx = __dhd_fixed_ring_get_read_idx(&ring->fixed);
22086 }
22087 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22088
22089 return read_idx;
22090 }
22091
22092 uint32
22093 dhd_ring_get_write_idx(void *_ring)
22094 {
22095 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22096 uint32 write_idx = DHD_RING_IDX_INVALID;
22097 unsigned long flags;
22098
22099 if (!ring || ring->magic != DHD_RING_MAGIC) {
22100 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22101 return write_idx;
22102 }
22103
22104 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22105 if (ring->type == DHD_RING_TYPE_FIXED) {
22106 write_idx = __dhd_fixed_ring_get_write_idx(&ring->fixed);
22107 }
22108 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22109
22110 return write_idx;
22111 }
22112
22113 /* Get latest element */
22114 void *
22115 dhd_ring_get_last(void *_ring)
22116 {
22117 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22118 void *ret = NULL;
22119 unsigned long flags;
22120
22121 if (!ring || ring->magic != DHD_RING_MAGIC) {
22122 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22123 return NULL;
22124 }
22125
22126 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22127 if (ring->type == DHD_RING_TYPE_FIXED) {
22128 ret = __dhd_fixed_ring_get_last(&ring->fixed);
22129 }
22130 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22131 ret = __dhd_singleidx_ring_get_last(&ring->single);
22132 }
22133 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22134 return ret;
22135 }
22136
/* Get the next slot that can be written.
 * Unread elements may be overwritten;
 * returns NULL if the next slot is locked.
 */
22141 void *
22142 dhd_ring_get_empty(void *_ring)
22143 {
22144 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22145 void *ret = NULL;
22146 unsigned long flags;
22147
22148 if (!ring || ring->magic != DHD_RING_MAGIC) {
22149 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22150 return NULL;
22151 }
22152
22153 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22154 if (ring->type == DHD_RING_TYPE_FIXED) {
22155 ret = __dhd_fixed_ring_get_empty(&ring->fixed);
22156 }
22157 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22158 ret = __dhd_singleidx_ring_get_empty(&ring->single);
22159 }
22160 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22161 return ret;
22162 }
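
/*
 * Write-side usage sketch (my_rec_t, ring and seq are illustrative):
 * reserve a slot and fill it in place; NULL means the next slot is
 * locked and the caller should retry later.
 *
 *	my_rec_t *slot = (my_rec_t *)dhd_ring_get_empty(ring);
 *	if (slot != NULL) {
 *		slot->seq = seq++;
 *	}
 */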
22163
22164 void *
22165 dhd_ring_get_next(void *_ring, void *cur)
22166 {
22167 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22168 void *ret = NULL;
22169 unsigned long flags;
22170
22171 if (!ring || ring->magic != DHD_RING_MAGIC) {
22172 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22173 return NULL;
22174 }
22175
22176 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22177 if (ring->type == DHD_RING_TYPE_FIXED) {
22178 ret = __dhd_fixed_ring_get_next(&ring->fixed, cur, ring->type);
22179 }
22180 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22181 ret = __dhd_singleidx_ring_get_next(&ring->single, cur, ring->type);
22182 }
22183 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22184 return ret;
22185 }
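
/*
 * Read-side usage sketch: walk either ring type from oldest to newest
 * (ring and consume() are illustrative):
 *
 *	void *elem = dhd_ring_get_first(ring);
 *	while (elem != NULL) {
 *		consume(elem);
 *		elem = dhd_ring_get_next(ring, elem);
 *	}
 */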
22186
22187 void *
22188 dhd_ring_get_prev(void *_ring, void *cur)
22189 {
22190 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22191 void *ret = NULL;
22192 unsigned long flags;
22193
22194 if (!ring || ring->magic != DHD_RING_MAGIC) {
22195 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22196 return NULL;
22197 }
22198
22199 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22200 if (ring->type == DHD_RING_TYPE_FIXED) {
22201 ret = __dhd_fixed_ring_get_prev(&ring->fixed, cur, ring->type);
22202 }
22203 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22204 ret = __dhd_singleidx_ring_get_prev(&ring->single, cur, ring->type);
22205 }
22206 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22207 return ret;
22208 }
22209
22210 int
22211 dhd_ring_get_cur_size(void *_ring)
22212 {
22213 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22214 int cnt = 0;
22215 unsigned long flags;
22216
22217 if (!ring || ring->magic != DHD_RING_MAGIC) {
22218 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22219 return cnt;
22220 }
22221
22222 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22223 if (ring->type == DHD_RING_TYPE_FIXED) {
22224 cnt = __dhd_fixed_ring_get_cur_size(&ring->fixed);
22225 }
22226 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22227 return cnt;
22228 }
22229
/* protect the elements between first_ptr and last_ptr */
22231 void
22232 dhd_ring_lock(void *_ring, void *first_ptr, void *last_ptr)
22233 {
22234 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22235 unsigned long flags;
22236
22237 if (!ring || ring->magic != DHD_RING_MAGIC) {
22238 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22239 return;
22240 }
22241
22242 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22243 if (ring->type == DHD_RING_TYPE_FIXED) {
22244 __dhd_fixed_ring_lock(&ring->fixed, first_ptr, last_ptr, ring->type);
22245 }
22246 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22247 }
22248
/* release the entire locked range */
22250 void
22251 dhd_ring_lock_free(void *_ring)
22252 {
22253 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22254 unsigned long flags;
22255
22256 if (!ring || ring->magic != DHD_RING_MAGIC) {
22257 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22258 return;
22259 }
22260
22261 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22262 if (ring->type == DHD_RING_TYPE_FIXED) {
22263 __dhd_fixed_ring_lock_free(&ring->fixed);
22264 }
22265 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22266 }
22267
22268 void *
22269 dhd_ring_lock_get_first(void *_ring)
22270 {
22271 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22272 void *ret = NULL;
22273 unsigned long flags;
22274
22275 if (!ring || ring->magic != DHD_RING_MAGIC) {
22276 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22277 return NULL;
22278 }
22279
22280 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22281 if (ring->type == DHD_RING_TYPE_FIXED) {
22282 ret = __dhd_fixed_ring_lock_get_first(&ring->fixed);
22283 }
22284 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22285 return ret;
22286 }
22287
22288 void *
22289 dhd_ring_lock_get_last(void *_ring)
22290 {
22291 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22292 void *ret = NULL;
22293 unsigned long flags;
22294
22295 if (!ring || ring->magic != DHD_RING_MAGIC) {
22296 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22297 return NULL;
22298 }
22299
22300 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22301 if (ring->type == DHD_RING_TYPE_FIXED) {
22302 ret = __dhd_fixed_ring_lock_get_last(&ring->fixed);
22303 }
22304 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22305 return ret;
22306 }
22307
22308 int
22309 dhd_ring_lock_get_count(void *_ring)
22310 {
22311 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22312 int ret = BCME_ERROR;
22313 unsigned long flags;
22314
22315 if (!ring || ring->magic != DHD_RING_MAGIC) {
22316 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22317 return ret;
22318 }
22319
22320 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22321 if (ring->type == DHD_RING_TYPE_FIXED) {
22322 ret = __dhd_fixed_ring_lock_get_count(&ring->fixed);
22323 }
22324 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22325 return ret;
22326 }
22327
22328 /* free first locked element */
22329 void
22330 dhd_ring_lock_free_first(void *_ring)
22331 {
22332 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22333 unsigned long flags;
22334
22335 if (!ring || ring->magic != DHD_RING_MAGIC) {
22336 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22337 return;
22338 }
22339
22340 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22341 if (ring->type == DHD_RING_TYPE_FIXED) {
22342 __dhd_fixed_ring_lock_free_first(&ring->fixed);
22343 }
22344 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22345 }
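
/*
 * Lock-range usage sketch for a fixed ring (first, last and consume()
 * are illustrative): pin a span so the writer cannot recycle it, drain
 * it element by element, then release any remainder. Passing NULL for
 * first/last defaults to read_idx/write_idx.
 *
 *	dhd_ring_lock(ring, first, last);
 *	while (dhd_ring_lock_get_count(ring) > 0) {
 *		consume(dhd_ring_lock_get_first(ring));
 *		dhd_ring_lock_free_first(ring);
 *	}
 *	dhd_ring_lock_free(ring);
 */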
22346
22347 void
22348 dhd_ring_whole_lock(void *_ring)
22349 {
22350 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22351 unsigned long flags;
22352
22353 if (!ring || ring->magic != DHD_RING_MAGIC) {
22354 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22355 return;
22356 }
22357
22358 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22359 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22360 __dhd_singleidx_ring_whole_lock(&ring->single);
22361 }
22362 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22363 }
22364
22365 void
22366 dhd_ring_whole_unlock(void *_ring)
22367 {
22368 dhd_ring_info_t *ring = (dhd_ring_info_t *)_ring;
22369 unsigned long flags;
22370
22371 if (!ring || ring->magic != DHD_RING_MAGIC) {
22372 DHD_RING_ERR(("%s :INVALID RING INFO\n", __FUNCTION__));
22373 return;
22374 }
22375
22376 DHD_RING_SYNC_LOCK(ring->ring_sync, flags);
22377 if (ring->type == DHD_RING_TYPE_SINGLE_IDX) {
22378 __dhd_singleidx_ring_whole_unlock(&ring->single);
22379 }
22380 DHD_RING_SYNC_UNLOCK(ring->ring_sync, flags);
22381 }
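
/*
 * Single-index rings have no per-element lock; instead the whole ring
 * is frozen while a consumer walks it (sketch; e and snapshot() are
 * illustrative):
 *
 *	dhd_ring_whole_lock(ring);
 *	for (e = dhd_ring_get_first(ring); e; e = dhd_ring_get_next(ring, e)) {
 *		snapshot(e);
 *	}
 *	dhd_ring_whole_unlock(ring);
 */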
22382
22383 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0))
22384 #define DHD_VFS_INODE(dir) (dir->d_inode)
22385 #else
22386 #define DHD_VFS_INODE(dir) d_inode(dir)
22387 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 19, 0) */
22388
22389 #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0))
22390 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b)
22391 #else
22392 #define DHD_VFS_UNLINK(dir, b, c) vfs_unlink(DHD_VFS_INODE(dir), b, c)
22393 #endif /* LINUX_VERSION_CODE < KERNEL_VERSION(3, 13, 0) */

int
22395 dhd_file_delete(char *path)
22396 {
22397 struct path file_path;
22398 int err;
22399 struct dentry *dir;
22400
22401 err = kern_path(path, 0, &file_path);
22402
22403 if (err < 0) {
22404 DHD_ERROR(("Failed to get kern-path delete file: %s error: %d\n", path, err));
22405 return err;
22406 }
22407 if (
22408 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0))
22409 !d_is_file(file_path.dentry) ||
22410 #if (LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0))
22411 d_really_is_negative(file_path.dentry) ||
22412 #endif /* LINUX_VERSION_CODE > KERNEL_VERSION(4, 1, 0) */
22413 #endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(3, 13, 0) */
22414 FALSE)
22415 {
22416 err = -EINVAL;
22417 } else {
22418 dir = dget_parent(file_path.dentry);
22419
22420 if (!IS_ERR(dir)) {
22421 err = DHD_VFS_UNLINK(dir, file_path.dentry, NULL);
22422 dput(dir);
22423 } else {
22424 err = PTR_ERR(dir);
22425 }
22426 }
22427
22428 path_put(&file_path);
22429
22430 if (err < 0) {
22431 DHD_ERROR(("Failed to delete file: %s error: %d\n", path, err));
22432 }
22433
22434 return err;
22435 }
22436 #ifdef DHD_DUMP_MNGR
22437 static int
22438 dhd_dump_file_manage_idx(dhd_dump_file_manage_t *fm_ptr, char *fname)
22439 {
22440 int i;
22441 int fm_idx = -1;
22442
22443 for (i = 0; i < DHD_DUMP_TYPE_COUNT_MAX; i++) {
22444 if (strlen(fm_ptr->elems[i].type_name) == 0) {
22445 fm_idx = i;
22446 break;
22447 }
22448 if (!(strncmp(fname, fm_ptr->elems[i].type_name, strlen(fname)))) {
22449 fm_idx = i;
22450 break;
22451 }
22452 }
22453
22454 if (fm_idx == -1) {
22455 return fm_idx;
22456 }
22457
22458 if (strlen(fm_ptr->elems[fm_idx].type_name) == 0) {
22459 strncpy(fm_ptr->elems[fm_idx].type_name, fname, DHD_DUMP_TYPE_NAME_SIZE);
22460 fm_ptr->elems[fm_idx].type_name[DHD_DUMP_TYPE_NAME_SIZE - 1] = '\0';
22461 fm_ptr->elems[fm_idx].file_idx = 0;
22462 }
22463
22464 return fm_idx;
22465 }
22466
/*
 * dhd_dump_file_manage_enqueue - enqueue a dump file path
 * and delete the oldest file if the file count is at the max.
 */
22471 void
22472 dhd_dump_file_manage_enqueue(dhd_pub_t *dhd, char *dump_path, char *fname)
22473 {
22474 int fm_idx;
22475 int fp_idx;
22476 dhd_dump_file_manage_t *fm_ptr;
22477 DFM_elem_t *elem;
22478
22479 if (!dhd || !dhd->dump_file_manage) {
22480 DHD_ERROR(("%s(): dhdp=%p dump_file_manage=%p\n",
22481 __FUNCTION__, dhd, (dhd ? dhd->dump_file_manage : NULL)));
22482 return;
22483 }
22484
22485 fm_ptr = dhd->dump_file_manage;
22486
22487 /* find file_manage idx */
22488 DHD_INFO(("%s(): fname: %s dump_path: %s\n", __FUNCTION__, fname, dump_path));
22489 if ((fm_idx = dhd_dump_file_manage_idx(fm_ptr, fname)) < 0) {
22490 DHD_ERROR(("%s(): Out of file manager entries, fname: %s\n",
22491 __FUNCTION__, fname));
22492 return;
22493 }
22494
22495 elem = &fm_ptr->elems[fm_idx];
22496 fp_idx = elem->file_idx;
22497 DHD_INFO(("%s(): fm_idx: %d fp_idx: %d path: %s\n",
22498 __FUNCTION__, fm_idx, fp_idx, elem->file_path[fp_idx]));
22499
22500 /* delete oldest file */
22501 if (strlen(elem->file_path[fp_idx]) != 0) {
22502 if (dhd_file_delete(elem->file_path[fp_idx]) < 0) {
22503 DHD_ERROR(("%s(): Failed to delete file: %s\n",
22504 __FUNCTION__, elem->file_path[fp_idx]));
22505 } else {
22506 DHD_ERROR(("%s(): Successed to delete file: %s\n",
22507 __FUNCTION__, elem->file_path[fp_idx]));
22508 }
22509 }
22510
22511 /* save dump file path */
22512 strncpy(elem->file_path[fp_idx], dump_path, DHD_DUMP_FILE_PATH_SIZE);
22513 elem->file_path[fp_idx][DHD_DUMP_FILE_PATH_SIZE - 1] = '\0';
22514
22515 /* change file index to next file index */
22516 elem->file_idx = (elem->file_idx + 1) % DHD_DUMP_FILE_COUNT_MAX;
22517 }
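
/*
 * Example: file_path[] acts as a ring of DHD_DUMP_FILE_COUNT_MAX slots
 * per dump type; once every slot has been used, the next enqueue wraps
 * file_idx back around and deletes the oldest file before recording the
 * new path, so at most DHD_DUMP_FILE_COUNT_MAX dumps per type remain on
 * disk.
 */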
22518 #endif /* DHD_DUMP_MNGR */
22519
22520 #ifdef DHD_MAP_LOGGING
22521 /* Will be called from SMMU fault handler */
22522 void
22523 dhd_smmu_fault_handler(uint32 axid, ulong fault_addr)
22524 {
	dhd_pub_t *dhdp = (dhd_pub_t *)g_dhd_pub;
	uint32 irq = (uint32)-1;

	DHD_ERROR(("%s: Trigger SMMU Fault\n", __FUNCTION__));
	DHD_ERROR(("%s: axid:0x%x, fault_addr:0x%lx\n", __FUNCTION__, axid, fault_addr));

	if (!dhdp) {
		DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
		return;
	}

	dhdp->smmu_fault_occurred = TRUE;
22531 #ifdef DNGL_AXI_ERROR_LOGGING
22532 dhdp->axi_error = TRUE;
22533 dhdp->axi_err_dump->axid = axid;
22534 dhdp->axi_err_dump->fault_address = fault_addr;
22535 #endif /* DNGL_AXI_ERROR_LOGGING */
22536
22537 /* Disable PCIe IRQ */
22538 dhdpcie_get_pcieirq(dhdp->bus, &irq);
22539 if (irq != (uint32)-1) {
22540 disable_irq_nosync(irq);
22541 }
22542
22543 /* Take debug information first */
22544 DHD_OS_WAKE_LOCK(dhdp);
22545 dhd_prot_smmu_fault_dump(dhdp);
22546 DHD_OS_WAKE_UNLOCK(dhdp);
22547
22548 /* Take AXI information if possible */
22549 #ifdef DNGL_AXI_ERROR_LOGGING
22550 #ifdef DHD_USE_WQ_FOR_DNGL_AXI_ERROR
22551 dhd_axi_error_dispatch(dhdp);
22552 #else
22553 dhd_axi_error(dhdp);
22554 #endif /* DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
22555 #endif /* DNGL_AXI_ERROR_LOGGING */
22556 }
22557 EXPORT_SYMBOL(dhd_smmu_fault_handler);
22558 #endif /* DHD_MAP_LOGGING */
22559
22560 #ifdef DHD_WIFI_SHUTDOWN
22561 void wifi_plat_dev_drv_shutdown(struct platform_device *pdev)
22562 {
22563 dhd_pub_t *dhd_pub = NULL;
22564 dhd_info_t *dhd_info = NULL;
22565 dhd_if_t *dhd_if = NULL;
22566
22567 DHD_ERROR(("%s enter\n", __FUNCTION__));
22568 dhd_pub = g_dhd_pub;
22569
22570 if (dhd_os_check_if_up(dhd_pub)) {
22571 dhd_info = (dhd_info_t *)dhd_pub->info;
22572 dhd_if = dhd_info->iflist[0];
22573 ASSERT(dhd_if);
22574 ASSERT(dhd_if->net);
22575 if (dhd_if && dhd_if->net) {
22576 dhd_stop(dhd_if->net);
22577 }
22578 }
22579 }
22580 #endif /* DHD_WIFI_SHUTDOWN */
22581
22582 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0))
22583 int
22584 compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
22585 {
22586 return (int)kernel_read(file, addr, (size_t)count, &offset);
22587 }
22588 #else
22589 int
22590 compat_kernel_read(struct file *file, loff_t offset, char *addr, unsigned long count)
22591 {
22592 return kernel_read(file, offset, addr, count);
22593 }
22594 #endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 14, 0)) */
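
/*
 * Usage sketch: the wrapper above hides the kernel_read() signature
 * change in 4.14, so callers stay version-agnostic (fp and buf are
 * illustrative):
 *
 *	char buf[64];
 *	int nread = compat_kernel_read(fp, 0, buf, sizeof(buf));
 *	if (nread < 0)
 *		DHD_ERROR(("read failed: %d\n", nread));
 */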
22595
22596 #ifdef DHDTCPSYNC_FLOOD_BLK
22597 static void dhd_blk_tsfl_handler(struct work_struct * work)
22598 {
22599 dhd_if_t *ifp = NULL;
22600 dhd_pub_t *dhdp = NULL;
22601 /* Ignore compiler warnings due to -Werror=cast-qual */
22602 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22603 #pragma GCC diagnostic push
22604 #pragma GCC diagnostic ignored "-Wcast-qual"
22605 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22606 ifp = container_of(work, dhd_if_t, blk_tsfl_work);
22607 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22608 #pragma GCC diagnostic pop
22609 #endif /* STRICT_GCC_WARNINGS && __GNUC__ */
22610 if (ifp) {
22611 dhdp = &ifp->info->pub;
22612 if (dhdp) {
22613 if ((dhdp->op_mode & DHD_FLAG_P2P_GO_MODE)||
22614 (dhdp->op_mode & DHD_FLAG_HOSTAP_MODE)) {
22615 DHD_ERROR(("Disassoc due to TCP SYNC FLOOD ATTACK\n"));
22616 wl_cfg80211_del_all_sta(ifp->net, WLAN_REASON_UNSPECIFIED);
22617 } else if ((dhdp->op_mode & DHD_FLAG_P2P_GC_MODE)||
22618 (dhdp->op_mode & DHD_FLAG_STA_MODE)) {
22619 DHD_ERROR(("Diconnect due to TCP SYNC FLOOD ATTACK\n"));
22620 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_UNSPECIFIED);
22621 }
22622 }
22623 }
22624 }
22625 void dhd_reset_tcpsync_info_by_ifp(dhd_if_t *ifp)
22626 {
22627 ifp->tsync_rcvd = 0;
22628 ifp->tsyncack_txed = 0;
22629 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22630 }
22631 void dhd_reset_tcpsync_info_by_dev(struct net_device *dev)
22632 {
22633 dhd_if_t *ifp = NULL;
22634 if (dev) {
22635 ifp = DHD_DEV_IFP(dev);
22636 }
22637 if (ifp) {
22638 ifp->tsync_rcvd = 0;
22639 ifp->tsyncack_txed = 0;
22640 ifp->last_sync = DIV_U64_BY_U32(OSL_LOCALTIME_NS(), NSEC_PER_SEC);
22641 }
22642 }
22643 #endif /* DHDTCPSYNC_FLOOD_BLK */
22644
22645 #ifdef DHD_4WAYM4_FAIL_DISCONNECT
22646 static void dhd_m4_state_handler(struct work_struct *work)
22647 {
22648 dhd_if_t *ifp = NULL;
22649 /* Ignore compiler warnings due to -Werror=cast-qual */
22650 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22651 #pragma GCC diagnostic push
22652 #pragma GCC diagnostic ignored "-Wcast-qual"
22653 #endif // endif
22654 struct delayed_work *dw = to_delayed_work(work);
22655 ifp = container_of(dw, dhd_if_t, m4state_work);
22656 #if defined(STRICT_GCC_WARNINGS) && defined(__GNUC__)
22657 #pragma GCC diagnostic pop
22658 #endif // endif
22659
22660 if (ifp && ifp->net &&
		(OSL_ATOMIC_READ(ifp->info->pub.osh, &ifp->m4state) == M4_TXFAILED)) {
22662 DHD_ERROR(("Disassoc for 4WAY_HANDSHAKE_TIMEOUT at %s\n",
22663 ifp->net->name));
22664 wl_cfg80211_disassoc(ifp->net, WLAN_REASON_4WAY_HANDSHAKE_TIMEOUT);
22665 }
22666 }
22667
22668 void
22669 dhd_eap_txcomplete(dhd_pub_t *dhdp, void *txp, bool success, int ifidx)
22670 {
22671 dhd_info_t *dhd = (dhd_info_t *)(dhdp->info);
22672 struct ether_header *eh;
22673 uint16 type;
22674
22675 if (!success) {
22676 dhd_prot_hdrpull(dhdp, NULL, txp, NULL, NULL);
22677
22678 eh = (struct ether_header *)PKTDATA(dhdp->osh, txp);
22679 type = ntoh16(eh->ether_type);
22680 if (type == ETHER_TYPE_802_1X) {
22681 if (dhd_is_4way_msg((uint8 *)eh) == EAPOL_4WAY_M4) {
22682 dhd_if_t *ifp = NULL;
22683 ifp = dhd->iflist[ifidx];
22684 if (!ifp || !ifp->net) {
22685 return;
22686 }
22687
22688 DHD_INFO(("%s: M4 TX failed on %d.\n",
22689 __FUNCTION__, ifidx));
22690
22691 OSL_ATOMIC_SET(dhdp->osh, &ifp->m4state, M4_TXFAILED);
22692 schedule_delayed_work(&ifp->m4state_work,
22693 msecs_to_jiffies(MAX_4WAY_TIMEOUT_MS));
22694 }
22695 }
22696 }
22697 }
22698
22699 void
22700 dhd_cleanup_m4_state_work(dhd_pub_t *dhdp, int ifidx)
22701 {
22702 dhd_info_t *dhdinfo;
22703 dhd_if_t *ifp;
22704
22705 if ((ifidx < 0) || (ifidx >= DHD_MAX_IFS)) {
22706 DHD_ERROR(("%s: invalid ifidx %d\n", __FUNCTION__, ifidx));
22707 return;
22708 }
22709
22710 dhdinfo = (dhd_info_t *)(dhdp->info);
22711 if (!dhdinfo) {
22712 DHD_ERROR(("%s: dhdinfo is NULL\n", __FUNCTION__));
22713 return;
22714 }
22715
22716 ifp = dhdinfo->iflist[ifidx];
22717 if (ifp) {
22718 cancel_delayed_work_sync(&ifp->m4state_work);
22719 }
22720 }
22721 #endif /* DHD_4WAYM4_FAIL_DISCONNECT */
22722
22723 #ifdef DHD_HP2P
22724 unsigned long
22725 dhd_os_hp2plock(dhd_pub_t *pub)
22726 {
22727 dhd_info_t *dhd;
22728 unsigned long flags = 0;
22729
22730 dhd = (dhd_info_t *)(pub->info);
22731
22732 if (dhd) {
22733 spin_lock_irqsave(&dhd->hp2p_lock, flags);
22734 }
22735
22736 return flags;
22737 }
22738
22739 void
22740 dhd_os_hp2punlock(dhd_pub_t *pub, unsigned long flags)
22741 {
22742 dhd_info_t *dhd;
22743
22744 dhd = (dhd_info_t *)(pub->info);
22745
22746 if (dhd) {
22747 spin_unlock_irqrestore(&dhd->hp2p_lock, flags);
22748 }
22749 }
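
/*
 * Caller pattern (sketch): the returned flags must be handed back to the
 * matching unlock, exactly like spin_lock_irqsave()/spin_unlock_irqrestore():
 *
 *	unsigned long flags = dhd_os_hp2plock(pub);
 *	... update HP2P state ...
 *	dhd_os_hp2punlock(pub, flags);
 */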
22750 #endif /* DHD_HP2P */
22751 #ifdef DNGL_AXI_ERROR_LOGGING
22752 static void
22753 dhd_axi_error_dump(void *handle, void *event_info, u8 event)
22754 {
22755 dhd_info_t *dhd = (dhd_info_t *)handle;
22756 dhd_pub_t *dhdp = NULL;
22757
22758 if (!dhd) {
22759 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
22760 goto exit;
22761 }
22762
22763 dhdp = &dhd->pub;
22764 if (!dhdp) {
22765 DHD_ERROR(("%s: dhdp is NULL\n", __FUNCTION__));
22766 goto exit;
22767 }
22768
	/*
	 * Save the AXI error information to a file first,
	 * because a kernel panic is triggered right after this.
	 * After the DHD resets, it reads the file and runs the hang-event
	 * process to send the stored AXI error to the Bigdata server.
	 */
22775 if (dhdp->axi_err_dump->etd_axi_error_v1.version != HND_EXT_TRAP_AXIERROR_VERSION_1) {
22776 DHD_ERROR(("%s: Invalid AXI version: 0x%x\n",
22777 __FUNCTION__, dhdp->axi_err_dump->etd_axi_error_v1.version));
22778 }
22779
22780 DHD_OS_WAKE_LOCK(dhdp);
22781 #ifdef DHD_FW_COREDUMP
22782 #ifdef DHD_SSSR_DUMP
22783 dhdp->collect_sssr = TRUE;
22784 #endif /* DHD_SSSR_DUMP */
22785 DHD_ERROR(("%s: scheduling mem dump.. \n", __FUNCTION__));
22786 dhd_schedule_memdump(dhdp, dhdp->soc_ram, dhdp->soc_ram_length);
22787 #endif /* DHD_FW_COREDUMP */
22788 DHD_OS_WAKE_UNLOCK(dhdp);
22789
22790 exit:
22791 /* Trigger kernel panic after taking necessary dumps */
22792 BUG_ON(1);
22793 }
22794
22795 void dhd_schedule_axi_error_dump(dhd_pub_t *dhdp, void *type)
22796 {
22797 DHD_ERROR(("%s: scheduling axi_error_dump.. \n", __FUNCTION__));
22798 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
22799 type, DHD_WQ_WORK_AXI_ERROR_DUMP,
22800 dhd_axi_error_dump, DHD_WQ_WORK_PRIORITY_HIGH);
22801 }
22802 #endif /* DNGL_AXI_ERROR_LOGGING */
22803
22804 #ifdef BCMPCIE
22805 static void
22806 dhd_cto_recovery_handler(void *handle, void *event_info, u8 event)
22807 {
22808 dhd_info_t *dhd = handle;
22809 dhd_pub_t *dhdp = NULL;
22810
22811 if (!dhd) {
22812 DHD_ERROR(("%s: dhd is NULL\n", __FUNCTION__));
22813 BUG_ON(1);
22814 return;
22815 }
22816
22817 dhdp = &dhd->pub;
22818 dhdpcie_cto_recovery_handler(dhdp);
22819 }
22820
22821 void
22822 dhd_schedule_cto_recovery(dhd_pub_t *dhdp)
22823 {
22824 DHD_ERROR(("%s: scheduling cto recovery.. \n", __FUNCTION__));
22825 dhd_deferred_schedule_work(dhdp->info->dhd_deferred_wq,
22826 NULL, DHD_WQ_WORK_CTO_RECOVERY,
22827 dhd_cto_recovery_handler, DHD_WQ_WORK_PRIORITY_HIGH);
22828 }
22829 #endif /* BCMPCIE */
22830
22831 #ifdef SUPPORT_SET_TID
22832 /*
 * Set a custom TID value for UDP frames based on the UID value.
 * This is triggered by the Android private command below.
22835 * DRIVER SET_TID <Mode:uint8> <Target UID:uint32> <Custom TID:uint8>
22836 * Mode 0(SET_TID_OFF) : Disable changing TID
22837 * Mode 1(SET_TID_ALL_UDP) : Change TID for all UDP frames
22838 * Mode 2(SET_TID_BASED_ON_UID) : Change TID for UDP frames based on target UID
22839 */
22840 void
22841 dhd_set_tid_based_on_uid(dhd_pub_t *dhdp, void *pkt)
22842 {
22843 struct ether_header *eh = NULL;
22844 struct sock *sk = NULL;
22845 uint8 *pktdata = NULL;
22846 uint8 *ip_hdr = NULL;
22847 uint8 cur_prio;
22848 uint8 prio;
22849 uint32 uid;
22850
22851 if (dhdp->tid_mode == SET_TID_OFF) {
22852 return;
22853 }
22854
22855 pktdata = (uint8 *)PKTDATA(dhdp->osh, pkt);
22856 eh = (struct ether_header *) pktdata;
22857 ip_hdr = (uint8 *)eh + ETHER_HDR_LEN;
22858
22859 if (IPV4_PROT(ip_hdr) != IP_PROT_UDP) {
22860 return;
22861 }
22862
22863 cur_prio = PKTPRIO(pkt);
22864 prio = dhdp->target_tid;
22865 uid = dhdp->target_uid;
22866
22867 if ((cur_prio == prio) ||
22868 (cur_prio != PRIO_8021D_BE)) {
22869 return;
22870 }
22871
22872 sk = ((struct sk_buff*)(pkt))->sk;
22873
22874 if ((dhdp->tid_mode == SET_TID_ALL_UDP) ||
22875 (sk && (uid == __kuid_val(sock_i_uid(sk))))) {
22876 PKTSETPRIO(pkt, prio);
22877 }
22878 }
22879 #endif /* SUPPORT_SET_TID */
22880