/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

/*
 * Do not include this header except in dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub)
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
    wl_iw_t iw;     /* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
    dhd_pub_t pub;
    /* For supporting multiple interfaces.
     * static_ifs hold the net ifaces without a valid FW IF.
     */
    dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];

    void *adapter;              /* adapter information, interrupt, fw path etc. */
    char fw_path[PATH_MAX];     /* path to firmware image */
    char nv_path[PATH_MAX];     /* path to nvram vars file */
#ifdef DHD_UCODE_DOWNLOAD
    char uc_path[PATH_MAX];     /* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

    /* serialize dhd iovars */
    struct mutex dhd_iovar_mutex;

    struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
    spinlock_t wlfc_spinlock;
#endif /* PROP_TXSTATUS */
    wait_queue_head_t ioctl_resp_wait;
    wait_queue_head_t d3ack_wait;
    wait_queue_head_t dhd_bus_busy_state_wait;
    wait_queue_head_t dmaxfer_wait;
    uint32 default_wd_interval;

    timer_list_compat_t timer;
    bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
    timer_list_compat_t rpm_timer;
    bool rpm_timer_valid;
    tsk_ctl_t thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
    struct tasklet_struct tasklet;
    spinlock_t sdlock;
    spinlock_t txqlock;
    spinlock_t dhd_lock;

    struct semaphore sdsem;
    tsk_ctl_t thr_dpc_ctl;
    tsk_ctl_t thr_wdt_ctl;

    tsk_ctl_t thr_rxf_ctl;
    spinlock_t rxf_lock;
    bool rxthread_enabled;

    /* Wakelocks */
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
    struct wake_lock wl_wifi;       /* Wifi wakelock */
    struct wake_lock wl_rxwake;     /* Wifi rx wakelock */
    struct wake_lock wl_ctrlwake;   /* Wifi ctrl wakelock */
    struct wake_lock wl_wdwake;     /* Wifi wd wakelock */
    struct wake_lock wl_evtwake;    /* Wifi event wakelock */
    struct wake_lock wl_pmwake;     /* Wifi pm handler wakelock */
    struct wake_lock wl_txflwake;   /* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
    struct wake_lock wl_intrwake;   /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
    struct wake_lock wl_scanwake;   /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */

#if defined(OEM_ANDROID)
    /* net_device interface lock, prevents race conditions between net_dev
     * interface calls and wifi_on or wifi_off
     */
    struct mutex dhd_net_if_mutex;
    struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
    struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* OEM_ANDROID */
    spinlock_t wakelock_spinlock;
    spinlock_t wakelock_evt_spinlock;
    uint32 wakelock_counter;
    int wakelock_wd_counter;
    int wakelock_rx_timeout_enable;
    int wakelock_ctrl_timeout_enable;
    bool waive_wakelock;
    uint32 wakelock_before_waive;

    /* Thread to issue ioctl for multicast */
    wait_queue_head_t ctrl_wait;
    atomic_t pend_8021x_cnt;
    dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
    dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
    struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
    u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
    spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
    bool cpufreq_fix_status;
    struct mutex cpufreq_fix;
    struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
    struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
    void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
    struct notifier_block freq_trans;
    int __percpu *new_freq;
#endif /* DEBUG_CPU_FREQ */
    unsigned int unit;
    struct notifier_block pm_notifier;
#ifdef DHD_PSTA
    uint32 psta_mode;   /* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
    uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
    dhd_dump_t *dump;
    struct timer_list join_timer;
    u32 join_timeout_val;
    bool join_timer_active;
    uint scan_time_count;
    struct timer_list scan_timer;
    bool scan_timer_active;
#endif /* DHD_DEBUG */
#if defined(DHD_LB)
    /* CPU load balance: dynamic CPU selection */

    /* Tracks the CPUs currently available for candidacy */
    cpumask_var_t cpumask_curr_avail;

    /* Primary and secondary CPU masks */
    cpumask_var_t cpumask_primary, cpumask_secondary;           /* configuration */
    cpumask_var_t cpumask_primary_new, cpumask_secondary_new;   /* temp */

    struct notifier_block cpu_notifier;

    /* Tasklet to handle Tx Completion packet freeing */
    struct tasklet_struct tx_compl_tasklet;
    atomic_t tx_compl_cpu;

    /* Tasklet to handle RxBuf Post during Rx completion */
    struct tasklet_struct rx_compl_tasklet;
    atomic_t rx_compl_cpu;

    /* Napi struct for handling rx packet sendup. Packets are removed from
     * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
     * then appended to rx_napi_queue (with the lock held) and rx_napi_struct
     * is scheduled to run on rx_napi_cpu.
     */
    struct sk_buff_head rx_pend_queue  ____cacheline_aligned;
    struct sk_buff_head rx_napi_queue  ____cacheline_aligned;
    struct napi_struct rx_napi_struct  ____cacheline_aligned;
    atomic_t rx_napi_cpu;               /* cpu on which the napi is dispatched */
    struct net_device *rx_napi_netdev;  /* netdev of primary interface */

    struct work_struct rx_napi_dispatcher_work;
    struct work_struct tx_compl_dispatcher_work;
    struct work_struct tx_dispatcher_work;
    struct work_struct rx_compl_dispatcher_work;

    /* Number of times the DPC tasklet ran */
    uint32 dhd_dpc_cnt;
    /* Number of times NAPI processing was scheduled */
    uint32 napi_sched_cnt;
    /* Number of times NAPI processing ran on each available core */
    uint32 *napi_percpu_run_cnt;
    /* Number of times RX completions were scheduled */
    uint32 rxc_sched_cnt;
    /* Number of times RX completion ran on each available core */
    uint32 *rxc_percpu_run_cnt;
    /* Number of times TX completions were scheduled */
    uint32 txc_sched_cnt;
    /* Number of times TX completions ran on each available core */
    uint32 *txc_percpu_run_cnt;
    /* CPU status */
    /* Number of times each CPU came online */
    uint32 *cpu_online_cnt;
    /* Number of times each CPU went offline */
    uint32 *cpu_offline_cnt;

    /* Number of times TX processing ran on each core */
    uint32 *txp_percpu_run_cnt;
    /* Number of times TX start ran on each core */
    uint32 *tx_start_percpu_run_cnt;

    /* Tx load balancing */

    /* TODO: Need to see whether batch processing is really required for TX
     * processing. In the RX case the dongle can send a bunch of rx
     * completions at once, hence we took a three-queue approach:
     *   enqueue         - adds the skbs to rx_pend_queue
     *   dispatch        - takes a lock and appends the list of skbs from the
     *                     pend queue to the napi queue
     *   napi processing - splices the napi queue into a local queue and
     *                     works on it
     * But TX is going to be one skb at a time, so we are just thinking of
     * using only one queue and the lock-protected skb queue functions to add
     * and process it. If that proves inefficient we'll revisit the queue
     * design.
     */
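    /*
     * A minimal sketch of the dispatch step described above (illustrative
     * only; the "dhd" pointer and "flags" variable are assumptions, not the
     * driver's actual implementation). Only the splice from the pend queue
     * to the napi queue takes the lock; enqueue and NAPI poll stay cheap:
     *
     *    unsigned long flags;
     *    spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
     *    skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
     *    spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
     *    napi_schedule(&dhd->rx_napi_struct);
     */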

    /* When the NET_TX path tries to send a TX packet, put it into tx_pend_queue */
    /* struct sk_buff_head tx_pend_queue  ____cacheline_aligned; */
    /*
     * From the tasklet that actually sends out data,
     * copy the list tx_pend_queue into tx_active_queue. That way we only
     * need the spinlock to perform the copy; the rest of the code, i.e. the
     * code that constructs tx_pend_queue and the code that processes
     * tx_active_queue, can be lockless. The concept is borrowed as-is from
     * RX processing.
     */
    /* struct sk_buff_head tx_active_queue  ____cacheline_aligned; */

    /* Control TXP at runtime, enabled by default */
    atomic_t lb_txp_active;

    /* Control RXP at runtime, enabled by default */
    atomic_t lb_rxp_active;

    /*
     * When the NET_TX path tries to send a TX packet, put it into
     * tx_pend_queue. For now, the processing tasklet will also directly
     * operate on this queue.
     */
    struct sk_buff_head tx_pend_queue  ____cacheline_aligned;

    /* cpu on which the DHD Tx is happening */
    atomic_t tx_cpu;

    /* CPU on which the network stack is calling the DHD's xmit function */
    atomic_t net_tx_cpu;

    /* Tasklet context from which the DHD's TX processing happens */
    struct tasklet_struct tx_tasklet;

    /*
     * Consumer Histogram - NAPI RX Packet processing
     * -----------------------------------------------
     * On each CPU, when the NAPI RX packet processing callback is invoked,
     * the number of packets processed is captured in this data structure.
     * It is difficult to capture the "exact" number of packets processed,
     * so, treating the packet counter as a 32-bit value, we keep a bucket
     * with 8 bins (2^1, 2^2 ... 2^8). The number of packets processed is
     * rounded up to the next power of 2 and the value in the corresponding
     * bin is incremented.
     * For example, assume that on CPU 1 NAPI Rx runs 3 times and the packet
     * counts processed are as follows (assume the bin counters start at 0):
     *   iteration 1 - 10 (the 2^4 bin counter increments to 1)
     *   iteration 2 - 30 (the 2^5 bin counter increments to 1)
     *   iteration 3 - 15 (the 2^4 bin counter increments by 1 to become 2)
     */
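    /*
     * A minimal sketch of the binning arithmetic described above
     * (illustrative only; "bin", "pkt_cnt" and the bin-to-index mapping are
     * assumptions, not the driver's actual code):
     *
     *    uint32 bin = 1;                  // bins cover 2^1 .. 2^8
     *    while ((1U << bin) < pkt_cnt && bin < HIST_BIN_SIZE)
     *        bin++;                       // round up to the next power of 2
     *    napi_rx_hist[bin - 1][cpu]++;    // 2^1 maps to index 0 (assumed)
     */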
    uint32 *napi_rx_hist[HIST_BIN_SIZE];
    uint32 *txc_hist[HIST_BIN_SIZE];
    uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
    struct work_struct axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
    tsk_ctl_t thr_logtrace_ctl;
#else
    struct delayed_work event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
    struct kobject dhd_kobj;
    struct kobject dhd_conf_file_kobj;
    struct timer_list timesync_timer;
#if defined(BT_OVER_SDIO)
    char btfw_path[PATH_MAX];
#endif /* defined(BT_OVER_SDIO) */
#ifdef WL_MONITOR
    struct net_device *monitor_dev; /* monitor pseudo device */
    struct sk_buff *monitor_skb;
    uint monitor_len;
    uint monitor_type;              /* monitor pseudo device */
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
    struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
    int bus_user_count;         /* User count of the sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
    struct sk_buff_head evt_trace_queue  ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
    struct workqueue_struct *tx_wq;
    struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef DHD_DEBUG_UART
    bool duart_execute;
#endif /* DHD_DEBUG_UART */
    struct mutex logdump_lock;
    /* indicates whether mem_dump was scheduled as a work-queue item or called directly */
    bool scheduled_memdump;
    struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
    spinlock_t hp2p_lock;
#endif /* DHD_HP2P */
} dhd_info_t;

extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct *work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_fn(struct work_struct *work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
#endif /* DHD_LB_RXP */

void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */
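/*
 * A minimal sketch of how dhd_register_cpuhp_callback() (declared below)
 * might wire up the two callbacks above on kernels >= 4.10 via the cpuhp
 * state machine (an illustrative assumption, not this driver's actual
 * implementation; "saved_cpuhp_state" is a hypothetical variable):
 *
 *    int ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd:online",
 *        dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
 *    if (ret < 0)
 *        return ret;              // no dynamic hotplug slot available
 *    saved_cpuhp_state = ret;     // needed later for cpuhp_remove_state()
 */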

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);

#if defined(DHD_LB_TXC)
void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
void dhd_rx_compl_dispatcher_fn(struct work_struct *work);
#endif /* DHD_LB_RXC */

#endif /* DHD_LB */

#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

#endif /* __DHD_LINUX_PRIV_H__ */