/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Portions of this code are copyright (c) 2021 Cypress Semiconductor Corporation
 *
 * Copyright (C) 1999-2017, Broadcom Corporation
 *
 * Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 * As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module. An independent module is a module which is not
 * derived from this software. The special exception does not apply to any
 * modifications of the software.
 *
 * Notwithstanding the above, under no circumstances may you combine this
 * software in any way with any other Broadcom software provided under a license
 * other than the GPL, without Broadcom's express prior written consent.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

/*
 * Do not include this header except in dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub)
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;
	/* For supporting multiple interfaces.
	 * static_ifs hold the net interfaces without a valid FW IF
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];

	void *adapter;		/* adapter information: interrupt, fw path, etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];		/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;

#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
	uint32 default_wd_interval;

	timer_list_compat_t timer;
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	timer_list_compat_t rpm_timer;
	bool rpm_timer_valid;
	tsk_ctl_t thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t dhd_lock;

	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl;
	tsk_ctl_t thr_wdt_ctl;

	tsk_ctl_t thr_rxf_ctl;
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_PM_WAKELOCKS) || defined(CONFIG_HAS_WAKELOCK)
	struct wake_lock wl_wifi;	/* Wifi wakelock */
	struct wake_lock wl_rxwake;	/* Wifi rx wakelock */
	struct wake_lock wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wake_lock wl_wdwake;	/* Wifi wd wakelock */
	struct wake_lock wl_evtwake;	/* Wifi event wakelock */
	struct wake_lock wl_pmwake;	/* Wifi pm handler wakelock */
	struct wake_lock wl_txflwake;	/* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wake_lock wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wake_lock wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
#endif /* CONFIG_PM_WAKELOCKS || CONFIG_HAS_WAKELOCK */

#if defined(OEM_ANDROID)
	/* net_device interface lock; prevents races between net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* OEM_ANDROID */
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;
	dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq;
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif /* DEBUG_CPU_FREQ */
	unsigned int unit;
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32 psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	struct timer_list join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	struct timer_list scan_timer;
	bool scan_timer_active;
#endif /* DHD_DEBUG */
#if defined(DHD_LB)
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary;		/* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new;	/* temp */

	struct notifier_block cpu_notifier;

	/* Tasklet to handle Tx Completion packet freeing */
	struct tasklet_struct tx_compl_tasklet;
	atomic_t tx_compl_cpu;

	/* Tasklet to handle RxBuf Post during Rx completion */
	struct tasklet_struct rx_compl_tasklet;
	atomic_t rx_compl_cpu;

	/* NAPI struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
	 * then appended to rx_napi_queue (with the lock held) and rx_napi_struct
	 * is scheduled to run on rx_napi_cpu. See the illustrative dispatch
	 * sketch after this structure.
	 */
	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
	struct napi_struct rx_napi_struct ____cacheline_aligned;
	atomic_t rx_napi_cpu;			/* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct tx_dispatcher_work;
	struct work_struct rx_compl_dispatcher_work;

	/* Number of times the DPC tasklet ran */
	uint32 dhd_dpc_cnt;
	/* Number of times NAPI processing was scheduled */
	uint32 napi_sched_cnt;
	/* Number of times NAPI processing ran on each available core */
	uint32 *napi_percpu_run_cnt;
	/* Number of times RX completions were scheduled */
	uint32 rxc_sched_cnt;
	/* Number of times RX completion ran on each available core */
	uint32 *rxc_percpu_run_cnt;
	/* Number of times TX completions were scheduled */
	uint32 txc_sched_cnt;
	/* Number of times TX completions ran on each available core */
	uint32 *txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32 *cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32 *cpu_offline_cnt;

	/* Number of times TX processing ran on each core */
	uint32 *txp_percpu_run_cnt;
	/* Number of times TX start ran on each core */
	uint32 *tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required for TX
	 * processing. For RX, the dongle can send a bunch of rx completions,
	 * hence we took a 3-queue approach:
	 * enqueue - adds the skbs to rx_pend_queue
	 * dispatch - takes a lock and moves the list of skbs from the pend
	 * queue to the napi queue
	 * napi processing - copies the pend queue into a local queue and
	 * works on it.
	 * But TX is going to be one skb at a time, so we are just thinking
	 * of using only one queue and the lock-protected skb queue functions
	 * to add and process it (see the enqueue sketch after this structure).
	 * If that is inefficient we will revisit the queue design.
	 */

	/* When NET_TX tries to send a TX packet, put it into tx_pend_queue */
	/* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
	/*
	 * From the tasklet that actually sends out data,
	 * copy the list tx_pend_queue into tx_active_queue. Thereby we need
	 * the spinlock only to perform the copy; the rest of the code, i.e.
	 * the code that constructs tx_pend_queue and the code that processes
	 * tx_active_queue, can be lockless. The concept is borrowed as-is
	 * from RX processing.
	 */
	/* struct sk_buff_head tx_active_queue ____cacheline_aligned; */

	/* Control TXP at runtime, enabled by default */
	atomic_t lb_txp_active;

	/* Control RXP at runtime, enabled by default */
	atomic_t lb_rxp_active;

	/*
	 * When NET_TX tries to send a TX packet, put it into tx_pend_queue.
	 * For now, the processing tasklet will also directly operate on this
	 * queue.
	 */
	struct sk_buff_head tx_pend_queue ____cacheline_aligned;

	/* CPU on which the DHD Tx is happening */
	atomic_t tx_cpu;

	/* CPU on which the network stack is calling DHD's xmit function */
	atomic_t net_tx_cpu;

	/* Tasklet context from which DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On each CPU, when the NAPI RX packet processing callback is invoked,
	 * the number of packets processed is captured in this data structure.
	 * It is difficult to capture the "exact" number of packets processed,
	 * so, with the packet counter being 32 bits, we keep a bucket with
	 * 8 bins (2^1, 2^2 ... 2^8). The number of packets processed is
	 * rounded up to the next power of 2 and the value in the appropriate
	 * bin gets incremented (see the illustrative helper after this
	 * structure).
	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the
	 * packet counts processed are as follows (assume the bin counters
	 * start at 0):
	 * iteration 1 - 10 (the 2^4 bin counter increments to 1)
	 * iteration 2 - 30 (the 2^5 bin counter increments to 1)
	 * iteration 3 - 15 (the 2^4 bin counter increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t thr_logtrace_ctl;
#else
	struct delayed_work event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;
	struct kobject dhd_conf_file_kobj;
	struct timer_list timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];
#endif /* defined(BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev;	/* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint monitor_len;
	uint monitor_type;	/* monitor pseudo device */
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock;	/* lock for sdio bus apis shared between WLAN & BT */
	int bus_user_count;	/* user count of the sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head evt_trace_queue ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif /* DHD_DEBUG_UART */
	struct mutex logdump_lock;
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t hp2p_lock;
#endif /* DHD_HP2P */
} dhd_info_t;
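
#if defined(DHD_LB)
/*
 * Illustrative sketches for the DHD_LB machinery documented in dhd_info
 * above. These are editorial examples only, not part of the driver API:
 * the *_sketch function names are hypothetical and the bodies are minimal
 * sketches of the behaviour the structure comments describe.
 */

/*
 * The dispatch step from the rx_napi comment above: splice rx_pend_queue
 * onto rx_napi_queue under the queue's internal lock, then kick NAPI so
 * the poll callback can drain rx_napi_queue in softirq context.
 */
static inline void
dhd_rx_napi_dispatch_sketch(dhd_info_t *dhd)
{
	unsigned long flags;

	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	napi_schedule(&dhd->rx_napi_struct);
}

/*
 * The single-queue TX flow from the TODO above: NET_TX enqueues onto
 * tx_pend_queue with the lock-protected skb queue helpers and the
 * tx_tasklet drains it one skb at a time.
 */
static inline void
dhd_lb_tx_enqueue_sketch(dhd_info_t *dhd, struct sk_buff *skb)
{
	/* skb_queue_tail() takes the queue's internal lock */
	skb_queue_tail(&dhd->tx_pend_queue, skb);
	tasklet_schedule(&dhd->tx_tasklet);
}

/*
 * One way to map a per-iteration packet count onto the power-of-2
 * histogram bins described above, assuming HIST_BIN_SIZE bins labelled
 * 2^1 .. 2^HIST_BIN_SIZE.
 */
static inline uint32
dhd_histo_bin_sketch(uint32 count)
{
	uint32 pwr = 1;

	/* Round count up to the next power of 2, saturating at the last bin */
	while ((pwr < HIST_BIN_SIZE) && ((1U << pwr) < count))
		pwr++;
	return pwr - 1;	/* zero-based bin index, e.g. count 10 -> the 2^4 bin */
}
#endif /* DHD_LB */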

extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct *work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_fn(struct work_struct *work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
#endif /* DHD_LB_RXP */

void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE < 4.10.0 */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
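
/*
 * Illustrative sketch only (hypothetical name, not the shipping
 * implementation): how the version-split hotplug hooks above could be
 * wired up. Assumes <linux/cpu.h> (and <linux/cpuhotplug.h> on newer
 * kernels) is visible to this translation unit; the "dhd:online" state
 * name is an arbitrary example.
 */
static inline int
dhd_cpuhp_register_sketch(dhd_info_t *dhd)
{
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
	/* Dynamic hotplug state: returns the allocated state (> 0) on success */
	int ret = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "dhd:online",
		dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
	return (ret < 0) ? ret : 0;
#else
	/* Legacy notifier chain: dhd_cpu_callback() demuxes the action code */
	dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
	return register_cpu_notifier(&dhd->cpu_notifier);
#endif
}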

#if defined(DHD_LB_TXC)
void dhd_lb_tx_compl_dispatch(dhd_pub_t *dhdp);
#endif /* DHD_LB_TXC */

#if defined(DHD_LB_RXC)
void dhd_lb_rx_compl_dispatch(dhd_pub_t *dhdp);
void dhd_rx_compl_dispatcher_fn(struct work_struct *work);
#endif /* DHD_LB_RXC */

#endif /* DHD_LB */

#if defined(DHD_LB_IRQSET) || defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_LB_IRQSET || DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */

#endif /* __DHD_LINUX_PRIV_H__ */