1 /* 2 * DHD Linux header file - contains private structure definition of the Linux specific layer 3 * 4 * Copyright (C) 2020, Broadcom. 5 * 6 * Unless you and Broadcom execute a separate written software license 7 * agreement governing use of this software, this software is licensed to you 8 * under the terms of the GNU General Public License version 2 (the "GPL"), 9 * available at http://www.broadcom.com/licenses/GPLv2.php, with the 10 * following added to such license: 11 * 12 * As a special exception, the copyright holders of this software give you 13 * permission to link this software with independent modules, and to copy and 14 * distribute the resulting executable under terms of your choice, provided that 15 * you also meet, for each linked independent module, the terms and conditions of 16 * the license of that module. An independent module is a module which is not 17 * derived from this software. The special exception does not apply to any 18 * modifications of the software. 19 * 20 * 21 * <<Broadcom-WL-IPTag/Open:>> 22 * 23 * $Id$ 24 */ 25 26 #ifndef __DHD_LINUX_PRIV_H__ 27 #define __DHD_LINUX_PRIV_H__ 28 29 #include <osl.h> 30 31 #ifdef SHOW_LOGTRACE 32 #include <linux/syscalls.h> 33 #include <event_log.h> 34 #endif /* SHOW_LOGTRACE */ 35 #include <linux/skbuff.h> 36 #include <linux/spinlock.h> 37 #include <linux/interrupt.h> 38 #ifdef CONFIG_COMPAT 39 #include <linux/compat.h> 40 #endif /* CONFIG COMPAT */ 41 #ifdef CONFIG_HAS_WAKELOCK 42 #include <linux/pm_wakeup.h> 43 #endif /* CONFIG_HAS_WAKELOCK */ 44 #include <dngl_stats.h> 45 #include <dhd.h> 46 #include <dhd_dbg.h> 47 #include <dhd_debug.h> 48 #include <dhd_linux.h> 49 #include <dhd_bus.h> 50 51 #ifdef PCIE_FULL_DONGLE 52 #include <bcmmsgbuf.h> 53 #include <dhd_flowring.h> 54 #endif /* PCIE_FULL_DONGLE */ 55 56 #ifdef DHD_QOS_ON_SOCK_FLOW 57 struct dhd_sock_qos_info; 58 #endif /* DHD_QOS_ON_SOCK_FLOW */ 59 60 /* 61 * Do not include this header except for the dhd_linux.c dhd_linux_sysfs.c 62 * 
Local private structure (extension of pub)
 *
 * dhd_info wraps the OS-independent dhd_pub_t with all Linux-specific
 * state: per-interface net devices, firmware/nvram paths, locks and
 * wait queues, wakelocks, timers, deferred-work handles and (optionally)
 * the CPU load-balancing machinery.  One instance exists per adapter.
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;		/* OS-independent public state (shared with common layer) */
	/* for supporting multiple interfaces.
	 * static_ifs hold the net ifaces without valid FW IF
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
	wifi_adapter_info_t *adapter;	/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char clm_path[PATH_MAX];	/* path to clm vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];		/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;	/* serializes protocol-layer transactions */
#ifdef PROP_TXSTATUS
	spinlock_t wlfc_spinlock;

#ifdef BCMDBUS
	/* saved IRQ flags for the wlfc locks (DBUS builds take them with irqsave) */
	ulong wlfc_lock_flags;
	ulong wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;	/* woken when an ioctl response arrives */
	wait_queue_head_t d3ack_wait;		/* woken on D3 ack from dongle */
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
#ifdef BT_OVER_PCIE
	wait_queue_head_t quiesce_wait;
#endif /* BT_OVER_PCIE */
	uint32 default_wd_interval;	/* watchdog timer interval (ms) */

	timer_list_compat_t timer;	/* watchdog timer */
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	timer_list_compat_t rpm_timer;	/* runtime-PM poll timer */
	bool rpm_timer_valid;
	tsk_ctl_t thr_rpm_ctl;		/* runtime-PM thread control */
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;	/* DPC tasklet */
	spinlock_t sdlock;
	spinlock_t txqlock;
	spinlock_t dhd_lock;
	spinlock_t txoff_lock;
#ifdef BCMDBUS
	ulong txqlock_flags;		/* saved IRQ flags for txqlock */
#endif /* BCMDBUS */

#ifndef BCMDBUS
	struct semaphore sdsem;
	tsk_ctl_t thr_dpc_ctl;		/* DPC thread control */
	tsk_ctl_t thr_wdt_ctl;		/* watchdog thread control */
#endif /* BCMDBUS */

	tsk_ctl_t thr_rxf_ctl;		/* rx frame thread control */
	spinlock_t rxf_lock;
	bool rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wakeup_source *wl_wifi;		/* Wifi wakelock */
	struct wakeup_source *wl_rxwake;	/* Wifi rx wakelock */
	struct wakeup_source *wl_ctrlwake;	/* Wifi ctrl wakelock */
	struct wakeup_source *wl_wdwake;	/* Wifi wd wakelock */
	struct wakeup_source *wl_evtwake;	/* Wifi event wakelock */
	struct wakeup_source *wl_pmwake;	/* Wifi pm handler wakelock */
	struct wakeup_source *wl_txflwake;	/* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wakeup_source *wl_intrwake;	/* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wakeup_source *wl_scanwake;	/* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
	struct wakeup_source *wl_nanwake;	/* NAN wakelock */
#endif /* CONFIG_HAS_WAKELOCK */

#if defined(OEM_ANDROID)
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* OEM_ANDROID */
	spinlock_t wakelock_spinlock;		/* protects the wakelock counters below */
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;	/* outstanding 802.1x frames in flight */
	dhd_attach_states_t dhd_state;	/* attach progress, used for teardown ordering */
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;	/* IP address pending programming into ARP offload */
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void *dhd_deferred_wq;		/* deferred-work queue handle */
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	ctf_t *cih;		/* ctf instance handle */
	ctf_brc_hot_t *brc_hot;	/* hot ctf bridge cache entry */
#endif /* BCM_ROUTER_DHD && HNDCTF */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif /* DEBUG_CPU_FREQ */
	unsigned int unit;		/* adapter/device unit number */
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32 psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32 wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	timer_list_compat_t join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	timer_list_compat_t scan_timer;
	bool scan_timer_active;
#endif /* DHD_DEBUG */
	struct delayed_work dhd_dpc_dispatcher_work;

	/* CPU on which the DHD DPC is running */
	atomic_t dpc_cpu;
	atomic_t prev_dpc_cpu;
#if defined(DHD_LB)
#if defined(DHD_LB_HOST_CTRL)
	bool permitted_primary_cpu;
#endif /* DHD_LB_HOST_CTRL */
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary;	/* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new;	/* temp */

	struct notifier_block cpu_notifier;

	/* Napi struct for handling rx packet sendup. Packets are removed from
	 * H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run to rx_napi_cpu.
	 */
	struct sk_buff_head rx_pend_queue ____cacheline_aligned;
	struct sk_buff_head rx_napi_queue ____cacheline_aligned;
	struct sk_buff_head rx_process_queue ____cacheline_aligned;
	struct napi_struct rx_napi_struct ____cacheline_aligned;
	atomic_t rx_napi_cpu;	/* cpu on which the napi is dispatched */
	struct net_device *rx_napi_netdev;	/* netdev of primary interface */

	struct work_struct rx_napi_dispatcher_work;
	struct work_struct tx_compl_dispatcher_work;
	struct work_struct tx_dispatcher_work;
	struct work_struct rx_compl_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32 dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32 napi_sched_cnt;
	/* NAPI latency stats */
	uint64 *napi_latency;
	uint64 napi_schedule_time;
	/* Number of times NAPI processing ran on each available core */
	uint32 *napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32 rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32 *rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32 txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32 *txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32 *cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32 *cpu_offline_cnt;

	/* Number of times TX processing run on each core */
	uint32 *txp_percpu_run_cnt;
	/* Number of times TX start run on each core */
	uint32 *tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the Dongle can send a bunch of rx completions,
	 * hence we took a 3 queue approach
	 * enque - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and adds the list of skbs from pend queue to
	 * napi queue
	 * napi processing - copies the pend_queue into a local queue and works
	 * on it.
	 * But for TX its going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and use the lock supported skb queue functions
	 * to add and process it. If its in-efficient we'll re-visit the queue
	 * design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head tx_pend_queue ____cacheline_aligned; */
	/*
	 * From the Tasklet that actually sends out data
	 * copy the list tx_pend_queue into tx_active_queue. There by we need
	 * to spinlock to only perform the copy the rest of the code ie to
	 * construct the tx_pend_queue and the code to process tx_active_queue
	 * can be lockless. The concept is borrowed as is from RX processing
	 */
	/* struct sk_buff_head tx_active_queue ____cacheline_aligned; */

	/* Control TXP in runtime, enable by default */
	atomic_t lb_txp_active;

	/* Control RXP in runtime, enable by default */
	atomic_t lb_rxp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue
	 * For now, the processing tasklet will also directly operate on this
	 * queue
	 */
	struct sk_buff_head tx_pend_queue ____cacheline_aligned;

	/* cpu on which the DHD Tx is happening */
	atomic_t tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On Each CPU, when the NAPI RX Packet processing call back was invoked
	 * how many packets were processed is captured in this data structure.
	 * Now its difficult to capture the "exact" number of packets processed.
	 * So considering the packet counter to be a 32 bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The "number" of packets
	 * processed is rounded off to the next power of 2 and put in the
	 * appropriate "bin" the value in the bin gets incremented.
	 * For example, assume that in CPU 1 if NAPI Rx runs 3 times
	 * and the packet count processed is as follows (assume the bin counters are 0)
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
	struct kobject dhd_lb_kobj;	/* sysfs kobject for load-balance tuning */
	bool dhd_lb_candidacy_override;
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t thr_logtrace_ctl;
#else
	struct delayed_work event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
	struct work_struct bt_log_dispatcher_work;
#endif /* BTLOG */
#ifdef EWP_EDL
	struct delayed_work edl_dispatcher_work;
#endif /* EWP_EDL */
#if defined(WLAN_ACCEL_BOOT)
	int fs_check_retry;
	struct delayed_work wl_accel_work;
	bool wl_accel_force_reg_on;
	bool wl_accel_boot_on_done;
#endif /* WLAN_ACCEL_BOOT */
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#if defined(BCMDBUS)
	struct task_struct *fw_download_task;
	struct semaphore fw_download_lock;
#endif /* BCMDBUS */
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;	/* sysfs kobject for this dhd instance */
	timer_list_compat_t timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];	/* path to BT firmware image */
#endif /* defined (BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev;	/* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint monitor_len;
	uint monitor_type;	/* monitor pseudo device */
#ifdef HOST_RADIOTAP_CONV
	monitor_info_t *monitor_info;
	uint host_radiotap_conv;
#endif /* HOST_RADIOTAP_CONV */
#endif /* WL_MONITOR */
#if defined (BT_OVER_SDIO)
	struct mutex bus_user_lock;	/* lock for sdio bus apis shared between WLAN & BT */
	int bus_user_count;	/* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head evt_trace_queue ____cacheline_aligned;
#endif /* SHOW_LOGTRACE */
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef BTLOG
	struct sk_buff_head bt_log_queue ____cacheline_aligned;
#endif /* BTLOG */
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif /* DHD_DEBUG_UART */
#ifdef BT_OVER_PCIE
	struct mutex quiesce_flr_lock;
	struct mutex quiesce_lock;
	enum dhd_bus_quiesce_state dhd_quiesce_state;
#endif /* BT_OVER_PCIE */
	struct mutex logdump_lock;
#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
	/* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */
	struct proc_dir_entry *gdb_proxy_fs_root;
	/* Name of procfs root directory */
	char gdb_proxy_fs_root_name[100];
#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT];
	uint64 pktcnt_per_ac[AC_COUNT];
	uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS];
#endif /* DHD_MQ && DHD_MQ_STATS */
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
#ifdef DHD_PKTTS
	bool latency;	/* pktts enab flag */
	pktts_flow_t config[PKTTS_CONFIG_MAX];	/* pktts user config */
#endif /* DHD_PKTTS */
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t hp2p_lock;
#endif /* DHD_HP2P */
#ifdef DHD_QOS_ON_SOCK_FLOW
	struct dhd_sock_qos_info *psk_qos;
#endif /* DHD_QOS_ON_SOCK_FLOW */
} dhd_info_t;

#ifdef WL_MONITOR
#define MONPKT_EXTRA_LEN	48u
#endif /* WL_MONITOR */

/* sysfs / procfs debug-ring hooks (implemented in dhd_linux_sysfs.c) */
extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

void dhd_dpc_tasklet_dispatcher_work(struct work_struct * work);
#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
/* TX load-balancing entry points */
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct * work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
/* RX load-balancing (NAPI) entry points */
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_work(struct work_struct * work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
#endif /* DHD_LB_RXP */

/* CPU-mask setup/teardown for load balancing */
void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

/* CPU hotplug callbacks: the cpuhp state-machine API exists from 4.10 on,
 * older kernels use a notifier-block callback instead.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE < 4.10.0 */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
#endif /* DHD_LB */
496 #if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON) 497 void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask); 498 #endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */ 499 #ifdef DHD_SSSR_DUMP 500 extern uint sssr_enab; 501 extern uint fis_enab; 502 #endif /* DHD_SSSR_DUMP */ 503 504 #if defined(ANDROID_VERSION) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0)) 505 #define WAKELOCK_BACKPORT 506 #endif 507 508 #ifdef CONFIG_HAS_WAKELOCK 509 #if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || defined(WAKELOCK_BACKPORT)) 510 #define dhd_wake_lock_init(wakeup_source, dev, name) \ 511 do { \ 512 wakeup_source = wakeup_source_register(dev, name); \ 513 } while (0); 514 #else 515 #define dhd_wake_lock_init(wakeup_source, dev, name) \ 516 do { \ 517 wakeup_source = wakeup_source_register(name); \ 518 } while (0); 519 #endif /* LINUX_VERSION >= 5.4.0 */ 520 #define dhd_wake_lock_destroy(wakeup_source) \ 521 do { \ 522 wakeup_source_unregister(wakeup_source); \ 523 } while (0); 524 #define dhd_wake_lock(wakeup_source) __pm_stay_awake(wakeup_source) 525 #define dhd_wake_unlock(wakeup_source) __pm_relax(wakeup_source) 526 #define dhd_wake_lock_active(wakeup_source) ((wakeup_source)?((wakeup_source)->active):0) 527 #define dhd_wake_lock_timeout(wakeup_source, timeout) \ 528 __pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout)) 529 #endif /* CONFIG_HAS_WAKELOCK */ 530 531 #endif /* __DHD_LINUX_PRIV_H__ */ 532