/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif /* CONFIG_COMPAT */
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/pm_wakeup.h>
#endif /* CONFIG_HAS_WAKELOCK */
#include <linux/wakelock.h>
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_QOS_ON_SOCK_FLOW
struct dhd_sock_qos_info;
#endif /* DHD_QOS_ON_SOCK_FLOW */

/*
 * Do not include this header except in dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of the public dhd_pub_t).
 */
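/*
 * Illustrative sketch, not part of the driver: because dhd_pub_t is embedded
 * below as the 'pub' member, code that only holds a dhd_pub_t pointer could,
 * under that layout assumption, recover the private structure with
 * container_of():
 *
 *	dhd_info_t *dhd = container_of(dhdp, dhd_info_t, pub);
 */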
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;
	/* For supporting multiple interfaces.
	 * static_ifs hold the net ifaces without a valid FW IF.
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
	wifi_adapter_info_t *adapter;			/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char clm_path[PATH_MAX];		/* path to clm vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];	/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;

#ifdef BCMDBUS
	ulong		wlfc_lock_flags;
	ulong		wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
#ifdef BT_OVER_PCIE
	wait_queue_head_t quiesce_wait;
#endif /* BT_OVER_PCIE */
	uint32	default_wd_interval;

	timer_list_compat_t timer;
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	timer_list_compat_t rpm_timer;
	bool rpm_timer_valid;
	tsk_ctl_t	  thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;
	spinlock_t	txoff_lock;
#ifdef BCMDBUS
	ulong		txqlock_flags;
#endif /* BCMDBUS */

#ifndef BCMDBUS
	struct semaphore sdsem;
	tsk_ctl_t	thr_dpc_ctl;
	tsk_ctl_t	thr_wdt_ctl;
#endif /* BCMDBUS */

	tsk_ctl_t	thr_rxf_ctl;
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wakeup_source wl_wifi;   /* Wifi wakelock */
	struct wakeup_source wl_rxwake; /* Wifi rx wakelock */
	struct wakeup_source wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wakeup_source wl_wdwake; /* Wifi wd wakelock */
	struct wakeup_source wl_evtwake; /* Wifi event wakelock */
	struct wakeup_source wl_pmwake;   /* Wifi pm handler wakelock */
	struct wakeup_source wl_txflwake; /* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wakeup_source wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wakeup_source wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
	struct wakeup_source wl_nanwake; /* NAN wakelock */
#endif /* CONFIG_HAS_WAKELOCK */

	struct wake_lock rx_wakelock;
#if defined(OEM_ANDROID)
	/* net_device interface lock, prevent race conditions among net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* OEM_ANDROID */
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;
	dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void			*dhd_deferred_wq;
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	ctf_t		*cih;		/* ctf instance handle */
	ctf_brc_hot_t *brc_hot;			/* hot ctf bridge cache entry */
#endif /* BCM_ROUTER_DHD && HNDCTF */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit;
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32	psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32	wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	timer_list_compat_t join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	timer_list_compat_t scan_timer;
	bool scan_timer_active;
#endif
	struct delayed_work	dhd_dpc_dispatcher_work;

	/* CPU on which the DHD DPC is running */
	atomic_t	dpc_cpu;
	atomic_t	prev_dpc_cpu;
#if defined(DHD_LB)
#if defined(DHD_LB_HOST_CTRL)
	bool permitted_primary_cpu;
#endif /* DHD_LB_HOST_CTRL */
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* NAPI struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is then
	 * appended to rx_napi_queue (w/ lock) and the rx_napi_struct is scheduled
	 * to run on rx_napi_cpu.
	 */
	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_process_queue  ____cacheline_aligned;
	struct napi_struct    rx_napi_struct ____cacheline_aligned;
	atomic_t                   rx_napi_cpu; /* cpu on which the napi is dispatched */
	struct net_device    *rx_napi_netdev; /* netdev of primary interface */
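	/*
	 * Illustrative sketch of the queue hand-off described above (an
	 * assumption, not the driver's verbatim code); only the splice onto
	 * rx_napi_queue needs its lock:
	 *
	 *	// dispatch context: hand pending skbs to the NAPI queue
	 *	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	 *	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	 *	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
	 *	napi_schedule(&dhd->rx_napi_struct);
	 *
	 *	// dhd_napi_poll() context: drain locally and send packets up
	 *	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	 *	skb_queue_splice_tail_init(&dhd->rx_napi_queue, &dhd->rx_process_queue);
	 *	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);
	 *	while (processed < budget &&
	 *	       (skb = __skb_dequeue(&dhd->rx_process_queue)) != NULL) {
	 *		netif_receive_skb(skb);
	 *		processed++;
	 *	}
	 */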

	struct work_struct    rx_napi_dispatcher_work;
	struct work_struct    tx_compl_dispatcher_work;
	struct work_struct    tx_dispatcher_work;
	struct work_struct    rx_compl_dispatcher_work;

	/* Number of times DPC Tasklet ran */
	uint32	dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32	napi_sched_cnt;
	/* NAPI latency stats */
	uint64  *napi_latency;
	uint64 napi_schedule_time;
	/* Number of times NAPI processing ran on each available core */
	uint32	*napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32	rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32	*rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32	txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32	*txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32	*cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32	*cpu_offline_cnt;

	/* Number of times TX processing ran on each core */
	uint32	*txp_percpu_run_cnt;
	/* Number of times TX start ran on each core */
	uint32	*tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required in case of TX
	 * processing. In case of RX the dongle can send a bunch of rx completions,
	 * hence we took a 3-queue approach:
	 * enqueue - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and adds the list of skbs from the pend queue to
	 *            the napi queue
	 * napi processing - copies the napi_queue into a local queue and works
	 * on it.
	 * But for TX it is going to be 1 skb at a time, so we are just thinking
	 * of using only one queue and using the lock-supported skb queue functions
	 * to add and process it. If that is inefficient we will revisit the queue
	 * design.
	 */

	/* When the NET_TX tries to send a TX packet put it into tx_pend_queue */
	/* struct sk_buff_head		tx_pend_queue  ____cacheline_aligned;  */
	/*
	 * From the tasklet that actually sends out data,
	 * copy the list tx_pend_queue into tx_active_queue. Thereby the
	 * spinlock is needed only for the copy; the rest of the code, i.e.
	 * building tx_pend_queue and processing tx_active_queue,
	 * can be lockless. The concept is borrowed as-is from RX processing.
	 */
	/* struct sk_buff_head		tx_active_queue  ____cacheline_aligned; */

	/* Control TXP at runtime, enabled by default */
	atomic_t                lb_txp_active;

	/* Control RXP at runtime, enabled by default */
	atomic_t                lb_rxp_active;

	/*
	 * When the NET_TX tries to send a TX packet put it into tx_pend_queue.
	 * For now, the processing tasklet will also directly operate on this
	 * queue.
	 */
	struct sk_buff_head	tx_pend_queue  ____cacheline_aligned;
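	/*
	 * Illustrative sketch of the single-queue TX approach discussed in the
	 * TODO above (an assumption, not the driver's verbatim code): the skb
	 * queue helpers take the queue's internal lock, so producer and
	 * consumer need no extra locking:
	 *
	 *	// network stack xmit path
	 *	skb_queue_tail(&dhd->tx_pend_queue, skb);
	 *	tasklet_schedule(&dhd->tx_tasklet);
	 *
	 *	// TX tasklet (see dhd_lb_tx_handler() declared below)
	 *	while ((skb = skb_dequeue(&dhd->tx_pend_queue)) != NULL)
	 *		__dhd_sendpkt(&dhd->pub, ifidx, skb); // pkt conversion omitted
	 */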

	/* CPU on which the DHD Tx is happening */
	atomic_t		tx_cpu;

	/* CPU on which the Network stack is calling the DHD's xmit function */
	atomic_t		net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On each CPU, when the NAPI RX packet processing callback is invoked,
	 * how many packets were processed is captured in this data structure.
	 * It is difficult to capture the "exact" number of packets processed,
	 * so, considering the packet counter to be a 32-bit one, we have a
	 * bucket with 8 bins (2^1, 2^2 ... 2^8). The number of packets
	 * processed is rounded up to the next power of 2 and the counter of
	 * the corresponding "bin" is incremented.
	 * For example, assume that on CPU 1 NAPI RX runs 3 times
	 * and the packet counts processed are as follows (all bin counters start at 0):
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
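	/*
	 * Illustrative sketch of the binning described above (an assumption,
	 * not the driver's verbatim code), where 'hist' stands for the per-CPU
	 * counter row backing one of the arrays above:
	 *
	 *	uint32 bin = 1;
	 *
	 *	while (bin < (HIST_BIN_SIZE - 1) && ((uint32)1 << bin) < processed)
	 *		bin++;
	 *	hist[bin]++;	// e.g. 10 or 15 packets hit the 2^4 bin, 30 the 2^5 bin
	 */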
	struct kobject dhd_lb_kobj;
	bool dhd_lb_candidacy_override;
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct	  axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t	  thr_logtrace_ctl;
#else
	struct delayed_work	  event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
	struct work_struct	  bt_log_dispatcher_work;
#endif /* BTLOG */
#ifdef EWP_EDL
	struct delayed_work edl_dispatcher_work;
#endif
#if defined(WLAN_ACCEL_BOOT)
	int fs_check_retry;
	struct delayed_work wl_accel_work;
	bool wl_accel_force_reg_on;
	bool wl_accel_boot_on_done;
#endif
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#if defined(BCMDBUS)
	struct task_struct *fw_download_task;
	struct semaphore fw_download_lock;
#endif /* BCMDBUS */
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;
	timer_list_compat_t timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];
#endif /* defined(BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev; /* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint	monitor_len;
	uint	monitor_type;   /* monitor pseudo device */
#ifdef HOST_RADIOTAP_CONV
	monitor_info_t *monitor_info;
	uint host_radiotap_conv;
#endif /* HOST_RADIOTAP_CONV */
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
	int	bus_user_count; /* User counts of sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head   evt_trace_queue     ____cacheline_aligned;
#endif
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef BTLOG
	struct sk_buff_head   bt_log_queue     ____cacheline_aligned;
#endif	/* BTLOG */
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif	/* DHD_DEBUG_UART */
#ifdef BT_OVER_PCIE
	struct mutex quiesce_flr_lock;
	struct mutex quiesce_lock;
	enum dhd_bus_quiesce_state dhd_quiesce_state;
#endif /* BT_OVER_PCIE */
	struct mutex logdump_lock;
#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
	/* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */
	struct proc_dir_entry *gdb_proxy_fs_root;
	/* Name of procfs root directory */
	char gdb_proxy_fs_root_name[100];
#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT];
	uint64 pktcnt_per_ac[AC_COUNT];
	uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS];
#endif /* DHD_MQ && DHD_MQ_STATS */
	/* indicates mem_dump was scheduled as work queue or called directly */
	bool scheduled_memdump;
#ifdef DHD_PKTTS
	bool latency; /* pktts enab flag */
	pktts_flow_t config[PKTTS_CONFIG_MAX]; /* pktts user config */
#endif /* DHD_PKTTS */
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t	hp2p_lock;
#endif /* DHD_HP2P */
#ifdef DHD_QOS_ON_SOCK_FLOW
	struct dhd_sock_qos_info *psk_qos;
#endif
} dhd_info_t;

#ifdef WL_MONITOR
#define MONPKT_EXTRA_LEN	48u
#endif /* WL_MONITOR */

extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

void dhd_dpc_tasklet_dispatcher_work(struct work_struct * work);
#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct * work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_work(struct work_struct * work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
#endif /* DHD_LB_RXP */

void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
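/*
 * Illustrative sketch of how the registration helpers above could hook the
 * callbacks into the kernel (an assumption, not the driver's actual code):
 *
 *	#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
 *	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "dhd:cpu_online",
 *		dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
 *	#else
 *	dhd->cpu_notifier.notifier_call = dhd_cpu_callback;
 *	register_cpu_notifier(&dhd->cpu_notifier);
 *	#endif
 */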
#endif /* DHD_LB */

#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#ifdef DHD_SSSR_DUMP
extern uint sssr_enab;
extern uint fis_enab;
#endif /* DHD_SSSR_DUMP */

#ifdef CONFIG_HAS_WAKELOCK
enum {
	WAKE_LOCK_SUSPEND, /* Prevent suspend */
	WAKE_LOCK_TYPE_COUNT
};
#define dhd_wake_lock_init(wakeup_source, type, name)	wakeup_source_add(wakeup_source)
#define dhd_wake_lock_destroy(wakeup_source)		wakeup_source_remove(wakeup_source)
#define dhd_wake_lock(wakeup_source)			__pm_stay_awake(wakeup_source)
#define dhd_wake_unlock(wakeup_source)			__pm_relax(wakeup_source)
#define dhd_wake_lock_active(wakeup_source)		((wakeup_source)->active)
#define dhd_wake_lock_timeout(wakeup_source, timeout)	\
	__pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout))
#endif /* CONFIG_HAS_WAKELOCK */
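/*
 * Illustrative usage sketch (an assumption, not taken from the driver): the
 * mappings above let wakelock-style call sites run on the kernel
 * wakeup_source API when CONFIG_HAS_WAKELOCK is set, e.g. for the rx wakelock:
 *
 *	dhd_wake_lock_init(&dhd->wl_rxwake, WAKE_LOCK_SUSPEND, "wlan_rx_wake");
 *	dhd_wake_lock_timeout(&dhd->wl_rxwake, msecs_to_jiffies(200));
 *	...
 *	dhd_wake_lock_destroy(&dhd->wl_rxwake);
 */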

#endif /* __DHD_LINUX_PRIV_H__ */