/*
 * DHD Linux header file - contains private structure definition of the Linux specific layer
 *
 * Copyright (C) 2020, Broadcom.
 *
 *      Unless you and Broadcom execute a separate written software license
 * agreement governing use of this software, this software is licensed to you
 * under the terms of the GNU General Public License version 2 (the "GPL"),
 * available at http://www.broadcom.com/licenses/GPLv2.php, with the
 * following added to such license:
 *
 *      As a special exception, the copyright holders of this software give you
 * permission to link this software with independent modules, and to copy and
 * distribute the resulting executable under terms of your choice, provided that
 * you also meet, for each linked independent module, the terms and conditions of
 * the license of that module.  An independent module is a module which is not
 * derived from this software.  The special exception does not apply to any
 * modifications of the software.
 *
 *
 * <<Broadcom-WL-IPTag/Open:>>
 *
 * $Id$
 */

#ifndef __DHD_LINUX_PRIV_H__
#define __DHD_LINUX_PRIV_H__

#include <osl.h>

#ifdef SHOW_LOGTRACE
#include <linux/syscalls.h>
#include <event_log.h>
#endif /* SHOW_LOGTRACE */
#include <linux/skbuff.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
#ifdef CONFIG_COMPAT
#include <linux/compat.h>
#endif /* CONFIG_COMPAT */
#ifdef CONFIG_HAS_WAKELOCK
#include <linux/pm_wakeup.h>
#endif /* CONFIG_HAS_WAKELOCK */
#include <dngl_stats.h>
#include <dhd.h>
#include <dhd_dbg.h>
#include <dhd_debug.h>
#include <dhd_linux.h>
#include <dhd_bus.h>

#ifdef PCIE_FULL_DONGLE
#include <bcmmsgbuf.h>
#include <dhd_flowring.h>
#endif /* PCIE_FULL_DONGLE */

#ifdef DHD_QOS_ON_SOCK_FLOW
struct dhd_sock_qos_info;
#endif /* DHD_QOS_ON_SOCK_FLOW */

/*
 * Do not include this header except from dhd_linux.c and dhd_linux_sysfs.c.
 * Local private structure (extension of pub)
 */
typedef struct dhd_info {
#if defined(WL_WIRELESS_EXT)
	wl_iw_t		iw;		/* wireless extensions state (must be first) */
#endif /* defined(WL_WIRELESS_EXT) */
	dhd_pub_t pub;
	/* For supporting multiple interfaces:
	 * static_ifs hold the net ifaces without a valid FW IF
	 */
	dhd_if_t *iflist[DHD_MAX_IFS + DHD_MAX_STATIC_IFS];
	wifi_adapter_info_t *adapter;		/* adapter information, interrupt, fw path etc. */
	char fw_path[PATH_MAX];		/* path to firmware image */
	char nv_path[PATH_MAX];		/* path to nvram vars file */
	char clm_path[PATH_MAX];	/* path to clm vars file */
	char conf_path[PATH_MAX];	/* path to config vars file */
#ifdef DHD_UCODE_DOWNLOAD
	char uc_path[PATH_MAX];		/* path to ucode image */
#endif /* DHD_UCODE_DOWNLOAD */

	/* serialize dhd iovars */
	struct mutex dhd_iovar_mutex;

	struct semaphore proto_sem;
#ifdef PROP_TXSTATUS
	spinlock_t	wlfc_spinlock;

#ifdef BCMDBUS
	ulong		wlfc_lock_flags;
	ulong		wlfc_pub_lock_flags;
#endif /* BCMDBUS */
#endif /* PROP_TXSTATUS */
	wait_queue_head_t ioctl_resp_wait;
	wait_queue_head_t d3ack_wait;
	wait_queue_head_t dhd_bus_busy_state_wait;
	wait_queue_head_t dmaxfer_wait;
#ifdef BT_OVER_PCIE
	wait_queue_head_t quiesce_wait;
#endif /* BT_OVER_PCIE */
	uint32	default_wd_interval;

	timer_list_compat_t timer;
	bool wd_timer_valid;
#ifdef DHD_PCIE_RUNTIMEPM
	timer_list_compat_t rpm_timer;
	bool rpm_timer_valid;
	tsk_ctl_t	thr_rpm_ctl;
#endif /* DHD_PCIE_RUNTIMEPM */
	struct tasklet_struct tasklet;
	spinlock_t	sdlock;
	spinlock_t	txqlock;
	spinlock_t	dhd_lock;
	spinlock_t	txoff_lock;
#ifdef BCMDBUS
	ulong		txqlock_flags;
#endif /* BCMDBUS */

#ifndef BCMDBUS
	struct semaphore sdsem;
	tsk_ctl_t	thr_dpc_ctl;
	tsk_ctl_t	thr_wdt_ctl;
#endif /* BCMDBUS */

	tsk_ctl_t	thr_rxf_ctl;
	spinlock_t	rxf_lock;
	bool		rxthread_enabled;

	/* Wakelocks */
#if defined(CONFIG_HAS_WAKELOCK)
	struct wakeup_source *wl_wifi;   /* Wifi wakelock */
	struct wakeup_source *wl_rxwake; /* Wifi rx wakelock */
	struct wakeup_source *wl_ctrlwake; /* Wifi ctrl wakelock */
	struct wakeup_source *wl_wdwake; /* Wifi wd wakelock */
	struct wakeup_source *wl_evtwake; /* Wifi event wakelock */
	struct wakeup_source *wl_pmwake;   /* Wifi pm handler wakelock */
	struct wakeup_source *wl_txflwake; /* Wifi tx flow wakelock */
#ifdef BCMPCIE_OOB_HOST_WAKE
	struct wakeup_source *wl_intrwake; /* Host wakeup wakelock */
#endif /* BCMPCIE_OOB_HOST_WAKE */
#ifdef DHD_USE_SCAN_WAKELOCK
	struct wakeup_source *wl_scanwake;  /* Wifi scan wakelock */
#endif /* DHD_USE_SCAN_WAKELOCK */
	struct wakeup_source *wl_nanwake; /* NAN wakelock */
#endif /* CONFIG_HAS_WAKELOCK */

#if defined(OEM_ANDROID)
	/* net_device interface lock, prevents races between net_dev interface
	 * calls and wifi_on or wifi_off
	 */
	struct mutex dhd_net_if_mutex;
	struct mutex dhd_suspend_mutex;
#if defined(PKT_FILTER_SUPPORT) && defined(APF)
	struct mutex dhd_apf_mutex;
#endif /* PKT_FILTER_SUPPORT && APF */
#endif /* OEM_ANDROID */
	spinlock_t wakelock_spinlock;
	spinlock_t wakelock_evt_spinlock;
	uint32 wakelock_counter;
	int wakelock_wd_counter;
	int wakelock_rx_timeout_enable;
	int wakelock_ctrl_timeout_enable;
	bool waive_wakelock;
	uint32 wakelock_before_waive;

	/* Thread to issue ioctl for multicast */
	wait_queue_head_t ctrl_wait;
	atomic_t pend_8021x_cnt;
	dhd_attach_states_t dhd_state;
#ifdef SHOW_LOGTRACE
	dhd_event_log_t event_data;
#endif /* SHOW_LOGTRACE */

#if defined(CONFIG_HAS_EARLYSUSPEND) && defined(DHD_USE_EARLYSUSPEND)
	struct early_suspend early_suspend;
#endif /* CONFIG_HAS_EARLYSUSPEND && DHD_USE_EARLYSUSPEND */

#ifdef ARP_OFFLOAD_SUPPORT
	u32 pend_ipaddr;
#endif /* ARP_OFFLOAD_SUPPORT */
#ifdef DHDTCPACK_SUPPRESS
	spinlock_t	tcpack_lock;
#endif /* DHDTCPACK_SUPPRESS */
#ifdef FIX_CPU_MIN_CLOCK
	bool cpufreq_fix_status;
	struct mutex cpufreq_fix;
	struct pm_qos_request dhd_cpu_qos;
#ifdef FIX_BUS_MIN_CLOCK
	struct pm_qos_request dhd_bus_qos;
#endif /* FIX_BUS_MIN_CLOCK */
#endif /* FIX_CPU_MIN_CLOCK */
	void			*dhd_deferred_wq;
#if (defined(BCM_ROUTER_DHD) && defined(HNDCTF))
	ctf_t		*cih;		/* ctf instance handle */
	ctf_brc_hot_t	*brc_hot;	/* hot ctf bridge cache entry */
#endif /* BCM_ROUTER_DHD && HNDCTF */
#ifdef DEBUG_CPU_FREQ
	struct notifier_block freq_trans;
	int __percpu *new_freq;
#endif
	unsigned int unit;
	struct notifier_block pm_notifier;
#ifdef DHD_PSTA
	uint32	psta_mode;	/* PSTA or PSR */
#endif /* DHD_PSTA */
#ifdef DHD_WET
	uint32	wet_mode;
#endif /* DHD_WET */
#ifdef DHD_DEBUG
	dhd_dump_t *dump;
	timer_list_compat_t join_timer;
	u32 join_timeout_val;
	bool join_timer_active;
	uint scan_time_count;
	timer_list_compat_t scan_timer;
	bool scan_timer_active;
#endif
	struct delayed_work	dhd_dpc_dispatcher_work;

	/* CPU on which the DHD DPC is running */
	atomic_t	dpc_cpu;
	atomic_t	prev_dpc_cpu;
#if defined(DHD_LB)
#if defined(DHD_LB_HOST_CTRL)
	bool permitted_primary_cpu;
#endif /* DHD_LB_HOST_CTRL */
	/* CPU Load Balance dynamic CPU selection */

	/* Variable that tracks the current CPUs available for candidacy */
	cpumask_var_t cpumask_curr_avail;

	/* Primary and secondary CPU mask */
	cpumask_var_t cpumask_primary, cpumask_secondary; /* configuration */
	cpumask_var_t cpumask_primary_new, cpumask_secondary_new; /* temp */

	struct notifier_block cpu_notifier;

	/* NAPI struct for handling rx packet sendup. Packets are removed from
	 * the H2D RxCompl ring and placed into rx_pend_queue. rx_pend_queue is
	 * then appended to rx_napi_queue (under its lock) and rx_napi_struct
	 * is scheduled to run on rx_napi_cpu (see the hand-off sketch after
	 * this structure definition).
	 */
	struct sk_buff_head   rx_pend_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_napi_queue  ____cacheline_aligned;
	struct sk_buff_head   rx_process_queue  ____cacheline_aligned;
	struct napi_struct    rx_napi_struct ____cacheline_aligned;
	atomic_t              rx_napi_cpu; /* cpu on which the napi is dispatched */
	struct net_device    *rx_napi_netdev; /* netdev of primary interface */

	struct work_struct    rx_napi_dispatcher_work;
	struct work_struct    tx_compl_dispatcher_work;
	struct work_struct    tx_dispatcher_work;
	struct work_struct    rx_compl_dispatcher_work;

	/* Number of times the DPC tasklet ran */
	uint32	dhd_dpc_cnt;
	/* Number of times NAPI processing got scheduled */
	uint32	napi_sched_cnt;
	/* NAPI latency stats */
	uint64	*napi_latency;
	uint64	napi_schedule_time;
	/* Number of times NAPI processing ran on each available core */
	uint32	*napi_percpu_run_cnt;
	/* Number of times RX Completions got scheduled */
	uint32	rxc_sched_cnt;
	/* Number of times RX Completion ran on each available core */
	uint32	*rxc_percpu_run_cnt;
	/* Number of times TX Completions got scheduled */
	uint32	txc_sched_cnt;
	/* Number of times TX Completions ran on each available core */
	uint32	*txc_percpu_run_cnt;
	/* CPU status */
	/* Number of times each CPU came online */
	uint32	*cpu_online_cnt;
	/* Number of times each CPU went offline */
	uint32	*cpu_offline_cnt;

	/* Number of times TX processing ran on each core */
	uint32	*txp_percpu_run_cnt;
	/* Number of times TX start ran on each core */
	uint32	*tx_start_percpu_run_cnt;

	/* Tx load balancing */

	/* TODO: Need to see if batch processing is really required for TX
	 * processing. For RX, the dongle can send a bunch of rx completions
	 * at once, hence the 3-queue approach:
	 * enqueue - adds the skbs to rx_pend_queue
	 * dispatch - uses a lock and appends the list of skbs from the pend
	 *            queue to the napi queue
	 * napi processing - copies the pend queue into a local queue and
	 *            works on it.
	 * But for TX it is going to be 1 skb at a time, so we are just
	 * thinking of using only one queue and the lock-protected skb queue
	 * functions to add and process it (see the enqueue sketch after this
	 * structure definition). If that is inefficient we'll revisit the
	 * queue design.
	 */

	/* When NET_TX tries to send a TX packet, put it into tx_pend_queue */
	/* struct sk_buff_head		tx_pend_queue  ____cacheline_aligned;  */
	/*
	 * From the tasklet that actually sends out data,
	 * copy the list tx_pend_queue into tx_active_queue. Thereby we need
	 * the spinlock only to perform the copy; the rest of the code, i.e.
	 * constructing tx_pend_queue and processing tx_active_queue,
	 * can be lockless. The concept is borrowed as-is from RX processing.
	 */
	/* struct sk_buff_head		tx_active_queue  ____cacheline_aligned; */

	/* Control TXP at runtime, enabled by default */
	atomic_t                lb_txp_active;

	/* Control RXP at runtime, enabled by default */
	atomic_t                lb_rxp_active;

	/*
	 * When NET_TX tries to send a TX packet, put it into tx_pend_queue.
	 * For now, the processing tasklet will also directly operate on this
	 * queue.
	 */
	struct sk_buff_head	tx_pend_queue  ____cacheline_aligned;

	/* CPU on which the DHD Tx is happening */
	atomic_t		tx_cpu;

	/* CPU on which the network stack is calling the DHD's xmit function */
	atomic_t		net_tx_cpu;

	/* Tasklet context from which the DHD's TX processing happens */
	struct tasklet_struct tx_tasklet;

	/*
	 * Consumer Histogram - NAPI RX Packet processing
	 * -----------------------------------------------
	 * On each CPU, when the NAPI RX packet processing callback is
	 * invoked, the number of packets processed is captured in this data
	 * structure. It is difficult to capture the "exact" number of
	 * packets processed, so, treating the packet counter as a 32-bit
	 * value, we have a bucket with 8 bins (2^1, 2^2 ... 2^8). The number
	 * of packets processed is rounded up to the next power of 2 and the
	 * value in the corresponding bin is incremented (see the binning
	 * sketch after this structure definition).
	 * For example, assume that on CPU 1 NAPI Rx runs 3 times and the
	 * packet counts processed are as follows (assume the bin counters
	 * start at 0):
	 * iteration 1 - 10 (the bin counter 2^4 increments to 1)
	 * iteration 2 - 30 (the bin counter 2^5 increments to 1)
	 * iteration 3 - 15 (the bin counter 2^4 increments by 1 to become 2)
	 */
	uint32 *napi_rx_hist[HIST_BIN_SIZE];
	uint32 *txc_hist[HIST_BIN_SIZE];
	uint32 *rxc_hist[HIST_BIN_SIZE];
	struct kobject dhd_lb_kobj;
	bool dhd_lb_candidacy_override;
#endif /* DHD_LB */
#if defined(DNGL_AXI_ERROR_LOGGING) && defined(DHD_USE_WQ_FOR_DNGL_AXI_ERROR)
	struct work_struct	axi_error_dispatcher_work;
#endif /* DNGL_AXI_ERROR_LOGGING && DHD_USE_WQ_FOR_DNGL_AXI_ERROR */
#ifdef SHOW_LOGTRACE
#ifdef DHD_USE_KTHREAD_FOR_LOGTRACE
	tsk_ctl_t	thr_logtrace_ctl;
#else
	struct delayed_work	event_log_dispatcher_work;
#endif /* DHD_USE_KTHREAD_FOR_LOGTRACE */
#endif /* SHOW_LOGTRACE */

#ifdef BTLOG
	struct work_struct	bt_log_dispatcher_work;
#endif /* BTLOG */
#ifdef EWP_EDL
	struct delayed_work edl_dispatcher_work;
#endif
#if defined(WLAN_ACCEL_BOOT)
	int fs_check_retry;
	struct delayed_work wl_accel_work;
	bool wl_accel_force_reg_on;
	bool wl_accel_boot_on_done;
#endif
#if defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW)
#if defined(BCMDBUS)
	struct task_struct *fw_download_task;
	struct semaphore fw_download_lock;
#endif /* BCMDBUS */
#endif /* defined(BCM_DNGL_EMBEDIMAGE) || defined(BCM_REQUEST_FW) */
	struct kobject dhd_kobj;
	timer_list_compat_t timesync_timer;
#if defined(BT_OVER_SDIO)
	char btfw_path[PATH_MAX];
#endif /* defined(BT_OVER_SDIO) */
#ifdef WL_MONITOR
	struct net_device *monitor_dev; /* monitor pseudo device */
	struct sk_buff *monitor_skb;
	uint	monitor_len;
	uint	monitor_type;	/* monitor pseudo device */
#ifdef HOST_RADIOTAP_CONV
	monitor_info_t *monitor_info;
	uint host_radiotap_conv;
#endif /* HOST_RADIOTAP_CONV */
#endif /* WL_MONITOR */
#if defined(BT_OVER_SDIO)
	struct mutex bus_user_lock; /* lock for sdio bus apis shared between WLAN & BT */
	int	bus_user_count; /* user count of the sdio bus shared between WLAN & BT */
#endif /* BT_OVER_SDIO */
#ifdef SHOW_LOGTRACE
	struct sk_buff_head   evt_trace_queue     ____cacheline_aligned;
#endif
#ifdef DHD_PCIE_NATIVE_RUNTIMEPM
	struct workqueue_struct *tx_wq;
	struct workqueue_struct *rx_wq;
#endif /* DHD_PCIE_NATIVE_RUNTIMEPM */
#ifdef BTLOG
	struct sk_buff_head   bt_log_queue     ____cacheline_aligned;
#endif	/* BTLOG */
#ifdef PCIE_INB_DW
	wait_queue_head_t ds_exit_wait;
#endif /* PCIE_INB_DW */
#ifdef DHD_DEBUG_UART
	bool duart_execute;
#endif	/* DHD_DEBUG_UART */
#ifdef BT_OVER_PCIE
	struct mutex quiesce_flr_lock;
	struct mutex quiesce_lock;
	enum dhd_bus_quiesce_state dhd_quiesce_state;
#endif /* BT_OVER_PCIE */
	struct mutex logdump_lock;
#if defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL)
	/* Root directory for GDB Proxy's (proc)fs files, used by first (default) interface */
	struct proc_dir_entry *gdb_proxy_fs_root;
	/* Name of procfs root directory */
	char gdb_proxy_fs_root_name[100];
#endif /* defined(GDB_PROXY) && defined(PCIE_FULL_DONGLE) && defined(BCMINTERNAL) */
#if defined(DHD_MQ) && defined(DHD_MQ_STATS)
	uint64 pktcnt_qac_histo[MQ_MAX_QUEUES][AC_COUNT];
	uint64 pktcnt_per_ac[AC_COUNT];
	uint64 cpu_qstats[MQ_MAX_QUEUES][MQ_MAX_CPUS];
#endif /* DHD_MQ && DHD_MQ_STATS */
	/* indicates whether mem_dump was scheduled as a work item or called directly */
	bool scheduled_memdump;
#ifdef DHD_PKTTS
	bool latency; /* pktts enab flag */
	pktts_flow_t config[PKTTS_CONFIG_MAX]; /* pktts user config */
#endif /* DHD_PKTTS */
	struct work_struct dhd_hang_process_work;
#ifdef DHD_HP2P
	spinlock_t	hp2p_lock;
#endif /* DHD_HP2P */
#ifdef DHD_QOS_ON_SOCK_FLOW
	struct dhd_sock_qos_info *psk_qos;
#endif
} dhd_info_t;
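
/*
 * The two sketches below are illustrative additions for this edit, not part
 * of the driver: they show the lock-protected queue hand-off patterns
 * described in the dhd_info comments above. The function names are
 * hypothetical; the real dispatch paths live in dhd_linux.c (e.g.
 * dhd_rx_napi_dispatcher_work()). They assume <linux/netdevice.h> is pulled
 * in via the dhd headers included above.
 */
#if defined(DHD_LB)
/* RX: splice the producer's pend queue into the NAPI queue, then kick NAPI */
static inline void
dhd_lb_rx_dispatch_sketch(dhd_info_t *dhd)
{
	unsigned long flags;

	/* Only the splice needs the lock; the NAPI poll callback can then
	 * drain rx_napi_queue without contending with the producer.
	 */
	spin_lock_irqsave(&dhd->rx_napi_queue.lock, flags);
	skb_queue_splice_tail_init(&dhd->rx_pend_queue, &dhd->rx_napi_queue);
	spin_unlock_irqrestore(&dhd->rx_napi_queue.lock, flags);

	napi_schedule(&dhd->rx_napi_struct);
}

/* TX: one skb at a time through the internally locked skb queue helpers */
static inline void
dhd_lb_tx_enqueue_sketch(dhd_info_t *dhd, struct sk_buff *skb)
{
	/* skb_queue_tail() takes tx_pend_queue's internal lock by itself */
	skb_queue_tail(&dhd->tx_pend_queue, skb);
	tasklet_schedule(&dhd->tx_tasklet);
}
#endif /* DHD_LB */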

#ifdef WL_MONITOR
#define MONPKT_EXTRA_LEN	48u
#endif /* WL_MONITOR */
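
#if defined(DHD_LB)
/*
 * Illustrative sketch added for this edit (not part of the driver): the
 * power-of-2 binning behind napi_rx_hist/txc_hist/rxc_hist, as described
 * in the Consumer Histogram comment inside dhd_info above. The helper name
 * is hypothetical.
 */
static inline uint32
dhd_hist_bin_index_sketch(uint32 pkt_cnt)
{
	uint32 bin = 0;

	/* Smallest bin with 2^bin >= pkt_cnt, clamped to the last bin:
	 * 10 -> bin 4 (2^4 = 16), 30 -> bin 5 (2^5 = 32), 15 -> bin 4.
	 */
	while (((1U << bin) < pkt_cnt) && (bin < (HIST_BIN_SIZE - 1)))
		bin++;
	return bin;
}
#endif /* DHD_LB */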

extern int dhd_sysfs_init(dhd_info_t *dhd);
extern void dhd_sysfs_exit(dhd_info_t *dhd);
extern void dhd_dbg_ring_proc_create(dhd_pub_t *dhdp);
extern void dhd_dbg_ring_proc_destroy(dhd_pub_t *dhdp);

int __dhd_sendpkt(dhd_pub_t *dhdp, int ifidx, void *pktbuf);

void dhd_dpc_tasklet_dispatcher_work(struct work_struct *work);
#if defined(DHD_LB)
#if defined(DHD_LB_TXP)
int dhd_lb_sendpkt(dhd_info_t *dhd, struct net_device *net, int ifidx, void *skb);
void dhd_tx_dispatcher_work(struct work_struct *work);
void dhd_tx_dispatcher_fn(dhd_pub_t *dhdp);
void dhd_lb_tx_dispatch(dhd_pub_t *dhdp);
void dhd_lb_tx_handler(unsigned long data);
#endif /* DHD_LB_TXP */

#if defined(DHD_LB_RXP)
int dhd_napi_poll(struct napi_struct *napi, int budget);
void dhd_rx_napi_dispatcher_work(struct work_struct *work);
void dhd_lb_rx_napi_dispatch(dhd_pub_t *dhdp);
void dhd_lb_rx_pkt_enqueue(dhd_pub_t *dhdp, void *pkt, int ifidx);
unsigned long dhd_read_lb_rxp(dhd_pub_t *dhdp);
#endif /* DHD_LB_RXP */

void dhd_lb_set_default_cpus(dhd_info_t *dhd);
void dhd_cpumasks_deinit(dhd_info_t *dhd);
int dhd_cpumasks_init(dhd_info_t *dhd);

void dhd_select_cpu_candidacy(dhd_info_t *dhd);

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
int dhd_cpu_startup_callback(unsigned int cpu);
int dhd_cpu_teardown_callback(unsigned int cpu);
#else
int dhd_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu);
#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0) */

int dhd_register_cpuhp_callback(dhd_info_t *dhd);
int dhd_unregister_cpuhp_callback(dhd_info_t *dhd);
#endif /* DHD_LB */
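
/*
 * Illustrative sketch added for this edit (not the driver's actual
 * registration code): on kernels >= 4.10 the hotplug callbacks above would
 * typically be wired into the cpuhp state machine roughly as follows.
 * Assumes <linux/cpuhotplug.h> is visible via the kernel headers already
 * included.
 */
#if defined(DHD_LB) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 10, 0))
static inline int
dhd_cpuhp_register_sketch(void)
{
	/* Dynamic state: the callbacks run as CPUs come online/go offline;
	 * cpuhp_setup_state() returns the allocated state id (>= 0) or a
	 * negative errno.
	 */
	return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "wifi/dhd:online",
		dhd_cpu_startup_callback, dhd_cpu_teardown_callback);
}
#endif /* DHD_LB && LINUX_VERSION_CODE >= 4.10.0 */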

#if defined(DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON)
void dhd_irq_set_affinity(dhd_pub_t *dhdp, const struct cpumask *cpumask);
#endif /* DHD_CONTROL_PCIE_CPUCORE_WIFI_TURNON */
#ifdef DHD_SSSR_DUMP
extern uint sssr_enab;
extern uint fis_enab;
#endif /* DHD_SSSR_DUMP */

#if defined(ANDROID_VERSION) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0))
#define WAKELOCK_BACKPORT
#endif

#ifdef CONFIG_HAS_WAKELOCK
#if ((LINUX_VERSION_CODE >= KERNEL_VERSION(5, 4, 0)) || defined(WAKELOCK_BACKPORT))
#define dhd_wake_lock_init(wakeup_source, dev, name) \
do { \
	wakeup_source = wakeup_source_register(dev, name); \
} while (0)
#else
#define dhd_wake_lock_init(wakeup_source, dev, name) \
do { \
	wakeup_source = wakeup_source_register(name); \
} while (0)
#endif /* LINUX_VERSION >= 5.4.0 || WAKELOCK_BACKPORT */
#define dhd_wake_lock_destroy(wakeup_source) \
do { \
	wakeup_source_unregister(wakeup_source); \
} while (0)
#define dhd_wake_lock(wakeup_source)			__pm_stay_awake(wakeup_source)
#define dhd_wake_unlock(wakeup_source)			__pm_relax(wakeup_source)
#define dhd_wake_lock_active(wakeup_source)		((wakeup_source) ? ((wakeup_source)->active) : 0)
#define dhd_wake_lock_timeout(wakeup_source, timeout)	\
	__pm_wakeup_event(wakeup_source, jiffies_to_msecs(timeout))
#endif /* CONFIG_HAS_WAKELOCK */
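
#ifdef CONFIG_HAS_WAKELOCK
/*
 * Illustrative usage sketch of the wrappers above, added for this edit
 * (not part of the driver). The function and parameter names are
 * hypothetical; in the driver the wakeup_source pointers are dhd_info
 * fields such as wl_wifi.
 */
static inline void
dhd_wake_lock_usage_sketch(struct wakeup_source **ws, struct device *dev)
{
	dhd_wake_lock_init(*ws, dev, "wlan_wake");	/* register the source */
	dhd_wake_lock(*ws);		/* hold: keeps the system awake */
	if (dhd_wake_lock_active(*ws))
		dhd_wake_unlock(*ws);	/* release */
	dhd_wake_lock_destroy(*ws);	/* unregister */
}
#endif /* CONFIG_HAS_WAKELOCK */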

#endif /* __DHD_LINUX_PRIV_H__ */